# Agave Platform Science API
#
# Power your digital lab and reduce the time from theory to discovery using the Agave Science-as-a-Service API Platform. Agave provides hosted services that allow researchers to manage data, conduct experiments, and publish and share results from anywhere at any time.
#
# Agave Platform version: 2.2.14
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApplicationArgumentDetails Class
#'
#'
#'
#' @field description Description of this argument.
#' @field label The label for this argument.
#' @field argument The command line form of this argument (e.g. -n, --name, -name)
#' @field showArgument Whether the argument value should be passed into the wrapper at run time.
#' @field repeatArgument Whether the argument value should be repeated in front of each user-supplied input before injection into the wrapper template at runtime.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ApplicationArgumentDetails <- R6::R6Class(
'ApplicationArgumentDetails',
public = list(
`description` = NULL,
`label` = NULL,
`argument` = NULL,
`showArgument` = NULL,
`repeatArgument` = NULL,
initialize = function(`description`, `label`, `argument`, `showArgument`, `repeatArgument`){
if (!missing(`description`)) {
stopifnot(is.character(`description`), length(`description`) == 1)
self$`description` <- `description`
}
if (!missing(`label`)) {
stopifnot(is.character(`label`), length(`label`) == 1)
self$`label` <- `label`
}
if (!missing(`argument`)) {
stopifnot(is.character(`argument`), length(`argument`) == 1)
self$`argument` <- `argument`
}
if (!missing(`showArgument`)) {
stopifnot(is.logical(`showArgument`), length(`showArgument`) == 1)
self$`showArgument` <- `showArgument`
}
if (!missing(`repeatArgument`)) {
stopifnot(is.logical(`repeatArgument`), length(`repeatArgument`) == 1)
self$`repeatArgument` <- `repeatArgument`
}
},
asJSON = function() {
self$toJSON()
},
toJSON = function() {
ApplicationArgumentDetailsObject <- list()
if (!is.null(self$`description`)) {
ApplicationArgumentDetailsObject[['description']] <- self$`description`
}
else {
ApplicationArgumentDetailsObject[['description']] <- NULL
}
if (!is.null(self$`label`)) {
ApplicationArgumentDetailsObject[['label']] <- self$`label`
}
else {
ApplicationArgumentDetailsObject[['label']] <- NULL
}
if (!is.null(self$`argument`)) {
ApplicationArgumentDetailsObject[['argument']] <- self$`argument`
}
else {
ApplicationArgumentDetailsObject[['argument']] <- NULL
}
if (!is.null(self$`showArgument`)) {
ApplicationArgumentDetailsObject[['showArgument']] <- self$`showArgument`
}
else {
ApplicationArgumentDetailsObject[['showArgument']] <- NULL
}
if (!is.null(self$`repeatArgument`)) {
ApplicationArgumentDetailsObject[['repeatArgument']] <- self$`repeatArgument`
}
else {
ApplicationArgumentDetailsObject[['repeatArgument']] <- NULL
}
ApplicationArgumentDetailsObject
},
fromJSON = function(ApplicationArgumentDetailsObject) {
if (is.character(ApplicationArgumentDetailsObject)) {
ApplicationArgumentDetailsObject <- jsonlite::fromJSON(ApplicationArgumentDetailsObject)
}
if ("result" %in% names(ApplicationArgumentDetailsObject)) {
ApplicationArgumentDetailsObject <- ApplicationArgumentDetailsObject$result
}
if (!is.null(ApplicationArgumentDetailsObject$`description`)) {
self$`description` <- ApplicationArgumentDetailsObject$`description`
}
if (!is.null(ApplicationArgumentDetailsObject$`label`)) {
self$`label` <- ApplicationArgumentDetailsObject$`label`
}
if (!is.null(ApplicationArgumentDetailsObject$`argument`)) {
self$`argument` <- ApplicationArgumentDetailsObject$`argument`
}
if (!is.null(ApplicationArgumentDetailsObject$`showArgument`)) {
self$`showArgument` <- ApplicationArgumentDetailsObject$`showArgument`
}
if (!is.null(ApplicationArgumentDetailsObject$`repeatArgument`)) {
self$`repeatArgument` <- ApplicationArgumentDetailsObject$`repeatArgument`
}
},
toJSONString = function() {
sprintf(
'{
"description": %s,
"label": %s,
"argument": %s,
"showArgument": %s,
"repeatArgument": %s
}',
ifelse(is.null(self$`description`), "null", paste0('"', self$`description`, '"')),
ifelse(is.null(self$`label`), "null", paste0('"', self$`label`, '"')),
ifelse(is.null(self$`argument`), "null", paste0('"', self$`argument`, '"')),
ifelse(is.null(self$`showArgument`), "null", tolower(as.character(self$`showArgument`))),
ifelse(is.null(self$`repeatArgument`), "null", tolower(as.character(self$`repeatArgument`)))
)
},
fromJSONString = function(ApplicationArgumentDetailsJson) {
ApplicationArgumentDetailsObject <- jsonlite::fromJSON(ApplicationArgumentDetailsJson)
self$fromJSON(ApplicationArgumentDetailsObject)
}
)
)
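# Example usage: a minimal sketch (the field values below are illustrative,
# not taken from the Agave documentation); build an argument description,
# serialize it, and round-trip it back into a fresh object.
details <- ApplicationArgumentDetails$new(
`description` = "Number of worker processes",
`label` = "Processes",
`argument` = "-n",
`showArgument` = TRUE,
`repeatArgument` = FALSE
)
json <- details$toJSONString()
copy <- ApplicationArgumentDetails$new()
copy$fromJSONString(json)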
#include "foxy/client.hpp"
#include "foxy/coroutine.hpp"
#include <string>
#include <iostream>
#include <boost/core/ignore_unused.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/beast/http.hpp>
#include <boost/container/pmr/polymorphic_allocator.hpp>
#include <boost/container/pmr/unsynchronized_pool_resource.hpp>
#include <catch.hpp>
namespace asio = boost::asio;
namespace http = boost::beast::http;
namespace beast = boost::beast;
namespace
{
auto make_req_with_allocator(
asio::io_context& io
) -> foxy::awaitable<void, asio::io_context::executor_type>
{
auto token = co_await foxy::this_coro::token();
auto const* const host = "www.google.com";
auto const* const port = "80";
auto const version = 11; // HTTP/1.1
auto request = http::request<http::empty_body>(http::verb::get, "/", version);
boost::container::pmr::unsynchronized_pool_resource pool;
using allocator_type = boost::container::pmr::polymorphic_allocator<char>;
using res_body_type = http::basic_string_body<
char,
std::char_traits<char>,
allocator_type>;
boost::beast::basic_flat_buffer<allocator_type> buffer(allocator_type(std::addressof(pool)));
http::response_parser<res_body_type, allocator_type>
parser(
std::piecewise_construct,
std::make_tuple(allocator_type(std::addressof(pool))),
std::make_tuple(allocator_type(std::addressof(pool))));
auto serializer = http::request_serializer<http::empty_body>(request);
co_await foxy::async_send_request(
io, host, port,
std::move(serializer), parser, buffer,
token);
auto response = parser.release();
CHECK(response.body().size() > 0);
CHECK(response.result_int() == 200);
}
} // anonymous
TEST_CASE("Our allocator-aware HTTP client")
{
SECTION("supports allocator-awareness")
{
asio::io_context io;
foxy::co_spawn(
io, [&]() { return make_req_with_allocator(io); }, foxy::detached);
io.run();
}
}
(**********************************************************************************
* PredomCore.v *
* Formalizing Domains, Ultrametric Spaces and Semantics of Programming Languages *
* Nick Benton, Lars Birkedal, Andrew Kennedy and Carsten Varming *
* July 2010 *
* Build with Coq 8.2pl1 plus SSREFLECT *
**********************************************************************************)
Require Export ssreflect ssrnat ssrbool eqtype.
Set Automatic Coercions Import.
Require Export Categories.
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
Unset Automatic Introduction.
(** printing -c> %\cont% *)
(** printing -m> %\mon% *)
(** %\include{macros}% *)
(** printing O1 %\ensuremath{O_1}% *)
(** printing O2 %\ensuremath{O_2}% *)
(** printing O3 %\ensuremath{O_3}% *)
(** printing O4 %\ensuremath{O_4}% *)
(** printing D1 %\ensuremath{D_1}% *)
(** printing D2 %\ensuremath{D_2}% *)
(** printing D3 %\ensuremath{D_3}% *)
(** printing x1 %\ensuremath{x_1}% *)
(** printing x2 %\ensuremath{x_2}% *)
(** printing x3 %\ensuremath{x_3}% *)
(** printing y1 %\ensuremath{y_1}% *)
(** printing y2 %\ensuremath{y_2}% *)
(** printing y3 %\ensuremath{y_3}% *)
(** printing p1 %\ensuremath{p_1}% *)
(** printing p2 %\ensuremath{p_2}% *)
(** printing p3 %\ensuremath{p_3}% *)
(** printing natO %\natO% *)
(** printing nat %\nat% *)
(** printing lub %\lub% *)
(** printing cpo %\cpo% *)
(** printing ord %\ord% *)
(** ** Ordered type *)
(*=Ole *)
Module PreOrd.
Definition axiom T (Ole : T -> T -> Prop) :=
forall x , Ole x x /\ forall y z, (Ole x y -> Ole y z -> Ole x z).
Record mixin_of T := Mixin {Ole : T -> T -> Prop; _ : axiom Ole}.
Notation class_of := mixin_of (only parsing).
Lemma setAxiom T (c:mixin_of T):Setoid.axiom (fun x y => Ole c x y /\ Ole c y x). (*CLEAR*)
Proof.
move => T.
case => le A. split ; last split.
- move => x. by split ; apply (proj1 (A x)).
- move => x y z. simpl. case => l0 l1. case => l2 l3.
by split ; [apply (proj2 (A x) y z l0 l2) | apply (proj2 (A z) y x l3 l1)].
- move => x y. simpl. case => l0 l1. by split.
Qed.
(*CLEARED*) Coercion base2 T (c:class_of T) : Setoid.class_of T := Setoid.Mixin (setAxiom c).
Structure type := Pack {sort :> Type; _ : class_of sort; _ : Type}.
Definition class cT := let: Pack _ c _ := cT return class_of cT in c. (*CLEAR*)
Definition unpack K (k : forall T (c : class_of T), K T c) cT :=
let: Pack T c _ := cT return K _ (class cT) in k _ c.
Definition repack cT : _ -> Type -> type := let k T c p := p c in unpack k cT.
Definition pack T c := @Pack T c T.
(* Definition pack T c := @Pack T (Class c) T. *)
(*CLEARED*)
Coercion setoidType (cT:type) := Setoid.Pack (class cT) cT.
End PreOrd.
Notation ordType := PreOrd.type.
Notation OrdMixin := PreOrd.Mixin.
Notation OrdType := PreOrd.pack.
Canonical Structure PreOrd.setoidType.
(*=End *)
Definition Ole := (fun (O:ordType) => PreOrd.Ole (PreOrd.class O)).
Implicit Arguments Ole [O].
Lemma Ole_refl (O:ordType) (x:O) : Ole x x.
unfold Ole. case. simpl. move => O. case.
move => le A _ x. by apply (proj1 (A x)).
Qed.
Lemma Ole_trans (O:ordType) (x y z:O) : Ole x y -> Ole y z -> Ole x z.
case. simpl. move => O. case.
move => le A T x y z L L'.
by apply (proj2 (A x) y z L L').
Qed.
Hint Resolve Ole_refl Ole_trans.
(*Hint Extern 2 (Ole (o:=?X1) ?X2 ?X3 ) => simpl Ole.*)
Bind Scope O_scope with PreOrd.sort.
Delimit Scope O_scope with PreOrd.
(** printing <= %\ensuremath{\sqsubseteq}% *)
Infix "<=" := Ole : O_scope.
Open Scope O_scope.
Arguments Scope Ole [O_scope _ _].
(*=Pointed *)
Module Pointed.
Definition axiom (T:ordType) (l:T) := forall x, l <= x.
Record mixin_of (T:ordType) := Mixin {least_elem : T; _ : axiom least_elem}. (*CLEAR*)
Lemma leastP (T:ordType) (X:mixin_of T) : forall x, (least_elem X) <= x.
move => T; case. simpl. move => l A x. by apply A.
Qed.
(*CLEARED*)
Record class_of T := Class
{ base :> PreOrd.class_of T; ext :> mixin_of (PreOrd.Pack base T)}. (*CLEAR*)
Structure type := Pack {sort :> Type; _ : class_of sort; _ : Type}.
Definition class cT := let: Pack _ c _ := cT return class_of cT in c.
Definition unpack K (k : forall T (c : class_of T), K T c) cT :=
let: Pack T c _ := cT return K _ (class cT) in k _ c.
Definition repack cT : _ -> Type -> type := let k T c p := p c in unpack k cT.
Definition pack := let k T c m := Pack (@Class T c m) T in PreOrd.unpack k.
(*CLEARED*)
Coercion ordType cT := PreOrd.Pack (class cT) cT.
Definition setoidType cT := Setoid.Pack (class cT) cT.
Definition least cT := least_elem (class cT). (*CLEAR*)
Implicit Arguments least [cT].
(*CLEARED*)
End Pointed. (*CLEAR*)
Notation pointedType := Pointed.type.
Notation PointedMixin := Pointed.Mixin.
Notation PointedType := Pointed.pack.
(*CLEARED*)
Notation PBot := Pointed.least.
Canonical Structure Pointed.ordType.
Canonical Structure Pointed.setoidType.
Lemma leastP (O:pointedType) (x:O) : PBot <= x. (*CLEAR*)
move => O x. by apply Pointed.leastP.
Qed.
(*CLEARED*)
(*=End *)
Lemma discrete_ordAxiom T : PreOrd.axiom (fun (x y : T) => x = y).
move => T a. split ; first by [].
by move => y z ; apply trans_eq.
Qed.
Canonical Structure discrete_ordMixin T := OrdMixin (@discrete_ordAxiom T).
Definition discrete_ordType T := Eval hnf in OrdType (discrete_ordMixin T).
Lemma Ole_refl_eq : forall (O:ordType) (x y:O), x = y -> x <= y.
intros O x y H; rewrite H; auto.
Save.
Hint Resolve Ole_refl_eq.
Lemma Ole_antisym : forall (O:ordType) (x y:O), x <= y -> y <= x -> x =-= y.
unfold Ole. unfold tset_eq. simpl.
case. move => T. case. simpl. move => X Y x y l0 l1. by split.
Save.
Hint Immediate Ole_antisym.
Definition Oeq_refl (O:ordType) := @tset_refl O.
Hint Resolve Oeq_refl.
Lemma Oeq_refl_eq : forall (O:ordType) (x y:O), x=y -> x =-= y.
intros O x y H; rewrite H; auto.
Save.
Hint Resolve Oeq_refl_eq.
Lemma Oeq_sym : forall (O:ordType) (x y:O), x =-= y -> y =-= x.
move => O x y X. by apply: tset_sym.
Save.
Lemma Oeq_le : forall (O:ordType) (x y:O), x =-= y -> x <= y.
move => O. move => x y. case. move => A B. by apply A.
Save.
Lemma Oeq_le_sym : forall (O:ordType) (x y:O), x =-= y -> y <= x.
move => O. move => x y. case. move => A B. by apply B.
Save.
Hint Resolve Oeq_le.
Hint Immediate Oeq_sym Oeq_le_sym.
Lemma Oeq_trans : forall (O:ordType) (x y z:O), x =-= y -> y =-= z -> x =-= z.
move => O. apply (@tset_trans O).
Save.
Hint Resolve Oeq_trans.
(** *** Setoid relations *)
Add Parametric Relation (O:ordType) : O (@tset_eq O : O -> O -> Prop)
reflexivity proved by (@Oeq_refl O) symmetry proved by (@Oeq_sym O)
transitivity proved by (@Oeq_trans O) as Oeq_Relation.
Add Parametric Relation (O:ordType) : O (@Ole O)
reflexivity proved by (@Ole_refl O)
transitivity proved by (@Ole_trans O) as Ole_Relation.
Instance Oeq_sub_ord_morpoh_eq (D:ordType) : subrelation (@tset_eq D : D -> D -> Prop) (@Ole D).
move => D d d' x. by apply (proj1 x).
Qed.
Add Parametric Morphism (O:ordType) : (@Ole O)
with signature (@tset_eq O : O -> O -> Prop) ==> (@tset_eq O : O -> O -> Prop) ==> iff as Ole_eq_compat_iff.
split; intuition.
apply Ole_trans with x; firstorder using Ole_trans.
apply Ole_trans with y.
firstorder.
apply Ole_trans with y0.
assumption.
intuition.
Save.
Lemma Ole_eq_compat :
forall (O : ordType) (x1 x2 : O),
x1 =-= x2 -> forall x3 x4 : O, x3 =-= x4 -> x1 <= x3 -> x2 <= x4.
move => O x1 x2 e x3 x4 e'. rewrite -> e. by rewrite -> e'.
Save.
Lemma Ole_eq_right : forall (O : ordType) (x y z: O),
x <= y -> y =-= z -> x <= z.
move => O x y z l e. rewrite l. by rewrite e.
Save.
Lemma Ole_eq_left : forall (O : ordType) (x y z: O),
x =-= y -> y <= z -> x <= z.
move => O x y z e l. rewrite e. by rewrite l.
Save.
(** ** Monotonicity *)
(** *** Definition and properties *)
(*=monotonic *)
Definition monotonic (O1 O2 : ordType) (f : O1->O2) := forall x y, x <= y -> f x <= f y.
Module FMon. Section fmon.
Variable O1 O2 : ordType.
Record mixin_of (f:O1 -> O2) := Mixin { ext :> monotonic f}. (*CLEAR*)
Notation class_of := mixin_of (only parsing).
(*CLEARED*)
Structure type : Type := Pack {sort :> O1 -> O2; _ : class_of sort; _ : O1 -> O2}. (*CLEAR*)
Definition class cT := let: Pack _ c _ := cT return class_of cT in c.
Definition unpack K (k : forall T (c : class_of T), K T c) cT :=
let: Pack T c _ := cT return K _ (class cT) in k _ c.
Definition repack cT : _ -> Type -> type := let k T c p := p c in unpack k cT.
Definition pack f (c:class_of f) := @Pack f c f.
(*CLEARED*)End fmon. End FMon.
Notation fmono := FMon.type. (*CLEAR*)
Notation fmonoMixin := FMon.Mixin.
Notation fmonoType := FMon.pack. (*CLEARED*)
(*=End *)
Lemma fmonotonic O1 O2 (f:fmono O1 O2) : monotonic f.
move => O1 O2. case. simpl. move => f. by case.
Qed.
Hint Resolve fmonotonic.
Hint Unfold monotonic.
Definition stable (O1 O2:ordType) (f : O1 -> O2) := forall x y, x =-= y -> f x =-= f y.
Hint Unfold stable.
Lemma monotonic_stable : forall (O1 O2 : ordType) (f:O1 -> O2),
monotonic f -> stable f.
unfold monotonic, stable. move => O0 O1 f X x y e. split ; apply X ; by case e.
Save.
Hint Resolve monotonic_stable.
(*=fmonoOrd *)
Lemma fmono_axiom (O1 O2:ordType) :
PreOrd.axiom (fun f g:fmono O1 O2 => forall x, f x <= g x). (*CLEAR*)
Proof.
move => O1 O2 x. split ; first by [].
move => y z L L' e ; by apply (Ole_trans (L e) (L' e)).
Qed.
(*CLEARED*)
Canonical Structure fmono_ordMixin (T T':ordType) := OrdMixin (@fmono_axiom T T').
Canonical Structure fmono_ordType T T' :=
Eval hnf in OrdType (fmono_ordMixin T T').
(*=End *)
(* fmon <= *)
Definition fmon_less (A B: ordType): relation (fmono A B) := (@Ole (@fmono_ordType A B)).
Lemma fmon_less_preorder (A B: ordType): PreOrder (@fmon_less A B).
intros. split. red; intros. by apply Ole_refl.
red; intros x y z. by apply Ole_trans.
Qed.
Existing Instance fmon_less_preorder.
Add Parametric Morphism (A B : ordType) :
(@FMon.sort A B) with signature (@fmon_less A B ==> @Ole A ==> @Ole B)
as fmon_le_compat.
intros. apply Ole_trans with (y x0) ; first by auto.
by apply (fmonotonic y).
Qed.
(* fmon == *)
Definition fmon_equal (A B: ordType): relation (fmono A B) := (@tset_eq (@fmono_ordType A B)).
Lemma fmon_equal_equivalence (A B: ordType): Equivalence (@fmon_equal A B).
intros. split. intros x. by apply Oeq_refl.
intros x y E. split ; first by apply (Oeq_sym E). by apply E.
intros x y z E0 E1. by apply (Oeq_trans E0 E1).
Qed.
Existing Instance fmon_equal_equivalence.
Add Parametric Morphism (A B : ordType) :
(@FMon.sort A B) with signature (@fmon_equal A B ==> @tset_eq A ==> @tset_eq B)
as fmon_eq_compat.
move => f g fg x y xy. apply Oeq_trans with (g x) ; first by split ; [apply (proj1 fg) | apply (proj2 fg)].
by apply (monotonic_stable (fmonotonic g)).
Qed.
Lemma id_mon (O:ordType) : @monotonic O O id.
by move => O x y.
Qed.
Definition oidM (O:ordType) := fmonoMixin (@id_mon O).
Definition oid (O:ordType) := Eval hnf in FMon.pack (oidM O).
Lemma ordMorphSetoidAxiom (O0 O1:ordType) : @Setoid.axiom (fmono O0 O1) (@tset_eq (fmono_ordType O0 O1)).
move => O1 O2. split ; first by []. split.
- by apply: Oeq_trans.
- by apply: Oeq_sym.
Qed.
Canonical Structure ordMorphSetoidMixin O0 O1 := SetoidMixin (ordMorphSetoidAxiom O0 O1).
Canonical Structure ordMorphSetoidType O0 O1 := Eval hnf in SetoidType (ordMorphSetoidMixin O0 O1).
Lemma comp_mon (O1 O2 O3:ordType) (f:fmono O2 O3) (g:fmono O1 O2) : monotonic (fun x => f (g x)).
move => O1 O2 O3 f g x y l.
by do 2 apply fmonotonic.
Qed.
Definition ocompM (O1 O2 O3:ordType) (f:fmono O2 O3) (g:fmono O1 O2) := fmonoMixin (comp_mon f g).
Definition ocomp (O1 O2 O3:ordType) (f:fmono O2 O3) (g:fmono O1 O2) := Eval hnf in fmonoType (ocompM f g).
Lemma ordCatAxiom : Category.axiom ocomp oid.
split ; first by move => O0 O1 f ; split.
split ; first by move => O0 O1 f ; split.
split ; first by move => O0 O1 O2 O3 f g h ; split.
move => O0 O1 O2 f f' g g' e e' ; split => x ; apply Ole_trans with (y:=f (g' x)).
- simpl. apply fmonotonic. by apply (proj1 e').
- simpl. by apply (proj1 e).
- simpl. by apply (proj2 e).
- simpl. apply fmonotonic. by apply (proj2 e').
Qed.
Canonical Structure ordCatMixin := CatMixin ordCatAxiom.
Canonical Structure ordCatType := Eval hnf in CatType ordCatMixin.
Open Scope C_scope.
Lemma fmon_stable : forall (O1 O2 : ordType) (f:O1 =-> O2), stable f.
intros; apply monotonic_stable; auto.
Save.
Hint Resolve fmon_stable.
Definition mk_fmonoM (O1 O2:ordType) (f:O1 -> O2) (m:monotonic f) := fmonoMixin m.
Definition mk_fmono (O1 O2:ordType) (f:O1 -> O2) (m:monotonic f) : fmono O1 O2 := Eval hnf in fmonoType (mk_fmonoM m).
Section ordCatProd.
Variable O1 O2 : ordType.
Lemma prod_ord_axiom : PreOrd.axiom (fun x y :O1 * O2 => fst x <= fst y /\ snd x <= snd y).
move => x ; split ; clear ; first by case: x.
move => y z. case: x => x0 x1. case: y => y0 y1. case: z => z0 z1. simpl. move => [A B] [C D].
split ; by [apply Ole_trans with y0 | apply Ole_trans with y1].
Qed.
Canonical Structure prod_ordMixin := OrdMixin prod_ord_axiom.
Canonical Structure prod_ordType := Eval hnf in OrdType prod_ordMixin.
Lemma fst_mon : monotonic (@fst O1 O2).
case => x1 x2. case => y1 y2. by case => X Y.
Qed.
Definition Fst := Eval hnf in mk_fmono fst_mon.
Lemma snd_mon : monotonic (@snd O1 O2).
case => x1 x2. case => y1 y2. by case => X Y.
Qed.
Definition Snd := Eval hnf in mk_fmono snd_mon.
Lemma Prod_fun_mono Z (f:Z =-> O1) (g:Z =-> O2) : monotonic (fun p => (f p, g p)).
move => Z f g x y l. by split ; apply fmonotonic.
Qed.
Definition Prod_fun Z (f:Z =-> O1) (g:Z =-> O2) := Eval hnf in mk_fmono (Prod_fun_mono f g).
End ordCatProd.
Lemma fmon_eq_intro : forall (O1 O2:ordType) (f g:O1 =-> O2), (forall n, f n =-= g n) -> f =-= g.
move => O0 O1 f g X. split => x. by apply (proj1 (X x)). by apply (proj2 (X x)).
Save.
Hint Resolve fmon_eq_intro.
Lemma fmon_eq_elim : forall (O1 O2:ordType) (f g:O1 =-> O2), f =-= g ->forall n, f n =-= g n.
move => O1 O2 f g e n. split ; by [apply (proj1 e) | apply (proj2 e)].
Save.
Hint Immediate fmon_eq_elim.
Lemma ordProdCatAxiom : @CatProduct.axiom _ prod_ordType (@Fst) (@Snd) Prod_fun.
move => O0 O1 O2 f g. split. split ; by apply: fmon_eq_intro.
move => h [X Y]. apply: fmon_eq_intro => x. have X':=fmon_eq_elim X x. have Y':=fmon_eq_elim Y x.
simpl in X', Y'. by split ; destruct X'; destruct Y'.
Qed.
Canonical Structure ordProdCatMixin := prodCatMixin ordProdCatAxiom.
Canonical Structure ordProdCatType := Eval hnf in prodCatType ordProdCatMixin.
Add Parametric Morphism (O0 O1:ordType) : (@pair O0 O1)
with signature (@Ole O0) ++> (@Ole O1) ++> (@Ole (O0 * O1))
as pair_le_compat.
move => x y e x' y' e'. by split.
Qed.
Add Parametric Morphism (O0 O1:ordType) : (@pair O0 O1)
with signature (@tset_eq O0 : O0 -> O0 -> Prop) ==> (@tset_eq O1 : O1 -> O1 -> Prop) ==> (@tset_eq (O0 * O1))
as pair_eq_compat.
move => x y e x' y' e'. split. by rewrite -> e ; rewrite -> e'.
by rewrite <- e ; rewrite <- e'.
Qed.
Lemma pair1_mon (O0 O1 : ordType) (x:O0) : monotonic (fun (y:O1) => (x,y)).
move => O0 O1 x y y' l. by rewrite -> l.
Qed.
Definition Pair (O0 O1 : ordType) (x:O0) : O1 =-> (O0 * O1) := Eval hnf in mk_fmono (pair1_mon x).
Lemma Curry_mono (O0 O1 O2 : ordType) (f:O0 * O1 =-> O2) : monotonic (fun x => f << Pair _ x).
move => O1 O2 O3 f x x' L y. simpl. by apply fmonotonic.
Qed.
Definition Curry (O0 O1 O2 : ordType) (f:O0 * O1 =-> O2) : O0 =-> fmono_ordType O1 O2 :=
Eval hnf in mk_fmono (Curry_mono f).
Lemma Ev_mon O0 O1 : monotonic (fun (p:fmono_ordType O0 O1 * O0) => fst p (snd p)).
move => O0 O1. case => f x. case => f' x'. simpl. case ; simpl => L L'.
rewrite -> L. by rewrite -> L'.
Qed.
Definition Ev O0 O1 : (fmono_ordType O0 O1) * O0 =-> O1 := Eval hnf in mk_fmono (@Ev_mon O0 O1).
Lemma ordExpAxiom : @CatExp.axiom _ fmono_ordType (@Ev) (@Curry).
move => O0 O1 O2 h. split ; first by apply: fmon_eq_intro ; case.
move => h' X. apply: fmon_eq_intro => x. apply: fmon_eq_intro => y.
simpl. by apply (fmon_eq_elim X (x,y)).
Qed.
Canonical Structure ordExpMixin := expCatMixin ordExpAxiom.
Canonical Structure ordExpType := Eval hnf in expCatType ordExpMixin.
(*=natO *)
Lemma natO_axiom : PreOrd.axiom (fun n m : nat => leq n m).
(*CLEAR*)
Proof.
move => x ; split ; clear ; first by [].
move => y z ; by apply leq_trans.
Qed.
(*CLEARED*)Canonical Structure natO_ordMixin := OrdMixin natO_axiom.
Canonical Structure natO_ordType := Eval hnf in OrdType (natO_ordMixin).
Notation natO := natO_ordType.
(*=End *)
Lemma natO_le (x y : natO) : (x <= y) = (x <= y)%N.
by [].
Qed.
(*=CPO *)
Module CPO.
Definition axiom (T:ordType) (lub : (natO =-> T) -> T) :=
forall (c:natO =-> T) x n, (c n <= lub c) /\ ((forall n, c n <= x) -> lub c <= x).
Record mixin_of (T:ordType) : Type := Mixin {lub: (natO =-> T) -> T; _ : axiom lub }.
Record class_of (T:Type) : Type :=
Class {base :> PreOrd.class_of T; ext :> mixin_of (PreOrd.Pack base T) }. (*CLEAR*)
Structure type : Type := Pack { sort :> Type; _ : class_of sort ; _ : Type}.
Definition class cT := let: Pack _ c _ := cT return class_of cT in c.
Definition unpack K (k : forall T (c : class_of T), K T c) cT :=
let: Pack T c _ := cT return K _ (class cT) in k _ c.
Definition repack cT : _ -> Type -> type := let k T c p := p c in unpack k cT.
Definition pack := let k T c m := Pack (@Class T c m) T in PreOrd.unpack k.
(*CLEARED*)
Coercion ordType cT := PreOrd.Pack (class cT) cT.
Definition setoidType cT := Setoid.Pack (class cT) cT.
End CPO.
Notation cpoType := CPO.type.
Notation CpoMixin := CPO.Mixin.
Notation CpoType := CPO.pack.
Canonical Structure CPO.ordType.
Canonical Structure CPO.setoidType.
(*=End *)
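(** Informal gloss (not part of the development): [CPO.axiom] packages the two
    defining properties of the least upper bound of a chain [c : natO =-> T],
    namely %\[ \forall n,\ c\,n \sqsubseteq \bigsqcup c
    \qquad\text{and}\qquad (\forall n,\ c\,n \sqsubseteq x) \Rightarrow
    \bigsqcup c \sqsubseteq x. \]% *)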
Bind Scope D_scope with CPO.sort.
Delimit Scope D_scope with CPO.
Open Scope D_scope.
Definition lub (T:cpoType) : (natO =-> T) -> T := Eval hnf in CPO.lub (CPO.class T).
Lemma le_lub (T:cpoType) (c : natO =-> T) (n : nat) : c n <= lub c.
case. simpl. move => T. case. simpl. move => B. case. simpl. move => l A T' c n.
by apply (proj1 (A c (c 0) n)).
Qed.
Lemma lub_le (T:cpoType) (c : natO =-> T) (x : T) : (forall n, c n <= x) -> lub c <= x.
case. simpl. move => T. case. move => B. case. simpl. move => l A T' c x C.
by apply (proj2 (A c x O)).
Qed.
Hint Resolve le_lub lub_le.
Add Parametric Relation (O:cpoType) : O (@tset_eq O : O -> O -> Prop)
reflexivity proved by (@Oeq_refl O) symmetry proved by (@Oeq_sym O)
transitivity proved by (@Oeq_trans O) as Oeq_CpoRelation.
Add Parametric Relation (O:cpoType) : O (@Ole O)
reflexivity proved by (@Ole_refl O)
transitivity proved by (@Ole_trans O) as Ole_CpoRelation.
Add Parametric Morphism (c:cpoType) : (@lub c)
with signature (@Ole (fmono_ordType natO c) : (natO =-> c) -> (natO =-> c) -> Prop) ++> (@Ole c)
as lub_le_compat.
intros f g H; apply lub_le; intros.
apply Ole_trans with (g n); auto.
Save.
Hint Resolve lub_le_compat.
Add Parametric Morphism (c:cpoType) : (@lub c)
with signature (@tset_eq (natO =-> c) : (natO =-> c) -> (natO =-> c) -> Prop) ++> (@tset_eq c)
as lub_eq_compat.
move => f g H. split. simpl. rewrite -> (proj1 H). by apply: Ole_refl.
rewrite -> (proj2 H). by apply Ole_refl.
Save.
Hint Resolve lub_eq_compat.
Lemma lub_mon (D:cpoType) : monotonic (@lub D).
move => D f g l. by rewrite -> l.
Qed.
Definition Lub (D:cpoType) : (natO -=> D) =-> D := mk_fmono (@lub_mon D).
(*=Cont *)
Definition continuous (D1 D2 : cpoType) (f : ordCatType D1 D2) :=
forall c : natO =-> D1, f (lub c) <= lub (f << c).
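(** Informal gloss (not part of the development): monotonicity of [f] already
    gives [lub (f << c) <= f (lub c)] (see [lub_comp_le] below), so
    [continuous f] upgrades this inequality to the equality
    [f (lub c) =-= lub (f << c)], proved below as [lub_comp_eq]. *)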
Module FCont. Section fcont.
Variable O1 O2 : cpoType.
Record mixin_of (f:fmono O1 O2) := Mixin {cont :> continuous f }.
Record class_of (f : O1 -> O2) :=
Class {base :> FMon.mixin_of f; ext :> mixin_of (FMon.Pack base f) }. (*CLEAR*)
Structure type : Type := Pack {sort :> O1 -> O2; _ : class_of sort; _ : O1 -> O2}.
Definition class cT := let: Pack _ c _ := cT return class_of cT in c.
Definition unpack K (k : forall T (c : class_of T), K T c) cT :=
let: Pack T c _ := cT return K _ (class cT) in k _ c.
Definition repack cT : _ -> Type -> type := let k T c p := p c in unpack k cT.
Definition pack := let k T c m := Pack (@Class T c m) T in FMon.unpack k.
(*CLEARED*) Coercion fmono f : fmono O1 O2 := FMon.Pack (class f) f.
End fcont. End FCont.
Notation fcont := FCont.type. (*CLEAR*)
Notation fcontMixin := FCont.Mixin.
Notation fcontType := FCont.pack.
(*CLEARED*)
Canonical Structure FCont.fmono.
(*=End *)
Lemma fcontinuous O1 O2 (f:fcont O1 O2) : continuous f.
move => O1 O2. case. simpl. move => f. case => fm. by case.
Qed.
Hint Resolve fcontinuous.
Hint Unfold continuous.
Canonical Structure mk_fcontM (D0 D1:cpoType) (f:fmono D0 D1) (c:continuous (FMon.Pack (FMon.class f) f)) := fcontMixin c.
Definition mk_fcont (D0 D1:cpoType) (f:fmono D0 D1) (c:continuous (FMon.Pack (FMon.class f) f)) := Eval hnf in @fcontType D0 D1 f (mk_fcontM c).
Lemma oid_cont (D:cpoType) : continuous (oid D).
move => D c. by rewrite -> comp_idL.
Qed.
Definition cid (D:cpoType) := Eval hnf in mk_fcont (@oid_cont D).
Lemma ocomp_cont (D0 D1 D2 : cpoType) (f:fcont D1 D2) (g:fcont D0 D1) : continuous (ocomp f g).
move => D0 D1 D2 f g c. simpl. apply Ole_trans with (f (lub ((g:fmono D0 D1) << c))).
by apply: fmonotonic ; apply: fcontinuous.
rewrite -> (fcontinuous f ((g:fmono D0 D1) << c)). by rewrite comp_assoc.
Qed.
Definition ccomp (D0 D1 D2 : cpoType) (f:fcont D1 D2) (g:fcont D0 D1) := Eval hnf in mk_fcont (ocomp_cont f g).
Lemma fcont_ord_axiom (D1 D2 :cpoType) : PreOrd.axiom (fun (f g:fcont D1 D2) => (f:fmono D1 D2) <= g).
move => D1 D2 f. split ; first by [].
move => g h ; by apply Ole_trans.
Qed.
Canonical Structure fcont_ordMixin (D1 D2 :cpoType) := OrdMixin (@fcont_ord_axiom D1 D2).
Canonical Structure fcont_ordType (D1 D2 :cpoType) := Eval hnf in OrdType (fcont_ordMixin D1 D2).
Lemma cpoMorphSetoidAxiom (D0 D1 : cpoType) : @Setoid.axiom (fcont D0 D1) (@tset_eq (fcont_ordType D0 D1)).
move => D0 D1. split ; first by [].
split ; first by apply: Oeq_trans.
by apply: Oeq_sym.
Qed.
Canonical Structure cpoMorphSetoidMixin O0 O1 := SetoidMixin (cpoMorphSetoidAxiom O0 O1).
Canonical Structure cpoMorphSetoidType O0 O1 := Eval hnf in SetoidType (cpoMorphSetoidMixin O0 O1).
(* fcont <= *)
Definition fcont_less (A B: cpoType): relation (fcont A B) := (@Ole _).
Definition fcont_less_preorder (A B: cpoType): PreOrder (@fcont_less A B).
move => A B. split ;first by move => x.
move => x y z. by apply Ole_trans.
Defined.
Existing Instance fcont_less_preorder.
Add Parametric Morphism (A B : cpoType) :
(@FCont.sort A B) with signature (@fcont_less A B ++> @Ole A ++> @Ole B)
as fcont_le_compat.
move => x y l x' y' l'. apply Ole_trans with (y:=x y') ; first by apply: fmonotonic.
by apply l.
Qed.
(* fcont == *)
Definition fcont_equal (A B: cpoType): relation (fcont A B) := (@tset_eq _).
Definition fcont_equal_equivalence (A B: cpoType): Equivalence (@fcont_equal A B).
intros A B. split.
- move => x. by apply: Oeq_refl.
- move => x y. by apply: Oeq_sym.
- move => x y z. by apply: Oeq_trans.
Defined.
Existing Instance fcont_equal_equivalence.
Add Parametric Morphism (A B : cpoType) :
(@FCont.sort A B) with signature (@fcont_equal A B ==> @tset_eq A ==> @tset_eq B)
as fcont_eq_compat.
move => x y l x' y' l'. apply Oeq_trans with (y:=x y') ; first by apply: fmon_stable.
by apply (fmon_eq_elim l y').
Qed.
(*=cpoCat *)
Lemma cpoCatAxiom : Category.axiom ccomp cid.
(*CLEAR*)
split ; first by move => D0 D1 f ; apply: fmon_eq_intro.
split ; first by move => D0 D1 f ; apply: fmon_eq_intro.
split ; first by move => D0 D1 D2 D3 f g h ; apply: fmon_eq_intro.
move => D0 D1 D2 f f' g g' e e'. apply: fmon_eq_intro => x. simpl. rewrite -> e. by rewrite -> e'.
Qed.
(*CLEARED*)Canonical Structure cpoCatMixin := CatMixin cpoCatAxiom.
Canonical Structure cpoCatType := Eval hnf in CatType cpoCatMixin.
(*=End *)
Definition prod_lub (D1 D2:cpoType) (f : natO =-> prod_ordType D1 D2) := (lub (pi1 << f), lub (pi2 << f)).
Lemma prod_cpo_axiom D1 D2 : CPO.axiom (prod_lub (D1:=D1) (D2:=D2)).
move => D1 D2 c x n ; split ; clear.
apply Ole_trans with (fst (c n), snd (c n)) ; first by case (c n).
by split ; simpl ; rewrite <- (le_lub _ n).
move => C. by split ; simpl ; apply lub_le => m ; [apply: (fmonotonic (@Fst D1 D2)) | apply: (fmonotonic (@Snd D1 D2))].
Qed.
Canonical Structure prod_cpoMixin D1 D2 := CpoMixin (@prod_cpo_axiom D1 D2).
Canonical Structure prod_cpoType D1 D2 := Eval hnf in CpoType (prod_cpoMixin D1 D2).
Lemma Prod_fun_cont (D1 D2 D3:cpoType) (f:D1=->D2) (g:D1=->D3) : continuous (<|f : ordCatType _ _, g|>).
intros D1 D2 D3 f g c.
simpl. split ; simpl ; rewrite comp_assoc.
- rewrite (fcontinuous f). by rewrite -> prod_fun_fst.
- rewrite fcontinuous. by rewrite prod_fun_snd.
Qed.
Definition PROD_fun (D2 D3 D1:cpoCatType)(f:D1=->D2)(g:D1=->D3) : D1 =-> prod_cpoType D2 D3 :=
Eval hnf in mk_fcont (Prod_fun_cont f g).
Lemma Fst_cont (D1 D2 : cpoType) : continuous (@Fst D1 D2).
by move => D1 D2 c.
Qed.
Definition FST (D1 D2 : cpoType) : prod_cpoType D1 D2 =-> D1 := mk_fcont (@Fst_cont D1 D2).
Lemma Snd_cont (D1 D2 : cpoType) : continuous (@Snd D1 D2).
by move => D1 D2 c.
Qed.
Definition SND (D1 D2 : cpoType) : prod_cpoType D1 D2 =-> D2 := mk_fcont (@Snd_cont D1 D2).
Lemma cpoProdCatAxiom : @CatProduct.axiom _ prod_cpoType FST SND PROD_fun.
move => X Y Z f g.
split ; first by split ; [apply: (@prod_fun_fst ordProdCatType) | apply: (@prod_fun_snd ordProdCatType)].
move => h [A B].
by apply: (@prod_fun_unique ordProdCatType).
Qed.
Canonical Structure cpoProdCatMixin := prodCatMixin cpoProdCatAxiom.
Canonical Structure cpoProdCatType := Eval hnf in prodCatType cpoProdCatMixin.
Add Parametric Morphism (O0 O1:cpoType) : (@pair O0 O1)
with signature (@Ole O0 : O0 -> O0 -> Prop) ++> (@Ole O1 : O1 -> O1 -> Prop) ++> (@Ole (O0 * O1))
as pair_cpo_le_compat.
move => x y e x' y' e'. by split.
Qed.
Add Parametric Morphism (O0 O1:cpoType) : (@pair O0 O1)
with signature (@tset_eq O0 : O0 -> O0 -> Prop) ==> (@tset_eq O1 : O1 -> O1 -> Prop) ==> (@tset_eq (O0 * O1))
as pair_cpo_eq_compat.
move => x y e x' y' e'. case: e => e0 e1 ; case: e' => e2 e3. by split.
Qed.
(* my lemma *)
Lemma fcont_eq_intro : forall (A B : cpoType) (f g : A =-> B), (forall n, f n =-= g n) -> f =-= g.
move => O0 O1 f g X. split => x. by apply (proj1 (X x)). by apply (proj2 (X x)).
Save.
Hint Resolve fcont_eq_intro.
Lemma const_mon (O1 O2 : ordType) (y:O2) : monotonic (fun _ : O1 => y).
by move => O1 O2 y x x'.
Qed.
Definition fmon_cte (O1 O2 : ordType) (y:O2) : O1 =-> O2 := Eval hnf in mk_fmono (@const_mon O1 O2 y).
Lemma lub_const (D:cpoType) (x:D) : lub (fmon_cte natO x) =-= x.
move => D x. split. by apply: lub_le. by apply (le_lub (fmon_cte natO x) O).
Qed.
Definition Fcontit_mono (D1 D2:cpoType) : monotonic (fun f:fcont_ordType D1 D2 => (f:(D1:ordType) -=> D2)) :=
fun x y => id.
Definition Fcontit (D1 D2:cpoType) := Eval hnf in mk_fmono (@Fcontit_mono D1 D2).
Lemma eq_mono (D0 D1:ordType) (f:D0 -> D1) (g:D0 =-> D1) : (forall x, f x =-= g x) -> monotonic f.
move=> D0 D1 f g X x x' L. do 2 rewrite -> X. by rewrite L.
Qed.
Definition gen_mono (D0 D1:ordType) (f:D0 -> D1) (g:D0 =-> D1) (X:forall x, f x =-= g x) : D0 =-> D1 :=
Eval hnf in mk_fmono (eq_mono X).
Lemma fcont_app_def_eq (O:ordType) (D1 D2:cpoType) (f: O =-> (fcont_ordType D1 D2)) (x:D1) :
forall y, (fun y => f y x) y =-= (exp_fun(uncurry (Fcontit _ _ << f) << <|pi2,pi1|>) x) y.
by [].
Qed.
Lemma fmon_app_def_eq (O D1 D2:ordType) (f: O =-> D1 -=> D2) (x:D1) :
forall y, (fun y => f y x) y =-= (exp_fun(uncurry (f:ordCatType _ _) << <|pi2,pi1|>) x) y.
by [].
Qed.
Definition fmon_app (O D1 D2:ordType) (f: O =-> D1 -=> D2) (x:D1) : O =-> D2 :=
Eval hnf in gen_mono (fmon_app_def_eq f x).
Lemma fmon_app_eq (O D1 D2:ordType) (f: O =-> D1 -=> D2) (x:D1) :
fmon_app f x =-= (exp_fun((uncurry f:ordCatType _ _) << <|pi2,pi1|>) x).
move => O D1 D2 f x. by apply fmon_eq_intro => y.
Qed.
Definition fcont_app (O:ordType) (D1 D2:cpoType) (f: O =-> (fcont_ordType D1 D2)) (x:D1) : O =-> D2 :=
Eval hnf in gen_mono (fcont_app_def_eq f x).
Lemma fcont_app_eq (O:ordType) (D1 D2:cpoType) (f: O =-> (fcont_ordType D1 D2)) (x:D1) :
fcont_app f x =-= (exp_fun((uncurry (Fcontit _ _ << f):ordCatType _ _) << <|pi2,pi1|>) x).
move => O D1 D2 f x. by apply fmon_eq_intro => y.
Qed.
Lemma fcont_lub_mono (D1 D2:cpoType) (c:natO =-> (fcont_ordType D1 D2)) : monotonic (fcont_app c).
move => D1 D2 c x y l n. simpl. by rewrite -> l.
Qed.
Lemma fcont_lub_cont (D1 D2:cpoType) (c:natO =-> (fcont_ordType D1 D2)) : continuous (Lub _ << mk_fmono (fcont_lub_mono c)).
move => D1 D2 c c'. simpl. apply lub_le => i. simpl.
rewrite (fcontinuous (c i)). apply lub_le_compat => j. simpl. by apply: (Ole_trans _ (le_lub _ i)).
Qed.
Definition fcont_lub (D1 D2:cpoType) (c:natO =-> (fcont_ordType D1 D2)) : D1 =-> D2 :=
Eval hnf in mk_fcont (fcont_lub_cont c).
Lemma fcont_lub_simpl (D1 D2:cpoType) (c:natO =-> (fcont_ordType D1 D2)) (x:D1) :
fcont_lub c x = lub (fcont_app c x).
by [].
Qed.
Lemma fcont_cpo_axiom (D0 D1:cpoType) : CPO.axiom (@fcont_lub D0 D1).
move => D0 D1 c x n. split.
- move => a. simpl. by rewrite <- (le_lub _ n).
- move => C. move => y. simpl. apply: lub_le => m. specialize (C m). simpl. by rewrite -> C.
Qed.
Canonical Structure fcont_cpoMixin (D0 D1:cpoType) := CpoMixin (@fcont_cpo_axiom D0 D1).
Canonical Structure fcont_cpoType (D0 D1:cpoType) := Eval hnf in CpoType (fcont_cpoMixin D0 D1).
Lemma fcont_app2_cont (D0 D1 D2:cpoType) (f: D0 =-> (fcont_cpoType D1 D2)) (x:D1) : continuous (fcont_app f x).
move => D0 D1 D2 f x c. simpl. rewrite (fcontinuous f). simpl. by apply lub_le_compat => n.
Qed.
Definition Fcont_app (D0 D1 D2:cpoType) (f: D0 =-> (fcont_cpoType D1 D2)) (x:D1) : D0 =-> D2 :=
Eval hnf in mk_fcont (fcont_app2_cont f x).
Lemma Pair_cont (D0 D1 : cpoType) (x:D0) : continuous (Pair D1 x).
move => D0 D1 x c. simpl. split.
- simpl. by apply: (Ole_trans _ (le_lub (pi1 << (Pair D1 x << c)) O)).
- simpl. by apply lub_le_compat => i.
Qed.
Definition PAIR (D0 D1 : cpoType) (x:D0) : D1 =-> D0 * D1 := Eval hnf in mk_fcont (Pair_cont x).
Lemma Curry2_mon (D0 D1 D2 : cpoType) (f:D0 * D1 =-> D2) : monotonic (fun x => f << PAIR _ x).
move => D0 D1 D2 f x x' l y. simpl. by rewrite -> l.
Qed.
Definition CURRY_mon (D0 D1 D2 : cpoType) (f:D0 * D1 =-> D2) := Eval hnf in mk_fmono (Curry2_mon f).
Lemma Curry2_cont (D0 D1 D2 : cpoType) (f:D0 * D1 =-> D2) : continuous (CURRY_mon f).
move => D1 D2 D3 f c x. simpl. rewrite {1} (Oeq_sym (lub_const x)).
rewrite - {1} (prod_fun_snd c (fmon_cte natO x)).
rewrite - {1} (prod_fun_fst c (fmon_cte natO x)).
apply: (Ole_trans (fcontinuous f _)).
by apply lub_le_compat => i.
Qed.
Definition CURRY (D0 D1 D2 : cpoType) (f:D0 * D1 =-> D2) := Eval hnf in mk_fcont (Curry2_cont f).
Lemma Ev1_mon (D0 D1 : cpoType) : monotonic (fun x : ((fcont_cpoType D0 D1) * D0) => (fst x) (snd x)).
move => D0 D1. simpl. case => f x. case => f' x'. case ; simpl => L L'.
rewrite -> L. by rewrite -> L'.
Qed.
Definition EV1 (D0 D1 : cpoType) := Eval hnf in mk_fmono (@Ev1_mon D0 D1).
Lemma Ev_cont (D0 D1 : cpoType) : continuous (@EV1 D0 D1).
move => D0 D1 c. simpl.
apply Ole_trans with ((lub (pi1 << c)) (lub (pi2 << c))) ; first by apply: lub_le_compat => n.
rewrite -> (fcontinuous (lub (pi1 << c)) (pi2 << c)).
apply lub_le => i. simpl. apply lub_le => j. simpl. apply: (Ole_trans _ (le_lub _ (j+i))).
simpl. apply Ole_trans with (Fst _ _ (c j) (Snd _ _ (c i))) ; first by [].
rewrite -> (fmonotonic c (leq_addl j i)). by rewrite -> (fmonotonic c (leq_addr i j)).
Qed.
Definition EV (D0 D1 : cpoType) : (fcont_cpoType D0 D1 * D0) =-> D1 := Eval hnf in mk_fcont (@Ev_cont D0 D1).
Lemma cpoExpAxiom : @CatExp.axiom _ fcont_cpoType (@EV) (@CURRY).
move => D0 D1 D2 h. split ; first by apply: fmon_eq_intro ; case.
move => h' X. apply: fmon_eq_intro => x. apply: fmon_eq_intro => y.
simpl. by apply (fmon_eq_elim X (x,y)).
Qed.
Canonical Structure cpoExpMixin := expCatMixin cpoExpAxiom.
Canonical Structure cpoExpType := Eval hnf in expCatType cpoExpMixin.
Module CPPO.
Record class_of (T:Type) : Type := Class
{ base1 :> CPO.class_of T;
ext :> Pointed.mixin_of (PreOrd.Pack base1 T)}.
Coercion base2 (T:Type) (c:class_of T) := Pointed.Class c.
Structure type : Type := Pack {sort :> Type; _ : class_of sort; _ : Type}.
Definition class cT := let: Pack _ c _ := cT return class_of cT in c.
Definition unpack K (k : forall T (c : class_of T), K T c) cT :=
let: Pack T c _ := cT return K _ (class cT) in k _ c.
Definition repack cT : _ -> Type -> type := let k T c p := p c in unpack k cT.
Definition pack := let k T c m := Pack (@Class T c m) T in CPO.unpack k.
Coercion cpoType (cT:type) := CPO.Pack (class cT) cT.
Definition ordType (cT:type) := PreOrd.Pack (class cT) cT.
Definition pointedType (cT:type) := Pointed.Pack (class cT) cT.
Definition setoidType (cT:type) := Setoid.Pack (class cT) cT.
End CPPO.
Notation cppoType := CPPO.type.
Notation CppoType := CPPO.pack.
Canonical Structure CPPO.cpoType.
Canonical Structure CPPO.ordType.
Canonical Structure CPPO.pointedType.
Canonical Structure CPPO.setoidType.
Definition strict (D E:cppoType) (f:D -> E) := f PBot =-= PBot.
Lemma cppoCatAxiom : @Category.axiom cppoType cpoMorphSetoidType (fun D0 D1 D2 => @ccomp D0 D1 D2) (fun D0 => @cid D0).
split ; first by move => D0 D1 D2 ; apply: (proj1 (cpoCatAxiom)).
split ; first by move => D0 D1 D2 ; apply: (proj1 (proj2 (cpoCatAxiom))).
split ; first by move => D0 D1 D2 D3 ; apply: (proj1 (proj2 (proj2 (cpoCatAxiom)))).
move => D0 D1 D2 ; apply: (proj2 (proj2 (proj2 (cpoCatAxiom)))).
Qed.
Canonical Structure cppoCatMixin := CatMixin cppoCatAxiom.
Canonical Structure cppoCatType := Eval hnf in CatType cppoCatMixin.
Lemma cppoProdPointedAxiom (D0 D1:cppoType) : Pointed.axiom (PBot:D0,PBot : D1).
move => D0 D1. split ; simpl ; apply: leastP.
Qed.
Canonical Structure prod_cppoMixin (D0 D1:cppoType) := PointedMixin (@cppoProdPointedAxiom D0 D1).
Canonical Structure prod_cppoType (D0 D1:cppoType) := Eval hnf in CppoType (prod_cppoMixin D0 D1).
Lemma cppoProdCatAxiom : @CatProduct.axiom _ prod_cppoType (fun A B => @FST A B) (fun A B => @SND A B) (fun A B C => @PROD_fun A B C).
move => X Y Z f g.
split ; first by apply (proj1 (@cpoProdCatAxiom X Y Z f g)).
move => m. by apply (proj2 (@cpoProdCatAxiom X Y Z f g) m).
Qed.
Canonical Structure cppoProdCatMixin := prodCatMixin cppoProdCatAxiom.
Canonical Structure cppoProdCatType := Eval hnf in prodCatType cppoProdCatMixin.
Lemma const_cont (D E : cpoType) (x:E) : continuous (fmon_cte D x).
move => D E x c. simpl. by apply: (Ole_trans _ (le_lub _ O)).
Qed.
Definition const (D E:cpoType) x : D =-> E := Eval hnf in mk_fcont (const_cont x).
Lemma const_com D E F x h : @const D E x =-= @const F E x << h.
move => D E F x h. by apply: fmon_eq_intro.
Qed.
Lemma fcont_cppo_axiom D0 (D1 : cppoType) : Pointed.axiom (const D0 (PBot:D1)).
move => D0 D1 f x. simpl. apply: leastP.
Qed.
Canonical Structure exp_cppoMixin D0 (D1:cppoType) := PointedMixin (@fcont_cppo_axiom D0 D1).
Canonical Structure exp_cppoType D0 (D1:cppoType) := Eval hnf in CppoType (exp_cppoMixin D0 D1).
Lemma cppoExpAxiom : @CatExp.axiom cppoProdCatType exp_cppoType (fun D0 D1 => @EV D0 D1) (fun D0 D1 D2 => @CURRY D0 D1 D2).
move => D0 D1 D2 h.
split. by apply (proj1 (@cpoExpAxiom D0 D1 D2 h)).
move => h'. by apply (proj2 (@cpoExpAxiom D0 D1 D2 h) h').
Qed.
Canonical Structure cppoExpMixin := expCatMixin cppoExpAxiom.
Canonical Structure cppoExpType := Eval hnf in expCatType cppoExpMixin.
Lemma discrete_cpoAxiom T : @CPO.axiom (discrete_ordType T) (fun c => c 0).
move => T c e n. split.
- have L:c 0 <= c n by apply fmonotonic.
have E:c 0 = c n by apply L. by rewrite E.
- move => X. by specialize (X 0).
Qed.
Canonical Structure discrete_cpoMixin T := CpoMixin (@discrete_cpoAxiom T).
Definition discrete_cpoType T := Eval hnf in CpoType (@discrete_cpoMixin (discrete_ordType T)).
Canonical Structure nat_cpoType := Eval hnf in discrete_cpoType nat.
Canonical Structure bool_cpoType := Eval hnf in discrete_cpoType bool.
Definition monic (D E:ordType) (f:D =-> E) := forall x y, f x <= f y -> x <= y.
Lemma subordAxiom (D:ordType) (P:D -> Prop) : PreOrd.axiom
(fun (x' y:{x : D | P x}) => match x' with exist x _ => match y with exist y _ => @Ole D x y end end).
move => D P. case => a Pa. split ; first by []. case => b Pb. case => c Pc.
by apply Ole_trans.
Qed.
Canonical Structure sub_ordMixin D P := OrdMixin (@subordAxiom (D:ordType) (P:D -> Prop)).
Canonical Structure sub_ordType D P := Eval hnf in OrdType (@sub_ordMixin D P).
Definition SubOrde (D:ordType) (P:D -> Prop) (d:D) (X:P d) : sub_ordType P := exist (fun x => P x) d X.
Implicit Arguments SubOrde [D P].
Lemma InheritFun_mono D (E:ordType) (Q:E->Prop) (f:D =-> E) (p:forall d, Q (f d)) : monotonic (fun d => @SubOrde E Q (f d) (p d)).
move => D E Q f p x y lxy. by apply: fmonotonic.
Qed.
Definition InheritFunm D (E:ordType) (Q:E->Prop) (f:D =-> E) (p:forall d, Q (f d)) :
D =-> sub_ordType Q := Eval hnf in mk_fmono (InheritFun_mono p).
Implicit Arguments InheritFunm [D E Q].
Lemma Forget_mono D P : monotonic (fun (O:@sub_ordType D P) => match O with exist x _ => x end).
move => D P. case => x px. by case.
Qed.
Definition Forgetm D P : (@sub_ordType D P) =-> D := Eval hnf in mk_fmono (@Forget_mono D P).
Lemma fmonD_mon D (E:ordType) (f:D -> E) : @monotonic (discrete_cpoType D) E f.
move => D E f x y l. by rewrite -> l.
Qed.
Definition fmonD D (E:ordType) (f:D -> E) : discrete_ordType D =-> E := Eval hnf in mk_fmono (fmonD_mon f).
Lemma ford_axiom (A:Type) (O:ordType) : PreOrd.axiom (fun f g:A->O => forall x, f x <= g x).
move => A O f.
split ; first by apply (proj1 (@fmono_axiom (discrete_ordType A) O (fmonD f))).
move => y z X Y. by apply (proj2 (@fmono_axiom (discrete_ordType A) O (fmonD f)) (fmonD y) (fmonD z) X Y).
Qed.
Canonical Structure ford_ordMixin (T:Type) (O:ordType) := OrdMixin (@ford_axiom T O).
Definition ford_ordType T O := Eval hnf in OrdType (ford_ordMixin T O).
Lemma lub_comp_le :
forall (D1 D2 : cpoType) (f:ordCatType D1 D2) (h : natO =-> D1), lub (f << h) <= f (lub h).
intros; apply lub_le; simpl; intros.
apply (fmonotonic f); auto.
Save.
Hint Resolve lub_comp_le.
Lemma lub_comp_eq :
forall (D1 D2 : cpoType) (f:cpoCatType D1 D2) (h : natO =-> D1), f (lub h) =-= lub ((f:ordCatType _ _) << h).
move => D1 D2 f g. apply: Ole_antisym ; last by apply: lub_comp_le.
rewrite fcontinuous. apply lub_le => n. simpl. by apply: (Ole_trans _ (le_lub _ n)).
Save.
Hint Resolve lub_comp_eq.
Lemma mseq_lift_left_mon (O:ordType) (f:natO =-> O) (n:nat) : monotonic (fun i => f (n+i)%N).
move => O f n x y l. apply fmonotonic. unfold Ole. simpl. rewrite -> (leq_add2l n x y). by apply l.
Qed.
Definition mseq_lift_left (O:ordType) (f:natO =-> O) (n:nat) := Eval hnf in mk_fmono (mseq_lift_left_mon f n).
Lemma lub_lift_left : forall (D:cpoType) (f:natO =-> D) n, lub f =-= lub (mseq_lift_left f n).
move => D f n. apply: Ole_antisym.
- apply lub_le. move => m. rewrite <- (le_lub _ m). simpl. apply fmonotonic. rewrite natO_le. by rewrite leq_addl.
- apply lub_le. move => m. by rewrite <- (le_lub _ (n+m)).
Qed.
Hint Resolve lub_lift_left.
Lemma lub_fun_mon (O:ordType) (D:cpoType) (h : natO =-> O -=> D) : monotonic (fun m => lub (fmon_app h m)).
move => O D h x y l. apply lub_le_compat => n. simpl. by rewrite -> l.
Qed.
Definition lub_fun (O:ordType) (D:cpoType) (h : natO =-> O -=> D) : O =-> D :=
Eval hnf in mk_fmono (lub_fun_mon h).
Lemma fmon_cpo_axiom (O:ordType) (D:cpoType) : @CPO.axiom (O -=> D) (lub_fun (O:=O) (D:=D)).
move => O D c x n ; split => e ; simpl ; first by rewrite <- (le_lub _ n).
move => a. simpl. apply lub_le. move => m. simpl. by rewrite (e m).
Qed.
Canonical Structure fmon_cpoMixin (O:ordType) (D:cpoType) := CpoMixin (@fmon_cpo_axiom O D).
Canonical Structure fmon_cpoType (O:ordType) (D:cpoType) := Eval hnf in CpoType (@fmon_cpoMixin O D).
Definition fmon_shift (O1 O2 O3:ordType) (f: O1 =-> O2 -=> O3) : O2 =-> O1 -=> O3 :=
exp_fun (uncurry f << <|pi2,pi1|>).
Lemma fmon_fcont_shift_cont (O:ordType) (D1 D2:cpoType) (f: O =-> D1 -=> D2) : continuous (fmon_shift (Fcontit D1 D2<<f)).
move => O D1 D2 f c x. simpl. rewrite (fcontinuous (f x)). by apply lub_le_compat => i.
Qed.
Definition fmon_fcont_shift (O:ordType) (D1 D2:cpoType) (f: O =-> D1 -=> D2) : D1 =-> fmon_cpoType O D2 :=
Eval hnf in mk_fcont (fmon_fcont_shift_cont f).
Lemma fcont_app_continuous :
forall (O:ordType) (D1 D2:cpoType) (f: O =-> D1 -=> D2) (h:natO =-> D1),
fcont_app f (lub h) <= lub ((fmon_fcont_shift f:ordCatType _ _) << h).
move => O D1 D2 f h x. simpl. rewrite (fcontinuous (f x)). by apply lub_le_compat.
Save.
Lemma fmon_diag_mon (O1 O2:ordType)(h:O1 =-> (O1 -=> O2)) : monotonic (fun n => h n n).
move => O1 O2 h x y l. by rewrite -> l.
Qed.
Definition fmon_diag (O1 O2:ordType)(h:O1 =-> (O1 -=> O2)) : O1 =-> O2 :=
Eval hnf in mk_fmono (fmon_diag_mon h).
Lemma lub_diag D (c:ordCatType natO (fmon_cpoType natO D)) : lub (lub c) =-= lub (fmon_diag c).
move => D c. apply: Ole_antisym.
- apply: lub_le => i. apply: lub_le => j. apply: (Ole_trans _ (le_lub _ (i+j))).
simpl. rewrite -> (fmonotonic c (leq_addl i j)).
by rewrite -> (fmonotonic (c (i+j)%N) (leq_addr j i)).
- apply: lub_le => i. by do 2 apply: (Ole_trans _ (le_lub _ i)).
Qed.
Lemma zero_ord_axiom : PreOrd.axiom (fun x y : Empty => True).
by case.
Qed.
Canonical Structure zero_ordMixin := OrdMixin zero_ord_axiom.
Canonical Structure zero_ordType := Eval hnf in OrdType zero_ordMixin.
Lemma ordZeroAxiom : @CatInitial.axiom ordCatType zero_ordType.
move => C f g. apply: fmon_eq_intro. by case.
Qed.
Lemma Zero_fun_mono (X:ordType) : monotonic (fun (x:Empty) => match x with end:X).
move => X. by case.
Qed.
Definition Zero_fun X : zero_ordType =-> X := Eval hnf in mk_fmono (@Zero_fun_mono X).
Canonical Structure ordInitialMixin := initialCatMixin Zero_fun ordZeroAxiom.
Canonical Structure ordInitialType := Eval hnf in initialCatType ordInitialMixin.
Lemma zero_cpo_axiom : @CPO.axiom zero_ordType (fun (c:natO =-> Zero) => c 0).
move => c. by case.
Qed.
Canonical Structure zero_cpoMixin := CpoMixin zero_cpo_axiom.
Canonical Structure zero_cpoType := Eval hnf in CpoType zero_cpoMixin.
Lemma cpoZeroAxiom : @CatInitial.axiom cpoCatType zero_cpoType.
move => C f g. by apply: fmon_eq_intro.
Qed.
Lemma ZERO_fun_cont (X:cpoType) : continuous (@Zero_fun X).
move => X c. simpl. by case: (lub c).
Qed.
Definition ZERO_fun X : zero_cpoType =-> X := Eval hnf in mk_fcont (ZERO_fun_cont X).
Canonical Structure cpoInitialMixin := initialCatMixin ZERO_fun cpoZeroAxiom.
Canonical Structure cpoInitialType := Eval hnf in initialCatType cpoInitialMixin.
Lemma one_ord_axiom : PreOrd.axiom (fun x y : unit => True).
by case.
Qed.
Canonical Structure one_ordMixin := OrdMixin one_ord_axiom.
Canonical Structure one_ordType := Eval hnf in OrdType one_ordMixin.
Lemma ordOneAxiom : @CatTerminal.axiom ordCatType one_ordType.
move => C f g. apply: fmon_eq_intro => x. case: (f x). by case: (g x).
Qed.
Canonical Structure ordTerminalMixin := terminalCatMixin (fun O1 => fmon_cte O1 tt) ordOneAxiom.
Canonical Structure ordTerminalType := Eval hnf in terminalCatType ordTerminalMixin.
Lemma one_cpo_axiom : @CPO.axiom one_ordType (fun (c:natO =-> One) => tt).
move => c x n. split ; first by case: (c n).
by move => X ; case x.
Qed.
Canonical Structure one_cpoMixin := CpoMixin one_cpo_axiom.
Canonical Structure one_cpoType := Eval hnf in CpoType one_cpoMixin.
Lemma cpoOneAxiom : @CatTerminal.axiom cpoCatType one_cpoType.
move => C f g. apply: fmon_eq_intro => x. case: (f x). by case: (g x).
Qed.
Canonical Structure cpoTerminalMixin := terminalCatMixin (fun D => const D tt) cpoOneAxiom.
Canonical Structure cpoTerminalType := Eval hnf in terminalCatType cpoTerminalMixin.
Lemma one_pointedAxiom : Pointed.axiom tt.
by case.
Qed.
Canonical Structure one_pointedMixin := PointedMixin one_pointedAxiom.
Canonical Structure one_pointedType := Eval hnf in PointedType one_pointedMixin.
Canonical Structure one_cppoType := Eval hnf in CppoType one_pointedMixin.
Lemma cppoOneAxiom : CatTerminal.axiom one_cppoType.
move => C f g. by apply: (cpoOneAxiom).
Qed.
Canonical Structure cppoTerminalMixin := terminalCatMixin (fun D => const D tt : cppoCatType D one_cppoType) cppoOneAxiom.
Canonical Structure cppoTerminalType := Eval hnf in terminalCatType cppoTerminalMixin.
Lemma eq_cont (D0 D1:cpoType) (f:D0 -> D1) (g:D0 =-> D1) (X:forall x, f x =-= g x) : continuous (gen_mono X).
move => D0 D1 f g X c. simpl. rewrite -> (X (lub c)). rewrite -> (fcontinuous g c).
apply lub_le_compat => n. simpl. by rewrite -> (X (c n)).
Qed.
Definition gen_cont (D0 D1:cpoType) (f:D0 -> D1) (g:D0 =-> D1) (X:forall x, f x =-= g x) := Eval hnf in mk_fcont (eq_cont X).
Add Parametric Relation (O O':cpoType) : (O =-> O') (@Ole (fcont_ordType O O') : (O =-> O') -> (O =-> O') -> Prop)
reflexivity proved by (@Ole_refl _)
transitivity proved by (@Ole_trans _) as Ole_XRelation.
Add Parametric Relation (O O':ordType) : (O =-> O') (@Ole (O -=> O') : (O =-> O') -> (O =-> O') -> Prop)
reflexivity proved by (@Ole_refl _)
transitivity proved by (@Ole_trans _) as Ole_XXRelation.
Add Parametric Morphism (D1 D2 D3:cpoType) : (@Category.comp cpoCatType D1 D2 D3)
with signature (@Ole (fcont_ordType D2 D3) : (D2 =-> D3) -> (D2 =-> D3) -> Prop) ++>
(@Ole (fcont_ordType D1 D2) : (D1 =-> D2) -> (D1 =-> D2) -> Prop) ++>
(@Ole (fcont_ordType D1 D3) : (D1 =-> D3) -> (D1 =-> D3) -> Prop)
as comp_le_compat.
move => f g l f' g' l' x.
simpl. rewrite -> l. by rewrite -> l'.
Qed.
Add Parametric Morphism (D1 D2 D3:ordType) : (@Category.comp ordCatType D1 D2 D3)
with signature (@Ole (D2 -=> D3) : (D2 =-> D3) -> (D2 =-> D3) -> Prop) ++>
(@Ole (D1 -=> D2) : (D1 =-> D2) -> (D1 =-> D2) -> Prop) ++>
(@Ole (D1 -=> D3) : (D1 =-> D3) -> (D1 =-> D3) -> Prop)
as comp_le_ord_compat.
move => f g l f' g' l' x.
simpl. rewrite -> l. by rewrite -> l'.
Qed.
Lemma ccomp_mon (D1 D2 D3:cpoType) : monotonic (fun (p:(D2 -=> D3) * (D1 -=> D2)) => (fst p : cpoCatType _ _) << snd p).
move => D1 D2 D3. case => f g. case => f' g'. simpl.
case ; simpl => l l'. rewrite -> l. by rewrite -> l'.
Qed.
Definition Ccomp (D1 D2 D3:cpoType) := Eval hnf in mk_fmono (@ccomp_mon D1 D2 D3).
Lemma Ccomp_cont (D1 D2 D3:cpoType) : continuous (@Ccomp D1 D2 D3).
move => D1 D2 D3 c x. simpl.
rewrite -> fcont_app_continuous. rewrite lub_diag. by apply lub_le_compat => i.
Qed.
Definition CCOMP (D1 D2 D3:cpoType) := Eval hnf in mk_fcont (@Ccomp_cont D1 D2 D3).
Lemma comp_lub_eq (D0 D1 D2 :cpoType) (f:D1 =-> D2) (c:natO =-> fcont_ordType D0 D1) :
f << lub c =-= lub ((@exp_fun _ (D1 -=> D2) _ _ (CCOMP D0 D1 D2) f : ordCatType _ _) << c).
move => D0 D1 D2 f c. apply: fmon_eq_intro => d. simpl. rewrite lub_comp_eq.
apply lub_eq_compat. by apply fmon_eq_intro => n.
Qed.
Lemma lub_comp (D0 D1 D2 :cpoType) (f:D0 =-> D1) (c:natO =-> fcont_ordType D1 D2) :
(lub c : cpoCatType _ _) << f =-= lub ((@exp_fun _ (D0 -=> D1) _ _ (CCOMP D0 D1 D2 << <|pi2,pi1|>) f : ordCatType _ _) << c).
move => D0 D1 D2 f c. apply: fmon_eq_intro => d. simpl.
apply lub_eq_compat. by apply fmon_eq_intro => n.
Qed.
Lemma lub_comp_both (X Y Z:cpoType) (c:natO =-> fcont_ordType X Y) (c':natO =-> fcont_ordType Y Z) :
(lub c' : cpoCatType _ _) << lub c =-= lub ( (CCOMP _ _ _ : ordCatType _ _) << <|c', c|>).
move => X Y Z c c'.
have a:=@lub_comp_eq _ _ ( ((CCOMP X Y Z))) (<|c',c|>). rewrite <- a.
apply: comp_eq_compat. simpl. by rewrite prod_fun_fst.
simpl. by rewrite prod_fun_snd.
Qed.
Section OrdProdI.
Variable (I:Type) (O:I -> ordType).
Lemma prodI_ord_axiom : PreOrd.axiom (fun p1 p2 :(forall i:I, O i) => forall i:I, p1 i <= p2 i).
move => x ; split ; clear ; first by [].
move => y z X Y i. rewrite (X i). by rewrite (Y i).
Qed.
Canonical Structure prodI_ordMixin := OrdMixin prodI_ord_axiom.
Canonical Structure prodI_ordType := Eval hnf in OrdType prodI_ordMixin.
Lemma Proj_monotonic (i:I) : monotonic (fun (x:prodI_ordType ) => x i).
move => i x y l. by apply l.
Qed.
Definition Proj (i:I) : prodI_ordType =-> O i := Eval hnf in mk_fmono (Proj_monotonic i).
Lemma prodi_fun_mon (D:ordType) (f:forall i:I, D =-> O i) : monotonic (fun d => (fun i => f i d):prodI_ordType).
move => D f d0 d1 deq i.
by rewrite -> deq.
Qed.
Definition Prodi_fun D (f:forall i:I, D =-> O i) : D =-> prodI_ordType := Eval hnf in mk_fmono (prodi_fun_mon f).
End OrdProdI.
Lemma ordProdIAxiom : forall I:Type, @CatIProduct.axiom _ I (@prodI_ordType I) (@Proj I) (@Prodi_fun I).
move => I A X f. split.
- move => i. by apply fmon_eq_intro => x.
- move => m Z. apply fmon_eq_intro => x. split ; simpl => i.
+ specialize (Z i). by apply (proj1 Z x).
+ specialize (Z i). by apply (proj2 Z x).
Qed.
Canonical Structure ordProdIMixin := prodICatMixin ordProdIAxiom.
Canonical Structure ordProdICat := Eval hnf in prodICatType ordProdIMixin.
Section ProdICPO.
Variable I:Type.
Variable D:I -> cpoType.
Lemma prodi_cpo_axiom : CPO.axiom (fun (f : natO =-> prodI_ordType D) (i:I) => lub (Proj D i << f)).
move => c x n ; split ; clear.
- move => i. simpl. by rewrite <- (le_lub _ n).
- move => C i. apply lub_le => n. simpl. by apply C.
Qed.
Canonical Structure prodi_cpoMixin := @CpoMixin (prodI_ordType D) _ prodi_cpo_axiom.
Canonical Structure prodi_cpoType := Eval hnf in @CpoType (prodI_ordType D) prodi_cpoMixin.
Lemma Proj_cont i : continuous (@Proj I D i:ordCatType prodi_cpoType _).
move => i c. simpl. by apply: lub_le_compat.
Qed.
Definition PROJ i := Eval hnf in mk_fcont (Proj_cont i).
Lemma prod_fun_cont (D1:cpoType) (f:forall i:I, D1 =-> D i) :
continuous (@Prodi_fun I _ D1 (fun i => (f i)) : ordCatType D1 prodi_cpoType).
move => D1 f c i.
simpl. rewrite (fcontinuous (f i)). by apply: lub_le_compat => n.
Qed.
Definition PRODI_fun D1 (f:forall i:I, D1 =-> D i) : D1 =-> prodi_cpoType :=
Eval hnf in mk_fcont (prod_fun_cont f).
End ProdICPO.
Lemma cpoProdIAxiom : forall I:Type, @CatIProduct.axiom _ I (@prodi_cpoType I) (@PROJ I) (@PRODI_fun I).
move => I A X f. split.
- move => i. apply (proj1 (ordProdIAxiom f) i).
- move => m Z. by apply (proj2 (ordProdIAxiom f) m Z).
Qed.
Canonical Structure cpoProdIMixin := prodICatMixin cpoProdIAxiom.
Canonical Structure cpoProdICat := prodICatType cpoProdIMixin.
#include "log.h"
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/formatter_parser.hpp>
#include "assertions.h"
using std::make_unique;
namespace execHelper {
namespace log {
LogInit::LogInit() noexcept { init(std::clog); }
LogInit::LogInit(std::ostream& logStream) noexcept { init(logStream); }
LogInit::~LogInit() {
m_consoleLogger.reset();
} // NOLINT(fuchsia-default-arguments-calls)
void LogInit::init(std::ostream& logStream) noexcept {
boost::log::add_common_attributes();
boost::log::register_simple_formatter_factory<LogLevel, char>("Severity");
m_consoleLogger = make_unique<ConsoleLogger>(logStream);
}
void LogInit::setSeverity(const Channel& channel, LogLevel severity) {
m_consoleLogger->setSeverity(channel, severity);
}
} // namespace log
namespace color {
auto operator<<(std::ostream& os, const Modifier& mod) -> std::ostream& {
return os << "\033[" << mod.code << "m";
}
auto operator<<(std::wostream& os, const Modifier& mod) -> std::wostream& {
return os << "\033[" << mod.code << "m";
}
} // namespace color
} // namespace execHelper
|
[STATEMENT]
lemma field_differentiable_divide [derivative_intros]:
assumes "f field_differentiable (at a within S)"
"g field_differentiable (at a within S)"
"g a \<noteq> 0"
shows "(\<lambda>z. f z / g z) field_differentiable (at a within S)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>z. f z / g z) field_differentiable at a within S
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
f field_differentiable at a within S
g field_differentiable at a within S
g a \<noteq> (0::'a)
goal (1 subgoal):
1. (\<lambda>z. f z / g z) field_differentiable at a within S
[PROOF STEP]
unfolding field_differentiable_def
[PROOF STATE]
proof (prove)
using this:
\<exists>f'. (f has_field_derivative f') (at a within S)
\<exists>f'. (g has_field_derivative f') (at a within S)
g a \<noteq> (0::'a)
goal (1 subgoal):
1. \<exists>f'. ((\<lambda>z. f z / g z) has_field_derivative f') (at a within S)
[PROOF STEP]
by (metis DERIV_divide [of f _ a S g]) |
# Copyright (C) 2008 University of Maryland
# All rights reserved.
# See LICENSE.txt for details.
# Author: Christopher Metting
#Starting Date:6/12/2009
from pylab import *
from numpy import *
from time import time
from ..model.sample_prep import Q_space
from .approximations import wavefunction_format
def DWBA_form(cell,lattice,beam,q,refract = True):
'''
The scattering is calculated in scatCalc because we need to open up the
possibility for qx refraction on the interpolation.
'''
if refract == True:
from scipy.interpolate import interp1d
scat = zeros(q.points, dtype = 'complex')
qvec = q.vectorize()
q.getKSpace(beam.wavelength)
qx_refract = qvec[0].repeat(q.points[1],axis=1)
qx_refract = qx_refract.repeat(q.points[2],axis=2)
qx_refract[q.kin <= 0.0] += beam.wavelength*cell.inc_sub[1,0]
qx_refract[q.kout >= 0.0] -= beam.wavelength*cell.inc_sub[1,0]
q.qx_refract = qx_refract
qxMinTemp = qx_refract.min()-3*q.q_step[0]
qxMaxTemp = qx_refract.max()+3*q.q_step[0]
#doubles the interpolation q for a more accurate interpolation
newX = arange(qxMinTemp,qxMaxTemp,q.q_step[0]/2.0)
newQ = Q_space([qxMinTemp,q.minimums[1],q.minimums[2]],
[qxMaxTemp,q.maximums[1],q.maximums[2]],
[size(newX),q.points[1],q.points[2]])
largScat = scatCalc(cell,lattice,beam,newQ)
for ii in range (size(q.q_list[1])):
for iii in range(size(q.q_list[2])):
realSplineFunc = interp1d(newQ.q_list[0],largScat.real[:,ii,iii])
imagSplineFunc = interp1d(newQ.q_list[0],largScat.imag[:,ii,iii])
interpReal = realSplineFunc(qx_refract[:,ii,iii])
interpImag = imagSplineFunc(qx_refract[:,ii,iii])
scat[:,ii,iii].real = interpReal
scat[:,ii,iii].imag = interpImag
else:
scat = scatCalc(cell,lattice,beam,q)
'''
imshow(log10(rot90(sum(((abs(scat)**2)).real,axis=1))), extent = q.getExtent(), aspect = 'auto')
show()
'''
return(scat)
def print_timing(func):
def wrapper(*arg):
t1 = time()
res = func(*arg)
t2 = time()
print('%s took %0.3f ms' % (func.__name__, (t2-t1)*1000.0))
return res
return wrapper
@print_timing
def scatCalc(cell,lattice,beam,q):
'''
Math from Kentzinger et al., Physical Review B 77, 104435 (2008)
'''
#Front of Eq (20)
m = 1.674e-27 # neutron mass in kg
h_bar = 6.62607e-14 # numerically Planck's constant h in kg*angstrom**2/s, not hbar, despite the name
Vfac = -m/(2*pi*h_bar**2)
q.getKSpace(beam.wavelength)
scat = zeros(q.points,dtype = 'complex')
# PSI in one
# PSI in two
# PSI out one
# PSI out two
pio = [None]*cell.n[2]
pit = [None]*cell.n[2]
poo = [None]*cell.n[2]
pot = [None]*cell.n[2]
pil = [None]*cell.n[2]
pfl = [None]*cell.n[2]
q_piopoo = [None]*cell.n[2]
q_piopot = [None]*cell.n[2]
q_pitpoo = [None]*cell.n[2]
q_pitpot = [None]*cell.n[2]
x = cell.value_list[0].reshape((cell.n[0],1,1))
y = cell.value_list[1].reshape((1,cell.n[1],1))
z = cell.value_list[2].reshape((1,1,cell.n[2]))
#Averages the in-plane scattering length density and formats the new
#object as [SLD, Thickness, Absorption] for each z layer
SLDArray = wavefunction_format(cell.unit, cell.step[2], absorbtion = None)
#This is the calculation of the critical edge. It is needed for the
#calculation of p.
pcl = sqrt(4*pi*SLDArray[:,0])
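#For reference (a restatement, not from the original source): the line
#above implements p_c,l = sqrt(4*pi*rho_l), the critical wavevector of
#layer l; further down, pil and pfl are computed as sqrt(k**2 - pcl**2),
#the z wavevectors inside each layer.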
#The cell is originally oriented so that the bottom of the unit cell
#is located at the origin. This flips the cell so that the stack is ordered
#in the opposite direction.
flipCell = zeros(shape(cell.unit))
for i in range(cell.n[2]):
flipCell[:,:,i] = cell.unit[:,:,shape(cell.unit)[2]-i-1]
#This calculates the residual potential by taking the difference between
#the reference potential and the actual potential
Vres = flipCell - (SLDArray[:,0]).reshape((1,1,cell.n[2]))
#This is the rho used in eq. 20. The integration is the residual potential
#relative to the reference potential.
rhoTilOverRho = Vres/(SLDArray[:,0]).reshape((1,1,cell.n[2]))
rhoTilOverRho[isnan(rhoTilOverRho)] = 0.0
#calculates the structure factor using the gaussian convolution.
if lattice != None:
SF = lattice.gauss_struc_calc(q)
for i in range(size(q.q_list[0])):
print('qx number: %d calculating (%d of %d)' % (i, i+1, size(q.q_list[0])))
for ii in range(size(q.q_list[1])):
#The next few lines calculate the c and d values for each layer.
#This is done by calculating the specular reflectivity and then
#tracing the final reflected intensity back into the sample.
poskiWavePar = dwbaWavefunction(q.kin[i,ii,:],SLDArray)
negkfWavePar = dwbaWavefunction(-q.kout[i,ii,:],SLDArray)
pio = poskiWavePar.c
pit = poskiWavePar.d
k_inl =poskiWavePar.kz_l
poo = negkfWavePar.c
pot = negkfWavePar.d
k_outl =negkfWavePar.kz_l
for l in range(cell.n[2]):
#Solves the equation shown after eq. 11 on page 5.
pil[l]=sqrt(asarray((q.kin[i,ii,:]**2)-(pcl[l]**2),
dtype = 'complex'))
pfl[l]=sqrt(asarray((q.kout[i,ii,:]**2)-(pcl[l]**2),
dtype = 'complex'))
#print pil[l], poskiWavePar.nz[l] * pio[0]
#Equations directly after eq (18).
q_piopoo[l] = -pfl[l] - pil[l]
q_piopot[l] = -pfl[l] + pil[l]
q_pitpoo[l] = pfl[l] - pil[l]
q_pitpot[l] = pfl[l] + pil[l]
pil = asarray(pil)
pfl = asarray(pfl)
q_piopoo = asarray(q_piopoo)
q_piopot = asarray(q_piopot)
q_pitpoo = asarray(q_pitpoo)
q_pitpot = asarray(q_pitpot)
pio = asarray(pio)
pit = asarray(pit)
poo = asarray(poo)
pot = asarray(pot)
k_inl = asarray(k_inl)
k_outl = asarray(k_outl)
########
# EDIT: bbm 07/20/2012
# this is not Eq. 18, which refers only to the out-of-plane (z) Laue factor
# this is the necessary Laue factor to do the integral in eq. 20
# as a finite sum over blocks of constant rho in the x-y plane
########
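# For reference (a restatement, not from the original source), each Laue
# factor below is the analytic integral of the phase over one block of
# constant rho:
#   lau_x = integral from 0 to step_x of exp(1j*qx*x) dx
#         = (-1j/qx) * (exp(1j*qx*step_x) - 1)   if qx != 0
#         = step_x                               if qx == 0
# and likewise for lau_y with qy and cell.step[1].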
qx = q.q_list[0][i]
if qx != 0:
laux = ((-1j / qx) * (exp(1j * qx * cell.step[0]) - 1.0))
else:
laux = complex(cell.step[0])
qy = q.q_list[1][ii]
if qy != 0:
lauy = ((-1j / qy) * (exp(1j * qy * cell.step[1]) - 1.0))
else:
lauy = complex(cell.step[1])
#Eq. 20 (including only rhoN - rhoM is assumed to be zero)
ftwRef = (Vfac*sum(sum(rhoTilOverRho * exp(1j*q.q_list[0][i]*x)*
exp(1j*q.q_list[1][ii]*y),axis = 0),axis=0))
# finite-sum corrections for the x and y directions
ftwRef *= laux
ftwRef *= lauy
#Eq.18 with the added structure factor.
if lattice != None:
ftwRef *=SF[i,ii,0]
#ftwRef = ftwRef*((lattice.repeat[0]*cell.Dxyz[0]*lattice.repeat[1]*cell.Dxyz[1]))
#ftwRef = ftwRef*(lattice.repeat[0]*cell.Dxyz[0])\
#Eq. 19
ftwRef = ((SLDArray[:,0]).reshape((1,1,cell.n[2]))*
ftwRef.reshape((1,1,cell.n[2])))
for iii in range(size(q.q_list[2])):
ft = ftwRef.copy()
pioSel = pio[:,iii].reshape((1,1,cell.n[2]))
pitSel = pit[:,iii].reshape((1,1,cell.n[2]))
pooSel = poo[:,iii].reshape((1,1,cell.n[2]))
potSel = pot[:,iii].reshape((1,1,cell.n[2]))
q_piopoo_sel = q_piopoo[:,iii].reshape((1,1,cell.n[2]))
q_piopot_sel = q_piopot[:,iii].reshape((1,1,cell.n[2]))
q_pitpoo_sel = q_pitpoo[:,iii].reshape((1,1,cell.n[2]))
q_pitpot_sel = q_pitpot[:,iii].reshape((1,1,cell.n[2]))
pil_sel = pil[:,iii].reshape((1,1,cell.n[2]))
pfl_sel = pfl[:,iii].reshape((1,1,cell.n[2]))
#equation 15
scat_PioPoo = (pioSel * exp(1j*pil_sel*z)*ft*
exp(1j*pfl_sel*z) * pooSel)
scat_PioPot = (pioSel * exp(1j*pil_sel*z)*ft*
exp(-1j*pfl_sel*z)*potSel)
scat_PitPoo = (pitSel * exp(-1j*pil_sel*z)*ft*
exp(1j*pfl_sel*z) *pooSel)
scat_PitPot = (pitSel * exp(-1j*pil_sel*z)*ft*
exp(-1j*pfl_sel*z)* potSel)
#equation 15
# edit 7/23/2012, bbm:
# the integration over z is taken care of by eq. 17 and 18,
# giving the Laue factor -
# the mu and nu sum comes out to 1/4 * 4 * g for unpolarized
# NO - Wait - changing my mind.
#
# looks like Chris was right - the S propagator in eq. 11
# is for a wavefunction referenced to the boundary of the
# current layer, while our c and d are calculated with respect
# to z = 0 (not z=z_l), so the extra factor of e^{ikz_l} might
# be necessary.
# scat_PioPoo = (pioSel * ft * pooSel)
# scat_PioPot = (pioSel * ft * potSel)
# scat_PitPoo = (pitSel * ft * pooSel)
# scat_PitPot = (pitSel * ft * potSel)
#equation 18
# edit 7/23/12, bbm:
# scat_ was incorrectly set to = cell.step[2] for q==0 case,
# instead of multiplying (should be *= )
mask = (q_piopoo_sel != 0)
scat_PioPoo[mask] *= ((-1j / q_piopoo_sel[mask]) *
(exp(1j *q_piopoo_sel[mask] * cell.step[2]) - 1.0))
scat_PioPoo[q_piopoo_sel == 0] *= cell.step[2]
mask = (q_piopot_sel != 0)
scat_PioPot[mask] *= ((-1j / q_piopot_sel[mask]) *
(exp(1j *q_piopot_sel[mask] * cell.step[2]) - 1.0))
scat_PioPot[q_piopot_sel == 0] *= cell.step[2]
mask = (q_pitpoo_sel != 0)
scat_PitPoo[mask] *= ((-1j / q_pitpoo_sel[mask]) *
(exp(1j *q_pitpoo_sel[mask] * cell.step[2]) - 1.0))
scat_PitPoo[q_pitpoo_sel == 0] *= cell.step[2]
mask = (q_pitpot_sel != 0)
scat_PitPot[mask] *= ((-1j / q_pitpot_sel[mask]) *
(exp(1j *q_pitpot_sel[mask] * cell.step[2]) - 1.0))
scat_PitPot[q_pitpot_sel == 0] *= cell.step[2]
#Exactly equation 15
scat[i,ii,iii]= sum(scat_PioPoo + scat_PioPot +
scat_PitPoo + scat_PitPot)
k_spec = q.q_list[2]/2.0
dwba_spec = dwbaWavefunction(k_spec,SLDArray)
locx = q.q_list[0].searchsorted(0.0)
locy = q.q_list[1].searchsorted(0.0)
#scat[locx,locy,:] = dwba_spec.r
#semilogy(q.q_list[2],(abs(dwba_spec.r)**2))
#semilogy(q.q_list[2],sum((abs(scat)**2).real,axis=1)[locx+5,:])
#figure()
return(scat)
class dwbaWavefunction:
def __init__(self, kz, SLDArray):
self.kz = kz
self.SLDArray = SLDArray
self.layerCount = SLDArray.shape[0]
self.thickness = sum(SLDArray[1:-1,1])
SLD_inc = SLDArray[0,0]
SLD_sub = SLDArray[-1,0]
B11 = ones(shape(kz),dtype='complex')
B22 = ones(shape(kz),dtype='complex')
B21 = zeros(shape(kz),dtype='complex')
B12 = zeros(shape(kz),dtype='complex')
M11 = [None]*self.layerCount
M12 = [None]*self.layerCount
M21 = [None]*self.layerCount
M22 = [None]*self.layerCount
Bl11 = [None]*self.layerCount
Bl12 = [None]*self.layerCount
Bl21 = [None]*self.layerCount
Bl22 = [None]*self.layerCount
Bl11[0] = B11
Bl12[0] = B22
Bl21[0] = B21
Bl22[0] = B12
self.c = [None]*self.layerCount
self.d = [None]*self.layerCount
nz =[None]*self.layerCount
k0z = sqrt(asarray(kz**2 + 4 * pi * SLD_inc,dtype = 'complex'))
nz[0] = sqrt( complex(1) - 4 * pi * SLD_inc / k0z**2 )
nz[-1] = sqrt( complex(1) - 4 * pi * SLD_sub / k0z**2 )
for l in range(1, self.layerCount-1):
#leaving off the incident medium and substrate from sum
SLD,thickness,mu = self.SLDArray[l]
nz[l] = sqrt(complex(1) - 4 * pi * SLD/ k0z**2 )
kzl =( nz[l] * k0z ) # edit: BBM 02/10/2012
n = nz[l]
M11[l] = asarray(cos(kzl * thickness),dtype = 'complex')
M12[l] = asarray(1/n * sin(kzl * thickness),dtype = 'complex')
M21[l] = asarray((-n) * sin(kzl * thickness),dtype = 'complex')
M22[l] = asarray(cos(kzl * thickness),dtype = 'complex')
C1 = B11*M11[l] + B21*M12[l]
C2 = B11*M21[l] + B21*M22[l]
B11 = C1
B21 = C2
C1 = B12*M11[l] + B22*M12[l]
C2 = B12*M21[l] + B22*M22[l]
B12 = C1
B22 = C2
Bl11[l] = B11
Bl21[l] = B21
Bl12[l] = B12
Bl22[l] = B22
self.kz_l = nz * k0z
r = (B11 + (1j * nz[0] * B12) + (1/(1j * nz[-1])*(
-B21 - 1j * nz[0] * B22))) / (-B11 + (1j * nz[0] * B12) + (
1/(1j * nz[-1])*( B21 - 1j * nz[0] * B22)))
Bl11[-1] = ones(shape(kz))
Bl12[-1] = zeros(shape(kz))
Bl21[-1] = ones(shape(kz))
Bl22[-1] = zeros(shape(kz))
self.r = r
self.t = zeros(shape(r),dtype = 'complex')
self.t[nz[-1].real != 0.0] = 1.0 + self.r[nz[-1].real != 0.0]
self.c[0] = ones(shape(kz),dtype='complex') # incident beam has intensity 1
self.d[0] = r # reflected beam has intensity |r|**2
p = asarray(1.0 + r,dtype ='complex') #psi
pp = asarray(1j * kz[0] * (1 - r),dtype='complex') #psi prime
M11[0] = ones(shape(kz),dtype='complex')
M12[0] = ones(shape(kz),dtype='complex')
M21[0] = ones(shape(kz),dtype='complex')
M22[0] = ones(shape(kz),dtype='complex')
#M11[-1] = zeros(shape(kz),dtype='complex')
#M12[-1] = ones(shape(kz),dtype='complex')
#M21[-1] = ones(shape(kz),dtype='complex')
#M22[-1] = zeros(shape(kz),dtype='complex')
z_interface = 0.0
for l in range(1,self.layerCount-1):
## this algorithm works all the way into the substrate
pForDot = copy(p)
ppForDot = copy(pp)
#Fine, This is c and d
kzl =( nz[l] * k0z )
self.c[l] = (.5* exp(-1j*kzl*(z_interface))*
(p + (pp/(1j*kzl))))
self.d[l] = (.5* exp(1j*kzl*(z_interface))*
(p - (pp/(1j*kzl))))
## Moved ^ above v to model wavefunction.js WRT 7/16/12
p = (M11[l]*pForDot) + (M12[l]*ppForDot/k0z)
pp = (k0z*M21[l]*pForDot) + (M22[l]*ppForDot)
z_interface += thickness
# fill final c,d
self.c[-1] = self.t
self.d[-1] = zeros(shape(kz),dtype='complex')
self.nz = nz
return
def _test():
# run from ipython by starting in root osrefl directory,
# from osrefl.theory.DWBA import _test
# _test()
# ...
from osrefl.model.sample_prep import Parallelapiped, Layer, Scene, GeomUnit, Rectilinear, Beam
Au = Parallelapiped(SLD = 4.506842e-6,dim=[3.7e4,3.7e4,630.0])#, curve = .56)
Cr = Layer(SLD = 3.01e-6,thickness_value = 48.0)
#Au.on_top_of(Cr)
#scene = Scene([Au,Cr])
scene = Scene([Au])
GeoUnit = GeomUnit(Dxyz = [10.0e4,10.0e4,700.0], n = [20,21,40],
#scene = scene, inc_sub = [0.0,0.0])
scene = scene, inc_sub = [0.0,2.07e-6])
unit = GeoUnit.buildUnit()
unit.add_media()
lattice = Rectilinear([20.0,20.0,1.0],unit)
beam = Beam(5.0,.02,None,0.02,None)
q = Q_space([-.0002,-0.002,0.00002],[.0002,.002,0.1],[100,5,150])
SLDArray = wavefunction_format(unit.unit, unit.step[2], absorbtion = None)
'''
kiWavePar = dwbaWavefunction(q.kin,SLDArray)
test = 2
bbmTest = neutron_wavefunction(q.kin[test,2,50],SLDArray)
cCollect = zeros(shape(kiWavePar.c)[0])
dCollect = zeros(shape(kiWavePar.d)[0])
c = asarray(kiWavePar.c)
d = asarray(kiWavePar.d)
for i in range(shape(kiWavePar.c)[0]):
temp = kiWavePar.c[i]
cCollect[i] = temp[test,2,50]
temp = kiWavePar.d[i]
dCollect[i] = temp[test,2,50]
cCollect=c[:,test,2,50]
dCollect=d[:,test,2,50]
plot(bbmTest.c,label = 'bbm')
plot(cCollect,label = 'py')
legend()
figure()
plot(bbmTest.d,label = 'bbm')
plot(dCollect,label = 'py')
legend()
figure()
diff = abs(bbmTest.c.real-cCollect.real)/((abs(bbmTest.c.real)+abs(cCollect.real))/2.0)
plot(diff,label = 'diff')
show()
'''
DWBA_form(unit,lattice,beam,q)
if __name__=="__main__": _test()
|
[STATEMENT]
lemma "a < b \<and> b < (c::int) \<Longrightarrow> \<exists>A. a \<notin> A \<and> b \<in> A \<and> c \<notin> A"
\<comment> \<open>Example 4.\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a < b \<and> b < c \<Longrightarrow> \<exists>A. a \<notin> A \<and> b \<in> A \<and> c \<notin> A
[PROOF STEP]
by auto \<comment> \<open>slow\<close> |
Inductive test : ltac:(let U := type of Type in exact U) := t.
|
context("Tidy Up")
dirs <- list.dirs(path = ".",full.names = TRUE,recursive = TRUE)
dirs <- grep(pattern = "methylDB",x = dirs,value = TRUE)
unlink(x = dirs, recursive = TRUE)
unlink("Rplots.pdf")
unlink("test.bed")
|
/**
*
* @file core_zlag2c.c
*
* PLASMA core_blas kernel
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Mathieu Faverge
* @date 2010-11-15
* @precisions mixed zc -> ds
*
**/
#include <lapacke.h>
#include "common.h"
/***************************************************************************//**
*
* @ingroup CORE_PLASMA_Complex64_t
*
* CORE_PLASMA_zlag2c converts a PLASMA_Complex64_t matrix, A, to a
* PLASMA_Complex32_t matrix, B.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrices A and B. m >= 0.
*
* @param[in] n
* The number of columns of the matrices A and B. n >= 0.
*
* @param[in] A
* The PLASMA_Complex64_t m-by-n matrix to convert.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] B
* The PLASMA_Complex32_t m-by-n matrix to convert.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
* @param[out] info
* - 0 on successful exit.
* - 1 if an entry of the matrix A is greater than the SINGLE
* PRECISION overflow threshold, in this case, the content
* of B in exit is unspecified.
*
******************************************************************************/
#if defined(PLASMA_HAVE_WEAK)
#pragma weak CORE_zlag2c = PCORE_zlag2c
#define CORE_zlag2c PCORE_zlag2c
#endif
void CORE_zlag2c(int m, int n,
const PLASMA_Complex64_t *A, int lda,
PLASMA_Complex32_t *B, int ldb, int *info)
{
*info = LAPACKE_zlag2c_work(LAPACK_COL_MAJOR, m, n, A, lda, B, ldb);
}
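/*
 * Hypothetical usage sketch (not part of PLASMA): convert a 2-by-2
 * column-major tile to single precision and check the overflow flag
 * reported through *info.
 *
 *     PLASMA_Complex64_t A[4] = {1.0, 2.0, 3.0, 4.0};
 *     PLASMA_Complex32_t B[4];
 *     int info;
 *     CORE_zlag2c(2, 2, A, 2, B, 2, &info);
 *     if (info != 0)
 *         fprintf(stderr, "zlag2c: entry exceeds single precision range\n");
 */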
/***************************************************************************//**
*
* @ingroup CORE_PLASMA_Complex64_t
*
* CORE_PLASMA_clag2z converts a PLASMA_Complex32_t matrix, A, to a
* PLASMA_Complex64_t matrix, B.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrices A and B. m >= 0.
*
* @param[in] n
* The number of columns of the matrices A and B. n >= 0.
*
* @param[in] A
* The PLASMA_Complex32_t m-by-n matrix to convert.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] B
* The PLASMA_Complex64_t m-by-n matrix to convert.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
******************************************************************************/
#if defined(PLASMA_HAVE_WEAK)
#pragma weak CORE_clag2z = PCORE_clag2z
#define CORE_clag2z PCORE_clag2z
#endif
void CORE_clag2z(int m, int n,
const PLASMA_Complex32_t *A, int lda,
PLASMA_Complex64_t *B, int ldb)
{
LAPACKE_clag2z_work(LAPACK_COL_MAJOR, m, n, A, lda, B, ldb);
}
|
------------------------------------------------------------------------------
-- ABP Lemma 2
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
-- From Dybjer and Sander's paper: The second lemma states that given
-- a state of the latter kind (see Lemma 1) we will arrive at a new
-- start state, which is identical to the old start state except that
-- the bit has alternated and the first item in the input stream has
-- been removed.
module FOTC.Program.ABP.Lemma2I where
open import Common.FOL.Relation.Binary.EqReasoning
open import FOTC.Base
open import FOTC.Base.List
open import FOTC.Base.List.PropertiesI
open import FOTC.Base.Loop
open import FOTC.Base.PropertiesI
open import FOTC.Data.Bool
open import FOTC.Data.Bool.PropertiesI
open import FOTC.Data.List
open import FOTC.Data.List.PropertiesI
open import FOTC.Program.ABP.ABP
open import FOTC.Program.ABP.Fair.Type
open import FOTC.Program.ABP.Fair.PropertiesI
open import FOTC.Program.ABP.PropertiesI
open import FOTC.Program.ABP.Terms
------------------------------------------------------------------------------
-- Helper function for Lemma 2.
helper₂ : ∀ {b i' is' os₁' os₂' as' bs' cs' ds' js'} →
Bit b →
Fair os₁' →
S' b i' is' os₁' os₂' as' bs' cs' ds' js' →
∀ ft₂ os₂'' → F*T ft₂ → Fair os₂'' → os₂' ≡ ft₂ ++ os₂'' →
∃[ os₁'' ] ∃[ os₂'' ] ∃[ as'' ] ∃[ bs'' ] ∃[ cs'' ] ∃[ ds'' ]
Fair os₁''
∧ Fair os₂''
∧ S (not b) is' os₁'' os₂'' as'' bs'' cs'' ds'' js'
helper₂ {b} {i'} {is'} {os₁'} {os₂'} {as'} {bs'} {cs'} {ds'} {js'}
Bb Fos₁' (as'S' , bs'S' , cs'S' , ds'S' , js'S')
.(T ∷ []) os₂'' f*tnil Fos₂'' os₂'-eq =
os₁' , os₂'' , as'' , bs'' , cs'' , ds''
, Fos₁' , Fos₂''
, as''-eq , bs''-eq , cs''-eq , refl , js'-eq
where
os₂'-eq-helper : os₂' ≡ T ∷ os₂''
os₂'-eq-helper =
os₂' ≡⟨ os₂'-eq ⟩
(T ∷ []) ++ os₂'' ≡⟨ ++-∷ T [] os₂'' ⟩
T ∷ [] ++ os₂'' ≡⟨ ∷-rightCong (++-leftIdentity os₂'') ⟩
T ∷ os₂'' ∎
ds'' : D
ds'' = corrupt os₂'' · cs'
ds'-eq : ds' ≡ ok b ∷ ds''
ds'-eq =
ds' ≡⟨ ds'S' ⟩
corrupt os₂' · (b ∷ cs')
≡⟨ ·-leftCong (corruptCong os₂'-eq-helper) ⟩
corrupt (T ∷ os₂'') · (b ∷ cs')
≡⟨ corrupt-T os₂'' b cs' ⟩
ok b ∷ corrupt os₂'' · cs'
≡⟨ refl ⟩
ok b ∷ ds'' ∎
as'' : D
as'' = as'
as''-eq : as'' ≡ send (not b) · is' · ds''
as''-eq =
as'' ≡⟨ as'S' ⟩
await b i' is' ds' ≡⟨ awaitCong₄ ds'-eq ⟩
await b i' is' (ok b ∷ ds'') ≡⟨ await-ok≡ b b i' is' ds'' refl ⟩
send (not b) · is' · ds'' ∎
bs'' : D
bs'' = bs'
bs''-eq : bs'' ≡ corrupt os₁' · as'
bs''-eq = bs'S'
cs'' : D
cs'' = cs'
cs''-eq : cs'' ≡ ack (not b) · bs'
cs''-eq = cs'S'
js'-eq : js' ≡ out (not b) · bs''
js'-eq = js'S'
helper₂ {b} {i'} {is'} {os₁'} {os₂'} {as'} {bs'} {cs'} {ds'} {js'}
Bb Fos₁' (as'S' , bs'S' , cs'S' , ds'S' , js'S')
.(F ∷ ft₂) os₂'' (f*tcons {ft₂} FTft₂) Fos₂'' os₂'-eq =
helper₂ Bb (tail-Fair Fos₁') ihS' ft₂ os₂'' FTft₂ Fos₂'' refl
where
os₁^ : D
os₁^ = tail₁ os₁'
os₂^ : D
os₂^ = ft₂ ++ os₂''
os₂'-eq-helper : os₂' ≡ F ∷ os₂^
os₂'-eq-helper = os₂' ≡⟨ os₂'-eq ⟩
(F ∷ ft₂) ++ os₂'' ≡⟨ ++-∷ _ _ _ ⟩
F ∷ ft₂ ++ os₂'' ≡⟨ refl ⟩
F ∷ os₂^ ∎
ds^ : D
ds^ = corrupt os₂^ · cs'
ds'-eq : ds' ≡ error ∷ ds^
ds'-eq =
ds'
≡⟨ ds'S' ⟩
corrupt os₂' · (b ∷ cs')
≡⟨ ·-leftCong (corruptCong os₂'-eq-helper) ⟩
corrupt (F ∷ os₂^) · (b ∷ cs')
≡⟨ corrupt-F _ _ _ ⟩
error ∷ corrupt os₂^ · cs'
≡⟨ refl ⟩
error ∷ ds^ ∎
as^ : D
as^ = await b i' is' ds^
as'-eq : as' ≡ < i' , b > ∷ as^
as'-eq = as' ≡⟨ as'S' ⟩
await b i' is' ds' ≡⟨ awaitCong₄ ds'-eq ⟩
await b i' is' (error ∷ ds^) ≡⟨ await-error _ _ _ _ ⟩
< i' , b > ∷ await b i' is' ds^ ≡⟨ refl ⟩
< i' , b > ∷ as^ ∎
bs^ : D
bs^ = corrupt os₁^ · as^
bs'-eq-helper₁ : os₁' ≡ T ∷ tail₁ os₁' → bs' ≡ ok < i' , b > ∷ bs^
bs'-eq-helper₁ h =
bs'
≡⟨ bs'S' ⟩
corrupt os₁' · as'
≡⟨ subst₂ (λ t t' → corrupt os₁' · as' ≡ corrupt t · t')
h
as'-eq
refl
⟩
corrupt (T ∷ tail₁ os₁') · (< i' , b > ∷ as^)
≡⟨ corrupt-T _ _ _ ⟩
ok < i' , b > ∷ corrupt (tail₁ os₁') · as^
≡⟨ refl ⟩
ok < i' , b > ∷ bs^ ∎
bs'-eq-helper₂ : os₁' ≡ F ∷ tail₁ os₁' → bs' ≡ error ∷ bs^
bs'-eq-helper₂ h =
bs'
≡⟨ bs'S' ⟩
corrupt os₁' · as'
≡⟨ subst₂ (λ t t' → corrupt os₁' · as' ≡ corrupt t · t')
h
as'-eq
refl
⟩
corrupt (F ∷ tail₁ os₁') · (< i' , b > ∷ as^)
≡⟨ corrupt-F _ _ _ ⟩
error ∷ corrupt (tail₁ os₁') · as^
≡⟨ refl ⟩
error ∷ bs^ ∎
bs'-eq : bs' ≡ ok < i' , b > ∷ bs^ ∨ bs' ≡ error ∷ bs^
bs'-eq = case (λ h → inj₁ (bs'-eq-helper₁ h))
(λ h → inj₂ (bs'-eq-helper₂ h))
(head-tail-Fair Fos₁')
cs^ : D
cs^ = ack (not b) · bs^
cs'-eq-helper₁ : bs' ≡ ok < i' , b > ∷ bs^ → cs' ≡ b ∷ cs^
cs'-eq-helper₁ h =
cs' ≡⟨ cs'S' ⟩
ack (not b) · bs' ≡⟨ ·-rightCong h ⟩
ack (not b) · (ok < i' , b > ∷ bs^) ≡⟨ ack-ok≢ _ _ _ _ (not-x≢x Bb) ⟩
not (not b) ∷ ack (not b) · bs^ ≡⟨ ∷-leftCong (not-involutive Bb) ⟩
b ∷ ack (not b) · bs^ ≡⟨ refl ⟩
b ∷ cs^ ∎
cs'-eq-helper₂ : bs' ≡ error ∷ bs^ → cs' ≡ b ∷ cs^
cs'-eq-helper₂ h =
cs' ≡⟨ cs'S' ⟩
ack (not b) · bs' ≡⟨ ·-rightCong h ⟩
ack (not b) · (error ∷ bs^) ≡⟨ ack-error _ _ ⟩
not (not b) ∷ ack (not b) · bs^ ≡⟨ ∷-leftCong (not-involutive Bb) ⟩
b ∷ ack (not b) · bs^ ≡⟨ refl ⟩
b ∷ cs^ ∎
cs'-eq : cs' ≡ b ∷ cs^
cs'-eq = case cs'-eq-helper₁ cs'-eq-helper₂ bs'-eq
js'-eq-helper₁ : bs' ≡ ok < i' , b > ∷ bs^ → js' ≡ out (not b) · bs^
js'-eq-helper₁ h =
js'
≡⟨ js'S' ⟩
out (not b) · bs'
≡⟨ ·-rightCong h ⟩
out (not b) · (ok < i' , b > ∷ bs^)
≡⟨ out-ok≢ (not b) b i' bs^ (not-x≢x Bb) ⟩
out (not b) · bs^ ∎
js'-eq-helper₂ : bs' ≡ error ∷ bs^ → js' ≡ out (not b) · bs^
js'-eq-helper₂ h =
js' ≡⟨ js'S' ⟩
out (not b) · bs' ≡⟨ ·-rightCong h ⟩
out (not b) · (error ∷ bs^) ≡⟨ out-error (not b) bs^ ⟩
out (not b) · bs^ ∎
js'-eq : js' ≡ out (not b) · bs^
js'-eq = case js'-eq-helper₁ js'-eq-helper₂ bs'-eq
ds^-eq : ds^ ≡ corrupt os₂^ · (b ∷ cs^)
ds^-eq = ·-rightCong cs'-eq
ihS' : S' b i' is' os₁^ os₂^ as^ bs^ cs^ ds^ js'
ihS' = refl , refl , refl , ds^-eq , js'-eq
-- From Dybjer and Sander's paper: From the assumption that
-- os₂' ∈ Fair and hence by unfolding Fair, we conclude that there are
-- ft₂ : F*T and os₂'' : Fair, such that os₂' = ft₂ ++ os₂''.
--
-- We proceed by induction on ft₂ : F*T using helper.
lemma₂ : ∀ {b i' is' os₁' os₂' as' bs' cs' ds' js'} →
Bit b →
Fair os₁' →
Fair os₂' →
S' b i' is' os₁' os₂' as' bs' cs' ds' js' →
∃[ os₁'' ] ∃[ os₂'' ] ∃[ as'' ] ∃[ bs'' ] ∃[ cs'' ] ∃[ ds'' ]
Fair os₁''
∧ Fair os₂''
∧ S (not b) is' os₁'' os₂'' as'' bs'' cs'' ds'' js'
lemma₂ {b} {is' = is'} {os₂' = os₂'} {js' = js'} Bb Fos₁' Fos₂' s' =
helper₁ (Fair-out Fos₂')
where
helper₁ : (∃[ ft₂ ] ∃[ os₂'' ] F*T ft₂ ∧ os₂' ≡ ft₂ ++ os₂'' ∧ Fair os₂'') →
∃[ os₁'' ] ∃[ os₂'' ] ∃[ as'' ] ∃[ bs'' ] ∃[ cs'' ] ∃[ ds'' ]
Fair os₁''
∧ Fair os₂''
∧ S (not b) is' os₁'' os₂'' as'' bs'' cs'' ds'' js'
helper₁ (ft₂ , os₂'' , FTft₂ , os₂'-eq , Fos₂'') =
helper₂ Bb Fos₁' s' ft₂ os₂'' FTft₂ Fos₂'' os₂'-eq
|
module Data.List.Kleene.Relation.Unary.All where
open import Data.List.Kleene.Base
open import Relation.Unary
open import Relation.Nullary
open import Level using (_⊔_)
open import Function
mutual
record All⁺ {a p} {A : Set a} (P : Pred A p) (xs : A ⁺) : Set (a ⊔ p) where
constructor P⟨_&_⟩
inductive
field
P⟨head⟩ : P (head xs)
P⟨tail⟩ : All⋆ P (tail xs)
data All⋆ {a p} {A : Set a} (P : Pred A p) : Pred (A ⋆) (a ⊔ p) where
P⟨[]⟩ : All⋆ P []
P⟨∹_⟩ : ∀ {xs} → All⁺ P xs → All⋆ P (∹ xs)
open All⁺ public
module _ {a p} {A : Set a} {P : Pred A p} where
mutual
all⋆ : Decidable P → Decidable (All⋆ P)
all⋆ p? [] = yes P⟨[]⟩
all⋆ p? (∹ xs) with all⁺ p? xs
all⋆ p? (∹ xs) | yes p = yes P⟨∹ p ⟩
all⋆ p? (∹ xs) | no ¬p = no λ { P⟨∹ x ⟩ → ¬p x }
all⁺ : Decidable P → Decidable (All⁺ P)
all⁺ p? xs with p? (head xs) | all⋆ p? (tail xs)
all⁺ p? xs | no ¬p | ys = no (¬p ∘ P⟨head⟩)
all⁺ p? xs | yes p | yes ps = yes P⟨ p & ps ⟩
all⁺ p? xs | yes p | no ¬p = no (¬p ∘ P⟨tail⟩)
|
Formal statement is: lemma summable_complex_of_real [simp]: "summable (\<lambda>n. complex_of_real (f n)) \<longleftrightarrow> summable f" Informal statement is: A real-valued sequence is summable if and only if the corresponding complex-valued sequence is summable. |
module Open2 where
data ⊤
: Set
where
tt
: ⊤
data ⊤'
(x : ⊤)
: Set
where
tt
: ⊤' x
record R
: Set
where
field
x
: ⊤
y
: ⊤
record S
: Set₁
where
field
x
: R
open R x public
renaming (x to y; y to z)
postulate
s
: S
open S s
using (y)
postulate
p
: ⊤' y
|
Businesses: Strategy sessions while sailing, intervision, personal training, etc.
Ideal to combine with onshore activities.
Please let us advise you; anything can be arranged. |
theory Matrix_Invertable
imports Jordan_Normal_Form.Determinant
Smith_Normal_Form.SNF_Missing_Lemmas
begin
lemma invertible_right_inverse1:
fixes A :: "'a::{semiring_1, comm_ring_1} mat"
assumes "A \<in> carrier_mat n n"
shows "invertible_mat A \<longleftrightarrow> (\<exists>B \<in> carrier_mat n n. A * B = 1\<^sub>m n)"
by (metis (no_types, lifting) Determinant.det_mult assms det_one dvdI
invertible_iff_is_unit_JNF inverts_mat_def left_mult_one_mat
obtain_inverse_matrix one_carrier_mat similar_mat_witD(2) similar_mat_wit_refl)
lemma invertible_det_nz1:
fixes A::"'a::field mat"
assumes "A \<in> carrier_mat n n"
shows "invertible_mat A \<longleftrightarrow> det A \<noteq> 0"
proof(cases "n=0")
case True
then show ?thesis
using assms invertible_mat_zero by auto
next
case False
then show ?thesis
using invertible_det_nz[untransferred, cancel_card_constraint, of A n]
assms
by fast
qed
proposition cramer1:
fixes A::"'a::field mat"
assumes "A \<in> carrier_mat n n"
assumes "b \<in> carrier_vec n"
assumes "x \<in> carrier_vec n"
assumes d0: "det A \<noteq> 0"
shows "A *\<^sub>v x = b \<longleftrightarrow> x = vec n (\<lambda> k. det (replace_col A b k) / det A)"
proof -
from d0 obtain B where B: "B \<in> carrier_mat n n" "A * B = 1\<^sub>m n" "B * A = 1\<^sub>m n"
unfolding invertible_det_nz1[symmetric] invertible_mat_def
by (meson Determinant.mat_mult_left_right_inverse
\<open>\<And>n A. A \<in> carrier_mat n n \<Longrightarrow> (Determinant.det A \<noteq> 0) = invertible_mat A\<close> assms(1)
invertible_right_inverse1)
have "(A * B) *\<^sub>v b = b"
by (simp add: B assms(2))
then have "A *\<^sub>v (B *\<^sub>v b) = b"
using B(1) assms(1) assms(2) by force
then have xe: "\<exists>x. A *\<^sub>v x = b"
by blast
{
fix x
assume x: "A *\<^sub>v x = b"
assume "x \<in> carrier_vec n"
have "x = vec n (\<lambda> k. det (replace_col A b k) / det A)"
unfolding x[symmetric]
using d0
by (auto simp: vec_eq_iff cramer_lemma_mat[of A n x] field_simps assms `x \<in> carrier_vec n`)
}
with xe show ?thesis
using \<open>\<And>xa. \<lbrakk>A *\<^sub>v xa = b; xa \<in> carrier_vec n\<rbrakk> \<Longrightarrow>
xa = Matrix.vec n (\<lambda>k. Determinant.det (replace_col A b k) / Determinant.det A)\<close> assms(3)
by (metis B(1) \<open>A *\<^sub>v (B *\<^sub>v b) = b\<close> assms(2) mult_mat_vec_carrier)
qed
end |
import SimpleDataRealizedFunction
%default total
-- module Main
-- Realized incrementer
incrementer : Int -> Int
incrementer = (+1)
rlz_incrementer : SimpleDataRealizedFunction (Int -> Int) 100 1.0
rlz_incrementer = MkSimpleDataRealizedFunction incrementer 100 1.0
-- Realized twicer
twicer : Int -> Int
twicer = (*2)
rlz_twicer : SimpleDataRealizedFunction (Int -> Int) 500 0.9
rlz_twicer = MkSimpleDataRealizedFunction twicer 500 0.9
-- Realized (twicer . incrementer).
rlz_composition : SimpleDataRealizedFunction (Int -> Int) 600 0.9
-- The following does not work because 601 ≠ 100+500 and 1.0 ≠ (min 1.0 0.9)
-- rlz_composition : SimpleDataRealizedFunction (Int -> Int) 601 1.0
rlz_composition = compose rlz_twicer rlz_incrementer
-- Simple test, result should be (3+1)*2 = 8
rslt : Int -> Int
rslt = apply rlz_composition
rsltTest : (rslt 3) = 8
rsltTest = Refl
|
(** * Definition of a [comp]-based non-computational CFG parser *)
Require Import Coq.Lists.List Coq.Program.Program Coq.Program.Wf Coq.Arith.Wf_nat Coq.Arith.Compare_dec Coq.Classes.RelationClasses Coq.Strings.String.
Require Import Parsers.ContextFreeGrammar Parsers.Specification Parsers.DependentlyTyped Parsers.MinimalParse.
Require Parsers.BooleanRecognizer Parsers.BooleanRecognizerCorrect.
Require Import Parsers.DependentlyTypedMinimalOfParseFactored.
Require Import Parsers.WellFoundedParse Parsers.ContextFreeGrammarProperties.
Require Import Common Common.ilist Common.Wf Common.Le.
Set Implicit Arguments.
Local Open Scope string_like_scope.
Local Coercion is_true : bool >-> Sortclass.
Section recursive_descent_parser.
Context (G : grammar Ascii.ascii).
Local Instance predata : parser_computational_predataT
:= { nonterminal_names_listT := BooleanRecognizer.rdp_list_names_listT;
initial_nonterminal_names_data := Valid_nonterminal_symbols G;
is_valid_nonterminal_name := BooleanRecognizer.rdp_list_is_valid_name;
remove_nonterminal_name := BooleanRecognizer.rdp_list_remove_name;
nonterminal_names_listT_R := BooleanRecognizer.rdp_list_names_listT_R;
remove_nonterminal_name_dec := BooleanRecognizer.rdp_list_remove_name_dec;
ntl_wf := BooleanRecognizer.rdp_list_ntl_wf }.
Local Instance types_data : @parser_computational_types_dataT _ string_stringlike
:= {| predata := predata;
split_stateT str0 valid g str := True |}.
Local Instance methods' : @parser_computational_dataT' _ string_stringlike types_data
:= { split_string_for_production str0 valid it its := BooleanRecognizer.make_all_single_splits;
split_string_for_production_correct str0 valid it its str
:= Forall_impl _ _ (BooleanRecognizer.make_all_single_splits_correct_eq str)
}.
Proof.
intros; apply bool_eq_correct; assumption.
Defined.
Local Instance strdata : @parser_computational_prestrdataT _ string_stringlike G {| DependentlyTyped.methods' := methods' |} idM.
Proof.
constructor; simpl; intros; assumption.
Defined.
Local Instance orig_methods : @parser_computational_dataT _ string_stringlike
:= { methods' := methods' }.
Lemma rdp_list_complete'
(str0 : string_stringlike) (valid : nonterminal_names_listT)
(it : item Ascii.ascii) (its : production Ascii.ascii)
(str : StringWithSplitState string_stringlike (split_stateT str0 valid (it :: its : production _)))
(pf : str ≤s str0)
: DependentlyTypedMinimal.split_list_completeT G valid it its str pf
(split_string_for_production str0 valid it its str).
Proof.
simpl.
repeat intro.
apply (@BooleanRecognizerCorrect.make_all_single_splits_complete
G BooleanRecognizer.rdp_list_names_listT
(Valid_nonterminal_symbols G)
BooleanRecognizer.rdp_list_is_valid_name
BooleanRecognizer.rdp_list_remove_name
{| string_val := str0 : string_stringlike ; state_val := I |}
valid valid
str
pf (it::its : production _)).
assumption.
Defined.
Global Instance minimal_of_parse_parser_dependent_types_extra_data
: @parser_dependent_types_extra_dataT _ string_stringlike G
:= @minimal_of_parse_parser_dependent_types_extra_data'
_ string_stringlike G
predata methods' strdata
BooleanRecognizer.rdp_list_remove_name_1 BooleanRecognizer.rdp_list_remove_name_2
rdp_list_complete'.
Definition minimal_parse_nonterminal_name__of__parse
(nonterminal_name : string)
(s : string)
(p : parse_of_item string_stringlike G s (NonTerminal _ nonterminal_name))
(H : Forall_parse_of_item
(fun _ n => is_valid_nonterminal_name initial_nonterminal_names_data n = true)
p)
: minimal_parse_of_name string_stringlike G initial_nonterminal_names_data is_valid_nonterminal_name remove_nonterminal_name s initial_nonterminal_names_data s nonterminal_name.
Proof.
eapply @minimal_parse_nonterminal_name__of__parse'.
exact strdata.
exact BooleanRecognizer.rdp_list_remove_name_1.
exact BooleanRecognizer.rdp_list_remove_name_2.
exact rdp_list_complete'.
exact H.
Defined.
End recursive_descent_parser.
|
#' Monthly ozone measurements over Central America.
#'
#'
#' This data set is a subset of the data from the 2006 ASA Data expo
#' challenge, \url{http://stat-computing.org/dataexpo/2006/}. The data are
#' monthly ozone averages on a very coarse 24 by 24 grid covering Central
#' America, from Jan 1995 to Dec 2000. The data is stored in a 3d area with
#' the first two dimensions representing latitude and longitude, and the third
#' representing time.
#'
#' @docType data
#' @name ozone
#' @usage ozone
#' @format A 24 x 24 x 72 numeric array
#' @references \url{http://stat-computing.org/dataexpo/2006/}
#' @keywords datasets
#' @examples
#' value <- ozone[1, 1, ]
#' time <- 1:72
#' month.abbr <- c("Jan", "Feb", "Mar", "Apr", "May",
#' "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
#' month <- factor(rep(month.abbr, length = 72), levels = month.abbr)
#' year <- rep(1:6, each = 12)
#' deseasf <- function(value) lm(value ~ month - 1)
#'
#' models <- alply(ozone, 1:2, deseasf)
#' coefs <- laply(models, coef)
#' dimnames(coefs)[[3]] <- month.abbr
#' names(dimnames(coefs))[3] <- "month"
#'
#' deseas <- laply(models, resid)
#' dimnames(deseas)[[3]] <- 1:72
#' names(dimnames(deseas))[3] <- "time"
#'
#' dim(coefs)
#' dim(deseas)
NULL
#' Yearly batting records for all major league baseball players
#'
#' This data frame contains batting statistics for a subset of players
#' collected from \url{http://www.baseball-databank.org/}. There are a total
#' of 21,699 records, covering 1,228 players from 1871 to 2007. Only players
#' with more than 15 seasons of play are included.
#'
#' @section Variables:
#' Variables:
#' \itemize{
#' \item id, unique player id
#' \item year, year of data
#' \item stint
#' \item team, team played for
#' \item lg, league
#' \item g, number of games
#' \item ab, number of times at bat
#' \item r, number of runs
#' \item h, hits, times reached base because of a batted, fair ball without
#' error by the defense
#' \item X2b, hits on which the batter reached second base safely
#' \item X3b, hits on which the batter reached third base safely
#' \item hr, number of home runs
#' \item rbi, runs batted in
#' \item sb, stolen bases
#' \item cs, caught stealing
#' \item bb, base on balls (walk)
#' \item so, strike outs
#' \item ibb, intentional base on balls
#' \item hbp, hits by pitch
#' \item sh, sacrifice hits
#' \item sf, sacrifice flies
#' \item gidp, ground into double play
#' }
#' @docType data
#' @name baseball
#' @usage baseball
#' @format A 21699 x 22 data frame
#' @references \url{http://www.baseball-databank.org/}
#' @keywords datasets
#' @examples
#' baberuth <- subset(baseball, id == "ruthba01")
#' baberuth$cyear <- baberuth$year - min(baberuth$year) + 1
#'
#' calculate_cyear <- function(df) {
#' mutate(df,
#' cyear = year - min(year),
#' cpercent = cyear / (max(year) - min(year))
#' )
#' }
#'
#' baseball <- ddply(baseball, .(id), calculate_cyear)
#' baseball <- subset(baseball, ab >= 25)
#'
#' model <- function(df) {
#' lm(rbi / ab ~ cyear, data=df)
#' }
#' model(baberuth)
#' models <- dlply(baseball, .(id), model)
NULL
|
import data.real.basic data.nat.factorial
import measure_theory.interval_integral
import analysis.special_functions.trigonometric
import topology.basic
open interval_integral real set
noncomputable theory
localized "notation n `!`:10000 := nat.factorial n" in nat
def Γ (n : ℕ) := (n-1)!
def G (n : ℕ) (x : ℝ) := x^(n - 1) * exp(-x)
def G' (n : ℕ) (x : ℝ) := exp(-x) * x^(n-2) * (n - 1 - x)
lemma G_def (n : ℕ) (x : ℝ) : G n x = x ^ (n - 1) * exp(-x) := rfl
lemma G'_def (n : ℕ) (x : ℝ) : G' n x = exp(-x) * x^(n-2) * (n - 1 - x) := rfl
-- the integral between 0 and ∞ of x^(z - 1) * exp(-x) = Γ (z) = (z - 1)!
def γ (n : ℕ) (a : ℝ) := ∫ x in 0..a, x^(n - 1) * exp(-x)
lemma has_deriv_at_G (x : ℝ) (n : ℕ): has_deriv_at (G n) (G' n x) x :=
begin
rw G'_def,
sorry
end
theorem integral_deriv_eq_sub'
{E} {f' : ℝ → E} {a b : ℝ} [measurable_space E] [normed_group E]
[topological_space.second_countable_topology E]
[complete_space E] [normed_space ℝ E] [borel_space E]
(f : ℝ → E)
(hf' : deriv f = f')
(hderiv : ∀ x ∈ interval a b, differentiable_at ℝ f x)
(hcont' : continuous_on f' (interval a b)) :
∫ y in a..b, f' y = f b - f a :=
by rw [← hf', integral_deriv_eq_sub hderiv]; cc
example : ∫ x in 0..π, sin x = 2 :=
begin
rw integral_deriv_eq_sub' (λ x, -cos x);
norm_num,
exact continuous_sin.continuous_on,
end
|
% Utility function that transforms a string whose evaluation would give a
% matrix, into a cell array.
%
% designed for use with Ben Petschel stuff on Grobner basis
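%
% A minimal sketch of the function this header describes; the function
% name (str2cellmat) and the exact splitting rules are assumptions, not
% part of the original file.
function C = str2cellmat(s)
  % Strip brackets, split rows on ';', then split each row on ',' or
  % whitespace, returning one cell of entry strings per matrix row.
  s = strrep(strrep(s, '[', ''), ']', '');
  rows = strsplit(strtrim(s), ';');
  C = cell(numel(rows), 1);
  for k = 1:numel(rows)
    C{k} = strsplit(strtrim(rows{k}), {',', ' '});
  end
end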
% |
#include <stdlib.h>
#include <stdio.h>
#include "linterp.h"
#include "qspline.h"
#include "cspline.h"
#include <gsl/gsl_spline.h>
#define NUM_DATAPOINTS 9
/** Read a txt file with two columns separated by tabs and read all the items
* into the arrays x and y.
**/
void read_data(double *x, double *y){
FILE *infile = fopen("xy_data.txt", "r");
for (int i = 0; i < NUM_DATAPOINTS; ++i){
int items = fscanf(infile, "%lf\t%lf\n", &x[i], &y[i]);
if (items == EOF){
break;
}
}
fclose(infile);
}
void exerciseA(){
FILE *outfile = fopen("out1.txt", "w");
double *x, *y;
x = (double *) malloc(sizeof(double) * NUM_DATAPOINTS);
y = (double *) malloc(sizeof(double) * NUM_DATAPOINTS);
read_data(x, y);
gsl_interp_accel *acc = gsl_interp_accel_alloc ();
gsl_spline *spline = gsl_spline_alloc (gsl_interp_linear, NUM_DATAPOINTS);
gsl_spline_init (spline, x, y, NUM_DATAPOINTS);
double val = 0.1;
do {
double interp_val = linterp(NUM_DATAPOINTS, x, y, val);
double interp_val_integ = linterp_integ(NUM_DATAPOINTS, x, y, val);
double y_gsl = gsl_spline_eval(spline, val, acc);
fprintf(outfile, "%f\t%f\t%f\t%f\n", val, interp_val, interp_val_integ, y_gsl);
val += 0.1;
} while (val < 8);
gsl_spline_free (spline);
gsl_interp_accel_free (acc);
free(x), free(y);
fclose(outfile);
}
void exerciseB(){
FILE *outfile = fopen("out2.txt", "w");
double *x, *y;
x = (double *) malloc(sizeof(double) * NUM_DATAPOINTS);
y = (double *) malloc(sizeof(double) * NUM_DATAPOINTS);
read_data(x, y);
double val = 0.1;
do {
double interp_val = qspline_eval(NUM_DATAPOINTS, x, y, val);
double interp_val_integ = qspline_integ(NUM_DATAPOINTS, x, y, val);
double interp_val_deriv = qspline_deriv(NUM_DATAPOINTS, x, y, val);
fprintf(outfile, "%f\t%f\t%f\t%f\n", val, interp_val, interp_val_integ, interp_val_deriv);
val += 0.1;
} while (val < 8);
free(x), free(y);
fclose(outfile);
}
void exerciseC(){
FILE *outfile = fopen("out3.txt", "w");
double *x, *y;
x = (double *) malloc(sizeof(double) * NUM_DATAPOINTS);
y = (double *) malloc(sizeof(double) * NUM_DATAPOINTS);
read_data(x, y);
gsl_interp_accel *acc = gsl_interp_accel_alloc ();
gsl_spline *spline = gsl_spline_alloc (gsl_interp_cspline, NUM_DATAPOINTS);
gsl_spline_init (spline, x, y, NUM_DATAPOINTS);
double val = 0.1;
do {
double interp_val = cspline_eval(NUM_DATAPOINTS, x, y, val);
double interp_val_integ = cspline_integ(NUM_DATAPOINTS, x, y, val);
double y_gsl = gsl_spline_eval(spline, val, acc);
fprintf(outfile, "%f\t%f\t%f\t%f\n", val, interp_val, interp_val_integ, y_gsl);
val += 0.1;
} while (val < 8);
gsl_spline_free (spline);
gsl_interp_accel_free (acc);
free(x), free(y);
fclose(outfile);
}
int main(){
exerciseA();
exerciseB();
exerciseC();
return 0;
} |
A set $U$ is countably compact if and only if it is compact. |
Formal statement is: lemma coeffs_pCons_eq_cCons [simp]: "coeffs (pCons a p) = a ## coeffs p" Informal statement is: The coefficient list of the polynomial $\mathrm{pCons}\ a\ p$ (that is, $a + x \cdot p$) is obtained by prepending $a$, the new constant coefficient, to the coefficient list of $p$. |
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE MultiParamTypeClasses #-}
module Statistics.BBVI.Observed
( Obs(..)
, defaultObs
)
where
import qualified Data.Vector as V
import Statistics.BBVI.Class
import Statistics.BBVI.Propagator ( DistCell(..)
, SampleVector
, SampleDouble
)
import System.Random.MWC ( uniformR )
-- | Dummy distribution representing a vector of observations, for use
-- with building "observation" distribution cells. These can be used
-- to easily/selectively subsample (and resample) observations using
-- existing typeclass methods (useful with stochastic gradient
-- updates).
newtype Obs a = O (V.Vector a) deriving (Show, Eq, Ord, Read)
-- | helper function to build a distribution cell of observations
defaultObs
:: V.Vector a -- ^ observations
-> DistCell (Obs a)
defaultObs d = (Node 1 V.empty (O d))
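-- A hypothetical usage sketch (exampleCell is not part of this module):
-- wrap three observations in a cell; the Dist instances below then let
-- one resample a single observation with an MWC generator.
--
-- > exampleCell :: DistCell (Obs Double)
-- > exampleCell = defaultObs (V.fromList [0.5, 1.5, 2.5])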
instance DistUtil (Obs Double) where
nParams _x = 0
toParamVector _ = V.empty
fromParamVector _ = O V.empty
instance Dist (Obs Double) SampleDouble where
resample (O d) gen = return . (d V.!) =<< uniformR (0, (V.length d - 1)) gen
logProb _d _x = 0
paramGradOfLogQ _d _x = V.empty
instance DistUtil (Obs SampleVector) where
nParams _x = 0
toParamVector _ = V.empty
fromParamVector _ = O V.empty
instance Dist (Obs SampleVector) SampleVector where
resample (O d) gen = return . (d V.!) =<< uniformR (0, (V.length d - 1)) gen
logProb _d _x = 0
paramGradOfLogQ _d _x = V.empty
|
Formal statement is: lemma coeffs_1_eq [simp, code abstract]: "coeffs 1 = [1]" Informal statement is: The coefficients of the polynomial $1$ are $[1]$. |
------------------------------------------------------------------------------
-- The relation of divisibility on partial natural numbers
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOTC.Data.Nat.Divisibility.NotBy0 where
open import FOTC.Base
open import FOTC.Data.Nat
infix 4 _∣_
------------------------------------------------------------------------------
-- The relation of divisibility (the symbol is '\mid' not '|')
--
-- (See documentation in FOTC.Data.Nat.Divisibility.By0)
--
-- In our definition 0∤0, which is used to prove properties of the gcd
-- as it is in GHC ≤ 7.0.4, where gcd 0 0 = undefined (see
-- http://hackage.haskell.org/trac/ghc/ticket/3304).
-- Note that @k@ should be a total natural number.
_∣_ : D → D → Set
m ∣ n = (m ≢ zero) ∧ (∃[ k ] N k ∧ n ≡ k * m)
{-# ATP definition _∣_ #-}
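-- For example (a restatement, not in the original file): 2 ∣ 4 holds by
-- taking k = 2, since 2 ≢ zero and 4 ≡ 2 * 2, whereas 0 ∣ n fails for
-- every n because the first conjunct zero ≢ zero is false.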
|
open import Agda.Builtin.IO
open import Agda.Builtin.Unit
record Box (A : Set) : Set where
field
unbox : A
open Box public
record R : Set where
coinductive
field
force : Box R
open R public
r : R
unbox (force r) = r
postulate
seq : {A B : Set} → A → B → B
return : {A : Set} → A → IO A
{-# COMPILE GHC return = \_ -> return #-}
{-# COMPILE GHC seq = \_ _ -> seq #-}
main : IO ⊤
main = seq r (return tt)
|
[GOAL]
G H : AddCommGroupCat
⊢ ∀ ⦃X Y : Discrete WalkingPair⦄ (f : X ⟶ Y),
((Functor.const (Discrete WalkingPair)).obj (of (↑G × ↑H))).map f ≫
(fun j => Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H))
Y =
(fun j => Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H)) X ≫
(pair G H).map f
[PROOFSTEP]
rintro ⟨⟨⟩⟩ ⟨⟨⟩⟩ ⟨⟨⟨⟩⟩⟩
[GOAL]
case mk.left.mk.left.up.up.refl
G H : AddCommGroupCat
⊢ ((Functor.const (Discrete WalkingPair)).obj (of (↑G × ↑H))).map
{ down := { down := (_ : { as := WalkingPair.left }.as = { as := WalkingPair.left }.as) } } ≫
(fun j => Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H))
{ as := WalkingPair.left } =
(fun j => Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H))
{ as := WalkingPair.left } ≫
(pair G H).map { down := { down := (_ : { as := WalkingPair.left }.as = { as := WalkingPair.left }.as) } }
[PROOFSTEP]
rfl
[GOAL]
case mk.right.mk.right.up.up.refl
G H : AddCommGroupCat
⊢ ((Functor.const (Discrete WalkingPair)).obj (of (↑G × ↑H))).map
{ down := { down := (_ : { as := WalkingPair.right }.as = { as := WalkingPair.right }.as) } } ≫
(fun j => Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H))
{ as := WalkingPair.right } =
(fun j => Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H))
{ as := WalkingPair.right } ≫
(pair G H).map { down := { down := (_ : { as := WalkingPair.right }.as = { as := WalkingPair.right }.as) } }
[PROOFSTEP]
rfl
[GOAL]
G H : AddCommGroupCat
⊢ ∀ (s : Cone (pair G H)) (j : Discrete WalkingPair),
(fun s =>
AddMonoidHom.prod (NatTrans.app s.π { as := WalkingPair.left })
(NatTrans.app s.π { as := WalkingPair.right }))
s ≫
NatTrans.app
{ pt := of (↑G × ↑H),
π :=
NatTrans.mk fun j =>
Discrete.casesOn j fun j =>
WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H) }.π
j =
NatTrans.app s.π j
[PROOFSTEP]
rintro s (⟨⟩ | ⟨⟩)
[GOAL]
case mk.left
G H : AddCommGroupCat
s : Cone (pair G H)
⊢ (fun s =>
AddMonoidHom.prod (NatTrans.app s.π { as := WalkingPair.left })
(NatTrans.app s.π { as := WalkingPair.right }))
s ≫
NatTrans.app
{ pt := of (↑G × ↑H),
π :=
NatTrans.mk fun j =>
Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H) }.π
{ as := WalkingPair.left } =
NatTrans.app s.π { as := WalkingPair.left }
[PROOFSTEP]
rfl
[GOAL]
case mk.right
G H : AddCommGroupCat
s : Cone (pair G H)
⊢ (fun s =>
AddMonoidHom.prod (NatTrans.app s.π { as := WalkingPair.left })
(NatTrans.app s.π { as := WalkingPair.right }))
s ≫
NatTrans.app
{ pt := of (↑G × ↑H),
π :=
NatTrans.mk fun j =>
Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H) }.π
{ as := WalkingPair.right } =
NatTrans.app s.π { as := WalkingPair.right }
[PROOFSTEP]
rfl
[GOAL]
G H : AddCommGroupCat
s : Cone (pair G H)
m :
s.pt ⟶
{ pt := of (↑G × ↑H),
π :=
NatTrans.mk fun j =>
Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H) }.pt
w :
∀ (j : Discrete WalkingPair),
m ≫
NatTrans.app
{ pt := of (↑G × ↑H),
π :=
NatTrans.mk fun j =>
Discrete.casesOn j fun j =>
WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H) }.π
j =
NatTrans.app s.π j
⊢ m =
(fun s =>
AddMonoidHom.prod (NatTrans.app s.π { as := WalkingPair.left }) (NatTrans.app s.π { as := WalkingPair.right }))
s
[PROOFSTEP]
simp_rw [← w ⟨WalkingPair.left⟩, ← w ⟨WalkingPair.right⟩]
[GOAL]
G H : AddCommGroupCat
s : Cone (pair G H)
m :
s.pt ⟶
{ pt := of (↑G × ↑H),
π :=
NatTrans.mk fun j =>
Discrete.casesOn j fun j => WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H) }.pt
w :
∀ (j : Discrete WalkingPair),
m ≫
NatTrans.app
{ pt := of (↑G × ↑H),
π :=
NatTrans.mk fun j =>
Discrete.casesOn j fun j =>
WalkingPair.casesOn j (AddMonoidHom.fst ↑G ↑H) (AddMonoidHom.snd ↑G ↑H) }.π
j =
NatTrans.app s.π j
⊢ m = AddMonoidHom.prod (m ≫ AddMonoidHom.fst ↑G ↑H) (m ≫ AddMonoidHom.snd ↑G ↑H)
[PROOFSTEP]
rfl
[GOAL]
J : Type w
f : J → AddCommGroupCat
s : Fan f
⊢ (fun x j => ↑(NatTrans.app s.π { as := j }) x) 0 = 0
[PROOFSTEP]
simp only [Functor.const_obj_obj, map_zero]
[GOAL]
J : Type w
f : J → AddCommGroupCat
s : Fan f
⊢ (fun j => 0) = 0
[PROOFSTEP]
rfl
[GOAL]
J : Type w
f : J → AddCommGroupCat
s : Fan f
x y : ↑s.pt
⊢ ZeroHom.toFun
{ toFun := fun x j => ↑(NatTrans.app s.π { as := j }) x,
map_zero' := (_ : (fun x j => ↑(NatTrans.app s.π { as := j }) x) 0 = 0) }
(x + y) =
ZeroHom.toFun
{ toFun := fun x j => ↑(NatTrans.app s.π { as := j }) x,
map_zero' := (_ : (fun x j => ↑(NatTrans.app s.π { as := j }) x) 0 = 0) }
x +
ZeroHom.toFun
{ toFun := fun x j => ↑(NatTrans.app s.π { as := j }) x,
map_zero' := (_ : (fun x j => ↑(NatTrans.app s.π { as := j }) x) 0 = 0) }
y
[PROOFSTEP]
simp only [Functor.const_obj_obj, map_add]
[GOAL]
J : Type w
f : J → AddCommGroupCat
s : Fan f
x y : ↑s.pt
⊢ (fun j => ↑(NatTrans.app s.π { as := j }) x + ↑(NatTrans.app s.π { as := j }) y) =
(fun j => ↑(NatTrans.app s.π { as := j }) x) + fun j => ↑(NatTrans.app s.π { as := j }) y
[PROOFSTEP]
rfl
[GOAL]
J : Type w
f : J → AddCommGroupCat
s : Cone (Discrete.functor f)
m :
s.pt ⟶ { pt := of ((j : J) → ↑(f j)), π := Discrete.natTrans fun j => Pi.evalAddMonoidHom (fun j => ↑(f j)) j.as }.pt
w :
∀ (j : Discrete J),
m ≫
NatTrans.app
{ pt := of ((j : J) → ↑(f j)), π := Discrete.natTrans fun j => Pi.evalAddMonoidHom (fun j => ↑(f j)) j.as }.π
j =
NatTrans.app s.π j
⊢ m = lift f s
[PROOFSTEP]
ext x
[GOAL]
case w
J : Type w
f : J → AddCommGroupCat
s : Cone (Discrete.functor f)
m :
s.pt ⟶ { pt := of ((j : J) → ↑(f j)), π := Discrete.natTrans fun j => Pi.evalAddMonoidHom (fun j => ↑(f j)) j.as }.pt
w :
∀ (j : Discrete J),
m ≫
NatTrans.app
{ pt := of ((j : J) → ↑(f j)), π := Discrete.natTrans fun j => Pi.evalAddMonoidHom (fun j => ↑(f j)) j.as }.π
j =
NatTrans.app s.π j
x : ↑s.pt
⊢ ↑m x = ↑(lift f s) x
[PROOFSTEP]
funext j
[GOAL]
case w.h
J : Type w
f : J → AddCommGroupCat
s : Cone (Discrete.functor f)
m :
s.pt ⟶ { pt := of ((j : J) → ↑(f j)), π := Discrete.natTrans fun j => Pi.evalAddMonoidHom (fun j => ↑(f j)) j.as }.pt
w :
∀ (j : Discrete J),
m ≫
NatTrans.app
{ pt := of ((j : J) → ↑(f j)), π := Discrete.natTrans fun j => Pi.evalAddMonoidHom (fun j => ↑(f j)) j.as }.π
j =
NatTrans.app s.π j
x : ↑s.pt
j : J
⊢ ↑m x j = ↑(lift f s) x j
[PROOFSTEP]
exact congr_arg (fun g : s.pt ⟶ f j => (g : s.pt → f j) x) (w ⟨j⟩)
|
[STATEMENT]
lemma parts_UN [simp]: "parts(\<Union>x\<in>A. H x) = (\<Union>x\<in>A. parts(H x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. parts (\<Union> (H ` A)) = (\<Union>x\<in>A. parts (H x))
[PROOF STEP]
by (intro equalityI parts_UN_subset1 parts_UN_subset2) |
From iris Require Export algebra.auth algebra.numbers.
From iris Require Import base_logic.lib.own proofmode.proofmode.
Notation "'●nat' n" := (auth_auth (A:=natUR) (DfracOwn 1%Qp) n%nat) (at level 20).
Notation "'◯nat' n" := (auth_frag (A:=natUR) n%nat) (at level 20).
Section Auth_nat.
Context `{inG Σ (authR natUR)}.
Lemma auth_nat_alloc (n : nat) :
⊢ |==> ∃ γ, own γ (●nat n) ∗ own γ (◯nat n).
Proof.
iMod (own_alloc (●nat n ⋅ ◯nat n)) as (γ) "[? ?]".
- by apply auth_both_valid_2.
- by auto with iFrame.
Qed.
Lemma own_auth_nat_le (γ : gname) (m n : nat) :
own γ (●nat m) -∗
own γ (◯nat n) -∗
⌜(n ≤ m)%nat⌝.
Proof.
iIntros "H● H◯".
by iDestruct (own_valid_2 with "H● H◯")
as % [?%nat_le_sum _] % auth_both_valid.
Qed.
Lemma own_auth_nat_weaken (γ : gname) (n₁ n₂ : nat) :
(n₂ ≤ n₁)%nat →
own γ (◯nat n₁) -∗
own γ (◯nat n₂).
Proof.
iIntros (I) "H".
rewrite (_ : n₁ = (n₁ - n₂) + n₂)%nat ; last lia.
iDestruct "H" as "[_$]".
Qed.
Lemma own_auth_nat_zero (γ : gname) :
⊢ |==> own γ (◯nat 0).
Proof.
apply own_unit.
Qed.
Lemma auth_nat_update_incr (γ : gname) (m k : nat) :
own γ (●nat m) -∗
|==> own γ (●nat (m + k)) ∗ own γ (◯nat k).
Proof.
iIntros "H●".
rewrite -own_op. iApply (own_update with "H●").
apply auth_update_alloc, nat_local_update. rewrite right_id. lia.
Qed.
Lemma auth_nat_update_decr (γ : gname) (m n k : nat) :
(k ≤ n)%nat →
own γ (●nat m) -∗
own γ (◯nat n) -∗
|==> own γ (●nat (m - k)) ∗ own γ (◯nat (n - k)).
Proof.
iIntros (I) "H● H◯".
iDestruct (own_auth_nat_le with "H● H◯") as %J.
rewrite -own_op. iApply (own_update_2 with "H● H◯").
apply auth_update, nat_local_update. lia.
Qed.
End Auth_nat.
|
lemma complex_Re_of_int [simp]: "Re (of_int z) = of_int z" |
c
c Copyright (C) 1997 Berwin A. Turlach <[email protected]>
c
c This program is free software; you can redistribute it and/or modify
c it under the terms of the GNU General Public License as published by
c the Free Software Foundation; either version 2 of the License, or
c (at your option) any later version.
c
c This program is distributed in the hope that it will be useful,
c but WITHOUT ANY WARRANTY; without even the implied warranty of
c MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
c GNU General Public License for more details.
c
c You should have received a copy of the GNU General Public License
c along with this program; if not, write to the Free Software
c Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
c USA.
c
c this routine checks whether Aind has valid entries, i.e.,
c 1) 1<= Aind(1,i) <= n for i=1,...,q (number of constraints)
c 2) 1<= Aind(j,i) <= n for j=2,...,Aind(1,i)+1, i=1,...,q
c
c Aind is a m times q matrix constructed in Splus
c
subroutine aind(ind,m,q,n,ok)
implicit none
integer m, ind(m,*), q, n, i, j
logical ok
ok = .FALSE.
do i=1,q
if( ind(1,i) .LT. 1 .OR. ind(1,i) .GT. n ) return
do j=2,ind(1,i)+1
if( ind(j,i) .LT. 1 .OR. ind(j,i) .GT. n ) return
enddo
enddo
ok = .TRUE.
return
end
|
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Yury Kudryashov
! This file was ported from Lean 3 source module topology.algebra.infinite_sum.real
! leanprover-community/mathlib commit 9a59dcb7a2d06bf55da57b9030169219980660cd
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Algebra.BigOperators.Intervals
import Mathlib.Topology.Algebra.InfiniteSum.Order
import Mathlib.Topology.Instances.Real
/-!
# Infinite sum in the reals
This file provides lemmas about Cauchy sequences in terms of infinite sums.
-/
open Filter Finset BigOperators NNReal Topology
variable {α : Type _}
/-- If the extended distance between consecutive points of a sequence is estimated
by a summable series of `NNReal`s, then the original sequence is a Cauchy sequence. -/
theorem cauchySeq_of_edist_le_of_summable [PseudoEMetricSpace α] {f : ℕ → α} (d : ℕ → ℝ≥0)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n) (hd : Summable d) : CauchySeq f := by
refine EMetric.cauchySeq_iff_NNReal.2 fun ε εpos => ?_
-- Actually we need partial sums of `d` to be a Cauchy sequence
replace hd : CauchySeq fun n : ℕ => ∑ x in range n, d x :=
let ⟨_, H⟩ := hd
H.tendsto_sum_nat.cauchySeq
-- Now we take the same `N` as in one of the definitions of a Cauchy sequence
refine (Metric.cauchySeq_iff'.1 hd ε (NNReal.coe_pos.2 εpos)).imp fun N hN n hn => ?_
specialize hN n hn
-- We simplify the known inequality
rw [dist_nndist, NNReal.nndist_eq, ← sum_range_add_sum_Ico _ hn, add_tsub_cancel_left,
NNReal.coe_lt_coe, max_lt_iff] at hN
rw [edist_comm]
-- Then use `hf` to simplify the goal to the same form
refine lt_of_le_of_lt (edist_le_Ico_sum_of_edist_le hn fun _ _ => hf _) ?_
exact_mod_cast hN.1
#align cauchy_seq_of_edist_le_of_summable cauchySeq_of_edist_le_of_summable
variable [PseudoMetricSpace α] {f : ℕ → α} {a : α}
/-- If the distance between consecutive points of a sequence is estimated by a summable series,
then the original sequence is a Cauchy sequence. -/
theorem cauchySeq_of_dist_le_of_summable (d : ℕ → ℝ) (hf : ∀ n, dist (f n) (f n.succ) ≤ d n)
(hd : Summable d) : CauchySeq f := by
-- Porting note: todo: with `Topology/Instances/NNReal` we can use this:
-- lift d to ℕ → ℝ≥0 using fun n => dist_nonneg.trans (hf n)
-- refine cauchySeq_of_edist_le_of_summable d ?_ ?_
-- · exact_mod_cast hf
-- · exact_mod_cast hd
refine' Metric.cauchySeq_iff'.2 fun ε εpos => _
replace hd : CauchySeq fun n : ℕ => ∑ x in range n, d x :=
let ⟨_, H⟩ := hd
H.tendsto_sum_nat.cauchySeq
refine' (Metric.cauchySeq_iff'.1 hd ε εpos).imp fun N hN n hn => _
have hsum := hN n hn
rw [Real.dist_eq, ← sum_Ico_eq_sub _ hn] at hsum
calc
dist (f n) (f N) = dist (f N) (f n) := dist_comm _ _
_ ≤ ∑ x in Ico N n, d x := dist_le_Ico_sum_of_dist_le hn fun _ _ => hf _
_ ≤ |∑ x in Ico N n, d x| := le_abs_self _
_ < ε := hsum
#align cauchy_seq_of_dist_le_of_summable cauchySeq_of_dist_le_of_summable
theorem cauchySeq_of_summable_dist (h : Summable fun n => dist (f n) (f n.succ)) : CauchySeq f :=
cauchySeq_of_dist_le_of_summable _ (fun _ => le_rfl) h
#align cauchy_seq_of_summable_dist cauchySeq_of_summable_dist
theorem dist_le_tsum_of_dist_le_of_tendsto (d : ℕ → ℝ) (hf : ∀ n, dist (f n) (f n.succ) ≤ d n)
(hd : Summable d) {a : α} (ha : Tendsto f atTop (𝓝 a)) (n : ℕ) :
dist (f n) a ≤ ∑' m, d (n + m) := by
refine' le_of_tendsto (tendsto_const_nhds.dist ha) (eventually_atTop.2 ⟨n, fun m hnm => _⟩)
refine' le_trans (dist_le_Ico_sum_of_dist_le hnm fun _ _ => hf _) _
rw [sum_Ico_eq_sum_range]
refine' sum_le_tsum (range _) (fun _ _ => le_trans dist_nonneg (hf _)) _
exact hd.comp_injective (add_right_injective n)
#align dist_le_tsum_of_dist_le_of_tendsto dist_le_tsum_of_dist_le_of_tendsto
theorem dist_le_tsum_of_dist_le_of_tendsto₀ (d : ℕ → ℝ) (hf : ∀ n, dist (f n) (f n.succ) ≤ d n)
(hd : Summable d) (ha : Tendsto f atTop (𝓝 a)) : dist (f 0) a ≤ tsum d := by
simpa only [zero_add] using dist_le_tsum_of_dist_le_of_tendsto d hf hd ha 0
#align dist_le_tsum_of_dist_le_of_tendsto₀ dist_le_tsum_of_dist_le_of_tendsto₀
theorem dist_le_tsum_dist_of_tendsto (h : Summable fun n => dist (f n) (f n.succ))
(ha : Tendsto f atTop (𝓝 a)) (n) : dist (f n) a ≤ ∑' m, dist (f (n + m)) (f (n + m).succ) :=
show dist (f n) a ≤ ∑' m, (fun x => dist (f x) (f x.succ)) (n + m) from
dist_le_tsum_of_dist_le_of_tendsto (fun n => dist (f n) (f n.succ)) (fun _ => le_rfl) h ha n
#align dist_le_tsum_dist_of_tendsto dist_le_tsum_dist_of_tendsto
theorem dist_le_tsum_dist_of_tendsto₀ (h : Summable fun n => dist (f n) (f n.succ))
(ha : Tendsto f atTop (𝓝 a)) : dist (f 0) a ≤ ∑' n, dist (f n) (f n.succ) := by
simpa only [zero_add] using dist_le_tsum_dist_of_tendsto h ha 0
#align dist_le_tsum_dist_of_tendsto₀ dist_le_tsum_dist_of_tendsto₀
|
import SciLean.Data.Prod
import SciLean.Core.SmoothMap
import SciLean.Core.Diff
namespace SciLean
variable {X Y Z W Y₁ Y₂ Y₃} [Diff X] [Diff Y] [Diff Z] [Diff W] [Diff Y₁] [Diff Y₂] [Diff Y₃]
def is_diff [Diff X] [Diff Y] (f : X → Y) : Prop := sorry
class IsSmoothDepNT {Xs Y' : Type} [Diff Xs] [Diff Y']
(n : Nat) (f : X → Y) [Prod.Uncurry n (X → Y) Xs Y'] : Prop where
proof : is_diff (uncurryN n f)
class IsSmoothDepN {Xs Y'} [Diff Xs] [Diff Y']
(n : Nat) (f : X → Y) [Prod.Uncurry n (X → Y) Xs Y'] extends IsSmoothDepNT n f : Prop
abbrev IsSmoothDep (f : X → Y) : Prop
:= IsSmoothDepN 1 f
abbrev IsSmoothDepT (f : X → Y) : Prop
:= IsSmoothDepNT 1 f
--------------------------------------------------------------------------------
instance (priority := low) IsSmoothDep.remove_2_2 (f : X → Y → Z) [IsSmoothDepNT 2 f]
: IsSmoothDepT (λ x => f x) := sorry_proof
instance (priority := low) IsSmoothDep.remove_2_1 (f : X → Y → Z) [IsSmoothDepNT 2 f] (x : X)
: IsSmoothDepT (λ y => f x y) := sorry_proof
instance (priority := low) IsSmoothDep.remove_3_2_3 (f : X → Y → Z → W) [IsSmoothDepNT 3 f]
: IsSmoothDepT (λ x => f x) := sorry_proof
instance (priority := low) IsSmoothDep.remove_3_1_3 (f : X → Y → Z → W) [IsSmoothDepNT 3 f] (x : X)
: IsSmoothDepT (λ y => f x y) := sorry_proof
instance (priority := low) IsSmoothDep.remove_3_1_2 (f : X → Y → Z → W) [IsSmoothDepNT 3 f] (x : X) (y : Y)
: IsSmoothDepT (λ z => f x y z) := sorry_proof
-- -- adding arguments
instance (priority := low) IsSmoothDep.add_extra_2_1 (f : X → Y) [IsSmoothDepT f]
: IsSmoothDepNT 2 (λ (z : Z) x => f x) := sorry_proof
instance (priority := low) IsSmoothDep.add_extra_2_2 (f : X → Y) [IsSmoothDepT f]
: IsSmoothDepNT 2 (λ x (z : Z) => f x) := sorry_proof
instance (priority := low) IsSmoothDep.add_extra_3_1 (f : Y → Z → W) [IsSmoothDepNT 2 f]
: IsSmoothDepNT 3 (λ (x : X) y z => f y z) := sorry_proof
instance (priority := low) IsSmoothDep.add_extra_3_2 (f : X → Z → W) [IsSmoothDepNT 2 f]
: IsSmoothDepNT 3 (λ x (y : Y) z => f x z) := sorry_proof
instance (priority := low) IsSmoothDep.add_extra_3_3 (f : X → Y → W) [IsSmoothDepNT 2 f]
: IsSmoothDepNT 3 (λ x y (z : Z) => f x y) := sorry_proof
-- Core instances
instance id.arg_x.isSmoothDep
: IsSmoothDepT λ x : X => x := sorry_proof
-- This is problematic - low priority had to be added to `remove_2_2`
example {α : Type} : IsSmoothDepNT 1 (fun (x : α → Y) => x) := inferInstance
instance const.arg_xy.isSmoothDep
: IsSmoothDepNT 2 λ (x : X) (y : Y) => x := inferInstance
instance const.arg_x.isSmoothDep
: IsSmoothDepT λ (x : X) (y : Y) => x := inferInstance
instance const.arg_y.isSmoothDep (x : X)
: IsSmoothDepT λ (y : Y) => x := IsSmoothDep.remove_2_1 (λ x y => x) x
instance (priority := low) swap.arg_y.isSmoothDep {α : Type}
(f : α → Y → Z) [∀ x, IsSmoothDepT (f x)]
: IsSmoothDepT (λ y x => f x y) := sorry_proof
instance parm.arg_x.isSmoothDep
(f : X → β → Z) [IsSmoothDepT f] (y : β)
: IsSmoothDepT (λ x => f x y) := sorry_proof
instance (priority := mid-1) subst.arg_x.isSmoothDep
(f : X → Y → Z) [IsSmoothDepNT 2 f]
(g : X → Y) [IsSmoothDepT g] :
IsSmoothDepT (λ x => f x (g x)) := sorry_proof
instance (priority := mid-1) subst2.arg_x.isSmoothDep
(f : X → Y → Y₁ → Z) [IsSmoothDepNT 3 f]
(g : X → Y → Y₁) [IsSmoothDepNT 2 g] :
IsSmoothDepNT 2 (λ x y => f x y (g x y)) := sorry_proof
instance (priority := mid-1) subst3.arg_x.isSmoothDep
(f : X → Y → Z → Y₁ → W) [IsSmoothDepNT 4 f]
(g : X → Y → Z → Y₁) [IsSmoothDepNT 3 g] :
IsSmoothDepNT 3 (λ x y z => f x y z (g x y z)) := sorry_proof
-- @[infer_tc_goals_rl]
instance comp.arg_x.isSmoothDep
(f : Y → Z) [IsSmoothDepT f]
(g : X → Y) [IsSmoothDepT g]
: IsSmoothDepT (λ x => f (g x)) := by infer_instance
instance {Ws W'} [Diff Ws] [Diff W']
(f : Z → W) [Prod.Uncurry n W Ws W'] [IsSmoothDepNT (n+1) f]
(g : X → Y → Z) [IsSmoothDepNT 2 g]
: IsSmoothDepNT (n+2) fun x y => f (g x y) := sorry_proof
instance {Ws W'} [Diff Ws] [Diff W']
  (f : Y₁ → Y₂ → W) [Prod.Uncurry n W Ws W'] [hf : IsSmoothDepNT (n+2) f]
(g₁ : X → Y → Z → Y₁) [IsSmoothDepNT 3 g₁]
(g₂ : X → Y → Z → Y₂) [IsSmoothDepNT 3 g₂]
: IsSmoothDepNT (n+3) fun x y z => f (g₁ x y z) (g₂ x y z) := sorry_proof
instance comp2.arg_x.isSmoothDep
(f : Y₁ → Y₂ → Z) [IsSmoothDepNT 2 f]
(g₁ : X → Y → Y₁) [IsSmoothDepNT 2 g₁]
(g₂ : X → Y → Y₂) [IsSmoothDepNT 2 g₂]
: IsSmoothDepNT 2 (λ x y => f (g₁ x y) (g₂ x y)) :=
by
infer_instance
instance comp3.arg_x.isSmoothDep
(f : Y₁ → Y₂ → Y₃ → W) [hf : IsSmoothDepNT ((1:ℕ) + (2:ℕ)) f]
(g₁ : X → Y → Z → Y₁) [IsSmoothDepNT 3 g₁]
(g₂ : X → Y → Z → Y₂) [IsSmoothDepNT 3 g₂]
(g₃ : X → Y → Z → Y₃) [IsSmoothDepNT 3 g₃]
: IsSmoothDepNT 3 (λ x y z => f (g₁ x y z) (g₂ x y z) (g₃ x y z)) :=
by
infer_instance
instance Prod.fst.arg_xy.isSmoothDep : IsSmoothDep (Prod.fst : X×Y → X) := sorry_proof
instance Prod.snd.arg_xy.isSmoothDep : IsSmoothDep (Prod.snd : X×Y → Y) := sorry_proof
|
function out = PP_ModelFit(y,model,order,randomSeed)
% PP_ModelFit Investigates whether AR model fit improves with different preprocessings.
%
% After performing the range of transformations to the time series, returns the
% in-sample root-mean-square (RMS) prediction errors for an AR model on each
% transformed time series as a ratio of the RMS prediction error of the original
% time series.
%
% PP_PreProcess.m is used to perform the preprocessings.
%
% The AR model is fitted using the functions ar and pe from Matlab's System
% Identification Toolbox.
%
% Transformations performed include:
% (i) incremental differencing,
% (ii) filtering of the power spectral density function,
% (iii) removal of piece-wise polynomial trends, and
% (iv) rank mapping the values of the time series to a Gaussian distribution.
%
%---INPUTS:
%
% y, the input time series
% model, the time-series model to fit to the transformed time series (currently
% 'ar' is the only option)
%
% order, the order of the AR model to fit to the data
%
% randomSeed, whether (and how) to reset the random seed, using BF_ResetSeed
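%
%---EXAMPLE:
% (illustrative only; assumes the hctsa toolbox, including PP_PreProcess and
% BF_ResetSeed, is on the Matlab path)
%
% y = randn(500,1); % a white-noise time series
% out = PP_ModelFit(y,'ar',2,'default');
% % out.rmserrrat_<transform> then holds the RMS-error ratio per transform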
% ------------------------------------------------------------------------------
% Copyright (C) 2020, Ben D. Fulcher <[email protected]>,
% <http://www.benfulcher.com>
%
% If you use this code for your research, please cite the following two papers:
%
% (1) B.D. Fulcher and N.S. Jones, "hctsa: A Computational Framework for Automated
% Time-Series Phenotyping Using Massive Feature Extraction, Cell Systems 5: 527 (2017).
% DOI: 10.1016/j.cels.2017.10.001
%
% (2) B.D. Fulcher, M.A. Little, N.S. Jones, "Highly comparative time-series
% analysis: the empirical structure of time series and their methods",
% J. Roy. Soc. Interface 10(83) 20130048 (2013).
% DOI: 10.1098/rsif.2013.0048
%
% This function is free software: you can redistribute it and/or modify it under
% the terms of the GNU General Public License as published by the Free Software
% Foundation, either version 3 of the License, or (at your option) any later
% version.
%
% This program is distributed in the hope that it will be useful, but WITHOUT
% ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
% FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
% details.
%
% You should have received a copy of the GNU General Public License along with
% this program. If not, see <http://www.gnu.org/licenses/>.
% ------------------------------------------------------------------------------
% ------------------------------------------------------------------------------
%% Preliminaries
% ------------------------------------------------------------------------------
N = length(y); % length of the time series
% ------------------------------------------------------------------------------
%% Inputs
% ------------------------------------------------------------------------------
% Model: the model to fit preprocessed time series to
if nargin < 2 || isempty(model)
model = 'ar';
end
% order: the order of model to fit
if nargin < 3 || isempty(order)
order = 2;
end
% randomSeed: how to treat the randomization
if nargin < 4
randomSeed = []; % default
end
% ------------------------------------------------------------------------------
%% Apply a range of preprocessings
% ------------------------------------------------------------------------------
yp = PP_PreProcess(y,'',[],[],[],randomSeed);
% Returns a structure, yp, with a range of time series in it, each a different
% transformation of the original, y.
%% ____________________FIT MODEL TO ALL:_______________________ %%
fields = fieldnames(yp);
numFields = length(fields);
% statstore = struct('fpes',{});
for i = 1:numFields
% for each preprocessing, fit the model
data = yp.(fields{i});
% data is the current preprocessed data
switch model % SO MANY OPTIONS! ;-)
case 'ar'
%% Check that a System Identification Toolbox license is available
BF_CheckToolbox('identification_toolbox')
data = zscore(data); % zscore the data from this preprocessing
m = ar(data,order); % fit the model
% Get statistics on fit
% () FPE
statstore.fpe(i) = m.EstimationInfo.FPE;
% () in-sample prediction error
e = pe(m,data);
statstore.rmserr(i) = sqrt(mean(e.^2));
statstore.mabserr(i) = mean(abs(e));
statstore.ac1(i) = CO_AutoCorr(e,1,'Fourier');
otherwise
error('Unknown model ''%s''',model);
end
end
% ------------------------------------------------------------------------------
%% Return statistics on statistics
% ------------------------------------------------------------------------------
% Note: often, as you make the series more stationary and remove trends, it
% becomes harder to predict, because trends are very easy to predict, and
% making the series whiter will obviously decrease its predictability.
% (1) ratio of fpe of preprocessed to unprocessed time series
% I think just this is ok.
% for i=2:numFields
% eval(['out.fperat_' fields{i} ' = ' num2str(statstore.fpe(i)/statstore.fpe(1)) ';']);
% end
% No, I'll just do in-sample RMS error; for a single model there's no point comparing FPEs
for i = 2:numFields
out.(sprintf('rmserrrat_%s',fields{i})) = statstore.rmserr(i)/statstore.rmserr(1);
end
% In fact, greater error in this case means a better detrending in some
% sense -- it's removed more of the 'obvious' linear structure (assuming
% that's the aim).
% Could also return statistics on other things like prediction error, but
% not a lot of point, I think.
%
% function ydt = SUB_remps(y,n,method)
% % Removes the first n (proportion) of power spectrum
% % Based on my deseasonalize1.m code
%
%
% %% Take the Fourier Transform
%
% Ny = length(y); % number of samples in y
% % t = linspace(0,1,Ny); % time vector
% NFFT = 2^nextpow2(Ny); % next power of 2
% Fy = fft(y,NFFT); % fast fourier transform of y
% Fy1 = Fy(1:NFFT/2+1);
% % f = 1/2*linspace(0,1,NFFT/2+1); % frequency vector
%
% %% Remove this range
% % set it to (mean of the rest) across this range
% switch method
% case 'lf'
% cullr = 1:floor(length(Fy1)*n);
% case 'biggest'
% cullr = find(abs(Fy1)>quantile(abs(Fy1),n));
% end
%
% meanrest = mean(abs(Fy1(setxor(1:end,cullr))));
% % meanrest = 0;
% FyF = Fy;
% FyF(cullr)=meanrest;
% FyF(end-cullr+2)=meanrest;
%
%
% % PLOT
% % plot(abs(Fy)),hold on; plot(abs(FyF),'--r'); hold off
% % input('Here''s the filtered one...')
% % plot(abs(FyF),'k');
% % input('Again on its own...')
%
%
% %% Inverse Fourier Transform
% ydt = ifft(FyF,NFFT);
% ydt = zscore(ydt(1:Ny)); % crop to desired length
%
%
% % PLOT
% % plot(zscore(ydt),'b'); hold on; plot(y,'r'); hold off;
% % input(['Mean difference is ' num2str(mean(y-ydt))])
%
% end
%
% function ydt = SUB_rempt(y,order,nbits)
% N = length(y);
% ydt = zeros(N,1);
% bits = round(linspace(0,N,nbits+1));
% for k=1:nbits
% r = bits(k)+1 : bits(k+1); % range defined by adjacent 'bits'
% x = (1:length(r))'; % faux x-range
% ybit = y(r); % y-range
% p = polyfit(x,ybit,order);
% ydt(r) = ybit-polyval(p,x);
% end
% ydt = zscore(ydt);
% % plot(y,'b'); hold on; plot(ydt,'r');
% % input('here we are')
% end
end
|
(*
File: OrderTopology.thy
Author: Bohua Zhan
Basic results about order topology.
*)
theory OrderTopology
imports Topology Auto2_FOL.Interval Auto2_FOL.AlgStructure
begin
section \<open>Sets with at least two elements\<close>
definition card_ge2 :: "i \<Rightarrow> o" where [rewrite]:
"card_ge2(X) \<longleftrightarrow> (\<exists>a\<in>X. \<exists>b\<in>X. a \<noteq> b)"
lemma card_ge2I [backward2]: "{a,b} \<subseteq> X \<Longrightarrow> a \<noteq> b \<Longrightarrow> card_ge2(X)" by auto2
lemma card_ge2_D1 [resolve]: "card_ge2(X) \<Longrightarrow> \<exists>a\<in>X. \<exists>b\<in>X. a \<noteq> b" by auto2
lemma card_ge2_D2 [resolve]: "card_ge2(X) \<Longrightarrow> a \<in> X \<Longrightarrow> \<exists>b\<in>X. b \<noteq> a" by auto2
setup {* del_prfstep_thm @{thm card_ge2_def} *}
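(* Illustrative example (not part of the original theory; assumes auto2 can
   discharge it directly from card_ge2I): a pair of distinct elements has at
   least two elements. *)
lemma card_ge2_pair: "a \<noteq> b \<Longrightarrow> card_ge2({a,b})" by auto2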
section \<open>Order topology\<close>
definition ord_basis :: "i \<Rightarrow> i" where [rewrite]:
"ord_basis(X) = ((\<Union>a\<in>.X. \<Union>b\<in>.X. {open_interval(X,a,b)}) \<union>
(\<Union>a\<in>.X. {less_interval(X,a)}) \<union> (\<Union>a\<in>.X. {greater_interval(X,a)}))"
lemma ord_basisE [forward]:
"W \<in> ord_basis(X) \<Longrightarrow> (\<exists>a\<in>.X. \<exists>b\<in>.X. W = open_interval(X,a,b)) \<or>
(\<exists>a\<in>.X. W = less_interval(X,a)) \<or> (\<exists>a\<in>.X. W = greater_interval(X,a))" by auto2
lemma ord_basisI [resolve]:
"a \<in>. X \<Longrightarrow> b \<in>. X \<Longrightarrow> open_interval(X,a,b) \<in> ord_basis(X)"
"a \<in>. X \<Longrightarrow> less_interval(X,a) \<in> ord_basis(X)"
"a \<in>. X \<Longrightarrow> greater_interval(X,a) \<in> ord_basis(X)" by auto2+
setup {* del_prfstep_thm @{thm ord_basis_def} *}
lemma ord_basis_eq_str [rewrite]:
"eq_str_order(X,Y) \<Longrightarrow> ord_basis(X) = ord_basis(Y)" by auto2
lemma ord_basis_is_basis [forward]:
"linorder(X) \<Longrightarrow> collection_is_basis(ord_basis(X))"
@proof @let "\<B> = ord_basis(X)" @have "\<forall>U\<in>\<B>. \<forall>V\<in>\<B>. U \<inter> V \<in> \<B>" @qed
lemma ord_basis_union [rewrite]:
"linorder(X) \<Longrightarrow> card_ge2(carrier(X)) \<Longrightarrow> \<Union>ord_basis(X) = carrier(X)"
@proof
@have "\<forall>x\<in>.X. x \<in> \<Union>ord_basis(X)" @with
@obtain "y\<in>.X" where "y \<noteq> x"
@case "y <\<^sub>X x" @with @have "x \<in> greater_interval(X,y)" @end
@case "y >\<^sub>X x" @with @have "x \<in> less_interval(X,y)" @end
@end
@qed
definition order_topology :: "i \<Rightarrow> o" where [rewrite]:
"order_topology(X) \<longleftrightarrow> (linorder(X) \<and> is_top_space_raw(X) \<and> card_ge2(carrier(X)) \<and>
open_sets(X) = top_from_basis(ord_basis(X)))"
lemma order_topology_has_basis [forward]:
"order_topology(X) \<Longrightarrow> top_has_basis(X,ord_basis(X))" by auto2
lemma order_topologyD [forward]:
"order_topology(X) \<Longrightarrow> linorder(X)"
"order_topology(X) \<Longrightarrow> is_top_space(X)"
"order_topology(X) \<Longrightarrow> card_ge2(carrier(X))" by auto2+
lemma order_topologyI [backward]:
"linorder(X) \<Longrightarrow> is_top_space_raw(X) \<Longrightarrow> card_ge2(carrier(X)) \<Longrightarrow>
open_sets(X) = top_from_basis(ord_basis(X)) \<Longrightarrow> order_topology(X)" by auto2
lemma order_topology_open_interval [resolve]:
"order_topology(X) \<Longrightarrow> a \<in>. X \<Longrightarrow> b \<in>. X \<Longrightarrow> is_open(X,open_interval(X,a,b))" by auto2
lemma order_topology_less_interval [resolve]:
"order_topology(X) \<Longrightarrow> a \<in>. X \<Longrightarrow> is_open(X,less_interval(X,a))" by auto2
lemma order_topology_greater_interval [resolve]:
"order_topology(X) \<Longrightarrow> a \<in>. X \<Longrightarrow> is_open(X,greater_interval(X,a))" by auto2
lemma order_topology_le_interval [resolve]:
"order_topology(X) \<Longrightarrow> a \<in>. X \<Longrightarrow> is_closed(X,le_interval(X,a))" by auto2
lemma order_topology_ge_interval [resolve]:
"order_topology(X) \<Longrightarrow> a \<in>. X \<Longrightarrow> is_closed(X,ge_interval(X,a))" by auto2
lemma order_topology_closed_interval [resolve]:
"order_topology(X) \<Longrightarrow> a \<in>. X \<Longrightarrow> b \<in>. X \<Longrightarrow> is_closed(X,closed_interval(X,a,b))"
@proof
@have "closed_interval(X,a,b) = le_interval(X,b) \<inter> ge_interval(X,a)"
@qed
lemma order_top_is_openI [forward]:
"order_topology(X) \<Longrightarrow> \<forall>x\<in>U. \<exists>a b. x \<in> open_interval(X,a,b) \<and> open_interval(X,a,b) \<subseteq> U \<Longrightarrow> is_open(X,U)" by auto2
lemma order_top_is_openD_gt [backward2]:
"order_topology(X) \<Longrightarrow> is_open(X,U) \<Longrightarrow> a \<in> U \<Longrightarrow> \<exists>M. M >\<^sub>X a \<Longrightarrow> \<exists>c >\<^sub>X a. closed_open_interval(X,a,c) \<subseteq> U"
@proof
@obtain "W\<in>ord_basis(X)" where "a \<in> W \<and> W \<subseteq> U"
@case "\<exists>p\<in>.X. \<exists>q\<in>.X. W = open_interval(X,p,q)"
@qed
lemma order_top_is_openD_lt [backward2]:
"order_topology(X) \<Longrightarrow> is_open(X,U) \<Longrightarrow> a \<in> U \<Longrightarrow> \<exists>M. M <\<^sub>X a \<Longrightarrow> \<exists>c <\<^sub>X a. open_closed_interval(X,c,a) \<subseteq> U"
@proof
@obtain "W\<in>ord_basis(X)" where "a \<in> W \<and> W \<subseteq> U"
@case "\<exists>p\<in>.X. \<exists>q\<in>.X. W = open_interval(X,p,q)"
@qed
lemma order_top_is_openD_unbounded [backward2]:
"order_topology(X) \<Longrightarrow> order_unbounded(X) \<Longrightarrow>
is_open(X,U) \<Longrightarrow> x \<in> U \<Longrightarrow> \<exists>a b. x \<in> open_interval(X,a,b) \<and> open_interval(X,a,b) \<subseteq> U"
@proof
@obtain b where "b >\<^sub>X x" "closed_open_interval(X,x,b) \<subseteq> U"
@obtain a where "a <\<^sub>X x" "open_closed_interval(X,a,x) \<subseteq> U"
@have "x \<in> open_interval(X,a,b)"
@have "open_interval(X,a,b) = open_closed_interval(X,a,x) \<union> closed_open_interval(X,x,b)"
@qed
setup {* fold del_prfstep_thm [@{thm order_topology_has_basis}, @{thm order_topology_def}] *}
setup {* add_resolve_prfstep @{thm order_topology_has_basis} *}
section \<open>Data structure for order topology\<close>
definition is_ord_top_raw :: "i \<Rightarrow> o" where [rewrite]:
"is_ord_top_raw(R) \<longleftrightarrow> is_top_space_raw(R) \<and> raworder(R)"
lemma is_ord_top_rawD [forward]:
"is_ord_top_raw(R) \<Longrightarrow> is_top_space_raw(R)"
"is_ord_top_raw(R) \<Longrightarrow> raworder(R)" by auto2+
setup {* del_prfstep_thm_eqforward @{thm is_ord_top_raw_def} *}
definition ord_top_form :: "i \<Rightarrow> o" where [rewrite]:
"ord_top_form(R) \<longleftrightarrow> is_ord_top_raw(R) \<and> is_func_graph(R,{carrier_name,open_sets_name,order_graph_name})"
lemma ord_top_form_to_raw [forward]: "ord_top_form(R) \<Longrightarrow> is_ord_top_raw(R)" by auto2
definition OrderTop :: "[i, i, i \<Rightarrow> i \<Rightarrow> o] \<Rightarrow> i" where [rewrite]:
"OrderTop(S,T,r) = Struct({\<langle>carrier_name,S\<rangle>, \<langle>open_sets_name,T\<rangle>, \<langle>order_graph_name, rel_graph(S,r)\<rangle>})"
lemma OrderTop_is_ord_top_raw [backward]:
"T \<subseteq> Pow(S) \<Longrightarrow> R = OrderTop(S,T,r) \<Longrightarrow> ord_top_form(R)"
@proof @have "raworder(R)" @qed
lemma OrderTop_eval [rewrite]:
"carrier(OrderTop(S,T,r)) = S"
"open_sets(OrderTop(S,T,r)) = T"
"X = OrderTop(S,T,r) \<Longrightarrow> x \<in>. X \<Longrightarrow> y \<in>. X \<Longrightarrow> x \<le>\<^sub>X y \<longleftrightarrow> r(x,y)" by auto2+
lemma ord_top_eq [backward]:
"ord_top_form(X) \<Longrightarrow> ord_top_form(Y) \<Longrightarrow> eq_str_order(X,Y) \<Longrightarrow> eq_str_top(X,Y) \<Longrightarrow> X = Y" by auto2
setup {* fold del_prfstep_thm [@{thm ord_top_form_def}, @{thm OrderTop_def}] *}
definition order_top_from_order :: "i \<Rightarrow> i" where [rewrite]:
"order_top_from_order(X) = OrderTop(carrier(X),top_from_basis(ord_basis(X)),\<lambda>x y. x \<le>\<^sub>X y)"
lemma order_top_from_order_ord_top_form [forward]:
"raworder(X) \<Longrightarrow> ord_top_form(order_top_from_order(X))" by auto2
lemma order_top_from_order_eq_str:
"raworder(X) \<Longrightarrow> eq_str_order(X,order_top_from_order(X))" by auto2
setup {* add_forward_prfstep_cond @{thm order_top_from_order_eq_str} [with_term "order_top_from_order(?X)"] *}
lemma order_top_from_order_is_ord_top [backward]:
"linorder(X) \<Longrightarrow> card_ge2(carrier(X)) \<Longrightarrow> order_topology(order_top_from_order(X))" by auto2
setup {* add_prfstep_check_req ("order_top_from_order(X)", "order_topology(order_top_from_order(X))") *}
section \<open>Defining topology on an ordered ring\<close>
definition OrdRingTop :: "[i, i, i \<Rightarrow> i \<Rightarrow> i, i, i \<Rightarrow> i \<Rightarrow> i, i \<Rightarrow> i \<Rightarrow> o, i] \<Rightarrow> i" where [rewrite]:
"OrdRingTop(S,z,f,u,g,r,T) = Struct({\<langle>carrier_name,S\<rangle>, \<langle>open_sets_name,T\<rangle>,
\<langle>order_graph_name, rel_graph(S,r)\<rangle>,
\<langle>zero_name, z\<rangle>, \<langle>plus_fun_name, binary_fun_of(S,f)\<rangle>,
\<langle>one_name, u\<rangle>, \<langle>times_fun_name, binary_fun_of(S,g)\<rangle>})"
lemma OrdRingTop_is_ord_ring_raw [backward]:
"z \<in> S \<Longrightarrow> binary_fun(S,f) \<Longrightarrow> u \<in> S \<Longrightarrow> binary_fun(S,g) \<Longrightarrow>
R = OrdRingTop(S,z,f,u,g,r,T) \<Longrightarrow> is_ord_ring_raw(R)"
@proof
@have "is_abgroup_raw(R)"
@have "is_group_raw(R)"
@have "is_ring_raw(R)"
@have "raworder(R)"
@qed
lemma ord_top_ring_eval [rewrite]:
"carrier(OrdRingTop(S,z,f,u,g,r,T)) = S"
"zero(OrdRingTop(S,z,f,u,g,r,T)) = z"
"one(OrdRingTop(S,z,f,u,g,r,T)) = u"
"open_sets(OrdRingTop(S,z,f,u,g,r,T)) = T"
"R = OrdRingTop(S,z,f,u,g,r,T) \<Longrightarrow> x \<in>. R \<Longrightarrow> y \<in>. R \<Longrightarrow> is_abgroup_raw(R) \<Longrightarrow> x +\<^sub>R y = f(x,y)"
"R = OrdRingTop(S,z,f,u,g,r,T) \<Longrightarrow> x \<in>. R \<Longrightarrow> y \<in>. R \<Longrightarrow> is_group_raw(R) \<Longrightarrow> x *\<^sub>R y = g(x,y)"
"R = OrdRingTop(S,z,f,u,g,r,T) \<Longrightarrow> x \<in>. R \<Longrightarrow> y \<in>. R \<Longrightarrow> x \<le>\<^sub>R y \<longleftrightarrow> r(x,y)" by auto2+
setup {* del_prfstep_thm @{thm OrdRingTop_def} *}
section \<open>Order topology from an ordered ring\<close>
definition ord_ring_top_from_ord_ring :: "i \<Rightarrow> i" where [rewrite]:
"ord_ring_top_from_ord_ring(R) =
OrdRingTop(carrier(R), \<zero>\<^sub>R, \<lambda>x y. x +\<^sub>R y, \<one>\<^sub>R, \<lambda>x y. x *\<^sub>R y, \<lambda>x y. x \<le>\<^sub>R y, top_from_basis(ord_basis(R)))"
lemma ord_ring_top_from_ord_ring_is_ord_ring [forward]:
"is_ord_ring_raw(R) \<Longrightarrow> is_ord_ring_raw(ord_ring_top_from_ord_ring(R))" by auto2
lemma ord_ring_top_from_ord_ring_eq_str:
"is_ord_ring_raw(R) \<Longrightarrow> A = ord_ring_top_from_ord_ring(R) \<Longrightarrow> eq_str_ord_ring(R,A)" by auto2
setup {* add_forward_prfstep_cond @{thm ord_ring_top_from_ord_ring_eq_str} [with_term "?A"] *}
lemma ord_ring_top_from_ord_ring_is_top_space_raw [forward]:
"is_ord_ring_raw(R) \<Longrightarrow> linorder(R) \<Longrightarrow> is_ord_top_raw(ord_ring_top_from_ord_ring(R))" by auto2
lemma ord_ring_top_from_ord_ring_is_ord_top [backward]:
"is_ord_ring_raw(R) \<Longrightarrow> linorder(R) \<Longrightarrow> card_ge2(carrier(R)) \<Longrightarrow>
order_topology(ord_ring_top_from_ord_ring(R))" by auto2
section \<open>Subspace on order topology\<close>
definition order_convex :: "i \<Rightarrow> i \<Rightarrow> o" where [rewrite]:
"order_convex(X,A) \<longleftrightarrow> (A \<subseteq> carrier(X) \<and> (\<forall>a\<in>A. \<forall>b\<in>A. closed_interval(X,a,b) \<subseteq> A))"
lemma order_convexD1 [forward]: "order_convex(X,A) \<Longrightarrow> A \<subseteq> carrier(X)" by auto2
lemma order_convexD2a [backward2]:
"order_convex(X,A) \<Longrightarrow> a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> closed_interval(X,a,b) \<subseteq> A" by auto2
lemma order_convexD2b [backward2]:
"linorder(X) \<Longrightarrow> order_convex(X,A) \<Longrightarrow> a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> open_interval(X,a,b) \<subseteq> A"
@proof @have "closed_interval(X,a,b) \<subseteq> A" @qed
setup {* del_prfstep_thm_eqforward @{thm order_convex_def} *}
lemma closed_interval_convex [resolve]:
"linorder(X) \<Longrightarrow> order_convex(X,closed_interval(X,a,b))" by auto2
definition ord_subspace :: "i \<Rightarrow> i \<Rightarrow> i" where [rewrite]:
"ord_subspace(X,A) = OrderTop(A, {A \<inter> U. U \<in> open_sets(X)}, \<lambda>x y. x \<le>\<^sub>X y)"
lemma ord_subspace_ord_top_form [forward]: "ord_top_form(ord_subspace(X,A))" by auto2
lemma ord_subspace_carrier: "carrier(ord_subspace(X,A)) = A" by auto2
setup {* add_forward_prfstep_cond @{thm ord_subspace_carrier} [with_term "ord_subspace(?X,?A)"] *}
lemma ord_subspace_eq_str [resolve]:
"is_top_space(X) \<Longrightarrow> A \<subseteq> carrier(X) \<Longrightarrow> eq_str_top(subspace(X,A),ord_subspace(X,A))"
@proof @have "open_sets(subspace(X,A)) = open_sets(ord_subspace(X,A))" @qed
lemma ord_subspace_is_top_space:
"is_top_space(X) \<Longrightarrow> A \<subseteq> carrier(X) \<Longrightarrow> is_top_space(ord_subspace(X,A))"
@proof @have "eq_str_top(subspace(X,A),ord_subspace(X,A))" @qed
setup {* add_forward_prfstep_cond @{thm ord_subspace_is_top_space} [with_term "ord_subspace(?X,?A)"] *}
lemma order_top_from_order_finer1 [resolve]:
"order_topology(X) \<Longrightarrow> card_ge2(A) \<Longrightarrow> order_convex(X,A) \<Longrightarrow>
Y = order_top_from_order(suborder(X,A)) \<Longrightarrow> is_open(Y, A \<inter> less_interval(X,x))"
@proof
@case "x \<in> A" @with @have "A \<inter> less_interval(X,x) = less_interval(suborder(X,A),x)" @end
@have (@rule) "A \<inter> less_interval(X,x) = \<emptyset> \<or> A \<subseteq> less_interval(X,x)" @with
@contradiction
@obtain "b \<in> A" where "b \<in> less_interval(X,x)"
@obtain "c \<in> A" where "c \<notin> less_interval(X,x)"
@have "closed_interval(X,b,c) \<subseteq> A"
@have "x \<in> closed_interval(X,b,c)" @end
@qed
lemma order_top_from_order_finer2 [resolve]:
"order_topology(X) \<Longrightarrow> card_ge2(A) \<Longrightarrow> order_convex(X,A) \<Longrightarrow>
Y = order_top_from_order(suborder(X,A)) \<Longrightarrow> is_open(Y, A \<inter> greater_interval(X,x))"
@proof
@case "x \<in> A" @with @have "A \<inter> greater_interval(X,x) = greater_interval(suborder(X,A),x)" @end
@have (@rule) "A \<inter> greater_interval(X,x) = \<emptyset> \<or> A \<subseteq> greater_interval(X,x)" @with
@contradiction
@obtain "b \<in> A" where "b \<in> greater_interval(X,x)"
@obtain "c \<in> A" where "c \<notin> greater_interval(X,x)"
@have "closed_interval(X,c,b) \<subseteq> A"
@have "x \<in> closed_interval(X,c,b)" @end
@qed
lemma order_top_from_order_finer3 [resolve]:
"order_topology(X) \<Longrightarrow> card_ge2(A) \<Longrightarrow> order_convex(X,A) \<Longrightarrow>
Y = order_top_from_order(suborder(X,A)) \<Longrightarrow> is_open(Y, A \<inter> open_interval(X,x,y))"
@proof
@have "open_interval(X,x,y) = less_interval(X,y) \<inter> greater_interval(X,x)"
@have "A \<inter> open_interval(X,x,y) = (A \<inter> less_interval(X,y)) \<inter> (A \<inter> greater_interval(X,x))"
@have "is_open(Y, A \<inter> less_interval(X,y))"
@qed
lemma order_top_from_order_eq_sub [backward]:
"order_topology(X) \<Longrightarrow> card_ge2(A) \<Longrightarrow> order_convex(X,A) \<Longrightarrow>
eq_str_top(ord_subspace(X,A),order_top_from_order(suborder(X,A)))"
@proof
@let "Y = order_top_from_order(suborder(X,A))"
@let "Z = ord_subspace(X,A)"
@have "top_space_finer(Z,Y)"
@let "\<B> = {A \<inter> U. U \<in> ord_basis(X)}"
@have "top_has_basis(Z,\<B>)" @with @have "eq_str_top(subspace(X,A),Z)" @end
@have "top_space_finer(Y,Z)" @with @have "\<forall>U\<in>\<B>. is_open(Y,U)" @end
@qed
lemma ord_subspace_is_order_top:
"order_topology(X) \<Longrightarrow> card_ge2(A) \<Longrightarrow> order_convex(X,A) \<Longrightarrow> order_topology(ord_subspace(X,A))"
  @proof @have "ord_subspace(X,A) = order_top_from_order(suborder(X,A))" @qed
setup {* add_forward_prfstep_cond @{thm ord_subspace_is_order_top} [with_term "ord_subspace(?X,?A)"] *}
lemma closed_interval_order_topology:
"order_topology(X) \<Longrightarrow> a <\<^sub>X b \<Longrightarrow> I = closed_interval(X,a,b) \<Longrightarrow> order_topology(ord_subspace(X,I))"
@proof
@have "card_ge2(I)" @with @have "{a,b} \<subseteq> I" @end
@have "order_convex(X,I)"
@qed
setup {* add_forward_prfstep_cond @{thm closed_interval_order_topology} [with_term "ord_subspace(?X,?I)"] *}
end
|
import Data.List.Alternating
main : IO ()
main = do
let xs = the (Odd Double String) [1, "Hello", 2, "world", 3]
let ys = the (Odd Double String) [1, "Hello", 0, "world", 3]
printLn xs
printLn $ xs == xs
printLn $ xs == ys
printLn $ compare xs xs == EQ
printLn $ compare xs ys == GT
printLn $ bimap (+ 1) (++ "!") xs
printLn $ bifoldr (mapFst . avg) (mapSnd . join) (0, "") xs
printLn $ bifoldl (flip $ mapFst . avg) (flip $ mapSnd . (flip join)) (0, "") xs
ignore $ bitraverse printLn printLn xs
printLn $ map (++ "!") xs
printLn $ the (Odd Double String) $ [1, "Hello"] ++ [2, "world", 3]
printLn $ the (Odd Double String) $ [1, "Hello", 2] ++ ["world", 3]
let us = the (Odd String Double) ["Hello", 0, "world", 1, "!"]
let vs = the (Odd String Double) ["Lorem", 1, "ipsum"]
printLn $ us <+> vs
printLn $ us +> "!"
printLn $ "Oh, " <+ us
printLn $ the (Odd String Double) neutral
printLn $ foldr avg 0 us
printLn $ foldl avg 0 us
printLn $ the (Odd String Double) $ pure 1
printLn $ the (Odd String Double) $ ["Hello", (+1), "world", (+10), "!"] <*> ["Lorem", 1, "ipsum", 2, "."]
printLn $ the (Odd String Double) empty
printLn $ us <|> vs
printLn $ Snd.do
x <- the (Odd String Double) ["Hello", 1, "world", 2, "!"]
[",", x + 1, " "]
printLn $ Fst.do
x <- the (Odd String Double) ["Hello", 1, "world", 2, "!"]
["Um,", 3, x]
ignore $ traverse printLn us
printLn $ odds xs
printLn $ evens xs
printLn $ the (List String) $ forget $ mapFst show xs
where
avg : Double -> Double -> Double
avg x y = (x + y) / 2
join : String -> String -> String
join "" t = t
join s "" = s
join s t = s ++ " " ++ t
|
Founded in 1996, we brew Moonshine, our flagship beer and one of the most popular beers in Sheffield. Having started out on a small 10BBl kit, we now brew around 200BBls a week. In 2016 we launched our first core keg beer, Heathen American Pale, and sales of this have grown rapidly.
2017 saw the rebrand of our core cask range and the launch of our new Brewers Emporium range, with Heathen as the focal point. Other notable beers in this range include Voyager IPA (with a new hop grist each brew) and Salvation Stouts (with a different flavour each brew). We also embarked on our first foray into small-pack beers by canning Heathen in March 2017. The first batch sold out in two days, so we are planning to make the cans a permanent fixture, plus small runs of some of our most special beers.
We have done some exciting collaborations over the last few years with breweries here in the UK and overseas, including Magic Rock, Northern Monk, Siren, Founders, Griffin Claw, Kuhnenn Brew Co, Cerveza Artisana, Rat Brewery, Raw, Beer Ink and De Noord-Hollandse Bierbrouwerij. Keep your eyes peeled on our website, social media streams and of course our Eebria marketplace for all our upcoming beers!
Funk Dungeon gets canned! The first beer from the Dungeon to be put into 330ml cans. This mixed-fermentation beer gives a soft oak character from the wood, a sticky hop presence and a nice slight Brett funk. Not overly sour, but very refreshing.
Inspired by the classic Pina Colada cocktail, with pineapple, toasted coconut and Sorachi Ace hops to provide a totally tropical sensation!
Deliciously fruity and tropical with a low bitterness, as you'd expect from our Voyagers, pale and unfined. Packed full of hoppy fruity flavours for that perfect IPA journey.
Our Funk Dungeon brewer Jim and Steve from Siren Craft Brew's Barrel Project have teamed up to create a delicately bretted farmhouse twist on the classic IPA. Hopped with Mosaic and Simcoe for notes of fruit and pine which harmonise beautifully with our House Brett culture.
Brewed in collaboration with our dear friends and family at The Devonshire Cat, our Sheffield city centre pub. This opaque dream is packed with outrageous quantities of Sorachi Ace, Enigma & Vic Secret.
In collaboration with Leeds-based Wilde Child Brewery, we've created this classically clean and crushable West Coast IPA. Heavily hopped with Ekuanot, Amarillo & Eureka for a vibrant and fruity character that's packed with waves of papaya, orange, melon and grapefruit flavours that break into a crisp bitter finish.
BA Russian Imperial Stout. A huge, robust and boozy Impy Stout which has been finished in wooden ex-whisky barrels. Subtle Brett character which increases the complexity of the finish.
Dry hopped barley wine. Superbly strong with a boozy aroma of caramel & fresh citrus. Smooth & robust with warming alcohol, lots of toffee, candied peel & dried citrus fruits.
Brewed in collaboration with Melissa Cole for Sheffield Beer Week - a traditional Sheffield Pale fermented with an Abbaye yeast strain for a fruity Belgian twist. Plenty of hops with the addition of orange peel give a really well-balanced beer.
\documentclass{ltxguidex}
\usepackage{changelog}
\usepackage{fontspec}
\setmainfont{Tiempos Text}
\usepackage{FiraSans}
\usepackage{FiraMono}
\def\labeladvise{→}
\newcommand{\ltx}{\ltxclass{ltxguidex}}
\newcommand{\ltxguide}{\ltxclass{ltxguide}}
\title{Towards Better \LaTeX\ Documentation With the \ltx\ Document Class}
\author{Rebecca Turner\thanks{Brandeis University;
\email{[email protected]}}}
\date{${VERSION}$}
\begin{document}
\maketitle
\begin{abstract}
The \ltx\ document class extends \ltxguide\ with a set of
environments and commands that make writing beautiful \LaTeX\
documentation easier and more natural.
\ltx\ is licensed under the \textsc{lppl} version 1.3c, or any later
version at your choosing.
This document is written with the \ltx\ document class.
\end{abstract}
\begin{note}
This release of \ltx\ is an experimental public beta; it intends to
demonstrate a hopeful new direction without committing to a stable
public \textsc{api}.
Although \ltx\ is now suitable for use in your own documentation, do
not be surprised if future versions break your docs.
\end{note}
\begin{note}
Browse the sources, contribute, or complain at \\
\https{github.com/9999years/ltxguidex}
\end{note}
\tableofcontents
\vfill
\pagebreak
\section{The state of the docs}
\LaTeX\ documentation is easy enough to write that --- in general --- nobody
has bothered to package the improvements made to the \LaTeX documentation
systems. If one examines the documentation for their favorite package,
they'll likely find a few command definitions that make some aspect of
documentation writing more ergonomic. In the case of complex packages like
\ctan{listings} or --- in an extreme case --- \ctan{pgf}, it's commonplace
to see packages define their own internal documentation packages containing
hundreds-to-thousands of lines of documentation macros.
This class repackages useful macros from various packages' documentation,
often changing their form (e.g.\ the macro's interface) but keeping their
style. I've tried to balance versatility against specialization (i.e.\
determining which features are the \textit{most} useful) as well as
balancing short names against descriptive ones.
\LaTeX\ documentation is enabled with two document classes and several
packages. Document classes include:
\begin{classes}
\item[ltxdoc] Defines very little other than a few
shorthands for documenting commands. Designed to be integrated with
the \docstrip\ system, but I've seen plenty of \extension{dtx} files
documented with \ltxguide. However, I haven't yet used
\docstrip, so my experience here is limited.
\item[ltxguide] Provides several ergonomic features absent
in \ltxclass{ltxdoc}. However, \ltxguide\ is almost entirely
undocumented, a fact which is partially mitigated by the fact that
it's only about 150 lines long. \ltx\ is, as the name implies, based
on \ltxguide.
\end{classes}
And supporting packages include:
\begin{packages}
\item[hypdoc] One of many, many packages by Heiko Oberdiek.
\ctan{hypdoc} undertakes the ambitious task of patching the
\ctan{doc} package in order to generate better indexes. In my
experience, \ctan{hypdoc} is not compatible with
\ltxguide; as such, it isn't loaded in \ltx.
\item[doctools] Provides many useful secondary commands such as
|\ltxclass|, |\package|, and so on. Many are duplicated here.
\item[showexpl] Provides the |LTXexample| environment which typesets
\LaTeX\ code and displays it in a listing side-by-side.
\ctan{showexpl} provides the functionality of \ctan{listings}'
|\lstsample| command and more. \ctan{showexpl} does, however, rely
on the fairly hefty \ctan{listings} package.
		Compared to more ``plain'' \LaTeX\ documentation, \ltx\ documentation
		can be expected to compile somewhat more slowly. This author is of the
opinion that the improvements are so numerous that the slow-down is
worth it.
\end{packages}
\section{The \ltx\ document class}
Although \ltx\ provides many useful commands, much of its utility is in its
aesthetics. Much \LaTeX\ documentation is very ugly because producing
beautiful documentation requires significantly more code than most package
authors are interested in writing. This document is written with \ltx\ and
one package loaded (the \ctan{bera} font family). Because \ltx\ is written
with inherent beauty, it ends up being a bit heavier than its competitors;
notably, it loads \ctan{xcolor}, \ctan{listings}, \ctan{graphicx}, and
\ctan{calc} by default.
\section{A note on typefaces}
This document is set in
\href{https://klim.co.nz/retail-fonts/tiempos-text/}{Tiempos Text} and
\href{https://bboxtype.com/typefaces/FiraSans/}{Fira Sans} (available on
\textsc{ctan} as \ctan{fira}).
For your own documents, I would recommend \ctan{bera} or \ctan{plex},
although neither has small caps, which I consider essential.
When deciding on a serif font for \LaTeX\ documentation, I would recommend
picking one with a tall x-height, as larger overall glyphs make documents
easier to read on small screens (nobody's going to be printing out your
documentation). This will rule out most old-style serif typefaces, such as
Garamond and Caslon.
\section{Commands provided by \ltxguide}
In \ltxguide, pipe characters (\texttt{\pipe}) mark verbatim text.
However, between two pipes, the angle brackets |<<| and |>>| typeset as
pretty angle brackets with regular italics between them; therefore,
\texttt{\pipe}|<<package>>|\texttt{\pipe} typesets as |<package>|.
To write literal angle brackets, simply double the characters;
\texttt{\pipe}|<<<<|\texttt{\pipe} will render as |<<| and
\texttt{\pipe}|>>>>|\texttt{\pipe} will render as |>>|.
\begin{desc}
|\pipe|\\
|\bs|
\end{desc}
To write literal pipe characters, use the |\pipe| command. To write a
literal backslash, use the |\bs| command.
\begin{LTXexample}
\pipe \\
\texttt{\pipe} \\
\textit{\pipe} \\
\textbf{\texttt{\pipe}} \\
\bs \\
\texttt{\bs} \\
\textit{\bs} \\
\textbf{\texttt{\bs}}
\end{LTXexample}
\ltxguide\ uses \ctan{shortvrb} to activate pipes as a synonym for
short-verbatim material. There are some small conflicts with \ltx's use of
the \ctan{listings} package (in particular, pipes are silently gobbled from
|lstlistings| environments, although they work normally within |verbatim|),
which will hopefully be resolved with a coming change to \package{listings};
this simply depends on how quickly Jobst Hoffmann emails me back.
\ltxguide\ also provides the |decl| environment that powers the |desc|
environment.
\begin{desc}
|\m{<placeholder>}|\\
|\meta{<placeholder>}|
\end{desc}
Prints |<placeholder>| in italics within angle-brackets.
\ltx\ provides |\meta| as a synonym for |\m|.
\begin{LTXexample}
\m{placeholder}
\end{LTXexample}
\begin{desc}
|\arg{<argument>}|
|\oarg{<argument>}|
\end{desc}
Shorthands for mandatory and optional arguments.
\begin{LTXexample}
\arg{foo}\oarg{bar}
\end{LTXexample}
\begin{desc}
\begin{tabular}{@{}ll@{}}
|\NFSS| & \NFSS\\
|\AmS| & \AmS\\
|\AmSLaTeX| & \AmSLaTeX\\
|\babel| & \babel\\
|\SLiTeX| & \SLiTeX\\
|\ctanlogo| & \ctanlogo\\
\end{tabular}
\end{desc}
Various logos.
\begin{note}
\ltxguide\ actually defines the \ctanlogo\ logo as |\ctan|, but this
class uses |\ctan| to refer to a package, so the \ctanlogo\ logo is
typeset with |\ctanlogo|.
\end{note}
\begin{desc}
\begin{tabular}{@{}ll@{}}
|\clsguide| & \clsguide \\
|\usrguide| & \usrguide \\
|\fntguide| & \fntguide \\
|\cfgguide| & \cfgguide \\
|\cyrguide| & \cyrguide \\
|\modguide| & \modguide \\
|\sourcecode| & \sourcecode \\
|\LaTeXbook| & \LaTeXbook \\
|\LaTeXcomp| & \LaTeXcomp \\
|\LaTeXGcomp| & \LaTeXGcomp \\
|\LaTeXWcomp| & \LaTeXWcomp \\
\end{tabular}
\end{desc}
The names of various documents, presumably intended only for the original
\ltxguide\ document.
\begin{desc}
|\eg|\\
|\ie|
\end{desc}
Shortcuts for ``e.g.,''\ and ``i.e.,''\ followed by a non-breaking space.
\begin{LTXexample}
\ie the document class\dots\\
\eg the package\dots
\end{LTXexample}
\begin{desc}
|\NEWfeature{<version>}|\\
|\NEWdescription{<version>}|
\end{desc}
\NEWfeature{1.0.0}
\NEWdescription{1.0.0}
Typeset their arguments in a |\marginpar|. This paragraph is prepended by
|\NEWfeature{1.0.0}| and |\NEWdescription{1.0.0}|.
\begin{desc}
|\URL{<url>}|
\end{desc}
Typesets its argument in |\texttt|. Obsolete given that \ltx\ loads
\ctan{hyperref}.
\section{New commands}
\ltx\ provides several new commands for convenience.
\begin{desc}
|\begin{desc}...\end{desc}|
\end{desc}
Describes a command or environment, setting it out into the margin and
surrounding it with a frame. Originally written by Javier Bezos for the
\ctan{enumitem} documenation.
\begin{example}
Unfortunately, a side-by-side listing doesn't seem to be possible
here because pipes seem to be gobbled by the \package{listings}
package (a side-effect of loading both \ctan{listings} and
\ctan{shortvrb}, perhaps). However, here's how the |\email| command
is described in this document:
\begin{verbatim}
\begin{desc}
|\email{<<email>>}|
\end{desc}
\end{verbatim}
\end{example}
\begin{desc}
|\email{<email>}|
\end{desc}
Typesets an email address with a |mailto:| link.
\begin{example}
Emails, along with other hyperlinks, are colored |magenta|, although
\ltx's default magenta is a bit closer to purple.
\begin{LTXexample}
\email{[email protected]}
\end{LTXexample}
\end{example}
\begin{desc}
|\https{<url>}|\qquad|\http{<url>}|
\end{desc}
Typesets |<url>| with |https://| or |http://| prepended to the link address;
this makes links display a bit prettier than |\url| might.
\begin{example} The following two listings are equivalent:
\begin{LTXexample}
\https{ctan.org}
\end{LTXexample}
\begin{LTXexample}
\href{https://ctan.org}{ctan.org}
\end{LTXexample}
\end{example}
\begin{desc}
|\ctan{<package>}| \\
|\ctanlogo|
\end{desc}
Typesets a package name with a link to |ctan.org/pkg/<package>|.
\begin{warning}
\ltx's definition of |\ctan| differs from \ltxguide's,
which simply typesets ``\ctanlogo'' in small-caps. The \ctanlogo\
logo is typeset with |\ctanlogo|.
\end{warning}
\begin{LTXexample}
the \ctan{listings} package\dots
\end{LTXexample}
\begin{desc}
|\package{<package>}|\\
|\ltxclass{<document class>}|\\
|\option{<option name>}|\\
|\filename{<filename>}|\\
|\extension{<file extension>}|
\end{desc}
Typesets a \LaTeX\ package, option, file extension, etc.\ in |\texttt|.
\begin{note}
Unlike those defined in the \ctan{doctools} package, these commands
don't add entries to the index.
\end{note}
\begin{LTXexample}
\extension{tex} files
\end{LTXexample}
\begin{desc}
|\begin{warning}...\end{warning}|\\
|\begin{note}...\end{note}|\\
|\begin{example}...\end{example}|\\
|\begin{bug}...\end{bug}|
\end{desc}
These environments typeset ``notices'' with a hanging indent. Original
definitions written by Javier Bezos for the \ctan{enumitem} documentation.
|\noticestyle| is executed before the marker text (``warning,'' ``note,''
etc.). New notice environments can be created with |\newnotice|.
\begin{bug}
If the first content in a notice environment is vertical, the marker
text is hidden. This can be avoided by starting the
environment with |\leavevmode\\| or by adding some introductory
material to the first line.
This is actually a bug in the |\list| command that the notice
environments use.
\end{bug}
\begin{example}
Although this example is short, note that subsequent lines will
be indented. These environments only vary by text.
\begin{LTXexample}
\begin{warning}
Lorem ipsum\dots
\end{warning}
\end{LTXexample}
\end{example}
\begin{desc}
|\newnotice{<environment name>}{<marker text>}|
\end{desc}
Creates a new notice environment in the style of |warning|, |note|, and so
on. The marker text is automatically uppercased.
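\begin{example}
	For instance, a hypothetical |tip| environment (not defined by \ltx)
	could be created and used like so:
\begin{verbatim}
\newnotice{tip}{tip}

\begin{tip}
    Lorem ipsum\dots
\end{tip}
\end{verbatim}
\end{example}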
\begin{desc}
|\begin{LTXexample}[<options>]...\end{LTXexample}|
\end{desc}
Typesets \LaTeX\ code next to a listing of its source. Providing examples
makes your user's lives easier, and should be done as much as possible. The
|LTXexample| environment is provided by the \ctan{showexpl} package.
Excerpted from \ctan{showexpl}'s documentation as of v0.3o 2016/12/11, valid
options include:
\begin{keys}
\key{attachfile}[\bool][false]
If set to true the sourcecode will be attached to the
\extension{pdf} file---presumed that the document is processed by
|pdflatex|.
\key{codefile}[\m{filename}][\bs jobname.tmp]
Name of the (temporary) file that contains the code which will be
formatted as source code. The default value is |\jobname.tmp|.
\key{explpreset}[\m{key val list}][\{language=[LaTeX]TeX,\}]
A |<key val list>| which serves for presetting the properties of the
formatting of the source code, for values see the documentation of
the \ctan{listings} package. The default value is
empty.\footnote{\ltx\ redefines the default to perform syntax
highlighting for \LaTeX, in addition to the general improvements
made for all listings in the document.}
\key{graphic}[\m{filename}]
If present, includes and displays this file instead of the formatted
code.
\key{hsep}[\m{length}]
Defines the horizontal distance between the source code and the
formatted text.
\key{justification}[\m{code}][\bs raggedright]
Defines the justification of the formatted text: reasonable values
are |\raggedleft|, |\raggedright|, |\centering|.
\key{overhang}[\m{dimen}][0pt]
Defines the amount by which the formatted text and the source code
can overlap the print space. The default value is 0\,pt.
\key{pos}[\m{\alternative{t,b,l,r,o,i}}][l]
Defines the relative position of the formatted text relating to the
source code. Allowed values are |t|, |b|, |l|, |r|, |o|, and |i| for
top, bottom, left, right, outer, and inner. The last values give
sense only for two-sided printing, where there are outer and inner
margins of a page.
\key{preset}[\m{code}]
Any \TeX\ code executed before the sample code but not visible in
the listings area.
\key{rangeaccept}[\bool][false]
If set to true, one can define ranges of lines that will be
excerpted from the source code.
\key{rframe}[[\texttt{single}]][\textrm{\textit{(empty)}}]
Defines the form of the frame around the formatted text. With a
non-empty value (e.\,g. ``single'') a simple frame will be drawn. In
the future more kinds of frames will be supported. The default value
is empty (no frame).
\key{varwidth}[\bool][false]
If set to true, the formatted text is set with its ``natural'' width
instead of a fixed width as given by the value of the option
|width|.
\key{vsep}[\m{dimen}]
Defines the vertical distance between the source code and the
formatted text.
\key{wide}[\bool][false]
If set to true, the source code and the formatted text overlap the
print space and the margin area.
\key{width}[\m{dimen}]
Defines the width of the formatted text. The default value depends
of the relative positions of the source code and the formatted text.
\key{scaled}[[\m{scale factor}]]
Without a value the formatted text will be scaled to fit the given
width of the result area. With a number as value the formatted text
will be scaled by this number.
\end{keys}
In addition to these options the kind of the result box (default: |\fbox|)
can be changed. For example:
\begin{latexcode}
\renewcommand\ResultBox{\fcolorbox{green}{lightgray}}
\setlength\ResultBoxSep{5mm}% default: \fboxsep
\setlength\ResultBoxRule{2mm}% default: \fboxrule
\end{latexcode}
\begin{desc}
|\begin{packages}...\end{packages}|\\
|\begin{classes}...\end{classes}|\\
	|\begin{options}...\end{options}|
\end{desc}
Frequently, package authors need to describe a series of options, packages,
or document classes. These environments wrap the |description| environment
and provide an |\item| which wraps a command like |\package|. In the
|packages| environment, |\item[listings]| translates to
|\item[\package{listings}]|.
\begin{LTXexample}
\begin{options}
\item[foo] \dots
\item[bar] \dots
\end{options}
\end{LTXexample}
\begin{desc}
|\begin{advise}...\end{advise}| $\equiv$\\
|\begin{faq}...\end{faq}|\\
|\Q|\qquad|\A|\qquad|\advisespace|
\end{desc}
Roughly copied from \ctan{listings}' internal \package{lstdoc} package,
these environments represent a list of questions and answers.
\begin{LTXexample}
\begin{faq}
\Q Lorem ipsum dolor sit amet?
\A Consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
\Q Ut enim ad minim veniam, quis nostrud?
\A Exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
\end{faq}
\end{LTXexample}
Within these environments, |\Q| and |\A| indicate a question and an answer;
they're defined to |\item| and |\advisespace|, respectively.
\begin{note}
|faq| is an exact synonym for |advise|.
The list label for the |advise| environment is |\labeladvise|.
The font is set with |\advisestyle|.
\end{note}
\begin{desc}
|\alternative{<comma list>}|
\end{desc}
Prints a comma-separated list delimited by vertical bars. Space around
commas is not trimmed, and alternates are printed in
|\textup{\texttt{...}}|.
This environment is from \package{lstdoc}.
\begin{LTXexample}
\alternative{true,false}
\end{LTXexample}
\begin{desc}
|\begin{keys}...\end{keys}|\\
|\key[<options>]{<key name>}[<key value>][<default value>]|\\
|\bool|
\end{desc}
Describes keys. Within a |keys| environment, |\bool| indicates a true/false
value. This environment is a recreation of \package{lstdoc}'s |syntax|
environment with a more elegant interface.
|<options>| can include:
\begin{keys}
\key{v}[\m{version}]
The version a key was introduced.
\begin{warning}
This key is currently ignored.
\end{warning}
\key{default}[\m{default value}]
An alias for the final argument; a default value if the key isn't
given.
\key{note}[\m{note}]
A note set in the left margin; might note a group of features or
something else.
\end{keys}
\begin{example}
Note the use of |\bool|:
\begin{LTXexample}
\begin{keys}
\key[note=addon]{key}
[\m{value}][default]
Lorem ipsum\dots
\key{display}[\bool][true]
Lorem ipsum\dots
\key{foo}[\m{foo}]
\key[v=1.3]{bar}
Lorem ipsum\dots
\end{keys}
\end{LTXexample}
\end{example}
\begin{changelog}[author=Rebecca Turner]
\begin{version}[v=0.1.1, date=2019-04-15]
\added
\item Renamed \cs{ltxguidex@noticestyle} to \cs{noticestyle} and committed
it to the public \textsc{api}.
\item The \cs{cs} and \cs{command} commands.
\end{version}
\end{changelog}
\end{document}
|
import Mathlib.Algebra.Ring.Basic
import Mathlib.Tactic.Ring
import Mathlib.Tactic.LibrarySearch
import Mathlib.Tactic.Contrapose
variable {R : Type _}
theorem add_self_eq_mul_two [Semiring R] (a : R) : a + a = 2 * a := by
rw [←one_mul a, ←add_mul, one_mul]
congr
norm_num
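-- Illustrative sanity check (not in the original file): the lemma applies in
-- any semiring, e.g. ℕ.
example (a : ℕ) : a + a = 2 * a := add_self_eq_mul_two a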
section Ring
--theorem sub_eq_add_neg (a b : R) : a - b = a + -b := Ring.sub_eq_add_neg a b
variable [Ring R]
-- theorem mul_sub (a b c : R) : a * (b - c) = a * b - a * c := by
-- rw [sub_eq_add_neg, mul_add, ←neg_mul_eq_mul_neg, ←sub_eq_add_neg]
-- theorem sub_mul (a b c : R) : (a - b) * c = a * c - b * c := by
-- rw [sub_eq_add_neg, add_mul, ←neg_mul_eq_neg_mul, ←sub_eq_add_neg]
-- @[simp] theorem sub_zero (a : R) : a - 0 = a := by
-- rw [sub_eq_add_neg, ←add_zero (-0), add_left_neg (0: R)]
-- simp
-- theorem neg_add (a b : R) : - (a + b) = -a + -b := by
-- have h₁ : - (a + b) = -(a + b) + (a + b) + -a + -b := by
-- rw [add_assoc, add_comm (-a), add_assoc, add_assoc, ← add_assoc b]
-- rw [add_right_neg b, zero_add, add_right_neg a, add_zero]
-- rwa [add_left_neg (a + b), zero_add] at h₁
-- theorem sub_add (a b c : R) : a - (b + c) = a - b - c := by
-- rw [sub_eq_add_neg, neg_add, ←add_assoc, ←sub_eq_add_neg, ←sub_eq_add_neg]
-- theorem sub_add_comm (n m k : R) : n + m - k = n - k + m := by
-- rw [sub_eq_add_neg, add_assoc, add_comm m, ←add_assoc, ←sub_eq_add_neg]
end Ring
section CommRing
variable [CommRing R]
theorem evenpow_neg {n m : ℕ} (a : R) (h : n = 2 * m) : (-a) ^ n = a ^ n := by
rw [h, pow_mul, pow_mul, neg_sq]
theorem oddpow_neg {n m : ℕ} (a : R) (h : n = 2 * m + 1) : (-a) ^ n = -(a ^ n) := by
rw [h, pow_succ, evenpow_neg a (show 2 * m = 2 * m by rfl), ←neg_mul_eq_neg_mul, ←pow_succ,
Nat.add_one]
end CommRing
section IntegralDomain
variable [CommRing R] [IsDomain R]
-- TODO maybe delete
theorem nzero_mul_left_cancel (a b c : R) : a ≠ 0 → a * b = a * c → b = c := by
intro a_ne_z ab_eq_ac
rw [←add_left_inj (-(a * c)), add_neg_self (a * c), neg_mul_eq_mul_neg, ←mul_add] at ab_eq_ac
cases (@mul_eq_zero _ _ _ a (b + -c)).1 ab_eq_ac with
| inl h => exact False.elim (a_ne_z h)
| inr h =>
rw [←add_left_inj (-c), add_neg_self c]
exact h
end IntegralDomain
|
module STCR2Z2T0S0EndPoint where
import Control.Monad as M
import Data.Array.Repa as R
import Data.Binary (decodeFile)
import Data.Complex
import Data.List as L
import DFT.Plan
import FokkerPlanck.DomainChange
import FokkerPlanck.MonteCarlo
import FokkerPlanck.Pinwheel
import Image.IO
import Image.Transform (normalizeValueRange)
import STC
import System.Directory
import System.Environment
import System.FilePath
import System.Random
import Text.Printf
import Types
import Utils.Array
import Utils.Parallel
main = do
args@(numPointStr:numOrientationStr:numScaleStr:thetaSigmaStr:scaleSigmaStr:maxScaleStr:taoStr:numTrailStr:maxTrailStr:theta0FreqsStr:thetaFreqsStr:scale0FreqsStr:scaleFreqsStr:histFilePath:numIterationStr:writeSourceFlagStr:cutoffRadiusEndPointStr:cutoffRadiusStr:reversalFactorStr:cStr:patchNormFlagStr:patchNormSizeStr:approximatedEigenValueStr:shape2DStr:segmentsFilePath:segIdxStr:useFFTWWisdomFlagStr:fftwWisdomFileName:numThreadStr:_) <-
getArgs
print args
let numPoint = read numPointStr :: Int
numOrientation = read numOrientationStr :: Int
numScale = read numScaleStr :: Int
thetaSigma = read thetaSigmaStr :: Double
scaleSigma = read scaleSigmaStr :: Double
maxScale = read maxScaleStr :: Double
tao = read taoStr :: Double
numTrail = read numTrailStr :: Int
maxTrail = read maxTrailStr :: Int
theta0Freq = read theta0FreqsStr :: Double
theta0Freqs = [-theta0Freq .. theta0Freq]
thetaFreq = read thetaFreqsStr :: Double
thetaFreqs = [-thetaFreq .. thetaFreq]
scale0Freq = read scale0FreqsStr :: Double
scaleFreq = read scaleFreqsStr :: Double
scale0Freqs = [-scale0Freq .. scale0Freq]
scaleFreqs = [-scaleFreq .. scaleFreq]
numIteration = read numIterationStr :: Int
writeSourceFlag = read writeSourceFlagStr :: Bool
cutoffRadiusEndPoint = read cutoffRadiusEndPointStr :: Int
cutoffRadius = read cutoffRadiusStr :: Int
reversalFactor = read reversalFactorStr :: Double
patchNormFlag = read patchNormFlagStr :: Bool
patchNormSize = read patchNormSizeStr :: Int
approximatedEigenValue = read approximatedEigenValueStr :: Double
shape2D@(Points _ minDist _) = read shape2DStr :: Points Shape2D
segIdx = read segIdxStr :: Int
useFFTWWisdomFlag = read useFFTWWisdomFlagStr :: Bool
numThread = read numThreadStr :: Int
folderPath = "output/test/STCR2Z2T0S0EndPoint"
a = 20 :: Int
b = 5 :: Int
c = read cStr :: Int
endPointFilePath =
folderPath </>
(printf
"EndPoint_%d_%d_%d_%d_%d_%d_%.2f_%.2f_%d_%d_%d_%f.dat"
numPoint
(round thetaFreq :: Int)
(round scaleFreq :: Int)
(round maxScale :: Int)
(round tao :: Int)
cutoffRadiusEndPoint
thetaSigma
scaleSigma
a
b
c
reversalFactor)
fftwWisdomFilePath = folderPath </> fftwWisdomFileName
createDirectoryIfMissing True folderPath
flag <- doesFileExist histFilePath
radialArr <-
if flag
then R.map magnitude . getNormalizedHistogramArr <$>
decodeFile histFilePath
else do
putStrLn "Couldn't find a Green's function data. Start simulation..."
solveMonteCarloR2Z2T0S0Radial
numThread
numTrail
maxTrail
numPoint
numPoint
thetaSigma
scaleSigma
maxScale
tao
theta0Freqs
thetaFreqs
scale0Freqs
scaleFreqs
histFilePath
(emptyHistogram
[ (round . sqrt . fromIntegral $ 2 * (div numPoint 2) ^ 2)
, L.length scale0Freqs
, L.length theta0Freqs
, L.length scaleFreqs
, L.length thetaFreqs
]
0)
arrR2Z2T0S0 <-
computeUnboxedP $
computeR2Z2T0S0ArrayRadial
(PinwheelHollow0 10)
(cutoff cutoffRadius radialArr)
numPoint
numPoint
1
maxScale
thetaFreqs
scaleFreqs
theta0Freqs
scale0Freqs
plan <-
makeR2Z2T0S0Plan emptyPlan useFFTWWisdomFlag fftwWisdomFilePath arrR2Z2T0S0
-- (plan, pathNormMethod) <-
-- makePatchNormFilter plan' numPoint numPoint patchNormFlag patchNormSize
-- minDist = 8
-- kanizsaTriangle1 =
-- makeShape2D $ Points (-30, -30) minDist (Corner 30 60 80) --(PacMan 30 60 50) --(Ehrenstein 8 15 40) -- (IncompleteCircle 0 60 50 ) -- (TJunction 45 50) -- (PacMan 0 60 100 ) -- (Corner 0 60 100) --(KanizsaTriangle1 0 480 160 80)
-- ys <-
-- (\aa -> aa L.!! segIdx) <$> decodeFile segmentsFilePath :: IO [(Int, Int)]
let pointSet = makeShape2D shape2D
shapeArr =
getShape2DRepaArray
numPoint
numPoint
(L.map
(\(x, y) ->
(x + fromIntegral numPoint / 2, y + fromIntegral numPoint / 2))
pointSet)
xs =
L.map (\(x, y) -> R2S1RPPoint (x, y, 0, 1)) . getShape2DIndexList $
pointSet
-- (xAvg, yAvg) =
-- (\(as, bs) ->
-- ( round $
-- (fromIntegral . L.sum $ as) / (fromIntegral . L.length $ as)
-- , round $
-- (fromIntegral . L.sum $ bs) / (fromIntegral . L.length $ bs))) .
-- L.unzip $
-- ys
-- centeredYs = L.map (\(x, y) -> (x - xAvg, y - yAvg)) ys
-- xs = L.map (\(x, y) -> R2S1RPPoint (x, y, 0, 1)) centeredYs
-- shapeArr =
-- L.head . cluster2Array numPoint numPoint $
-- [L.map (\(x, y) -> (x + div numPoint 2, y + div numPoint 2)) centeredYs]
-- pointSet = L.map (\(x, y) -> (fromIntegral x, fromIntegral y)) centeredYs
let bias = computeBiasR2T0S0 numPoint numPoint theta0Freqs scale0Freqs xs
eigenVec =
computeInitialEigenVectorR2T0S0
numPoint
numPoint
theta0Freqs
scale0Freqs
thetaFreqs
scaleFreqs
xs
plotImageRepa (folderPath </> "Shape.png") . ImageRepa 8 $ shapeArr
endPointFlag <- doesFileExist endPointFilePath
completionFieldR2Z2'' <-
if endPointFlag
then readRepaArray endPointFilePath
    else (do putStrLn "Couldn't find the endpoint data. Starting computation..."
arrR2Z2T0S0EndPoint <-
computeUnboxedP $
computeR2Z2T0S0ArrayRadial
-- pinwheel
-- (pinwheelHollowNonzeronCenter 16)
(PinwheelHollow0 4)
(cutoff cutoffRadiusEndPoint radialArr)
numPoint
numPoint
1
maxScale
thetaFreqs
scaleFreqs
theta0Freqs
scale0Freqs
pathNormMethod <-
if patchNormFlag
then do
let points =
createIndex2D .
L.map
(\(i, j) ->
(i + div numPoint 2, j + div numPoint 2)) .
getShape2DIndexList $
pointSet
ys =
pointCluster
(connectionMatrixP
(ParallelParams numThread 1)
(minDist + 1)
points) $
points
M.zipWithM_
(\i ->
plotImageRepa
(folderPath </> (printf "Cluster%03d.png" i)) .
ImageRepa 8)
[1 :: Int ..] .
cluster2Array numPoint numPoint $
ys
return . PowerMethodConnection $ ys
else return PowerMethodGlobal
(R.foldAllP max 0 . R.map magnitude $ arrR2Z2T0S0EndPoint) >>=
print
completionFieldR2Z2' <-
powerMethodR2Z2T0S0Reversal
plan
folderPath
numPoint
numPoint
numOrientation
thetaFreqs
theta0Freqs
numScale
scaleFreqs
scale0Freqs
maxScale
arrR2Z2T0S0EndPoint
pathNormMethod
numIteration
writeSourceFlag
(printf
"_%d_%d_%d_%d_%d_%d_%.2f_%.2f_%f_EndPoint"
numPoint
(round thetaFreq :: Int)
(round scaleFreq :: Int)
(round maxScale :: Int)
(round tao :: Int)
cutoffRadiusEndPoint
thetaSigma
scaleSigma
reversalFactor)
0.5
reversalFactor
bias
eigenVec
-- writeRepaArray endPointFilePath completionFieldR2Z2'
return completionFieldR2Z2')
let completionFieldR2Z2 = R.zipWith (*) completionFieldR2Z2'' bias
endpointBias = rotateBiasR2Z2T0S0 180 theta0Freqs completionFieldR2Z2
-- endpointBias =
-- R.zipWith
-- (+)
-- (rotateBiasR2Z2T0S0 90 theta0Freqs completionFieldR2Z2)
-- (rotateBiasR2Z2T0S0 (-90) theta0Freqs completionFieldR2Z2)
-- rotateBiasR2Z2T0S0 0 theta0Freqs . R.traverse completionFieldR2Z2 id $ \f idx@(Z :. _ :. _ :. i :. j) ->
-- if (sqrt . fromIntegral $
-- (i - div numPoint 2) ^ 2 + (j - div numPoint 2) ^ 2) >
-- 35
-- then 0
-- else f idx
biasMag =
R.sumS .
R.sumS .
rotate4D .
rotate4D .
R.map magnitude .
r2z2Tor2s1rp numOrientation thetaFreqs numScale scaleFreqs $
endpointBias
plotImageRepa (folderPath </> "EndPointBias.png") .
ImageRepa 8 . computeS . R.extend (Z :. (1 :: Int) :. All :. All) $
biasMag
printf "%f %f\n" reversalFactor (R.sumAllS biasMag)
-- powerMethodR2Z2T0S0BiasReversal
-- plan
-- folderPath
-- numPoint
-- numPoint
-- numOrientation
-- thetaFreqs
-- theta0Freqs
-- numScale
-- scaleFreqs
-- scale0Freqs
-- arrR2Z2T0S0
-- -- numIteration
-- 10
-- writeSourceFlag
-- (printf
-- "_%d_%d_%d_%d_%d_%d_%.2f_%.2f"
-- numPoint
-- (round thetaFreq :: Int)
-- (round scaleFreq :: Int)
-- (round maxScale :: Int)
-- (round tao :: Int)
-- cutoffRadius
-- thetaSigma
-- scaleSigma)
-- 0.1
-- (computeS endpointBias)
-- (R.fromFunction
-- (Z :. (L.length thetaFreqs) :. (L.length scaleFreqs) :.
-- (L.length theta0Freqs) :.
-- (L.length scale0Freqs) :.
-- numPoint :.
-- numPoint) $ \(Z :. k :. l :. _ :. _ :. i :. j) ->
-- if k == div (L.length thetaFreqs) 2 && l == div (L.length scaleFreqs) 2
-- then 1 / (fromIntegral $ (L.length theta0Freqs * L.length scale0Freqs)) :+
-- 0
-- else 0)
|
/* Copyright (c) 2014, Giuseppe Argentieri <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* =====================================================================================
*
* Filename: Expi.c
*
* Description: Calculation of the exponential integrals (w/o the GSL special
* functions).
*
* Version: 1.0
* Created: 15/04/2013 11:54:19
* Revision: none
* Compiler: gcc
*
* Author: Giuseppe Argentieri (ga), [email protected]
* Organization:
*
* =====================================================================================
*/
#include "funcs.h"
#include <gsl/gsl_integration.h>
/* #include <gsl/gsl_sf_expint.h> */
double fu ( double t, void* params )
{
double f = -exp(-t)/t ;
return f ;
}
double ex ( double t, void* params )
{
double e = exp(-t) ;
return e ;
}
int expi ( double x, double* result, double* abserr )
{
double r, err ;
gsl_integration_workspace* expi_ws =
gsl_integration_workspace_alloc (WS_SZ) ;
gsl_function F ;
F.function = &fu ;
int status ;
status = gsl_integration_qagiu ( &F, x, 10e-9, .001 , WS_SZ, expi_ws, &r,
&err) ;
*result = r ; *abserr = err ;
/* Using the GSL special functions, it is simply:
*
* *result = - gsl_sf_expint_E1(x) ;
*/
gsl_integration_workspace_free (expi_ws) ;
return status;
}
int expi_plus ( double x, double* result, double* abserr )
{
double r, err ;
gsl_integration_workspace *expi_ws =
gsl_integration_workspace_alloc (WS_SZ) ;
gsl_function F ;
F.function = &ex ;
int status ;
status = gsl_integration_qawc ( &F, -x, x, 0, 1e-9, .001, WS_SZ,
expi_ws, &r, &err ) ;
double R , EXPI , ERREXPI ;
int s = expi ( x , &EXPI, &ERREXPI ) ;
R = - (r - EXPI) ; /* The minus (-) sign because of the def.
of ex */
double ERR ;
ERR = err + ERREXPI ;
*result = R ;
*abserr = ERR ;
/* Using the GSL exponential integral, it is simply:
*
* R = gsl_sf_expint_Ei(x) ;
*
*/
gsl_integration_workspace_free (expi_ws) ;
return status + s ;
}
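/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): for x = 1 these routines should return approximately
 * -E1(1) ~ -0.21938 and Ei(1) ~ 1.89512.
 *
 *	double r, err ;
 *	int status = expi (1.0, &r, &err) ;
 *	printf ("-E1(1) = %g (+/- %g), status %d\n", r, err, status) ;
 *	status = expi_plus (1.0, &r, &err) ;
 *	printf ("Ei(1)  = %g (+/- %g), status %d\n", r, err, status) ;
 */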
|
[STATEMENT]
lemma parts_UN [simp]: "parts(\<Union>x\<in>A. H x) = (\<Union>x\<in>A. parts(H x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. parts (\<Union> (H ` A)) = (\<Union>x\<in>A. parts (H x))
[PROOF STEP]
by (intro equalityI parts_UN_subset1 parts_UN_subset2) |
-- Andreas, 2017-02-20, issue #2467
-- Proper error on missing BUILTIN REWRITE
{-# OPTIONS --rewriting #-}
postulate A : Set
{-# REWRITE A #-}
-- Should fail with error like
|
# Exercise 13 - Vibration Excitation
2021.05.05.
## Problem:
The figure accompanying this exercise shows a two-degree-of-freedom system composed of two rigid bodies: a disc of mass $m_1$ and radius $R$, and a body of mass $m_2$. The disc rolls on horizontal ground, and its center of mass is attached to the environment by a spring of stiffness $k_1$. The other body is in the gravitational field and moves vertically along a frictionless guide while attached to a spring of stiffness $k_2$. The other end of the $k_2$ spring connects to an ideal cable which runs over an ideal (frictionless/massless) pulley and is attached to the center of mass of the disc. The cable remains taut throughout.
### Data:
|||
|-------------------------------------|-------------------------------------|
| $m_0$ = 0.1 kg | $R$ = 0.3 m |
| $m_1$ = 1 kg | $e$ = 0.01 m |
| $m_2$ = 3 kg | $M_0$ = 3 Nm |
| $k_1$ = 100 N/m | $\omega$ = 30 rad/s |
| $k_2$ = 200 N/m | $\varepsilon$ = $\pi$/6 rad/s$^2$ |
### Tasks:
1. Write down the linear equation of motion in matrix-coefficient form.
2. Determine the steady-state part of the motion law.
3. What is the maximum value of the force arising in the spring of stiffness $k_2$ in the steady state?
4. Determine the natural angular frequencies and the corresponding eigenvectors.
## Solution:
## Task 1:
For small displacements, the linear equation of motion can be written in matrix-coefficient form as
$$\mathbf{M}\mathbf{\ddot{q}}+\mathbf{C\dot{q}}+\mathbf{Kq} = \mathbf{Q^*},$$
where $\mathbf{q}$ is the vector of generalized coordinates, $\mathbf{M}$ is the mass matrix, $\mathbf{C}$ is the damping matrix, $\mathbf{K}$ is the stiffness matrix, and $\mathbf{Q^*}$ is the vector of generalized forces. (There is no dissipative energy in the system, so the damping matrix will be zero.)
As a first step, the generalized coordinates have to be chosen. The system has 2 degrees of freedom, so two generalized coordinates are needed; based on the figure, let one be the vertical displacement $y$ of the rigid body and the other the angular displacement $\psi$ of the disc:
$$
\mathbf{q} = \begin{bmatrix}
q_1\\
q_2
\end{bmatrix} = \begin{bmatrix}
y\\
\psi
\end{bmatrix}.
$$
```python
import sympy as sp
from IPython.display import display, Math
sp.init_printing()
```
```python
## Define functions and symbols
m0, m1, m2, R, e, k1, k2, M0, ω, ε, g = sp.symbols("m0, m1, m2, R, e, k1, k2, M0, ω, ε, g", real=True)
# Build a substitution list from the given data, in SI units
adatok = [(m0, 0.1), (m1, 1), (m2, 3), (R, 0.2),
          (e, 0.01), (k1, 100), (k2, 200), (M0, 3),
          (ω, 30), (ε, sp.pi/6), (g, 9.81)]
# generalized coordinates
t = sp.symbols("t", real=True, positive=True)
y = sp.Function('y')(t)
ψ = sp.Function('ψ')(t)
# excitation
M_t = M0*sp.cos(ω*t+ε)
```
```python
### Kinetic energy, potential energy, dissipative energy
### First, express these quantities in terms of the generalized coordinates
# velocity of point B
vB = R*ψ.diff(t)
# angular velocity of body 1
ω1 = ψ.diff(t)
# velocity of point C
vC = y.diff(t)
# moment of inertia of the disc about point B
ΘB = sp.Rational(1,2)*m1*R**2
# velocity of the mass m0 (derivative of its position vector)
konst = sp.symbols("konst") # constant term (drops out of the expression after differentiation)
r0 = sp.Matrix([[e*sp.cos(ω*t)+konst],[y + e*sp.sin(ω*t)+konst]])
v0 = r0.diff(t)
# displacement of the disc in the x direction
x = R*ψ
## Kinetic energy
T = (sp.Rational(1,2)*m1*vB**2 + sp.Rational(1,2)*ΘB*ω1**2 +
     sp.Rational(1,2)*m2*vC**2 + sp.Rational(1,2)*m0*v0.dot(v0)).expand().trigsimp().simplify()
display(Math('T = {}'.format(sp.latex(T))))
## Potential energy
U = sp.Rational(1,2)*k1*(x)**2 + sp.Rational(1,2)*k2*(x-y)**2+m0*g*e*sp.sin(ω*t)
display(Math('U = {}'.format(sp.latex(U))))
## No dissipative energy in this problem!
```
$\displaystyle T = \frac{3 R^{2} m_{1} \left(\frac{d}{d t} ψ{\left(t \right)}\right)^{2}}{4} + \frac{e^{2} m_{0} ω^{2}}{2} + e m_{0} ω \cos{\left(t ω \right)} \frac{d}{d t} y{\left(t \right)} + \frac{m_{0} \left(\frac{d}{d t} y{\left(t \right)}\right)^{2}}{2} + \frac{m_{2} \left(\frac{d}{d t} y{\left(t \right)}\right)^{2}}{2}$
$\displaystyle U = \frac{R^{2} k_{1} ψ^{2}{\left(t \right)}}{2} + e g m_{0} \sin{\left(t ω \right)} + \frac{k_{2} \left(R ψ{\left(t \right)} - y{\left(t \right)}\right)^{2}}{2}$
```python
### Generate the matrix coefficients
""" The mass matrix cannot be computed directly from the kinetic energy here,
    because the rotation of the eccentric member also produces a generalized
    force term, which would drop out of the equation during the partial
    differentiation. In such cases the Lagrange equation of the second kind
    must be used.
"""
# Produce the derivatives appearing in the Lagrange equation
# To do so, collect the generalized coordinates into a list
q = [y, ψ]
# Then create a 2-dimensional zero vector for the first two terms of the 2 Lagrange equations
Mat = sp.zeros(2,1)
for i in range(2):
    Mat[i] = (T.diff((q[i]).diff(t))).diff(t)-T.diff(q[i])
display(Mat)
```
$\displaystyle \left[\begin{matrix}- e m_{0} ω^{2} \sin{\left(t ω \right)} + m_{0} \frac{d^{2}}{d t^{2}} y{\left(t \right)} + m_{2} \frac{d^{2}}{d t^{2}} y{\left(t \right)}\\\frac{3 R^{2} m_{1} \frac{d^{2}}{d t^{2}} ψ{\left(t \right)}}{2}\end{matrix}\right]$
From this two-dimensional system the mass matrix and the corresponding term of the generalized force vector can easily be extracted, since (based on the Lagrange equation) this expression can be written as
$$
\left[\begin{matrix}- e m_{0} ω^{2} \sin{\left(t ω \right)} + m_{0} \frac{d^{2}}{d t^{2}} y{\left(t \right)} + m_{2} \frac{d^{2}}{d t^{2}} y{\left(t \right)}\\\frac{3 R^{2} m_{1} \frac{d^{2}}{d t^{2}} ψ{\left(t \right)}}{2}\end{matrix}\right] = \mathbf{M\ddot{q}}-\mathbf{Q}^{m_0}(t)
$$
Thus the mass matrix is the coefficient matrix of the second time derivative of the generalized coordinate vector, while the generalized force term arising from the eccentric rotation is the term that makes the equation inhomogeneous.
```python
# create zero matrices for the mass matrix and the force vector
M = sp.zeros(2)
Q = sp.zeros(2,1)
# second derivatives of the generalized coordinates
ddq = sp.Matrix([y.diff(t,2), ψ.diff(t,2)])
for i in range(2):
for j in range(2):
M[i,j] = Mat[i].expand().coeff(ddq[j])
Q_m0 = (M*ddq).expand()-Mat.expand()
display(Math('Q^{{m_0}} = {}'.format(sp.latex(Q_m0))))
display(Math('M = {}'.format(sp.latex(M))))
```
$\displaystyle Q^{m_0} = \left[\begin{matrix}e m_{0} ω^{2} \sin{\left(t ω \right)}\\0\end{matrix}\right]$
$\displaystyle M = \left[\begin{matrix}m_{0} + m_{2} & 0\\0 & \frac{3 R^{2} m_{1}}{2}\end{matrix}\right]$
```python
## The stiffness matrix can be obtained directly from the potential energy
# create a zero matrix for the stiffness matrix
K = sp.zeros(2,2)
# fill the zero matrix with the appropriate partial derivative values
for i in range(2):
for j in range(2):
K[i,j] = U.expand().diff(q[i]).diff(q[j])
display(Math('K = {}'.format(sp.latex(K))))
```
$\displaystyle K = \left[\begin{matrix}k_{2} & - R k_{2}\\- R k_{2} & R^{2} k_{1} + R^{2} k_{2}\end{matrix}\right]$
```python
### The other term of the generalized force vector comes from the power of the external forces
# In this problem only the torque M(t) acts as an external force; its power is:
P = -M_t*ψ.diff(t)
"""Ebből a külső erők vektora kapható ha vesszük az általános koordináták
deriváltjainak az együtthatóit a megfelelő helyen"""
Q_M = sp.zeros(2,1)
for i in range(2):
Q_M[i] = P.expand().coeff(q[i].diff(t))
Q_M
```
$\displaystyle \left[\begin{matrix}0\\- M_{0} \cos{\left(t ω + ε \right)}\end{matrix}\right]$
```python
## The generalized force is the sum of the two force terms
Q = Q_M+Q_m0
display(Math('Q = {}'.format(sp.latex(Q))))
"""Az általános erő szétszedhető sin-os és cos-os tagokra,
(ez a sajátkörfrekvencia számolásnál egy fontos lépés lesz).
Ehhez először használjuk a trig_expand() parancsot, hogy kibontsuk a cos-os tagot"""
Q[1] = sp.expand_trig(Q[1])
display(Math('Q = {}'.format(sp.latex(Q))))
# Then extract the coefficients of sin(tω) and cos(tω)
Fc = sp.zeros(2,1)
Fs = sp.zeros(2,1)
for i in range(2):
Fc[i] = Q[i].expand().coeff(sp.cos(ω*t))
Fs[i] = Q[i].expand().coeff(sp.sin(ω*t))
display(Math('F_s = {}'.format(sp.latex(Fs))))
display(Math('F_c = {}'.format(sp.latex(Fc))))
```
$\displaystyle Q = \left[\begin{matrix}e m_{0} ω^{2} \sin{\left(t ω \right)}\\- M_{0} \cos{\left(t ω + ε \right)}\end{matrix}\right]$
$\displaystyle Q = \left[\begin{matrix}e m_{0} ω^{2} \sin{\left(t ω \right)}\\- M_{0} \left(- \sin{\left(ε \right)} \sin{\left(t ω \right)} + \cos{\left(ε \right)} \cos{\left(t ω \right)}\right)\end{matrix}\right]$
$\displaystyle F_s = \left[\begin{matrix}e m_{0} ω^{2}\\M_{0} \sin{\left(ε \right)}\end{matrix}\right]$
$\displaystyle F_c = \left[\begin{matrix}0\\- M_{0} \cos{\left(ε \right)}\end{matrix}\right]$
With this, the equation of motion reads
$$\mathbf{M}\mathbf{\ddot{q}}+\mathbf{Kq} = F_s\sin(\omega t)+F_c\cos(\omega t).$$
## Task 2
Because of the harmonic excitation, the particular (steady-state) solution is sought with a harmonic trial function:
$$
\mathbf{q}(t) = \mathbf{L}\cos(\omega t)+\mathbf{N}\sin(\omega t).
$$
Its derivatives are:
$$
\mathbf{\dot{q}}(t) = -\omega\mathbf{L}\sin(\omega t)+\omega\mathbf{N}\cos(\omega t),
$$
$$
\mathbf{\ddot{q}}(t) = -\omega^2\mathbf{L}\cos(\omega t)-\omega^2\mathbf{N}\sin(\omega t).
$$
Substituting the trial function and its derivatives back into the equation of motion, then collecting the $\sin(\omega t)$ and $\cos(\omega t)$ coefficients, we obtain the system of equations for $\mathbf{L}$ and $\mathbf{N}$:
$$
\begin{bmatrix}
-\omega^2\mathbf{M}+ \mathbf{K} & \mathbf{0}\\
\mathbf{0} & -\omega^2\mathbf{M}+ \mathbf{K}
\end{bmatrix} \begin{bmatrix}
\mathbf{L}\\
\mathbf{N}
\end{bmatrix} = \begin{bmatrix}
\mathbf{F}_c\\
\mathbf{F}_s
\end{bmatrix}.
$$
```python
### Solve the system of equations
# Create symbolic vectors for the solution
L1, L2, N1, N2 = sp.symbols("L1, L2, N1, N2")
L = sp.Matrix([[L1],[L2]])
N = sp.Matrix([[N1],[N2]])
# Solution
L_sol = sp.solve(((-ω**2*M+K)*L-Fc).subs(adatok))
N_sol = sp.solve(((-ω**2*M+K)*N-Fs).subs(adatok))
L[0] = L_sol[L1].evalf(4)
L[1] = L_sol[L2].evalf(4)
N[0] = N_sol[N1].evalf(4)
N[1] = N_sol[N2].evalf(4)
# substitute the results into the particular solution
q_p = (L*sp.cos(ω*t)+N*sp.sin(ω*t)).expand().subs(adatok)
display(Math('\mathbf{{q}}_p = {}'.format(sp.latex(q_p))))
```
$\displaystyle \mathbf{q}_p = \left[\begin{matrix}0.0002071 \sin{\left(30 t \right)} - 0.0009696 \cos{\left(30 t \right)}\\- 0.03591 \sin{\left(30 t \right)} + 0.06278 \cos{\left(30 t \right)}\end{matrix}\right]$
## Task 3
```python
## For the maximum spring force, both the static and the dynamic parts must be taken into account
# Part due to the static deformation:
Fk2_st = ((m0+m2)*g).subs(adatok).evalf(4)
display(Math('F_\\mathrm{{k2,st}} = {}\\ \mathrm{{N}}'.format(sp.latex(Fk2_st))))
# The dynamic part is easily computed numerically
import numpy as np
t_val = np.linspace(0,0.5,1000) # sample the interval [0, 0.5] at 1000 points
Fk2_din = np.zeros(len(t_val)) # zero array with the same number of elements
# compute the dynamic term at each time instant
for i in range(len(t_val)):
Fk2_din[i] = (k2*(R*q_p[1]-q_p[0])).subs(adatok).subs(t,t_val[i]).evalf()
Fk2_din_max = max(Fk2_din).round(2)
# Dynamic term
display(Math('F_\\mathrm{{k2,din,max}} = {}\\ \mathrm{{N}}'.format(sp.latex(Fk2_din_max))))
# Maximum of the force
Fk2_max = (Fk2_din_max + Fk2_st).evalf(4)
display(Math('F_\\mathrm{{k2,max}} = {}\\ \mathrm{{N}}'.format(sp.latex(Fk2_max))))
```
$\displaystyle F_\mathrm{k2,st} = 30.41\ \mathrm{N}$
$\displaystyle F_\mathrm{k2,din,max} = 3.08\ \mathrm{N}$
$\displaystyle F_\mathrm{k2,max} = 33.49\ \mathrm{N}$
## Task 4
```python
## The natural angular frequencies are obtained from the frequency equation
ω_n2, ω_n = sp.symbols("ω_n2, ω_n")
# oldjuk meg az egyenletet `ω_n^2`-re, majd vonjunk gyököt
ω_n2_val = sp.solve((-ω_n2*M+K).subs(adatok).det())
ω_n = [(sp.sqrt(i)) for i in ω_n2_val]
display(Math('ω_{{n,1}} = {}\\ \mathrm{{rad/s}}'.format(sp.latex(ω_n[0].evalf(3)))))
display(Math('ω_{{n,2}} = {}\\ \mathrm{{rad/s}}'.format(sp.latex(ω_n[1].evalf(4)))))
```
$\displaystyle ω_{n,1} = 4.17\ \mathrm{rad/s}$
$\displaystyle ω_{n,2} = 15.72\ \mathrm{rad/s}$
```python
## determine the mode shape vectors
# Create an empty list for the mode shape vectors, then fill it with 2 mode shape vectors whose first element is 1
A = []
A2 = sp.symbols("A2")
for i in range(2):
A.append(sp.Matrix([[1],[A2]]))
# solve the equation for the mode shapes and write the solutions into the mode shape vectors (2nd coordinate)
A[i][1] = sp.solve((((-ω_n[i]**2*M+K)*A[i]).subs(adatok))[0])[0]
display(Math('A_{{1}} = {}\\begin{{bmatrix}}\\mathrm{{m}} \\\\ \\mathrm{{rad}}\\end{{bmatrix}} '.format(sp.latex(A[0].evalf(3)))))
display(Math('A_{{2}} = {}\\begin{{bmatrix}}\\mathrm{{m}} \\\\ \\mathrm{{rad}}\\end{{bmatrix}} '.format(sp.latex(A[1].evalf(4)))))
```
$\displaystyle A_{1} = \left[\begin{matrix}1.0\\3.65\end{matrix}\right]\begin{bmatrix}\mathrm{m} \\ \mathrm{rad}\end{bmatrix} $
$\displaystyle A_{2} = \left[\begin{matrix}1.0\\-14.15\end{matrix}\right]\begin{bmatrix}\mathrm{m} \\ \mathrm{rad}\end{bmatrix} $
Prepared by:
Juhos-Kiss Álmos (Alkalmazott Mechanika Szakosztály),
based on the worked solution of Bachrathy Dániel (BME MM).
Errors and suggestions:
[email protected]
[email protected]
[email protected]
2021.05.05.
|
{-# OPTIONS_GHC -fno-warn-unused-binds -fno-warn-unused-matches -fno-warn-name-shadowing -fno-warn-missing-signatures #-}
{-# LANGUAGE FlexibleInstances, MultiParamTypeClasses, UndecidableInstances, FlexibleContexts, TypeSynonymInstances #-}
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
-- |
-- | Module : Benchmark approx median
-- | Creator: Xiao Ling
-- | Created: 12/14/2015
-- | see : http://www.serpentine.com/criterion/tutorial.html
-- | TODO : move this to benchmark directory
-- |
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
module BapproxMedian where
import Data.List
import Data.Random
import Data.Conduit
import qualified Data.Conduit.List as Cl
import Criterion.Main
import Core
import Statistics
import ApproxMedian
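-- A minimal Criterion sketch (illustrative only; the real suite would
-- benchmark the approximate-median routines from ApproxMedian, whose API
-- is not shown here):
--
-- main :: IO ()
-- main = defaultMain
--   [ bench "median/sort" $
--       nf (\xs -> sort xs !! (length xs `div` 2)) ([1 .. 10001] :: [Double]) ]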
main :: IO ()
main = return () |
{-# OPTIONS --without-K --rewriting #-}
open import HoTT
module homotopy.CoHSpace where
record CoHSpaceStructure {i} (X : Ptd i) : Type i where
constructor coHSpaceStructure
field
⊙coμ : X ⊙→ X ⊙∨ X
coμ : de⊙ X → X ∨ X
coμ = fst ⊙coμ
field
⊙unit-l : ⊙projr ⊙∘ ⊙coμ ⊙∼ ⊙idf X
⊙unit-r : ⊙projl ⊙∘ ⊙coμ ⊙∼ ⊙idf X
{-
module _ {i j : ULevel} {X : Ptd i} (CHSS : CoHSpaceStructure X) where
open CoHSpaceStructure CHSS
private
lemma-l : ⊙projr ⊙∘ ⊙∨-fmap (⊙lift {j = j}) (⊙lift {j = j}) ⊙∘ ⊙coμ ⊙∘ ⊙lower {j = j}
== ⊙idf _
abstract
lemma-l =
! (⊙λ= (⊙∘-assoc ⊙projr (⊙∨-fmap (⊙lift {j = j}) (⊙lift {j = j})) (⊙coμ ⊙∘ ⊙lower {j = j})))
∙ ap (_⊙∘ ⊙coμ ⊙∘ ⊙lower) (⊙λ= (⊙Wedge-rec-fmap ⊙cst (⊙idf _) (⊙lift {j = j}) (⊙lift {j = j})))
∙ ap (_⊙∘ ⊙coμ ⊙∘ ⊙lower) (! (⊙λ= (⊙Wedge-rec-post∘ (⊙lift {j = j}) ⊙cst (⊙idf _))))
∙ ⊙λ= (⊙∘-assoc (⊙lift {j = j}) ⊙projr (⊙coμ ⊙∘ ⊙lower {j = j}))
∙ ap (λ f → ⊙lift {j = j} ⊙∘ f ⊙∘ ⊙lower {j = j}) (⊙λ= ⊙unit-l)
private
lemma-r : ⊙projl ⊙∘ ⊙∨-fmap (⊙lift {j = j}) (⊙lift {j = j}) ⊙∘ ⊙coμ ⊙∘ ⊙lower {j = j}
== ⊙idf _
abstract
lemma-r =
! (⊙λ= (⊙∘-assoc ⊙projl (⊙∨-fmap (⊙lift {j = j}) (⊙lift {j = j})) (⊙coμ ⊙∘ ⊙lower {j = j})))
∙ ap (_⊙∘ ⊙coμ ⊙∘ ⊙lower) (⊙λ= (⊙Wedge-rec-fmap (⊙idf _) ⊙cst (⊙lift {j = j}) (⊙lift {j = j})))
∙ ap (_⊙∘ ⊙coμ ⊙∘ ⊙lower) (! (⊙λ= (⊙Wedge-rec-post∘ (⊙lift {j = j}) (⊙idf _) ⊙cst)))
∙ ⊙λ= (⊙∘-assoc (⊙lift {j = j}) ⊙projl (⊙coμ ⊙∘ ⊙lower {j = j}))
∙ ap (λ f → ⊙lift {j = j} ⊙∘ f ⊙∘ ⊙lower {j = j}) (⊙λ= ⊙unit-r)
Lift-co-h-space-structure : CoHSpaceStructure (⊙Lift {j = j} X)
Lift-co-h-space-structure = record
{ ⊙coμ = ⊙∨-fmap ⊙lift ⊙lift ⊙∘ ⊙coμ ⊙∘ ⊙lower
; ⊙unit-l = ⊙app= lemma-l
; ⊙unit-r = ⊙app= lemma-r
}
-}
|
% -*- root: ../thesis.tex -*-
%!TEX root = ../thesis.tex
% this file is called up by thesis.tex
% content in this file will be fed into the main document
% level followed by section, subsection
% ----------------------- paths to graphics ------------------------
% change according to folder and file names
\graphicspath{{6/figures/}}
% ----------------------- contents from here ------------------------
\section{Evaluation}
% ---------------------------------------------------------------------------
%: ----------------------- end of thesis sub-document ------------------------
% ---------------------------------------------------------------------------
|
[STATEMENT]
lemma lift_state_oracle_extend_state_oracle:
includes lifting_syntax
assumes "\<And>B. Transfer.Rel (((=) ===> (=) ===> rel_spmf (rel_prod B (=))) ===> (=) ===> (=) ===> rel_spmf (rel_prod B (=))) G F"
(* TODO: implement simproc to discharge parametricity assumptions like this one *)
shows "lift_state_oracle F (extend_state_oracle oracle) = extend_state_oracle (G oracle)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lift_state_oracle F \<dagger>oracle = \<dagger>(G oracle)
[PROOF STEP]
unfolding lift_state_oracle_def extend_state_oracle_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>(t, s') a. map_spmf rprodl (F ((Pair t ---> id ---> map_spmf lprodr) (\<lambda>(s', s) x. map_spmf (\<lambda>(y, s). (y, s', s)) (oracle s x))) s' a)) = (\<lambda>(s', s) x. map_spmf (\<lambda>(y, s). (y, s', s)) (G oracle s x))
[PROOF STEP]
apply(clarsimp simp add: fun_eq_iff map_fun_def o_def spmf.map_comp split_def rprodl_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a b x. map_spmf (\<lambda>p. (fst (fst p), snd (fst p), snd p)) (F (\<lambda>x xa. map_spmf (\<lambda>x. ((fst x, a), snd x)) (oracle x xa)) b x) = map_spmf (\<lambda>p. (fst p, a, snd p)) (G oracle b x)
[PROOF STEP]
subgoal for t s a
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_spmf (\<lambda>p. (fst (fst p), snd (fst p), snd p)) (F (\<lambda>x xa. map_spmf (\<lambda>x. ((fst x, t), snd x)) (oracle x xa)) s a) = map_spmf (\<lambda>p. (fst p, t, snd p)) (G oracle s a)
[PROOF STEP]
apply(rule sym)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_spmf (\<lambda>p. (fst p, t, snd p)) (G oracle s a) = map_spmf (\<lambda>p. (fst (fst p), snd (fst p), snd p)) (F (\<lambda>x xa. map_spmf (\<lambda>x. ((fst x, t), snd x)) (oracle x xa)) s a)
[PROOF STEP]
apply(fold spmf_rel_eq)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rel_spmf (=) (map_spmf (\<lambda>p. (fst p, t, snd p)) (G oracle s a)) (map_spmf (\<lambda>p. (fst (fst p), snd (fst p), snd p)) (F (\<lambda>x xa. map_spmf (\<lambda>x. ((fst x, t), snd x)) (oracle x xa)) s a))
[PROOF STEP]
apply(simp add: spmf_rel_map)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rel_spmf (\<lambda>x y. fst x = fst (fst y) \<and> t = snd (fst y) \<and> snd x = snd y) (G oracle s a) (F (\<lambda>x xa. map_spmf (\<lambda>x. ((fst x, t), snd x)) (oracle x xa)) s a)
[PROOF STEP]
apply(rule rel_spmf_mono)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. rel_spmf ?A2 (G oracle s a) (F (\<lambda>x xa. map_spmf (\<lambda>x. ((fst x, t), snd x)) (oracle x xa)) s a)
2. \<And>x y. ?A2 x y \<Longrightarrow> fst x = fst (fst y) \<and> t = snd (fst y) \<and> snd x = snd y
[PROOF STEP]
apply(rule assms[unfolded Rel_def, where B="\<lambda>x (y, z). x = y \<and> z = t", THEN rel_funD, THEN rel_funD, THEN rel_funD])
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. ((=) ===> (=) ===> rel_spmf (rel_prod (\<lambda>x (y, z). x = y \<and> z = t) (=))) oracle (\<lambda>x xa. map_spmf (\<lambda>x. ((fst x, t), snd x)) (oracle x xa))
2. s = s
3. a = a
4. \<And>x y. rel_prod (\<lambda>x (y, z). x = y \<and> z = t) (=) x y \<Longrightarrow> fst x = fst (fst y) \<and> t = snd (fst y) \<and> snd x = snd y
[PROOF STEP]
apply(auto simp add: rel_fun_def spmf_rel_map intro!: rel_spmf_reflI)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
function P = cpf_p(parameterization, step, z, V, lam, Vprv, lamprv, pv, pq)
%CPF_P Computes the value of the CPF parameterization function.
% P = CPF_P(PARAMETERIZATION, STEP, Z, V, LAM, VPRV, LAMPRV, PV, PQ)
%
% Computes the value of the parameterization function at the current
% solution point.
%
% Inputs:
% PARAMETERIZATION : Value of cpf.parameterization option
% STEP : continuation step size
% Z : normalized tangent prediction vector from previous step
% V : complex bus voltage vector at current solution
% LAM : scalar lambda value at current solution
% VPRV : complex bus voltage vector at previous solution
% LAMPRV : scalar lambda value at previous solution
% PV : vector of indices of PV buses
% PQ : vector of indices of PQ buses
%
% Outputs:
% P : value of the parameterization function at the current point
%
% See also CPF_PREDICTOR, CPF_CORRECTOR.
% MATPOWER
% Copyright (c) 1996-2016, Power Systems Engineering Research Center (PSERC)
% by Shrirang Abhyankar, Argonne National Laboratory
% and Ray Zimmerman, PSERC Cornell
%
% This file is part of MATPOWER.
% Covered by the 3-clause BSD License (see LICENSE file for details).
% See https://matpower.org for more info.
%% evaluate P(x0, lambda0)
if parameterization == 1 %% natural
if lam >= lamprv
P = lam - lamprv - step;
else
P = lamprv - lam - step;
end
elseif parameterization == 2 %% arc length
Va = angle(V);
Vm = abs(V);
Vaprv = angle(Vprv);
Vmprv = abs(Vprv);
P = sum(([Va([pv; pq]); Vm(pq); lam] - [Vaprv([pv; pq]); Vmprv(pq); lamprv]).^2) - step^2;
elseif parameterization == 3 %% pseudo arc length
nb = length(V);
Va = angle(V);
Vm = abs(V);
Vaprv = angle(Vprv);
Vmprv = abs(Vprv);
P = z([pv; pq; nb+pq; 2*nb+1])' * ...
( [Va([pv; pq]); Vm(pq); lam] - [Vaprv([pv; pq]); Vmprv(pq); lamprv] )...
- step;
end
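% A minimal usage sketch (illustrative only, not part of MATPOWER): for the
% natural parameterization (option 1), only LAM, LAMPRV and STEP matter, so
% with LAM = 0.55, LAMPRV = 0.5 and STEP = 0.1 this returns P = -0.05.
%
%   V = ones(3,1); z = zeros(7,1); pv = 2; pq = 3;
%   P = cpf_p(1, 0.1, z, V, 0.55, V, 0.5, pv, pq);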
|
function ekg_filt(up)
%EKG_FILT extracts respiratory signals using various filtering techniques
% from the ECG signal as specified in PC's literature review.
%	ekg_filt(up)
%
% Inputs:
% up universal parameters structure
%
% Outputs:
% ...
%
fprintf('\n--- Extracting Respiratory Signals from ECG using Filtering Techniques ');
log_int_respSig = 0; % Has value 1 unless this is a final respiratory signal
for subj = up.paramSet.subj_list
%% Cycle through each ecg signal
for sig_no = 1 : length(up.paramSet.ekg_sigs)
%% Cycle through each method
for filt_no = 1 : length(up.al.options.ekg_filt)
%% Skip if this processing has been done previously
eval(['save_name = ''' up.paramSet.ekg_sigs{sig_no}, up.paths.filenames.filt '_' up.al.options.ekg_filt{filt_no} ''';']);
iden_resp_sig_file_ending
savepath = [up.paths.data_save_folder, num2str(subj), ending];
exist_log = check_exists(savepath, save_name);
if exist_log
continue
end
%% Load relevant data
if ~exist('data', 'var')
load([up.paths.data_load_folder, up.paths.data_load_filename]);
end
% Extract EKG data
eval(['rel_data = data(subj).' up.paramSet.ekg_sigs{sig_no} ';']);
%% Filter the raw signal using this method
respWave = feval(up.al.options.ekg_filt{filt_no}, rel_data, up);
%% Band-pass filter
filtered_data = bpf_signal_to_remove_non_resp_freqs(respWave, respWave.fs, up);
eval([save_name ' = filtered_data;']);
%% Save processed data
save_or_append_data
end
end
end
end |
/*
*
* Copyright (c) Kresimir Fresl 2002
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Author acknowledges the support of the Faculty of Civil Engineering,
* University of Zagreb, Croatia.
*
*/
#ifndef BOOST_NUMERIC_BINDINGS_BLAS_DETAIL_CBLAS_H
#define BOOST_NUMERIC_BINDINGS_BLAS_DETAIL_CBLAS_H
//
// MKL-specific CBLAS include
//
#if defined BOOST_NUMERIC_BINDINGS_BLAS_MKL
extern "C" {
#include <mkl_cblas.h>
//#include <mkl_service.h>
//
// mkl_types.h defines P4 macro which breaks MPL, undefine it here.
//
#undef P4
}
//
// Default CBLAS include
//
#else
extern "C" {
#include <cblas.h>
}
#endif
#endif
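//
// A minimal usage sketch (illustrative only): with this header included,
// the CBLAS C interface can be called directly, e.g.
//
// double x[3] = { 1, 2, 3 }, y[3] = { 4, 5, 6 };
// double d = cblas_ddot( 3, x, 1, y, 1 ); // d == 32
//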
|
Formal statement is: lemma open_image_fst: assumes "open S" shows "open (fst ` S)" Informal statement is: If $S$ is an open set in $\mathbb{R}^2$, then the projection of $S$ onto the first coordinate is open in $\mathbb{R}$. |
Through <unk> <unk> and winding mossy ways. 40
|
[STATEMENT]
lemma Semaphores_parameterized_mutex:
"0<n \<Longrightarrow> \<parallel>- \<lbrace>True\<rbrace>
\<acute>out:=True ,,
COBEGIN
SCHEME [0\<le> i< n]
\<lbrace>True\<rbrace>
WHILE True INV \<lbrace>True\<rbrace>
DO \<lbrace>True\<rbrace> AWAIT \<acute>out THEN \<acute>out:=False,, \<acute>who:=i END;;
\<lbrace>\<not>\<acute>out \<and> \<acute>who=i\<rbrace> \<acute>out:=True OD
\<lbrace>False\<rbrace>
COEND
\<lbrace>False\<rbrace>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < n \<Longrightarrow> \<parallel>- \<lbrace>True\<rbrace>
\<acute>out := True,, COBEGIN
SCHEME [0 \<le> i < n] \<lbrace>True\<rbrace>
WHILE True INV \<lbrace>True\<rbrace>
DO \<lbrace>True\<rbrace>
AWAIT \<acute>out THEN \<acute>out := False,, \<acute>who := i END;; \<lbrace>\<not> \<acute>out \<and> \<acute>who = i\<rbrace> \<acute>out := True
OD
\<lbrace>False\<rbrace>
COEND
\<lbrace>False\<rbrace>
[PROOF STEP]
apply oghoare
\<comment> \<open>20 vc\<close>
[PROOF STATE]
proof (prove)
goal (20 subgoals):
1. 0 < n \<Longrightarrow> \<lbrace>True\<rbrace> \<subseteq> \<lbrace>\<acute>(out_update (\<lambda>_. True)) \<in> \<lbrace>\<forall>i<n - 0. \<acute>(\<in>) (pre (the (OG_Hoare.com ((SCHEME [0 \<le> i < n] \<lbrace>True\<rbrace>
WHILE True INV \<lbrace>True\<rbrace>
DO AnnAwait \<lbrace>True\<rbrace> (Collect out) (Basic (out_update (\<lambda>_. False)),, Basic (who_update (\<lambda>_. i)));; AnnBasic (\<lbrace>\<not> \<acute>out\<rbrace> \<inter> \<lbrace>\<acute>who = i\<rbrace>) (out_update (\<lambda>_. True))
OD
\<lbrace>False\<rbrace>) ! i))))\<rbrace>\<rbrace>
2. \<And>i. \<lbrakk>0 < n; 0 \<le> i \<and> i < n\<rbrakk> \<Longrightarrow> \<lbrace>True\<rbrace> \<subseteq> \<lbrace>True\<rbrace>
3. \<And>i. \<lbrakk>0 < n; 0 \<le> i \<and> i < n\<rbrakk> \<Longrightarrow> \<lbrace>True\<rbrace> \<inter> \<lbrace>True\<rbrace> \<subseteq> pre (\<lbrace>True\<rbrace>
AWAIT \<acute>out THEN \<acute>out := False,, \<acute>who := i END;; \<lbrace>\<not> \<acute>out \<and> \<acute>who = i\<rbrace> \<acute>out := True)
4. \<And>i. \<lbrakk>0 < n; 0 \<le> i \<and> i < n\<rbrakk> \<Longrightarrow> atom_com (\<acute>out := False,, \<acute>who := i)
5. \<And>i. \<lbrakk>0 < n; 0 \<le> i \<and> i < n\<rbrakk> \<Longrightarrow> \<lbrace>True\<rbrace> \<inter> \<lbrace>\<acute>out\<rbrace> \<subseteq> \<lbrace>\<acute>(out_update (\<lambda>_. False)) \<in> \<lbrace>\<acute>(who_update (\<lambda>_. i)) \<in> pre (\<lbrace>\<not> \<acute>out \<and> \<acute>who = i\<rbrace> \<acute>out := True)\<rbrace>\<rbrace>
6. \<And>i. \<lbrakk>0 < n; 0 \<le> i \<and> i < n\<rbrakk> \<Longrightarrow> \<lbrace>\<not> \<acute>out \<and> \<acute>who = i\<rbrace> \<subseteq> \<lbrace>\<acute>(out_update (\<lambda>_. True)) \<in> \<lbrace>True\<rbrace>\<rbrace>
7. \<And>i. \<lbrakk>0 < n; 0 \<le> i \<and> i < n\<rbrakk> \<Longrightarrow> \<lbrace>True\<rbrace> \<inter> - \<lbrace>True\<rbrace> \<subseteq> \<lbrace>False\<rbrace>
8. \<And>i j. \<lbrakk>0 < n; 0 \<le> i \<and> i < n \<and> 0 \<le> j \<and> j < n \<and> i \<noteq> j\<rbrakk> \<Longrightarrow> \<lbrace>True\<rbrace> \<inter> (\<lbrace>True\<rbrace> \<inter> \<lbrace>\<acute>out\<rbrace>) \<subseteq> \<lbrace>\<acute>(out_update (\<lambda>_. False)) \<in> \<lbrace>\<acute>(who_update (\<lambda>_. j)) \<in> \<lbrace>True\<rbrace>\<rbrace>\<rbrace>
9. \<And>i j. \<lbrakk>0 < n; 0 \<le> i \<and> i < n \<and> 0 \<le> j \<and> j < n \<and> i \<noteq> j\<rbrakk> \<Longrightarrow> \<lbrace>True\<rbrace> \<inter> \<lbrace>\<not> \<acute>out \<and> \<acute>who = j\<rbrace> \<subseteq> \<lbrace>\<acute>(out_update (\<lambda>_. True)) \<in> \<lbrace>True\<rbrace>\<rbrace>
10. \<And>i j. \<lbrakk>0 < n; 0 \<le> i \<and> i < n \<and> 0 \<le> j \<and> j < n \<and> i \<noteq> j\<rbrakk> \<Longrightarrow> \<lbrace>True\<rbrace> \<inter> (\<lbrace>True\<rbrace> \<inter> \<lbrace>\<acute>out\<rbrace>) \<subseteq> \<lbrace>\<acute>(out_update (\<lambda>_. False)) \<in> \<lbrace>\<acute>(who_update (\<lambda>_. j)) \<in> \<lbrace>True\<rbrace>\<rbrace>\<rbrace>
A total of 20 subgoals...
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
{-# OPTIONS --safe --no-guardedness #-}
record Stream (A : Set) : Set where
coinductive
field
head : A
tail : Stream A
open Stream
repeat : ∀ {A} → A → Stream A
repeat x .head = x
repeat x .tail = repeat x
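-- A small usage sketch (not in the original file): observing a finite
-- prefix of a stream, using only the safe builtin naturals and lists.
open import Agda.Builtin.Nat
open import Agda.Builtin.List

take : ∀ {A} → Nat → Stream A → List A
take zero    xs = []
take (suc n) xs = head xs ∷ take n (tail xs)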
|
using DictFiles, HDF5, JLD
|
import .infinite_sums
import .log_harmonic
import .factors_le
import data.finset.image
import data.pnat.defs
import data.real.ennreal
import order.filter.basic
import order.filter.at_top_bot
import algebra.big_operators.basic
import topology.algebra.infinite_sum
open real filter
open_locale big_operators
lemma prod_geom_series_primes_eq (n : ℕ) :
∏ p in primes_le n, ((1 - (↑p)⁻¹)⁻¹ : ℝ) ≤
↑(finset.card (primes_le n) + 1) :=
begin
sorry,
end
-- def pnat_range (n : ℕ) : finset ℕ+ := finset.image nat.to_pnat' (finset.Icc 1 n)
--
-- lemma coe_pnat_range_eq {n : ℕ} : finset.image coe (pnat_range n) = finset.Icc 1 n :=
-- begin
-- rw pnat_range,
-- simp [finset.image_image],
-- rw (_ : finset.image (coe ∘ nat.to_pnat') (finset.Icc 1 n) = finset.image id (finset.Icc 1 n)),
-- { simp, },
-- apply finset.image_congr,
-- simp [set.eq_on],
-- intros x h1 h2 h3,
-- exfalso,
-- revert h1,
-- simp [h3],
-- end
lemma mem_pnat_fac_le_of_mem_Icc {n : ℕ} {x : ℕ} (hx : x ∈ finset.Icc 1 n) : (nat.to_pnat' x ∈ pnat_fac_le n) :=
begin
simp [pnat_fac_le],
cases x,
{ exfalso, revert hx, simp, },
apply all_factors_le_of_le,
simp at *,
exact hx.right,
end
def pnat_fac_le_range (n : ℕ) : finset ↥(pnat_fac_le n) :=
-- finset.image (λ x, ⟨nat.to_pnat' x.val, mem_pnat_fac_le_of_mem_Icc x.property⟩) (finset.Icc 1 n).attach
(finset.Icc 1 n).attach.image (λ x, ⟨nat.to_pnat' x.val, mem_pnat_fac_le_of_mem_Icc x.property⟩)
lemma coe_pnat_fac_le_range_eq {n : ℕ} : finset.image coe (pnat_fac_le_range n) = finset.Icc 1 n :=
begin
rw (_ : finset.image coe (pnat_fac_le_range n) = finset.image coe (finset.Icc 1 n).attach),
{ rw finset.attach_image_coe, },
rw pnat_fac_le_range,
rw finset.image_image,
apply finset.image_congr,
simp [set.eq_on],
intros a ha _ h,
exfalso,
revert ha,
rw h,
simp,
end
-- lemma nnreal_sum_le_tsum {α : Type*} {f : α → nnreal} (s : finset α) :
-- s.sum (λ (x : α), f x) ≤ ∑' (x : α), f x :=
-- begin
-- -- Should be easy to prove this? Non-negative! Needs summable?!
-- rw sum_eq_tsum_indicator,
-- sorry,
-- end
lemma finset_sum_inv_le_tsum_pnat_inv {n : ℕ} :
-- ∑ k : ℕ+ in finset.Icc 1 n, ((↑k)⁻¹ : nnreal) ≤
harmonic n ≤ ∑' k : ↥(pnat_fac_le n), (↑k)⁻¹ :=
begin
rw harmonic,
rw ← coe_pnat_fac_le_range_eq,
rw finset.sum_image,
{ rw ← ennreal.coe_le_coe,
push_cast,
-- Should be possible to avoid need for summable proof here?
rw ennreal.coe_tsum (sum_inv_pnat_fac_le_eq_prod_geom_series n).summable,
apply ennreal.sum_le_tsum, },
{ simp, },
end
lemma log_le_card_primes {n : ℕ} :
log (↑n + 1) ≤ ↑(finset.card (primes_le n) + 1) :=
begin
apply le_trans log_add_one_le_harmonic,
norm_cast,
apply le_trans finset_sum_inv_le_tsum_pnat_inv,
-- rw (by simp : ∀ {x : ℕ}, (↑x : ℝ) = ((↑x : nnreal) : ℝ)),
-- norm_cast,
rw has_sum.tsum_eq (sum_inv_pnat_fac_le_eq_prod_geom_series n),
apply le_trans _ (prod_geom_series_primes_eq _),
push_cast,
apply le_of_eq,
apply finset.prod_congr rfl,
intros p _,
simp,
rw nnreal.coe_sub _,
{ simp, },
simp,
norm_cast,
simp,
end
theorem infinite_primes : tendsto (λ n, finset.card (primes_le n)) at_top at_top :=
begin
sorry,
end
|
# ifndef __HEADER__GA__
# define __HEADER__GA__
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <gsl/gsl_math.h>
#include "utils.h"
# define UL_SIZE sizeof(unsigned long)
# define GENES_C1 3
# define GENES_C2 11
// We are going to use a two-chromosome genome.
typedef struct Genome {
unsigned long c1[GENES_C1];
unsigned long c2[GENES_C2];
double fitness;
} Genome;
void generate_genome(Genome * genome);
Genome * generate_population(int individuals);
int next_generation(
Genome * parents, Genome * children,
int n_elitism, int n_select, int n_cross, int n_new, double p_mutation, int mutation_bit
);
/*
* Copies the information in the input genome to the output genome.
* Note that the fitness function is also copied over.
*/
void copy_genome(Genome * in, Genome * out);
// a.k.a crossover
void tinder(Genome * population, int pop_size, Genome * out);
// actual crossover
void crossover_genomes(
Genome * gen1in,
Genome * gen2in,
Genome * out
);
void mutate_genome(Genome * genome, double p_mut);
int extinction(int ek, Genome * population, Genome * survivors, int pop_size, int number_survivors);
/*
 * Implementation of the elitism operator.
 * This selects the best of the best individuals to be passed over to the next generation.
 *
 * @param population the initial population with fitness values calculated
 * @param pop_size the total population size
 * @param number_elitism the number of best individuals to be selected
 * @param out the population to be filled with the best individuals
 *
 * @return the position of the best individual
 */
void elitism(Genome * population, int pop_size, int number_elitism, Genome * out);
/*
 * Implementation of the roulette wheel method.
 * This method chooses individuals according to their fitness: individuals with
 * lower fitness have a lower chance of being selected.
*
* @param population the initial population with fitness values calculated
* @param pop_size the total population size
* @param best_genomes the number of best individuals to choose
* @param out the population to be filled with the best individuals
*/
int casting(Genome * population, int pop_size, int best_genomes, Genome * out);
/*
* Add `n_new` individuals to the start of the passed genome array.
*/
void migration(
Genome * genome,
int n_new
);
# endif
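/*
 * A minimal usage sketch (illustrative only; the parameter values below are
 * hypothetical, not taken from this project):
 *
 *	Genome * parents  = generate_population(100);
 *	Genome * children = generate_population(100);
 *	// 5 elites, 45 selected, 40 from crossover, 10 fresh, 1% mutation
 *	next_generation(parents, children, 5, 45, 40, 10, 0.01, 1);
 */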
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Kenny Lau, Yury Kudryashov
-/
import data.list.pairwise
import logic.relation
/-!
# Relation chain
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file provides basic results about `list.chain` (definition in `data.list.defs`).
A list `[a₂, ..., aₙ]` is a `chain` starting at `a₁` with respect to the relation `r` if `r a₁ a₂`
and `r a₂ a₃` and ... and `r aₙ₋₁ aₙ`. We write it `chain r a₁ [a₂, ..., aₙ]`.
A graph-specialized version is in development and will hopefully be added under `combinatorics.`
sometime soon.
-/
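-- A minimal illustration (not in the original file): a concrete chain;
-- with `[decidable_rel r]` such goals can be discharged by `dec_trivial`:
-- example : list.chain (<) 0 [1, 2, 3] := dec_trivial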
universes u v
open nat
namespace list
variables {α : Type u} {β : Type v} {R r : α → α → Prop} {l l₁ l₂ : list α} {a b : α}
mk_iff_of_inductive_prop list.chain list.chain_iff
theorem rel_of_chain_cons {a b : α} {l : list α}
(p : chain R a (b :: l)) : R a b :=
(chain_cons.1 p).1
theorem chain_of_chain_cons {a b : α} {l : list α}
(p : chain R a (b :: l)) : chain R b l :=
(chain_cons.1 p).2
theorem chain.imp' {S : α → α → Prop}
(HRS : ∀ ⦃a b⦄, R a b → S a b) {a b : α} (Hab : ∀ ⦃c⦄, R a c → S b c)
{l : list α} (p : chain R a l) : chain S b l :=
by induction p with _ a c l r p IH generalizing b; constructor;
[exact Hab r, exact IH (@HRS _)]
theorem chain.imp {S : α → α → Prop}
(H : ∀ a b, R a b → S a b) {a : α} {l : list α} (p : chain R a l) : chain S a l :=
p.imp' H (H a)
theorem chain.iff {S : α → α → Prop}
(H : ∀ a b, R a b ↔ S a b) {a : α} {l : list α} : chain R a l ↔ chain S a l :=
⟨chain.imp (λ a b, (H a b).1), chain.imp (λ a b, (H a b).2)⟩
theorem chain.iff_mem {a : α} {l : list α} :
chain R a l ↔ chain (λ x y, x ∈ a :: l ∧ y ∈ l ∧ R x y) a l :=
⟨λ p, by induction p with _ a b l r p IH; constructor;
[exact ⟨mem_cons_self _ _, mem_cons_self _ _, r⟩,
exact IH.imp (λ a b ⟨am, bm, h⟩,
⟨mem_cons_of_mem _ am, mem_cons_of_mem _ bm, h⟩)],
chain.imp (λ a b h, h.2.2)⟩
theorem chain_singleton {a b : α} : chain R a [b] ↔ R a b :=
by simp only [chain_cons, chain.nil, and_true]
theorem chain_split {a b : α} {l₁ l₂ : list α} : chain R a (l₁ ++ b :: l₂) ↔
chain R a (l₁ ++ [b]) ∧ chain R b l₂ :=
by induction l₁ with x l₁ IH generalizing a;
simp only [*, nil_append, cons_append, chain.nil, chain_cons, and_true, and_assoc]
@[simp] theorem chain_append_cons_cons {a b c : α} {l₁ l₂ : list α} :
chain R a (l₁ ++ b :: c :: l₂) ↔ chain R a (l₁ ++ [b]) ∧ R b c ∧ chain R c l₂ :=
by rw [chain_split, chain_cons]
theorem chain_iff_forall₂ :
∀ {a : α} {l : list α}, chain R a l ↔ l = [] ∨ forall₂ R (a :: init l) l
| a [] := by simp
| a [b] := by simp [init]
| a (b :: c :: l) := by simp [@chain_iff_forall₂ b]
theorem chain_append_singleton_iff_forall₂ :
chain R a (l ++ [b]) ↔ forall₂ R (a :: l) (l ++ [b]) :=
by simp [chain_iff_forall₂, init]
theorem chain_map (f : β → α) {b : β} {l : list β} :
chain R (f b) (map f l) ↔ chain (λ a b : β, R (f a) (f b)) b l :=
by induction l generalizing b; simp only [map, chain.nil, chain_cons, *]
theorem chain_of_chain_map {S : β → β → Prop} (f : α → β)
(H : ∀ a b : α, S (f a) (f b) → R a b) {a : α} {l : list α}
(p : chain S (f a) (map f l)) : chain R a l :=
((chain_map f).1 p).imp H
theorem chain_map_of_chain {S : β → β → Prop} (f : α → β)
(H : ∀ a b : α, R a b → S (f a) (f b)) {a : α} {l : list α}
(p : chain R a l) : chain S (f a) (map f l) :=
(chain_map f).2 $ p.imp H
theorem chain_pmap_of_chain {S : β → β → Prop} {p : α → Prop}
{f : Π a, p a → β}
(H : ∀ a b ha hb, R a b → S (f a ha) (f b hb))
{a : α} {l : list α}
(hl₁ : chain R a l) (ha : p a) (hl₂ : ∀ a ∈ l, p a) :
chain S (f a ha) (list.pmap f l hl₂) :=
begin
induction l with lh lt l_ih generalizing a,
{ simp },
{ simp [H _ _ _ _ (rel_of_chain_cons hl₁), l_ih _ (chain_of_chain_cons hl₁)] }
end
theorem chain_of_chain_pmap {S : β → β → Prop} {p : α → Prop}
(f : Π a, p a → β) {l : list α} (hl₁ : ∀ a ∈ l, p a)
{a : α} (ha : p a) (hl₂ : chain S (f a ha) (list.pmap f l hl₁))
(H : ∀ a b ha hb, S (f a ha) (f b hb) → R a b) :
chain R a l :=
begin
induction l with lh lt l_ih generalizing a,
{ simp },
{ simp [H _ _ _ _ (rel_of_chain_cons hl₂), l_ih _ _ (chain_of_chain_cons hl₂)] }
end
protected lemma pairwise.chain (p : pairwise R (a :: l)) : chain R a l :=
begin
cases pairwise_cons.1 p with r p', clear p,
induction p' with b l r' p IH generalizing a, {exact chain.nil},
simp only [chain_cons, forall_mem_cons] at r,
exact chain_cons.2 ⟨r.1, IH r'⟩
end
protected lemma chain.pairwise [is_trans α R] :
∀ {a : α} {l : list α}, chain R a l → pairwise R (a :: l)
| a [] chain.nil := pairwise_singleton _ _
| a _ (@chain.cons _ _ _ b l h hb) := hb.pairwise.cons begin
simp only [mem_cons_iff, forall_eq_or_imp, h, true_and],
exact λ c hc, trans h (rel_of_pairwise_cons hb.pairwise hc),
end
theorem chain_iff_pairwise [is_trans α R] {a : α} {l : list α} :
chain R a l ↔ pairwise R (a :: l) :=
⟨chain.pairwise, pairwise.chain⟩
protected lemma chain.sublist [is_trans α R] (hl : l₂.chain R a) (h : l₁ <+ l₂) : l₁.chain R a :=
by { rw chain_iff_pairwise at ⊢ hl, exact hl.sublist (h.cons_cons a) }
protected lemma chain.rel [is_trans α R] (hl : l.chain R a) (hb : b ∈ l) : R a b :=
by { rw chain_iff_pairwise at hl, exact rel_of_pairwise_cons hl hb }
theorem chain_iff_nth_le {R} : ∀ {a : α} {l : list α},
chain R a l ↔ (∀ h : 0 < length l, R a (nth_le l 0 h)) ∧ (∀ i (h : i < length l - 1),
R (nth_le l i (lt_of_lt_pred h)) (nth_le l (i+1) (lt_pred_iff.mp h)))
| a [] := by simp
| a (b :: t) :=
begin
rw [chain_cons, chain_iff_nth_le],
split,
{ rintro ⟨R, ⟨h0, h⟩⟩,
split,
{ intro w, exact R },
intros i w,
cases i,
{ apply h0 },
convert h i _ using 1,
simp only [succ_eq_add_one, add_succ_sub_one, add_zero, length, add_lt_add_iff_right] at w,
exact lt_pred_iff.mpr w, },
rintro ⟨h0, h⟩, split,
{ apply h0, simp, },
split,
{ apply h 0, },
intros i w, convert h (i+1) _ using 1,
exact lt_pred_iff.mp w,
end
theorem chain'.imp {S : α → α → Prop}
(H : ∀ a b, R a b → S a b) {l : list α} (p : chain' R l) : chain' S l :=
by cases l; [trivial, exact p.imp H]
theorem chain'.iff {S : α → α → Prop}
(H : ∀ a b, R a b ↔ S a b) {l : list α} : chain' R l ↔ chain' S l :=
⟨chain'.imp (λ a b, (H a b).1), chain'.imp (λ a b, (H a b).2)⟩
theorem chain'.iff_mem : ∀ {l : list α}, chain' R l ↔ chain' (λ x y, x ∈ l ∧ y ∈ l ∧ R x y) l
| [] := iff.rfl
| (x :: l) :=
⟨λ h, (chain.iff_mem.1 h).imp $ λ a b ⟨h₁, h₂, h₃⟩, ⟨h₁, or.inr h₂, h₃⟩,
chain'.imp $ λ a b h, h.2.2⟩
@[simp] theorem chain'_nil : chain' R [] := trivial
@[simp] theorem chain'_singleton (a : α) : chain' R [a] := chain.nil
@[simp] theorem chain'_cons {x y l} : chain' R (x :: y :: l) ↔ R x y ∧ chain' R (y :: l) :=
chain_cons
theorem chain'_is_infix : ∀ l : list α, chain' (λ x y, [x, y] <:+: l) l
| [] := chain'_nil
| [a] := chain'_singleton _
| (a::b::l) := chain'_cons.2 ⟨⟨[], l, by simp⟩,
(chain'_is_infix (b::l)).imp $ λ x y h, h.trans ⟨[a], [], by simp⟩⟩
theorem chain'_split {a : α} : ∀ {l₁ l₂ : list α}, chain' R (l₁ ++ a :: l₂) ↔
chain' R (l₁ ++ [a]) ∧ chain' R (a :: l₂)
| [] l₂ := (and_iff_right (chain'_singleton a)).symm
| (b :: l₁) l₂ := chain_split
@[simp] theorem chain'_append_cons_cons {b c : α} {l₁ l₂ : list α} :
chain' R (l₁ ++ b :: c :: l₂) ↔ chain' R (l₁ ++ [b]) ∧ R b c ∧ chain' R (c :: l₂) :=
by rw [chain'_split, chain'_cons]
theorem chain'_map (f : β → α) {l : list β} :
chain' R (map f l) ↔ chain' (λ a b : β, R (f a) (f b)) l :=
by cases l; [refl, exact chain_map _]
theorem chain'_of_chain'_map {S : β → β → Prop} (f : α → β)
(H : ∀ a b : α, S (f a) (f b) → R a b) {l : list α}
(p : chain' S (map f l)) : chain' R l :=
((chain'_map f).1 p).imp H
theorem chain'_map_of_chain' {S : β → β → Prop} (f : α → β)
(H : ∀ a b : α, R a b → S (f a) (f b)) {l : list α}
(p : chain' R l) : chain' S (map f l) :=
(chain'_map f).2 $ p.imp H
theorem pairwise.chain' : ∀ {l : list α}, pairwise R l → chain' R l
| [] _ := trivial
| (a :: l) h := pairwise.chain h
theorem chain'_iff_pairwise [is_trans α R] : ∀ {l : list α},
chain' R l ↔ pairwise R l
| [] := (iff_true_intro pairwise.nil).symm
| (a :: l) := chain_iff_pairwise
protected lemma chain'.sublist [is_trans α R] (hl : l₂.chain' R) (h : l₁ <+ l₂) : l₁.chain' R :=
by { rw chain'_iff_pairwise at ⊢ hl, exact hl.sublist h }
theorem chain'.cons {x y l} (h₁ : R x y) (h₂ : chain' R (y :: l)) :
chain' R (x :: y :: l) :=
chain'_cons.2 ⟨h₁, h₂⟩
theorem chain'.tail : ∀ {l} (h : chain' R l), chain' R l.tail
| [] _ := trivial
| [x] _ := trivial
| (x :: y :: l) h := (chain'_cons.mp h).right
theorem chain'.rel_head {x y l} (h : chain' R (x :: y :: l)) : R x y :=
rel_of_chain_cons h
theorem chain'.rel_head' {x l} (h : chain' R (x :: l)) ⦃y⦄ (hy : y ∈ head' l) : R x y :=
by { rw ← cons_head'_tail hy at h, exact h.rel_head }
theorem chain'.cons' {x} :
∀ {l : list α}, chain' R l → (∀ y ∈ l.head', R x y) → chain' R (x :: l)
| [] _ _ := chain'_singleton x
| (a :: l) hl H := hl.cons $ H _ rfl
theorem chain'_cons' {x l} : chain' R (x :: l) ↔ (∀ y ∈ head' l, R x y) ∧ chain' R l :=
⟨λ h, ⟨h.rel_head', h.tail⟩, λ ⟨h₁, h₂⟩, h₂.cons' h₁⟩
theorem chain'.append (h₁ : chain' R l₁) (h₂ : chain' R l₂)
(h : ∀ (x ∈ l₁.last') (y ∈ l₂.head'), R x y) :
chain' R (l₁ ++ l₂) :=
chain'_append.2 ⟨h₁, h₂, h⟩
theorem chain'.left_of_append (h : chain' R (l₁ ++ l₂)) : chain' R l₁ := (chain'_append.1 h).1
theorem chain'.right_of_append (h : chain' R (l₁ ++ l₂)) : chain' R l₂ := (chain'_append.1 h).2.1
theorem chain'.infix (h : chain' R l) (h' : l₁ <:+: l) : chain' R l₁ :=
by { rcases h' with ⟨l₂, l₃, rfl⟩, exact h.left_of_append.right_of_append }
theorem chain'.suffix (h : chain' R l) (h' : l₁ <:+ l) : chain' R l₁ := h.infix h'.is_infix
theorem chain'.prefix (h : chain' R l) (h' : l₁ <+: l) : chain' R l₁ := h.infix h'.is_infix
theorem chain'.drop (h : chain' R l) (n : ℕ) : chain' R (drop n l) := h.suffix (drop_suffix _ _)
theorem chain'.init (h : chain' R l) : chain' R l.init := h.prefix l.init_prefix
theorem chain'.take (h : chain' R l) (n : ℕ) : chain' R (take n l) := h.prefix (take_prefix _ _)
theorem chain'_pair {x y} : chain' R [x, y] ↔ R x y :=
by simp only [chain'_singleton, chain'_cons, and_true]
theorem chain'.imp_head {x y} (h : ∀ {z}, R x z → R y z) {l} (hl : chain' R (x :: l)) :
chain' R (y :: l) :=
hl.tail.cons' $ λ z hz, h $ hl.rel_head' hz
theorem chain'_reverse : ∀ {l}, chain' R (reverse l) ↔ chain' (flip R) l
| [] := iff.rfl
| [a] := by simp only [chain'_singleton, reverse_singleton]
| (a :: b :: l) := by rw [chain'_cons, reverse_cons, reverse_cons, append_assoc, cons_append,
nil_append, chain'_split, ← reverse_cons, @chain'_reverse (b :: l), and_comm, chain'_pair, flip]
theorem chain'_iff_nth_le {R} : ∀ {l : list α},
chain' R l ↔ ∀ i (h : i < length l - 1),
R (nth_le l i (lt_of_lt_pred h)) (nth_le l (i+1) (lt_pred_iff.mp h))
| [] := by simp
| [a] := by simp
| (a :: b :: t) :=
begin
rw [← and_forall_succ, chain'_cons, chain'_iff_nth_le],
simp only [length, nth_le, add_tsub_cancel_right, add_lt_add_iff_right, tsub_pos_iff_lt,
one_lt_succ_succ, true_implies_iff],
refl,
end
/-- If `l₁ l₂` and `l₃` are lists and `l₁ ++ l₂` and `l₂ ++ l₃` both satisfy
`chain' R`, then so does `l₁ ++ l₂ ++ l₃` provided `l₂ ≠ []` -/
lemma chain'.append_overlap {l₁ l₂ l₃ : list α}
(h₁ : chain' R (l₁ ++ l₂)) (h₂ : chain' R (l₂ ++ l₃)) (hn : l₂ ≠ []) :
chain' R (l₁ ++ l₂ ++ l₃) :=
h₁.append h₂.right_of_append $
by simpa only [last'_append_of_ne_nil _ hn] using (chain'_append.1 h₂).2.2
/--
If `a` and `b` are related by the reflexive transitive closure of `r`, then there is a `r`-chain
starting from `a` and ending on `b`.
The converse of `relation_refl_trans_gen_of_exists_chain`.
-/
lemma exists_chain_of_relation_refl_trans_gen (h : relation.refl_trans_gen r a b) :
∃ l, chain r a l ∧ last (a :: l) (cons_ne_nil _ _) = b :=
begin
apply relation.refl_trans_gen.head_induction_on h,
{ exact ⟨[], chain.nil, rfl⟩ },
{ intros c d e t ih,
obtain ⟨l, hl₁, hl₂⟩ := ih,
refine ⟨d :: l, chain.cons e hl₁, _⟩,
rwa last_cons_cons }
end
/--
Given a chain from `a` to `b`, and a predicate true at `b`, if `r x y → p y → p x` then
the predicate is true everywhere in the chain and at `a`.
That is, we can propagate the predicate up the chain.
-/
lemma chain.induction (p : α → Prop)
(l : list α) (h : chain r a l)
(hb : last (a :: l) (cons_ne_nil _ _) = b)
(carries : ∀ ⦃x y : α⦄, r x y → p y → p x) (final : p b) : ∀ i ∈ a :: l, p i :=
begin
induction l generalizing a,
{ cases hb,
simp [final] },
{ rw chain_cons at h,
rintro _ (rfl | _),
apply carries h.1 (l_ih h.2 hb _ (or.inl rfl)),
apply l_ih h.2 hb _ H }
end
/--
Given a chain from `a` to `b`, and a predicate true at `b`, if `r x y → p y → p x` then
the predicate is true at `a`.
That is, we can propagate the predicate all the way up the chain.
-/
@[elab_as_eliminator]
lemma chain.induction_head (p : α → Prop)
(l : list α) (h : chain r a l)
(hb : last (a :: l) (cons_ne_nil _ _) = b)
(carries : ∀ ⦃x y : α⦄, r x y → p y → p x) (final : p b) : p a :=
(chain.induction p l h hb carries final) _ (mem_cons_self _ _)
/--
If there is an `r`-chain starting from `a` and ending at `b`, then `a` and `b` are related by the
reflexive transitive closure of `r`. The converse of `exists_chain_of_relation_refl_trans_gen`.
-/
lemma relation_refl_trans_gen_of_exists_chain (l) (hl₁ : chain r a l)
(hl₂ : last (a :: l) (cons_ne_nil _ _) = b) :
relation.refl_trans_gen r a b :=
chain.induction_head _ l hl₁ hl₂ (λ x y, relation.refl_trans_gen.head) relation.refl_trans_gen.refl
end list
|
lemma fls_compose_power_uminus [simp]:
  "fls_compose_power (-f) d = -fls_compose_power f d"
  by transfer auto |
lemma locally_path_connected_1: assumes "\<And>v x. \<lbrakk>openin (top_of_set S) v; x \<in> v\<rbrakk> \<Longrightarrow> \<exists>u. openin (top_of_set S) u \<and> path_connected u \<and> x \<in> u \<and> u \<subseteq> v" shows "locally path_connected S" |
/* examples/C/ssmfe/shift_invert.c */
/* Laplacian on a rectangular grid by shift-invert via LDLT factorization */
#include "spral.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h> /* for the bool type and "true" used in the main loop */
#include <cblas.h>
/* Headers that implement the Laplacian, preconditioners and LDLT support */
#include "laplace2d.h"
#include "ldltf.h"
int main(void) {
const int nx = 8; /* grid points along x */
const int ny = 8; /* grid points along y */
const int n = nx*ny; /* problem size */
const double sigma = 1.0; /* shift */
int ipiv[n]; /* LDLT pivot index */
double lambda[n]; /* eigenvalues */
double X[n][n]; /* eigenvectors */
double A[n][n]; /* matrix */
double LDLT[n][n]; /* factors */
double work[n][n]; /* work array for dsytrf */
struct spral_ssmfe_options options; /* eigensolver options */
struct spral_ssmfe_inform inform; /* information */
struct spral_ssmfe_rcid rci; /* reverse communication data */
void *keep; /* private data */
/* Initialize options to default values */
spral_ssmfe_default_options(&options);
/* Set up then perform LDLT factorization of the shifted matrix */
set_laplacian_matrix(nx, ny, n, A);
for(int j=0; j<n; j++)
for(int i=0; i<n; i++)
LDLT[j][i] = (i==j) ? A[j][i] - sigma
: A[j][i];
cwrap_dsytrf('L', n, &LDLT[0][0], n, ipiv, &work[0][0], n*n);
/* Main loop */
   int left = num_neg_D(n, n, LDLT, ipiv); /* all eigenvalues to the left of sigma */
   int right = 5; /* 5 eigenvalues to the right of sigma */
rci.job = 0; keep = NULL;
while(true) {
spral_ssmfe_standard_shift_double(&rci, sigma, left, right, n, lambda,
n, &X[0][0], n, &keep, &options, &inform);
switch( rci.job ) {
case 1:
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, n, rci.nx, n,
1.0, &A[0][0], n, rci.x, n, 0.0, rci.y, n);
break;
case 2:
// No preconditioning
break;
case 9:
cblas_dcopy(n*rci.nx, rci.x, 1, rci.y, 1);
cwrap_dsytrs('L', n, rci.nx, &LDLT[0][0], n, ipiv, rci.y, n);
break;
default:
goto finished;
}
}
finished:
printf("Eigenvalues near %e (took %d iterations)\n", sigma, inform.iteration);
for(int i=0; i<inform.left+inform.right; i++)
printf(" lambda[%1d] = %13.7e\n", i, lambda[i]);
spral_ssmfe_free_double(&keep, &inform);
/* Success */
return 0;
}
|
module Foo
-- fact'' : ((Nat -> Nat) -> (Nat -> Nat))
-- fact'' f1 = (\x => case x of
-- Z => Z
-- (S k) => (S k) * (f1 k))
-- fact' : ((Nat -> Nat) -> (Nat -> Nat)) -> Nat -> Nat
-- fact' f = f (\y => f
-- fact' f (S k) = (S k) * (f k)
-- fact : Nat -> Nat
-- fact x = fact' fact' x
-- can't do the Y combinator, because we can't specify the type of self within self
-- part-fact : ((Nat -> Nat) -> Nat -> Nat) -> Nat -> Nat
-- part-fact self n =
-- case n of
-- Z => 1
-- (S k) => (* (S k) (self self k))
|
lemma tendsto_Lim: "\<not> trivial_limit net \<Longrightarrow> (f \<longlongrightarrow> l) net \<Longrightarrow> Lim net f = l" |
Formal statement is: lemmas tendsto_Im [tendsto_intros] = bounded_linear.tendsto [OF bounded_linear_Im] Informal statement is: Since the imaginary part $\mathrm{Im}$ is a bounded linear map, it preserves limits: if $f \longrightarrow l$ along a filter, then $\mathrm{Im} \circ f \longrightarrow \mathrm{Im}\,l$. |
function meshCanopy(I,Mdata,Mmap,height)
%function meshCanopy: display a mesh above a grayscale image
%SCd 11/18/2010
%
%Updates:
% -11/22/2010: Added example (requires IPT)
% Added height input argument
%
%Input Arguments:
% -I: 2-dimensional grayscale image slice. The values are expected to
% range from 0 to 255. If the maximum value is greater than 255 or
% the minimum value is less than 0, it will be scaled to 0:255. Else
% it will remain unchanged.
% -Mdata: 2-dimensional mesh data (Z for a meshplot).
% NOTE: the mesh command will be called with:
% >>[ii jj] = meshgrid(ceil(cumsum(diff([0 linspace(1,size(I,2),size(Mdata,2))]))),ceil(cumsum(diff([0 linspace(1,size(I,1),size(Mdata,1))]))));
% >>mesh(ii,jj,Mdata);
% and thus does not need to be the same size as the image!
% -Mmap: string, function_handle or nx3, mesh color map. See:
% >>doc colormap
% for valid options. The argument is optional and defaults to 'jet'
%       Examples: 'jet', @jet, [0 0 1; 0 .5 0; .1 .1 .1]
% -height: scalar height of the mesh above the image so you can see both.
% Optional defaults to 80.
%
%Output Arguments:
% -None!
%
%Example: (Requires the Image Processing Toolbox)
% %Display a Mesh Canopy of a standard deviation image, above the original image
% I = imread('cameraman.tif');
% M = stdfilt(I);
% meshCanopy(I,M,@spring)
%
%See also: mesh colormap
%
%Error Checking:
assert(nargin==2||nargin==3||nargin==4,'The number of input arguments is expected to be 2, 3 or 4.\n It was %i',nargin);
assert(ndims(I)==2,'The first input argument, I, is required to be 2-dimensional');
assert(ndims(Mdata)==2,'The second input argument, Mdata, is required to be 2-dimensional');
%Assigning and checking the mesh colormap/height
if nargin == 2
Cmap = [gray(256); jet(256)]; %Default
elseif ischar(Mmap)
%String is used, assert it's right.
valid_maps = {'jet','hsv','hot','cool','spring','summer','autumn','winter','gray','bone','copper','pink','lines'};
assert(any(ismember(valid_maps,Mmap)),'If a string is used as a colormap, it is expected to match one of these:\n%s',sprintf('%s\n',valid_maps{:}));
Mmap = str2func(Mmap);
Cmap = [gray(256); Mmap(256)];
elseif isa(Mmap,'function_handle')
%Function handle is used, assert it's right
valid_maps = {'jet','hsv','hot','cool','spring','summer','autumn','winter','gray','bone','copper','pink','lines'};
assert(any(ismember(valid_maps,func2str(Mmap))),'If a function_handle is used as a colormap, it is expected to match one of these:\n%s',sprintf('%s\n',valid_maps{:}));
Cmap = [gray(256); Mmap(256)];
else
%Explicit color map is used, make sure it's ok
assert(size(Mmap,2)==3,'If a matrix colormap is used the second dimension must be 3');
assert(all(Mmap(:)<=1&Mmap(:)>=0),'If a matrix colormap is used the values must all: 0 <= Mmap <= 1');
Cmap = [gray(256); Mmap];
end
if ~exist('height','var')
height = 80;
else
assert(isscalar(height),'The fourth argument, height, is expected to be a scalar');
end
%Making required pieces
I = double(I); %Needs to be double for slice() and all other calculations
Mdata = double(Mdata);
[ii jj] = meshgrid(ceil(cumsum(diff([0 linspace(1,size(I,2),size(Mdata,2))]))),ceil(cumsum(diff([0 linspace(1,size(I,1),size(Mdata,1))]))));
Mdata = (Mdata - min(Mdata(:)))+height; %Scale so minimum is height (so it doesn't conflict with image)
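%Map the mesh values into the upper (mesh) half of the stacked colormap, i.e. indices 257:length(Cmap)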
Midx = ceil(min((length(Cmap)-256),round((length(Cmap)-255)*(Mdata-min(Mdata(:)))/(max(Mdata(:))-min(Mdata(:))))+1))+256;
if any(I(:)<0)||any(I(:)>255)
%Scale whole image to 1:256 for the map (only if it was out of bounds before!)
I = ceil(min(256,round((255)*(I-min(I(:)))/(max(I(:))-min(I(:))))+1));
else
%Else adjust to 1:256 integer increment
I = ceil(I+1);
end
%Plotting
figure;
H(1) = slice(repmat(I,[1 1 2]),[],[],1); %slice() requires at least 2x2x2
set(H(1),'EdgeColor','none') %required so image isn't just an edge
hold on
H(2) = mesh(ii,jj,double(Mdata));
hold off
%Plot Properties
axis vis3d
axis ij
axis tight
colormap(Cmap)
set(H(1),'CData',I);
set(H(2),'CData',Midx);
caxis([1 length(Cmap)])
|
theory DBM
imports Floyd_Warshall Timed_Automata
begin
chapter \<open>Difference Bound Matrices\<close>
section \<open>Definitions\<close>
text \<open>
Difference Bound Matrices (DBMs) constrain differences of clocks
(or more precisely, the difference of values assigned to individual clocks by a valuation).
The possible constraints are given by the following datatype:
\<close>
datatype ('t::time) DBMEntry = Le 't | Lt 't | INF ("\<infinity>")
text \<open>\noindent This yields a simple definition of DBMs:\<close>
type_synonym 't DBM = "nat \<Rightarrow> nat \<Rightarrow> 't DBMEntry"
text \<open>\noindent
To relate clocks with rows and columns of
a DBM, we use a clock numbering \<open>v\<close> of type @{typ "'c \<Rightarrow> nat"} to map clocks to indices.
DBMs will regularly be accompanied by a natural number $n$,
which designates the number of clocks constrained by the matrix.
To be able to represent the full set of clock constraints with DBMs, we add an imaginary
clock \<open>\<zero>\<close>, which shall be assigned to 0 in every valuation.
In the following predicate we explicitly keep track of \<open>\<zero>\<close>.
\<close>
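text \<open>
For illustration: with two clocks \<open>x\<close> and \<open>y\<close>, \<open>v x = 1\<close> and \<open>v y = 2\<close>,
the zone \<open>0 \<le> x \<and> x \<le> 5 \<and> x - y < 3\<close> is represented by the DBM \<open>M\<close> with
\<open>M 1 0 = Le 5\<close> (\<open>x - \<zero> \<le> 5\<close>), \<open>M 0 1 = Le 0\<close> (\<open>\<zero> - x \<le> 0\<close>, i.e.\ \<open>x \<ge> 0\<close>),
\<open>M 1 2 = Lt 3\<close> (\<open>x - y < 3\<close>), and \<open>\<infinity>\<close> in every entry that imposes no constraint.
\<close>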
inductive dbm_entry_val :: "('c, 't) cval \<Rightarrow> 'c option \<Rightarrow> 'c option \<Rightarrow> ('t::time) DBMEntry \<Rightarrow> bool"
where
"u r \<le> d \<Longrightarrow> dbm_entry_val u (Some r) None (Le d)" |
"-u c \<le> d \<Longrightarrow> dbm_entry_val u None (Some c) (Le d)" |
"u r < d \<Longrightarrow> dbm_entry_val u (Some r) None (Lt d)" |
"-u c < d \<Longrightarrow> dbm_entry_val u None (Some c) (Lt d)" |
"u r - u c \<le> d \<Longrightarrow> dbm_entry_val u (Some r) (Some c) (Le d)" |
"u r - u c < d \<Longrightarrow> dbm_entry_val u (Some r) (Some c) (Lt d)" |
"dbm_entry_val _ _ _ \<infinity>"
declare dbm_entry_val.intros[intro]
inductive_cases[elim!]: "dbm_entry_val u None (Some c) (Le d)"
inductive_cases[elim!]: "dbm_entry_val u (Some c) None (Le d)"
inductive_cases[elim!]: "dbm_entry_val u None (Some c) (Lt d)"
inductive_cases[elim!]: "dbm_entry_val u (Some c) None (Lt d)"
inductive_cases[elim!]: "dbm_entry_val u (Some r) (Some c) (Le d)"
inductive_cases[elim!]: "dbm_entry_val u (Some r) (Some c) (Lt d)"
fun dbm_entry_bound :: "('t::time) DBMEntry \<Rightarrow> 't"
where
"dbm_entry_bound (Le t) = t" |
"dbm_entry_bound (Lt t) = t" |
"dbm_entry_bound \<infinity> = 0"
inductive dbm_lt :: "('t::time) DBMEntry \<Rightarrow> 't DBMEntry \<Rightarrow> bool"
("_ \<prec> _" [51, 51] 50)
where
"dbm_lt (Lt _) \<infinity>" |
"dbm_lt (Le _) \<infinity>" |
"a < b \<Longrightarrow> dbm_lt (Le a) (Le b)" |
"a < b \<Longrightarrow> dbm_lt (Le a) (Lt b)" |
"a \<le> b \<Longrightarrow> dbm_lt (Lt a) (Le b)" |
"a < b \<Longrightarrow> dbm_lt (Lt a) (Lt b)"
declare dbm_lt.intros[intro]
definition dbm_le :: "('t::time) DBMEntry \<Rightarrow> 't DBMEntry \<Rightarrow> bool"
("_ \<preceq> _" [51, 51] 50)
where
"dbm_le a b \<equiv> (a \<prec> b) \<or> a = b"
text \<open>
Now a valuation is contained in the zone represented by a DBM if it fulfills all individual
constraints:
\<close>
definition DBM_val_bounded :: "('c \<Rightarrow> nat) \<Rightarrow> ('c, 't) cval \<Rightarrow> ('t::time) DBM \<Rightarrow> nat \<Rightarrow> bool"
where
"DBM_val_bounded v u m n \<equiv> Le 0 \<preceq> m 0 0 \<and>
(\<forall> c. v c \<le> n \<longrightarrow> (dbm_entry_val u None (Some c) (m 0 (v c))
\<and> dbm_entry_val u (Some c) None (m (v c) 0)))
\<and> (\<forall> c1 c2. v c1 \<le> n \<and> v c2 \<le> n \<longrightarrow> dbm_entry_val u (Some c1) (Some c2) (m (v c1) (v c2)))"
abbreviation DBM_val_bounded_abbrev ::
"('c, 't) cval \<Rightarrow> ('c \<Rightarrow> nat) \<Rightarrow> nat \<Rightarrow> ('t::time) DBM \<Rightarrow> bool"
("_ \<turnstile>\<^bsub>_,_\<^esub> _")
where
"u \<turnstile>\<^bsub>v,n\<^esub> M \<equiv> DBM_val_bounded v u M n"
abbreviation
"dmin a b \<equiv> if a \<prec> b then a else b"
lemma dbm_le_dbm_min:
"a \<preceq> b \<Longrightarrow> a = dmin a b" unfolding dbm_le_def
by auto
lemma dbm_lt_asym:
assumes "e \<prec> f"
shows "~ f \<prec> e"
using assms
proof (safe, cases e f rule: dbm_lt.cases, goal_cases)
case 1 from this(2) show ?case using 1(3-) by (cases f e rule: dbm_lt.cases) auto
next
case 2 from this(2) show ?case using 2(3-) by (cases f e rule: dbm_lt.cases) auto
next
case 3 from this(2) show ?case using 3(3-) by (cases f e rule: dbm_lt.cases) auto
next
case 4 from this(2) show ?case using 4(3-) by (cases f e rule: dbm_lt.cases) auto
next
case 5 from this(2) show ?case using 5(3-) by (cases f e rule: dbm_lt.cases) auto
next
case 6 from this(2) show ?case using 6(3-) by (cases f e rule: dbm_lt.cases) auto
qed
lemma dbm_le_dbm_min2:
"a \<preceq> b \<Longrightarrow> a = dmin b a"
using dbm_lt_asym by (auto simp: dbm_le_def)
lemma dmb_le_dbm_entry_bound_inf:
"a \<preceq> b \<Longrightarrow> a = \<infinity> \<Longrightarrow> b = \<infinity>"
apply (auto simp: dbm_le_def)
apply (cases rule: dbm_lt.cases)
by auto
lemma dbm_not_lt_eq: "\<not> a \<prec> b \<Longrightarrow> \<not> b \<prec> a \<Longrightarrow> a = b"
apply (cases a)
apply (cases b, fastforce+)+
done
lemma dbm_not_lt_impl: "\<not> a \<prec> b \<Longrightarrow> b \<prec> a \<or> a = b" using dbm_not_lt_eq by auto
lemma "dmin a b = dmin b a"
proof (cases "a \<prec> b")
case True thus ?thesis by (simp add: dbm_lt_asym)
next
case False thus ?thesis by (simp add: dbm_not_lt_eq)
qed
lemma dbm_lt_trans: "a \<prec> b \<Longrightarrow> b \<prec> c \<Longrightarrow> a \<prec> c"
proof (cases a b rule: dbm_lt.cases, goal_cases)
case 1 thus ?case by simp
next
case 2 from this(2-) show ?case by (cases rule: dbm_lt.cases) simp+
next
case 3 from this(2-) show ?case by (cases rule: dbm_lt.cases) simp+
next
case 4 from this(2-) show ?case by (cases rule: dbm_lt.cases) auto
next
case 5 from this(2-) show ?case by (cases rule: dbm_lt.cases) auto
next
case 6 from this(2-) show ?case by (cases rule: dbm_lt.cases) auto
next
case 7 from this(2-) show ?case by (cases rule: dbm_lt.cases) auto
qed
lemma aux_3: "\<not> a \<prec> b \<Longrightarrow> \<not> b \<prec> c \<Longrightarrow> a \<prec> c \<Longrightarrow> c = a"
proof goal_cases
case 1 thus ?case
proof (cases "c \<prec> b")
case True
with \<open>a \<prec> c\<close> have "a \<prec> b" by (rule dbm_lt_trans)
thus ?thesis using 1 by auto
next
case False thus ?thesis using dbm_not_lt_eq 1 by auto
qed
qed
inductive_cases[elim!]: "\<infinity> \<prec> x"
lemma dbm_lt_asymmetric[simp]: "x \<prec> y \<Longrightarrow> y \<prec> x \<Longrightarrow> False"
by (cases x y rule: dbm_lt.cases) (auto elim: dbm_lt.cases)
lemma le_dbm_le: "Le a \<preceq> Le b \<Longrightarrow> a \<le> b" unfolding dbm_le_def by (auto elim: dbm_lt.cases)
lemma le_dbm_lt: "Le a \<preceq> Lt b \<Longrightarrow> a < b" unfolding dbm_le_def by (auto elim: dbm_lt.cases)
lemma lt_dbm_le: "Lt a \<preceq> Le b \<Longrightarrow> a \<le> b" unfolding dbm_le_def by (auto elim: dbm_lt.cases)
lemma lt_dbm_lt: "Lt a \<preceq> Lt b \<Longrightarrow> a \<le> b" unfolding dbm_le_def by (auto elim: dbm_lt.cases)
lemma not_dbm_le_le_impl: "\<not> Le a \<prec> Le b \<Longrightarrow> a \<ge> b" by (metis dbm_lt.intros(3) not_less)
lemma not_dbm_lt_le_impl: "\<not> Lt a \<prec> Le b \<Longrightarrow> a > b" by (metis dbm_lt.intros(5) not_less)
lemma not_dbm_lt_lt_impl: "\<not> Lt a \<prec> Lt b \<Longrightarrow> a \<ge> b" by (metis dbm_lt.intros(6) not_less)
lemma not_dbm_le_lt_impl: "\<not> Le a \<prec> Lt b \<Longrightarrow> a \<ge> b" by (metis dbm_lt.intros(4) not_less)
(*>*)
(*<*)
fun dbm_add :: "('t::time) DBMEntry \<Rightarrow> 't DBMEntry \<Rightarrow> 't DBMEntry" (infixl "\<otimes>" 70)
where
"dbm_add \<infinity> _ = \<infinity>" |
"dbm_add _ \<infinity> = \<infinity>" |
"dbm_add (Le a) (Le b) = (Le (a+b))" |
"dbm_add (Le a) (Lt b) = (Lt (a+b))" |
"dbm_add (Lt a) (Le b) = (Lt (a+b))" |
"dbm_add (Lt a) (Lt b) = (Lt (a+b))"
thm dbm_add.simps
lemma aux_4: "x \<prec> y \<Longrightarrow> \<not> dbm_add x z \<prec> dbm_add y z \<Longrightarrow> dbm_add x z = dbm_add y z"
by (cases x y rule: dbm_lt.cases) ((cases z), auto)+
lemma aux_5: "\<not> x \<prec> y \<Longrightarrow> dbm_add x z \<prec> dbm_add y z \<Longrightarrow> dbm_add y z = dbm_add x z"
proof -
assume lt: "dbm_add x z \<prec> dbm_add y z" "\<not> x \<prec> y"
hence "x = y \<or> y \<prec> x" by (auto simp: dbm_not_lt_eq)
thus ?thesis
proof
assume "x = y" thus ?thesis by simp
next
assume "y \<prec> x"
thus ?thesis
proof (cases y x rule: dbm_lt.cases, goal_cases)
case 1 thus ?case using lt by auto
next
case 2 thus ?case using lt by auto
next
case 3 thus ?case using dbm_lt_asymmetric lt(1) by (cases z) fastforce+
next
case 4 thus ?case using dbm_lt_asymmetric lt(1) by (cases z) fastforce+
next
case 5 thus ?case using dbm_lt_asymmetric lt(1) by (cases z) fastforce+
next
case 6 thus ?case using dbm_lt_asymmetric lt(1) by (cases z) fastforce+
qed
qed
qed
lemma aux_42: "x \<prec> y \<Longrightarrow> \<not> dbm_add z x \<prec> dbm_add z y \<Longrightarrow> dbm_add z x = dbm_add z y"
by (cases x y rule: dbm_lt.cases) ((cases z), auto)+
lemma aux_52: "\<not> x \<prec> y \<Longrightarrow> dbm_add z x \<prec> dbm_add z y \<Longrightarrow> dbm_add z y = dbm_add z x"
proof -
assume lt: "dbm_add z x \<prec> dbm_add z y" "\<not> x \<prec> y"
hence "x = y \<or> y \<prec> x" by (auto simp: dbm_not_lt_eq)
thus ?thesis
proof
assume "x = y" thus ?thesis by simp
next
assume "y \<prec> x"
thus ?thesis
proof (cases y x rule: dbm_lt.cases, goal_cases)
case 1 thus ?case using lt by (cases z) fastforce+
next
case 2 thus ?case using lt by (cases z) fastforce+
next
case 3 thus ?case using dbm_lt_asymmetric lt(1) by (cases z) fastforce+
next
case 4 thus ?case using dbm_lt_asymmetric lt(1) by (cases z) fastforce+
next
case 5 thus ?case using dbm_lt_asymmetric lt(1) by (cases z) fastforce+
next
case 6 thus ?case using dbm_lt_asymmetric lt(1) by (cases z) fastforce+
qed
qed
qed
lemma dbm_add_not_inf:
"a \<noteq> \<infinity> \<Longrightarrow> b \<noteq> \<infinity> \<Longrightarrow> dbm_add a b \<noteq> \<infinity>"
by (cases a, auto, cases b, auto, cases b, auto)
lemma dbm_le_not_inf:
"a \<preceq> b \<Longrightarrow> b \<noteq> \<infinity> \<Longrightarrow> a \<noteq> \<infinity>"
by (cases "a = b") (auto simp: dbm_le_def)
section \<open>DBM Entries Form a Linearly Ordered Abelian Monoid\<close>
instantiation DBMEntry :: (time) linorder
begin
definition less_eq: "(\<le>) \<equiv> dbm_le"
definition less: "(<) = dbm_lt"
instance
proof ((standard; unfold less less_eq), goal_cases)
case 1 thus ?case unfolding dbm_le_def using dbm_lt_asymmetric by auto
next
case 2 thus ?case by (simp add: dbm_le_def)
next
case 3 thus ?case unfolding dbm_le_def using dbm_lt_trans by auto
next
case 4 thus ?case unfolding dbm_le_def using dbm_lt_asymmetric by auto
next
case 5 thus ?case unfolding dbm_le_def using dbm_not_lt_eq by auto
qed
end
instantiation DBMEntry :: (time) linordered_ab_monoid_add
begin
definition mult: "(+) = dbm_add"
definition neutral: "neutral = Le 0"
instance proof ((standard; unfold mult neutral less less_eq), goal_cases)
case (1 a b c) thus ?case by (cases a; cases b; cases c; auto)
next
case (2 a b) thus ?case by (cases a; cases b) auto
next
case (3 a b c)
thus ?case unfolding dbm_le_def
apply safe
apply (rule dbm_lt.cases)
apply assumption
by (cases c; fastforce)+
next
case (4 x) thus ?case by (cases x) auto
next
case (5 x) thus ?case by (cases x) auto
qed
end
interpretation linordered_monoid: linordered_ab_monoid_add dbm_add dbm_le dbm_lt "Le 0"
apply (standard, fold neutral mult less_eq less)
using add.commute add.commute add_left_mono assoc by auto
lemma Le_Le_dbm_lt_D[dest]: "Le a \<prec> Lt b \<Longrightarrow> a < b" by (cases rule: dbm_lt.cases) auto
lemma Le_Lt_dbm_lt_D[dest]: "Le a \<prec> Le b \<Longrightarrow> a < b" by (cases rule: dbm_lt.cases) auto
lemma Lt_Le_dbm_lt_D[dest]: "Lt a \<prec> Le b \<Longrightarrow> a \<le> b" by (cases rule: dbm_lt.cases) auto
lemma Lt_Lt_dbm_lt_D[dest]: "Lt a \<prec> Lt b \<Longrightarrow> a < b" by (cases rule: dbm_lt.cases) auto
lemma Le_le_LeI[intro]: "a \<le> b \<Longrightarrow> Le a \<le> Le b" unfolding less_eq dbm_le_def by auto
lemma Lt_le_LeI[intro]: "a \<le> b \<Longrightarrow> Lt a \<le> Le b" unfolding less_eq dbm_le_def by auto
lemma Lt_le_LtI[intro]: "a \<le> b \<Longrightarrow> Lt a \<le> Lt b" unfolding less_eq dbm_le_def by auto
lemma Le_le_LtI[intro]: "a < b \<Longrightarrow> Le a \<le> Lt b" unfolding less_eq dbm_le_def by auto
lemma Lt_lt_LeI: "x \<le> y \<Longrightarrow> Lt x < Le y" unfolding less by auto
lemma Le_le_LeD[dest]: "Le a \<le> Le b \<Longrightarrow> a \<le> b" unfolding dbm_le_def less_eq by auto
lemma Le_le_LtD[dest]: "Le a \<le> Lt b \<Longrightarrow> a < b" unfolding dbm_le_def less_eq by auto
lemma Lt_le_LeD[dest]: "Lt a \<le> Le b \<Longrightarrow> a \<le> b" unfolding less_eq dbm_le_def by auto
lemma Lt_le_LtD[dest]: "Lt a \<le> Lt b \<Longrightarrow> a \<le> b" unfolding less_eq dbm_le_def by auto
lemma inf_not_le_Le[simp]: "\<infinity> \<le> Le x = False" unfolding less_eq dbm_le_def by auto
lemma inf_not_le_Lt[simp]: "\<infinity> \<le> Lt x = False" unfolding less_eq dbm_le_def by auto
lemma inf_not_lt[simp]: "\<infinity> \<prec> x = False" by auto
lemma any_le_inf: "x \<le> \<infinity>" by (metis less_eq dmb_le_dbm_entry_bound_inf le_cases)
section \<open>Basic Properties of DBMs\<close>
subsection \<open>DBMs and Length of Paths\<close>
lemma dbm_entry_val_add_1: "dbm_entry_val u (Some c) (Some d) a \<Longrightarrow> dbm_entry_val u (Some d) None b
\<Longrightarrow> dbm_entry_val u (Some c) None (dbm_add a b)"
proof (cases a, goal_cases)
case 1 thus ?thesis
apply (cases b)
apply auto
using add_mono_thms_linordered_semiring(1) apply fastforce
using add_le_less_mono by fastforce
next
case 2 thus ?thesis
apply (cases b)
apply auto
apply (simp add: dbm_entry_val.intros(3) diff_less_eq less_le_trans)
by (metis add_le_less_mono dbm_entry_val.intros(3) diff_add_cancel less_imp_le)
next
case 3 thus ?thesis by (cases b) auto
qed
lemma dbm_entry_val_add_2: "dbm_entry_val u None (Some c) a \<Longrightarrow> dbm_entry_val u (Some c) (Some d) b
\<Longrightarrow> dbm_entry_val u None (Some d) (dbm_add a b)"
proof (cases a, goal_cases)
case 1 thus ?thesis
apply (cases b)
apply auto
using add_mono_thms_linordered_semiring(1) apply fastforce
using add_le_less_mono by fastforce
next
case 2 thus ?thesis
apply (cases b)
apply auto
using add_mono_thms_linordered_field(3) apply fastforce
using add_strict_mono by fastforce
next
case 3 thus ?thesis by (cases b) auto
qed
lemma dbm_entry_val_add_3:
"dbm_entry_val u (Some c) (Some d) a \<Longrightarrow> dbm_entry_val u (Some d) (Some e) b
\<Longrightarrow> dbm_entry_val u (Some c) (Some e) (dbm_add a b)"
proof (cases a, goal_cases)
case 1 thus ?thesis
apply (cases b)
apply auto
using add_mono_thms_linordered_semiring(1) apply fastforce
using add_le_less_mono by fastforce
next
case 2 thus ?thesis
apply (cases b)
apply auto
using add_mono_thms_linordered_field(3) apply fastforce
using add_strict_mono by fastforce
next
case 3 thus ?thesis by (cases b) auto
qed
lemma dbm_entry_val_add_4:
"dbm_entry_val u (Some c) None a \<Longrightarrow> dbm_entry_val u None (Some d) b
\<Longrightarrow> dbm_entry_val u (Some c) (Some d) (dbm_add a b)"
proof (cases a, goal_cases)
case 1 thus ?thesis
apply (cases b)
apply auto
using add_mono_thms_linordered_semiring(1) apply fastforce
using add_le_less_mono by fastforce
next
case 2 thus ?thesis
apply (cases b)
apply auto
using add_mono_thms_linordered_field(3) apply fastforce
using add_strict_mono by fastforce
next
case 3 thus ?thesis by (cases b) auto
qed
no_notation dbm_add (infixl "\<otimes>" 70)
lemma DBM_val_bounded_len_1'_aux:
assumes "DBM_val_bounded v u m n" "v c \<le> n" "\<forall> k \<in> set vs. k > 0 \<and> k \<le> n \<and> (\<exists> c. v c = k)"
shows "dbm_entry_val u (Some c) None (len m (v c) 0 vs)" using assms
proof (induction vs arbitrary: c)
case Nil then show ?case unfolding DBM_val_bounded_def by auto
next
case (Cons k vs)
then obtain c' where c': "k > 0" "k \<le> n" "v c' = k" by auto
with Cons have "dbm_entry_val u (Some c') None (len m (v c') 0 vs)" by auto
moreover have "dbm_entry_val u (Some c) (Some c') (m (v c) (v c'))" using Cons.prems c'
by (auto simp add: DBM_val_bounded_def)
ultimately have "dbm_entry_val u (Some c) None (m (v c) (v c') + len m (v c') 0 vs)"
using dbm_entry_val_add_1 unfolding mult by fastforce
with c' show ?case unfolding DBM_val_bounded_def by simp
qed
lemma DBM_val_bounded_len_3'_aux:
"DBM_val_bounded v u m n \<Longrightarrow> v c \<le> n \<Longrightarrow> v d \<le> n \<Longrightarrow> \<forall> k \<in> set vs. k > 0 \<and> k \<le> n \<and> (\<exists> c. v c = k)
\<Longrightarrow> dbm_entry_val u (Some c) (Some d) (len m (v c) (v d) vs)"
proof (induction vs arbitrary: c)
case Nil thus ?case unfolding DBM_val_bounded_def by auto
next
case (Cons k vs)
then obtain c' where c': "k > 0" "k \<le> n" "v c' = k" by auto
with Cons have "dbm_entry_val u (Some c') (Some d) (len m (v c') (v d) vs)" by auto
moreover have "dbm_entry_val u (Some c) (Some c') (m (v c) (v c'))" using Cons.prems c'
by (auto simp add: DBM_val_bounded_def)
ultimately have "dbm_entry_val u (Some c) (Some d) (m (v c) (v c') + len m (v c') (v d) vs)"
using dbm_entry_val_add_3 unfolding mult by fastforce
with c' show ?case unfolding DBM_val_bounded_def by simp
qed
lemma DBM_val_bounded_len_2'_aux:
"DBM_val_bounded v u m n \<Longrightarrow> v c \<le> n \<Longrightarrow> \<forall> k \<in> set vs. k > 0 \<and> k \<le> n \<and> (\<exists> c. v c = k)
\<Longrightarrow> dbm_entry_val u None (Some c) (len m 0 (v c) vs)"
proof (cases vs, goal_cases)
case 1 then show ?thesis unfolding DBM_val_bounded_def by auto
next
case (2 k vs)
then obtain c' where c': "k > 0" "k \<le> n" "v c' = k" by auto
with 2 have "dbm_entry_val u (Some c') (Some c) (len m (v c') (v c) vs)"
using DBM_val_bounded_len_3'_aux by auto
moreover have "dbm_entry_val u None (Some c') (m 0 (v c'))"
using 2 c' by (auto simp add: DBM_val_bounded_def)
ultimately have "dbm_entry_val u None (Some c) (m 0 (v c') + len m (v c') (v c) vs)"
using dbm_entry_val_add_2 unfolding mult by fastforce
with 2(4) c' show ?case unfolding DBM_val_bounded_def by simp
qed
lemma cnt_0_D:
"cnt x xs = 0 \<Longrightarrow> x \<notin> set xs"
apply (induction xs)
apply simp
apply (rename_tac a xs)
apply (case_tac "x = a")
by simp+
lemma cnt_at_most_1_D:
"cnt x (xs @ x # ys) \<le> 1 \<Longrightarrow> x \<notin> set xs \<and> x \<notin> set ys"
apply (induction xs)
apply auto[]
using cnt_0_D apply force
apply (rename_tac a xs)
apply (case_tac "a = x")
apply simp
apply simp
done
lemma nat_list_0 [intro]:
"x \<in> set xs \<Longrightarrow> 0 \<notin> set (xs :: nat list) \<Longrightarrow> x > 0"
by (induction xs) auto
lemma DBM_val_bounded_len':
fixes v
defines "vo \<equiv> \<lambda> k. if k = 0 then None else Some (SOME c. v c = k)"
assumes "DBM_val_bounded v u m n" "cnt 0 (i # j # vs) \<le> 1"
"\<forall> k \<in> set (i # j # vs). k > 0 \<longrightarrow> k \<le> n \<and> (\<exists> c. v c = k)"
shows "dbm_entry_val u (vo i) (vo j) (len m i j vs)"
proof -
show ?thesis
proof (cases "\<forall> k \<in> set vs. k > 0")
case True
with assms have *: "\<forall> k \<in> set vs. k > 0 \<and> k \<le> n \<and> (\<exists> c. v c = k)" by auto
show ?thesis
proof (cases "i = 0")
case True
then have i: "vo i = None" by (simp add: vo_def)
show ?thesis
proof (cases "j = 0")
case True with assms \<open>i = 0\<close> show ?thesis by auto
next
case False
with assms obtain c2 where c2: "j \<le> n" "v c2 = j" "vo j = Some c2"
unfolding vo_def by (fastforce intro: someI)
with \<open>i = 0\<close> i DBM_val_bounded_len_2'_aux[OF assms(2) _ *] show ?thesis by auto
qed
next
case False
with assms(4) obtain c1 where c1: "i \<le> n" "v c1 = i" "vo i = Some c1"
unfolding vo_def by (fastforce intro: someI)
show ?thesis
proof (cases "j = 0")
case True
with DBM_val_bounded_len_1'_aux[OF assms(2) _ *] c1 show ?thesis by (auto simp: vo_def)
next
case False
with assms obtain c2 where c2: "j \<le> n" "v c2 = j" "vo j = Some c2"
unfolding vo_def by (fastforce intro: someI)
with c1 DBM_val_bounded_len_3'_aux[OF assms(2) _ _ *] show ?thesis by auto
qed
qed
next
case False
then have "\<exists> k \<in> set vs. k = 0" by auto
then obtain us ws where vs: "vs = us @ 0 # ws" by (meson split_list_last)
with cnt_at_most_1_D[of 0 "i # j # us"] assms(3) have
"0 \<notin> set us" "0 \<notin> set ws" "i \<noteq> 0" "j \<noteq> 0"
by auto
with vs have vs: "vs = us @ 0 # ws" "\<forall> k \<in> set us. k > 0" "\<forall> k \<in> set ws. k > 0" by auto
with assms(4) have v:
"\<forall>k\<in>set us. 0 < k \<and> k \<le> n \<and> (\<exists>c. v c = k)" "\<forall>k\<in>set ws. 0 < k \<and> k \<le> n \<and> (\<exists>c. v c = k)"
by auto
from \<open>i \<noteq> 0\<close> \<open>j \<noteq> 0\<close> assms obtain c1 c2 where
c1: "i \<le> n" "v c1 = i" "vo i = Some c1" and c2: "j \<le> n" "v c2 = j" "vo j = Some c2"
unfolding vo_def by (fastforce intro: someI)
with dbm_entry_val_add_4 [OF DBM_val_bounded_len_1'_aux[OF assms(2) _ v(1)] DBM_val_bounded_len_2'_aux[OF assms(2) _ v(2)]]
have "dbm_entry_val u (Some c1) (Some c2) (dbm_add (len m (v c1) 0 us) (len m 0 (v c2) ws))" by auto
moreover from vs have "len m (v c1) (v c2) vs = dbm_add (len m (v c1) 0 us) (len m 0 (v c2) ws)"
by (simp add: len_comp mult)
ultimately show ?thesis using c1 c2 by auto
qed
qed
lemma DBM_val_bounded_len'2:
fixes v
assumes "DBM_val_bounded v u m n" "0 \<notin> set vs" "v c \<le> n"
"\<forall> k \<in> set vs. k > 0 \<longrightarrow> k \<le> n \<and> (\<exists> c. v c = k)"
shows "dbm_entry_val u None (Some c) (len m 0 (v c) vs)"
using DBM_val_bounded_len_2'_aux[OF assms(1,3)] assms(2,4) by fastforce
lemma DBM_val_bounded_len'3:
fixes v
assumes "DBM_val_bounded v u m n" "cnt 0 vs \<le> 1" "v c1 \<le> n" "v c2 \<le> n"
"\<forall> k \<in> set vs. k > 0 \<longrightarrow> k \<le> n \<and> (\<exists> c. v c = k)"
shows "dbm_entry_val u (Some c1) (Some c2) (len m (v c1) (v c2) vs)"
proof -
show ?thesis
proof (cases "\<forall> k \<in> set vs. k > 0")
case True
with assms have "\<forall> k \<in> set vs. k > 0 \<and> k \<le> n \<and> (\<exists> c. v c = k)" by auto
with DBM_val_bounded_len_3'_aux[OF assms(1,3,4)] show ?thesis by auto
next
case False
then have "\<exists> k \<in> set vs. k = 0" by auto
then obtain us ws where vs: "vs = us @ 0 # ws" by (meson split_list_last)
with cnt_at_most_1_D[of 0 "us"] assms(2) have
"0 \<notin> set us" "0 \<notin> set ws"
by auto
with vs have vs: "vs = us @ 0 # ws" "\<forall> k \<in> set us. k > 0" "\<forall> k \<in> set ws. k > 0" by auto
with assms(5) have v:
"\<forall>k\<in>set us. 0 < k \<and> k \<le> n \<and> (\<exists>c. v c = k)" "\<forall>k\<in>set ws. 0 < k \<and> k \<le> n \<and> (\<exists>c. v c = k)"
by auto
with dbm_entry_val_add_4 [OF DBM_val_bounded_len_1'_aux[OF assms(1,3) v(1)] DBM_val_bounded_len_2'_aux[OF assms(1,4) v(2)]]
have "dbm_entry_val u (Some c1) (Some c2) (dbm_add (len m (v c1) 0 us) (len m 0 (v c2) ws))" by auto
moreover from vs have "len m (v c1) (v c2) vs = dbm_add (len m (v c1) 0 us) (len m 0 (v c2) ws)"
by (simp add: len_comp mult)
ultimately show ?thesis by auto
qed
qed
lemma DBM_val_bounded_len'':
fixes v
defines "vo \<equiv> \<lambda> k. if k = 0 then None else Some (SOME c. v c = k)"
assumes "DBM_val_bounded v u m n" "i \<noteq> 0 \<or> j \<noteq> 0"
"\<forall> k \<in> set (i # j # vs). k > 0 \<longrightarrow> k \<le> n \<and> (\<exists> c. v c = k)"
shows "dbm_entry_val u (vo i) (vo j) (len m i j vs)" using assms
proof (induction "length vs" arbitrary: i vs rule: less_induct)
case less
show ?case
proof (cases "\<forall> k \<in> set vs. k > 0")
case True
with less.prems have *: "\<forall> k \<in> set vs. k > 0 \<and> k \<le> n \<and> (\<exists> c. v c = k)" by auto
show ?thesis
proof (cases "i = 0")
case True
then have i: "vo i = None" by (simp add: vo_def)
show ?thesis
proof (cases "j = 0")
case True with less.prems \<open>i = 0\<close> show ?thesis by auto
next
case False
with less.prems obtain c2 where c2: "j \<le> n" "v c2 = j" "vo j = Some c2"
unfolding vo_def by (fastforce intro: someI)
with \<open>i = 0\<close> i DBM_val_bounded_len_2'_aux[OF less.prems(1) _ *] show ?thesis by auto
qed
next
case False
with less.prems obtain c1 where c1: "i \<le> n" "v c1 = i" "vo i = Some c1"
unfolding vo_def by (fastforce intro: someI)
show ?thesis
proof (cases "j = 0")
case True
with DBM_val_bounded_len_1'_aux[OF less.prems(1) _ *] c1 show ?thesis by (auto simp: vo_def)
next
case False
with less.prems obtain c2 where c2: "j \<le> n" "v c2 = j" "vo j = Some c2"
unfolding vo_def by (fastforce intro: someI)
with c1 DBM_val_bounded_len_3'_aux[OF less.prems(1) _ _ *] show ?thesis by auto
qed
qed
next
case False
then have "\<exists> us ws. vs = us @ 0 # ws \<and> (\<forall> k \<in> set us. k > 0)"
proof (induction vs)
case Nil then show ?case by auto
next
case (Cons x vs)
show ?case
proof (cases "x = 0")
case True then show ?thesis by fastforce
next
case False
with Cons.prems have "\<not> (\<forall>a\<in>set vs. 0 < a)" by auto
from Cons.IH[OF this] obtain us ws where "vs = us @ 0 # ws" "\<forall>a\<in>set us. 0 < a" by auto
with False have "x # vs = (x # us) @ 0 # ws" "\<forall>a\<in>set (x # us). 0 < a" by auto
then show ?thesis by blast
qed
qed
then obtain us ws where vs: "vs = us @ 0 # ws" "\<forall> k \<in> set us. k > 0" by blast
then show ?thesis
oops
lemma DBM_val_bounded_len_1: "DBM_val_bounded v u m n \<Longrightarrow> v c \<le> n \<Longrightarrow> \<forall> c \<in> set cs. v c \<le> n
\<Longrightarrow> dbm_entry_val u (Some c) None (len m (v c) 0 (map v cs))"
proof (induction cs arbitrary: c)
case Nil thus ?case unfolding DBM_val_bounded_def by auto
next
case (Cons c' cs)
hence "dbm_entry_val u (Some c') None (len m (v c') 0 (map v cs))" by auto
moreover have "dbm_entry_val u (Some c) (Some c') (m (v c) (v c'))" using Cons.prems
by (simp add: DBM_val_bounded_def)
ultimately have "dbm_entry_val u (Some c) None (m (v c) (v c') + len m (v c') 0 (map v cs))"
using dbm_entry_val_add_1 unfolding mult by fastforce
thus ?case unfolding DBM_val_bounded_def by simp
qed
lemma DBM_val_bounded_len_3: "DBM_val_bounded v u m n \<Longrightarrow> v c \<le> n \<Longrightarrow> v d \<le> n \<Longrightarrow> \<forall> c \<in> set cs. v c \<le> n
\<Longrightarrow> dbm_entry_val u (Some c) (Some d) (len m (v c) (v d) (map v cs))"
proof (induction cs arbitrary: c)
case Nil thus ?case unfolding DBM_val_bounded_def by auto
next
case (Cons c' cs)
hence "dbm_entry_val u (Some c') (Some d) (len m (v c') (v d) (map v cs))" by auto
moreover have "dbm_entry_val u (Some c) (Some c') (m (v c) (v c'))" using Cons.prems
by (simp add: DBM_val_bounded_def)
ultimately have "dbm_entry_val u (Some c) (Some d) (m (v c) (v c') + len m (v c') (v d) (map v cs))"
using dbm_entry_val_add_3 unfolding mult by fastforce
thus ?case unfolding DBM_val_bounded_def by simp
qed
lemma DBM_val_bounded_len_2: "DBM_val_bounded v u m n \<Longrightarrow> v c \<le> n \<Longrightarrow> \<forall> c \<in> set cs. v c \<le> n
\<Longrightarrow> dbm_entry_val u None (Some c) (len m 0 (v c) (map v cs))"
proof (cases cs, goal_cases)
case 1 thus ?thesis unfolding DBM_val_bounded_def by auto
next
case (2 c' cs)
hence "dbm_entry_val u (Some c') (Some c) (len m (v c') (v c) (map v cs))"
using DBM_val_bounded_len_3 by auto
moreover have "dbm_entry_val u None (Some c') (m 0 (v c'))"
using 2 by (simp add: DBM_val_bounded_def)
ultimately have "dbm_entry_val u None (Some c) (m 0 (v c') + len m (v c') (v c) (map v cs))"
using dbm_entry_val_add_2 unfolding mult by fastforce
thus ?case using 2(4) unfolding DBM_val_bounded_def by simp
qed
end
|
/-
Four natural lemmas missing from the quotient fintype API.
Author: Adrián Doña Mateo
These were contributed to mathlib in
[#6964](https://github.com/leanprover-community/mathlib/pull/6964/).
An apostrophe was added at the end of the names to avoid clashes.
-/
import data.fintype.basic
-- These lemmas were added to src/data/fintype/basic.lean.
variables {α β : Type*}
namespace fintype
variables [fintype α] [fintype β]
lemma card_le_of_surjective' (f : α → β) (h : function.surjective f) : card β ≤ card α :=
card_le_of_injective _ (function.injective_surj_inv h)
lemma card_lt_of_surjective_not_injective' [fintype α] [fintype β] (f : α → β)
(h : function.surjective f) (h' : ¬function.injective f) : card β < card α :=
card_lt_of_injective_not_surjective _ (function.injective_surj_inv h) $ λ hg,
have w : function.bijective (function.surj_inv h) := ⟨function.injective_surj_inv h, hg⟩,
h' $ (injective_iff_surjective_of_equiv (equiv.of_bijective _ w).symm).mpr h
end fintype
theorem fintype.card_quotient_le' [fintype α] (s : setoid α) [decidable_rel ((≈) : α → α → Prop)] :
fintype.card (quotient s) ≤ fintype.card α :=
fintype.card_le_of_surjective _ (surjective_quotient_mk _)
theorem fintype.card_quotient_lt' [fintype α] {s : setoid α} [decidable_rel ((≈) : α → α → Prop)]
{x y : α} (h1 : x ≠ y) (h2 : x ≈ y) : fintype.card (quotient s) < fintype.card α :=
fintype.card_lt_of_surjective_not_injective _ (surjective_quotient_mk _) $ λ w,
h1 (w $ quotient.eq.mpr h2) |
//////////////////////////////////////////////////////////////////////////////
/// Copyright 2003 and onward LASMEA UMR 6602 CNRS/U.B.P Clermont-Ferrand
/// Copyright 2009 and onward LRI UMR 8623 CNRS/Univ Paris Sud XI
///
/// Distributed under the Boost Software License, Version 1.0
/// See accompanying file LICENSE.txt or copy at
/// http://www.boost.org/LICENSE_1_0.txt
//////////////////////////////////////////////////////////////////////////////
#ifndef NT2_TOOLBOX_ELLIPTIC_FUNCTION_SCALAR_ELLIE_HPP_INCLUDED
#define NT2_TOOLBOX_ELLIPTIC_FUNCTION_SCALAR_ELLIE_HPP_INCLUDED
#include <boost/math/special_functions.hpp>
#include <nt2/sdk/constant/eps_related.hpp>
#include <nt2/sdk/constant/real.hpp>
#include <nt2/sdk/constant/digits.hpp>
#include <nt2/include/functions/is_ltz.hpp>
#include <nt2/include/functions/sin.hpp>
#include <nt2/include/functions/is_eqz.hpp>
#include <nt2/include/functions/abs.hpp>
#include <nt2/include/functions/sqr.hpp>
#include <nt2/include/functions/sqrt.hpp>
#include <nt2/include/functions/oneminus.hpp>
#include <nt2/include/functions/oneplus.hpp>
#include <nt2/include/functions/tan.hpp>
#include <nt2/include/functions/atan.hpp>
#include <nt2/include/functions/average.hpp>
#include <nt2/include/functions/ellpe.hpp>
#include <nt2/include/functions/ellpk.hpp>
/////////////////////////////////////////////////////////////////////////////
// Implementation when type A0 is arithmetic_
/////////////////////////////////////////////////////////////////////////////
NT2_REGISTER_DISPATCH(tag::ellie_, tag::cpu_,
(A0)(A1),
(arithmetic_<A0>)(arithmetic_<A1>)
)
namespace nt2 { namespace ext
{
template<class Dummy>
struct call<tag::ellie_(tag::arithmetic_,tag::arithmetic_),
tag::cpu_, Dummy> : callable
{
template<class Sig> struct result;
template<class This,class A0,class A1>
struct result<This(A0,A1)> :
std::tr1::result_of<meta::floating(A0,A1)>{};
NT2_FUNCTOR_CALL(2)
{
typedef typename NT2_RETURN_TYPE(2)::type type;
return nt2::ellie(type(a0), type(a1));
}
};
} }
/////////////////////////////////////////////////////////////////////////////
// Implementation when type A0 is double
/////////////////////////////////////////////////////////////////////////////
NT2_REGISTER_DISPATCH(tag::ellie_, tag::cpu_,
(A0)(A1),
(double_<A0>)(double_<A1>)
)
namespace nt2 { namespace ext
{
template<class Dummy>
struct call<tag::ellie_(tag::double_,tag::double_),
tag::cpu_, Dummy> : callable
{
template<class Sig> struct result;
template<class This,class A0,class A1>
struct result<This(A0,A1)> :
std::tr1::result_of<meta::floating(A0,A1)>{};
NT2_FUNCTOR_CALL(2)
{
typedef typename NT2_RETURN_TYPE(2)::type type;
if (a1>One<A1>()||(is_ltz(a1))) return Nan<type>();
if (is_eqz(a1)) return type(a0);
return boost::math::ellint_2(nt2::sqrt(type(a1)), type(a0));
}
};
} }
/////////////////////////////////////////////////////////////////////////////
// Implementation when type A0 is float
/////////////////////////////////////////////////////////////////////////////
NT2_REGISTER_DISPATCH(tag::ellie_, tag::cpu_,
(A0)(A1),
(float_<A0>)(float_<A1>)
)
namespace nt2 { namespace ext
{
template<class Dummy>
struct call<tag::ellie_(tag::float_,tag::float_),
tag::cpu_, Dummy> : callable
{
template<class Sig> struct result;
template<class This,class A0,class A1>
struct result<This(A0,A1)> :
std::tr1::result_of<meta::floating(A0,A1)>{};
NT2_FUNCTOR_CALL(2)
{
typedef typename NT2_RETURN_TYPE(2)::type type;
if (a1>One<A1>()||(is_ltz(a1))) return Nan<type>();
else if (is_eqz(a1))
return a0;
else if (a1 == One<A1>())
return nt2::sin(a0);
else
{
type lphi = nt2::abs(a0);
type m = a1;
type a = One<type>();
type b = nt2::sqrt(oneminus(m));
type c = nt2::sqrt(m);
int d = One<int>();
type e = Zero<type>();
type t = nt2::tan( lphi );
int mod = (lphi + Pio_2<type>())/Pi<type>();
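        // arithmetic-geometric mean (descending Landen) iteration for the
        // incomplete elliptic integral of the second kind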
while( nt2::abs(c) > Eps<type>()*nt2::abs(a) )
{
type temp = b/a;
          lphi = lphi + nt2::atan(t*temp) + mod * Pi<type>();
mod = (lphi + Pio_2<type>())/Pi<type>();
t *= oneplus(temp)/( oneminus(temp * sqr(t)));
c = average(a,-b);
temp = nt2::sqrt(a*b);
a = nt2::average(a,b);
b = temp;
d += d;
e += c*nt2::sin(lphi);
}
b = oneminus(m);
type temp = nt2::ellpe(b)/nt2::ellpk(b);
      temp *= (nt2::atan(t) + mod * Pi<float>())/(d * a);
temp += e;
if(is_ltz(a0)) temp = -temp;
return temp ;
}
}
};
} }
#endif
// modified by jt the 26/12/2010 |
\documentclass{isprs}
\usepackage{subfigure}
\usepackage{setspace}
\usepackage{geometry}
\usepackage{epstopdf}
\usepackage{booktabs}
\usepackage{enumitem}
\usepackage{url}
\geometry{a4paper, top=25mm, left=20mm, right=20mm, bottom=25mm, headsep=10mm, footskip=12mm}
%\usepackage{enumitem}
%\usepackage{isprs}
%\usepackage[perpage,para,symbol*]{footmisc}
%\renewcommand*{\thefootnote}{\fnsymbol{footnote}}
\begin{document}
\title{Open source approach to urban growth simulation}
\author{
A. Petrasova\textsuperscript{a,b,}\thanks{Corresponding author}\,,
V. Petras\textsuperscript{a,b},
D. Van Berkel\textsuperscript{a},
B. A. Harmon\textsuperscript{a,d},
H. Mitasova\textsuperscript{a,b},
R. K. Meentemeyer\textsuperscript{a,c}
}
\address
{
\textsuperscript{a }Center for Geospatial Analytics, North Carolina State University, USA - [email protected]\\
\textsuperscript{b }Department of Marine, Earth, and Atmospheric Sciences, North Carolina State University, USA - (vpetras, akratoc, hmitaso)@ncsu.edu\\
\textsuperscript{c }Department of Forestry and Environmental Resources, North Carolina State University, USA - [email protected]\\
\textsuperscript{d }Department of Landscape Architecture, North Carolina State University, USA - [email protected]\\
}
\commission{VII, }{SpS10 - FOSS4G: FOSS4G Session (coorganized with OSGeo)} %This field is optional.
% \workinggroup{VI/4} %This field is optional.
\icwg{}
\abstract
{
Spatial patterns of land use change due to urbanization and its impact on the landscape
are the subject of ongoing research. Urban growth scenario simulation is a powerful tool
for exploring these impacts and empowering planners to make informed decisions.
We present FUTURES (FUTure Urban -- Regional Environment Simulation) -- a patch-based,
stochastic, multi-level land change modeling framework as a case study showing how
a once closed and inaccessible model benefited from integration with open source GIS.
We will describe our motivation for releasing this project as open source
and the advantages of integrating it with GRASS GIS, a free, libre and open source GIS
and research platform for the geospatial domain.
GRASS GIS provides efficient libraries for FUTURES model development
as well as standard GIS tools and a graphical user interface for model users.
Releasing FUTURES as a GRASS GIS add-on simplifies the distribution of FUTURES
across all main operating systems and ensures the maintainability of our project in the future.
We will describe FUTURES integration into GRASS GIS and demonstrate its usage
on a case study in Asheville, North Carolina.
The developed dataset and tutorial for this case study
enable researchers to experiment with the model, explore its potential or even
modify the model for their applications.}
\keywords{GRASS GIS, FUTURES, urbanization, land change, open science, simulation}
\maketitle
\newcommand{\normalniradky}{%
\renewcommand{\baselinestretch}{0.96}%
\selectfont%
}
\newcommand{\roztazeneradky}{%
\renewcommand{\baselinestretch}{0.98}%
\selectfont%
}
\section{INTRODUCTION}\label{INTRODUCTION}
Population growth in cities worldwide drives changes in land use
often negatively impacting the environments in which people live and undermining
the resilience of local ecosystems.
The need to understand the trade-offs urban planners are facing
gave rise to a number of different land change simulation models,
which proved to be powerful tools for exploring
alternative scenarios and their impacts on various aspects of human-environmental systems
\cite{chaudhuri2013sleuth,verburg2002modeling,sohl2007fore,waddell2002urbansim}.
Despite the influence of the spatial structure and connectivity
of urbanizing landscapes on
biodiversity, water quality, or flood risks \cite{alberti2005effects},
most urban growth models are based on cell-level conversions and
have not focused on generating realistic spatial structures across scales \cite{jantz2005analysis}.
To bridge the gap between cell- and object-based representation, we developed
FUTURES (FUTure Urban-Regional Environment Simulation), a patch-based, multilevel
modeling framework for simulating the emergence of landscape spatial structure
in urbanizing regions \cite{Meentemeyer2012}.
%
The FUTURES model was successfully applied in several cases
including a study of land development dynamics in the rapidly
expanding metro\-po\-litan region of Charlotte, North Carolina \cite{Meentemeyer2012} and an analysis of the impacts
of urbanization on natural resources under different conservation strategies \cite{Dorning2015}.
Most recently, FUTURES was coupled with ecosystem services models to examine the impacts of projected urbanization and urban pattern on several
ecosystem services and their trade-offs \cite{doug2016,Brian2016}.
In order to study the complex interactions between human and natural systems,
interdisciplinary researchers are coupling
existing simulation models.
Land change modeling often plays a crucial role in these coupled models.
Previous case studies with FUTURES have demonstrated
that the model can be applied to a wide range of cases with different study systems and aims.
%
The initial implementation of the model, however, was a prototype that was not ready
to be shared with the scientific community.
%
The model accumulated too much ``technical debt'' \cite{easterbrook2014open} during its initial development,
making it difficult to add new features or run the simulation at larger scales.
%
In order to continue adding new capabilities to FUTURES and to promote
its usage
both inside and outside of
the land use community,
we decided to revise the implementation of the FUTURES model
and develop a new version which would be (a) more efficient and scalable,
(b) as easy to use as possible for a wider audience and (c)
fully open source and maintainable in the long run.
To achieve these goals we
decided that instead of keeping FUTURES as a standalone application,
we would take advantage of existing geospatial software and integrate FUTURES
into open source GRASS GIS \cite{Neteler08}.
By using GRASS GIS' efficient geospatial libraries
we can develop better and higher-level code.
%
Providing open source software to the scientific community entails more than just releasing the actual code --
documentation, tutorials, installation
instructions, binaries and support are also needed
and require considerable effort. % Consider explaining why these things are needed
Without this effort, models cannot be practically used by other researchers.
%
By using GRASS GIS' existing infrastructure we could focus on developing the actual materials
instead of managing our own server infrastructure.
In this article we present a new version of the FUTURES urban growth model that is
available as the \emph{r.futures} module set from the GRASS GIS add-on repository.
This new version of FUTURES
streamlines data processing, provides opportunities to study urbanization on mega-regional scales,
and allows for more reproducible research in the land change community.
We demonstrate this new version of FUTURES with a case study of the Asheville metropolitan area in North Carolina, USA.
% must be open source to attract users chaudhuri2013sleuth
\section{FUTURES model}
FUTure Urban-Regional Environment Simulation
is a stochastic, patch-based model
for projecting landscape patterns of urban growth \cite{Meentemeyer2012}.
FUTURES has a modular structure consisting of 3 main submodels: DEMAND, POTENTIAL and
PGA (patch-growing algorithm), see Figure \ref{fig:schema}.
Land conversion is driven by projected population demand computed by the DEMAND submodel,
and is spatially defined by a probability surface derived by the POTENTIAL submodel
from multiple environmental and socio-economic predictors.
The population demand and the effects of land change drivers can vary in space by subregions,
such as jurisdictional units, allowing projections across a heterogeneous landscape.
FUTURES' main strength lies in realistically modeling the
spatial structure of urban change by growing patches
that are
parameterized by size and compactness
and calibrated using historical data.
For a detailed explanation of FUTURES' components, please refer to Meentemeyer et al. (2013).
\begin{figure}[h!]
\centering
\includegraphics[width=0.9\columnwidth]{./figures/schema.pdf}
\caption{Simplified schema of FUTURES conceptual model with inputs and outputs in gray and submodels in yellow}
\label{fig:schema}
\end{figure}
The original implementation of FUTURES consisted mainly of the patch-growing
algorithm, a standalone program written in a mixture of C and C++.
The PGA program itself utilized inefficient algorithms and required raster data in ASCII
format as input leading to very slow initialization. % such as linear search in a long sorted list.
The DEMAND submodel was computed in a spreadsheet and POTENTIAL coefficients
were derived using R statistical software. No official
implementation of these submodels existed so each researcher
developed a different workflow. This made it difficult for peers to verify each other's work.
%
Several scripts for calibrating patch
characteristics derived with FRAGSTATS \cite{fragstats} existed; however, these tools
were written
in an unnecessarily low-level language
for a specific case
using the author's directory layout.
When revising the original implementation of FUTURES we identified
several issues which needed to be addressed.
First, it is important to follow best practices for scientific computing \cite{wilson2014best}
including use of a versioning system, writing documentation and testing.
We also wanted to minimize tasks that had been done manually in order to make the process more efficient and avoid errors
that are often difficult to detect.
When automating tasks
we had to compromise between
the flexibility and simplicity %efficiency
of the workflow.
%
We also focused on making FUTURES scalable
so that it can run
large scale applications
at a relatively fine spatial resolution.
Finally, we designed FUTURES to be more user-friendly and easy to test so that
anyone can confidently apply it to their research.
\section{INTEGRATION IN GRASS GIS}
GRASS GIS has had a long history as a platform for scientific models \cite{chemin2015grass}.
As an open source GIS used by researchers worldwide and one of the founding projects of OSGeo,
GRASS GIS provides a stable environment for the development of
scientific models
for studying problems from various domains
including geomorphology, hydrology, planetary science, landscape ecology, hazard mapping, archaeology, renewable energy and transportation.
Thanks to the numerous scientists and developers who have been involved, GRASS GIS today provides a large spectrum of geospatial modules
ranging from basic GIS functionality to highly specialized models.
Most of the specialized tools are not part of standard GRASS GIS installations,
but are easily accessible from the add-on repository.
There were multiple reasons for our decision to integrate FUTURES into GRASS GIS as an add-on. Some of these reasons were specific to FUTURES, but others apply to any spatial, scientific model.
Integrating a model into GIS gives both users and developers
a wide array of standard geospatial tools that simplify the implementation of a model,
and streamline pre- and post-processing and visualization.
%
%From the point of view of the model developer,
GRASS GIS provides
model developers
a raster library for highly efficient data reading and writing.
This means that FUTURES no longer has to read in ASCII files,
significantly reducing time needed for initialization.
Furthermore, raster data from FUTURES simulations are efficiently compressed.
Despite ever-increasing disk space, it is still quite important to reduce the file size, especially for
stochastic spatio-temporal simulations, which typically generate huge datasets.
In order to achieve the best speed performance,
most GRASS GIS functionality is implemented in C.
Because of this, we could easily integrate FUTURES
code, written in a mix of C and C++,
without major rewriting.
For portability reasons we later decided to use the C99 standard.
While C and C++ are the preferred languages for computationally expensive algorithms,
GRASS GIS also supports Python as the primary scripting language.
This is crucial because FUTURES had many steps of data preparation that we were easily able to automate using Python scripting.
%
Model developers can appreciate
GRASS GIS' automatic generation of
command line interfaces, Python interfaces and graphical user interfaces (GUI).
%
Simply by defining
options in C or Python modules
we can call the same module from a GUI dialog, a Python script or a Bash script.
A graphical interface makes FUTURES easy to use,
especially for users on the Windows platform.
A Python or Bash interface, however, is needed for more advanced applications such as running FUTURES in parallel on a high performance computer.
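To illustrate this mechanism, a GRASS GIS Python script declares its interface in
structured comments that the GRASS parser turns into a command line interface and
a GUI dialog. The following minimal sketch uses a hypothetical \texttt{iterations}
option, not an actual \emph{r.futures} parameter:
\begin{verbatim}
#!/usr/bin/env python
#%module
#% description: Example module interface definition
#%end
#%option
#% key: iterations
#% type: integer
#% answer: 10
#%end
import grass.script as gs

def main():
    # the parser builds the CLI and GUI from the comments above
    options, flags = gs.parser()
    print(options["iterations"])

if __name__ == "__main__":
    main()
\end{verbatim}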
GRASS GIS provides infrastructure for publishing and distributing models to users on all major platforms.
Models and tools in GRASS GIS's Add-on repository\footnote{\url{https://grass.osgeo.org/download/addons/}}
can be easily browsed and installed with their documentation,
relieving researchers
of the burden of maintaining
such
infrastructure.
\subsection{Implementation}
We implemented FUTURES as a set of GRASS GIS modules starting with a common prefix \emph{r.futures}:
\begin{itemize}[noitemsep,nolistsep]
\item \emph{r.futures.demand} extrapolates the area of developed land from population trends and projections.
\item \emph{r.futures.devpressure} computes the development pressure predictor.
\item \emph{r.futures.potential} models the development probability surface through multi-level logistic regression.
\item \emph{r.futures.calib} calibrates patch sizes and shapes.
\item \emph{r.futures.pga} simulates urban development using the patch growing algorithm.
\end{itemize}
In addition, we implemented the add-on \emph{r.sample.category} needed for the workflow.
Since its functionality is not specific to FUTURES,
we kept it separate.
All of these add-ons can be conveniently installed from GRASS GIS using the GUI or command line%
\footnote{\texttt{g.extension r.futures}}.
Each individual add-on has a manual page accessible both online and offline.
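For scripted setups, the same installation step can also be run from Python through
the GRASS scripting API; a minimal sketch, assuming a running GRASS GIS session:
\begin{verbatim}
import grass.script as gs

# install the FUTURES add-ons from the GRASS GIS add-on repository
gs.run_command("g.extension", extension="r.futures")
\end{verbatim}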
Figure \ref{fig:schemaGRASS} shows FUTURES workflow and the inputs needed for each tool.
In the following sections we describe the developed tools, their functionality and implementation in GRASS GIS.
\begin{figure}[h!]
\centering
\includegraphics[width=\columnwidth]{./figures/grass_futures_diagram.pdf}
\caption{Diagram of the FUTURES workflow showing how the \emph{r.futures} modules (yellow boxes) are chained
and what their input data are (grey boxes). As indicated by the light yellow box,
the module \emph{r.futures.calib} calls \emph{r.futures.pga}.}
\label{fig:schemaGRASS}
\end{figure}
\subsubsection{r.futures.demand}
Based on historical land development and population growth, the DEMAND submodel
(implemented as \emph{r.futures.demand})
projects the rate of per capita land consumption for each year of the simulation
and each subregion. This Python module uses the GRASS GIS Python Scripting Library
and the NumPy, SciPy and matplotlib
libraries for scientific computing
to approximate the relation between population and land consumption
with a statistical model described by a linear, logarithmic or exponential curve.
For example, a logarithmic relation means that a growing population requires
less developed land per person over time.
With enough data points, the module
can select the best curve for each subregion based on residuals.
The primary outputs are plain text files with tab-separated values
representing the number of cells to be converted to developed land each year for each subregion.
The module
plots the resulting curves and projected points for each subregion (Figure \ref{fig:demand})
so that the results can be visually inspected.
The module \emph{r.futures.demand} provides a fast way to estimate
the land demand given a large number of subregions with diverse population
trends and thus allows us to quickly explore different population scenarios.
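As a sketch of the underlying idea (not the module's actual code),
a logarithmic curve can be fitted with SciPy and scored by its residuals;
the data values below are synthetic:
\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

# synthetic example: county population vs. developed cells
population = np.array([80e3, 90e3, 100e3, 110e3])
developed = np.array([21e3, 22.4e3, 23.5e3, 24.4e3])

def logarithmic(x, a, c):
    return a * np.log(x) + c

coef, _ = curve_fit(logarithmic, population, developed)
rss = ((developed - logarithmic(population, *coef))**2).sum()
# fitting linear and exponential curves analogously and
# comparing residuals selects the best relation per subregion
\end{verbatim}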
\begin{figure}[h!]
\centering
\includegraphics[width=0.4\textwidth]{./figures/plot_demand.pdf}
\caption{An example of \emph{r.futures.demand} output plot showing
the logarithmic relation between population and land consumption
for the county with ID 37021.
Observed data are shown as blue dots and predicted data as circles.
}
\label{fig:demand}
\end{figure}
\subsubsection{r.futures.devpressure}
Development pressure is one of the most
important predictors of where development is likely to happen.
For each cell it is computed as a distance decay function of neighboring
developed cells \cite{Meentemeyer2012}.
Compared to the tool previously used for computing development pressure,
the new Python module \emph{r.futures.devpressure} provides a faster and more efficient
implementation by taking advantage of the existing GRASS GIS
module \emph{r.mfilter}, written in C, for moving window analysis with custom-designed matrix filters.
By precomputing the matrix of distances we avoid repeated distance computations
resulting in faster processing.
Because the new implementation is less memory intensive,
it can be used for larger regions than the previous tool.
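As an illustration of the precomputed filter (the actual kernel used by the module may differ),
a distance-decay matrix for the moving window can be built once and reused for the whole raster:
\begin{verbatim}
import numpy as np

radius = 10               # window radius in cells
gamma = 1.5               # decay exponent (illustrative value)
y, x = np.mgrid[-radius:radius + 1, -radius:radius + 1]
d = np.hypot(x, y)
kernel = np.where(d > 0, 1.0 / d**gamma, 0.0)
# written out as an r.mfilter matrix file, this kernel is
# applied to the binary developed raster in a single pass
\end{verbatim}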
\subsubsection{r.futures.potential}
uses multilevel logistic regression to model development
suitability based on environmental, infrastructural, and socio-economic predictors such as distance to roads or topographic slope.
We randomly sample these predictors and the observed change from undeveloped to developed cells
to estimate the coefficients of the multilevel logistic regression.
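In one common formulation (our notation; the exact model specification may differ),
the development probability $p_{ij}$ of cell $i$ in subregion $j$ is modeled as
\[
\mathrm{logit}(p_{ij}) = \beta_{0j} + \sum\nolimits_k \beta_k x_{ik},
\]
where the intercept $\beta_{0j}$ varies by subregion and the $x_{ik}$ are the site suitability predictors.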
The core of this
module is a script in the R language \cite{rstats}, which uses the package lme4 \cite{lme4}
for fitting generalized linear mixed-effects models and the package MuMIn \cite{mumin}
for automatic model selection.
The output file is a plain text file with tab-separated regression coefficients.
This script is wrapped in Python for more seamless processing
and chaining of modules.
The coupling between R, Python and GRASS GIS
is intentionally very loose to make the workflow possible in the Windows environment
where some of the other, more elegant, options such as
rpy2\footnote{rpy2 is a Python package for using R from Python (\url{http://rpy2.bitbucket.org/})} are complicated to use.
We performed stratified sampling of observed new development and predictors using the GRASS GIS add-on \emph{r.sample.category}.
Although we developed this add-on for urban growth modeling with FUTURES,
its application is much broader.
In order to encourage its use in other applications we made it a general module rather than making it part of the \emph{r.futures} tool set.
\subsubsection{r.futures.pga}
is the main engine of FUTURES -- it simulates urban growth using inputs from the
DEMAND and POTENTIAL submodels.
The patch growing algorithm (PGA) sto\-chasti\-cally allocates
a seed for new development across the development suitability surface,
challenges the seed by comparing it with a random number between 0 and 1, and
then grows a discrete patch from the seed if it survives \cite{Meentemeyer2012}.
This process repeats
until the number of converted cells specified by DEMAND is met.
The development pressure predictor and then
the development suitability values are updated based on the newly developed cells.
(The development suitability is computed internally
from predictors and regression coefficients supplied by POTENTIAL.)
We kept the original patch growing algorithm,
but significantly improved its implementation to make it
faster, more memory efficient and simpler to use.
We replaced a custom, undocumented configuration file with a standard module interface
usable from GUI or the command line, and restructured the input and output
parameters and their names so that they are easy for users to understand.
We used efficient GRASS GIS libraries for reading and writing raster data,
which minimized the time needed to initialize the simulation.
FUTURES now reads rasters in GRASS's native format
instead of ASCII files.
This decreased the time needed for model initialization from several minutes to several seconds
for a region with tens of millions of cells.
Furthermore, we replaced the static allocation of internal structures with dynamic allocation
and reduced the overall memory requirements
so that FUTURES could run on large regions with tens or hundreds of counties
as well as smaller areas like our case study.
Finally, through the use of appropriate programming techniques, such as binary search, we significantly increased the
speed of the algorithm.
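As a sketch of this technique (not the module's actual code), a seed cell can be drawn
proportionally to suitability using a cumulative sum and a binary search,
making each draw $O(\log n)$ instead of $O(n)$:
\begin{verbatim}
import bisect
import random

suitability = [0.1, 0.4, 0.05, 0.3, 0.15]  # per-cell values
cumulative = []
total = 0.0
for s in suitability:
    total += s
    cumulative.append(total)

def draw_seed():
    # binary search replaces a linear scan over all cells
    r = random.uniform(0.0, total)
    return bisect.bisect_left(cumulative, r)
\end{verbatim}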
\subsubsection{r.futures.calib}
We developed a dedicated Python module for calibrating patch sizes and shapes
that runs the module \emph{r.futures.pga}
with different combinations of patch parameters and outputs a table
with scores for each combination of patch parameters.
%
The simulation is run
multiple times for each combination
to account for the stochasticity of the model.
To speed up the calibration process, \emph{r.futures.calib} can take advantage of multiple
computer cores.
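Conceptually (a sketch, not the module's implementation), the independent runs map naturally onto a process pool:
\begin{verbatim}
from multiprocessing import Pool
import random

def run_once(params):
    # stand-in for one stochastic r.futures.pga run with the
    # given patch size/compactness pair (hypothetical helper)
    size, compactness = params
    return size, compactness, random.random()

combos = [(s, c) for s in (1.5, 2.0) for c in (0.1, 0.5)]
if __name__ == "__main__":
    with Pool() as pool:
        scores = pool.map(run_once, combos)
\end{verbatim}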
\section{CASE STUDY}
To demonstrate how the new FUTURES framework
can be used to simulate urban growth,
we present a case study for
the Asheville metropolitan area, located in the Blue Ridge Mountains in western North Carolina, USA.
The region consists of five counties with a total area of 6,271 km$^2$ and around 477,000 people
based on 2014 population estimates.
It is characterized by rapid population growth around Asheville, the largest city of the region.
New development is constrained by the steep mountainous terrain and large national and state parks.
We simulate urban growth from 2012 to 2030 using publicly available data,
including the USGS's National Land Cover Database (NLCD) \cite{nlcd2011,nlcd2006,nlcd2001,nlcdretro},
past estimates and future projections of county populations \cite{NCOSBM},
boundaries and roads provided by the United States Census Bureau's database (TIGER)
and a digital elevation model from the National Elevation Dataset (NED) distributed by the USGS.
\begin{figure}[h!]
\centering
\includegraphics[width=\columnwidth]{./figures/study_area_all.png}
\caption{2011 land cover \protect\cite{nlcd2011} and protected areas \protect\cite{anderson2011conservation} in the Asheville metropolitan area in the west of North Carolina, USA. Inset A is used in Figure~\ref{fig:results}.}
\label{fig:study_area}
\end{figure}
\subsection{Approach}
There are several steps required to run the FUTURES simulation:
\begin{itemize}[noitemsep,nolistsep]
\item Preprocess the data.
\item Estimate per capita land consumption
controlling the total area of converted land.
\item Derive the development suitability statistical model to control where the new development happens.
\item Calibrate patch size and shape.
\item Run the urban growth simulation.
\end{itemize}
\subsubsection{Data preparation}
The core input data for urban growth modeling with FUTURES are a time series of land cover maps,
which can be derived by various methods from satellite imagery.
In this study
we used the 2001, 2006 and 2011 NLCD Land Cover products
and 1992/2001 Retro\-fit Land Cover Change product to derive a 30-meter binary representation
of developed areas. We excluded national and state parks,
water bodies and wetlands from further analysis.
We used NLCD products that are available for the contiguous USA
so that this study and its workflow would be easier to reproduce and apply to other study areas.
We obtained population statistics from the North Carolina Office of State Budget and Management,
which are based on 2000 and 2010 censuses and include past as well as future estimates
of population per county for each year up to 2035. Data for the five counties studied
were extracted and formatted as a comma-separated values (CSV) file.
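One possible way to derive the binary developed layer (a sketch; the actual
preprocessing may differ) is a map algebra expression over the NLCD developed
classes 21--24:
\begin{verbatim}
import grass.script as gs

# assumes an imported raster named "nlcd2011"
gs.mapcalc("developed = "
           "if(nlcd2011 >= 21 && nlcd2011 <= 24, 1, 0)")
\end{verbatim}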
\subsubsection{DEMAND}
We derived the relation between population
and land consumption
from the series of binary rasters of developed areas and
population statistics
to model how much land will be developed
each year of the simulation.
Using the module \emph{r.futures.demand} we explored different curve fitting
methods and derived the per capita land consumption
for the period 1992 through 2011, which was characterized by
population growth with decreasing demand for land per person over time.
We expect similarly low rates of per capita land consumption
in the following years because development is restricted by the mountainous terrain and
large protected areas.
Based on RMSE and visual inspection of the plots
created by \emph{r.futures.demand} we
selected either linear or logarithmic relations % ($y = a \ln(x) + c$)
for each county, where the function coefficients were automatically determined using linear regression and non-linear least squares optimization
in \emph{r.futures.demand} (Figure \ref{fig:demand}).
\begin{figure*}[tbh]
\centering
\includegraphics[width=2.0\columnwidth]{./figures/results_maps.pdf}
\caption{Results of three realizations of multiple stochastic runs with different scenarios.
Depending on the scenario, simulated development is more diffuse (a) or more compact (c).}
\label{fig:results}
\end{figure*}
\subsubsection{POTENTIAL}
We used multilevel logistic regression to predict
where new development happens based on
environmental, infrastructural and socio-economic site suitability
predictors.
Using \emph{r.sample.category} we sampled predictors on 8000
randomly selected locations and estimated the model coefficients
using the R package lme4 integrated into the module \emph{r.futures.potential}.
The sample points were stratified by the response variable
where new sites developed since 1992 have a value of 1
and sites that are undeveloped in 2011 have a value of 0.
We included counties as the group level indicator
in the multilevel model to account for differences
across jurisdictional boundaries.
From the initial list of hypothesized predictors
(slope, distance to water, protected areas, interchanges,
travel time to cities and towns, forest occurrence and road density)
we identified a set of predictors (Table \ref{tab:predictors})
resulting in a model with the lowest AIC (Akaike information criterion) score.
We verified the robustness of the selected predictors
by repeating the random sampling and model selection process multiple times.
In addition to these predictors, we included also development pressure,
a special, dynamic predictor that is
updated during the simulation based on new simulated development to enable positive feedback.
We computed the initial development pressure raster
with \emph{r.futures.devpressure}; its subsequent updates are performed in memory during the simulation.
\begin{table}[htb]
\centering
\begin{center}
\begin{tabular}{lrr}
\toprule
Predictors & Estimate{\scriptsize *} & Std. Error \\ \midrule
Intercept (varies by county) & -2.593 & 0.269\\
Development pressure & 0.058 & 0.005\\
Road density & 0.118 & 0.007\\
Percentage of forest & -0.013 & 0.002 \\
Distance to protected areas & -0.140 & 0.039\\
Distance to water bodies & -0.148 & 0.022\\
\bottomrule
{\scriptsize * all P-values $<$ 0.001}
\end{tabular}
\end{center}
\caption{List of selected predictors and estimated coefficients
for site suitability model}
\label{tab:predictors}
\end{table}
\subsubsection{Patch calibration}
Prior to running the urban growth simulation implemented in \emph{r.futures.pga}
we calibrated the input patch compactness and size to match
the simulated patterns with the observed patterns from 1992 to 2011.
Since calibration is a time-consuming process, we ran the module
\emph{r.futures.calib} for Buncombe County
and applied the results to the rest of our study region.
We chose Buncombe County, which includes the city of Asheville, because most new development is expected to occur there.
For each combination of patch parameters we compared the patch characteristics
averaged from 20 runs of the urban growth simulation with the known patches.
Based on the score, we selected patch parameters resulting in high compactness,
which is expected for mountainous regions.
\subsubsection{Urban growth simulation}
Having collected all necessary input data, we ran
\emph{r.futures.pga} with a 1-year time step until 2035 for the entire study region at 30 m resolution.
To account for different future policies regarding new development, we explored
scenarios altering the site suitability to encourage infill or sprawl by changing
the \texttt{incentive\_power} parameter of \emph{r.futures.pga}. This parameter transforms
the probability $p$ that a cell is developed to $p^x$, where $x = 1$ represents the status quo;
higher values of $x$ result in infill and lower values in sprawl.
In addition to the status quo, we simulated scenarios with $x$ equal to 0.25, 0.5, 2 and 4.
We repeated each scenario 50 times to account for the model's stochastic behavior.
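The effect of the incentive transform on individual cells can be seen directly (illustrative values):
\begin{verbatim}
# probabilities for a high- and a low-suitability cell
p_high, p_low = 0.9, 0.2
for x in (0.25, 0.5, 1, 2, 4):
    print(x, p_high**x, p_low**x, p_high**x / p_low**x)
# x > 1 widens the gap between cells (infill);
# x < 1 narrows it (sprawl)
\end{verbatim}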
\subsection{Results}
The resulting development patterns of three realizations of the stochastic runs
are shown in Figure \ref{fig:results}
for the status quo, the infill scenario ($x = 4$) and the sprawl scenario ($x = 0.25$).
The simulated patches realistically mimic the current patches of development in shape and size
and are mostly, but not exclusively, adjacent to roads, as expected.
Furthermore, we post-processed the results to study how different urban growth policies
influence the loss of forest and agricultural land in the Asheville area
(Figure \ref{fig:results_plot})
by averaging the loss of both land use categories over the 50 runs.
In all scenarios, forest is more affected by future development than farmland.
The extreme case of urban sprawl results in twice as much forest as farmland being developed.
The status quo scenario leads to the smallest difference between the areas of forest and farmland converted.
The infill scenario develops forested areas in a similar way to sprawl, which
is not surprising considering that developed areas are largely surrounded by forest patches.
\begin{figure}[!ht]
\centering
\includegraphics[width=0.9\columnwidth]{./figures/converted_land_new.pdf}
\caption{Area in km$^2$ of converted land from forest (green) and farmland (yellow) to urban
differs for urban sprawl and infill scenarios. Numbers $0.25$ to $4$ represent the exponent $x$ which
transforms development probability $p$ to $p^x$.}
\label{fig:results_plot}
\end{figure}
Table \ref{tab:benchmark} shows the computational resources
necessary for running this case study and compares
the time and memory requirements with the original
implementation of FUTURES. Note that
our study area is fairly small (12 million cells);
when FUTURES is applied to larger regions with more projected development,
the expected speed gain is even more significant,
because we reduced the complexity of one of the core algorithms from linear to logarithmic.
Because the individual stochastic runs are independent of each other
this simulation is an
``embarrassingly parallel'' problem \cite{herlihy2012art}
in which the computation can easily be distributed across multiple computer cores.
\begin{table}[htp]
\centering
\begin{center}
\begin{tabular}{lccc}
\toprule
FUTURES version & memory & 1 run & all runs (250)\\ \midrule
original & 1.7 GB & 60 s & 4 h 10 min\\
r.futures & 0.86 GB & 19 s & 1 h 20 min\\
\bottomrule
\end{tabular}
\end{center}
\caption{Time and memory needed to run the simulations
with the old version of FUTURES and the new \emph{r.futures}
implemented in GRASS GIS on a laptop with 64-bit Ubuntu 14.04 LTS and an
Intel Core i7-4760HQ $@$ 2.10GHz, using 1 CPU core and running from an external hard drive.}
\label{tab:benchmark}
\end{table}
The input data and instructions to run the model are available as part of material
developed for the US-IALE 2016 Annual Meeting workshop on FUTURES\footnote{\url{https://grasswiki.osgeo.org/wiki/Workshop_on_urban_growth_modeling_with_FUTURES}}.
\section{Discussion}
The new FUTURES framework is split into independent GRASS GIS modules
so that the modeling workflow is flexible and extendable.
By using standardized inputs and outputs (raster layers and CSV files)
and a documented interface,
we allow FUTURES users to replace the DEMAND and POTENTIAL implementations
with their own tools, which may be better suited to the characteristics and datasets available for their study systems.
We ran all previous studies at the county level at 30 m resolution.
FUTURES, however, can be applied at larger or smaller scales
as long as data are available and the patch characteristics are properly calibrated.
Future research will explore nested scales
in order to
address the different scales of the population data
and the spatial drivers of land change.
\section{Conclusion}
We presented a new, open source version of the FUTURES urban growth model that is
integrated into GRASS GIS,
opening new possibilities
for environmental scientists
and urban planners to
project and understand the impacts of urbanization at relevant ecological and decision-making scales.
Integration into GRASS GIS allowed us to make FUTURES more efficient,
simple to use and transparent.
With documented code running on all platforms, FUTURES can now be easily tested
and applied to study sites at local to megaregional scales.
We illustrated how FUTURES can be used in a small case study of the Asheville metropolitan area.
We also provided the instructions and data
needed to reproduce this study
as a step towards more reproducible research in land change science.
\section*{ACKNOWLEDGEMENTS}\label{ACKNOWLEDGEMENTS}
We would like to thank Monica Dorning and Douglas Shoemaker
for discussing the original model implementation with us, and Brian Pickard and Georgina Sanchez for testing the new FUTURES implementation.
{%\footnotesize
\begin{spacing}{0.9}% tune the size by altering the parameter
\bibliography{FUTURES_in_GRASS.bib} % Include your own bibliography (*.bib), style is given in isprs.cls
\end{spacing}
}
\end{document}
|
[STATEMENT]
lemma juncts_eq_set_juncts_list:
"disjuncts \<phi> = set (disjuncts_list \<phi>)"
"conjuncts \<phi> = set (conjuncts_list \<phi>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. disjuncts \<phi> = set (disjuncts_list \<phi>) &&& conjuncts \<phi> = set (conjuncts_list \<phi>)
[PROOF STEP]
by (induct \<phi>) auto |
import data.finset.basic
import data.fintype.basic
/-
High-level model of Soong build system (very incomplete)
Concrete examples of libraries:
/system/bin/cameraserver
- soong world: no options specified (meaning, 'local to system')
/vendor/bin/vndservicemanager
- soong world: "vendor: true" (meaning, 'local to vendor')
libgui
- soong world: "vendor_available: false, vndk: { enabled: true, },"
- "vndk-private"
libcamerahelper
- soong world: "vendor_available: true, vndk: { enabled: true, },"
- soong world: "libcamerahelper loads libgui" -- not important to model, this
actually the relationship between vndk and vndk-private
- "vndk"
libbinder_ndk
- soong world: "vendor_available: true, isLlNdk() true"
- ll-ndk
-/
--------------------------------------------------------------------------------
/-
- Core data structures of Soong model
-/
/-
- Different kinds of libraries which have regular behavior in some sense
- This does not correspond to any particular abstraction in the codebase but
- is very useful for clarifying a mental model of how things work (lots of
- behavior can be defined purely in virtue of what library classes a library
- inhabits, without having to look at details of the library itself).
-/
@[derive decidable_eq]
inductive Library_class
| system_local: Library_class
| system_ext: Library_class
| vendor_local: Library_class
| llndk: Library_class
| vndk: Library_class
| vndk_sp: Library_class
| vndk_ext: Library_class
| vndk_private: Library_class
| product: Library_class
| recovery: Library_class
open Library_class
/-
- Being assigned some subset of these variants is a property of a given library
- Which are assigned ends up dictating the build environment to a large degree.
-/
@[derive decidable_eq]
inductive Variant
| core: Variant
| vendor_platform : Variant
| product_platform : Variant
| product_product : Variant
| ramdisk : Variant
| recovery : Variant
/-
- Specified by a user in a Soong input file.
-/
@[derive decidable_eq]
structure Library :=
(name: string)
(vendor_available: option bool)
(vendor: option bool)
-- declared as a VNDK or VNDK-SP module. The vendor variant
-- will be installed in /system instead of /vendor partition.
-- if true, then vendor_available must be explicitly set to either ⊤ or ⊥
(vndk_enabled: option bool)
-- declared as a VNDK-SP module, which is a subset of VNDK (need vndk_enabled).
-- All these modules are allowed to link to VNDK-SP or LL-NDK
-- modules only. Other dependency will cause link-type errors.
-- none/false means lib is VNDK-core
-- can link to other VNDK-core ,VNDK-SP or LL-NDK modules only.
-- Warning: sometimes erroneously referred to as support_same_process
(vndk_support_system_process: option bool)
-- Whether m.linker(*llndkStubDecorator) returns true or not
-- Assume that llndkHeadersDecorator has same value
-- Modeled as a boolean field for simplicity
(llndk_stub: bool)
(device_specific: bool)
(product_specific: bool)
(is_vndk: bool)
(is_vndkext: bool)
-- Dependencies on other libraries by name
(deps: finset string)
/-
- Based on Jiyong's summary
- The "none" corresponds to the very first check of ImageMutatorBegin, where
- it fails because having both of these options set doesn't make sense
- TODO consider all four booleans together,
- vndk-private is when vendor_available is false but vndk_enabled is true
- vndk_sp_private is the same as above but also has
- vndk_support_system_process true
-/
def assign_library_classes (lib: Library): option (finset Library_class) :=
(option.lift_or_get finset.has_union.1)
(match lib.vendor, lib.vendor_available with
| (some _), (some _) := none
| (some vendor), _ := some [vendor_local].to_finset
| _, (some tt) := some [vendor_local, system_local].to_finset
| _, (some ff) := some [system_local].to_finset -- TODO CHECK THIS
| none, none := some [system_local].to_finset
end)
$ (option.lift_or_get finset.has_union.1)
(match lib.vndk_enabled, lib.vndk_support_system_process with
| (some tt), (some tt) := some [vndk_sp].to_finset
| (some tt), _ := some [vndk].to_finset
| _, (some _) := none
| _, _ := some ∅
end)
(some $ if lib.llndk_stub then [llndk].to_finset else ∅)
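/-
- Illustrative example (hypothetical, mirroring the libgui description in the
- header comment): a library with `vendor_available := some ff` and
- `vndk_enabled := some tt` is assigned {system_local, vndk} by the function
- above, i.e. the "vndk-private" case.
-/
-- def libgui_example : Library :=
-- ⟨"libgui", some ff, none, some tt, none, ff, ff, ff, tt, ff, ∅⟩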
/-
- Based on the implementation at ~3122 of cc.go
- Relies on assign_library_classes
-/
open Variant
def library_class_to_variants : Library_class → finset Variant
| system_local:= [core].to_finset
| system_ext:= [core].to_finset
| vendor_local:= [].to_finset -- aka vendorSpecific (ignore kernelHeadersDecorator) and ignore vendor board (it's experimental) only look at line 255
| llndk:= [].to_finset -- variants from lines 187-197
| vndk:= [core].to_finset -- AND whatever is in vendor_local
| vndk_sp:= [].to_finset -- SAME as vndk
| vndk_ext:= [].to_finset -- same as vendor_local
| vndk_private:= [].to_finset -- same as vendor_local
| product:= [core].to_finset
| recovery:= [Variant.recovery].to_finset
/-
- Map over and union
- That we can do this is the justification of library_class' existence.
- All members of a library class share variants.
-/
def library_to_variants (lib: Library): option (finset Variant) :=
assign_library_classes lib >>= λ lcs, some $
(lcs.1.map library_class_to_variants).fold (∪) ∅
--------------------------------------------------------------------------------
/-
- Transitive relationship of reachability in a graph
-/
inductive depends: finset Library → Library → Library → Prop
| edge: ∀ (ctx: finset Library) (src tar: Library),
src ∈ ctx → tar ∈ ctx
→ tar.name ∈ src.deps
→ depends ctx src tar
| trans: ∀ (ctx: finset Library) (src mid tar: Library),
src ∈ ctx → tar ∈ ctx
→ depends ctx src mid
→ depends ctx mid tar
→ depends ctx src tar
--------------------------------------------------------------------------------
/-
- Concrete library examples
-/
-- def cameraserver: Library := ⟨"cameraserver", none, none, none, ∅⟩
-- def libcamerahelper: Library := ⟨"libcamerahelper", some tt, none, none, ∅⟩
-- def vndservicemanager: Library := ⟨"vndservicemanager", ff, none, none, ∅⟩
-- def libgui: Library := ⟨"libgui", some ff, none, none, ∅⟩
-- def libutils: Library := ⟨"libutils", some tt, none, none, ∅⟩
--------------------------------------------------------------------------------
/-
- Proofs about the model
-/
theorem assign_library_classes_nonempty:
forall lib: Library, ¬ (assign_library_classes lib) = some ∅
:= sorry
-- theorem double_loadable:
-- forall llndklib vndklib: LibraryWithClass, llndklib <= vndklib
-- → vndklib.double_loadable
-- := sorry
|
function [trMatrix,elecStatus] = fns_read_transfer(datafile)
% Note that Data only stores the potential values for nodes, which belong to
% the head
trMatrix = hdf5read(datafile, '/forward/transfer_matrix');
elecStatus = hdf5read(datafile, '/forward/status');
|
module Bautzen.Game.Map
import Bautzen.Terrain
import Bautzen.GameUnit
import Bautzen.Pos
import Data.Maybe.Extra
import Data.Nat
import Data.Vect
V : Terrain -> Terrain
V = Village
Wd : Terrain
Wd = Wood
Cl : Terrain
Cl = Clear
Rg : Terrain
Rg = Rough
RW : Terrain
RW = RoughWood
H : Terrain -> Terrain
H = Hill
SA : Terrain -> Terrain
SA = SupplySource Allies
SX : Terrain -> Terrain
SX = SupplySource Axis
T : Terrain
T = Town
N : Terrain
N = Clear
A : Side
A = Allies
terrains : Vect 13 (Vect 23 Terrain)
terrains = [-- 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 22 23
{- 01 -} [ Cl , Wd , Cl , Wd , Wd , V Cl , Cl , Wd , Cl , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Cl , SA (V Wd) , Cl , Wd , Cl ]
{- 02 -} , [ Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , V Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , V Wd , Wd , V Wd , V Wd , Wd , Wd ]
{- 03 -} , [ Wd , Wd , Wd , Wd , Wd , Cl , V Wd , Wd , Wd , Cl , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Wd , Cl ]
{- 04 -} , [ SX Wd , Cl , Wd , Wd , Wd , Cl , V Cl , Cl , Cl , V Cl , Cl , Cl , V Wd , Wd , V Wd , Wd , V Wd , Cl , Cl , Cl , V Cl , Cl , Cl ]
{- 05 -} , [ Cl , Wd , Wd , Cl , Cl , Cl , Cl , Cl , Cl , Cl , Cl , Cl , Cl , Cl , Wd , Wd , Wd , Wd , V Cl , Cl , Cl , Cl , SA Cl ]
{- 06 -} , [ Cl , Wd , Wd , Cl , Cl , Cl , V Cl , Cl , Cl , H RW , RW , Wd , RW , H (V RW) , Cl , V Wd , Wd , Wd , V Wd , Wd , Cl , T , SA Cl ]
{- 07 -} , [ SX Wd , Wd , V Wd , H Rg, Cl , Cl , H Rg , T , Cl , Cl , RW , V Cl , Cl , V Cl , Wd , Cl , Cl , Wd , Cl , Wd , V Cl , V Cl , Cl ]
{- 08 -} , [ Wd , RW , Wd , RW , Cl , Cl , V Cl , Rg , V Cl , Cl , V Cl , Cl , Cl , Cl , Cl , Cl , V Cl , Cl , Cl , Cl , V Cl , Cl , Cl ]
{- 09 -} , [ Cl , Rg , Wd , Rg , Cl , Wd , Cl , H RW , Wd , V Cl , Cl , V Cl , Cl , T , Cl , Cl , Cl , Cl , V Cl , Cl , Cl , Cl , Cl ]
{- 10 -} , [ SX Wd , V Cl , V Cl , Wd , V Cl , Cl , Wd , Cl , V Rg , Rg , Wd , Cl , Cl , Cl , T , Cl , Cl , Cl , V Cl , Cl , Cl , Cl , Cl ]
{- 11 -} , [ Wd , V Wd , Cl , Cl , Wd , Cl , Cl , Cl , T , H Rg , Cl , RW , RW , Rg , Cl , H RW , RW , H Rg , Cl , Cl , Cl , V Cl , Cl ]
{- 12 -} , [ Wd , Wd , V Wd , Wd , Cl , Cl , Rg , SX Rg , Rg , Cl , RW , Cl , Rg , Cl , RW , Cl , Cl , SX Cl , T , Cl , Cl , Cl , Cl ]
{- 13 -} , [ T , N , SX (V Cl) , N , Cl , N , Cl , N , Cl , N , Cl , N , Cl , N , Cl , N , Cl , N , Cl , N , Cl , N , Cl ]
]
public export
PartialGameMap : Map
PartialGameMap = MkMap positions []
where
positions : List (Pos, Terrain)
positions = [(Hex 0 0, Clear),
(Hex 0 1, Clear),
(Hex 0 2, Clear),
(Hex 0 3, SupplySource Axis $ Clear),
(Hex 0 4, Clear),
(Hex 1 0, Clear),
(Hex 1 1, Clear),
(Hex 1 2, Clear),
(Hex 1 3, Clear),
(Hex 1 4, Clear),
(Hex 2 0, Clear),
(Hex 2 1, Clear),
(Hex 2 2, Clear),
(Hex 2 3, Clear),
(Hex 2 4, Clear),
(Hex 3 0, Clear),
(Hex 3 1, Clear),
(Hex 3 2, Clear),
(Hex 3 3, Clear),
(Hex 3 4, Clear),
(Hex 4 0, Clear),
(Hex 4 1, Clear),
(Hex 4 2, Clear),
(Hex 4 3, Clear),
(Hex 4 4, Clear)]
public export
FullGameMap : Map
FullGameMap = MkMap positions []
where
mkPosition : (c : Nat) -> (r : Nat) -> Maybe ( Pos, Terrain)
mkPosition c r with (natToFin c 23, natToFin r 13, isLTE c 22, isLTE r 12)
mkPosition c r | (Just col, Just row, Yes cprf, Yes rprf) =
let terrain = index col $ index row terrains
in Just (Hex c r, terrain)
mkPosition c r | (_, _, _, _) = Nothing
positions : List (Pos, Terrain)
positions = catMaybes $ [ mkPosition c r | c <- [ 0 .. 22 ], r <- [ 0 .. 12 ]]
|
Amatuka On a minor note: in Japanese, '二人' is common usage, while in English 'the pair' would generally sound odd, and 'they' would be used in its place.
Amatuka Formed from Verb -masu base + nagara.
beholder While "ながら" is a word used in speaking, "つつ" is used in writing. If you know of other similar cases among the words listed on JGram, please post this kind of explanation.
anon ながら is used both in speaking and in writing.
yookoso ながら can only be used when the subject of the two actions is the same; use Vte-iru aida if there are two different people doing two things at the same time. Also, the action expressed by Vm nagara is always secondary to the action expressed in the main clause.
Should not "nomitsu_tsu_kete ita" be changed to "nomitsu_zu_kete ita"?
Vconj + ながら "while doing~"
Vconj + nagara expresses simultaneous actions performed by one subject.
Of the two verbs in a sentence, -nagara is added to the one that would take -ing in English.
------------------------------------------------------------------------------
-- Example using distributive laws on a binary operation via Agsy
------------------------------------------------------------------------------
{-# OPTIONS --allow-unsolved-metas #-}
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
-- Tested with the development version of the Agda standard library on
-- 02 February 2012.
module Agsy.DistributiveLaws.TaskB where
open import Relation.Binary.PropositionalEquality
open ≡-Reasoning
infixl 7 _·_
------------------------------------------------------------------------------
-- Distributive laws axioms
postulate
D : Set -- The universe
_·_ : D → D → D -- The binary operation.
leftDistributive : ∀ x y z → x · (y · z) ≡ (x · y) · (x · z)
rightDistributive : ∀ x y z → (x · y) · z ≡ (x · z) · (y · z)
-- Properties
taskB : ∀ u x y z →
(x · y · (z · u)) · ((x · y · ( z · u)) · (x · z · (y · u))) ≡
x · z · (y · u)
taskB u x y z = {!-t 20 -m!} -- Agsy fails
|
#include <boost/spirit/home/lex/tokenize_and_parse.hpp>
|
From isla Require Import opsem.
Definition a80168 : isla_trace :=
WriteReg "R2" [] (RegVal_Base (Val_Bits (BV 64%N 0x5054%Z))) Mk_annot :t:
Smt (DeclareConst 46%Z (Ty_BitVec 64%N)) Mk_annot :t:
ReadReg "_PC" [] (RegVal_Base (Val_Symbolic 46%Z)) Mk_annot :t:
Smt (DefineConst 47%Z (Manyop (Bvmanyarith Bvadd) [Val (Val_Symbolic 46%Z) Mk_annot; Val (Val_Bits (BV 64%N 0x4%Z)) Mk_annot] Mk_annot)) Mk_annot :t:
WriteReg "_PC" [] (RegVal_Base (Val_Symbolic 47%Z)) Mk_annot :t:
tnil
.
|
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! Originally created by James McDonagh at the University of Manchester 2015, in the Popelier group !
! Components of the module are acknowledged to Dr Tanja van Mourik University of St Andrews !
! Licensed under MIT License !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Originally created by James.
! Version 1.1
! CHANGE LOG
! Version 1  : Parameters for calculating structural parameters
! Version 1.1: Modular format and incorporation in the Hermes program.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
module module_constants
implicit none
real, parameter :: pi = 3.1415926536
real, parameter :: e = 2.7182818285
real, parameter :: degrees_to_radians = 0.0174532925
Real, parameter :: radians_to_degrees = 57.2957795
contains
subroutine show_constants()
Print*, "Constants used are "
Write(*,'(A,f12.10)') "pi = ",pi
Write(*,'(A,f12.10)') "e = ",e
Write(*,'(A,f12.10)') "Degrees to radians = ",degrees_to_radians
Write(*,'(A,f12.10)') "Radians to degrees = ",radians_to_degrees
end subroutine show_constants
end module module_constants
|
Inductive Either (a b : Type) : Type :=
| Left : a -> Either a b
| Right : b -> Either a b
.
Print or.
Arguments Left {a} {b} _.
Arguments Right {a} {b} _.
(*
Inductive or (A B : Prop) : Prop :=
| or_introl : A -> A \/ B
| or_intror : B -> A \/ B
*)
(* this conversion is possible *)
Definition toOr (A B:Prop) (p:Either A B) : or A B :=
match p with
| Left x => or_introl x
| Right y => or_intror y
end.
(* but this one is not:
Incorrect elimination of "p" in the inductive type "or":
the return type has sort "Set" while it should be "Prop".
Elimination of an inductive object of sort Prop
is not allowed on a predicate in sort Set
because proofs can be eliminated only to build proofs.
Definition toEither (A B:Prop) (p:or A B) : Either A B :=
match p with
| or_introl x => Left x
| or_intror y => Right y
end.
*)
|
The exact date of "Ode to a Nightingale", as well as "Ode on Indolence", "Ode on Melancholy", and "Ode on a Grecian Urn", is unknown, as Keats dated all as 'May 1819'. However, he worked on the four poems together, and there is a unity in both their stanza forms and their themes. The exact order in which the poems were written is also unknown, but they form a sequence within their structures. While Keats was writing "Ode on a Grecian Urn" and the other poems, Brown transcribed copies of the poems and submitted them to Richard Woodhouse. During this time, Benjamin Haydon, Keats's friend, was given a copy of "Ode to a Nightingale", and he shared the poem with the editor of the Annals of the Fine Arts, James Elmes. Elmes paid Keats a small sum of money, and the poem was published in the July issue. The poem was later included in Keats's 1820 collection of poems, Lamia, Isabella, The Eve of St Agnes, and Other Poems.
|
[STATEMENT]
lemma Scons_inject2: "Scons M N = Scons M' N' ==> N=N'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Scons M N = Scons M' N' \<Longrightarrow> N = N'
[PROOF STEP]
apply (erule equalityE)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Scons M N \<subseteq> Scons M' N'; Scons M' N' \<subseteq> Scons M N\<rbrakk> \<Longrightarrow> N = N'
[PROOF STEP]
apply (iprover intro: equalityI Scons_inject_lemma2)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule XML2_jll
using Base
using Base: UUID
import JLLWrappers
JLLWrappers.@generate_main_file_header("XML2")
JLLWrappers.@generate_main_file("XML2", UUID("02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a"))
end # module XML2_jll
|
lemma has_field_derivative_Im[derivative_intros]: "(f has_vector_derivative D) F \<Longrightarrow> ((\<lambda>x. Im (f x)) has_field_derivative (Im D)) F" |
[GOAL]
α : Sort u_1
a : α
⊢ out (mk a) = a
[PROOFSTEP]
let h := (mk a).2
[GOAL]
α : Sort u_1
a : α
h : ∃ a_1, (fun b => a_1 = b) = (mk a).fst := (mk a).snd
⊢ out (mk a) = a
[PROOFSTEP]
show Classical.choose h = a
[GOAL]
α : Sort u_1
a : α
h : ∃ a_1, (fun b => a_1 = b) = (mk a).fst := (mk a).snd
⊢ Classical.choose h = a
[PROOFSTEP]
have := Classical.choose_spec h
[GOAL]
α : Sort u_1
a : α
h : ∃ a_1, (fun b => a_1 = b) = (mk a).fst := (mk a).snd
this : (fun b => Classical.choose h = b) = (mk a).fst
⊢ Classical.choose h = a
[PROOFSTEP]
exact cast (congr_fun this a).symm rfl
[GOAL]
α : Sort u_1
s : α → Prop
h : ∃ a, (fun b => a = b) = s
⊢ mk (out { fst := s, snd := h }) = { fst := s, snd := h }
[PROOFSTEP]
simp [mk]
[GOAL]
α : Sort u_1
s : α → Prop
h : ∃ a, (fun b => a = b) = s
⊢ { fst := fun b => out { fst := s, snd := h } = b,
snd := (_ : ∃ a, (fun b => a = b) = fun b => out { fst := s, snd := h } = b) } =
{ fst := s, snd := h }
[PROOFSTEP]
congr
[GOAL]
case e_fst
α : Sort u_1
s : α → Prop
h : ∃ a, (fun b => a = b) = s
⊢ (fun b => out { fst := s, snd := h } = b) = s
[PROOFSTEP]
exact Classical.choose_spec h
[GOAL]
α : Sort u_1
a b : Erased α
h : out a = out b
⊢ a = b
[PROOFSTEP]
simpa using congr_arg mk h
[GOAL]
α : Sort u_1
β : Sort u_2
f : α → β
a : Erased α
⊢ out (map f a) = f (out a)
[PROOFSTEP]
simp [map]
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α β : Type ?u.5529}, Functor.mapConst = Functor.map ∘ Function.const β
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
⊢ Functor.mapConst = Functor.map ∘ Function.const β✝
[PROOFSTEP]
ext
[GOAL]
case h.h.h
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
x✝¹ : α✝
x✝ : Erased β✝
⊢ out (Functor.mapConst x✝¹ x✝) = out ((Functor.map ∘ Function.const β✝) x✝¹ x✝)
[PROOFSTEP]
simp [Functor.mapConst]
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α : Type ?u.5529} (x : Erased α), id <$> x = x
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ : Type ?u.5529
x✝ : Erased α✝
⊢ id <$> x✝ = x✝
[PROOFSTEP]
ext
[GOAL]
case h
src✝ : Monad Erased := Erased.Monad
α✝ : Type ?u.5529
x✝ : Erased α✝
⊢ out (id <$> x✝) = out x✝
[PROOFSTEP]
simp
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α β : Type ?u.5529} (x : Erased α) (y : Erased β),
(SeqLeft.seqLeft x fun x => y) = Seq.seq (Function.const β <$> x) fun x => y
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
x✝ : Erased α✝
y✝ : Erased β✝
⊢ (SeqLeft.seqLeft x✝ fun x => y✝) = Seq.seq (Function.const β✝ <$> x✝) fun x => y✝
[PROOFSTEP]
ext
[GOAL]
case h
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
x✝ : Erased α✝
y✝ : Erased β✝
⊢ out (SeqLeft.seqLeft x✝ fun x => y✝) = out (Seq.seq (Function.const β✝ <$> x✝) fun x => y✝)
[PROOFSTEP]
simp [Seq.seq, Functor.mapConst, SeqLeft.seqLeft]
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α β : Type ?u.5529} (x : Erased α) (y : Erased β),
(SeqRight.seqRight x fun x => y) = Seq.seq (Function.const α id <$> x) fun x => y
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
x✝ : Erased α✝
y✝ : Erased β✝
⊢ (SeqRight.seqRight x✝ fun x => y✝) = Seq.seq (Function.const α✝ id <$> x✝) fun x => y✝
[PROOFSTEP]
ext
[GOAL]
case h
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
x✝ : Erased α✝
y✝ : Erased β✝
⊢ out (SeqRight.seqRight x✝ fun x => y✝) = out (Seq.seq (Function.const α✝ id <$> x✝) fun x => y✝)
[PROOFSTEP]
simp [Seq.seq, Functor.mapConst, SeqRight.seqRight]
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α β : Type ?u.5529} (g : α → β) (x : Erased α), (Seq.seq (pure g) fun x_1 => x) = g <$> x
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
g✝ : α✝ → β✝
x✝ : Erased α✝
⊢ (Seq.seq (pure g✝) fun x => x✝) = g✝ <$> x✝
[PROOFSTEP]
ext
[GOAL]
case h
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
g✝ : α✝ → β✝
x✝ : Erased α✝
⊢ out (Seq.seq (pure g✝) fun x => x✝) = out (g✝ <$> x✝)
[PROOFSTEP]
simp [Seq.seq, Functor.mapConst, SeqRight.seqRight]
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α β : Type ?u.5529} (f : α → β) (x : Erased α),
(do
let a ← x
pure (f a)) =
f <$> x
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
f✝ : α✝ → β✝
x✝ : Erased α✝
⊢ (do
let a ← x✝
pure (f✝ a)) =
f✝ <$> x✝
[PROOFSTEP]
ext
[GOAL]
case h
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
f✝ : α✝ → β✝
x✝ : Erased α✝
⊢ (out do
let a ← x✝
pure (f✝ a)) =
out (f✝ <$> x✝)
[PROOFSTEP]
simp
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α β : Type ?u.5529} (f : Erased (α → β)) (x : Erased α),
(do
let x_1 ← f
x_1 <$> x) =
Seq.seq f fun x_1 => x
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
f✝ : Erased (α✝ → β✝)
x✝ : Erased α✝
⊢ (do
let x ← f✝
x <$> x✝) =
Seq.seq f✝ fun x => x✝
[PROOFSTEP]
ext
[GOAL]
case h
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
f✝ : Erased (α✝ → β✝)
x✝ : Erased α✝
⊢ (out do
let x ← f✝
x <$> x✝) =
out (Seq.seq f✝ fun x => x✝)
[PROOFSTEP]
simp [Seq.seq]
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α β : Type ?u.5529} (x : α) (f : α → Erased β), pure x >>= f = f x
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
x✝ : α✝
f✝ : α✝ → Erased β✝
⊢ pure x✝ >>= f✝ = f✝ x✝
[PROOFSTEP]
ext
[GOAL]
case h
src✝ : Monad Erased := Erased.Monad
α✝ β✝ : Type ?u.5529
x✝ : α✝
f✝ : α✝ → Erased β✝
⊢ out (pure x✝ >>= f✝) = out (f✝ x✝)
[PROOFSTEP]
simp
[GOAL]
src✝ : Monad Erased := Erased.Monad
⊢ ∀ {α β γ : Type ?u.5529} (x : Erased α) (f : α → Erased β) (g : β → Erased γ),
x >>= f >>= g = x >>= fun x => f x >>= g
[PROOFSTEP]
intros
[GOAL]
src✝ : Monad Erased := Erased.Monad
α✝ β✝ γ✝ : Type ?u.5529
x✝ : Erased α✝
f✝ : α✝ → Erased β✝
g✝ : β✝ → Erased γ✝
⊢ x✝ >>= f✝ >>= g✝ = x✝ >>= fun x => f✝ x >>= g✝
[PROOFSTEP]
ext
[GOAL]
case h
src✝ : Monad Erased := Erased.Monad
α✝ β✝ γ✝ : Type ?u.5529
x✝ : Erased α✝
f✝ : α✝ → Erased β✝
g✝ : β✝ → Erased γ✝
⊢ out (x✝ >>= f✝ >>= g✝) = out (x✝ >>= fun x => f✝ x >>= g✝)
[PROOFSTEP]
simp
|
#' Score genes against a patient's HPO phenotype profile
#'
#' @param HPOpatient character vector of HPO term identifiers describing the
#'   patient's phenotypes
#'
#' @return a numeric vector of scores in (0, 1), one per gene in \code{total_unique}
#' @export
#'
#' @examples
#' \dontrun{
#' # hypothetical HPO identifiers
#' scores <- proteinScore(c("HP:0001250", "HP:0004322"))
#' }
proteinScore<-function(HPOpatient)
{
library(igraph)
library(Matrix)
# force evaluation of lazy-loaded package datasets
HPO2genes<-HPO2genes
treureHPO<-treureHPO
HPOqueryGene<-HPOqueryGene
total_unique<-total_unique
g1<-as.undirected(g1)
HPOpatient<-as.matrix(unique(HPOpatient))
treureHPO <- paste(treureHPO,collapse="|")
posTreure<-grep(treureHPO, HPO2genes[,2])
if(length(posTreure)>0) HPO2genes<-HPO2genes[-posTreure,]
# Delete some irrelevant HPOs
posTreure<-grep(treureHPO, HPOpatient)
if(length(posTreure)>0) HPOpatient<-HPOpatient[-posTreure]
#create a subgraph with the expanded (1 order) patient HPOs
HPOorig_expanded<-unique(unlist(sapply(HPOpatient, function (x) rownames(as.matrix(igraph::ego(g1, order = 1, x)[[1]])))))
g.sub <- induced.subgraph(graph = g1, HPOorig_expanded)
res<-cluster_edge_betweenness(g.sub)
HPOorig_expanded<-cbind(res$names,res$membership)
HPOorigGroups <<- HPOorig_expanded[match(HPOpatient,HPOorig_expanded[,1]),]
genes <<- unique(HPO2genes[, 1]) #genes with HPO
posGenes<-match(genes,rownames(HPOadj))
acumulat<-Matrix(matrix(0,ncol = 1,nrow =length(rownames(HPOadj)[posGenes])))
acumulatFreq<-acumulat
for(z in 1:length(HPOpatient))
{
pos<-match(HPOpatient[z],colnames(HPOadj))
HPOPatientItem = HPOdistance[pos,]
column<-HPOadj[posGenes,] %*% HPOPatientItem
acumulat<-cbind(acumulat,column)
}
acumulat<-acumulat[,-1]
if(length(HPOorigGroups[,2])!=length(unique(HPOorigGroups[,2])))
{
memo = ""
dupli = unique(HPOorigGroups[duplicated(HPOorigGroups[,2]),2])
for(i in 1:length(dupli))
{
pos<-which(HPOorigGroups[,2] == dupli[i])
new<-Matrix::rowSums(acumulat[,pos])
acumulat[,pos[1]]<-new
memo<-c(memo,pos[-1])
}
memo <- memo[-1]
acumulat<-acumulat[,-as.numeric(memo)]
}
# binarize: 1 where a gene matched an HPO group, 0 elsewhere
acumulat = acumulat / acumulat
acumulat[is.na(acumulat)]=0
acumulat = Matrix(acumulat)
HPOmatch_quant<-Matrix::rowSums(acumulat)
q=HPOmatch_quant-1
m=length(unique(HPOorigGroups[,2]))
n=length(V(g1)$name)-length(unique(HPOorigGroups[,2]))
k=as.matrix(HPOqueryGene)*10
stats<-phyper(q,m,n,k,lower.tail = FALSE, log.p = FALSE)
gc()
stats[stats == -Inf] = 1
stats[stats == 0] = 10^(log10(min(stats[stats != 0]))-1)
testResult<-stats
D = abs(log10(abs(testResult)))
Dred <- as.numeric(D)
pos <- match(genes, total_unique[,2])
D <- matrix(0, nrow = length(total_unique[,2]))
D[pos] <- Dred
DNormed <- (D - min(D, na.rm = TRUE))/(max(D, na.rm = TRUE) - min(D, na.rm = TRUE))
Y = 1/(1 + exp((DNormed * (-12)) + log(9999)))
Y = as.numeric(Y)
return(Y)
}
|
lemma interval_subset_is_interval: assumes "is_interval S" shows "cbox a b \<subseteq> S \<longleftrightarrow> cbox a b = {} \<or> a \<in> S \<and> b \<in> S" (is "?lhs = ?rhs") |
(**
This file is part of the Elfic library
Copyright (C) Boldo, Clément, Leclerc
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
COPYING file for more details.
*)
From Coq Require Import
Utf8
ssreflect
Rdefinitions
RIneq
Rbasic_fun
Lia
Lra
.
From Coquelicot Require Import
Hierarchy
Rbar
.
Require Import
hierarchy_notations
simpl_fun
sigma_algebra
measurable_fun
series
R_compl
.
Open Scope nat_scope.
Open Scope R_scope.
(*
In this first section, I reproduce the Coquelicot code exactly,
removing the existentials in order to obtain constructive proofs.
*)
Section filterlim_complete_constr.
Context {T : Type}.
Context {U : CompleteSpace}.
Lemma filterlim_locally_closely_correct :
forall {F} {FF : ProperFilter F} (f : T -> U),
filterlim (fun x => (f (fst x), f (snd x))) (filter_prod F F) closely ->
filterlim f F (locally (lim (filtermap f F))).
Proof.
intros F FF f H.
intros P [eps HP].
refine (_ (complete_cauchy (filtermap f F) _ _ eps)).
+ now apply filter_imp.
+ apply cauchy_distance'.
apply filter_le_trans with (2 := H).
apply prod_filtermap_le.
Qed.
Lemma filterlim_locally_cauchy_correct :
forall {F} {FF : ProperFilter F} (f : T -> U),
(forall eps : posreal, exists P, F P /\ forall u v : T, P u -> P v -> ball (f u) eps (f v)) ->
filterlim f F (locally (lim (filtermap f F))).
Proof.
intros F FF f H.
apply (filterlim_locally_closely_correct f).
apply filterlim_closely => //.
Qed.
End filterlim_complete_constr.
Definition Cauchy_seq {S : UniformSpace} (u : nat -> S) : Prop :=
∀ ɛ : R, ɛ > 0 -> ∃ n : nat, ∀ p q : nat,
p ≥ n -> q ≥ n -> ball (u p) ɛ (u q).
Section Cauchy_lim_seq_def.
Context {S : CompleteSpace}.
Definition lim_seq (u : nat -> S) :=
lim (filtermap u eventually).
Lemma lim_seq_ext :
∀ u u' : nat -> S,
(∀ n : nat, u n = u' n) -> ∀ l : S,
filterlim u eventually (locally l) -> filterlim u' eventually (locally l).
Proof.
move => u u' Huu' l Hl.
apply filterlim_ext with u => //.
Qed.
Lemma filterlim_cauchy_seq_correct :
∀ u : nat → S,
(∀ eps : posreal, ∃ P, eventually P ∧ ∀ p q : nat, P p → P q → ball (u p) eps (u q))
-> filterlim u eventually (locally (lim_seq u)).
Proof.
move => u Hu.
apply filterlim_locally_cauchy_correct => //.
Qed.
Lemma Cauchy_seq_eventually {u : nat -> S} :
Cauchy_seq u -> (∀ eps : posreal, ∃ P, eventually P ∧ ∀ p q : nat, P p → P q → ball (u p) eps (u q)).
Proof.
unfold Cauchy_seq => Hu ɛ.
case: ɛ => ɛ Pɛ.
pose Pɛ' := Rlt_gt _ _ Pɛ; clearbody Pɛ'.
case: (Hu ɛ Pɛ') => N HuN.
exists (fun n => n ≥ N); split => //.
exists N => //.
Qed.
Lemma is_filterlim_Cauchy_lim_seq :
∀ (u : nat -> S), Cauchy_seq u ->
filterlim u eventually (locally (lim_seq u)).
Proof.
move => u /Cauchy_seq_eventually π.
apply (filterlim_cauchy_seq_correct u π).
Qed.
End Cauchy_lim_seq_def.
Section lim_seq_prop.
Context {S : UniformSpace}.
Context {T : UniformSpace}.
Lemma lim_seq_cte :
∀ s : S,
filterlim (fun _ : nat => s) eventually (locally s).
Proof.
move => s.
apply filterlim_const.
Qed.
Lemma lim_seq_continuity :
∀ f : S -> T, ∀ s : S,
filterlim f (locally s) (locally (f s))
-> ∀ u : nat -> S,
filterlim u eventually (locally s) -> filterlim (fun n => f (u n)) eventually (locally (f s)).
Proof.
move => f s Hf u Hu.
apply filterlim_comp with (locally s) => //.
Qed.
Lemma lim_seq_pair :
∀ u : nat -> S, ∀ v : nat -> T, ∀ lu : S, ∀ lv : T,
filterlim u eventually (locally lu) ->
filterlim v eventually (locally lv) ->
filterlim (fun n => (u n, v n)) eventually (filter_prod (locally lu) (locally lv)).
Proof.
move => u v lu lv Hu Hv.
apply filterlim_pair => //.
Qed.
End lim_seq_prop.
Definition NM_Cauchy_seq {A : AbsRing} {E : NormedModule A} (u : nat -> E) : Prop :=
∀ ɛ : R, ɛ > 0 -> ∃ n : nat, ∀ p q : nat,
p ≥ n -> q ≥ n -> ball_norm (u p) ɛ (u q).
Section NM_Cauchy_lim_seq_def.
Context {A : AbsRing}.
Context {E : CompleteNormedModule A}.
Lemma NM_Cauchy_seq_Cauchy_seq :
∀ u : nat -> E, NM_Cauchy_seq u -> Cauchy_seq u.
Proof.
move => u.
unfold NM_Cauchy_seq, Cauchy_seq.
move => Hnorm ɛ Hɛ.
case: (Hnorm ɛ Hɛ).
move => N Hn.
exists N => p q Hp Hq.
pose HnormNpq := Hn p q Hp Hq; clearbody HnormNpq.
apply: norm_compat1 => //.
Qed.
Lemma NM_Cauchy_seq_lim_seq_correct :
∀ (u : nat -> E), ∀ (π : NM_Cauchy_seq u),
is_lim_seq u (lim_seq u).
Proof.
move => u /NM_Cauchy_seq_Cauchy_seq π.
apply: is_filterlim_Cauchy_lim_seq => //.
Qed.
Lemma NM_is_lim_seq_unique :
∀ u : nat -> E, ∀ l l' : E,
is_lim_seq u l -> is_lim_seq u l' -> l = l'.
Proof.
move => u l l' Hl Hl';
unfold is_lim_seq in Hl;
unfold is_lim_seq in Hl'.
pose H := filterlim_locally_unique u l l'.
pose H' := H eventually (Proper_StrongProper _ eventually_filter);
clearbody H'.
apply H' => //.
Qed.
Lemma lim_seq_eq :
∀ u : nat -> E, ∀ l : E,
is_lim_seq u l -> lim_seq u = l.
Proof.
move => u l Hl.
assert (is_lim_seq u (lim_seq u)).
apply NM_Cauchy_seq_lim_seq_correct.
unfold NM_Cauchy_seq => ɛ Hɛ.
move: Hl => /filterlim_locally_ball_norm => Hl.
pose sighalfɛ := RIneq.mkposreal (ɛ * /2) (R_compl.Rmult_lt_pos_pos_pos _ _ (RIneq.Rgt_lt _ _ Hɛ) RIneq.pos_half_prf).
case: (Hl sighalfɛ) => N /= HN; clear Hl.
exists N => p q Hp Hq; replace ɛ with (ɛ*/2 + ɛ*/2)
by rewrite Rlimit.eps2 => //.
apply ball_norm_triangle with l.
apply (ball_norm_sym l (u p) sighalfɛ) => /=.
1, 2 : apply HN => //.
apply (NM_is_lim_seq_unique u) => //.
Qed.
End NM_Cauchy_lim_seq_def.
Section NM_lim_seq_prop.
Open Scope hy_scope.
Open Scope fun_scope.
Lemma is_lim_seq_epsilon {A : AbsRing} {E : NormedModule A} :
∀ u : nat -> E, ∀ l : E,
is_lim_seq u l <->
(∀ ɛ : R, 0 < ɛ -> ∃ N : nat, ∀ n : nat, N ≤ n -> (‖ minus (u n) l ‖%hy < ɛ)%R).
Proof.
move => u l; split.
unfold is_lim_seq, filterlim, eventually.
move => H.
assert (filter_le
(filtermap u (λ P : nat → Prop, ∃ N : nat, ∀ n : nat, N ≤ n → P n))
(locally_norm l)).
apply filter_le_trans with (locally l) => //.
apply locally_le_locally_norm.
clear H => ɛ Hɛ.
unfold filter_le, filtermap, locally_norm in H0.
assert (∃ η : posreal, ∀ y : E, ball_norm l η y -> ball_norm l ɛ y).
exists (RIneq.mkposreal ɛ Hɛ) => //.
case: (H0 _ H) => N HN.
exists N; unfold ball_norm in HN.
assumption.
move => Hloc.
unfold is_lim_seq, filterlim, eventually.
suff: (filter_le (filtermap u (λ P : nat → Prop, ∃ N : nat, ∀ n : nat, N ≤ n → P n))
(locally_norm l)).
move => H.
apply filter_le_trans with (locally_norm l) => //.
apply locally_norm_le_locally.
unfold locally_norm, filter_le, filtermap.
move => P; case; case => ɛ Hɛ Hloc'.
case: (Hloc ɛ Hɛ) => N HN.
exists N => n; move => /HN/Hloc'//.
Qed.
Context {A : AbsRing}.
Context {E : NormedModule A}.
Lemma lim_seq_plus :
∀ u v : nat -> E, ∀ lu lv : E,
is_lim_seq u lu -> is_lim_seq v lv ->
is_lim_seq (u + v) (lu + lv)%hy.
Proof.
move => u v lu lv Hu Hv.
apply
(filterlim_comp
nat (E * E) E
(fun n : nat => (u n, v n)) (fun c : E * E => fst c + snd c)%hy
eventually (filter_prod (locally lu) (locally lv)) (locally (lu + lv)%hy)
).
apply lim_seq_pair => //.
apply filterlim_plus.
Qed.
Lemma lim_seq_scal :
∀ a : nat -> A, ∀ u : nat -> E, ∀ la : A, ∀ lu : E,
is_lim_seq a la -> is_lim_seq u lu ->
is_lim_seq (fun n : nat => (a n) ⋅ (u n))%hy (la ⋅ lu)%hy.
Proof.
move => a u la lu Ha Hu.
apply
(filterlim_comp
nat (A * E) E
(fun n : nat => (a n, u n)) (fun c : A * E => (fst c) ⋅ (snd c))%hy
eventually (filter_prod (locally la) (locally lu)) (locally (la ⋅ lu)%hy)
).
apply lim_seq_pair => //.
apply filterlim_scal.
Qed.
Lemma lim_seq_scal_r :
∀ a : A, ∀ u : nat -> E, ∀ lu : E,
is_lim_seq u lu ->
is_lim_seq (a ⋅ u) (a ⋅ lu)%hy.
Proof.
move => a u lu Hu.
apply (lim_seq_continuity (fun x : E => a ⋅ x)%hy) => //.
apply filterlim_scal_r.
Qed.
Lemma lim_seq_scal_l :
∀ a : nat -> A, ∀ u : E, ∀ la : A,
is_lim_seq a la ->
is_lim_seq (fun n => a n ⋅ u)%hy (la ⋅ u)%hy.
Proof.
move => a u la Ha.
apply (lim_seq_continuity (fun b : A => b ⋅ u)%hy) => //.
apply filterlim_scal_l.
Qed.
Lemma lim_seq_opp :
∀ u : nat -> E, ∀ lu : E,
is_lim_seq u lu ->
is_lim_seq (fun n : nat => opp (u n)) (opp lu).
Proof.
move => u lu Hu.
apply (lim_seq_continuity (fun x : E => opp x)) => //.
apply filterlim_opp.
Qed.
Lemma lim_seq_mult :
∀ a b : nat -> A, ∀ la lb : A,
is_lim_seq a la -> is_lim_seq b lb ->
is_lim_seq (fun n => (a n) * (b n)) (la * lb)%hy.
Proof.
move => a b la lb Ha Hb.
apply
(filterlim_comp
nat (A * A) A
(fun n : nat => (a n, b n)) (fun c : A * A => fst c * snd c)%hy
eventually (filter_prod (locally la) (locally lb)) (locally (la * lb)%hy)
).
apply lim_seq_pair => //.
apply filterlim_mult.
Qed.
Lemma lim_seq_norm :
∀ u : nat -> E, ∀ lu : E,
is_lim_seq u lu -> is_lim_seq (‖ u ‖)%fn (‖ lu ‖)%hy.
Proof.
move => u lu Hu.
apply (lim_seq_continuity (fun x : E => norm x)) => //.
apply filterlim_norm.
Qed.
Lemma lim_seq_norm_zero :
∀ u : nat -> E,
is_lim_seq (‖u‖)%fn 0 -> is_lim_seq u zero.
Proof.
move => u Hu.
apply filterlim_norm_zero => //.
Qed.
Lemma lim_seq_power :
∀ p : posreal, ∀ u : nat -> R, (∀ n : nat, 0 <= u n) -> ∀ l : R,
is_lim_seq u l -> is_lim_seq (u ^ p) (Rpow l p).
Proof.
move => p u u_pos l Hu.
unfold fun_power, Rpow.
assert (0 <= l) as l_pos.
assert (Lim_seq.is_lim_seq u (Rbar.Finite l)) as Hu' by easy.
suff: Rbar.Rbar_le (Rbar.Finite 0) (Rbar.Finite l).
simpl => //.
apply Lim_seq.is_lim_seq_le with (fun _ => 0) u.
assumption.
apply lim_seq_cte.
assumption.
case: (Req_EM_T l 0); swap 1 2.
move => H.
suff: (Lim_seq.is_lim_seq (λ x : nat,
match Req_EM_T (u x) 0 with | left _ => 0 | right _ => Rtrigo_def.exp (p * Rpower.ln (u x)) end)
(Rbar.Finite (Rtrigo_def.exp (p * Rpower.ln l)))).
easy.
apply Lim_seq.is_lim_seq_ext_loc with (fun n => Rtrigo_def.exp (p * Rpower.ln (u n))).
assert (0 < l) as l_stpos by lra.
pose sigl := {| RIneq.pos := l; RIneq.cond_pos := l_stpos |}.
move: Hu => /is_lim_seq_epsilon Hu.
case: (Hu sigl) => //.
move => N HN; exists N => n /HN.
unfold norm => /=; unfold abs => /=.
unfold minus, plus, opp => /=/Rcomplements.Rabs_lt_between'.
move => [Hul _]; rewrite Rminus_eq_0 in Hul.
case: (Req_EM_T (u n) 0); lra.
apply: filterlim_comp.
2 : apply: ElemFct.continuous_exp.
suff: (Lim_seq.is_lim_seq (λ x : nat, (p * Rpower.ln (u x))%R) (Rbar_mult p (Rpower.ln l))%R).
easy.
apply Lim_seq.is_lim_seq_scal_l.
apply: filterlim_comp.
exact Hu.
apply ElemFct.continuous_ln.
lra.
move => Eql0; rewrite Eql0 in Hu.
apply is_lim_seq_epsilon => ɛ Hɛ.
move: ElemFct.is_lim_exp_m.
unfold Continuity.is_lim, filterlim, filter_le, filtermap,
Rbar_locally, Rbar_locally', locally => Hexp.
move: ElemFct.is_lim_ln_0.
unfold Continuity.is_lim, filterlim, filter_le, filtermap,
Rbar_locally, Rbar_locally', at_right, within, locally => Hln.
assert ((∃ eps : posreal, ∀ y : R_UniformSpace, ball 0 eps y → ball 0 ɛ y)) as Hballɛ.
exists (RIneq.mkposreal ɛ Hɛ); easy.
case: (Hexp _ Hballɛ) => M HM; clear Hballɛ.
assert (∃ M' : R, ∀ x : R, x < M' → x < M*/p) as HMloc.
exists (M*/p) => //.
case: (Hln _ HMloc) => sigη Hη; clear HMloc.
unfold is_lim_seq, filterlim, eventually, filter_le, filtermap, locally in Hu.
assert (∃ eps : posreal, ∀ y : R_NormedModule, ball 0 eps y → ball 0 sigη y) as Hballη.
exists sigη => //.
case: (Hu _ Hballη) => N HN; clear Hballη.
exists N => n /HN/Hη Hun.
case: (Req_EM_T (u n) 0).
move => _; rewrite minus_eq_zero.
rewrite norm_zero //.
move => Nequn0.
assert (Rpower.ln (u n) < M * / p).
apply Hun; pose Hun' := u_pos n; lra.
assert (p * Rpower.ln (u n) < M).
replace M with (p.(RIneq.pos) * (M * /p.(RIneq.pos))).
all : swap 1 2.
setoid_rewrite Rmult_comm.
rewrite Rmult_assoc.
rewrite Raxioms.Rinv_l.
rewrite Rmult_1_r => //.
case p => p' Hp' /=; lra.
unfold mult => /=.
apply Rmult_lt_compat_l.
case p => p' Hp' /=; lra.
assumption.
clear H; move: H0 => /HM//.
Qed.
Lemma lim_seq_bounded :
∀ u : nat -> E,
(∃ lu : E, is_lim_seq u lu)
-> { M : R | ∀ n : nat, (‖ u n ‖)%hy <= M }.
Proof.
move => u Hu.
apply filterlim_bounded => //.
Qed.
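(* A pointwise limit of measurable functions is measurable: each open set U
   is written as the countable union over r of the sets Ω r of points x for
   which some ball of radius d > 1/(INR r + 1) around x lies inside U, and
   membership of f x in Ω r is then captured by a tail of n ↦ s n x. *)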
Lemma measurable_fun_lim_seq {X : Set} {gen : (X -> Prop) -> Prop} :
∀ s : nat -> X -> E, (∀ n : nat, measurable_fun gen open (s n)) ->
∀ f : X -> E,
(∀ x : X, is_lim_seq (fun n => s n x) (f x))
-> measurable_fun gen open f.
Proof.
move => s Hs f Hf.
suff: (∀ U : E -> Prop, open U -> measurable gen (λ x : X, U (f x))).
move => H P HP.
induction HP.
move: H0 => /H//.
apply measurable_empty.
apply measurable_compl => //.
apply measurable_union_countable => //.
move => U HU.
pose Ω := fun r : nat => (fun x : E => ∃ d : R, ( / (INR r + 1)) < d ∧ (∀ y : E, U y ∨ (‖ minus x y ‖%hy > d))).
assert (∀ x : X, (∃ r : nat, Ω r (f x)) ↔ U (f x)) as Decomp.
move => x; split.
case => r; unfold Ω; case => [d [Hlt H]].
case (H (f x)) => //.
rewrite minus_eq_zero norm_zero => Abs.
apply False_ind.
assert (0 < /(INR r + 1)).
apply RiemannInt.RinvN_pos.
apply (Rlt_asym _ _ H0) => //.
apply Rlt_trans with d => //.
move => Ufx.
unfold open in HU.
move: (HU (f x) Ufx) => /locally_norm_le_locally.
case; move => [ɛ Hɛ] Hloc.
assert (0 < ɛ*/2) as Hhalfɛ by lra.
case: (Rtrigo_def.archimed_cor1 (ɛ*/2) Hhalfɛ).
move => m' [Hm'0 Hm'1].
assert (m' = (m' - 1) + 1)%nat by lia.
pose m := (m' - 1)%nat.
fold m in H; rewrite H in Hm'0.
replace (m + 1)%nat with (S m) in Hm'0 by lia.
rewrite RIneq.S_INR in Hm'0.
clear H; clearbody m; clear Hm'1 m'.
exists m.
exists (ɛ*/2); split => //.
move => y.
case: (ball_norm_dec (f x) y {| pos := ɛ; cond_pos := Hɛ |}).
move => /Hloc; left => //.
move => /Rnot_gt_le/=H; right.
apply Rlt_gt, RIneq.Rlt_le_trans with ɛ => //.
apply Rlimit.Rlt_eps2_eps => //.
rewrite <-norm_opp, opp_minus => //.
apply measurable_ext with (fun x => ∃ r : nat, Ω r (f x)).
exact Decomp.
apply measurable_ext with (fun x => ∃ r : nat, ∃ m : nat, ∀ n : nat, m ≤ n -> Ω r (s n x)).
move => x; split.
case => r; case => m H.
apply Decomp.
unfold Ω in H.
unfold is_lim_seq in Hf.
pose sigr := {|
RIneq.pos := / (INR r + 1);
RIneq.cond_pos := RiemannInt.RinvN_pos r |}.
assert (locally (f x) (ball_norm (f x) sigr)) as Hloc_norm.
apply locally_le_locally_norm; unfold locally_norm.
exists sigr => //.
pose Hloc := Hf x (ball_norm (f x) sigr) Hloc_norm; clearbody Hloc; clear Hloc_norm.
case: Hloc => N /= HN.
assert (m ≤ max m N) as Ineq by lia.
case: (H (max m N) Ineq) => d [Hlt {}H].
case: (H (f x)) => //.
move => Abs; apply False_ind.
assert (N ≤ max m N) as Ineq' by lia.
pose Abs' := (HN (max m N) Ineq'); clearbody Abs'.
unfold ball_norm in Abs'.
apply (Rlt_asym _ _ Abs).
apply Rlt_trans with (/ (INR r + 1)) => //.
case => r Ωrfx.
exists r.
unfold is_lim_seq, filterlim, eventually, filter_le, filtermap
in Hf.
case: Ωrfx => d; move => [Hlt Hloc].
apply Hf.
pose ɛ := (d - / (INR r + 1))%R.
assert (0 < ɛ) as Hɛ.
apply Rlt_Rminus => //.
pose sigɛ := {| pos := ɛ; cond_pos := Hɛ |}.
suff: locally_norm (f x) (Ω r).
apply locally_le_locally_norm.
exists sigɛ => y Hy.
pose (d' := (d - ‖ minus y (f x) ‖%hy)%R).
assert (/ (INR r + 1) < d') as Hlt'.
unfold ball_norm in Hy; simpl in Hy.
unfold d'; unfold ɛ in Hy; lra.
exists d'; split => //.
move => z.
case: (Hloc z).
left => //.
move => Hz.
right.
unfold d'.
apply Rlt_gt.
unfold Rminus.
apply Rlt_le_trans with ((‖ minus (f x) z ‖)%hy + - (‖ minus y (f x) ‖)%hy)%R.
apply Rplus_lt_compat_r => //.
setoid_rewrite <-norm_opp at 2.
setoid_rewrite opp_minus.
apply Rle_trans with (‖ minus (minus (f x) z) (minus (f x) y) ‖)%hy.
apply Rle_trans with (| ‖ minus (f x) z ‖%hy - ‖ minus (f x) y ‖%hy |)%R.
apply Rbasic_fun.Rle_abs.
apply norm_triangle_inv.
unfold minus.
rewrite opp_plus opp_opp.
rewrite plus_assoc.
rewrite plus_comm.
rewrite plus_assoc.
setoid_rewrite plus_comm at 3.
do 2 rewrite <-plus_assoc.
rewrite plus_opp_r plus_zero_r.
apply Rle_refl.
apply measurable_union_countable => r.
apply measurable_union_countable => m.
apply measurable_inter_countable => n.
case_eq (m <=? n).
move => /Nat.leb_le Lemn.
apply measurable_ext with (λ x : X, Ω r (s n x)).
move => x; split => //.
move => H; exact (H Lemn).
apply (Hs n).
apply measurable_gen.
move => x; case => d [Hd H].
suff: (locally_norm x (Ω r)).
apply locally_le_locally_norm.
pose ɛ := (d - / (INR r + 1))%R.
assert (0 < ɛ) as Hɛ.
apply Rlt_Rminus => //.
pose sigɛ := {| pos := ɛ; cond_pos := Hɛ |}.
exists sigɛ => y Hy.
pose (d' := (d - ‖ minus y x ‖%hy)%R).
assert (/ (INR r + 1) < d') as Hlt'.
unfold ball_norm in Hy; simpl in Hy.
unfold d'; unfold ɛ in Hy; lra.
exists d'; split => //.
move => z.
case: (H z).
left => //.
move => Hz.
right.
unfold d'.
apply Rlt_gt.
unfold Rminus.
apply Rlt_le_trans with ((‖ minus x z ‖)%hy + - (‖ minus y x ‖)%hy)%R.
apply Rplus_lt_compat_r => //.
setoid_rewrite <-norm_opp at 2.
setoid_rewrite opp_minus.
apply Rle_trans with (‖ minus (minus x z) (minus x y) ‖)%hy.
apply Rle_trans with (| ‖ minus x z ‖%hy - ‖ minus x y ‖%hy |)%R.
apply Rbasic_fun.Rle_abs.
apply norm_triangle_inv.
unfold minus.
rewrite opp_plus opp_opp.
rewrite plus_assoc.
rewrite plus_comm.
rewrite plus_assoc.
setoid_rewrite plus_comm at 3.
do 2 rewrite <-plus_assoc.
rewrite plus_opp_r plus_zero_r.
apply Rle_refl.
move => /Nat.leb_gt Ltnm.
apply measurable_ext with (fun _ => True).
move => x; split => //.
move => _ Abs; lia.
apply measurable_full.
Qed.
End NM_lim_seq_prop.
|
module HVX.Internal.Matrix
( Mat
, allMat
, anyMat
, diagMat
, ei
, fpequalsMat
, lpnorm
, matrixPow
, reduceMat
, scalarMat
, zeroMat
, zeroVec
) where
import Numeric.LinearAlgebra hiding (i)
import Numeric.LinearAlgebra.Util
import HVX.Internal.Util
type Mat = Matrix Double
allMat :: (Double -> Bool) -> Mat -> Bool
allMat f x = all f (toList . flatten $ x)
anyMat :: (Double -> Bool) -> Mat -> Bool
anyMat f x = any f (toList . flatten $ x)
diagMat :: Mat -> Mat
diagMat = diag . flatten
ei :: Int -> Int -> Mat
ei n i = buildMatrix n 1 (\(j, _) -> if i == j then 1 else 0)
fpequalsMat :: Mat -> Mat -> Bool
fpequalsMat a b
| ra == rb && ca == cb = all (uncurry fpequals) $ zip alist blist
| otherwise = error "Two matrices with different dimensions cannot possibly be equal!"
where
ra = rows a
rb = rows b
ca = cols a
cb = cols b
alist = toList . flatten $ a
blist = toList . flatten $ b
lpnorm :: Double -> Mat -> Double
lpnorm p x = sumElements y ** (1/p)
where pMat = (1><1) [p]
y = abs x ** pMat
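-- A quick sanity check of the entrywise Lp norm (illustrative only; it relies
-- on hmatrix's (><) builder and the 1x1 broadcasting already used above):
-- >>> lpnorm 2 ((2><1) [3, 4])
-- 5.0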
matrixPow :: Double -> Mat -> Mat
matrixPow p = mapMatrix (** p)
reduceMat :: ([Double] -> Double) -> Mat -> Mat
reduceMat f = (1><1) . (:[]) . f . toList . flatten
scalarMat :: Double -> Mat
scalarMat x = (1><1) [x]
zeroMat :: Int -> Mat
zeroMat n = zeros n n
zeroVec :: Int -> Mat
zeroVec n = zeros n 1
|
State Before: p : ℝ≥0∞
𝕜 : Type ?u.215698
𝕜' : Type ?u.215701
ι : Type u_2
α : ι → Type ?u.215709
β : ι → Type u_1
inst✝² : Fintype ι
inst✝¹ : Fact (1 ≤ p)
inst✝ : (i : ι) → PseudoEMetricSpace (β i)
x y : PiLp ⊤ β
⊢ edist (↑(PiLp.equiv ⊤ β) x) (↑(PiLp.equiv ⊤ β) y) ≤ edist x y
State After: no goals
Tactic: simpa only [ENNReal.coe_one, one_mul] using lipschitzWith_equiv ∞ β x y

State Before: p : ℝ≥0∞
𝕜 : Type ?u.215698
𝕜' : Type ?u.215701
ι : Type u_2
α : ι → Type ?u.215709
β : ι → Type u_1
inst✝² : Fintype ι
inst✝¹ : Fact (1 ≤ p)
inst✝ : (i : ι) → PseudoEMetricSpace (β i)
x y : PiLp ⊤ β
⊢ edist x y ≤ edist (↑(PiLp.equiv ⊤ β) x) (↑(PiLp.equiv ⊤ β) y)
State After: no goals
Tactic: simpa only [ENNReal.div_top, ENNReal.zero_toReal, NNReal.rpow_zero, ENNReal.coe_one,
  one_mul] using antilipschitzWith_equiv ∞ β x y
|
module Data.Bytes.Strict.Internal
import Data.Bytes.Util
import Data.Bytes.Prim
import Data.Word.Word8
import Data.Strings -- fastAppend
import Data.List -- intersperse
import Data.So -- for our NonEmpty
%default total
private
moduleName : String
moduleName = "Data.Bytes.Strict.Internal"
-- Our Bytes type, a Ptr to a `block` of memory, the current 0-based offset
-- into that memory, its size in bytes.
public export
data Bytes : Type where
MkB : (b : Block) -> (pos : Int) -> (len : Int) -> Bytes
-- It'd be nice to be able to provide NonEmpty based on `len` but it's quite a
-- hassle to have a Nat here with the various castings around the codebase and
-- other concerns.
public export
data NonEmpty : Bytes -> Type where
IsNonEmpty : So (len > 0) -> NonEmpty (MkB _ _ len)
-- ^ how can I provide this proof without making the user need access
-- to MkB? Simply by having my own functions implicitly prove that when
-- used? Things like cons and singleton and append? And if a
-- ByteString comes from elsewhere we use Dec?
-- This is here instead of in a 'where' due to a current coverage checking bug
private
soNotSonot' : So x -> So (not x) -> Void -- So x -> Not (So (not x))
soNotSonot' Oh Oh impossible
export -- Does this need public export or is being Dec enough for proofs?
isNonEmpty : (b : Bytes) -> Dec (NonEmpty b)
isNonEmpty (MkB _ _ len) with (choose (len > 0))
isNonEmpty (MkB _ _ len) | (Left t) = Yes (IsNonEmpty t)
isNonEmpty (MkB _ _ len) | (Right f) = No (\(IsNonEmpty t) => soNotSonot' t f)
----------------------------------------------------------------------
{- NB: The Haskell bytestring library uses unsafePerformIO in its allocating
functions because of how the strictness analyzer and sharing work; they note:
The use of unsafePerformIO in allocating functions (unsafeCreate) is critical!
Otherwise:
singleton 255 `compare` singleton 127
is compiled to:
case mallocByteString 2 of
ForeignPtr f internals ->
case writeWord8OffAddr# f 0 255 of _ ->
case writeWord8OffAddr# f 0 127 of _ ->
case eqAddr# f f of
False -> case compare (GHC.Prim.plusAddr# f 0)
(GHC.Prim.plusAddr# f 0)
TODO: Find out if this distinction is true in idris.
-}
export
unsafeCreateBytes : Int -> (MutBlock -> IO ()) -> Bytes
unsafeCreateBytes len f = unsafePerformIO $ do
b <- allocateAndFill len f
pure (MkB b 0 len)
export
unsafeCreateNBytes : Int -> (MutBlock -> IO Int) -> Bytes
unsafeCreateNBytes len f = unsafePerformIO $ do
(b, i) <- allocateAndFillToN len f
pure (MkB b 0 i)
export
unsafeCreateNBytes' : Int -> (MutBlock -> IO (Int,a)) -> (Bytes, a)
unsafeCreateNBytes' len f = unsafePerformIO $ do
(b, i, a) <- allocateAndFillToN' len f
pure ((MkB b 0 i), a)
export
unsafeCreateAndTrim : Int -> (MutBlock -> IO Int) -> Bytes
unsafeCreateAndTrim len0 f = unsafePerformIO $ do
(b, len) <- allocateAndTrim len0 f
pure (MkB b 0 len)
-- This is internal so NonEmpty is used to keep the library maker honest.
-- TODO: Why is NonEmpty for List available here? I have not imported
-- Data.List!
export
packUpToNBytes : Int -> (l : List Word8) -> (Bytes, List Word8)
packUpToNBytes len xs0 = unsafeCreateNBytes' len $ \p => go p len 0 xs0
where
go : MutBlock -> Int -> Int -> List Word8 -> IO (Int, List Word8)
go b n pos [] = pure (len - n, [])
go b 0 pos xs = pure (len, xs)
go b n pos (x :: xs) = setByte b pos x *> go b (n-1) (pos+1) xs
-- Unpacking bytestrings into lists efficiently is a tradeoff: on the one hand
-- we would like to write a tight loop that just blats the list into memory, on
-- the other hand we want it to be unpacked lazily so we don't end up with a
-- massive list data structure in memory.
--
-- Our strategy is to combine both: we will unpack lazily in reasonable sized
-- chunks, where each chunk is unpacked strictly.
--
-- unpackBytes and unpackChars do the lazy loop, while unpackAppendBytes and
-- unpackAppendChars do the chunks strictly.
-- unpackBytes : Bytes -> List Word8
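-- Below is a minimal strict sketch of such an unpack (illustrative only: it
-- is not the lazy chunked version described above, the primed name is ours,
-- and it assumes getByte from Data.Bytes.Prim plus the usual unsafePerformIO
-- caveats quoted earlier).
export
unpackBytes' : Bytes -> List Word8
unpackBytes' (MkB b pos len) = unsafePerformIO (go (integerToNat (cast len)) pos)
  where
    -- Walk the live region byte by byte; the Nat fuel makes this total.
    go : Nat -> Int -> IO (List Word8)
    go Z _ = pure []
    go (S k) i = do x <- getByte b i
                    xs <- go k (i + 1)
                    pure (x :: xs)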
-- Intended complexity: O(n+m)
-- No need to export, simply use <+>
private
append : Bytes -> Bytes -> Bytes
append (MkB xs xpos xlen) (MkB ys ypos ylen)
= unsafeCreateBytes (xlen + ylen) $ \p => do
copyBlock xs xpos xlen p 0
copyBlock ys ypos ylen p xlen
-- Debug use ONLY, **don't ever use this**, bytes are not characters!
-- Further, this shows the whole block, not just the live part.
-- TODO: Remove this down the road, or otherwise prevent it from escaping this
-- package. If a person wants to Show/Read/OverloadedString Bytes they should
-- create a principled package that does this. Bytes are not Strings.
export
Show Bytes where
show (MkB b pos len) = "MkB " ++ show_block b ++ " " ++ show pos ++ " " ++ show len
where
show_block : Block -> String
show_block buf
= "[" ++ fastAppend (intersperse ","
(map show . unsafePerformIO $ blockData buf)) ++ "]"
private
compareBytes : Bytes -> Bytes -> Ordering
compareBytes (MkB _ _ 0 ) (MkB _ _ 0 ) = EQ
compareBytes (MkB xb xpos xlen) (MkB yb ypos ylen) =
unsafePerformIO $ go (xlen-1) (ylen-1) xb yb
where
go : Int -> Int -> Block -> Block -> IO Ordering
go 0 0 xb yb = [| getByte xb xpos `compare` getByte yb ypos |]
go 0 _ _ _ = pure LT
go _ 0 _ _ = pure GT
go k j xb yb
= do EQ <- [| getByte xb (xpos + k) `compare` getByte yb (ypos + j) |]
| res => pure res
assert_total $ go (k-1) (j-1) xb yb
-- Offset will tend to be 0 so we first compare length, then block object
-- equality, then offset. The block should be a simple ptr comparison so it's
-- worth doing before trying for a full compareBytes. This order is partly
-- because it makes sense and partly because ByteString does it this way.
-- However I'm not sure why length is checked before block object equality,
-- perhaps in ghc length has a cheaper comparison than than the extra
-- foreignPtr prodding that ptr equality needs.
-- To ask if we are nominally equal.
infixl 9 `basicEq`
private
basicEq : Bytes -> Bytes -> Bool
MkB xb xpos xlen `basicEq` MkB yb ypos ylen
= xlen == ylen && (unsafePerformIO (exactEqBlock xb yb) && xpos == ypos)
export
implementation
Eq Bytes where
x == y = x `basicEq` y || compareBytes x y == EQ
-- basicEq is much speedier than compareBytes, use it when possible!
export
Ord Bytes where
compare = compareBytes
x >= y = x `basicEq` y || compareBytes x y == GT
x <= y = x `basicEq` y || compareBytes x y == LT
export
Semigroup Bytes where
(<+>) = append
-- Is there any way to make this a unique object? There's no real reason to
-- have this be anything other than the null ptr. Or at the very least there's
-- no reason to ever create more than one.
-- TODO: Investigate this as backends develop ^
export
Monoid Bytes where
neutral = unsafeCreateBytes 0 (\_ => pure ())
-- Other things down the road that might need to concat, e.g.
-- sconcat/mconcat/foldMap should use this since it avoids a ton of copying by
-- checking the required size upfront. The idea here is to first compute the
-- size of the Bytes we need to make and then fill it. Despite going over the
-- list twice this makes it significantly faster than piece-wise methods that
-- must copy over and over to build up.
-- TODO: test this!
-- TODO: This could be even faster with a foldl style library that traverses the
-- list only once
-- NB: Unlike for allocation the crash here is for bad behavior (overflow) so it
-- would not be correct to lie about its totality. To that end we will want to
-- create a different, total concat later to reduce partiality infection.
export
partial
concat : List Bytes -> Bytes
concat bs = let maxlen = getLen bs
in unsafeCreateBytes maxlen (go 0 (maxlen-1) bs)
where
partial
getLen : List Bytes -> Int
getLen [] = 0 -- Check overflow of Int, which would be bad.
getLen (MkB _ _ len :: bs) = checkedAdd moduleName "concat" len (getLen bs)
go : (buf_pos : Int) -> (end : Int) -> List Bytes -> MutBlock -> IO ()
go n_pos end [] buf = pure ()
go n_pos end (MkB b pos len :: bs) buf
= if n_pos > end then pure ()
else do copyBlock b pos len buf n_pos
go (n_pos + len) end bs buf
export
total
concat' : List Bytes -> Maybe Bytes
concat' bs = let maxlen = getLen bs
in (\l => unsafeCreateBytes l (go 0 (l-1) bs)) <$> maxlen
where
getLen : List Bytes -> Maybe Int
getLen [] = Just 0 -- Check overflow of Int, which would be bad.
getLen (MkB _ _ len :: bs) =
getLen bs >>= \v => let v = len + v
in if v >= 0 then Just v
else Nothing
go : (buf_pos : Int) -> (end : Int) -> List Bytes -> MutBlock -> IO ()
go n_pos end [] buf = pure ()
go n_pos end (MkB b pos len :: bs) buf
= if n_pos > end then pure ()
else do copyBlock b pos len buf n_pos
go (n_pos + len) end bs buf
|
(* Title: Isabelle Collections Library
Author: Andreas Lochbihler <andreas dot lochbihler at kit.edu>
Maintainer: Andreas Lochbihler <andreas dot lochbihler at kit.edu>
*)
section {* \isaheader{Map implementation via tries} *}
theory TrieMapImpl imports
Trie2
"../gen_algo/MapGA"
begin
(*@impl Map
@type ('k,'v) tm
@abbrv tm,t
Maps over keys of type @{typ "'k list"} implemented by tries.
*)
subsection {* Operations *}
type_synonym ('k, 'v) tm = "('k, 'v) trie"
definition [icf_rec_def]: "tm_basic_ops \<equiv> \<lparr>
bmap_op_\<alpha> = Trie2.lookup,
bmap_op_invar = \<lambda>_. True,
bmap_op_empty = (\<lambda>_::unit. Trie2.empty),
bmap_op_lookup = (\<lambda>k m. Trie2.lookup m k),
bmap_op_update = Trie2.update,
bmap_op_update_dj = Trie2.update,
bmap_op_delete = Trie2.delete,
bmap_op_list_it = Trie2.iteratei
\<rparr>"
setup Locale_Code.open_block
interpretation tm_basic: StdBasicMap tm_basic_ops
apply unfold_locales
apply (simp_all add: icf_rec_unf Trie2.finite_dom_lookup Trie2.iteratei_correct)
done
setup Locale_Code.close_block
definition [icf_rec_def]: "tm_ops \<equiv> tm_basic.dflt_ops"
setup Locale_Code.open_block
interpretation tm: StdMap tm_ops
unfolding tm_ops_def
by (rule tm_basic.dflt_ops_impl)
interpretation tm: StdMap_no_invar tm_ops
by unfold_locales (simp add: icf_rec_unf)
setup Locale_Code.close_block
setup {* ICF_Tools.revert_abbrevs "tm"*}
lemma pi_trie_impl[proper_it]:
shows "proper_it'
((Trie_Impl.iteratei) :: _ \<Rightarrow> (_,'\<sigma>a) set_iterator)
((Trie_Impl.iteratei) :: _ \<Rightarrow> (_,'\<sigma>b) set_iterator)"
unfolding Trie_Impl.iteratei_def[abs_def]
proof (rule proper_it'I)
(*note [[show_types, show_consts]]*)
fix t :: "('k,'v) Trie.trie"
{
fix l and t :: "('k,'v) Trie.trie"
have "proper_it ((Trie_Impl.iteratei_postfixed l t)
:: (_,'\<sigma>a) set_iterator)
((Trie_Impl.iteratei_postfixed l t)
:: (_,'\<sigma>b) set_iterator)"
proof (induct t arbitrary: l)
case (Trie vo kvs l)
let ?ITA = "\<lambda>l t. (Trie_Impl.iteratei_postfixed l t)
:: (_,'\<sigma>a) set_iterator"
let ?ITB = "\<lambda>l t. (Trie_Impl.iteratei_postfixed l t)
:: (_,'\<sigma>b) set_iterator"
show ?case
unfolding Trie_Impl.iteratei_postfixed_alt_def
apply (rule pi_union)
apply (auto split: option.split intro: icf_proper_iteratorI) []
proof (rule pi_image)
def bs \<equiv> "\<lambda>(k,t). SOME l'::('k list \<times> 'v) list.
?ITA (k#l) t = foldli l' \<and> ?ITB (k#l) t = foldli l'"
have EQ1: "\<forall>(k,t)\<in>set kvs. ?ITA (k#l) t = foldli (bs (k,t))" and
EQ2: "\<forall>(k,t)\<in>set kvs. ?ITB (k#l) t = foldli (bs (k,t))"
proof (safe)
fix k t
assume A: "(k,t) \<in> set kvs"
from Trie.hyps[OF A, of "k#l"] have
PI: "proper_it (?ITA (k#l) t) (?ITB (k#l) t)"
by assumption
obtain l' where
"?ITA (k#l) t = foldli l'
\<and> (?ITB (k#l) t) = foldli l'"
by (blast intro: proper_itE[OF PI])
thus "?ITA (k#l) t = foldli (bs (k,t))"
"?ITB (k#l) t = foldli (bs (k,t))"
unfolding bs_def
apply auto
apply (metis (lifting, full_types) someI_ex)
apply (metis (lifting, full_types) someI_ex)
done
qed
have PEQ1: "set_iterator_product (foldli kvs) (\<lambda>(k,t). ?ITA (k#l) t)
= set_iterator_product (foldli kvs) (\<lambda>kt. foldli (bs kt))"
apply (rule set_iterator_product_eq2)
using EQ1 by auto
have PEQ2: "set_iterator_product (foldli kvs) (\<lambda>(k,t). ?ITB (k#l) t)
= set_iterator_product (foldli kvs) (\<lambda>kt. foldli (bs kt))"
apply (rule set_iterator_product_eq2)
using EQ2 by auto
show "proper_it
(set_iterator_product (foldli kvs) (\<lambda>(k,t). ?ITA (k#l) t))
(set_iterator_product (foldli kvs) (\<lambda>(k,t). ?ITB (k#l) t))"
apply (subst PEQ1)
apply (subst PEQ2)
apply (auto simp: set_iterator_product_foldli_conv)
by (blast intro: proper_itI)
qed
qed
} thus "proper_it
(iteratei_postfixed [] t :: (_,'\<sigma>a) set_iterator)
(iteratei_postfixed [] t :: (_,'\<sigma>b) set_iterator)" .
qed
lemma pi_trie[proper_it]:
"proper_it' Trie2.iteratei Trie2.iteratei"
unfolding Trie2.iteratei_def[abs_def]
apply (rule proper_it'I)
apply (intro icf_proper_iteratorI)
apply (rule proper_it'D)
by (rule pi_trie_impl)
interpretation pi_trie: proper_it_loc Trie2.iteratei Trie2.iteratei
apply unfold_locales
apply (rule pi_trie)
done
text {* Code generator test *}
definition "test_codegen \<equiv> (
tm.add ,
tm.add_dj ,
tm.ball ,
tm.bex ,
tm.delete ,
tm.empty ,
tm.isEmpty ,
tm.isSng ,
tm.iterate ,
tm.iteratei ,
tm.list_it ,
tm.lookup ,
tm.restrict ,
tm.sel ,
tm.size ,
tm.size_abort ,
tm.sng ,
tm.to_list ,
tm.to_map ,
tm.update ,
tm.update_dj)"
export_code test_codegen in SML
end
|
Require Import Arith.
Require Import compcert.lib.Integers.
Require Import compcert.lib.Coqlib.
Require Import List. Import ListNotations.
Definition asZ (x : bool) : Z := if x then 1 else 0.
Lemma asZT: asZ true = 1. reflexivity. Qed.
Lemma asZF: asZ false = 0. reflexivity. Qed.
Definition convertByteBits bits (byte : Z) : Prop :=
exists (b0 b1 b2 b3 b4 b5 b6 b7 : bool),
bits = [b0; b1; b2; b3; b4; b5; b6; b7] /\
byte = (1 * (asZ b0) + 2 * (asZ b1) + 4 * (asZ b2) + 8 * (asZ b3)
+ 16 * (asZ b4) + 32 * (asZ b5) + 64 * (asZ b6) + 128 * (asZ b7)).
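(* A small worked example (ours, for illustration): with the little-endian
   weighting above, the bit pattern [1;1;0;0;0;0;0;0] encodes the byte 3. *)
Example convertByteBits_three :
  convertByteBits [true; true; false; false; false; false; false; false] 3.
Proof.
  exists true, true, false, false, false, false, false, false.
  split; reflexivity.
Qed.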
(* TODO: stuck here *)
Lemma AUX1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15:
Z.lxor
(1 * 1 + 2 * 1 + 4 * asZ b2 + 8 * asZ b3 + 16 * asZ b4 + 32 * asZ b5 +
64 * asZ b6 + 128 * asZ b7)
(1 * asZ b8 + 2 * asZ b9 + 4 * asZ b10 + 8 * asZ b11 + 16 * asZ b12 +
32 * asZ b13 + 64 * asZ b14 + 128 * asZ b15) =
1 * (if negb b8 then 1 else 0) + 2 * (if negb b9 then 1 else 0) +
4 * (if xorb b2 b10 then 1 else 0) + 8 * (if xorb b3 b11 then 1 else 0) +
16 * (if xorb b4 b12 then 1 else 0) + 32 * (if xorb b5 b13 then 1 else 0) +
64 * (if xorb b6 b14 then 1 else 0) + 128 * (if xorb b7 b15 then 1 else 0).
Proof.
destruct b2; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b10).
destruct b3; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
{ rewrite (xorb_false_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
}
{ rewrite (xorb_false_l b10).
destruct b3; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
{ rewrite (xorb_false_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
}
Qed.
Lemma AUX2 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15:
Z.lxor (1 * 1 + 2 * 0 + 4 * asZ b2 + 8 * asZ b3 + 16 * asZ b4 + 32 * asZ b5 +
64 * asZ b6 + 128 * asZ b7)
(1 * asZ b8 + 2 * asZ b9 + 4 * asZ b10 + 8 * asZ b11 + 16 * asZ b12 +
32 * asZ b13 + 64 * asZ b14 + 128 * asZ b15) =
1 * (if negb b8 then 1 else 0) + 2 * (if b9 then 1 else 0) +
4 * (if xorb b2 b10 then 1 else 0) + 8 * (if xorb b3 b11 then 1 else 0) +
16 * (if xorb b4 b12 then 1 else 0) + 32 * (if xorb b5 b13 then 1 else 0) +
64 * (if xorb b6 b14 then 1 else 0) + 128 * (if xorb b7 b15 then 1 else 0).
Proof.
destruct b2; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b10).
destruct b3; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
{ rewrite (xorb_false_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
}
{ rewrite (xorb_false_l b10).
destruct b3; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
{ rewrite (xorb_false_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
}
Qed.
Lemma AUX3 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15:
Z.lxor
(1 * 0 + 2 * 1 + 4 * asZ b2 + 8 * asZ b3 + 16 * asZ b4 + 32 * asZ b5 +
64 * asZ b6 + 128 * asZ b7)
(1 * asZ b8 + 2 * asZ b9 + 4 * asZ b10 + 8 * asZ b11 + 16 * asZ b12 +
32 * asZ b13 + 64 * asZ b14 + 128 * asZ b15) =
1 * (if b8 then 1 else 0) + 2 * (if negb b9 then 1 else 0) +
4 * (if xorb b2 b10 then 1 else 0) + 8 * (if xorb b3 b11 then 1 else 0) +
16 * (if xorb b4 b12 then 1 else 0) + 32 * (if xorb b5 b13 then 1 else 0) +
64 * (if xorb b6 b14 then 1 else 0) + 128 * (if xorb b7 b15 then 1 else 0).
Proof.
destruct b2; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b10).
destruct b3; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
{ rewrite (xorb_false_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
}
{ rewrite (xorb_false_l b10).
destruct b3; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
{ rewrite (xorb_false_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
}
Qed.
Lemma AUX4 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15:
Z.lxor (1 * 0 + 2 * 0 + 4 * asZ b2 + 8 * asZ b3 + 16 * asZ b4 + 32 * asZ b5 +
64 * asZ b6 + 128 * asZ b7)
(1 * asZ b8 + 2 * asZ b9 + 4 * asZ b10 + 8 * asZ b11 + 16 * asZ b12 +
32 * asZ b13 + 64 * asZ b14 + 128 * asZ b15) =
1 * (if b8 then 1 else 0) + 2 * (if b9 then 1 else 0) +
4 * (if xorb b2 b10 then 1 else 0) + 8 * (if xorb b3 b11 then 1 else 0) +
16 * (if xorb b4 b12 then 1 else 0) + 32 * (if xorb b5 b13 then 1 else 0) +
64 * (if xorb b6 b14 then 1 else 0) + 128 * (if xorb b7 b15 then 1 else 0).
Proof.
destruct b2; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b10).
destruct b3; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
{ rewrite (xorb_false_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
}
{ rewrite (xorb_false_l b10).
destruct b3; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
{ rewrite (xorb_false_l b11).
destruct b4; destruct b5; destruct b6; destruct b7; destruct b8; destruct b9;
destruct b10; destruct b11; destruct b12; destruct b13; destruct b14; destruct b15; trivial.
}
}
Qed.
Lemma xor_correspondence :
forall (b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 : bool)
(byte0 byte1 : Z),
convertByteBits [b0; b1; b2; b3; b4; b5; b6; b7] byte0 ->
convertByteBits [b8; b9; b10; b11; b12; b13; b14; b15] byte1 ->
convertByteBits
[xorb b0 b8; xorb b1 b9; xorb b2 b10; xorb b3 b11;
xorb b4 b12; xorb b5 b13; xorb b6 b14; xorb b7 b15]
(Z.lxor byte0 byte1).
Proof.
intros.
generalize dependent H. generalize dependent H0. intros H0 H1.
unfold convertByteBits. unfold asZ.
do 8 eexists. split. reflexivity.
unfold convertByteBits in *.
destruct H0 as [ ? [ ? [ ? [ ? [ ? [ ? [ ? [ ? ? ] ] ] ]] ]] ]. (* nested 8 *)
destruct H.
symmetry in H.
inversion H. clear H.
subst.
destruct H1 as [ ? [ ? [ ? [ ? [ ? [ ? [ ? [ ? ? ] ] ] ]] ]] ]. (* nested 8 *)
destruct H.
symmetry in H.
inversion H. clear H.
subst.
destruct b0; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b8).
destruct b1; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b9). apply AUX1. }
{ rewrite (xorb_false_l b9). apply AUX2. }
}
{ rewrite (xorb_false_l b8).
destruct b1; try rewrite asZT; try rewrite asZF.
{ rewrite (xorb_true_l b9). apply AUX3. }
{ rewrite (xorb_false_l b9). apply AUX4. }
}
Qed.
|
(**********************************************************************)
(* This program is free software; you can redistribute it and/or *)
(* modify it under the terms of the GNU Lesser General Public *)
(* License as published by the Free Software Foundation; either *)
(* version 2.1 of the License, or (at your option) any later version. *)
(* *)
(* This program is distributed in the hope that it will be useful, *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *)
(* GNU General Public License for more details. *)
(* *)
(* You should have received a copy of the GNU Lesser General Public *)
(* License along with this program; if not, write to the Free *)
(* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA *)
(* 02110-1301 USA *)
(**********************************************************************)
(**********************************************************************)
(* Abstraction_to_Tree.v *)
(* *)
(* Barry Jay *)
(* *)
(**********************************************************************)
Require Import Omega Max Bool List.
Require Import IntensionalLib.SF_calculus.Test.
Require Import IntensionalLib.SF_calculus.General.
Require Import IntensionalLib.Tree_calculus.Abstraction_Terms.
Require Import IntensionalLib.Tree_calculus.Abstraction_Reduction.
Require Import IntensionalLib.Tree_calculus.Tree_Terms.
Require Import IntensionalLib.Tree_calculus.Tree_Tactics.
Require Import IntensionalLib.Tree_calculus.Tree_reduction.
Require Import IntensionalLib.Tree_calculus.Tree_Normal.
Require Import IntensionalLib.Tree_calculus.Tree_Closed.
Require Import IntensionalLib.Tree_calculus.Substitution.
Require Import IntensionalLib.Tree_calculus.Tree_Eval.
Require Import IntensionalLib.Tree_calculus.Star.
Require Import IntensionalLib.Tree_calculus.Wait.
Require Import IntensionalLib.Tree_calculus.Fixpoints.
Require Import IntensionalLib.Tree_calculus.Wave_Factor.
Require Import IntensionalLib.Tree_calculus.Wave_Factor2.
Require Import IntensionalLib.Tree_calculus.Equal.
Require Import IntensionalLib.Tree_calculus.Case.
Require Import IntensionalLib.Tree_calculus.Extensions.
Require Import IntensionalLib.Tree_calculus.Wait2.
Require Import IntensionalLib.Tree_calculus.Abstraction.
Require Import IntensionalLib.Tree_calculus.Abstraction2.
Require Import IntensionalLib.Tree_calculus.Abstraction3.
Require Import IntensionalLib.Tree_calculus.Abstraction4.
Require Import IntensionalLib.Tree_calculus.Abstraction5.
Definition op_to_tree o :=
match o with
| Jop => j_op
| Rop => r_op
| Hop => h_op
| Aop => abs_op
| Iop => i_op
| Bop => b_op
end.
Fixpoint abs_to_tree M :=
match M with
| Abstraction_Terms.Op o => op_to_tree o
| Abstraction_Terms.App M1 M2 => App (abs_to_tree M1) (abs_to_tree M2)
end.
Theorem translation_preserves_abs_reduction:
forall M N, abs_red1 M N -> sf_red (abs_to_tree M) (abs_to_tree N).
Proof.
intros M N r; induction r; intros;
unfold abs_to_tree; fold abs_to_tree; unfold op_to_tree.
(* 14 *)
auto.
eapply2 j_red.
eapply2 r_red.
eapply2 h_red.
eapply2 abs_red.
unfold_op. repeat eval_tac.
eapply2 b_j_red.
eapply2 b_r_red.
eapply2 b_h_red.
eapply2 b_a_red.
eapply2 b_i_red.
eapply2 b_b_red.
(* 2 *)
generalize H; case o; intro.
congruence.
eapply2 b_r_op_red.
eapply2 b_h_op_red.
eapply2 b_abs_op_red.
eapply2 b_i_op_red.
eapply2 b_b_op_red.
(* 1 *)
inversion H; subst; unfold abs_to_tree; fold abs_to_tree;
unfold op_to_tree; subst.
eapply2 b_h1_red.
eapply2 b_a1_red.
eapply2 b_b1_red.
Qed.
|
subroutine EDGETOCELLMAX(
& edgeData
& ,iedgeDatalo0,iedgeDatalo1
& ,iedgeDatahi0,iedgeDatahi1
& ,cellData
& ,icellDatalo0,icellDatalo1
& ,icellDatahi0,icellDatahi1
& ,icellBoxlo0,icellBoxlo1
& ,icellBoxhi0,icellBoxhi1
& ,dir
& )
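c     Illustrative note (ours; not in the generated source): for each cell
c     (i,j) in cellBox, store the larger of the two edge values bounding
c     the cell in direction dir, i.e. edgeData(i,j) and edgeData(ii,jj).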
implicit none
integer*8 chiterations
integer*8 chflops,chloads,chstores
common /chiterations/ chiterations
common /chflops/ chflops,chloads,chstores
EXTERNAL CHCOMMON
integer CHF_ID(0:5,0:5)
data CHF_ID/ 1,0,0,0,0,0 ,0,1,0,0,0,0 ,0,0,1,0,0,0 ,0,0,0,1,0,0 ,0
&,0,0,0,1,0 ,0,0,0,0,0,1 /
integer iedgeDatalo0,iedgeDatalo1
integer iedgeDatahi0,iedgeDatahi1
REAL*8 edgeData(
& iedgeDatalo0:iedgeDatahi0,
& iedgeDatalo1:iedgeDatahi1)
integer icellDatalo0,icellDatalo1
integer icellDatahi0,icellDatahi1
REAL*8 cellData(
& icellDatalo0:icellDatahi0,
& icellDatalo1:icellDatahi1)
integer icellBoxlo0,icellBoxlo1
integer icellBoxhi0,icellBoxhi1
integer dir
integer i,j
integer ii,jj
chiterations=(1+icellBoxhi1-icellBoxlo1)*(1+icellBoxhi0-icellBoxlo
&0)*1
do j = icellBoxlo1,icellBoxhi1
do i = icellBoxlo0,icellBoxhi0
ii = i+CHF_ID(0,dir)
jj = j+CHF_ID(1,dir)
cellData(i,j) = max(
& edgeData(i,j),
& edgeData(ii,jj))
enddo
enddo
chflops=chflops+chiterations
chloads=chloads+chiterations*2
chstores=chstores+chiterations
return
end
|
#include <stdio.h>
#include <gsl/gsl_multiset.h>
int
main (void)
{
gsl_multiset * c;
size_t i;
printf ("All multisets of {0,1,2,3} by size:\n") ;
for (i = 0; i <= 4; i++)
{
c = gsl_multiset_calloc (4, i);
do
{
printf ("{");
gsl_multiset_fprintf (stdout, c, " %u");
printf (" }\n");
}
while (gsl_multiset_next (c) == GSL_SUCCESS);
gsl_multiset_free (c);
}
return 0;
}
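
/* For reference (an illustrative note, not part of the GSL manual example):
   the number of size-k multisets over n = 4 symbols is C(k+3, 3), so the
   loop above prints 1 + 4 + 10 + 20 + 35 = 70 multisets in total. */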
|
Test the accuracy of the DVI normal approximation
```python
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from IPython.display import clear_output
import os, json
import gaussian_variables as gv
import utils as u
import plot_utils as pu
import bayes_layers as bnn
from bayes_models import MLP, PointMLP, AdaptedMLP
np.random.seed(3)
```
First, construct the dataset:
\begin{equation}
y = -(x+0.5)\sin(3\pi x) + \eta, \qquad \eta \sim \mathcal{N}\big(0, \sigma(x)^2\big)
\end{equation}
\begin{equation}
\sigma(x) = 0.45(x + 0.5)^2
\end{equation}
```python
def base_model(x):
return -(x+0.5)*np.sin(3 * np.pi *x)
def noise_model(x):
return 0.45*(x+0.5)**2
def sample_data(x):
return base_model(x) + np.random.normal(0, noise_model(x))
data_size = {'train': 500, 'valid': 100, 'test': 100}
toy_data = []
for section in ['train', 'valid', 'test']:
x = (np.random.rand(data_size[section], 1) - 0.5)
toy_data.append([x, sample_data(x).reshape(-1)])
x = np.arange(-1,1,1/100)
toy_data.append([[[_] for _ in x], base_model(x)])
pu.toy_results_plot(toy_data, {'mean':base_model, 'std':noise_model})
```
Set the model hyperparameters. Here we use a three-layer network with 5 neurons per layer as the example (the other network results in this project can be obtained by changing the "hidden_dims" parameter).
```python
hypers = {
"x_dim": 1, "y_dim": 2,
"hidden_dims": [5,5,5], "nonlinearity": "relu",
"adapter": {
'in' : {"scale": [[1.0]], "shift": [[0.0]]},
'out': {"scale": [[1.0, 0.83]], "shift": [[0.0, -3.5]]}
},
"method": "bayes", "style": "heteroskedastic",
"homo_logvar_scale": 2*np.log(0.2), "prior_type": ["empirical", "wider_he", "wider_he"],
"n_epochs": 20000, "batch_size": 500,
"learning_rate": 0.001, "lambda": 1.0,
"warmup_updates": {'lambda': 14000.0}, "anneal_updates": {'lambda': 1000.0},
"optimizer": "adam", "gradient_clip": 0.1,
"data_fraction": 1.0, "sections_to_run": ["train"]
}
```
Build the model
```python
def make_model(hypers):
if hypers['method'].lower().strip() == 'bayes':
MLP_factory = MLP
prediction = lambda y: tf.reshape(y.mean[:,0], [-1])
loss = bnn.regression_loss
else:
MLP_factory = PointMLP
prediction = lambda y: tf.reshape(y.mean[:,0], [-1])
loss = bnn.point_regression_loss
mlp = MLP_factory(hypers['x_dim'], hypers['y_dim'], hypers)
mlp = AdaptedMLP(mlp)
mlp.make_placeholders()
ipt = mlp.placeholders['ipt_mean']
y = mlp(ipt)
target = tf.placeholder(tf.float32, [None])
mlp.placeholders['target'] = target
global_step = tf.Variable(0, trainable=False, name='global_step')
loss, logprob, all_surprise = loss(y, target, mlp, hypers, global_step)
accuracy = tf.reduce_mean(tf.abs(target - prediction(y)))
return {
'model': mlp,
'metrics': {
'accuracy': accuracy, 'loss': loss,
'logprob': logprob, 'all_surprise': all_surprise
},
'global_step': global_step}
```
Define a function that compares the results of the DVI and MCVI algorithms
```python
from scipy import stats
def show_compare(model_and_metrics, sess):
n_samp = 20000
x = 0.25
ipt = [[[x]] for _ in range(n_samp)]
sample_op = model_and_metrics['model'].run_with_MC(
ipt, n_sample=n_samp)
approx_op = model_and_metrics['model'](x)
samples = sess.run(sample_op)
approx = sess.run([approx_op.mean, approx_op.var])
m_min = stats.norm.ppf(
0.0001, loc=approx[0][0, 0], scale=np.sqrt(approx[1][0, 0, 0]))
m_max = stats.norm.ppf(
0.9999, loc=approx[0][0, 0], scale=np.sqrt(approx[1][0, 0, 0]))
l_min = stats.norm.ppf(
0.0001, loc=approx[0][0, 1], scale=np.sqrt(approx[1][0, 1, 1]))
l_max = stats.norm.ppf(
0.9999, loc=approx[0][0, 1], scale=np.sqrt(approx[1][0, 1, 1]))
bin_no_m = np.linspace(m_min, m_max, 50)
bin_no_l = np.linspace(l_min, l_max, 50)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.hist(samples[:, 0, 0], bin_no_m,
density=True, edgecolor='k', facecolor='#b4c7e7')
ax1.plot(*gaussian1d(approx[0][0, 0], approx[1][0, 0, 0],
m_min, m_max), 'b')
plt.xlim([m_min, m_max])
ax1.set_yticks([])
ax1.set_xlabel('$m$')
ax1.set_ylabel('$q(m)$')
ax2 = fig.add_subplot(222)
ax2.hist(samples[:, 0, 1], bin_no_l,
density=True, edgecolor='k', facecolor='#b4c7e7', label="MC")
ax2.plot(*gaussian1d(approx[0][0, 1],
approx[1][0, 1, 1],
l_min,
l_max), 'b', label="ours")
plt.xlim([l_min, l_max])
ax2.set_yticks([])
ax2.set_xlabel('$\ell$')
ax2.set_ylabel('$q(\ell)$')
plt.show()
return None
```
```python
def gaussian1d(mean, var, min, max):
x_axis = np.linspace(min, max, 1000)
return x_axis, 1.0 / np.sqrt(2.0 * np.pi * var) * \
np.exp(-1.0 / (2.0 * var) * (x_axis - mean)**2)
```
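For reference, `gaussian1d` evaluates the one-dimensional Gaussian density on a 1000-point grid:
\begin{equation}
q(x) = \frac{1}{\sqrt{2\pi\,\mathrm{var}}}\exp\!\left(-\frac{(x-\mathrm{mean})^2}{2\,\mathrm{var}}\right)
\end{equation}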
Define the training function
```python
def run(data):
run_id = u.start_run()
restricted_training_set = u.restrict_dataset_size(data[0], hypers['data_fraction'])
hypers['dataset_size'] = len(restricted_training_set[0])
device_id = 1
device_string = u.get_device_string(device_id)
print(hypers)
with tf.device(device_string):
if True:
model_and_metrics = make_model(hypers)
train_op = u.make_optimizer(model_and_metrics, hypers)
sess = u.get_session()
saver = tf.train.Saver()
all_summaries = []
best_valid_accuracy = np.inf
show_compare(model_and_metrics, sess)
for epoch in range(hypers['n_epochs']):
verbose = (epoch % 20 == 0)
if verbose:
print("Epoch %i: " % epoch, end='')
epoch_summary, accuracies = u.train_valid_test(
{
'train': restricted_training_set,
'valid': data[1],
'test': data[2]
},
sess, model_and_metrics, train_op, hypers, verbose)
show_compare(model_and_metrics, sess)
```
Start training, and obtain the comparison between DVI and MCVI
```python
run(toy_data)
```
Scale the previous dataset up by a factor of 10
```python
def base_model_10(x):
return 10*(-(x+0.5)*np.sin(3 * np.pi *x))
def noise_model_10(x):
return 10*(0.45*(x+0.5)**2)
def sample_data_10(x):
return base_model_10(x) + np.random.normal(0, noise_model_10(x))
data_size = {'train': 500, 'valid': 100, 'test': 100}
toy_data = []
for section in ['train', 'valid', 'test']:
x = (np.random.rand(data_size[section], 1) - 0.5)
toy_data.append([x, sample_data_10(x).reshape(-1)])
x = np.arange(-1,1,1/100)
toy_data.append([[[_] for _ in x], base_model_10(x)])
pu.toy_results_plot(toy_data, {'mean':base_model_10, 'std':noise_model_10})
plt.ylim([-11,16])
```
Train in the same way, and obtain the comparison between DVI and MCVI
```python
run(toy_data)
```
For the results on m, the agreement between DVI and MCVI becomes stronger.
|
[STATEMENT]
lemma map_filter_Some:
assumes "P x = Some y"
shows "List.map_filter P [x] = [y]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. List.map_filter P [x] = [y]
[PROOF STEP]
by (simp add: assms map_filter_simps(1) map_filter_simps(2))
|