# Copyright 2020 Juan L Gamella
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import stars
from sklearn.covariance import GraphicalLasso
def fit(X, beta=0.05, N=None, start=1, step=1, tol=1e-5, precision_tol = 1e-4, max_iter=20, glasso_params={}, debug=False):
"""Function to run StARS using the Graphical Lasso from
scikit.learn.
Parameters:
    - X (np.array): Array containing n observations of p
      variables; each column holds the observations of a single variable.
- beta (float, optional): Maximum allowed instability between
subsample estimates. Defaults to 0.05, the value recommended in the
paper.
    - N (int, optional): Number of subsamples; must be a divisor of
      n. Defaults to the value recommended in the paper,
      i.e. int(n / np.floor(10 * np.sqrt(n))).
- start (float, optional): Starting lambda in the search
procedure. Defaults to 1.
- step (float, optional): Initial step at which to increase
lambda. Defaults to 1.
    - tol (float, optional): Tolerance of the search procedure,
      i.e. the search stops when the instability at a given
      lambda is within `tol` below `beta`. Defaults to 1e-5.
- precision_tol (float, optional): Cutoff value at which nonzero
elements of the precision matrix returned by GLASSO are
considered edges in the graph. Defaults to 1e-4.
- max_iter (int, optional): Maximum number of iterations for which
the search procedure is run, i.e. the maximum number of times
the estimator is run. Defaults to 20.
- glasso_params (dict, optional): Dictionary used to pass
additional parameters to sklearn.covariance.GraphicalLasso. Defaults to `{}`.
- debug (bool, optional): If debugging messages should be printed
during execution. Defaults to `False`.
Returns:
- estimate (np.array): The adjacency matrix of the resulting
graph estimate.
"""
estimator = lambda subsamples, alpha: glasso(subsamples, alpha, precision_tol = precision_tol, glasso_params = glasso_params)
return stars.fit(X, estimator, beta, N, start, step, tol, max_iter, debug)
def glasso(subsamples, alpha, precision_tol=1e-4, glasso_params = {}):
"""Run the graphical lasso from scikit learn over the given
subsamples, at the given regularization level.
Parameters:
- subsamples (np.array): the subsample array
- alpha (float): the regularization parameter at which to run
the estimator, taken as 1/lambda, i.e, lower values mean
sparser
Returns:
- estimates (np.array): The adjacency matrices of the graphs
estimated for each subsample
"""
(N,_,p) = subsamples.shape
precisions = np.zeros((len(subsamples),p,p))
g = GraphicalLasso(alpha = 1 / alpha,
**glasso_params)
for j,sample in enumerate(subsamples):
precision = g.fit(sample).precision_
precisions[j,:,:] = precision - np.diag(np.diag(precision))
estimates = (abs(precisions) > precision_tol).astype(int)
return estimates
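# Illustrative usage (a minimal sketch, not part of the original module;
# assumes this file is importable alongside the accompanying `stars` module):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(500, 10))  # n = 500 observations of p = 10 variables
    estimate = fit(X, beta=0.05, debug=True)
    print(estimate)  # p x p adjacency matrix of the StARS-selected graph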
|
/*=============================================================================
Copyright (c) 2010-2016 Bolero MURAKAMI
https://github.com/bolero-MURAKAMI/Sprig
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef SPRIG_KRKR_TJS_HASH_HPP
#define SPRIG_KRKR_TJS_HASH_HPP
#include <sprig/config/config.hpp>
#ifdef SPRIG_USING_PRAGMA_ONCE
# pragma once
#endif // #ifdef SPRIG_USING_PRAGMA_ONCE
#include <boost/functional/hash/hash.hpp>
#include <sprig/external/tp_stub.hpp>
//
// hash_value
//
static std::size_t hash_value(tTJSString const& target) {
return boost::hash_range(target.c_str(), target.c_str() + target.length());
}
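// Usage sketch (illustrative): boost::hash finds this overload via
// argument-dependent lookup, so tTJSString can be used as a key in Boost
// unordered containers, e.g. boost::unordered_map<tTJSString, int>.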
#endif // #ifndef SPRIG_KRKR_TJS_HASH_HPP
|
Formal statement is: lemma cauchy_imp_bounded: assumes "Cauchy s" shows "bounded (range s)" Informal statement is: If a sequence is Cauchy, then its range is bounded. |
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
-/
import data.list.chain
import category_theory.punit
import category_theory.groupoid
/-!
# Connected category
Define a connected category as a _nonempty_ category for which every functor
to a discrete category is isomorphic to the constant functor.
NB. Some authors include the empty category as connected, we do not.
We instead are interested in categories with exactly one 'connected
component'.
We give some equivalent definitions:
- A nonempty category for which every functor to a discrete category is
constant on objects.
See `any_functor_const_on_obj` and `connected.of_any_functor_const_on_obj`.
- A nonempty category for which every function `F` for which the presence of a
morphism `f : j₁ ⟶ j₂` implies `F j₁ = F j₂` must be constant everywhere.
See `constant_of_preserves_morphisms` and `connected.of_constant_of_preserves_morphisms`.
- A nonempty category for which any subset of its elements containing the
default and closed under morphisms is everything.
See `induct_on_objects` and `connected.of_induct`.
- A nonempty category for which every object is related under the reflexive
transitive closure of the relation "there is a morphism in some direction
from `j₁` to `j₂`".
See `connected_zigzag` and `zigzag_connected`.
- A nonempty category for which for any two objects there is a sequence of
morphisms (some reversed) from one to the other.
See `exists_zigzag'` and `connected_of_zigzag`.
We also prove the result that the functor given by `(X × -)` preserves any
connected limit. That is, any limit of shape `J` where `J` is a connected
category is preserved by the functor `(X × -)`. This appears in `category_theory.limits.connected`.
-/
universes v₁ v₂ u₁ u₂
noncomputable theory
open category_theory.category
open opposite
namespace category_theory
/--
A possibly empty category for which every functor to a discrete category is constant.
-/
class is_preconnected (J : Type u₁) [category.{v₁} J] : Prop :=
(iso_constant : Π {α : Type u₁} (F : J ⥤ discrete α) (j : J),
nonempty (F ≅ (functor.const J).obj (F.obj j)))
/--
We define a connected category as a _nonempty_ category for which every
functor to a discrete category is constant.
NB. Some authors include the empty category as connected, we do not.
We instead are interested in categories with exactly one 'connected
component'.
This allows us to show that the functor X ⨯ - preserves connected limits.
See https://stacks.math.columbia.edu/tag/002S
-/
class is_connected (J : Type u₁) [category.{v₁} J] extends is_preconnected J : Prop :=
[is_nonempty : nonempty J]
attribute [instance, priority 100] is_connected.is_nonempty
variables {J : Type u₁} [category.{v₁} J]
variables {K : Type u₂} [category.{v₂} K]
/--
If `J` is connected, any functor `F : J ⥤ discrete α` is isomorphic to
the constant functor with value `F.obj j` (for any choice of `j`).
-/
def iso_constant [is_preconnected J] {α : Type u₁} (F : J ⥤ discrete α) (j : J) :
F ≅ (functor.const J).obj (F.obj j) :=
(is_preconnected.iso_constant F j).some
/--
If J is connected, any functor to a discrete category is constant on objects.
The converse is given in `is_connected.of_any_functor_const_on_obj`.
-/
lemma any_functor_const_on_obj [is_preconnected J]
{α : Type u₁} (F : J ⥤ discrete α) (j j' : J) :
F.obj j = F.obj j' :=
((iso_constant F j').hom.app j).down.1
/--
If any functor to a discrete category is constant on objects, J is connected.
The converse of `any_functor_const_on_obj`.
-/
lemma is_connected.of_any_functor_const_on_obj [nonempty J]
(h : ∀ {α : Type u₁} (F : J ⥤ discrete α), ∀ (j j' : J), F.obj j = F.obj j') :
is_connected J :=
{ iso_constant := λ α F j',
⟨nat_iso.of_components (λ j, eq_to_iso (h F j j')) (λ _ _ _, subsingleton.elim _ _)⟩ }
/--
If `J` is connected, then given any function `F` such that the presence of a
morphism `j₁ ⟶ j₂` implies `F j₁ = F j₂`, we have that `F` is constant.
This can be thought of as a local-to-global property.
The converse is shown in `is_connected.of_constant_of_preserves_morphisms`
-/
lemma constant_of_preserves_morphisms [is_preconnected J] {α : Type u₁} (F : J → α)
(h : ∀ (j₁ j₂ : J) (f : j₁ ⟶ j₂), F j₁ = F j₂) (j j' : J) :
F j = F j' :=
any_functor_const_on_obj { obj := F, map := λ _ _ f, eq_to_hom (h _ _ f) } j j'
/--
`J` is connected if: given any function `F : J → α` which is constant for any
`j₁, j₂` for which there is a morphism `j₁ ⟶ j₂`, then `F` is constant.
This can be thought of as a local-to-global property.
The converse of `constant_of_preserves_morphisms`.
-/
lemma is_connected.of_constant_of_preserves_morphisms [nonempty J]
(h : ∀ {α : Type u₁} (F : J → α), (∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), F j₁ = F j₂) →
(∀ j j' : J, F j = F j')) :
is_connected J :=
is_connected.of_any_functor_const_on_obj (λ _ F, h F.obj (λ _ _ f, (F.map f).down.1))
/--
An inductive-like property for the objects of a connected category.
If the set `p` is nonempty, and `p` is closed under morphisms of `J`,
then `p` contains all of `J`.
The converse is given in `is_connected.of_induct`.
-/
lemma induct_on_objects [is_preconnected J] (p : set J) {j₀ : J} (h0 : j₀ ∈ p)
(h1 : ∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), j₁ ∈ p ↔ j₂ ∈ p) (j : J) :
j ∈ p :=
begin
injection (constant_of_preserves_morphisms (λ k, ulift.up (k ∈ p)) (λ j₁ j₂ f, _) j j₀) with i,
rwa i,
dsimp,
exact congr_arg ulift.up (propext (h1 f)),
end
/--
If any maximal connected component containing some element j₀ of J is all of J, then J is connected.
The converse of `induct_on_objects`.
-/
lemma is_connected.of_induct [nonempty J] {j₀ : J}
(h : ∀ (p : set J), j₀ ∈ p → (∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), j₁ ∈ p ↔ j₂ ∈ p) → ∀ (j : J), j ∈ p) :
is_connected J :=
is_connected.of_constant_of_preserves_morphisms (λ α F a,
begin
have w := h {j | F j = F j₀} rfl (λ _ _ f, by simp [a f]),
dsimp at w,
intros j j',
rw [w j, w j'],
end)
/--
Another induction principle for `is_preconnected J`:
given a type family `Z : J → Sort*` and
a rule for transporting in *both* directions along a morphism in `J`,
we can transport an `x : Z j₀` to a point in `Z j` for any `j`.
-/
lemma is_preconnected_induction [is_preconnected J] (Z : J → Sort*)
(h₁ : Π {j₁ j₂ : J} (f : j₁ ⟶ j₂), Z j₁ → Z j₂)
(h₂ : Π {j₁ j₂ : J} (f : j₁ ⟶ j₂), Z j₂ → Z j₁)
{j₀ : J} (x : Z j₀) (j : J) : nonempty (Z j) :=
(induct_on_objects {j | nonempty (Z j)} ⟨x⟩
(λ j₁ j₂ f, ⟨by { rintro ⟨y⟩, exact ⟨h₁ f y⟩, }, by { rintro ⟨y⟩, exact ⟨h₂ f y⟩, }⟩) j : _)
/-- If `J` and `K` are equivalent, then if `J` is preconnected then `K` is as well. -/
lemma is_preconnected_of_equivalent {K : Type u₁} [category.{v₂} K] [is_preconnected J]
(e : J ≌ K) :
is_preconnected K :=
{ iso_constant := λ α F k, ⟨
calc F ≅ e.inverse ⋙ e.functor ⋙ F : (e.inv_fun_id_assoc F).symm
... ≅ e.inverse ⋙ (functor.const J).obj ((e.functor ⋙ F).obj (e.inverse.obj k)) :
iso_whisker_left e.inverse (iso_constant (e.functor ⋙ F) (e.inverse.obj k))
... ≅ e.inverse ⋙ (functor.const J).obj (F.obj k) :
iso_whisker_left _ ((F ⋙ functor.const J).map_iso (e.counit_iso.app k))
... ≅ (functor.const K).obj (F.obj k) : nat_iso.of_components (λ X, iso.refl _) (by simp),
⟩ }
/-- If `J` and `K` are equivalent, then if `J` is connected then `K` is as well. -/
lemma is_connected_of_equivalent {K : Type u₁} [category.{v₂} K]
(e : J ≌ K) [is_connected J] :
is_connected K :=
{ is_nonempty := nonempty.map e.functor.obj (by apply_instance),
to_is_preconnected := is_preconnected_of_equivalent e }
/-- If `J` is preconnected, then `Jᵒᵖ` is preconnected as well. -/
instance is_preconnected_op [is_preconnected J] : is_preconnected Jᵒᵖ :=
{ iso_constant := λ α F X, ⟨
nat_iso.of_components
(λ Y, (nonempty.some $ is_preconnected.iso_constant
(F.right_op ⋙ (discrete.opposite α).functor) (unop X)).app (unop Y))
(λ Y Z f, subsingleton.elim _ _)
⟩ }
/-- If `J` is connected, then `Jᵒᵖ` is connected as well. -/
instance is_connected_op [is_connected J] : is_connected Jᵒᵖ :=
{ is_nonempty := nonempty.intro (op (classical.arbitrary J)) }
lemma is_preconnected_of_is_preconnected_op [is_preconnected Jᵒᵖ] : is_preconnected J :=
is_preconnected_of_equivalent (op_op_equivalence J)
lemma is_connected_of_is_connected_op [is_connected Jᵒᵖ] : is_connected J :=
is_connected_of_equivalent (op_op_equivalence J)
/-- j₁ and j₂ are related by `zag` if there is a morphism between them. -/
@[reducible]
def zag (j₁ j₂ : J) : Prop := nonempty (j₁ ⟶ j₂) ∨ nonempty (j₂ ⟶ j₁)
lemma zag_symmetric : symmetric (@zag J _) :=
λ j₂ j₁ h, h.swap
/--
`j₁` and `j₂` are related by `zigzag` if there is a chain of
morphisms from `j₁` to `j₂`, with backward morphisms allowed.
-/
@[reducible]
def zigzag : J → J → Prop := relation.refl_trans_gen zag
lemma zigzag_symmetric : symmetric (@zigzag J _) :=
relation.refl_trans_gen.symmetric zag_symmetric
lemma zigzag_equivalence : _root_.equivalence (@zigzag J _) :=
mk_equivalence _
relation.reflexive_refl_trans_gen
zigzag_symmetric
relation.transitive_refl_trans_gen
/--
The setoid given by the equivalence relation `zigzag`. A quotient for this
setoid is a connected component of the category.
-/
def zigzag.setoid (J : Type u₂) [category.{v₁} J] : setoid J :=
{ r := zigzag,
iseqv := zigzag_equivalence }
/--
If there is a zigzag from `j₁` to `j₂`, then there is a zigzag from `F j₁` to
`F j₂` as long as `F` is a functor.
-/
lemma zigzag_obj_of_zigzag (F : J ⥤ K) {j₁ j₂ : J} (h : zigzag j₁ j₂) :
zigzag (F.obj j₁) (F.obj j₂) :=
h.lift _ $ λ j k, or.imp (nonempty.map (λ f, F.map f)) (nonempty.map (λ f, F.map f))
-- TODO: figure out the right way to generalise this to `zigzag`.
lemma zag_of_zag_obj (F : J ⥤ K) [full F] {j₁ j₂ : J} (h : zag (F.obj j₁) (F.obj j₂)) :
zag j₁ j₂ :=
or.imp (nonempty.map F.preimage) (nonempty.map F.preimage) h
/-- Any equivalence relation containing (⟶) holds for all pairs of a connected category. -/
lemma equiv_relation [is_connected J] (r : J → J → Prop) (hr : _root_.equivalence r)
(h : ∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), r j₁ j₂) :
∀ (j₁ j₂ : J), r j₁ j₂ :=
begin
have z : ∀ (j : J), r (classical.arbitrary J) j :=
induct_on_objects (λ k, r (classical.arbitrary J) k)
(hr.1 (classical.arbitrary J)) (λ _ _ f, ⟨λ t, hr.2.2 t (h f), λ t, hr.2.2 t (hr.2.1 (h f))⟩),
intros, apply hr.2.2 (hr.2.1 (z _)) (z _)
end
/-- In a connected category, any two objects are related by `zigzag`. -/
lemma is_connected_zigzag [is_connected J] (j₁ j₂ : J) : zigzag j₁ j₂ :=
equiv_relation _ zigzag_equivalence
(λ _ _ f, relation.refl_trans_gen.single (or.inl (nonempty.intro f))) _ _
/--
If any two objects in a nonempty category are related by `zigzag`, the category is connected.
-/
lemma zigzag_is_connected [nonempty J] (h : ∀ (j₁ j₂ : J), zigzag j₁ j₂) : is_connected J :=
begin
apply is_connected.of_induct,
intros p hp hjp j,
have: ∀ (j₁ j₂ : J), zigzag j₁ j₂ → (j₁ ∈ p ↔ j₂ ∈ p),
{ introv k,
induction k with _ _ rt_zag zag,
{ refl },
{ rw k_ih,
rcases zag with ⟨⟨_⟩⟩ | ⟨⟨_⟩⟩,
apply hjp zag,
apply (hjp zag).symm } },
rwa this j (classical.arbitrary J) (h _ _)
end
lemma exists_zigzag' [is_connected J] (j₁ j₂ : J) :
∃ l, list.chain zag j₁ l ∧ list.last (j₁ :: l) (list.cons_ne_nil _ _) = j₂ :=
list.exists_chain_of_relation_refl_trans_gen (is_connected_zigzag _ _)
/--
If any two objects in a nonempty category are linked by a sequence of (potentially reversed)
morphisms, then J is connected.
The converse of `exists_zigzag'`.
-/
lemma is_connected_of_zigzag [nonempty J]
(h : ∀ (j₁ j₂ : J), ∃ l, list.chain zag j₁ l ∧ list.last (j₁ :: l) (list.cons_ne_nil _ _) = j₂) :
is_connected J :=
begin
apply zigzag_is_connected,
intros j₁ j₂,
rcases h j₁ j₂ with ⟨l, hl₁, hl₂⟩,
apply list.relation_refl_trans_gen_of_exists_chain l hl₁ hl₂,
end
/-- If `discrete α` is connected, then `α` is (type-)equivalent to `punit`. -/
def discrete_is_connected_equiv_punit {α : Type u₁} [is_connected (discrete α)] : α ≃ punit :=
discrete.equiv_of_equivalence.{u₁ u₁}
{ functor := functor.star α,
inverse := discrete.functor (λ _, classical.arbitrary _),
unit_iso := by { exact (iso_constant _ (classical.arbitrary _)), },
counit_iso := functor.punit_ext _ _ }
variables {C : Type u₂} [category.{u₁} C]
/--
For objects `X Y : C`, any natural transformation `α : const X ⟶ const Y` from a connected
category must be constant.
This is the key property of connected categories which we use to establish properties about limits.
-/
instance [is_connected J] : full (functor.const J : C ⥤ J ⥤ C) :=
{ preimage := λ X Y f, f.app (classical.arbitrary J),
witness' := λ X Y f,
begin
ext j,
apply nat_trans_from_is_connected f (classical.arbitrary J) j,
end }
instance nonempty_hom_of_connected_groupoid {G} [groupoid G] [is_connected G] :
∀ (x y : G), nonempty (x ⟶ y) :=
begin
refine equiv_relation _ _ (λ j₁ j₂, nonempty.intro),
exact ⟨λ j, ⟨𝟙 _⟩, λ j₁ j₂, nonempty.map (λ f, inv f), λ _ _ _, nonempty.map2 (≫)⟩,
end
end category_theory
|
Require Import
Coq.Arith.Peano_dec
Coq.Structures.OrderedTypeEx
Coq.Lists.SetoidList
Fiat.Common
Fiat.Common.DecideableEnsembles
Fiat.Common.String_as_OT
Fiat.Common.List.ListFacts
Fiat.Common.List.FlattenList
Fiat.Common.SetEqProperties
Fiat.Common.FMapExtensions
Fiat.Common.List.PermutationFacts
Fiat.QueryStructure.Specification.Representation.QueryStructureNotations.
Section IncludedInAClauses.
Context {X : Type}
(X_eq : X -> X -> Prop)
{X_eq_dec : forall x x', {X_eq x x'} + {~ X_eq x x'}}
{X_equiv : Equivalence X_eq}.
Definition IncludedInA := inclA X_eq.
Fixpoint IncludedInA_dec (l l' : list X)
: {IncludedInA l l'} + {~ IncludedInA l l'}.
refine (match l with
| nil => left _
| e :: l =>
if InA_dec X_eq_dec e l' then
if IncludedInA_dec l l' then
left _
else right _
else right _
end); unfold IncludedInA, inclA in *; intros.
- inversion H.
- inversion H; subst; eauto.
rewrite H1; eauto.
- unfold not; intros.
let _H0 := match goal with _H0 : ~ _ |- _ => constr:(_H0) end in
apply _H0; intros.
eapply H; econstructor 2; eauto.
- unfold not; intros.
let _H := match goal with _H0 : ~ _ |- _ => constr:(_H0) end in
eapply _H.
apply H; econstructor; eauto.
reflexivity.
Defined.
Global Instance DecideableEnsemble_IncludedInA st :
DecideableEnsemble (IncludedInA st) :=
{| dec a := ?[IncludedInA_dec st a] |}.
Proof.
intros; destruct (IncludedInA_dec st a); intuition eauto.
discriminate.
Defined.
Global Instance DecideableEnsemble_IncludedInA_f
(A : Type)
(f : A -> list X)
b :
DecideableEnsemble (fun a => IncludedInA b (f a) ) :=
{| dec a := ?[IncludedInA_dec b (f a)]|}.
Proof.
intros; destruct (IncludedInA_dec b (f a)); intuition eauto.
discriminate.
Defined.
End IncludedInAClauses.
Section IncludedInClauses.
Context {X : Type}
{X_eq : Query_eq X}.
Definition IncludedIn := IncludedInA (@eq X).
Definition IncludedIn_dec (l l' : list X)
: {IncludedIn l l'} + {~ IncludedIn l l'} :=
IncludedInA_dec (X_eq_dec := A_eq_dec) l l'.
Global Instance DecideableEnsemble_IncludedIn st :
DecideableEnsemble (IncludedIn st) :=
{| dec a := ?[IncludedIn_dec st a] |}.
Proof.
intros; destruct (IncludedIn_dec st a); intuition eauto.
discriminate.
Defined.
Global Instance DecideableEnsemble_IncludedIn_f
(A : Type)
(f : A -> list X)
b :
DecideableEnsemble (fun a => IncludedIn b (f a) ) :=
{| dec a := ?[IncludedIn_dec b (f a)]|}.
Proof.
intros; destruct (IncludedIn_dec b (f a)); intuition eauto.
discriminate.
Defined.
End IncludedInClauses.
|
(* "especially useful" *)
Inductive natlist : Type :=
| nil
| cons (n : nat) (l : natlist).
Notation "x :: l" := (cons x l)
(at level 60, right associativity).
Notation "[ ]" := nil.
Notation "[ x ; .. ; y ]" := (cons x .. (cons y nil) ..).
Fixpoint app (l1 l2 : natlist) : natlist :=
match l1 with
| nil => l2
| h :: t => h :: (app t l2)
end.
Notation "x ++ y" := (app x y)
(right associativity, at level 60).
(* A bag (or multiset) is like a set, except that each element can appear multiple times rather than just once. One possible representation for a bag of numbers is as a list. *)
Definition bag := natlist.
Fixpoint eqb (n m : nat) : bool :=
match n with
| O => match m with
| O => true
| S m' => false
end
| S n' => match m with
| O => false
| S m' => eqb n' m'
end
end.
Fixpoint count (v : nat) (s : bag) : nat :=
match s with
| nil => O
| h :: t => match eqb v h with
| true => 1 + count v t
| false => count v t
end
end.
Example test_count1: count 1 [1;2;3;1;4;1] = 3.
Proof. reflexivity. Qed.
Example test_count2: count 6 [1;2;3;1;4;1] = 0.
Proof. reflexivity. Qed.
Definition sum : bag -> bag -> bag := app.
Example test_sum1: count 1 (sum [1;2;3] [1;4;1]) = 3.
Proof. simpl. reflexivity. Qed.
Definition add (v : nat) (s : bag) : bag :=
sum [v] s.
Example test_add1: count 1 (add 1 [1;4;1]) = 3.
Proof. simpl. reflexivity. Qed.
Example test_add2: count 5 (add 1 [1;4;1]) = 0.
Proof. simpl. reflexivity. Qed.
Fixpoint member (v : nat) (s : bag) : bool :=
match s with
| nil => false
| h :: t => match eqb h v with
| true => true
| false => member v t
end
end.
Example test_member1: member 1 [1;4;1] = true.
Proof. simpl. reflexivity. Qed.
Example test_member2: member 2 [1;4;1] = false.
Proof. simpl. reflexivity. Qed. |
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
(* Author: Andrew Boyton, 2012
Maintainers: Gerwin Klein <kleing at cse.unsw.edu.au>
Rafal Kolanski <rafal.kolanski at nicta.com.au>
*)
chapter "Defining some separation logic maps-to predicates on top of the instantiation."
theory Separation_D
imports Abstract_Separation_D
begin
type_synonym sep_pred = "sep_state \<Rightarrow> bool"
definition
state_sep_projection :: "cdl_state \<Rightarrow> sep_state"
where
"state_sep_projection \<equiv> \<lambda>s. SepState (cdl_objects s) (cdl_ghost_state s)"
(* This turns a separation logic predicate into a predicate on the capDL state. *)
abbreviation
lift' :: "(sep_state \<Rightarrow> 'a) \<Rightarrow> cdl_state \<Rightarrow> 'a" ("<_>")
where
"<P> s \<equiv> P (state_sep_projection s)"
(* The generalisation of the maps to operator for separation logic. *)
definition
sep_map_general :: "cdl_object_id \<Rightarrow> cdl_object \<Rightarrow> cdl_components \<Rightarrow> sep_pred"
where
"sep_map_general p obj gs \<equiv> \<lambda>s. sep_heap s = [p \<mapsto> obj] \<and> sep_ghost_state s p = gs"
(* Alternate definition without the [p \<mapsto> obj] notation. *)
lemma sep_map_general_def2:
"sep_map_general p obj gs s =
(dom (sep_heap s) = {p} \<and> ko_at obj p (sep_heap s) \<and> sep_ghost_state s p = gs)"
apply (clarsimp simp: sep_map_general_def object_at_def)
apply (rule)
apply clarsimp
apply (clarsimp simp: fun_upd_def)
apply (rule ext)
apply (fastforce simp: dom_def split:if_split)
done
(* There is an object there. *)
definition
sep_map_i :: "cdl_object_id \<Rightarrow> cdl_object \<Rightarrow> sep_pred" ("_ \<mapsto>i _" [76,71] 76)
where
"p \<mapsto>i obj \<equiv> sep_map_general p obj UNIV"
(* The fields are there (and there are no caps). *)
definition
sep_map_f :: "cdl_object_id \<Rightarrow> cdl_object \<Rightarrow> sep_pred" ("_ \<mapsto>f _" [76,71] 76)
where
"p \<mapsto>f obj \<equiv> sep_map_general p (update_slots empty obj) {None}"
(* There is that cap there. *)
definition
sep_map_c :: "cdl_cap_ref \<Rightarrow> cdl_cap \<Rightarrow> sep_pred" ("_ \<mapsto>c _" [76,71] 76)
where
"p \<mapsto>c cap \<equiv> \<lambda>s. let (obj_id, slot) = p; heap = sep_heap s in
\<exists>obj. sep_map_general obj_id obj {Some slot} s \<and> object_slots obj = [slot \<mapsto> cap]"
definition
sep_any :: "('a \<Rightarrow> 'b \<Rightarrow> sep_pred) \<Rightarrow> ('a \<Rightarrow> sep_pred)" where
"sep_any m \<equiv> (\<lambda>p s. \<exists>v. (m p v) s)"
abbreviation "sep_any_map_i \<equiv> sep_any sep_map_i"
notation sep_any_map_i ("_ \<mapsto>i -" 76)
abbreviation "sep_any_map_c \<equiv> sep_any sep_map_c"
notation sep_any_map_c ("_ \<mapsto>c -" 76)
end
|
using HMCExamples
HMCExamples.main_rbc_2_joint()
|
" I warned him , if you follow this , you 're going to get in trouble — because you 're going to expose the American intellectual community as a gang of frauds , and they are not going to like it , and they 're going to destroy you . "
|
#-----------------------------------------------------------------#
# This routine computes the RHS forcing of a simple two-rate ODE
# Written by F.X. Giraldo / P.R. Mugg on 9/19/2019
# Department of Applied Mathematics
# Naval Postgraduate School
# Monterey, CA 93943-5216
#------------------------------------------------------------------#
function rhs_function(q0,c,time)
# RHS components
f = c*q0; #fast component
g = sin(time); #slow component
# Construct RHS
rhs = f + g;
return (rhs,f,g)
end #end of rhs_function
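# Minimal usage sketch (illustrative, not part of the original routine):
# evaluate the split RHS at q0 = 1.0 with fast coefficient c = -1.0 at t = 0.5.
q0, c, t = 1.0, -1.0, 0.5
rhs, f, g = rhs_function(q0, c, t)
println("rhs = $rhs (fast = $f, slow = $g)")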
|
library(ggplot2)
library(data.table) # for fread()
library(ggthemes) # Themes
# Used to label the countries
pointsToLabel <- c("Russia", "Venezuela", "Iraq", "Myanmar", "Sudan",
"Afghanistan", "Congo", "Greece", "Argentina", "Brazil",
"India", "Italy", "China", "South Africa", "Spane",
"Botswana", "Cape Verde", "Bhutan", "Rwanda", "France",
"United States", "Germany", "Britain", "Barbados", "Norway", "Japan",
"New Zealand", "Singapore")
# Reads the document, drop = 1 skips the first column
d <- fread('economist-project-data.csv', drop = 1)
# Create the scatterplot
plot <- ggplot(d, aes(x = CPI, y = HDI, color = Region))
# Create circles
plot <- plot + geom_point(size = 4, shape = 1)
# Creates a line through data
# se = F removes the gray area around the line
# method = 'lm' means linear modeling
plot <- plot + geom_smooth(aes(group = 1), method = 'lm', formula = y~log(x), se = F, color = 'red') # Linear regression
# Add text to the circles
plot <- plot + geom_text(aes(label = Country), color = 'gray20', data = subset(d, Country %in% pointsToLabel), check_overlap = T)
plot <- plot + scale_x_continuous(limits = c(.9, 10.5), breaks = 1:10)
plot <- plot + theme_economist_white() # theme to mimic the economist |
/-
Copyright (c) 2021 Kyle Miller. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kyle Miller
-/
import data.fin.basic
import data.finset.sort
import order.lexicographic
/-!
# Sorting tuples by their values
Given an `n`-tuple `f : fin n → α` where `α` is ordered,
we may want to turn it into a sorted `n`-tuple.
This file provides an API for doing so, with the sorted `n`-tuple given by
`f ∘ tuple.sort f`.
## Main declarations
* `tuple.sort`: given `f : fin n → α`, produces a permutation on `fin n`
* `tuple.monotone_sort`: `f ∘ tuple.sort f` is `monotone`
-/
namespace tuple
variables {n : ℕ}
variables {α : Type*} [linear_order α]
/--
`graph f` produces the finset of pairs `(f i, i)`
equipped with the lexicographic order.
-/
def graph (f : fin n → α) : finset (α ×ₗ (fin n)) :=
finset.univ.image (λ i, (f i, i))
/--
Given `p : α ×ₗ (fin n) := (f i, i)` with `p ∈ graph f`,
`graph.proj p` is defined to be `f i`.
-/
def graph.proj {f : fin n → α} : graph f → α := λ p, p.1.1
@[simp] lemma graph.card (f : fin n → α) : (graph f).card = n :=
begin
rw [graph, finset.card_image_of_injective],
{ exact finset.card_fin _ },
{ intros _ _,
simp }
end
/--
`graph_equiv₁ f` is the natural equivalence between `fin n` and `graph f`,
mapping `i` to `(f i, i)`. -/
def graph_equiv₁ (f : fin n → α) : fin n ≃ graph f :=
{ to_fun := λ i, ⟨(f i, i), by simp [graph]⟩,
inv_fun := λ p, p.1.2,
left_inv := λ i, by simp,
right_inv := λ ⟨⟨x, i⟩, h⟩, by simpa [graph] using h }
@[simp] lemma proj_equiv₁' (f : fin n → α) : graph.proj ∘ graph_equiv₁ f = f :=
rfl
/--
`graph_equiv₂ f` is an equivalence between `fin n` and `graph f` that respects the order.
-/
def graph_equiv₂ (f : fin n → α) : fin n ≃o graph f :=
finset.order_iso_of_fin _ (by simp)
/-- `sort f` is the permutation that orders `fin n` according to the order of the outputs of `f`. -/
def sort (f : fin n → α) : equiv.perm (fin n) :=
(graph_equiv₂ f).to_equiv.trans (graph_equiv₁ f).symm
lemma self_comp_sort (f : fin n → α) : f ∘ sort f = graph.proj ∘ graph_equiv₂ f :=
show graph.proj ∘ ((graph_equiv₁ f) ∘ (graph_equiv₁ f).symm) ∘ (graph_equiv₂ f).to_equiv = _,
by simp
lemma monotone_proj (f : fin n → α) : monotone (graph.proj : graph f → α) :=
begin
rintro ⟨⟨x, i⟩, hx⟩ ⟨⟨y, j⟩, hy⟩ (h|h),
{ exact le_of_lt ‹_› },
{ simp [graph.proj] },
end
lemma monotone_sort (f : fin n → α) : monotone (f ∘ sort f) :=
begin
rw [self_comp_sort],
exact (monotone_proj f).comp (graph_equiv₂ f).monotone,
end
end tuple
|
/*
For more information, please see: http://software.sci.utah.edu
The MIT License
Copyright (c) 2020 Scientific Computing and Imaging Institute,
University of Utah.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
#include <Core/Algorithms/Legacy/DataIO/TriSurfSTLBinaryConverter.h>
#include <Core/Algorithms/Legacy/DataIO/STLUtils.h>
#include <Core/Datatypes/Legacy/Field/VField.h>
#include <Core/Datatypes/Legacy/Field/VMesh.h>
#include <Core/Datatypes/Legacy/Field/FieldInformation.h>
#include <boost/shared_array.hpp>
#include <boost/shared_ptr.hpp>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <string>
#include <locale>
/// TODO: use std::unordered_map when porting to SCIRun 5
//#include <unordered_map>
using namespace SCIRun;
using namespace SCIRun::Core::Algorithms;
using namespace SCIRun::Core::Logging;
using namespace SCIRun::Core::Geometry;
namespace SCIRun {
namespace Core {
namespace Algorithms {
class ConverterPrivate
{
public:
explicit ConverterPrivate(LoggerHandle pr)
: pr_(pr),
STL_HEADER_LENGTH(80),
STL_FIELD_LENGTH(4),
POINT_LEN(3),
CELL_SIZE(3),
FIELD_LEN(12),
ATTRIBUTE_BYTE_COUNT(2)
{}
bool readFile(const std::string& filename, FieldHandle& field);
bool writeFile(const std::string& filename, VMesh *vmesh);
// assumes always array length 3
inline Point arrayToPoint(const boost::shared_array<float>& array)
{
return Point( array[0], array[1], array[2] );
}
private:
LoggerHandle pr_;
/// 80 byte header, usually ignored
const unsigned short STL_HEADER_LENGTH;
/// STL binary contains unsigned ints, floats
const unsigned short STL_FIELD_LENGTH;
const unsigned short POINT_LEN;
const unsigned short CELL_SIZE;
const unsigned short FIELD_LEN;
const unsigned short ATTRIBUTE_BYTE_COUNT;
PointTable pointsLookupTable;
};
}}}
bool
ConverterPrivate::readFile(const std::string& filename, FieldHandle& field)
{
std::ifstream inputfile;
inputfile.exceptions( std::ifstream::failbit | std::ifstream::badbit );
VMesh *vmesh = field->vmesh();
try
{
inputfile.open(filename.c_str(), std::ios::in | std::ios::binary);
// check for solid and discard
    boost::shared_array<char> headerBuffer(new char[STL_HEADER_LENGTH]);
    inputfile.read(headerBuffer.get(), STL_HEADER_LENGTH);
    // construct with an explicit length: the raw header is not null-terminated
    std::string header(headerBuffer.get(), STL_HEADER_LENGTH);
const std::string solidString("solid");
std::locale loc;
for (unsigned int i = 0; i < solidString.length() && i < header.length(); ++i)
{
header[i] = std::tolower(header[i], loc);
}
// verify that we're not reading an ASCII file, which should start with 'solid'
std::size_t index = header.find(solidString);
if (index == 0)
{
// warn, but attempt to parse anyway
if (this->pr_)
this->pr_->warning(filename + " header begins with \"solid\". This may be an ASCII STL file.");
}
    boost::shared_array<char> numTrianglesBuffer(new char[STL_FIELD_LENGTH]);
inputfile.read(numTrianglesBuffer.get(), STL_FIELD_LENGTH);
unsigned int numTriangles = *( reinterpret_cast<unsigned int*>( numTrianglesBuffer.get() ) );
FacetList facetList;
vmesh->elem_reserve(numTriangles);
unsigned int pointIndex = 0;
for (unsigned int i = 0; (i < numTriangles) && (! inputfile.eof()) ; ++i)
{
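      // Each facet record in a binary STL file is 50 bytes: a 12-byte normal
      // (3 floats), three 12-byte vertices, and a 2-byte attribute byte count.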
// discard normals
inputfile.seekg(FIELD_LEN, std::ios_base::cur);
boost::shared_array<char> vertex1Buffer(new char[FIELD_LEN]);
inputfile.read(vertex1Buffer.get(), FIELD_LEN);
boost::shared_array<float> vertex1(new float[POINT_LEN]);
memcpy(vertex1.get(), vertex1Buffer.get(), FIELD_LEN);
boost::shared_array<char> vertex2Buffer(new char[FIELD_LEN]);
inputfile.read(vertex2Buffer.get(), FIELD_LEN);
boost::shared_array<float> vertex2(new float[POINT_LEN]);
memcpy(vertex2.get(), vertex2Buffer.get(), FIELD_LEN);
boost::shared_array<char> vertex3Buffer(new char[FIELD_LEN]);
inputfile.read(vertex3Buffer.get(), FIELD_LEN);
boost::shared_array<float> vertex3(new float[POINT_LEN]);
memcpy(vertex3.get(), vertex3Buffer.get(), FIELD_LEN);
// discard attribute byte count
inputfile.seekg(ATTRIBUTE_BYTE_COUNT, std::ios_base::cur);
Point p1 = arrayToPoint(vertex1),
p2 = arrayToPoint(vertex2),
p3 = arrayToPoint(vertex3);
facetList.push_back( Facet(p1, p2, p3) );
// adding points to mesh here ensures points get added in order
// (very important)
PointTable::iterator it = pointsLookupTable.find(p1);
if ( it == pointsLookupTable.end() )
{
pointsLookupTable[p1] = pointIndex++;
vmesh->add_point(p1);
}
it = pointsLookupTable.find(p2);
if ( it == pointsLookupTable.end() )
{
pointsLookupTable[p2] = pointIndex++;
vmesh->add_point(p2);
}
it = pointsLookupTable.find(p3);
if ( it == pointsLookupTable.end() )
{
pointsLookupTable[p3] = pointIndex++;
vmesh->add_point(p3);
}
}
inputfile.close();
FacetList::iterator listIter;
for (listIter = facetList.begin(); listIter != facetList.end(); ++listIter)
{
// use the facet list and point lookup table to match points and element indices
VMesh::Node::array_type vdata;
vdata.resize(CELL_SIZE);
PointTable::iterator it = pointsLookupTable.find(listIter->point1_);
vdata[0] = it->second;
it = pointsLookupTable.find(listIter->point2_);
vdata[1] = it->second;
it = pointsLookupTable.find(listIter->point3_);
vdata[2] = it->second;
vmesh->add_elem(vdata);
}
}
catch (std::ifstream::failure& e)
{
if (this->pr_) this->pr_->error("Could not open and read from file " + filename);
return false;
}
return true;
}
bool
ConverterPrivate::writeFile(const std::string& filename, VMesh *vmesh)
{
std::ofstream outputfile;
outputfile.exceptions( std::ofstream::failbit | std::ofstream::badbit );
try
{
outputfile.open(filename.c_str(), std::ios::out | std::ios::binary);
std::string header("STL header: SCIRun TriSurf field to STL Binary export");
char* headerBuffer = const_cast<char*>(header.c_str());
outputfile.write(headerBuffer, STL_HEADER_LENGTH);
unsigned int numTriangles = static_cast<unsigned int>( vmesh->num_faces() );
outputfile.write(reinterpret_cast<char*>(&numTriangles), STL_FIELD_LENGTH);
VMesh::Face::iterator meshFaceIter;
VMesh::Face::iterator meshFaceEnd;
VMesh::Node::array_type nodesFromFace(CELL_SIZE);
vmesh->end(meshFaceEnd);
// 0 is an acceptable value for this field
unsigned short byteAttributeCount = 0;
for (vmesh->begin(meshFaceIter); meshFaceIter != meshFaceEnd; ++meshFaceIter)
{
// get nodes from mesh element
VMesh::Face::index_type elemID = *meshFaceIter;
vmesh->get_nodes(nodesFromFace, elemID);
Point p1, p2, p3;
vmesh->get_center(p1, nodesFromFace[0]);
vmesh->get_center(p2, nodesFromFace[1]);
vmesh->get_center(p3, nodesFromFace[2]);
boost::shared_array<float> normal = computeFaceNormal(p1, p2, p3);
outputfile.write(reinterpret_cast<char*>(normal.get()), FIELD_LEN);
      boost::shared_array<float> vertex1(new float[POINT_LEN]);
      vertex1[0] = p1.x();
      vertex1[1] = p1.y();
      vertex1[2] = p1.z();
      outputfile.write(reinterpret_cast<char*>(vertex1.get()), FIELD_LEN);
boost::shared_array<float> vertex2(new float[POINT_LEN]);
vertex2[0] = p2.x();
vertex2[1] = p2.y();
vertex2[2] = p2.z();
outputfile.write(reinterpret_cast<char*>(vertex2.get()), FIELD_LEN);
boost::shared_array<float> vertex3(new float[POINT_LEN]);
vertex3[0] = p3.x();
vertex3[1] = p3.y();
vertex3[2] = p3.z();
outputfile.write(reinterpret_cast<char*>(vertex3.get()), FIELD_LEN);
outputfile.write(reinterpret_cast<char*>(&byteAttributeCount), ATTRIBUTE_BYTE_COUNT);
}
outputfile.close();
}
  catch (std::ifstream::failure& e)
{
if (this->pr_) this->pr_->error("Could not open and write to file " + filename);
return false;
}
catch (...)
{
if (this->pr_) this->pr_->error("Error while exporting TriSurf field to " + filename);
return false;
}
return true;
}
TriSurfSTLBinaryConverter::TriSurfSTLBinaryConverter(LoggerHandle pr) :
pr_(pr), converter_(new ConverterPrivate(pr))
{}
bool
TriSurfSTLBinaryConverter::read(const std::string& filename, FieldHandle& field)
{
// no data in STL file, make no basis for now
FieldInformation fieldInfo("TriSurfMesh", -1, "double");
field = CreateField(fieldInfo);
bool result = converter_->readFile(filename, field);
return result;
}
bool
TriSurfSTLBinaryConverter::write(const std::string& filename, const FieldHandle& field)
{
VMesh *vmesh = field->vmesh();
// validate
if (! vmesh->is_trisurfmesh() )
{
if (this->pr_) this->pr_->error("STL Binary converter only supports TriSurf mesh fields.");
return false;
}
bool result = converter_->writeFile(filename, vmesh);
return result;
}
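// Usage sketch (illustrative; obtaining a LoggerHandle depends on the
// hosting SCIRun module):
//   TriSurfSTLBinaryConverter converter(pr);
//   FieldHandle field;
//   if (converter.read("mesh.stl", field))
//     converter.write("copy.stl", field);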
|
! Angelica de Oliveira-Costa & Max Tegmark 2007
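! Usage sketch (illustrative): the program reads the frequency and the
! output file name from standard input, e.g.
!   printf '408\ngsm_408.dat\n' | ./gsm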
program gsm
implicit none
integer ncomp, nside, npix
parameter(ncomp=3,nside=512,npix=12*512**2)
integer ncomploaded, i, j, lnblnk
real*8 nu, f(ncomp+1), norm, A(npix,ncomp), t
character*60 infile, outfile
print *,'Frequency at which to make a map (in MHz)?'
read *,nu
print *,'Name of file in which to save the map?'
read *,outfile
      print *,'Making sky map at frequency_________',nu
print *,'Outfile_____________________________',outfile(1:lnblnk(outfile))
call LoadComponents(ncomploaded)
if (ncomploaded.ne.ncomp) stop 'DEATH ERROR: WRONG NUMBER OF COMPONENTS LOADED'
call ComputeComponents(nu,ncomp,f)
norm = f(ncomp+1)
infile = 'share/gsm/component_maps_408locked.dat'
print *,'Loading ',infile(1:lnblnk(infile))
open(2,file=infile,status='old')
do i=1,npix
read(2,*) (A(i,j),j=1,ncomp)
end do
close(2)
print *,'Saving ',outfile(1:lnblnk(outfile))
open(3,file=outfile)
do i=1,npix
t = 0
do j=1,ncomp
t = t + f(j)*A(i,j)
end do
t = norm*t
write(3,*) t
end do
close(3)
return
777 call usage
end
subroutine LoadComponents(ncomp) ! ncomp = Number of components to load
! Load the principal components from file and spline them for later use.
! The "extra" component (ncomp+1) is the overall scaling - we spline its logarithm.
implicit none
integer nmax, ncompmax, n, ncomp
parameter(nmax=1000,ncompmax=11)
real*8 x(nmax), y(nmax,ncompmax+1), ypp(nmax,ncompmax+1)
common/PCA/x, y, ypp, n
integer i, lnblnk
real*8 xn, scaling, tmp(nmax), yp0, yp1
character*80 infile, comline
!
infile = 'share/gsm/components.dat'
      ! Count the number of columns in the infile:
comline = 'head -1 '//infile(1:lnblnk(infile))//' | wc | cut -c9-16 >/tmp/qaz_cols.dat'
!print *,'###'//comline(1:lnblnk(comline))//'###'
if (system(comline).ne.0) stop 'DEATH ERROR COUNTING COLUMNS'
open (2,file='/tmp/qaz_cols.dat',status='old',err=777)
read (2,*,end=777,err=777) n
close(2)
ncomp = n - 2
if (ncomp.lt.0 ) stop 'DEATH ERROR: TOO FEW COMPONENTS.'
if (ncomp.gt.ncompmax) stop 'DEATH ERROR: TOO MANY COMPONENTS.'
n = 0
open (2,file=infile,status='old')
555 read (2,*,end=666) xn, scaling, (tmp(i),i=1,ncomp)
n = n + 1
      if (n.gt.nmax) pause 'n>nmax in LoadComponents'
x(n) = log(xn) ! We'll spline against lg(nu)
do i=1,ncomp
y(n,i) = tmp(i)
end do
y(n,ncomp+1) = log(scaling)
goto 555
666 close(2)
print *,ncomp,' components read from ',infile(1:lnblnk(infile)),' with',n,' spline points'
yp0 = 1.d30 ! Imposes y''=0 at starting point
yp1 = 1.d30 ! Imposes y''=0 at endpoint
do i=1,ncomp+1
call myspline_r8(x,y(1,i),n,yp0,yp1,ypp(1,i))
end do
return
777 stop 'DEATH ERROR 2 COUNTING COLUMNS'
end
subroutine ComputeComponents(nu,ncomp,a) ! Computes the principal components at frequency nu
implicit none
integer nmax, ncompmax, n, ncomp
parameter(nmax=1000,ncompmax=11)
real*8 x(nmax), y(nmax,ncompmax+1), ypp(nmax,ncompmax+1)
common/PCA/x, y, ypp, n
integer i
real*8 a(ncompmax+1), nu, lnnu, scaling
lnnu = log(nu)
do i=1,ncomp+1
call mysplint_r8(x,y(1,i),ypp(1,i),n,lnnu,a(i))
end do
a(ncomp+1) = exp(a(ncomp+1)) ! The overall scaling factor
return
end
SUBROUTINE myspline_r8 (x,y,n,yp1,ypn,y2)
! From numerical recipes.
INTEGER n, NMAX, i, k
REAL*8 yp1,ypn,x(n),y(n),y2(n)
PARAMETER(NMAX=10000) ! Increased from 500 by Max
REAL*8 p,qn,sig,un,u(NMAX)
if (N.gt.NMAX) pause 'SPLINE NMAX DEATH ERROR' ! Added by Max
if (x(1).gt.x(n)) pause 'SPLINE WARNING: x NOT INCREASING' ! Added by Max
if (yp1.gt..99e30) then
y2(1)=0.d0
u (1)=0.d0
else
y2(1)=-0.5d0
u (1)=(3.d0/(x(2)-x(1)))*((y(2)-y(1))/(x(2)-x(1))-yp1)
endif
do 11 i=2,n-1
sig =(x(i)-x(i-1))/(x(i+1)-x(i-1))
p = sig*y2(i-1)+2.d0
y2(i)=(sig-1.d0)/p
u (i)=(6.d0*((y(i+1)-y(i))/(x(i+
*1)-x(i))-(y(i)-y(i-1))/(x(i)-x(i-1)))/(x(i+1)-x(i-1))-sig*
*u(i-1))/p
11 continue
if (ypn.gt..99e30) then
qn=0.d0
un=0.d0
else
qn=0.5d0
un=(3.d0/(x(n)-x(n-1)))*(ypn-(y(n)-y(n-1))/(x(n)-x(n-1)))
endif
y2(n)=(un-qn*u(n-1))/(qn*y2(n-1)+1.d0)
do 12 k=n-1,1,-1
y2(k)=y2(k)*y2(k+1)+u(k)
12 continue
return
END
SUBROUTINE MYSPLINT_r8 (XA,YA,Y2A,N,X,Y)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! From numerical recipes.
! Modified to be more robust when
! extrapolating - It is linear if X lies outside
! the bounds.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
integer N, KLO, KHI, K
REAL*8 XA(N),YA(N),Y2A(N), X, Y, H, A, B
KLO=1
KHI=N
1 IF (KHI-KLO.GT.1) THEN
K=(KHI+KLO)/2
IF(XA(K).GT.X)THEN
KHI=K
ELSE
KLO=K
ENDIF
GOTO 1
ENDIF
H=XA(KHI)-XA(KLO)
IF (H.EQ.0.d0) PAUSE 'Bad XA input.'
if ((x-xa(1))*(x-xa(N)).gt.0.d0) then
! Outside bounds; do LINEAR extrapolation rather than cubic
A = (YA(KHI)-YA(KLO))/H
Y = ya(KLO) + A * (x-xa(KLO))
else
! Within bounds; do cubic interpolation
A=(XA(KHI)-X)/H
B=(X-XA(KLO))/H
Y=A*YA(KLO)+B*YA(KHI)+
* ((A**3-A)*Y2A(KLO)+(B**3-B)*Y2A(KHI))*(H**2)/6.
end if
RETURN
END
|
module InterpSpec where
import Commons
import Data.Complex (Complex (..))
import Data.Map.Strict (union)
import Data.Maybe (fromJust)
import Debug.Trace (traceShowId)
import HashedExpression.Internal.Expression
import HashedExpression.Internal.Normalize
import HashedExpression.Internal.Utils
import HashedExpression.Interp
import HashedExpression.Operation hiding (product, sum)
import qualified HashedExpression.Operation
import HashedExpression.Prettify
import Test.Hspec
import Test.QuickCheck (property)
import Var
import Prelude hiding ((^))
-- |
prop_AddScalarR :: SuiteScalarR -> SuiteScalarR -> Bool
prop_AddScalarR (Suite exp1 valMaps1) (Suite exp2 valMaps2) =
eval valMaps (exp1 + exp2) == eval valMaps exp1 + eval valMaps exp2
where
valMaps = valMaps1 `union` valMaps2
-- |
prop_MultiplyScalarR :: SuiteScalarR -> SuiteScalarR -> Bool
prop_MultiplyScalarR (Suite exp1 valMaps1) (Suite exp2 valMaps2) =
eval valMaps (exp1 * exp2) == eval valMaps exp1 * eval valMaps exp2
where
valMaps = valMaps1 `union` valMaps2
-- |
prop_AddScalarC :: SuiteScalarC -> SuiteScalarC -> Bool
prop_AddScalarC (Suite exp1 valMaps1) (Suite exp2 valMaps2) =
eval valMaps (exp1 + exp2) == eval valMaps exp1 + eval valMaps exp2
where
valMaps = valMaps1 `union` valMaps2
-- |
prop_MultiplyScalarC :: SuiteScalarC -> SuiteScalarC -> Bool
prop_MultiplyScalarC (Suite exp1 valMaps1) (Suite exp2 valMaps2) =
eval valMaps (exp1 * exp2) == eval valMaps exp1 * eval valMaps exp2
where
valMaps = valMaps1 `union` valMaps2
-- |
prop_RotateOneR1 :: SuiteOneR -> Bool
prop_RotateOneR1 (Suite exp valMaps) =
eval valMaps (rotate 0 exp) == eval valMaps exp
-- |
prop_RotateOneR2 :: SuiteOneR -> Int -> Bool
prop_RotateOneR2 (Suite exp valMaps) amount =
eval valMaps (f exp) == eval valMaps exp
where
f = rotate amount . rotate (- amount)
-- |
prop_RotateOneR3 :: SuiteOneR -> Int -> Int -> Bool
prop_RotateOneR3 (Suite exp valMaps) amount1 amount2 =
eval valMaps (f1 exp) == eval valMaps (f2 exp)
where
f1 = rotate amount1 . rotate amount2
f2 = rotate (amount1 + amount2)
-- |
prop_RotateTwoR1 :: SuiteTwoR -> Bool
prop_RotateTwoR1 (Suite exp valMaps) =
eval valMaps (rotate (0, 0) exp) == eval valMaps exp
-- |
prop_RotateTwoR2 :: SuiteTwoR -> (Int, Int) -> Bool
prop_RotateTwoR2 (Suite exp valMaps) (offset1, offset2) =
eval valMaps (f exp) == eval valMaps exp
where
f = rotate (offset1, offset2) . rotate (- offset1, - offset2)
-- |
prop_RotateTwoR3 :: SuiteTwoR -> (Int, Int) -> (Int, Int) -> Bool
prop_RotateTwoR3 (Suite exp valMaps) amount1 amount2 =
eval valMaps (f1 exp) == eval valMaps (f2 exp)
where
f1 = rotate amount1 . rotate amount2
f2 = rotate (fst amount1 + fst amount2, snd amount1 + snd amount2)
spec :: Spec
spec =
describe "Interp spec" $ do
specify "prop_Add Scalar R" $ property prop_AddScalarR
specify "prop_Multiply Scalar R" $ property prop_MultiplyScalarR
specify "prop_Add Scalar C" $ property prop_AddScalarC
specify "prop_Multiply Scalar C" $ property prop_MultiplyScalarC
specify "prop_Rotate One R rotate 0 should stay the same" $
property prop_RotateOneR1
specify "prop_Rotate One R rotate a and -a should stay the same" $
property prop_RotateOneR2
specify
"prop_Rotate One R rotate a then rotate b should equal rotate (a + b)"
$ property prop_RotateOneR3
specify "prop_Rotate Two R rotate (0, 0) should stay the same" $
property prop_RotateTwoR1
specify "prop_Rotate Two R rotate a and -a should stay the same" $
property prop_RotateTwoR2
specify
"prop_Rotate Two R rotate a then rotate b should equal rotate (a + b)"
$ property prop_RotateTwoR3
|
(** * Coequalizer **)
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Generalizable All Variables.
Set Primitive Projections.
Set Universe Polymorphism.
Require Import COC.Base.Main.
Class IsCoequalizer (C: Category)(X Y: C)(f g: C X Y)
(Coeq: C)
(coeq: C Y Coeq)
(univ: forall {Z: C}{h: C Y Z},
h \o f == h \o g -> C Coeq Z) :=
{
coequalize: coeq \o f == coeq \o g;
coequalizer_universality: forall Z (h: C Y Z)(Heq: h \o f == h \o g),
(univ Heq) \o coeq == h;
coequalizer_uniqueness: forall Z (h: C Y Z)(Heq: h \o f == h \o g)(u: C Coeq Z),
u \o coeq == h -> u == univ Heq
}.
Structure Coequalizer (C: Category)(X Y: C)(f g: C X Y) :=
{
coequalizer_obj: C;
coequalizer_map:> C Y coequalizer_obj;
coequalizer_univ: forall (Z: C)(h:C Y Z), h \o f == h \o g -> C coequalizer_obj Z;
coequalizer_prf:> IsCoequalizer coequalizer_map coequalizer_univ
}.
Existing Instance coequalizer_prf.
Notation "[ 'Coequalizer' 'of' f , g 'by' univ 'with' map 'to' Coeq 'in' C ]" :=
(@Build_Coequalizer C _ _ f g Coeq map univ _).
Notation "[ 'Coequalizer' 'of' f , g 'by' univ 'with' map ]" :=
[Coequalizer of f,g by univ with map to _ in _].
Notation "[ 'Coequalizer' 'by' univ 'with' map ]" :=
[Coequalizer of _,_ by univ with map].
Notation "[ 'Coequalizer' 'by' univ ]" := [Coequalizer by univ with _].
Lemma coequalizer_univ_subst
(C: Category)(X Y: C)(f g: C X Y)
(coeq: Coequalizer f g)
(Z: C)(h h': C Y Z)
(Heq: h \o f == h \o g)
(Heq': h' \o f == h' \o g):
h == h' ->
coequalizer_univ coeq Heq == coequalizer_univ coeq Heq'.
Proof.
intros Heqh.
apply coequalizer_uniqueness; symmetry; rewrite <- Heqh.
now rewrite coequalizer_universality.
Qed.
(** Isomorphic **)
Lemma coequalizer_isomorphic:
forall (C: Category)(X Y: C)(f g: C X Y)
(eq eq': Coequalizer f g),
coequalizer_obj eq === coequalizer_obj eq' in C.
Proof.
intros; simpl; unfold isomorphic.
exists (coequalizer_univ eq (coequalize (IsCoequalizer:=eq'))), (coequalizer_univ eq' (coequalize (IsCoequalizer:=eq))); split.
- rewrite (coequalizer_uniqueness (coequalize (IsCoequalizer:=eq)) (u:=Id (coequalizer_obj eq))); [| now rewrite cat_comp_id_cod].
apply coequalizer_uniqueness.
now rewrite cat_comp_assoc, !coequalizer_universality.
- rewrite (coequalizer_uniqueness (coequalize (IsCoequalizer:=eq')) (u:=Id (coequalizer_obj eq'))); [| now rewrite cat_comp_id_cod].
apply coequalizer_uniqueness.
now rewrite cat_comp_assoc, !coequalizer_universality.
Qed.
(** Example **)
(** Coequalizer of Setoids **)
Class IsRelation (X: Setoid)(R: X -> X -> Prop) :=
{
relation_proper:> Proper ((== in X) ==> (== in X) ==> iff) R
}.
Structure Relation (X: Setoid) :=
{
relation_rel:> X -> X -> Prop;
relation_prf:> IsRelation X relation_rel
}.
Existing Instance relation_prf.
Notation "[ 'Rel' 'by' rel 'on' X ]" := (@Build_Relation X rel _).
Inductive EquivClosure (A: Setoid)(R: Relation A): A -> A -> Prop :=
| eqcl_base: forall a b, R a b -> EquivClosure R a b
| eqcl_refl: forall a b: A, a == b -> EquivClosure R a b
| eqcl_sym: forall a b: A, EquivClosure R a b -> EquivClosure R b a
| eqcl_trans: forall a b c, EquivClosure R a b -> EquivClosure R b c -> EquivClosure R a c.
Program Instance EquivClosure_Equivalence (A: Setoid)(R: Relation A)
: Equivalence (EquivClosure R).
Next Obligation.
now intros x; apply eqcl_refl.
Qed.
Next Obligation.
now intros x y; apply eqcl_sym.
Qed.
Next Obligation.
now intros x y z; apply eqcl_trans.
Qed.
Program Definition EqCl (X: Setoid)(R: Relation X) :=
[Rel by EquivClosure R on X].
Next Obligation.
split; intros.
- revert y y0 H H0.
induction H1; intros.
+ apply eqcl_base.
now rewrite <- H0, <- H1.
+ rewrite H0, H1 in H.
now apply eqcl_refl.
+ now apply eqcl_sym, IHEquivClosure.
+ apply eqcl_trans with b.
* now apply IHEquivClosure1.
* now apply IHEquivClosure2.
- rename x into y, y into x, x0 into y0, y0 into x0.
revert y y0 H H0.
induction H1; intros.
+ apply eqcl_base.
now rewrite H0, H1.
+ rewrite <- H0, <- H1 in H.
now apply eqcl_refl.
+ now apply eqcl_sym, IHEquivClosure.
+ apply eqcl_trans with b.
* now apply IHEquivClosure1.
* now apply IHEquivClosure2.
Qed.
Program Definition QuotientSetoid (X: Setoid)(R: Relation X)
(Heq: Equivalence R) :=
[Setoid by R on X].
Inductive coequalize_pair (X Y: Setoid)(f g: Map X Y): Y -> Y -> Prop :=
| coequalize_pair_def: forall (x: X)(y y': Y),
f x == y -> g x == y' ->
coequalize_pair f g y y'.
Program Definition coequalize_pair_relation (X Y: Setoid)(f g: Map X Y) :=
[Rel by coequalize_pair f g on Y].
Next Obligation.
split; intros.
- destruct H1.
apply coequalize_pair_def with x.
+ now rewrite H in H1.
+ now rewrite H0 in H2.
- destruct H1.
apply coequalize_pair_def with x1.
+ now rewrite H.
+ now rewrite H0.
Qed.
Program Definition coequalizer_of_Setoids (X Y: Setoid)(f g: Map X Y) :=
[Coequalizer of f, g
by (fun z h H => [ y :-> h y])
with [y :-> y]
to QuotientSetoid (EqCl (coequalize_pair_relation f g)) (EquivClosure_Equivalence Y (coequalize_pair_relation f g))
in Setoids].
Next Obligation.
intros y y' Heq.
now apply eqcl_refl.
Qed.
Next Obligation.
intros y y' Hr.
induction Hr.
- destruct H0.
rewrite <- H0, <- H1.
now rewrite (H x).
- now rewrite H0.
- now symmetry.
- now transitivity (h b).
Qed.
Next Obligation.
now apply eqcl_base, coequalize_pair_def with x.
Qed.
|
module Decidable.Decidable
import Data.Rel
import Data.Fun
||| Interface for decidable n-ary Relations
public export
interface Decidable (ts : Vect k Type) (p : Rel ts) where
total decide : liftRel ts p Dec
||| Given a `Decidable` n-ary relation, provides a decision procedure for
||| this relation.
decision : (ts : Vect k Type) -> (p : Rel ts) -> (Decidable ts p) => liftRel ts p Dec
decision ts p = decide {ts} {p}
using (a : Type, x : a)
public export
data Given : Dec a -> Type where
Always : Given (Yes x)
|
f = open("jawiki-country.txt", "r")
for line in readlines(f)
category_reg = r"\[\[Category:"
if ismatch(category_reg, line)
print(replace(line, r"\[\[Category:|(\|\*)*\]\].*", ""))
end
end
|
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the GNU General Public License version 2. Note that NO WARRANTY is provided.
* See "LICENSE_GPLv2.txt" for details.
*
* @TAG(NICTA_GPL)
*)
(* Automation framework for general C refinement *)
theory Ctac
imports
Corres_C
"../XPres"
begin
(* This file includes theorems associated with ctac and friends *)
context kernel
begin
(* tactic setup *)
lemma match_ccorres:
"ccorres_underlying sr G r xf' arrel axf P P' hs a c
\<Longrightarrow> ccorres_underlying sr G r xf' arrel axf P P' hs a c" .
(* xfru needs to appear in the lemma ... *)
lemma match_ccorres_record:
fixes xfru :: "('a \<Rightarrow> 'a) \<Rightarrow> 'b \<Rightarrow> 'b" and xf' :: "cstate \<Rightarrow> 'b" and xfr :: "'b \<Rightarrow> 'a"
shows "ccorres_underlying sr G r (xfr \<circ> xf') arrel axf P P' hs a (c xfru)
\<Longrightarrow> ccorres_underlying sr G r (xfr \<circ> xf') arrel axf P P' hs a (c xfru)" .
lemma match_ccorres_Seq:
"ccorres_underlying sr G r xf arrel axf P P' hs a (c ;; d)
\<Longrightarrow> ccorres_underlying sr G r xf arrel axf P P' hs a (c ;; d)" .
lemma match_ccorres_call_Seq:
"ccorres_underlying sr G r xf arrel axf P P' hs a (call i f g c ;; d)
\<Longrightarrow> ccorres_underlying sr G r xf arrel axf P P' hs a (call i f g c ;; d)" .
(* Most specific to least specific. ctac uses <base> ^ "_novcg" so the suffix is important for the
 * ctac_splits lemmas *)
lemmas ctac_splits_non_call = ccorres_split_nothrowE [where F = UNIV] ccorres_split_nothrow [where F = UNIV]
lemmas ctac_splits_non_call_novcg = ccorres_split_nothrow_novcgE ccorres_split_nothrow_novcg
lemmas ctac_splits_call = ccorres_split_nothrow_callE [where F = UNIV] ccorres_split_nothrow_call [where F = UNIV]
lemmas ctac_splits_call_novcg = ccorres_split_nothrow_call_novcgE ccorres_split_nothrow_call_novcg
lemmas ctac_splits_record =
ccorres_split_nothrow_call_record [where F = UNIV] ccorres_split_nothrow_record [where F = UNIV]
(* Probably useless as-is *)
lemmas ctac_splits_record_novcg =
ccorres_split_nothrow_call_record_novcg ccorres_split_nothrow_record_novcg
(* None of these generate vcg obligations, so we don't need a _novcg alternative *)
lemmas ctac_nosplit_non_call = match_ccorres
lemmas ctac_nosplit_call = ccorres_callE ccorres_call
lemmas ctac_nosplit_record = ccorres_call_record match_ccorres_record
(* Used with the _spec and _modifies rules. WARNING: the order and
position of these assumptions is relied on by the tactic csymbr. The
guard for cc is then simplified and gen_asm2 can be used. *)
lemma ccorres_lift_rhs_call:
assumes cc: "\<And>rv'. ccorres_underlying rf_sr \<Gamma> r xf arrel axf G (G' rv' \<inter> {s. P' rv' (i s)}) hs a (d' rv')"
(* WARNING: the tactic csymbr relies on the outermost variable (v) being of the same type as the return type of xf' *)
and xfxfu: "\<And>v s. xf' (xfu' (\<lambda>_. v) s) = v"
and f_spec: "\<forall>s. \<Gamma> \<turnstile> {\<sigma>. s = \<sigma> \<and> P \<sigma> s} Call f {t. P' (xf'' t) s}"
and f_modifies: "modifies_spec f"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and gg: "\<And>x f s. globals (xfu' f s) = globals s"
and gi: "\<And>s. globals (i s) = globals s"
(* This is annoying, as simp doesn't always solve it *)
and Pig: "\<And>x (s :: cstate) v'. P' x (i s)
\<Longrightarrow> P' x (i (xfu' (\<lambda>_. x) s))"
(* The concrete guard here is stronger than required --- we really need xf'' t not v *)
shows "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G
({s. \<forall>v v'. P' v (i s) \<longrightarrow> xfu' (\<lambda>_. v) s \<in> {s. s \<in> G' v}}
\<inter> {s. P (i s) (i s)}) hs a
(call i f (\<lambda>s t. s\<lparr>globals := globals t\<rparr>) (\<lambda>_ t. Basic (xfu' (\<lambda>_. xf'' t))) ;; d)"
(is "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G ?G' hs a (?c ;; d)")
proof (subst return_bind [where x = "()" and f = "\<lambda>_. a", symmetric],
rule ccorres_guard_imp [OF ccorres_split_nothrow, OF ccorres_call ceqv, OF _ gg xfxfu gi cc])
show "ccorres dc xf'' \<top> (Collect (\<lambda>s. P s s)) [] (return ()) (Call f)"
apply (rule ccorres_guard_imp2)
apply (rule ccorres_noop_spec [OF f_spec f_modifies])
apply simp
done
next
show "\<lbrace>G\<rbrace> return () \<lbrace>\<lambda>_. G\<rbrace>" by wp
next
show "\<Gamma>\<turnstile> ?G' ?c {s. \<forall>uu. dc uu (xf' s) \<longrightarrow> s \<in> G' (xf' s) \<inter> {sa. P' (xf' s) (i sa)}}"
apply (rule HoarePartial.ProcModifyReturnNoAbr
[where return' = "\<lambda>s t. s",
OF _ _ f_modifies])
apply (rule HoarePartial.ProcSpecNoAbrupt [OF _ _ f_spec])
defer
apply vcg
apply (clarsimp simp add: gi mex_def meq_def xfxfu)
apply (clarsimp simp add: gi mex_def meq_def xfxfu Pig)
done
qed simp_all
lemma ccorres_lift_rhs_Basic:
assumes cc: "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G G' hs a (d' v)"
and xfxfu: "\<And>v s. xf' (xfu' (\<lambda>_. v) s) = v"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and gg: "\<And>x f s. globals (xfu' f s) = globals s"
(* WARNING: the tactic csymbr relies on the outermost variable being of the same type as the return type of xf' *)
(* and Pig: "\<And>x (s :: cstate) v'. P' x (i s) \<Longrightarrow> P' x (i (xfu' (\<lambda>_. x) s))" *)
(* The concrete guard here is stronger than required --- we really need xf'' t not v *)
shows "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G {s. xfu' (\<lambda>_. v) s \<in> G'} hs a (Basic (xfu' (\<lambda>_. v)) ;; d)"
(is "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G ?G' hs a (?c ;; d)")
proof (subst return_bind [where x = "()" and f = "\<lambda>_. a", symmetric],
rule ccorres_guard_imp [OF ccorres_split_nothrow, OF _ ceqv])
show "ccorres dc xf' \<top> UNIV hs (return ()) ?c"
apply (rule ccorres_noop)
apply (vcg spec=modifies)
done
next
fix rv'
show "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G (G' \<inter> {s. rv' = v}) hs a (d' rv')"
apply (rule ccorres_gen_asm2)
apply (erule ssubst)
apply (rule cc)
done
next
show "\<lbrace>G\<rbrace> return () \<lbrace>\<lambda>_. G\<rbrace>" by wp
next
show "\<Gamma>\<turnstile> ?G' ?c {s. \<forall>uu. dc uu (xf' s) \<longrightarrow> s \<in> G' \<inter> {_. xf' s = v}}"
apply vcg
apply (clarsimp simp add: xfxfu)
done
qed simp_all
(* The first 4 assumptions here need to correspond to those of ccorres_lift_rhs_call above: *)
lemma ccorres_lift_rhs_call_record:
fixes xf' :: "cstate \<Rightarrow> 'a" and xfu' :: "('a \<Rightarrow> 'a) \<Rightarrow> cstate \<Rightarrow> cstate"
and xf'' :: "cstate \<Rightarrow> 'b" and xfr :: "'a \<Rightarrow> 'b" and xfru :: "('b \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'a"
assumes cc: "\<And>rv' :: 'b. ccorres_underlying rf_sr \<Gamma> r xf arrel axf
G (G' rv' \<inter> {s. P' rv' (i s)}) hs a (d' (xfru (\<lambda>_. rv') oldv))"
(* WARNING: the tactic csymbr relies on the outermost variable being of the same type as the return type of xf' *)
and xfxfu: "\<And>v s. xf' (xfu' (\<lambda>_. v) s) = v"
and f_spec: "\<forall>s. \<Gamma> \<turnstile> {\<sigma>. s = \<sigma> \<and> P \<sigma> s} Call f {t. P' (xf'' t) s}"
and f_modifies: "modifies_spec f"
(* WARNING: the tactic csymbr relies on the outermost variable being of the same type as the return type of xfr *)
and xfrxfru: "\<And>v s. xfr (xfru (\<lambda>_. v) s) = v"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and gg: "\<And>x f s. globals (xfu' f s) = globals s"
and gi: "\<And>s. globals (i s) = globals s"
and Pig: "\<And>x (s :: cstate) v'. P' x (i s) \<Longrightarrow>
P' x (i (xfu' (\<lambda>_. xfru (\<lambda>_. x) oldv) s))"
(* The concrete guard here is stronger than required --- we really need xf'' t not v *)
shows "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G
({s. \<forall>v v'. P' v (i s) \<longrightarrow> xfu' (\<lambda>_. xfru (\<lambda>_. v) oldv) s \<in> {s. s \<in> G' v}}
\<inter> {s. P (i s) (i s)}) hs a
(call i f (\<lambda>s t. s\<lparr>globals := globals t\<rparr>) (\<lambda>_ t. Basic (xfu' (\<lambda>_. xfru (\<lambda>_. xf'' t) oldv))) ;; d)"
(is "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G ?G' hs a (?c ;; d)")
proof (subst return_bind [where x = "()" and f = "\<lambda>_. a", symmetric],
rule ccorres_guard_imp [OF ccorres_split_nothrow_record [where xfru = "xfru"], OF ccorres_call ceqv, OF _ gg _ gi cc])
show "ccorres dc xf'' \<top> (Collect (\<lambda>s. P s s)) [] (return ()) (Call f)"
apply (rule ccorres_guard_imp2)
apply (rule ccorres_noop_spec [OF f_spec f_modifies])
apply simp
done
next
fix a s t
show "(xfr \<circ> xf') (xfu' (\<lambda>_. xfru (\<lambda>_. xf'' t) oldv) (s\<lparr>globals := globals t\<rparr>)) = xf'' t"
by (simp add: xfxfu xfrxfru)
next
show "\<lbrace>G\<rbrace> return () \<lbrace>\<lambda>_. G\<rbrace>" by wp
next
show "\<Gamma>\<turnstile> ?G' ?c {s. xf' s = xfru (\<lambda>_. (xfr \<circ> xf') s) oldv \<and>
(\<forall>uu. dc uu ((xfr \<circ> xf') s) \<longrightarrow> s \<in> G' ((xfr \<circ> xf') s) \<inter> {sa. P' ((xfr \<circ> xf') s) (i sa)})}"
apply (rule HoarePartial.ProcModifyReturnNoAbr
[where return' = "\<lambda>s t. s",
OF _ _ f_modifies])
apply (rule HoarePartial.ProcSpecNoAbrupt [OF _ _ f_spec])
defer
apply vcg
apply (clarsimp simp add: gi mex_def meq_def xfxfu)
apply (clarsimp simp add: gi mex_def meq_def xfxfu xfrxfru Pig)
done
qed simp_all
lemma ccorres_lift_rhs_Basic_record:
fixes xf' :: "cstate \<Rightarrow> 'a" and xfu' :: "('a \<Rightarrow> 'a) \<Rightarrow> cstate \<Rightarrow> cstate" and xfru :: "('b \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'a"
assumes cc: "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G G' hs a (d' (xfru (\<lambda>_. v) oldv))"
and xfxfu: "\<And>v s. xf' (xfu' (\<lambda>_. v) s) = v"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and gg: "\<And>x f s. globals (xfu' f s) = globals s"
(* The concrete guard here is stronger than required --- we really need xf'' t not v *)
shows "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G {s. xfu' (\<lambda>_. xfru (\<lambda>_. v) oldv) s \<in> G'} hs a (Basic (xfu' (\<lambda>_. xfru (\<lambda>_. v) oldv)) ;; d)"
(is "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G ?G' hs a (?c ;; d)")
proof (subst return_bind [where x = "()" and f = "\<lambda>_. a", symmetric],
rule ccorres_guard_imp [OF ccorres_split_nothrow_record [where xfru = "xfru"], OF _ ceqv])
show "ccorres dc ((\<lambda>_. v) \<circ> xf') \<top> UNIV hs (return ()) ?c"
apply (rule ccorres_noop)
apply (vcg spec=modifies)
done
next
fix rv'
show "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G (G' \<inter> {s. rv' = v}) hs a (d' (xfru (\<lambda>_. rv') oldv))"
apply (rule ccorres_gen_asm2)
apply (erule ssubst)
apply (rule cc)
done
next
show "\<lbrace>G\<rbrace> return () \<lbrace>\<lambda>_. G\<rbrace>" by wp
next
show "\<Gamma>\<turnstile> ?G' ?c {s. xf' s = xfru (\<lambda>_. ((\<lambda>_. v) \<circ> xf') s) oldv \<and>
(\<forall>uu. dc uu (((\<lambda>_. v) \<circ> xf') s) \<longrightarrow> s \<in> G' \<inter> \<lbrace>((\<lambda>_. v) \<circ> xf') s = v\<rbrace>)}"
apply (rule conseqPre)
apply vcg
apply (clarsimp simp add: xfxfu)
done
qed simp_all
thm ccorres_lift_rhs_call [where P = "\<lambda>_ s. hrs_htd \<^bsup>s\<^esup>t_hrs \<Turnstile>\<^sub>t (xfa s)"]
lemmas ccorres_lift_rhs_no_guard = ccorres_lift_rhs_call [where P = "\<lambda>_ _. True", simplified]
lemmas ccorres_lift_rhss = ccorres_lift_rhs_no_guard ccorres_lift_rhs_call
lemmas ccorres_lift_rhs_record_no_guard = ccorres_lift_rhs_call_record [where P = "\<lambda>_ _. True", simplified]
lemmas ccorres_lift_rhss_record = ccorres_lift_rhs_record_no_guard ccorres_lift_rhs_call_record
lemma ccorres_lift_rhs_Basic_stateful:
assumes cc: "\<And>v. ccorres_underlying rf_sr \<Gamma> r xf arrel axf G (G' v) hs a (d' v)"
and xfxfu: "\<And>v s. xf' (xfu' (\<lambda>_. v) s) = v"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
and gg: "\<And>x f s. globals (xfu' f s) = globals s"
shows "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G {s. xfu' (\<lambda>_. g s) s \<in> G' (g s)} hs a (Basic (\<lambda>s. xfu' (\<lambda>_. g s) s) ;; d)"
(is "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G ?G' hs a (?c ;; d)")
proof (subst return_bind [where x = "()" and f = "\<lambda>_. a", symmetric],
rule ccorres_guard_imp [OF ccorres_split_nothrow, OF _ ceqv])
show "ccorres dc xf' \<top> UNIV hs (return ()) ?c"
apply (rule ccorres_noop)
apply (vcg spec=modifies)
done
next
fix rv'
show "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G (G' rv') hs a (d' rv')"
by (rule cc)
next
show "\<lbrace>G\<rbrace> return () \<lbrace>\<lambda>_. G\<rbrace>" by wp
next
show "\<Gamma>\<turnstile> ?G' ?c {s. \<forall>uu. dc uu (xf' s) \<longrightarrow> s \<in> G' (xf' s)}"
apply (rule conseqPre)
apply vcg
apply (clarsimp simp add: xfxfu)
done
qed simp_all
lemma ccorres_lift_rhs_Spec_stateful:
assumes cc: "\<And>v. ccorres_underlying rf_sr \<Gamma> r xf arrel axf G (G' v) hs a (d' v)"
and ceqv: "\<And>rv' t t'. ceqv \<Gamma> xf' rv' t t' d (d' rv')"
assumes gg: "\<And>v s. globals (upd (\<lambda>_. v) s) = globals s"
assumes upd_acc: "\<And>s. upd (\<lambda>_. accessor s) s = s"
shows "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G {s. \<forall>v. upd (\<lambda>_. v) s \<in> G' (xf' (upd (\<lambda>_. v) s))} hs a (Spec {(s,t). \<exists>v. t = upd (\<lambda>_. v) s};; d)"
(is "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G ?G' hs a (?c ;; d)")
proof (subst return_bind [where x = "()" and f = "\<lambda>_. a", symmetric],
rule ccorres_guard_imp [OF ccorres_split_nothrow, OF _ ceqv])
show "ccorres dc xf' \<top> UNIV hs (return ()) ?c"
apply (rule ccorres_noop)
apply (vcg spec=modifies)
apply clarsimp
apply (drule arg_cong[where f=globals])
apply (simp add: gg)
apply (rule_tac x=x in exI)
apply (rule_tac x="accessor x" in exI)
apply (rule upd_acc [symmetric])
done
next
fix rv'
show "ccorres_underlying rf_sr \<Gamma> r xf arrel axf G (G' rv') hs a (d' rv')"
by (rule cc)
next
show "\<lbrace>G\<rbrace> return () \<lbrace>\<lambda>_. G\<rbrace>" by wp
next
show "\<Gamma>\<turnstile> ?G' ?c {s. \<forall>uu. dc uu (xf' s) \<longrightarrow> s \<in> G' (xf' s)}"
apply (rule conseqPre)
apply vcg
apply clarsimp
apply (rule conjI)
apply clarsimp
apply (rule_tac x=x in exI)
apply (rule_tac x="accessor x" in exI)
apply (rule upd_acc [symmetric])
done
qed simp_all
(* For something like

     int foo(int x)
     {
       int i;
       ...
     }

   the parser spits out

     lvar_nondet_init i_' i_'_update;;
     ...

   which we need to remove --- lvar_nondet_init is a nondeterministic SPEC
   statement that picks any value for i, but leaves the rest unchanged.
*)
lemma ccorres_lift_rhs_remove_lvar_init:
assumes cc: "ccorres_underlying rf_sr \<Gamma> r xf ar arf G G' hs a d"
assumes gg: "\<And>x f s. globals (i_upd f s) = globals s"
assumes upd_acc: "\<And>s. i_upd (\<lambda>_. i_acc s) s = s"
assumes arb: "\<And>s v. s \<in> G' \<Longrightarrow> i_upd (\<lambda>_. v) s \<in> G'"
shows "ccorres_underlying rf_sr \<Gamma> r xf ar arf G G' hs a (lvar_nondet_init i_acc i_upd ;; d)"
apply (unfold lvar_nondet_init_def)
apply (rule ccorres_guard_imp)
apply (rule ccorres_lift_rhs_Spec_stateful [OF cc ceqv_refl gg])
apply (rule upd_acc)
apply assumption
apply (simp add: arb)
done
lemma ccorres_lift_rhs_remove_lvar_init_unknown_guard:
assumes cc: "ccorres_underlying rf_sr \<Gamma> r xf ar arf G G' hs a d"
assumes gg: "\<And>x f s. globals (i_upd f s) = globals s"
assumes upd_acc: "\<And>s. i_upd (\<lambda>_. i_acc s) s = s"
shows "ccorres_underlying rf_sr \<Gamma> r xf ar arf G {s. \<forall>v. i_upd (\<lambda>_. v) s \<in> G'} hs a (lvar_nondet_init i_acc i_upd ;; d)"
unfolding lvar_nondet_init_def
by (rule ccorres_lift_rhs_Spec_stateful [OF cc ceqv_refl gg], rule upd_acc)
lemma ccorres_special_Int_cong:
"(\<And>s. P s = P' s) \<Longrightarrow> ccorres r xf G (G' \<inter> {s. P s}) hs a c = ccorres r xf G (G' \<inter> {s. P' s}) hs a c" by simp
lemma ccorres_special_trim_Int:
"ccorres r xf G G' hs a c \<Longrightarrow> ccorres r xf G (G' \<inter> P') hs a c"
apply (erule ccorres_guard_imp)
apply simp
apply simp
done
lemma semantic_equiv_def2:
fixes s :: "'b" and s' :: "('b, 'c) xstate"
shows "semantic_equiv G s s' a a' \<equiv> ((G \<turnstile> \<langle>a,Normal s\<rangle> \<Rightarrow> s') = (G \<turnstile> \<langle>a',Normal s\<rangle> \<Rightarrow> s'))"
unfolding semantic_equiv_def ceqv_def
by simp
(* MOVE *)
lemma semantic_equiv_While_cong:
assumes se: "\<And>s s'. semantic_equiv Gamma s s' b b'"
shows "semantic_equiv Gamma s s' (While G b) (While G b')"
using se
apply -
apply (rule semantic_equivI)
apply (rule exec_While_cong)
apply (simp add: semantic_equiv_def2)
done
lemma semantic_equiv_Seq_cong:
assumes sea: "\<And>s'. semantic_equiv Gamma s s' a a'"
and seb: "\<And>s. semantic_equiv Gamma s s' b b'"
shows "semantic_equiv Gamma s s' (a ;; b) (a' ;; b')"
using sea seb
apply (simp add: semantic_equiv_def2)
apply (rule exec_Seq_cong)
apply assumption
apply assumption
done
lemma semantic_equiv_Seq_Skip:
assumes se: "semantic_equiv Gamma s s' a a'"
shows "semantic_equiv Gamma s s' (a ;; SKIP) a'"
using se unfolding semantic_equiv_def2
by (simp add: exec_Seq_Skip_simps)
lemma semantic_equiv_Guard_cong:
assumes se: "semantic_equiv Gamma s s' a a'"
shows "semantic_equiv Gamma s s' (Guard F G a) (Guard F G a')"
using se
by (simp add: semantic_equiv_def2 exec_Guard)
lemma semantic_equiv_Guard_UNIV:
assumes se: "semantic_equiv Gamma s s' a a'"
shows "semantic_equiv Gamma s s' (Guard F UNIV a) a'"
using se
by (simp add: semantic_equiv_def2 exec_Guard_UNIV_simp)
lemma semantic_equiv_Guard_True:
assumes se: "semantic_equiv Gamma s s' a a'"
shows "semantic_equiv Gamma s s' (Guard F \<lbrace>True\<rbrace> a) a'"
using se
by (simp add: semantic_equiv_def2 exec_Guard_UNIV_simp)
lemma semantic_equiv_refl:
shows "semantic_equiv Gamma s s' a a"
by (rule semantic_equivI, simp)
lemma semantic_equiv_trans:
assumes sea: "semantic_equiv Gamma s s' a b"
and seb: "semantic_equiv Gamma s s' b c"
shows "semantic_equiv Gamma s s' a c"
using sea seb
by (simp add: semantic_equiv_def2)
(* Ugh, a bit tricky to get this outcome without this sort of specialisation :( *)
lemma semantic_equiv_Guard_Skip_Seq:
shows "semantic_equiv Gamma s s' (a ;; Guard F \<lbrace>True\<rbrace> SKIP) a"
apply (rule semantic_equiv_trans)
apply (rule semantic_equiv_Seq_cong)
apply (rule semantic_equiv_refl)
apply (rule semantic_equiv_Guard_True)
apply (rule semantic_equiv_refl)
apply (rule semantic_equiv_Seq_Skip)
apply (rule semantic_equiv_refl)
done
lemma semantic_equiv_Seq_assoc:
shows "semantic_equiv Gamma s s' (a ;; (b ;; c)) (a ;; b ;; c)"
apply (rule semantic_equivI)
apply (rule exec_assoc)
done
lemma semantic_equiv_seq_assoc_eq:
"semantic_equiv Gamma s s' (a ;; (b ;; c)) d
= semantic_equiv Gamma s s' (a ;; b ;; c) d"
"semantic_equiv Gamma s s' d (a ;; (b ;; c))
= semantic_equiv Gamma s s' d (a ;; b ;; c)"
by (metis semantic_equiv_trans semantic_equiv_Seq_assoc
semantic_equiv_Seq_assoc[THEN semantic_equiv_sym[THEN iffD1]])+
lemma semantic_equiv_Cond:
assumes sel: "semantic_equiv Gamma s s' l l'"
and ser: "semantic_equiv Gamma s s' r r'"
shows "semantic_equiv Gamma s s' (Cond P l r) (Cond P l' r')"
using sel ser
by (auto elim!: exec_Normal_elim_cases simp: semantic_equiv_def2 intro: exec.intros)
lemma semantic_equiv_Cond_True:
"semantic_equiv G s s' (Cond UNIV c c') c"
by (auto elim!: exec_Normal_elim_cases simp: semantic_equiv_def2 intro: exec.intros)
lemma semantic_equiv_Cond_False:
"semantic_equiv G s s' (Cond {} c c') c'"
by (auto elim!: exec_Normal_elim_cases simp: semantic_equiv_def2 intro: exec.intros)
lemma semantic_equiv_Cond_cases:
"semantic_equiv G s s' a (Cond P c d)
= semantic_equiv G s s' a (if s \<in> P then c else d)"
"semantic_equiv G s s' (Cond P c d) e
= semantic_equiv G s s' (if s \<in> P then c else d) e"
by (auto simp: semantic_equiv_def2 elim!: exec_Normal_elim_cases intro: exec.intros)
lemma semantic_equiv_cond_seq2:
"semantic_equiv G s s' (e;; Cond Q (c;;d) (c';;d)) (e;; Cond Q c c';; d)"
apply (simp add: semantic_equiv_seq_assoc_eq[symmetric])
apply (rule semantic_equiv_Seq_cong, rule semantic_equiv_refl)
by (auto simp: semantic_equiv_def2 elim!: exec_Normal_elim_cases intro: exec.intros)
lemmas ccorres_cond_seq2 = ccorres_semantic_equiv[OF semantic_equiv_cond_seq2]
lemma semantic_equiv_cond_seq2_seq:
"semantic_equiv G s s' (ci;; Cond Q (c;;ce) (c';;ce);; d) (ci;; Cond Q c c';; ce;; d)"
apply (simp add: semantic_equiv_seq_assoc_eq[symmetric])
apply (rule semantic_equiv_Seq_cong, rule semantic_equiv_refl)
apply (simp add: semantic_equiv_seq_assoc_eq)
by (auto simp: semantic_equiv_def2 elim!: exec_Normal_elim_cases intro: exec.intros)
lemmas ccorres_cond_seq2_seq = ccorres_semantic_equiv[OF semantic_equiv_cond_seq2_seq]
(* FIXME: move
It appears that the semantic equiv. lemmas should go into their own file, then
CCorresLemmas on top of that, and then finally Ctac on top of CCorresLemmas *)
lemma ccorres_rewrite_cond_sr:
assumes abs: "\<forall>s s'. (s, s') \<in> sr \<and> Q s \<and> s' \<in> Q' \<longrightarrow> (s' \<in> C) = (s' \<in> C') "
and c1: "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs m (Cond C' c d)"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and Q) (P' \<inter> Q') hs
m (Cond C c d)"
apply (rule ccorres_name_pre)
apply (rule_tac Q="op = s" and Q'="P' \<inter> Q' \<inter> {s'. (s, s') \<in> sr}" in stronger_ccorres_guard_imp)
apply (rule ccorres_semantic_equiv[THEN iffD1, rotated])
apply (rule ccorres_guard_imp, rule c1, simp_all)
apply (clarsimp simp add: semantic_equiv_Cond_cases abs semantic_equiv_refl)
done
lemma ccorres_rewrite_cond_sr_Seq:
assumes abs: "\<forall>s s'. (s, s') \<in> sr \<and> Q s \<and> s' \<in> Q' \<longrightarrow> (s' \<in> C) = (s' \<in> C') "
and c1: "ccorres_underlying sr \<Gamma> r xf arrel axf P P' hs m (Cond C' c d ;; e)"
shows "ccorres_underlying sr \<Gamma> r xf arrel axf (P and Q) (P' \<inter> Q') hs
m (Cond C c d ;; e)"
apply (rule ccorres_name_pre)
apply (rule_tac Q="op = s" and Q'="P' \<inter> Q' \<inter> {s'. (s, s') \<in> sr}" in stronger_ccorres_guard_imp)
apply (rule ccorres_semantic_equiv[THEN iffD1, rotated])
apply (rule ccorres_guard_imp, rule c1, simp_all)
apply (rule semantic_equiv_Seq_cong)
apply (clarsimp simp add: semantic_equiv_Cond_cases abs semantic_equiv_refl)+
done
definition
"push_in_stmt G stmt c c' \<equiv> (\<forall>s s'. semantic_equiv G s s' (c ;; stmt) c')"
lemma pis_base:
"push_in_stmt G stmt c (c ;; stmt)"
unfolding push_in_stmt_def by (clarsimp intro!: semantic_equivI)
lemma pis_throw:
"push_in_stmt G stmt THROW THROW"
unfolding push_in_stmt_def
by (auto elim!: exec_Normal_elim_cases intro: semantic_equivI exec.intros)
lemma pis_Seq_right:
"push_in_stmt G stmt d d' \<Longrightarrow> push_in_stmt G stmt (c ;; d) (c ;; d')"
unfolding push_in_stmt_def
apply (intro allI)
apply (rule semantic_equiv_trans [rotated])
apply (rule semantic_equiv_Seq_cong [OF semantic_equiv_refl])
apply (drule spec, drule spec, assumption)
apply (subst semantic_equiv_sym)
apply (rule semantic_equiv_Seq_assoc)
done
lemma pis_creturn:
"push_in_stmt G stmt (return_C xfu xf) (return_C xfu xf)"
unfolding creturn_def
by (rule pis_Seq_right | rule pis_throw)+
lemma pis_Cond:
"\<lbrakk> push_in_stmt G stmt l l'; push_in_stmt G stmt r r' \<rbrakk> \<Longrightarrow>
push_in_stmt G stmt (Cond P l r) (Cond P l' r')"
unfolding push_in_stmt_def
apply (intro allI)
apply (drule_tac x = s in spec, drule_tac x = s' in spec)+
apply (case_tac "s \<in> P")
apply (auto elim!: exec_Normal_elim_cases simp: semantic_equiv_def2 intro: exec.intros)
done
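(* A hedged example of how these rules compose (not itself used by the tactic):
   pushing stmt into both branches of a conditional via pis_Cond and pis_base. *)
lemma pis_Cond_base:
  "push_in_stmt G stmt (Cond P l r) (Cond P (l ;; stmt) (r ;; stmt))"
  by (intro pis_Cond pis_base)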
(* We check this before simplifying everything, so we need to deal with switch *)
lemma pis_switch_Cons:
"\<lbrakk> push_in_stmt G stmt c c';
push_in_stmt G stmt (switch v (x # xs)) (switch v cs) \<rbrakk>
\<Longrightarrow> push_in_stmt G stmt (switch v ((g, c) # (x # xs))) (switch v ((g, c') # cs))"
by (simp add: pis_Cond)
lemma pis_switch_Singleton:
"\<lbrakk> push_in_stmt G stmt c c' \<rbrakk> \<Longrightarrow> push_in_stmt G stmt (switch v [(UNIV, c)]) (switch v [(UNIV, c')])"
apply (clarsimp simp: push_in_stmt_def)
apply (rule semantic_equiv_trans [OF _ iffD2 [OF semantic_equiv_sym, OF semantic_equiv_Cond_True]])
apply (rule semantic_equiv_trans [rotated])
apply (drule spec, drule spec, assumption)
apply (rule semantic_equiv_Seq_cong [OF semantic_equiv_Cond_True semantic_equiv_refl])
done
lemma pis_Guard:
"push_in_stmt G stmt c c' \<Longrightarrow> push_in_stmt G stmt (Guard f G' c) (Guard f G' c')"
unfolding push_in_stmt_def
apply (intro allI)
apply (rule semantic_equiv_trans [OF Guard_Seq_semantic_equiv])
apply (rule semantic_equiv_Guard_cong)
apply (drule spec, erule spec)
done
lemmas push_in_stmt_rules =
-- "No ordering apart from pis_base which must be last."
pis_throw
pis_creturn
pis_Seq_right
pis_Cond
pis_switch_Singleton pis_switch_Cons
pis_Guard
-- "Last, just stick it where it is"
pis_base
lemma ccorres_special_trim_guard_DontReach_pis:
assumes at: "push_in_stmt Gamma (Guard DontReach {} c) b b'"
and c: "ccorres_underlying sr Gamma r xf ar axf G G' hs a b'"
shows "ccorres_underlying sr Gamma r xf ar axf G G' hs a (b ;; Guard DontReach {} c)"
using c at unfolding push_in_stmt_def
apply -
apply (erule ccorres_semantic_equivD2)
apply (drule spec, erule spec)
done
end
lemmas ccorres_boilerplace_simp_dels =
Collect_const -- "Avoid getting an implication due to if_split. Should probably just remove if_split"
lemma ccorres_introduce_UNIV_Int_when_needed:
"ccorres_underlying sr Gamm r xf ar axf P (UNIV \<inter> {x. Q x}) hs a c
\<Longrightarrow> ccorres_underlying sr Gamm r xf ar axf P {x. Q x} hs a c"
by simp
lemma Normal_Abrupt_resultE [consumes 2, case_names normal abrupt]:
assumes ex: "\<Gamma> \<turnstile> \<langle>c, s\<rangle> \<Rightarrow> t"
and t: "t = Normal t' \<or> t = Abrupt t'"
and r1: "\<And>s'. \<lbrakk>\<Gamma> \<turnstile> \<langle>c, Normal s'\<rangle> \<Rightarrow> t; s = Normal s'\<rbrakk> \<Longrightarrow> R"
and r2: "\<And>s'. \<lbrakk>\<Gamma> \<turnstile> \<langle>c, Abrupt s'\<rangle> \<Rightarrow> t; s = Abrupt s'\<rbrakk> \<Longrightarrow> R"
shows R
using ex t
apply -
apply (erule disjE)
apply simp
apply (erule Normal_resultE)
apply (rule r1)
apply simp
apply simp
apply simp
apply (erule Abrupt_resultE)
apply (rule r1)
apply simp
apply simp
apply (rule r2)
apply simp
apply simp
done
(* Used so we can pattern match in ceqv (and hopefully speed things up) *)
definition
"rewrite_xf xf t v f f' \<equiv> xf t = v \<longrightarrow> f t = f' t"
lemma rewrite_xfI:
"(xf t = v \<Longrightarrow> f t = f' t) \<Longrightarrow> rewrite_xf xf t v f f'"
unfolding rewrite_xf_def by auto
lemma rewrite_xfD:
"\<lbrakk> rewrite_xf xf t v f f'; xf t = v \<rbrakk> \<Longrightarrow> f t = f' t"
unfolding rewrite_xf_def by auto
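(* A small sanity-check instance (not used below): once xf t is known to equal v,
   the accessor itself rewrites to the constant function. *)
lemma rewrite_xf_const:
  "rewrite_xf xf t v xf (\<lambda>_. v)"
  by (simp add: rewrite_xf_def)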
lemma Basic_ceqv:
assumes rl: "rewrite_xf xf t v f f'"
shows "ceqv \<Gamma> xf v t t' (Basic f) (Basic f')"
apply -
apply (rule ceqvI)
apply (drule rewrite_xfD [OF rl])
apply rule
apply (erule exec_Normal_elim_cases)
apply simp
apply rule
apply (erule exec_Normal_elim_cases)
apply simp
apply (erule subst)
apply (rule exec.Basic)
done
(* the tactic uses THEN_ALL_NEW, which goes backwards *)
lemma Seq_ceqv:
assumes ra: "\<And>t'. ceqv \<Gamma> xf v t t' a a'"
and rb: "\<And>t. ceqv \<Gamma> xf v t t' b b'"
and xp: "xpres xf v \<Gamma> a" (* Check that a does preserve xf first *)
shows "ceqv \<Gamma> xf v t t' (a ;; b) (a' ;; b')"
using xp
apply -
apply (rule ceqvI)
apply rule
apply (erule exec_Normal_elim_cases)
apply rule
apply (erule (1) ceqvD1 [OF _ _ ra])
apply (case_tac s')
apply simp
apply (erule ceqvD1)
prefer 2
apply (rule assms)
apply (rule xpresD [where xf = xf], assumption+)
apply (fastforce dest: Abrupt_end Fault_end Stuck_end)+
(* clag *)
apply (erule exec_Normal_elim_cases)
apply rule
apply (erule (1) ceqvD2 [OF _ _ ra])
apply (case_tac s')
apply simp
apply (erule ceqvD2)
prefer 2
apply (rule assms)
apply (rule xpresD [where xf = xf], assumption+)
apply (erule (1) ceqvD2 [OF _ _ ra])
apply (fastforce dest: Abrupt_end Fault_end Stuck_end)+
done
lemma Seq_weak_ceqv: (* A weaker form where xpres doesn't hold for a *)
assumes ra: "\<And>t'. ceqv \<Gamma> xf v t t' a a'"
shows "ceqv \<Gamma> xf v t t' (a ;; b) (a' ;; b)"
apply -
apply (rule ceqvI)
apply rule
apply (erule exec_Normal_elim_cases)
apply rule
apply (erule (2) ceqvD1 [OF _ _ ra])
(* clag *)
apply (erule exec_Normal_elim_cases)
apply rule
apply (erule (2) ceqvD2 [OF _ _ ra])
done
lemma xpres_ceqv:
assumes xp: "xpres xf v \<Gamma> a"
and ceq: "\<And>t t'. ceqv \<Gamma> xf v t t' a a'"
shows "xpres xf v \<Gamma> a'"
apply (rule xpresI)
apply (drule (1) ceqvD2 [OF _ _ ceq])
apply (erule (2) xpres_exec0 [OF xp])
done
lemma While_ceqv_na0:
assumes ra: "\<And>t t'. ceqv \<Gamma> xf v t t' a a'"
and xp: "xpres xf v \<Gamma> a"
and ex: "\<Gamma>\<turnstile> \<langle>d,s\<rangle> \<Rightarrow> t'"
and beq0: "\<And>t. xf t = v \<longrightarrow> (t \<in> b) = (t \<in> b')"
and d: "d = While b a"
and s: "s \<in> Normal ` {s. xf s = v} \<union> Abrupt ` {s. xf s = v}"
and d': "d' = While b' a'"
and t: "\<not> isFault t'" "t' \<noteq> Stuck"
shows "\<Gamma>\<turnstile> \<langle>d',s\<rangle> \<Rightarrow> t'"
using ex d s d' t
proof (induct)
case (WhileTrue s' b'' c' t u)
hence bv: "b'' = b" and cv: "c' = a" and xfs: "xf s' = v" by auto
note xp = xpres_ceqv [OF xp ra]
note beq = beq0 [rule_format]
have "\<Gamma> \<turnstile> \<langle>While b' a', Normal s'\<rangle> \<Rightarrow> u"
proof (rule exec.WhileTrue)
show "s' \<in> b'" using beq xfs bv[symmetric] WhileTrue
by auto
show ae: "\<Gamma>\<turnstile> \<langle>a',Normal s'\<rangle> \<Rightarrow> t" using WhileTrue ceqvD1[OF _ _ ra]
by auto
show "\<Gamma>\<turnstile> \<langle>While b' a',t\<rangle> \<Rightarrow> u"
proof (subst d' [symmetric], rule WhileTrue.hyps(5))
obtain z where "u = Normal z \<or> u = Abrupt z"
using WhileTrue.prems by (cases u, auto)
then obtain z' where "t = Normal z' \<or> t = Abrupt z'"
using WhileTrue.prems WhileTrue.hyps(2) WhileTrue.hyps(4)
by (auto elim: Normal_resultE Abrupt_resultE)
thus "t \<in> Normal ` {s. xf s = v} \<union> Abrupt ` {s. xf s = v}" using xp ae xfs
by (auto dest: xpres_exec0)
qed fact+
qed
thus ?case using WhileTrue.prems by simp
next
note beq = beq0 [rule_format]
case WhileFalse
thus ?case
apply simp
apply rule
apply (erule disjE)
apply (erule imageE, simp)
apply (auto simp: beq)
done
qed simp_all
lemmas While_ceqv_na = While_ceqv_na0 [OF _ _ _ _ refl _ refl]
lemma While_ceqv_fs0:
assumes ra: "\<And>t t'. ceqv \<Gamma> xf v t t' a a'"
and xp: "xpres xf v \<Gamma> a"
and ex: "\<Gamma>\<turnstile> \<langle>d,x\<rangle> \<Rightarrow> t'"
and d: "d = While b a"
and d': "d' = While b' a'"
and beq0: "\<And>t. xf t = v \<longrightarrow> (t \<in> b) = (t \<in> b')"
and t: "isFault t' \<or> t' = Stuck"
and s: "x \<in> Normal ` {s. xf s = v}"
shows "\<Gamma>\<turnstile> \<langle>d',x\<rangle> \<Rightarrow> t'"
using ex d d' t s
proof (induct)
case (WhileTrue s' b'' c' t u)
hence bv: "b'' = b" and cv: "c' = a" and xfs: "xf s' = v" by auto
note xp = xpres_ceqv [OF xp ra]
note beq = beq0 [rule_format]
have "\<Gamma> \<turnstile> \<langle>While b' a', Normal s'\<rangle> \<Rightarrow> u"
proof (rule exec.WhileTrue)
show sb: "s' \<in> b'" using WhileTrue beq by auto
show ae: "\<Gamma>\<turnstile> \<langle>a',Normal s'\<rangle> \<Rightarrow> t"
using xfs WhileTrue ceqvD1[OF _ _ ra] by auto
{
fix f
assume "u = Fault f" and "t = Fault f"
hence "\<Gamma>\<turnstile> \<langle>While b' a',t\<rangle> \<Rightarrow> u" by simp
} moreover
{
fix f z
assume uv: "u = Fault f" and tv: "t = Normal z"
have "\<Gamma>\<turnstile> \<langle>d',t\<rangle> \<Rightarrow> u"
proof (rule WhileTrue.hyps(5))
show "isFault u \<or> u = Stuck" using uv by simp
have "xf z = v" using xfs ae
apply -
apply (erule xpresD [OF _ xp])
apply (simp add: tv)
done
thus "t \<in> Normal ` {s. xf s = v}" by (simp add: tv)
qed fact+
hence "\<Gamma>\<turnstile> \<langle>While b' a',t\<rangle> \<Rightarrow> u" using d' by simp
} moreover
{
assume "u = Stuck" and "t = Stuck"
hence "\<Gamma>\<turnstile> \<langle>While b' a',t\<rangle> \<Rightarrow> u" by simp
} moreover
{
fix z
assume uv: "u = Stuck" and tv: "t = Normal z"
(* clag *)
have "\<Gamma>\<turnstile> \<langle>d',t\<rangle> \<Rightarrow> u"
proof (rule WhileTrue.hyps(5))
show "isFault u \<or> u = Stuck" using uv by simp
have "xf z = v" using xfs ae
apply -
apply (erule xpresD [OF _ xp])
apply (simp add: tv)
done
thus "t \<in> Normal ` {s. xf s = v}" by (simp add: tv)
qed fact+
hence "\<Gamma>\<turnstile> \<langle>While b' a',t\<rangle> \<Rightarrow> u" using d' by simp
}
ultimately show "\<Gamma>\<turnstile> \<langle>While b' a',t\<rangle> \<Rightarrow> u" using WhileTrue.prems WhileTrue.hyps(4)
by (auto elim: Fault_resultE Stuck_resultE elim!: isFaultE)
qed
thus ?case by (simp add: d')
qed simp_all
lemmas While_ceqv_fs = While_ceqv_fs0 [OF _ _ _ refl refl]
lemma While_ceqv:
assumes beq: "\<And>t. xf t = v \<longrightarrow> (t \<in> b) = (t \<in> b')"
and ra: "\<And>t t'. ceqv \<Gamma> xf v t t' a a'"
and xp: "xpres xf v \<Gamma> a" (* So we fail as early as possible *)
shows "ceqv \<Gamma> xf v t t' (While b a) (While b' a')" (* b is a set, doesn't rewrite nicely *)
using xp
apply -
apply (rule ceqvI)
apply (cases t')
apply rule
apply (erule (1) While_ceqv_na [OF ra])
apply (rule beq)
apply simp
apply simp
apply simp
apply (rule While_ceqv_na [OF ceqv_sym [OF ra]])
apply (erule (1) xpres_ceqv [OF _ ra])
apply (rule impI)
apply (erule beq [rule_format, symmetric])
apply simp
apply simp
apply simp
(* clag *)
apply rule
apply (erule (1) While_ceqv_na [OF ra])
apply (rule beq)
apply simp
apply simp
apply simp
apply simp
apply (rule While_ceqv_na [OF ceqv_sym [OF ra]])
apply (erule (1) xpres_ceqv [OF _ ra])
apply (rule impI)
apply (erule beq [rule_format, symmetric])
apply simp
apply simp
apply simp
apply rule
apply (erule While_ceqv_fs [OF ra xp])
apply (rule beq)
apply simp
apply simp
apply (rule While_ceqv_fs [OF ceqv_sym [OF ra]])
apply (erule (1) xpres_ceqv [OF _ ra])
apply (rule impI)
apply (erule beq [rule_format, symmetric])
apply simp
apply simp
(* clag *)
apply rule
apply (erule While_ceqv_fs [OF ra xp])
apply (rule beq)
apply simp
apply simp
apply (rule While_ceqv_fs [OF ceqv_sym [OF ra]])
apply (erule (1) xpres_ceqv [OF _ ra])
apply (rule impI)
apply (rule beq [rule_format, symmetric])
apply simp
apply simp
apply simp
done
lemma call_ceqv':
assumes ieq: "\<And>t. rewrite_xf xf t v i i'"
and ceqv: "\<And>t t' s'. ceqv \<Gamma> xf v (r t s') t' (c t s') (c' t s')" (* For record field updates *)
and xf: "\<And>t t'. xf t = v \<Longrightarrow> xf (r t t') = v"
shows "ceqv \<Gamma> xf v t t' (call i f r c) (call i' f r c')"
apply (rule ceqvI)
apply (rule iffI)
apply (erule exec_call_Normal_elim)
apply (drule ceqvD1 [OF _ _ ceqv])
apply (simp add: xf)
apply (erule exec_call)
apply (simp add: rewrite_xfD [OF ieq])
apply assumption
apply (clarsimp simp: rewrite_xfD [OF ieq] elim!: exec_callAbrupt exec_callFault exec_callStuck exec_callUndefined)
apply (clarsimp simp: rewrite_xfD [OF ieq] elim!: exec_callAbrupt exec_callFault exec_callStuck exec_callUndefined)
apply (clarsimp simp: rewrite_xfD [OF ieq] elim!: exec_callAbrupt exec_callFault exec_callStuck exec_callUndefined)
apply (clarsimp simp: rewrite_xfD [OF ieq] elim!: exec_callAbrupt exec_callFault exec_callStuck exec_callUndefined)
(* clag *)
apply (erule exec_call_Normal_elim)
apply (drule ceqvD2 [OF _ _ ceqv])
apply (simp add: xf)
apply (erule exec_call)
apply (simp add: rewrite_xfD [OF ieq])
apply assumption
apply (clarsimp simp: rewrite_xfD [OF ieq, symmetric]
elim!: exec_callAbrupt exec_callFault exec_callStuck exec_callUndefined)+
done
lemma call_ceqv:
assumes ieq: "\<And>t. rewrite_xf xf t v i i'"
and ceqv: "\<And>t t' s'. ceqv \<Gamma> xf v (r t s') t' (c t s') (c' t s')" (* For record field updates *)
and xf: "\<And>t t'. xf (r t t') = xf t"
shows "ceqv \<Gamma> xf v t t' (call i f r c) (call i' f r c')"
by (rule call_ceqv' [OF ieq ceqv], simp add: xf)
lemmas Skip_ceqv = ceqv_refl [where c = Skip]
lemma Catch_ceqv:
assumes ca: "\<And>t t'. ceqv \<Gamma> xf v t t' a a'"
and cb: "\<And>t t'. ceqv \<Gamma> xf v t t' b b'"
and xp: "xpres xf v \<Gamma> a"
shows "ceqv \<Gamma> xf v t t' (Catch a b) (Catch a' b')"
apply (rule ceqvI)
apply rule
apply (erule exec_Normal_elim_cases)
apply (drule (1) ceqvD1 [OF _ _ ca])
apply (rule exec.CatchMatch, assumption)
apply (erule ceqvD1 [OF _ _ cb])
apply (erule (1) xpres_abruptD [OF _ xpres_ceqv [OF xp ca]])
apply (rule exec.CatchMiss)
apply (erule (1) ceqvD1 [OF _ _ ca])
apply assumption
(* clag *)
apply (erule exec_Normal_elim_cases)
apply (drule (1) ceqvD2 [OF _ _ ca])
apply (rule exec.CatchMatch, assumption)
apply (erule ceqvD2 [OF _ _ cb])
apply (erule xpres_abruptD [where xf = xf])
apply (rule xp)
apply assumption
apply (rule exec.CatchMiss)
apply (erule (1) ceqvD2 [OF _ _ ca])
apply assumption
done
lemma Cond_ceqv:
assumes be: "\<And>t. xf t = v \<longrightarrow> (t \<in> x) = (t \<in> x')"
and ca: "ceqv \<Gamma> xf v t t' a a'"
and cb: "ceqv \<Gamma> xf v t t' b b'"
shows "ceqv \<Gamma> xf v t t' (Cond x a b) (Cond x' a' b')"
apply (rule ceqvI)
apply rule
apply (erule exec_Normal_elim_cases)
apply (frule (1) iffD1 [OF be [rule_format]])
apply (erule exec.CondTrue)
apply (erule (1) ceqvD1 [OF _ _ ca])
apply (subst (asm) be)
apply assumption
apply (erule exec.CondFalse)
apply (erule (1) ceqvD1 [OF _ _ cb])
(* clag *)
apply (erule exec_Normal_elim_cases)
apply (frule (1) iffD2 [OF be [ rule_format]])
apply (erule exec.CondTrue)
apply (erule (1) ceqvD2 [OF _ _ ca])
apply (subst (asm) be [rule_format, symmetric])
apply assumption
apply (erule exec.CondFalse)
apply (erule (1) ceqvD2 [OF _ _ cb])
done
lemmas Throw_ceqv = ceqv_refl [where c = Throw]
lemma Collect_mem_eqv:
"rewrite_xf xf t v Q Q' \<Longrightarrow> xf t = v \<longrightarrow> (t \<in> Collect Q) = (t \<in> Collect Q')"
apply (rule impI)
apply (drule (1) rewrite_xfD)
apply simp
done
(* We could just use ceqv_refl at the end, but I want to catch missed cases.
I also assume that While and Cond have {s. P s} as conditionals.
The whileAnno case is a consequence of While.
*)
lemma UNIV_mem_eqv:
"xf t = v \<longrightarrow> (t \<in> UNIV) = (t \<in> UNIV)"
by simp
lemma empty_mem_eqv:
"xf t = v \<longrightarrow> (t \<in> {}) = (t \<in> {})"
by simp
lemma creturn_ceqv_xf:
fixes \<Gamma> :: "(('a globals_scheme, 'b) myvars_scheme) c_body"
assumes xfg: "\<And>s f. xf (global_exn_var_'_update f s) = xf s"
and xfxfu: "\<And>v s. xf (xfu (\<lambda>_. v) s) = v"
shows "ceqv \<Gamma> xf v t t' (return_C xfu xf) (return_C xfu (\<lambda>_. v))"
unfolding creturn_def
apply (rule Seq_ceqv)+
apply (rule Basic_ceqv)
apply (rule rewrite_xfI)
apply simp
apply (rule Seq_ceqv)
apply (rule Basic_ceqv)
apply (simp add: rewrite_xf_def)
apply (rule Throw_ceqv)
apply (rule xpres_basic)
apply simp
apply (simp add: xfg)
apply (rule xpres_basic)
apply (simp add: xfxfu)
done
lemma creturn_ceqv_not_xf:
fixes \<Gamma> :: "(('a globals_scheme, 'b) myvars_scheme) c_body"
assumes rl: "\<And>t. rewrite_xf xf t v rv rv'"
and xfu: "\<And>s f. xf (xfu' f s) = xf s" -- "i.e., xf is independent of xfu'"
and xfg: "\<And>s f. xf (global_exn_var_'_update f s) = xf s"
shows "ceqv \<Gamma> xf v t t' (return_C xfu' rv) (return_C xfu' rv')"
unfolding creturn_def
apply (rule Seq_ceqv)+
apply (rule Basic_ceqv)
apply (rule rewrite_xfI)
apply (simp add: rewrite_xfD [OF rl] xfu)
apply (rule Seq_ceqv)
apply (rule Basic_ceqv)
apply (simp add: rewrite_xf_def)
apply (rule Throw_ceqv)
apply (rule xpres_basic)
apply (simp add: xfg)
apply (rule xpres_basic)
apply (simp add: xfu)
done
lemma Guard_ceqv:
assumes be: "\<And>t. xf t = v \<longrightarrow> (t \<in> x) = (t \<in> x')"
and ca: "ceqv \<Gamma> xf v t t' a a'"
shows "ceqv \<Gamma> xf v t t' (Guard f x a) (Guard f x' a')"
apply (rule ceqvI)
apply rule
apply (erule exec_Normal_elim_cases)
apply (frule (1) iffD1 [OF be [rule_format]])
apply (erule exec.Guard)
apply (erule (1) ceqvD1 [OF _ _ ca])
apply (subst (asm) be)
apply assumption
apply simp
apply (erule exec.GuardFault)
apply (erule exec_Normal_elim_cases)
apply (frule (1) iffD2 [OF be [rule_format]])
apply (erule exec.Guard)
apply (erule (1) ceqvD2 [OF _ _ ca])
apply (subst (asm) be [rule_format, symmetric])
apply assumption
apply simp
apply (erule exec.GuardFault)
done
lemma Cond_UNIV_ceqv: (* Crops up occasionally *)
assumes ca: "ceqv \<Gamma> xf v t t' a a'"
shows "ceqv \<Gamma> xf v t t' (Cond UNIV a b) a'"
using ca
apply -
apply (rule ceqvI)
apply (auto elim!: exec_Normal_elim_cases dest: ceqvD1 ceqvD2 intro: exec.intros)
done
lemma Cond_empty_ceqv: (* Crops up occasionally *)
assumes ca: "ceqv \<Gamma> xf v t t' b b'"
shows "ceqv \<Gamma> xf v t t' (Cond {} a b) b'"
using ca
apply -
apply (rule ceqvI)
apply (auto elim!: exec_Normal_elim_cases dest: ceqvD1 ceqvD2 intro: exec.intros)
done
lemmas Guard_UNIV_ceqv = Guard_ceqv [where x = UNIV and x' = UNIV, simplified]
lemmas ceqv_rules = ceqv_refl [where xf' = xfdc] -- "Any ceqv with xfdc should be ignored"
While_ceqv [OF Collect_mem_eqv] While_ceqv [OF UNIV_mem_eqv]
Cond_ceqv [OF Collect_mem_eqv] Cond_UNIV_ceqv Cond_empty_ceqv
Guard_ceqv [OF Collect_mem_eqv] Guard_UNIV_ceqv
Seq_ceqv Seq_weak_ceqv
Basic_ceqv call_ceqv Skip_ceqv
Catch_ceqv Throw_ceqv
creturn_ceqv_xf creturn_ceqv_not_xf -- "order is important with these two; the second is more general"
ceqv_refl [where c = return_void_C] ceqv_refl [where c = break_C]
ceqv_refl [where c = catchbrk_C]
definition
ceqv_xpres :: "('p \<rightharpoonup> ('s, 'p, 'x) com) \<Rightarrow> ('s \<Rightarrow> 'a) \<Rightarrow> 'a
\<Rightarrow> bool \<Rightarrow> ('s, 'p, 'x) com \<Rightarrow> bool \<Rightarrow> ('s, 'p, 'x) com \<Rightarrow> bool"
where
"ceqv_xpres \<Gamma> xf v pres c pres' c'
\<equiv> \<forall>s s' s''. (pres \<longrightarrow> xf s = v)
\<longrightarrow> (\<Gamma> \<turnstile> \<langle>c, Normal s\<rangle> \<Rightarrow> s' = \<Gamma> \<turnstile> \<langle>c', Normal s\<rangle> \<Rightarrow> s')
\<and> (\<Gamma> \<turnstile> \<langle>c, Normal s\<rangle> \<Rightarrow> s' \<and> (s' = Normal s'' \<or> s' = Abrupt s'') \<and> pres'
\<longrightarrow> xf s'' = v)
\<and> (\<not> pres \<longrightarrow> \<not> pres' \<and> c = c')"
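(* Informal reading: if pres holds then xf = v may be assumed in the initial
   state; under that assumption c and c' agree semantically, and if pres' also
   holds then xf = v is preserved into any Normal or Abrupt final state.  If
   pres is False then no rewriting happens: pres' is False and c' is just c. *)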
definition
ceqv_xpres_rewrite_set :: "('s \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 's set \<Rightarrow> 's set \<Rightarrow> bool" where
"ceqv_xpres_rewrite_set xf v S S'
\<equiv> \<forall>s. xf s = v \<longrightarrow> (s \<in> S) = (s \<in> S')"
definition
ceqv_xpres_rewrite_basic :: "('s \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> ('s \<Rightarrow> 't) \<Rightarrow> ('s \<Rightarrow> 't) \<Rightarrow> bool" where
"ceqv_xpres_rewrite_basic xf v f f'
\<equiv> \<forall>s. xf s = v \<longrightarrow> f s = f' s"
definition
ceqv_xpres_basic_preserves :: "('s \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> ('s \<Rightarrow> 's) \<Rightarrow> bool \<Rightarrow> bool" where
"ceqv_xpres_basic_preserves xf v f b
\<equiv> b \<longrightarrow> (\<forall>s. xf s = v \<longrightarrow> xf (f s) = v)"
definition
ceqv_xpres_eq_If :: "bool \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" where
"ceqv_xpres_eq_If b x y z \<equiv> z = (if b then x else y)"
definition
ceqv_xpres_call_restore_args :: "'a \<Rightarrow> ('s \<Rightarrow> 's) \<Rightarrow> ('s \<Rightarrow> 's) \<Rightarrow> bool"
where
"ceqv_xpres_call_restore_args x f g = (f = g)"
lemma ceqv_xpres_eq_both:
"ceqv_xpres \<Gamma> xf v True c True c'
= ((\<forall>t t'. ceqv \<Gamma> xf v t t' c c') \<and> xpres xf v \<Gamma> c')"
apply (simp add: ceqv_xpres_def ceqv_def xpres_def)
apply blast
done
lemma ceqv_xpres_eq_ceqv:
"ceqv_xpres \<Gamma> xf v True c False c'
= (\<forall> t t'. ceqv \<Gamma> xf v t t' c c')"
by (simp add: ceqv_xpres_def ceqv_def)
lemma ceqv_xpres_eq_imp:
"ceqv_xpres \<Gamma> xf v True c pres c'
= ((\<forall>t t'. ceqv \<Gamma> xf v t t' c c') \<and> (pres \<longrightarrow> xpres xf v \<Gamma> c'))"
by (cases pres, simp_all add: ceqv_xpres_eq_ceqv ceqv_xpres_eq_both)
lemma ceqv_xpres_False:
"ceqv_xpres \<Gamma> xf v False c pres c' = (\<not> pres \<and> c = c')"
by (auto simp add: ceqv_xpres_def)
lemma ceqv_xpres_eq_If_False:
"ceqv_xpres_eq_If P Q False R = (R = (P \<and> Q))"
by (simp add: ceqv_xpres_eq_If_def)
lemma ceqv_xpres_False_pres:
"ceqv_xpres \<Gamma> xf v False c False c"
by (simp add: ceqv_xpres_def)
lemma ceqv_xpres_xfdc:
"ceqv_xpres \<Gamma> xfdc v pres c pres c"
by (simp add: ceqv_xpres_def xfdc_def)
lemma ceqv_xpres_call_restore_argsI:
"\<forall> s. f s = g s \<Longrightarrow> ceqv_xpres_call_restore_args i f g"
by (simp add: ceqv_xpres_call_restore_args_def fun_eq_iff)
lemma ceqv_xpres_whileAnno:
"\<lbrakk> ceqv_xpres \<Gamma> xf v True c pres c'; ceqv_xpres_rewrite_set xf v S S';
ceqv_xpres_eq_If pres c' c c''; ceqv_xpres_eq_If pres S' S S'' \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True (whileAnno S I V c) pres (whileAnno S'' I V c'')"
apply (cases pres)
apply (clarsimp simp add: ceqv_xpres_eq_both ceqv_xpres_eq_If_def
whileAnno_def)
apply (intro conjI allI)
apply (rule While_ceqv)
apply (simp add: ceqv_xpres_rewrite_set_def)
apply simp
apply (erule xpres_ceqv)
apply (rule ceqv_sym, simp)
apply (erule xpres_while)
apply (simp add: ceqv_xpres_def ceqv_xpres_eq_If_def)
done
lemmas ceqv_xpres_While = ceqv_xpres_whileAnno[unfolded whileAnno_def]
lemma ceqv_xpres_Cond:
"\<lbrakk> ceqv_xpres_rewrite_set xf v S S'; ceqv_xpres \<Gamma> xf v True c cpres c';
ceqv_xpres \<Gamma> xf v True d dpres d'; ceqv_xpres_eq_If dpres cpres False pres \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True (Cond S c d) pres (Cond S' c' d')"
apply (clarsimp simp add: ceqv_xpres_eq_imp ceqv_xpres_eq_If_False)
apply (intro allI conjI impI)
apply (rule Cond_ceqv)
apply (simp add: ceqv_xpres_rewrite_set_def)
apply simp
apply simp
apply simp
apply (erule(1) xpres_cond)
done
lemma ceqv_xpres_Guard:
"\<lbrakk> ceqv_xpres_rewrite_set xf v S S'; ceqv_xpres \<Gamma> xf v True c pres c' \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True (Guard g S c) pres (Guard g S' c')"
apply (clarsimp simp: ceqv_xpres_eq_imp xpres_guard)
apply (rule Guard_ceqv)
apply (simp add: ceqv_xpres_rewrite_set_def)
apply simp
done
lemma ceqv_xpres_Seq:
"\<lbrakk> ceqv_xpres \<Gamma> xf v True c cpres c'; ceqv_xpres \<Gamma> xf v cpres d pres d' \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True (c ;; d) pres (c' ;; d')"
apply (cases cpres)
apply (clarsimp simp add: ceqv_xpres_eq_imp xpres_seq)
apply (rule Seq_ceqv, simp+)
apply (erule xpres_ceqv)
apply (rule ceqv_sym, simp)
apply (clarsimp simp add: ceqv_xpres_eq_imp ceqv_xpres_False)
apply (rule Seq_weak_ceqv, simp)
done
lemma ceqv_xpres_Basic:
"\<lbrakk> ceqv_xpres_rewrite_basic xf v f f';
ceqv_xpres_basic_preserves xf v f' pres \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True (Basic f) pres (Basic f')"
apply (simp add: ceqv_xpres_eq_imp)
apply (intro allI impI conjI)
apply (rule Basic_ceqv)
apply (simp add: rewrite_xf_def ceqv_xpres_rewrite_basic_def)
apply (rule xpres_basic)
apply (simp add: ceqv_xpres_basic_preserves_def)
done
lemma ceqv_xpres_call:
"\<lbrakk> ceqv_xpres_call_restore_args f i i';
ceqv_xpres_rewrite_basic xf v i' i'';
\<And>s'. ceqv_xpres_basic_preserves xf v (\<lambda>s. r s s') pres';
\<And>s' s''. ceqv_xpres \<Gamma> xf v pres' (c s' s'') pres (c' s' s'') \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True (call i f r c) pres (call i'' f r c')"
apply (simp add: ceqv_xpres_eq_imp ceqv_xpres_call_restore_args_def)
apply (intro allI conjI impI)
defer
apply (cases pres')
apply (rule xpres_call)
apply (simp add: ceqv_xpres_basic_preserves_def)
apply (simp add: ceqv_xpres_eq_imp)
apply (simp add: ceqv_xpres_False)
apply (cases pres')
apply (clarsimp simp add: ceqv_xpres_eq_imp)
apply (rule call_ceqv')
apply (simp add: rewrite_xf_def ceqv_xpres_rewrite_basic_def)
apply simp
apply (simp add: ceqv_xpres_basic_preserves_def)
apply (simp add: ceqv_xpres_False)
apply (clarsimp simp add: ceqv_def ceqv_xpres_rewrite_basic_def)
apply (auto elim!: exec_call_Normal_elim exec_call
exec_callAbrupt exec_callFault exec_callStuck exec_callUndefined)
done
lemma ceqv_xpres_Skip:
"ceqv_xpres \<Gamma> xf v True Skip True Skip"
by (simp add: ceqv_xpres_eq_imp Skip_ceqv xpres_skip)
lemma ceqv_xpres_Catch:
"\<lbrakk> ceqv_xpres \<Gamma> xf v True c pres c'; ceqv_xpres \<Gamma> xf v pres h pres' h' \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True (Catch c h) pres' (Catch c' h')"
apply (cases pres)
apply (clarsimp simp add: ceqv_xpres_eq_imp xpres_catch)
apply (rule Catch_ceqv)
apply simp
apply simp
apply (erule xpres_ceqv)
apply (rule ceqv_sym, simp)
apply (clarsimp simp add: ceqv_xpres_False ceqv_xpres_eq_imp)
apply (clarsimp simp: ceqv_def)
apply (auto elim!: exec_Normal_elim_cases
intro: exec.intros)
done
lemma ceqv_xpres_Throw:
"ceqv_xpres \<Gamma> xf v True Throw True Throw"
by (simp add: ceqv_xpres_eq_imp Throw_ceqv xpres_throw)
lemma exec_Basic_Seq:
"\<Gamma> \<turnstile> \<langle>Basic f ;; c, Normal s\<rangle> \<Rightarrow> s'
= \<Gamma> \<turnstile> \<langle>c, Normal (f s)\<rangle> \<Rightarrow> s'"
by (auto elim: exec_elim_cases intro: exec.Basic exec.Seq)
lemma exec_Basic_Seq_Basic:
"\<Gamma>\<turnstile> \<langle>Basic f;; Basic g, x\<rangle> \<Rightarrow> y = \<Gamma>\<turnstile> \<langle>Basic (g \<circ> f), x\<rangle> \<Rightarrow> y"
by (auto simp: o_def elim: exec_elim_cases intro: exec.Basic exec.Seq)
lemma ceqv_xpres_return_C:
"\<lbrakk> ceqv_xpres_rewrite_basic xf v qf qf';
\<And>f. ceqv_xpres_basic_preserves xf v (xfu f) pres';
\<And>f. ceqv_xpres_basic_preserves xf v
(global_exn_var_'_update f) pres'';
ceqv_xpres_eq_If pres' pres'' False pres \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True (return_C xfu qf) pres (return_C xfu qf')"
apply (simp add: ceqv_xpres_def ceqv_xpres_rewrite_basic_def
ceqv_xpres_basic_preserves_def creturn_def
ceqv_xpres_eq_If_False)
apply (auto elim!: exec_elim_cases simp: exec_Basic_Seq)
done
lemma ceqv_xpres_C_bits:
"\<lbrakk> \<And>f. ceqv_xpres_basic_preserves xf v
(global_exn_var_'_update f) pres \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True return_void_C pres return_void_C"
"\<lbrakk> \<And>f. ceqv_xpres_basic_preserves xf v
(global_exn_var_'_update f) pres \<rbrakk>
\<Longrightarrow> ceqv_xpres \<Gamma> xf v True break_C pres break_C"
"ceqv_xpres \<Gamma> xf v True catchbrk_C True catchbrk_C"
by (auto simp: ceqv_xpres_def
return_void_C_def cbreak_def
catchbrk_C_def
ceqv_xpres_basic_preserves_def
elim!: exec_elim_cases)
definition
"ceqv_xpres_lvar_nondet_init xf v_upd pres \<equiv> pres \<longrightarrow> (\<forall>s v. xf (v_upd (\<lambda>_. v) s) = xf s)"
lemma ceqv_xpres_lvar_nondet_init:
"ceqv_xpres_lvar_nondet_init xf v_upd pres \<Longrightarrow>
ceqv_xpres \<Gamma> xf v True (lvar_nondet_init v_acc v_upd) pres (lvar_nondet_init v_acc v_upd)"
apply (clarsimp simp: ceqv_xpres_def lvar_nondet_init_def)
apply (erule exec.cases, simp_all)
apply (clarsimp simp: ceqv_xpres_lvar_nondet_init_def)
done
lemmas ceqv_xpres_rules =
ceqv_xpres_False_pres ceqv_xpres_xfdc ceqv_xpres_whileAnno
ceqv_xpres_While ceqv_xpres_Cond ceqv_xpres_Guard ceqv_xpres_Seq
ceqv_xpres_lvar_nondet_init ceqv_xpres_Basic ceqv_xpres_call
ceqv_xpres_Skip ceqv_xpres_Catch ceqv_xpres_Throw ceqv_xpres_return_C
ceqv_xpres_C_bits
lemma ceqv_xpres_FalseI:
"ceqv_xpres \<Gamma> xf v pres c False c"
by (simp add: ceqv_xpres_def)
lemma ceqv_xpres_ceqvI:
"ceqv_xpres \<Gamma> xf v True c pres c' \<Longrightarrow> ceqv \<Gamma> xf v t t' c c'"
by (simp add: ceqv_xpres_eq_imp)
lemma ceqv_xpres_rewrite_basic_left_cong:
"\<lbrakk> \<And>s. xf s = v \<Longrightarrow> f s = f'' s \<rbrakk>
\<Longrightarrow> ceqv_xpres_rewrite_basic xf v f f'
= ceqv_xpres_rewrite_basic xf v f'' f'"
by (simp add: ceqv_xpres_rewrite_basic_def)
lemma ceqv_xpres_rewrite_basic_refl:
"ceqv_xpres_rewrite_basic xf v f f"
by (simp add: ceqv_xpres_rewrite_basic_def)
lemma ceqv_xpres_basic_preserves_TrueI:
"\<lbrakk> \<And>s. xf s = v \<longrightarrow> xf (f s) = v \<rbrakk> \<Longrightarrow> ceqv_xpres_basic_preserves xf v f True"
by (simp add: ceqv_xpres_basic_preserves_def)
lemma ceqv_xpres_basic_preserves_FalseI:
"ceqv_xpres_basic_preserves xf v f False"
by (simp add: ceqv_xpres_basic_preserves_def)
lemma ceqv_xpres_lvar_nondet_init_TrueI:
"(\<And>s v. xf (v_upd (\<lambda>_. v) s) = xf s) \<Longrightarrow> ceqv_xpres_lvar_nondet_init xf v_upd True"
by (simp add: ceqv_xpres_lvar_nondet_init_def)
lemma ceqv_xpres_lvar_nondet_init_FalseI:
"ceqv_xpres_lvar_nondet_init xf v_upd False"
by (simp add: ceqv_xpres_lvar_nondet_init_def)
lemma ceqv_xpres_rewrite_set_rules:
"ceqv_xpres_rewrite_basic xf v P P'
\<Longrightarrow> ceqv_xpres_rewrite_set xf v {s. P s} {s. P' s}"
"ceqv_xpres_rewrite_set xf v UNIV UNIV"
"ceqv_xpres_rewrite_set xf v {} {}"
"\<lbrakk> ceqv_xpres_rewrite_set xf v S S''; ceqv_xpres_rewrite_set xf v S' S''' \<rbrakk>
\<Longrightarrow> ceqv_xpres_rewrite_set xf v (if G then S else S') (if G then S'' else S''')"
by (simp_all add: ceqv_xpres_rewrite_set_def ceqv_xpres_rewrite_basic_def
split: if_split)
lemma ceqv_xpres_eq_If_rules:
"ceqv_xpres_eq_If False x y y"
"ceqv_xpres_eq_If True x y x"
by (simp add: ceqv_xpres_eq_If_def)+
definition
"simpl_sequence f xs
= foldr (Seq) (map f xs) Skip"
lemma simpl_sequence_Cons:
"simpl_sequence f (x # xs) = Seq (f x) (simpl_sequence f xs)"
by (simp add: simpl_sequence_def)
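(* A hedged illustration, direct from the definition: on a concrete two-element
   list the fold unwinds to a Skip-terminated sequence. *)
lemma simpl_sequence_two:
  "simpl_sequence f [x, y] = (f x ;; (f y ;; Skip))"
  by (simp add: simpl_sequence_def)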
fun(sequential)
simpl_final_basic_opts :: "('s, 'p, 'x) com \<Rightarrow> ('s \<Rightarrow> 's) option"
where
"simpl_final_basic_opts (x ;; y)
= (case (simpl_final_basic_opts y) of None \<Rightarrow> simpl_final_basic_opts x
| Some v \<Rightarrow> Some v)"
| "simpl_final_basic_opts (Basic f) = Some f"
| "simpl_final_basic_opts (Guard E F c) = simpl_final_basic_opts c"
| "simpl_final_basic_opts Skip = None"
| "simpl_final_basic_opts Throw = None"
| "simpl_final_basic_opts c = Some id"
definition
"simpl_final_basic c = (case simpl_final_basic_opts c
of Some v \<Rightarrow> v | None \<Rightarrow> id)"
lemmas simpl_final_basic_simps[simp]
= simpl_final_basic_def[where c="Seq c c'" for c c']
simpl_final_basic_def[where c="Basic f" for f]
simpl_final_basic_def[where c="Guard E F c" for E F c]
lemma simpl_final_basic_opts_exec[OF _ refl refl]:
"\<Gamma> \<turnstile> \<langle>c, xs\<rangle> \<Rightarrow> xs' \<Longrightarrow> xs = Normal t \<Longrightarrow> xs' = Normal t'
\<Longrightarrow> (case simpl_final_basic_opts c of None \<Rightarrow> t' = t
| Some f \<Rightarrow> \<exists>s. t' = f s)"
apply (induct arbitrary: t t' rule: exec.induct, simp_all)
apply metis
apply atomize
apply clarsimp
apply (case_tac s')
apply (auto split: option.split_asm)[1]
apply (auto elim!: exec_elim_cases)
done
lemma simpl_final_basic_exec:
"\<Gamma> \<turnstile> \<langle>c, Normal t\<rangle> \<Rightarrow> Normal t'
\<Longrightarrow> \<exists>s. t' = simpl_final_basic c s"
apply (frule simpl_final_basic_opts_exec)
apply (simp add: simpl_final_basic_def split: option.split_asm)
done
lemma ceqv_xpres_to_simpl_sequence:
fixes v :: "'a :: ring_1"
assumes c: "\<And>v. ceqv_xpres \<Gamma> xf' v True c pres (c' v)"
and v: "\<And>v s. xf' (simpl_final_basic (c' v) s) - v = offs"
shows "\<not> CP (v + of_nat n * offs)
\<Longrightarrow> ceqv_xpres \<Gamma> xf' v True (While {s. CP (xf' s)} c) False
(simpl_sequence c' (takeWhile CP (map (\<lambda>x. v + of_nat x * offs) [0 ..< n])))"
(is "_ \<Longrightarrow> ceqv_xpres _ _ _ _ (While ?S _) _ _")
proof (induct n arbitrary: v)
case 0
show ?case using c[where v=v] 0
apply (simp add: simpl_sequence_def)
apply (simp add: ceqv_xpres_eq_imp ceqv_def)
apply (auto elim!: exec_Normal_elim_cases intro: exec.intros)[1]
done
next
case (Suc n)
have foo: "\<And>t t'. (\<Gamma> \<turnstile> \<langle>c' v, Normal t\<rangle> \<Rightarrow> Normal t') \<longrightarrow> xf' t' = v + offs"
using v
by (clarsimp simp: field_simps dest!: simpl_final_basic_exec)
show ?case using c[where v=v] Suc.hyps[where v="v + offs"] Suc.prems
apply (subst upt_conv_Cons, simp)
apply (simp only: map_Suc_upt[symmetric] list.simps)
apply (cases "CP v")
apply (simp add: o_def field_simps simpl_sequence_Cons
ceqv_xpres_eq_imp)
apply (clarsimp, rule ceqv_trans[where c'="c ;; While ?S c"])
apply (simp add: ceqv_def)
apply (auto elim!: exec_Normal_elim_cases intro: exec.Seq exec.WhileTrue)[1]
apply (rule ceqv_trans[where c'="c' v ;; While ?S c"])
apply (simp add: ceqv_def)
apply (auto elim!: exec_Normal_elim_cases intro: exec.Seq)[1]
apply (simp add: ceqv_def)
apply (intro impI exec_Seq_cong refl)
apply (simp add: foo)
apply (simp add: simpl_sequence_def field_simps)
apply (simp add: ceqv_xpres_eq_imp ceqv_def)
apply (auto intro: exec.WhileFalse exec.Skip elim!: exec_Normal_elim_cases)[1]
done
qed
lemma ceqv_xpres_While_simpl_sequence:
fixes v :: "'a :: ring_1"
assumes c: "\<And>v. ceqv_xpres \<Gamma> xf' v True c pres (c' v)"
shows "ceqv_xpres \<Gamma> xf' v True (While {s. CP (xf' s)} c) False
(if \<exists>n offs. (\<forall>s v. (xf' (simpl_final_basic (c' v) s) - v = offs)) \<and> \<not> CP (v + of_nat n * offs)
then simpl_sequence c' (map (\<lambda>x. v + of_nat x
* (THE offs. \<forall>s v. (xf' (simpl_final_basic (c' v) s) - v = offs)))
[0 ..< (LEAST n. \<not> CP (v + of_nat n
* (THE offs. \<forall>s v. (xf' (simpl_final_basic (c' v) s) - v = offs))))])
else While {s. CP (xf' s)} c)"
apply (split if_split, simp add: ceqv_xpres_def[where c=c and c'=c for c])
apply (clarsimp simp: ceqv_xpres_eq_ceqv)
apply (rule ceqv_trans)
apply (rule_tac n="LEAST n. \<not> CP (v + of_nat n * offs)"
in ceqv_xpres_to_simpl_sequence[simplified ceqv_xpres_eq_ceqv, rule_format])
apply (rule c)
apply simp
apply simp
apply (rule LeastI_ex)
apply blast
apply (subst takeWhile_eq_all_conv[THEN iffD2])
apply (clarsimp dest!: not_less_Least)
apply (simp add: ceqv_def)
done
lemma ccorres_underlying_name_seq_bound:
"(\<not> CP n \<and> (\<forall>n' < n. CP n'))
\<Longrightarrow> ccorres_underlying srel \<Gamma> rrel xf arrel axf G G' hs m
(simpl_sequence c' (map f [0 ..< n]))
\<Longrightarrow> ccorres_underlying srel \<Gamma> rrel xf arrel axf
G G' hs m (if \<exists>n. \<not> CP n
then simpl_sequence c' (map f [0 ..< (LEAST n. \<not> CP n)])
else c)"
apply (subst if_P, blast)
apply (subst Least_equality[where x=n], simp_all)
apply (rule ccontr, simp add: linorder_not_le)
done
lemma sequenceE_simpl_sequence_nth_corres':
"\<lbrakk> length xs = length ys;
\<And>zs. length zs < length xs \<Longrightarrow>
ccorres_underlying sr \<Gamma> (inr_rrel (\<lambda>rv rv'. r' (prev_xs @ zs @ [rv]) rv')) xf'
(inl_rrel arrel) axf
(P and F (length prev_xs + length zs)) (Q \<inter> {s. r' (prev_xs @ zs) (xf' s)}) hs
(xs ! length zs) (f (ys ! length zs));
\<And>s \<sigma>. s \<in> Q \<Longrightarrow> P \<sigma> \<Longrightarrow>
(\<sigma>, s) \<in> sr \<Longrightarrow> \<forall>y \<in> set ys. \<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> {s} (f y) Q,UNIV;
\<And>n. Suc n < length xs \<Longrightarrow> \<lbrace>P and F (length prev_xs + n)\<rbrace> xs ! n \<lbrace>\<lambda>_. P and F (length prev_xs + Suc n)\<rbrace>, -
\<rbrakk>
\<Longrightarrow> ccorres_underlying sr \<Gamma> (inr_rrel (\<lambda>rv rv'. r' (prev_xs @ rv) rv')) xf'
(inl_rrel arrel) axf
(\<lambda>s. xs \<noteq> [] \<longrightarrow> P s \<and> F (length prev_xs) s) (Q \<inter> {s. r' prev_xs (xf' s)}) hs
(sequenceE xs)
(simpl_sequence f ys)"
proof (induct xs ys arbitrary: prev_xs rule: list_induct2)
case Nil
show ?case
apply (simp add: sequenceE_def simpl_sequence_def)
apply (rule ccorres_guard_imp2, rule ccorres_returnOk_skip)
apply simp
done
next
case (Cons x xs y ys)
show ?case
apply (simp add: simpl_sequence_Cons sequenceE_Cons)
apply (rule ccorres_guard_imp2)
apply (rule ccorres_splitE)
apply (simp add: inl_rrel_inl_rrel)
apply (rule Cons.prems(1)[where zs=Nil, simplified])
apply (rule ceqv_refl)
apply (simp add: liftME_def[symmetric] liftME_liftM)
apply (rule ccorres_rel_imp2, rule Cons.hyps(2)[where prev_xs="prev_xs @ [rv]" for rv])
apply (rule ccorres_guard_imp2, rule ccorres_rel_imp2,
rule Cons.prems(1)[where zs="z # zs" for z zs, simplified])
apply simp+
apply (blast dest: Cons.prems[simplified])
apply simp
apply (cut_tac n="Suc n" in Cons.prems(3), simp, simp)
apply (clarsimp elim!: inl_inrE)
apply assumption
apply (clarsimp elim!: inl_inrE)
apply simp
apply (rule hoare_vcg_const_imp_lift_R)
apply (rule hoare_gen_asmE)
apply (erule Cons.prems(3)[where n=0, simplified])
apply (rule_tac P="Q \<inter> {s. \<exists>\<sigma>. P \<sigma> \<and> (\<sigma>, s) \<in> sr}"
in HoarePartial.conseq_exploit_pre)
apply (clarsimp, rule conseqPost, rule Cons.prems(2)[simplified, THEN conjunct1],
simp+)
apply (clarsimp simp: ccHoarePost_def elim!: inl_inrE)
apply simp
apply auto
done
qed
lemmas sequenceE_simpl_sequence_nth_corres
= sequenceE_simpl_sequence_nth_corres'[where prev_xs=Nil, simplified]
lemma mapME_x_simpl_sequence_fun_related:
"\<lbrakk> ys = map yf xs;
\<And>n x. x \<in> set xs \<Longrightarrow>
ccorres_underlying sr \<Gamma> (inr_rrel dc) xfdc (inl_rrel arrel) axf
(P and F n (n < length xs) x) Q hs
(f x) (f' (yf x));
\<And>s \<sigma>. s \<in> Q \<Longrightarrow> P \<sigma> \<Longrightarrow>
(\<sigma>, s) \<in> sr \<Longrightarrow> \<forall>x \<in> set xs. \<Gamma>\<turnstile>\<^bsub>/UNIV\<^esub> {s} (f' (yf x)) Q,UNIV;
\<And>n. Suc n < length xs \<Longrightarrow> \<lbrace>P and F n True (xs ! n)\<rbrace> f (xs ! n) \<lbrace>\<lambda>_. P and F (Suc n) (Suc n < length xs) (xs ! Suc n)\<rbrace>, -
\<rbrakk>
\<Longrightarrow> ccorres_underlying sr \<Gamma> (inr_rrel dc) xfdc
(inl_rrel arrel) axf
(P and F 0 (xs \<noteq> []) (xs ! 0)) Q hs
(mapME_x f xs)
(simpl_sequence f' ys)"
apply (simp add: mapME_x_sequenceE liftME_def[symmetric]
liftME_liftM)
apply (rule ccorres_rel_imp2, rule ccorres_guard_imp2,
rule sequenceE_simpl_sequence_nth_corres[where r'=dc and xf'=xfdc
and P=P and F="\<lambda>i. F i (i < length xs) (xs ! i)" and Q=Q and arrel=arrel and axf=axf];
clarsimp elim!: inl_inrE)
apply (erule_tac x="length zs" in meta_allE
| erule_tac x="xs ! length zs" in meta_allE)+
apply (simp add: dc_def)
done
lemmas mapME_x_simpl_sequence_same
= mapME_x_simpl_sequence_fun_related[where yf=id, simplified]
lemma call_ignore_cong:
"call i f g r = call i f g r" by (rule refl)
(* These could be done with ML patterns, but this fits in better with tactics *)
lemma match_valid:
"NonDetMonad.valid P a P' \<Longrightarrow> NonDetMonad.valid P a P'" .
lemma match_validE:
"NonDetMonad.validE P a P' P'' \<Longrightarrow> NonDetMonad.validE P a P' P''" .
lemma match_hoare:
"HoarePartialDef.hoarep G T F P C P' A \<Longrightarrow> HoarePartialDef.hoarep G T F P C P' A" .
lemma match_all_hoare:
"\<forall>x. HoarePartialDef.hoarep G T F (P x) C (P' x) (A x) \<Longrightarrow>
\<forall>x. HoarePartialDef.hoarep G T F (P x) C (P' x) (A x)" .
lemmas ctac_skips = match_valid match_validE match_all_hoare match_hoare
lemma match_xpres:
"xpres xf v \<Gamma> c \<Longrightarrow> xpres xf v \<Gamma> c" .
lemma match_ceqv:
"ceqv \<Gamma> xf v t t' c c' \<Longrightarrow> ceqv \<Gamma> xf v t t' c c'" .
ML_file "ctac-method.ML"
setup CtacImpl.setup
method_setup ctac = {* CtacImpl.corres_ctac_tactic *}
"Split and rewrite corres rules. Arguments simp (add|del|only), pre (add|del|only), (ccorres) (add|del|only)"
method_setup clift = {* CtacImpl.corres_abstract_args *}
"Abstract a local variable into a HOL variable"
method_setup cinitlift = {* CtacImpl.corres_abstract_init_args *}
"Abstract a list of local variables into HOL variables without touching the remaining guards"
method_setup csymbr = {* CtacImpl.corres_symb_rhs *}
"Symbolically execute the call on the right hand side of corres (see ccorres_lift_rhss). Arguments simp (add|del|only)."
method_setup ceqv = {* CtacImpl.corres_ceqv *}
"Solve ceqv goals."
(* The true here says to unfold the Haskell side *)
method_setup cinit = {* CtacImpl.corres_boilerplate true *}
"Boilerplate tactic for the start of a Call ccorres proof. Arguments 'lift' then 'simp (add|del|only)', e.g. apply (cinit lift: var1_' var2_' simp add: return_bind)"
method_setup cinit' = {* CtacImpl.corres_boilerplate false *}
"As for cinit, but without unfolding the abstract side"
(* Debugging *)
method_setup ctac_print_xf = {* CtacImpl.corres_print_xf *}
"Print out what ctac thinks is the current xf"
(* Set up wpc *)
lemma
wpc_helper_ccorres_final:
"ccorres_underlying sr G rv xf arrel axf Q Q' hs f f'
\<Longrightarrow> wpc_helper (P, P') (Q, Q')
(ccorres_underlying sr G rv xf arrel axf P P' hs f f')"
apply (clarsimp simp: wpc_helper_def)
apply (erule ccorres_guard_imp)
apply auto
done
wpc_setup "\<lambda>m. ccorres_underlying sr G rv xf arrel axf P P' hs m conc" wpc_helper_ccorres_final
wpc_setup "\<lambda>m. ccorres_underlying sr G rv xf arrel axf P P' hs (m >>= a) conc" wpc_helper_ccorres_final
context kernel
begin
(* Set up ctac proof sets. These are tried in reverse order (further down is tried first) *)
declare ccorres_Guard [corres_pre]
declare ccorres_Guard_Seq [corres_pre]
lemma c_guard_field_abs:
fixes p :: "'a :: mem_type ptr"
assumes abs: "\<forall>s s'. (s, s') \<in> rf_sr \<and> P s \<and> P' s' \<longrightarrow> c_guard p"
shows "\<forall>s s'. (s, s') \<in> rf_sr \<and> P s
\<and> (P' s' \<and> (\<exists>t. field_ti TYPE('a) f = Some t \<and> export_uinfo t = export_uinfo (typ_info_t TYPE('b :: mem_type))))
\<longrightarrow> c_guard (Ptr &(p\<rightarrow>f) :: 'b :: mem_type ptr)"
using c_guard_field abs by blast
lemma h_t_valid_field_abs:
fixes p :: "'a :: mem_type ptr"
assumes abs: "\<forall>s s'. (s, s') \<in> rf_sr \<and> P s \<and> P' s' \<longrightarrow> s' \<Turnstile>\<^sub>c p"
shows "\<forall>s s'. (s, s') \<in> rf_sr \<and> P s
\<and> (P' s' \<and> (\<exists>t. field_ti TYPE('a) f = Some t \<and> export_uinfo t = export_uinfo (typ_info_t TYPE('b :: mem_type))))
\<longrightarrow> s' \<Turnstile>\<^sub>c (Ptr &(p\<rightarrow>f) :: 'b :: mem_type ptr)"
using h_t_valid_field abs by blast
lemmas ccorres_move_c_guard_Seq_field = ccorres_move_Guard_Seq [OF c_guard_field_abs]
lemmas ccorres_move_c_guard_field = ccorres_move_Guard [OF c_guard_field_abs]
lemma abs_c_guard_from_abs_h_t_valid:
"(\<forall>s s'. (s, s') \<in> rf_sr \<and> P s \<and> P' s' \<longrightarrow> s' \<Turnstile>\<^sub>c p)
\<Longrightarrow> (\<forall>s s'. (s, s') \<in> rf_sr \<and> P s \<and> P' s' \<longrightarrow> c_guard p)"
by (auto intro: h_t_valid_c_guard)
lemmas ccorres_move_c_guards =
ccorres_move_c_guard_Seq_field[OF abs_c_guard_from_abs_h_t_valid]
ccorres_move_Guard_Seq[OF h_t_valid_field_abs]
ccorres_move_Guard_Seq[OF abs_c_guard_from_abs_h_t_valid]
ccorres_move_Guard_Seq
ccorres_move_c_guard_field[OF abs_c_guard_from_abs_h_t_valid]
ccorres_move_Guard[OF h_t_valid_field_abs]
ccorres_move_Guard[OF abs_c_guard_from_abs_h_t_valid]
ccorres_move_Guard
lemma h_t_array_valid_array_assertion:
"h_t_array_valid htd ptr n \<Longrightarrow> 0 < n
\<Longrightarrow> array_assertion ptr n htd"
apply (simp add: array_assertion_def)
apply (fastforce intro: exI[where x=0])
done
lemma array_assertion_abs_to_const:
"\<forall>s s'. (s, s') \<in> rf_sr \<and> P s \<and> P' s'
\<longrightarrow> (Suc 0 = 0 \<or> array_assertion (ptr s s') (n s s') (htd s s'))
\<Longrightarrow> \<forall>s s'. (s, s') \<in> rf_sr \<and> P s \<and> P' s'
\<longrightarrow> array_assertion (ptr s s') (n s s') (htd s s')"
by simp
lemmas ccorres_move_array_assertions
= ccorres_move_Guard_Seq ccorres_move_Guard
ccorres_move_Guard_Seq[OF array_assertion_abs_to_const]
ccorres_move_Guard[OF array_assertion_abs_to_const]
lemma ptr_add_assertion_positive_helper:
"n == m \<Longrightarrow> 0 \<le> sint m \<Longrightarrow> 0 \<le> sint n"
by simp
lemma cvariable_array_map_const_add_map_option:
"cvariable_array_map_relation m (\<lambda>_. n)
= cvariable_array_map_relation (map_option f o m) (\<lambda>_. n)"
by (simp add: cvariable_array_map_relation_def fun_eq_iff)
lemma ccorres_move_const_guard:
"ccorres_underlying rf_sr Gamm rrel xf arrel axf P P' hs m c
\<Longrightarrow> ccorres_underlying rf_sr Gamm rrel xf arrel axf
(P and K G) P' hs m (Guard F {s. G} c)"
"ccorres_underlying rf_sr Gamm rrel xf arrel axf P P' hs m (c ;; d)
\<Longrightarrow> ccorres_underlying rf_sr Gamm rrel xf arrel axf
(P and K G) P' hs m (Guard F {s. G} c ;; d)"
apply (rule ccorres_guard_imp2, erule ccorres_Guard, simp)
apply (rule ccorres_guard_imp2, erule ccorres_Guard_Seq, simp)
done
lemmas ccorres_move_const_guards
= ccorres_move_const_guard
ccorres_move_const_guard[unfolded Collect_const]
lemma liftM_exs_valid:
"\<lbrace>P\<rbrace> m \<exists>\<lbrace>\<lambda>rv. Q (f rv)\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> liftM f m \<exists>\<lbrace>Q\<rbrace>"
unfolding liftM_def exs_valid_def
apply (clarsimp)
apply (drule spec, drule (1) mp)
apply (clarsimp simp: bind_def return_def)
apply (erule bexI [rotated])
apply simp
done
lemma ceqv_remove_eqv_skip:
"\<lbrakk> \<And>s. ceqv \<Gamma> xf () s s' b Skip \<rbrakk> \<Longrightarrow>
ceqv \<Gamma> xf () s s' (a ;; b) a"
apply (rule ceqv_trans)
apply (erule Seq_ceqv [OF ceqv_refl])
apply (simp add: xpres_def)
apply (clarsimp simp add: ceqv_def)
apply (rule iffI)
apply (auto elim!: exec_elim_cases)[1]
apply (erule exec.intros)
apply (cases s', simp_all)
apply (rule exec.intros)
done
lemma ceqv_remove_eqv_skip':
"\<lbrakk> \<And>s. ceqv \<Gamma> xf v s s' b Skip; \<And>s'. ceqv \<Gamma> xf v s s' a a'; xpres xf v \<Gamma> a \<rbrakk> \<Longrightarrow>
ceqv \<Gamma> xf v s s' (a ;; b) a'"
apply (rule ceqv_trans)
apply (erule Seq_ceqv [OF ceqv_refl])
apply (simp add: xpres_ceqv)
apply (clarsimp simp add: ceqv_def)
apply (rule iffI)
apply (erule exec_elim_cases, simp_all)
apply (auto elim!: exec_elim_cases)[1]
apply (rule exec.intros, simp)
apply (cases s', simp_all)
apply (rule exec.intros)
done
lemma xpres_triv:
"xpres xf () G c"
by (simp add: xpres_def)
lemma ceqv_Guard_UNIV:
"ceqv G xf v s s' (Guard Err UNIV c) c'
= ceqv G xf v s s' c c'"
by (simp add: ceqv_def exec_Guard)
lemma ceqv_guard_into_seq:
"ceqv \<Gamma> xf v s s' (Guard Err S (a ;; b)) (Guard Err S a ;; b)"
by (auto simp: ceqv_def elim!: exec_elim_cases intro: exec.intros)
lemma ceqv_Seq_Skip_cases:
"\<lbrakk> \<And>s'. ceqv \<Gamma> xf v s s' a a'; \<And>s. ceqv \<Gamma> xf v s s' b c; xpres xf v \<Gamma> a;
(c = Skip \<and> c' = a' \<or> c' = (a' ;; c)) \<rbrakk> \<Longrightarrow>
ceqv \<Gamma> xf v s s' (a ;; b) c'"
by (metis Seq_ceqv ceqv_remove_eqv_skip')
lemma finish_ceqv_Seq_Skip_cases:
"(Skip = Skip \<and> x = x \<or> x = y)"
"(y = Skip \<and> (x ;; y) = z \<or> (x ;; y) = (x ;; y))"
by simp_all
lemma semantic_equiv_IF_True:
"semantic_equiv G s s' (IF True THEN c ELSE c' FI) c"
apply (simp add: semantic_equiv_seq_assoc_eq[symmetric])
by (auto simp: semantic_equiv_def2 elim!: exec_Normal_elim_cases intro: exec.intros)
lemmas ccorres_IF_True = ccorres_semantic_equiv[OF semantic_equiv_IF_True]
ML {*
fun tac ctxt =
resolve_tac ctxt [@{thm ccorres_abstract[where xf'="\<lambda>s. ()"]}] 1
THEN (REPEAT_DETERM
(resolve_tac ctxt @{thms While_ceqv[OF impI, OF refl] Cond_ceqv[OF impI, OF refl]
ceqv_Seq_Skip_cases ceqv_Guard_UNIV[THEN iffD2]
Guard_ceqv[OF impI, OF refl] ceqv_refl
finish_ceqv_Seq_Skip_cases} 1
ORELSE (resolve_tac ctxt [@{thm xpresI}] THEN' simp_tac (ctxt |> Splitter.del_split @{thm "if_split"})) 1
))
THEN simp_tac (put_simpset HOL_basic_ss ctxt addsimps @{thms com.case}) 1
*}
end
method_setup ccorres_remove_UNIV_guard = {* Args.context >> (fn ctxt => (K (Method.SIMPLE_METHOD (tac ctxt)))) *}
"removes UNIV guards"
end
|
= = = East wing = = =
|
%!TEX root = ../thesis.tex
\chapter{Conclusions}
\ifpdf
\graphicspath{{Chapters/Figs/Raster/}{Chapters/Figs/PDF/}{Chapters/Figs/}}
\else
\graphicspath{{Chapters/Figs/Vector/}{Chapters/Figs/}}
\fi
%********************************** % Section **************************************
\section{Contributions}
%Goals: Study the application of hypervisors for mix-critical applications. Use the Model-Based Design approach to overcome the complexity of implementing safe & secure applications.
%A customizable code-generation framework for ARINC-xxx compliant multicore systems has been developed. It exploits the (now mature) capabilities of RTW without using the concurrent workflow (which is not yet mature). A framework for scheduling mix-critical systems has also been developed; even though it is not sufficient to correctly handle mix-critical applications, it is a starting point.
Multi-core platforms have been introduced in many different settings, but so far they have not been utilized in the domain of safety-critical avionic real-time systems. A substantial amount of work has been put into the evaluation of hypervisors to address the security and safety requirements of such applications.
\par In this thesis, the area of code generation for mixed-critical applications on multi-core embedded systems is addressed. For this purpose, a Model-Based Design framework, supported by Simulink, has been developed. It is a proof-of-concept integrated tool that allows the designer to design and deploy hard real-time, mixed-critical applications with the assistance of optimization problems and code generation.
%********************************** % Section **************************************
\section{Future work}
\paragraph{} The planned future work has two separate tracks. One is related to the optimization framework and aims at extending it with support for a more comprehensive, robust partitioning algorithm. The second track relates to the code generation process.
\subsection{Scheduling and allocation}
A current limitation of this work is that the validity of the proposed design framework has been extensively tested in simulation, but no experimental validation has been conducted. Extensive testing and robustness improvements are needed to increase the effectiveness of the partitioning and scheduling framework. Improvements to the partitioning algorithm are necessary to improve the safety and security of the implemented code. One step in this direction might be to find heuristics that estimate the impact of each phase on the others. Another approach might be to encode all the phases into a single MILP optimization problem. A possible drawback of the second method is that it can lead to problems whose complexity makes them barely solvable even for a small task-set.
\par A possible improvement for the scheduling algorithm might be to analyze in more depth how cache and memory interference and delays can be minimized by the partitioning or the scheduling. An additional step might also be added: in the current implementation there is a one-to-one mapping between subsystems and tasks, and there can be cases in which this is not the optimal choice.
\paragraph{} Another limitation of the current implementation is the need for the system designer to specify the Worst-Case Execution Time (WCET) of each task.
\par Worst-Case Execution Time analyses aim at determining an upper bound for a task's execution time. Usually, the result of a WCET analysis is an over-approximation of the exact WCET, which is nearly impossible to determine for real-life software. Simple architectures allow WCET determination using static analysis techniques based on a model of execution; that is, the analyzed software is not executed but compiled and analyzed. On complex COTS processor architectures, it is not possible to determine an accurate model. Today, an alternative method is used: a worst-case scenario is defined from an analysis performed on the airborne software, the execution time is measured under this scenario, and the measurement is further corrected with parameters taking into account variability during operations. Timing analysis is difficult on multi-core COTS platforms due to the lack of information on processor behavior, which may lead to a pessimistic estimation of those parameters.
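For illustration only, such a measurement-based estimate can be summarized as
\[ \mathrm{WCET}_{\mathrm{est}} = C_{\mathrm{max}} \cdot (1 + m), \]
where $C_{\mathrm{max}}$ is the longest execution time measured under the worst-case scenario and $m$ is a correction margin accounting for operational variability; the notation is illustrative and not taken from any specific standard.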
\par There are plenty of free and commercial tools for timing analysis, some of them directly integrated into the Simulink environment. These tools can be used to alleviate the workload of the system designer, who can then focus more on the architecture optimization.
However, there is a lack of research on WCET estimation under faulty conditions on safety-critical, multi-core COTS platforms that require temporal partitioning. This research is needed to deploy multi-core platforms safely in the avionics domain and for certification authorities to accept and approve their usage.
\paragraph{} Another path to explore is formal verification instead of empirical, measurement-based methods. Formal methods with temporal specifications can be used to prove that a given set of partitions, with their tasks, can never reach hazardous states. Such a proof would be another great help for certification authorities.
\subsection{Code Generation}
The biggest limitation of the current code generator is that it does not support non-periodic tasks. This is not an intrinsic limitation of the approach; it simply has not been implemented yet. All the non-periodic tasks can be placed on PikeOS \TP{0} thanks to the gaps left in the priority assignment. At the moment, however, no semantics to mark those tasks has been implemented.
\paragraph{} An interesting improvement might be support for Hardware-in-the-Loop (HIL) simulation. HIL simulation, which is quite common in the aviation industry, is a type of real-time simulation that can be used to test the controller design. In HIL simulations, the real controller responds to realistic stimuli coming from the virtual plant included in the model. HIL simulation can also add great value to WCET estimation.
\paragraph{} Another possible improvement is better resource usage in the generated code. For example, if a block sends data to another block via two different ports, and these are converted into sampling ports due to the partitioned architecture, it can be useful to merge the two data streams into the same port.
\paragraph{} Moreover, the workflow can be extended to other code generators, and eventually to hand-coded applications. A first step in this direction is to model the system in \emph{SysML} \cite{sysml}, which is a general-purpose modeling language for engineering systems (defined as an extension of UML), and to use a more generic code generator, such as Acceleo \cite{Acceleo}, to generate the glue code.
|
#!/usr/bin/env python
import gym
import numpy
import time
import qlearn
from gym import wrappers
from functools import reduce  # `reduce` moved to functools in Python 3; used for the final score
# ROS packages required
import rospy
import rospkg
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_wall
if __name__ == '__main__':

    rospy.init_node('turtlebot_wall_qlearn', anonymous=True, log_level=rospy.WARN)

    # Create the Gym environment
    env = gym.make('MyTurtleBot2Wall-v0')
    rospy.loginfo("Gym environment done")

    # Set the logging system
    rospack = rospkg.RosPack()
    pkg_path = rospack.get_path('turtlebot_training')
    outdir = pkg_path + '/training_results'
    env = wrappers.Monitor(env, outdir, force=True)
    rospy.loginfo("Monitor Wrapper started")

    last_time_steps = numpy.ndarray(0)

    # Loads parameters from the ROS param server
    # Parameters are stored in a yaml file inside the config directory
    # They are loaded at runtime by the launch file
    Alpha = rospy.get_param("/turtlebot2/alpha")
    Epsilon = rospy.get_param("/turtlebot2/epsilon")
    Gamma = rospy.get_param("/turtlebot2/gamma")
    epsilon_discount = rospy.get_param("/turtlebot2/epsilon_discount")
    nepisodes = rospy.get_param("/turtlebot2/nepisodes")
    nsteps = rospy.get_param("/turtlebot2/nsteps")
    running_step = rospy.get_param("/turtlebot2/running_step")

    # Initialises the algorithm that we are going to use for learning
    qlearn = qlearn.QLearn(actions=range(env.action_space.n),
                           alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
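    # Note: `qlearn.QLearn` (provided by the accompanying qlearn.py, not shown
    # here) is assumed to implement the standard tabular Q-learning update,
    #   Q(s, a) <- Q(s, a) + alpha * (reward + gamma * max_a' Q(s', a') - Q(s, a)),
    # with epsilon-greedy action selection in `chooseAction`.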
    initial_epsilon = qlearn.epsilon

    start_time = time.time()
    highest_reward = 0

    # Starts the main training loop: the one about the episodes to do
    for x in range(nepisodes):
        rospy.logdebug("############### WALL START EPISODE=>" + str(x))

        cumulated_reward = 0
        done = False
        if qlearn.epsilon > 0.05:
            qlearn.epsilon *= epsilon_discount

        # Initialize the environment and get first state of the robot
        observation = env.reset()
        state = ''.join(map(str, observation))

        # Show on screen the actual situation of the robot
        # env.render()

        # for each episode, we test the robot for nsteps
        for i in range(nsteps):
            rospy.logwarn("############### Start Step=>" + str(i))
            # Pick an action based on the current state
            action = qlearn.chooseAction(state)
            rospy.logwarn("Next action is:%d", action)
            # Execute the action in the environment and get feedback
            observation, reward, done, info = env.step(action)
            rospy.logwarn(str(observation) + " " + str(reward))
            cumulated_reward += reward
            if highest_reward < cumulated_reward:
                highest_reward = cumulated_reward

            nextState = ''.join(map(str, observation))

            # Make the algorithm learn based on the results
            rospy.logwarn("# state we were=>" + str(state))
            rospy.logwarn("# action that we took=>" + str(action))
            rospy.logwarn("# reward that action gave=>" + str(reward))
            rospy.logwarn("# episode cumulated_reward=>" + str(cumulated_reward))
            rospy.logwarn("# State in which we will start next step=>" + str(nextState))
            qlearn.learn(state, action, reward, nextState)

            if not (done):
                rospy.logwarn("NOT DONE")
                state = nextState
            else:
                rospy.logwarn("DONE")
                last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
                break
            rospy.logwarn("############### END Step=>" + str(i))
            #raw_input("Next Step...PRESS KEY")
            # rospy.sleep(2.0)

        m, s = divmod(int(time.time() - start_time), 60)
        h, m = divmod(m, 60)
        rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
            round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
            cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))

    rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" + str(
        initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))

    l = last_time_steps.tolist()
    l.sort()

    # print("Parameters: a="+str)
    rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
    rospy.loginfo("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))

    env.close()
|
\chapter{Current Research Achievements} % Main chapter title
\label{Current Research Achievements} % For referencing the chapter elsewhere, use \ref{Chapter1}
\section{ScTI Methods are Constantly Being Developed}
In the past few years, single-cell omics technology has flourished, and more and more scTI methods have been invented. Every month, new scTI methods are published, and researchers from around the world continually experiment with these new methods to obtain the most complete cell map. In the repository of commonly used single-cell omics tools \parencite{henry_omictools:_2014,davis_seandavi/awesome-single-cell:_2018,zappia_exploring_2018}, it is not difficult to find that scTI tools are one of the largest categories of current single-cell omics tools. The core algorithms of the various scTI methods are different, which means that the prior knowledge they rely on and the trajectory structures they can infer are dissimilar. Also, different methods often have their own unique output structure.
\section{ScTI Methods Analyzing Different Cell Lineage}
Current computational methods have proven useful for analyzing cell lineages and corresponding trajectories based on large numbers of single-cell omics data, but these strategies still have limitations on many issues. We need better algorithms to derive multi-branched structures, to achieve more efficient extraction of cell features, and to take into account multiple pathways, in order to reflect the fact that the same cell in a cell lineage may follow multiple dynamic paths simultaneously \parencite{ferrell_bistability_2012}. In the study of cell lineages with simple topologies, researchers have achieved many results, such as inferring cell lineage during the differentiation of B cells from single-cell proteomic data
\parencite{bendall_single-cell_2014}, and studying the lineage of nervous system development
\parencite{habib_div-seq:_2016,chen_mpath_2016,shin_single-cell_2015} and the early hematopoietic process
\parencite{nestorowa_single-cell_2016} with single-cell transcriptome data. \\
Of course, in more complex cell differentiation systems, cell lineages constructed using single-cell omics can also reveal the answers to important biological questions. Studies of embryonic stem cells have helped us understand embryonic development at the cellular level and find marker molecules for different cells at specific stages of embryonic development
\parencite{haghverdi_diffusion_2016,haghverdi_diffusion_2015}. Researchers who focus on bone marrow cells have used single-cell omics methods to settle a question that plagued the academic world for many years: whether mature hematopoietic stem cells in the bone marrow have differentiation preferences and tend to differentiate towards a certain kind of cell \parencite{paul_transcriptional_2015,olsson_single-cell_2016}
\subsection{Hematopoietic System}
Previous studies have shown that in the hematopoietic system, the use of scTI methods to infer cell lineage is quite appropriate. By analyzing data from normal hematopoietic cells, researchers accurately isolated hematopoietic stem and progenitor cells (HSPCs) from single-cell data of acute myeloid leukemia. This approach is more accurate than traditional methods, which are based on classical cell surface markers and, in some cases, do not accurately identify diseased cells. Single-cell omics data provide ultra-high-dimensional feature information, which makes feature-based recognition more accurate \parencite{levine_data-driven_2015}.
\subsection{Cancer Research}
Single-cell omics has revolutionized the entire field of cancer research. When single-cell approaches were first introduced into cancer research, qPCR-based single-cell methods were used to study the radiation resistance of cancer cells and the heterogeneity of colon cancer tissues at the cellular level
\parencite{diehn_association_2009, dalerba_single-cell_2011}. With the rise of second-generation sequencing technology, single-cell omics analysis has provided new tools for researchers studying breast cancer and acute lymphoblastic leukemia \parencite{wang_clonal_2014, gawad_dissecting_2014}. On this basis, researchers can also infer the order in which various mutations lead to cell carcinogenesis \parencite{corces-zimmerman_preleukemic_2014,jan_clonal_2012}. \\
Analysis of single-cell RNA-seq data from some fresh tumor tissues can distinguish epithelial cells, immune cells, stromal cells and cancer cells. This method has achieved very good results in melanoma \parencite{tirosh_dissecting_2016}, myeloproliferative neoplasms \parencite{kiselev_sc3:_2017} and glioblastoma \parencite{patel_single-cell_2014}. Among the identified cancer cells, single-cell transcriptome data can also be used to distinguish cancer cells of different states, such as cancer stem cells
\parencite{patel_single-cell_2014,tirosh_single-cell_2016} and resistant cancer cells
\parencite{tirosh_dissecting_2016}. Among cancer stem cells, cells in an actively proliferating state and cells in a relatively quiescent state can also be identified
\parencite{patel_single-cell_2014,tirosh_dissecting_2016,tirosh_single-cell_2016}.
\section{Summary of Commonly Used scTI Methods}
In the next sections, I have selected 20 commonly used scTI methods and divided them into different groups according to the characteristics of their core algorithms. These are all based on $Python$ or $R$. In Table \ref{tab:Methods} below, I list the a priori requirements, platform, topology features and references of these methods. In Table \ref{tab:Trajectory}, seven basic inferable trajectory types of scTI methods are defined; however, not all scTI methods are applicable to all of these topologies. In Figure \ref{fig:Trajectory}, these inferable trajectory types are illustrated as cartoons. Finally, I show the inferable trajectory types of every scTI method in Table \ref{tab:Inferable Trajectory}. \\
These methods differ from each other and have their own characteristics. In the course of practical research, researchers tend to combine the results from multiple methods in order to achieve a satisfying end result.
\subsection{Commonly Used scTI Methods}
\begin{spacing}{1.15}
\begin{table}[H]
\caption{Commonly Used scTI Methods}
\label{tab:Methods}
\centering
\begin{tabular}{p{4cm} p{3cm}<{\centering} *{3}{p{2cm}<{\centering}}}
\toprule
\tabhead{Method} & \tabhead{Topology} & \tabhead{Priori} & \tabhead{Platform}
& \tabhead{Reference} \\
\midrule
\multicolumn{2}{l}{\keyword{Tree}} \\
Monocle\_1 & Flexible & $\vartriangle$ & $R$ &\parencite{trapnell_dynamics_2014}\\
Monocle\_2 & Unfettered & & $R$ &\parencite{qiu_reversed_2017}\\
Slingshot & Unfettered & & $R$ &\parencite{street_slingshot:_2018}\\
MST & Unfettered & & $R$ &$R^*$\\
SCUBA & Unfettered & & $Python$ &\parencite{marco_bifurcation_2014}\\
pCreode & Unfettered & & $Python$ &\parencite{herring_unsupervised_2018}\\
\midrule
\multicolumn{2}{l}{\keyword{Linear}} \\
Embeddr & Constant & & $R$ &\parencite{campbell_laplacian_2015}\\
TSCAN & Constant & & $R$ &\parencite{ji_tscan:_2016}\\
SCORPIUS & Constant & & $R$ &\parencite{cannoodt_scorpius_2016}\\
Component\_1 & Constant & & $R$ &$R^*$\\
MATCHER & Constant & & $Python$ &\parencite{welch_matcher:_2017}\\
\midrule
\multicolumn{2}{l}{\keyword{Multi-diverging}} \\
STEMNET & Flexible & $\blacktriangle$ & $R$ &\parencite{velten_human_2017}\\
MFA & Flexible & $\vartriangle$ & $R$ &\parencite{campbell_probabilistic_2017}\\
FateID & Flexible & $\blacktriangle$ & $R$ &\parencite{herman_fateid_2018}\\
\midrule
\multicolumn{2}{l}{\keyword{Bi-diverging}} \\
DPT & Constant & & $R$ & \parencite{haghverdi_diffusion_2016}\\
Wishbone & Flexible & $\vartriangle$ & $Python$ & \parencite{bendall_single-cell_2014}\\
\midrule
\multicolumn{2}{l}{\keyword{Graph}} \\
RaceID & Unfettered & & $R$ & \parencite{grun_novo_2016} \\
PAGA & Unfettered & $\vartriangle$ & $Python$ & \parencite{wolf_paga:_2019} \\
\midrule
\multicolumn{2}{l}{\keyword{Cyclic}} \\
ElPiGraph & Constant & & $R$ & $**$ \\
Angle & Constant & & $R$ & $R^*$ \\
% Note: set ${\rm R^{2}_{i}}$ in upright (normal) type
\bottomrule
\end{tabular}
\end{table}
$\vartriangle$ : Needs a priori information such as start or end cells in the lineage.\\
$\blacktriangle$ : Needs a priori information such as cell clustering or time series.\\
Unfettered : Topological structures deduced from data are free.\\
Constant : Topological structures deduced from data are constant.\\
Flexible : Topological structures deduced from data depend on the parameters.\\
$R^*$ : This method could be implemented with a bit of R code. \\
$**$ : Only in \url{github.com/Albluca/ElPiGraph.R}.
\end{spacing}
\subsection{Basic Trajectory Structure Types}
\begin{spacing}{1.5}
\begin{table}[H]
\caption{The definition of basic trajectory structure types \parencite{saelens_comparison_2019}}
\label{tab:Trajectory}
\centering
\begin{tabular}{p{4cm} p{10cm}}
% \times \checkmark
\toprule
\tabhead{Types} & \tabhead{Definition} \\
\midrule
Linear &
A graph in which every node has an in-degree and an out-degree not higher than 1, and exactly 2 nodes have a degree equal to 1.\\
\midrule
Ring &
A graph in which every node has an in-degree and an out-degree equal to 1.\\
\midrule
Tree &
A graph in which every node has an in-degree not higher than 1.\\
\midrule
Multi-diverging &
A tree graph in which every node except one has a degree not higher than 1.\\
\midrule
Bi-diverging &
A multi-diverging graph in which one node has a degree equal to 3.\\
\midrule
Unconnected &
A graph in which not all nodes are connected.\\
\midrule
Connected &
A graph in which all nodes are connected.\\
% Note: set ${\rm R^{2}_{i}}$ in upright (normal) type
\bottomrule
\end{tabular}
\end{table}
\end{spacing}
\vspace{1.0cm}
\begin{figure}[H]
\centering
\includegraphics[width=1\linewidth]{Figures/types.png}
\caption{Seven basic trajectory structure types}
\label{fig:Trajectory}
\end{figure}
\subsection{Inferable Trajectory Structures of scTI Methods}
\newcommand{\Able}{\blacksquare}
\newcommand{\Unab}{\square}
\begin{spacing}{1.16}
\begin{table}[H]
\caption{Inferable Trajectory Structures of scTI Methods}
\label{tab:Inferable Trajectory}
\centering
\begin{tabular}{p{4cm} *{7}{p{1cm}<{\centering}}}
% \times \checkmark
\toprule
\tabhead{Method} & \tabhead{R} & \tabhead{L} & \tabhead{B} & \tabhead{M}
& \tabhead{T} & \tabhead{C} & \tabhead{U} \\
\midrule
\multicolumn{2}{l}{\keyword{Tree}} \\
Monocle\_1 &$\Unab$&$\Able$&$\Able$&$\Able$&$\Able$&$\Unab$&$\Unab$ \\
Monocle\_2 &$\Unab$&$\Able$&$\Able$&$\Able$&$\Able$&$\Unab$&$\Unab$ \\
Slingshot &$\Unab$&$\Able$&$\Able$&$\Able$&$\Able$&$\Unab$&$\Unab$ \\
MST &$\Unab$&$\Able$&$\Able$&$\Able$&$\Able$&$\Unab$&$\Unab$ \\
SCUBA &$\Unab$&$\Able$&$\Able$&$\Able$&$\Able$&$\Unab$&$\Unab$ \\
pCreode &$\Unab$&$\Able$&$\Able$&$\Able$&$\Able$&$\Unab$&$\Unab$ \\
\midrule
\multicolumn{2}{l}{\keyword{Linear}} \\
Embeddr &$\Unab$&$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
TSCAN &$\Unab$&$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
SCORPIUS &$\Unab$&$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
Component\_1 &$\Unab$&$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
MATCHER &$\Unab$&$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
\midrule
\multicolumn{2}{l}{\keyword{Multi-diverging}} \\
STEMNET &$\Unab$&$\Unab$&$\Able$&$\Able$&$\Unab$&$\Unab$&$\Unab$ \\
MFA &$\Unab$&$\Able$&$\Able$&$\Able$&$\Unab$&$\Unab$&$\Unab$ \\
FateID &$\Unab$&$\Unab$&$\Able$&$\Able$&$\Unab$&$\Unab$&$\Unab$ \\
\midrule
\multicolumn{2}{l}{\keyword{Bi-diverging}} \\
DPT &$\Unab$&$\Unab$&$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
Wishbone &$\Unab$&$\Able$&$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
\midrule
\multicolumn{2}{l}{\keyword{Graph}} \\
RaceID &$\Able$&$\Able$&$\Able$&$\Able$&$\Able$&$\Able$&$\Able$ \\
PAGA &$\Able$&$\Able$&$\Able$&$\Able$&$\Able$&$\Able$&$\Able$ \\
\midrule
\multicolumn{2}{l}{\keyword{Cyclic}} \\
ElPiGraph &$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
Angle &$\Able$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$&$\Unab$ \\
% Note: set ${\rm R^{2}_{i}}$ in upright (normal) type
\bottomrule
\end{tabular}
\end{table}
\keyword{R} : Ring structure, \keyword{L} : Linear structure, \\
\keyword{B} : Bi-diverging structure, \keyword{M} : Multi-diverging structure, \\
\keyword{T} : Tree structure, \keyword{C} : Connected structure,
\keyword{U} : Unconnected structure \\
$\Able$ : This method is able to infer this kind of trajectory structure. \\
$\Unab$ : This method is not able to infer this kind of trajectory structure.
\end{spacing}
%----------------------------------------------------------------------------------------
%----------------------------------------------------------------------------------------
%The \code{biblatex} package is used to format the bibliography and inserts references such as this one \parencite{Reference1}. The options used in the \file{main.tex} file mean that the in-text citations of references are formatted with the author(s) listed with the date of the publication. Multiple references are separated by semicolons (e.g. \parencite{Reference2, Reference1}) and references with more than three authors only show the first author with \emph{et al.} indicating there are more authors (e.g. \parencite{Reference3}). This is done automatically for you. To see how you use references, have a look at the \file{Chapter1.tex} source file. Many reference managers allow you to simply drag the reference into the document as you type.
|
function trans = homothecy(point, ratio)
%HOMOTHECY create a homothecy as an affine transform.
%
% TRANS = homothecy(POINT, K);
% POINT is the center of the homothecy, K is its factor.
%
% See also:
% transforms2d, transformPoint, createTranslation
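%
%   Example (illustrative; assumes the usual geom2d conventions):
%     T  = homothecy([1 2], 0.5);     % scaling by factor 0.5 centered at (1,2)
%     p2 = transformPoint([3 4], T);  % apply the resulting affine transform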
% ------
% Author: David Legland
% e-mail: [email protected]
% Created: 2005-01-20
% Copyright 2005 INRA - TPV URPOI - BIA IMASTE
% deprecation warning
warning('geom2d:deprecated', ...
'''homothecy'' is deprecated, use ''createHomothecy'' instead');
% call current implementation
trans = createHomothecy(point, ratio);
|
[STATEMENT]
lemma foldl_map [code_unfold]:
"foldl g a (map f xs) = foldl (\<lambda>a x. g a (f x)) a xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. foldl g a (map f xs) = foldl (\<lambda>a x. g a (f x)) a xs
[PROOF STEP]
by (simp add: foldl_conv_fold fold_map comp_def) |
import Std.Lean.Parser
import Std.Lean.Meta.DiscrTree
import Mathlib.Algebra.Invertible
import Mathlib.Data.Rat.Cast
import Mathlib.Data.Nat.Basic
import Mathlib.Data.Int.Basic
import Mathlib.Tactic.Conv
import Qq.MetaM
import Qq.Delab
open Lean
open Lean.Meta Qq Lean.Elab Term
initialize registerTraceClass `Meta.Tactic.fun_trans
initialize registerTraceClass `Meta.Tactic.fun_trans.step
initialize registerTraceClass `Meta.Tactic.fun_trans.trans
def diff (f : α → β) : α → α → β := sorry
prefix:max "∂" => diff
theorem diff_I : ∂ (λ x : α => x) = λ x dx => dx := sorry
-- theorem diff_K : ∂ (λ (x : α) (y : β) => x) = λ x dx y => dx := sorry
theorem diff_K [OfNat α 0] (β : Type _) (x : α) : ∂ (λ (y : β) => x) = λ y dy => 0 := sorry
theorem diff_B (f : β → γ) (g : α → β)
: ∂ (λ x => f (g x))
=
λ x dx => ∂ f (g x) (∂ g x dx) := sorry
theorem diff_S [Add γ] (f : β → α → γ) (g : α → β)
: ∂ (λ x => f (g x) x)
=
λ x dx =>
∂ (f (g x)) x dx
+
∂ (λ y' => f y' x) (g x) (∂ g x dx) := sorry
theorem diff_C (f : β → α → γ)
: ∂ (λ (x : α) (y : β) => f y x)
=
λ x dx y => ∂ (f y) x dx := sorry
theorem diff_eval (β) (x : α)
: ∂ (λ (f : α → β) => f x)
=
λ f df => df x := sorry
theorem diff_let [Add γ] (f : β → α → γ) (g : α → β)
: ∂ (λ x =>
let y := g x
f y x)
=
λ x dx =>
let y := g x
let dy := ∂ g x dx
∂ (λ yx' : β × α => f yx'.1 yx'.2) (y,x) (dy,dx) :=
by
dsimp
sorry
theorem diff_let_B (f : β → γ) (g : α → β)
: ∂ (λ x =>
let y := g x
f y)
=
λ x dx =>
let y := g x
let dy := ∂ g x dx
∂ f y dy :=
by
dsimp
sorry
abbrev uncurry (f : α → β → γ) := λ (x,y) => f x y
abbrev uncurry3 (f : α → β → γ → δ) := λ (x,y,z) => f x y z
@[simp ↓]
theorem diff_uncurry_add [Add γ] (f : α → β → γ)
: ∂(uncurry λ x y => f x y)
=
λ (x,y) (dx,dy) =>
∂ (λ x' => f x' y) x dx
+
∂ (f x) y dy := sorry
@[simp ↓]
theorem diff_prod_map (f : α → β) (g : α → γ)
: ∂ (λ x => (f x, g x))
=
λ x dx => (∂ f x dx, ∂ g x dx) := sorry
def adj (f : α → β) : β → α := sorry
postfix:max "†" => adj
theorem adj_I : ∂ (λ x : α => x) = λ x dx => dx := sorry
theorem adj_B (f : β → γ) (g : α → β)
: (λ x => f (g x))†
=
λ z => g† (f† z) := sorry
theorem adj_S [Add α] (f : β → α → γ) (g : α → β)
: (λ x => f (g x) x)†
=
λ z =>
let (b,a) := (λ (b,a) => f b a)† z
g† b + a := sorry
def sum (f : α → β) : β := sorry
@[simp ↓] theorem sum_diff
: ∂ (λ (f : α → β) => sum f)
=
λ f df => sum df := sorry
@[simp ↓] theorem sum_adj
: (λ (f : α → β) => sum f)†
=
λ x i => x := sorry
@[simp] theorem sum_eval (f : α → β → γ) (b : β)
: sum f b
=
sum (λ a => f a b) := sorry
theorem adj_C (f : β → α → γ)
: (λ (x : α) (y : β) => f y x)†
=
λ g => sum λ y => (f y)† (g y) := sorry
def kron (i i' : α) (b : β) : β := sorry
theorem adj_eval (β) (x : α)
: (λ (f : α → β) => f x)†
=
λ y x' => kron x x' y := sorry
theorem adj_let {α β γ : Type} [Add α] (f : β → α → γ) (g : α → β)
: (λ x =>
let y := g x
f y x)†
=
λ z =>
let yx := (λ yx' : β × α => f yx'.1 yx'.2)† z
g† yx.1 + yx.2 :=
by
sorry
theorem adj_let_B {α β γ : Type} [Add α] (f : β → γ) (g : α → β)
: (λ x =>
let y := g x
f y)†
=
λ z =>
let y := f† z
g† y :=
by
sorry
@[simp ↓]
theorem adj_prod_map (f : α → β) (g : α → γ) [Add α]
: (λ x => (f x, g x))†
=
λ (y,z) => f† y + g† z := sorry
@[simp ↓]
theorem ajd_uncurry_add [Add α]
: (uncurry λ x y : α => x + y)†
=
λ x => (x,x) := sorry
/--
Constructs a proof that the original expression is true
given a simp result which simplifies the target to `True`.
-/
def _root_.Lean.Meta.Simp.Result.ofTrue (r : Simp.Result) : MetaM (Option Expr) :=
if r.expr.isConstOf ``True then
some <$> match r.proof? with
| some proof => mkOfEqTrue proof
| none => pure (mkConst ``True.intro)
else
pure none
def _root_.Array.filterIdx (p : α → Bool) (as : Array α) : Array Nat :=
as |>.mapIdx (λ i a => if p a then some i.1 else none)
|>.filterMap id
def _root_.Array.findRevIdx? {α : Type} (as : Array α) (p : α → Bool) : Option Nat :=
as.reverse.findIdx? p |>.map λ i => as.size - 1 - i
def getNameOfRuleI (transName : Name) : Option Name :=
if transName == ``diff then
return ``diff_I
else if transName == ``adj then
return ``adj_I
else
none
def applyRuleI (transName : Name) (X : Expr) : MetaM (Option (Expr×Expr)) := do
if let .some rule := getNameOfRuleI transName then
let proof ← Meta.mkAppOptM rule #[X]
let rhs := (← inferType proof).getArg! 2
return (rhs, proof)
else
return none
def getNameOfRuleK (transName : Name) : Option Name :=
if transName == ``diff then
return ``diff_K
else
none
def applyRuleK (transName : Name) (x Y : Expr) : MetaM (Option (Expr×Expr)) := do
if let .some rule := getNameOfRuleK transName then
let proof ← Meta.mkAppM rule #[Y, x]
let rhs := (← inferType proof).getArg! 2
return (rhs, proof)
else
trace[Meta.Tactic.fun_trans.trans] s!"Failed applying rule K"
return none
def getNameOfRuleS (transName : Name) : Option Name :=
if transName == ``diff then
return ``diff_S
else if transName == ``adj then
return ``adj_S
else
none
def applyRuleS (transName : Name) (f g : Expr) : MetaM (Option (Expr×Expr)) := do
if let .some rule := getNameOfRuleS transName then
let proof ← Meta.mkAppM rule #[f,g]
let rhs := (← inferType proof).getArg! 2
return (rhs, proof)
else
return none
def getNameOfRuleB (transName : Name) : Option Name :=
if transName == ``diff then
return ``diff_B
else if transName == ``adj then
return ``adj_B
else
none
def applyRuleB (transName : Name) (f g : Expr) : MetaM (Option (Expr×Expr)) := do
if let .some rule := getNameOfRuleB transName then
let proof ← Meta.mkAppM rule #[f,g]
trace[Meta.Tactic.fun_trans.trans] s!"case: B '{← Meta.ppExpr (← inferType proof)}'"
let rhs := (← inferType proof).getArg! 2
return (rhs, proof)
else
return none
def getNameOfRuleC (transName : Name) : Option Name :=
if transName == ``diff then
return ``diff_C
else if transName == ``adj then
return ``adj_C
else
none
def applyRuleC (transName : Name) (f : Expr) : MetaM (Option (Expr×Expr)) := do
if let .some rule := getNameOfRuleC transName then
let proof ← Meta.mkAppM rule #[f]
let rhs := (← inferType proof).getArg! 2
return (rhs, proof)
else
return none
def getNameOfRuleEval (transName : Name) : Option Name :=
if transName == ``diff then
return ``diff_eval
else if transName == ``adj then
return ``adj_eval
else
none
def applyRuleEval (transName : Name) (x Y : Expr) : MetaM (Option (Expr×Expr)) := do
if let .some rule := getNameOfRuleEval transName then
let proof ← Meta.mkAppM rule #[Y, x]
let rhs := (← inferType proof).getArg! 2
return (rhs, proof)
else
return none
def getNameOfRuleLet (transName : Name) : Option Name :=
if transName == ``diff then
return ``diff_let
else if transName == ``adj then
return ``adj_let
else
none
def applyRuleLet (transName : Name) (f g : Expr) : MetaM (Option (Expr×Expr)) := do
if let .some rule := getNameOfRuleLet transName then
let proof ← Meta.mkAppM rule #[f, g]
let rhs := (← inferType proof).getArg! 2
return (rhs, proof)
else
return none
def getNameOfRuleLetB (transName : Name) : Option Name :=
if transName == ``diff then
return ``diff_let_B
else if transName == ``adj then
return ``adj_let_B
else
none
def applyRuleLetB (transName : Name) (f g : Expr) : MetaM (Option (Expr×Expr)) := do
if let .some rule := getNameOfRuleLetB transName then
let proof ← Meta.mkAppM rule #[f, g]
let rhs := (← inferType proof).getArg! 2
return (rhs, proof)
else
return none
/--
Is expression `e` of the form `T f x₀ x₁ .. xₙ` where `T` is some function transformation?
-/
def getFunctionTransform (e : Expr) : Option (Name × Expr × Array Expr) :=
if e.isApp && (e.isAppOf ``diff) then
return (``diff, e.getAppArgs[2]!, e.getAppArgs[3:])
else if e.isApp && (e.isAppOf ``adj) then
return (``adj, e.getAppArgs[2]!, e.getAppArgs[3:])
else
none
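-- Illustrative example (based on the encodings above): for an application
-- `@diff α β f x dx`, `getFunctionTransform` returns `(``diff, f, #[x, dx])`;
-- the two leading (type) arguments are skipped by the `getAppArgs[2]!` indexing.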
-- #check Prod.mk 0 (Prod.mk 1 2)
-- TODO: generalize to other monads
def _root_.Lean.Meta.letTelescope (e : Expr) (k : Array Expr → Expr → MetaM α) : MetaM α :=
lambdaLetTelescope e λ xs b => do
if let .some i ← xs.findIdxM? (λ x => do pure ¬(← x.fvarId!.isLetVar)) then
k xs[0:i] (← mkLambdaFVars xs[i+1:] b)
else
k xs b
/-- Modifies expression of the form:
```
let a :=
let b := x
g b
f a b
```
to
```
let b := x
let a := g b
f a b
```
-/
def normalizeLetBindings (e : Expr) : MetaM (Option Expr) :=
match e with
| .letE .. => letTelescope e λ as fVal => do
let a := as[0]!
let aId := a.fvarId!
if let .some aVal ← aId.getValue? then
match aVal with
| .letE .. => letTelescope aVal λ bs gVal => do
withLetDecl (← aId.getUserName) (← aId.getType) gVal λ a' => do
let fVal ← mkLambdaFVars as[1:] fVal
let fVal := fVal.replaceFVar a a'
mkLambdaFVars (bs |>.append #[a']) fVal
| _ => return none
else
return none
| _ => return none
/-- Attempts to transform the function `f` under the transformation `transName`
(currently `diff` or `adj`), matching the syntactic structure of `f` against the
I, K, B, C, S, eval and let rules above and returning the transformed expression
together with an equality proof. -/
def transformFunction (transName : Name) (f : Expr) : MetaM (Option (Expr × Expr)) := do
match f with
| .lam .. => lambdaLetTelescope f λ xs b => do
trace[Meta.Tactic.fun_trans.trans] s!"Transforming '{← Meta.ppExpr f}'"
if h : xs.size > 0 then
if (xs.size ≠ 1) then
let x := xs[0]!
let y := xs[1]!
let xId := x.fvarId!
let yId := y.fvarId!
-- let binding
if let .some yVal ← yId.getValue? then
let g ← mkLambdaFVars #[x] yVal
return ← withLocalDecl
(← yId.getUserName) default (← yId.getType) λ y' => do
let b' ← mkLambdaFVars (xs[2:]) b
if b'.containsFVar xId then
let f ← mkLambdaFVars #[y', x] (b'.replaceFVar y y')
trace[Meta.Tactic.fun_trans.trans] s!"case: let 'f:{← Meta.ppExpr f}' 'g:{← Meta.ppExpr g}'"
applyRuleLet transName f.eta g.eta
else
let f ← mkLambdaFVars #[y'] (b'.replaceFVar y y')
trace[Meta.Tactic.fun_trans.trans] s!"case: letB 'f:{← Meta.ppExpr f}' 'g:{← Meta.ppExpr g}'"
applyRuleLetB transName f.eta g.eta
-- rule C: λ x y => f y x
else
trace[Meta.Tactic.fun_trans.trans] s!"case: C 'f:{← Meta.ppExpr f}'"
let f ← Meta.mkLambdaFVars (#[xs[1]!, xs[0]!].append xs[2:]) b
return ← applyRuleC transName f.eta
else
let x := xs[0]
let xId := x.fvarId!
-- rule I: λ x => x
if (b == x) then
trace[Meta.Tactic.fun_trans.trans] s!"case: I '{← Meta.ppExpr f}'"
return ← applyRuleI transName (← inferType x)
-- rule K: λ x => y
if ¬(b.containsFVar xId) then
trace[Meta.Tactic.fun_trans.trans] s!"case: K '{← Meta.ppExpr f}'"
return ← applyRuleK transName b (← inferType x)
-- case: λ x => F x
else if b.isApp then
let F := b.getAppFn
let args := b.getAppArgs
trace[Meta.Tactic.fun_trans.trans] s!"Application case 'F:{← Meta.ppExpr F}' 'args:{← args.mapM Meta.ppExpr}'"
if let some info ← getMatcherInfo? F.constName then
trace[Meta.Tactic.fun_trans.trans] s!"Encountered matcher!"
return none
if b.isAppOf ``Prod.mk then
return none
-- if b.isAppOf ``Prod.fst then
-- return none
-- if b.isAppOf ``Prod.snd then
-- return none
let doArity := true
if doArity then do
let depArgs := args.mapIdx (λ i arg => if arg.containsFVar xId then some (arg, i.1) else none) |>.filterMap id
if depArgs.size >= 2 then
let g : Expr ←
(depArgs[0:depArgs.size-1]).foldrM (init:=depArgs[depArgs.size-1]!.1)
(λ y ys => mkAppOptM ``Prod.mk #[none, none, y.1,ys]) >>=
λ g => mkLambdaFVars #[x] g
let Ys := depArgs.map λ (arg, _) => (Name.anonymous, λ _ => inferType arg)
let f ←
withLocalDeclsD Ys λ ys => do
let mut args' := args
for i in [0:ys.size] do
args' := args'.set! depArgs[i]!.2 ys[i]!
let b' ← mkAppOptM' F (args'.map some)
mkLambdaFVars ys b'
-- mkAppM ``uncurry #[← mkLambdaFVars ys b']
if depArgs.size == 2 then
let f ← mkAppM ``uncurry #[f]
trace[Meta.Tactic.fun_trans.trans] s!"case: binary operation 'f:{← Meta.ppExpr f}' 'g:{← Meta.ppExpr g}'"
return ← applyRuleB transName f g
if depArgs.size == 3 then
let f ← mkAppM ``uncurry3 #[f]
trace[Meta.Tactic.fun_trans.trans] s!"case: ternary operation 'f:{← Meta.ppExpr f}' 'g:{← Meta.ppExpr g}'"
return ← applyRuleB transName f g
-- the first arguments with non-trivial occurence of `x`
let id? := args.findIdx? (λ arg => (arg != x) && (arg.containsFVar xId))
-- non trivial composition?
if let .some id := id? then
let yVal := args[id]!
let yType ← inferType yVal
let g ← mkLambdaFVars #[x] yVal
let f'proof : Option (Expr × Expr) ←
withLocalDecl `y .default yType λ y => do
let fbody ← mkAppOptM' F ((args.set! id y).map .some)
-- rule B: λ x => f (g x)
if ¬(fbody.containsFVar xId) then
let f ← mkLambdaFVars #[y] fbody
trace[Meta.Tactic.fun_trans.trans] s!"case: B 'f:{← Meta.ppExpr f}' 'g:{← Meta.ppExpr g}'"
return ← applyRuleB transName f.eta g.eta
-- rule S: λ x => f x (g x)
else
let f ← mkLambdaFVars #[y,x] fbody
trace[Meta.Tactic.fun_trans.trans] s!"case: S 'f:{← Meta.ppExpr f}' 'g:{← Meta.ppExpr g}'"
return ← applyRuleS transName f.eta g.eta
return f'proof
-- arguments containing `x`
let ids := args.filterIdx (λ arg => arg.containsFVar xId)
-- case: λ f => f x₀ .. xₙ
if (ids.size == 0) && (F == x) then
trace[Meta.Tactic.fun_trans.trans] s!"case: π '{← Meta.ppExpr f}'"
let lastId := args.size - 1
let lastArg := args[args.size - 1]!
let αtype ← inferType lastArg
let βtype ← inferType b
if args.size == 1 then
return ← applyRuleEval transName lastArg βtype
else
let g ← mkLambdaFVars #[x] (← mkAppM' F args[0:lastId])
let f ← withLocalDecl `F .default (← mkArrow αtype βtype) λ F => do
mkLambdaFVars #[F] (← mkAppM' F #[lastArg])
return ← applyRuleB transName f.eta g.eta
return none
| _ => return none
/-- A simp step which rewrites applications of function transformations (`diff`, `adj`)
via `transformFunction`, and normalizes let bindings in the post phase. -/
def tryFunTrans? (post := false) (e : Expr) : SimpM (Option Simp.Step) := do
if post then
trace[Meta.Tactic.fun_trans.step] s!"Post-step through {← Meta.ppExpr e}"
else
trace[Meta.Tactic.fun_trans.step] s!"Pre-step through {← Meta.ppExpr e}"
if post then
if let .some e' ← normalizeLetBindings e then
trace[Meta.Tactic.fun_trans.trans] s!"Normalizing let binding from:\n{← Meta.ppExpr e} \n\nto:\n\n{← Meta.ppExpr e'}"
return .some (.visit (.mk e' none 0))
if let .some (transName, f, args) := getFunctionTransform e then
if let .some (f', proof) ← transformFunction transName f then
if args.size == 0 then
return some (.visit (.mk f' proof 0))
else if args.size == 1 then
let f'' ← mkAppM' f' args
let proof' ← mkAppM ``congr_fun #[proof, args[0]!]
return some (.visit (.mk f'' proof' 0))
else if args.size == 2 then
let f'' ← mkAppM' f' args
let proof' ← mkAppM ``congr_fun₂ #[proof, args[0]!, args[1]!]
return some (.visit (.mk f'' proof' 0))
else if args.size == 3 then
let f'' ← mkAppM' f' args
let proof' ← mkAppM ``congr_fun₃ #[proof, args[0]!, args[1]!, args[2]!]
return some (.visit (.mk f'' proof' 0))
else
throwError "Finish implementings tryFunTrans?"
-- return some (.visit (.mk e none 0))
else return some (.visit (.mk e none 0))
else
return some (.visit (.mk e none 0))
variable (ctx : Simp.Context) (useSimp := true) in
mutual
/-- A discharger which simplifies the side goal with `deriveSimp` and succeeds if it reduces to `True`. -/
partial def discharge (e : Expr) : SimpM (Option Expr) := do (← deriveSimp e).ofTrue
/-- A `Methods` implementation which interleaves `tryFunTrans?` with the default simp steps. -/
partial def methods : Simp.Methods :=
if useSimp then {
pre := fun e ↦ do
Simp.andThen (← Simp.preDefault e discharge) tryFunTrans?
post := fun e ↦ do
Simp.andThen (← Simp.postDefault e discharge) (tryFunTrans? (post := true))
discharge? := discharge
} else {
pre := fun e ↦ Simp.andThen (.visit { expr := e }) tryFunTrans?
post := fun e ↦ Simp.andThen (.visit { expr := e }) (tryFunTrans? (post := true))
discharge? := discharge
}
/-- Traverses the given expression using simp and applies function transformations wherever possible. -/
partial def deriveSimp (e : Expr) : MetaM Simp.Result :=
(·.1) <$> Simp.main e ctx (methods := methods)
end
-- FIXME: had to inline a bunch of stuff from `simpGoal` here
/--
The core of `fun_trans` as a tactic in `MetaM`.
* `g`: The goal to simplify
* `ctx`: The simp context, constructed by `mkSimpContext` and
containing any additional simp rules we want to use
* `fvarIdsToSimp`: The selected set of hypotheses used in the location argument
* `simplifyTarget`: true if the target is selected in the location argument
* `useSimp`: true if the full `simp` machinery should be used, false if only its structural parts are wanted
-/
def funTransAt (g : MVarId) (ctx : Simp.Context) (fvarIdsToSimp : Array FVarId)
(simplifyTarget := true) (useSimp := true) :
MetaM (Option (Array FVarId × MVarId)) := g.withContext do
g.checkNotAssigned `norm_num
let mut g := g
let mut toAssert := #[]
let mut replaced := #[]
for fvarId in fvarIdsToSimp do
let localDecl ← fvarId.getDecl
let type ← instantiateMVars localDecl.type
let ctx := { ctx with simpTheorems := ctx.simpTheorems.eraseTheorem (.fvar localDecl.fvarId) }
let r ← deriveSimp ctx useSimp type
match r.proof? with
| some _ =>
let some (value, type) ← applySimpResultToProp g (mkFVar fvarId) type r
| return none
toAssert := toAssert.push { userName := localDecl.userName, type, value }
| none =>
if r.expr.isConstOf ``False then
g.assign (← mkFalseElim (← g.getType) (mkFVar fvarId))
return none
g ← g.replaceLocalDeclDefEq fvarId r.expr
replaced := replaced.push fvarId
if simplifyTarget then
let res ← g.withContext do
let target ← instantiateMVars (← g.getType)
let r ← deriveSimp ctx useSimp target
let some proof ← r.ofTrue
| some <$> applySimpResultToTarget g target r
g.assign proof
pure none
let some gNew := res | return none
g := gNew
let (fvarIdsNew, gNew) ← g.assertHypotheses toAssert
let toClear := fvarIdsToSimp.filter fun fvarId ↦ !replaced.contains fvarId
let gNew ← gNew.tryClearMany toClear
return some (fvarIdsNew, gNew)
open Qq Lean Meta Elab Tactic Term
/-- Constructs a simp context from the simp argument syntax. -/
def getSimpContext (args : Syntax) (simpOnly := false) :
TacticM Simp.Context := do
let simpTheorems ←
if simpOnly then simpOnlyBuiltins.foldlM (·.addConst ·) {} else getSimpTheorems
let mut { ctx, starArg } ← elabSimpArgs args (eraseLocal := false) (kind := .simp)
{ simpTheorems := #[simpTheorems], congrTheorems := ← getSimpCongrTheorems }
unless starArg do return ctx
let mut simpTheorems := ctx.simpTheorems
for h in ← getPropHyps do
unless simpTheorems.isErased (.fvar h) do
simpTheorems ← simpTheorems.addTheorem (.fvar h) (← h.getDecl).toExpr
pure { ctx with simpTheorems }
open Elab.Tactic in
/--
Elaborates a call to `fun_trans only? [args]`.
* `args`: the `(simpArgs)?` syntax for simp arguments
* `loc`: the `(location)?` syntax for the optional location argument
* `simpOnly`: true if `only` was used
* `useSimp`: false if only the structural parts of `simp` should be used,
not any of the post-processing that `simp only` does without lemmas
-/
-- FIXME: had to inline a bunch of stuff from `mkSimpContext` and `simpLocation` here
def elabFunTrans (args : Syntax) (loc : Syntax)
(simpOnly := false) (useSimp := true) : TacticM Unit := do
let ctx ← getSimpContext args (!useSimp || simpOnly)
let ctx := {ctx with config := {ctx.config with iota := true, zeta := false, singlePass := true}}
let g ← getMainGoal
let res ← match expandOptLocation loc with
| .targets hyps simplifyTarget => funTransAt g ctx (← getFVarIds hyps) simplifyTarget useSimp
| .wildcard => funTransAt g ctx (← g.getNondepPropHyps) (simplifyTarget := true) useSimp
match res with
| none => replaceMainGoal []
| some (_, g) => replaceMainGoal [g]
open Lean.Parser.Tactic -- Meta.NormNum
elab (name := funTrans) "fun_trans" only:&" only"? args:(simpArgs ?) loc:(location ?) : tactic =>
elabFunTrans args loc (simpOnly := only.isSome) (useSimp := true)
-- /-- Basic version of `norm_num` that does not call `simp`. -/
-- elab (name := normNum1) "norm_num1" loc:(location ?) : tactic =>
-- elabNormNum mkNullNode loc (simpOnly := true) (useSimp := false)
|
[STATEMENT]
lemma decode_plan_set_is[simp]:
"set (\<Phi>\<inverse> \<Pi> \<A> t) = (\<Union>k \<in> {..<t}. { decode_plan' \<Pi> \<A> k })"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (\<Phi>\<inverse> \<Pi> \<A> t) = (\<Union>k<t. {decode_plan' \<Pi> \<A> k})
[PROOF STEP]
unfolding decode_plan_def SAT_Plan_Base.decode_plan_def set_map
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. decode_plan' \<Pi> \<A> ` set [0..<t] = (\<Union>k<t. {decode_plan' \<Pi> \<A> k})
[PROOF STEP]
using atLeast_upt
[PROOF STATE]
proof (prove)
using this:
{..<?n} = set [0..<?n]
goal (1 subgoal):
1. decode_plan' \<Pi> \<A> ` set [0..<t] = (\<Union>k<t. {decode_plan' \<Pi> \<A> k})
[PROOF STEP]
by blast |
! #################################################################################################################################
! Begin MIT license text.
! _______________________________________________________________________________________________________
! Copyright 2019 Dr William R Case, Jr ([email protected])
! Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
! associated documentation files (the "Software"), to deal in the Software without restriction, including
! without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
! copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
! the following conditions:
! The above copyright notice and this permission notice shall be included in all copies or substantial
! portions of the Software and documentation.
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
! OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
! THE SOFTWARE.
! _______________________________________________________________________________________________________
! End MIT license text.
SUBROUTINE QDEL1 ( OPT, WRITE_WARN )
! Calculates, or calls subr's to calculate, quadrilateral element matrices:
! 1) ME = element mass matrix , if OPT(1) = 'Y'
! 2) PTE = element thermal load vectors , if OPT(2) = 'Y'
! 3) SEi, STEi = element stress data recovery matrices, if OPT(3) = 'Y'
! 4) KE = element linear stiffness matrix , if OPT(4) = 'Y'
! 5) PPE = element pressure load matrix , if OPT(5) = 'Y'
! 6) KED = element differential stiffness matrix calc , if OPT(6) = 'Y'
USE PENTIUM_II_KIND, ONLY : BYTE, LONG, DOUBLE
USE IOUNT1, ONLY : BUG, ERR, F04, F06, WRT_BUG, WRT_ERR, WRT_LOG
USE SCONTR, ONLY : BLNK_SUB_NAM, FATAL_ERR, MAX_ORDER_GAUSS, MEFE
USE TIMDAT, ONLY : TSEC
USE SUBR_BEGEND_LEVELS, ONLY : QDEL1_BEGEND
USE CONSTANTS_1, ONLY : ZERO, ONE, FOUR, TWELVE
USE DEBUG_PARAMETERS, ONLY : DEBUG
USE PARAMS, ONLY : EPSIL, IORQ1B, IORQ1M, IORQ1S, IORQ2B, QUAD4TYP
USE MODEL_STUF, ONLY : EID, ELDOF, EMG_IFE, EMG_RFE, EMAT, ERR_SUB_NAM, EB, INTL_MID, KE, &
MASS_PER_UNIT_AREA, NUM_EMG_FATAL_ERRS, ME, PCOMP_LAM, PCOMP_PROPS, SHELL_B, TYPE, XEL
USE MODEL_STUF, ONLY : BENSUM, SHRSUM, PHI_SQ, PSI_HAT
USE QDEL1_USE_IFs
IMPLICIT NONE
CHARACTER(LEN=LEN(BLNK_SUB_NAM)):: SUBR_NAME = 'QDEL1'
CHARACTER(1*BYTE), INTENT(IN) :: OPT(6) ! 'Y'/'N' flags for whether to calc certain elem matrices
CHARACTER(LEN=*), INTENT(IN) :: WRITE_WARN ! If 'Y' write warning messages, otherwise do not
CHARACTER( 1*BYTE) :: RED_INT_SHEAR ! If 'Y', use Gaussian weighted average of B matrices for shear terms
INTEGER(LONG) :: GAUSS_PT ! Gauss point number (used for DEBUG output in subr SHP2DQ)
INTEGER(LONG) :: I,J,K,L ! DO loop indices
INTEGER(LONG) :: IORD ! Gaussian integration order for QMEM1 portion of element
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
! Do not change IORD_PCOMP. It must be such that it squared = number of nodes for the QUAD4 (MIN4T)
INTEGER(LONG), PARAMETER :: IORD_PCOMP = 2 ! Int order for nonsym layup PCOMP must be 2 (checked in subr
! SHELL_ABD_MATRICES)
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
INTEGER(LONG), PARAMETER :: SUBR_BEGEND = QDEL1_BEGEND
REAL(DOUBLE) :: AREA ! Elem area
REAL(DOUBLE) :: AR ! Elem aspect ratio
REAL(DOUBLE) :: BIG_BB(3,ELDOF,IORQ2B*IORQ2B)
! Strain-displ matrix for bending for all Gauss points and all DOF's
REAL(DOUBLE) :: BIG_BBI(3,ELDOF) ! BIG_BB for 1 Gauss point
REAL(DOUBLE) :: BIG_BM(3,ELDOF,IORQ1M*IORQ1M)
! Strain-displ matrix for this elem for all Gauss points/all DOF's
REAL(DOUBLE) :: BIG_BMI(3,ELDOF) ! BIG_BM for 1 Gauss point
REAL(DOUBLE) :: D1(3) ! Vector from G.P. 1 to G.P. 3 (a diagonal)
REAL(DOUBLE) :: D2(3) ! Vector from G.P. 2 to G.P. 4 (a diagonal)
REAL(DOUBLE) :: D1M ! Mag of D1
REAL(DOUBLE) :: D2M ! Mag of D2
REAL(DOUBLE) :: DETJ ! An output from subr JAC2D4, called herein. Determinant of JAC
REAL(DOUBLE) :: DUM1(3,ELDOF) ! Intermediate result in calc SHELL_B effect on KE
REAL(DOUBLE) :: DUM2(ELDOF,ELDOF) ! Intermediate result in calc SHELL_B effect on KE
REAL(DOUBLE) :: EPS1 ! A small number to compare to real zero
REAL(DOUBLE) :: HHH(MAX_ORDER_GAUSS) ! An output from subr ORDER, called herein. Gauss weights.
REAL(DOUBLE) :: INTFAC ! An integration factor (constant multiplier for the Gauss integr)
REAL(DOUBLE) :: JAC(2,2) ! An output from subr JAC2D4, called herein. 2 x 2 Jacobian matrix.
REAL(DOUBLE) :: JACI(2,2) ! An output from subr JAC2D4, called herein. 2 x 2 Jacobian inverse.
REAL(DOUBLE) :: M0 ! An intermediate variable used in calc elem mass, ME
REAL(DOUBLE) :: SSS(MAX_ORDER_GAUSS) ! An output from subr ORDER, called herein. Gauss abscissas.
REAL(DOUBLE) :: XSD(4) ! Diffs in x coords of quad sides in local coords
REAL(DOUBLE) :: YSD(4) ! Diffs in y coords of quad sides in local coords
INTRINSIC DSQRT
! **********************************************************************************************************************************
IF (WRT_LOG >= SUBR_BEGEND) THEN
CALL OURTIM
WRITE(F04,9001) SUBR_NAME,TSEC
9001 FORMAT(1X,A,' BEGN ',F10.3)
ENDIF
! **********************************************************************************************************************************
EPS1 = EPSIL(1)
! Initialize
BENSUM = ZERO
SHRSUM = ZERO
PSI_HAT = ZERO
PHI_SQ = ZERO
! Calculate side diffs
XSD(1) = XEL(1,1) - XEL(2,1) ! x coord diffs (in local elem coords)
XSD(2) = XEL(2,1) - XEL(3,1)
XSD(3) = XEL(3,1) - XEL(4,1)
XSD(4) = XEL(4,1) - XEL(1,1)
YSD(1) = XEL(1,2) - XEL(2,2) ! y coord diffs (in local elem coords)
YSD(2) = XEL(2,2) - XEL(3,2)
YSD(3) = XEL(3,2) - XEL(4,2)
YSD(4) = XEL(4,2) - XEL(1,2)
IF ((DEBUG(6) > 0) .AND. (WRT_BUG(0) > 0)) THEN
WRITE(BUG,*) ' Element side differences in x, y coords:'
WRITE(BUG,*) ' ---------------------------------------'
WRITE(BUG,98761) XSD(1), YSD(1)
WRITE(BUG,98762) XSD(2), YSD(2)
WRITE(BUG,98763) XSD(3), YSD(3)
WRITE(BUG,98764) XSD(4), YSD(4)
WRITE(BUG,*)
ENDIF
! Calculate area by Gaussian integration
AREA = ZERO
CALL ORDER_GAUSS ( 2, SSS, HHH )
DO I=1,2
DO J=1,2
CALL JAC2D ( SSS(I), SSS(J), XSD, YSD, 'N', JAC, JACI, DETJ )
AREA = AREA + HHH(I)*HHH(J)*DETJ
ENDDO
ENDDO
! If AREA < EPS1 (i.e. zero or negative to within machine precision), set error and return
IF (AREA < EPS1) THEN
NUM_EMG_FATAL_ERRS = NUM_EMG_FATAL_ERRS + 1
FATAL_ERR = FATAL_ERR + 1
IF (WRT_ERR > 0) THEN
WRITE(ERR,1925) EID, TYPE, 'AREA', AREA
WRITE(F06,1925) EID, TYPE, 'AREA', AREA
ELSE
IF (NUM_EMG_FATAL_ERRS <= MEFE) THEN
ERR_SUB_NAM(NUM_EMG_FATAL_ERRS) = SUBR_NAME
EMG_IFE(NUM_EMG_FATAL_ERRS,1) = 1925
EMG_RFE(NUM_EMG_FATAL_ERRS,1) = AREA
ENDIF
ENDIF
RETURN
ENDIF
! Calculate aspect ratio. Note that D1M and D2M have been checked to be > 0 in subr ELMGM2
DO I=1,2
D1(I) = XEL(3,I) - XEL(1,I)
D2(I) = XEL(4,I) - XEL(2,I)
ENDDO
D1M = DSQRT(D1(1)*D1(1) + D1(2)*D1(2))
D2M = DSQRT(D2(1)*D2(1) + D2(2)*D2(2))
AR = D1M/D2M
IF (AR < ONE) THEN
AR = ONE/AR
ENDIF
! **********************************************************************************************************************************
! Generate the mass matrix for this element. For the pure bending element the mass is based only on the non-structural mass.
IF (OPT(1) == 'Y') THEN
M0 = MASS_PER_UNIT_AREA*AREA/FOUR
ME( 1 ,1) = M0
ME( 2 ,2) = M0
ME( 3 ,3) = M0
ME( 7 ,7) = M0
ME( 8 ,8) = M0
ME( 9 ,9) = M0
ME(13,13) = M0
ME(14,14) = M0
ME(15,15) = M0
ME(19,19) = M0
ME(20,20) = M0
ME(21,21) = M0
ENDIF
! **********************************************************************************************************************************
IF ((OPT(2) == 'Y') .OR. (OPT(3) == 'Y') .OR. (OPT(4) == 'Y') .OR. (OPT(5) == 'Y')) THEN
IF (TYPE == 'SHEAR ') THEN
IORD = IORQ1S
RED_INT_SHEAR = 'N'
CALL QSHEAR ( OPT, IORD, RED_INT_SHEAR, XSD, YSD )
ENDIF
IF (TYPE(1:5) == 'QUAD4') THEN
IF (INTL_MID(1) /= 0) THEN
IORD = IORQ1M
IF (IORQ1S < IORQ1M) THEN
RED_INT_SHEAR = 'Y'
ELSE
RED_INT_SHEAR = 'N'
ENDIF
CALL QMEM1 ( OPT, IORD, RED_INT_SHEAR, AREA, XSD, YSD, BIG_BM )
ENDIF
ENDIF
IF (TYPE(1:6) == 'QUAD4K') THEN
IF (INTL_MID(2) /= 0) THEN
if ((pcomp_props == 'Y') .and. (pcomp_lam == 'NON')) then
WRITE(ERR,*) ' *ERROR: Code not written for SHELL_B effect on KE yet for QUAD4K elements'
WRITE(ERR,*) ' Or, if QUAD4, make sure that the element has nonzero transverse shear moduli, G1Z, G2Z'
WRITE(F06,*) ' *ERROR: Code not written for SHELL_B effect on KE yet for QUAD4K elements'
WRITE(F06,*) ' Or, if QUAD4, make sure that the element has nonzero transverse shear moduli, G1Z, G2Z'
call outa_here ( 'Y' )
endif
CALL QPLT1 ( OPT, AREA, XSD, YSD )
ENDIF
ELSE IF (TYPE(1:6) == 'QUAD4 ') THEN
IF (INTL_MID(2) /= 0) THEN ! If MID2 = 0, do not calculate bending or transverse shear stiffness
IF (QUAD4TYP == 'MIN4 ') THEN
CALL QPLT2 ( OPT, AREA, XSD, YSD, BIG_BB )
ELSE IF (QUAD4TYP == 'MIN4T') THEN
CALL QPLT3 ( OPT, AREA, XSD, YSD, BIG_BB )
ELSE
NUM_EMG_FATAL_ERRS = NUM_EMG_FATAL_ERRS + 1
FATAL_ERR = FATAL_ERR + 1
IF (WRT_ERR > 0) THEN
WRITE(ERR,1927) SUBR_NAME, QUAD4TYP
WRITE(F06,1927) SUBR_NAME, QUAD4TYP
ELSE
IF (NUM_EMG_FATAL_ERRS <= MEFE) THEN
ERR_SUB_NAM(NUM_EMG_FATAL_ERRS) = SUBR_NAME
EMG_IFE(NUM_EMG_FATAL_ERRS,1) = 1927
ENDIF
ENDIF
CALL OUTA_HERE ( 'Y' )
ENDIF
ENDIF
ENDIF
ENDIF
! **********************************************************************************************************************************
! Calc BM'*SHELL_B*BB (and its transpose) and add to KE. Only do this if this is a composite element with nonsym layup
IF (OPT(4) == 'Y') THEN
IF ((PCOMP_PROPS == 'Y') .AND. (PCOMP_LAM == 'NON')) THEN
IF (TYPE(1:5) == 'QUAD4') THEN
if (type == 'QUAD4K ') then
WRITE(ERR,*) ' *ERROR: Code not written for SHELL_B effect on KE yet for QUAD4K elements'
WRITE(F06,*) ' *ERROR: Code not written for SHELL_B effect on KE yet for QUAD4K elements'
call outa_here ( 'Y' )
endif
CALL ORDER_GAUSS ( IORD_PCOMP, SSS, HHH )
GAUSS_PT = 0
DO I=1,IORD_PCOMP
DO J=1,IORD_PCOMP
GAUSS_PT = GAUSS_PT + 1
DO K=1,3
DO L=1,ELDOF
BIG_BMI(K,L) = BIG_BM(K,L,GAUSS_PT)
ENDDO
ENDDO
DO K=1,3
DO L=1,ELDOF
BIG_BBI(K,L) = BIG_BB(K,L,GAUSS_PT)
ENDDO
ENDDO
CALL JAC2D ( SSS(I), SSS(J), XSD, YSD, 'N', JAC, JACI, DETJ )
CALL MATMULT_FFF ( SHELL_B, BIG_BBI, 3, 3, ELDOF, DUM1 )
CALL MATMULT_FFF_T ( BIG_BMI, DUM1, 3, ELDOF, ELDOF, DUM2 )
INTFAC = DETJ*HHH(I)*HHH(J)
DO K=1,ELDOF
DO L=1,ELDOF
KE(K,L) = KE(K,L) + INTFAC*(DUM2(K,L) + DUM2(L,K))
ENDDO
ENDDO
ENDDO
ENDDO
ELSE IF (TYPE(1:5) == 'TRIA3') THEN
if (type == 'TRIA3K ') then
WRITE(ERR,*) ' *ERROR: Code not written for SHELL_B effect on KE yet for TRIA3K elements'
WRITE(F06,*) ' *ERROR: Code not written for SHELL_B effect on KE yet for TRIA3K elements'
call outa_here ( 'Y' )
endif
DO K=1,3
DO L=1,ELDOF
BIG_BMI(K,L) = BIG_BM(K,L,1)
ENDDO
ENDDO
DO K=1,3
DO L=1,ELDOF
BIG_BBI(K,L) = BIG_BB(K,L,1)
ENDDO
ENDDO
CALL MATMULT_FFF ( SHELL_B, BIG_BBI, 3, 3, ELDOF, DUM1 )
CALL MATMULT_FFF_T ( BIG_BMI, DUM1, 3, ELDOF, ELDOF, DUM2 )
DO K=1,ELDOF
DO L=1,ELDOF
KE(K,L) = KE(K,L) + (DUM2(K,L) + DUM2(L,K))
ENDDO
ENDDO
ENDIF
ENDIF
ENDIF
! **********************************************************************************************************************************
IF (WRT_LOG >= SUBR_BEGEND) THEN
CALL OURTIM
WRITE(F04,9002) SUBR_NAME,TSEC
9002 FORMAT(1X,A,' END ',F10.3)
ENDIF
RETURN
! **********************************************************************************************************************************
1925 FORMAT(' *ERROR 1925: ELEMENT ',I8,', TYPE ',A,', HAS ZERO OR NEGATIVE ',A,' = ',1ES9.1)
1927 FORMAT(' *ERROR 1927: PROGRAMMING ERROR IN SUBROUTINE ',A &
,/,14X,' CHAR PARAMETER QUAD4TYP MUST BE EITHER "MIN4T" OR "MIN4 " BUT IS "',A,'"')
1948 FORMAT(' *ERROR 1948: ',A,I8,' MUST HAVE INTEGRATION ORDERS FOR PARAMS ',A,' = ',I3,' IF THE ELEMENT IS A PCOMP' &
,/,14X,' WITH SYM LAYUP. HOWEVER, THE TWO INTEGRATION ORDERS WERE: ',A,' = ',I3,' AND ',A,' = ',I3)
98761 FORMAT(' X1-X2 = ',1ES14.6,' Y1-Y2 = ',1ES14.6)
98762 FORMAT(' X2-X3 = ',1ES14.6,' Y2-Y3 = ',1ES14.6)
98763 FORMAT(' X3-X4 = ',1ES14.6,' Y3-Y4 = ',1ES14.6)
98764 FORMAT(' X4-X1 = ',1ES14.6,' Y4-Y1 = ',1ES14.6)
! **********************************************************************************************************************************
END SUBROUTINE QDEL1
|
-- | This module provides the core functionality of the ansigraph package:
-- terminal-based graphing for vectors and matrices of real and complex numbers.
--
-- This is implemented via a 'Graphable' type class.
--
-- __Ansigraph is intended to be used in one of two ways:__
--
-- * __By importing "System.Console.Ansigraph"__.
-- This provides all the functionality we typically use, including the FlexibleInstances
-- extension which makes it easier to use graphing functions by allowing instances like
-- 'Graphable [Double]'.
--
--
-- * __By directly importing "System.Console.Ansigraph.Core"__, which does not activate
-- FlexibleInstances but includes everything else provided by the other module. This just means
-- you must use one of a few newtype wrappers, namely: 'Graph', 'PosGraph', 'CGraph',
-- 'Mat', 'CMat'. They are also available from the standard module.
module System.Console.Ansigraph.Core (
-- * Core Functionality
-- ** The Graphable class
Graphable (..)
, graph
, animateWith
, animate
, transientAnim
, transientAnimWith
-- *** Graphing options
, GraphSettings (..)
-- **** Default options
, graphDefaults
, blue, pink, white, red, green
, noColoring
-- *** ANSI data
-- **** Basic types from ANSI package
, Color (..)
, ColorIntensity (..)
-- **** Custom composite data types
, AnsiColor (..)
, Coloring (..)
-- *** ANSI helpers
, mkColoring
, fromFG
, fromBG
, realColors
, imagColors
, colorSets
, invert
, interpAnsiColor
, setColor
, clear
, clearLn
, applyColoring
, colorStr
, colorStrLn
, boldStr
, boldStrLn
-- * Graphable wrapper types
, Graph (..)
, CGraph (..)
, PosGraph (..)
, Mat (..)
, CMat (..)
-- * Graphing
-- *** Horizontal vector graphing (IO actions)
, displayPV
, displayRV
, displayCV
-- *** Horizontal rendering logic (producing strings)
, renderPV
, renderRV
, renderCV
-- *** Matrix graphing
, displayMat
, displayCMat
, matShow
-- *** Simple (non-ANSI) graphing for strictly-positive data
, posGraph
, posAnim
-- *** For clearing
, clearBack
) where
import System.Console.Ansigraph.Internal.Core
import System.Console.Ansigraph.Internal.Horizontal
import System.Console.Ansigraph.Internal.Matrix
import System.Console.ANSI
import Control.Concurrent (threadDelay)
import Control.Monad (replicateM_)
import Data.Complex (Complex)
import Control.Monad.IO.Class (MonadIO, liftIO)
-- | Things that ansigraph knows how to render at the terminal are instances of this class.
--
-- In general, when ANSI codes are involved, a 'graphWith' method should flush stdout when
-- finished, and whenever codes are invoked to e.g. change terminal colors. This is easily
-- handled by defining it in terms of 'colorStr' and 'colorStrLn'.
--
-- The 'graphHeight' function specifies how many vertical lines a graph occupies and is
-- needed for animations to work properly.
class Graphable a where
-- | Render a graph to standard output.
graphWith :: MonadIO m => GraphSettings -> a -> m ()
-- | The number of vertical lines a graph occupies.
graphHeight :: a -> Int
-- | Invokes the 'Graphable' type class method 'graphWith' with the
-- default 'GraphSettings' record, 'graphDefaults'.
graph :: MonadIO m => Graphable a => a -> m ()
graph = graphWith graphDefaults
---- IO / ANSI helpers ----
-- | Clear the last @n@ lines of terminal text. Used to make graph animations. Re-exported as
-- a handy convenience for other uses.
clearBack :: MonadIO m => Int -> m ()
clearBack n = do
putStr' "\r" -- return cursor to horizontal position 0
replicateM_ n (liftIO $ cursorUpLine 1 >> clearLine)
-- | For some number of frames per second, return the corresponding time delta in microseconds.
deltaFromFPS :: Int -> Int
deltaFromFPS fps = 1000000 `div` fps
---- Animation ----
clearGraph :: MonadIO m => Graphable a => a -> m ()
clearGraph = clearBack . graphHeight
animationFrame :: MonadIO m => Graphable a => GraphSettings -> a -> m ()
animationFrame s x = do
graphWith s x
liftIO . threadDelay . deltaFromFPS . framerate $ s
clearGraph x
-- | Any list of a 'Graphable' type can be made into an animation, by
-- 'graph'ing each element with a time delay and screen-clear after each.
-- 'GraphSettings' are used to determine the time delta and any coloring/scaling options.
animateWith :: MonadIO m => Graphable a => GraphSettings -> [a] -> m ()
animateWith _ [] = return ()
animateWith s [x] = graphWith s x
animateWith s (x:y:zs) = animationFrame s x >> animateWith s (y:zs)
-- | Perform 'animateWith' using default options. Equivalent to 'graph'ing each member
-- of the supplied list with a short delay and screen-clear after each.
animate :: MonadIO m => Graphable a => [a] -> m ()
animate = animateWith graphDefaults
-- | Like 'animateWith', only it does not leave the final frame of the animation visible.
transientAnimWith :: MonadIO m => Graphable a => GraphSettings -> [a] -> m ()
transientAnimWith = mapM_ . animationFrame
-- | Like 'animate', only it does not leave the final frame of the animation visible.
transientAnim :: (MonadIO m, Graphable a) => [a] -> m ()
transientAnim = transientAnimWith graphDefaults
---- Wrappers to avoid needing FlexibleInstances ----
-- | Wrapper type for graph of a real vector/function.
newtype Graph = Graph { unGraph :: [Double] }
-- | Wrapper type for graph of a complex vector/function.
newtype CGraph = CGraph { unCGraph :: [Complex Double] }
-- | Wrapper type for graph of a non-negative real vector/function.
newtype PosGraph = PosGraph { unPosGraph :: [Double] }
-- | Wrapper type for graph of a real two-index vector/two-argument function.
newtype Mat = Mat { unMat :: [[Double]] }
-- | Wrapper type for graph of a complex two-index vector/two-argument function.
newtype CMat = CMat { unCMat :: [[Complex Double]] }
instance Graphable Graph where
graphWith s = displayRV s . unGraph
graphHeight _ = 2
instance Graphable CGraph where
graphWith s = displayCV s . unCGraph
graphHeight _ = 4
instance Graphable PosGraph where
graphWith s = displayPV s . unPosGraph
graphHeight _ = 1
instance Graphable Mat where
graphWith s = displayMat s . unMat
graphHeight = length . unMat
instance Graphable CMat where
graphWith s = displayCMat s . unCMat
graphHeight = length . unCMat
---- helpers for graphing/animating strictly-positive real functions ----
-- | Display a graph of the supplied (non-negative) real vector.
posGraph :: MonadIO m => [Double] -> m ()
posGraph = graph . PosGraph
-- | Display an animation of the supplied list of (non-negative) real vectors.
posAnim :: MonadIO m => [[Double]] -> m ()
posAnim = animate . map PosGraph
|
(* Title: HOL/Auth/n_mutualExFsm.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_mutualExFsm Protocol Case Study*}
theory n_mutualExFsm imports n_mutualExFsm_lemma_invs_on_rules n_mutualExFsm_on_inis
begin
lemma main:
assumes a1: "s \<in> reachableSet {andList (allInitSpecs N)} (rules N)"
and a2: "0 < N"
shows "\<forall> f. f \<in> (invariants N) --> formEval f s"
proof (rule consistentLemma)
show "consistent (invariants N) {andList (allInitSpecs N)} (rules N)"
proof (cut_tac a1, unfold consistent_def, rule conjI)
show "\<forall> f ini s. f \<in> (invariants N) --> ini \<in> {andList (allInitSpecs N)} --> formEval ini s --> formEval f s"
proof ((rule allI)+, (rule impI)+)
fix f ini s
assume b1: "f \<in> (invariants N)" and b2: "ini \<in> {andList (allInitSpecs N)}" and b3: "formEval ini s"
have b4: "formEval (andList (allInitSpecs N)) s"
apply (cut_tac b2 b3, simp) done
show "formEval f s"
apply (rule on_inis, cut_tac b1, assumption, cut_tac b2, assumption, cut_tac b3, assumption) done
qed
next show "\<forall> f r s. f \<in> invariants N --> r \<in> rules N --> invHoldForRule s f r (invariants N)"
proof ((rule allI)+, (rule impI)+)
fix f r s
assume b1: "f \<in> invariants N" and b2: "r \<in> rules N"
show "invHoldForRule s f r (invariants N)"
apply (rule invs_on_rules, cut_tac b1, assumption, cut_tac b2, assumption) done
qed
qed
next show "s \<in> reachableSet {andList (allInitSpecs N)} (rules N)"
apply (metis a1) done
qed
end
|
[STATEMENT]
lemma wellformed_transaction_send_receive_subst_trm_cases:
assumes T: "wellformed_transaction T"
shows "t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))"
and "t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))) &&& (t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>)))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
2. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
assume "t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>"
[PROOF STATE]
proof (state)
this:
t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>
goal (2 subgoals):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
2. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>
[PROOF STEP]
obtain s where s: "s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T)" "t = s \<cdot> \<theta>"
[PROOF STATE]
proof (prove)
using this:
t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>
goal (1 subgoal):
1. (\<And>s. \<lbrakk>s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T); t = s \<cdot> \<theta>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T)
t = s \<cdot> \<theta>
goal (2 subgoals):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
2. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
hence "\<exists>ts. s \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T))"
[PROOF STATE]
proof (prove)
using this:
s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T)
t = s \<cdot> \<theta>
goal (1 subgoal):
1. \<exists>ts. s \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T))
[PROOF STEP]
using wellformed_transaction_send_receive_trm_cases(1)[OF T]
[PROOF STATE]
proof (prove)
using this:
s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T)
t = s \<cdot> \<theta>
?t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<Longrightarrow> \<exists>ts. ?t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T))
goal (1 subgoal):
1. \<exists>ts. s \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<exists>ts. s \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T))
goal (2 subgoals):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_receive T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
2. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
thus "\<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))"
[PROOF STATE]
proof (prove)
using this:
\<exists>ts. s \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T))
goal (1 subgoal):
1. \<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
using s(2) unlabel_subst[of _ \<theta>] stateful_strand_step_subst_inI(2)
[PROOF STATE]
proof (prove)
using this:
\<exists>ts. s \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T))
t = s \<cdot> \<theta>
unlabel ?S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta> = unlabel (?S \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>)
receive\<langle>?ts\<rangle> \<in> set ?A \<Longrightarrow> receive\<langle>?ts \<cdot>\<^sub>l\<^sub>i\<^sub>s\<^sub>t ?\<theta>\<rangle> \<in> set (?A \<cdot>\<^sub>s\<^sub>s\<^sub>t ?\<theta>)
goal (1 subgoal):
1. \<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
by (smt image_eqI list.set_map)
[PROOF STATE]
proof (state)
this:
\<exists>ts. t \<in> set ts \<and> receive\<langle>ts\<rangle> \<in> set (unlabel (transaction_receive T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
goal (1 subgoal):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
assume "t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>"
[PROOF STATE]
proof (state)
this:
t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>
goal (1 subgoal):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>
[PROOF STEP]
obtain s where s: "s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T)" "t = s \<cdot> \<theta>"
[PROOF STATE]
proof (prove)
using this:
t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>
goal (1 subgoal):
1. (\<And>s. \<lbrakk>s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T); t = s \<cdot> \<theta>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T)
t = s \<cdot> \<theta>
goal (1 subgoal):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
hence "\<exists>ts. s \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T))"
[PROOF STATE]
proof (prove)
using this:
s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T)
t = s \<cdot> \<theta>
goal (1 subgoal):
1. \<exists>ts. s \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T))
[PROOF STEP]
using wellformed_transaction_send_receive_trm_cases(2)[OF T]
[PROOF STATE]
proof (prove)
using this:
s \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T)
t = s \<cdot> \<theta>
?t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<Longrightarrow> \<exists>ts. ?t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T))
goal (1 subgoal):
1. \<exists>ts. s \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<exists>ts. s \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T))
goal (1 subgoal):
1. t \<in> trms\<^sub>l\<^sub>s\<^sub>s\<^sub>t (transaction_send T) \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta> \<Longrightarrow> \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
thus "\<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))"
[PROOF STATE]
proof (prove)
using this:
\<exists>ts. s \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T))
goal (1 subgoal):
1. \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
using s(2) unlabel_subst[of _ \<theta>] stateful_strand_step_subst_inI(1)
[PROOF STATE]
proof (prove)
using this:
\<exists>ts. s \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T))
t = s \<cdot> \<theta>
unlabel ?S \<cdot>\<^sub>s\<^sub>s\<^sub>t \<theta> = unlabel (?S \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>)
send\<langle>?ts\<rangle> \<in> set ?A \<Longrightarrow> send\<langle>?ts \<cdot>\<^sub>l\<^sub>i\<^sub>s\<^sub>t ?\<theta>\<rangle> \<in> set (?A \<cdot>\<^sub>s\<^sub>s\<^sub>t ?\<theta>)
goal (1 subgoal):
1. \<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
[PROOF STEP]
by (smt image_eqI list.set_map)
[PROOF STATE]
proof (state)
this:
\<exists>ts. t \<in> set ts \<and> send\<langle>ts\<rangle> \<in> set (unlabel (transaction_send T \<cdot>\<^sub>l\<^sub>s\<^sub>s\<^sub>t \<theta>))
goal:
No subgoals!
[PROOF STEP]
qed |
The text editing environment in Emacs uses a double space following a period to identify the end of sentences unambiguously; the double-space convention prevents confusion with periods within sentences that signify abbreviations. How Emacs recognizes the end of a sentence is controlled by the settings sentence-end-double-space and sentence-end. The vi editor also follows this convention; thus, it is relatively easy to manipulate (jump over, copy, delete) whole sentences in both Emacs and vi.
|
```python
# This code block is for automatic testing purposes, please ignore.
try:
import openfermionprojectq
except:
import os
os.chdir('..')
```
## Simulating a variational quantum eigensolver using OpenFermion-ProjectQ
We now demonstrate how one can use both openfermion and ProjectQ to run a simple VQE example using a Unitary Coupled Cluster ansatz. It demonstrates a simple way to evaluate the energy, optimize the energy with respect to the ansatz and build the corresponding compiled quantum circuit. It utilizes OpenFermion to prepare the Hamiltonians as well as initial parameters and ProjectQ to build and simulate the circuit.
```python
import os
from numpy import array, concatenate, zeros
from numpy.random import randn
from scipy.optimize import minimize
from openfermion.config import *
from openfermionprojectq import *
from openfermion.hamiltonians import MolecularData
from openfermion.transforms import jordan_wigner
from openfermion.utils import uccsd_singlet_paramsize
from projectq.ops import X, All, Measure
from projectq.backends import CommandPrinter, CircuitDrawer
```
Here we load $\textrm{H}_2$ from a precomputed molecule file found in the test data directory, and initialize the ProjectQ circuit compiler to a standard setting that uses a first-order Trotter decomposition to break up the exponentials of non-commuting operators.
```python
# Load the molecule.
filename = os.path.join(DATA_DIRECTORY, 'H2_sto-3g_singlet_0.7414')
molecule = MolecularData(filename=filename)
# Use a Jordan-Wigner encoding, and compress to remove 0 imaginary components
qubit_hamiltonian = jordan_wigner(molecule.get_molecular_hamiltonian())
qubit_hamiltonian.compress()
compiler_engine = uccsd_trotter_engine()
```
The Variational Quantum Eigensolver (VQE) works by parameterizing a wavefunction $| \Psi(\theta) \rangle$ through some quantum circuit, and minimizing the energy with respect to the circuit parameters $\theta$, where the energy is defined by
\begin{align}
E(\theta) = \langle \Psi(\theta)| H | \Psi(\theta) \rangle
\end{align}
To perform the VQE loop with a simple molecule, it helps to wrap the evaluation of the energy into a simple objective function that takes the parameters of the circuit and returns the energy. Here we define that function using ProjectQ to handle the qubits and the simulation.
```python
def energy_objective(packed_amplitudes):
"""Evaluate the energy of a UCCSD singlet wavefunction with packed_amplitudes
Args:
packed_amplitudes(ndarray): Compact array that stores the unique
amplitudes for a UCCSD singlet wavefunction.
Returns:
energy(float): Energy corresponding to the given amplitudes
"""
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# Set Jordan-Wigner initial state with correct number of electrons
wavefunction = compiler_engine.allocate_qureg(molecule.n_qubits)
for i in range(molecule.n_electrons):
X | wavefunction[i]
# Build the circuit and act it on the wavefunction
evolution_operator = uccsd_singlet_evolution(packed_amplitudes,
molecule.n_qubits,
molecule.n_electrons)
evolution_operator | wavefunction
compiler_engine.flush()
# Evaluate the energy and reset wavefunction
energy = compiler_engine.backend.get_expectation_value(qubit_hamiltonian, wavefunction)
All(Measure) | wavefunction
compiler_engine.flush()
return energy
```
While we could plug this objective function into any optimizer, SciPy offers a convenient framework within the Python ecosystem. We'll choose as starting amplitudes the classical CCSD values that can be loaded from the molecule if desired. The optimal energy is found and compared to the exact values to verify that our simulation was successful.
```python
n_amplitudes = uccsd_singlet_paramsize(molecule.n_qubits, molecule.n_electrons)
initial_amplitudes = [0, 0.05677]
initial_energy = energy_objective(initial_amplitudes)
# Run VQE Optimization to find new CCSD parameters
opt_result = minimize(energy_objective, initial_amplitudes,
method="CG", options={'disp':True})
opt_energy, opt_amplitudes = opt_result.fun, opt_result.x
print("\nOptimal UCCSD Singlet Energy: {}".format(opt_energy))
print("Optimal UCCSD Singlet Amplitudes: {}".format(opt_amplitudes))
print("Classical CCSD Energy: {} Hartrees".format(molecule.ccsd_energy))
print("Exact FCI Energy: {} Hartrees".format(molecule.fci_energy))
print("Initial Energy of UCCSD with CCSD amplitudes: {} Hartrees".format(initial_energy))
```
Optimization terminated successfully.
Current function value: -1.137270
Iterations: 1
Function evaluations: 12
Gradient evaluations: 3
Optimal UCCSD Singlet Energy: -1.13727017463
Optimal UCCSD Singlet Amplitudes: [ -1.03662149e-08 5.65340580e-02]
Classical CCSD Energy: -1.13727017465 Hartrees
Exact FCI Energy: -1.13727017463 Hartrees
Initial Energy of UCCSD with CCSD amplitudes: -1.13726981456 Hartrees
As we can see, the optimization terminates extremely quickly because the classical coupled cluster amplitudes were (for this molecule) already optimal. We can now use ProjectQ to compile this simulation circuit to a set of two-body quantum gates.
```python
compiler_engine = uccsd_trotter_engine(CommandPrinter())
wavefunction = compiler_engine.allocate_qureg(molecule.n_qubits)
for i in range(molecule.n_electrons):
X | wavefunction[i]
# Build the circuit and act it on the wavefunction
evolution_operator = uccsd_singlet_evolution(opt_amplitudes,
molecule.n_qubits,
molecule.n_electrons)
evolution_operator | wavefunction
compiler_engine.flush()
```
Allocate | Qureg[0]
Allocate | Qureg[1]
Allocate | Qureg[2]
Allocate | Qureg[3]
X | Qureg[0]
X | Qureg[1]
Rx(1.57079632679) | Qureg[1]
H | Qureg[3]
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(12.566370604) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
H | Qureg[3]
Rx(10.9955742876) | Qureg[1]
Rx(1.57079632679) | Qureg[0]
H | Qureg[1]
Rx(1.57079632679) | Qureg[2]
Rx(1.57079632679) | Qureg[3]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(12.5381035853) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
Rx(10.9955742876) | Qureg[3]
Rx(10.9955742876) | Qureg[2]
H | Qureg[1]
Rx(10.9955742876) | Qureg[0]
H | Qureg[0]
H | Qureg[1]
Rx(1.57079632679) | Qureg[2]
H | Qureg[3]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(12.5381035853) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
H | Qureg[3]
Rx(10.9955742876) | Qureg[2]
H | Qureg[1]
H | Qureg[0]
H | Qureg[1]
Rx(1.57079632679) | Qureg[3]
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(1.03662148687e-08) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
Rx(10.9955742876) | Qureg[3]
H | Qureg[1]
H | Qureg[0]
Rx(1.57079632679) | Qureg[1]
H | Qureg[2]
H | Qureg[3]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(0.0282670290194) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
H | Qureg[3]
H | Qureg[2]
Rx(10.9955742876) | Qureg[1]
H | Qureg[0]
H | Qureg[0]
H | Qureg[1]
H | Qureg[2]
Rx(1.57079632679) | Qureg[3]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(12.5381035853) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
Rx(10.9955742876) | Qureg[3]
H | Qureg[2]
H | Qureg[1]
H | Qureg[0]
Rx(1.57079632679) | Qureg[0]
H | Qureg[1]
H | Qureg[2]
H | Qureg[3]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(0.0282670290194) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
H | Qureg[3]
H | Qureg[2]
H | Qureg[1]
Rx(10.9955742876) | Qureg[0]
Rx(1.57079632679) | Qureg[0]
H | Qureg[2]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
Rz(12.566370604) | Qureg[2]
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
H | Qureg[2]
Rx(10.9955742876) | Qureg[0]
H | Qureg[0]
Rx(1.57079632679) | Qureg[2]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
Rz(1.03662148687e-08) | Qureg[2]
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
Rx(10.9955742876) | Qureg[2]
H | Qureg[0]
H | Qureg[0]
Rx(1.57079632679) | Qureg[1]
Rx(1.57079632679) | Qureg[2]
Rx(1.57079632679) | Qureg[3]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(12.5381035853) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
Rx(10.9955742876) | Qureg[3]
Rx(10.9955742876) | Qureg[2]
Rx(10.9955742876) | Qureg[1]
H | Qureg[0]
Rx(1.57079632679) | Qureg[0]
Rx(1.57079632679) | Qureg[1]
Rx(1.57079632679) | Qureg[2]
H | Qureg[3]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(0.0282670290194) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
H | Qureg[3]
Rx(10.9955742876) | Qureg[2]
Rx(10.9955742876) | Qureg[1]
Rx(10.9955742876) | Qureg[0]
Rx(1.57079632679) | Qureg[0]
Rx(1.57079632679) | Qureg[1]
H | Qureg[2]
Rx(1.57079632679) | Qureg[3]
CX | ( Qureg[0], Qureg[1] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[2], Qureg[3] )
Rz(0.0282670290194) | Qureg[3]
CX | ( Qureg[2], Qureg[3] )
CX | ( Qureg[1], Qureg[2] )
CX | ( Qureg[0], Qureg[1] )
Rx(10.9955742876) | Qureg[3]
H | Qureg[2]
Rx(10.9955742876) | Qureg[1]
Rx(10.9955742876) | Qureg[0]
|
# -*- coding: utf-8 -*-
import os
import re
import sys
import json
import time
import click
import logging
import pypandoc
import pdftotext
import numpy as np
import pandas as pd
import subprocess
from pathlib import Path, PurePath
from dotenv import find_dotenv, load_dotenv
def get_measure_df(measure_files_list, logger):
    """
    Combine all measure files into a single dataframe.
    """
    records = []
    for i, fp in enumerate(measure_files_list):
        logger.info(f'Processing {fp} ({i+1}/{len(measure_files_list)})')
        with open(fp, 'r') as f:
            measure_data = json.load(f)
        # Assumes each JSON file holds a single measure record (a dict)
        records.append(measure_data)
    return pd.DataFrame(records)
def deduplicate_measures_list(measures_list):
    """
    Take a list of file paths to measures; remove duplicates (preferring newer).
    Sketch implementation: duplicates share a basename; keep the latest mtime.
    """
    newest = {}
    for fp in sorted(measures_list, key=os.path.getmtime):
        newest[os.path.basename(fp)] = fp  # later mtimes overwrite earlier ones
    return list(newest.values())
def list_raw_measures(data_directory, logger) -> list:
"""
List all measure files in the data directory.
"""
measure_files = [os.path.join(data_directory, f) for f in os.listdir(data_directory)]
measure_files = [f for f in measure_files if f.endswith('.json')]
measure_files = [f for f in measure_files if '_measure_' in f]
return measure_files
@click.command()
@click.argument('input_dir',
type=click.Path(exists=True,
file_okay=False,
dir_okay=True,
readable=True,
resolve_path=True),)
@click.argument('output_dir',)
def main(input_dir, output_dir):
""" Runs data processing scripts to turn raw data from (../raw) into
a single interim dataset (../interim), for which later features
will be engineered.
"""
logger = logging.getLogger(__name__)
logger.info('making measures data set from raw data measures directory')
raw_measures_outpath = PurePath(project_dir).joinpath(output_dir).joinpath('raw_measures.pkl')
raw_measure_files = list_raw_measures(input_dir, logger)
raw_measures = get_measure_df(raw_measure_files, logger)
raw_measures.to_pickle(raw_measures_outpath)
if __name__ == '__main__':
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# setting the logs
os.makedirs(PurePath(project_dir).joinpath('logs'), exist_ok=True)
log_path = PurePath(project_dir).joinpath('logs/combine_measures.log')
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename=log_path, level=logging.INFO, format=log_fmt)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
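# Example invocation (a sketch; the paths below are hypothetical):
#
#   python combine_measures.py data/raw data/interim
#
# This reads every '*_measure_*.json' file in data/raw and writes the
# combined dataframe to <project_dir>/data/interim/raw_measures.pkl.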
|
/-
Copyright (c) 2021 Ashvni Narayanan. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Ashvni Narayanan
-/
import norm_properties
import nat_properties
/-!
# Miscellaneous assisting lemmas
This file describes several miscellaneous lemmas that are written specifically
for proving the main results of this project. It includes specific properties of
`smul` which are used frequently.
-/
open dirichlet_character zmod
variables {p d m : nat} [fact (nat.prime p)] {R : Type*} [normed_comm_ring R]
(χ : dirichlet_character R (d * p^m))
-- replaced `neg_one_pow_eq_neg_one` with `neg_one_pow_prime_sub_two_eq_neg_one`
lemma neg_one_pow_prime_sub_two_eq_neg_one (hp : 2 < p) : (-1 : units R)^(p - 2) = -1 :=
begin
rw ←units.eq_iff,
simp only [units.coe_neg_one, units.coe_pow],
rw neg_one_pow_eq_pow_mod_two,
cases @nat.prime.eq_two_or_odd p (fact.out _),
{ exfalso, apply ne_of_gt hp h, },
{ rw [←nat.mod_eq_sub_mod (le_of_lt hp), h, pow_one], },
end
variable (p)
-- `two_mul_eq_inv_two_smul` replaced with `helper_6`
-- can be generalized to n : ℕ and z : ℤ, possibly more
lemma helper_6 [algebra ℚ_[p] R] {a b : R} (h : (2 : R) * a = b) : a = (2 : ℚ_[p])⁻¹ • b :=
begin
symmetry,
rw inv_smul_eq_iff₀ _,
{ rw [←h, ←nat.cast_two, ←map_nat_cast (algebra_map ℚ_[p] R) 2, ←smul_eq_mul, algebra_map_smul,
nat.cast_two], },
{ apply two_ne_zero', },
end
--variables (d R)
variables {p d R} (χ)
-- `exists_mul_mul_mul_lt` replaced with `helper_10`
lemma helper_10 [normed_algebra ℚ_[p] R] [norm_one_class R] {k : ℕ} {ε : ℝ} (hk : 1 < k)
(hp : 2 < p) (hε : 0 < ε) : ∃ x : ℕ,
∥(2⁻¹ : ℚ_[p])∥ * (↑k - 1 + ∥((d * p ^ x : ℕ) : R)^(k - 2) * (1 + 1)∥) *
(∥(((d * p ^ x) : ℕ) : R)∥ * (χ.mul (teichmuller_character_mod_p' p R ^ k)).bound) < ε :=
begin
have one_div_lt_one : 1 / (p : ℝ) < 1,
{ refine (one_div_lt _ _).2 _,
{ refine nat.cast_pos.2 (nat.prime.pos (fact.out _)), },
{ refine zero_lt_one, },
{ rw one_div_one, refine nat.one_lt_cast.2 (nat.prime.one_lt (fact.out _)), }, },
have pos' : 0 < ↑k * (χ.mul (teichmuller_character_mod_p' p R ^ k)).bound,
{ apply mul_pos (nat.cast_pos.2 (lt_trans zero_lt_one hk)) (dirichlet_character.bound_pos _), },
have pos : 0 < ε / (↑k * (χ.mul (teichmuller_character_mod_p' p R ^ k)).bound) := div_pos hε pos',
refine ⟨classical.some (exists_pow_lt_of_lt_one pos one_div_lt_one), lt_of_le_of_lt (mul_le_mul
(helper_9 hk hp _) le_rfl (mul_nonneg (norm_nonneg _)
(le_of_lt (dirichlet_character.bound_pos _))) (nat.cast_nonneg _)) _⟩,
rw mul_left_comm,
refine lt_of_le_of_lt (mul_le_mul (norm_mul_pow_le_one_div_pow p d R _) le_rfl
(le_of_lt pos') _) _,
{ rw ←one_div_pow,
refine pow_nonneg (div_nonneg zero_le_one (nat.cast_nonneg _)) _, },
{ rw ←one_div_pow,
have := classical.some_spec (exists_pow_lt_of_lt_one pos one_div_lt_one),
apply lt_of_lt_of_le (mul_lt_mul this le_rfl pos' (div_nonneg (le_of_lt hε) (le_of_lt pos'))) _,
rw [div_mul_eq_mul_div, mul_div_assoc, div_self (λ h, _), mul_one],
rw mul_eq_zero at h,
cases h,
{ rw (nat.cast_eq_zero.1 h) at hk,
simp only [not_lt_zero'] at hk,
apply hk, },
{ have := (χ.mul (teichmuller_character_mod_p' p R ^ k)).bound_pos,
rw h at this,
simp only [lt_self_iff_false] at this,
exact this, }, },
end
namespace filter
lemma tendsto_zero_of_tendsto_const_smul_zero [algebra ℚ_[p] R] {f : ℕ → R} {x : filter ℕ}
{c : ℚ_[p]} (hc : c ≠ 0) (hf : tendsto (λ y, c • f y) x (nhds 0)) :
tendsto (λ x, f x) x (nhds 0) :=
begin
rw ←smul_zero c⁻¹,
conv { congr, funext, rw [←one_smul _ (f x), ←inv_mul_cancel hc, ←smul_smul], },
{ apply tendsto.const_smul hf _, apply_instance, },
end
end filter
open_locale big_operators
lemma sum_odd_fn_eq_zero {G G' : Type*} {s : finset G} [has_involutive_neg G]
[non_assoc_ring G'] [no_zero_divisors G'] [char_zero G'] {f : G → G'}
(h1 : ∀ x ∈ s, -x ∈ s) (h2 : ∀ x ∈ s, f (-x) = - f x) : ∑ (x : G) in s, f x = 0 :=
begin
have h : ∑ (x : G) in s, f x = ∑ (x : G) in s, f (-x) :=
finset.sum_bij (λ a ha, -a) (λ a ha, h1 a ha) (λ a ha, by {rw neg_neg})
(λ a₁ a₂ ha₁ ha₂ h, neg_inj.1 h) (λ b hb, ⟨-b, h1 b hb, by {rw neg_neg}⟩),
conv_rhs at h { apply_congr, skip, rw h2 x H, },
rw [finset.sum_neg_distrib, eq_neg_self_iff] at h,
rw h,
end
lemma finset.neg_sum {α β : Type*} [ring β] (s : finset α) (f : α → β) :
∑ x in s, - (f x) = - ∑ x in s, f x :=
begin
conv_lhs { apply_congr, skip, rw neg_eq_neg_one_mul, },
rw ← finset.mul_sum, rw ← neg_eq_neg_one_mul,
end
lemma inv_smul_self [algebra ℚ R] {n : ℕ} (hn : n ≠ 0) :
(n : ℚ)⁻¹ • (n : R) = 1 :=
begin
rw ← one_mul (n : R), rw ← smul_mul_assoc, rw ← algebra.algebra_map_eq_smul_one,
have : (algebra_map ℚ R) (n : ℚ) = (n : R), simp only [map_nat_cast],
conv_lhs { congr, skip, rw ← this, }, rw ← (algebra_map ℚ R).map_mul, rw inv_mul_cancel _,
simp only [ring_hom.map_one],
{ norm_cast, apply hn, },
end
lemma int.exists_int_eq_fract_mul_self (a : ℕ) {b : ℕ} (hb : b ≠ 0) : ∃ z : ℤ, (z : ℚ) = int.fract (a / b : ℚ) * b :=
begin
obtain ⟨z, hz⟩ := int.fract_mul_nat (a / b : ℚ) b,
refine ⟨z, _⟩,
have : (b : ℚ) ≠ 0,
{ norm_cast, apply hb, },
simp_rw [div_mul_cancel (a : ℚ) this] at hz,
rw ← hz,
rw sub_eq_self,
change int.fract ((a : ℤ) : ℚ) = 0, rw int.fract_coe,
end
variable (R)
lemma one_div_smul_self [algebra ℚ R] {n : ℕ} (hn : n ≠ 0) :
(1 / (n : ℚ)) • (n : R) = 1 :=
by { rw [← inv_eq_one_div, inv_smul_self hn], }
variables (p d)
lemma div_smul_eq_div_smul [algebra ℚ_[p] R] [algebra ℚ R] [is_scalar_tower ℚ ℚ_[p] R] (a : ℕ)
(x : R) : (1 / a : ℚ) • x = (1 / a : ℚ_[p]) • x :=
begin
rw ←is_scalar_tower.algebra_map_smul ℚ_[p] (1 / a : ℚ) x,
congr,
simp only [one_div],
rw [ring_hom.map_inv, map_nat_cast],
end
lemma helper_14 [algebra ℚ R] [algebra ℚ_[p] R] [is_scalar_tower ℚ ℚ_[p] R] (a : ℚ) (r : R) :
a • r = (algebra_map ℚ ℚ_[p]) a • r := by { simp }
-- generalize
lemma inv_smul_self' [algebra ℚ_[p] R] [algebra ℚ R] [is_scalar_tower ℚ ℚ_[p] R] {n : ℕ} (hn : n ≠ 0) :
(n : ℚ_[p])⁻¹ • (n : R) = 1 :=
begin
have : (n : ℚ_[p]) = (algebra_map ℚ ℚ_[p]) n, simp only [map_nat_cast],
rw this, rw ←ring_hom.map_inv,
rw ←helper_14, rw inv_smul_self, apply hn,
end
open filter
variables (p d R)
lemma nat_cast_mul_prime_pow_tendsto_zero [normed_algebra ℚ_[p] R] [norm_one_class R] :
tendsto (λ x : nat, ((d * p^x : nat) : R)) at_top (nhds 0) :=
begin
have : |(1 / p : ℝ)| < 1,
{ rw [←inv_eq_one_div, ←padic_norm_e.norm_p, abs_norm_eq_norm],
apply padic_norm_e.norm_p_lt_one, },
have f1 := tendsto_pow_const_mul_const_pow_of_abs_lt_one 0 this,
conv at f1 { congr, funext, rw [pow_zero, one_mul, ←inv_eq_one_div, ←zpow_coe_nat, inv_zpow,
←zpow_neg, ←padic_int.norm_p_pow], },
conv { congr, funext, rw nat.cast_mul, skip, skip, rw ←mul_zero (d : R), },
refine tendsto.const_mul _ (tendsto_zero_iff_norm_tendsto_zero.2 _),
convert f1,
ext,
rw [←nat.cast_pow, norm_coe_nat_eq_norm_ring_hom_map p R],
simp,
end |
function res = uminusFactorized( xi )
%UMINUSFACTORIZED Unary minus for a tangent tensor.
% RES = UMINUSFACTORIZED( XI ) performs unary minus operation
% on the tangent tensor XI given in factorized form
%
% See also addFactorized
%
% GeomCG Tensor Completion. Copyright 2013 by
% Michael Steinlechner
% Questions and contact: [email protected]
% BSD 2-clause license, see LICENSE.txt
res.Y_tilde = -xi.Y_tilde;
res.U1_tilde = -xi.U1_tilde;
res.U2_tilde = -xi.U2_tilde;
res.U3_tilde = -xi.U3_tilde;
end
|
@testset "Time grid generation" begin
@test isempty( MP.generateTimeGrid( mpSim, -12 ) )
@test MP.generateTimeGrid( mpSim, 12 ) == [0.0]
run( mpSim.sim )
@test MP.generateTimeGrid( mpSim, 12 ) == collect( 0.0:12.0:300.0 )
end # @testset "Time grid generation" |
[STATEMENT]
lemma pw_CHECK[simp, refine_pw_simps]:
"nofail (CHECK \<Phi> e)"
"inres (CHECK \<Phi> e) (Inl f) \<longleftrightarrow> \<not>\<Phi> \<and> f=e"
"inres (CHECK \<Phi> e) (Inr u) \<longleftrightarrow> \<Phi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nofail (CHECK \<Phi> e) &&& inres (CHECK \<Phi> e) (Inl f) = (\<not> \<Phi> \<and> f = e) &&& inres (CHECK \<Phi> e) (Inr u) = \<Phi>
[PROOF STEP]
unfolding enres_unfolds
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nofail (if \<Phi> then RETURN (Inr ()) else RETURN (Inl e)) &&& inres (if \<Phi> then RETURN (Inr ()) else RETURN (Inl e)) (Inl f) = (\<not> \<Phi> \<and> f = e) &&& inres (if \<Phi> then RETURN (Inr ()) else RETURN (Inl e)) (Inr u) = \<Phi>
[PROOF STEP]
by (auto simp: refine_pw_simps) |
% Title:
% ARTICLE PLUS
% ----------------------
% Description:
% A template for scientific reports/articles.
% Most necessary packages are imported,
% so this should be a good starting point.
%
% Creator: Tommy O.
% -----------------------------------------------
% Package imports
% -----------------------------------------------
\documentclass[12pt, a4paper]{article} % 'twoside' for printing
\usepackage[utf8]{inputenc} % Allow input to be UTF-8
\usepackage[english]{babel} % Alternative: 'norsk' for norwegian
\usepackage{graphicx} % For importing graphics
\usepackage{amsthm, amsfonts, amssymb} % All the AMS packages (amsmath is loaded by mathtools)
\usepackage{mathtools} % Fixes a few AMS bugs
\usepackage{etoolbox} % Used to change mark at the end of thms
\usepackage[expansion=false]{microtype}% Fixes to make typography better
\usepackage[textsize=footnotesize]{todonotes} % Adds \todo{text} comments
\setlength{\marginparwidth}{2cm} % More space for the todo notes
\usepackage{hyperref} % For \href{URL}{text}
\usepackage{fancyhdr} % For fancy headers
\usepackage{cleveref} % Clever referencing using \cref
\usepackage[sharp]{easylist} % Easy nested lists using # (sharp)
\usepackage{parskip} % Web-like paragraphs, with spacing
\usepackage{multicol} % For multiple columns
\usepackage[linesnumbered,ruled]{algorithm2e} % For algorithms
\usepackage{tikz-cd} % For commutative diagrams
\usepackage{listings} % To include source-code
\usepackage{blindtext} % To generate lorem ipsum text
\usepackage[sc]{mathpazo} % A nice font, alternative to CM (default)
\usepackage[headings]{fullpage} % Make margins smaller
% \usepackage{geometry} % May be used to set margins, alternative to fullpage
% -----------------------------------------------
% Package setup
% -----------------------------------------------
% Add spacing to theorems and related environments
\newtheoremstyle{plainspaced}
{1em} % Space above
{1em} % Space below
{} % Body font
{} % Indent amount
{\bfseries} % Theorem head font
{.} % Punctuation after theorem head
{.5em} % Space after theorem head
{\thmname{#1}\thmnumber{ #2}\thmnote{ (#3)}} % Theorem head spec (can be left
%empty, meaning `normal')
% Theorems, definition and examples
\theoremstyle{plainspaced}
\newtheorem{theorem}{Theorem}
\newtheorem{definition}{Definition}
\AtEndEnvironment{definition}{\null\hfill $\lrcorner$}
\newtheorem{example}{Example}
\AtEndEnvironment{example}{\null\hfill $\lrcorner$}
% Setup for the fancyhdr package
\rhead{\thepage}
\lhead{\nouppercase{\leftmark}}
% Section numbers in equations
\numberwithin{equation}{section}
% -----------------------------------------------
% Misc settings
% -----------------------------------------------
% Spacing between easylist items
\newcommand{\listSpace}{-0.25em}
% Some math operators
\DeclareMathOperator{\R}{\mathbb{R}}
\DeclareMathOperator{\Z}{\mathbb{Z}}
% Make theorem and definition titles bold
\makeatletter
\def\th@plain{%
\thm@notefont{}% same as heading font
\itshape % body font
}
\def\th@definition{%
\thm@notefont{}% same as heading font
\normalfont % body font
}
\makeatother
% -----------------------------------------------
% Document variables
% -----------------------------------------------
\title{An introduction to Markov Chain Monte Carlo}
\author{Sean Meling Murray}
% -----------------------------------------------
% Document start
% -----------------------------------------------
\begin{document}
\maketitle
\pagestyle{fancy}
\begin{abstract}
\todo{Write this.}
\blindmathtrue
\blindtext[1]
\end{abstract}
\tableofcontents
% -----------------------------------------------
% Document content start
% -----------------------------------------------
\section{Introduction}
\blindmathtrue
\blindtext[1]
\begin{definition}[Group]
\label{def:group}
A group is a set $S$ together with an operation $\circ$
satisfying four axioms: closure, associativity, identity and inverse.
\todo{Add citation.}
\end{definition}
There are two ways to cite the definition above.
Look at Definition~\ref{def:group}, or at \cref{def:group}.
\begin{theorem}[Pythagorean theorem]
\label{thm:pyta}
For a right triangle with legs $a$ and $b$ and hypotenuse $c$,
it is true that $a^2 + b^2 = c^2$.
\end{theorem}
\begin{proof}
Consider
\begin{equation}
\label{eqn:total_probability}
f_Y(y) = \int_{-\infty}^\infty f_Y(y\mid X=\xi )\,f_X(\xi)\,d\xi .
\end{equation}
\end{proof}
Look closely at \cref{thm:pyta}, in other words
\cref{eqn:total_probability}.
\blindmathtrue
\blindtext[1]
\section{Lists}
Here is a list:
\begin{easylist}[itemize]
\ListProperties(Space=\listSpace, Space*=\listSpace)
# But if the rebellion is to be successful.
# It is necessary not to choose between the two.
# Think that there is no alternative.
# When a rational conviction has been reached.
\end{easylist}
Another list:
\begin{easylist}[enumerate]
\ListProperties(Space=\listSpace, Space*=\listSpace)
# Think that there is no alternative.
# When a rational conviction has been reached.
# Trust in oneself, whatever happens.
\end{easylist}
\begin{equation}
T = \frac{1}{2}m v^2
\end{equation}
\section{Diagrams}
Consider the following diagram, which shows the relationship between grad,
curl and div:
\begin{equation*}
\begin{tikzcd}
f \arrow{r}{\text{grad}} \arrow[swap, bend right=30]{rr}{0} & \vec{F}
\arrow[bend left=40]{rr}{0}
\arrow{r}{\text{curl}} & \vec{F}
\arrow{r}{\text{div}} & \mathbb{R} .
\end{tikzcd}
\end{equation*}
\section{The bibliography}
A .bib file contains the bibliographic information of our document. I will
only give a simple example, since there are many tools to generate the
entries automatically; I will not explain the structure of the file itself.
\todo{Write more about this.}
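For illustration, a minimal entry for a book might look like this (the key
`russell1930' is just a placeholder name):
\begin{verbatim}
@book{russell1930,
  author    = {Bertrand Russell},
  title     = {The Conquest of Happiness},
  publisher = {George Allen and Unwin},
  year      = {1930}
}
\end{verbatim}
Once the \verb|\bibliography| line below is uncommented, the entry can be
cited with \verb|\cite{russell1930}|.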
% -----------------------------------------------
% ---- BIBLIOGRAPHY
% -----------------------------------------------
\bibliographystyle{apalike} % 'alpha' is also good
%\bibliography{bibliography} % Reference to 'bibliography.bib'
\end{document}
|
State Before: ⊢ StrictConcaveOn ℝ (Icc (-(π / 2)) (π / 2)) cos State After: x : ℝ
hx : x ∈ interior (Icc (-(π / 2)) (π / 2))
⊢ (deriv^[2]) cos x < 0 Tactic: apply strictConcaveOn_of_deriv2_neg (convex_Icc _ _) continuousOn_cos fun x hx => ?_ State Before: x : ℝ
hx : x ∈ interior (Icc (-(π / 2)) (π / 2))
⊢ (deriv^[2]) cos x < 0 State After: x : ℝ
hx : x ∈ Ioo (-(π / 2)) (π / 2)
⊢ (deriv^[2]) cos x < 0 Tactic: rw [interior_Icc] at hx State Before: x : ℝ
hx : x ∈ Ioo (-(π / 2)) (π / 2)
⊢ (deriv^[2]) cos x < 0 State After: no goals Tactic: simp [cos_pos_of_mem_Ioo hx] |
module materialTypes
use iso_c_binding
implicit none
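! The derived types below are declared with bind(c) so they can be shared
! directly with C/C++ callers; the commented-out blocks are an alternative,
! Fortran-only declaration of the same data, kept for reference.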
private
public modelState
type, bind(c) :: modelState
real (C_DOUBLE) time
real (C_DOUBLE) dt
end type modelState
! type modelState
! real *8::time;
! real *8::dt;
! end type modelState
public matObject
type, bind(C) :: matObject
integer(C_INT) ::tag;
integer(C_INT) ::matType; ! GR added
integer(C_INT) ::nParam;
integer(C_INT) ::nState
type (c_ptr) :: theParam;
type (c_ptr) :: cState;
type (c_ptr) :: tState;
type(c_funptr) ::functPtr;
type (c_ptr) :: matObjPtr;
end type matObject;
! real (C_DOUBLE) :: theParam;
! real (C_DOUBLE) :: cState;
! real (C_DOUBLE) :: tState;
! public matObject
! type :: matObject
! integer::tag
! integer::nParam
! integer::nState
! real *8, pointer ::theParam(:)
! real *8, pointer ::cState(:)
! real *8, pointer ::tState(:)
! type(c_funptr)::functPtr
! end type matObject
end module
|
State Before: R : Type u_1
inst✝ : CommRing R
v : Fin 3 → R
⊢ ↑(↑crossProduct v) v = 0 State After: R : Type u_1
inst✝ : CommRing R
v : Fin 3 → R
⊢ v 1 * v 2 - v 1 * v 2 = 0 ∧ v 0 * v 2 - v 0 * v 2 = 0 ∧ v 0 * v 1 - v 0 * v 1 = 0 ∧ ![] = 0 Tactic: simp_rw [cross_apply, mul_comm, cons_eq_zero_iff] State Before: R : Type u_1
inst✝ : CommRing R
v : Fin 3 → R
⊢ v 1 * v 2 - v 1 * v 2 = 0 ∧ v 0 * v 2 - v 0 * v 2 = 0 ∧ v 0 * v 1 - v 0 * v 1 = 0 ∧ ![] = 0 State After: no goals Tactic: exact ⟨sub_self _, sub_self _, sub_self _, zero_empty.symm⟩ |
State Before: K : Type u
inst✝ : CommRing K
P : Sort v
n : K[X]
d : { x // x ∈ K[X]⁰ }
f : K[X] → K[X] → P
H : ∀ {p q p' q' : K[X]}, q ∈ K[X]⁰ → q' ∈ K[X]⁰ → q' * p = q * p' → f p q = f p' q'
⊢ RatFunc.liftOn { toFractionRing := Localization.mk n d } f H = f n ↑d State After: K : Type u
inst✝ : CommRing K
P : Sort v
n : K[X]
d : { x // x ∈ K[X]⁰ }
f : K[X] → K[X] → P
H : ∀ {p q p' q' : K[X]}, q ∈ K[X]⁰ → q' ∈ K[X]⁰ → q' * p = q * p' → f p q = f p' q'
⊢ Localization.liftOn { toFractionRing := Localization.mk n d }.toFractionRing (fun p q => f p ↑q)
(_ : ∀ {p p' : K[X]} {q q' : { x // x ∈ K[X]⁰ }}, ↑(Localization.r K[X]⁰) (p, q) (p', q') → f p ↑q = f p' ↑q') =
f n ↑d Tactic: rw [RatFunc.liftOn] State Before: K : Type u
inst✝ : CommRing K
P : Sort v
n : K[X]
d : { x // x ∈ K[X]⁰ }
f : K[X] → K[X] → P
H : ∀ {p q p' q' : K[X]}, q ∈ K[X]⁰ → q' ∈ K[X]⁰ → q' * p = q * p' → f p q = f p' q'
⊢ Localization.liftOn { toFractionRing := Localization.mk n d }.toFractionRing (fun p q => f p ↑q)
(_ : ∀ {p p' : K[X]} {q q' : { x // x ∈ K[X]⁰ }}, ↑(Localization.r K[X]⁰) (p, q) (p', q') → f p ↑q = f p' ↑q') =
f n ↑d State After: no goals Tactic: exact Localization.liftOn_mk _ _ _ _ |
* AB08ND EXAMPLE PROGRAM TEXT
* Copyright (c) 2002-2020 NICONET e.V.
*
* .. Parameters ..
DOUBLE PRECISION ZERO
PARAMETER ( ZERO = 0.0D0 )
INTEGER NIN, NOUT
PARAMETER ( NIN = 5, NOUT = 6 )
INTEGER NMAX, MMAX, PMAX
PARAMETER ( NMAX = 20, MMAX = 20, PMAX = 20 )
INTEGER MPMAX
PARAMETER ( MPMAX = MAX( MMAX, PMAX ) )
INTEGER LDA, LDB, LDC, LDD, LDAF, LDBF, LDQ, LDZ
PARAMETER ( LDA = NMAX, LDB = NMAX, LDC = PMAX,
$ LDD = PMAX, LDAF = NMAX+MPMAX,
$ LDBF = NMAX+PMAX, LDQ = 1, LDZ = 1 )
INTEGER LDWORK
PARAMETER ( LDWORK = MAX( MAX( MPMAX+1, NMAX ) +
$ MAX( 3*(MPMAX+1), NMAX+MPMAX ),
$ 8*NMAX ) )
* .. Local Scalars ..
DOUBLE PRECISION TOL
INTEGER DINFZ, I, INFO, J, M, N, NINFZ, NKROL, NKROR,
$ NU, P, RANK
CHARACTER*1 EQUIL
* .. Local Arrays ..
DOUBLE PRECISION A(LDA,NMAX), AF(LDAF,NMAX+PMAX), ALFI(NMAX),
$ ALFR(NMAX), B(LDB,MMAX), BETA(NMAX),
$ BF(LDBF,MMAX+NMAX), C(LDC,NMAX), D(LDD,MMAX),
$ DWORK(LDWORK), Q(LDQ,1), Z(LDZ,1)
INTEGER INFZ(NMAX), IWORK(MPMAX+1), KRONL(NMAX+1),
$ KRONR(NMAX+1)
* .. External Subroutines ..
EXTERNAL AB08ND, DGEGV
* .. Intrinsic Functions ..
INTRINSIC MAX
* .. Executable Statements ..
*
WRITE ( NOUT, FMT = 99999 )
* Skip the heading in the data file and read the data.
READ ( NIN, FMT = '()' )
READ ( NIN, FMT = * ) N, M, P, TOL, EQUIL
IF ( N.LT.0 .OR. N.GT.NMAX ) THEN
WRITE ( NOUT, FMT = 99972 ) N
ELSE
READ ( NIN, FMT = * ) ( ( A(I,J), J = 1,N ), I = 1,N )
IF ( M.LT.0 .OR. M.GT.MMAX ) THEN
WRITE ( NOUT, FMT = 99971 ) M
ELSE
READ ( NIN, FMT = * ) ( ( B(I,J), J = 1,M ), I = 1,N )
IF ( P.LT.0 .OR. P.GT.PMAX ) THEN
WRITE ( NOUT, FMT = 99970 ) P
ELSE
READ ( NIN, FMT = * ) ( ( C(I,J), J = 1,N ), I = 1,P )
READ ( NIN, FMT = * ) ( ( D(I,J), J = 1,M ), I = 1,P )
* Check the observability and compute the ordered set of
* the observability indices (call the routine with M = 0).
CALL AB08ND( EQUIL, N, 0, P, A, LDA, B, LDB, C, LDC, D,
$ LDD, NU, RANK, DINFZ, NKROR, NKROL, INFZ,
$ KRONR, KRONL, AF, LDAF, BF, LDBF, TOL,
$ IWORK, DWORK, LDWORK, INFO )
*
IF ( INFO.NE.0 ) THEN
WRITE ( NOUT, FMT = 99998 ) INFO
ELSE
WRITE ( NOUT, FMT = 99994 ) ( KRONL(I), I = 1,P )
IF ( NU.EQ.0 ) THEN
WRITE ( NOUT, FMT = 99993 )
ELSE
WRITE ( NOUT, FMT = 99992 ) N - NU
WRITE ( NOUT, FMT = 99991 )
WRITE ( NOUT, FMT = 99990 )
DO 20 I = 1, NU
WRITE ( NOUT, FMT = 99989 )
$ ( AF(I,J), J = 1,NU )
20 CONTINUE
END IF
END IF
* Check the controllability and compute the ordered set of
* the controllability indices (call the routine with P = 0)
CALL AB08ND( EQUIL, N, M, 0, A, LDA, B, LDB, C, LDC, D,
$ LDD, NU, RANK, DINFZ, NKROR, NKROL, INFZ,
$ KRONR, KRONL, AF, LDAF, BF, LDBF, TOL,
$ IWORK, DWORK, LDWORK, INFO )
*
IF ( INFO.NE.0 ) THEN
WRITE ( NOUT, FMT = 99998 ) INFO
ELSE
WRITE ( NOUT, FMT = 99988 ) ( KRONR(I), I = 1,M )
IF ( NU.EQ.0 ) THEN
WRITE ( NOUT, FMT = 99987 )
ELSE
WRITE ( NOUT, FMT = 99986 ) N - NU
WRITE ( NOUT, FMT = 99985 )
WRITE ( NOUT, FMT = 99990 )
DO 40 I = 1, NU
WRITE ( NOUT, FMT = 99989 )
$ ( AF(I,J), J = 1,NU )
40 CONTINUE
END IF
END IF
* Compute the structural invariants of the given system.
CALL AB08ND( EQUIL, N, M, P, A, LDA, B, LDB, C, LDC, D,
$ LDD, NU, RANK, DINFZ, NKROR, NKROL, INFZ,
$ KRONR, KRONL, AF, LDAF, BF, LDBF, TOL,
$ IWORK, DWORK, LDWORK, INFO )
*
IF ( INFO.NE.0 ) THEN
WRITE ( NOUT, FMT = 99998 ) INFO
ELSE
WRITE ( NOUT, FMT = 99984 ) NU
IF ( NU.GT.0 ) THEN
* Compute the invariant zeros of the given system.
* Workspace: need 8*NU.
WRITE ( NOUT, FMT = 99983 )
CALL DGEGV( 'No vectors', 'No vectors', NU, AF,
$ LDAF, BF, LDBF, ALFR, ALFI, BETA, Q,
$ LDQ, Z, LDZ, DWORK, LDWORK, INFO )
*
IF ( INFO.NE.0 ) THEN
WRITE ( NOUT, FMT = 99997 ) INFO
ELSE
WRITE ( NOUT, FMT = 99981 )
DO 60 I = 1, NU
IF ( ALFI(I).EQ.ZERO ) THEN
WRITE ( NOUT, FMT = 99980 )
$ ALFR(I)/BETA(I)
ELSE
WRITE ( NOUT, FMT = 99979 )
$ ALFR(I)/BETA(I),
$ ALFI(I)/BETA(I)
END IF
60 CONTINUE
WRITE ( NOUT, FMT = 99982 )
END IF
END IF
NINFZ = 0
DO 80 I = 1, DINFZ
IF ( INFZ(I).GT.0 ) THEN
NINFZ = NINFZ + INFZ(I)*I
END IF
80 CONTINUE
WRITE ( NOUT, FMT = 99978 ) NINFZ
IF ( NINFZ.GT.0 ) THEN
DO 100 I = 1, DINFZ
WRITE ( NOUT, FMT = 99977 ) INFZ(I), I
100 CONTINUE
END IF
WRITE ( NOUT, FMT = 99976 ) NKROR
IF ( NKROR.GT.0 ) WRITE ( NOUT, FMT = 99975 )
$ ( KRONR(I), I = 1,NKROR )
WRITE ( NOUT, FMT = 99974 ) NKROL
IF ( NKROL.GT.0 ) WRITE ( NOUT, FMT = 99973 )
$ ( KRONL(I), I = 1,NKROL )
END IF
END IF
END IF
END IF
*
STOP
*
99999 FORMAT (' AB08ND EXAMPLE PROGRAM RESULTS',/1X)
99998 FORMAT (' INFO on exit from AB08ND = ',I2)
99997 FORMAT (' INFO on exit from DGEGV = ',I2)
99994 FORMAT (' The left Kronecker indices of (A,C) are ',/(20(I3,2X)))
99993 FORMAT (/' The system (A,C) is completely observable ')
99992 FORMAT (/' The dimension of the observable subspace = ',I3)
99991 FORMAT (/' The output decoupling zeros are the eigenvalues of th',
$ 'e matrix AF. ')
99990 FORMAT (/' The matrix AF is ')
99989 FORMAT (20(1X,F8.4))
99988 FORMAT (//' The right Kronecker indices of (A,B) are ',/(20(I3,2X)
$ ))
99987 FORMAT (/' The system (A,B) is completely controllable ')
99986 FORMAT (/' The dimension of the controllable subspace = ',I3)
99985 FORMAT (/' The input decoupling zeros are the eigenvalues of the',
$ ' matrix AF. ')
99984 FORMAT (//' The number of finite invariant zeros = ',I3)
99983 FORMAT (/' The finite invariant zeros are ')
99982 FORMAT (/' which correspond to the generalized eigenvalues of (l',
$ 'ambda*BF - AF).')
99981 FORMAT (/' real part imag part ')
99980 FORMAT (1X,F9.4)
99979 FORMAT (1X,F9.4,6X,F9.4)
99978 FORMAT (//' The number of infinite zeros = ',I3)
99977 FORMAT ( I4,' infinite zero(s) of order ',I3)
99976 FORMAT (/' The number of right Kronecker indices = ',I3)
99975 FORMAT (/' Right Kronecker (column) indices of (A,B,C,D) are ',
$ /(20(I3,2X)))
99974 FORMAT (/' The number of left Kronecker indices = ',I3)
99973 FORMAT (/' The left Kronecker (row) indices of (A,B,C,D) are ',
$ /(20(I3,2X)))
99972 FORMAT (/' N is out of range.',/' N = ',I5)
99971 FORMAT (/' M is out of range.',/' M = ',I5)
99970 FORMAT (/' P is out of range.',/' P = ',I5)
END
|
#############################################################################
####
##
#A anupq.gi ANUPQ package Eamonn O'Brien
#A & Frank Celler
##
#Y Copyright 1992-1994, Lehrstuhl D fuer Mathematik, RWTH Aachen, Germany
#Y Copyright 1992-1994, School of Mathematical Sciences, ANU, Australia
##
#############################################################################
##
#F ANUPQDirectoryTemporary( <dir> ) . . . . . redefine ANUPQ temp directory
##
## calls `CreateDir' to create <dir>, which must be a string,
## and if successful a directory object for <dir> is both assigned to
## `ANUPQData.tmpdir' and returned. The field `ANUPQData.outfile' is also
## set to be a file in `ANUPQData.tmpdir', and on exit from {\GAP} <dir> is
## removed.
##
InstallGlobalFunction(ANUPQDirectoryTemporary, function(dir)
local created;
# check arguments
if not IsString(dir) then
Error(
"usage: ANUPQDirectoryTemporary( <dir> ) : <dir> must be a string.\n");
fi;
# create temporary directory
CreateDir(dir);
if not IsDirectoryPath(dir) then
return fail;
fi;
Add( GAPInfo.DirectoriesTemporary, dir );
ANUPQData.tmpdir := Directory(dir);
ANUPQData.outfile := Filename(ANUPQData.tmpdir, "PQ_OUTPUT");
return ANUPQData.tmpdir;
end);
#############################################################################
##
#F ANUPQerrorPq( <param> ) . . . . . . . . . . . . . . . . . report an error
##
InstallGlobalFunction( ANUPQerrorPq, function( param )
Error(
"Valid Options:\n",
" \"ClassBound\", <bound>\n",
" \"Prime\", <prime>\n",
" \"Exponent\", <exponent>\n",
" \"Metabelian\"\n",
" \"OutputLevel\", <level>\n",
" \"Verbose\"\n",
" \"SetupFile\", <file>\n",
" \"PqWorkspace\", <workspace>\n",
"Illegal Parameter: \"", param, "\"" );
end );
#############################################################################
##
#F ANUPQextractPqArgs( <args> ) . . . . . . . . . . . . . extract arguments
##
InstallGlobalFunction( ANUPQextractPqArgs, function( args )
local CR, i, act, match;
# allow to give only a prefix
match := function( g, w )
return 1 < Length(g)
and Length(g) <= Length(w)
and w{[1..Length(g)]} = g;
end;
# extract arguments
CR := rec();
i := 2;
while i <= Length(args) do
act := args[i];
if not IsString( act ) then ANUPQerrorPq( act ); fi;
# "ClassBound", <class>
if match( act, "ClassBound" ) then
i := i + 1;
CR.ClassBound := args[i];
# "Prime", <prime>
elif match( act, "Prime" ) then
i := i + 1;
CR.Prime := args[i];
# "Exponent", <exp>
elif match( act, "Exponent" ) then
i := i + 1;
CR.Exponent := args[i];
# "Metabelian"
elif match( act, "Metabelian" ) then
CR.Metabelian := true;
# "Output", <level>
elif match( act, "OutputLevel" ) then
i := i + 1;
CR.OutputLevel := args[i];
CR.Verbose := true;
# "SetupFile", <file>
elif match( act, "SetupFile" ) then
i := i + 1;
CR.SetupFile := args[i];
# "PqWorkspace", <workspace>
elif match( act, "PqWorkspace" ) then
i := i + 1;
CR.PqWorkspace := args[i];
# "Verbose"
elif match( act, "Verbose" ) then
CR.Verbose := true;
# signal an error
else
ANUPQerrorPq( act );
fi;
i := i + 1;
od;
return CR;
end );
#############################################################################
##
#V ANUPQGlobalVariables
##
InstallValue( ANUPQGlobalVariables,
[ "F", # a free group
"MapImages" # images of the generators in G
] );
#############################################################################
##
#F ANUPQReadOutput . . . . read pq output without affecting global variables
##
InstallGlobalFunction( ANUPQReadOutput, function( file )
local globalvars, var, result;
globalvars := [ "ANUPQmagic", "ANUPQautos", "ANUPQgroups" ];
for var in globalvars do
HideGlobalVariables( var );
od;
Read( file );
result := rec();
for var in globalvars do
if IsBoundGlobal( var ) then
result.(var) := ValueGlobal( var );
else
result.(var) := fail;
fi;
od;
for var in globalvars do
UnhideGlobalVariables( var );
od;
return result;
end );
#############################################################################
##
#F PqEpimorphism( <arg> : <options> ) . . . . . epimorphism onto p-quotient
##
InstallGlobalFunction( PqEpimorphism, function( arg )
return PQ_EPI_OR_PCOVER(arg : PqEpiOrPCover := "pQepi");
end );
#############################################################################
##
#F Pq( <arg> : <options> ) . . . . . . . . . . . . . . . . . . . p-quotient
##
InstallGlobalFunction( Pq, function( arg )
return PQ_EPI_OR_PCOVER(arg : PqEpiOrPCover := "pQuotient");
end );
#############################################################################
##
#F PqPCover( <arg> : <options> ) . . . . . . p-covering group of p-quotient
##
InstallGlobalFunction( PqPCover, function( arg )
return PQ_EPI_OR_PCOVER(arg : PqEpiOrPCover := "pCover");
end );
#############################################################################
##
#F PQ_GROUP_FROM_PCP(<datarec>,<out>) . extract gp from pq pcp file into GAP
##
InstallGlobalFunction( PQ_GROUP_FROM_PCP, function( datarec, out )
local gens;
HideGlobalVariables( "F", "MapImages" );
Read( datarec.outfname );
if out = "pCover" then
datarec.pCover := ValueGlobal( "F" );
IsPGroup( datarec.pCover );
else
if IsBound(datarec.pcgs) then
gens := datarec.pcgs;
else
gens := GeneratorsOfGroup( datarec.group );
fi;
datarec.pQepi := GroupHomomorphismByImagesNC(
datarec.group,
ValueGlobal( "F" ),
gens,
ValueGlobal( "MapImages" )
);
SetIsSurjective( datarec.pQepi, true );
datarec.pQuotient := Image( datarec.pQepi );
IsPGroup( datarec.pQuotient );
fi;
UnhideGlobalVariables( "F", "MapImages" );
end );
#############################################################################
##
#F TRIVIAL_PQ_GROUP(<datarec>, <out>) . . . extract gp when trivial into GAP
##
InstallGlobalFunction( TRIVIAL_PQ_GROUP, function( datarec, out )
local Q;
Q := TrivialGroup( IsPcGroup );
if out = "pCover" then
datarec.pCover := Q;
IsPGroup( datarec.pCover );
else
datarec.pQepi := GroupHomomorphismByFunction(
datarec.group, Q, g -> One(Q) );
SetIsSurjective( datarec.pQepi, true );
datarec.pQuotient := Image( datarec.pQepi );
IsPGroup( datarec.pQuotient );
fi;
end );
#############################################################################
##
#F PQ_EPI_OR_PCOVER(<args>:<options>) . p-quotient, its epi. or its p-cover
##
InstallGlobalFunction( PQ_EPI_OR_PCOVER, function( args )
local out, datarec, AtClass, trivial;
out := ValueOption("PqEpiOrPCover");
datarec := ANUPQ_ARG_CHK("Pq", args);
datarec.filter := ["Output file in", "Group presentation"];
VALUE_PQ_OPTION("Identities", [], datarec);
if datarec.calltype = "GAP3compatible" then
# ANUPQ_ARG_CHK calls PQ_EPI_OR_PCOVER itself in this case
# (so datarec.(out) has already been computed)
return datarec.(out);
fi;
trivial := IsEmpty( datarec.group!.GeneratorsOfMagmaWithInverses );
if trivial then
; #the `pq' binary spits out nonsense if given a trivial pres'n
elif datarec.calltype = "interactive" and
( IsBound(datarec.pQuotient) or IsBound(datarec.pCover) ) then
AtClass := function()
return IsBound(datarec.complete) and datarec.complete or
IsBound(datarec.class) and datarec.class = datarec.ClassBound;
end;
if IsBound(datarec.pcoverclass) and
datarec.pcoverclass = datarec.class and not AtClass() then
# ``reduce'' the p-cover to a p-class
PQ_FINISH_NEXT_CLASS( datarec );
fi;
while not AtClass() do
PQ_NEXT_CLASS( datarec );
od;
# the following is not executed if the while-loop is
# executed at least once
if IsBound( datarec.(out) ) then
return datarec.(out); # it had already been computed
fi;
else
PQ_PC_PRESENTATION(datarec, "pQ");
if datarec.class < Minimum(63, datarec.ClassBound) then
datarec.complete := true;
fi;
fi;
trivial := trivial or IsEmpty(datarec.ngens) or datarec.ngens[1] = 0;
if not trivial then
if out = "pCover" then
PQ_P_COVER( datarec );
fi;
PushOptions( rec(nonuser := true) );
PQ_WRITE_PC_PRESENTATION(datarec, datarec.outfname);
PopOptions();
fi;
if datarec.calltype = "non-interactive" then
PQ_COMPLETE_NONINTERACTIVE_FUNC_CALL(datarec);
if IsBound( datarec.setupfile ) then
if trivial then
return fail;
fi;
return true;
fi;
fi;
if trivial then
TRIVIAL_PQ_GROUP( datarec, out );
else
# read group and images from file
PQ_GROUP_FROM_PCP( datarec, out );
fi;
return datarec.(out);
end );
#############################################################################
##
#F PqRecoverDefinitions( <G> ) . . . . . . . . . . . . . . . . . definitions
##
## This function finds a definition for each generator of the p-group <G>.
## These definitions need not be the same as the ones used by pq. But
## they serve the purpose of defining each generator as a commutator or
## power of earlier ones. This is useful for extending an automorphism that
## is given on a set of minimal generators of <G>.
##
InstallGlobalFunction( PqRecoverDefinitions, function( G )
local col, gens, definitions, h, g, rhs, gen;
col := ElementsFamily( FamilyObj( G ) )!.rewritingSystem;
gens := GeneratorsOfRws( col );
definitions := [];
for h in [1..NumberGeneratorsOfRws( col )] do
rhs := GetPowerNC( col, h );
if Length( rhs ) = 1 then
gen := Position( gens, rhs );
if not IsBound( definitions[gen] ) then
definitions[gen] := h;
fi;
fi;
for g in [1..h-1] do
rhs := GetConjugateNC( col, h, g );
if Length( rhs ) = 2 then
gen := SubSyllables( rhs, 2, 2 );
gen := Position( gens, gen );
if not IsBound( definitions[gen] ) then
definitions[gen] := [h, g];
fi;
fi;
od;
od;
return definitions;
end );
#############################################################################
##
#F PqAutomorphism( <epi>, <autoimages> ) . . . . . . . . . . . . definitions
##
## Take an automorphism of the preimage and produce the induced automorphism
## of the image of the epimorphism.
##
InstallGlobalFunction( PqAutomorphism, function( epi, autoimages )
local G, p, gens, definitions, d, epimages, i, pos, def,
phi;
G := Image( epi );
p := PrimePGroup( G );
gens := GeneratorsOfGroup( G );
autoimages := List( autoimages, im->Image( epi, im ) );
## Get a definition for each generator.
definitions := PqRecoverDefinitions( G );
d := Number( [1..Length(definitions)],
i->not IsBound( definitions[i] ) );
## Find the images for the defining generators of G under the
## automorphism. We have to be careful, as some of the generators for
## the source might be redundant as generators of G.
epimages := List( GeneratorsOfGroup(Source(epi)), g->Image(epi,g) );
for i in [1..d] do
## Find G.i ...
pos := Position( epimages, G.(i) );
if pos = fail then
Error( "generator ", i, " is not the image of a generator" );
fi;
## ... and set its image.
definitions[i] := autoimages[pos];
od;
## Replace each definition by its image under the automorphism.
for i in [d+1..Length(definitions)] do
def := definitions[i];
if IsInt( def ) then
definitions[i] := definitions[ def ]^p;
else
definitions[i] := Comm( definitions[ def[1] ],
definitions[ def[2] ] );
fi;
od;
phi := GroupHomomorphismByImages( G, G, gens, definitions );
SetIsBijective( phi, true );
return phi;
end );
#############################################################################
##
#F PqLeftNormComm( <words> ) . . . . . . . . . . . . . left norm commutator
##
## returns for a list <words> of words in the generators of a group the left
## norm commutator of <words>, e.g.~if <w1>, <w2>, <w3> are words in the
## generators of some free or fp group then `PqLeftNormComm( [<w1>, <w2>,
## <w3>] );' is equivalent to `Comm( Comm( <w1>, <w2> ), <w3> );'. Actually,
## the only restrictions on <words> are that <words> must constitute a list
## of group elements of the same group (so a list of permutations is
## allowed, for example) and that <words> must contain at least *two* words.
##
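##
## For instance, with permutations (a small hand-worked example, not taken
## from the ANUPQ manual):
##
## \beginexample
## gap> PqLeftNormComm( [ (1,2), (2,3), (3,4) ] );
## (1,3,4)
## \endexample
##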
InstallGlobalFunction( PqLeftNormComm, function( words )
local fam, comm, word;
if not IsList(words) or 2 > Length(words) or
not ForAll(words, IsMultiplicativeElementWithInverse) then
Error( "<words> should be a list of at least 2 group elements\n" );
else
fam := FamilyObj(words[1]);
if not ForAll(words, w -> IsIdenticalObj(FamilyObj(w), fam)) then
Error( "<words> should belong to the same group\n" );
fi;
fi;
comm := words[1];
for word in words{[2 .. Length(words)]} do
comm := Comm(comm, word);
od;
return comm;
end );
#############################################################################
##
#F PqGAPRelators( <group>, <rels> ) . . . . . . . . pq relators as GAP words
##
## returns a list of words that {\GAP} understands, given a list <rels> of
## strings in the string representations of the generators of the fp group
## <group> prepared as a list of relators for the `pq' program.
##
## *Note:*
## The `pq' program does not use `/' to indicate multiplication by an
## inverse and uses square brackets to represent (left normed) commutators.
## Also, even though the `pq' program accepts relations, all elements of
## <rels> *must* be in relator form, i.e.~a relation of form `<w1> = <w2>'
## must be written as `<w1>*(<w2>)^-1'.
##
## Here is an example:
##
## \beginexample
## gap> F := FreeGroup("a", "b");
## gap> PqGAPRelators(F, [ "a*b^2", "[a,b]^2*a", "([a,b,a,b,b]*a*b)^2*a" ]);
## [ a*b^2, a^-1*b^-1*a*b*a^-1*b^-1*a*b*a, b^-1*a^-1*b^-1*a^-1*b*a*b^-1*a*b*a^
## -1*b*a^-1*b^-1*a*b*a*b^-1*a^-1*b^-1*a^-1*b*a*b^-1*a*b^-1*a^-1*b*a^-1*b^
## -1*a*b*a*b*a^-1*b*a*b^-1*a*b*a^-1*b*a^-1*b^-1*a*b*a*b^-1*a^-1*b^-1*a^
## -1*b*a*b^-1*a*b^-1*a^-1*b*a^-1*b^-1*a*b*a*b^2*a*b*a ]
## \endexample
##
InstallGlobalFunction( PqGAPRelators, function( group, rels )
local gens, relgens, diff, g;
if not( IsFpGroup(group) ) then
Error("<group> must be an fp group\n");
fi;
gens := List( FreeGeneratorsOfFpGroup(group), String );
if not ForAll(rels, rel -> Position(rel, '/') = fail) then
Error( "pq binary does not understand `/' in relators\n" );
fi;
relgens := Set( Concatenation(
List( rels, rel -> Filtered(
SplitString(rel, "", "*[]()^, "),
str -> Int(str) = fail) ) ) );
diff := Difference(relgens, gens);
if not IsEmpty(diff) then
Error( "generators: ", diff,
"\nare not among the generators of the group supplied\n" );
fi;
CallFuncList(HideGlobalVariables, gens);
for g in FreeGeneratorsOfFpGroup(group) do
ASS_GVAR(String(g), g);
od;
rels := List( rels, rel -> EvalString(
ReplacedString(
ReplacedString(rel, "]", "])"),
"[", "PqLeftNormComm(["
) ) );
CallFuncList(UnhideGlobalVariables, gens);
return rels;
end );
#############################################################################
##
#F PqParseWord( <F>, <word> ) . . . . . . . . . . . . parse word through GAP
#F PqParseWord( <n>, <word> )
##
## parse <word> through {\GAP}, where <word> is a string representing a word
## in the generators of <F> (the first form of `PqParseWord') or <n> pc
## generators `x1,...,x<n>'. `PqParseWord' is provided as a rough-and-ready
## check of <word> for syntax errors. A syntax error will cause the entering
## of a `break'-loop, in which the error message may or may not be
## meaningful (depending on whether the syntax error gets caught at the
## {\GAP} or kernel level).
##
## *Note:*
## The reason the generators *must* be `x1,...,x<n>' in the second form of
## `PqParseWord' is that these are the pc generator names used by the `pq'
## program (as distinct from the generator names for the group provided by
## the user to a function like `Pq' that invokes the `pq' program).
##
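##
## For instance (an illustrative call, not from the ANUPQ manual):
##
## \beginexample
## gap> PqParseWord(3, "x1*x3^2*x2");
## true
## \endexample
##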
InstallGlobalFunction( PqParseWord, function( n, word )
local ParseOnBreak, ParseOnBreakMessage, NormalOnBreak, NormalOnBreakMessage,
parts, gens;
if IsGroup(n) or
Position(word, '[') <> fail or Position(word, '(') <> fail then
#pass word through GAP's parser to see if it's ok
NormalOnBreak := OnBreak;
ParseOnBreak := function()
Where(0);
OnBreak := NormalOnBreak;
end;
OnBreak := ParseOnBreak;
if IsFunction(OnBreakMessage) then
NormalOnBreakMessage := OnBreakMessage;
ParseOnBreakMessage := function()
Print( " syntax error in: ", word, "\n" );
Print( " you can type: 'quit;' to quit to outer loop.\n" );
OnBreakMessage := NormalOnBreakMessage;
end;
OnBreakMessage := ParseOnBreakMessage;
fi;
if IsGroup(n) then
PqGAPRelators(n, [ word ]);
else
PqGAPRelators(FreeGroup(n, "x"), [ word ]);
fi;
OnBreak := NormalOnBreak;
if IsFunction(OnBreakMessage) then
OnBreakMessage := NormalOnBreakMessage;
fi;
else
parts := List( SplitString(word, "*"), part -> SplitString(part, "^") );
if ForAny( parts, part -> 2 < Length(part) or
2 = Length(part) and not IsInt( Int(part[2]) ) )
then
Error( "detected invalid exponent in argument <word>: ", word, "\n");
fi;
if ForAny( parts, part -> IsEmpty( part[1] ) or part[1][1] <> 'x' ) then
Error( "generators in argument <word> must all be of form:\n",
"`x<i>' for some integer <i>\n" );
fi;
gens := List( parts, part -> Int( part[1]{[2 .. Length(part[1])]} ) );
if not ForAll(gens, gen -> IsPosInt(gen) and gen <= n) then
Error( "generators in argument <word> must be in the range: ",
"x1,...,x", n, "\n" );
fi;
fi;
return true;
end );
#############################################################################
##
#F PQ_EVALUATE( <string> ) . . . . . . . . . evaluate a string emulating GAP
##
## For each substring of the string <string> that is a statement (i.e. ends
## in a `;'), `PQ_EVALUATE( <string> )' evaluates it in the same way {\GAP}
## would. If the substring is further followed by a `;' (i.e. there was
## `;;'), this is an indication that the statement would produce no output;
## otherwise the output that the user would normally see if she typed the
## statement interactively is displayed.
##
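##
## For instance (an illustrative call, not from the ANUPQ sources),
## `PQ_EVALUATE( "a := 2^3;; a + 1;" );' reads `a := 2^3;' silently
## (because of the `;;') and then displays `9', just as typing the two
## statements at the {\GAP} prompt would.
##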
InstallGlobalFunction(PQ_EVALUATE, function(string)
local from, pos, statement, parts, var;
from := 0;
pos := Position(string, ';', from);
while pos <> fail do
statement := string{[from + 1..pos]};
statement := ReplacedString(statement," last "," ANUPQData.example.last ");
if pos < Length(string) and string[pos + 1] = ';' then
Read( InputTextString(statement) );
from := pos + 1;
else
parts := SplitString(statement, "", " \n");
if 1 < Length(parts) and parts[2] = ":=" then
Read( InputTextString(statement) );
Read( InputTextString(
Concatenation( "View(", parts[1], "); Print(\"\\n\");" ) ) );
ANUPQData.example.last := parts[1];
else
var := EvalString(statement);
View( var );
Print( "\n" );
ANUPQData.example.last := var;
fi;
from := pos;
fi;
pos := Position(string, ';', from);
od;
end );
#############################################################################
##
#F PqExample() . . . . . . . . . . execute a pq example or display the index
#F PqExample( <example>[, PqStart][, Display] )
#F PqExample( <example>[, PqStart][, <filename>] )
##
## With no arguments, or with single argument `"index"', or a string
## <example> that is not the name of a file in the `examples' directory, an
## index of available examples is displayed.
##
## With just the one argument <example> that is the name of a file in the
## `examples' directory, the example contained in that file is executed in
## its simplest form. Some examples accept options which you may use to
## modify some of the options used in the commands of the example. To find
## out which options an example accepts, use one of the mechanisms for
## displaying the example described below.
##
## Some examples have both non-interactive and interactive forms; those that
## are non-interactive only have a name ending in `-ni'; those that are
## interactive only have a name ending in `-i'; examples with names ending
## in `.g' also have only one form; all other examples have both
## non-interactive and interactive forms and for these giving `PqStart' as
## second argument invokes `PqStart' initially and makes the appropriate
## adjustments so that the example is executed or displayed using
## interactive functions.
##
## If `PqExample' is called with last (second or third) argument `Display'
## then the example is displayed without being executed. If the last
## argument is a non-empty string <filename> then the example is also
## displayed without being executed but is also written to a file with that
## name. Passing an empty string as last argument has the same effect as
## passing `Display'.
##
## *Note:*
## The variables used in `PqExample' are local to the running of
## `PqExample', so there's no danger of having some of your variables
## over-written. However, they are not completely lost either. They are
## saved to a record `ANUPQData.examples.vars', i.e.~if `F' is a variable
## used in the example then you will be able to access it after `PqExample'
## has finished as `ANUPQData.examples.vars.F'.
##
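##
## For instance, `PqExample();' (with no arguments) simply displays the
## index of available examples, as described above.
##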
InstallGlobalFunction(PqExample, function(arg)
local name, file, instream, line, input, doPqStart, vars, var, printonly,
filename, DoAltAction, GetNextLine, PrintLine, action, datarec, optname,
linewidth, sizescreen, CheckForCompoundKeywords, hasFunctionExpr, parts,
iscompoundStatement, compoundDepth;
sizescreen := SizeScreen();
if sizescreen[1] < 80 then
SizeScreen([80, sizescreen[2]]);
linewidth := 80;
else
linewidth := sizescreen[1];
fi;
if IsEmpty(arg) then
name := "index";
else
name := arg[1];
fi;
if name = "README" then
file := fail;
else
file := Filename(DirectoriesPackageLibrary( "anupq", "examples"), name);
fi;
if file = fail then
Info(InfoANUPQ + InfoWarning, 1,
"Sorry! There is no ANUPQ example with name `", name, "'",
" ... displaying index.");
name := "index";
file := Filename(DirectoriesPackageLibrary( "anupq", "examples"), name);
fi;
if name <> "index" then
doPqStart := false;
if Length(arg) > 1 then
# At this point the name of the variable <printonly> doesn't make
# sense; however, if the value assigned to <printonly> is `Display'
# or an empty string then we ``print only'' and if it is a non-empty
# string then it is assumed to be a filename and we `LogTo' that filename.
printonly := arg[Minimum(3, Length(arg))];
if arg[2] = PqStart then
if 2 < Length(name) and
name{[Length(name) - 1 .. Length(name)]} in ["-i", "ni", ".g"] then
Error( "example does not have a (different) interactive form\n" );
fi;
doPqStart := true;
fi;
else
printonly := false;
fi;
DoAltAction := function()
if doPqStart then
if action[2] = "do" then
# uncomment line
line := line{[2..Length(line)]};
else
# replace a variable with a proc id
line := ReplacedString( line, action[5], action[3] );
fi;
fi;
end;
if printonly = Display or IsString(printonly) then
GetNextLine := function()
local from, to;
line := ReadLine(instream);
if line = fail then
return;
elif IsBound(action) then
action := SplitString(action, "", "# <>\n");
DoAltAction();
Unbind(action);
elif 3 < Length(line) and line{[1..4]} = "#alt" then
# only "#alt" actions recognised
action := line;
elif IsMatchingSublist(line, "#comment:") then
line := ReplacedString(line, " supplying", "");
from := Position(line, ' ');
to := Position(line, '<', from);
Info(InfoANUPQ, 1,
"In the next command, you may", line{[from .. to - 1]});
from := to + 1;
to := Position(line, '>') - 1;
Info(InfoANUPQ, 1, "supplying to `PqExample' the option: `",
line{[from .. to]}, "'");
fi;
end;
if IsString(printonly) and printonly <> "" then
filename := printonly;
LogTo( filename ); #Make sure it's empty and writable
fi;
PrintLine := function()
if IsMatchingSublist(line, "##") then
line := line{[2..Length(line)]};
elif line[1] = '#' then
return;
fi;
Print( ReplacedString(line, ";;", ";") );
end;
printonly := true; #now the name of the variable makes sense
else
printonly := false;
ANUPQData.example := rec(options := rec());
datarec := ANUPQData.example.options;
CheckForCompoundKeywords := function()
local compoundkeywords;
compoundkeywords := Filtered( SplitString(line, "", "( ;\n"),
w -> w in ["do", "od", "if", "fi",
"repeat", "until",
"function", "end"] );
hasFunctionExpr := "function" in compoundkeywords;
compoundDepth := compoundDepth
+ Number(compoundkeywords,
w -> w in ["do", "if", "repeat", "function"])
- Number(compoundkeywords,
w -> w in ["od", "fi", "until", "end"]);
return not IsEmpty(compoundkeywords);
end;
GetNextLine := function()
local from, to, bhsinput;
repeat
line := ReadLine(instream);
if line = fail then return; fi;
until not IsMatchingSublist(line, "#comment:");
if IsBound(action) then
action := SplitString(action, "", "# <>\n");
if action[1] = "alt:" then
DoAltAction();
else
# action[2] = name of a possible option passed to `PqExample'
# action[4] = string to be replaced in <line> with the value
# of the option if ok and set
optname := action[2];
if IsDigitChar(optname[ Length(optname) ]) then
optname := optname{[1..Length(optname) - 1]};
fi;
datarec.(action[2]) := ValueOption(action[2]);
if datarec.(action[2]) = fail then
Unbind( datarec.(action[2]) );
else
if not ANUPQoptionChecks.(optname)( datarec.(action[2]) ) then
Info(InfoANUPQ, 1, "\"", action[2], "\" value must be a ",
ANUPQoptionTypes.(optname),
": option ignored.");
Unbind( datarec.(action[2]) );
else
if action[1] = "add" then
line[1] := ' ';
fi;
if IsString( datarec.(action[2]) ) then
line := ReplacedString( line, action[4],
Flat(['"',datarec.(action[2]),'"']) );
else
line := ReplacedString( line, action[4],
String( datarec.(action[2]) ) );
fi;
fi;
fi;
fi;
Unbind(action);
elif IsMatchingSublist(line, "##") then
; # do nothing
elif 3 < Length(line) and line{[1..4]} in ["#sub", "#add", "#alt"] then
action := line;
elif line[1] = '#' then
# execute instructions behind the scenes
bhsinput := "";
repeat
Append( bhsinput,
ReplacedString(line{[2..Length(line)]},
"datarec",
"ANUPQData.example.options") );
line := ReadLine(instream);
until line[1] <> '#' or
(3 < Length(line) and line{[1..4]} in ["#sub", "#add", "#com"]);
Read( InputTextString(bhsinput) );
fi;
end;
PrintLine := function()
if IsMatchingSublist(line, "##") then
line := line{[2..Length(line)]};
elif line[1] = '#' then
return;
fi;
if input = "" then
Print("gap> ");
else
Print("> ");
fi;
Print( ReplacedString(line, ";;", ";") );
end;
fi;
fi;
instream := InputTextFile(file);
if name <> "index" then
FLUSH_PQ_STREAM_UNTIL( instream, 10, 1, ReadLine,
line -> IsMatchingSublist(line, "#Example") );
line := FLUSH_PQ_STREAM_UNTIL( instream, 1, 10, ReadLine,
line -> IsMatchingSublist(line, "#vars:") );
if Length(line) + 21 < linewidth then
Info(InfoANUPQ, 1, line{[Position(line, ' ')+1..Position(line, ';')-1]},
" are local to `PqExample'");
else
#this assumes one has been careful to ensure the `#vars:' line is not
#longer than 72 characters.
Info(InfoANUPQ, 1, line{[Position(line, ' ')+1..Position(line, ';')-1]},
" are");
Info(InfoANUPQ, 1, "local to `PqExample'");
fi;
vars := SplitString(line, "", " ,;\n");
vars := vars{[2 .. Length(vars)]};
if not printonly then
CallFuncList(HideGlobalVariables, vars);
fi;
line := FLUSH_PQ_STREAM_UNTIL(instream, 1, 10, ReadLine,
line -> IsMatchingSublist(line, "#options:"));
input := "";
GetNextLine();
while line <> fail do
PrintLine();
if line[1] <> '#' then
if not printonly then
if input = "" then
compoundDepth := 0;
iscompoundStatement := CheckForCompoundKeywords();
elif iscompoundStatement and compoundDepth > 0 then
CheckForCompoundKeywords();
fi;
if line <> "\n" then
Append(input, line);
if iscompoundStatement then
if compoundDepth = 0 and Position(input, ';') <> fail then
Read( InputTextString(input) );
if hasFunctionExpr then
parts := SplitString(input, "", ":= \n");
Read( InputTextString(
Concatenation(
"View(", parts[1], "); Print(\"\\n\");" ) ) );
ANUPQData.example.last := parts[1];
fi;
iscompoundStatement := false;
input := "";
fi;
elif Position(input, ';') <> fail then
PQ_EVALUATE(input);
input := "";
fi;
fi;
fi;
fi;
GetNextLine();
od;
if printonly then
if IsBound(filename) then
LogTo();
fi;
else
ANUPQData.example.vars := rec();
for var in Filtered(vars, ISBOUND_GLOBAL) do
ANUPQData.example.vars.(var) := VALUE_GLOBAL(var);
od;
Info(InfoANUPQ, 1, "Variables used in `PqExample' are saved ",
"in `ANUPQData.example.vars'.");
CallFuncList(UnhideGlobalVariables, vars);
fi;
else
FLUSH_PQ_STREAM_UNTIL(instream, 1, 10, ReadLine, line -> line = fail);
fi;
CloseStream(instream);
if linewidth <> sizescreen[1] then
SizeScreen( sizescreen ); # restore what was there before
fi;
end);
#############################################################################
##
#F AllPqExamples() . . . . . . . . . . list the names of all ANUPQ examples
##
InstallGlobalFunction( AllPqExamples, function()
local dir, files;
dir := DirectoriesPackageLibrary( "anupq", "examples" )[1];
files := DirectoryContents( Filename( dir, "" ));
# Remove certain files
files := Difference( files, [".", "..", "index", "README", "CVS",
"5gp-PG-e5-i", "7gp-a-x-Rel-i"] );
# Remove files ending with a tilde
files := Filtered( files, file -> file[ Length(file) ] <> '~' );
return files;
end );
#############################################################################
##
#F GrepPqExamples( <string> ) . . . . . . . grep ANUPQ examples for a string
##
## runs the UNIX command `grep <string>' over the {\ANUPQ} examples and
## returns the list of examples for which there is a match. The actual
## matches are `Info'-ed at `InfoANUPQ' level 2.
##
InstallGlobalFunction( GrepPqExamples, function( string )
local dir, str, grep, out, opts, lines, matches, line;
dir := DirectoriesPackageLibrary( "anupq", "examples" )[1];
grep := Filename( DirectoriesSystemPrograms(), "grep" );
str := "";
out := OutputTextString( str, true );
opts := Concatenation( [ string ], AllPqExamples() );
Process( dir, grep, InputTextNone(), out, opts );
CloseStream( out );
lines := SplitString( str, "", "\n" );
matches := [];
for line in lines do
Info(InfoANUPQ, 2, line);
Add( matches, SplitString(line, "", ":")[1] );
od;
return Set(matches);
end );
#E anupq.gi . . . . . . . . . . . . . . . . . . . . . . . . . . . ends here
|
%% Copyright (C) 2015, 2016, 2019 Colin B. Macdonald
%%
%% This file is part of OctSymPy.
%%
%% OctSymPy is free software; you can redistribute it and/or modify
%% it under the terms of the GNU General Public License as published
%% by the Free Software Foundation; either version 3 of the License,
%% or (at your option) any later version.
%%
%% This software is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty
%% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
%% the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public
%% License along with this software; see the file COPYING.
%% If not, see <http://www.gnu.org/licenses/>.
%% -*- texinfo -*-
%% @documentencoding UTF-8
%% @deftypemethod @@sym {} max (@var{a})
%% @deftypemethodx @@sym {} max (@var{a}, @var{b})
%% @deftypemethodx @@sym {} max (@var{a}, [], @var{dim})
%% @deftypemethodx @@sym {[@var{r}, @var{I}] =} max (@dots{})
%% Return maximum value of a symbolic vector or vectors.
%%
%% Example:
%% @example
%% @group
%% max(sym(1), sym(2))
%% @result{} (sym) 2
%% max([1 2*sym(pi) 6])
%% @result{} (sym) 2⋅π
%% [M, I] = max([1 2*sym(pi) 6])
%% @result{} M = (sym) 2⋅π
%% @result{} I = 2
%% @end group
%% @end example
%%
%% @seealso{@@sym/min}
%% @end deftypemethod
function [z, I] = max(A, B, dim)
if (nargout <= 1)
if (nargin == 1)
if (isvector(A))
z = pycall_sympy__ ('return Max(*_ins[0])', A);
else
z = max(A, [], 1);
end
elseif (nargin == 2)
z = elementwise_op ('Max', sym(A), sym(B));
elseif (nargin == 3)
assert (isempty (B))
assert (logical(dim == 1) || logical(dim == 2))
cmd = { '(A, dim) = _ins'
'if not A.is_Matrix:'
' A = sp.Matrix([A])'
'if dim == 0:'
' if A.rows == 0:'
' return A'
' return Matrix([[Max(*A.col(i)) for i in range(0, A.cols)]])'
'elif dim == 1:'
' if A.cols == 0:'
' return A'
' return Matrix([Max(*A.row(i)) for i in range(0, A.rows)])' };
z = pycall_sympy__ (cmd, A, dim - 1);
else
print_usage ();
end
return
end
% dealing with the index (2nd output) is complicated, defer to min
if (nargin == 1)
[z, I] = min(-A);
z = -z;
elseif (nargin == 3)
[z, I] = min(-A, -B, dim);
z = -z;
else
print_usage ();
end
end
%% many other tests are in @sym/min
%!test
%! % simple
%! assert (isequal (max([sym(10) sym(11)]), sym(11)))
%!test
%! syms x y
%! assert (isequal (children (max (x, y)), [x y]))
|
[STATEMENT]
lemma Abs_bit1'_code [code abstract]:
"Rep_bit1 (Abs_bit1' x :: 'a :: finite bit1) = x mod int (CARD('a bit1))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Rep_bit1 (Abs_bit1' x) = x mod int CARD('a bit1)
[PROOF STEP]
by(auto simp add: Abs_bit1'_def intro!: Abs_bit1_inverse) |
module Luau.OpSem where
open import Agda.Builtin.Equality using (_≡_)
open import Agda.Builtin.Float using (Float; primFloatPlus; primFloatMinus; primFloatTimes; primFloatDiv)
open import FFI.Data.Maybe using (just)
open import Luau.Heap using (Heap; _≡_⊕_↦_; _[_]; function_is_end)
open import Luau.Substitution using (_[_/_]ᴮ)
open import Luau.Syntax using (Expr; Stat; Block; nil; addr; var; function_is_end; _$_; block_is_end; local_←_; _∙_; done; return; name; fun; arg; binexp; BinaryOperator; +; -; *; /; number)
open import Luau.Value using (addr; val; number)
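-- Evaluation of the built-in binary arithmetic operators on number
-- (floating-point) values.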
evalBinOp : Float → BinaryOperator → Float → Float
evalBinOp x + y = primFloatPlus x y
evalBinOp x - y = primFloatMinus x y
evalBinOp x * y = primFloatTimes x y
evalBinOp x / y = primFloatDiv x y
data _⊢_⟶ᴮ_⊣_ {a} : Heap a → Block a → Block a → Heap a → Set
data _⊢_⟶ᴱ_⊣_ {a} : Heap a → Expr a → Expr a → Heap a → Set
data _⊢_⟶ᴱ_⊣_ where
nil : ∀ {H} →
-------------------
H ⊢ nil ⟶ᴱ nil ⊣ H
function : ∀ {H H′ a F B} →
H′ ≡ H ⊕ a ↦ (function F is B end) →
-------------------------------------------
H ⊢ (function F is B end) ⟶ᴱ (addr a) ⊣ H′
app₁ : ∀ {H H′ M M′ N} →
H ⊢ M ⟶ᴱ M′ ⊣ H′ →
-----------------------------
H ⊢ (M $ N) ⟶ᴱ (M′ $ N) ⊣ H′
app₂ : ∀ {H H′ V N N′} →
H ⊢ N ⟶ᴱ N′ ⊣ H′ →
-----------------------------
H ⊢ (val V $ N) ⟶ᴱ (val V $ N′) ⊣ H′
beta : ∀ {H a F B V} →
H [ a ] ≡ just(function F is B end) →
-----------------------------------------------------------------------------
H ⊢ (addr a $ val V) ⟶ᴱ (block (fun F) is (B [ V / name(arg F) ]ᴮ) end) ⊣ H
block : ∀ {H H′ B B′ b} →
H ⊢ B ⟶ᴮ B′ ⊣ H′ →
----------------------------------------------------
H ⊢ (block b is B end) ⟶ᴱ (block b is B′ end) ⊣ H′
return : ∀ {H V B b} →
--------------------------------------------------------
H ⊢ (block b is return (val V) ∙ B end) ⟶ᴱ (val V) ⊣ H
done : ∀ {H b} →
---------------------------------
H ⊢ (block b is done end) ⟶ᴱ nil ⊣ H
binOpEval :
∀ {H x op y} →
--------------------------------------------------------------------------
H ⊢ (binexp (number x) op (number y)) ⟶ᴱ (number (evalBinOp x op y)) ⊣ H
binOp₁ :
∀ {H H′ x x′ op y} →
H ⊢ x ⟶ᴱ x′ ⊣ H′ →
---------------------------------------------
H ⊢ (binexp x op y) ⟶ᴱ (binexp x′ op y) ⊣ H′
binOp₂ :
∀ {H H′ x op y y′} →
H ⊢ y ⟶ᴱ y′ ⊣ H′ →
---------------------------------------------
H ⊢ (binexp x op y) ⟶ᴱ (binexp x op y′) ⊣ H′
data _⊢_⟶ᴮ_⊣_ where
local : ∀ {H H′ x M M′ B} →
H ⊢ M ⟶ᴱ M′ ⊣ H′ →
-------------------------------------------------
H ⊢ (local x ← M ∙ B) ⟶ᴮ (local x ← M′ ∙ B) ⊣ H′
subst : ∀ {H x v B} →
------------------------------------------------------
H ⊢ (local x ← val v ∙ B) ⟶ᴮ (B [ v / name x ]ᴮ) ⊣ H
function : ∀ {H H′ a F B C} →
H′ ≡ H ⊕ a ↦ (function F is C end) →
--------------------------------------------------------------
H ⊢ (function F is C end ∙ B) ⟶ᴮ (B [ addr a / fun F ]ᴮ) ⊣ H′
return : ∀ {H H′ M M′ B} →
H ⊢ M ⟶ᴱ M′ ⊣ H′ →
--------------------------------------------
H ⊢ (return M ∙ B) ⟶ᴮ (return M′ ∙ B) ⊣ H′
data _⊢_⟶*_⊣_ {a} : Heap a → Block a → Block a → Heap a → Set where
refl : ∀ {H B} →
----------------
H ⊢ B ⟶* B ⊣ H
step : ∀ {H H′ H″ B B′ B″} →
H ⊢ B ⟶ᴮ B′ ⊣ H′ →
H′ ⊢ B′ ⟶* B″ ⊣ H″ →
------------------
H ⊢ B ⟶* B″ ⊣ H″
|
State Before: α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
i : ℕ
⊢ approx (↑f) i ≤ approx (↑f) (Nat.succ i) State After: no goals Tactic: induction i with
| zero => dsimp [approx]; apply @bot_le _ _ _ (f ⊥)
| succ _ i_ih => intro ; apply f.monotone; apply i_ih State Before: case zero
α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
⊢ approx (↑f) Nat.zero ≤ approx (↑f) (Nat.succ Nat.zero) State After: case zero
α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
⊢ ⊥ ≤ ↑f ⊥ Tactic: dsimp [approx] State Before: case zero
α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
⊢ ⊥ ≤ ↑f ⊥ State After: no goals Tactic: apply @bot_le _ _ _ (f ⊥) State Before: case succ
α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
n✝ : ℕ
i_ih : approx (↑f) n✝ ≤ approx (↑f) (Nat.succ n✝)
⊢ approx (↑f) (Nat.succ n✝) ≤ approx (↑f) (Nat.succ (Nat.succ n✝)) State After: case succ
α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
n✝ : ℕ
i_ih : approx (↑f) n✝ ≤ approx (↑f) (Nat.succ n✝)
i✝ : α
⊢ approx (↑f) (Nat.succ n✝) i✝ ≤ approx (↑f) (Nat.succ (Nat.succ n✝)) i✝ Tactic: intro State Before: case succ
α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
n✝ : ℕ
i_ih : approx (↑f) n✝ ≤ approx (↑f) (Nat.succ n✝)
i✝ : α
⊢ approx (↑f) (Nat.succ n✝) i✝ ≤ approx (↑f) (Nat.succ (Nat.succ n✝)) i✝ State After: case succ.a
α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
n✝ : ℕ
i_ih : approx (↑f) n✝ ≤ approx (↑f) (Nat.succ n✝)
i✝ : α
⊢ approx (↑f) n✝ ≤ approx (↑f) (Nat.succ n✝) Tactic: apply f.monotone State Before: case succ.a
α : Type u_1
β : α → Type u_2
f : ((a : α) → Part (β a)) →o (a : α) → Part (β a)
n✝ : ℕ
i_ih : approx (↑f) n✝ ≤ approx (↑f) (Nat.succ n✝)
i✝ : α
⊢ approx (↑f) n✝ ≤ approx (↑f) (Nat.succ n✝) State After: no goals Tactic: apply i_ih |
= = Academic response = =
|
import .basic
namespace premodel
--- The Pi-type of a family of premodels is a premodel, with the componentwise operations in terms of `vect.unzip_fam`.
definition pi {th : theory} {α : Type*} (M : α → Type _) [∀ a, premodel th (M a)] : premodel th (Π a, M a) :=
{
act :=
λ n f xs, λ a,
premodel.act f (xs.unzip_fam a)
}
attribute [instance] premodel.pi
--- The evaluation defines a morphism from the pi-type to its component.
definition pi_eval {th : theory} {α : Type _} (M : α → Type _) [∀ a, premodel th (M a)] (a : α) : morphism th (Π a, M a) (M a) :=
⟨(λ f, f a), by intros _ μ fs; rw [←vect.unzip_fam_eval]; unfold act⟩
end premodel
namespace model
--- The Pi-type of a family of models is virtually a model; i.e.~ up to `funext`.
theorem pi_axiom {th : theory} {α : Type _} {C : α → Type _} [∀ a, model th (C a)] : ∀ {n : ℕ} (r : th.rel n) (var : finord n → Π (a : α), C a) (a : α), optree.elim (@premodel.act th (Π a, C a) _) var (th.rel_lhs r) a = optree.elim (@premodel.act th (Π a, C a) _) var (th.rel_rhs r) a :=
begin
intros _ r var a,
unfold premodel.act at *; dsimp *,
let dact : Π (a : α) {k : ℕ}, th.op k → vect (C a) k → (C a) := λ a k f rs , premodel.act f rs,
let dvar : Π (a : α), finord n → (C a) := λ k a, var a k,
rw [@optree.elim_pi th.op _ α C dact dvar _ a],
rw [@optree.elim_pi th.op _ α C dact dvar _ a],
rw [axiom_eq]
end
#print axioms pi_axiom
namespace unsafe
-- WARNING: Use of `funext`.
--- The Pi-type of a family of models is a model.
definition pi {th : theory} {α : Type _} {C : α → Type _} [∀ a, model th (C a)] : model th (Π a, C a):=
{
haxiom := λ _ r var, funext (pi_axiom r var)
}
#print axioms model.unsafe.pi
end unsafe
end model
|
[STATEMENT]
lemma mark_guards_whileAnno [simp]:
"mark_guards f (whileAnno b I V c) = whileAnno b I V (mark_guards f c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mark_guards f (whileAnno b I V c) = whileAnno b I V (mark_guards f c)
[PROOF STEP]
by (simp add: whileAnno_def while_def) |
module Sum
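# Read the whitespace-delimited numbers in ../input/input.txt and print their sum.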
using DelimitedFiles
values = readdlm("../input/input.txt")
println(sum(values))
end
|
/-
Copyright (c) 2017 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
! This file was ported from Lean 3 source module init.data.bool.lemmas
! leanprover-community/mathlib commit 9af482290ef68e8aaa5ead01aa7b09b7be7019fd
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
prelude
import Leanbin.Init.Data.Bool.Basic
import Leanbin.Init.Meta.Default
attribute [simp] cond or and not xor
#print Bool.cond_self /-
@[simp]
theorem Bool.cond_self.{u} {α : Type u} (b : Bool) (a : α) : cond b a a = a := by cases b <;> simp
#align cond_a_a Bool.cond_self
-/
#print Bool.and_self /-
@[simp]
theorem Bool.and_self (b : Bool) : (b && b) = b := by cases b <;> simp
#align band_self Bool.and_self
-/
#print Bool.and_true /-
@[simp]
theorem Bool.and_true (b : Bool) : (b && true) = b := by cases b <;> simp
#align band_tt Bool.and_true
-/
#print Bool.and_false /-
@[simp]
theorem Bool.and_false (b : Bool) : (b && false) = false := by cases b <;> simp
#align band_ff Bool.and_false
-/
#print Bool.true_and /-
@[simp]
theorem Bool.true_and (b : Bool) : (true && b) = b := by cases b <;> simp
#align tt_band Bool.true_and
-/
#print Bool.false_and /-
@[simp]
theorem Bool.false_and (b : Bool) : (false && b) = false := by cases b <;> simp
#align ff_band Bool.false_and
-/
#print Bool.or_self /-
@[simp]
theorem Bool.or_self (b : Bool) : (b || b) = b := by cases b <;> simp
#align bor_self Bool.or_self
-/
#print Bool.or_true /-
@[simp]
theorem Bool.or_true (b : Bool) : (b || true) = true := by cases b <;> simp
#align bor_tt Bool.or_true
-/
#print Bool.or_false /-
@[simp]
theorem Bool.or_false (b : Bool) : (b || false) = b := by cases b <;> simp
#align bor_ff Bool.or_false
-/
#print Bool.true_or /-
@[simp]
theorem Bool.true_or (b : Bool) : (true || b) = true := by cases b <;> simp
#align tt_bor Bool.true_or
-/
#print Bool.false_or /-
@[simp]
theorem Bool.false_or (b : Bool) : (false || b) = b := by cases b <;> simp
#align ff_bor Bool.false_or
-/
#print Bool.xor_self /-
@[simp]
theorem Bool.xor_self (b : Bool) : xor b b = false := by cases b <;> simp
#align bxor_self Bool.xor_self
-/
#print Bool.xor_true /-
@[simp]
theorem Bool.xor_true (b : Bool) : xor b true = not b := by cases b <;> simp
#align bxor_tt Bool.xor_true
-/
#print Bool.xor_false /-
theorem Bool.xor_false (b : Bool) : xor b false = b := by cases b <;> simp
#align bxor_ff Bool.xor_false
-/
#print Bool.true_xor /-
@[simp]
theorem Bool.true_xor (b : Bool) : xor true b = not b := by cases b <;> simp
#align tt_bxor Bool.true_xor
-/
#print Bool.false_xor /-
theorem Bool.false_xor (b : Bool) : xor false b = b := by cases b <;> simp
#align ff_bxor Bool.false_xor
-/
#print Bool.not_not /-
@[simp]
theorem Bool.not_not (b : Bool) : not (not b) = b := by cases b <;> simp
#align bnot_bnot Bool.not_not
-/
#print Bool.true_eq_false_eq_False /-
theorem Bool.true_eq_false_eq_False : ¬true = false := by contradiction
#align tt_eq_ff_eq_false Bool.true_eq_false_eq_False
-/
#print Bool.false_eq_true_eq_False /-
theorem Bool.false_eq_true_eq_False : ¬false = true := by contradiction
#align ff_eq_tt_eq_false Bool.false_eq_true_eq_False
-/
#print Bool.eq_false_eq_not_eq_true /-
@[simp]
theorem Bool.eq_false_eq_not_eq_true (b : Bool) : (¬b = true) = (b = false) := by cases b <;> simp
#align eq_ff_eq_not_eq_tt Bool.eq_false_eq_not_eq_true
-/
@[simp]
theorem eq_true_eq_not_eq_false (b : Bool) : (¬b = false) = (b = true) := by cases b <;> simp
#align eq_tt_eq_not_eq_ff eq_true_eq_not_eq_false
#print Bool.eq_false_of_not_eq_true /-
theorem Bool.eq_false_of_not_eq_true {b : Bool} : ¬b = true → b = false :=
Eq.mp (Bool.eq_false_eq_not_eq_true b)
#align eq_ff_of_not_eq_tt Bool.eq_false_of_not_eq_true
-/
#print Bool.eq_true_of_not_eq_false /-
theorem Bool.eq_true_of_not_eq_false {b : Bool} : ¬b = false → b = true :=
Eq.mp (eq_true_eq_not_eq_false b)
#align eq_tt_of_not_eq_ff Bool.eq_true_of_not_eq_false
-/
#print Bool.and_eq_true_eq_eq_true_and_eq_true /-
@[simp]
theorem Bool.and_eq_true_eq_eq_true_and_eq_true (a b : Bool) :
((a && b) = true) = (a = true ∧ b = true) := by cases a <;> cases b <;> simp
#align band_eq_true_eq_eq_tt_and_eq_tt Bool.and_eq_true_eq_eq_true_and_eq_true
-/
#print Bool.or_eq_true_eq_eq_true_or_eq_true /-
@[simp]
theorem Bool.or_eq_true_eq_eq_true_or_eq_true (a b : Bool) :
((a || b) = true) = (a = true ∨ b = true) := by cases a <;> cases b <;> simp
#align bor_eq_true_eq_eq_tt_or_eq_tt Bool.or_eq_true_eq_eq_true_or_eq_true
-/
#print Bool.not_eq_true_eq_eq_false /-
@[simp]
theorem Bool.not_eq_true_eq_eq_false (a : Bool) : (not a = true) = (a = false) := by
cases a <;> simp
#align bnot_eq_true_eq_eq_ff Bool.not_eq_true_eq_eq_false
-/
#print Bool.and_eq_false_eq_eq_false_or_eq_false /-
@[simp]
theorem Bool.and_eq_false_eq_eq_false_or_eq_false (a b : Bool) :
((a && b) = false) = (a = false ∨ b = false) := by cases a <;> cases b <;> simp
#align band_eq_false_eq_eq_ff_or_eq_ff Bool.and_eq_false_eq_eq_false_or_eq_false
-/
#print Bool.or_eq_false_eq_eq_false_and_eq_false /-
@[simp]
theorem Bool.or_eq_false_eq_eq_false_and_eq_false (a b : Bool) :
((a || b) = false) = (a = false ∧ b = false) := by cases a <;> cases b <;> simp
#align bor_eq_false_eq_eq_ff_and_eq_ff Bool.or_eq_false_eq_eq_false_and_eq_false
-/
#print Bool.not_eq_false_eq_eq_true /-
@[simp]
theorem Bool.not_eq_false_eq_eq_true (a : Bool) : (not a = false) = (a = true) := by
cases a <;> simp
#align bnot_eq_ff_eq_eq_tt Bool.not_eq_false_eq_eq_true
-/
#print Bool.coe_false /-
@[simp]
theorem Bool.coe_false : ↑false = False :=
show (false = true) = False by simp
#align coe_ff Bool.coe_false
-/
#print Bool.coe_true /-
@[simp]
theorem Bool.coe_true : ↑true = True :=
show (true = true) = True by simp
#align coe_tt Bool.coe_true
-/
#print Bool.coe_sort_false /-
@[simp]
theorem Bool.coe_sort_false : ↥false = False :=
show (false = true) = False by simp
#align coe_sort_ff Bool.coe_sort_false
-/
#print Bool.coe_sort_true /-
@[simp]
theorem Bool.coe_sort_true : ↥true = True :=
show (true = true) = True by simp
#align coe_sort_tt Bool.coe_sort_true
-/
#print Bool.decide_iff /-
@[simp]
theorem Bool.decide_iff (p : Prop) [d : Decidable p] : decide p = true ↔ p :=
match d with
| is_true hp => ⟨fun h => hp, fun _ => rfl⟩
| is_false hnp => ⟨fun h => Bool.noConfusion h, fun hp => absurd hp hnp⟩
#align to_bool_iff Bool.decide_iff
-/
#print Bool.decide_true /-
theorem Bool.decide_true {p : Prop} [Decidable p] : p → decide p :=
(Bool.decide_iff p).2
#align to_bool_true Bool.decide_true
-/
/- warning: to_bool_tt clashes with to_bool_true -> Bool.decide_true
Case conversion may be inaccurate. Consider using '#align to_bool_tt Bool.decide_trueₓ'. -/
#print Bool.decide_true /-
theorem Bool.decide_true {p : Prop} [Decidable p] : p → decide p = true :=
Bool.decide_true
#align to_bool_tt Bool.decide_true
-/
#print Bool.of_decide_true /-
theorem Bool.of_decide_true {p : Prop} [Decidable p] : decide p → p :=
(Bool.decide_iff p).1
#align of_to_bool_true Bool.of_decide_true
-/
#print Bool.bool_iff_false /-
theorem Bool.bool_iff_false {b : Bool} : ¬b ↔ b = false := by cases b <;> exact by decide
#align bool_iff_false Bool.bool_iff_false
-/
#print Bool.bool_eq_false /-
theorem Bool.bool_eq_false {b : Bool} : ¬b → b = false :=
Bool.bool_iff_false.1
#align bool_eq_false Bool.bool_eq_false
-/
#print Bool.decide_false_iff /-
@[simp]
theorem Bool.decide_false_iff (p : Prop) [Decidable p] : decide p = false ↔ ¬p :=
Bool.bool_iff_false.symm.trans (not_congr (Bool.decide_iff _))
#align to_bool_ff_iff Bool.decide_false_iff
-/
#print Bool.decide_false /-
theorem Bool.decide_false {p : Prop} [Decidable p] : ¬p → decide p = false :=
(Bool.decide_false_iff p).2
#align to_bool_ff Bool.decide_false
-/
#print Bool.of_decide_false /-
theorem Bool.of_decide_false {p : Prop} [Decidable p] : decide p = false → ¬p :=
(Bool.decide_false_iff p).1
#align of_to_bool_ff Bool.of_decide_false
-/
#print Bool.decide_congr /-
theorem Bool.decide_congr {p q : Prop} [Decidable p] [Decidable q] (h : p ↔ q) :
decide p = decide q := by
induction' h' : to_bool q with
exact Bool.decide_false (mt h.1 <| Bool.of_decide_false h')
exact Bool.decide_true (h.2 <| Bool.of_decide_true h')
#align to_bool_congr Bool.decide_congr
-/
#print Bool.or_coe_iff /-
@[simp]
theorem Bool.or_coe_iff (a b : Bool) : a || b ↔ a ∨ b := by cases a <;> cases b <;> exact by decide
#align bor_coe_iff Bool.or_coe_iff
-/
#print Bool.and_coe_iff /-
@[simp]
theorem Bool.and_coe_iff (a b : Bool) : a && b ↔ a ∧ b := by cases a <;> cases b <;> exact by decide
#align band_coe_iff Bool.and_coe_iff
-/
#print Bool.xor_coe_iff /-
@[simp]
theorem Bool.xor_coe_iff (a b : Bool) : xor a b ↔ Xor' a b := by
cases a <;> cases b <;> exact by decide
#align bxor_coe_iff Bool.xor_coe_iff
-/
#print Bool.ite_eq_true_distrib /-
@[simp]
theorem Bool.ite_eq_true_distrib (c : Prop) [Decidable c] (a b : Bool) :
((if c then a else b) = true) = if c then a = true else b = true := by by_cases c <;> simp [*]
#align ite_eq_tt_distrib Bool.ite_eq_true_distrib
-/
#print Bool.ite_eq_false_distrib /-
@[simp]
theorem Bool.ite_eq_false_distrib (c : Prop) [Decidable c] (a b : Bool) :
((if c then a else b) = false) = if c then a = false else b = false := by
by_cases c <;> simp [*]
#align ite_eq_ff_distrib Bool.ite_eq_false_distrib
-/
|
module Oscar.Data.Vec where
open import Data.Vec public
open import Oscar.Data.Nat
open import Oscar.Data.Equality
open import Data.Nat
map₂ : ∀ {a b} {A : Set a} {B : Set b} {m n}
→ ∀ {c} {C : Set c} (f : A → B → C)
→ Vec A m → Vec B n → Vec C (m * n)
map₂ f xs ys = map f xs ⊛* ys
open import Data.Fin
delete : ∀ {a n} {A : Set a} → Fin (suc n) → Vec A (suc n) → Vec A n
delete zero (x ∷ xs) = xs
delete {n = zero} (suc ()) _
delete {n = suc n} (suc i) (x ∷ xs) = x ∷ delete i xs
open import Function
open import Data.Product
tabulate⋆ : ∀ {n a} {A : Set a} → (F : Fin n → A) → ∃ λ (v : Vec A n) → ∀ (i : Fin n) → F i ∈ v
tabulate⋆ {zero} F = [] , (λ ())
tabulate⋆ {suc n} F = let v , t = tabulate⋆ (F ∘ suc) in F zero ∷ v , (λ { zero → here ; (suc i) → there (t i)})
|
"""
niw_hyperparams(κ::Float32, m::AbstractArray{Float64}, ν::Float32, ψ::AbstractArray{Float64})
[Normal Inverse Wishart](https://en.wikipedia.org/wiki/Normal-inverse-Wishart_distribution)
"""
struct niw_hyperparams <: distribution_hyper_params
κ::Float32
m::AbstractArray{Float64}
ν::Float32
ψ::AbstractArray{Float64}
end
mutable struct niw_sufficient_statistics <: sufficient_statistics
N::Float32
points_sum::AbstractArray{Float64,1}
S::AbstractArray{Float64,2}
end
function calc_posterior(prior::niw_hyperparams, suff_statistics::niw_sufficient_statistics)
if suff_statistics.N == 0
return prior
end
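    # Conjugate NIW posterior update: κ and ν each grow by the number of
    # observed points, m is a precision-weighted combination of the prior
    # mean and the data sum, and ψ accumulates the data scatter about the
    # updated mean (normalized by ν in this implementation).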
κ = prior.κ + suff_statistics.N
ν = prior.ν + suff_statistics.N
m = (prior.m.*prior.κ + suff_statistics.points_sum) / κ
ψ = (prior.ν * prior.ψ + prior.κ*prior.m*prior.m' -κ*m*m'+ suff_statistics.S) / ν
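    # Symmetrize ψ: floating-point error can leave it slightly asymmetric,
    # which would break the inverse-Wishart sampling downstream.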
ψ = Matrix(Symmetric(ψ))
ψ = (ψ+ψ')/2
return niw_hyperparams(κ,m,ν,ψ)
end
function sample_distribution(hyperparams::niw_hyperparams)
Σ = rand(Distributions.InverseWishart(hyperparams.ν, hyperparams.ν * hyperparams.ψ))
μ = rand(Distributions.MvNormal(hyperparams.m, Σ / hyperparams.κ))
invΣ = inv(Σ)
chol = cholesky(Hermitian(invΣ))
return mv_gaussian(μ, Σ, invΣ, logdet(Σ), chol.U)
end
function create_sufficient_statistics(
hyper::niw_hyperparams,
posterior::niw_hyperparams,
points::AbstractArray{Float32,2},
pts_to_group = 0,
)
if size(points, 2) == 0
return niw_sufficient_statistics(
size(points, 2),
zeros(Float32, length(hyper.m)),
zeros(Float32, length(hyper.m), length(hyper.m)),
)
end
pts = Array{Float64}(points)
points_sum = sum(pts, dims = 2)[:]
S = pts * pts'
S = 0.5*(S+S')
return niw_sufficient_statistics(size(points,2),points_sum,S)
end
function log_marginal_likelihood(
hyper::niw_hyperparams,
posterior_hyper::niw_hyperparams,
suff_stats::niw_sufficient_statistics,
)
D = length(suff_stats.points_sum)
logpi = log(pi)
return -suff_stats.N * D * 0.5 * logpi +
log_multivariate_gamma(posterior_hyper.ν / 2, D) -
log_multivariate_gamma(hyper.ν / 2, D) +
(hyper.ν / 2) * (D * log(hyper.ν) + logdet(hyper.ψ)) -
(posterior_hyper.ν / 2) *
(D * log(posterior_hyper.ν) + logdet(posterior_hyper.ψ)) +
(D / 2) * (log(hyper.κ / posterior_hyper.κ))
end
function aggregate_suff_stats(
suff_l::niw_sufficient_statistics,
suff_r::niw_sufficient_statistics,
)
return niw_sufficient_statistics(
suff_l.N + suff_r.N,
suff_l.points_sum + suff_r.points_sum,
suff_l.S + suff_r.S,
)
end
|
import numpy as np
def numpy_random14_test(n):
return np.random.random(n)
|
State Before: b x : Ordinal
hx : x = 0
⊢ log b x ≤ x
State After: no goals
Tactic: simp only [hx, log_zero_right, Ordinal.zero_le]

State Before: b x : Ordinal
hx : ¬x = 0
hb : ¬1 < b
⊢ log b x ≤ x
State After: no goals
Tactic: simp only [log_of_not_one_lt_left hb, Ordinal.zero_le]
c**********************************************************************
c twod.f - a solution to the Poisson problem by using Jacobi
c interation on a 2-d decomposition
c
c .... the rest of this is from pi3.f to show the style ...
c
c Each node:
c 1) receives the number of rectangles used in the approximation.
c 2) calculates the areas of its rectangles.
c 3) Synchronizes for a global summation.
c Node 0 prints the result.
c
c Variables:
c
c pi the calculated result
c n number of points of integration.
c x midpoint of each rectangle's interval
c f function to integrate
c sum,pi area of rectangles
c tmp temporary scratch space for global summation
c i do loop index
c
c This code is included (without the prints) because one version of
c MPICH SEGV'ed (probably because of errors in handling send/recv of
c MPI_PROC_NULL source/destination).
c
c****************************************************************************
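c
c Typical build and run (assuming an MPI installation that provides the
c mpif77 wrapper and mpirun; exact command names vary by distribution):
c     mpif77 -o twod twod.f
c     mpirun -np 4 ./twod
c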
program main
include "mpif.h"
integer maxn
parameter (maxn = 128)
double precision a(maxn,maxn), b(maxn,maxn), f(maxn,maxn)
integer nx, ny
integer myid, numprocs, it, rc, comm2d, ierr, stride
integer nbrleft, nbrright, nbrtop, nbrbottom
integer sx, ex, sy, ey
integer dims(2)
logical periods(2)
double precision diff2d, diffnorm, dwork
double precision t1, t2
external diff2d
data periods/2*.false./
call MPI_INIT( ierr )
call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
call MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
c print *, "Process ", myid, " of ", numprocs, " is alive"
if (myid .eq. 0) then
c
c Get the size of the problem
c
c print *, 'Enter nx'
c read *, nx
nx = 10
endif
c print *, 'About to do bcast on ', myid
call MPI_BCAST(nx,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
ny = nx
c
c Get a new communicator for a decomposition of the domain. Let MPI
c find a "good" decomposition
c
dims(1) = 0
dims(2) = 0
call MPI_DIMS_CREATE( numprocs, 2, dims, ierr )
call MPI_CART_CREATE( MPI_COMM_WORLD, 2, dims, periods, .true.,
* comm2d, ierr )
c
c Get my position in this communicator
c
call MPI_COMM_RANK( comm2d, myid, ierr )
c print *, "Process ", myid, " of ", numprocs, " is alive"
c
c My neighbors are now +/- 1 with my rank. Handle the case of the
c boundaries by using MPI_PROC_NULL.
call fnd2dnbrs( comm2d, nbrleft, nbrright, nbrtop, nbrbottom )
c print *, "Process ", myid, ":",
c * nbrleft, nbrright, nbrtop, nbrbottom
c
c Compute the decomposition
c
call fnd2ddecomp( comm2d, nx, sx, ex, sy, ey )
c print *, "Process ", myid, ":", sx, ex, sy, ey
c
c Create a new, "strided" datatype for the exchange in the "non-contiguous"
c direction
c
call mpi_Type_vector( ey-sy+1, 1, ex-sx+3,
$ MPI_DOUBLE_PRECISION, stride, ierr )
call mpi_Type_commit( stride, ierr )
c
c
c Initialize the right-hand-side (f) and the initial solution guess (a)
c
call twodinit( a, b, f, nx, sx, ex, sy, ey )
c
c Actually do the computation. Note the use of a collective operation to
c check for convergence, and a do-loop to bound the number of iterations.
c
call MPI_BARRIER( MPI_COMM_WORLD, ierr )
t1 = MPI_WTIME()
do 10 it=1, 100
call exchng2( b, sx, ex, sy, ey, comm2d, stride,
$ nbrleft, nbrright, nbrtop, nbrbottom )
call sweep2d( b, f, nx, sx, ex, sy, ey, a )
call exchng2( a, sx, ex, sy, ey, comm2d, stride,
$ nbrleft, nbrright, nbrtop, nbrbottom )
call sweep2d( a, f, nx, sx, ex, sy, ey, b )
dwork = diff2d( a, b, nx, sx, ex, sy, ey )
call MPI_Allreduce( dwork, diffnorm, 1, MPI_DOUBLE_PRECISION,
$ MPI_SUM, comm2d, ierr )
if (diffnorm .lt. 1.0e-5) goto 20
if (myid .eq. 0) print *, 2*it, ' Difference is ', diffnorm
10 continue
if (myid .eq. 0) print *, 'Failed to converge'
20 continue
t2 = MPI_WTIME()
c if (myid .eq. 0) then
c print *, 'Converged after ', 2*it, ' Iterations in ', t2 - t1,
c $ ' secs '
c endif
c
c
call MPI_Type_free( stride, ierr )
call MPI_Comm_free( comm2d, ierr )
call MPI_FINALIZE(rc)
end
c
c Perform a Jacobi sweep for a 2-d decomposition
c
subroutine sweep2d( a, f, n, sx, ex, sy, ey, b )
integer n, sx, ex, sy, ey
double precision a(sx-1:ex+1, sy-1:ey+1), f(sx-1:ex+1, sy-1:ey+1),
+ b(sx-1:ex+1, sy-1:ey+1)
c
integer i, j
double precision h
c
h = 1.0d0 / dble(n+1)
do 10 j=sy, ey
do 10 i=sx, ex
b(i,j) = 0.25 * (a(i-1,j)+a(i,j+1)+a(i,j-1)+a(i+1,j)) -
+ h * h * f(i,j)
10 continue
return
end
c
subroutine exchng2( a, sx, ex, sy, ey,
$ comm2d, stride,
$ nbrleft, nbrright, nbrtop, nbrbottom )
include "mpif.h"
integer sx, ex, sy, ey, stride
double precision a(sx-1:ex+1, sy-1:ey+1)
integer nbrleft, nbrright, nbrtop, nbrbottom, comm2d
integer status(MPI_STATUS_SIZE), ierr, nx
c
nx = ex - sx + 1
c These are just like the 1-d versions, except for less data
call MPI_SENDRECV( a(sx,ey), nx, MPI_DOUBLE_PRECISION,
$ nbrtop, 0,
$ a(sx,sy-1), nx, MPI_DOUBLE_PRECISION,
$ nbrbottom, 0, comm2d, status, ierr )
call MPI_SENDRECV( a(sx,sy), nx, MPI_DOUBLE_PRECISION,
$ nbrbottom, 1,
$ a(sx,ey+1), nx, MPI_DOUBLE_PRECISION,
$ nbrtop, 1, comm2d, status, ierr )
c
c This uses the "strided" datatype
call MPI_SENDRECV( a(ex,sy), 1, stride, nbrright, 0,
$ a(sx-1,sy), 1, stride, nbrleft, 0,
$ comm2d, status, ierr )
call MPI_SENDRECV( a(sx,sy), 1, stride, nbrleft, 1,
$ a(ex+1,sy), 1, stride, nbrright, 1,
$ comm2d, status, ierr )
return
end
c
c The rest of the 2-d program
c
double precision function diff2d( a, b, nx, sx, ex, sy, ey )
integer nx, sx, ex, sy, ey
double precision a(sx-1:ex+1, sy-1:ey+1), b(sx-1:ex+1, sy-1:ey+1)
c
double precision sum
integer i, j
c
sum = 0.0d0
do 10 j=sy,ey
do 10 i=sx,ex
sum = sum + (a(i,j) - b(i,j)) ** 2
10 continue
c
diff2d = sum
return
end
subroutine twodinit( a, b, f, nx, sx, ex, sy, ey )
integer nx, sx, ex, sy, ey
double precision a(sx-1:ex+1, sy-1:ey+1), b(sx-1:ex+1, sy-1:ey+1),
& f(sx-1:ex+1, sy-1:ey+1)
c
integer i, j
c
do 10 j=sy-1,ey+1
do 10 i=sx-1,ex+1
a(i,j) = 0.0d0
b(i,j) = 0.0d0
f(i,j) = 0.0d0
10 continue
c
c Handle boundary conditions
c
if (sx .eq. 1) then
do 20 j=sy,ey
a(0,j) = 1.0d0
b(0,j) = 1.0d0
20 continue
endif
if (ex .eq. nx) then
do 21 j=sy,ey
a(nx+1,j) = 0.0d0
b(nx+1,j) = 0.0d0
21 continue
endif
if (sy .eq. 1) then
do 30 i=sx,ex
a(i,0) = 1.0d0
b(i,0) = 1.0d0
30 continue
endif
c
return
end
c
c This file contains a routine for producing a decomposition of a 1-d array
c when given a number of processors. It may be used in "direct" product
c decomposition. The values returned assume a "global" domain in [1:n]
c
subroutine MPE_DECOMP1D( n, numprocs, myid, s, e )
integer n, numprocs, myid, s, e
integer nlocal
integer deficit
c
nlocal = n / numprocs
s = myid * nlocal + 1
deficit = mod(n,numprocs)
s = s + min(myid,deficit)
if (myid .lt. deficit) then
nlocal = nlocal + 1
endif
e = s + nlocal - 1
if (e .gt. n .or. myid .eq. numprocs-1) e = n
return
end
c
c This routine shows how to determine the neighbors in a 2-d decomposition of
c the domain. This assumes that MPI_Cart_create has already been called
c
subroutine fnd2dnbrs( comm2d,
$ nbrleft, nbrright, nbrtop, nbrbottom )
integer comm2d, nbrleft, nbrright, nbrtop, nbrbottom
c
integer ierr
c
call MPI_Cart_shift( comm2d, 0, 1, nbrleft, nbrright, ierr )
call MPI_Cart_shift( comm2d, 1, 1, nbrbottom, nbrtop, ierr )
c
return
end
c
c Note: THIS IS A TEST PROGRAM. THE ACTUAL VALUES MOVED ARE NOT
c CORRECT FOR A POISSON SOLVER.
c
subroutine fnd2ddecomp( comm2d, n, sx, ex, sy, ey )
integer comm2d
integer n, sx, ex, sy, ey
integer dims(2), coords(2), ierr
logical periods(2)
c
call MPI_Cart_get( comm2d, 2, dims, periods, coords, ierr )
call MPE_DECOMP1D( n, dims(1), coords(1), sx, ex )
call MPE_DECOMP1D( n, dims(2), coords(2), sy, ey )
c
return
end
|
# Exploring the static force model in Seaman
```python
%load_ext autoreload
%autoreload 2
%matplotlib inline
```
```python
import sympy as sp
from sympy.plotting import plot as plot
from sympy.plotting import plot3d as plot3d
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaman.helpers
import seaman_symbol as ss
import total_equations as equations
import total_lambda_functions as lambda_functions
sp.init_printing()
```
## Coordinate system
In the static force model, forces and moments are referred to a right-handed
coordinate system with 4 degrees of freedom (surge, sway, roll, yaw), with origin at L/2, on the centre line of the ship and
vertically at the mean water line:
$\left[\frac{L}{2},0,t_m\right]$
* X : forces in ship heading direction
* Y : forces perpendicular to ship heading direction on the water plane.
**Note!** The Y direction does not change with roll.
* P : moment around X
* N : moment around Z
Ship motions in time domain simulations are, however, referred to CG.
## Nomenclature
The following symbols are used in this documentation:
```python
import seaman_symbols
from IPython.core.display import HTML
```
```python
symbols = [item for key,item in seaman_symbols.__dict__.items()]
```
```python
HTML(ss.create_html_table(symbols=symbols))
```
## Bis system
The bis system is used in Seaman to make physical quantities nondimensional. The denominators in the table below are used. Quantities are denoted with a '' sign when they are in bis units; otherwise they are in SI units.
```python
from bis_system import BisSystem
```
```python
HTML(BisSystem.html_table())
```
## Bis example
Suppose that we have a force $Y''$ that can be calculated from the linear velocity $x''$ multiplied by a nondimensional coefficient $C$:
```python
l = ss.Symbol('l')
m = ss.Symbol('m')
C = ss.Coefficient('C')
x = ss.Bis('x',denominator=BisSystem.linear_velocity)
Y = ss.Bis('Y',denominator=BisSystem.force)
eq = sp.Eq(Y.bis,C*x.bis**2)
```
```python
eq
```
Relation between $Y$ (SI force [N]) and $Y''$ (bis force):
```python
Y.bis_eq
```
Relation between $x$ (SI linear velocity [m/s]) and $x''$ (bis linear velocity):
```python
x.bis_eq
```
**Note!** Linear velocity in bis system is the same thing as Froude Number.
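As a quick numerical illustration (a standalone sketch, not part of the seaman code; the ship length and speed below are made-up values), the bis linear velocity is simply $u/\sqrt{gL}$, i.e. the Froude number:
```python
import numpy as np

g = 9.81   # m/s^2, gravitational acceleration
L = 100.0  # m, illustrative ship length (assumed value)
u = 5.0    # m/s, illustrative ship speed (assumed value)
Fn = u / np.sqrt(g * L)  # bis linear velocity = Froude number
print(Fn)  # ~0.16
```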
The equation can be written in SI units by substitution of the bis relations above:
```python
eq3 = ss.expand_bis(equation = eq)
eq3
```
## Total forces
The total forces on the ship can be expressed as described below.
The underlying equations are explained in:
### Hull
* [Surge hull equations](05_seaman_surge_hull_equation.ipynb)
* [Sway hull equations](02_seaman_sway_hull_equation.ipynb)
* [Yaw hull equations](03_seaman_yaw_hull_equation.ipynb)
### Rudder:
* [Rudder equations](04_seaman_rudder_equation.ipynb)
## Surge
```python
equations.surge_equation
```
Expanding the surge equation (using equations for hull and rudder) and converting to SI units:
```python
equations.surge_equation_expanded_SI
```
## Sway
```python
equations.sway_equation
```
```python
equations.sway_equation_expanded_SI
```
## Yaw
```python
equations.yaw_equation
```
```python
equations.yaw_equation_expanded_SI
```
## Roll
```python
equations.roll_equation
```
```python
equations.roll_equation_expanded_SI
```
```python
import surge_hull_equations as surge_hull_equations
import sway_hull_equations as sway_hull_equations
import yaw_hull_equations as yaw_hull_equations
import roll_hull_equations as roll_hull_equations
import rudder_equations as rudder_equations
```
```python
from seaman_symbols import *
```
## Real seaman++
Run real seaman in C++ to verify that the documented model is correct.
```python
import generate_input
shipdict = seaman.ShipDict.load('../../tests/test_ship.ship')
import run_real_seaman
```
### Surge
```python
df = pd.DataFrame()
df['v_w'] = np.linspace(-3,3,20)
df['delta'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['r_w'] = 0.0
df['X_res'] = -np.interp(df['u_w'],shipdict.res_data['vres'],shipdict.res_data['res'])
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_surge_function,
shipdict = shipdict,
df = df,
label = 'fx',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'v_w',y = ['fx','fx_seaman'],ax = ax)
ax.set_title('Drift angle variation');
```
Real seaman has a maximum effective rudder angle of 0.61 rad for the rudder drag, which is why seaman gives different results for very large drift angles or yaw rates:
```python
df = pd.DataFrame()
df['delta'] = np.deg2rad(np.linspace(-45,45,50))
df['r_w'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['v_w'] = 0.0
df['X_res'] = -np.interp(df['u_w'],shipdict.res_data['vres'],shipdict.res_data['res'])
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_surge_function,
shipdict = shipdict,
df = df,
label = 'fx',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'delta',y = ['fx','fx_seaman'],ax = ax)
ax.set_title('Rudder angle variation');
```
```python
df = pd.DataFrame()
df['r_w'] = np.linspace(-0.05,0.05,20)
df['delta'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['v_w'] = 0.0
df['X_res'] = -np.interp(df['u_w'],shipdict.res_data['vres'],shipdict.res_data['res'])
shipdict2 = shipdict.copy()
#shipdict2.design_particulars['lcg'] = 0.0
df_input = generate_input.add_shipdict_inputs(lambda_function=lambda_functions.total_surge_function,
shipdict=shipdict2,
df=df)
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_surge_function,
shipdict = shipdict2,
df = df,
label = 'fx',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'r_w',y = ['fx','fx_seaman'],ax = ax)
ax.set_title('Yaw rate variation');
```
```python
result_comparison['fx']
```
```python
df_input.head()
```
### Sway
```python
df = pd.DataFrame()
df['v_w'] = np.linspace(-6,6,20)
df['delta'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['r_w'] = 0.0
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_sway_function,
shipdict = shipdict,
df = df,
label = 'fy',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'v_w',y = ['fy','fy_seaman'],ax = ax)
ax.set_title('Drift angle variation');
```
```python
df = pd.DataFrame()
df['r_w'] = np.linspace(-0.1,0.1,20)
df['delta'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['v_w'] = 0.0
shipdict2 = shipdict.copy()
#shipdict2.design_particulars['lcg'] = 0 # Something strange with lcg in Seaman?
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_sway_function,
shipdict = shipdict2,
df = df,
label = 'fy',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'r_w',y = ['fy','fy_seaman'],ax = ax)
ax.set_title('Yaw rate variation');
fig,ax = plt.subplots()
result_comparison.plot(x = 'r_w',y = ['fy_rudders_seaman'],ax = ax)
ax.set_title('Yaw rate variation Rudder');
```
```python
df = pd.DataFrame()
df['delta'] = np.deg2rad(np.linspace(-45,45,20))
df['r_w'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['v_w'] = 0.0
shipdict2 = shipdict.copy()
#shipdict2.rudder_coeff_data['s'] = 0
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_sway_function,
shipdict = shipdict2,
df = df,
label = 'fy',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'delta',y = ['fy','fy_seaman'],ax = ax)
ax.set_title('Rudder angle variation');
```
### Yaw
```python
df = pd.DataFrame()
df['v_w'] = np.linspace(-2,2,20)
df['delta'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['r_w'] = 0.0
df['Cd_lever'] = 0.0
shipdict2 = shipdict.copy()
#shipdict2.rudder_coeff_data['s'] = 0
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_yaw_function,
shipdict = shipdict2,
df = df,
label = 'mz',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'v_w',y = ['mz','mz_seaman'],ax = ax)
ax.set_title('Drift angle variation');
ax.grid()
```
```python
df = pd.DataFrame()
df['delta'] = np.deg2rad(np.linspace(-20,20,20))
df['v_w'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['r_w'] = 0.0
df['Cd_lever'] = 0
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_yaw_function,
shipdict = shipdict,
df = df,
label = 'mz',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'delta',y = ['mz','mz_seaman'],ax = ax)
ax.set_title('Rudder angle variation');
```
### Roll
```python
df = pd.DataFrame()
df['delta'] = np.deg2rad(np.linspace(-5,5,20))
df['v_w'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['u_w'] = 5.0
df['r_w'] = 0.0
df['p'] = 0
df['Cd_lever'] = 0
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_roll_function,
shipdict = shipdict,
df = df,
label = 'mx',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'delta',y = ['mx','mx_seaman'],ax = ax)
ax.set_title('Rudder angle variation');
```
```python
df = pd.DataFrame()
df['beta'] = np.deg2rad(np.linspace(-20,20,20))
df['V'] = V = 5.0
df['u_w'] = V*np.cos(df['beta'])
df['v_w'] = -V*np.sin(df['beta'])
df['delta'] = 0
df['T'] = 0
df['rho'] = 1025.0
df['g'] = 9.81
df['r_w'] = 0.0
df['p'] = 0
df['Cd_lever'] = 0
shipdict2 = shipdict.copy()
#shipdict2.rudder_coeff_data['s'] = 0
#shipdict2.non_lin_coeff_data['cd'] = 3
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=lambda_functions.total_roll_function,
shipdict = shipdict2,
df = df,
label = 'mx',
seaman_function=run_real_seaman.calculate_static_ship
)
fig,ax = plt.subplots()
result_comparison.plot(x = 'v_w',y = ['mx','mx_seaman'],ax = ax)
ax.set_title('Drift angle variation');
```
|
#!/usr/bin/env python
import numpy as np
from time import time
import pyfftw
from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2
from scipy.special import jv as besselj
import finufftpy
def translations_brute_force(Shathat, Mhat, cmul_trans):
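    # Strategy: apply each candidate translation as a phase shift in the
    # polar Fourier domain, FFT over the angular axis, take per-frequency
    # matrix products against the template coefficients, then inverse-FFT
    # to recover the in-plane rotation (gamma) axis.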
    # Shathat: (te, k, q)
# Mhat: (im, k × γ)
# cmul_trans: (tr, k × γ)
n_trans = cmul_trans.shape[-2]
n_images = Mhat.shape[-2]
Shathat = Shathat.transpose((2, 0, 1))
# Shathat: (q, te, k)
n_templates = Shathat.shape[-2]
ngridr = Shathat.shape[-1]
n_gamma = Shathat.shape[-3]
Mhat = Mhat.reshape((n_images, ngridr, n_gamma))
cmul_trans = cmul_trans.reshape((n_trans, ngridr, n_gamma))
# Mhat: (im, k, γ)
# cmul_trans: (tr, k, γ)
Mhat = Mhat[:, np.newaxis, :, :]
cmul_trans = cmul_trans[np.newaxis, :, :, :]
# Mhat: (im, 1, k, γ)
# cmul_trans: (1, tr, k, γ)
Mhat = Mhat.transpose((3, 2, 0, 1)).copy()
cmul_trans = cmul_trans.transpose((3, 2, 0, 1)).copy()
# Mhat: (γ, k, im, 1)
# cmul_trans: (γ, k, 1, tr)
Mhat_trans = pyfftw.empty_aligned((n_gamma, ngridr, n_images, n_trans),
dtype='complex128')
# Mhat_trans: (γ, k, im × tr)
plan = pyfftw.FFTW(Mhat_trans, Mhat_trans, axes=(0,),
direction='FFTW_FORWARD', flags=('FFTW_ESTIMATE',), threads=12)
tmr_start = time()
np.multiply(Mhat, cmul_trans, out=Mhat_trans)
plan()
Mhathat_trans = Mhat_trans.reshape((n_gamma, ngridr, n_images * n_trans))
# Mhathat_trans: (q, k, im × tr)
ptm = time() - tmr_start
tmr_start = time()
c_n2 = np.zeros((n_gamma, n_templates, n_images*n_trans),
dtype=np.complex128)
# c_n2: (q, te, im × tr)
for k1 in range(n_gamma):
k1p = (k1 + n_gamma // 2) % n_gamma
c_n2[k1, :, :] = np.matmul(np.conj(Shathat[k1p, :, :]), Mhathat_trans[k1, :, :])
c_n2 = 2 * np.pi * c_n2
c_n2 = ifft(c_n2, axis=0)
# c_n2: (γ, te, im × tr)
c_n2 = c_n2.reshape((n_gamma, n_templates, n_images, n_trans))
c_n2 = np.real(c_n2)
# c_n2: (γ, te, im, tr)
tm = time() - tmr_start
return c_n2, ptm, tm
def translations_brute_force_batch(Shathat, Mhat, pf_grid, tr_grid, n_psi,
n_batch_im=None, n_batch_trans=500):
n_templates = Shathat.shape[0]
n_images = Mhat.shape[0]
trans = tr_grid['trans']
n_trans = tr_grid['n_trans']
if n_batch_im is None:
n_batch_im = n_images
n_batch_trans = min(n_batch_trans, n_trans)
zprods1 = np.zeros((n_psi, n_templates, n_images, n_trans))
# zprods1: (γ, te, im, tr)
tm1 = 0
precomp1 = 0
for cn in range(0, n_images, n_batch_im):
idx_im = range(cn, min(cn + n_batch_im, n_images))
for ttt in range(0, n_trans, n_batch_trans):
idx_trans = range(ttt, min(ttt + n_batch_trans, n_trans))
cmul_trans = pft_phase_shift(-trans[idx_trans, :], pf_grid)
# cmul_trans: (tr, k × γ)
tmp, ptm, tm = translations_brute_force(
Shathat, Mhat[idx_im, :], cmul_trans)
zprods1[np.ix_(range(n_psi),
range(n_templates),
idx_im,
idx_trans)] = tmp
precomp1 += ptm
tm1 += tm
zprods1 = zprods1.transpose((2, 1, 0, 3))
return zprods1, precomp1, tm1
def svd_decomposition_alignment(SSS, Mhat, n_bessel, all_rnks, BigMul_left):
ngridr = SSS.shape[-1]
n_templates = SSS.shape[-2]
n_gamma = SSS.shape[-3]
n_images = Mhat.shape[-2]
n_trans = BigMul_left.shape[-1]
tmr_start = time()
Mhathat = Mhat.reshape((n_images, ngridr, n_gamma))
Mhathat = fftshift(fft(Mhathat, axis=-1), axes=-1) / n_gamma
MMM = np.zeros((n_images, 2 * n_bessel + 1, ngridr, n_gamma),
dtype=np.complex128)
for im in range(n_images):
for qp in range(-n_bessel, n_bessel + 1):
tmp = Mhathat[im, :, :]
MMM[im, qp + n_bessel, :, :] = np.roll(tmp, -qp, axis=-1)
MMM = MMM.transpose((1, 3, 2, 0)).copy()
precomp2 = time() - tmr_start
tmr_start = time()
BigMul_right = np.zeros((sum(all_rnks), n_gamma, n_templates, n_images),
dtype=np.complex128)
for qp in range(-n_bessel, n_bessel + 1):
rnk = all_rnks[qp + n_bessel]
ofst = sum(all_rnks[:qp + n_bessel])
for ll in range(rnk):
for q in range(n_gamma):
tmp = np.matmul(SSS[ofst + ll, q, :, :],
MMM[qp + n_bessel, q, :, :])
BigMul_right[ofst + ll, q, :, :] = tmp
BigMul_right = BigMul_right.transpose((3, 2, 1, 0)).copy()
c_n = np.zeros((n_images, n_templates, n_gamma, n_trans),
dtype=np.complex128)
for im in range(n_images):
for tt in range(n_templates):
c_n[im, tt, :, :] = np.matmul(BigMul_right[im, tt, :, :],
BigMul_left)
c_n = 2 * np.pi * c_n
zprods = ifft(ifftshift(c_n, axes=-2), axis=-2) * n_gamma
tm2 = time() - tmr_start
return zprods, precomp2, tm2
def cartesian_to_pft(templates, T, pf_grid):
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
ngridr = xnodesr.shape[0]
n_templates = templates.shape[0]
N = templates.shape[1]
dx = T / N
dy = T / N
wx = pf_grid['wx']
wy = pf_grid['wy']
Shat = np.zeros((n_templates, ngridr * n_psi), dtype=np.complex128)
upsampfac = 1.25
fcc = np.empty(len(wx), dtype=np.complex128)
for k in range(n_templates):
template = templates[k, :, :]
# Need to force Fortran ordering because that's what the FINUFFT
# interface expects.
gg = np.asfortranarray(template.transpose((1, 0)))
isign = -1
eps = 1e-6
# Note: Crashes if gg is a 1D vector (raveled). Why?
finufftpy.nufft2d2(wx * dx, wy * dy, fcc,
isign, eps, gg, upsampfac=upsampfac)
Shat[k, :] = fcc
return Shat
def pft_to_cartesian(Shat, T, N, pf_grid):
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
quad_wts = pf_grid['quad_wts']
ngridr = xnodesr.shape[0]
n_templates = Shat.shape[0]
dx = T / N
dy = T / N
wx = pf_grid['wx']
wy = pf_grid['wy']
templates1 = np.zeros((n_templates, N, N))
# Again, Fortran ordering is necessary for FINUFFT.
gxx = np.empty((N, N), dtype=np.complex128, order='F')
upsampfac = 1.25
for k in range(n_templates):
fcc1 = Shat[k, :] * quad_wts
isign = 1
eps = 1e-6
finufftpy.nufft2d1(wx * dx, wy * dy, fcc1, isign, eps, N, N, gxx,
upsampfac=upsampfac)
gxx = gxx*dx*dy/(4*np.pi**2)
templates1[k, :, :] = np.real(gxx.transpose((1, 0)))
return templates1
def rotate_pft(fcc, rgamma, pf_grid):
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
ngridr = xnodesr.shape[0]
ngridc = n_psi * np.ones(ngridr, dtype=np.int32)
fcc_rot = np.zeros(fcc.shape, dtype=np.complex128)
cnt = 0
for rr in range(ngridr):
tmp = fcc[:, cnt:cnt + ngridc[rr]]
ffcc = fft(tmp)
n_theta = ngridc[rr]
wth = ifftshift(np.arange(-n_theta/2, n_theta/2))
mul = np.exp(-1j * wth * rgamma[:, np.newaxis])
ffcc_rot = ffcc * mul
tmp = ifft(ffcc_rot)
fcc_rot[:, cnt:cnt + ngridc[rr]] = tmp
cnt += ngridc[rr]
return fcc_rot
def pft_phase_shift(sh, pf_grid):
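    # A real-space translation by sh multiplies the Fourier transform by
    # exp(-i k . sh); with k in polar form (r, psi) this is
    # exp(-i r (cos(psi) sh_x + sin(psi) sh_y)).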
all_psi = pf_grid['all_psi']
quad_xnodesr = pf_grid['all_r']
phase = (np.cos(all_psi) * sh[:, np.newaxis, 0]
+ np.sin(all_psi) * sh[:, np.newaxis, 1])
cmul = np.exp(-1j * quad_xnodesr * phase)
return cmul
def translate_pft(fcc, sh, pf_grid):
cmul = pft_phase_shift(sh, pf_grid)
return fcc * cmul
def pft_norm(Mhat, pf_grid):
quad_wts = pf_grid['quad_wts']
return np.sqrt(np.sum((np.abs(Mhat) ** 2) * quad_wts, axis=-1))
def pft_to_fb(Shat, pf_grid):
ngridr = pf_grid['ngridr']
n_psi = pf_grid['n_psi']
quad_wts = pf_grid['quad_wts']
n_templates = Shat.shape[0]
quad_wts_sq = quad_wts.reshape((ngridr, n_psi))
Shathat = Shat.reshape((n_templates, ngridr, n_psi))
# Shathat: (te, k, γ)
Shathat = np.fft.fftshift(np.fft.fft(Shathat, axis=-1), axes=-1)
Shathat = Shathat * quad_wts_sq[np.newaxis, :, :]
# Shathat: (te, k, q)
# There was a 2π factor missing before. Let's remove it.
Shathat = Shathat / (2 * np.pi)
return Shathat
def make_tensor_grid(rmax, ngridr, n_psi):
dr = rmax/ngridr
xnodesr = dr*np.arange(1, ngridr+1)
weights = dr*np.ones(ngridr)
psi = 2 * np.pi / n_psi * np.arange(n_psi)
all_psi = np.repeat(psi[np.newaxis, :], ngridr, axis=0)
all_psi = np.ravel(all_psi)
all_r = np.repeat(xnodesr[:, np.newaxis], n_psi, axis=1)
all_r = np.ravel(all_r)
wts_theta = 2 * np.pi / n_psi
quad_wts = wts_theta * xnodesr * weights
quad_wts = np.repeat(quad_wts[:, np.newaxis], n_psi, axis=-1)
quad_wts = np.ravel(quad_wts)
wx = np.zeros(n_psi * ngridr)
wy = np.zeros(n_psi * ngridr)
cnt = 0
for rr in range(ngridr):
dd = xnodesr[rr]
theta = 2 * np.pi / n_psi * np.arange(n_psi)
wx[cnt:cnt + n_psi] = dd * np.cos(theta)
wy[cnt:cnt + n_psi] = dd * np.sin(theta)
cnt = cnt + n_psi
grid = dict()
grid['rmax'] = rmax
grid['ngridr'] = ngridr
grid['n_psi'] = n_psi
grid['xnodesr'] = xnodesr
grid['all_psi'] = all_psi
grid['all_r'] = all_r
grid['quad_wts'] = quad_wts
grid['wx'] = wx
grid['wy'] = wy
return grid
def make_adaptive_grid(delta_range, dx, oversampling):
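    # Concentric rings of translations: the angular sample count grows
    # linearly with ring radius so the arc spacing stays near
    # dx / oversampling on every ring.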
all_delta = dx / oversampling * np.arange(oversampling * delta_range + 1e-10)
n_delta = all_delta.shape[0]
n_omega = oversampling * np.int32(np.ceil(2 * np.pi / dx * all_delta))
n_trans = np.sum(n_omega)
trans = np.zeros((n_trans, 2))
cnt = 0
for kk in range(n_delta):
n_om = n_omega[kk]
all_om = 2 * np.pi * np.arange(n_om) / n_om
trans[cnt:cnt + n_om, 0] = all_delta[kk] * np.cos(all_om)
trans[cnt:cnt + n_om, 1] = all_delta[kk] * np.sin(all_om)
cnt += n_om
grid = dict()
grid['all_delta'] = all_delta
grid['n_delta'] = n_delta
grid['n_omega'] = n_omega
grid['n_trans'] = n_trans
grid['trans'] = trans
return grid
def make_cartesian_grid(delta_range, dx, oversampling):
Nkeep = 2 * oversampling * delta_range
xfine = dx * np.arange(-Nkeep // 2, Nkeep // 2)
trans = xfine
trans = np.meshgrid(trans, trans, indexing='ij')
trans = np.stack(trans[::-1], -1)
trans = trans.reshape((Nkeep ** 2, 2))
grid = {'n_trans': Nkeep ** 2, 'trans': trans}
return grid
def extract_alignments(inner_prods3, tr_grid):
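    # For each image, take the argmax over the flattened
    # (template, rotation, translation) grid and unravel it back into the
    # best-matching template index, in-plane angle, and shift.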
n_images = inner_prods3.shape[0]
n_templates = inner_prods3.shape[1]
n_psi = inner_prods3.shape[2]
n_trans = inner_prods3.shape[3]
trans = tr_grid['trans']
inner_prods3 = inner_prods3.reshape((n_images,
n_templates*n_psi*n_trans))
est_template_ind = np.zeros(n_images, dtype=np.int32)
est_trans = np.zeros((n_images, 2))
est_gamma = np.zeros(n_images)
idx = inner_prods3.argmax(axis=-1)
for cn in range(n_images):
I3, I2, I1 = np.unravel_index(idx[cn],
(n_templates, n_psi, n_trans))
shiftx = trans[I1, 0]
shifty = trans[I1, 1]
rgamma = I2 * 2 * np.pi / n_psi
est_template_ind[cn] = I3
est_trans[cn, 0] = shiftx
est_trans[cn, 1] = shifty
est_gamma[cn] = rgamma
return est_template_ind, est_trans, est_gamma
def rotations_brute_force(fimages, Shat, n_gamma, pf_grid, Nfine):
eval_results = False
if Shat.ndim == 2:
Shat = Shat[np.newaxis, :, :]
n_images, N, _ = fimages.shape
n_templates, ngridr, ngridp = Shat.shape
quad_wts_sq = pf_grid['quad_wts'].reshape((ngridr, ngridp))
wx = pf_grid['wx']
wy = pf_grid['wy']
all_gamma = 2 * np.pi / n_gamma * np.arange(n_gamma)
tmr_start = time()
Shathat = fft(Shat) / ngridp
# Shat: (te, k, γ)
# Shathat: (te, k, q)
Shathat = Shathat.reshape((n_templates, 1, ngridr, ngridp))
# Shathat: (te, 1, k, q)
wth = ifftshift(np.arange(-ngridp / 2, ngridp / 2))
mul = np.exp(-1j * wth[np.newaxis, :] * all_gamma[:,np.newaxis])
# mul: (γ, q)
Shathat_rot = Shathat * mul[:, np.newaxis, :]
# Shathat_rot: (te, γ, k, q)
    # NOTE: This can be sped up by using PyFFTW. However, for the execution to
    # be efficient, the plan must be created using FFTW_MEASURE, which takes a
    # long time. The solution will be to separate this out to the BFR
    # “planning” stage for some fixed number of image–template pairs, then
    # loop over these, computing the IFFT batchwise at execution (since the
    # exact number of pairs is not known at planning time).
Shat_rot = ifft(Shathat_rot)
fx1 = quad_wts_sq * Shat_rot
T = 2
dx = dy = T / N
templates_rot = np.empty((N, N, n_gamma, n_templates),
dtype=np.complex128, order='F')
upsampfac = 1.25
isign = 1
eps = 1e-2
finufftpy.nufft2d1many(wx * dx, wy * dy, fx1, isign, eps, N, N,
templates_rot, upsampfac=upsampfac)
templates_rot = templates_rot / (4 * np.pi ** 2)
# templates_rot: (trx, try, γ, te)
templates_rot = templates_rot.transpose((3, 2, 1, 0)).copy()
# templates_rot: (te, γ, try, trx)
ftemplates_rot = fft2(ifftshift(templates_rot, axes=(-2, -1)))
# ftemplates_rot: (te, γ, trky, trkx)
precomp = time() - tmr_start
tmr_start = time()
ftemplates_rot = ftemplates_rot[:, np.newaxis, :, :, :]
# ftemplates_rot: (te, im, γ, trky, trkx)
fxx = fimages[:, np.newaxis, :, :] * np.conj(ftemplates_rot)
# ftemplates_rot: (te, im, γ, trky, trkx)
inner_prods = pyfftw.zeros_aligned((n_templates, n_images, n_gamma, Nfine, Nfine), dtype='complex128')
inner_prods[:, :, :, :N // 2, :N // 2] = fxx[:, :, :, :N // 2, :N // 2]
inner_prods[:, :, :, :N // 2, -N // 2:] = fxx[:, :, :, :N // 2, -N // 2:]
inner_prods[:, :, :, -N // 2:, :N // 2] = fxx[:, :, :, -N // 2:, :N // 2]
inner_prods[:, :, :, -N // 2:, -N // 2:] = fxx[:, :, :, -N // 2:, -N // 2:]
plan = pyfftw.FFTW(inner_prods, inner_prods, axes=(-2, -1),
direction='FFTW_BACKWARD',
flags=('FFTW_MEASURE',), threads=12)
plan()
inner_prods = np.real(inner_prods)
inner_prods *= (Nfine / N) ** 2
# inner_prods: (te, im, γ, try, trx)
comp = time() - tmr_start
return inner_prods, precomp, comp
def calc_ftk_svd(n_bessel, eps, pf_grid, tr_grid):
all_UU = [None] * (2 * n_bessel + 1)
all_SSVV = [None] * (2 * n_bessel + 1)
all_rnks = np.zeros(2 * n_bessel + 1, dtype=np.int32)
xnodesr = pf_grid['xnodesr']
all_delta = tr_grid['all_delta']
n_delta = tr_grid['n_delta']
n_omega = tr_grid['n_omega']
n_trans = tr_grid['n_trans']
for qp in range(-n_bessel, n_bessel + 1):
J_n = besselj(qp, -all_delta[:, np.newaxis] * xnodesr[np.newaxis, :])
U, S, Vh = np.linalg.svd(J_n)
ind = S > eps
rnk = sum(ind)
all_rnks[qp + n_bessel] = rnk
all_UU[qp + n_bessel] = U[:, :rnk]
all_SSVV[qp + n_bessel] = S[:rnk, np.newaxis] * Vh[:rnk, :]
SSVV_big = np.concatenate(all_SSVV, axis=0)
UUU = np.concatenate(all_UU, axis=1)
all_omega = np.concatenate([2 * np.pi / n_om * np.arange(n_om)
for n_om in n_omega if n_om > 0])
all_qp = np.concatenate([(k - n_bessel) * np.ones(n)
for k, n in enumerate(all_rnks)])
vec_omega = np.exp(1j * all_qp[np.newaxis, :]
* (all_omega[:, np.newaxis] - np.pi / 2))
BigMul_left = np.zeros((sum(all_rnks), n_trans), dtype=np.complex128)
cnt = 0
for kk in range(n_delta):
n_om = n_omega[kk]
BigMul_left[:, cnt:cnt + n_om] = (UUU[kk, :][np.newaxis, :].T
* vec_omega[cnt:cnt + n_om, :].T)
cnt += n_om
return all_rnks, BigMul_left, SSVV_big
def premult_right_fb(Shathat, SSVV_big, all_rnks):
n_psi = Shathat.shape[2]
ngridr = Shathat.shape[1]
n_templates = Shathat.shape[0]
Shathat = Shathat.transpose((2, 0, 1))
Shathat = Shathat.reshape((1, n_psi * n_templates, ngridr))
SSS = SSVV_big[:, np.newaxis, :] * Shathat.conj()
SSS = SSS.reshape((sum(all_rnks), n_psi, n_templates, ngridr))
return SSS
def bft_plan(tr_grid, pf_grid):
plan = {'tr_grid': tr_grid,
'pf_grid': pf_grid}
return plan
def bft_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
tr_grid = plan['tr_grid']
n_psi = pf_grid['n_psi']
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
MSnorm = Mnorm[:, np.newaxis] * Snorm[np.newaxis, :]
tmr_start = time()
Shathat = pft_to_fb(Shat, pf_grid)
precomp1 = time() - tmr_start
zprods1, ptm, tm = translations_brute_force_batch(Shathat, Mhat,
pf_grid, tr_grid, n_psi)
precomp1 += ptm
inner_prods3 = zprods1 / MSnorm[..., np.newaxis, np.newaxis]
return inner_prods3, (precomp1, tm)
def ftk_plan(tr_grid, pf_grid, n_bessel, eps):
all_rnks, BigMul_left, SSVV_big = calc_ftk_svd(n_bessel, eps, pf_grid, tr_grid)
plan = {'tr_grid': tr_grid,
'pf_grid': pf_grid,
'n_bessel': n_bessel,
'eps': eps,
'all_rnks': all_rnks,
'BigMul_left': BigMul_left,
'SSVV_big': SSVV_big}
return plan
def ftk_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
SSVV_big = plan['SSVV_big']
all_rnks = plan['all_rnks']
n_bessel = plan['n_bessel']
BigMul_left = plan['BigMul_left']
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
MSnorm = Mnorm[:, np.newaxis] * Snorm[np.newaxis, :]
tmr_start = time()
Shathat = pft_to_fb(Shat, pf_grid)
SSS = premult_right_fb(Shathat, SSVV_big, all_rnks)
precomp2 = time() - tmr_start
zprods4, ptm, tm = svd_decomposition_alignment(SSS, Mhat, n_bessel,
all_rnks, BigMul_left)
precomp2 += ptm
inner_prods4 = np.real(zprods4) / MSnorm[..., np.newaxis, np.newaxis]
return inner_prods4, (precomp2, tm)
def bfr_plan(Nfine, Nkeep, n_gamma, pf_grid, T, N):
plan = {'Nfine': Nfine,
'Nkeep': Nkeep,
'n_gamma': n_gamma,
'pf_grid': pf_grid,
'T': T,
'N': N}
# TODO: FFTW plans, etc.
return plan
def bfr_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
T = plan['T']
N = plan['N']
Nfine = plan['Nfine']
Nkeep = plan['Nkeep']
n_gamma = plan['n_gamma']
ngridr = pf_grid['ngridr']
n_psi = pf_grid['n_psi']
n_templates = Shat.shape[0]
n_images = Mhat.shape[0]
dx = dy = T / N
images = pft_to_cartesian(Mhat, T, N, pf_grid) / (dx * dy)
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
fimages = fft2(ifftshift(images, axes=(-2, -1)))
SShat = Shat.reshape((n_templates, ngridr, n_psi))
fimages = fimages / Mnorm[:, np.newaxis, np.newaxis]
SShat = SShat / Snorm[:, np.newaxis, np.newaxis]
precomp3 = 0
comp3 = 0
inner_prods = np.zeros((n_images, n_templates, n_gamma, Nkeep, Nkeep), dtype=np.complex128)
for tt in range(n_templates):
inn, precomp, comp = rotations_brute_force(fimages, SShat[tt],
n_gamma, pf_grid, Nfine)
# NOTE: The following truncates *and* inverts the FFT shift.
inner_prods[:, tt, :, -Nkeep // 2:, -Nkeep // 2:] = inn[:, :, :, :Nkeep // 2, :Nkeep // 2]
inner_prods[:, tt, :, -Nkeep // 2:, :Nkeep // 2] = inn[:, :, :, :Nkeep // 2, -Nkeep // 2:]
inner_prods[:, tt, :, :Nkeep // 2, -Nkeep // 2:] = inn[:, :, :, -Nkeep // 2:, :Nkeep // 2]
inner_prods[:, tt, :, :Nkeep // 2, :Nkeep // 2] = inn[:, :, :, -Nkeep // 2:, -Nkeep // 2:]
precomp3 += precomp
comp3 += comp
inner_prods = inner_prods.reshape((n_images, n_templates, n_gamma, Nkeep ** 2))
return inner_prods, (precomp3, comp3)
|
% This file should NOT change when run through MBeautify
% If you find anything that is difficult or failed in some MBeautify
% version, please add it here.
% unary operator testcases
+2
+2.
+.2
' string with lots of spaces '
1 + 2
f(+1) + 1
x = y
x + 1. + 2
x + 1. + +.1
x + 1 + 2
x = 1
x = -1
x = +1
x = +.1
+(-[-.1])
z = [1, 2, 3, 4]
if 1 > +2
return
end; % comment +-+-+- +++ 123 ***
if 1 > -2
end
% different meanings of 'end'
if any(z == -[-1, -2, -3, -4])
ifmyvariablenamecontainsif = z(1:end);
end
% old-style function calls
disp +end+ this is not any keyword if else endif while +1
% bracket handling
while (1)
a = [0, 1];
a(1) = 2 * [a(0)];
break
end;
% transpose
-x' + +1 + x'' + 2 * x''' * 1
a = eye(27)
a(3, 4:5) = [3, 1]
% norm notation
1.e-6
% #36
a(1, 1:2) = [3, 1]
a(3, :) = [3, 1]
b = zeros(3, 3, 3);
b(:, :, 2) = rand(2, 2);
%if AddCommasToMatrices=1
a=[@(x) minus(x,1),]
%if AddCommasToCellArrays=1
a={@(x) minus(x,1), @(x,y) minus(x,y)}
% #34
if -1 > -2
end
if +1 > -2
end
% #59
a = [1, 2, 3];
a = [1, a...
.* 2]
% #58
a = [1, a ... % 111
.* 2, ... % 222
123, 4- ...
5] % 333
a = {'', ... % hello
1, 4+ ... %hello2
2} % hello3
if true || ... % aaa
true || ... asd
false % //
end
%80
a + sprintf("%d", b)%comment
a + sprintf("'%d'", b) %comment
a + sprintf("""%d""", b) % comment
a + sprintf('"%d"', b)
a.' + sprintf('''%d''', b)
a' + sprintf('%d', b)
% Remove extra space after @
f = @ (x) a
% Remove extra space before unary operator
f = @(x) - a
num@MySuper(obj) - a
% remove spaces around @
num @ MySuper(obj) - a
% if AddCommasToMatrices=1
% add comma after b
[a, - b c + d]
% add comma before b
[a -b]
% one comma added after b
[a *b (c -d)]
% treat whitespace as delimiter regardless of AddCommasToMatrices
[1 (2) {3}]
% same for cells
{1 (2) {3}} |
# Tutorial 4 - Pressure vs Temperature
Surfinpy has the functionality to generate phase diagrams as a function of pressure vs temperature based upon the methodology used in Molinari et al. (J. Phys. Chem. C 116, 12, 7073-7082) according to
\begin{align}
\gamma_{adsorbed, T, P} & = \gamma_{bare} + C \left( E_{ads, T} - RT\ln\frac{p}{p^o} \right)
\end{align}
where $\gamma_{adsorbed, T, p}$ is the surface energy of the surface with adsorbed species at temperature (T) and pressure (P), $\gamma_{bare}$ is the surface energy of the bare surface, C is the coverage of adsorbed species, $E_{ads}$ is the adsorption energy,
\begin{align}
E_{ads, T} & = \left( E_{slab, adsorbant} - \left( E_{slab, bare} + n_{H_2O} E_{H_2O, T} \right) \right) / n_{H_2O}
\end{align}
where $E_{slab, adsorbant}$ is the energy of the surface and the adsorbed species, $n_{H_2O}$ is the number of adsorbed species,
\begin{align}
E_{H_2O, (T)} & = E_{H_2O, (g)} - TS_{(T)}
\end{align}
where $S_{(T)}$ is the experimental entropy of gaseous water in the standard state.
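A minimal numerical sketch of this correction (not part of the surfinpy API; the entropy value below is a placeholder rather than a NIST number):
```python
import numpy as np

# E_{H2O,(T)} = E_{H2O,(g)} - T * S_{(T)}, everything in eV here.
E_H2O_g = -14.00              # DFT energy of gaseous water (eV)
S = 2.0e-3                    # placeholder standard-state entropy (eV/K)
T = np.arange(0, 1001, 250)   # temperatures of interest (K)
print(E_H2O_g - T * S)        # temperature-corrected water energies
```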
So let's look at this in action. The module used is called p_vs_t
```python
import matplotlib.pyplot as plt
from surfinpy import utils as ut
from surfinpy import p_vs_t
from surfinpy import data
```
There are a number of user inputs that are required; the inputs are similar to mu_vs_mu but with some small differences. First we need the DFT energy of the adsorbant (eV), the energy of the surface ($J m^{-2}$) and the data for each surface. For clarity, in the `surfinpy.data.DataSet` objects for each surface it is always assumed that the adsorbing species is 'Y'.
```python
adsorbant = -14.00
```
```python
SE = 1.40
```
```python
stoich = data.DataSet(cation = 24, x = 48, y = 0, area = 60.22, energy = -575.00, label = 'Bare')
H2O = data.DataSet(cation = 24, x = 48, y = 2, area = 60.22, energy = -605.00, label = '1 Water')
H2O_2 = data.DataSet(cation = 24, x = 48, y = 8, area = 60.22, energy = -695.00, label = '2 Water')
```
```python
data = [H2O, H2O_2]
```
The coverage of the adsorbing species (in this case water) is also needed. surfinpy has a built-in function within the utils module to do this for you: it takes the number of adsorbing species and the surface area and calculates the coverage.
```python
cov = ut.calculate_coverage(data)
```
We need the thermochemical data for the adsorbed species in order to make the DFT energy of our adsorbing species a temperature-dependent term. This data has been taken directly from the NIST-JANAF database and cut to include just the temperature and S values within the temperature range that interests us (in this case 0 - 1000 K).
```python
thermochem = ut.read_nist("H2O.txt")
```
Now we can generate our phase diagram.
```python
system = p_vs_t.calculate(stoich, data, SE, adsorbant, thermochem, coverage=cov)
ax = system.plot( colourmap='RdBu', figsize=(6, 4), ylabel='$log_{10} (P_{CO_2})$')
plt.savefig("../../../docs/source/Figures/Surfaces_6.png", dpi=600)
plt.show()
```
|
{-# OPTIONS_GHC -fno-warn-unused-binds -fno-warn-unused-matches -fno-warn-name-shadowing -fno-warn-missing-signatures #-}
{-# LANGUAGE FlexibleInstances, MultiParamTypeClasses, UndecidableInstances, FlexibleContexts, TypeSynonymInstances #-}
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
-- |
-- | Module : Test approx median
-- | Creator: Xiao Ling
-- | Created: 12/14/2015
-- | see : http://www.serpentine.com/criterion/tutorial.html
-- |
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
module TapproxMedian where
import Control.Monad.Random.Class
import Control.Monad.Random
import Control.Monad.State
import Control.Monad.Reader
import Data.Conduit
import qualified Data.Conduit.List as Cl
import Core
import Statistics
import ApproxMedian
{-----------------------------------------------------------------------------
Benchmark counter vs counter'
------------------------------------------------------------------------------}
main :: IO ()
main = return ()
|
######################################################## model
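# Adapter method: unpack a BorderedArray (state z.u, continuation parameter
# z.p) and forward to the core residual below, packing θ into a NamedTuple.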
F(z::BorderedArray,θ::AbstractVector) = F(z.u,(θ=θ,p=z.p))
function F(u::AbstractVector,parameters::NamedTuple)
@unpack θ,p = parameters
μ₁,μ₂, a₁,a₂, k = θ
f = first(u)*first(p)*first(θ)
F = similar(u,typeof(f))
F[1] = ( 10^a₁ + (p*u[2])^2 ) / ( 1 + (p*u[2])^2 ) - u[1]*10^μ₁
F[2] = ( 10^a₂ + (k*u[1])^2 ) / ( 1 + (k*u[1])^2 ) - u[2]*10^μ₂
return F
end
######################################################### targets and initial guess
X = StateSpace( 2, 3:0.001:7, [4,5] )
θ = SizedVector{5}(0.0,0.0,-1.0,-1.0,2.0) |
[STATEMENT]
lemma peek_st_def2 [simp]: "(\<lambda>s.. Pf s) Y s = Pf (store s) Y s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. peek_st Pf Y s = Pf (snd s) Y s
[PROOF STEP]
apply (unfold peek_st_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Pf (snd s) Y s = Pf (snd s) Y s
[PROOF STEP]
apply (simp (no_asm))
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
```python
from __future__ import print_function
"""
Tutorial: A reference implementation of configuration interactions singles.
"""
__authors__ = ["Boyi Zhang", "Adam S. Abbott"]
__credits__ = ["Boyi Zhang", "Adam S. Abbott", "Justin M. Turney"]
__copyright_amp__ = "(c) 2014-2018, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2017-08-08"
```
# Configuration Interaction Singles (CIS)
## I. Theoretical Overview
In this tutorial, we will implement the configuration interaction singles method in the spin orbital notation. The groundwork for working in the spin orbital notation has been laid out in "Introduction to the Spin Orbital Formulation of Post-HF methods" [tutorial](../08_CEPA0_and_CCD/8a_Intro_to_spin_orbital_postHF.ipynb). It is highly recommended to work through that introduction before starting this tutorial.
### Configuration Interaction (CI)
The configuration interaction wavefunction is constructed as a linear combination of the reference determinants and all singly, doubly, ... n-tuple excited determinants where n is the number of electrons in a given system:
\begin{equation}
\Psi_\mathrm{CI} = (1 + \hat{C}_1 + \hat{C}_2 + \dots + \hat{C}_n)\Phi
\end{equation}
Here, $\hat{C}_n$ is the operator generating all $n$-fold excitations.
In Full CI, all possible excitations are included in the wavefunction expansion. In truncated CI methods, only a subset of excitations are included.
## CIS
In CIS, only single excitations from the occupied (indices i,j,k...) to the virtual (indices a,b,c...) orbitals are included. As a result, CIS gives transition energies to an excited state.
Assuming we are using canonical Hartree-Fock spin orbitals ($\{\mathrm{\psi_p}\}$) with orbital energies $\{\epsilon_p\}$, we can build a shifted CIS Hamiltonian matrix:
\begin{equation}
\tilde{\textbf{H}} = \textbf{H} - E_0 \textbf{I} = [\langle \Phi_P | \hat{H_e} - E_0|\Phi_Q \rangle],\,
\Phi_P \in {\Phi_i^a}
\end{equation}
where $E_0$ is the ground state Hartree-Fock state energy given by $\langle \Phi | \hat{H_e}|\Phi \rangle$.
The matrix elements of this shifted CIS Hamiltonian matrix can be evaluated using Slater's rules to give:
\begin{equation}
\langle \Phi_i^a | \hat{H_e} - E_0|\Phi_j^b \rangle = (\epsilon_a - \epsilon_i)\delta_{ij} \delta_{ab}
+ \langle aj || ib \rangle
\end{equation}
This then becomes a standard eigenvalue equation from which we can solve for the excitation energies and the wavefunction expansion coefficients:
\begin{equation}
\tilde{\textbf{H}} \textbf{c}_K = \Delta E_K\textbf{c}_K, \,\Delta E_K = E_K - E_0
\end{equation}
## II. Implementation
As with previous tutorials, let's begin by importing Psi4 and NumPy and setting memory and output file options.
```python
# ==> Import Psi4, NumPy, & SciPy <==
import psi4
import numpy as np
# ==> Set Basic Psi4 Options <==
# Memory specifications
psi4.set_memory(int(2e9))
numpy_memory = 2
# Output options
psi4.core.set_output_file('output.dat', False)
```
We now define the molecule and set Psi4 options:
```python
mol = psi4.geometry("""
0 1
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
psi4.set_options({'basis': 'sto-3g',
'scf_type': 'pk',
'reference': 'rhf',
'mp2_type': 'conv',
'e_convergence': 1e-8,
'd_convergence': 1e-8})
```
We use Psi4 to compute the RHF energy and wavefunction and store them in the variables `scf_e` and `scf_wfn`. We also check the memory requirements for computation:
```python
# Get the SCF wavefunction & energies
scf_e, scf_wfn = psi4.energy('scf', return_wfn=True)
# Check memory requirements
nmo = scf_wfn.nmo()
I_size = (nmo**4) * 8e-9
print('\nSize of the ERI tensor will be %4.2f GB.\n' % I_size)
memory_footprint = I_size * 1.5
if I_size > numpy_memory:
psi4.core.clean()
raise Exception("Estimated memory utilization (%4.2f GB) exceeds allotted \
memory limit of %4.2f GB." % (memory_footprint, numpy_memory))
```
Size of the ERI tensor will be 0.00 GB.
We first obtain orbital information from our wavefunction. We also create an instance of MintsHelper to help build our molecular integrals:
```python
# Create instance of MintsHelper class
mints = psi4.core.MintsHelper(scf_wfn.basisset())
# Get basis and orbital information
nbf = mints.nbf() # Number of basis functions
nalpha = scf_wfn.nalpha() # Number of alpha electrons
nbeta = scf_wfn.nbeta() # Number of beta electrons
nocc = nalpha + nbeta # Total number of electrons
nso = 2 * nbf # Total number of spin orbitals
nvirt = nso - nocc # Number of virtual orbitals
```
We now build our 2-electron integral, a 4D tensor, in the spin orbital formulation. We also convert it into physicist's notation and antisymmetrize for easier manipulation of the tensor later on.
```python
def spin_block_tei(I):
'''
Spin blocks 2-electron integrals
    Using np.kron, we project I and its transpose into the space of the 2x2 identity.
The result is our 2-electron integral tensor in spin orbital notation
'''
identity = np.eye(2)
I = np.kron(identity, I)
return np.kron(identity, I.T)
I = np.asarray(mints.ao_eri())
I_spinblock = spin_block_tei(I)
# Convert chemist's notation to physicist's notation, and antisymmetrize
# (pq | rs) ---> <pr | qs>
# <pr||qs> = <pr | qs> - <pr | sq>
gao = I_spinblock.transpose(0, 2, 1, 3) - I_spinblock.transpose(0, 2, 3, 1)
```
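As a quick sanity check (an addition for this walkthrough, not in the original tutorial), the antisymmetrized tensor should flip sign when the two ket indices are exchanged:
```python
# <pr||qs> = -<pr||sq>: swapping the last two axes flips the sign
assert np.allclose(gao, -gao.transpose(0, 1, 3, 2))
```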
We get the orbital energies from alpha and beta electrons and append them together. We spin-block the coefficients obtained from the reference wavefunction and convert them into NumPy arrays. There is a set corresponding to coefficients from alpha electrons and a set of coefficients from beta electrons. We then sort them according to the order of the orbital energies using argsort():
```python
# Get orbital energies, cast into NumPy array, and extend eigenvalues
eps_a = np.asarray(scf_wfn.epsilon_a())
eps_b = np.asarray(scf_wfn.epsilon_b())
eps = np.append(eps_a, eps_b)
# Get coefficients, block, and sort
Ca = np.asarray(scf_wfn.Ca())
Cb = np.asarray(scf_wfn.Cb())
C = np.block([
[ Ca, np.zeros_like(Cb)],
[np.zeros_like(Ca), Cb ]])
# Sort the columns of C according to the order of orbital energies
C = C[:, eps.argsort()]
# Sort orbital energies
eps = np.sort(eps)
```
We now transform the 2-electron integral from the AO basis into the MO basis using the coefficients:
```python
# Transform gao, which is the spin-blocked 4d array of physicist's notation,
# antisymmetric two-electron integrals, into the MO basis using MO coefficients
gmo = np.einsum('pQRS, pP -> PQRS',
np.einsum('pqRS, qQ -> pQRS',
np.einsum('pqrS, rR -> pqRS',
np.einsum('pqrs, sS -> pqrS', gao, C), C), C), C)
```
Now that we have our integrals, coefficients, and orbital energies set up with spin orbitals, we can start our CIS procedure. We begin by initializing the shifted Hamiltonian matrix $\tilde{\textbf{H}}$ (`HCIS`). Let's think about the size of $\tilde{\textbf{H}}$. We need all possible single excitations from the occupied to virtual orbitals. This is given by the number of occupied orbitals times the number of virtual orbitals (`nocc * nvirt`).
The size of our matrix is thus `nocc * nvirt` by `nocc * nvirt`.
```python
# Initialize CIS matrix.
# The dimensions are the number of possible single excitations
HCIS = np.zeros((nocc * nvirt, nocc * nvirt))
```
Next, we want to build all possible excitations from occupied to virtual orbitals. We create two for-loops that will loop over the number of occupied orbitals and number of virtual orbitals, respectively, and store the combination of occupied and virtual indices as a tuple `(i, a)`. We put all tuples in a list called `excitations`.
```python
# Build the possible excitations, collect indices into a list
excitations = []
for i in range(nocc):
for a in range(nocc, nso):
excitations.append((i, a))
```
Now we can evaluate the matrix elements of the shifted CIS Hamiltonian matrix using the equation given above. For each element, there are several layers of indexing that we have to consider.
First, there are the indices of the element itself, which gives the position of the element in the matrix. Indices `p` and `q` are used:
`HCIS[p, q]`
Second, there are two sets of excitations from occupied to virtual orbitals corresponding to the bra and ket of each matrix element. For these, we will take advantage of the `excitations` list that we built with all possible excitations. We will use indices `i` and `a` to denote the excitation in the bra (`left_excitation`) and `j` and `b` to denote the excitation in the ket (`right_excitation`).
To manage these indices, we will use the `enumerate` function.
Note that a Kronecker delta $\delta_{pq}$ can be represented as p == q.
```python
# Form matrix elements of shifted CIS Hamiltonian
for p, left_excitation in enumerate(excitations):
i, a = left_excitation
for q, right_excitation in enumerate(excitations):
j, b = right_excitation
HCIS[p, q] = (eps[a] - eps[i]) * (i == j) * (a == b) + gmo[a, j, i, b]
```
We now use the NumPy function `linalg.eigh` (for hermitian matrices) to diagonalize the shifted CIS Hamiltonian. This will give us the excitation energies (`ECIS`). These eigenvalues correspond to the CIS total energies for various excited states. The columns of matrix `CCIS` give us the coefficients which describe the relative contribution of each singly excited determinant to the excitation energy.
```python
# Diagonalize the shifted CIS Hamiltonian
ECIS, CCIS = np.linalg.eigh(HCIS)
```
For a given excitation energy, each coefficient in the linear combination of excitations represents the amount that a particular excitation contributes to the overall excitation energy. The percentage contribution of each coefficient can be calculated by squaring the coefficient and multiplying by 100.
```python
# Percentage contributions of coefficients for each state vector
percent_contrib = np.round(CCIS**2 * 100)
```
In addition to excitation energies, we want to print the excitations that contribute 10% or more to the overall energy, as well as their percent contribution.
Note that the `end=' '` parameter in the print function allows us to print different sections to the same line without a line break.
```python
# Print detailed information on significant excitations
print('CIS:')
for state in range(len(ECIS)):
# Print state, energy
print('State %3d Energy (Eh) %10.7f' % (state + 1, ECIS[state]), end=' ')
for idx, excitation in enumerate(excitations):
if percent_contrib[idx, state] > 10:
i, a = excitation
# Print percentage contribution and the excitation
print('%4d%% %2d -> %2d' % (percent_contrib[idx, state], i, a), end=' ')
print()
```
CIS:
State 1 Energy (Eh) 0.2872554 99% 9 -> 10
State 2 Energy (Eh) 0.2872554 99% 8 -> 11
State 3 Energy (Eh) 0.2872554 50% 8 -> 10 50% 9 -> 11
State 4 Energy (Eh) 0.3444249 44% 6 -> 10 44% 7 -> 11
State 5 Energy (Eh) 0.3444249 88% 6 -> 11
State 6 Energy (Eh) 0.3444249 88% 7 -> 10
State 7 Energy (Eh) 0.3564617 50% 8 -> 10 50% 9 -> 11
State 8 Energy (Eh) 0.3659889 95% 8 -> 13
State 9 Energy (Eh) 0.3659889 95% 9 -> 12
State 10 Energy (Eh) 0.3659889 50% 8 -> 12 50% 9 -> 13
State 11 Energy (Eh) 0.3945137 35% 4 -> 11 51% 6 -> 13
State 12 Energy (Eh) 0.3945137 20% 4 -> 10 20% 5 -> 11 30% 6 -> 12 30% 7 -> 13
State 13 Energy (Eh) 0.3945137 35% 5 -> 10 51% 7 -> 12
State 14 Energy (Eh) 0.4160717 50% 8 -> 12 50% 9 -> 13
State 15 Energy (Eh) 0.5056282 44% 6 -> 10 44% 7 -> 11
State 16 Energy (Eh) 0.5142899 51% 4 -> 11 36% 6 -> 13
State 17 Energy (Eh) 0.5142899 29% 4 -> 10 29% 5 -> 11 20% 6 -> 12 20% 7 -> 13
State 18 Energy (Eh) 0.5142899 51% 5 -> 10 36% 7 -> 12
State 19 Energy (Eh) 0.5551918 15% 4 -> 10 15% 5 -> 11 35% 6 -> 12 35% 7 -> 13
State 20 Energy (Eh) 0.5630557 49% 4 -> 13 39% 5 -> 12
State 21 Energy (Eh) 0.5630557 39% 4 -> 13 49% 5 -> 12
State 22 Energy (Eh) 0.5630557 44% 4 -> 12 44% 5 -> 13
State 23 Energy (Eh) 0.6553184 35% 4 -> 10 35% 5 -> 11 15% 6 -> 12 15% 7 -> 13
State 24 Energy (Eh) 0.9101216 43% 4 -> 12 43% 5 -> 13
State 25 Energy (Eh) 1.1087709 49% 2 -> 10 49% 3 -> 11
State 26 Energy (Eh) 1.1087709 91% 2 -> 11
State 27 Energy (Eh) 1.1087709 91% 3 -> 10
State 28 Energy (Eh) 1.2000960 49% 2 -> 12 49% 3 -> 13
State 29 Energy (Eh) 1.2000960 83% 2 -> 13 15% 3 -> 12
State 30 Energy (Eh) 1.2000960 15% 2 -> 13 83% 3 -> 12
State 31 Energy (Eh) 1.3007851 48% 2 -> 10 48% 3 -> 11
State 32 Energy (Eh) 1.3257620 50% 2 -> 12 50% 3 -> 13
State 33 Energy (Eh) 19.9585264 100% 1 -> 10
State 34 Energy (Eh) 19.9585264 50% 0 -> 10 50% 1 -> 11
State 35 Energy (Eh) 19.9585264 100% 0 -> 11
State 36 Energy (Eh) 20.0109794 50% 0 -> 10 50% 1 -> 11
State 37 Energy (Eh) 20.0113420 100% 0 -> 13
State 38 Energy (Eh) 20.0113420 100% 1 -> 12
State 39 Energy (Eh) 20.0113420 50% 0 -> 12 50% 1 -> 13
State 40 Energy (Eh) 20.0505319 50% 0 -> 12 50% 1 -> 13
## References
1. Background paper:
>"Toward a systematic molecular orbital theory for excited states"
[[Foresman:1992:96](http://pubs.acs.org/doi/abs/10.1021/j100180a030)] J. B. Foresman, M. Head-Gordon, J. A. Pople, M. J. Frisch, *J. Phys. Chem.* **96**, 135 (1992).
2. Algorithms from:
> [[CCQC:CIS](https://github.com/CCQC/summer-program/tree/master/7)] CCQC Summer Program, "CIS" accessed with https://github.com/CCQC/summer-program/tree/master/7.
|
Join us for a unique day camp experience combining summer fun with nature-based science education activities. We offer a variety of camps for ages 7-15. Campers will take on the role of naturalist, scientist, and adventurer as they explore the forest and the creatures that inhabit it. Traditional summer camp fun will round out the experience, creating an adventure to remember for a lifetime!
What makes Jefferson Memorial Forest Camps special?
• A great counselor-to-camper ratio! We have two counselors for every 15 campers. Our Forest Explorers programs work on a 1:8 ratio.
• A highly trained staff.
• Hands-on nature activities and adventures.
• Campers learn new skills and build confidence in the natural world.
• Because this camp is about exploring the natural world, campers should be able to walk on trails, walk up and down hills, and traverse flat and uneven ground.
• Campers should be prepared to be outdoors all day (learn more about preparing for camp here).
• Because campers work cooperatively together in small groups, it is necessary that your child has the ability to maintain appropriate self-control in social situations.
• Parents with questions regarding the ability of Jefferson Memorial Forest staff to meet their child’s needs should contact the Camp Director at (502) 366-5432 well in advance of the start of camp. We are happy to make reasonable accommodations to ensure all campers have the best experience.
Shuttle service will be offered from George Rogers Clark Park at Thruston Ave. and Poplar Level Rd. Parents may purchase a pass for $40 (per child, per session) that will provide transportation for the week. The service is only for children ages 7 to 15. Make reservations early.
Is your child an adventurer, ready to explore what’s around the next bend in the trail? If so, then Adventure Camp is just what your child needs! Campers explore the trails deep in the forest, climb our alpine tower, shoot bows and arrows, use map and compass to discover buried treasure, and learn outdoor survival skills.
Shelter-Water-Fire-Food: These are the four basic human needs for survival. In training for the ultimate survival challenge on Friday, campers will develop their ability to build a shelter, start a fire, and find food and water in the wilderness. Teams will be challenged to complete their survival skills and obtain their shelter, water, fire, and food emblems to make them an ultimate survivor. We have added a second session this year due to popularity!
Discover the exciting world within a pond, lake, stream and river. Campers explore a lake by canoe, play in a flowing creek, and hike through a forest to a winding stream. And of course, no water-themed camp is complete without water games! Personal flotation devices (PFDs) are provided to each camper, and trained staff accompany the campers for our canoe excursion and creek play day. Parents are welcome to join their child for the canoeing excursion.
Prepare yourself for adventure on a whole new level! This is THE camp that sets itself apart from other camps! From camping out under the stars on an overnight trip to canoeing at Mitchell Hill Lake, you’re sure to get your dose of adventure at ECO X Camp, all while learning backcountry cooking, canoeing techniques, Leave No Trace skills, and more.
Enter a magical, hidden world in the Forest, where creatures of the imagination live. Campers use their creativity and natural materials to craft an enchanted land where anything is possible, while learning about the plants and animals of the forest. Looking for Lilith Theatre Company will create various drama activities with campers to develop their characters for the play on Friday. Families are invited to come out for the play and enjoy a Woodland Fairy Tea Party.
Travel back in time more than 200 years to discover nature and people through Historic Locust Grove. Discover life in the 18th and 19th centuries for the first European settlers and American Indians. This hands-on camp will take kids on a time-traveling adventure! PLEASE NOTE: Drop off and pick up will take place at Locust Grove. There will be no shuttle service for this session.
Price includes all camp supplies and field trip admissions. Registration forms can be mailed with check or credit card information to Jefferson Memorial Forest, P.O. Box 467, Fairdale, KY 40118. Reservations cannot be made over the phone. Make checks payable to Jefferson Memorial Forest. Include the session dates your child is attending on the check. Refunds, less a $25.00 processing fee, are given only if a cancellation is made in writing at least 2 weeks prior to the first day of camp. If a camp is filled or cancelled, a full refund will be made to you. All camps are subject to maximum and minimum enrollment requirements.
If you would like your child to be grouped with a sibling or friend, please indicate this on the registration form. This cannot be guaranteed if the children are in different age groups or if space is not available. Campers are grouped according to age. This allows for an optimum learning experience.
Prices include all supplies, backpack, and admission for special field trips. To guarantee your child’s spot, you may pay with a check through the mail or pay in person by cash, check or credit card at the Forest Welcome Center. Make checks payable to Jefferson Memorial Forest. Include the session dates your child is attending on the check.
A cancellation must be received in writing no less than 2 weeks prior to the first day of camp. A refund (less a $25 processing fee) will then be made. If the camp is cancelled by Louisville Parks, a full refund will be made. Refunds cannot be made due to weather. Forest Staff will make additions, omissions, and/or adjustments to the schedule in the interest of group safety, well-being, and minimum staffing requirements.
It is the intent of Louisville Parks and Recreation to make all facilities accessible to individuals with disabilities; if an accommodation is necessary for your participation, please advise us of the needed service in advance. Call (502) 456-8148 (voice) or (502) 456-8183 (TDD). |
lemma (in semiring_of_sets) finite_INT[intro]: assumes "finite I" "I \<noteq> {}" "\<And>i. i \<in> I \<Longrightarrow> A i \<in> M" shows "(\<Inter>i\<in>I. A i) \<in> M" |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, French National Center for Scientific Research (CNRS)
# Distributed under the (new) BSD License. See LICENSE for more info.
import time
import numpy as np
import cProfile
import sys
from test_stream import protocols, compressions
from pyacq.core.stream import OutputStream, InputStream
def benchmark_stream(protocol, transfermode, compression, chunksize, nb_channels=16, nloop=10, profile=False):
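    """Benchmark one OutputStream/InputStream pair: send `nloop` chunks of
    shape (chunksize, nb_channels), time each send/recv round trip, print a
    summary, and return the best (minimum) round-trip time in seconds."""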
ring_size = chunksize*20
stream_spec = dict(protocol=protocol, interface='127.0.0.1', port='*',
transfermode=transfermode, streamtype = 'analogsignal',
dtype='float32', shape=(-1, nb_channels), compression=compression,
scale=None, offset=None, units='',
# for sharedarray
sharedarray_shape = ( ring_size, nb_channels), timeaxis = 0,
ring_buffer_method = 'double',
)
outstream = OutputStream()
outstream.configure(**stream_spec)
time.sleep(.5)
instream = InputStream()
instream.connect(outstream)
arr = np.random.rand(chunksize, nb_channels).astype(stream_spec['dtype'])
perf = []
prof = cProfile.Profile()
for i in range(nloop):
start = time.perf_counter()
if profile:
prof.enable()
outstream.send(arr)
index2, arr2 = instream.recv()
if profile:
prof.disable()
perf.append(time.perf_counter() - start)
if profile:
prof.print_stats('cumulative')
outstream.close()
instream.close()
dt = np.min(perf)
print(chunksize, nloop, transfermode, protocol.ljust(6), compression.ljust(13), 'time = %0.02f ms' % (dt*1000), 'speed = ', chunksize*nb_channels*4*1e-6/dt, 'MB/s')
return dt
if len(sys.argv) > 1 and sys.argv[1] == 'profile':
benchmark_stream(protocol='inproc', transfermode='plaindata',
compression='', chunksize=100000, nb_channels=16,
profile=True, nloop=100)
else:
nb_channels = 16
for chunksize in [2**10, 2**14, 2**16]:
print('#'*5)
for compression in compressions:
for protocol in protocols:
benchmark_stream(protocol=protocol, transfermode='plaindata',
compression=compression,
chunksize=chunksize, nb_channels=nb_channels)
benchmark_stream(protocol='tcp', transfermode='sharedarray', compression='',
chunksize=chunksize, nb_channels=nb_channels)
|
# Module 2: Inversion
In the previous module we started with a continuous distribution of a physical property and discretized it into many cells, then we performed a forward simulation that created data from known model parameters. Inversion, of course, is exactly the opposite process. Imagine each model parameter that we had represents a layer in a 1D layered earth. At the surface of the earth we measure the data, and when we invert we do so for the model parameters. Our goal is to take the observed data and recover models that emulate the real Earth as closely as possible.
You may have noticed that the act of discretizing our problem created more cells than data values. In our last example we produced 20 data points from 1000 model parameters, so we have far fewer data values than unknowns. While this was not much of a problem in the forward simulation, when we want to do the inverse process, that is, obtain the model parameters from the data, it is clear that we have many more unknowns than knowns. In short, we have an underdetermined problem, and therefore infinitely many possible solutions. In mathematical terms, geophysical surveys represent what are called "ill-posed" problems.
An "ill-posed" problem is any problem that does not satisfy the requirements for the definition of "well-posed" problem. A *well-posed* problem is a problem in mathematics that must satisfy all three of the following criteria:
<ol>
<li> A solution exists.
<li> The solution is unique.
<li> The solution's behavior changes continuously with the initial conditions.
</ol>
Any mathematical formulation that does not satisfy all three of the above is, by definition, an ill-posed problem. Since we are dealing with an underdetermined system, I hope that it is clear that we are dealing with an ill-posed problem (i.e., we have no unique solution), and we are going to have to come up with a method (or methods) that can help us choose from the available solutions. We need to devise an algorithm that can choose the "best" model from the infinitely many that are available to us.
In short, we are going to have to find an optimum model. More specifically, in the context of most geophysics problems, we are going to use gradient-based optimization. This process involves building an *objective function*, which is a function that casts our inverse problem as an optimization problem. We will build an objective function consisting of two parts:
$$
\phi = \phi_d + \beta \phi_m
$$
Where the terms on the right hand side are (1) a data misfit (denoted as $\phi_d$) and (2) a model regularization (denoted as $\phi_m$). These two parts will be elaborated in detail below.
Once we have formulated the objective function, we will take derivatives and obtain a recovered model. This module will flesh out the details of the model objective function, and then take first and second derivatives to derive an expression that gives us a solution for our model parameters.
## The Data Misfit, $\phi_d$
A *misfit* describes how closely synthetic data match measurements made in the field. Traditionally this term refers to the difference between the measured data and the predicted data. If these two quantities are sufficiently close, then we consider the model to be a viable candidate for the solution to our problem. Because the data are inaccurate, a model that reproduces those data exactly is not feasible. A realistic goal, rather, is to find a model whose predicted data are consistent with the errors in the observations, and this requires incorporating knowledge about the noise and uncertainties. The concept of fitting the data means that some estimate of the “noise” must be available. Unfortunately “noise” within the context of inversion is everything that cannot be accounted for by a compatible relationship between the model and the data. More specifically, noise includes (1) noise from data acquisition in the field, (2) uncertainty in source and receiver locations, (3) numerical error, and (4) physical assumptions about our model that do not capture all of the physics.
A standard approach is to assume that each datum, $d_i$, contains errors that can be described as Gaussian with a standard deviation $\epsilon_i$. It is important to give a significant amount of thought towards assigning standard deviations in the data, but a reasonable starting point is to assign each $\epsilon_i$ as $\epsilon_i = \text{floor} + \text{percent} \cdot |d_i|$.
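For concreteness, here is a minimal sketch of that rule (the data vector, the floor, and the percentage are illustrative choices, not prescriptions):
```
# Illustrative uncertainty assignment: a small floor tied to the data scale
# plus a 5% relative error per datum (both values are assumptions)
import numpy as np

d = np.array([1.2, -0.4, 0.05, 2.1])     # hypothetical data vector
floor = 0.02*np.linalg.norm(d)/len(d)    # assumed floor choice
eps = floor + 0.05*np.abs(d)             # one standard deviation per datum
```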
Incorporating both the differences between predicted and measured data and a measure of the uncertainties in the data yields our misfit function, $\phi_d$:
$$
\phi_d (m) = \frac{1}{2} \sum_{i=1}^N \left( \frac{F[m] -d_i^{obs} }{\epsilon_i}\right)^2 = \frac{1}{2} \|W_d(F[m] - d^{obs}) \|_2^2
$$
Note that the right hand side of the equation is written as a matrix-vector product, with each $\epsilon_i$ in the denominator placed as an element on the diagonal matrix $W_d$, as follows:
\begin{equation}
\begin{split}
W_d =
\begin{bmatrix}
\frac{1}{\epsilon_1} & 0 & \cdots & 0\\
0 & \frac{1}{\epsilon_2} & \cdots & 0\\
\vdots & \vdots & \ddots & \vdots\\
0 & 0 & \cdots & \frac{1}{\epsilon_N}\\
\end{bmatrix}
\end{split}
\end{equation}
If we return to the linear problem from the previous section, where our forward operator was simply a matrix of kernel functions, we can substitute $F[m]$ with $G$ and obtain
$$
\phi_d (m) = \frac{1}{2} \sum_{i=1}^N \left( \frac{(Gm)_i -d_i^{obs} }{\epsilon_i}\right)^2 = \frac{1}{2} \|W_d(Gm - d^{obs}) \|_2^2
$$
Now that we have defined a measure of misfit, the next task is to determine a tolerance value, such that if the misfit is about equal to that value, then we have an acceptable fit. Suppose that the standard deviations are known and that the errors are Gaussian. Then the sum $\sum_{i=1}^N \left((F[m]_i - d_i^{obs})/\epsilon_i\right)^2$ becomes a $\chi_N^2$ variable with $N$ degrees of freedom. This is a well-known quantity with an expected value $E[\chi_N^2]=N$ and a standard deviation of $\sqrt{2N}$. Basically, what this means is that computing this sum (our $\phi_d$ without the factor of $1/2$, which is how it is evaluated in the code later in this module) should give us a value that is close to the number of data, $N$.
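A quick numerical sanity check of this target (a sketch; the per-datum standard deviations here are made up):
```
# For correctly-modeled Gaussian noise, sum((r_i/eps_i)**2) is chi-squared
# with N degrees of freedom, so its average over many trials is close to N.
import numpy as np

N = 20
eps = 0.05 + 0.1*np.random.rand(N)   # assumed per-datum standard deviations
misfits = [np.sum((eps*np.random.randn(N)/eps)**2) for _ in range(5000)]
print(np.mean(misfits))              # approximately N = 20
```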
## The Model Regularization, $\phi_m$
There are many options for choosing a model regularization, but the goal in determining a model regularization is the same: given that we have no unique solution, we must make assumptions in order to recast the problem in such a way that a solution exists. A general function used in 1D is as follows:
$$
\phi_m = \alpha_s \int (m)^2 dx + \alpha_x \int \left( \frac{dm}{dx} \right)^2 dx
$$
Each term in the above expression is a norm that measures characteristics about our model. The first term is a representation of the square of the Euclidean length for a continuous function, and therefore measures the length of the model, while the second term uses derivative information to measure the model's smoothness. Usually the model regularization is defined with respect to a reference model. In the above, the reference model would simply be zero, but choosing a non-zero reference model $m_{ref}$ yields the following:
$$
\phi_m = \alpha_s \int (m-m_{ref})^2 dx + \alpha_x \int \left( \frac{d}{dx} (m-m_{ref}) \right)^2 dx
$$
As before, we will discretize this expression. It is easiest to break up each term and treat them separately, at first.
We will denote each term of $\phi_m$ as $\phi_s$ and $\phi_x$, respectively. Consider the first term. Translating the integral to a sum yields:
$$
\phi_s = \alpha_s \int m^2 \, dx \rightarrow \alpha_s \sum_{i=1}^N \int_{x_{i-1}}^{x_i} m_i^2 \, dx = \alpha_s \sum_{i=1}^N m_i^2 \, (x_i - x_{i-1})
$$
Each spatial "cell" is $x_i - x_{i-1}$, which is the distance between nodes, as you may recall from the previous module. To simplify notation, we will use $\Delta x_{n_i}$ to denote the *ith* distance between nodes.
We can then write $\phi_s$ as:
$$
\phi_s = \alpha_s \sum_{i=1}^N m_i^2 \Delta x_{n_i} = \alpha_s m^T W_s^T W_s m = \alpha_s \|W_s m\|_2^2
$$
with:
\begin{equation}
\begin{split}
W_s =
\begin{bmatrix}
\sqrt{\Delta x_{n_1}} & 0 & \cdots & 0\\
0 & \sqrt{\Delta x_{n_2}} & \cdots & 0\\
\vdots & \vdots & \ddots & \vdots\\
0 & 0 & \cdots & \sqrt{\Delta x_{n_N}}\\
\end{bmatrix}
\end{split}
\end{equation}
For the second term, we will follow a similar process. First, we will define $\Delta x_{c_i}$ as the distance between cell centers.
A discrete approximation to the integral can be made by evaluating the derivative of the model based on how much it changes between the cell-centers, that is, we will take the average gradient between the *ith* and *i+1th* cells:
$$
\phi_x = \alpha_x \int \left( \frac{dm}{dx} \right)^2 dx \rightarrow \alpha_x \sum_{i=1}^{N-1} \left( \frac{m_{i+1}-m_i}{\Delta x_{c_i}}\right)^2 \Delta x_{c_i} = \alpha_x m^T W_x^T W_x m = \alpha_x \|W_x m\|_2^2
$$
The matrix $W_x$ is a finite difference matrix constructed thus:
\begin{equation}
\begin{split}
W_x =
\begin{bmatrix}
-\frac{1}{\sqrt{\Delta x_{c_1}}} & \frac{1}{\sqrt{\Delta x_{c_1}}} & 0 & \cdots & 0\\
0 & -\frac{1}{\sqrt{\Delta x_{c_2}}} & \frac{1}{\sqrt{\Delta x_{c_2}}} & \cdots & 0\\
0 & 0 & \ddots & \ddots & \vdots\\
0 & 0 & 0 & -\frac{1}{\sqrt{\Delta x_{c_{M-1}}}} & \frac{1}{\sqrt{\Delta x_{c_{M-1}}}}\\
0 & 0 & 0 & 0 & 0
\end{bmatrix}
\end{split}
\end{equation}
If $W_x$ is an $M \times M$ matrix, then the last row is zero. The reason the last row is zero is because there are $M-1$ segments on which linear gradients have been defined. Effectively the two $1/2$ cells on each end are neglected.
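As a small self-contained check (a sketch assuming a uniform mesh, with made-up sizes), the two matrices can be built and verified against the sums they are meant to represent:
```
# Illustrative construction on a uniform mesh (dx assumed constant)
import numpy as np

M, dx = 5, 0.2
Ws = np.sqrt(dx)*np.eye(M)                     # diagonal sqrt(cell widths)
Wx = np.zeros((M, M))
for i in range(M-1):                           # M-1 gradient rows; last row stays zero
    Wx[i, i] = -1.0/np.sqrt(dx)
    Wx[i, i+1] = 1.0/np.sqrt(dx)

m_test = np.random.rand(M)
phi_s = np.linalg.norm(np.dot(Ws, m_test))**2  # equals sum(m**2)*dx
phi_x = np.linalg.norm(np.dot(Wx, m_test))**2  # equals sum(diff(m)**2)/dx
print(phi_s, np.sum(m_test**2)*dx)
print(phi_x, np.sum(np.diff(m_test)**2)/dx)
```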
So to summarize, we have $\phi_m = \phi_s + \phi_x$ with
\begin{equation}
\begin{split}
\phi_m & = \phi_s + \phi_x \\[0.4em]
& = \alpha_s \|W_s m\|_2^2 + \alpha_x \|W_x m\|_2^2 \\[0.4em]
\end{split}
\end{equation}
Next, we will write this more compactly by stacking $W_s$ and $W_x$ into a matrix $W_m$ as follows
\begin{equation}
\begin{split}
W_m =
\begin{bmatrix}
\alpha_s W_s\\
\alpha_x W_x
\end{bmatrix}
\end{split}
\end{equation}
Now we can write $\phi_m$ as the 2-norm of one matrix-vector operation:
$$
\phi_m = \| W_m m \|_2^2
$$
As before, if we want to describe with respect to a reference model $m_{ref}$ we could write:
$$
\phi_m = \| W_m (m-m_{ref})\|_2^2
$$
## Model Objective Function
If we go back and recall what was discussed in the introduction, the model objective function casts the inverse problem as an optimization problem, and as mentioned, we will be using gradient-based optimization, so we will need to take derivatives. The complete model objective function contains both the data misfit and the model regularization, which means we can write $\phi$ as the sum of the two and then differentiate:
$$
\phi = \phi_d + \beta \phi_m
$$
For the linear problem we are considering
$$
\phi_d = \frac{1}{2}\| W_d (Gm-d^{obs})\|_2^2 = \frac{1}{2}(Gm-d^{obs})^T W_d^T W_d (Gm-d^{obs})
$$
and
$$
\phi_m = \frac{1}{2} \|W_m (m-m_{ref}) \|^2_2 = \frac{1}{2}(m-m_{ref})^T W_m^T W_m (m-m_{ref})
$$
To simplify the terms and see the math a little more clearly, let's note that $W_d(Gm-d^{obs})$, and $\beta W_m(m-m_{ref})$ are simply vectors. And since we are taking the square of the 2-norm, all that we are really doing is taking the dot product of each vector with itself. So let $z=W_d(Gm-d^{obs})$, and let $y=W_m(m-m_{ref})$ where both $z$ and $y$ vectors are functions of $m$. So then:
$$
\phi_d = \frac{1}{2}\|z\|_2^2 = \frac{1}{2}z^T z
$$<br>
$$
\phi_m = \frac{1}{2}\|y\|_2^2 =\frac{1}{2}y^T y
$$
To minimize this, we want to look at $\nabla \phi$. Using our compact expressions:
$$
\phi = \phi_d + \beta \phi_m = \frac{1}{2}z^Tz + \beta \frac{1}{2}y^Ty \\
$$
Taking the derivative with respect to $m$ yields:
\begin{equation}
\begin{split}
\frac{d \phi}{dm}& = \frac{1}{2} \left(z^T \frac{dz}{dm} + z^T \frac{dz}{dm} + \beta y^T \frac{dy}{dm} + \beta y^T \frac{dy}{dm}\right)\\\\[0.6em]
& = z^T \frac{dz}{dm} + \beta y^T \frac{dy}{dm}
\end{split}
\end{equation}
Note that
$$\frac{dz}{dm} = \frac{d}{dm}(W_d(Gm-d^{obs})) = W_d G $$
and
$$ \frac{dy}{dm} = \frac{d}{dm}(W_m (m-m_{ref})) = W_m $$
Next, let's substitute both derivatives, our expressions for $z$ and $y$, apply the transposes, and rearrange:<br>
\begin{equation}
\begin{split}
\frac{d \phi}{dm} & = z^T \frac{dz}{dm} + \beta y^T \frac{dy}{dm} \\[0.6em]
& = (W_d(Gm-d^{obs}))^T W_d G + \beta (W_m (m-m_{ref}))^T W_m\\[0.6em]
& = (Gm-d^{obs})^T W_d^T W_d G + \beta (m-m_{ref})^T W_m^T W_m \\[0.6em]
& = ((Gm)^T - d^T) W_d^T W_d G + \beta (m^T-m_{ref}^T)W_m^T W_m \\[0.6em]
& = (m^T G^T - d^T) W_d^T W_d G + \beta m^T W_m^T W_m - \beta m_{ref}^T W_m^T W_m \\[0.6em]
& = m^T G^T W_d^T W_d G - d^T W_d^T W_d G + \beta m^T W_m^T W_m - \beta m_{ref}^T W_m^T W_m\\[0.6em]
& = m^T G^T W_d^T W_d G + \beta m^T W_m^T W_m - d^T W_d^T W_d G - \beta m_{ref}^T W_m^T W_m
\end{split}
\end{equation}
Now we have an expression for the derivative of our equation that we can work with. Setting the gradient to zero and gathering like terms gives:<br>
\begin{equation}
\begin{split}
m^T G^T W_d^T W_d G + \beta m^T W_m^T W_m = d^T W_d^T W_d G + \beta m_{ref}^T W_m^T W_m\\[0.6em]
(G^T W_d^T W_d G + \beta W_m^T W_m)m = G^T W_d^T W_d d + \beta W_m^T W_m m_{ref}\\[0.6em]
\end{split}
\end{equation}
From here we can do two things. First, we can solve for $m$, our recovered model:
\begin{equation}
\begin{split}
m = (G^T W_d^T W_d G + \beta W_m^T W_m)^{-1} (G^T W_d^T W_d d + \beta W_m^T W_m m_{ref})\\[0.6em]
\end{split}
\end{equation}
Second, we can get the second derivative simply from the bracketed terms on the left hand side of the equation above:
\begin{equation}
\frac{d^2 \phi}{dm^2} = G^T W_d^T W_d G + \beta W_m^T W_m
\end{equation}
In the model problem that we are solving, second derivative information is not required to obtain a solution; however, in non-linear problems, or in situations where higher-order information is required, it is useful to have this available.
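Numerically, it is better practice to solve the linear system than to form the inverse explicitly; here is a minimal sketch of the recovered-model solve (the function name is illustrative; the notebook cell further below uses `np.linalg.inv` instead, for clarity):
```
# Solve (G^T Wd^T Wd G + beta Wm^T Wm) m = G^T Wd^T Wd d + beta Wm^T Wm mref
# with a linear solve rather than an explicit inverse.
import numpy as np

def recover_model(G, Wd, Wm, dobs, mref, beta):
    WdTWd = np.dot(Wd.T, Wd)
    WmTWm = np.dot(Wm.T, Wm)
    A = np.dot(np.dot(G.T, WdTWd), G) + beta*WmTWm
    b = np.dot(np.dot(G.T, WdTWd), dobs) + beta*np.dot(WmTWm, mref)
    return np.linalg.solve(A, b)
```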
## Solving for $m$ in Python
Before we solve for $m$, we will recreate what we had in the first module. First, import the appropriate packages:
```
# Import Packages
from SimPEG import *
from IPython.html.widgets import interactive
import numpy as np
%pylab inline
```
Efficiency Warning: Interpolation will be slow, use setup.py!
python setup.py build_ext --inplace
Populating the interactive namespace from numpy and matplotlib
WARNING: pylab import has clobbered these variables: ['interactive']
`%matplotlib` prevents importing * from pylab and numpy
Here is the model that we had previously:
```
# Begin by creating a fictitious set of model data
M=1000 # Set number of model parameters
x=np.linspace(0,1,M+1) # Define 1D domain on nodes
xc = 0.5*(x[1:] + x[0:-1]) # Define 1D domain on cell centers
m = np.zeros(M) # preallocate m array
# Define Gaussian function:
gauss = lambda x, a, mean, SD: a*np.exp(-(x-mean)**2./(2.*SD**2.)) # create a Gaussian function
# Choose parameters for Gaussian, evaluate, and store in an array, f.
SD=6
mean=0
a=1/(SD*sqrt(2*pi))
x2=np.linspace(-20,20,int(0.2*M))
f = gauss(x2,a, mean,SD)
# Define a boxcar function:
box = 0.4*np.ones(int(0.2*M))
# Insert the Gaussian and Boxcar into m (integer indices required for slicing):
m[int(0.2*M):int(0.4*M)]=box
m[int(0.6*M):int(0.8*M)]=10*f
# Plot
pylab.plot(xc,m)
pylab.xlabel('x')
pylab.ylabel('m(x)')
pylab.title('Model, $m(x)$')
```
Again, we define our kernel functions and averaging and volume matrices as before:
```
# Make the set of kernel functions
g = lambda x, i, p, q: np.exp(-p*i*x)*np.cos(2*np.pi*q*i*x) # create an anonymous function as immediately above
p = 0.01 # Set values for p, q
q = 0.15
N = 20 # specify number of output data
Gn = np.zeros((M+1,N))
for i in range(N):
f = g(x,i,p,q)
Gn[:,i] = f
# Plot
pylab.plot(x,Gn)
pylab.xlabel('x')
pylab.ylabel('g(x)')
pylab.title('Kernel functions, $g(x)$')
# Make Averaging Matrix
n=M+1 # Define n as the M+1 dimension of the matrix
w=n-1 # Define w as the n-1 dimension of the matrix
s = (M,n) # Store matrix dimensions
Av = np.zeros(s) # Create a matrix of zeros of the correct dimensions
# and fill in with elements using the loop below (note the 1/2 is included in here).
for i in range(M):
j=i
k=i+1
Av[i,j] = 0.5
Av[i,k] = 0.5
# make the Volume, "delta x" array
Deltax = diff(x)
V = np.diag(Deltax) # create diagonal matrix
```
Last, we produce our data:
```
G = np.dot(np.transpose(np.dot(Av, Gn)), V)
d = np.dot(G, m) # Create synthetic data
xd=np.linspace(0,1,len(d)) # make x array for data
# Plot
pylab.plot(xd,d)
pylab.xlabel('')
pylab.ylabel('d')
pylab.title('Synthetic Data $d$')
```
## Introducing noise to the data
This is where we stood at the end of the last module. Next, to simulate taking data in the field, we are going to add noise to the data before we perform our inversion. We will do this by defining a lambda function that applies a floor value and a percent scaling factor, and we will assume that the noise is Gaussian. We then add the noise to the original data to make a simulated vector of observed data. The superposition of our noise and original data is plotted below.
```
# Add noise to our synthetic data
noise = lambda fl, length, data, s: fl + randn(length)*data*s # introduce noise using a floor (f), length (l) and scaling factor (s)
noi = noise(0, len(d), d, 0.04)
dobs= d + noi
pylab.plot(xd,d)
pylab.plot(xd,dobs)
pylab.xlabel('')
pylab.ylabel('d')
pylab.title('Synthetic Data and noise, $d$')
```
## Calculating $\phi_d$
We are now in a position to build up the data misfit term, $\phi_d$. We will need a function to compute the 2-norm, so constructing a lambda function to do this is useful. Next we will make the matrix $W_d$, which is a diagonal matrix that contains the inverses of the standard deviations of the uncertainties in our data. Again, we will define a floor and percent error for each datum. Assigning a floor to the uncertainties in our case will consist of taking the Euclidean length of our data vector and scaling it by the number of data values that we have. Last, we calculate $\phi_d$ using the 2-norm function that we created. It is insightful to see what values have been assigned to our floor and misfit, so they are printed below.
```
# Calculate the data misfit, phi_d #
####################################
# Anonymous function for 2-Norm
L2 = lambda A, w: dot(dot(w.T,A.T),dot(A,w))
# This constructs the inverses of the standard deviations.
invvar = lambda floor, percent, data: 1./(floor + percent*np.abs(data))
# assign a floor
flr = 0.015*dot(d.T,d)**0.5/len(d)
# Make Wd
eps = invvar(flr,0.02,dobs) # define epsilon and Wd
Wd=np.diag(eps)
# Take the 2-norm
phi_d = L2(Wd, np.dot(G,m)-dobs)
print(phi_d)
print(flr)
```
28.8794257836
0.000200320925096
## Choosing a reference model
In choosing a reference model, we need to know something about the variation of physical properties in the subsurface. Given that we are looking for property contrasts relative to a background property, and assuming that we know nothing about the number of targets, their values, or their locations, a good first place to start is to assign a constant background value. In real-world situations, this would represent the background value of the surrounding earth (for example, the conductivity, seismic velocity, density, etc. of our country rock). In the case of our synthetic model, we are going to take the mean value of our model and use that as a constant background.
```
# Reference Model#
##################
## A constant reference model ##
mref = np.mean(m)*np.ones(M)
# Plot
pylab.plot(xc,mref)
pylab.xlabel('x')
pylab.ylabel('m(x)')
pylab.title('Reference Model, $m(x)$')
```
## Setting up the spatial domains
Here we are going to set up the domains, with a vector for points on the cell centers and a vector for points on the nodes. We will store the square roots of the node spacings and cell-center spacings in vectors $l$ and $h$, respectively.
```
# Domains #
############################### # Set up domains:
l = np.power(x[1:len(x)]-x[0:len(x)-1],0.5) # square roots of distances between nodes, l
midx=np.dot(Av,x) # vector of cell midpoints, midx
h = np.power(midx[1:len(midx)]-midx[0:len(midx)-1], 0.5) # square roots of distances between midpoints, h
```
## Calculating the model regularization, $\phi_m$
As discussed above, we are going to first need to make our $W_m$ matrix, which is a partitioned matrix from two other matrices, $W_s$ and $W_x$, each scaled by a separate parameter $\alpha_s$ and $\alpha_x$. We are going to discuss the manner in which $\alpha_s$ and $\alpha_x$ are selected in more detail during the next module. But for the moment, we will set them as they are defined below. Once this matrix is built up, calculating $\phi_m$ is a simple matter, given that we have made a function to compute the 2-norm already. For the sake of illustration, I compute and print $\phi_m$ from the residual of our reference model $m_{ref}$ and our true model $m$. However, of interest to us will be the residual of the model that we recover $m_{rec}$ and our reference model.
```
# Calculate the model regularization, phi_m #
##############################################
# Create Ws, Wx matrices
Ws=np.diag(l) # put length vector into a diagonal matrix, Ws
Wx = np.zeros((len(m), len(m))) # preallocate array and enter values into matrix, Wx
# each row of Wx is a finite difference scaled by 1/sqrt(cell-center spacing)
for i in range(len(h)):
    j=i
    k=i+1
    Wx[i,j] = -1.0/h[i]
    Wx[i,k] = 1.0/h[i]
alpha_s=0.1 # Set alpha_s and alpha_x values
alpha_x=0.15
# build Wm #
Wm=np.concatenate((alpha_s*Ws, alpha_x*Wx), axis=0)
phi_m = L2(Wm, mref-m)
print (phi_m)
```
0.00557850734497
## Inverting for our recovered model
At last we can invert to find our recovered model and see how it compares with the true model. First we will assign a value for $\beta$. As with the $\alpha$ parameters from before, we will assign a value, but the choice of beta will be a topic that we explore more fully in the next module. Once our $\beta$ value is assigned, we will define yet another lambda function to obtain the recovered model, plot it against our true model, and then output our results for $\phi_d$ and $\phi_m$.
```
# Set beta value
beta = 1000000
# Get recovered model
mrecovered = lambda G, Wd, Wm, data, mref, beta: dot(np.linalg.inv(dot(dot(G.T,Wd.T),dot(Wd,G)) + beta*dot(Wm.T,Wm)), dot(dot(G.T,Wd.T),dot(Wd,data)) + beta*dot(dot(Wm.T,Wm),mref))
mrec = mrecovered(G,Wd,Wm,dobs,mref,beta)
# Get residual of mref and mrec
phi_d2 = L2(Wd,np.dot(G,mrec)-dobs)
phi_m2 = L2(Wm, mref-mrec)
print(phi_d2)
print(phi_m2)
# Plot
pylab.plot(xc,m)
pylab.plot(xc,mrec)
pylab.xlabel('x')
pylab.ylabel('m(x)')
pylab.title('Model, $m(x)$')
```
As a last step, we will obtain our predicted data and see how it compares with the synthetic data that we used initially.
```
# Get predicted data
dpred = np.dot(G,mrec)
pylab.plot(xd,dpred,'o')
pylab.plot(xd,dobs)
pylab.plot(xd,d,'x')
pylab.xlabel('')
pylab.ylabel('d')
pylab.title('Predicted and Observed Data')
```
This concludes the current module. For the next module, we will examine the constraints on our choice for $\alpha_s$ and $\alpha_x$, and then introduce the Tikhonov curve and a method for choosing $\beta$.
|
#include "storm/storage/jani/ParallelComposition.h"
#include <sstream>
#include <boost/algorithm/string/join.hpp>
#include "storm/utility/macros.h"
#include "storm/exceptions/WrongFormatException.h"
#include "storm/storage/jani/Model.h"
namespace storm {
namespace jani {
const std::string SynchronizationVector::NO_ACTION_INPUT = "-";
SynchronizationVector::SynchronizationVector(std::vector<std::string> const& input, std::string const& output) : input(input), output(output) {
// Intentionally left empty.
}
SynchronizationVector::SynchronizationVector(std::vector<std::string> const& input) : input(input), output(storm::jani::Model::SILENT_ACTION_NAME) {
}
std::size_t SynchronizationVector::size() const {
return input.size();
}
std::vector<std::string> const& SynchronizationVector::getInput() const {
return input;
}
std::string const& SynchronizationVector::getInput(uint64_t index) const {
return input[index];
}
std::string const& SynchronizationVector::getOutput() const {
return output;
}
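// Returns the action at the closest participating (non-"-") input position strictly before the given index, if any.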
boost::optional<std::string> SynchronizationVector::getPrecedingParticipatingAction(uint64_t index) const {
boost::optional<uint64_t> position = getPositionOfPrecedingParticipatingAction(index);
if (position) {
return getInput(position.get());
} else {
return boost::none;
}
}
boost::optional<uint64_t> SynchronizationVector::getPositionOfPrecedingParticipatingAction(uint64_t index) const {
if (index == 0) {
return boost::none;
}
uint64_t i = index - 1;
for (; i > 0; --i) {
if (this->getInput(i) != NO_ACTION_INPUT) {
return boost::make_optional(i);
}
}
// Check the 0-index.
if (this->getInput(i) != NO_ACTION_INPUT) {
return boost::make_optional(i);
}
return boost::none;
}
uint64_t SynchronizationVector::getPositionOfFirstParticipatingAction() const {
for (uint64_t result = 0; result < this->size(); ++result) {
if (this->getInput(result) != NO_ACTION_INPUT) {
return result;
}
}
STORM_LOG_THROW(false, storm::exceptions::WrongFormatException, "Synchronization vector must have at least one participating action.");
}
uint64_t SynchronizationVector::getNumberOfActionInputs() const {
uint64_t result = 0;
for (auto const& inputEntry : input) {
if (!isNoActionInput(inputEntry)) {
++result;
}
}
return result;
}
bool SynchronizationVector::isNoActionInput(std::string const& action) {
return action == NO_ACTION_INPUT;
}
std::ostream& operator<<(std::ostream& stream, SynchronizationVector const& synchronizationVector) {
bool first = true;
stream << "(";
for (auto const& element : synchronizationVector.getInput()) {
if (!first) {
stream << ", ";
}
stream << element;
first = false;
}
stream << ") -> " << synchronizationVector.getOutput();
return stream;
}
bool operator==(SynchronizationVector const& vector1, SynchronizationVector const& vector2) {
if (vector1.getOutput() != vector2.getOutput()) {
return false;
}
if (vector1.getInput() != vector2.getInput()) {
return false;
}
return true;
}
bool operator!=(SynchronizationVector const& vector1, SynchronizationVector const& vector2) {
return !(vector1 == vector2);
}
bool SynchronizationVectorLexicographicalLess::operator()(SynchronizationVector const& vector1, SynchronizationVector const& vector2) const {
STORM_LOG_THROW(vector1.size() == vector2.size(), storm::exceptions::WrongFormatException, "Cannot compare synchronization vectors of different size.");
for (uint64_t i = 0; i < vector1.size(); ++i) {
if (vector1.getInput(i) < vector2.getInput(i)) {
return true;
} else if (vector1.getInput(i) > vector2.getInput(i)) {
return false;
}
}
if (vector1.getOutput() < vector2.getOutput()) {
return true;
} else if (vector1.getOutput() > vector2.getOutput()) {
return false;
}
return false;
}
ParallelComposition::ParallelComposition(std::shared_ptr<Composition> const& subcomposition, std::vector<SynchronizationVector> const& synchronizationVectors) : ParallelComposition(std::vector<std::shared_ptr<Composition>>{subcomposition}, synchronizationVectors) {
// Intentionally left empty.
}
ParallelComposition::ParallelComposition(std::vector<std::shared_ptr<Composition>> const& subcompositions, std::vector<SynchronizationVector> const& synchronizationVectors) : subcompositions(subcompositions), synchronizationVectors(synchronizationVectors) {
STORM_LOG_THROW(!subcompositions.empty(), storm::exceptions::WrongFormatException, "At least one automaton required for parallel composition.");
this->checkSynchronizationVectors();
}
ParallelComposition::ParallelComposition(std::vector<std::shared_ptr<Composition>> const& subcompositions, std::set<std::string> const& synchronizationAlphabet) : subcompositions(subcompositions), synchronizationVectors() {
STORM_LOG_THROW(!subcompositions.empty(), storm::exceptions::WrongFormatException, "At least one automaton required for parallel composition.");
// Manually construct the synchronization vectors for all elements of the synchronization alphabet.
for (auto const& action : synchronizationAlphabet) {
synchronizationVectors.emplace_back(std::vector<std::string>(this->subcompositions.size(), action), action);
}
}
ParallelComposition::ParallelComposition(std::shared_ptr<Composition> const& leftSubcomposition, std::shared_ptr<Composition> const& rightSubcomposition, std::set<std::string> const& synchronizationAlphabet) {
subcompositions.push_back(leftSubcomposition);
subcompositions.push_back(rightSubcomposition);
// Manually construct the synchronization vectors for all elements of the synchronization alphabet.
for (auto const& action : synchronizationAlphabet) {
synchronizationVectors.emplace_back(std::vector<std::string>(this->subcompositions.size(), action), action);
}
}
bool ParallelComposition::isParallelComposition() const {
return true;
}
Composition const& ParallelComposition::getSubcomposition(uint64_t index) const {
return *subcompositions[index];
}
std::vector<std::shared_ptr<Composition>> const& ParallelComposition::getSubcompositions() const {
return subcompositions;
}
uint64_t ParallelComposition::getNumberOfSubcompositions() const {
return subcompositions.size();
}
SynchronizationVector const& ParallelComposition::getSynchronizationVector(uint64_t index) const {
return synchronizationVectors[index];
}
std::vector<SynchronizationVector> const& ParallelComposition::getSynchronizationVectors() const {
return synchronizationVectors;
}
std::size_t ParallelComposition::getNumberOfSynchronizationVectors() const {
return synchronizationVectors.size();
}
bool ParallelComposition::areActionsReused() const {
for (uint_fast64_t inputIndex = 0; inputIndex < subcompositions.size(); ++ inputIndex) {
std::set<std::string> actions;
for (auto const& vector : synchronizationVectors) {
std::string const& action = vector.getInput(inputIndex);
if (action != SynchronizationVector::NO_ACTION_INPUT) {
if (actions.find(action) != actions.end()) {
return true;
}
actions.insert(action);
}
}
// And check recursively, in case we have nested parallel composition
if (subcompositions.at(inputIndex)->isParallelComposition()) {
if (subcompositions.at(inputIndex)->asParallelComposition().areActionsReused()) {
return true;
}
}
}
return false;
}
void ParallelComposition::checkSynchronizationVectors() const {
// The size check does not depend on the input position, so one pass over the vectors suffices.
for (auto const& vector : synchronizationVectors) {
STORM_LOG_THROW(vector.size() == this->subcompositions.size(), storm::exceptions::WrongFormatException, "Synchronization vectors must match parallel composition size.");
}
for (auto const& vector : synchronizationVectors) {
bool hasInput = false;
for (auto const& input : vector.getInput()) {
if (input != SynchronizationVector::NO_ACTION_INPUT) {
hasInput = true;
break;
}
}
STORM_LOG_THROW(hasInput, storm::exceptions::WrongFormatException, "Synchronization vector must have at least one proper input.");
}
}
boost::any ParallelComposition::accept(CompositionVisitor& visitor, boost::any const& data) const {
return visitor.visit(*this, data);
}
void ParallelComposition::write(std::ostream& stream) const {
std::vector<std::string> synchronizationVectorsAsStrings;
for (auto const& synchronizationVector : synchronizationVectors) {
std::stringstream tmpStream;
tmpStream << synchronizationVector;
synchronizationVectorsAsStrings.push_back(tmpStream.str());
}
bool first = true;
bool hasSynchVectors = !synchronizationVectors.empty();
stream << "(";
for (auto const& subcomposition : subcompositions) {
if (!first) {
stream << (hasSynchVectors ? " || " : " ||| ");
}
stream << *subcomposition;
first = false;
}
stream << ")";
if (hasSynchVectors) {
stream << "[" << boost::algorithm::join(synchronizationVectorsAsStrings, ", ") << "]";
}
}
}
}
|
#!/usr/bin/env Rscript
# Notebook dependencies:
# 1. ../preprocessing_notebooks/MK_2020-10-16_load_data.ipynb - extracts the raw matrix, subsets and splits and
# the genes and cell names as .csv files
# 2. utils/MK_2020-10-16_save_drivers.ipynb - extracts the driver genes for each lineage using CellRanks' GPCCA
library(Matrix)
library(SparseM)
library(FateID)
library(RaceID)
library(R.utils)
print(packageVersion("FateID"))
print(packageVersion("RaceID"))
options("scipen"=10)
sizes <- seq(10000, 100000, 10000)
select_driver_genes <- function(valid_genes, ld) {
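    # For each lineage column, keep the 3 most-correlated driver genes
    # that were not already selected for a previous lineage.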
    ld <- ld[ld[[1]] %in% valid_genes,]
    selected_genes <- list()
for (i in c(2, 3, 4)) {
c <- colnames(ld)[i]
genes <- unlist(selected_genes)
y <- ld[!(ld[[1]] %in% genes), ]
y <- y[order(y[, c], decreasing=TRUE),] # select the driver genes with highest correlation
selected_genes[[c]] <- as.vector(y$X[seq(1, 3)])
}
selected_genes
}
root_dir <- "../../../data/morris_data/"
print("Reading data")
mat <- Matrix::readMM(paste(root_dir, "raw.mtx", sep=""))
mat <- as(mat, "dgCMatrix")
dim(mat)
res <- matrix(nrow=10, ncol=length(sizes))
colnames(res) <- sizes
rownames(res) <- seq(1, 10)
split_dir <- paste(root_dir, "splits/", sep="")
drivers <- paste(root_dir, "lin_drivers/", sep="")
genes <- read.csv(paste(root_dir, "annotations/genes.csv", sep=""), col.names=c('gene'), skip=0)[['gene']]
obs <- read.csv(paste(root_dir, "annotations/obs_names.csv", sep=""), col.names=c('obs'), skip=0)[['obs']]
rownames(mat) <- genes
colnames(mat) <- obs
split_rows = sapply(seq(0, 9), function(i) {paste("X", i, sep="")})
for (i in seq_along(sizes)) {
size <- sizes[i]
fname <- paste(split_dir, "size_", size, ".csv", sep='')
print(fname)
splits <- read.csv(fname, header=TRUE) + 1
splits <- splits[split_rows]
tryCatch({
for (j in seq(1, 10)) {
print("Split", j)
ixs <- splits[[j]]
stopifnot(length(ixs) == size)
X <- mat[,ixs]
stopifnot(dim(X)[2] == size)
mintotal = size %/% 1000
print("Preprocessing")
sc <- SCseq(X)
sc <- filterdata(sc, mintotal=mintotal)
x <- getfdata(sc)[sc@cluster$features,]
print("Getting marker genes")
valid_genes <- rownames(x)
ld <- read.csv(paste(drivers, size, "_", j - 1, ".csv", sep=""))
markers <- select_driver_genes(valid_genes, ld)
minnr = minnrh = size %/% 100
n = size %/% 500
pa <- getPart(x, markers, n=n)
clustering <- pa$part
endpoints <- pa$tar
z <- sc@distances
print("Running fate bias")
runtime <- withTimeout({{
x <- as.matrix(x)
start_time <- Sys.time()
fb <- fateBias(x, clustering,
endpoints, z=z,
minnr=minnr, minnrh=minnrh,
seed=123, use.dist=FALSE)
end_time <- difftime(Sys.time(), start_time, units="secs")
end_time
}},
timeout=60 * 60 * 4, # 4 hours
onTimeout="silent"
)
if (is.null(runtime)) { # timeout
runtime <- NA
}
res[j, i] = runtime
write.csv(res, "../../../data/benchmarking/runtime_analysis/fateid.csv")
}
},
error=function(e) {print(e)}
)
}
|
Formal statement is: lemma nth_root_nat_aux2: assumes "k > 0" shows "finite {m::nat. m ^ k \<le> n}" "{m::nat. m ^ k \<le> n} \<noteq> {}" Informal statement is: If $k > 0$, then the set of all $m \in \mathbb{N}$ such that $m^k \leq n$ is finite and non-empty. |
State Before: α : Type u_1
as : List α
bs : Array α
⊢ Array.size (toArrayAux as bs) = length as + Array.size bs State After: no goals Tactic: induction as generalizing bs with
| nil => simp [toArrayAux]
| cons a as ih => simp_arith [toArrayAux, *] State Before: case nil
α : Type u_1
bs : Array α
⊢ Array.size (toArrayAux [] bs) = length [] + Array.size bs State After: no goals Tactic: simp [toArrayAux] State Before: case cons
α : Type u_1
a : α
as : List α
ih : ∀ (bs : Array α), Array.size (toArrayAux as bs) = length as + Array.size bs
bs : Array α
⊢ Array.size (toArrayAux (a :: as) bs) = length (a :: as) + Array.size bs State After: no goals Tactic: simp_arith [toArrayAux, *] |
State Before: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
⊢ G = sheafify J G State After: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
⊢ sheafify J G ≤ G Tactic: apply (G.le_sheafify J).antisymm State Before: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
⊢ sheafify J G ≤ G State After: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
⊢ s ∈ obj G U Tactic: intro U s hs State Before: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
⊢ s ∈ obj G U State After: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
⊢ ↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
s Tactic: suffices ((hG _ hs).amalgamate _ (G.family_of_elements_compatible s)).1 = s by
rw [← this]
exact ((hG _ hs).amalgamate _ (G.family_of_elements_compatible s)).2 State Before: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
⊢ ↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
s State After: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
⊢ ∀ ⦃Y : C⦄ ⦃f : Y ⟶ U.unop⦄,
(sieveOfSection G s).arrows f →
F.map f.op
↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s)
(_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
F.map f.op s Tactic: apply (h _ hs).isSeparatedFor.ext State Before: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
⊢ ∀ ⦃Y : C⦄ ⦃f : Y ⟶ U.unop⦄,
(sieveOfSection G s).arrows f →
F.map f.op
↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s)
(_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
F.map f.op s State After: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
V : C
i : V ⟶ U.unop
hi : (sieveOfSection G s).arrows i
⊢ F.map i.op
↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
F.map i.op s Tactic: intro V i hi State Before: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
V : C
i : V ⟶ U.unop
hi : (sieveOfSection G s).arrows i
⊢ F.map i.op
↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
F.map i.op s State After: no goals Tactic: exact (congr_arg Subtype.val ((hG _ hs).valid_glue (G.family_of_elements_compatible s) _ hi) : _) State Before: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
this :
↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
s
⊢ s ∈ obj G U State After: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
this :
↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
s
⊢ ↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) ∈
obj G U Tactic: rw [← this] State Before: C : Type u
inst✝ : Category C
J : GrothendieckTopology C
F F' F'' : Cᵒᵖ ⥤ Type w
G G' : Subpresheaf F
h : Presieve.IsSheaf J F
hG : Presieve.IsSheaf J (toPresheaf G)
U : Cᵒᵖ
s : F.obj U
hs : s ∈ obj (sheafify J G) U
this :
↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) =
s
⊢ ↑(Presieve.IsSheafFor.amalgamate (_ : Presieve.IsSheafFor (toPresheaf G) (sieveOfSection G s).arrows)
(familyOfElementsOfSection G s) (_ : Presieve.FamilyOfElements.Compatible (familyOfElementsOfSection G s))) ∈
obj G U State After: no goals Tactic: exact ((hG _ hs).amalgamate _ (G.family_of_elements_compatible s)).2 |
rm(list=ls(all=TRUE))
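# Simulate one data set of size m from y = b0 + b1*x + e with e ~ N(0, sig_ep^2),
# fit ordinary least squares, and return (intercept, slope, residual SD).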
regSimu <- function(m=3, sig_ep=1, min=0, max=10, b0=7, b1=3){
e <- rnorm(m, mean = 0, sd = sig_ep)
x <- runif(m, min=min, max=max)
ypop <- (b1*x + b0)
y <- ypop + e
model <- lm(y~x)
RMSE <- sqrt(anova(model)[2,3])
result <- c(coef(model), RMSE)
names(result) <- c("Beta-Zero","Beta-One", "Sigma-Epsilon-Hat")
result
}
regModel <- function(n=45, m=3, sig_ep=1, min=0, max=10, b0=7, b1=3){
count <- 1
sampleMatrix <-matrix(nrow = 0, ncol = 3)
while(count <= n){
    sampleMatrix <- rbind(sampleMatrix, regSimu(m=m, sig_ep=sig_ep, min=min, max=max, b0=b0, b1=b1))
count <- count + 1
}
sampleMatrix
}
x<-regModel(n=400, m=400, min=0, max=10, b0=7, b1=3,sig_ep=1)
#mean(x[,1]);mean(x[,2]);mean(x[,3])
|
Formal statement is: lemma mod_poly_code [code]: "f mod g = (let cg = coeffs g in if cg = [] then f else let cf = coeffs f; ilc = inverse (last cg); ch = map ((*) ilc) cg; r = mod_poly_one_main_list (rev cf) (rev ch) (1 + length cf - length cg) in poly_of_list (rev r))" (is "_ = ?rhs") Informal statement is: The function mod_poly computes the remainder of the polynomial division of $f$ by $g$. |
module _ where
open import Common.Prelude
open import Lib.Vec
_∘_ : ∀ {a b c} {A : Set a} {B : A → Set b} {C : ∀ x → B x → Set c}
(f : ∀ {x} (y : B x) → C x y) (g : ∀ x → B x) → ∀ x → C x (g x)
(f ∘ g) x = f (g x)
sum : ∀ {n} → Vec Nat n → Nat
sum (x ∷ xs) = x + sum xs
sum [] = 0
foldl : ∀ {A}{B : Nat → Set} → (∀ {n} → B n → A → B (suc n)) → B 0 → ∀ {n} → Vec A n → B n
foldl {B = B} f z (x ∷ xs) = foldl {B = λ n → B (suc n)} f (f z x) xs
foldl f z [] = z
reverse : ∀ {A n} → Vec A n → Vec A n
reverse = foldl {B = Vec _} (λ xs x → x ∷ xs) []
downFrom : ∀ n → Vec Nat n
downFrom zero = []
downFrom (suc n) = n ∷ downFrom n
main : IO Unit
main = printNat (sum (reverse (downFrom 6000)))
|
# ------------------------------------------------------------------
# Licensed under the ISC License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
KrigingEstimator
A Kriging estimator (e.g. Simple Kriging).
"""
abstract type KrigingEstimator end
"""
KrigingState(X, z, LHS, RHS)
A Kriging state stores information needed
to perform estimation at any given location.
"""
mutable struct KrigingState{T<:Real,V,
A<:AbstractMatrix{T},
B<:AbstractVector{V},
F<:Factorization,R}
X::A
z::B
LHS::F
RHS::Vector{R}
end
"""
KrigingWeights(λ, ν)
An object storing Kriging weights `λ` and Lagrange multipliers `ν`.
"""
struct KrigingWeights{T<:Real,A<:AbstractVector{T}}
λ::A
ν::A
end
"""
FittedKriging(estimator, state)
An object that can be used for making predictions using the
parameters in `estimator` and the current Kriging `state`.
"""
struct FittedKriging{E<:KrigingEstimator,S<:KrigingState}
estimator::E
state::S
end
"""
status(fittedkrig)
Return the status of the `fittedkrig` object, meaning
the factorization of the Kriging system was successful.
"""
status(fittedkrig::FittedKriging) = issuccess(fittedkrig.state.LHS)
#--------------
# FITTING STEP
#--------------
"""
fit(estimator, X, z)
Build Kriging system from coordinates `X` and
values `z` and return a fitted estimator.
"""
function fit(estimator::KrigingEstimator, X::AbstractMatrix, z::AbstractVector)
# build Kriging system
LHS = lhs(estimator, X)
RHS = Vector{eltype(LHS)}(undef, size(LHS,1))
# factorize LHS
FLHS = factorize(estimator, LHS)
# record Kriging state
state = KrigingState(X, z, FLHS, RHS)
# return fitted estimator
FittedKriging(estimator, state)
end
"""
lhs(estimator, X)
Return LHS of Kriging system using spatial configuration `X`.
"""
function lhs(estimator::KrigingEstimator, X::AbstractMatrix)
γ = estimator.γ
nobs = size(X, 2)
ncons = nconstraints(estimator)
# pre-allocate memory for LHS
x = view(X,:,1)
T = Variography.result_type(γ, x, x)
m = nobs + ncons
LHS = Matrix{T}(undef, m, m)
# set variogram/covariance block
pairwise!(LHS, γ, X)
if isstationary(γ)
for j=1:nobs, i=1:nobs
@inbounds LHS[i,j] = sill(γ) - LHS[i,j]
end
end
# set blocks of constraints
set_constraints_lhs!(estimator, LHS, X)
LHS
end
"""
nconstraints(estimator)
Return number of constraints for `estimator`.
"""
nconstraints(estimator::KrigingEstimator) = error("not implemented")
"""
set_constraints_lhs!(estimator, LHS, X)
Set constraints in LHS of Kriging system.
"""
set_constraints_lhs!(estimator::KrigingEstimator,
LHS::AbstractMatrix, X::AbstractMatrix) = error("not implemented")
"""
factorize(estimator, LHS)
Factorize LHS of Kriging system with appropriate factorization method.
"""
factorize(estimator::KrigingEstimator, LHS::AbstractMatrix) = error("not implemented")
#-----------------
# PREDICTION STEP
#-----------------
"""
predict(estimator, xₒ)
Compute mean and variance for the `estimator` at coordinates `xₒ`.
"""
predict(estimator::FittedKriging, xₒ::AbstractVector) =
combine(estimator, weights(estimator, xₒ), estimator.state.z)
"""
weights(estimator, xₒ)
Compute the weights λ (and Lagrange multipliers ν) for the
`estimator` at coordinates `xₒ`.
"""
function weights(estimator::FittedKriging, xₒ::AbstractVector)
nobs = size(estimator.state.X, 2)
set_rhs!(estimator, xₒ)
# solve Kriging system
x = estimator.state.LHS \ estimator.state.RHS
λ = view(x,1:nobs)
ν = view(x,nobs+1:length(x))
KrigingWeights(λ, ν)
end
"""
set_rhs!(estimator, xₒ)
Set RHS of Kriging system at coordinates `xₒ`.
"""
function set_rhs!(estimator::FittedKriging, xₒ::AbstractVector)
γ = estimator.estimator.γ
X = estimator.state.X
RHS = estimator.state.RHS
# RHS variogram/covariance
@inbounds for j in 1:size(X, 2)
xj = view(X,:,j)
RHS[j] = isstationary(γ) ? sill(γ) - γ(xj, xₒ) : γ(xj, xₒ)
end
set_constraints_rhs!(estimator, xₒ)
end
"""
set_constraints_rhs!(estimator, xₒ)
Set constraints in RHS of Kriging system.
"""
set_constraints_rhs!(estimator::FittedKriging, xₒ::AbstractVector) = error("not implemented")
"""
combine(estimator, weights, z)
Combine `weights` with values `z` to produce mean and variance.
"""
function combine(estimator::FittedKriging, weights::KrigingWeights, z::AbstractVector)
γ = estimator.estimator.γ
b = estimator.state.RHS
λ = weights.λ
ν = weights.ν
# compute b⋅[λ;ν]
nobs = length(λ)
c₁ = view(b,1:nobs)⋅λ
c₂ = view(b,nobs+1:length(b))⋅ν
c = c₁ + c₂
if isstationary(γ)
z⋅λ, sill(γ) - c
else
z⋅λ, c
end
end
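# A minimal usage sketch, kept as a comment since the concrete estimators are
# only included below. `OrdinaryKriging` and the variogram `γ` are illustrative
# assumptions, not definitions made in this file:
#
#   estimator = OrdinaryKriging(γ)      # γ: a variogram from Variography
#   krig      = fit(estimator, X, z)    # X: dims × nobs matrix, z: nobs values
#   status(krig) || error("Kriging system factorization failed")
#   μ, σ² = predict(krig, xₒ)           # mean and variance at location xₒ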
#-----------------
# IMPLEMENTATIONS
#-----------------
include("estimators/simple_kriging.jl")
include("estimators/ordinary_kriging.jl")
include("estimators/universal_kriging.jl")
include("estimators/external_drift_kriging.jl")
|
/* Copyright John Reid 2007
*/
#include "bio-pch.h"
#include "bio/application.h"
#include "bio/remo.h"
#include "bio/markov_model.h"
USING_BIO_NS
#include <boost/filesystem/path.hpp>
using namespace boost;
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using namespace std;
struct MarkovModelsApp : Application
{
std::string remo_extraction_filename;
bool masked;
bool sorted;
MarkovModelsApp()
{
get_options().add_options()
("remo,r", po::value(&remo_extraction_filename)->default_value("remo_space.bin"), "remo extraction file")
("masked,m", po::value(&masked)->default_value(true), "use masked remos")
("sorted,s", po::value(&sorted)->default_value(true), "sort markov model counts")
;
}
int task()
{
cout << "Reading remos from " << remo_extraction_filename << "\n";
cout << (masked ? "Using" : "Not using") << " masked remos\n";
cout << (sorted ? "Sorting" : "Not sorting") << " markov model counts\n";
//deserialise the binary remo archive
ReMoExtraction::ptr_t remo;
{
fs::path
remo_extraction_archive(
remo_extraction_filename
);
cout << "Deserialising remo extraction from \"" << remo_extraction_archive._BOOST_FS_NATIVE() << "\"\n";
remo = ReMoExtraction::deserialise(remo_extraction_archive);
}
DnaSymbolAlphabet alphabet;
MarkovModel< 0 > zeroth_order_mm(4);
MarkovModel< 1 > first_order_mm(4);
MarkovModel< 2 > second_order_mm(4);
MarkovModel< 3 > third_order_mm(4);
//do the analysis on each remo
{
const unsigned num_groups = remo->sequence_groups.size();
for (ReMoSequenceGroup::list_t::const_iterator sg = remo->sequence_groups.begin();
remo->sequence_groups.end() != sg;
++sg)
{
for (ReMoBundle::map_t::const_iterator rb = sg->get()->remo_bundles.begin();
sg->get()->remo_bundles.end() != rb;
++rb)
{
try
{
//get the centre sequence
const seq_t centre_sequence = rb->second->get_sequence(rb->second->centre_sequence, masked);
zeroth_order_mm.add_to_counts(
centre_sequence.begin(),
centre_sequence.end(),
alphabet);
first_order_mm.add_to_counts(
centre_sequence.begin(),
centre_sequence.end(),
alphabet);
second_order_mm.add_to_counts(
centre_sequence.begin(),
centre_sequence.end(),
alphabet);
third_order_mm.add_to_counts(
centre_sequence.begin(),
centre_sequence.end(),
alphabet);
}
catch (const std::exception & ex)
{
cerr << "Error: " << ex.what() << endl;
}
catch (const string & msg)
{
cerr << "Error: " << msg << endl;
}
catch (const char * msg)
{
cerr << "Error: " << msg << endl;
}
catch (...)
{
cerr << "Undefined error" << endl;
}
}
}
}
std::cout << "\n";
zeroth_order_mm.print(alphabet, sorted); std::cout << "\n";
first_order_mm.print(alphabet, sorted); std::cout << "\n";
//second_order_mm.print(alphabet, sorted); std::cout << "\n";
//third_order_mm.print(alphabet, sorted); std::cout << "\n";
return 0;
}
};
int
main(int argc, char * argv[])
{
return MarkovModelsApp().main(argc, argv);
}
|
## <center>Computer Science Intensive Course - MindX</center>
# <center>LAB 1. PYTHON REVIEW</center>
```python
# run this cell FIRST
%run test_cases_1.ipynb
```
## Exercise 1. Example
**Input**: A string of length > 1.
**Output**: The string converted to uppercase.
**Instructions**: Implement the function uppercase(), which takes one string parameter containing the input text.
**Example**:
- Input: This is a string
- Output: result = "THIS IS A STRING"
```python
def uppercase(input_str):
return input_str.upper()
# do some test
uppercase("This is a string")
```
'THIS IS A STRING'
Check the result:
```python
# !!! DO NOT MODIFY THIS CELL
# Check result on test cases
test1(uppercase)
```
Testing on 5 cases.
- Test 1 PASSED.
- Test 2 PASSED.
- Test 3 PASSED.
- Test 4 PASSED.
- Test 5 PASSED.
CONGRATULATIONS! All test cases passed!
## Exercise 2. Square Diagonal
**Input**: A real number *a*, the length of one side of a square.
**Output**: The length of that square's diagonal.
Given:
- The Pythagorean theorem for a triangle ABC with a right angle at A:
$\begin{equation} AB^2 + AC^2 = BC^2 \end{equation}$
- The square-root function in the *math* library: *sqrt()*
**Example**:
- Input: a = 2
- Output: 2.8284271247461903
```python
import math
def cal_diagonal(edge_length):
    # diagonal of a square with side a: d = a * sqrt(2), from AB^2 + AC^2 = BC^2
    return edge_length * math.sqrt(2)
```
```python
# !!! DO NOT MODIFY THIS CELL
# Check result on test cases
test2(cal_diagonal)
```
Testing on 7 cases.
- Test 1 PASSED.
- Test 2 PASSED.
- Test 3 PASSED.
- Test 4 PASSED.
- Test 5 PASSED.
- Test 6 PASSED.
- Test 7 PASSED.
CONGRATULATIONS! All test cases passed!
## Exercise 3. Digit Sum
**Input**: A 4-digit integer *n*
**Output**: The sum of the digits of *n*
**Example**:
- Input: n = 1234
- Output: result = 10
- Explanation: result = 1+2+3+4 = 10
```python
def sum_digits(num):
    # sum the decimal digits of the integer
    return sum(int(d) for d in str(num))
sum_digits(1234)
```
10
```python
# !!! DO NOT MODIFY THIS CELL
# Check result on test cases
test3(sum_digits)
```
Testing on 6 cases.
- Test 1 PASSED.
- Test 2 PASSED.
- Test 3 PASSED.
- Test 4 PASSED.
- Test 5 PASSED.
- Test 6 PASSED.
CONGRATULATIONS! All test cases passed!
## Exercise 4. Day of the Week
**Input**: An integer *2 <= n <= 8*.
**Output**: The name of the corresponding day of the week.
**Example**:
Example 1:
- Input: n = 2
- Output: Monday
Example 2:
- Input: n = 8
- Output: Sunday
```python
def day_in_week(day_int):
    # 2 maps to Monday, ..., 8 maps to Sunday
    days = ["Monday", "Tuesday", "Wednesday", "Thursday",
            "Friday", "Saturday", "Sunday"]
    return days[day_int - 2]
```
```python
# !!! DO NOT MODIFY THIS CELL
# Check result on test cases
test4(day_in_week)
```
Testing on 7 cases.
- Test 1 PASSED.
- Test 2 PASSED.
- Test 3 PASSED.
- Test 4 PASSED.
- Test 5 PASSED.
- Test 6 PASSED.
- Test 7 PASSED.
CONGRATULATIONS! All test cases passed!
## Exercise 5. Substring Search
A substring is a contiguous sequence of characters within a string.
**Input**: A string S and a string sub_s of length >= 1.
**Output**: The first index of sub_s in S. Return -1 if it is not found.
**Example**:
Example 1:
- Input: S = "abcdef", s = "bc"
- Output: result = 1
- Explanation: The substring "bc" is found at position S[1]
Example 2:
- Input: S = "abcdefabcdef", s = "bc"
- Output: result = 1
- Explanation: The substring "bc" occurs at S[1] and S[7], but only the first position found is reported.
Example 3:
- Input: S = "ABCDEF", s = "bc"
- Output: result = -1
```python
def find_substring(string, substring):
    # str.find returns the first index of substring, or -1 when absent
    return string.find(substring)
```
```python
# !!! DO NOT MODIFY THIS CELL
# Check result on test cases
test5(find_substring)
```
Testing on 7 cases.
- Test 1 PASSED.
- Test 2 PASSED.
- Test 3 PASSED.
- Test 4 PASSED.
- Test 5 PASSED.
- Test 6 PASSED.
- Test 7 PASSED.
CONGRATULATIONS! All test cases passed!
```python
```
|
lemma infdist_mono: assumes "A \<subseteq> B" "A \<noteq> {}" shows "infdist x B \<le> infdist x A" |
From Hammer Require Import Hammer.
Require Import Coq.Program.Basics.
Require Export FunctionalExtensionality.
Open Scope program_scope.
Lemma compose_id_left : forall A B (f : A -> B), id ∘ f = f.
Proof. hammer_hook "Combinators" "Combinators.compose_id_left".
intros.
unfold id, compose.
symmetry. apply eta_expansion.
Qed.
Lemma compose_id_right : forall A B (f : A -> B), f ∘ id = f.
Proof. hammer_hook "Combinators" "Combinators.compose_id_right".
intros.
unfold id, compose.
symmetry ; apply eta_expansion.
Qed.
Lemma compose_assoc : forall A B C D (f : A -> B) (g : B -> C) (h : C -> D),
h ∘ g ∘ f = h ∘ (g ∘ f).
Proof. hammer_hook "Combinators" "Combinators.compose_assoc".
intros.
reflexivity.
Qed.
Hint Rewrite @compose_id_left @compose_id_right : core.
Hint Rewrite <- @compose_assoc : core.
Lemma flip_flip : forall A B C, @flip A B C ∘ flip = id.
Proof. hammer_hook "Combinators" "Combinators.flip_flip".
unfold flip, compose.
intros.
extensionality x ; extensionality y ; extensionality z.
reflexivity.
Qed.
Lemma prod_uncurry_curry : forall A B C, @prod_uncurry A B C ∘ prod_curry = id.
Proof. hammer_hook "Combinators" "Combinators.prod_uncurry_curry".
simpl ; intros.
unfold prod_uncurry, prod_curry, compose.
extensionality x ; extensionality y ; extensionality z.
reflexivity.
Qed.
Lemma prod_curry_uncurry : forall A B C, @prod_curry A B C ∘ prod_uncurry = id.
Proof. hammer_hook "Combinators" "Combinators.prod_curry_uncurry".
simpl ; intros.
unfold prod_uncurry, prod_curry, compose.
extensionality x ; extensionality p.
destruct p ; simpl ; reflexivity.
Qed.
|
import os
import logging
from argparse import Namespace
import numpy as np
import torch as th
from fairseq import utils, metrics, criterions
from fairseq.data import encoders
from fairseq.data.audio.cs291k_dataset import CS291KDataConfig, CS291KDataset, CS291KDatasetCreator
from fairseq.data.audio.speech_to_text_dataset import SpeechToTextDataset, get_features_or_waveform
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass.configs import GenerationConfig
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import LegacyFairseqTask
logger = logging.getLogger(__name__)
EVAL_BLEU_ORDER = 4
@register_task('cs291k_task')
class CS291KTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument(
'data',
help='Manifest root path'
)
parser.add_argument(
'--normalize',
action='store_true',
help='If set, normalizes input to have 0 mean and unit variance'
)
parser.add_argument(
'--config-yaml',
type=str,
default='config.yaml',
help='Configuration YAML filename (under manifest root)'
)
parser.add_argument(
'--max-source-positions',
type=int,
default=6000,
help='Max number of tokens in the source sequence'
)
parser.add_argument(
'--max-target-positions',
type=int,
default=1024,
help='Max number of tokens in the target sequence'
)
parser.add_argument(
'--sample-rate',
type=int,
default=16000,
help='Sample rate of audio'
)
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
parser.add_argument('--eval-bleu-bpe', type=str, metavar='BPE',
default=None,
help='args for building the bpe, if needed')
parser.add_argument('--eval-bleu-bpe-path', type=str, metavar='BPE',
help='args for building the bpe, if needed')
def __init__(self, args, src_dict, tgt_dict, data_cfg):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.data_cfg = data_cfg
@classmethod
def setup_task(cls, args, **kwargs):
data_cfg = CS291KDataConfig(os.path.join(args.data, args.config_yaml))
def load_dict(vocab_filename):
_dict_path = os.path.join(args.data, vocab_filename)
if not os.path.exists(_dict_path):
raise FileNotFoundError('Dict not found: {}'.format(_dict_path))
_dict = Dictionary.load(_dict_path)
return _dict
src_dict = load_dict(data_cfg.src_vocab_filename)
tgt_dict = load_dict(data_cfg.vocab_filename)
logger.info(
'source dictionary size ({}): {}'.format(data_cfg.src_vocab_filename, len(src_dict))
)
logger.info(
'target dictionary size ({}): {}'.format(data_cfg.vocab_filename, len(tgt_dict))
)
return cls(args, src_dict, tgt_dict, data_cfg)
def build_criterion(self, args):
if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
raise ValueError(
'Please set "--ignore-prefix-size 1" since '
'target language ID token is prepended as BOS.'
)
return criterions.build_criterion(args, self)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith('train')
pre_tokenizer = self.build_tokenizer(self.args)
src_bpe_tokenizer = self.build_bpe('source')
bpe_tokenizer = self.build_bpe('target')
self.datasets[split] = CS291KDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.src_dict,
self.tgt_dict,
pre_tokenizer,
src_bpe_tokenizer,
bpe_tokenizer,
is_train_split,
epoch,
self.args.seed,
self.args.normalize,
)
def build_tokenizer(self, tokenizer_config):
logger.info('pre-tokenizer: {}'.format(self.data_cfg.pre_tokenizer))
self.tokenizer = encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))
return self.tokenizer
def build_bpe(self, side):
logger.info('{} tokenizer: {}'.format(side, self.data_cfg.bpe_tokenizer))
if side == 'target':
self.bpe = encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
return self.bpe
else:
self.src_bpe = None
if getattr(self.data_cfg, 'src_bpe_tokenizer', None): # None if use phoneme src
self.src_bpe = encoders.build_bpe(Namespace(**self.data_cfg.src_bpe_tokenizer))
return self.src_bpe
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.tgt_dict
def max_positions(self):
return self.args.max_source_positions, self.args.max_target_positions
def build_model(self, args):
model = super(CS291KTask, self).build_model(args)
if getattr(args, "eval_bleu", False):
import json
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args))
if args.eval_bleu_bpe is None:
self.bpe = None
else:
logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
self.bpe = encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator([model], GenerationConfig(**gen_args))
return model
def build_generator(self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None):
if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
raise ValueError(
'Please set "--prefix-size 1" since '
'target language ID token is prepended as BOS.'
)
lang_token_ids = {
i for s, i in self.tgt_dict.indices.items() if SpeechToTextDataset.is_lang_tag(s)
}
extra_gen_cls_kwargs = {"symbols_to_strip_from_output": lang_token_ids}
return super().build_generator(
models, args, seq_gen_cls=seq_gen_cls, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
return CS291KDataset(
"interactive", False, self.data_cfg, src_tokens, src_lengths
)
def get_interactive_tokens_and_lengths(self, lines, encode_fn):
n_frames = [get_features_or_waveform(p, True).shape[0] for p in lines]
return lines, n_frames
def valid_step(self, sample, model, criterion):
sample["net_input"]["src_group_lengths"] = None
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
sample["net_input"]["src_group_lengths"] = None
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
escape_unk=escape_unk,
)
if self.bpe is not None:
s = self.bpe.decode(s)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyp = decode(gen_out[i][0]["tokens"])
ref = decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
# if self.args.lang_prefix_tok is not None:
# hyp = hyp.replace(self.args.lang_prefix_tok, "")
# ref = ref.replace(self.args.lang_prefix_tok, "")
hyps.append(hyp)
refs.append(ref)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
for i in range(EVAL_BLEU_ORDER):
if type(counts[i]) is th.Tensor:
counts[i] = counts[i].cpu()
if type(totals[i]) is th.Tensor:
totals[i] = totals[i].cpu()
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu) |
[STATEMENT]
lemma cont_Abs_listfun_down' [simp]:
"cont (\<lambda>v. Abs_listfun (down'\<cdot>v, down'\<cdot>v\<cdot>(:)\<cdot>[]))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cont (\<lambda>v. Abs_listfun (down'\<cdot>v, down'\<cdot>v\<cdot>Cons\<cdot>[]))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cont (\<lambda>v. Abs_listfun (down'\<cdot>v, down'\<cdot>v\<cdot>Cons\<cdot>[]))
[PROOF STEP]
have "\<And>v. (down'\<cdot>v, down'\<cdot>v\<cdot>(:)\<cdot>[]) \<in> {(f, xs). f = abstract_list xs}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>v. (down'\<cdot>v, down'\<cdot>v\<cdot>Cons\<cdot>[]) \<in> {(f, xs). f = (\<Lambda> c n. foldr\<cdot>c\<cdot>n\<cdot>xs)}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(down'\<cdot>?v, down'\<cdot>?v\<cdot>Cons\<cdot>[]) \<in> {(f, xs). f = (\<Lambda> c n. foldr\<cdot>c\<cdot>n\<cdot>xs)}
goal (1 subgoal):
1. cont (\<lambda>v. Abs_listfun (down'\<cdot>v, down'\<cdot>v\<cdot>Cons\<cdot>[]))
[PROOF STEP]
from cont_Abs_listfun [OF this, of id]
[PROOF STATE]
proof (chain)
picking this:
cont (\<lambda>x. (down'\<cdot>(id x), down'\<cdot>(id x)\<cdot>Cons\<cdot>[])) \<Longrightarrow> cont (\<lambda>x. Abs_listfun (down'\<cdot>(id x), down'\<cdot>(id x)\<cdot>Cons\<cdot>[]))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cont (\<lambda>x. (down'\<cdot>(id x), down'\<cdot>(id x)\<cdot>Cons\<cdot>[])) \<Longrightarrow> cont (\<lambda>x. Abs_listfun (down'\<cdot>(id x), down'\<cdot>(id x)\<cdot>Cons\<cdot>[]))
goal (1 subgoal):
1. cont (\<lambda>v. Abs_listfun (down'\<cdot>v, down'\<cdot>v\<cdot>Cons\<cdot>[]))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
cont (\<lambda>v. Abs_listfun (down'\<cdot>v, down'\<cdot>v\<cdot>Cons\<cdot>[]))
goal:
No subgoals!
[PROOF STEP]
qed |
[STATEMENT]
lemma gx0:
assumes "x \<ge> gx0"
shows "g x \<ge> gc2 * g' (real x)" "f x > 0" "g' (real x) \<ge> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. gc2 * g' (real x) \<le> g x &&& 0 < f x &&& 0 \<le> g' (real x)
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. gc2 * g' (real x) \<le> g x
2. 0 < f x
3. 0 \<le> g' (real x)
[PROOF STEP]
from eventually_conj[OF gc2(2) eventually_conj[OF f_pos eventually_nat_real[OF g'_nonneg]]]
[PROOF STATE]
proof (chain)
picking this:
\<forall>\<^sub>F x in sequentially. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
[PROOF STEP]
have "\<exists>gx0. \<forall>x\<ge>gx0. g x \<ge> gc2 * g' (real x) \<and> f x > 0 \<and> g' (real x) \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
\<forall>\<^sub>F x in sequentially. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
goal (1 subgoal):
1. \<exists>gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
[PROOF STEP]
by (simp add: eventually_at_top_linorder)
[PROOF STATE]
proof (state)
this:
\<exists>gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
goal (3 subgoals):
1. gc2 * g' (real x) \<le> g x
2. 0 < f x
3. 0 \<le> g' (real x)
[PROOF STEP]
note someI_ex[OF this]
[PROOF STATE]
proof (state)
this:
\<forall>x\<ge>SOME x. \<forall>xa\<ge>x. gc2 * g' (real xa) \<le> g xa \<and> 0 < f xa \<and> 0 \<le> g' (real xa). gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
goal (3 subgoals):
1. gc2 * g' (real x) \<le> g x
2. 0 < f x
3. 0 \<le> g' (real x)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>x\<ge>SOME x. \<forall>xa\<ge>x. gc2 * g' (real xa) \<le> g xa \<and> 0 < f xa \<and> 0 \<le> g' (real xa). gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
goal (3 subgoals):
1. gc2 * g' (real x) \<le> g x
2. 0 < f x
3. 0 \<le> g' (real x)
[PROOF STEP]
have "x \<ge> (SOME gx0. \<forall>x\<ge>gx0. g x \<ge> gc2 * g' (real x) \<and>f x > 0 \<and> g' (real x) \<ge> 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (SOME gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)) \<le> x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
gx0 \<le> x
goal (1 subgoal):
1. (SOME gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)) \<le> x
[PROOF STEP]
unfolding gx0_def
[PROOF STATE]
proof (prove)
using this:
max x\<^sub>1 (SOME gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)) \<le> x
goal (1 subgoal):
1. (SOME gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)) \<le> x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(SOME gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)) \<le> x
goal (3 subgoals):
1. gc2 * g' (real x) \<le> g x
2. 0 < f x
3. 0 \<le> g' (real x)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<forall>x\<ge>SOME x. \<forall>xa\<ge>x. gc2 * g' (real xa) \<le> g xa \<and> 0 < f xa \<and> 0 \<le> g' (real xa). gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
(SOME gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)) \<le> x
[PROOF STEP]
show "g x \<ge> gc2 * g' (real x)" "f x > 0" "g' (real x) \<ge> 0"
[PROOF STATE]
proof (prove)
using this:
\<forall>x\<ge>SOME x. \<forall>xa\<ge>x. gc2 * g' (real xa) \<le> g xa \<and> 0 < f xa \<and> 0 \<le> g' (real xa). gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
(SOME gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)) \<le> x
goal (1 subgoal):
1. gc2 * g' (real x) \<le> g x &&& 0 < f x &&& 0 \<le> g' (real x)
[PROOF STEP]
unfolding gx0_def
[PROOF STATE]
proof (prove)
using this:
\<forall>x\<ge>SOME x. \<forall>xa\<ge>x. gc2 * g' (real xa) \<le> g xa \<and> 0 < f xa \<and> 0 \<le> g' (real xa). gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)
(SOME gx0. \<forall>x\<ge>gx0. gc2 * g' (real x) \<le> g x \<and> 0 < f x \<and> 0 \<le> g' (real x)) \<le> x
goal (1 subgoal):
1. gc2 * g' (real x) \<le> g x &&& 0 < f x &&& 0 \<le> g' (real x)
[PROOF STEP]
by blast+
[PROOF STATE]
proof (state)
this:
gc2 * g' (real x) \<le> g x
0 < f x
0 \<le> g' (real x)
goal:
No subgoals!
[PROOF STEP]
qed |
import Sets.Basic
import Functions.Basic
open Set
open Func
variable (α β γ : Type)
variable (f : α → β) (g : β → γ)
variable (X Y : Set α) (U V : Set β)
theorem problem1 (h : U ⊆ V) : f ⁻¹ U ⊆ f ⁻¹ V := by
intro a h'
have : f a ∈ V := h (f a) h'
exact this
theorem problem2 (h₁ : α ≅ β) (h₂ : β ≅ γ) : α ≅ γ := by
have ⟨f,u⟩ := h₁
have ⟨g,v⟩ := h₂
have l : Bijective (g ∘ f) := by
have (inj : Injective (g ∘ f)) := inj_comp u.left v.left
have (surj : Surjective (g ∘ f)) := surj_comp u.right v.right
exact ⟨inj,surj⟩
exact ⟨g ∘ f,l⟩
theorem problem3 (h : Surjective f) : HasRightInv f := by
let g : β → α := by
intro b
have : ∃ a, f a = b := h b
have (a : α) := Classical.choose this
exact a
have (l : f ∘ g = id) := by
apply funext
intro b
have u : g b = Classical.choose (h b) := by rfl
have v : f (Classical.choose (h b)) = b := Classical.choose_spec (h b)
calc
f (g b) = f (Classical.choose (h b)) := by rw [u]
_ = b := by rw [v]
exact ⟨g,l⟩
theorem problem4 : sorry := sorry
theorem problem5 : sorry := sorry
|
# install.packages("blockrand")
# install.packages("psych")
library(psych)     # headTail(), pairs.panels()
require(blockrand) # block.random()
condition <- block.random( n= 150,c(block = 3,drug = 2))
headTail(condition)
condition
write.csv(condition,"randomized_design.csv")
pairs.panels(condition)
|
module Data.Compress.ZLib
import Data.Compress.Utils.Parser
import Data.Compress.Utils.Bytes
import Data.Compress.Utils.Misc
import Data.Compress.Interface
import Data.Vect
import Data.Bits
import Data.List
import Data.SnocList
import Data.Stream
import Data.Compress.CRC
import Control.Monad.Error.Either
import public Data.Compress.Inflate
public export
data ZLibParserState'
= ZLibHead
| ZLibFoot
| ZLibInflate
public export
data ZLibParserState : ZLibParserState' -> Type where
AtZHeader : ZLibParserState ZLibHead
AtInflate : (adler32 : Bits32) -> InflateState -> ZLibParserState ZLibInflate
AtZFooter : (adler32 : Bits32) -> ZLibParserState ZLibFoot
public export
data ZLibState
= MkState (List Bits8) (DPair ZLibParserState' ZLibParserState)
nul_term_string : Semigroup e => Parser (List Bits8) e String
nul_term_string = map ascii_to_string $ take_until (0 ==) token
record ZLibFooter where
constructor MkZLibFooter
adler32 : Bits32
export
parse_zlib_header : Parser (List Bits8) (SimpleError String) ()
parse_zlib_header = do
cmf <- token
flg <- token
let True = 0 == (((shiftL {a=Bits16} (cast cmf) 8) .|. cast flg) `mod` 31)
| False => fail $ msg "zlib: fcheck checksum failed"
let False = testBit flg 5
| True => fail $ msg "zlib: fdict is set"
pure ()
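-- Adler-32 (RFC 1950): two running sums modulo 65521 (the largest prime below
-- 2^16); `a` accumulates the bytes and `b` accumulates the successive values
-- of `a`, and the checksum packs them as (b << 16) .|. a. `update_adler32`
-- below folds a chunk of bytes into an existing checksum, so it can be
-- computed incrementally as the stream is inflated.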
update_adler32 : Bits32 -> List Bits8 -> Bits32
update_adler32 alder = go (alder .&. 0xffff) (shiftR alder 16) where
go : Bits32 -> Bits32 -> List Bits8 -> Bits32
go a b (x :: xs) =
let base = 65521
a' = (a + (cast x)) `mod` base
b' = (a' + b) `mod` base
in go a' b' xs
go a b [] = (shiftL b 16) .|. a
export
parse_zlib_footer : Parser (List Bits8) (SimpleError String) ZLibFooter
parse_zlib_footer = do
adler32 <- cast <$> p_be_nat 4
pure (MkZLibFooter adler32)
feed_zlib' : SnocList Bits8 -> DPair ZLibParserState' ZLibParserState -> List Bits8 -> Either String (SnocList Bits8, ZLibState)
feed_zlib' acc (_ ** AtZHeader) [] = Right (acc, MkState [] (_ ** AtZHeader))
feed_zlib' acc (_ ** AtZHeader) content =
case feed content parse_zlib_header of
Pure leftover header => feed_zlib' acc (_ ** AtInflate 1 init) leftover
Fail err => Left $ show err
_ => Right (acc, MkState content (_ ** AtZHeader))
feed_zlib' acc (_ ** AtInflate adler32 inflate_state) content =
case feed inflate_state content of
Left err => Left err
Right (uncompressed, (MkState _ _ (_ ** AtEnd leftover))) =>
let adler32 = update_adler32 adler32 uncompressed
in feed_zlib' (acc <>< uncompressed) (_ ** AtZFooter adler32) (toList leftover)
Right (uncompressed, inflate_state) => -- underfed
let adler32 = update_adler32 adler32 uncompressed
in Right (acc <>< uncompressed, MkState [] (_ ** AtInflate adler32 inflate_state))
feed_zlib' acc (_ ** AtZFooter adler32) content =
case feed content parse_zlib_footer of
Pure leftover footer =>
if footer.adler32 /= adler32
then Left "adler32 mismatch \{show footer.adler32} \{show adler32}"
else feed_zlib' acc (_ ** AtZHeader) leftover
Fail err => Left $ show err
_ => Right (acc, MkState content (_ ** AtZFooter adler32))
export
Decompressor ZLibState where
feed (MkState leftover state) input = mapFst toList <$> feed_zlib' Lin state (leftover <+> input)
done (MkState [] (_ ** AtZHeader)) = Right []
done (MkState _ (_ ** AtZHeader)) = Left "zlib: leftover data after header"
done _ = Left "zlib: underfed"
init = MkState [] (_ ** AtZHeader)
|
import Smt.Tactic.Concretize
def generalAdd [Add α] (a b : α) := a + b
example : @generalAdd Int _ 3 3 = (6 : Int) := by
concretize [generalAdd]
rfl
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE Strict #-}
{-# LANGUAGE FlexibleContexts #-}
module CPVO.IO.Reader.Ecalj.MMOM where
import CPVO.Numeric
import CPVO.IO
import CPVO.IO.Reader.Ecalj.Common
import CPVO.IO.Reader.Ecalj.DOS
import qualified Data.Text as T
import qualified Data.Text.IO as T
import qualified Data.Text.Read as T
import Data.Either (rights)
-------------------------
import Numeric.LinearAlgebra
readMMOM :: Int -> String -> IO [Double]
readMMOM nAtom foldernya = do
fLLMF <- fmap (T.unpack . head) $ getLastLLMF foldernya
putStrLn $ "===================processed LLMF=" ++ fLLMF
mmom <- fmap (map T.double) $ inshell2text $ concat [ "mkdir -p temp; grep mmom ", fLLMF
,"| tail -n", show (nAtom + 1)
,"| head -n", show nAtom
,"| awk '{print $2}'"
]
sdtMMOM <- fmap (map T.double) $ inshell2text $ concat [ "grep mmom ", fLLMF, "| grep ehf | tail -1 | sed -e 's/^.*mmom=//g'| awk '{print $1}'"
]
return ( map fst $ rights $ concat [sdtMMOM,mmom])
----------------------------------------------------------------------
--getMMOM allArgs@(texFile:jd:jdHead:colAlign:xr:ymax':wTot:tumpuk:invS:tailer:foldernya:aos) = do
getMMOM :: [String] -> IO ()
getMMOM allArgs = do
--getMMOM allArgs = do
putStrLn "===start ==== CPVO.IO.Reader.Ecalj.MMOM: getMMOM ==="
--(invStat, ymax, xmin, xmax, ctrlAtoms, uniqAtoms, ctrlAtomicAOs,jdTable, cleanedJdHead, foldernya, tailer,colAlign,texFile) <- readHeaderData allArgs
Right (invStat,_,_,_,ctrlAtoms,_, ctrlAtomicAOs,jdTable, cleanedJdHead,foldernya,tailer,_,texFile) <- readHeaderData allArgs
-------------------------------generating data------------------------
-------------------------------generating DOS data------------------------
totalDOS <- readTotalDOSText tailer foldernya
-------------------------------integrating DOS data-----------------------
let intgTot = map (\i -> integrateToZero $ totalDOS ¿ [0,i]) $ flipBy invStat [1,2] -- run it on spin [1,2]
putStrLn $ show intgTot
-------------------------------integrating PDOS data------------------------
let nAtom = length ctrlAtoms
-------------------------------generating PDOS data------------------------
-- map ditambah -1 karena input mengikuti gnuplot
-- input : d kolom 6-10
-- gnuplot : d kolom 6-10
-- hmatrix : d kolom 5-9
-------------------------------integrating PDOS data------------------------
putStrLn $ "========invStat=" ++ (show invStat)
(tMMomSD:mmomSD) <- fmap (map (* invStat)) $ readMMOM nAtom foldernya
putStrLn $ show $ map (showDouble (3::Integer)) mmomSD
putStrLn $ show tMMomSD
putStrLn "==========show tMMomSD==========="
pdosAtomicAll <- readPDOS invStat tailer foldernya ctrlAtomicAOs
let integratedAtomicPDOS = integrateAtomicPDOS pdosAtomicAll
let rIntgAll' = rendertable
$ (:) cleanedJdHead
$ (:) (concat [ ["Total" ]
, [" "]
, map (showDouble (3::Integer)) $ (\[t,iu,idn] -> [t,iu-idn,t-(iu-idn)]) $ (tMMomSD:intgTot)
])
$ zipWith (\a b -> a:b) (map show ([1,2..]::[Integer]))
$ zipWith (\sdMom (intMom,(_,(j,_))) -> j:(map (showDouble (3::Integer)) [sdMom,intMom,sdMom-intMom])) mmomSD
$ map (\(iu,idn,b) -> ((iu-idn),b) ) integratedAtomicPDOS
let rIntgAll = unlines [
rIntgAll'
, jdTable
]
{-
resIntAll' <- markdownToTex rIntgAll
let resIntAll = T.replace "\\}" "}"
$ T.replace "\\{" "{" $ T.pack
$ unlines [
"\\begin{longtable}[]{" ++ colAlign ++ "}"
, unlines $ tail $ lines $ T.unpack resIntAll'
]
putStrLn rIntgAll
T.putStrLn resIntAll
-}
T.writeFile texFile $ T.pack rIntgAll
putStrLn "===done CPVO.IO.Reader.Ecalj.MMOM: getMMOM ==="
|
Giovanni Carlo <unk> (July 21, 1766) – Cardinal-Priest of SS. Giovanni e Paolo; Grand Penitentiary; Prefect of the Congregation for the correction of the books of the Oriental Church
|
State Before: m : Type ?u.2570289
n✝ : Type ?u.2570292
inst✝⁴ : DecidableEq n✝
inst✝³ : Fintype n✝
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
n : ℕ
A : Matrix (Fin (Nat.succ n)) (Fin (Nat.succ n)) R
j : Fin (Nat.succ n)
⊢ det A = ∑ i : Fin (Nat.succ n), (-1) ^ (↑i + ↑j) * A i j * det (submatrix A ↑(Fin.succAbove i) ↑(Fin.succAbove j)) State After: m : Type ?u.2570289
n✝ : Type ?u.2570292
inst✝⁴ : DecidableEq n✝
inst✝³ : Fintype n✝
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
n : ℕ
A : Matrix (Fin (Nat.succ n)) (Fin (Nat.succ n)) R
j : Fin (Nat.succ n)
⊢ ∑ j_1 : Fin (Nat.succ n), (-1) ^ (↑j + ↑j_1) * Aᵀ j j_1 * det (submatrix Aᵀ ↑(Fin.succAbove j) ↑(Fin.succAbove j_1)) =
∑ i : Fin (Nat.succ n), (-1) ^ (↑i + ↑j) * A i j * det (submatrix A ↑(Fin.succAbove i) ↑(Fin.succAbove j)) Tactic: rw [← det_transpose, det_succ_row _ j] State Before: m : Type ?u.2570289
n✝ : Type ?u.2570292
inst✝⁴ : DecidableEq n✝
inst✝³ : Fintype n✝
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
n : ℕ
A : Matrix (Fin (Nat.succ n)) (Fin (Nat.succ n)) R
j : Fin (Nat.succ n)
⊢ ∑ j_1 : Fin (Nat.succ n), (-1) ^ (↑j + ↑j_1) * Aᵀ j j_1 * det (submatrix Aᵀ ↑(Fin.succAbove j) ↑(Fin.succAbove j_1)) =
∑ i : Fin (Nat.succ n), (-1) ^ (↑i + ↑j) * A i j * det (submatrix A ↑(Fin.succAbove i) ↑(Fin.succAbove j)) State After: m : Type ?u.2570289
n✝ : Type ?u.2570292
inst✝⁴ : DecidableEq n✝
inst✝³ : Fintype n✝
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
n : ℕ
A : Matrix (Fin (Nat.succ n)) (Fin (Nat.succ n)) R
j i : Fin (Nat.succ n)
x✝ : i ∈ univ
⊢ (-1) ^ (↑j + ↑i) * Aᵀ j i * det (submatrix Aᵀ ↑(Fin.succAbove j) ↑(Fin.succAbove i)) =
(-1) ^ (↑i + ↑j) * A i j * det (submatrix A ↑(Fin.succAbove i) ↑(Fin.succAbove j)) Tactic: refine' Finset.sum_congr rfl fun i _ => _ State Before: m : Type ?u.2570289
n✝ : Type ?u.2570292
inst✝⁴ : DecidableEq n✝
inst✝³ : Fintype n✝
inst✝² : DecidableEq m
inst✝¹ : Fintype m
R : Type v
inst✝ : CommRing R
n : ℕ
A : Matrix (Fin (Nat.succ n)) (Fin (Nat.succ n)) R
j i : Fin (Nat.succ n)
x✝ : i ∈ univ
⊢ (-1) ^ (↑j + ↑i) * Aᵀ j i * det (submatrix Aᵀ ↑(Fin.succAbove j) ↑(Fin.succAbove i)) =
(-1) ^ (↑i + ↑j) * A i j * det (submatrix A ↑(Fin.succAbove i) ↑(Fin.succAbove j)) State After: no goals Tactic: rw [add_comm, ← det_transpose, transpose_apply, transpose_submatrix, transpose_transpose] |
"""
__FileCreationDate__ : 4/5/2020
__Author__ : CodePerfectPlus
__Package__ : Python 3
__GitHub__ : https://www.github.com/codeperfectplus
"""
import numpy as np
# Call Method : https://www.geeksforgeeks.org/__call__-in-python/
class Sigmoid:
""" Softmax gives the probability of output function
Range : 0 to 1
"""
def __call__(self, x):
return 1.0 / (1.0 + np.exp(-x))
def gradient(self, x):
return self.__call__(x) * (1 - self.__call__(x))
class TanH:
"""TanH Gives the probability of output function
Range : - 1 to 1
"""
    def __call__(self, x):
        return np.tanh(x)
class ReLU:
"""
Argument:
x: relu input
dout: gradient error
"""
def __call__(self, x):
return np.maximum(0,x)
    def gradient(self, x, dout):
        # ReLU derivative is 1 for x > 0 and 0 otherwise, scaled by the upstream gradient
        return np.where(x > 0, 1, 0) * dout
class LeakyReLU:
    def __call__(self, x, alpha=0.01):
        # small slope alpha for negative inputs avoids dead units
        return np.where(x > 0, x, alpha * x)
class Softmax:
    def __call__(self, x):
        # subtract the max for numerical stability before exponentiating
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum()
class BinaryStepFunction:
    def __call__(self, x):
        return np.where(x >= 0, 1, 0)
class IdentityFunction:
    def __call__(self, x):
        return x
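
# A minimal usage sketch (assumes only numpy and the classes defined above):
if __name__ == "__main__":
    x = np.array([-2.0, 0.0, 2.0])
    print(Sigmoid()(x))    # ~[0.119, 0.5, 0.881]
    print(ReLU()(x))       # [0. 0. 2.]
    print(Softmax()(x))    # probabilities summing to 1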
|
function n = ndims( x )
n = length( x.size_ );
% Copyright 2005-2014 CVX Research, Inc.
% See the file LICENSE.txt for full copyright information.
% The command 'cvx_where' will show where this file is located.
|
# Romeo LTI kernel
#
# Implementation of kernel function for various systems.
import SpecialFunctions
"""
kernel([T=typeof(1.0), ] sys, N, Δt)
Given a system `sys` compute its kernel vector of length `N`, uniformly
discretized with sample time `Δt`.
When simulating the response `y` of any system to a given signal `u`, the kernel is
defined here as a sequence of samples `k` such that the response is equal to the
discrete convolution `k ⋆ u`.
"""
kernel(sys::SisoLtiSystem, N::Integer, Δt::Real) = kernel(typeof(1.0), sys, N, Δt)
kernel(T::Type{<:Number}, sys::ZeroSys, N::Integer, Δt::Real) = zeros(T, N)
kernel(T::Type{<:Number}, sys::UnitSys, N::Integer, Δt::Real) = convid(T, N)
function kernel(T::Type{<:Number}, sys::Diff{<:Number}, N::Integer, Δt::Real)
@assert N ≥ 1 "Kernel should be at least one sample long"
if sys.α == 0
res = zeros(T, N)
res[1] = one(T)
return res
elseif sys.α == 1
res = zeros(T, N)
res[1] = one(T)
res[2] = -one(T)
return res / Δt
elseif real(sys.α) > 0
# TODO: fix this branch
ddt = kernel(T, Diff(one(typeof(sys.α))), N, Δt)
temp = kernel(T, Diff(sys.α - 1), N, Δt)
return convolve(ddt, temp)
elseif real(sys.α) < 0
ikt = (0:N) .^ (-sys.α)
return (ikt[2:end] - ikt[1:end-1]) .* (Δt^(-sys.α) / SpecialFunctions.gamma(-sys.α + 1))
else
@error "Not implemented!"
end
end
kernel(T::Type{<:Number}, sys::ScaledSystem, N::Integer, Δt::Real) = sys.k * kernel(T, sys.inner, N, Δt)
kernel(T::Type{<:Number}, sys::ParallelSystem, N::Integer, Δt::Real) = kernel(T, sys.first, N, Δt) .+ kernel(T, sys.second, N, Δt)
kernel(T::Type{<:Number}, sys::SeriesSystem, N::Integer, Δt::Real) = convolve(kernel(T, sys.first, N, Δt), kernel(T, sys.second, N, Δt))
kernel(T::Type{<:Number}, sys::RationalSystem, N::Integer, Δt::Real) = deconvolve(kernel(T, sys.num, N, Δt), kernel(T, sys.den, N, Δt))
function kernel(T::Type{<:Number}, sys::PowerSystem{<:Number}, N::Integer, Δt::Real)
    if imag(sys.α) ≠ 0
@error "Cannot compute kernel of a complex power of an arbitrary system."
end
α = real(sys.α)
if α > 0
if floor(α) == α
k1 = kernel(T, sys.inner, N, Δt)
            k = copy(k1)
            for i = 2:floor(Int, α)
k = convolve(k, k1)
end
return k
else
h = floor(Int, α)
r = α - h
if r == 0.5
k1 = convroot(kernel(T, sys.inner, N, Δt))
                k = copy(k1)
                # α = h + 1/2, so the kernel is (2h + 1) convolutions of the square-root kernel
                for i = 2:(2 * floor(Int, h) + 1)
k = convolve(k, k1)
end
return k
else
@error "Unable to compute kernel of this power system."
end
end
else
convinv(kernel(T, PowerSystem(-sys.α, sys.inner), N, Δt))
end
end
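# A minimal usage sketch (comment only; `sys` stands for any concrete
# `SisoLtiSystem` and `u` for an input signal sampled at Δt, while `convolve`
# is the package-internal discrete convolution used above):
#
#   k = kernel(sys, length(u), Δt)   # discretized kernel of the system
#   y = convolve(k, u)               # simulated response, y = k ⋆ u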
export kernel |
install.packages(c("gganimate", "viridis"))
library(gganimate)
library(viridis)
source("../utility_scripts/data_prep.r")
source("../utility_scripts/annotation_defaults.r")
get_breed_percentage_at_level <- function(levels_filter, levels_string, levels_order) {
breed_percentages <- scent_data %>%
filter(level %in% levels_filter) %>%
select(breed, team) %>%
distinct(team, .keep_all=TRUE) %>%
mutate(total_dogs = n()) %>%
group_by(breed) %>%
summarize(number_of_dogs_this_breed = n(), total_dogs=total_dogs) %>%
ungroup() %>%
distinct(breed, .keep_all=TRUE) %>%
mutate(
breed_percentage = 100 * number_of_dogs_this_breed / total_dogs,
level = levels_string,
levels_order = levels_order) %>%
arrange(desc(breed_percentage))
}
summit_top_data <- get_breed_percentage_at_level("Summit", "Summit", 5) %>%
slice_max(breed_percentage, n=5) %>%
select(breed, level, levels_order, breed_percentage)
summit_top_breeds <- summit_top_data$breed
elite_data <- get_breed_percentage_at_level("Elite", "Elite", 4) %>%
filter(breed %in% summit_top_breeds) %>%
select(breed, level, levels_order, breed_percentage)
nw3l3_data <- get_breed_percentage_at_level(c("NW3", "L3"), "L3-NW3", 3) %>%
filter(breed %in% summit_top_breeds) %>%
select(breed, level, levels_order, breed_percentage)
nw2l2_data <- get_breed_percentage_at_level(c("NW2", "L2"), "L2-NW2", 2) %>%
filter(breed %in% summit_top_breeds) %>%
select(breed, level, levels_order, breed_percentage)
nw1l1_data <- get_breed_percentage_at_level(c("NW1", "L1"), "L1-NW1", 1) %>%
filter(breed %in% summit_top_breeds) %>%
select(breed, level, levels_order, breed_percentage)
breed_percentage_data <- bind_rows(
nw1l1_data,
nw2l2_data,
nw3l3_data,
elite_data,
summit_top_data) %>%
mutate(
breed_order = case_when(
breed == "Labrador Retriever" ~ 1,
breed == "Australian Shepherd" ~ 2,
breed == "Golden Retriever" ~ 3,
breed == "German Shepherd Dog" ~ 4,
breed == "Border Collie" ~ 5,
TRUE ~ 6
)) %>%
mutate(breed = str_replace_all(breed, " ", "\n"))
write_csv(breed_percentage_data, "test.csv")
plot <- ggplot(data = breed_percentage_data) +
geom_col(
mapping = aes(x = breed_percentage, y = fct_reorder(breed, breed_order, .desc=TRUE), fill = breed),
color = "grey85",
width = 0.7) +
scale_fill_viridis(option="turbo", name="Breed", discrete=TRUE) +
coord_cartesian(xlim=c(0,15)) +
scale_x_continuous(
breaks = c(0, 5, 7, 10, 15),
labels = c("", "5% of all\ndogs that\nlevel", "7% of all\ndogs that\nlevel", "10% of all\ndogs that\nlevel",
"15% of all\ndogs that\nlevel")) +
annotate(geom="curve", x=7, y=5, xend=9.5, yend =4.5, curvature =0.6, arrow=arrow(angle=10), size = 0.75,
color = "orange") +
annotate(geom="text", x = 10, y = 4.4,
label = "Labrador Retrievers grew in proportion from\n7% of all NW1/L1 competing dogs to\n14% of all Summit level dogs",
hjust = "left", family = "mono", size = 7) +
annotate(geom="segment", x=5, y=1, xend=9.5, yend = 1, arrow=arrow(angle=10), size = 0.75, color = "orange") +
annotate(geom="text", x = 10, y = 1,
label = "The proportion of Border Collies\nremained fairly constant between\n5% and 6% of competing dogs\nacross the levels of competition",
hjust = "left", family = "mono", size = 7) +
theme_minimal() +
theme(
text = element_text("mono"),
legend.position = 'none',
legend.key.height = unit(1.2, "cm"),
axis.title = element_blank(),
axis.text = element_text(size = 16),
plot.title = element_text(size = 24),
plot.subtitle = element_text(size = 22)
) +
labs(
title = "Breed Proportions at Various Competition Levels",
subtitle = 'Competition Level: {closest_state}',
caption = getCaption()
) +
transition_states(
fct_reorder(level, levels_order, min),
transition_length = 2,
state_length = 1
) +
enter_fade() +
exit_shrink() +
ease_aes("sine-in-out")
animation <- animate(plot, width = 1920, height = 1080, bg="white")
anim_save(filename = "breed_proportions_at_levels.gif", animation = animation, path = ".")
|
theory Phi_Logic_Programming_Reasoner
imports Main "HOL-Eisbach.Eisbach" "HOL-Eisbach.Eisbach_Tools" "Phi_Document.Base"
keywords "except" :: quasi_command
and "\<phi>reasoner" "\<phi>reasoner_ML" :: thy_decl % "ML"
and "print_\<phi>reasoners" :: diag
abbrevs
"<premise>" = "\<p>\<r>\<e>\<m>\<i>\<s>\<e>"
and "<simprem>" = "\<s>\<i>\<m>\<p>\<r>\<e>\<m>"
and "<@GOAL>" = "\<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L"
and "<threshold>" = "\<t>\<h>\<r>\<e>\<s>\<h>\<o>\<l>\<d>"
begin
subsubsection \<open>Prelude Settings\<close>
ML \<open>Timing.cond_timeit false "asd" (fn () => OS.Process.sleep (seconds 1.0))\<close>
ML_file \<open>library/pattern.ML\<close>
ML_file \<open>library/helpers.ML\<close>
ML_file \<open>library/handlers.ML\<close>
ML_file \<open>library/pattern_translation.ML\<close>
definition \<r>Require :: \<open>prop \<Rightarrow> prop\<close> ("\<r>REQUIRE _" [2] 2) where [iff]: \<open>\<r>Require X \<equiv> X\<close>
ML_file_debug \<open>library/reasoner.ML\<close>
lemma \<r>Require_I[\<phi>reason 1000]: \<open>PROP P \<Longrightarrow> PROP \<r>Require P\<close> unfolding \<r>Require_def .
section \<open>Introduction\<close>
text \<open>
\<phi>-Logic Programming Reasoner is an extensible reasoning engine
based on logic programming, in the style of Prolog.
It allows arbitrary user reasoners to be integrated freely, and applies them selectively
by matching the pattern of the goals.
The reasoning is a depth-first heuristic search guided by the \<^emph>\<open>priority\<close> of each branch.
A reasoning state is represented by a \<^emph>\<open>pair\<close> of \<^verbatim>\<open>Proof.context\<close> and a sequent, of type
\<^verbatim>\<open>Proof.context * thm\<close>.
Search branches on a reasoning state are admissible reasoners on the sequent.
A reasoner is admissible on a sequent if the sequent matches the pattern of the reasoner
(cf. patterns in \cref{sec:patterns}).
The reasoning accepts several reasoning states, and outputs \<^emph>\<open>one\<close> reasoning state, namely the
first one that satisfies the termination condition, \<^emph>\<open>or\<close> none if every search branch fails.
The priorities of rules indicate which rules are preferable among the admissible reasoners.
The priority makes sense only locally, among all admissible reasoners on a reasoning state.
The accumulated priority value (i.e., the sum of the priorities of all applied reasoners) of a
reasoning state is meaningless and kept merely for debugging.
Because it is a DFS, the first result reached is the optimal one w.r.t. each search branch in a
greedy sense (a global maximum is senseless here because the priority accumulation is
meaningless).
The sequent of the reasoning state is a Harrop Formula (HF), e.g.,
\[ \<open>Antecedent1 \<Longrightarrow> Antecedent2 \<Longrightarrow> Conclusion\<close>, \]
where antecedents represent sub-goals that have to be reasoned \textit{in order}.
The \xphi-LPR engine reasons antecedents in order, invoking the reasoners that match the pattern
of the leading antecedent best (cf. Priority).
An antecedent can be augmented by conditions that can be utilized during the reasoning.
It can also be universally quantified.
\[ \<open>(\<And>x. P1 x \<Longrightarrow> P2 x \<Longrightarrow> Conclusion_of_Antecedent1 x) \<Longrightarrow> A2 \<Longrightarrow> C\<close> \]
A typical reasoner deduces the conclusion of the antecedent by applying an introduction
rule like \<open>A11 x \<Longrightarrow> A12 x \<Longrightarrow> Conclusion_of_Antecedent1 x\<close>, resulting in
\[ \<open>(\<And>x. P1 x \<Longrightarrow> P2 x \<Longrightarrow> A11 x) \<Longrightarrow> (\<And>x. P11 x \<Longrightarrow> P12 x \<Longrightarrow> A12 x) \<Longrightarrow> A2 \<Longrightarrow> C\<close>. \]
Then, the engine recursively reasons about the now-leading antecedent \<open>(\<And>x. P1 x \<Longrightarrow> P2 x \<Longrightarrow> A11 x)\<close>.
The antecedent list of a reasoning state resembles an ordinary program's call stack.
From this perspective, the introduction rule of \<^prop>\<open>Antecedent1\<close> invokes two 'sub-routines'
(or the reasoners of) \<^prop>\<open>A11\<close> and \<^prop>\<open>A12\<close>.
\<close>
section \<open>The Engine \& The Concepts\<close>
text \<open>
The engine is implemented in \<^verbatim>\<open>library/reasoner.ML\<close>.
\<^verbatim>\<open>structure Phi_Reasoner = struct
(*Reasoning state*)
type context_state = Proof.context * thm
type name = term (* the name as a term is just for pretty printing *)
val pattern_on_conclusion : term -> pattern
val pattern_on_condition : term -> pattern
(*A reasoner is a quintuple*)
type reasoner = {
name: name,
pos: Position.T,
pattern: pattern list,
blacklist: pattern list,
tactic: context_state -> context_state Seq.seq
}
type priority = int
val add : priority * reasoner -> Context.generic -> Context.generic
val del : name -> Context.generic -> Context.generic
val reason : context_state -> context_state option
val auto_level : int Config.T
exception Success of context_state
exception Global_Cut of context_state
...
end
\<close>\<close>
paragraph \<open>Patterns \label{sec:patterns}\<close>
text \<open>
The \<^bold>\<open>pattern\<close> and the \<^bold>\<open>blacklist\<close> stipulate the range in which a reasoner will be invoked.
A reasoner is invoked iff the antecedent matches at least one pattern in the pattern list and none
in the blacklist.\<close>
text \<open>
There are two kinds of patterns: those that match on the conclusion and those that match on a
condition, constructed by \<^verbatim>\<open>pattern_on_conclusion\<close> and \<^verbatim>\<open>pattern_on_condition\<close> respectively.
\<close>
text \<open>\<^bold>\<open>Prefix \<^verbatim>\<open>var\<close>\<close>. A schematic variable in a pattern can have name prefix \<^verbatim>\<open>var_\<close>.
In this case, the variable only matches schematic variables.
\<^emph>\<open>Remark\<close>: It is important to write schematic variables in patterns explicitly. The engine
does not convert any free variables to schematic variables implicitly.\<close>
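text \<open>For instance (an illustrative pattern, not one predefined by the system), a pattern
\<^verbatim>\<open>?var_x + ?a\<close> would match an antecedent \<^verbatim>\<open>?y + b\<close> but not \<^verbatim>\<open>c + b\<close>, because the
\<^verbatim>\<open>var_\<close>-prefixed variable \<^verbatim>\<open>?var_x\<close> only matches the schematic variable \<^verbatim>\<open>?y\<close>,
never the free variable \<^verbatim>\<open>c\<close>.\<close>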
paragraph \<open>Automatic Level\<close> text \<open>The level, set by \<^verbatim>\<open>auto_level\<close>,
is a general configuration deciding whether the engine applies
aggressive tactics that may consume considerable time or never terminate.
There are 3 levels:
\<^enum>[0]: the safest, which may mean manual mode for some reasoners.
      It does not exclude non-termination or blocking when some tactics are necessary for the
      features. Methods @{method simp} and @{method clarify} are acceptable on this level.
\<^enum>[1]: relatively safe automation, where aggressive tactics are forbidden but non-termination is
      still possible. Method @{method auto} is forbidden on this level because it blocks too easily.
\<^enum>[2]: the most powerful automation, where no limitation is imposed on automation strategies.\<close>
paragraph \<open>Priority \label{sec:cut}\<close>
text \<open>
The reasoning is a depth-first search and every reasoner is registered with a priority deciding
the order of attempting the reasoners. Reasoners with higher priority are attempted first.
According to the priority of reasoners, reasoners fall into 3 sorts corresponding to
different pruning optimization strategy.
\<^enum> When the priorities of the candidate reasoners on a certain reasoning state are all less than 1000,
  the reasoning works in the normal way: it attempts the highest-priority candidate and, once
  that fails, backtracks to the next candidate.
\<^enum> When the highest priority of the candidates is $\geq$ 1000 and $<$ 1,000,000,
this candidate becomes a \<^emph>\<open>local cut\<close>. The reasoning attempts only the local cut and if it fails,
no other candidates will be attempted, but the backtrack is still propagated to the upper layer
(of the search tree).
  The presence of any candidate with priority $\geq$ 1000 causes the reasoning (at this point)
  to be confident (in the sense that no alternative search branch will be attempted).
\<^enum> When the highest priority of the candidates is $\geq$ 1,000,000,
this candidate becomes a \<^emph>\<open>global cut\<close>, which forgets all the previous search history.
No backtrack will be propagated to the past before the global cut so it improves the performance.
Once the reasoning of the branch of the cut fails, the whole reasoning fails.
Reasoners of priority $\geq$ 1000 are named \<^emph>\<open>confident reasoners\<close> and others are
\<^emph>\<open>submissive reasoners\<close>.
\<^emph>\<open>Remark\<close>: a local cut reasoner can throw \<^verbatim>\<open>Global_Cut s\<close> to trigger a global cut with the
reasoning state \<^verbatim>\<open>s\<close>.
\<close>
paragraph \<open>Termination\<close>
text \<open>The reasoning terminates when:
\<^item> Some reasoning state has no antecedents any more, or all its designated leading
      antecedents are solved. This reasoning state is returned.
\<^item> Any reasoner throws \<^verbatim>\<open>Success result\<close>.
\<^item> All accessible search paths are traversed.
\<close>
text \<open>\<open>\<r>Success\<close> is an antecedent that throws \<^verbatim>\<open>Success\<close>.
Thereby it marks the reasoning as succeeded.
A typical usage of \<open>\<r>Success\<close> is shown in the following sequent,
\[ \<open>A1 \<Longrightarrow> A2 \<Longrightarrow> \<r>Success \<Longrightarrow> P \<Longrightarrow> Q\<close> \]
which expresses the reasoning succeeds after solving \<^prop>\<open>A1\<close>, \<^prop>\<open>A2\<close>, and it outputs
result \<^prop>\<open>P \<Longrightarrow> Q\<close>.\<close>
text \<open>\<open>Pure.prop P\<close> is helpful to protect remaining antecedents if you only want to reason about
the first several antecedents instead of all of them, e.g.,
\[ \<open>Solve_A1 \<Longrightarrow> Pure.prop (Protect_A2 \<Longrightarrow> C)\<close> \]\<close>
paragraph \<open>Output\<close>
text \<open>The output reasoning state can be:
\<^item> The first traversed reasoning state that has no antecedent any more, or whose designated leading
antecedents are all solved.
\<^item> The \<^verbatim>\<open>result\<close> thrown by \<^verbatim>\<open>Success result\<close>.
\<close>
text \<open>If none of the above is reached during a reasoning process, the process returns nothing
(\<^verbatim>\<open>None\<close> or \<^verbatim>\<open>Seq.empty\<close>).
The reasoning only outputs \<^emph>\<open>milestone states\<close> representing that the problem is indeed partially
solved, instead of any unfinished intermediate reasoning state.
Milestone states are explicitly annotated by the user (e.g.,
by antecedent \<^prop>\<open>\<r>Success\<close> or by setting the priority to 1,000,000).
Any other intermediate reasoning state is not considered a successfully finished state
and so is not output.\<close>
section \<open>Provide User Reasoners \& Apply the Engine\<close>
text \<open>\<phi>-LPR can be augmented by user reasoners.
The system predefines a resolution-based reasoner using introduction rules and elimination rules.
Other arbitrary reasoners can also be built from tactics or ML code.\<close>
subsection \<open>Reasoning by Rules\<close>
text \<open>Attribute @{attribute_def \<phi>reason} is provided for introducing resolution rules.
\begin{matharray}{rcl}
@{attribute_def \<phi>reason} & : & \<open>attribute\<close>
\end{matharray}
\small
\<^rail>\<open>
@@{attribute \<phi>reason} (@{syntax add_rule} | 'add' @{syntax add_rule} | 'del')
;
@{syntax_def add_rule}: @{syntax priority}?
('for' @{syntax patterns})? ('except' @{syntax blacklist})?
;
@{syntax_def priority}: (@{syntax nat} | '!')
;
@{syntax_def patterns}: (() + @{syntax term})
;
@{syntax_def blacklist}: (() + @{syntax term})
\<close>
\normalsize
\<^descr> @{attribute \<phi>reason}~\<^verbatim>\<open>add\<close> declares reasoning rules used in \<phi>-LPR.
@{attribute \<phi>reason}~\<^verbatim>\<open>del\<close> removes the reasoning rule.
If no keyword \<^verbatim>\<open>add\<close> or \<^verbatim>\<open>del\<close> is given, \<^verbatim>\<open>add\<close> is the default option.
\<^descr> The @{syntax patterns} and @{syntax blacklist} are those described in \cref{sec:patterns}.
For introduction rules, the patterns and the blacklist match only the conclusion of the
leading antecedent; for elimination rules, they match only the conditions of the
leading antecedent.
Patterns can be omitted. For introduction rules, the default pattern is the conclusion
of the rule; for elimination rules, the default is the first premise.
\<^descr> @{syntax priority} can be a natural number or an exclamation mark denoting the priority
1,000,000, i.e., the minimal priority for a global cut.
If the priority is not given explicitly, it defaults to 100.
\<close>
text \<open>\<^emph>\<open>Remark\<close>: rules of priority $\geq$ 1000 are named \<^emph>\<open>confident rules\<close> and the others
\<^emph>\<open>submissive rules\<close>.\<close>
text \<open>\<^emph>\<open>Remark\<close>: Attribute @{attribute \<phi>reason} can be used without any argument.
\<^verbatim>\<open>[[\<phi>reason]]\<close> denotes exactly \<^verbatim>\<open>[[\<phi>reason add]]\<close>.
However, using the attribute with empty arguments is not recommended,
for the technical reason that in this case
the attribute cannot obtain the position of the associated reasoning rule,
which is displayed in debug printing.\<close>
paragraph \<open>Example\<close>
declare conjI[\<phi>reason add] TrueI[\<phi>reason 1000]
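text \<open>Here \<open>conjI\<close> is registered with the default priority (100), while \<open>TrueI\<close> is registered
with priority 1000 and thus acts as a local cut (\cref{sec:cut}): whenever an antecedent
matches \<open>True\<close>, no alternative rule is attempted on it.\<close>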
paragraph \<open>\<open>\<r>\<close>Feasible \label{sec:rFeasible}\<close>
text \<open>Cut rules, including local cuts and global cuts, are those of priority $\geq$ 1000.
A cut rule can have at most one special \<open>\<r>Require\<close> antecedent at the leading position,
which determines the condition under which the rule can be applied; e.g., the following rule can
be applied only if \<open>A1\<close> and \<open>A2\<close> are solvable.
\[ \<open>\<r>Require (A1 &&& A2) \<Longrightarrow> A3 \<Longrightarrow> C\<close> \]
It provides a mechanism to constrain semantic conditions for applying the rule,
whereas the pattern matching mentioned earlier is only able to check syntactic conditions.
\<close>
subsection \<open>Reasoners by Isar Methods and ML code\<close>
text \<open>
There are two commands defining reasoners, respectively by Eisbach expression and by ML code.
\begin{matharray}{rcl}
@{command_def \<phi>reasoner} & : & \<open>local_theory \<rightarrow> local_theory\<close>\\
@{command_def \<phi>reasoner_ML} & : & \<open>local_theory \<rightarrow> local_theory\<close>\\
\end{matharray}
\<^rail>\<open>
@@{command \<phi>reasoner} @{syntax name} @{syntax priority} @{syntax patterns'} '=' @{syntax Eisbach_method}
;
@@{command \<phi>reasoner_ML} @{syntax name} @{syntax priority} @{syntax patterns'} '=' @{syntax ML_code}
;
@{syntax_def patterns'}: '(' (@{syntax term} + '\<bar>') ')'
\<close>
\<^descr> @{command \<phi>reasoner} defines a reasoner using an Eisbach expression. The Eisbach expression
defines a proof method in Isabelle/Isar, and this proof method is invoked on the leading antecedent
as a sub-goal when @{syntax patterns'} match.
\<^descr> @{command \<phi>reasoner_ML} defines a reasoner from ML code. The given code should be an ML function
of type \<^verbatim>\<open>context_state -> context_state Seq.seq\<close>, i.e., a contextual tactic.
\<close>
subsection \<open>Apply the Engine\<close>
text \<open>There are two ways to use the reasoning engine, from ML code by using \<^verbatim>\<open>Phi_Reasoner.reason\<close>,
and as a proof method.\<close>
subsubsection \<open>Proof Method\<close>
text \<open>
The reasoning engine is also available as a proof method.
\begin{matharray}{rcl}
@{method_def \<phi>reason} & : & \<open>method\<close>\\
\end{matharray}
\<^rail>\<open>
@@{method \<phi>reason} ('add' @{syntax thms})? ('del' @{syntax thms})?
\<close>
\<^descr> @{method \<phi>reason}~\<^verbatim>\<open>add\<close>~\<open>a\<close>~\<^verbatim>\<open>del\<close>~\<open>b\<close>
applies \<phi>-LPR on the proof state (which is a HHF sequent~\cite{isar-ref}).
It means subgoals of the proof are regarded as antecedents and \<phi>-LPR reasons them one by one
in order.
Optional modifier \<^verbatim>\<open>add\<close>~\<open>a\<close> adds introduction rules \<open>a\<close> temporarily, with the default patterns
(the conclusion of each rule) and the default priority (100).
Modifier \<^verbatim>\<open>del\<close>~\<open>b\<close> removes introduction rules \<open>b\<close> temporarily.
We currently provide no modifiers to alter elimination rules.
\<close>
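text \<open>A minimal sketch using the example declarations above (\<open>conjI\<close> and \<open>TrueI\<close>):
the method alone is expected to solve the following goal.\<close>
lemma \<open>True \<and> True\<close> by \<phi>reason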
section \<open>Predefined Antecedents, Reasoners, and Rules\<close>
subsection \<open>Auxiliary Structures\<close>
subsubsection \<open>Isomorphic Atomize\<close>
text \<open>The system operations \<open>Object_Logic.atomize\<close> and \<open>Object_Logic.rulify\<close> are not isomorphic,
in the sense that for a given rule \<open>R\<close>, \<open>Object_Logic.rulify (Object_Logic.atomize R)\<close> does not
exactly equal \<open>R\<close>. This section gives a way to address this issue.\<close>
ML_file \<open>library/iso_atomize.ML\<close>
definition \<open>pure_imp_embed \<equiv> (\<longrightarrow>)\<close>
definition pure_all_embed :: \<open>('a \<Rightarrow> bool) \<Rightarrow> bool\<close> (binder \<open>\<forall>\<^sub>e\<^sub>m\<^sub>b\<^sub>e\<^sub>d \<close> 10)
\<comment> \<open>We give it a binder syntax to prevent eta-contraction, which
would deprive the quantifier variables of their names\<close>
where \<open>pure_all_embed \<equiv> (All)\<close>
definition \<open>pure_conj_embed \<equiv> (\<and>)\<close>
definition \<open>pure_prop_embed x \<equiv> x\<close>
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>(P \<Longrightarrow> Q) \<equiv> Trueprop (pure_imp_embed P Q)\<close>
unfolding atomize_imp pure_imp_embed_def .
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>(P &&& Q) \<equiv> Trueprop (pure_conj_embed P Q)\<close>
unfolding atomize_conj pure_conj_embed_def .
(*TODO: find a way to preserve the name*)
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>(\<And>x. P x) \<equiv> Trueprop (pure_all_embed (\<lambda>x. P x))\<close>
unfolding atomize_all pure_all_embed_def .
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>PROP Pure.prop (Trueprop P) \<equiv> Trueprop (pure_prop_embed P)\<close>
unfolding Pure.prop_def pure_prop_embed_def .
subsubsection \<open>Action\<close>
text \<open>In the reasoning, antecedents of the same form may have different purposes; e.g.,
antecedent \<open>P = ?Q\<close> may expect a complete simplification, or a numeric calculation only, or some
other specific conversion. Antecedents of different purposes are expected to be processed by
different reasoners. To achieve this, because the engine selects reasoners by syntactic pattern,
this section proposes a general structure tagging the purpose of antecedents.
The purpose is denoted by the \<open>action\<close> type, which is an unspecified type because it serves only
a syntactic purpose.\<close>
typedecl action
definition Action_Tag :: \<open>prop \<Rightarrow> action \<Rightarrow> prop\<close> ("_ @action _" [3,4] 3)
where [iff]: \<open>Action_Tag P A \<equiv> P\<close>
text \<open>
\<open>\<open>P @action A\<close>\<close> tags antecedent \<^prop>\<open>P\<close> with the specific purpose denoted by \<^term>\<open>A\<close>.
The type variable \<^typ>\<open>'category\<close> enables classifying actions by types and type classes.
For example, some operation may be designed for any generic action \<open>?act :: (?'ty::cls) action\<close>
that falls into class \<open>cls\<close>.
\<^emph>\<open>Comment: I think this category type variable is a bad design, because the indexing
data structure (Net) we are using doesn't support type sorts, so this feature is actually
not indexed at all, and the reasoning here degrades to a linear one-by-one search!
Maybe classification should be done by some term-level structure. Let's consider this when we
have time!\<close>\<close>
definition Action_Tag_embed :: \<open>bool \<Rightarrow> action \<Rightarrow> bool\<close>
where \<open>Action_Tag_embed P A \<equiv> P\<close>
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>PROP Action_Tag (Trueprop P) A \<equiv> Trueprop (Action_Tag_embed P A)\<close>
unfolding Action_Tag_def Action_Tag_embed_def .
lemma Action_Tag_I:
\<open>P \<Longrightarrow> P @action A\<close>
unfolding Action_Tag_def .
lemma Action_Tag_D:
\<open>P @action A \<Longrightarrow> P\<close>
unfolding Action_Tag_def .
lemma Conv_Action_Tag_I:
\<open>X = X @action A\<close>
unfolding Action_Tag_def ..
ML_file \<open>library/action_tag.ML\<close>
subsubsection \<open>Mode\<close>
text \<open>Modes are general annotations used in various antecedents, which may configure
the specific reasoning behavior among slightly different options.
Their exact meaning depends on the specific antecedent using them.
An example can be found in \cref{sec:proof-obligation}.\<close>
type_synonym mode = action
text \<open>We provide a series of predefined modes, which may be commonly useful.\<close>
consts default :: mode
consts MODE_SIMP :: mode \<comment> \<open>relating to simplification\<close>
consts MODE_COLLECT :: mode \<comment> \<open>relating to collection\<close>
consts MODE_AUTO :: mode \<comment> \<open>something that will be triggered automatically\<close>
subsection \<open>General Rules\<close>
text \<open>\<^bold>\<open>Schematic variables\<close> can be instantiated (assigned) by reasoners.
The instantiation of a schematic variable \<open>?v\<close> updates all the occurrences of \<open>?v\<close> in the
remaining sequent, and this instantiation can be seen as assigning the results of the execution
of the antecedent.
For example, in
\[ \<open>1 + 2 = ?result \<Longrightarrow> Print ?result \<Longrightarrow> Done\<close> \]
the reasoning of antecedent \<open>1 + 2 = ?result\<close> instantiates \<open>?result\<close> to \<open>3\<close>, and results in
\[ \<open>Print 3 \<Longrightarrow> Done\<close> \]
If we view the antecedent as a program (sub-routine),
the schematic variables of the antecedent have the meaning of \<^emph>\<open>output\<close>,
and we name them \<^emph>\<open>output variables\<close>.
The following \<open>Try\<close> antecedent is such an example.\<close>
subsubsection \<open>Try\<close>
definition Try :: \<open>bool \<Rightarrow> bool \<Rightarrow> bool\<close> where \<open>Try success_or_fail P = P\<close>
text \<open>
The typical usage is \<open>\<open>Try ?success_or_fail P\<close>\<close>, where
\<open>P\<close> should be an antecedent having some fallback reasoner (not given here),
and \<open>?success_or_fail\<close> is an output variable representing whether \<open>P\<close> is successfully
deduced \<^emph>\<open>without\<close> using the fallback.
A high-priority (800) rule reasons \<open>\<open>Try True P\<close>\<close> normally and sets the output variable
\<open>success_or_fail\<close> to true.\<close>
lemma [\<phi>reason 800 for \<open>Try ?S ?P\<close>]:
\<open> P
\<Longrightarrow> Try True P\<close>
unfolding Try_def .
text \<open>
Users using \<open>\<open>Try True P\<close>\<close> should provide the fallback rule for their own \<open>P\<close>.
It depends on the application scenario, and of course there is no general fallback rule.
A fallback rule may have the following form,
\[ \<open> Fallback_of_P \<Longrightarrow> Try False P \<close> \]
\<close>
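text \<open>As a minimal sketch, let \<open>My_Ant\<close> be a hypothetical antecedent introduced here only for
illustration. Its low-priority fallback rule reports the failure of the normal path by
instantiating the output variable to \<open>False\<close>:\<close>
definition My_Ant :: bool where \<open>My_Ant \<equiv> True\<close>
lemma [\<phi>reason 10 for \<open>Try ?S My_Ant\<close>]:
  \<open>Try False My_Ant\<close>
  unfolding Try_def My_Ant_def ..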
subsubsection \<open>Compact Representation of Antecedents\<close>
text \<open>Meta-programming is feasible on \<phi>-LPR.
The reasoning of an antecedent may generate dynamically another antecedent, and assign it to
an output variable of type \<^typ>\<open>bool\<close>.
When multiple antecedents are going to be generated, it is
more efficient to contract them into one antecedent using conjunctions (e.g. \<open>A1 \<and> A2 \<and> A3 \<and> \<cdots>\<close>),
so they can be represented by one output variable of type \<^typ>\<open>bool\<close>.
\<open>(\<and>\<^sub>\<r>)\<close> and \<open>(\<forall>\<^sub>\<r>)\<close> are used to contract antecedents and to embed universally quantified variables
respectively.
\<close>
definition Compact_Antecedent :: \<open>bool \<Rightarrow> bool \<Rightarrow> bool\<close> (infixr "\<and>\<^sub>\<r>" 35)
where [iff]: \<open>Compact_Antecedent = (\<and>)\<close>
definition Compact_Forall :: \<open>('a \<Rightarrow> bool) \<Rightarrow> bool\<close> (binder "\<forall>\<^sub>\<r>" 10)
where [iff]: \<open>Compact_Forall = All\<close>
text \<open>Assertive rules are given to unfold the compression and reason the antecedents in order.\<close>
lemma [\<phi>reason 1000]:
\<open>P \<Longrightarrow> Q \<Longrightarrow> P \<and>\<^sub>\<r> Q\<close>
unfolding Compact_Antecedent_def ..
lemma [\<phi>reason 1000]:
\<open>(\<And>x. P x) \<Longrightarrow> \<forall>\<^sub>\<r>x. P x\<close>
unfolding Compact_Forall_def ..
declare conjunctionI[\<phi>reason 1000] \<comment> \<open>Meta-conjunction \<open>P &&& Q\<close> is also a compression.\<close>
subsubsection \<open>Matches\<close>
text \<open>Antecedent \<^prop>\<open>Matches pattern term\<close> asserts \<^term>\<open>pattern\<close> matches \<^term>\<open>term\<close>;
\<^prop>\<open>NO_MATCH pattern term\<close> asserts \<^term>\<open>pattern\<close> does not match \<^term>\<open>term\<close>.\<close>
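text \<open>For instance, \<open>Matches (?a + ?b) (x + y)\<close> succeeds, whereas \<open>Matches (?a + ?a) (x + y)\<close>
fails, because the two occurrences of \<open>?a\<close> cannot be instantiated consistently.\<close>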
definition Matches :: \<open>'a \<Rightarrow> 'a \<Rightarrow> bool\<close> where \<open>Matches _ _ = True\<close>
lemma Matches_I: \<open>Matches pattern term\<close> unfolding Matches_def ..
\<phi>reasoner_ML Matches 2000 (\<open>Matches ?pattern ?term\<close>) =
\<open>fn (ctxt, sequent) =>
let
val (\<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>Matches\<close>,_) $ pattern $ term))
= Thm.major_prem_of sequent
in
if Pattern.matches (Proof_Context.theory_of ctxt) (pattern, term)
then Seq.single (ctxt, @{thm Matches_I} RS sequent)
else Seq.empty
end\<close>
lemma NO_MATCH_I: "NO_MATCH A B" unfolding NO_MATCH_def ..
\<phi>reasoner_ML NO_MATCH 0 ("NO_MATCH ?A ?B") = \<open>
fn (ctxt,th) =>
let
val (\<^const>\<open>Trueprop\<close> $ (Const (\<^const_name>\<open>NO_MATCH\<close>, _) $ a $ b)) = Thm.major_prem_of th
in
if Pattern.matches (Proof_Context.theory_of ctxt) (a,b)
then Seq.empty
else Seq.single (ctxt, @{thm NO_MATCH_I} RS th)
end
\<close>
subsubsection \<open>Proof By Assumption\<close>
definition By_Assumption :: \<open>prop \<Rightarrow> prop\<close> where \<open>By_Assumption P \<equiv> P\<close>
definition May_By_Assumption :: \<open>prop \<Rightarrow> prop\<close> where \<open>May_By_Assumption P \<equiv> P\<close>
lemma By_Assumption_I: \<open>PROP P \<Longrightarrow> PROP By_Assumption P\<close> unfolding By_Assumption_def .
lemma May_By_Assumption_I: \<open>PROP P \<Longrightarrow> PROP May_By_Assumption P\<close> unfolding May_By_Assumption_def .
\<phi>reasoner_ML By_Assumption 1000 (\<open>PROP By_Assumption _\<close>) = \<open>fn (ctxt,sequent) =>
HEADGOAL (Tactic.assume_tac ctxt) (@{thm By_Assumption_I} RS sequent)
|> Seq.map (pair ctxt)
\<close>
\<phi>reasoner_ML May_By_Assumption 1000 (\<open>PROP May_By_Assumption _\<close>) = \<open>fn (ctxt,sequent) =>
let val sequent' = @{thm May_By_Assumption_I} RS sequent
in (HEADGOAL (Tactic.assume_tac ctxt) ORELSE Seq.single) sequent'
|> Seq.map (pair ctxt)
end
\<close>
subsection \<open>Cut\<close>
text \<open>The cuts have been introduced in \cref{sec:cut}.
Antecedent \<open>\<r>Cut\<close> triggers a global cut.
\<close>
definition \<r>Cut :: bool where \<open>\<r>Cut = True\<close>
lemma [iff, \<phi>reason 1000000]: \<open>\<r>Cut\<close> unfolding \<r>Cut_def ..
text \<open>Antecedent \<open>\<r>Success\<close> terminates the reasoning successfully with the reasoning state as
the result.\<close>
definition \<r>Success :: bool where \<open>\<r>Success = True\<close>
lemma \<r>Success_I[iff]: \<open>\<r>Success\<close> unfolding \<r>Success_def ..
\<phi>reasoner_ML \<r>Success 10000 (\<open>\<r>Success\<close>) = \<open>fn (ctxt,sequent) =>
raise Phi_Reasoner.Success (ctxt, @{thm \<r>Success_I} RS sequent)\<close>
subsection \<open>Proof Obligation \& Guard of Rule \label{sec:proof-obligation}\<close>
definition Premise :: "mode \<Rightarrow> bool \<Rightarrow> bool" where "Premise _ x = x"
abbreviation Normal_Premise ("\<p>\<r>\<e>\<m>\<i>\<s>\<e> _" [27] 26)
where "Normal_Premise \<equiv> Premise default"
abbreviation Simp_Premise ("\<s>\<i>\<m>\<p>\<r>\<e>\<m> _" [27] 26)
where "Simp_Premise \<equiv> Premise MODE_SIMP"
abbreviation Proof_Obligation ("\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> _" [27] 26)
where "Proof_Obligation \<equiv> Premise MODE_COLLECT"
text \<open>
\<^prop>\<open>Premise mode P\<close> represents an ordinary proposition that has to be proved during the reasoning.
There are different modes expressing different roles in the reasoning.
\<^descr> \<^prop>\<open>\<s>\<i>\<m>\<p>\<r>\<e>\<m> P\<close> is a \<^emph>\<open>guard\<close> of a rule, which constrains that the rule is applicable only
when \<^prop>\<open>P\<close> can be solved \<^emph>\<open>automatically\<close> during the reasoning.
If \<^prop>\<open>P\<close> fails to be solved, even if it is actually valid, the rule will not be applied.
Therefore, \<^prop>\<open>P\<close> has to be as simple as possible. The tactic used to solve \<^prop>\<open>P\<close> is
@{method clarsimp}.
A more powerful tactic like @{method auto} is not adopted because the tactic must commonly be
safe and non-blocking.
A blocking search branch blocks the whole reasoning, which is not acceptable.
\<^prop>\<open>\<s>\<i>\<m>\<p>\<r>\<e>\<m> P\<close> is not for proof obligations that are intended to be solved by users.
It is more like a controller or switch of the rules, i.e., a \<^emph>\<open>guard\<close>.
\<^descr> \<^prop>\<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> P\<close> represents a proof obligation.
Proof obligations in reasoning rules should be represented by it.
\<^descr> \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close> by contrast
represents proof obligations \<open>Q\<close> that are ready to be solved by the user (or by automatic tools).
\<close>
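text \<open>As a minimal sketch, the following rule for a hypothetical predicate \<open>Foo\<close> (introduced here
only for illustration) is applicable only when the guard \<open>x \<noteq> 0\<close> is solved automatically,
while \<open>0 < x\<close> is deferred to the user as a proof obligation:\<close>
definition Foo :: \<open>nat \<Rightarrow> bool\<close> where \<open>Foo x \<longleftrightarrow> 0 < x\<close>
lemma [\<phi>reason 100]:
  \<open> \<s>\<i>\<m>\<p>\<r>\<e>\<m> x \<noteq> 0
\<Longrightarrow> \<p>\<r>\<e>\<m>\<i>\<s>\<e> 0 < x
\<Longrightarrow> Foo x\<close>
  unfolding Foo_def Premise_def by simp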
text \<open>
The difference between \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close> and \<^prop>\<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> P\<close> is subtle:
In a reasoning process, many reasoning rules may be applied, which may generate many
\<^prop>\<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> P\<close>.
The engine tries to solve \<^prop>\<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> P\<close> automatically, but if it fails the search branch
gets stuck. Because the search has not finished, it is bad to ask for the user's intervention
to solve the goal, since the search branch may still very likely fail later.
It is \<^emph>\<open>not ready\<close> for the user to solve \<open>P\<close> here, and \<open>P\<close> should rather be deferred to
an ideal moment for the user to solve obligations.
This `ideal moment' is \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close>. If any \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close> exists in the antecedents
of the sequent, the engine contracts \<open>P\<close> into the latest \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close>, e.g., from
\[ \<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> P \<Longrightarrow> A1 \<Longrightarrow> \<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q \<Longrightarrow> \<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q' \<Longrightarrow> \<cdots> \<close> \]
it deduces
\[ \<open>A1 \<Longrightarrow> \<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q \<and> P \<Longrightarrow> \<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q' \<Longrightarrow> \<cdots> \<close> \]
In short, \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close> collects obligations generated during a reasoning process,
and enables the user to solve them at an ideal moment.
A typical reasoning request (the initial reasoning state, namely the argument of the reasoning
process) is of the following form,
\[ \<open>Problem \<Longrightarrow> \<r>Success \<Longrightarrow> \<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> True \<Longrightarrow> Conclusion\<close> \]
The \<open>True\<close> represents the empty collection, i.e., no obligation.
If the reasoning succeeds, it returns a sequent of the form
\[ \<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> True \<and> P1 \<and> P2 \<and> \<cdots> \<Longrightarrow> Conclusion\<close> \]
where \<open>P1, P2, \<cdots>\<close> are the obligations generated by reasoning \<open>Problem\<close>.
Then, the user may solve the obligations manually or by automatic tools.
For an antecedent \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close>,
if there is another \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q'\<close> in the remaining antecedents,
the reasoner also defers \<open>Q\<close> to \<open>Q'\<close>, just as if \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close> were a \<^prop>\<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> Q\<close>.
If no \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q'\<close> exists in the remaining antecedents,
the reasoner of \<^prop>\<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> P\<close> and \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close> raises
an error aborting the whole reasoning, because the reasoning request is not configured correctly.
Semantically, \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close> represents a proof obligation \<open>Q\<close> intended to be addressed by
the user. It can be deferred, but the reasoner never attempts to solve \<^prop>\<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q\<close> itself.
Nonetheless, we still provide tools for solving obligations automatically, albeit they have
to be called separately from the reasoning engine. See \<^verbatim>\<open>auto_obligation_solver\<close> and
\<^verbatim>\<open>safer_obligation_solver\<close> in \<^file>\<open>library/reasoners.ML\<close>.
\<close>
lemma Premise_I[intro!]: "P \<Longrightarrow> Premise mode P" unfolding Premise_def by simp
lemma Premise_D: "Premise mode P \<Longrightarrow> P" unfolding Premise_def by simp
lemma Premise_E[elim!]: "Premise mode P \<Longrightarrow> (P \<Longrightarrow> C) \<Longrightarrow> C" unfolding Premise_def by simp
subsubsection \<open>Implementation of the reasoners\<close>
lemma Premise_True[\<phi>reason 5000]: "Premise mode True" unfolding Premise_def ..
lemma [\<phi>reason 5000]:
" Premise mode P
\<Longrightarrow> Premise mode (Premise any_mode P)"
unfolding Premise_def .
lemma Premise_refl[\<phi>reason 2000 for \<open>Premise ?mode (?x = ?x)\<close>
\<open>Premise ?mode (?x = ?var_x)\<close>
\<open>Premise ?mode (?var_x = ?x)\<close>]:
"Premise mode (x = x)"
unfolding Premise_def ..
lemma contract_obligations:
"(Premise mode P \<Longrightarrow> \<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> Q \<Longrightarrow> PROP C) \<equiv> (\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> P \<and> Q \<Longrightarrow> PROP C)"
unfolding Premise_def by rule simp+
lemma contract_premise_true:
"(True \<Longrightarrow> Premise mode B) \<equiv> Trueprop (Premise mode B) "
by simp
lemma contract_premise_imp:
"(A \<Longrightarrow> Premise mode B) \<equiv> Trueprop (Premise mode (A \<longrightarrow> B)) "
unfolding Premise_def atomize_imp .
lemma contract_premise_all:
"(\<And>x. Premise mode (P x)) \<equiv> Trueprop ( Premise mode (\<forall>x. P x)) "
unfolding Premise_def atomize_all .
named_theorems useful \<open>theorems to be inserted in the automatic proving,
having the same effect as using the @{command using} command.\<close>
ML_file \<open>library/PLPR_Syntax.ML\<close>
ML_file "library/reasoners.ML"
\<phi>reasoner_ML Normal_Premise 10 (\<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> ?P\<close> | \<open>\<o>\<b>\<l>\<i>\<g>\<a>\<t>\<i>\<o>\<n> ?P\<close>)
= \<open>Phi_Reasoners.wrap Phi_Reasoners.defer_obligation_tac\<close>
subsection \<open>Reasoning Frame\<close>
definition \<open>\<r>BEGIN \<longleftrightarrow> True\<close>
definition \<open>\<r>END \<longleftrightarrow> True\<close>
text \<open>Antecedents \<^prop>\<open>\<r>BEGIN\<close> and \<^prop>\<open>\<r>END\<close> form a nested reasoning scope
resembling a subroutine for specific reasoning tasks or problems.
\[ \<open>\<dots> \<Longrightarrow> \<r>BEGIN \<Longrightarrow> Nested \<Longrightarrow> Reasoning \<Longrightarrow> \<r>END \<Longrightarrow> \<dots>\<close> \]
The scoped antecedents should be regarded as a \<^emph>\<open>unit antecedent\<close>
invoking a nested \<phi>-LPR reasoning process and returning \<^emph>\<open>only\<close> the first reached solution
(just like the behaviour of the \<phi>-LPR engine).
During backtracking, search branches before the unit will be backtracked, but sub-optimal solutions
of the unit are not backtracked.
In addition, a cut is confined among the search paths in the scope as a unit.
Because of the cut and the reduced backtracking, the performance is improved.
Sometimes a cut is admissible (green) as an expected behavior among several rules and reasoners
which constitute a loosely-gathered module for a specific problem.
However, the cut is still not safe to use, because an external rule using the reasoning module
may demand the behavior of backtracking, but the cut inside the module prevents
backtracking in the external rule.
In this case, the reasoning scope is helpful to wrap the loosely-gathered module,
confining side effects like cuts.
Specifically, any search path that reaches \<^prop>\<open>\<r>BEGIN\<close> opens a new \<^emph>\<open>frame\<close>, namely a space
of search paths.
The sub-searches continuing the path and before reaching
the paired \<^prop>\<open>\<r>END\<close> are in this frame.
As \<phi>-LPR works in DFS, a frame can contain another frame just when the search in the frame
encounters another \<^prop>\<open>\<r>BEGIN\<close>.
\[ \<open>\<dots> \<Longrightarrow> \<r>BEGIN \<Longrightarrow> A\<^sub>1 \<Longrightarrow> \<r>BEGIN \<Longrightarrow> A\<^sub>2 \<Longrightarrow> \<r>END \<Longrightarrow> A\<^sub>3 \<Longrightarrow> \<r>END \<Longrightarrow> \<dots>\<close> \]
Once any search path encounters a \<^prop>\<open>\<r>END\<close>, the innermost frame is closed and the sequent of the
search path is returned, dropping all other branches in the frame.
The mechanism checks whether all \<^prop>\<open>\<r>BEGIN\<close> and \<^prop>\<open>\<r>END\<close> are paired.
Any global cut cuts all and only the search branches in the innermost frame to which the cut
belongs. \<^prop>\<open>\<r>Success\<close> is prohibited in the nested scope, because we do not know how to process
the remaining antecedents after the \<^prop>\<open>\<r>Success\<close> and how to return them to the outer scope.
\<close>
definition \<r>Call :: \<open>prop \<Rightarrow> prop\<close> ("\<r>CALL _" [3] 2)
where \<open>\<r>Call P \<equiv> PROP P\<close>
\<comment> \<open>Call the antecedent \<^prop>\<open>P\<close> in a frame\<close>
lemma \<r>BEGIN_I: \<open>\<r>BEGIN\<close> unfolding \<r>BEGIN_def ..
lemma \<r>END_I: \<open>\<r>END\<close> unfolding \<r>END_def ..
lemma \<r>Call_I: \<open>PROP P \<Longrightarrow> \<r>CALL PROP P\<close> unfolding \<r>Call_def .
ML_file \<open>library/nested.ML\<close>
\<phi>reasoner_ML \<r>BEGIN 1000 (\<open>\<r>BEGIN\<close>) = \<open>PLPR_Nested_Reasoning.enter_scope\<close>
\<phi>reasoner_ML \<r>END 1000 (\<open>\<r>END\<close>) = \<open>PLPR_Nested_Reasoning.exit_scope\<close>
\<phi>reasoner_ML \<r>Call 1000 (\<open>PROP \<r>Call _\<close>) = \<open>PLPR_Nested_Reasoning.call\<close>
definition \<r>Call_embed :: \<open>bool \<Rightarrow> bool\<close> where \<open>\<r>Call_embed P \<equiv> P\<close>
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>\<r>Call (Trueprop P) \<equiv> Trueprop (\<r>Call_embed P)\<close>
unfolding \<r>Call_def \<r>Call_embed_def .
subsection \<open>Pruning\<close>
text \<open>At a reasoning state \<open>A\<close>, multiple search branches may be emitted in parallel to
find a solution of the antecedent.
A branch may find the solution while other branches from \<open>A\<close> still remain in the search history.
The reasoning in DFS manner then proceeds to solve the next antecedent \<open>B\<close>; assume \<open>B\<close> fails.
The reasoning then backtracks and redoes the search of \<open>A\<close> on the remaining branches of \<open>A\<close>.
This is unreasonable because the reasoning is redoing a problem already solved at \<open>A\<close>.
To address this, a solution is to prune the branches of \<open>A\<close> after \<open>A\<close> succeeds.
In this section we introduce the \<open>subgoal\<close> mechanism achieving the pruning.
Each antecedent \<open>A\<close> is tagged with a goal context \<open>G\<close>, as \<open>\<open>A \<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L G\<close>\<close>.
A reasoning rule may check that the goal \<open>G\<close> has not been solved before doing any substantial
computation, e.g.,
\[ \<open>CHK_SUBGOAL G \<Longrightarrow> Computation \<Longrightarrow> (Ant \<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L G)\<close> \]
Antecedent \<open>CHK_SUBGOAL G\<close> succeeds only when the goal \<open>G\<close> is not marked solved, \<^emph>\<open>or\<close> the current
search branch is the thread that marked \<open>G\<close> solved previously.
When a rule succeeds, the rule may mark the goal \<open>G\<close> solved to prune other branches that check \<open>G\<close>.
\[ \<open>Computation \<Longrightarrow> SOLVE_SUBGOAL G \<Longrightarrow> (Ant \<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L G)\<close> \]
If a goal \<open>G\<close> has been marked solved, any other antecedent \<open>SOLVE_SUBGOAL G\<close> marking \<open>G\<close> again
fails, unless the current search branch is the thread that marked \<open>G\<close> solved previously.
A subgoal is represented by an unspecified type which only has a syntactic effect in the reasoning.\<close>
typedecl "subgoal"
consts subgoal_context :: \<open>subgoal \<Rightarrow> action\<close>
abbreviation GOAL_CTXT :: "prop \<Rightarrow> subgoal \<Rightarrow> prop" ("_ \<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L _" [2,1000] 2)
where "(PROP P \<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L G) \<equiv> (PROP P @action subgoal_context G)"
definition CHK_SUBGOAL :: "subgoal \<Rightarrow> bool" \<comment> \<open>Check whether the goal is solved\<close>
where "CHK_SUBGOAL X \<longleftrightarrow> True"
definition SOLVE_SUBGOAL :: "subgoal \<Rightarrow> bool"
where "SOLVE_SUBGOAL X \<longleftrightarrow> True"
text \<open>Subgoals are hierarchical, having the unique top-most goal named \<open>\<open>TOP_GOAL\<close>\<close>.
New goal contexts are obtained by antecedent \<open>\<open>SUBGOAL G ?G'\<close>\<close>, which assigns a new subgoal
under an unsolved \<open>G\<close> to the output variable \<open>?G'\<close>.
The reasoning raises an error if \<open>?G'\<close> is not a schematic variable.
\<open>\<open>SOLVE_SUBGOAL G\<close>\<close> marks the goal \<open>G\<close> and all its subgoals solved.
The \<open>TOP_GOAL\<close> can never be solved.\<close>
consts TOP_GOAL :: "subgoal"
definition SUBGOAL :: "subgoal \<Rightarrow> subgoal \<Rightarrow> bool" where "SUBGOAL ROOT NEW_GOAL = True"
subsubsection \<open>Implementation of the Subgoal Reasoners\<close>
lemma SUBGOAL_I[iff]: "SUBGOAL ROOT NEWGOAL" unfolding SUBGOAL_def ..
lemma CHK_SUBGOAL_I[iff]: "CHK_SUBGOAL X" unfolding CHK_SUBGOAL_def ..
lemma SOLVE_SUBGOAL_I[iff]: "SOLVE_SUBGOAL X" unfolding SOLVE_SUBGOAL_def ..
ML_file \<open>library/Subgoal_Env.ML\<close>
\<phi>reasoner_ML SUBGOAL 2000 (\<open>SUBGOAL ?ROOT ?NEWGOAL\<close>) = \<open>Subgoal_Env.subgoal\<close>
\<phi>reasoner_ML CHK_SUBGOAL 2000 (\<open>CHK_SUBGOAL ?GOAL\<close>) = \<open>Subgoal_Env.chk_subgoal\<close>
\<phi>reasoner_ML SOLVE_SUBGOAL 9900 (\<open>SOLVE_SUBGOAL ?GOAL\<close>) = \<open>Subgoal_Env.solve_subgoal\<close>
lemma [\<phi>reason 800 for \<open>Try ?S ?P \<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L ?G\<close>]:
\<open> P \<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L G
\<Longrightarrow> Try True P \<^bold>@\<^bold>G\<^bold>O\<^bold>A\<^bold>L G\<close>
unfolding Try_def .
subsection \<open>Branch\<close>
text \<open>\<open>A ||| B\<close> is an antecedent encoding a search branch.
Compared with the ordinary approach using multiple submissive rules,
it features a short-cut by using subgoals. It tries each antecedent from left to right until
the first success of solving an antecedent, and none of the remaining ones are attempted.\<close>
definition Branch :: \<open>prop \<Rightarrow> prop \<Rightarrow> prop\<close> (infixr "|||" 3)
where \<open>Branch A B \<equiv> (\<And>C. (PROP A \<Longrightarrow> C) \<Longrightarrow> (PROP B \<Longrightarrow> C) \<Longrightarrow> C)\<close>
definition Branch_embed :: \<open>bool \<Rightarrow> bool \<Rightarrow> bool\<close>
where \<open>Branch_embed A B \<equiv> A \<or> B\<close>
lemma atomize_Branch:
\<open>Branch (Trueprop A) (Trueprop B) \<equiv> Trueprop (A \<or> B)\<close>
unfolding Branch_def or_def atomize_eq atomize_imp atomize_all .
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>Branch (Trueprop A) (Trueprop B) \<equiv> Trueprop (Branch_embed A B)\<close>
unfolding Branch_embed_def atomize_Branch .
subsubsection \<open>Implementation\<close>
lemma Branch_L:
\<open> PROP A
\<Longrightarrow> PROP A ||| PROP B\<close>
unfolding Action_Tag_def Branch_def
proof -
assume A: \<open>PROP A\<close>
show \<open>(\<And>C. (PROP A \<Longrightarrow> C) \<Longrightarrow> (PROP B \<Longrightarrow> C) \<Longrightarrow> C)\<close> proof -
fix C :: "bool"
assume A': \<open>PROP A \<Longrightarrow> C\<close>
show \<open>C\<close> using A'[OF A] .
qed
qed
lemma Branch_R:
\<open> PROP B
\<Longrightarrow> PROP A ||| PROP B\<close>
unfolding Action_Tag_def Branch_def
proof -
assume B: \<open>PROP B\<close>
show \<open>(\<And>C. (PROP A \<Longrightarrow> C) \<Longrightarrow> (PROP B \<Longrightarrow> C) \<Longrightarrow> C)\<close> proof -
fix C :: "bool"
assume B': \<open>PROP B \<Longrightarrow> C\<close>
show \<open>C\<close> using B'[OF B] .
qed
qed
declare [[\<phi>reason 1000 Branch_L Branch_R for \<open>PROP ?A ||| PROP ?B\<close>]]
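text \<open>A minimal sketch of the branching behavior, applying the rules manually: the left branch
below is unprovable, so one resorts to the right one.\<close>
lemma \<open>(0::nat) = 1 ||| (1::nat) = 1\<close>
  by (rule Branch_R) simp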
subsection \<open>Simplification \& Rewrite\<close>
text \<open>\<open>\<open>\<s>\<i>\<m>\<p>\<l>\<i>\<f>\<y>[mode] ?result : term\<close>\<close> is a generic antecedent for simplifying \<open>term\<close> in a given
\<open>mode\<close>. The \<open>?result\<close> should be an output variable for the result of the simplification.
We implement a \<open>default\<close> mode where the system simpset is used to simplify
\<open>term\<close>. Users may configure their own modes and their own reasoners using different simpsets.\<close>
definition Simplify :: " mode \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool " ("\<s>\<i>\<m>\<p>\<l>\<i>\<f>\<y>[_] _ :/ _" [10,1000,10] 9)
where "Simplify setting result origin \<longleftrightarrow> result = origin"
definition Do_Simplification :: \<open>'a \<Rightarrow> 'a \<Rightarrow> prop\<close>
where \<open>Do_Simplification result origin \<equiv> (result \<equiv> origin)\<close>
lemma [cong]: "A \<equiv> A' \<Longrightarrow> Simplify s x A \<equiv> Simplify s x A' " by simp
lemma Simplify_D: \<open>Simplify m A B \<Longrightarrow> A = B\<close> unfolding Simplify_def .
lemma Simplify_I: \<open>A = B \<Longrightarrow> Simplify m A B\<close> unfolding Simplify_def .
lemma Do_Simplification:
\<open>PROP Do_Simplification A B \<Longrightarrow> Simplify s A B\<close>
unfolding Do_Simplification_def Simplify_def atomize_eq .
lemma End_Simplification : \<open>PROP Do_Simplification A A\<close> unfolding Do_Simplification_def .
lemma End_Simplification': \<open>\<p>\<r>\<e>\<m>\<i>\<s>\<e> A = B \<Longrightarrow> PROP Do_Simplification A B\<close>
unfolding Do_Simplification_def Premise_def atomize_eq .
ML_file \<open>library/simplifier.ML\<close>
hide_fact End_Simplification' End_Simplification Do_Simplification
subsubsection \<open>Default Simplifier\<close>
abbreviation Default_Simplify :: " 'a \<Rightarrow> 'a \<Rightarrow> bool " ("\<s>\<i>\<m>\<p>\<l>\<i>\<f>\<y> _ : _" [1000,10] 9)
where "Default_Simplify \<equiv> Simplify default"
\<phi>reasoner_ML Default_Simplify 1000 (\<open>Default_Simplify ?X' ?X\<close>)
= \<open>PLPR_Simplifier.simplifier NONE I\<close>
\<phi>reasoner_ML Simp_Premise 10 (\<open>\<s>\<i>\<m>\<p>\<r>\<e>\<m> ?P\<close>)
= \<open>PLPR_Simplifier.simplifier NONE I\<close>
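text \<open>As a minimal check of the intended semantics (proved here directly, not via the reasoner):
reasoning the antecedent \<open>\<s>\<i>\<m>\<p>\<l>\<i>\<f>\<y> ?x : 1 + 2\<close> is expected to instantiate \<open>?x\<close> to \<open>3\<close>.\<close>
lemma \<open>\<s>\<i>\<m>\<p>\<l>\<i>\<f>\<y> x : (1::nat) + 2 \<Longrightarrow> x = 3\<close>
  unfolding Simplify_def by simp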
(* subsection \<open>Exhaustive Divergence\<close>
ML_file \<open>library/exhaustive_divergen.ML\<close>
definition \<open>Begin_Exhaustive_Divergence \<longleftrightarrow> True\<close>
definition \<open> End_Exhaustive_Divergence \<longleftrightarrow> True\<close>
definition [iff]: \<open>Stop_Divergence \<longleftrightarrow> True\<close>
lemma Stop_Divergence_I: \<open>Stop_Divergence\<close> unfolding Stop_Divergence_def ..
lemma Begin_Exhaustive_Divergence_I: \<open>Begin_Exhaustive_Divergence\<close>
unfolding Begin_Exhaustive_Divergence_def ..
lemma End_Exhaustive_Divergence_I: \<open>End_Exhaustive_Divergence\<close>
unfolding End_Exhaustive_Divergence_def ..
\<phi>reasoner_ML Begin_Exhaustive_Divergence 1000 (\<open>Begin_Exhaustive_Divergence\<close>)
= \<open>PLPR_Exhaustive_Divergence.begin Seq.of_list\<close>
\<phi>reasoner_ML Stop_Divergence 1000 (\<open>Stop_Divergence\<close>) =
\<open>apsnd (fn th => @{thm Stop_Divergence_I} RS th) #> PLPR_Exhaustive_Divergence.stop\<close>
\<phi>reasoner_ML End_Exhaustive_Divergence 1000 (\<open>End_Exhaustive_Divergence\<close>)
= \<open>PLPR_Exhaustive_Divergence.exit\<close>
*)
subsection \<open>Optimal Solution\<close>
text \<open>\<phi>-LPR is a priority-driven DFS searching for the first-reached solution, which may not be
the optimal one for a certain measure. This section gives a way to find the solution of minimum
cost among a given set of candidates.
\<close>
definition Optimum_Solution :: \<open>prop \<Rightarrow> prop\<close> where [iff]: \<open>Optimum_Solution P \<equiv> P\<close>
definition [iff]: \<open>Begin_Optimum_Solution \<longleftrightarrow> True\<close>
definition [iff]: \<open>End_Optimum_Solution \<longleftrightarrow> True\<close>
text \<open>Each individual invocation of \<open>Optimum_Solution P\<close>
invokes an individual instance of the optimal-solution reasoning.
The reasoning of \<open>P\<close> proceeds exhaustively, exploring all backtracking branches except local cuts.
\<close>
paragraph \<open>Candidates\<close>
text \<open>The candidates are all search branches diverged from the antecedents marked by \<open>\<r>Choice\<close>.\<close>
(* definition \<r>Choice :: \<open>prop \<Rightarrow> prop\<close> ("\<r>CHOICE _" [3] 2) where \<open>\<r>Choice P \<equiv> P\<close>
lemma \<r>Choice_I: \<open> PROP P \<Longrightarrow> PROP \<r>Choice P\<close> unfolding \<r>Choice_def . *)
text \<open>For the antecedents marked by \<open>\<r>Choice\<close>, the mechanism traverses exhaustively all
combinations of their (direct) solvers, while for the unmarked antecedents, the strategy is
unchanged and as greedy as the usual behavior --- returning the first-reached solution
and discarding the others.
As an example, in
\<open>Begin_Optimum_Solution \<Longrightarrow> \<r>Choice A \<Longrightarrow> B \<Longrightarrow> \<r>Choice C \<Longrightarrow> End_Optimum_Solution \<Longrightarrow> \<dots>\<close>,
assuming each of \<open>A,B,C\<close> has 2 solvers \<open>A\<^sub>1,A\<^sub>2,B\<^sub>1,B\<^sub>2,C\<^sub>1,C\<^sub>2\<close>, and assuming \<open>B\<^sub>1\<close> has higher priority
than \<open>B\<^sub>2\<close> and succeeds, the mechanism traverses 4 combinations of the solvers, \<open>A\<^sub>1,C\<^sub>1\<close>,
\<open>A\<^sub>1,C\<^sub>2\<close>, \<open>A\<^sub>2,C\<^sub>1\<close>, \<open>A\<^sub>2,C\<^sub>2\<close>, i.e., it is exhaustive only on the \<open>\<r>Choice\<close>-marked antecedents but
still greedy on the others.
Note, even when marked by \<open>\<r>Choice\<close>, local cuts are still valid and cut search branches.
Global cuts are disabled during the whole reasoning because they kill other search branches.
\<open>\<r>Success\<close> is available, and the mechanism ensures that it is always the optimal branch that
invokes the \<open>\<r>Success\<close>.
\<close>
paragraph \<open>Cost\<close>
text \<open>The cost is measured by reports from the following antecedents inserted in the user rules.\<close>
definition Incremental_Cost :: \<open>int \<Rightarrow> bool\<close> where [iff]: \<open>Incremental_Cost _ = True\<close>
definition Threshold_Cost :: \<open>int \<Rightarrow> bool\<close> ("\<t>\<h>\<r>\<e>\<s>\<h>\<o>\<l>\<d>") where [iff]: \<open>Threshold_Cost _ = True\<close>
text \<open>The final cost of a reasoning process is the sum of all the reported \<open>Incremental_Cost\<close> or
the maximum \<open>Threshold_Cost\<close>, whichever is larger.
If the costs of two branches are the same, the first-reached one is considered better.
\<close>
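text \<open>For example, if a branch reports \<open>Incremental_Cost 2\<close>, \<open>Incremental_Cost 3\<close>, and
\<open>\<t>\<h>\<r>\<e>\<s>\<h>\<o>\<l>\<d> 4\<close>, its final cost is \<open>max (2 + 3) 4 = 5\<close>.\<close>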
subsubsection \<open>Implementation\<close>
definition Optimum_Solution_embed :: \<open>bool \<Rightarrow> bool\<close> where \<open>Optimum_Solution_embed P \<equiv> P\<close>
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>Optimum_Solution (Trueprop P) \<equiv> Trueprop (Optimum_Solution_embed P)\<close>
unfolding Optimum_Solution_embed_def Optimum_Solution_def .
lemma Incremental_Cost_I: \<open>Incremental_Cost X\<close> unfolding Incremental_Cost_def ..
lemma Threshold_Cost_I: \<open>Threshold_Cost X\<close> unfolding Threshold_Cost_def ..
lemma Begin_Optimum_Solution_I: \<open>Begin_Optimum_Solution\<close> unfolding Begin_Optimum_Solution_def ..
lemma End_Optimum_Solution_I: \<open>End_Optimum_Solution\<close> unfolding End_Optimum_Solution_def ..
lemma Do_Optimum_Solution:
\<open> PROP X
\<Longrightarrow> End_Optimum_Solution
\<Longrightarrow> PROP Optimum_Solution X\<close>
unfolding Optimum_Solution_def .
ML_file_debug \<open>library/optimum_solution.ML\<close>
\<phi>reasoner_ML Incremental_Cost 1000 (\<open>Incremental_Cost _\<close>) = \<open>fn (ctxt,sequent) => Seq.make (fn () =>
let val _ $ (_ $ N) = Thm.major_prem_of sequent
val (_, n) = HOLogic.dest_number N
val sequent' = @{thm Incremental_Cost_I} RS sequent
in Seq.pull (PLPR_Optimum_Solution.report_cost (n,0) (ctxt,sequent'))
end
)\<close>
\<phi>reasoner_ML Threshold_Cost 1000 (\<open>Threshold_Cost _\<close>) = \<open>fn (ctxt,sequent) => Seq.make (fn () =>
let val _ $ (_ $ N) = Thm.major_prem_of sequent
val (_, n) = HOLogic.dest_number N
val sequent' = @{thm Threshold_Cost_I} RS sequent
in Seq.pull (PLPR_Optimum_Solution.report_cost (0,n) (ctxt,sequent'))
end
)\<close>
\<phi>reasoner_ML Optimum_Solution 1000 (\<open>PROP Optimum_Solution _\<close>) = \<open>
apsnd (fn th => @{thm Do_Optimum_Solution} RS th)
#> PLPR_Optimum_Solution.start
\<close>
\<phi>reasoner_ML Begin_Optimum_Solution 1000 (\<open>Begin_Optimum_Solution\<close>) = \<open>
apsnd (fn th => @{thm Begin_Optimum_Solution_I} RS th)
#> PLPR_Optimum_Solution.start
\<close>
\<phi>reasoner_ML End_Optimum_Solution 1000 (\<open>End_Optimum_Solution\<close>) = \<open>
apsnd (fn th => @{thm End_Optimum_Solution_I} RS th)
#> PLPR_Optimum_Solution.finish
\<close>
(*\<phi>reasoner_ML \<r>Choice 1000 (\<open>PROP \<r>Choice _\<close>) = \<open>fn (ctxt,sequent) =>
PLPR_Optimum_Solution.choices (ctxt, @{thm \<r>Choice_I} RS sequent)\<close> *)
subsubsection \<open>Derivations\<close>
definition Optimum_Among :: \<open>prop \<Rightarrow> prop\<close> where \<open>Optimum_Among Candidates \<equiv> Candidates\<close>
\<comment> \<open>We leave it merely as a syntax\<close>
definition Optimum_Among_embed :: \<open>bool \<Rightarrow> bool\<close> where \<open>Optimum_Among_embed X \<equiv> X\<close>
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>Optimum_Among (Trueprop P) \<equiv> Trueprop (Optimum_Among_embed P)\<close>
unfolding Optimum_Among_embed_def Optimum_Among_def .
subsection \<open>Environment Variables\<close>
definition Push_Envir_Var :: \<open>'name \<Rightarrow> 'a::{} \<Rightarrow> bool\<close>
where \<open>Push_Envir_Var Name Val \<longleftrightarrow> True\<close>
definition Pop_Envir_Var :: \<open>'name \<Rightarrow> bool\<close> where \<open>Pop_Envir_Var Name \<longleftrightarrow> True\<close>
definition Get_Envir_Var :: \<open>'name \<Rightarrow> 'a::{} \<Rightarrow> bool\<close>
where \<open>Get_Envir_Var Name Return \<longleftrightarrow> True\<close>
definition Get_Envir_Var' :: \<open>'name \<Rightarrow> 'a::{} \<Rightarrow> 'a \<Rightarrow> bool\<close>
where \<open>Get_Envir_Var' Name Default Return \<longleftrightarrow> True\<close>
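text \<open>These antecedents maintain a stack of values for each variable name during the reasoning:
\<open>Push_Envir_Var N V\<close> pushes value \<open>V\<close> for name \<open>N\<close>, \<open>Pop_Envir_Var N\<close> pops it, and
\<open>Get_Envir_Var N ?V\<close> binds the current value to the output variable \<open>?V\<close>, while
\<open>Get_Envir_Var' N D ?V\<close> falls back to the default \<open>D\<close> when nothing is set for \<open>N\<close>.
A typical usage sketch:
\[ \<open>Push_Envir_Var name v \<Longrightarrow> \<dots> \<Longrightarrow> Get_Envir_Var name ?v \<Longrightarrow> \<dots> \<Longrightarrow> Pop_Envir_Var name\<close> \]\<close>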
subsubsection \<open>Implementation\<close>
ML_file \<open>library/envir_var.ML\<close>
lemma Push_Envir_Var_I: \<open>Push_Envir_Var N V\<close> unfolding Push_Envir_Var_def ..
lemma Pop_Envir_Var_I: \<open>Pop_Envir_Var N\<close> unfolding Pop_Envir_Var_def ..
lemma Get_Envir_Var_I : \<open>Get_Envir_Var N V\<close> for V :: \<open>'v::{}\<close> unfolding Get_Envir_Var_def ..
lemma Get_Envir_Var'_I: \<open>Get_Envir_Var' N D V\<close> for V :: \<open>'v::{}\<close> unfolding Get_Envir_Var'_def ..
\<phi>reasoner_ML Push_Envir_Var 1000 (\<open>Push_Envir_Var _ _\<close>) = \<open>fn (ctxt,sequent) => Seq.make (fn () =>
let val _ $ (_ $ N $ V) = Thm.major_prem_of sequent
val _ = if maxidx_of_term V <> ~1
then warning "PLPR Envir Var: The value to be assigned has schematic variables \
\which will not be retained!"
else ()
in SOME ((PLPR_Env.push (PLPR_Env.name_of N) (Thm.cterm_of ctxt V) ctxt,
@{thm Push_Envir_Var_I} RS sequent),
Seq.empty) end
)\<close>
\<phi>reasoner_ML Pop_Envir_Var 1000 (\<open>Pop_Envir_Var _\<close>) = \<open>fn (ctxt,sequent) => Seq.make (fn () =>
let val _ $ (_ $ N) = Thm.major_prem_of sequent
in SOME ((PLPR_Env.pop (PLPR_Env.name_of N) ctxt, @{thm Pop_Envir_Var_I} RS sequent),
Seq.empty) end
)\<close>
\<phi>reasoner_ML Get_Envir_Var 1000 (\<open>Get_Envir_Var _ _\<close>) = \<open>fn (ctxt,sequent) => Seq.make (fn () =>
let val _ $ (_ $ N $ _) = Thm.major_prem_of sequent
val idx = Thm.maxidx_of sequent + 1
in case PLPR_Env.get (PLPR_Env.name_of N) ctxt
of NONE => Phi_Reasoner.error
("No enviromental variable " ^ PLPR_Env.name_of N ^ " is set")
| SOME V' =>
let val V = Thm.incr_indexes_cterm idx V'
in SOME ((ctxt, ( @{thm Get_Envir_Var_I}
|> Thm.incr_indexes idx
|> Thm.instantiate (TVars.make [((("'v",idx),[]), Thm.ctyp_of_cterm V)],
Vars.make [((("V", idx),Thm.typ_of_cterm V), V)])
) RS sequent),
Seq.empty)
end
end
)\<close>
\<phi>reasoner_ML Get_Envir_Var' 1000 (\<open>Get_Envir_Var' _ _ _\<close>) = \<open>fn (ctxt,sequent) => Seq.make (fn () =>
let val _ $ (_ $ N $ D $ _) = Thm.major_prem_of sequent
val idx = Thm.maxidx_of sequent + 1
val V = (case PLPR_Env.get (PLPR_Env.name_of N) ctxt
of SOME V => V | NONE => Thm.cterm_of ctxt D)
|> Thm.incr_indexes_cterm idx
in SOME ((ctxt, ( @{thm Get_Envir_Var'_I}
|> Thm.incr_indexes idx
|> Thm.instantiate (TVars.make [((("'v",idx),[]), Thm.ctyp_of_cterm V)],
Vars.make [((("V", idx),Thm.typ_of_cterm V), V)])
) RS sequent),
Seq.empty)
end
)\<close>
subsection \<open>Recursion Guard\<close>
definition \<r>Recursion_Guard :: \<open>'a::{} \<Rightarrow> prop \<Rightarrow> prop\<close> ("\<r>RECURSION'_GUARD'(_')/ _" [2,2] 2)
where [iff]: \<open>(\<r>RECURSION_GUARD(X) (PROP P)) \<equiv> PROP P\<close>
text \<open>\<^prop>\<open>\<r>RECURSION_GUARD(X) (PROP P)\<close> annotates that the reasoning of \<^prop>\<open>P\<close> is about goal \<^term>\<open>X\<close>.
It remembers \<^term>\<open>X\<close>, and once the same goal \<^term>\<open>X\<close> occurs again in the subsequent reasoning,
it aborts the search branch because an infinite recursion has happened.\<close>
definition \<r>Recursion_Guard_embed :: \<open>'a::{} \<Rightarrow> bool \<Rightarrow> bool\<close>
where \<open>\<r>Recursion_Guard_embed _ P \<equiv> P\<close>
lemma [iso_atomize_rules, symmetric, iso_rulify_rules]:
\<open>\<r>Recursion_Guard X (Trueprop P) \<equiv> Trueprop (\<r>Recursion_Guard_embed X P)\<close>
unfolding \<r>Recursion_Guard_embed_def \<r>Recursion_Guard_def .
subsubsection \<open>Implementation\<close>
definition \<r>Recursion_Residue :: \<open>'a::{} \<Rightarrow> bool\<close>
where \<open>\<r>Recursion_Residue _ \<equiv> True\<close>
lemma Do_\<r>Recursion_Guard:
\<open> PROP P
\<Longrightarrow> \<r>Recursion_Residue X
\<Longrightarrow> \<r>RECURSION_GUARD(X) (PROP P) \<close>
unfolding \<r>Recursion_Guard_def .
lemma [\<phi>reason 1000]:
\<open>\<r>Recursion_Residue X\<close>
unfolding \<r>Recursion_Residue_def ..
ML_file \<open>library/recursion_guard.ML\<close>
\<phi>reasoner_ML \<r>Recursion_Guard 1000 (\<open>\<r>RECURSION_GUARD(?X) (PROP ?P)\<close>) = \<open>PLPR_Recursion_Guard.reason\<close>
hide_fact Do_\<r>Recursion_Guard
(*
subsection \<open>Obtain\<close> \<comment> \<open>A restricted version of generalized elimination for existential only\<close>
\<comment> \<open>Maybe Useless, considering to discard!\<close>
definition Obtain :: \<open>'a \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> bool\<close> where \<open>Obtain x P \<longleftrightarrow> P x\<close>
definition \<open>DO_OBTAIN \<equiv> Trueprop True\<close>
lemma DO_OBTAIN_I: \<open>PROP DO_OBTAIN\<close> unfolding DO_OBTAIN_def ..
lemma Obtain_Framework:
\<open>PROP Sequent \<Longrightarrow> PROP GE \<Longrightarrow> PROP DO_OBTAIN \<Longrightarrow> PROP Sequent &&& PROP GE\<close>
using conjunctionI .
lemma Obtain_I:
\<open>P x \<Longrightarrow> Obtain x P\<close>
unfolding Obtain_def .
\<phi>reasoner_ML Obtain 1200 (\<open>Obtain ?x ?P\<close>) = \<open>
fn (ctxt, sequent) =>
let
val obtain_goal = Thm.major_prem_of sequent
fun obtain_goal_vars L (Const (\<^const_name>\<open>Obtain\<close>, _) $ V $ P) = obtain_goal_vars (V::L) P
| obtain_goal_vars L (\<^const>\<open>Trueprop\<close> $ P) = obtain_goal_vars L P
| obtain_goal_vars L (Abs (_,_,P)) = obtain_goal_vars L P
| obtain_goal_vars L _ = L
fun to_ex_goal (Const (\<^const_name>\<open>Obtain\<close>, Type ("fun", [_, ty])) $ _ $ P)
= Const (\<^const_name>\<open>Ex\<close>, ty) $ to_ex_goal P
| to_ex_goal (\<^const>\<open>Trueprop\<close> $ P) = \<^const>\<open>Trueprop\<close> $ to_ex_goal P
| to_ex_goal (Abs (N,Ty,P)) = Abs (N,Ty, to_ex_goal P)
| to_ex_goal P = P
val goal = Thm.trivial (Thm.cterm_of ctxt (to_ex_goal obtain_goal))
val L = obtain_goal_vars [] obtain_goal
in
if forall is_Var L
then Seq.single (ctxt, goal RS (sequent COMP @{thm Obtain_Framework}))
else error("asdwh78")
end
\<close>
\<phi>reasoner_ML DO_OBTAIN 1200 (\<open>PROP DO_OBTAIN\<close>) = \<open>
fn (ctxt, sequent') => Seq.make (fn _ =>
let
val sequent'' = @{thm DO_OBTAIN_I} RS sequent'
val (sequent, GE') = Conjunction.elim sequent''
val obtain_goal = Thm.major_prem_of sequent
fun obtain_goal_vars L (Const (\<^const_name>\<open>Obtain\<close>, _) $ V $ P) = obtain_goal_vars (V::L) P
| obtain_goal_vars L (\<^const>\<open>Trueprop\<close> $ P) = obtain_goal_vars L P
| obtain_goal_vars L (Abs (_,_,P)) = obtain_goal_vars L P
| obtain_goal_vars L _ = L
fun get_goal (Const (\<^const_name>\<open>Obtain\<close>, _) $ _ $ P) = get_goal P
| get_goal (Abs (_,_,P)) = get_goal P
| get_goal (\<^const>\<open>Trueprop\<close> $ P) = get_goal P
| get_goal P = P
val L = obtain_goal_vars [] obtain_goal
val N = length L
val GE = Tactical.REPEAT_DETERM_N N
(Thm.biresolution NONE false [(true, @{thm exE})] 1) GE' |> Seq.hd
val (var_names, ctxt') = Proof_Context.add_fixes
(map (fn tm => (Binding.name (Term.term_name tm), SOME (fastype_of tm), NoSyn)) L) ctxt
val vars = map Free (var_names ~~ map Term.fastype_of L)
val vars_c = map (Thm.cterm_of ctxt') vars
val assm =
Term.subst_bounds (vars, get_goal obtain_goal)
|> Thm.cterm_of ctxt'
fun export_assm thm = thm
|> Thm.implies_intr assm
|> Drule.forall_intr_list vars_c
|> (fn th => th COMP GE)
val ([assm_thm], ctxt'') = Assumption.add_assms (fn _ => fn _ => (export_assm, I)) [assm] ctxt'
val sequent1 = Tactical.REPEAT_DETERM_N N
(Thm.biresolution NONE false [(true, @{thm Obtain_I})] 1) sequent |> Seq.hd
in SOME ((ctxt'', assm_thm RS sequent1), Seq.empty)
end
)\<close>
*)
(* subsection \<open>Generalized Elimination\<close>
definition "\<phi>Generalized_Elimination x = x"
definition \<open>DO_GENERALIZED_ELIMINATION \<equiv> Trueprop True\<close>
lemma DO_GENERALIZED_ELIMINATION_I:
\<open>PROP DO_GENERALIZED_ELIMINATION\<close>
unfolding DO_GENERALIZED_ELIMINATION_def ..
lemma Generalized_Elimination_Framework:
\<open> TERM P
\<Longrightarrow> TERM P \<comment> \<open>Unifies prop in Sequent and that in GE here\<close>
\<Longrightarrow> PROP Sequent
\<Longrightarrow> PROP GE
\<Longrightarrow> PROP DO_GENERALIZED_ELIMINATION
\<Longrightarrow> PROP GE &&& PROP Sequent\<close>
using Pure.conjunctionI .
ML_file \<open>library/elimination.ML\<close>
*)
(*
subsection \<open>Misc\<close>
subsubsection \<open>Collect Schematic \& Free \& other terms\<close> \<comment> \<open>Not Stable!\<close>
paragraph \<open>Schematic\<close>
definition \<open>Collect_Schematic (typ::'a itself) sch Term \<equiv> Trueprop True\<close>
text \<open>It collects all schematic variables matching type \<^typ>\<open>'a\<close> in \<^term>\<open>Term\<close>.
The return is in form \<^term>\<open>Collect_Schematic TYPE('a) (v1, v2, v3) Term\<close>.
The matching of \<^typ>\<open>'a\<close> is in the usual way, where only schematic variables but no free variables
are considered as variables that can match something.\<close>
lemma Collect_Schematic_I: \<open>PROP Collect_Schematic TY sch Term\<close>
unfolding Collect_Schematic_def ..
\<phi>reasoner_ML Collect_Schematic 1200 (\<open>PROP Collect_Schematic TYPE(?'a) ?sch ?Term\<close>) = \<open>
fn (ctxt, sequent) =>
let
val (Const (\<^const_name>\<open>Collect_Schematic\<close>, _)
$ Const (\<^const_name>\<open>Pure.type\<close>, Type(\<^type_name>\<open>itself\<close>, [T])))
$ _
$ Term
= Thm.major_prem_of sequent
val vs = fold_aterms (fn (v as Var (_, T')) => (fn L =>
if Type.could_match (T,T') then insert (op =) v L else L)
| _ => I) Term []
val vs' = Thm.cterm_of ctxt (HOLogic.mk_tuple vs)
val idx = Thm.maxidx_of_cterm vs' + 1
val rule = Drule.infer_instantiate ctxt [(("sch",idx),vs')]
(Thm.incr_indexes idx @{thm Collect_Schematic_I})
in Seq.single (ctxt, rule RS sequent)
end
\<close>
*)
(*Others, to be done!*)
end
|
# BigBed Section Header
# =====================
# Supplemental Table 13.
struct SectionHeader
chromid::UInt32
chromstart::UInt32
chromend::UInt32
itemstep::UInt32
itemspan::UInt32
datatype::UInt8
reserved::UInt8
itemcount::UInt16
end
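# Fixed encoded size: 5 × UInt32 + 2 × UInt8 + 1 × UInt16 = 24 bytes.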
const SECTION_HEADER_SIZE = 24
function isbedgraph(datatype::UInt8)
return datatype == 0x01
end
function isvarstep(datatype::UInt8)
return datatype == 0x02
end
function isfixedstep(datatype::UInt8)
return datatype == 0x03
end
function encode_datatype(datatype::Symbol)
if datatype == :bedgraph
return 0x01
elseif datatype == :varstep
return 0x02
elseif datatype == :fixedstep
return 0x03
else
throw(ArgumentError("invalid data type: $(datatype)"))
end
end
function Base.read(io::IO, ::Type{SectionHeader})
return SectionHeader(
read(io, UInt32), read(io, UInt32), read(io, UInt32),
read(io, UInt32), read(io, UInt32),
read(io, UInt8), read(io, UInt8), read(io, UInt16))
end
function Base.write(stream::IO, header::SectionHeader)
return write(
stream,
header.chromid, header.chromstart, header.chromend,
header.itemstep, header.itemspan, header.datatype,
header.reserved, header.itemcount)
end
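# A minimal round-trip sketch with hypothetical field values (not from a real BigBed file):
# writing a section header and reading it back yields an identical record.
function _section_header_roundtrip_demo()
    io = IOBuffer()
    header = SectionHeader(
        UInt32(0), UInt32(100), UInt32(200),  # chromid, chromstart, chromend
        UInt32(10), UInt32(5),                # itemstep, itemspan
        encode_datatype(:fixedstep),          # datatype
        0x00, UInt16(3))                      # reserved, itemcount
    @assert write(io, header) == SECTION_HEADER_SIZE
    seekstart(io)
    @assert read(io, SectionHeader) === header  # fieldwise equality for immutable bits types
    return nothing
end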
|