Datasets: AI4M
c "comprime2" extrae los valores de las atmosferas en los nodos c y los devuelve en atmosr c c i variable i variable c - tau1 - tau2 profundidad optica a 5000 /AA c 1 t1 9 t2 temperatura en ambos modelos (k) c 2 p1 10 p2 presion electronica (dinas/cm**2) c 3 mic1 11 mic2 microturbulencia (cm/s) c 4 h1 12 h2 campo magnetico (G) c 5 v1 13 v2 velocidad eje z (cm/s) c 6 g1 14 g2 gamma (radianes) c 7 f1 15 f2 fi (radianes) c 8 mac1 16 mac2 macroturbulencia (en Km/s) c - ff1 17 ff2 factor de llenado (ff1=1-ff2) c 18 %stray c trabajo siempre con el ff del segundo modelo c c Basilio 22-3-93 c Basilio 1-4-93 modifico la variable ff2 a (1-ff2)/ff2 c Basilio y Jose Carlos 9-1-95 pertu. aditiva para fi (no mult. a tan(fi)) c Basilio y Jose Carlos 6-2-95 pertu. aditiva para gamma (no mult. a tan(gamma)) c c _____________________________________________________________ subroutine comprime2(ntau,m,atmos,atmosr) include 'PARAMETER' implicit real*4 (a-h,o-z) real*4 atmos(*),atmosr(*) integer m(*) common/offset/voffset !para respuestas common/ivez/ivez if(ivez.eq.0)then ivez=1 c el calculo de vof esta duplicado en fperfil2 OJO!!!!!!!!!!!! vmin=7.e5 if(m(13).ne.0.or.m(5).ne.0)vmin=1.e20 do i=1,ntau if(atmos(2+13*ntau+i).lt.vmin.and.m(13).ne.0) & vmin=atmos(2+13*ntau+i) if(atmos(5*ntau+i).lt.vmin.and.m(5).ne.0)vmin=atmos(5*ntau+i) voffset=vmin-7.e5 !cm/s end do end if kred=0 !indice reducido kamp=ntau !indice ampliado (los ntau puntos de tau1) do i=1,18 !do en grupos de variables (1=t,2=p,...etc) ntau2=ntau if(i.eq.8.or.i.eq.16.or.i.eq.17.or.i.eq.18)ntau2=1 !si mac1,mac2 o ff22,% if(m(i).eq.1)then !si pert. constante promedio la atm. kred=kred+1 sum=0. do ii=1,ntau2 sum=sum+atmos(kamp+ii) end do atmosr(kred)=sum/float(ntau2) c if(i .eq.2 .or. i .eq. 10)then c sum=0. c do ii=1,ntau2 c sum=sum+alog(atmos(kamp+ii)) c end do c atmosr(kred)=sum/float(ntau2) c end if if(i.eq.17)atmosr(kred)=(1.d0-atmosr(kred))/atmosr(kred) !ff if(i.eq.18)atmosr(kred)=atmosr(kred)/(100.-atmosr(kred)) !% c if(i.eq.6.or.i.eq.14)atmosr(kred)=tan(atmosr(kred)/2.0) c if(i.eq.7.or.i.eq.15)atmosr(kred)=tan(atmosr(kred)/4.0) c print*,'comprime2 kred=',kred,'a=',atmos(kamp+ntau2),'ar=',atmosr(kred) if(i.eq.5.or.i.eq.13)atmosr(kred)=atmosr(kred)-voffset else if(m(i).gt.1)then mm=(ntau-1)/(m(i)-1) !espaciado entre nodos c if(i.eq.6.or.i.eq.14)then !g1 o g2 c do j=1,m(i) c kred=kred+1 c jj=kamp+(j-1)*mm+1 c atmosr(kred)=tan(atmos(jj)/2.0) c end do c else if(i.eq.7.or.i.eq.15)then !f1 o f2 c do j=1,m(i) c kred=kred+1 c jj=kamp+(j-1)*mm+1 c atmosr(kred)=tan(atmos(jj)/4.0) c end do c else do j=1,m(i) kred=kred+1 jj=kamp+(j-1)*mm+1 atmosr(kred)=atmos(jj) if(i.eq.5.or.i.eq.13)atmosr(kred)=atmosr(kred)-voffset c if(i.eq.2.or.i.eq.10)atmosr(kred)=alog(atmos(kred)) end do c end if end if kamp=kamp+ntau2 if(i.eq.8)kamp=kamp+ntau+1 !los ntau puntos de tau2 y el de ff1 end do return end c _____________________________________________________________
(** * Pre-lB-systems (unital)

By Vladimir Voevodsky, split off the file prelBsystems.v on March 3, 2015 *)

Require Import UniMath.Foundations.All.
Require Import TypeTheory.Csystems.hSet_ltowers.
Require Export TypeTheory.Bsystems.prelB_non_unital.
Require Export TypeTheory.Bsystems.dlt.

(** *** The structure formed by operations dlt *)

Definition dlt_layer_0 ( BB : lBsystem_carrier ) :=
  ∑ dlt : dlt_ops_type BB, dlt_ax0_type dlt.

Definition dlt_layer_0_to_dlt_ops_type ( BB : lBsystem_carrier ) :
  dlt_layer_0 BB -> dlt_ops_type BB := pr1 .
Coercion dlt_layer_0_to_dlt_ops_type : dlt_layer_0 >-> dlt_ops_type .

(** *** Complete definition of a (unital) pre-lB-system *)

Definition prelBsystem :=
  ∑ BB : prelBsystem_non_unital, dlt_layer_0 BB.

(** This definition adds exactly what Definition 2.2 adds in arXiv:1410.5389v1
    to a non-unital pre-B-system. *)

Definition prelBsystem_pr1 : prelBsystem -> prelBsystem_non_unital := pr1 .
Coercion prelBsystem_pr1 : prelBsystem >-> prelBsystem_non_unital .

(** *** Access functions for the operation dlt and its zeroth axiom *)

Definition dlt_op { BB : prelBsystem } : dlt_ops_type BB := pr2 BB .

Definition dlt_ax0 { BB : prelBsystem } : dlt_ax0_type ( @dlt_op BB ) :=
  pr2 ( pr2 BB ) .

(* End of the file prelB.v *)
function d = distToEpipolarLine(F, p1, p2)
%% Given a fundamental matrix F, compute each matched point pair's
% distances to the corresponding epipolar lines, then sum the two distances.
% Extracted from the MATLAB built-in function estimateFundamentalMatrix.m
% Very important: F must map a point p1 to an epipolar line in the p2 image
% space, not the other way around.
% More details:
% http://stackoverflow.com/questions/26582960/sampson-error-for-five-point-essential-matrix-estimation

%% License
% ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
% Copyright (c) 2018 Bingyao Huang
% All rights reserved.

% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are met:

% The above copyright notice and this permission notice shall be included in all
% copies or substantial portions of the Software.

% If you publish results obtained using this software, please cite our paper.

% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
% SOFTWARE.

% convert to homogeneous coordinates
p1 = [p1, ones(size(p1,1),1)]';
p2 = [p2, ones(size(p2,1),1)]';

% squared epipolar constraint (p2' * F * p1).^2
d = sum(p2.*(F*p1), 1) .^ 2;

% sampson distance
epl1 = F * p1;
epl2 = F' * p2;
d = d ./ (epl1(1,:).^2 + epl1(2,:).^2 + epl2(1,:).^2 + epl2(2,:).^2);
end
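A minimal NumPy sketch of the same Sampson distance, assuming (as the MATLAB code requires) that F maps points in image 1 to epipolar lines in image 2; the function and variable names are illustrative:

# Sampson distance for matched point pairs, mirroring the MATLAB code above.
import numpy as np

def sampson_distance(F, p1, p2):
    """p1, p2: (N, 2) matched points; F: 3x3 fundamental matrix."""
    x1 = np.column_stack([p1, np.ones(len(p1))]).T   # 3 x N homogeneous
    x2 = np.column_stack([p2, np.ones(len(p2))]).T
    num = np.sum(x2 * (F @ x1), axis=0) ** 2         # (x2' F x1)^2
    l1 = F @ x1                                      # epipolar lines in image 2
    l2 = F.T @ x2                                    # epipolar lines in image 1
    den = l1[0]**2 + l1[1]**2 + l2[0]**2 + l2[1]**2
    return num / den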
[STATEMENT]
lemma AE_notin_null_part:
  "S \<in> completion M \<Longrightarrow> (AE x in M. x \<notin> null_part M S)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. S \<in> sets (completion M) \<Longrightarrow> AE x in M. x \<notin> null_part M S
[PROOF STEP]
by (auto dest!: null_part_null_sets AE_not_in simp: AE_completion_iff)
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.DStructures.Structures.Category where

open import Cubical.Foundations.Prelude
open import Cubical.DStructures.Base
open import Cubical.DStructures.Meta.Properties
open import Cubical.Categories.Category renaming (isUnivalent to isUnivalentCat)

private
  variable
    ℓ ℓ' ℓ'' ℓ₁ ℓ₁' ℓ₁'' ℓ₂ ℓA ℓ≅A ℓB ℓ≅B ℓ≅ᴰ : Level

-- every univalent 1-precategory gives a URGStr
Cat→𝒮 : (𝒞 : Precategory ℓ ℓ') → (uni : isUnivalentCat 𝒞) → URGStr (𝒞 .ob) ℓ'
Cat→𝒮 𝒞 uni =
  urgstr (CatIso {𝒞 = 𝒞})
         idCatIso
         λ x y → isUnivalentCat.univ uni x y
lemma lim_at_infinity_0:
  fixes l :: "'a::{real_normed_field,field}"
  shows "(f \<longlongrightarrow> l) at_infinity \<longleftrightarrow> ((f \<circ> inverse) \<longlongrightarrow> l) (at (0::'a))"
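In conventional limit notation the lemma reads (a plain restatement, not from the theory file; `at 0` is the punctured neighborhood filter, hence the x ≠ 0 side condition):

$$ \lim_{x\to\infty} f(x) = l \;\Longleftrightarrow\; \lim_{\substack{x\to 0 \\ x\neq 0}} f\bigl(x^{-1}\bigr) = l. $$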
import tactic.linarith
import algebra.order.with_zero
import topology.locally_finite
import data.fin.interval
import data.fin.succ_pred
import to_mathlib.set_theory.cardinal.basic

/-!
# Indexing types

This is a stupid file introducing a type class for types that will index locally finite
covers of (paracompact) manifolds without having to discuss according to compactness.
The only intended instances are `ℕ` and `fin (n+1)`.

It also includes a lemma about locally finite covers that doesn't require an indexing
type but will be used with one.
-/

open set

class indexing (α : Type*) [linear_order α] :=
(from_nat : ℕ → α)
(to_nat : α → ℕ)
(mono_from : monotone from_nat)
(from_to : ∀ a, from_nat (to_nat a) = a)

@[priority 100]
instance indexing.has_coe (α : Type*) [linear_order α] [indexing α] : has_coe ℕ α :=
⟨indexing.from_nat⟩

@[simp]
lemma indexing.coe_to {α : Type*} [linear_order α] [indexing α] (i : α) :
  ((indexing.to_nat i) : α) = i :=
indexing.from_to i

lemma indexing.coe_mono {α : Type*} [linear_order α] [indexing α] {i j : ℕ} (h : i ≤ j) :
  (i : α) ≤ j :=
indexing.mono_from h

instance indexing.nonempty (α : Type*) [linear_order α] [indexing α] : nonempty α :=
⟨indexing.from_nat 0⟩

instance : indexing ℕ :=
{ from_nat := id,
  to_nat := id,
  mono_from := monotone_id,
  from_to := λ n, rfl }

def fin.indexing (n : ℕ) : indexing (fin $ n + 1) :=
{ from_nat := λ k, if h : k < n + 1 then ⟨k, h⟩ else fin.last n,
  to_nat := coe,
  mono_from := λ k l hkl,
  begin
    dsimp [fin.of_nat],
    split_ifs ; try { simp [fin.le_last] }; linarith,
  end,
  from_to := begin rintros ⟨k, hk⟩, erw dif_pos hk, refl end }

local attribute [instance] fin.indexing

open_locale topology

/-- Our model indexing type depending on `n : ℕ` is `ℕ` if `n = 0` and `fin n` otherwise -/
def index_type (n : ℕ) : Type :=
nat.cases_on n ℕ (λ k, fin $ k + 1)

@[simp] lemma index_type_zero : index_type 0 = ℕ := rfl

@[simp] lemma index_type_succ (n : ℕ) : index_type (n + 1) = fin (n + 1) := rfl

@[simp] lemma index_type_of_zero_lt {n : ℕ} (h : 0 < n) : index_type n = fin n :=
by rw [← nat.succ_pred_eq_of_pos h, index_type_succ]

instance (n : ℕ) : linear_order (index_type n) :=
nat.cases_on n nat.linear_order (λ _, fin.linear_order)

instance (n : ℕ) : indexing (index_type n) :=
nat.cases_on n nat.indexing (λ _, fin.indexing _)

instance (n : ℕ) : locally_finite_order (index_type n) :=
nat.cases_on n nat.locally_finite_order (λ _, fin.locally_finite_order _)

instance (n : ℕ) : order_bot (index_type n) :=
nat.cases_on n nat.order_bot (λ k, show order_bot $ fin (k + 1), by apply_instance)

instance (N : ℕ) : has_zero (index_type N) := ⟨indexing.from_nat 0⟩

lemma set.countable_iff_exists_nonempty_index_type_equiv
  {α : Type*} {s : set α} (hne : s.nonempty) :
  s.countable ↔ ∃ n, nonempty (index_type n ≃ s) :=
begin
  -- Huge golfing opportunity.
  cases @set.finite_or_infinite _ s,
  { refine ⟨λ hh, ⟨h.to_finset.card, _⟩, λ _, h.countable⟩,
    have : 0 < h.to_finset.card,
    { rw finset.card_pos,
      exact (set.finite.to_finset_nonempty h).mpr hne },
    simp only [this, index_type_of_zero_lt],
    have e₁ := fintype.equiv_fin h.to_finset,
    rw [fintype.card_coe, h.coe_sort_to_finset] at e₁,
    exact ⟨e₁.symm⟩, },
  { refine ⟨λ hh, ⟨0, _⟩, _⟩,
    { simp only [index_type_zero],
      obtain ⟨_i⟩ := set.countable_infinite_iff_nonempty_denumerable.mp ⟨hh, h⟩,
      haveI := _i,
      exact ⟨(denumerable.eqv s).symm⟩, },
    { rintros ⟨n, ⟨fn⟩⟩,
      have hn : n = 0,
      { by_contra hn,
        replace hn : 0 < n := zero_lt_iff.mpr hn,
        simp only [hn, index_type_of_zero_lt] at fn,
        exact set.not_infinite.mpr ⟨fintype.of_equiv (fin n) fn⟩ h, },
      simp only [hn, index_type_zero] at fn,
      exact set.countable_iff_exists_injective.mpr ⟨fn.symm, fn.symm.injective⟩, }, },
end

open filter

lemma index_type.lt_or_eq_succ (N n : ℕ) :
  (n : index_type N) < (n + 1 : ℕ) ∨ (n : index_type N) = (n + 1 : ℕ) :=
begin
  rw or_comm,
  exact eq_or_lt_of_le (indexing.mono_from n.le_succ)
end

lemma index_type.le_or_lt_succ {N n : ℕ} (hn : (n : index_type N) < (n + 1 : ℕ))
  (j : index_type N) :
  j ≤ n ↔ j < (n + 1 : ℕ) :=
begin
  cases N,
  { exact nat.lt_succ_iff.symm, },
  refine ⟨λ h, lt_of_le_of_lt h hn, λ h, _⟩,
  clear hn,
  obtain ⟨j, hj⟩ := j,
  change _ ≤ indexing.from_nat n,
  change _ < indexing.from_nat (n + 1) at h,
  unfold indexing.from_nat at ⊢ h,
  rcases lt_trichotomy N n with hNn | rfl | hNn,
  { replace hNn : ¬ (n < N + 1) := by simpa using nat.succ_le_iff.mpr hNn,
    simp only [hNn, not_false_iff, dif_neg],
    exact fin.le_last _ },
  { simpa using nat.lt_succ_iff.mp hj },
  { simp only [hNn, add_lt_add_iff_right, dif_pos, fin.mk_lt_mk] at h,
    simpa only [nat.lt.step hNn, dif_pos, fin.mk_le_mk] using nat.lt_succ_iff.mp h }
end

lemma index_type.not_lt_zero {N : ℕ} (j : index_type N) : ¬ (j < 0) :=
nat.cases_on N nat.not_lt_zero (λ n, fin.not_lt_zero) j
[STATEMENT]
lemma zero_minus_vec[simp]:
  "(v :: 'a :: group_add vec) \<in> carrier_vec n \<Longrightarrow> 0\<^sub>v n - v = - v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. v \<in> carrier_vec n \<Longrightarrow> 0\<^sub>v n - v = - v
[PROOF STEP]
by (intro eq_vecI, auto)
module Occupation

public export
record Occupation where
  constructor CreateOccupation
  type : String
  id   : String
  name : String
module Data.Fin.Extra where

open import Data.Nat renaming (suc to S; zero to Z; _+_ to _ℕ+_; _*_ to _ℕ*_)
open import Data.Fin
open import Function
open import Relation.Nullary.Negation
open import Relation.Binary.PropositionalEquality

inject-1 : ∀ {n} → (i : Fin (S n)) → toℕ i ≢ n → Fin n
inject-1 {Z}   zero    p = contradiction refl p
inject-1 {Z}   (suc i) p = i
inject-1 {S n} zero    p = zero
inject-1 {S n} (suc i) p = suc (inject-1 i (p ∘ cong S))
@testset "filter.jl" begin @testset "IdentityFilter" begin f = IdentityFilter() g = IdentityFilter() @test f === g end @testset "TopHatFilter" begin h(x) = 1 - 1 / 2 * cos(x) f = TopHatFilter(h) @test f.width == h end @testset "ConvolutionalFilter" begin h = x -> 1 G = x -> x^2 f = ConvolutionalFilter(h, G) @test f.width == h @test f.kernel == G end @testset "GaussianFilter" begin σ = 0.1 G = GaussianFilter(σ) @test G.kernel(0.0) ≈ 1 / √(2π * σ^2) end end
[STATEMENT]
lemma dj_perm_set_forget:
  fixes pi::"'y prm"
  and   x ::"'x set"
  assumes dj: "disjoint TYPE('x) TYPE('y)"
  shows "pi\<bullet>x=x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. pi \<bullet> x = x
[PROOF STEP]
using dj
[PROOF STATE]
proof (prove)
using this:
  disjoint TYPE('x) TYPE('y)
goal (1 subgoal):
 1. pi \<bullet> x = x
[PROOF STEP]
by (simp_all add: perm_set_def disjoint_def)
module Js.React.Element

import Control.Monad.Syntax
import Data.Foldable.Extras
import Js
import Js.Array
import Js.Object

%default total
%access export

public export
data Element = MkElement Ptr

Class Element where
  ptr (MkElement p) = p

public export
data Child = ChildElement Element | Text String

private
childrenArray : Foldable f => f Child -> JS_IO Ptr
childrenArray = createWith appendChild >=> pure . ptr
  where
    appendChild : Child -> Array -> JS_IO ()
    appendChild (ChildElement (MkElement p)) = append p
    appendChild (Text t) = append t

private
%inline
jsElement : (ty : Type) -> {auto fty : FTy FFI_JS [] ty} -> ty
jsElement ty = js "React.createElement(%0, %1, ...%2)" ty

%inline
tag : (Class p, Foldable f) => (name : String) -> (props : p) -> (children : f Child) -> JS_IO Element
tag name props =
  childrenArray
    >=> jsElement (String -> Ptr -> Ptr -> JS_IO Ptr) name (ptr props)
    >=> pure . MkElement

%inline
simple : Member a => (display : a -> JS_IO Element) -> (arg : a) -> JS_IO Element
simple display arg =
  assert_total
    (MkElement <$> js "React.createElement(%0, %1)"
      (JsFn (Ptr -> JS_IO Ptr) -> Ptr -> JS_IO Ptr)
      (MkJsFn $ get "v" . MkObject >=> (display >=> pure . ptr))
      (ptr !(wrap "v" arg)))

%inline
class' : (Class c, Class p, Foldable f) => (cl : c) -> (props : p) -> (children : f Child) -> JS_IO Element
class' cl props =
  childrenArray
    >=> jsElement (Ptr -> Ptr -> Ptr -> JS_IO Ptr) (ptr cl) (ptr props)
    >=> pure . MkElement

%inline
div : (Class p, Foldable f) => (props : p) -> (children : f Child) -> JS_IO Element
div = tag "div"

%inline
button : (Class p, Foldable f) => (props : p) -> (children : f Child) -> JS_IO Element
button = tag "button"
(* Property from Case-Analysis for Rippling and Inductive Proof,
   Moa Johansson, Lucas Dixon and Alan Bundy, ITP 2010.
   This Isabelle theory is produced using the TIP tool offered at the following website:
     https://github.com/tip-org/tools
   This file was originally provided as part of TIP benchmark at the following website:
     https://github.com/tip-org/benchmarks
   Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
   to make it compatible with Isabelle2017. *)
theory TIP_prop_48
  imports "../../Test_Base"
begin

datatype 'a list = nil2 | cons2 "'a" "'a list"

datatype Nat = Z | S "Nat"

fun x :: "'a list => 'a list => 'a list" where
  "x (nil2) z = z"
| "x (cons2 z2 xs) z = cons2 z2 (x xs z)"

fun last :: "Nat list => Nat" where
  "last (nil2) = Z"
| "last (cons2 z (nil2)) = z"
| "last (cons2 z (cons2 x2 x3)) = last (cons2 x2 x3)"

fun butlast :: "'a list => 'a list" where
  "butlast (nil2) = nil2"
| "butlast (cons2 z (nil2)) = nil2"
| "butlast (cons2 z (cons2 x2 x3)) = cons2 z (butlast (cons2 x2 x3))"

theorem property0 : (* Manually fixed TIP's bug. *)
  "((case xs of nil2 => True | cons2 y z => False) ==>
    ((x (butlast xs) (cons2 (last xs) (nil2))) = xs))"
  oops

end
module Esterel.Context.Properties where

open import Esterel.Lang
open import Esterel.Environment as Env
  using (Env ; Θ ; _←_ ; module SigMap ; module ShrMap ; module VarMap)
open import Esterel.Context
  using (EvaluationContext ; EvaluationContext1 ; _⟦_⟧e ; _≐_⟦_⟧e ;
         Context ; Context1 ; _⟦_⟧c ; _≐_⟦_⟧c)
open import Esterel.Variable.Signal as Signal
  using (Signal)
open import Esterel.Variable.Shared as SharedVar
  using (SharedVar)
open import Esterel.Variable.Sequential as SeqVar
  using (SeqVar)

open import Data.List
  using (List ; _∷_ ; [] ; map ; _++_)
open import Data.Product
  using (Σ-syntax ; Σ ; _,_ ; proj₁ ; proj₂ ; _×_ ; _,′_)
open import Data.Sum
  using (_⊎_ ; inj₁ ; inj₂)
open import Relation.Binary.PropositionalEquality
  using (_≡_ ; refl ; sym ; cong ; subst ; trans)

open _≐_⟦_⟧e
open _≐_⟦_⟧c
open EvaluationContext1
open Context1

plug : ∀{p E q} → E ⟦ q ⟧e ≡ p → p ≐ E ⟦ q ⟧e
plug {E = []} refl = dehole
plug {E = epar₁ q ∷ E} refl = depar₁ (plug {E = E} refl)
plug {E = epar₂ p₁ ∷ E} refl = depar₂ (plug {E = E} refl)
plug {E = eseq q ∷ E} refl = deseq (plug {E = E} refl)
plug {E = eloopˢ q ∷ E} refl = deloopˢ (plug {E = E} refl)
plug {E = esuspend S ∷ E} refl = desuspend (plug {E = E} refl)
plug {E = etrap ∷ E} refl = detrap (plug {E = E} refl)

unplug : ∀{p E q} → p ≐ E ⟦ q ⟧e → E ⟦ q ⟧e ≡ p
unplug dehole = refl
unplug (depar₁ eq) rewrite unplug eq = refl
unplug (depar₂ eq) rewrite unplug eq = refl
unplug (deseq eq) rewrite unplug eq = refl
unplug (deloopˢ eq) rewrite unplug eq = refl
unplug (desuspend eq) rewrite unplug eq = refl
unplug (detrap eq) rewrite unplug eq = refl

plugc : ∀ {C q p} → C ⟦ q ⟧c ≡ p → p ≐ C ⟦ q ⟧c
plugc {[]} refl = dchole
plugc {ceval (epar₁ q) ∷ C} refl = dcpar₁ (plugc {C} refl)
plugc {ceval (epar₂ p) ∷ C} refl = dcpar₂ (plugc {C} refl)
plugc {ceval (eseq q) ∷ C} refl = dcseq₁ (plugc {C} refl)
plugc {ceval (eloopˢ q) ∷ C} refl = dcloopˢ₁ (plugc {C} refl)
plugc {ceval (esuspend S) ∷ C} refl = dcsuspend (plugc {C} refl)
plugc {ceval etrap ∷ C} refl = dctrap (plugc {C} refl)
plugc {csignl S ∷ C} refl = dcsignl (plugc {C} refl)
plugc {cpresent₁ S q ∷ C} refl = dcpresent₁ (plugc {C} refl)
plugc {cpresent₂ S p ∷ C} refl = dcpresent₂ (plugc {C} refl)
plugc {cloop ∷ C} refl = dcloop (plugc {C} refl)
plugc {cloopˢ₂ p ∷ C} refl = dcloopˢ₂ (plugc {C} refl)
plugc {cseq₂ p ∷ C} refl = dcseq₂ (plugc {C} refl)
plugc {cshared s e ∷ C} refl = dcshared (plugc {C} refl)
plugc {cvar x e ∷ C} refl = dcvar (plugc {C} refl)
plugc {cif₁ x q ∷ C} refl = dcif₁ (plugc {C} refl)
plugc {cif₂ x p ∷ C} refl = dcif₂ (plugc {C} refl)
plugc {cenv θ A ∷ C} refl = dcenv (plugc {C} refl)

unplugc : ∀{p C q} → p ≐ C ⟦ q ⟧c → C ⟦ q ⟧c ≡ p
unplugc dchole = refl
unplugc (dcpar₁ eq) rewrite unplugc eq = refl
unplugc (dcpar₂ eq) rewrite unplugc eq = refl
unplugc (dcseq₁ eq) rewrite unplugc eq = refl
unplugc (dcseq₂ eq) rewrite unplugc eq = refl
unplugc (dcsuspend eq) rewrite unplugc eq = refl
unplugc (dctrap eq) rewrite unplugc eq = refl
unplugc (dcsignl eq) rewrite unplugc eq = refl
unplugc (dcpresent₁ eq) rewrite unplugc eq = refl
unplugc (dcpresent₂ eq) rewrite unplugc eq = refl
unplugc (dcloop eq) rewrite unplugc eq = refl
unplugc (dcloopˢ₁ eq) rewrite unplugc eq = refl
unplugc (dcloopˢ₂ eq) rewrite unplugc eq = refl
unplugc (dcshared eq) rewrite unplugc eq = refl
unplugc (dcvar eq) rewrite unplugc eq = refl
unplugc (dcif₁ eq) rewrite unplugc eq = refl
unplugc (dcif₂ eq) rewrite unplugc eq = refl
unplugc (dcenv eq) rewrite unplugc eq = refl

plug-sym : ∀{E E' p q} → E ⟦ p ⟧e ≐ E' ⟦ q ⟧e → E' ⟦ q ⟧e ≐ E ⟦ p ⟧e
plug-sym eq = plug (sym (unplug eq))

unplug-eq : ∀{p q r E} → p ≐ E ⟦ q ⟧e → p ≐ E ⟦ r ⟧e → q ≡ r
unplug-eq dehole dehole = refl
unplug-eq (depar₁ qeq) (depar₁ req) = unplug-eq qeq req
unplug-eq (depar₂ qeq) (depar₂ req) = unplug-eq qeq req
unplug-eq (deseq qeq) (deseq req) = unplug-eq qeq req
unplug-eq (deloopˢ qeq) (deloopˢ req) = unplug-eq qeq req
unplug-eq (desuspend qeq) (desuspend req) = unplug-eq qeq req
unplug-eq (detrap qeq) (detrap req) = unplug-eq qeq req

plug-eq : ∀{p q r E} → p ≐ E ⟦ r ⟧e → q ≐ E ⟦ r ⟧e → p ≡ q
plug-eq peq qeq = trans (sym (unplug peq)) (unplug qeq)

Erefl : ∀{E p} → E ⟦ p ⟧e ≐ E ⟦ p ⟧e
Erefl = plug refl

Crefl : ∀{C p} → C ⟦ p ⟧c ≐ C ⟦ p ⟧c
Crefl = plugc refl

⟦⟧e-to-⟦⟧c : ∀ {E p q} -> p ≐ E ⟦ q ⟧e -> p ≐ (map ceval E) ⟦ q ⟧c
⟦⟧e-to-⟦⟧c dehole = dchole
⟦⟧e-to-⟦⟧c (depar₁ decomp) = dcpar₁ (⟦⟧e-to-⟦⟧c decomp)
⟦⟧e-to-⟦⟧c (depar₂ decomp) = dcpar₂ (⟦⟧e-to-⟦⟧c decomp)
⟦⟧e-to-⟦⟧c (deseq decomp) = dcseq₁ (⟦⟧e-to-⟦⟧c decomp)
⟦⟧e-to-⟦⟧c (deloopˢ decomp) = dcloopˢ₁ (⟦⟧e-to-⟦⟧c decomp)
⟦⟧e-to-⟦⟧c (desuspend decomp) = dcsuspend (⟦⟧e-to-⟦⟧c decomp)
⟦⟧e-to-⟦⟧c (detrap decomp) = dctrap (⟦⟧e-to-⟦⟧c decomp)

⟦⟧c-to-⟦⟧e : ∀ {E p q} → p ≐ (map ceval E) ⟦ q ⟧c → p ≐ E ⟦ q ⟧e
⟦⟧c-to-⟦⟧e {[]} dchole = dehole
⟦⟧c-to-⟦⟧e {_ ∷ _} (dcpar₁ p≐E⟦q⟧) = depar₁ (⟦⟧c-to-⟦⟧e p≐E⟦q⟧)
⟦⟧c-to-⟦⟧e {_ ∷ _} (dcpar₂ p≐E⟦q⟧) = depar₂ (⟦⟧c-to-⟦⟧e p≐E⟦q⟧)
⟦⟧c-to-⟦⟧e {_ ∷ _} (dcseq₁ p≐E⟦q⟧) = deseq (⟦⟧c-to-⟦⟧e p≐E⟦q⟧)
⟦⟧c-to-⟦⟧e {_ ∷ _} (dcloopˢ₁ p≐E⟦q⟧) = deloopˢ (⟦⟧c-to-⟦⟧e p≐E⟦q⟧)
⟦⟧c-to-⟦⟧e {_ ∷ _} (dcsuspend p≐E⟦q⟧) = desuspend (⟦⟧c-to-⟦⟧e p≐E⟦q⟧)
⟦⟧c-to-⟦⟧e {_ ∷ _} (dctrap p≐E⟦q⟧) = detrap (⟦⟧c-to-⟦⟧e p≐E⟦q⟧)

C++ : ∀ {C1 C2 p1 p2 p3} -> p1 ≐ C1 ⟦ p2 ⟧c -> p2 ≐ C2 ⟦ p3 ⟧c -> p1 ≐ C1 ++ C2 ⟦ p3 ⟧c
C++ dchole p2C = p2C
C++ (dcpar₁ p1C) p2C = dcpar₁ (C++ p1C p2C)
C++ (dcpar₂ p1C) p2C = dcpar₂ (C++ p1C p2C)
C++ (dcseq₁ p1C) p2C = dcseq₁ (C++ p1C p2C)
C++ (dcseq₂ p1C) p2C = dcseq₂ (C++ p1C p2C)
C++ (dcsuspend p1C) p2C = dcsuspend (C++ p1C p2C)
C++ (dctrap p1C) p2C = dctrap (C++ p1C p2C)
C++ (dcsignl p1C) p2C = dcsignl (C++ p1C p2C)
C++ (dcpresent₁ p1C) p2C = dcpresent₁ (C++ p1C p2C)
C++ (dcpresent₂ p1C) p2C = dcpresent₂ (C++ p1C p2C)
C++ (dcloop p1C) p2C = dcloop (C++ p1C p2C)
C++ (dcloopˢ₁ p1C) p2C = dcloopˢ₁ (C++ p1C p2C)
C++ (dcloopˢ₂ p1C) p2C = dcloopˢ₂ (C++ p1C p2C)
C++ (dcshared p1C) p2C = dcshared (C++ p1C p2C)
C++ (dcvar p1C) p2C = dcvar (C++ p1C p2C)
C++ (dcif₁ p1C) p2C = dcif₁ (C++ p1C p2C)
C++ (dcif₂ p1C) p2C = dcif₂ (C++ p1C p2C)
C++ (dcenv p1C) p2C = dcenv (C++ p1C p2C)

++-is-nesting : ∀ C′ C q -> (C′ ++ C) ⟦ q ⟧c ≐ C′ ⟦ C ⟦ q ⟧c ⟧c
++-is-nesting [] C q = dchole
++-is-nesting (ceval (epar₁ q) ∷ C′) C q₁ = dcpar₁ (++-is-nesting C′ C q₁)
++-is-nesting (ceval (epar₂ p) ∷ C′) C q = dcpar₂ (++-is-nesting C′ C q)
++-is-nesting (ceval (eseq q) ∷ C′) C q₁ = dcseq₁ (++-is-nesting C′ C q₁)
++-is-nesting (ceval (eloopˢ q) ∷ C′) C q₁ = dcloopˢ₁ (++-is-nesting C′ C q₁)
++-is-nesting (ceval (esuspend S) ∷ C′) C q = dcsuspend (++-is-nesting C′ C q)
++-is-nesting (ceval etrap ∷ C′) C q = dctrap (++-is-nesting C′ C q)
++-is-nesting (csignl S ∷ C′) C q = dcsignl (++-is-nesting C′ C q)
++-is-nesting (cpresent₁ S q ∷ C′) C q₁ = dcpresent₁ (++-is-nesting C′ C q₁)
++-is-nesting (cpresent₂ S p ∷ C′) C q = dcpresent₂ (++-is-nesting C′ C q)
++-is-nesting (cloop ∷ C′) C q = dcloop (++-is-nesting C′ C q)
++-is-nesting (cloopˢ₂ p ∷ C′) C q = dcloopˢ₂ (++-is-nesting C′ C q)
++-is-nesting (cseq₂ p ∷ C′) C q = dcseq₂ (++-is-nesting C′ C q)
++-is-nesting (cshared s e ∷ C′) C q = dcshared (++-is-nesting C′ C q)
++-is-nesting (cvar x e ∷ C′) C q = dcvar (++-is-nesting C′ C q)
++-is-nesting (cif₁ x q ∷ C′) C q₁ = dcif₁ (++-is-nesting C′ C q₁)
++-is-nesting (cif₂ x p ∷ C′) C q = dcif₂ (++-is-nesting C′ C q)
++-is-nesting (cenv θ A ∷ C′) C q = dcenv (++-is-nesting C′ C q)
(* Author: Xingyuan Zhang, Chunhan Wu, Christian Urban *)
theory Folds
  imports "Regular-Sets.Regular_Exp"
begin

section \<open>``Summation'' for regular expressions\<close>

text \<open>
  To obtain an equational system out of a finite set of equivalence classes, a fold
  operation on finite sets, \<open>folds\<close>, is defined. The use of \<open>SOME\<close> makes \<open>folds\<close>
  more robust than the \<open>fold\<close> in the Isabelle library. The expression \<open>folds f\<close>
  makes sense when \<open>f\<close> is not \<open>associative\<close> and \<open>commutative\<close>, while \<open>fold f\<close>
  does not.
\<close>

definition
  folds :: "('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> 'b \<Rightarrow> 'a set \<Rightarrow> 'b"
where
  "folds f z S \<equiv> SOME x. fold_graph f z S x"

text \<open>Plus-combination for a set of regular expressions\<close>

abbreviation
  Setalt :: "'a rexp set \<Rightarrow> 'a rexp" ("\<Uplus>_" [1000] 999)
where
  "\<Uplus>A \<equiv> folds Plus Zero A"

text \<open>
  For finite sets, @{term Setalt} is preserved under @{term lang}.
\<close>

lemma folds_plus_simp [simp]:
  fixes rs::"('a rexp) set"
  assumes a: "finite rs"
  shows "lang (\<Uplus>rs) = \<Union> (lang ` rs)"
  unfolding folds_def
  apply(rule set_eqI)
  apply(rule someI2_ex)
  apply(rule_tac finite_imp_fold_graph[OF a])
  apply(erule fold_graph.induct)
  apply(auto)
  done

end
function gf = gfobs_L1L5(obsr, obsb, lam)
% geometry-free combination of L1 and L5 single-differenced observations,
% scaled to meters by the carrier wavelengths lam; returns 0 if either
% observation is missing
pi = sdobs(obsr, obsb, 1) * lam(1);
pj = sdobs(obsr, obsb, 3) * lam(3);
if pi == 0 || pj == 0
    gf = 0;
else
    gf = pi - pj;
end
return
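A minimal Python sketch of the same geometry-free combination. The wavelengths are derived from the GPS L1 (1575.42 MHz) and L5 (1176.45 MHz) frequencies, which is an assumption about the carriers this function targets, not something stated in the MATLAB source:

# Geometry-free (GF) combination of two single-differenced carrier phases
# given in cycles; zero is treated as a missing-observation sentinel,
# matching the MATLAB code above.
C = 299_792_458.0                      # speed of light, m/s
LAM_L1 = C / 1575.42e6                 # ~0.1903 m
LAM_L5 = C / 1176.45e6                 # ~0.2548 m

def gf_l1l5(phase1: float, phase5: float) -> float:
    p1 = phase1 * LAM_L1               # convert cycles to meters
    p5 = phase5 * LAM_L5
    if p1 == 0 or p5 == 0:
        return 0.0
    return p1 - p5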
Formal statement is: lemma pderiv_mult: "pderiv (p * q) = p * pderiv q + q * pderiv p" Informal statement is: The derivative of the product of two polynomials is the sum of the product of the first polynomial with the derivative of the second polynomial and the product of the second polynomial with the derivative of the first polynomial.
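In symbols, this is the product rule for polynomial derivatives (standard identity, stated here for reference):

$$ (p\,q)' = p\,q' + q\,p'. $$

For example, with $p = x^2$ and $q = x^3$: $(x^5)' = 5x^4 = x^2 \cdot 3x^2 + x^3 \cdot 2x$.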
||| Shaped data.
module Data.Schema.Data

import Data.Schema.Restricted
import Data.Schema

%default total

public export
data Data : Schema n b -> Type where
  -- [ Shape ]
  --
  -- The core shape of data is that of a Rose Tree: leaves are concrete
  -- values, and branches are nodes. We differ by losing the restrictions
  -- on node shapes.
  Leaf : (name  : n)
      -> (value : type)
      -> (prf   : Restriction type restriction value)
               -> Data (Simple (MkAtom name type restriction validType))

  Branch : (name  : n)
        -> (value : Data complex)
                 -> Data (Complex name complex)

  Empty : Data Empty

  -- [ Sequencing ]
  --
  -- SeqEat and SeqEmpty are artefacts from the total schema specification.
  -- It would be better if there were a single `Seq` constructor, but alas no.
  SeqEat : (this : Data sthis)
        -> (that : Inf (Data sthat))
                -> Data (SeqEat sthis sthat)

  SeqEmpty : (this : Data sthis)
          -> (that : Data sthat)
                  -> Data (SeqEmpty sthis sthat)

  -- [ Choice ]
  --
  -- This and That may lead to noisy data when `Alt` is nested.
  --
  -- [ Q ] How to address such noisy shaped data?
  This : (this : Data sthis) -> Data (Alt sthis sthat)
  That : (that : Data sthat) -> Data (Alt sthis sthat)

-- [ EOF ]
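For intuition, the underlying shape is an ordinary rose tree; a plain Python sketch of that shape (illustrative only, not the Idris API above, which additionally indexes the tree by a Schema and carries a Restriction proof at each Leaf):

# Plain rose tree for comparison: leaves carry values, branches carry
# named children.
from dataclasses import dataclass

@dataclass
class Leaf:
    name: str
    value: object

@dataclass
class Branch:
    name: str
    children: list  # of Leaf | Branch

doc = Branch("person", [Leaf("name", "Ada"), Leaf("born", 1815)])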
import Lean
import SciLean.Core

namespace SciLean

-- abbrev ℝ := Float
-- instance : DecidableEq ℝ := λ x y => if x==y then .isTrue sorry else .isFalse sorry

-- structure PhysicalScale where

-- Change to rational numbers
-- Maybe change scale to positive rationals
structure PhysicalUnit where
  -- meter
  mLog10Scale : Int := 0
  mPower : Int := 0
  -- second
  sLog10Scale : Int := 0
  sPower : Int := 0
  -- kilogram
  kgLog10Scale : Int := 0
  kgPower : Int := 0
deriving DecidableEq

-- scales are stored as log₁₀, so they combine in base 10
def PhysicalUnit.scale (unit : PhysicalUnit) : Float :=
  10^(Float.ofInt (unit.mLog10Scale + unit.sLog10Scale + unit.kgLog10Scale))

instance : Mul PhysicalUnit := ⟨λ x y =>
  { mLog10Scale := x.mLog10Scale + y.mLog10Scale, mPower := x.mPower + y.mPower,
    sLog10Scale := x.sLog10Scale + y.sLog10Scale, sPower := x.sPower + y.sPower,
    kgLog10Scale := x.kgLog10Scale + y.kgLog10Scale, kgPower := x.kgPower + y.kgPower }⟩

instance : Div PhysicalUnit := ⟨λ x y =>
  { mLog10Scale := x.mLog10Scale - y.mLog10Scale, mPower := x.mPower - y.mPower,
    sLog10Scale := x.sLog10Scale - y.sLog10Scale, sPower := x.sPower - y.sPower,
    kgLog10Scale := x.kgLog10Scale - y.kgLog10Scale, kgPower := x.kgPower - y.kgPower }⟩

instance : HPow PhysicalUnit Int PhysicalUnit := ⟨λ x y =>
  { mLog10Scale := x.mLog10Scale * y, mPower := x.mPower * y,
    sLog10Scale := x.sLog10Scale * y, sPower := x.sPower * y,
    kgLog10Scale := x.kgLog10Scale * y, kgPower := x.kgPower * y }⟩

instance : OfNat PhysicalUnit 1 := ⟨{}⟩

-- Maybe unit can be anything that is a multiplicative abelian group
structure PhysicalQuantity (α : Type u) (unit : PhysicalUnit) where
  val : α

instance {α units} [Add α] : Add (PhysicalQuantity α units) := ⟨λ x y => ⟨x.val + y.val⟩⟩
instance {α units} [OfNat α n] : OfNat (PhysicalQuantity α units) n := ⟨⟨OfNat.ofNat n⟩⟩

instance {α β γ units units'} [HMul α β γ] :
    HMul (PhysicalQuantity α units) (PhysicalQuantity β units')
         (PhysicalQuantity γ (units*units')) :=
  ⟨λ x y => ⟨x.val * y.val⟩⟩
instance {α β γ units} [HMul α β γ] :
    HMul (PhysicalQuantity α units) β (PhysicalQuantity γ (units)) :=
  ⟨λ x y => ⟨x.val * y⟩⟩
instance {α β γ units'} [HMul α β γ] :
    HMul α (PhysicalQuantity β units') (PhysicalQuantity γ (units')) :=
  ⟨λ x y => ⟨x * y.val⟩⟩

instance {α β γ units units'} [HDiv α β γ] :
    HDiv (PhysicalQuantity α units) (PhysicalQuantity β units')
         (PhysicalQuantity γ (units/units')) :=
  ⟨λ x y => ⟨x.val / y.val⟩⟩
instance {α β γ units} [HDiv α β γ] :
    HDiv (PhysicalQuantity α units) β (PhysicalQuantity γ (units)) :=
  ⟨λ x y => ⟨x.val / y⟩⟩
instance {α β γ units'} [HDiv α β γ] :
    HDiv α (PhysicalQuantity β units') (PhysicalQuantity γ (1/units')) :=
  ⟨λ x y => ⟨x / y.val⟩⟩

declare_syntax_cat siunit (behavior := both)

syntax "unit" term : siunit

def meter : PhysicalUnit := { mPower := 1 }
macro "m" : siunit => `(siunit| unit meter)

def kilometer : PhysicalUnit := { mPower := 1, mLog10Scale := 3 }
macro "km" : siunit => `(siunit| unit kilometer)

def centimeter : PhysicalUnit := { mPower := 1, mLog10Scale := -2 }
macro "cm" : siunit => `(siunit| unit centimeter)

def millimeter : PhysicalUnit := { mPower := 1, mLog10Scale := -3 }
macro "mm" : siunit => `(siunit| unit millimeter)

def mile : PhysicalUnit := { mPower := 1, mLog10Scale := Float.log10 1609.34 |>.toUInt64.toNat }
macro "mi" : siunit => `(siunit| unit mile)

def yard : PhysicalUnit := { mPower := 1, mLog10Scale := Float.log10 0.9144 |>.toUInt64.toNat }
macro "yd" : siunit => `(siunit| unit yard)

def feet : PhysicalUnit := { mPower := 1, mLog10Scale := Float.log10 0.3048 |>.toUInt64.toNat }
macro "ft" : siunit => `(siunit| unit feet)

def inch : PhysicalUnit := { mPower := 1, mLog10Scale := Float.log10 0.0254 |>.toUInt64.toNat }
macro "in" : siunit => `(siunit| unit inch)

def hectare : PhysicalUnit := { mPower := 2, mLog10Scale := 4 }
macro "ha" : siunit => `(siunit| unit hectare)

def acre : PhysicalUnit := { mPower := 2, mLog10Scale := Float.log10 4046.86 |>.toUInt64.toNat }
macro "ac" : siunit => `(siunit| unit acre)

def second : PhysicalUnit := { sPower := 1 }
macro "s" : siunit => `(siunit| unit second)

def minute : PhysicalUnit := { sPower := 1, sLog10Scale := Float.log10 60 |>.toUInt64.toNat }
macro "min" : siunit => `(siunit| unit minute)

def hour : PhysicalUnit := { sPower := 1, sLog10Scale := Float.log10 3600 |>.toUInt64.toNat }
macro "h" : siunit => `(siunit| unit hour)

def day : PhysicalUnit := { sPower := 1, sLog10Scale := Float.log10 86400 |>.toUInt64.toNat }
macro "day" : siunit => `(siunit| unit day)

def kilogram : PhysicalUnit := { kgPower := 1 }
macro "kg" : siunit => `(siunit| unit kilogram)

def gram : PhysicalUnit := { kgPower := 1, kgLog10Scale := -3 }
macro "g" : siunit => `(siunit| unit gram)

def milligram : PhysicalUnit := { kgPower := 1, kgLog10Scale := -6 }
macro "mg" : siunit => `(siunit| unit milligram)

def stone : PhysicalUnit := { kgPower := 1, kgLog10Scale := Float.log10 6.350288000002350941 |>.toUInt64.toNat }
macro "st" : siunit => `(siunit| unit stone)

def pound : PhysicalUnit := { kgPower := 1, kgLog10Scale := Float.log10 0.4535920000001679 |>.toUInt64.toNat }
macro "Lb" : siunit => `(siunit| unit pound)

def ounce : PhysicalUnit := { kgPower := 1, kgLog10Scale := Float.log10 0.028349500000010494777 |>.toUInt64.toNat }
macro "oz" : siunit => `(siunit| unit ounce)

def hertz : PhysicalUnit := { sPower := -1 }
macro "Hz" : siunit => `(siunit| unit hertz)

def newton : PhysicalUnit := { mPower := 1, sPower := -2, kgPower := 1 }
macro "N" : siunit => `(siunit| unit newton)

def pascal : PhysicalUnit := { mPower := -1, sPower := -2, kgPower := 1 }
macro "Pa" : siunit => `(siunit| unit pascal)

def watt : PhysicalUnit := { mPower := 2, sPower := -3, kgPower := 1 }
macro "W" : siunit => `(siunit| unit watt)

def joule : PhysicalUnit := { mPower := 2, sPower := -2, kgPower := 1 }
macro "J" : siunit => `(siunit| unit joule)

syntax siunit:71 "*" siunit:70 : siunit
syntax siunit:71 "/" siunit:70 : siunit
syntax siunit noWs "²" : siunit
syntax siunit noWs "³" : siunit
syntax siunit noWs "⁻¹" : siunit
syntax siunit noWs "⁻²" : siunit
syntax siunit noWs "⁻³" : siunit
syntax "ℝ[" siunit "]" : term

macro_rules
  | `(siunit| $unit * $unit') => do
    match (← Lean.expandMacros unit) with
    | `(siunit| unit $u) =>
      match (← Lean.expandMacros unit') with
      | `(siunit| unit $u') => `(siunit| unit ($u * $u'))
      | _ => Lean.Macro.throwUnsupported
    | _ => Lean.Macro.throwUnsupported
  | `(siunit| $unit / $unit') => do
    match (← Lean.expandMacros unit) with
    | `(siunit| unit $u) =>
      match (← Lean.expandMacros unit') with
      | `(siunit| unit $u') => `(siunit| unit ($u / $u'))
      | _ => Lean.Macro.throwUnsupported
    | _ => Lean.Macro.throwUnsupported
  | `(siunit| $unit²) => do
    match (← Lean.expandMacros unit) with
    | `(siunit| unit $u) => `(siunit| unit $u^(2:Int))
    | _ => Lean.Macro.throwUnsupported
  | `(siunit| $unit³) => do
    match (← Lean.expandMacros unit) with
    | `(siunit| unit $u) => `(siunit| unit $u^(3:Int))
    | _ => Lean.Macro.throwUnsupported
  | `(siunit| $unit⁻¹) => do
    match (← Lean.expandMacros unit) with
    | `(siunit| unit $u) => `(siunit| unit $u^(-1:Int))
    | _ => Lean.Macro.throwUnsupported
  | `(siunit| $unit⁻²) => do
    match (← Lean.expandMacros unit) with
    | `(siunit| unit $u) => `(siunit| unit $u^(-2:Int))
    | _ => Lean.Macro.throwUnsupported
  | `(siunit| $unit⁻³) => do
    match (← Lean.expandMacros unit) with
    | `(siunit| unit $u) => `(siunit| unit $u^(-3:Int))
    | _ => Lean.Macro.throwUnsupported
  | `(ℝ[ $units ]) => do
    match (← Lean.expandMacros units) with
    | `(siunit| unit $u) => `(PhysicalQuantity ℝ $u)
    | _ => Lean.Macro.throwUnsupported

open Lean Lean.Meta Lean.Elab Lean.Elab.Term in
elab_rules : term
  | `(PhysicalQuantity $α $unit) => do
    let α ← elabTerm α none
    let unit ← reduce (← elabTerm unit (mkConst ``PhysicalUnit))
    mkAppM ``PhysicalQuantity #[α, unit]

example : ℝ[mm*km] = ℝ[m²] := by rfl
example : ℝ[m*s⁻¹] = ℝ[s⁻¹*m] := by rfl
example : ℝ[kg*mg] = ℝ[g²] := by rfl
example : ℝ[N] = ℝ[kg*m*s⁻²] := by rfl
example : ℝ[J] = ℝ[N*m] := by rfl
example : ℝ[W] = ℝ[J*s⁻¹] := by rfl
example : ℝ[Pa] = ℝ[N*m⁻²] := by rfl
example : ℝ[N*m²*kg⁻²] = ℝ[m³*kg⁻¹*s⁻²] := by rfl
example : ℝ[J*Hz⁻¹] = ℝ[J*s] := by rfl

namespace PhysicalConstants

-- Source: https://en.wikipedia.org/wiki/Physical_constant#Table_of_physical_constants

def gravitationalConstant : ℝ[N*m²*kg⁻²] := ⟨6.674 * 10.0^(-11.0)⟩
abbrev G := gravitationalConstant

def speedOfLight : ℝ[m*s⁻¹] := ⟨299792458.0⟩
abbrev c := speedOfLight

def planckConstant : ℝ[J*Hz⁻¹] := ⟨6.62607015 * 10.0^(-34.0)⟩
abbrev h := planckConstant
abbrev ℎ := planckConstant

-- ℏ = h / (2π)
def reducedPlanckConstant : ℝ[J*s] := h / (2 * Math.pi)
abbrev ℏ := reducedPlanckConstant

def electronMass : ℝ[kg] := ⟨9.1093837015 * 10.0^(-31.0)⟩
abbrev mₑ : ℝ[kg] := electronMass

end PhysicalConstants
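The exponent bookkeeping above is ordinary dimensional analysis: multiplying quantities adds the (m, s, kg) power vectors, dividing subtracts them. A minimal Python sketch of the same idea, independent of the Lean code (scales omitted for brevity):

# Units as integer exponent vectors over (m, s, kg); multiplication adds
# exponents, division subtracts them -- the same rule as the Lean
# `Mul PhysicalUnit` / `Div PhysicalUnit` instances above.
from dataclasses import dataclass

@dataclass(frozen=True)
class Unit:
    m: int = 0
    s: int = 0
    kg: int = 0

    def __mul__(self, other):
        return Unit(self.m + other.m, self.s + other.s, self.kg + other.kg)

    def __truediv__(self, other):
        return Unit(self.m - other.m, self.s - other.s, self.kg - other.kg)

meter, second, kilogram = Unit(m=1), Unit(s=1), Unit(kg=1)
newton = kilogram * meter / (second * second)
assert newton == Unit(m=1, s=-2, kg=1)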
Set Implicit Arguments.

Require Import Arith_Max_extra.
Require Import LNameless_Meta.
Require Import LNameless_Meta_Env.
Require Import LNameless_Isomorphism.
Require Import LNameless_Fsub_Iso.
Require Import LN_Template_Two_Sort.

(* ************************************************************ *)
(** ** Fsub Part 1A and 2A *)

(** Reference: Chargueraud's POPL solution using Locally Nameless style
    and cofinite quantification *)

(****************************************************************************)
(****************************************************************************)
(** Here begins a concrete formalization of System Fsub for part 1A and 2A. *)
(****************************************************************************)
(****************************************************************************)

(** [typ] and [trm] are already defined in LNameless_Fsub_Iso.v
<<
Notation var := atom.

Inductive typ : Set :=
| typ_bvar  : nat -> typ
| typ_fvar  : var -> typ
| typ_top   : typ
| typ_arrow : typ -> typ -> typ
| typ_all   : typ -> typ -> typ.

Inductive trm : Set :=
| trm_bvar : nat -> trm
| trm_fvar : var -> trm
| trm_app  : trm -> trm -> trm
| trm_abs  : typ -> trm -> trm
| trm_tapp : trm -> typ -> trm
| trm_tabs : typ -> trm -> trm.
>> *)

(** Many of the generic properties about substitution and environments
    are already proved in a generic way. *)

(** M_tt, M_yt, M_yy, and M_ty are already defined in LN_Template_Two_Sort.v *)

(** Notation for opening up binders with type or term variables *)

Notation "T 'open_tt_var' X" := (M_yy.M.Tbsubst T 0 (typ_fvar X)) (at level 67).
Notation "t 'open_te_var' X" := (M_yt.M.Tbsubst t 0 (typ_fvar X)) (at level 67).
Notation "t 'open_ee_var' x" := (M_tt.M.Tbsubst t 0 (trm_fvar x)) (at level 67).

(** Types as locally closed pre-types *)

Inductive type : typ -> Prop :=
  | type_top : type typ_top
  | type_var : forall (X:atom), type (typ_fvar X)
  | type_arrow : forall T1 T2,
      type T1 ->
      type T2 ->
      type (typ_arrow T1 T2)
  | type_all : forall L T1 T2,
      type T1 ->
      (forall (X:atom), X `notin` L -> type (T2 open_tt_var X)) ->
      type (typ_all T1 T2).

(** Terms as locally closed pre-terms *)

Inductive term : trm -> Prop :=
  | term_var : forall (x:atom), term (trm_fvar x)
  | term_abs : forall L V e1,
      type V ->
      (forall (x:atom), x `notin` L -> term (e1 open_ee_var x)) ->
      term (trm_abs V e1)
  | term_app : forall e1 e2,
      term e1 ->
      term e2 ->
      term (trm_app e1 e2)
  | term_tabs : forall L V e1,
      type V ->
      (forall (X:atom), X `notin` L -> term (e1 open_te_var X)) ->
      term (trm_tabs V e1)
  | term_tapp : forall e1 V,
      term e1 ->
      type V ->
      term (trm_tapp e1 V).

(** Bindings either map type or term variables.  [X ~<: T] is a subtyping
    assumption and [x ~: T] is a typing assumption. *)

Inductive bind : Set :=
  | bind_sub : typ -> bind
  | bind_typ : typ -> bind.

Notation "X ~<: T" := (X ~ bind_sub T) (at level 31, left associativity).
Notation "x ~: T" := (x ~ bind_typ T) (at level 31, left associativity).

(** Environment is an associative list of bindings. *)

(** Well-formedness of a pre-type T in an environment E: all the type
    variables of T must be bound via a subtyping relation in E.  This
    predicate implies that T is a type. *)

Inductive wft : env bind -> typ -> Prop :=
  | wft_top : forall E, wft E typ_top
  | wft_var : forall U E X,
      binds X (bind_sub U) E ->
      wft E (typ_fvar X)
  | wft_arrow : forall E T1 T2,
      wft E T1 ->
      wft E T2 ->
      wft E (typ_arrow T1 T2)
  | wft_all : forall L E T1 T2,
      wft E T1 ->
      (forall (X:atom), X `notin` L ->
         wft (X ~<: T1 ++ E) (T2 open_tt_var X)) ->
      wft E (typ_all T1 T2).

(** An environment E is well-formed if it contains no duplicate bindings and
    if each type in it is well-formed with respect to the environment it is
    pushed on to. *)

Inductive okt : env bind -> Prop :=
  | okt_empty : okt empty_env
  | okt_sub : forall E X T,
      okt E -> wft E T -> X # E -> okt (X ~<: T ++ E)
  | okt_typ : forall E x T,
      okt E -> wft E T -> x # E -> okt (x ~: T ++ E).

(** Subtyping relation *)

Inductive sub : env bind -> typ -> typ -> Prop :=
  | sub_top : forall E S,
      okt E ->
      wft E S ->
      sub E S typ_top
  | sub_refl_tvar : forall E X,
      okt E ->
      wft E (typ_fvar X) ->
      sub E (typ_fvar X) (typ_fvar X)
  | sub_trans_tvar : forall U E T X,
      binds X (bind_sub U) E ->
      sub E U T ->
      sub E (typ_fvar X) T
  | sub_arrow : forall E S1 S2 T1 T2,
      sub E T1 S1 ->
      sub E S2 T2 ->
      sub E (typ_arrow S1 S2) (typ_arrow T1 T2)
  | sub_all : forall L E S1 S2 T1 T2,
      sub E T1 S1 ->
      (forall (X:atom), X `notin` L ->
          sub (X ~<: T1 ++ E) (S2 open_tt_var X) (T2 open_tt_var X)) ->
      sub E (typ_all S1 S2) (typ_all T1 T2).

(** Typing relation *)

Inductive typing : env bind -> trm -> typ -> Prop :=
  | typing_var : forall E x T,
      okt E ->
      binds x (bind_typ T) E ->
      typing E (trm_fvar x) T
  | typing_abs : forall L E V e1 T1,
      (forall (x:atom), x `notin` L ->
        typing (x ~: V ++ E) (e1 open_ee_var x) T1) ->
      typing E (trm_abs V e1) (typ_arrow V T1)
  | typing_app : forall T1 E e1 e2 T2,
      typing E e1 (typ_arrow T1 T2) ->
      typing E e2 T1 ->
      typing E (trm_app e1 e2) T2
  | typing_tabs : forall L E V e1 T1,
      (forall (X:atom), X `notin` L ->
        typing (X ~<: V ++ E) (e1 open_te_var X) (T1 open_tt_var X)) ->
      typing E (trm_tabs V e1) (typ_all V T1)
  | typing_tapp : forall T1 E e1 T T2,
      typing E e1 (typ_all T1 T2) ->
      sub E T T1 ->
      typing E (trm_tapp e1 T) (M_yy.M.Tbsubst T2 0 T)
  | typing_sub : forall S E e T,
      typing E e S ->
      sub E S T ->
      typing E e T.

(** Values *)

Inductive value : trm -> Prop :=
  | value_abs : forall V e1,
      term (trm_abs V e1) -> value (trm_abs V e1)
  | value_tabs : forall V e1,
      term (trm_tabs V e1) -> value (trm_tabs V e1).

(** One-step reduction *)

Inductive red : trm -> trm -> Prop :=
  | red_app_1 : forall e1 e1' e2,
      term e2 ->
      red e1 e1' ->
      red (trm_app e1 e2) (trm_app e1' e2)
  | red_app_2 : forall e1 e2 e2',
      value e1 ->
      red e2 e2' ->
      red (trm_app e1 e2) (trm_app e1 e2')
  | red_tapp : forall e1 e1' V,
      type V ->
      red e1 e1' ->
      red (trm_tapp e1 V) (trm_tapp e1' V)
  | red_abs : forall V e1 v2,
      term (trm_abs V e1) ->
      value v2 ->
      red (trm_app (trm_abs V e1) v2) (M_tt.M.Tbsubst e1 0 v2)
  | red_tabs : forall V1 e1 V2,
      term (trm_tabs V1 e1) ->
      type V2 ->
      red (trm_tapp (trm_tabs V1 e1) V2) (M_yt.M.Tbsubst e1 0 V2).

(** Our goal is to prove preservation and progress *)

Definition preservation := forall E e e' T,
  typing E e T ->
  red e e' ->
  typing E e' T.

Definition progress := forall e T,
  typing empty_env e T ->
  value e \/ exists e', red e e'.
/* -----------------------------------------------------------------------------
 * Copyright 2021 Jonathan Haigh
 * SPDX-License-Identifier: MIT
 * ---------------------------------------------------------------------------*/

#ifndef SQ_INCLUDE_GUARD_core_narrow_inl_h_
#define SQ_INCLUDE_GUARD_core_narrow_inl_h_

#include "core/errors.h"

#include <fmt/format.h>
#include <fmt/ostream.h>
#include <gsl/gsl>

namespace sq {

template <typename T, typename U, typename... FormatArgs>
constexpr T narrow(U value, FormatArgs &&...format_args) {
  try {
    return gsl::narrow<T>(value);
  } catch (gsl::narrowing_error &e) {
    throw NarrowingError{T{}, value, SQ_FWD(format_args)...};
  }
}

constexpr gsl::index to_index(auto value, auto &&...format_args) {
  return narrow<gsl::index>(SQ_FWD(value), SQ_FWD(format_args)...);
}

constexpr std::size_t to_size(auto value, auto &&...format_args) {
  return narrow<std::size_t>(SQ_FWD(value), SQ_FWD(format_args)...);
}

} // namespace sq

#endif // SQ_INCLUDE_GUARD_core_narrow_inl_h_
[STATEMENT]
lemma finite_all_edges_between:
  assumes "finite X" "finite Y"
  shows "finite (all_edges_between X Y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. finite (all_edges_between X Y)
[PROOF STEP]
by (meson all_edges_between_subset assms finite_cartesian_product finite_subset)
/*
 * Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
 */

#include <boost/assign/list_of.hpp>

#include "base/test/task_test_util.h"
#include "bgp/bgp_log.h"
#include "bgp/bgp_server.h"
#include "bgp/routing-policy/routing_policy_action.h"
#include "bgp/rtarget/rtarget_address.h"
#include "control-node/control_node.h"

using boost::assign::list_of;
using std::find;
using std::string;
using std::vector;

class UpdateExtCommunityTest : public ::testing::Test {
protected:
    UpdateExtCommunityTest() : server_(&evm_), attr_db_(server_.attr_db()) {
    }

    void TearDown() {
        server_.Shutdown();
        task_util::WaitForIdle();
    }

    EventManager evm_;
    BgpServer server_;
    BgpAttrDB *attr_db_;
};

TEST_F(UpdateExtCommunityTest, Update) {
    vector<string> communities = list_of("target:23:11")("target:43:11");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 2);
    RouteTarget val0 = RouteTarget(comm->communities()[0]);
    RouteTarget val1 = RouteTarget(comm->communities()[1]);
    EXPECT_TRUE(val0.ToString() == communities[0]);
    EXPECT_TRUE(val1.ToString() == communities[1]);

    communities = list_of("target:33:11")("target:53:11")
        .convert_to_container<vector<string> >();
    UpdateExtCommunity action2(communities, "set");
    action2(const_cast<BgpAttr *>(attr.get()));
    comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 2);
    val0 = RouteTarget(comm->communities()[0]);
    val1 = RouteTarget(comm->communities()[1]);
    EXPECT_TRUE(val0.ToString() == communities[0]);
    EXPECT_TRUE(val1.ToString() == communities[1]);

    vector<string> communities2 = list_of("target:53:11");
    UpdateExtCommunity action3(communities2, "remove");
    action3(const_cast<BgpAttr *>(attr.get()));
    comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 1);
    val0 = RouteTarget(comm->communities()[0]);
    EXPECT_TRUE(val0.ToString() == communities[0]);
}

TEST_F(UpdateExtCommunityTest, ToString) {
    vector<string> communities = list_of("target:23:11")("target:43:11");
    UpdateExtCommunity action(communities, "add");
    EXPECT_EQ("Extcommunity add [ target:23:11,target:43:11 ]",
        action.ToString());
}

TEST_F(UpdateExtCommunityTest, IsEqual1) {
    vector<string> communities1 = list_of("target:23:11")("target:43:11");
    vector<string> communities2 = list_of("target:23:11")("target:43:11");
    UpdateExtCommunity action1(communities1, "add");
    UpdateExtCommunity action2(communities2, "add");
    EXPECT_TRUE(action1.IsEqual(action2));
    EXPECT_TRUE(action2.IsEqual(action1));
}

TEST_F(UpdateExtCommunityTest, IsEqual2) {
    vector<string> communities1 = list_of("target:23:11")("target:43:11");
    vector<string> communities2 = list_of("target:23:11")("target:53:11");
    UpdateExtCommunity action1(communities1, "add");
    UpdateExtCommunity action2(communities2, "add");
    EXPECT_FALSE(action1.IsEqual(action2));
    EXPECT_FALSE(action2.IsEqual(action1));
}

class UpdateAsPathTest : public ::testing::Test {
protected:
    UpdateAsPathTest() : server_(&evm_), attr_db_(server_.attr_db()) {
    }

    void TearDown() {
        server_.Shutdown();
        task_util::WaitForIdle();
    }

    EventManager evm_;
    BgpServer server_;
    BgpAttrDB *attr_db_;
};

TEST_F(UpdateAsPathTest, ToString) {
    vector<as_t> asn_list = list_of(1000)(2000);
    UpdateAsPath action(asn_list);
    EXPECT_EQ(asn_list, action.asn_list());
    EXPECT_EQ("as-path expand [ 1000,2000 ]", action.ToString());
}

TEST_F(UpdateAsPathTest, IsEqual1) {
    vector<as_t> asn_list1 = list_of(1000)(2000);
    vector<as_t> asn_list2 = list_of(1000)(2000);
    UpdateAsPath action1(asn_list1);
    UpdateAsPath action2(asn_list2);
    EXPECT_TRUE(action1.IsEqual(action2));
    EXPECT_TRUE(action2.IsEqual(action1));
}

TEST_F(UpdateAsPathTest, IsEqual2) {
    vector<as_t> asn_list1 = list_of(1000)(2000);
    vector<as_t> asn_list2 = list_of(1000)(3000);
    UpdateAsPath action1(asn_list1);
    UpdateAsPath action2(asn_list2);
    EXPECT_FALSE(action1.IsEqual(action2));
    EXPECT_FALSE(action2.IsEqual(action1));
}

TEST_F(UpdateAsPathTest, UpdateNull) {
    vector<as_t> asn_list = list_of(1000)(2000);
    UpdateAsPath action(asn_list);
    EXPECT_EQ(asn_list, action.asn_list());

    BgpAttr attr(attr_db_);
    action(&attr);
    const AsPath *as_path = attr.as_path();
    EXPECT_TRUE(as_path != NULL);
    const AsPathSpec &as_path_spec = as_path->path();
    EXPECT_EQ(1, as_path_spec.path_segments.size());
    EXPECT_EQ(2, as_path_spec.path_segments[0]->path_segment.size());
    EXPECT_EQ(1000, as_path_spec.path_segments[0]->path_segment[0]);
    EXPECT_EQ(2000, as_path_spec.path_segments[0]->path_segment[1]);
}

TEST_F(UpdateAsPathTest, UpdateNullAs4) {
    attr_db_->server()->set_enable_4byte_as(true);
    vector<as_t> asn_list = list_of(1000)(2000);
    UpdateAsPath action(asn_list);
    EXPECT_EQ(asn_list, action.asn_list());

    BgpAttr attr(attr_db_);
    action(&attr);
    const AsPath4Byte *as_path = attr.aspath_4byte();
    EXPECT_TRUE(as_path != NULL);
    const AsPath4ByteSpec &as_path_spec = as_path->path();
    EXPECT_EQ(1, as_path_spec.path_segments.size());
    EXPECT_EQ(2, as_path_spec.path_segments[0]->path_segment.size());
    EXPECT_EQ(1000, as_path_spec.path_segments[0]->path_segment[0]);
    EXPECT_EQ(2000, as_path_spec.path_segments[0]->path_segment[1]);
}

TEST_F(UpdateAsPathTest, UpdateNonNull) {
    vector<as_t> asn_list = list_of(1000)(2000);
    UpdateAsPath action(asn_list);
    EXPECT_EQ(asn_list, action.asn_list());

    BgpAttrSpec spec;
    AsPathSpec *path = new AsPathSpec;
    AsPathSpec::PathSegment *ps = new AsPathSpec::PathSegment;
    ps->path_segment_type = AsPathSpec::PathSegment::AS_SEQUENCE;
    ps->path_segment = list_of(3000)(4000)
        .convert_to_container<vector<as2_t> >();
    path->path_segments.push_back(ps);
    spec.push_back(path);
    BgpAttr attr(attr_db_, spec);
    action(&attr);
    const AsPath *as_path = attr.as_path();
    EXPECT_TRUE(as_path != NULL);
    const AsPathSpec &as_path_spec = as_path->path();
    EXPECT_EQ(1, as_path_spec.path_segments.size());
    EXPECT_EQ(4, as_path_spec.path_segments[0]->path_segment.size());
    EXPECT_EQ(1000, as_path_spec.path_segments[0]->path_segment[0]);
    EXPECT_EQ(2000, as_path_spec.path_segments[0]->path_segment[1]);
    EXPECT_EQ(3000, as_path_spec.path_segments[0]->path_segment[2]);
    EXPECT_EQ(4000, as_path_spec.path_segments[0]->path_segment[3]);
}

TEST_F(UpdateAsPathTest, UpdateNonNullAs4) {
    attr_db_->server()->set_enable_4byte_as(true);
    vector<as_t> asn_list = list_of(1000)(2000);
    UpdateAsPath action(asn_list);
    EXPECT_EQ(asn_list, action.asn_list());

    BgpAttrSpec spec;
    AsPath4ByteSpec *path = new AsPath4ByteSpec;
    AsPath4ByteSpec::PathSegment *ps = new AsPath4ByteSpec::PathSegment;
    ps->path_segment_type = AsPath4ByteSpec::PathSegment::AS_SEQUENCE;
    ps->path_segment = list_of(3000)(4000)
        .convert_to_container<std::vector<as_t> >();
    path->path_segments.push_back(ps);
    spec.push_back(path);
    BgpAttr attr(attr_db_, spec);
    action(&attr);
    const AsPath4Byte *as_path = attr.aspath_4byte();
    EXPECT_TRUE(as_path != NULL);
    const AsPath4ByteSpec &as_path_spec = as_path->path();
    EXPECT_EQ(1, as_path_spec.path_segments.size());
    EXPECT_EQ(4, as_path_spec.path_segments[0]->path_segment.size());
    EXPECT_EQ(1000, as_path_spec.path_segments[0]->path_segment[0]);
    EXPECT_EQ(2000, as_path_spec.path_segments[0]->path_segment[1]);
    EXPECT_EQ(3000, as_path_spec.path_segments[0]->path_segment[2]);
    EXPECT_EQ(4000, as_path_spec.path_segments[0]->path_segment[3]);
}

TEST_F(UpdateExtCommunityTest, ValidHexString1) {
    vector<string> communities = list_of("0x123456789abcdef0");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 1);
    EXPECT_EQ("Extcommunity add [ 123456789abcdef0 ]", action.ToString());
}

TEST_F(UpdateExtCommunityTest, ValidHexString2) {
    // hex string without prefix '0x'
    vector<string> communities = list_of("123456789abcdef0");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 1);
    EXPECT_EQ("Extcommunity add [ 123456789abcdef0 ]", action.ToString());
}

TEST_F(UpdateExtCommunityTest, ValidHexString3) {
    // hex string for target:53:11 is 0x200350000000b
    vector<string> communities = list_of("0x200350000000b");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 1);
    EXPECT_EQ("Extcommunity add [ target:53:11 ]", action.ToString());
}

TEST_F(UpdateExtCommunityTest, ValidHexString4) {
    vector<string> communities = list_of("0xffffffffffffffff");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 1);
    EXPECT_EQ("Extcommunity add [ ffffffffffffffff ]", action.ToString());
}

TEST_F(UpdateExtCommunityTest, InValidHexString1) {
    vector<string> communities = list_of("xxxxxxxxffffffff");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 0);
}

TEST_F(UpdateExtCommunityTest, InValidHexString2) {
    // hex string greater than 8 bytes (0xffffffffffffffff)
    vector<string> communities = list_of("123456789abcdef0f");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 0);
}

TEST_F(UpdateExtCommunityTest, InValidHexString3) {
    // hex string with prefix '0x' greater than 8 bytes (0xffffffffffffffff)
    vector<string> communities = list_of("0x123456789abcdef0f");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 0);
}

TEST_F(UpdateExtCommunityTest, InValidHexString4) {
    vector<string> communities = list_of("123x456");
    UpdateExtCommunity action(communities, "add");
    ExtCommunitySpec comm_spec;
    comm_spec.communities.clear();
    BgpAttrSpec spec;
    spec.push_back(&comm_spec);
    BgpAttrPtr attr = attr_db_->Locate(spec);
    action(const_cast<BgpAttr *>(attr.get()));
    const ExtCommunity *comm = attr->ext_community();
    EXPECT_TRUE(comm != NULL);
    EXPECT_TRUE(comm->communities().size() == 0);
}

static void SetUp() {
    bgp_log_test::init();
    ControlNode::SetDefaultSchedulingPolicy();
}

static void TearDown() {
    task_util::WaitForIdle();
    TaskScheduler *scheduler = TaskScheduler::GetInstance();
    scheduler->Terminate();
}

int main(int argc, char **argv) {
    ::testing::InitGoogleTest(&argc, argv);
    SetUp();
    int result = RUN_ALL_TESTS();
    TearDown();
    return result;
}
*..............................................................

      subroutine ODS_ReSetP ( id, ierr )

      implicit NONE

!-------------------------------------------------------------------------
!         NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS      !
!-------------------------------------------------------------------------
!
! !ROUTINE:  ODS_ReSetP
!
! !DESCRIPTION:
!     Initializes or resets all tables of pointers for a NetCDF
!     file (identified by the ODS file handle, id).  These tables
!     contain all pointers that are necessary to locate a block
!     of data within a file (specified by the file handle, id)
!     partitioned according to the julian day and synoptic hour.
!     The tables are stored in common defined in the header file,
!     ods_hdf.h.  In addition, the routine extracts relevant
!     NetCDF attributes from the ODS file if the file is opened.
!
! !INTERFACE: call ODS_ReSetP ( id, ierr )
!
! !INPUT PARAMETER:
      integer   id       ! ODS file handle
!
! !OUTPUT PARAMETERS:
      integer   ierr     ! The return error code
!
! !SEE ALSO:
!     ODS_UpdateP ( Updates the pointer data )
!     ODS_ReadP   ( Read all pointer data from file. )
!     ODS_WriteP  ( Write all pointer data to file. )
!     ODS_GetP    ( Gets pointer data for reading/writing
!                   a block of data. )
!     ODS_GetAttP ( Gets the attribute of the pointers )
!     ODS_JulHr   ( Returns the number of hours from the
!                   Julian hour of the last block of data
!                   written to file )
!
! !FILES USED:
!     netcdf.inc, a header file, for defining NetCDF library
!            parameters
!     ods_hdf.h, a header file, for defining hardwired constants
!            and defining global variables and setting up data
!            structures
!     ods_stdio.h, a header file, for defining standard input/output
!            unit numbers
!
! !REVISION HISTORY:
!     24Apr96   Redder   Original version
!
!-------------------------------------------------------------------------

      include 'netcdf.inc'
      include 'ods_hdf.h'
      include 'ods_stdio.h'

*     Variables storing information about the ...
*     --------------------------------------------

*     NetCDF file id
*     --------------
      integer  nc_id

*     NetCDF dimension
*     ----------------
      integer  dimid            ! dimension id
      character * ( max_strlen )
     .         DimName          ! name of dimension
      integer  DimSz            ! dimension size

*     NetCDF variable
*     ---------------
      integer  varid            ! variable id
      character * ( max_strlen )
     .         VarName          ! name of variable
      integer  VarType          ! variable type
      integer  NDims            ! number of associated dimensions
      integer  DimIDs ( 2 )     ! dimension ids
      integer  NAtts            ! number of associated attributes

*     NetCDF attribute
*     ----------------
      integer  AttLen           ! string length for attribute or
                                !   attribute length

*     Other variables
*     ---------------
      integer  itemp            ! temporary storage for attribute
                                !   value
      integer  iday, isyn       ! index variables for do loops

*     Reset all tables of pointers
*     ----------------------------
      do 10, iday = 1, mdays
         days ( iday, id ) = CLEAR
 10   continue
      do 30, isyn = 1, msyn
         do 20, iday = 1, mdays
            syn_beg ( isyn, iday, id ) = CLEAR + 1
            syn_len ( isyn, iday, id ) = CLEAR
 20      continue
 30   continue

*     Reset the arrays containing the pointers for append mode
*     ---------------------------------------------------------
      append_mode ( id ) = OFF
      append_beg  ( id ) = CLEAR + 1
      append_len  ( id ) = 0

*     If file is not opened, then return
*     ----------------------------------
      if ( IOMode ( id ) .eq. CLOSED ) return

*     else ...
*     --------

*     Get NetCDF file id
*     ------------------
      nc_id = ncid ( id )

*     Get information about the NetCDF variable
*     -----------------------------------------
      varid = NCVID ( nc_id, 'syn_beg', ierr )
      if ( ierr .ne. NCNoErr ) return
      call ncvinq ( nc_id,   varid,
     .              VarName, VarType,
     .              NDims,   DimIDs,
     .              NAtts,   ierr )
      if ( ierr .ne. NCNoErr ) return

*     Get NetCDF Dimension
*     --------------------
      dimid = DimIDs ( 1 )
      call NCDINQ ( nc_id, dimid, DimName, DimSz, ierr )
      if ( ierr .ne. NCNoErr ) return
      nsyn ( id ) = DimSz

      dimid = DimIDs ( 2 )
      call NCDINQ ( nc_id, dimid, DimName, DimSz, ierr )
      if ( ierr .ne. NCNoErr ) return
      ndays ( id ) = DimSz

*     Get attribute values and information
*     related to the use of pointers
*     ------------------------------------

*     Julian day offset
*     -----------------
      call ODS_NCAGTI ( nc_id,  varid,
     .                  'first_julian_day',
     .                   AttLen,
     .                   itemp,
     .                   ierr )
      julian_offset ( id ) = itemp - 1
      if ( ierr .ne. NCNoErr ) return

*     latest Julian day for which there is data
*     -----------------------------------------
      call ODS_NCAGTI ( nc_id,  varid,
     .                  'latest_julian_day',
     .                   AttLen,
     .                   itemp,
     .                   ierr )
      if ( ierr .ne. NCNoErr ) return
      latest_day ( id ) = itemp

*     latest synoptic hour for which there is data
*     --------------------------------------------
      call ODS_NCAGTI ( nc_id,  varid,
     .                  'latest_synoptic_hour',
     .                   AttLen,
     .                   itemp,
     .                   ierr )
      if ( ierr .ne. NCNoErr ) return
      latest_hour ( id ) = itemp

*     no more attributes
*     ------------------

      return
      end
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Bhavik Mehta
-/
import category_theory.monad.basic
import category_theory.adjunction.basic
import category_theory.reflects_isomorphisms

/-!
# Eilenberg-Moore (co)algebras for a (co)monad

This file defines Eilenberg-Moore (co)algebras for a (co)monad,
and provides the category instance for them.

Further it defines the adjoint pair of free and forgetful functors, respectively
from and to the original category, as well as the adjoint pair of forgetful and
cofree functors, respectively from and to the original category.

## References
* [Riehl, *Category theory in context*, Section 5.2.4][riehl2017]
-/

namespace category_theory
open category

universes v₁ u₁ -- morphism levels before object levels. See note [category_theory universes].

variables {C : Type u₁} [category.{v₁} C]

namespace monad

/-- An Eilenberg-Moore algebra for a monad `T`.
    cf Definition 5.2.3 in [Riehl][riehl2017]. -/
structure algebra (T : monad C) : Type (max u₁ v₁) :=
(A : C)
(a : (T : C ⥤ C).obj A ⟶ A)
(unit' : T.η.app A ≫ a = 𝟙 A . obviously)
(assoc' : T.μ.app A ≫ a = (T : C ⥤ C).map a ≫ a . obviously)

restate_axiom algebra.unit'
restate_axiom algebra.assoc'
attribute [reassoc] algebra.unit algebra.assoc

namespace algebra
variables {T : monad C}

/-- A morphism of Eilenberg–Moore algebras for the monad `T`. -/
@[ext] structure hom (A B : algebra T) :=
(f : A.A ⟶ B.A)
(h' : (T : C ⥤ C).map f ≫ B.a = A.a ≫ f . obviously)

restate_axiom hom.h'
attribute [simp, reassoc] hom.h

namespace hom

/-- The identity homomorphism for an Eilenberg–Moore algebra. -/
def id (A : algebra T) : hom A A :=
{ f := 𝟙 A.A }

instance (A : algebra T) : inhabited (hom A A) := ⟨{ f := 𝟙 _ }⟩

/-- Composition of Eilenberg–Moore algebra homomorphisms. -/
def comp {P Q R : algebra T} (f : hom P Q) (g : hom Q R) : hom P R :=
{ f := f.f ≫ g.f }

end hom

instance : category_struct (algebra T) :=
{ hom := hom,
  id := hom.id,
  comp := @hom.comp _ _ _ }

@[simp] lemma comp_eq_comp {A A' A'' : algebra T} (f : A ⟶ A') (g : A' ⟶ A'') :
  algebra.hom.comp f g = f ≫ g := rfl

@[simp] lemma id_eq_id (A : algebra T) : algebra.hom.id A = 𝟙 A := rfl

@[simp] lemma id_f (A : algebra T) : (𝟙 A : A ⟶ A).f = 𝟙 A.A := rfl

@[simp] lemma comp_f {A A' A'' : algebra T} (f : A ⟶ A') (g : A' ⟶ A'') :
  (f ≫ g).f = f.f ≫ g.f := rfl

/-- The category of Eilenberg-Moore algebras for a monad.
    cf Definition 5.2.4 in [Riehl][riehl2017]. -/
instance EilenbergMoore : category (algebra T) := {}.

/--
To construct an isomorphism of algebras, it suffices to give an isomorphism of the carriers
which commutes with the structure morphisms.
-/
@[simps] def iso_mk {A B : algebra T} (h : A.A ≅ B.A)
  (w : (T : C ⥤ C).map h.hom ≫ B.a = A.a ≫ h.hom) : A ≅ B :=
{ hom := { f := h.hom },
  inv :=
  { f := h.inv,
    h' := by { rw [h.eq_comp_inv, category.assoc, ←w, ←functor.map_comp_assoc], simp } } }

end algebra

variables (T : monad C)

/-- The forgetful functor from the Eilenberg-Moore category, forgetting the algebraic structure. -/
@[simps] def forget : algebra T ⥤ C :=
{ obj := λ A, A.A,
  map := λ A B f, f.f }

/-- The free functor from the Eilenberg-Moore category, constructing an algebra for any object. -/
@[simps] def free : C ⥤ algebra T :=
{ obj := λ X,
  { A := T.obj X,
    a := T.μ.app X,
    assoc' := (T.assoc _).symm },
  map := λ X Y f,
  { f := T.map f,
    h' := T.μ.naturality _ } }

instance [inhabited C] : inhabited (algebra T) :=
⟨(free T).obj (default C)⟩

/--
The adjunction between the free and forgetful constructions for Eilenberg-Moore algebras for
a monad. cf Lemma 5.2.8 of [Riehl][riehl2017].
-/ -- The other two `simps` projection lemmas can be derived from these two, so `simp_nf` complains if -- those are added too @[simps unit counit] def adj : T.free ⊣ T.forget := adjunction.mk_of_hom_equiv { hom_equiv := λ X Y, { to_fun := λ f, T.η.app X ≫ f.f, inv_fun := λ f, { f := T.map f ≫ Y.a, h' := by { dsimp, simp [←Y.assoc, ←T.μ.naturality_assoc] } }, left_inv := λ f, by { ext, dsimp, simp }, right_inv := λ f, begin dsimp only [forget_obj, monad_to_functor_eq_coe], rw [←T.η.naturality_assoc, Y.unit], apply category.comp_id, end }} /-- Given an algebra morphism whose carrier part is an isomorphism, we get an algebra isomorphism. -/ lemma algebra_iso_of_iso {A B : algebra T} (f : A ⟶ B) [is_iso f.f] : is_iso f := ⟨⟨{ f := inv f.f, h' := by { rw [is_iso.eq_comp_inv f.f, category.assoc, ← f.h], simp } }, by tidy⟩⟩ instance forget_reflects_iso : reflects_isomorphisms T.forget := { reflects := λ A B, algebra_iso_of_iso T } instance forget_faithful : faithful T.forget := {} instance : is_right_adjoint T.forget := ⟨T.free, T.adj⟩ @[simp] lemma left_adjoint_forget : left_adjoint T.forget = T.free := rfl @[simp] lemma of_right_adjoint_forget : adjunction.of_right_adjoint T.forget = T.adj := rfl /-- Given a monad morphism from `T₂` to `T₁`, we get a functor from the algebras of `T₁` to algebras of `T₂`. -/ @[simps] def algebra_functor_of_monad_hom {T₁ T₂ : monad C} (h : T₂ ⟶ T₁) : algebra T₁ ⥤ algebra T₂ := { obj := λ A, { A := A.A, a := h.app A.A ≫ A.a, unit' := by { dsimp, simp [A.unit] }, assoc' := by { dsimp, simp [A.assoc] } }, map := λ A₁ A₂ f, { f := f.f } } /-- The identity monad morphism induces the identity functor from the category of algebras to itself. -/ @[simps {rhs_md := semireducible}] def algebra_functor_of_monad_hom_id {T₁ : monad C} : algebra_functor_of_monad_hom (𝟙 T₁) ≅ 𝟭 _ := nat_iso.of_components (λ X, algebra.iso_mk (iso.refl _) (by { dsimp, simp, })) (λ X Y f, by { ext, dsimp, simp }) /-- A composition of monad morphisms gives the composition of corresponding functors. -/ @[simps {rhs_md := semireducible}] def algebra_functor_of_monad_hom_comp {T₁ T₂ T₃ : monad C} (f : T₁ ⟶ T₂) (g : T₂ ⟶ T₃) : algebra_functor_of_monad_hom (f ≫ g) ≅ algebra_functor_of_monad_hom g ⋙ algebra_functor_of_monad_hom f := nat_iso.of_components (λ X, algebra.iso_mk (iso.refl _) (by { dsimp, simp })) (λ X Y f, by { ext, dsimp, simp }) /-- If `f` and `g` are two equal morphisms of monads, then the functors of algebras induced by them are isomorphic. We define it like this as opposed to using `eq_to_iso` so that the components are nicer to prove lemmas about. -/ @[simps {rhs_md := semireducible}] def algebra_functor_of_monad_hom_eq {T₁ T₂ : monad C} {f g : T₁ ⟶ T₂} (h : f = g) : algebra_functor_of_monad_hom f ≅ algebra_functor_of_monad_hom g := nat_iso.of_components (λ X, algebra.iso_mk (iso.refl _) (by { dsimp, simp [h] })) (λ X Y f, by { ext, dsimp, simp }) /-- Isomorphic monads give equivalent categories of algebras. Furthermore, they are equivalent as categories over `C`, that is, we have `algebra_equiv_of_iso_monads h ⋙ forget = forget`. 
-/ @[simps] def algebra_equiv_of_iso_monads {T₁ T₂ : monad C} (h : T₁ ≅ T₂) : algebra T₁ ≌ algebra T₂ := { functor := algebra_functor_of_monad_hom h.inv, inverse := algebra_functor_of_monad_hom h.hom, unit_iso := algebra_functor_of_monad_hom_id.symm ≪≫ algebra_functor_of_monad_hom_eq (by simp) ≪≫ algebra_functor_of_monad_hom_comp _ _, counit_iso := (algebra_functor_of_monad_hom_comp _ _).symm ≪≫ algebra_functor_of_monad_hom_eq (by simp) ≪≫ algebra_functor_of_monad_hom_id } @[simp] lemma algebra_equiv_of_iso_monads_comp_forget {T₁ T₂ : monad C} (h : T₁ ⟶ T₂) : algebra_functor_of_monad_hom h ⋙ forget _ = forget _ := rfl end monad namespace comonad /-- An Eilenberg-Moore coalgebra for a comonad `T`. -/ @[nolint has_inhabited_instance] structure coalgebra (G : comonad C) : Type (max u₁ v₁) := (A : C) (a : A ⟶ (G : C ⥤ C).obj A) (counit' : a ≫ G.ε.app A = 𝟙 A . obviously) (coassoc' : a ≫ G.δ.app A = a ≫ G.map a . obviously) restate_axiom coalgebra.counit' restate_axiom coalgebra.coassoc' attribute [reassoc] coalgebra.counit coalgebra.coassoc namespace coalgebra variables {G : comonad C} /-- A morphism of Eilenberg-Moore coalgebras for the comonad `G`. -/ @[ext, nolint has_inhabited_instance] structure hom (A B : coalgebra G) := (f : A.A ⟶ B.A) (h' : A.a ≫ (G : C ⥤ C).map f = f ≫ B.a . obviously) restate_axiom hom.h' attribute [simp, reassoc] hom.h namespace hom /-- The identity homomorphism for an Eilenberg–Moore coalgebra. -/ def id (A : coalgebra G) : hom A A := { f := 𝟙 A.A } /-- Composition of Eilenberg–Moore coalgebra homomorphisms. -/ def comp {P Q R : coalgebra G} (f : hom P Q) (g : hom Q R) : hom P R := { f := f.f ≫ g.f } end hom /-- The category of Eilenberg-Moore coalgebras for a comonad. -/ instance : category_struct (coalgebra G) := { hom := hom, id := hom.id, comp := @hom.comp _ _ _ } @[simp] lemma comp_eq_comp {A A' A'' : coalgebra G} (f : A ⟶ A') (g : A' ⟶ A'') : coalgebra.hom.comp f g = f ≫ g := rfl @[simp] lemma id_eq_id (A : coalgebra G) : coalgebra.hom.id A = 𝟙 A := rfl @[simp] lemma id_f (A : coalgebra G) : (𝟙 A : A ⟶ A).f = 𝟙 A.A := rfl @[simp] lemma comp_f {A A' A'' : coalgebra G} (f : A ⟶ A') (g : A' ⟶ A'') : (f ≫ g).f = f.f ≫ g.f := rfl /-- The category of Eilenberg-Moore coalgebras for a comonad. -/ instance EilenbergMoore : category (coalgebra G) := {}. /-- To construct an isomorphism of coalgebras, it suffices to give an isomorphism of the carriers which commutes with the structure morphisms. -/ @[simps] def iso_mk {A B : coalgebra G} (h : A.A ≅ B.A) (w : A.a ≫ (G : C ⥤ C).map h.hom = h.hom ≫ B.a) : A ≅ B := { hom := { f := h.hom }, inv := { f := h.inv, h' := by { rw [h.eq_inv_comp, ←reassoc_of w, ←functor.map_comp], simp } } } end coalgebra variables (G : comonad C) /-- The forgetful functor from the Eilenberg-Moore category, forgetting the coalgebraic structure. -/ @[simps] def forget : coalgebra G ⥤ C := { obj := λ A, A.A, map := λ A B f, f.f } /-- The cofree functor from the Eilenberg-Moore category, constructing a coalgebra for any object. -/ @[simps] def cofree : C ⥤ coalgebra G := { obj := λ X, { A := G.obj X, a := G.δ.app X, coassoc' := (G.coassoc _).symm }, map := λ X Y f, { f := G.map f, h' := (G.δ.naturality _).symm } } /-- The adjunction between the cofree and forgetful constructions for Eilenberg-Moore coalgebras for a comonad. 
-/ -- The other two `simps` projection lemmas can be derived from these two, so `simp_nf` complains if -- those are added too @[simps unit counit] def adj : G.forget ⊣ G.cofree := adjunction.mk_of_hom_equiv { hom_equiv := λ X Y, { to_fun := λ f, { f := X.a ≫ G.map f, h' := by { dsimp, simp [←coalgebra.coassoc_assoc] } }, inv_fun := λ g, g.f ≫ G.ε.app Y, left_inv := λ f, by { dsimp, rw [category.assoc, G.ε.naturality, functor.id_map, X.counit_assoc] }, right_inv := λ g, begin ext1, dsimp, rw [functor.map_comp, g.h_assoc, cofree_obj_a, comonad.right_counit], apply comp_id, end }} /-- Given a coalgebra morphism whose carrier part is an isomorphism, we get a coalgebra isomorphism. -/ lemma coalgebra_iso_of_iso {A B : coalgebra G} (f : A ⟶ B) [is_iso f.f] : is_iso f := ⟨⟨{ f := inv f.f, h' := by { rw [is_iso.eq_inv_comp f.f, ←f.h_assoc], simp } }, by tidy⟩⟩ instance forget_reflects_iso : reflects_isomorphisms G.forget := { reflects := λ A B, coalgebra_iso_of_iso G } instance forget_faithful : faithful (forget G) := {} instance : is_left_adjoint G.forget := ⟨_, G.adj⟩ @[simp] lemma right_adjoint_forget : right_adjoint G.forget = G.cofree := rfl @[simp] lemma of_left_adjoint_forget : adjunction.of_left_adjoint G.forget = G.adj := rfl end comonad end category_theory
[STATEMENT] lemma snd_eqvt: "pi\<bullet>(snd x) = snd (pi\<bullet>x)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. pi \<bullet> snd x = snd (pi \<bullet> x) [PROOF STEP] by (cases x) simp
I am not a hamster. There is a basic problem in the process by which drugs get approved in the US: the drug companies themselves are expected to provide the test results that the FDA is supposed to use to determine whether a drug is safe or not. Just ask the plaintiffs who claimed the drug Vioxx proved fatal or injured its users. Then people actually wonder why drugs like Vioxx manage to "slip through the cracks" and get approved anyway, when later testing by independent researchers produces findings that, as in the case of Vioxx, prove the drug should never have been on the market to begin with. I guess it comes down to two things... the FDA is seriously overworked and understaffed/underfunded, and the drug companies are a VERY powerful force on Capitol Hill.
If $f$ is a function with a nonzero derivative at $x$, and $g$ is the inverse of $f$, then $g$ has a derivative at $f(x)$ equal to the reciprocal of the derivative of $f$ at $x$.
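For instance, a minimal worked case: take $f(x) = x^3$, so $f'(x) = 3x^2$, which is nonzero for $x \neq 0$, and let $g(y) = y^{1/3}$ be its inverse. The statement gives

$$ g'(f(x)) = \frac{1}{f'(x)} = \frac{1}{3x^2}, \qquad \text{e.g. } g'(8) = \frac{1}{f'(2)} = \frac{1}{12}, $$

which matches differentiating $g$ directly: $g'(y) = \tfrac{1}{3} y^{-2/3}$, so $g'(8) = \tfrac{1}{3} \cdot 8^{-2/3} = \tfrac{1}{12}$.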
module Data.Trimorphisms %default total %access public export data Trimorphism : Type -> Type -> Type -> Type where Trimo : (a -> b -> c) -> Trimorphism a b c applyTimo : Trimorphism a b c -> a -> b -> c applyTimo (Trimo t) a b = (t a) b
/*
 * Copyright (C) 2012 Open Source Robotics Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
#include <boost/shared_ptr.hpp>
#include "gazebo/util/system.hh"

/// \file
/// \ingroup gazebo_physics
/// \ingroup gazebo_physics_simbody
/// \brief Simbody wrapper forward declarations and typedefs
namespace gazebo
{
  namespace physics
  {
    /// \addtogroup gazebo_physics_simbody
    /// \{
    class SimbodyCollision;
    class SimbodyLink;
    class SimbodyModel;
    class SimbodyPhysics;
    class SimbodyRayShape;

    /// \def SimbodyPhysicsPtr
    /// \brief Boost shared pointer to SimbodyPhysics
    typedef boost::shared_ptr<SimbodyPhysics> SimbodyPhysicsPtr;

    /// \def SimbodyCollisionPtr
    /// \brief Boost shared pointer to SimbodyCollision
    typedef boost::shared_ptr<SimbodyCollision> SimbodyCollisionPtr;

    /// \def SimbodyLinkPtr
    /// \brief Boost shared pointer to SimbodyLink
    typedef boost::shared_ptr<SimbodyLink> SimbodyLinkPtr;

    /// \def SimbodyModelPtr
    /// \brief Boost shared pointer to SimbodyModel
    typedef boost::shared_ptr<SimbodyModel> SimbodyModelPtr;

    /// \def SimbodyRayShapePtr
    /// \brief Boost shared pointer to SimbodyRayShape
    typedef boost::shared_ptr<SimbodyRayShape> SimbodyRayShapePtr;
    /// \}
  }
}
Ethereum Classic is a smart contract platform and ecosystem for decentralized applications and distributed computing. It was born from a chain split on the original Ethereum blockchain. When the DAO hack occurred, in which 3.6M Ether was stolen, the Ethereum community discussed what to do.

Can Ethereum Classic be mined? What distinguishes Ethereum Classic from other cryptocurrencies? How much is Ethereum Classic worth today? Where can you buy ETC? Can ETC be sold for cash?

The majority favored a hard fork with the purpose of rendering the stolen Ether useless, but a small minority was strictly against it. They stated that "code is law" and that it would be against the spirit of decentralized systems to "turn back time" and reverse the blockchain with the help of a 51% majority. The hard fork happened, and Ethereum abandoned the old chain. The small minority forked off of the new Ethereum and decided to continue developing the old chain. This old chain is now known as Ethereum Classic; the ticker is ETC.

In July 2016, more specifically at block 1,920,000, the hard fork was executed. After the fork, the chains were split in two and have since acted independently. Ethereum Classic became a parallel blockchain where, in contrast to Ethereum, the stolen funds were never returned to their owners.

Yes, it can be mined: it is based on Proof-of-Work (PoW). The mining algorithm is Ethash, which is a modified version of the Dagger-Hashimoto algorithm.

The main points that distinguish Ethereum Classic from its bigger counterpart are that its maximum supply is capped and that it doesn't have the massive resources and industry support of its bigger brother. The maximum supply of ETC is 230,000,000 and the circulating supply at the time of writing is 101,442,324 ETC.

You can buy ETC on several cryptocurrency exchanges, most notably Bithumb, Binance, Upbit, Huobi, and OKEx. You can sell ETC for cash on several cryptocurrency exchanges that have fiat pairings, most notably Bithumb and Bitfinex.

The ETC team is actively working on the further development and scalability of their platform. They are also focused on finding ways to integrate Ethereum Classic into the growing Internet of Things sector. Additionally, they work on the "Emerald Platform," which is a set of libraries and tools for third-party application developers.

5.5/10. What I like is that ETC capped their maximum supply; this creates store-of-value (SoV) characteristics. The downside is that ETH has far more resources, industry contacts, and partnerships than Ethereum Classic. This could lead to ETC being left behind. Another negative is that the stolen coins from the DAO hack are still accessible to the hacker on the ETC chain. He currently holds 3.36 million ETC. The Ethereum Foundation was also in possession of a massive amount of ETC, although they liquidated 90% of the position they acquired through the hard fork. But the remaining 10% is still a big enough amount to move the market.
using Clang using Clang.LibClang using Test @testset "c basic" begin # parse file trans_unit = parse_header(joinpath(@__DIR__, "c", "cbasic.h"), flags = CXTranslationUnit_DetailedPreprocessingRecord | CXTranslationUnit_SkipFunctionBodies) ctx = DefaultContext() push!(ctx.trans_units, trans_unit) # get root cursor root_cursor = getcursor(trans_unit) # search the first macro defination "#define CONST 1" cursorCONST = search(root_cursor, x->kind(x)==CXCursor_MacroDefinition && name(x) == "CONST") toksCONST = tokenize(cursorCONST[1]) @test kind(toksCONST[1]) == CXToken_Identifier @test kind(toksCONST[2]) == CXToken_Literal @test toksCONST[2].text == "1" # search the second macro defination "#define CONSTADD CONST + 2" cursorCONSTADD = search(root_cursor, x->kind(x)==CXCursor_MacroDefinition && name(x) == "CONSTADD") toksCONSTADD = tokenize(cursorCONSTADD[1]) @test toksCONSTADD[1].text == "CONSTADD" @test toksCONSTADD[2].text == "CONST" @test toksCONSTADD[3].text == "+" @test toksCONSTADD[4].text == "2" # function arguments func1 = search(root_cursor, "func1")[1] @test argnum(func1) == 4 func1_args = function_args(func1) # TODO should return a structure or namedtuple @test map(spelling, func1_args) == ["a","b","c","d"] @test endswith(filename(func1), joinpath("c", "cbasic.h")) @test spelling(trans_unit) == filename(func1) # function constant array arguments should be converted to common Ptr func_arg = search(root_cursor, "func_constarr_arg")[1] wrap!(ctx, func_arg) expr = :(function func_constarr_arg(x) ccall((:func_constarr_arg, libxxx), Cint, (Ptr{Cdouble},), x) end) Base.remove_linenums!(expr) @test ctx.api_buffer[1] == expr end
!############################################################################## !# Tutorial 004f: Datastructures - comparison of linked lists vs. maps !############################################################################## module tutorial004f ! Include basic Feat-2 modules use fsystem use genoutput use listInt use mapInt use random use statistics implicit none private public :: start_tutorial004f contains ! *************************************************************************** subroutine start_tutorial004f ! Declare some variables type(t_random) :: rrandom type(t_timer) :: rtimer type(t_listInt) :: rlist type(it_listInt) :: rlistIterator type(t_mapInt) :: rmap type(it_mapInt) :: rmapIterator integer(I32), dimension(:), allocatable :: Irandom integer :: i ! Print a message call output_lbrk() call output_separator (OU_SEP_STAR) call output_line ("This is FEAT-2. Tutorial 004f") call output_separator (OU_SEP_MINUS) ! ================================= ! Initialise random number generator ! ================================= call rng_initByClock(rrandom) ! ================================= ! Generate 20000 random integer values ! ================================= allocate(Irandom(20000)) do i=1,size(Irandom) call rng_get(rrandom,Irandom(i)) end do ! ================================= ! Start time measurement for list ! ================================= call stat_clearTimer(rtimer) call stat_startTimer(rtimer) ! ================================= ! Create list ! ================================= call list_create(rlist, size(Irandom)) ! ================================= ! Insert items at the end ! ================================= do i=1,size(Irandom) #ifdef USE_LARGEINT call list_push_back(rlist,int(Irandom(i), I64)) #else call list_push_back(rlist,Irandom(i)) #endif end do ! ================================= ! Search for all items in list ! ================================= do i=1,size(Irandom) #ifdef USE_LARGEINT rlistIterator = list_find(rlist, int(Irandom(i), I64)) #else rlistIterator = list_find(rlist, Irandom(i)) #endif end do ! ================================= ! Release list ! ================================= call list_release(rlist) ! ================================= ! Stop time measurement ! ================================= call stat_stopTimer(rtimer) call output_line ("Total time for linked list: "& // trim(adjustl(sys_sd(rtimer%delapsedReal,10)))& // " seconds") ! ================================= ! Start time measurement for map ! ================================= call stat_clearTimer(rtimer) call stat_startTimer(rtimer) ! ================================= ! Create map ! ================================= call map_create(rmap, size(Irandom)) ! ================================= ! Insert items ! ================================= do i=1,size(Irandom) #ifdef USE_LARGEINT rmapIterator = map_insert(rmap,int(Irandom(i), I64)) #else rmapIterator = map_insert(rmap,Irandom(i)) #endif end do ! ================================= ! Search for all items in map ! ================================= do i=1,size(Irandom) #ifdef USE_LARGEINT rmapIterator = map_find(rmap, int(Irandom(i), I64)) #else rmapIterator = map_find(rmap, Irandom(i)) #endif end do ! ================================= ! Release map ! ================================= call map_release(rmap) ! ================================= ! Stop time measurement ! ================================= call stat_stopTimer(rtimer) call output_line ("Total time for map : "& // trim(adjustl(sys_sd(rtimer%delapsedReal,10)))& // " seconds") ! 
================================= ! Free memory ! ================================= deallocate(Irandom) end subroutine end module
State Before: α : Type u β : Type v ι : Sort w a : α s✝ s₁ s₂ t✝ : Set α p p₁ p₂ : α → Prop inst✝ : TopologicalSpace α s t : Set α h : IsOpen s ⊢ closure (s ∩ t)ᶜ ⊆ (s ∩ closure t)ᶜ State After: no goals Tactic: simpa only [← interior_compl, compl_inter] using IsClosed.interior_union_left h.isClosed_compl
/- Copyright (c) 2019 Floris van Doorn. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Floris van Doorn -/ import data.real.basic import data.set.finite import data.set.intervals.disjoint /-! Proof that a cube (in dimension n ≥ 3) cannot be cubed: There does not exist a partition of a cube into finitely many smaller cubes (at least two) of different sizes. We follow the proof described here: http://www.alaricstephen.com/main-featured/2017/9/28/cubing-a-cube-proof -/ open real set function fin noncomputable theory namespace «82» variable {n : ℕ} /-- Given three intervals `I, J, K` such that `J ⊂ I`, neither endpoint of `J` coincides with an endpoint of `I`, `¬ (K ⊆ J)` and `K` does not lie completely to the left nor completely to the right of `J`. Then `I ∩ K \ J` is nonempty. -/ lemma Ico_lemma {α} [linear_order α] {x₁ x₂ y₁ y₂ z₁ z₂ w : α} (h₁ : x₁ < y₁) (hy : y₁ < y₂) (h₂ : y₂ < x₂) (hz₁ : z₁ ≤ y₂) (hz₂ : y₁ ≤ z₂) (hw : w ∉ Ico y₁ y₂ ∧ w ∈ Ico z₁ z₂) : ∃w, w ∈ Ico x₁ x₂ ∧ w ∉ Ico y₁ y₂ ∧ w ∈ Ico z₁ z₂ := begin simp only [not_and, not_lt, mem_Ico] at hw, refine ⟨max x₁ (min w y₂), _, _, _⟩, { simp [le_refl, lt_trans h₁ (lt_trans hy h₂), h₂] }, { simp [hw, lt_irrefl, not_le_of_lt h₁] {contextual := tt} }, { simp [hw.2.1, hw.2.2, hz₁, lt_of_lt_of_le h₁ hz₂] at ⊢ } end /-- A (hyper)-cube (in standard orientation) is a vector `b` consisting of the bottom-left point of the cube, a width `w` and a proof that `w > 0`. We use functions from `fin n` to denote vectors. -/ structure cube (n : ℕ) : Type := (b : fin n → ℝ) -- bottom-left coordinate (w : ℝ) -- width (hw : 0 < w) namespace cube lemma hw' (c : cube n) : 0 ≤ c.w := le_of_lt c.hw /-- The j-th side of a cube is the half-open interval `[b j, b j + w)` -/ def side (c : cube n) (j : fin n) : set ℝ := Ico (c.b j) (c.b j + c.w) @[simp] lemma b_mem_side (c : cube n) (j : fin n) : c.b j ∈ c.side j := by simp [side, cube.hw, le_refl] def to_set (c : cube n) : set (fin n → ℝ) := { x | ∀j, x j ∈ side c j } lemma side_nonempty (c : cube n) (i : fin n) : (side c i).nonempty := by simp [side, c.hw] lemma univ_pi_side (c : cube n) : pi univ (side c) = c.to_set := ext $ λ x, mem_univ_pi lemma to_set_subset {c c' : cube n} : c.to_set ⊆ c'.to_set ↔ ∀j, c.side j ⊆ c'.side j := by simp only [← univ_pi_side, univ_pi_subset_univ_pi_iff, (c.side_nonempty _).ne_empty, exists_false, or_false] lemma to_set_disjoint {c c' : cube n} : disjoint c.to_set c'.to_set ↔ ∃ j, disjoint (c.side j) (c'.side j) := by simp only [← univ_pi_side, disjoint_univ_pi] lemma b_mem_to_set (c : cube n) : c.b ∈ c.to_set := by simp [to_set] protected def tail (c : cube (n+1)) : cube n := ⟨tail c.b, c.w, c.hw⟩ lemma side_tail (c : cube (n+1)) (j : fin n) : c.tail.side j = c.side j.succ := rfl def bottom (c : cube (n+1)) : set (fin (n+1) → ℝ) := { x | x 0 = c.b 0 ∧ tail x ∈ c.tail.to_set } lemma b_mem_bottom (c : cube (n+1)) : c.b ∈ c.bottom := by simp [bottom, to_set, side, cube.hw, le_refl, cube.tail] def xm (c : cube (n+1)) : ℝ := c.b 0 + c.w lemma b_lt_xm (c : cube (n+1)) : c.b 0 < c.xm := by simp [xm, hw] lemma b_ne_xm (c : cube (n+1)) : c.b 0 ≠ c.xm := ne_of_lt c.b_lt_xm def shift_up (c : cube (n+1)) : cube (n+1) := ⟨cons c.xm $ tail c.b, c.w, c.hw⟩ @[simp] lemma tail_shift_up (c : cube (n+1)) : c.shift_up.tail = c.tail := by simp [shift_up, cube.tail] @[simp] lemma head_shift_up (c : cube (n+1)) : c.shift_up.b 0 = c.xm := rfl def unit_cube : cube n := ⟨λ _, 0, 1, by norm_num⟩ @[simp] lemma side_unit_cube {j : fin n} : unit_cube.side j 
= Ico 0 1 := by norm_num [unit_cube, side] end cube open cube variables {ι : Type} {cs : ι → cube (n+1)} {i i' : ι} /-- A finite family of (at least 2) cubes partitioning the unit cube with different sizes -/ @[protect_proj] structure correct (cs : ι → cube n) : Prop := (pairwise_disjoint : pairwise (disjoint on (cube.to_set ∘ cs))) (Union_eq : (⋃(i : ι), (cs i).to_set) = unit_cube.to_set) (injective : injective (cube.w ∘ cs)) (three_le : 3 ≤ n) namespace correct variable (h : correct cs) include h lemma to_set_subset_unit_cube {i} : (cs i).to_set ⊆ unit_cube.to_set := h.Union_eq ▸ subset_Union _ i lemma side_subset {i j} : (cs i).side j ⊆ Ico 0 1 := by simpa only [side_unit_cube] using to_set_subset.1 h.to_set_subset_unit_cube j lemma zero_le_of_mem_side {i j x} (hx : x ∈ (cs i).side j) : 0 ≤ x := (side_subset h hx).1 lemma zero_le_of_mem {i p} (hp : p ∈ (cs i).to_set) (j) : 0 ≤ p j := zero_le_of_mem_side h (hp j) lemma zero_le_b {i j} : 0 ≤ (cs i).b j := zero_le_of_mem h (cs i).b_mem_to_set j lemma b_add_w_le_one {j} : (cs i).b j + (cs i).w ≤ 1 := by { have := side_subset h, rw [side, Ico_subset_Ico_iff] at this, convert this.2, simp [hw] } lemma nontrivial_fin : nontrivial (fin n) := fin.nontrivial_iff_two_le.2 (nat.le_of_succ_le_succ h.three_le) /-- The width of any cube in the partition cannot be 1. -/ lemma w_ne_one [nontrivial ι] (i : ι) : (cs i).w ≠ 1 := begin intro hi, cases exists_ne i with i' hi', let p := (cs i').b, have hp : p ∈ (cs i').to_set := (cs i').b_mem_to_set, have h2p : p ∈ (cs i).to_set, { intro j, split, transitivity (0 : ℝ), { rw [←add_le_add_iff_right (1 : ℝ)], convert b_add_w_le_one h, rw hi, rw zero_add }, apply zero_le_b h, apply lt_of_lt_of_le (side_subset h $ (cs i').b_mem_side j).2, simp [hi, zero_le_b h] }, exact (h.pairwise_disjoint hi').le_bot ⟨hp, h2p⟩ end /-- The top of a cube (which is the bottom of the cube shifted up by its width) must be covered by bottoms of (other) cubes in the family. -/ lemma shift_up_bottom_subset_bottoms (hc : (cs i).xm ≠ 1) : (cs i).shift_up.bottom ⊆ ⋃(i : ι), (cs i).bottom := begin intros p hp, cases hp with hp0 hps, rw [tail_shift_up] at hps, have : p ∈ (unit_cube : cube (n+1)).to_set, { simp only [to_set, forall_fin_succ, hp0, side_unit_cube, mem_set_of_eq, mem_Ico, head_shift_up], refine ⟨⟨_, _⟩, _⟩, { rw [←zero_add (0 : ℝ)], apply add_le_add, apply zero_le_b h, apply (cs i).hw' }, { exact lt_of_le_of_ne (b_add_w_le_one h) hc }, intro j, exact side_subset h (hps j) }, rw [← h.2, mem_Union] at this, rcases this with ⟨i', hi'⟩, rw [mem_Union], use i', refine ⟨_, λ j, hi' j.succ⟩, have : i ≠ i', { rintro rfl, apply not_le_of_lt (hi' 0).2, rw [hp0], refl }, have := h.1 this, rw [on_fun, to_set_disjoint, exists_fin_succ] at this, rcases this with h0|⟨j, hj⟩, rw [hp0], symmetry, apply eq_of_Ico_disjoint h0 (by simp [hw]) _, convert hi' 0, rw [hp0], refl, exfalso, apply not_disjoint_iff.mpr ⟨tail p j, hps j, hi' j.succ⟩ hj end end correct /-- A valley is a square on which cubes in the family of cubes are placed, so that the cubes completely cover the valley and none of those cubes is partially outside the square. We also require that no cube on it has the same size as the valley (so that there are at least two cubes on the valley). This is the main concept in the formalization. We prove that the smallest cube on a valley has another valley on the top of it, which gives an infinite sequence of cubes in the partition, which contradicts the finiteness. 
A valley is characterized by a cube `c` (which is not a cube in the family cs) by considering the bottom face of `c`. -/ def valley (cs : ι → cube (n+1)) (c : cube (n+1)) : Prop := c.bottom ⊆ (⋃(i : ι), (cs i).bottom) ∧ (∀i, (cs i).b 0 = c.b 0 → (∃x, x ∈ (cs i).tail.to_set ∩ c.tail.to_set) → (cs i).tail.to_set ⊆ c.tail.to_set) ∧ ∀(i : ι), (cs i).b 0 = c.b 0 → (cs i).w ≠ c.w variables {c : cube (n+1)} (h : correct cs) (v : valley cs c) /-- The bottom of the unit cube is a valley -/ lemma valley_unit_cube [nontrivial ι] (h : correct cs) : valley cs unit_cube := begin refine ⟨_, _, _⟩, { intro v, simp only [bottom, and_imp, mem_Union, mem_set_of_eq], intros h0 hv, have : v ∈ (unit_cube : cube (n+1)).to_set, { dsimp only [to_set, unit_cube, mem_set_of_eq], rw [forall_fin_succ, h0], split, norm_num [side, unit_cube], exact hv }, rw [← h.2, mem_Union] at this, rcases this with ⟨i, hi⟩, use i, split, { apply le_antisymm, rw h0, exact h.zero_le_b, exact (hi 0).1 }, intro j, exact hi _ }, { intros i hi h', rw to_set_subset, intro j, convert h.side_subset using 1, simp [side_tail] }, { intros i hi, exact h.w_ne_one i } end /-- the cubes which lie in the valley `c` -/ def bcubes (cs : ι → cube (n+1)) (c : cube (n+1)) : set ι := { i : ι | (cs i).b 0 = c.b 0 ∧ (cs i).tail.to_set ⊆ c.tail.to_set } /-- A cube which lies on the boundary of a valley in dimension `j` -/ def on_boundary (hi : i ∈ bcubes cs c) (j : fin n) : Prop := c.b j.succ = (cs i).b j.succ ∨ (cs i).b j.succ + (cs i).w = c.b j.succ + c.w lemma tail_sub (hi : i ∈ bcubes cs c) : ∀j, (cs i).tail.side j ⊆ c.tail.side j := by { rw [←to_set_subset], exact hi.2 } lemma bottom_mem_side (hi : i ∈ bcubes cs c) : c.b 0 ∈ (cs i).side 0 := by { convert b_mem_side (cs i) _ using 1, rw hi.1 } lemma b_le_b (hi : i ∈ bcubes cs c) (j : fin n) : c.b j.succ ≤ (cs i).b j.succ := (tail_sub hi j $ b_mem_side _ _).1 lemma t_le_t (hi : i ∈ bcubes cs c) (j : fin n) : (cs i).b j.succ + (cs i).w ≤ c.b j.succ + c.w := begin have h' := tail_sub hi j, dsimp only [side] at h', rw [Ico_subset_Ico_iff] at h', exact h'.2, simp [hw] end include h v /-- Every cube in the valley must be smaller than it -/ lemma w_lt_w (hi : i ∈ bcubes cs c) : (cs i).w < c.w := begin apply lt_of_le_of_ne _ (v.2.2 i hi.1), have j : fin n := ⟨1, nat.le_of_succ_le_succ h.three_le⟩, rw [←add_le_add_iff_left ((cs i).b j.succ)], apply le_trans (t_le_t hi j), rw [add_le_add_iff_right], apply b_le_b hi, end /-- There are at least two cubes in a valley -/ lemma nontrivial_bcubes : (bcubes cs c).nontrivial := begin rcases v.1 c.b_mem_bottom with ⟨_, ⟨i, rfl⟩, hi⟩, have h2i : i ∈ bcubes cs c := ⟨hi.1.symm, v.2.1 i hi.1.symm ⟨tail c.b, hi.2, λ j, c.b_mem_side j.succ⟩⟩, let j : fin (n+1) := ⟨2, h.three_le⟩, have hj : 0 ≠ j := by { simp only [fin.ext_iff, ne.def], contradiction }, let p : fin (n+1) → ℝ := λ j', if j' = j then c.b j + (cs i).w else c.b j', have hp : p ∈ c.bottom, { split, { simp only [bottom, p, if_neg hj] }, intro j', simp only [tail, side_tail], by_cases hj' : j'.succ = j, { simp [p, -add_comm, if_pos, side, hj', hw', w_lt_w h v h2i] }, { simp [p, -add_comm, if_neg hj'] }}, rcases v.1 hp with ⟨_, ⟨i', rfl⟩, hi'⟩, have h2i' : i' ∈ bcubes cs c := ⟨hi'.1.symm, v.2.1 i' hi'.1.symm ⟨tail p, hi'.2, hp.2⟩⟩, refine ⟨i, h2i, i', h2i', _⟩, rintro rfl, apply not_le_of_lt (hi'.2 ⟨1, nat.le_of_succ_le_succ h.three_le⟩).2, simp only [tail, cube.tail, p], rw [if_pos, add_le_add_iff_right], { exact (hi.2 _).1 }, refl end /-- There is a cube in the valley -/ lemma nonempty_bcubes : (bcubes cs c).nonempty := 
(nontrivial_bcubes h v).nonempty variables [finite ι] /-- There is a smallest cube in the valley -/ lemma exists_mi : ∃ i ∈ bcubes cs c, ∀ i' ∈ bcubes cs c, (cs i).w ≤ (cs i').w := (bcubes cs c).exists_min_image (λ i, (cs i).w) (set.to_finite _) (nonempty_bcubes h v) /-- We let `mi` be the (index for the) smallest cube in the valley `c` -/ def mi : ι := classical.some $ exists_mi h v variables {h v} lemma mi_mem_bcubes : mi h v ∈ bcubes cs c := (classical.some_spec $ exists_mi h v).fst lemma mi_minimal (hi : i ∈ bcubes cs c) : (cs $ mi h v).w ≤ (cs i).w := (classical.some_spec $ exists_mi h v).snd i hi lemma mi_strict_minimal (hii' : mi h v ≠ i) (hi : i ∈ bcubes cs c) : (cs $ mi h v).w < (cs i).w := (mi_minimal hi).lt_of_ne $ h.injective.ne hii' /-- The top of `mi` cannot be 1, since there is a larger cube in the valley -/ lemma mi_xm_ne_one : (cs $ mi h v).xm ≠ 1 := begin apply ne_of_lt, rcases (nontrivial_bcubes h v).exists_ne (mi h v) with ⟨i, hi, h2i⟩, apply lt_of_lt_of_le _ h.b_add_w_le_one, exact i, exact 0, rw [xm, mi_mem_bcubes.1, hi.1, _root_.add_lt_add_iff_left], exact mi_strict_minimal h2i.symm hi end /-- If `mi` lies on the boundary of the valley in dimension j, then this lemma expresses that all other cubes on the same boundary extend further from the boundary. More precisely, there is a j-th coordinate `x : ℝ` in the valley, but not in `mi`, such that every cube that shares a (particular) j-th coordinate with `mi` also contains j-th coordinate `x` -/ lemma smallest_on_boundary {j} (bi : on_boundary (mi_mem_bcubes : mi h v ∈ _) j) : ∃(x : ℝ), x ∈ c.side j.succ \ (cs $ mi h v).side j.succ ∧ ∀ ⦃i'⦄ (hi' : i' ∈ bcubes cs c), i' ≠ mi h v → (cs $ mi h v).b j.succ ∈ (cs i').side j.succ → x ∈ (cs i').side j.succ := begin let i := mi h v, have hi : i ∈ bcubes cs c := mi_mem_bcubes, cases bi, { refine ⟨(cs i).b j.succ + (cs i).w, ⟨_, _⟩, _⟩, { simp [side, bi, hw', w_lt_w h v hi] }, { intro h', simpa [i, lt_irrefl] using h'.2 }, intros i' hi' i'_i h2i', split, apply le_trans h2i'.1, { simp [hw'] }, apply lt_of_lt_of_le (add_lt_add_left (mi_strict_minimal i'_i.symm hi') _), simp [bi.symm, b_le_b hi'] }, let s := bcubes cs c \ { i }, have hs : s.nonempty, { rcases (nontrivial_bcubes h v).exists_ne i with ⟨i', hi', h2i'⟩, exact ⟨i', hi', h2i'⟩ }, rcases set.exists_min_image s (w ∘ cs) (set.to_finite _) hs with ⟨i', ⟨hi', h2i'⟩, h3i'⟩, rw [mem_singleton_iff] at h2i', let x := c.b j.succ + c.w - (cs i').w, have hx : x < (cs i).b j.succ, { dsimp only [x], rw [←bi, add_sub_assoc, add_lt_iff_neg_left, sub_lt_zero], apply mi_strict_minimal (ne.symm h2i') hi' }, refine ⟨x, ⟨_, _⟩, _⟩, { simp only [side, x, -add_comm, -add_assoc, neg_lt_zero, hw, add_lt_iff_neg_left, and_true, mem_Ico, sub_eq_add_neg], rw [add_assoc, le_add_iff_nonneg_right, ←sub_eq_add_neg, sub_nonneg], apply le_of_lt (w_lt_w h v hi') }, { simp only [side, not_and_distrib, not_lt, add_comm, not_le, mem_Ico], left, exact hx }, intros i'' hi'' h2i'' h3i'', split, swap, apply lt_trans hx h3i''.2, simp only [x], rw [le_sub_iff_add_le], refine le_trans _ (t_le_t hi'' j), rw [add_le_add_iff_left], apply h3i' i'' ⟨hi'', _⟩, simp [mem_singleton, h2i''] end variables (h v) /-- `mi` cannot lie on the boundary of the valley. Otherwise, the cube adjacent to it in the `j`-th direction will intersect one of the neighbouring cubes on the same boundary as `mi`. 
-/ lemma mi_not_on_boundary (j : fin n) : ¬on_boundary (mi_mem_bcubes : mi h v ∈ _) j := begin let i := mi h v, have hi : i ∈ bcubes cs c := mi_mem_bcubes, haveI := h.nontrivial_fin, rcases exists_ne j with ⟨j', hj'⟩, swap, intro hj, rcases smallest_on_boundary hj with ⟨x, ⟨hx, h2x⟩, h3x⟩, let p : fin (n+1) → ℝ := cons (c.b 0) (λ j₂, if j₂ = j then x else (cs i).b j₂.succ), have hp : p ∈ c.bottom, { suffices : ∀ (j' : fin n), ite (j' = j) x ((cs i).b j'.succ) ∈ c.side j'.succ, { simpa [bottom, p, to_set, tail, side_tail] }, intro j₂, by_cases hj₂ : j₂ = j, { simp [hj₂, hx] }, simp only [hj₂, if_false], apply tail_sub hi, apply b_mem_side }, rcases v.1 hp with ⟨_, ⟨i', rfl⟩, hi'⟩, have h2i' : i' ∈ bcubes cs c := ⟨hi'.1.symm, v.2.1 i' hi'.1.symm ⟨tail p, hi'.2, hp.2⟩⟩, have i_i' : i ≠ i', { rintro rfl, simpa [p, side_tail, i, h2x] using hi'.2 j }, have : nonempty ↥((cs i').tail.side j' \ (cs i).tail.side j'), { apply nonempty_Ico_sdiff, apply mi_strict_minimal i_i' h2i', apply hw }, rcases this with ⟨⟨x', hx'⟩⟩, let p' : fin (n+1) → ℝ := cons (c.b 0) (λ j₂, if j₂ = j' then x' else (cs i).b j₂.succ), have hp' : p' ∈ c.bottom, { suffices : ∀ (j : fin n), ite (j = j') x' ((cs i).b j.succ) ∈ c.side j.succ, { simpa [bottom, p', to_set, tail, side_tail] }, intro j₂, by_cases hj₂ : j₂ = j', simp [hj₂], apply tail_sub h2i', apply hx'.1, simp only [if_congr, if_false, hj₂], apply tail_sub hi, apply b_mem_side }, rcases v.1 hp' with ⟨_, ⟨i'', rfl⟩, hi''⟩, have h2i'' : i'' ∈ bcubes cs c := ⟨hi''.1.symm, v.2.1 i'' hi''.1.symm ⟨tail p', hi''.2, hp'.2⟩⟩, have i'_i'' : i' ≠ i'', { rintro ⟨⟩, have : (cs i).b ∈ (cs i').to_set, { simp only [to_set, forall_fin_succ, hi.1, bottom_mem_side h2i', true_and, mem_set_of_eq], intro j₂, by_cases hj₂ : j₂ = j, { simpa [side_tail, p', hj'.symm, hj₂] using hi''.2 j }, { simpa [hj₂] using hi'.2 j₂ } }, apply not_disjoint_iff.mpr ⟨(cs i).b, (cs i).b_mem_to_set, this⟩ (h.1 i_i') }, have i_i'' : i ≠ i'', { intro h, induction h, simpa [hx'.2] using hi''.2 j' }, apply not.elim _ (h.1 i'_i''), simp only [on_fun, to_set_disjoint, not_disjoint_iff, forall_fin_succ, not_exists, comp_app], refine ⟨⟨c.b 0, bottom_mem_side h2i', bottom_mem_side h2i''⟩, _⟩, intro j₂, by_cases hj₂ : j₂ = j, { cases hj₂, refine ⟨x, _, _⟩, { convert hi'.2 j, simp [p] }, apply h3x h2i'' i_i''.symm, convert hi''.2 j, simp [p', hj'.symm] }, by_cases h2j₂ : j₂ = j', { cases h2j₂, refine ⟨x', hx'.1, _⟩, convert hi''.2 j', simp }, refine ⟨(cs i).b j₂.succ, _, _⟩, { convert hi'.2 j₂, simp [hj₂] }, { convert hi''.2 j₂, simp [h2j₂] } end variables {h v} /-- The same result that `mi` cannot lie on the boundary of the valley written as inequalities. -/ lemma mi_not_on_boundary' (j : fin n) : c.tail.b j < (cs (mi h v)).tail.b j ∧ (cs (mi h v)).tail.b j + (cs (mi h v)).w < c.tail.b j + c.w := begin have := mi_not_on_boundary h v j, simp only [on_boundary, not_or_distrib] at this, cases this with h1 h2, split, apply lt_of_le_of_ne (b_le_b mi_mem_bcubes _) h1, apply lt_of_le_of_ne _ h2, apply ((Ico_subset_Ico_iff _).mp (tail_sub mi_mem_bcubes j)).2, simp [hw] end /-- The top of `mi` gives rise to a new valley, since the neighbouring cubes extend further upward than `mi`. 
-/ lemma valley_mi : valley cs ((cs (mi h v)).shift_up) := begin let i := mi h v, have hi : i ∈ bcubes cs c := mi_mem_bcubes, refine ⟨_, _, _⟩, { intro p, apply h.shift_up_bottom_subset_bottoms mi_xm_ne_one }, { rintros i' hi' ⟨p2, hp2, h2p2⟩, simp only [head_shift_up] at hi', classical, by_contra h2i', rw [tail_shift_up] at h2p2, simp only [not_subset, tail_shift_up] at h2i', rcases h2i' with ⟨p1, hp1, h2p1⟩, have : ∃p3, p3 ∈ (cs i').tail.to_set ∧ p3 ∉ (cs i).tail.to_set ∧ p3 ∈ c.tail.to_set, { simp only [to_set, not_forall, mem_set_of_eq] at h2p1, cases h2p1 with j hj, rcases Ico_lemma (mi_not_on_boundary' j).1 (by simp [hw]) (mi_not_on_boundary' j).2 (le_trans (hp2 j).1 $ le_of_lt (h2p2 j).2) (le_trans (h2p2 j).1 $ le_of_lt (hp2 j).2) ⟨hj, hp1 j⟩ with ⟨w, hw, h2w, h3w⟩, refine ⟨λ j', if j' = j then w else p2 j', _, _, _⟩, { intro j', by_cases h : j' = j, { simp only [if_pos h], convert h3w }, { simp only [if_neg h], exact hp2 j' } }, { simp only [to_set, not_forall, mem_set_of_eq], use j, rw [if_pos rfl], convert h2w }, { intro j', by_cases h : j' = j, { simp only [if_pos h, side_tail], convert hw }, { simp only [if_neg h], apply hi.2, apply h2p2 } } }, rcases this with ⟨p3, h1p3, h2p3, h3p3⟩, let p := @cons n (λ_, ℝ) (c.b 0) p3, have hp : p ∈ c.bottom, { refine ⟨rfl, _⟩, rwa [tail_cons] }, rcases v.1 hp with ⟨_, ⟨i'', rfl⟩, hi''⟩, have h2i'' : i'' ∈ bcubes cs c, { use hi''.1.symm, apply v.2.1 i'' hi''.1.symm, use tail p, split, exact hi''.2, rw [tail_cons], exact h3p3 }, have h3i'' : (cs i).w < (cs i'').w, { apply mi_strict_minimal _ h2i'', rintro rfl, apply h2p3, convert hi''.2, rw [tail_cons] }, let p' := @cons n (λ_, ℝ) (cs i).xm p3, have hp' : p' ∈ (cs i').to_set, { simpa [to_set, forall_fin_succ, p', hi'.symm] using h1p3 }, have h2p' : p' ∈ (cs i'').to_set, { simp only [to_set, forall_fin_succ, p', cons_succ, cons_zero, mem_set_of_eq], refine ⟨_, by simpa [to_set, p] using hi''.2⟩, have : (cs i).b 0 = (cs i'').b 0, { rw [hi.1, h2i''.1] }, simp [side, hw', xm, this, h3i''] }, apply not_disjoint_iff.mpr ⟨p', hp', h2p'⟩, apply h.1, rintro rfl, apply (cs i).b_ne_xm, rw [←hi', ←hi''.1, hi.1], refl }, { intros i' hi' h2i', dsimp only [shift_up] at h2i', replace h2i' := h.injective h2i'.symm, induction h2i', exact b_ne_xm (cs i) hi' } end variables (h) [nontrivial ι] omit v /-- We get a sequence of cubes whose size is decreasing -/ noncomputable def sequence_of_cubes : ℕ → { i : ι // valley cs ((cs i).shift_up) } | 0 := let v := valley_unit_cube h in ⟨mi h v, valley_mi⟩ | (k+1) := let v := (sequence_of_cubes k).2 in ⟨mi h v, valley_mi⟩ def decreasing_sequence (k : ℕ) : ℝ := (cs (sequence_of_cubes h k).1).w lemma strict_anti_sequence_of_cubes : strict_anti $ decreasing_sequence h := strict_anti_nat_of_succ_lt $ λ k, begin let v := (sequence_of_cubes h k).2, dsimp only [decreasing_sequence, sequence_of_cubes], apply w_lt_w h v (mi_mem_bcubes : mi h v ∈ _), end lemma injective_sequence_of_cubes : injective (sequence_of_cubes h) := @injective.of_comp _ _ _ (λ x : {i : ι // _}, (cs x.1).w) _ (strict_anti_sequence_of_cubes h).injective omit h /-- The infinite sequence of cubes contradicts the finiteness of the family. -/ theorem not_correct : ¬correct cs := λ h, (finite.of_injective _ $ injective_sequence_of_cubes h).false /-- **Dissection of Cubes**: A cube cannot be cubed. 
-/ theorem cannot_cube_a_cube : ∀ {n : ℕ}, n ≥ 3 → -- In ℝ^n for n ≥ 3 ∀ {s : set (cube n)}, s.finite → -- given a finite collection of (hyper)cubes s.nontrivial → -- containing at least two elements s.pairwise_disjoint cube.to_set → -- which is pairwise disjoint (⋃ c ∈ s, cube.to_set c) = unit_cube.to_set → -- whose union is the unit cube inj_on cube.w s → -- such that the widths of all cubes are different false := -- then we can derive a contradiction begin intros n hn s hfin h2 hd hU hinj, cases n, { cases hn }, exact @not_correct n s coe hfin.to_subtype h2.coe_sort ⟨hd.subtype _ _, (Union_subtype _ _).trans hU, hinj.injective, hn⟩ end end «82»
Harley Davidson Gas Tanks - stretched gas tanks for Harley motorcycles and other custom applications. This section contains a wide selection of Harley Davidson gas tanks. Get that stretched look and aggressive styling with a new custom gas tank.
State Before: α : Type u_1 β : Type u_2 γ : Type ?u.12527 δ : Type ?u.12530 s s₁ s₂ : Set α t t₁ t₂ : Set β a : α b : β ⊢ (s₁ ∩ s₂) ×ˢ t = s₁ ×ˢ t ∩ s₂ ×ˢ t State After: case h.mk α : Type u_1 β : Type u_2 γ : Type ?u.12527 δ : Type ?u.12530 s s₁ s₂ : Set α t t₁ t₂ : Set β a : α b : β x : α y : β ⊢ (x, y) ∈ (s₁ ∩ s₂) ×ˢ t ↔ (x, y) ∈ s₁ ×ˢ t ∩ s₂ ×ˢ t Tactic: ext ⟨x, y⟩ State Before: case h.mk α : Type u_1 β : Type u_2 γ : Type ?u.12527 δ : Type ?u.12530 s s₁ s₂ : Set α t t₁ t₂ : Set β a : α b : β x : α y : β ⊢ (x, y) ∈ (s₁ ∩ s₂) ×ˢ t ↔ (x, y) ∈ s₁ ×ˢ t ∩ s₂ ×ˢ t State After: no goals Tactic: simp only [← and_and_right, mem_inter_iff, mem_prod]
# Multivariate Calculus

Author: Vo, Huynh Quang Nguyen

```python
import numpy as np
import sympy
import matplotlib.pyplot as plt
```

## Acknowledgements:

The contents of this note are based on the lecture notes and the materials from the following sources:

1. _Mathematics for Machine Learning_ specialization given by Prof. David Dye, Dr. Samuel J. Cooper, and Dr. A. Freddie Page from Imperial College London. Available on Coursera: [Mathematics for Machine Learning specialization](https://www.coursera.org/specializations/mathematics-machine-learning)
2. _Mathematics for Machine Learning_ textbook by Prof. Marc Peter Deisenroth, Prof. A. Aldo Faisal, and Prof. Cheng Soon Ong. The book is freely available at: [Mathematics for Machine Learning textbook](https://mml-book.github.io/)
3. _Essential Math for Data Science in 6 Weeks_ webinar given by Dr. Thomas Nield. Available in O'Reilly Learning: [Essential Math for Data Science in 6 Weeks](https://learning.oreilly.com/attend/essential-math-for-data-science-in-6-weeks/0636920055929/0636920055928/)
4. _The Matrix Calculus You Need For Deep Learning_ leaflet by Prof. Terence Parr and Dr. Jeremy Howard from University of San Francisco. Available at arXiv.org: [The Matrix Calculus You Need For Deep Learning](https://arxiv.org/abs/1802.01528)
5. _MIT 8.02t Electricity and Magnetism_ course given by Dr. Eric Hudson, Dr. George Stephans, Prof. John Belcher, Prof. John Joannopoulos, Prof. Michael Feld, and Dr. Peter Dourmashkin from MIT.

## Table of Contents

1. [Introduction to Multivariate Calculus](#Section1)
2. [Derivatives](#Section2)
3. [Vector Calculus](#Section3)
4. [Integrals](#Section4)
5. [Appendix](#Section5)

## I. Introduction to Multivariate Calculus <a name = 'Section1'></a>

### 1. Why do we have to learn?

Multivariate calculus plays a critical role in machine learning. If we pick up any machine learning paper or the documentation of any library such as PyTorch, the first thing we see is calculus. Therefore, if we want to understand machine learning, we must understand calculus.

### 2. Applications of Multivariate Calculus

Together with linear algebra, calculus serves as another pillar in many scientific fields. Below are selected examples of how it is applied in machine learning and data science.

Figure 1: Visualization of gradient descent. This algorithm is an optimization method to find a local minimum/maximum; thus, it is highly popular due to the need to optimize thousands and even millions of machine learning variables, like neural networks and their node weight values.

Figure 2: Visualization of finding areas under probability distributions to get probabilities, which is a common use case for integration in data science (adapted from **Digital Underwater Acoustic Communication Equipment** by Tianzeng Xu et al.).

## II. Derivatives

### 1. What are derivatives:

1. According to the formal definition, the derivative of a function $f$ at $x = a$ is

$$ \lim_{h \to 0} \frac{f(a + h) - f(a)}{h} $$

provided the limit exists.

2. We can simply understand that a derivative tells us the slope of a function $f(x)$ at a designated point $x = a$. Therefore, it is useful for measuring the rate of change at any point in a function. When the slope is 0, that means we are at a minimum or maximum of the function.

3. As an example, here is how we find the slope of the function $f(x) = x^2$ at $x = 2$:
   * We can measure "steepness" at any point in the curve, and we can visualize this with a tangent line.
   * A tangent line is a straight line that "just touches" the curve at a given $x$ value, and it also provides the slope at that point.
   * We can estimate this slope using the following steps:

***

* Take $x = 2$ and a nearby value $x = 2.1$.
* When passed to the function $f(x) = x^2$, these two x-values yield the following values:

$$ f(2) = (2)^2 = 4 $$

$$ f(2.1) = (2.1)^2 = 4.41 $$

* From here, we can estimate the slope between these two points using the simple rise-over-run formula:

$$ m = \frac{y_2 - y_1}{x_2 - x_1} = \frac{4.41 - 4}{2.1 - 2} = 4.1 $$

* Therefore, the resulting line that passes through these two points has a slope of 4.1. We can increase the accuracy of our slope estimation by selecting a point (for example, $x = 2.000001$) that is infinitely close to $x = 2$. Thus, the slope is:

$$ m = \frac{y_2 - y_1}{x_2 - x_1} = 4.000001 \approx 4 $$

***

Figure 1: An example of finding the slope of a function at a designated point by estimation. This method of estimation is widely used if we cannot compute the exact derivative of our function of interest.

4. In practice, we compute the slope by taking the derivative $\frac{d}{dx}$ of our function $f(x)$.

5. Note: the **derivative** describes the rate of change of a function with respect to changes in its variables. **Differentiation** describes the process of finding derivatives.

### 2. What are partial derivatives:

1. Partial derivatives are derivatives of functions that have multiple input variables. Because there is more than one input variable, such a function has slopes in more than one direction when plotted.

2. Logically, if there are multiple slopes then we can have multiple derivatives, one for each variable. Rather than finding the slope of a 1-dimensional function, we have slopes with respect to multiple variables in several directions.

3. As an example, consider the function $f(x,y) = 2x^3 + 3y^3$. We can compute the partial derivatives of this function as follows:

$$ \frac{\partial f(x,y)}{\partial x} = 6x^2 + 0 $$

$$ \frac{\partial f(x,y)}{\partial y} = 0 + 9y^2 $$

* At the point $(x,y) = (1,2)$, the slope with respect to x and the slope with respect to y are computed as follows:

$$ \frac{\partial f(x,y)}{\partial x} = 6(1^2) = 6 $$

$$ \frac{\partial f(x,y)}{\partial y} = 9(2)^2 = 36 $$

* Therefore, the slope with respect to $x$ is 6, and the slope with respect to $y$ is 36.

4. Note: the function $f(x,y) = 2x^3 + 3y^3$ can be denoted as $f(\mathbf{x}) = 2x_1^3 + 3x_2^3$ with $\mathbf{x} = [x_1, x_2]$.

### 3. Basic rules of differentiation:

Below are the basic rules of differentiation:

1. Product rule:

$$ (f(x)g(x))' = f'(x)g(x) + f(x)g'(x) $$

2. Quotient rule:

$$ \left(\frac{f(x)}{g(x)}\right)' = \frac{f'(x)g(x) - f(x)g'(x)}{(g(x))^2} $$

3. Sum rule:

$$ (f(x) + g(x))' = f'(x) + g'(x) $$

4. Chain rule:

$$ (g(f(x)))' = g'(f(x))f'(x) $$

The last rule is crucial because it is the backbone of the backpropagation algorithm in machine learning.

Figure 2: Application of the chain rule: the backpropagation algorithm. This algorithm describes how to find the influence of a certain input on systems that are composed of multiple functions (adapted from **Hands-On Convolutional Neural Networks with TensorFlow** by Zafar et al.).
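As a quick, added check of the chain rule (the composition $\sin(x^2)$ below is just an illustrative choice, not one of the note's original examples), we can differentiate a composite function directly with `sympy` and rebuild the same derivative from $g'(f(x))f'(x)$:

```python
from sympy import symbols, diff, sin, simplify

x, u = symbols('x u')

f = x**2          # inner function f(x)
g_of_f = sin(f)   # composition g(f(x)) with g(u) = sin(u)

direct = diff(g_of_f, x)                          # differentiate the composition directly
manual = diff(sin(u), u).subs(u, f) * diff(f, x)  # g'(f(x)) * f'(x)

print(direct)                     # 2*x*cos(x**2)
print(simplify(direct - manual))  # 0, i.e. both expressions agree
```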
f = x**2 df = diff(f,x) slope = df.subs(x,2) print(slope) ## # DEMO: FIND THE PARTIAL DERIVATIVE # from sympy import * from sympy.plotting import plot3d x,y = symbols('x y') f = 2*x**3 + 3*y**3 dx_f = diff(f, x) dy_f = diff(f, y) print(dx_f) print(dy_f) ``` 4 6*x**2 9*y**2 # II. Vector Calculus ## 1. Basic components of vector calculus ### a) Field A field is a function that has a different value at every point in space. Technically speaking, there are two popular types of fields: scalar field, and vector field. #### Scalar field 1. A scalar field is a function associates a scalar value to every point in space. In principle, a scalar field provides values not only on a two-dimensional surface in space but for every point in space. To represent three-dimensional scalar fields, a common practice is to create a three-dimensional atmospheric volume element and colour it to represent the temperature variation. 2. One example of a scalar field is this expression, and it defines the value of the scalar function $\phi$ at every point $(x,y,z)$ in space.: $$ \phi(x,y,z) = \frac{1}{\sqrt{x^2 + y^2 + z^2}} - \frac{1}{\sqrt{x^2 + y^2 - z^2}} $$ 3. Noted: the function $\phi(x,y,z)$ can be denoted as $\phi(\mathbf{x})$ with $\mathbf{x} = [x_1,x_2,x_3]$. This is the function that maps a space $\mathbb{R}^3$ to a new space $\mathbb{R}$ because for every point $(x,y,z)$ in a 3-D space, there is one point $\phi$ in a 1-D space. In pratice, a scalar-valued function $f(\mathbf{x})$ maps $\mathbb{R}^m → \mathbb{R}$ with $\mathbf{x} = [x_1,...,x_m]$ and $\mathbf{f} = f_1$. $$ $$ <div> </div> Figure 1: An example of a scalar field: the night-time temperatures measured by the Thermal Emission Spectrometer instrument on the Mars Global Surveyor (MGS). The various colours on the map represent the surface temperature. This map, however, is limited to representing only the temperature on a two-dimensional surface and thus, it does not show how temperature varies as a function of altitude. 4. To visualize a scalar field, we use the following approaches: * **Contour Maps**: in this approach, we fix one of our independent variables (z, for example) and then show a contour map for the two remaining dimensions, in which the curves represent lines of constant values of our function-under-interest. A series of these maps for various (fixed) values of z will give a feel for the properties of the scalar function. * **Color Coding**: in this approach, we represent the values of the scalar field is by colour-coding in two dimensions for a fixed value of the third. An example is the night-time temperatures map we see above. * **Relief Maps**: in this approach, we represent a scalar field by fixing one of the dimensions (z, for example), and then plotting the value of the function as a height versus the remaining spatial coordinates, say x and y. <div> </div> Figure 2: Methods of visualizing a scalar field: using Contour Maps (a); using Color Coding (b); using Relief Maps (c). #### Vector field 1. Recall that a vector is a quantity that has both a magnitude (scalar) and a direction in space. In practice, we use vectors to physical quantities such as velocity, momentum, acceleration and force, associated with an object. However, when we try to describe a system that consists of a large number of objects, we need to assign a vector to each object. 2. As an example, let's consider falling snowflakes whereas snow falls, each snowflake moves in a specific direction. 
Here, we can assign to each snowflake a velocity vector that characterizes its movement. The falling snowflakes above are an example of a collection of discrete bodies. On the other hand, if we try to analyze the motion of continuous bodies such as fluids or flows, a velocity vector then needs to be assigned to every point in the fluid at any instant in time.

3. The result of this assignment is a vector field that represents both a magnitude (scalar) and a direction at every point in space. Thus, the distinction between a vector field and a scalar field is that the former contains information about both the direction and the magnitude at every point in space, while only a single variable is specified for the latter.

4. One example of a vector field is this expression:

$$ \vec{F}(x,y,z) = F_x(x,y,z)\hat{i} + F_y(x,y,z)\hat{j} + F_z(x,y,z)\hat{k} $$

where the components are scalar fields. Note: the function $\vec{F}(x,y,z)$ can be denoted as $\mathbf{F}(\mathbf{x})$ with $\mathbf{x} = [x_1,x_2,x_3]$ and $\mathbf{F} = [f_1,f_2,f_3]$. This is a function that maps the space $\mathbb{R}^3$ to a new space $\mathbb{R}^3$, because for every point $(x,y,z)$ in 3-D space, there is one point $(F_x, F_y, F_z)$ in 3-D space. In practice, a vector-valued function $\mathbf{F}$ maps $\mathbb{R}^m → \mathbb{R}^n$ with $\mathbf{x} = [x_1,...,x_m]$ and $\mathbf{F} = [f_1,...,f_n]$.

Figure 3: An example of a vector field: a scenario of the variation of the jet stream, which is the wind velocity as a function of position. The “streamlines” are formed by joining arrows from head to tail.

5. To visualize a vector field, we use the following approaches:
   * **Quiver**: in this approach, we put arrows representing the field direction. The direction of the arrow at a given location represents the direction of the vector field at that point. In many cases, we also make the length of the vector proportional to the magnitude of the vector field at that point. However, we may also show only the direction with the vectors (that is, make all vectors the same length) and colour-code the arrows according to the magnitude of the vector.
   * **Field Line**: in this approach, we first start at any point in space and move a very short distance in the direction of the local vector field, drawing a line as we do so. After that short distance, we stop, find the new direction of the local vector field at the point where we stopped, and begin moving again in that new direction. We continue this process indefinitely. Thereby we construct a line in space that is everywhere tangent to the local vector field.

Figure 4: Methods of visualizing a vector field: using Quiver (a); using Field Line (b).

### b) Gradient

1. Consider a multivariable scalar function $f(x_1,x_2,x_3,...,x_n)$; its gradient is a vector-valued function that stores all the partial derivative information of this function:

$$ \nabla f = [\frac{\partial f}{\partial x_1}, \frac{\partial f}{\partial x_2},\frac{\partial f}{\partial x_3},...,\frac{\partial f}{\partial x_n} ] $$

2. Imagine that we are standing at a point $(x_1,x_2,x_3,...,x_n)$ in the space created by the function $f(x_1,x_2,x_3,...,x_n)$; the gradient $\nabla f$ tells us which direction we should travel to increase or decrease the value of $f(x_1,x_2,x_3,...,x_n)$ most rapidly.

3. As mentioned above, the gradient is a vector-valued function, which means we can consider it as a vector field and visualize it as such.

### c) Divergence
Consider a vector-valued function $\vec{F} = (F_1,F_2,F_3,...,F_n)$ that defines a vector field; the divergence is an operator that takes in the vector-valued function and outputs a scalar-valued function measuring how much the vector field is expanding or contracting at each point.

$$
\nabla \bullet \vec{F} = \frac{\partial F_1}{\partial x_1} + \frac{\partial F_2}{\partial x_2} + \frac{\partial F_3}{\partial x_3} + ... + \frac{\partial F_n}{\partial x_n}
$$

2. For example, consider the following vector field $\vec{F} = (2x−y)\hat{i} + (y^2)\hat{j} = \begin{bmatrix} 2x - y \\ y^2 \end{bmatrix}$; we can compute the divergence of $\vec{F}$ as:

$$
\nabla \bullet \vec{F} = \begin{bmatrix} \frac{\partial}{\partial x} \\ \frac{\partial}{\partial y} \end{bmatrix} \bullet \begin{bmatrix} 2x - y \\ y^2 \end{bmatrix} = 2 + 2y
$$

3. The result tells us that at each point in the vector field, the expansion or contraction obeys the scalar-valued function $\nabla \bullet \vec{F} = 2 + 2y$.

4. The divergence at a point $(x_0, y_0, z_0)$ has the following properties:
   * $\nabla \bullet \vec{F} > 0$: at this point, the vector field is flowing outwards, making it less dense (expanding). Thus, the point $(x_0, y_0, z_0)$ can be considered a source.
   * $\nabla \bullet \vec{F} < 0$: at this point, the vector field is flowing inwards, making it denser (contracting). Thus, the point $(x_0, y_0, z_0)$ can be considered a sink.
   * $\nabla \bullet \vec{F} = 0$: at this point, the vector field stays constant. Thus, the point $(x_0, y_0, z_0)$ is divergence-free.

<div> </div>

Figure 5: Visualization of a divergence evaluated at a designated point: the value of the divergence indicates whether the vector field is expanding, contracting or remains constant.

### d) Curl

1. Consider a vector-valued function $\vec{F} = (F_1,F_2,F_3,...,F_n)$ that defines a vector field; a **curl** is an operator that takes in the vector-valued function and outputs a new function measuring the rotation of the vector field at each point (a scalar in 2-D, a vector in 3-D).

2. As an example, let's consider a 2-D and a 3-D vector field, respectively: $\vec{v} = v_1(x,y)\hat{i} + v_2(x,y)\hat{j}$ and $\vec{w} = w_1(x,y,z)\hat{i} + w_2(x,y,z)\hat{j} + w_3(x,y,z)\hat{k}$. We compute the curl of these vector fields as follows:

$$
\nabla \times \vec{v} = det(\begin{bmatrix} i & j \\ \frac{\partial}{\partial x} & \frac{\partial}{\partial y} \\ v_1 & v_2 \end{bmatrix}) = \frac{\partial v_2}{\partial x} - \frac{\partial v_1}{\partial y}
$$

$$
\nabla \times \vec{w} = det(\begin{bmatrix} i & j & k \\ \frac{\partial}{\partial x} & \frac{\partial}{\partial y} & \frac{\partial}{\partial z} \\ w_1 & w_2 & w_3 \end{bmatrix}) = (\frac{\partial w_3}{\partial y} - \frac{\partial w_2}{\partial z})\hat{i} + (\frac{\partial w_1}{\partial z} - \frac{\partial w_3}{\partial x})\hat{j} + (\frac{\partial w_2}{\partial x} - \frac{\partial w_1}{\partial y})\hat{k}
$$

3. The curl at a point $(x_0, y_0, z_0)$ has the following properties (stated here for the 2-D case, where the curl is a scalar):
   * $\nabla \times \vec{F} > 0$: at this point, the vector field is rotating counter-clockwise (right-hand curl).
   * $\nabla \times \vec{F} < 0$: at this point, the vector field is rotating clockwise (left-hand curl).
   * $\nabla \times \vec{F} = 0$: at this point, the vector field is irrotational.

<div> </div>

Figure 6: Visualization of a curl evaluated at a designated point: the value of the curl indicates whether the vector field is right-hand curl (RHC), left-hand curl (LHC) or irrotational.
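To make these two operators concrete, here is a small SymPy sketch in the style of the demos below (a minimal example: the 2-D field is the divergence example above embedded in 3-D, while the field `w` used for the curl is made up for illustration):

```python
##
# DEMO: COMPUTING DIVERGENCE AND CURL WITH SYMPY
# (a minimal sketch; the field `w` below is a made-up example)
#
from sympy.vector import CoordSys3D, divergence, curl

R = CoordSys3D('R')

# The divergence example from above, embedded in 3-D with a zero k-component
F = (2*R.x - R.y)*R.i + R.y**2*R.j
print(divergence(F))   # 2*R.y + 2

# A made-up rotating field, to show a non-trivial curl
w = R.y*R.i - R.x*R.j
print(curl(w))         # (-2)*R.k
```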
```python
from sympy import *
from sympy.plotting import plot3d
import numpy as np
import matplotlib.pyplot as plt

x,y = symbols('x y')

##
# DEMO: VISUALIZING A SCALAR FIELD
#
f = 2*x**3 + 3*y**3
plot3d(f) # We use this function to create a 3-D plot

x1 = np.linspace(-10,10,1000)
x2 = np.linspace(-10,10,1000)
X1, X2 = np.meshgrid(x1,x2)
Y = 2*X1**3 + 3*X2**3

fig, axs = plt.subplots(1,3, figsize = (10,6)) # We use this function to create a 1x3 grid of subplots
###
cm = plt.cm.get_cmap('viridis')
a = axs[0].scatter(X1,X2, c = Y, cmap = cm) # We use this function to create a heatmap plot
###
b = axs[1].contour(X1,X2,Y) # We use this function to create a contour plot
plt.clabel(b, inline = 1, fontsize = 10)
###
c = axs[2].contourf(X1,X2,Y) # We use this function to create a color coding plot
plt.clabel(c, inline = 1, fontsize = 10)
###
for ax in axs.flat:
    ax.set(xlabel = 'x', ylabel = 'y')
###
fig.tight_layout(pad = 3.0)
plt.show()

##
# DEMO: FINDING A GRADIENT OF A SCALAR FIELD
#
from sympy.vector import CoordSys3D, Del

R = CoordSys3D("R") # We use this function to define a frame R.
delop = Del() # We use this function to define the nabla operator
###
f = 1 / (R.x ** 2 + R.y ** 2 + R.z ** 2) - 1 / (R.x ** 2 + R.y ** 2 - R.z ** 2)
res = delop.gradient(f, doit = True) # We use this function to apply the nabla operator
print(res)
```

```python
##
# DEMO: FINDING A GRADIENT OF A SCALAR FIELD EVALUATED AT A POINT (x_0,y_0,z_0)
#
from sympy import symbols, diff, Matrix

x, y, z = symbols('x y z')
f = 1/(x**2 + y**2 + z**2) - 1/(x**2 + y**2 - z**2)
###
dx = diff(f,x)
dy = diff(f,y)
dz = diff(f,z)
grad_f = Matrix([dx, dy, dz])
grad_f.subs({x: 1, y: 1, z: 0}) # evaluate at an arbitrary sample point (chosen for illustration)
```

## 2. Jacobian

1. Consider a scalar-valued function $f(\mathbf{x})$ that maps a space $\mathbb{R}^n$ to a new one $\mathbb{R}$, where $\mathbf{x} = [x_1,x_2,..,x_n]$; we can say the gradient is a transformation of $f(\mathbf{x})$:

$$
\nabla f(\mathbf{x}) = [\frac{\partial f(\mathbf{x})}{\partial x_1}, \frac{\partial f(\mathbf{x})}{\partial x_2}, ..., \frac{\partial f(\mathbf{x})}{\partial x_n}]
$$

2. From this knowledge, let's consider a vector-valued function $\mathbf{f}(\mathbf{x})$ that maps the space $\mathbb{R}^n$ to a new space $\mathbb{R}^m$, where $\mathbf{f} = [f_1,f_2,...,f_m]$ and $\mathbf{x} = [x_1, x_2,...,x_n]$. The so-called Jacobian is a transformation of $\mathbf{f}(\mathbf{x})$:

$$
\mathbb{J}_f = \frac{\partial (f_1, f_2,..., f_m)}{\partial (x_1,x_2,...,x_n)} = \begin{bmatrix} \frac{\partial f_1}{\partial x_1} & ... & \frac{\partial f_1}{\partial x_n} \\ ... & ... & ... \\ \frac{\partial f_m}{\partial x_1} & ... & \frac{\partial f_m}{\partial x_n} \end{bmatrix}
$$

3. In other words, the Jacobian matrix is the generalization of the gradient: the Jacobian matrix is the same as the gradient when $m = 1$.

4. The determinant of the Jacobian matrix is typically used to describe changes of variables, because it can be viewed as the ratio of an infinitesimal change in the variables of one coordinate system to another. For this to work, the vector-valued function $\mathbf{f}(\mathbf{x})$ must map the space $\mathbb{R}^n$ to a new space $\mathbb{R}^n$, which means no change in the dimension.

## 3. Hessian

1. Consider the same scalar-valued function $f(\mathbf{x})$ that maps a space $\mathbb{R}^n$ to a new one $\mathbb{R}$, where $\mathbf{x} = [x_1,x_2,..,x_n]$; we now differentiate its gradient once more. Taking the divergence of the gradient gives the sum of second derivatives (the Laplacian):

$$
\nabla \bullet (\nabla f(\mathbf{x})) = \frac{\partial^2 f(\mathbf{x})}{\partial x_1^2} + \frac{\partial^2 f(\mathbf{x})}{\partial x_2^2} + ... + \frac{\partial^2 f(\mathbf{x})}{\partial x_n^2}
$$

2. 
From this knowledge, keeping the second partial derivatives separate instead of summing them, the so-called Hessian is the Jacobian of the gradient, i.e. the matrix of all second-order partial derivatives of $f$:

$$
\mathbb{H}_f = \mathbb{J}(\nabla f(\mathbf{x})) = \begin{bmatrix} \frac{\partial^2 f}{\partial x_1^2} & \frac{\partial^2 f}{\partial x_1 \partial x_2} & ... & \frac{\partial^2 f}{\partial x_1 \partial x_n} \\ \frac{\partial^2 f}{\partial x_2 \partial x_1} & \frac{\partial^2 f}{\partial x_2^2} & ... & \frac{\partial^2 f}{\partial x_2 \partial x_n} \\ ... & ... & ... & ... \\ \frac{\partial^2 f}{\partial x_n \partial x_1} & \frac{\partial^2 f}{\partial x_n \partial x_2} & ... & \frac{\partial^2 f}{\partial x_n^2} \end{bmatrix}
$$

(For a vector-valued function $\mathbf{f} = [f_1,f_2,...,f_m]$, each component $f_i$ has its own Hessian of this form.)

## 4. Gradient Descent

1. So why do we have to care about the Gradient, Jacobian and Hessian? Because they are the backbone of the gradient descent algorithm used in machine learning.

2. Consider a simple machine learning model that takes many input variables $X = [x_1, x_2, x_3,...,x_n]$ (our data) and outputs a set of decisions $Y = [y_1, y_2, y_3,...,y_n]$; we define the loss function $\mathbb{L}$ as the difference between the model output and the ground truth $\hat{Y} = [\hat{y}_1, \hat{y}_2, \hat{y}_3,...,\hat{y}_n]$.

$$
\mathbb{L} = |\hat{Y} - Y|
$$

3. We also know the relationship between the model output and the input variables can be simply described as:

$$
Y = W\bullet X + B = [w_1x_1 + b_1, w_2x_2 + b_2,...,w_nx_n + b_n]
$$

4. To train the model, we need to optimize our loss function $\mathbb{L}$ by finding its global minimum, at which the weights $W$ are the most optimal.

Figure 1: Visualization of our multivariate loss function on the input space.

5. To do so, we first compute the gradient of our loss function, because the gradient gives us the direction of the steepest increase.

Figure 2: Visualization of the gradient of our loss function. As mentioned above, the gradient gives us the direction of the steepest increase, which is illustrated here as a ball rolling upwards from the global minimum.

6. By taking the negative of the gradient, we now know the direction of the steepest decrease.

Figure 3: Visualization of the gradient descent. Armed with the knowledge of which direction to go, we can optimize our loss function. After a number of training passes, usually referred to as epochs, we manage to find where our weights are most optimal.
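To ground the idea, here is a minimal gradient-descent loop in plain NumPy (a toy one-parameter least-squares problem; the data, learning rate and epoch count are invented for illustration, not part of the model above):

```python
##
# DEMO: A MINIMAL GRADIENT DESCENT LOOP
# (a toy example: fit y = w*x to made-up data by least squares)
#
import numpy as np

X = np.array([1.0, 2.0, 3.0, 4.0])
Y_true = 2.0 * X                  # ground truth generated with w = 2

w = 0.0                           # initial guess for the weight
lr = 0.01                         # learning rate
for epoch in range(500):
    Y_pred = w * X
    grad = 2 * np.mean((Y_pred - Y_true) * X)  # d/dw of the mean squared error
    w -= lr * grad                # step against the gradient
print(w)                          # converges towards 2.0
```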
```python
##
# DEMO: FINDING THE JACOBIAN OF A VECTOR FUNCTION
#
from sympy import symbols, Matrix, exp

x1, x2 = symbols('x1 x2')
X = Matrix([x1,x2])
f = Matrix([-x1*x2*exp(-(x1**2 + x2**2)/2)])
f.jacobian(X)
```

$\displaystyle \left[\begin{matrix}x_{1}^{2} x_{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} - x_{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} & x_{1} x_{2}^{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} - x_{1} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}}\end{matrix}\right]$

```python
##
# DEMO: FINDING THE HESSIAN OF A VECTOR FUNCTION
#
from sympy import hessian

hessianf = hessian(f,X)
hessianf
```

$\displaystyle \left[\begin{matrix}- x_{1}^{3} x_{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} + 3 x_{1} x_{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} & - x_{1}^{2} x_{2}^{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} + x_{1}^{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} + x_{2}^{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} - e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}}\\- x_{1}^{2} x_{2}^{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} + x_{1}^{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} + x_{2}^{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} - e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} & - x_{1} x_{2}^{3} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}} + 3 x_{1} x_{2} e^{- \frac{x_{1}^{2}}{2} - \frac{x_{2}^{2}}{2}}\end{matrix}\right]$

# IV. Integrals <a name = "Practice4"></a>

## 1. What is an integral?

1. An integral is the opposite of a derivative: a derivative finds the slope of a function's curve, while an integral finds the area under a function's curve.

2. For example, we have a function $f(x) = x^2$, and we want to find the area between $x = 0$ and $x = 2$ for this function. We can find the area by using a method called the **Riemann Sum**.

***
* We partition the interval [0,2] into $n$ subintervals, each with a width of $\Delta x = \frac{2 - 0}{n}$.
* The points in the partition will then be: $0, 0 + \Delta x, 0 + 2\Delta x, ..., 0 + (n-2)\Delta x, 0 + (n-1)\Delta x, 2$
* We then approximate the area under the curve:

$$
A = \Delta x[f(0) + f(0 + \Delta x) + f(0 + 2\Delta x) + ... + f(2 - \Delta x) + f(2)]
$$
***

<div> </div>

Figure 1: Visualization of the Riemann Sum for $f(x) = x^2$: to compute the area under a function's curve from point $(x_0,y_0)$ to $(x_k,y_k)$, we pack rectangles of equal width under the curve and sum their areas. This method is extremely powerful for integrals we cannot solve analytically.

3. In practice, we compute the area under a function's curve by taking a definite integral from point $a$ to point $b$:

$$
A = \int_a^b f(x)dx
$$

4. Let's move back to the Riemann Sum; there are several types of Riemann sum (a comparison sketch follows the list):
* In a **Left Riemann Sum**, we approximate the area using rectangles (usually of equal width), where the height of each rectangle is equal to the value of the function at the left endpoint of its base.
* In a **Right Riemann Sum**, we approximate the area using rectangles (usually of equal width), where the height of each rectangle is equal to the value of the function at the right endpoint of its base.
* In a **Midpoint Riemann Sum**, the height of each rectangle is equal to the value of the function at the midpoint of its base.
* In a **Trapezoidal Riemann Sum**, also known as the **Trapezoidal Rule**, the area under the curve is approximated by the average of the Left Sum and the Right Sum.
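Here is a minimal NumPy sketch comparing the three point-based rules on the worked example above (the exact area is $\int_0^2 x^2\,dx = 8/3 \approx 2.6667$; the grid size 1000 is chosen arbitrarily):

```python
##
# DEMO: LEFT, RIGHT AND MIDPOINT RIEMANN SUMS FOR f(x) = x**2 ON [0, 2]
# (a minimal sketch; the exact area is 8/3)
#
import numpy as np

def riemann_sum(f, a, b, n, rule="left"):
    dx = (b - a) / n
    edges = np.linspace(a, b, n + 1)   # the n+1 partition points
    if rule == "left":
        pts = edges[:-1]               # left endpoint of each subinterval
    elif rule == "right":
        pts = edges[1:]                # right endpoint of each subinterval
    else:                              # midpoint of each subinterval
        pts = (edges[:-1] + edges[1:]) / 2
    return np.sum(f(pts)) * dx

f = lambda x: x**2
for rule in ("left", "right", "midpoint"):
    print(rule, riemann_sum(f, 0, 2, 1000, rule))
```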
<div> </div>

Figure 2: Types of Riemann Sum: Left Riemann Sum (a); Right Riemann Sum (b); Trapezoidal Riemann Sum (c). The Midpoint Riemann Sum is not shown here.

```python
##
# DEMO: COMPUTE THE DEFINITE INTEGRAL
#
import numpy as np
from sympy import integrate, symbols, sin

x = symbols('x')
f = sin(x)
integrate(f,(x,0,np.pi))
```

$\displaystyle 2.0$

```python
##
# DEMO: COMPUTE THE RIEMANN SUM USING THE TRAPEZOIDAL RULE
#
import numpy as np

def trapezoidal_riemann(a, b, n):
    h = (b - a) / (n-1)
    x = np.linspace(a,b,n)
    f = np.sin(x)
    # trapezoidal rule: endpoints counted once, interior points twice
    A = (h/2) * (f[0] + 2 * np.sum(f[1:n-1]) + f[n-1])
    return A

trapezoidal_riemann(a = 0, b = np.pi, n = 1000)
```

    1.999998351770849

## 2. Applications of Integrals

1. One application of integrals is finding areas under probability distributions to obtain probabilities. For example, we want to compute the probability that any given golden retriever's weight is between 61 and 62 pounds, and we know that the population mean is 64.43 pounds and the population standard deviation is 2.99 pounds.

<div> </div>

Figure 1: An example of using integrals: computing the area under the curve of a probability density function.

```python
##
# DEMO: COMPUTE A PROBABILITY BY INTEGRATING THE PDF
#
import math

def normal_pdf(x: float, mean: float, std_dev: float) -> float:
    return (1.0 / (2.0 * math.pi * std_dev ** 2) ** 0.5) * math.exp(-1.0 * ((x - mean) ** 2 / (2.0 * std_dev ** 2)))

def approximate_integral(a, b, n, f):
    delta_x = (b - a) / (n - 1)
    total_sum = 0
    for i in range(1, n + 1):
        midpoint = 0.5 * (2 * a + delta_x * (2 * i - 1))
        total_sum += f(midpoint)
    return total_sum * delta_x

p_between_61_and_62 = approximate_integral(a = 61, b = 62, n = 1000, f = lambda x: normal_pdf(x,64.43,2.99))
print(p_between_61_and_62)
```

    0.08263012897923018

# Appendix <a name = "Practice5"></a>

## 1. Understanding the gradient

1. As previously mentioned, we picture $\nabla f$ as a vector field, but how do we interpret these vector fields? Let's think about the case where the input of $f$ is two-dimensional. The gradient turns each input point $(x_0, y_0)$ into the vector $\nabla f(x_0,y_0) = [\frac{\partial f}{\partial x}(x_0,y_0),\frac{\partial f}{\partial y}(x_0,y_0)]$.

2. Next, we think of the graph of $f$ as hilly terrain. If we are standing on the part of the graph directly above (or below) the point $(x_0, y_0)$, the slope of the hill depends on which direction we walk. For example, if we step straight in the positive $x$ direction, the slope is $\frac{\partial f}{\partial x}$; if we step straight in the positive $y$ direction, the slope is $\frac{\partial f}{\partial y}$.

3. Therefore, if we walk in the direction of the gradient, we will be going straight up the hill. Similarly, the magnitude of the vector $\nabla f(x_0, y_0)$ tells us what the slope of the hill is in that direction.

<div> </div>

Figure 1: Understanding the gradient (adapted from **The Gradient** by Khan Academy).

## 2. Number Theory

### a) Number Systems

1. The first system is **natural numbers** $\mathbb{N}$. **Natural numbers** are whole numbers such as 0, 1, 2, 3, 4, 5, ..., and so on.

2. The second system is **integers** $\mathbb{Z}$. **Integers** include positive and negative whole numbers as well as 0.

3. The third system is **rational numbers** $\mathbb{Q}$. **Rational numbers** are any numbers that we can express as a fraction, such as $\frac{3}{2}$. This includes all finite decimals, repeating decimals, and integers, since they can be expressed as fractions too.

4. The fourth system is **irrational numbers**.
**Irrational numbers** are any numbers that cannot be expressed as a fraction; their decimal expansions are infinite with no repeating pattern. Examples include the famous $\pi$, Euler's number $e$, and the square roots of certain numbers, like $\sqrt{2}$.

5. The fifth system is **real numbers** $\mathbb{R}$. Real numbers include rational as well as irrational numbers. In practice, when we are doing any data science/machine learning/deep learning work, we can treat any decimals we work with as real numbers. In plain Python, we effectively treat any floating-point type as a real number.

6. The final system is **complex numbers** $\mathbb{C}$. When we take the square root of a negative number, we end up with an imaginary number. **Imaginary numbers** are often denoted by $i$. A **complex number** consists of a real part and an imaginary part: $m = a + bi$.

<div> </div>

Figure 2: Visualization of the number systems.

### b) Variables and functions

1. A **variable** is a named placeholder for an unspecified or unknown number. Meanwhile, **functions** are expressions that define relationships between two or more variables. More specifically, a function takes independent variables (also called domain variables or input variables), plugs them into an expression, and then results in a dependent variable (also called an output variable).

# Suggestive Reading

1. Khan Academy has a comprehensive multivariate calculus tutorial series given by the creator of the _3Blue1Brown_ YouTube channel: [Multivariate Calculus](https://youtube.com/playlist?list=PLSQl0a2vh4HC5feHa6Rc5c0wbRTx56nF7)
module System.File.Support

%default total

||| Shorthand for a function in the C support library
||| (libidris2_support, idris_file.h)
|||
||| @ fn the function name to refer to in the C support library
public export
support : String -> String
support fn = "C:" ++ fn ++ ", libidris2_support, idris_file.h"

||| Wrap x in the `Right` part of an `io . Either`.
export
ok : HasIO io => (x : a) -> io (Either err a)
ok x = pure (Right x)
theory LRE_Z_Machine_deadlock_free imports "Z_Machines.Z_Machine" begin notation undefined ("???") subsection \<open> Introduction \<close> text \<open> This theory file is to model the LRE_Beh state machine in Z Machine notations.\<close> subsection \<open> type definition \<close> enumtype St = OCM | MOM | HCM | CAM | initial definition [z_defs]: "St = {OCM, MOM, HCM, CAM, initial}" enumtype Evt = advVel | reqHCM | reqOCM | reqMOM | endTask | reqVel definition "Evt = {advVel, reqHCM, reqOCM, reqMOM, endTask, reqVel}" datatype ('s, 'e) tag = State (ofState: 's) | Event (ofEvent: 'e) abbreviation "is_Event x \<equiv> \<not> is_State x" type_synonym ('s, 'e) rctrace = "('s, 'e) tag list" definition wf_rcstore :: "('s, 'e) rctrace \<Rightarrow> 's \<Rightarrow> 's option \<Rightarrow> bool" where [z_defs]: "wf_rcstore tr st final = (length(tr) > 0 \<and> tr ! ((length tr) -1) = State st \<and> ( final \<noteq> None \<longrightarrow> (\<forall>i<length tr. tr ! i = State (the final) \<longrightarrow> i= (length tr) -1)) \<and> (filter is_State tr) ! (length (filter is_State tr) -1) = State st)" type_synonym coord="integer\<times>integer" record Obstacle = obspos :: coord id :: nat consts Obsts :: " coord list" consts Positions::"coord set" consts Velocities:: "(integer\<times>integer) set" consts HCMVel:: "integer" consts MOMVel:: "integer" consts MinSafeDist :: "integer" consts Opez_min:: "coord" consts Opez_max:: "coord" consts SafeVel :: "integer" consts ZeroVel:: "coord" text \<open> function definition \<close> fun inOPEZ:: "coord\<Rightarrow> bool" where "inOPEZ (x,y) = ( x\<ge> fst Opez_min \<and> x< fst Opez_max \<and> y\<ge>snd Opez_min \<and> y<fst Opez_max)" fun single_dist:: " coord \<times> coord \<Rightarrow> integer" where "single_dist((x,y), (m,n)) =(x-m)^2+ (y-n)^2" fun dist:: " coord \<times> (coord list) \<Rightarrow> integer" where "dist((x,y),[]) = 200^2+ 200^2" | " dist((x,y), g#gs) =( if single_dist((x,y),g) \<le> dist ((x,y),gs) then single_dist((x,y),g) else dist ((x,y),gs))" fun obst_index:: "coord \<times> (coord list) \<Rightarrow> nat" where "obst_index ((x,y), [])=100 " | "obst_index ((x,y), g#gs)= (if single_dist ((x,y),g) = dist ((x,y),g#gs) then 0 else (obst_index ((x,y), gs)+1))" fun abslt:: "integer\<Rightarrow> integer" where "abslt(x) = (if x\<ge>0 then x else -x)" fun closestObs_xpos :: "coord \<times> (coord list) \<Rightarrow> integer" where "closestObs_xpos((xp,yp),[]) = 1000000"| "closestObs_xpos((xp,yp),g#gs) = fst ((g#gs) ! obst_index((xp,yp), g#gs))" fun closestObs_ypos :: "coord \<times> (coord list) \<Rightarrow> integer" where "closestObs_ypos((xp,yp),[]) = 1000000"| "closestObs_ypos((xp,yp),g#gs) = snd ((g#gs) ! 
obst_index((xp,yp), g#gs))" fun CDA :: " coord \<times> (coord list)\<times> (integer \<times> integer) \<Rightarrow> integer" where "CDA((xp,yp),[], (xv,yv)) = (10+MinSafeDist+5)^2" | "CDA ((xp,yp), g#gs, (xv,yv)) = (if xv\<noteq>0 \<and> yv=0 then (if (closestObs_xpos((xp,yp),g#gs)- xp) * xv\<ge>0 then dist((xp,yp),g#gs) -(closestObs_xpos((xp,yp),g#gs) - xp)^2 else dist((xp,yp),g#gs) ) else (if xv=0 \<and> yv\<noteq>0 then (if (closestObs_ypos((xp,yp),g#gs) - yp) * yv\<ge>0 then dist((xp,yp),g#gs)- (closestObs_ypos((xp,yp),g#gs) - yp)^2 else dist((xp,yp),g#gs) ) else dist((xp,yp),g#gs) ) )" fun maneuv :: "integer\<times> integer \<Rightarrow> integer\<times> integer" where "maneuv(x,y) = (y,-x)" fun setVel :: "(integer \<times> integer) \<times> integer \<Rightarrow> (integer \<times> integer) " where "setVel((xv, yv), setpoint) = (if xv=0 then (if yv>0 then (0, setpoint) else ( if yv<0 then(0, (- setpoint) ) else (setpoint, 0) ) ) else (if xv>0 then (setpoint, 0) else ((-setpoint), 0) ) )" subsection \<open> State Space \<close> (*pos::"Coord"*) zstore LRE_Beh = pos:: "coord" xvel :: "integer" yvel :: "integer" advV::" integer\<times> integer" st::"St" tr :: "(St, Evt)tag list" triggers:: "Evt set" where inv: "wf_rcstore tr st None" subsection \<open> Operations \<close> zoperation Move = over LRE_Beh update "[ pos\<Zprime>=(fst(pos) + xvel, snd(pos) + yvel) ]" zoperation Display = over LRE_Beh params p \<in> "{pos}" v \<in> "{(xvel,yvel)}" t\<in>"{tr}" state\<in> "{st}" zoperation InitialToOCM = over LRE_Beh pre "st= initial" update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @ [State OCM] , triggers\<Zprime> = {reqMOM, reqVel} ]" zoperation OCMToMOM = over LRE_Beh pre "st= OCM \<and> ( xvel^2 + yvel^2)\<le> SafeVel^2 \<and> dist(pos,Obsts)> (MinSafeDist+10)^2 \<and> \<not>inOPEZ(pos)" update "[ st\<Zprime>= MOM , tr\<Zprime>=tr @ [Event reqMOM] @ [Event advVel]@ [State MOM] , advV\<Zprime> = setVel((xvel, yvel), MOMVel) , (xvel,yvel)\<Zprime> = setVel((xvel, yvel), MOMVel) , triggers\<Zprime> = {endTask, reqOCM} ]" zoperation MOMToOCM = over LRE_Beh pre "st= MOM" update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @ [Event reqOCM] @ [State OCM] , triggers\<Zprime> = {reqMOM, reqVel} ]" zoperation MOMToOCM_1 = over LRE_Beh pre "st= MOM \<and> inOPEZ(pos) \<and> (dist(pos,Obsts)> (MinSafeDist+5)^2 \<or> CDA(pos,Obsts, (xvel,yvel))> MinSafeDist^2)" update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @ [State OCM] , triggers\<Zprime> = {reqMOM, reqVel} ]" zoperation MOMToOCM_2 = over LRE_Beh pre "st= MOM " update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @ [Event endTask] @ [Event advVel] @ [State OCM] , advV\<Zprime> = ZeroVel , (xvel,yvel)\<Zprime> = ZeroVel , triggers\<Zprime> = {reqMOM, reqVel} ]" zoperation HCMToOCM = over LRE_Beh pre "st= HCM " update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @ [Event reqOCM] @ [State OCM] , triggers\<Zprime> = {reqMOM, reqVel} ]" zoperation HCMToOCM_1 = over LRE_Beh pre "st= HCM \<and> inOPEZ(pos)\<and> (dist(pos,Obsts)> (MinSafeDist+5)^2 \<or> CDA(pos,Obsts, (xvel,yvel))> MinSafeDist^2)" update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @ [State OCM] , triggers\<Zprime> = {reqMOM, reqVel} ]" zoperation MOMToHCM = over LRE_Beh pre "st= MOM \<and> ( xvel^2 + yvel^2)> SafeVel^2 \<and> dist(pos,Obsts)\<le> (MinSafeDist+5)^2 \<and> CDA(pos,Obsts, (xvel,yvel))> MinSafeDist^2" update "[ st\<Zprime>= HCM , tr\<Zprime>=tr @ [Event advVel] @ [State HCM] , advV\<Zprime> = setVel((xvel, yvel), HCMVel) , (xvel,yvel)\<Zprime> = setVel((xvel, yvel), HCMVel) , triggers\<Zprime> = {reqOCM} ]" 
zoperation HCMToMOM = over LRE_Beh pre "st= HCM \<and> dist(pos,Obsts)> (MinSafeDist+5)^2 \<and> \<not>inOPEZ(pos) " update "[ st\<Zprime>= MOM , tr\<Zprime>=tr @ [Event advVel] @ [State MOM] , advV\<Zprime> = setVel((xvel, yvel), MOMVel) , (xvel,yvel)\<Zprime> = setVel((xvel, yvel), MOMVel) , triggers\<Zprime> = { endTask, reqOCM} ]" zoperation OCMToOCM = over LRE_Beh params reqV \<in> " Velocities" pre "st= OCM " update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @ [Event reqVel] @ [Event advVel] @[State OCM] , advV\<Zprime> = reqV , (xvel,yvel)\<Zprime> = reqV , triggers\<Zprime> = {reqMOM, reqVel} ]" zoperation HCMToCAM = over LRE_Beh pre "st= HCM \<and> CDA(pos,Obsts, (xvel,yvel))\<le> MinSafeDist^2 \<and>dist(pos,Obsts) \<le> (MinSafeDist+5)^2 " update "[ st\<Zprime>= CAM , tr\<Zprime>=tr @ [Event advVel] @ [State CAM] , advV\<Zprime> = maneuv(xvel, yvel) , (xvel,yvel)\<Zprime> = maneuv(xvel, yvel) , triggers\<Zprime> = {reqOCM} ]" zoperation HCMToCAM_1 = over LRE_Beh pre "st= HCM \<and> (-100> (fst(pos) + xvel) \<or> (fst(pos) + xvel) >100 \<or> -100> (snd(pos) + yvel) \<or> (snd(pos) + yvel) >100)" update "[ st\<Zprime>= CAM , tr\<Zprime>=tr @ [Event advVel] @ [State CAM] , advV\<Zprime> = maneuv(xvel, yvel) , (xvel,yvel)\<Zprime> = maneuv(xvel, yvel) , triggers\<Zprime> = {reqOCM} ]" zoperation MOMToCAM = over LRE_Beh pre "st= MOM \<and> CDA(pos,Obsts, (xvel,yvel))\<le> MinSafeDist^2 \<and>dist(pos,Obsts)\<le> (MinSafeDist+5)^2 " update "[ st\<Zprime>= CAM , tr\<Zprime>=tr @ [Event advVel] @ [State CAM] , advV\<Zprime> = maneuv(xvel, yvel) , (xvel,yvel)\<Zprime> = maneuv(xvel, yvel) , triggers\<Zprime> = {reqOCM} ]" zoperation MOMToCAM_1 = over LRE_Beh pre "st= MOM \<and> (-100> (fst(pos) + xvel) \<or> (fst(pos) + xvel) >100 \<or> -100> (snd(pos) + yvel) \<or> (snd(pos) + yvel) >100)" update "[ st\<Zprime>= CAM , tr\<Zprime>=tr @ [Event advVel] @ [State CAM] , advV\<Zprime> = maneuv(xvel, yvel) , (xvel,yvel)\<Zprime> = maneuv(xvel, yvel) , triggers\<Zprime> = {reqOCM} ]" zoperation CAMToCAM = over LRE_Beh pre "st= CAM \<and> CDA(pos,Obsts, (xvel,yvel))\<le> MinSafeDist^2 \<and>dist(pos,Obsts)\<le> (MinSafeDist+5)^2 " update "[ st\<Zprime>= CAM , tr\<Zprime>=tr @ [Event advVel] @ [State CAM] , advV\<Zprime> = maneuv(xvel, yvel) , (xvel,yvel)\<Zprime> = maneuv(xvel, yvel) , triggers\<Zprime> = {reqOCM} ]" zoperation CAMToCAM_1 = over LRE_Beh pre "st= CAM \<and> (-100> (fst(pos) + xvel) \<or> (fst(pos) + xvel) >100 \<or> -100> (snd(pos) + yvel) \<or> (snd(pos) + yvel) >100)" update "[ st\<Zprime>= CAM , tr\<Zprime>=tr @ [Event advVel] @ [State CAM] , advV\<Zprime> = maneuv(xvel, yvel) , (xvel,yvel)\<Zprime> = maneuv(xvel, yvel) , triggers\<Zprime> = {reqOCM} ]" zoperation CAMToOCM = over LRE_Beh pre "st= CAM \<and> CDA(pos,Obsts, (xvel,yvel))> MinSafeDist^2" update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @[Event advVel] @ [State OCM] , advV\<Zprime> = ZeroVel , (xvel,yvel)\<Zprime> = ZeroVel , triggers\<Zprime> = {reqMOM, reqVel} ]" zoperation CAMToOCM_1 = over LRE_Beh pre "st= CAM " update "[ st\<Zprime>= OCM , tr\<Zprime>=tr @ [Event reqOCM] @ [State OCM] , triggers\<Zprime> = {reqMOM, reqVel} ]" definition Init :: "LRE_Beh subst" where [z_defs]: "Init = [pos\<leadsto>(0,0), xvel \<leadsto> 0, yvel \<leadsto> 0, advV \<leadsto> (0,0), st \<leadsto> OCM, tr \<leadsto> [State OCM], triggers \<leadsto> {reqOCM} ]" def_consts Velocities = "{(0,1),(0,-2), (2,0),(-4,0)}" declare Velocities_def [z_defs] def_consts MinSafeDist= "2" declare MinSafeDist_def [z_defs] def_consts HCMVel = "3" declare 
HCMVel_def [z_defs] def_consts MOMVel = "5" declare MOMVel_def [z_defs] def_consts ZeroVel = "(0,0)" declare ZeroVel_def [z_defs] def_consts SafeVel = "3" declare SafeVel_def [z_defs] subsection \<open> Structural Invariants \<close> lemma Init_inv [hoare_lemmas]: "Init establishes LRE_Beh_inv" by (zpog_full; auto) lemma InitialToOCM_inv [hoare_lemmas]: "InitialToOCM () preserves LRE_Beh_inv" by (zpog_full; auto) lemma OCMToMOM_inv [hoare_lemmas]: "OCMToMOM() preserves LRE_Beh_inv" apply (zpog_full; auto) by (metis add_2_eq_Suc' cancel_ab_semigroup_add_class.add_diff_cancel_left' not_add_less1 nth_Cons_0 nth_Cons_Suc nth_append numeral_2_eq_2)+ lemma MOMToOCM_inv [hoare_lemmas]: "MOMToOCM () preserves LRE_Beh_inv" apply (zpog_full ) apply (simp add: nth_append) done lemma MOMToOCM_1_inv [hoare_lemmas]: "MOMToOCM_1 () preserves LRE_Beh_inv" by (zpog_full; auto) lemma MOMToOCM_2_inv [hoare_lemmas]: "MOMToOCM_2 () preserves LRE_Beh_inv" apply (zpog_full) apply (simp add: nth_append) done lemma HCMToOCM_inv [hoare_lemmas]: "HCMToOCM () preserves LRE_Beh_inv" apply (zpog_full) apply (metis One_nat_def Suc_eq_plus1 cancel_ab_semigroup_add_class.add_diff_cancel_left' not_add_less1 nth_Cons_0 nth_Cons_Suc nth_append) done lemma HCMToOCM_1_inv [hoare_lemmas]: "HCMToOCM_1() preserves LRE_Beh_inv" by (zpog_full; auto) lemma MOMToHCM_inv [hoare_lemmas]: "MOMToHCM() preserves LRE_Beh_inv" apply (zpog_full ) by (metis One_nat_def Suc_eq_plus1 nth_Cons_0 nth_Cons_Suc nth_append_length_plus) lemma HCMToMOM_inv [hoare_lemmas]: "HCMToMOM() preserves LRE_Beh_inv" apply (zpog_full ) by (metis One_nat_def Suc_eq_plus1 Suc_pred cancel_ab_semigroup_add_class.add_diff_cancel_left' diff_less_Suc not_less_eq nth_Cons_0 nth_Cons_Suc nth_append) lemma OCMToOCM_inv [hoare_lemmas]: "OCMToOCM (v) preserves LRE_Beh_inv" apply (zpog_full ) apply (simp add: nth_append) by (simp add: nth_append) lemma HCMToCAM_inv [hoare_lemmas]: "HCMToCAM() preserves LRE_Beh_inv" apply (zpog_full ) by (metis One_nat_def Suc_eq_plus1 Suc_pred cancel_ab_semigroup_add_class.add_diff_cancel_left' diff_less_Suc not_less_eq nth_Cons_0 nth_Cons_Suc nth_append) lemma HCMToCAM_1_inv [hoare_lemmas]: "HCMToCAM_1() preserves LRE_Beh_inv" apply (zpog_full ) apply (metis One_nat_def Suc_eq_plus1 nth_Cons_0 nth_Cons_Suc nth_append_length_plus) by (simp add: nth_append) lemma MOMToCAM_inv [hoare_lemmas]: "MOMToCAM() preserves LRE_Beh_inv" apply (zpog_full ) by (metis One_nat_def Suc_eq_plus1 nth_Cons_0 nth_Cons_Suc nth_append_length_plus) lemma MOMToCAM_1_inv [hoare_lemmas]: "MOMToCAM_1() preserves LRE_Beh_inv" apply (zpog_full ) apply (simp add: nth_append) by (simp add: nth_append) lemma CAMToCAM_inv [hoare_lemmas]: "CAMToCAM () preserves LRE_Beh_inv" apply (zpog_full ) by (metis One_nat_def Suc_eq_plus1 nth_Cons_0 nth_Cons_Suc nth_append_length_plus) lemma CAMToCAM_1_inv [hoare_lemmas]: "CAMToCAM_1 () preserves LRE_Beh_inv" apply (zpog_full ) apply (simp add: nth_append) by (simp add: nth_append) lemma CAMToOCM_inv [hoare_lemmas]: "CAMToOCM () preserves LRE_Beh_inv" apply (zpog_full ) apply (metis One_nat_def Suc_eq_plus1 nth_Cons_0 nth_Cons_Suc nth_append_length_plus) done lemma CAMToOCM_1_inv [hoare_lemmas]: "CAMToOCM_1() preserves LRE_Beh_inv" apply (zpog_full ) apply (metis append_Cons append_Nil append_assoc length_append_singleton nth_append_length) done lemma Move_inv [hoare_lemmas]: "Move() preserves LRE_Beh_inv" by (zpog_full; auto) subsection \<open> Safety Requirements \<close> zmachine LRE_BehMachine = over LRE_Beh init " 
[pos\<leadsto>(0,0), xvel \<leadsto> 0, yvel \<leadsto> 0, advV \<leadsto> (0,0), st \<leadsto> initial, tr \<leadsto> [State initial], triggers \<leadsto> {reqOCM} ]" invariant LRE_Beh_inv operations InitialToOCM OCMToOCM OCMToMOM MOMToOCM MOMToOCM_1 MOMToOCM_2 HCMToOCM HCMToOCM_1 MOMToHCM HCMToMOM HCMToCAM HCMToCAM_1 MOMToCAM MOMToCAM_1 CAMToCAM CAMToCAM_1 CAMToOCM CAMToOCM_1 definition [z_defs]: "LRE_Beh_axioms = (MinSafeDist>0)" lemma LRE_Beh_deadlock_free: "LRE_Beh_axioms \<Longrightarrow> deadlock_free LRE_BehMachine" apply deadlock_free by (metis St.exhaust_disc) end
# Convert Domain to a KB. Very partial, mainly for backwards compatibility # for parts of the code which still work with KB. export Bound := module () export toKB := proc(dom :: DomBound, kb0 :: t_kb, $)::[t_kb_mb, list(name=name)]; local kb := kb0, vs := op(1, dom), rn := [] , vn, vt, make, lo, hi, vn_rn, rn_t, v ; for v in vs do vn, vt, make := op(v); lo, hi := ExtBound[make]:-SplitRange(vt); vn_rn, kb := ExtBound[make]:-MakeKB( vn, lo, hi, kb ); rn := [ vn=vn_rn, op(rn) ]; end do; [ kb, rn ] end proc; export varsOf := proc(dom :: DomBound, output::identical("list","set","seq"):="list") local v := map(x->op(1,x), op(1, dom)); op(table(["list"=[v],"set"=[{op(v)}],"seq"=v])[output]); end proc; export withVarsIxs := proc(dom :: DomBound, $) :: DomBound; DBound( op(1..2,dom), table([ seq(op([1,i,1],dom)=i,i=1..nops(op(1,dom))) ]) ); end proc; export varIx := proc(dom0::DomBound, v::DomBoundVar, $)::nonnegint; local i := varIx_mb(dom0,v); if i = -1 then error "cannot find var %1 in %2", v, dom0; else i end if; end proc; export varIx_mb := proc(dom0::DomBound, v::DomBoundVar, $)::{nonnegint,identical(-1)}; local dom := `if`(nops(dom0)>=3, (_->_), withVarsIxs)(dom0); if assigned(op(3,dom)[v]) then op(3,dom)[v] else -1 end if; end proc; export isEmpty := proc(dom :: DomBound, $)::truefalse; evalb(nops(op(1,dom))=0); end proc; export get := proc(dom :: DomBound, var :: DomBoundVar, $) local th; th := select(x->op(1,x)=var, op(1,dom)); if nops(th) = 1 then op([1,2], th), op([1,3], th) elif nops(th) = 0 then error "cannot find var %1 in %2", var, dom; else error "multiple references in %1", dom; end if; end proc; export upd := proc(dom0 :: DomBound, vr :: DomBoundVar, ty :: DomBoundRange, $) local dom := `if`(nops(dom0)>=3, (_->_), withVarsIxs)(dom0), i , old_lo,old_hi, lo,hi, old_ty, new_ty, _, vr_k; i := varIx(dom, vr); _, old_ty, vr_k := op(op([1,i], dom)); old_lo,old_hi := Domain:-ExtBound[vr_k]:-SplitRange(old_ty); lo, hi := Domain:-ExtBound[vr_k]:-SplitRange(ty); if lo :: realcons then lo := Domain:-ExtBound[vr_k]:-Max(old_lo, lo); end if; if hi :: realcons then hi := Domain:-ExtBound[vr_k]:-Min(old_hi, hi); end if; new_ty := Domain:-ExtBound[vr_k]:-MakeRange(lo,hi); subsop([1,i,2]=new_ty, dom); end proc; local constrain := proc( opts, vn::DomBoundVar, ty::DomBoundRange, mk :: DomBoundKind, $ ) :: set({relation, `::`}); local lo, hi, noi, bt , noinf := evalb('no_infinity' in opts) , btys := evalb('bound_types' in opts); lo,hi := Domain:-ExtBound[mk]:-SplitRange(ty); bt := `if`(btys, { vn :: Domain:-ExtBound[mk]:-BoundType }, {}); bt := { Domain:-ExtBound[mk]:-Constrain( lo, vn ) , Domain:-ExtBound[mk]:-Constrain( vn, hi ) , bt[]}; `if`(noinf,x->remove(c->c::relation and ormap(s->s(c)::identical(infinity,-infinity),[lhs,rhs]), x),_->_) (bt); end proc; local toConstraints_opts := { 'no_infinity', 'bound_types' }; export toConstraints := proc( bnd :: DomBound ) local cs, opts, bad_opts, noinf; opts := { args[2..-1] } ; bad_opts := opts minus toConstraints_opts; if bad_opts <> {} then error "invalid arguments: %1", bad_opts; end if; {op(map(b->constrain(opts, op(b))[], op(1,bnd))) ,op( `if`(nops(bnd)>1,op(2,bnd),{}) ) }; end proc; end module;#Bound
{-# OPTIONS --without-K --safe #-} open import Relation.Binary using (Rel; Setoid; IsEquivalence) module Quasigroup.Structures {a ℓ} {A : Set a} -- The underlying set (_≈_ : Rel A ℓ) -- The underlying equality relation where open import Algebra.Core open import Level using (_⊔_) open import Data.Product using (_,_; proj₁; proj₂) open import Algebra.Definitions _≈_ open import Algebra.Structures _≈_ open import Quasigroup.Definitions _≈_ record IsPique (∙ \\ // : Op₂ A) (ε : A) : Set (a ⊔ ℓ) where field isQuasigroup : IsQuasigroup ∙ \\ // idem : Idempotent ∙ open IsQuasigroup isQuasigroup public
import Data.Integer as Integer open Integer using (ℤ) module NumberTheory.ModularArithmetic (m : ℤ) where open import Algebra.Structures open Integer hiding (+_; _-_) -- using (_+_; _*_; -_; +0) open import Data.Integer.GCD open import Data.Integer.Properties hiding (*-1-isMonoid; *-1-isCommutativeMonoid; +-*-isCommutativeRing) open import Data.Integer.Tactic.RingSolver open import Data.Product open import Relation.Binary open import Relation.Binary.PropositionalEquality as ≡ using (_≡_) _≈_ : Rel ℤ _ x ≈ y = ∃ λ k → k * m + x ≡ y refl : Reflexive _≈_ refl {x} = +0 , (begin +0 * m + x ≡⟨ ≡.cong (_+ x) (*-zeroˡ m) ⟩ +0 + x ≡⟨ +-identityˡ x ⟩ x ∎) where open ≡.≡-Reasoning sym : Symmetric _≈_ sym {x} {y} (k , p) = (- k) , (begin - k * m + y ≡˘⟨ ≡.cong₂ _+_ (neg-distribˡ-* k m) (neg-involutive y) ⟩ - (k * m) + - - y ≡˘⟨ neg-distrib-+ (k * m) (- y) ⟩ - (k * m + - y) ≡⟨ ≡.cong -_ (begin k * m + - y ≡˘⟨ +-identityʳ _ ⟩ k * m + - y + +0 ≡˘⟨ ≡.cong (k * m + - y +_) (+-inverseʳ x) ⟩ k * m + - y + (x + - x) ≡˘⟨ +-assoc (k * m + - y) x (- x) ⟩ k * m + - y + x + - x ≡⟨ ≡.cong (_+ - x) (begin k * m + - y + x ≡⟨ +-assoc (k * m) (- y) x ⟩ k * m + (- y + x) ≡⟨ ≡.cong (k * m +_) (+-comm (- y) x) ⟩ k * m + (x + - y) ≡˘⟨ +-assoc (k * m) x (- y) ⟩ k * m + x + - y ≡⟨ ≡.cong (_+ - y) p ⟩ y + - y ≡⟨ +-inverseʳ y ⟩ +0 ∎) ⟩ +0 + - x ≡⟨ +-identityˡ (- x) ⟩ - x ∎) ⟩ - - x ≡⟨ neg-involutive x ⟩ x ∎) where open ≡.≡-Reasoning trans : Transitive _≈_ trans {x} {y} {z} (j , p) (k , q) = k + j , (begin (k + j) * m + x ≡⟨ ≡.cong (_+ x) (*-distribʳ-+ (m) k j) ⟩ k * m + j * m + x ≡⟨ +-assoc (k * m) (j * m) x ⟩ k * m + (j * m + x) ≡⟨ ≡.cong (_+_ (k * m)) p ⟩ k * m + y ≡⟨ q ⟩ z ∎) where open ≡.≡-Reasoning ≈-isEquivalence : IsEquivalence _≈_ ≈-isEquivalence = record { refl = refl ; sym = sym ; trans = trans } open IsEquivalence ≈-isEquivalence public using (reflexive) setoid : Setoid _ _ setoid = record { isEquivalence = ≈-isEquivalence } open import Algebra.Definitions _≈_ +-cong : Congruent₂ _+_ +-cong {x} {y} {u} {v} (j , p) (k , q) = j + k , (begin (j + k) * m + (x + u) ≡⟨ ≡.cong (_+ (x + u)) (*-distribʳ-+ m j k) ⟩ j * m + k * m + (x + u) ≡˘⟨ +-assoc (j * m + k * m) x u ⟩ j * m + k * m + x + u ≡⟨ ≡.cong (_+ u) (begin j * m + k * m + x ≡⟨ +-assoc (j * m) (k * m) x ⟩ j * m + (k * m + x) ≡⟨ ≡.cong (j * m +_) (+-comm (k * m) x) ⟩ j * m + (x + k * m) ≡˘⟨ +-assoc (j * m) x (k * m) ⟩ j * m + x + k * m ∎) ⟩ j * m + x + k * m + u ≡⟨ +-assoc (j * m + x) (k * m) u ⟩ j * m + x + (k * m + u) ≡⟨ ≡.cong₂ _+_ p q ⟩ y + v ∎) where open ≡.≡-Reasoning *-cong : Congruent₂ _*_ *-cong {x} {y} {u} {v} (j , p) (k , q) = j * m * k + j * u + x * k , (begin (j * m * k + j * u + x * k) * m + x * u ≡⟨ ≡.cong (_+ x * u) (begin (j * m * k + j * u + x * k) * m ≡⟨ *-distribʳ-+ m (j * m * k + j * u) (x * k) ⟩ (j * m * k + j * u) * m + (x * k) * m ≡⟨ ≡.cong₂ _+_ (*-distribʳ-+ m (j * m * k) (j * u)) (*-assoc x k m) ⟩ j * m * k * m + j * u * m + x * (k * m) ≡⟨ ≡.cong (_+ x * (k * m)) (begin j * m * k * m + (j * u) * m ≡⟨ ≡.cong (_+ j * u * m) (*-assoc (j * m) k m) ⟩ j * m * (k * m) + j * u * m ≡⟨ ≡.cong (j * m * (k * m) +_) (begin j * u * m ≡⟨ *-assoc j u m ⟩ j * (u * m) ≡⟨ ≡.cong (j *_) (*-comm u m) ⟩ j * (m * u) ≡˘⟨ *-assoc j m u ⟩ j * m * u ∎) ⟩ (j * m) * (k * m) + (j * m) * u ∎) ⟩ j * m * (k * m) + j * m * u + x * (k * m) ∎) ⟩ (j * m) * (k * m) + (j * m) * u + x * (k * m) + x * u ≡⟨ +-assoc ((j * m) * (k * m) + (j * m) * u) (x * (k * m)) (x * u) ⟩ (j * m) * (k * m) + (j * m) * u + (x * (k * m) + x * u) ≡˘⟨ ≡.cong₂ _+_ (*-distribˡ-+ (j * m) (k * m) u) 
(*-distribˡ-+ x (k * m) u) ⟩ (j * m) * (k * m + u) + x * (k * m + u) ≡˘⟨ *-distribʳ-+ (k * m + u) (j * m) x ⟩ (j * m + x) * (k * m + u) ≡⟨ ≡.cong₂ _*_ p q ⟩ y * v ∎) where open ≡.≡-Reasoning neg-cong : Congruent₁ -_ neg-cong {x} {y} (k , p) = - k , (begin - k * m + - x ≡˘⟨ ≡.cong (_+ - x) (neg-distribˡ-* k m) ⟩ - (k * m) + - x ≡˘⟨ neg-distrib-+ (k * m) x ⟩ - (k * m + x) ≡⟨ ≡.cong -_ p ⟩ - y ∎) where open ≡.≡-Reasoning +-0-isAbelianGroup : IsAbelianGroup _≈_ _+_ +0 -_ +-0-isAbelianGroup = record { isGroup = record { isMonoid = record { isSemigroup = record { isMagma = record { isEquivalence = ≈-isEquivalence ; ∙-cong = +-cong } ; assoc = λ x y z → reflexive (+-assoc x y z) } ; identity = (λ x → reflexive (+-identityˡ x)) , λ x → reflexive (+-identityʳ x) } ; inverse = (λ x → reflexive (+-inverseˡ x)) , λ x → reflexive(+-inverseʳ x) ; ⁻¹-cong = neg-cong } ; comm = λ x y → reflexive (+-comm x y) } *-1-isCommutativeMonoid : IsCommutativeMonoid _≈_ _*_ (Integer.+ 1) *-1-isCommutativeMonoid = record { isMonoid = record { isSemigroup = record { isMagma = record { isEquivalence = ≈-isEquivalence ; ∙-cong = *-cong } ; assoc = λ x y z → reflexive (*-assoc x y z) } ; identity = (λ x → reflexive (*-identityˡ x)) , λ x → reflexive (*-identityʳ x ) } ; comm = λ x y → reflexive (*-comm x y) } open IsCommutativeMonoid *-1-isCommutativeMonoid using () renaming (isMonoid to *-1-isMonoid) +-*-isCommutativeRing : IsCommutativeRing _≈_ _+_ _*_ -_ +0 (Integer.+ 1) +-*-isCommutativeRing = record { isRing = record { +-isAbelianGroup = +-0-isAbelianGroup ; *-isMonoid = *-1-isMonoid ; distrib = (λ x y z → reflexive (*-distribˡ-+ x y z)) , λ x y z → reflexive (*-distribʳ-+ x y z) ; zero = (λ x → reflexive (*-zeroˡ x)) , λ x → reflexive (*-zeroʳ x) } ; *-comm = λ x y → reflexive (*-comm x y) }
Eapele Double-Sided Wire/Metal Sheet Thickness Gauge, Stainless Steel Welding Gage, Plated Size Inspection Tool. By Eapele. 4.7 out of 5 stars (36 ratings). $6.99.

Disco Thin Sheet Steel Nutsert Assortment, 195 Pieces (8207). 24 x 36 30-Gauge Galvanized Steel Flat Sheet Metal, industrial project material.

Precision Thin Metals - Ultra Thin Gauge Alloy Strip and Foil. The Precision Thin Metals (PTM) division of Arnold Magnetic Technologies produces thin and ultra-thin alloys that improve the power density of motors, transformers, batteries and many other applications in automotive, aerospace, energy exploration, industrial and medical markets.

Thin Gauge Corrugated Steel Sheets (Henan BBN). Corrugated galvanised iron or steel is a building material composed of sheets of hot-dip galvanised steel. Common sizes of corrugated material can range from a very thin 30 gauge (0.012 inches, 0.30 mm) to a relatively thick 6 gauge (0.1943 inches, 4.94 mm).

For many applications, there are performance, weight and efficiency advantages to using precision thin rolled metal products. We produce thin and ultra-thin gauge metal and alloy strips and foil products rolled down to 2 microns. See the variety of materials we can custom produce for your applications.
If $f$ is holomorphic on a punctured neighborhood of $z$, then the residue of $f$ at $z$ is equal to the residue of $cf$ at $z$.
{-# LANGUAGE DataKinds             #-}
{-# LANGUAGE DeriveAnyClass        #-}
{-# LANGUAGE DeriveGeneric         #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE RankNTypes            #-}
{-# LANGUAGE ScopedTypeVariables   #-}
{-# LANGUAGE TypeFamilies          #-}
{-# LANGUAGE TypeOperators         #-}
{-|
Module      : Grenade.Layers.Gelu
Description : Gaussian Error Linear Unit (GELU)
Copyright   : (c) Manuel Schneckenreither, 2020
License     : BSD2
Stability   : experimental

This module implements the Gaussian Error Linear Unit (GELU) activation function. See

Hendrycks, Dan, and Kevin Gimpel. "Gaussian error linear units (gelus)." arXiv preprint arXiv:1606.08415 (2016).

Available at: https://arxiv.org/pdf/1606.08415.pdf

As in the paper we simply let μ = 0 and σ = 1. Further, we use the simplified and thus fast representation: x * sigmoid (1.702 * x)
-}
module Grenade.Layers.Gelu
  ( Gelu (..)
  ) where

import           Control.DeepSeq              (NFData (..))
import           Data.Serialize
import           GHC.Generics                 (Generic)
import           GHC.TypeLits
import qualified Numeric.LinearAlgebra.Static as LAS

import           Grenade.Core

-- | A Gaussian Error Linear Unit.
--   A layer which can act between any shape of the same dimension, acting as a
--   diode on every neuron individually.
--
--   Hendrycks, Dan, and Kevin Gimpel. "Gaussian error linear units (gelus)." arXiv preprint arXiv:1606.08415 (2016).
data Gelu = Gelu
  deriving (Generic, NFData, Show)

instance UpdateLayer Gelu where
  type Gradient Gelu = ()
  runUpdate _ _ _ = Gelu
  reduceGradient _ = ()

instance RandomLayer Gelu where
  createRandomWith _ _ = return Gelu

instance Serialize Gelu where
  put _ = return ()
  get = return Gelu

geluForwardFast :: Floating x => x -> x
geluForwardFast x = x / (e ** (-1.702 * x) + 1) -- = x * sigmoid (1.702 * x)
  where e = 2.71828 -- truncated approximation of Euler's number

geluBackwardFast :: Floating x => x -> x
geluBackwardFast x = (e ** (1.702 * x) * (1 + e ** (1.702 * x) + 1.702 * x)) / (1 + e ** (1.702 * x)) ** 2
  where e = 2.71828 -- truncated approximation of Euler's number

instance (KnownNat i) => Layer Gelu ('D1 i) ('D1 i) where
  type Tape Gelu ('D1 i) ('D1 i) = S ('D1 i)

  runForwards _ (S1D y) = (S1D y, S1D (gelu y))
    where gelu = LAS.dvmap geluForwardFast
  runBackwards _ (S1D y) (S1D dEdy) = ((), S1D (gelu' y * dEdy))
    where gelu' = LAS.dvmap geluBackwardFast

instance (KnownNat i, KnownNat j) => Layer Gelu ('D2 i j) ('D2 i j) where
  type Tape Gelu ('D2 i j) ('D2 i j) = S ('D2 i j)

  runForwards _ (S2D y) = (S2D y, S2D (gelu y))
    where gelu = LAS.dmmap geluForwardFast
  runBackwards _ (S2D y) (S2D dEdy) = ((), S2D (gelu' y * dEdy))
    where gelu' = LAS.dmmap geluBackwardFast

instance (KnownNat i, KnownNat j, KnownNat k) => Layer Gelu ('D3 i j k) ('D3 i j k) where
  type Tape Gelu ('D3 i j k) ('D3 i j k) = S ('D3 i j k)

  runForwards _ (S3D y) = (S3D y, S3D (gelu y))
    where gelu = LAS.dmmap geluForwardFast
  runBackwards _ (S3D y) (S3D dEdy) = ((), S3D (gelu' y * dEdy))
    where gelu' = LAS.dmmap geluBackwardFast
using Compat.Test @testset "Core" begin include("Core.jl") end @testset "RegionIter" begin include("RegionIter.jl") end @testset "kron macros" begin include("KronMacro.jl") end @testset "Fused Hamiltonian" begin include("FusedHamiltonian.jl") end
lemma AE_I2[simp, intro]: "(\<And>x. x \<in> space M \<Longrightarrow> P x) \<Longrightarrow> AE x in M. P x"
[STATEMENT] lemma (in pmpt) Tinv_fmpt: assumes "invertible_qmpt" shows "pmpt M Tinv" [PROOF STATE] proof (prove) goal (1 subgoal): 1. pmpt M Tinv [PROOF STEP] unfolding pmpt_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. fmpt M Tinv \<and> prob_space M [PROOF STEP] using Tinv_fmpt[OF assms] [PROOF STATE] proof (prove) using this: fmpt M Tinv goal (1 subgoal): 1. fmpt M Tinv \<and> prob_space M [PROOF STEP] by (simp add: prob_space_axioms)
module Laplace

## Package imports
#==========================================================================================#

using StaticArrays
using LinearAlgebra

import ..MathTools: Point2D, Point3D, magnitude, angle

import ..AeroMDAO: velocity

abstract type AbstractLaplace end

## Legacy (to be removed?)
#==========================================================================================#

# Performs velocity and potential computations for an object on a grid
grid_data(object :: AbstractLaplace, xs) = velocity(object, xs), potential(object, xs)
velocity(object :: AbstractLaplace, xs) = map(x -> velocity(object, x...), xs)
potential(object :: AbstractLaplace, xs) = map(x -> potential(object, x...), xs)

# Performs velocity and potential calculations on a grid
function grid_data(objects :: Vector{<: AbstractLaplace}, xs)
    vels = foldl((v1, v2) -> [ u .+ v for (u, v) ∈ zip(v1, v2) ], velocity(object, xs) for object ∈ objects)
    pots = foldl((v1, v2) -> v1 + v2, potential(object, xs) for object ∈ objects)

    vels, pots
end

## 2D singularities
#============================================#

struct Singularity2D{T <: Real} <: AbstractLaplace
    strength :: T
    r        :: Point2D{T}
end

# Getters (named so they do not clash with the x, y evaluation coordinates below)
strength(s :: Singularity2D) = s.strength
xpos(s :: Singularity2D) = s.r.x
ypos(s :: Singularity2D) = s.r.y

source_velocity(src :: Singularity2D, x, y) = SVector(strength(src) / (2π) * (x - xpos(src)) / ((x - xpos(src))^2 + (y - ypos(src))^2), strength(src) / (2π) * (y - ypos(src)) / ((x - xpos(src))^2 + (y - ypos(src))^2))
source_potential(src :: Singularity2D, x, y) = strength(src) / (4π) * log((x - xpos(src))^2 + (y - ypos(src))^2)
source_stream(src :: Singularity2D, x, y) = strength(src) / (2π) * atan(y - ypos(src), x - xpos(src))

doublet_velocity(dub :: Singularity2D, x, y) = SVector(strength(dub) / (2π) * ((x - xpos(dub))^2 - (y - ypos(dub))^2) / ((x - xpos(dub))^2 + (y - ypos(dub))^2)^2, - strength(dub) / (2π) * 2 * (x - xpos(dub)) * (y - ypos(dub)) / ((x - xpos(dub))^2 + (y - ypos(dub))^2)^2)
doublet_potential(dub :: Singularity2D, x, y) = -strength(dub) / (2π) * (y - ypos(dub)) / ((x - xpos(dub))^2 + (y - ypos(dub))^2)
doublet_stream(dub :: Singularity2D, x, y) = -strength(dub) / (2π) * (y - ypos(dub)) / ((x - xpos(dub))^2 + (y - ypos(dub))^2)

vortex_velocity(vor :: Singularity2D, x, y) = SVector(-strength(vor) / (2π) * (y - ypos(vor)) / ((x - xpos(vor))^2 + (y - ypos(vor))^2), strength(vor) / (2π) * (x - xpos(vor)) / ((x - xpos(vor))^2 + (y - ypos(vor))^2))
vortex_potential(vor :: Singularity2D, x, y) = strength(vor) / (2π) * atan(y - ypos(vor), x - xpos(vor))
vortex_stream(vor :: Singularity2D, x, y) = -strength(vor) / (4π) * log((x - xpos(vor))^2 + (y - ypos(vor))^2)

struct Uniform2D{T <: Real} <: AbstractLaplace
    magnitude :: T
    angle     :: T

    Uniform2D{T}(mag, ang) where T <: Real = new(mag, deg2rad(ang))
end

magnitude(uni :: Uniform2D) = uni.magnitude
angle(uni :: Uniform2D) = uni.angle

Uniform2D(mag :: T, ang :: T) where T <: Real = Uniform2D{T}(mag, ang)
Uniform2D(mag, ang) = Uniform2D(promote(mag, ang)...)
Uniform2D(velocity) = Uniform2D((sqrt ∘ sum)(velocity.^2), atan(velocity[2], velocity[1])) Uniform2D(; angle) = Uniform2D(1., angle) velocity(uni :: Uniform2D) = let (sa, ca) = sincos(uni.angle); uni.magnitude * SVector(ca, sa) end potential(uni :: Uniform2D, x, y) = uni.magnitude * (x * cos(uni.angle) + y * sin(uni.angle)) stream(uni :: Uniform2D, x, y) = uni.magnitude * (y * cos(uni.angle) - x * sin(uni.angle)) ## 3D singularities #============================================# # struct Singularity3D{T <: Real} <: AbstractLaplace # str :: T # r :: Point3D{T} # end # source_velocity(src :: Source2D, x, y, z) # source_potential(src :: Source2D, x, y, z) # source_stream(src :: Source2D, x, y, z) struct DoubletLine3D{T <: Real} <: AbstractLaplace strength :: T r1 :: SVector{3,T} r2 :: SVector{3,T} eta :: SVector{3,T} end function doublet_influence(r, φ, η) r_φ = dot(r, φ) r_η = dot(r, η) den = (norm(r)^2 - dot(r, φ)^2 ) * r ((r_φ * η + r_η * φ) * den - (den * r / norm(r)^2 + 2 * (r - r_φ * η) * r) * r_φ * r_η) / den^2 end function velocity(src :: DoubletLine3D) l = normalize(src.r2 - src.r1) f(x) = src.strength / 4π * (doublet_influence(x - src.r2, l, src.eta) - doublet_influence(x - src.r1, l, src.eta)) end ## Freestream #============================================# """ freestream_to_cartesian(r, θ, φ) Convert freestream flow (spherical polar) coordinates to Cartesian coordinates. """ freestream_to_cartesian(r, θ, φ) = r * SVector(cos(θ) * cos(φ), -sin(φ), sin(θ) * cos(φ)) """ cartesian_to_freestream(U) Convert Cartesian coordinates to freestream (spherical polar) flow coordinates. """ cartesian_to_freestream(U) = SVector(norm(U), -atand(U[3], U[1]), -atand(U[2], √(U[1]^2 + U[3]^2))) # 2D versions cartesian_to_freestream(u, w) = magnitude(u, w), angle(u, w) freestream_to_cartesian(V, α) = V * cos(α), V * sin(α) end
module AKS.Rational where open import AKS.Rational.Base public open import AKS.Rational.Properties public
module Test.Bits8

import Data.Prim.Bits8
import Data.SOP
import Hedgehog
import Test.RingLaws

allBits8 : Gen Bits8
allBits8 = bits8 (linear 0 MaxBits8)

gt0 : Gen Bits8
gt0 = bits8 (linear 1 MaxBits8)

gt1 : Gen Bits8
gt1 = bits8 (linear 2 MaxBits8)

prop_ltMax : Property
prop_ltMax = property $ do
  b8 <- forAll allBits8
  (b8 <= MaxBits8) === True

prop_ltMin : Property
prop_ltMin = property $ do
  b8 <- forAll allBits8
  (b8 >= MinBits8) === True

prop_comp : Property
prop_comp = property $ do
  [m,n] <- forAll $ np [allBits8, allBits8]
  toOrdering (comp m n) === compare m n

prop_mod : Property
prop_mod = property $ do
  [n,d] <- forAll $ np [allBits8, gt0]
  compare (n `mod` d) d === LT

prop_div : Property
prop_div = property $ do
  [n,d] <- forAll $ np [gt0, gt1]
  compare (n `div` d) n === LT

prop_divMod : Property
prop_divMod = property $ do
  [n,d] <- forAll $ np [allBits8, gt0]
  let x = n `div` d
      r = n `mod` d
  n === x * d + r

export
props : Group
props = MkGroup "Bits8" $
  [ ("prop_ltMax", prop_ltMax)
  , ("prop_ltMin", prop_ltMin)
  , ("prop_comp", prop_comp)
  , ("prop_mod", prop_mod)
  , ("prop_div", prop_div)
  , ("prop_divMod", prop_divMod)
  ] ++ ringProps allBits8
{-# OPTIONS --cubical-compatible #-} variable @0 A : Set data D : Set₁ where c : A → D
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 17 01:05:29 2019

@author: Guilherme
"""

import numpy as np

def baskara(a, b, c):
    # discriminant; the 0j term makes np.sqrt return complex roots when d < 0
    d = b**2 - 4*a*c + 0.0j
    x = (-b + np.sqrt(d)) / (2*a)
    y = (-b - np.sqrt(d)) / (2*a)
    return (x, y)
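# A quick sanity check of the routine. The example coefficients below are
# chosen here for illustration; they are not part of the original file.
if __name__ == "__main__":
    print(baskara(1, -3, 2))  # roots of x**2 - 3x + 2 = 0: expect ((2+0j), (1+0j))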
State Before: 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b r : ℝ hx : x ∈ closedBall z r hy : y ∈ closedBall z r hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 ⊢ a • x + b • y ∈ ball z r State After: case inl 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b : ℝ hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 hx : x ∈ closedBall z 0 hy : y ∈ closedBall z 0 ⊢ a • x + b • y ∈ ball z 0 case inr 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b r : ℝ hx : x ∈ closedBall z r hy : y ∈ closedBall z r hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 hr : r ≠ 0 ⊢ a • x + b • y ∈ ball z r Tactic: rcases eq_or_ne r 0 with (rfl | hr) State Before: case inl 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b : ℝ hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 hx : x ∈ closedBall z 0 hy : y ∈ closedBall z 0 ⊢ a • x + b • y ∈ ball z 0 State After: case inl 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b : ℝ hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 hx : x = z hy : y = z ⊢ a • x + b • y ∈ ball z 0 Tactic: rw [closedBall_zero, mem_singleton_iff] at hx hy State Before: case inl 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b : ℝ hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 hx : x = z hy : y = z ⊢ a • x + b • y ∈ ball z 0 State After: no goals Tactic: exact (hne (hx.trans hy.symm)).elim State Before: case inr 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b r : ℝ hx : x ∈ closedBall z r hy : y ∈ closedBall z r hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 hr : r ≠ 0 ⊢ a • x + b • y ∈ ball z r State After: case inr 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b r : ℝ hx : x ∈ closedBall z r hy : y ∈ closedBall z r hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 hr : r ≠ 0 ⊢ a • x + b • y ∈ interior (closedBall z r) Tactic: simp only [← interior_closedBall _ hr] at hx hy⊢ State Before: case inr 𝕜 : Type ?u.41739 E : Type u_1 inst✝⁴ : NormedLinearOrderedField 𝕜 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E inst✝¹ : NormedSpace ℝ E inst✝ : StrictConvexSpace ℝ E x y z : E a b r : ℝ hx : x ∈ closedBall z r hy : y ∈ closedBall z r hne : x ≠ y ha : 0 < a hb : 0 < b hab : a + b = 1 hr : r ≠ 0 ⊢ a • x + b • y ∈ interior (closedBall z r) State After: no goals Tactic: exact strictConvex_closedBall ℝ z r hx hy hne ha hb hab
#include <stdio.h> #include <gsl/gsl_qrng.h> int main (void) { int i; gsl_qrng * q = gsl_qrng_alloc (gsl_qrng_sobol, 2); for (i = 0; i < 1024; i++) { double v[2]; gsl_qrng_get (q, v); printf ("%.5f %.5f\n", v[0], v[1]); } gsl_qrng_free (q); return 0; }
using FastHenryHelper
using Test

# compare what an element shows to string
function testelement(e::Element, verified::String)
  ebuf = IOBuffer()
  show(ebuf,e)
  @test String(take!(ebuf)) == verified
end

function testelementdebug(e::Element, verified::String)
  ebuf = IOBuffer()
  show(ebuf,e)
  debugio = open("debug.txt","w")
  println(debugio, String(take!(ebuf)))
  close(debugio)
  verifiedio = open("verified.txt","w")
  println(verifiedio, verified)
  close(verifiedio)
  warn("testelementdebug called")
end

include("testelement.jl")
include("groupsfortests.jl")
include("testcontext.jl")
include("testtraversetree.jl")
include("testtransform.jl")
#include("testmesh.jl")
include("testplot.jl")
include("testgroups.jl")
include("testutil.jl")
lemma interior_subset: "interior S \<subseteq> S"
'''Multimethods for fast Hankel transforms.

'''
import numpy as np

from ._basic import _dispatch
from ._fftlog import fht as _fht
from ._fftlog import ifht as _ifht
from scipy._lib.uarray import Dispatchable

__all__ = ['fht', 'ifht']


@_dispatch
def fht(a, dln, mu, offset=0.0, bias=0.0):
    """fht multimethod."""
    return (Dispatchable(a, np.ndarray),)


@_dispatch
def ifht(A, dln, mu, offset=0.0, bias=0.0):
    """ifht multimethod."""
    return (Dispatchable(A, np.ndarray),)


# copy over the docstrings
fht.__doc__ = _fht.__doc__
ifht.__doc__ = _ifht.__doc__
[GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasDerivWithinAt c c' s x hf : HasDerivWithinAt f f' s x ⊢ HasDerivWithinAt (fun y => c y • f y) (c x • f' + c' • f x) s x [PROOFSTEP] simpa using (HasFDerivWithinAt.smul hc hf).hasDerivWithinAt [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasDerivAt c c' x hf : HasDerivAt f f' x ⊢ HasDerivAt (fun y => c y • f y) (c x • f' + c' • f x) x [PROOFSTEP] rw [← hasDerivWithinAt_univ] at * [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasDerivWithinAt c c' univ x hf : HasDerivWithinAt f f' univ x ⊢ HasDerivWithinAt (fun y => c y • f y) (c x • f' + c' • f x) univ x [PROOFSTEP] exact hc.smul hf [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasStrictDerivAt c c' x hf : HasStrictDerivAt f f' x ⊢ HasStrictDerivAt (fun y => c y • f y) (c x • f' + c' • f x) x [PROOFSTEP] simpa using (hc.smul hf).hasStrictDerivAt [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f✝ f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasStrictDerivAt c c' x f : F ⊢ HasStrictDerivAt (fun y => c y • f) (c' • f) x [PROOFSTEP] have := hc.smul (hasStrictDerivAt_const x f) [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f✝ f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasStrictDerivAt c c' x f : F this : HasStrictDerivAt (fun y => c y • f) (c x • 0 + c' • f) x ⊢ HasStrictDerivAt (fun y => c y • f) (c' • f) x [PROOFSTEP] rwa [smul_zero, zero_add] at this [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v 
inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f✝ f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasDerivWithinAt c c' s x f : F ⊢ HasDerivWithinAt (fun y => c y • f) (c' • f) s x [PROOFSTEP] have := hc.smul (hasDerivWithinAt_const x s f) [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f✝ f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasDerivWithinAt c c' s x f : F this : HasDerivWithinAt (fun y => c y • f) (c x • 0 + c' • f) s x ⊢ HasDerivWithinAt (fun y => c y • f) (c' • f) s x [PROOFSTEP] rwa [smul_zero, zero_add] at this [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f✝ f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasDerivAt c c' x f : F ⊢ HasDerivAt (fun y => c y • f) (c' • f) x [PROOFSTEP] rw [← hasDerivWithinAt_univ] at * [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f✝ f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝³ : NontriviallyNormedField 𝕜' inst✝² : NormedAlgebra 𝕜 𝕜' inst✝¹ : NormedSpace 𝕜' F inst✝ : IsScalarTower 𝕜 𝕜' F c : 𝕜 → 𝕜' c' : 𝕜' hc : HasDerivWithinAt c c' univ x f : F ⊢ HasDerivWithinAt (fun y => c y • f) (c' • f) univ x [PROOFSTEP] exact hc.smul_const f [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 R : Type u_1 inst✝³ : Semiring R inst✝² : Module R F inst✝¹ : SMulCommClass 𝕜 R F inst✝ : ContinuousConstSMul R F c : R hf : HasStrictDerivAt f f' x ⊢ HasStrictDerivAt (fun y => c • f y) (c • f') x [PROOFSTEP] simpa using (hf.const_smul c).hasStrictDerivAt [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 R : Type u_1 inst✝³ : Semiring R inst✝² : Module R F inst✝¹ : SMulCommClass 𝕜 R F inst✝ : ContinuousConstSMul R F c : R hf : HasDerivAtFilter f f' x L ⊢ HasDerivAtFilter (fun y => c • f y) (c • f') x L [PROOFSTEP] simpa using (hf.const_smul c).hasDerivAtFilter [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : 
NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasDerivWithinAt c c' s x hd : HasDerivWithinAt d d' s x ⊢ HasDerivWithinAt (fun y => c y * d y) (c' * d x + c x * d') s x [PROOFSTEP] have := (HasFDerivWithinAt.mul' hc hd).hasDerivWithinAt [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasDerivWithinAt c c' s x hd : HasDerivWithinAt d d' s x this : HasDerivWithinAt (fun y => c y * d y) (↑(c x • smulRight 1 d' + smulRight (smulRight 1 c') (d x)) 1) s x ⊢ HasDerivWithinAt (fun y => c y * d y) (c' * d x + c x * d') s x [PROOFSTEP] rwa [ContinuousLinearMap.add_apply, ContinuousLinearMap.smul_apply, ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.one_apply, one_smul, one_smul, add_comm] at this [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasDerivAt c c' x hd : HasDerivAt d d' x ⊢ HasDerivAt (fun y => c y * d y) (c' * d x + c x * d') x [PROOFSTEP] rw [← hasDerivWithinAt_univ] at * [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasDerivWithinAt c c' univ x hd : HasDerivWithinAt d d' univ x ⊢ HasDerivWithinAt (fun y => c y * d y) (c' * d x + c x * d') univ x [PROOFSTEP] exact hc.mul hd [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasStrictDerivAt c c' x hd : HasStrictDerivAt d d' x ⊢ HasStrictDerivAt (fun y => c y * d y) (c' * d x + c x * d') x [PROOFSTEP] have := (HasStrictFDerivAt.mul' hc hd).hasStrictDerivAt [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasStrictDerivAt c c' x hd : HasStrictDerivAt d d' x this : HasStrictDerivAt (fun y => c y * d y) (↑(c x • smulRight 1 d' + smulRight (smulRight 1 c') (d x)) 1) x ⊢ 
HasStrictDerivAt (fun y => c y * d y) (c' * d x + c x * d') x [PROOFSTEP] rwa [ContinuousLinearMap.add_apply, ContinuousLinearMap.smul_apply, ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.one_apply, one_smul, one_smul, add_comm] at this [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d✝ : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasDerivWithinAt c c' s x d : 𝔸 ⊢ HasDerivWithinAt (fun y => c y * d) (c' * d) s x [PROOFSTEP] convert hc.mul (hasDerivWithinAt_const x s d) using 1 [GOAL] case h.e'_7 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d✝ : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasDerivWithinAt c c' s x d : 𝔸 ⊢ c' * d = c' * d + c x * 0 [PROOFSTEP] rw [mul_zero, add_zero] [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d✝ : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasDerivAt c c' x d : 𝔸 ⊢ HasDerivAt (fun y => c y * d) (c' * d) x [PROOFSTEP] rw [← hasDerivWithinAt_univ] at * [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d✝ : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasDerivWithinAt c c' univ x d : 𝔸 ⊢ HasDerivWithinAt (fun y => c y * d) (c' * d) univ x [PROOFSTEP] exact hc.mul_const d [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c✝ d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' c : 𝕜 ⊢ HasDerivAt (fun x => x * c) c x [PROOFSTEP] simpa only [one_mul] using (hasDerivAt_id' x).mul_const c [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d✝ : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasStrictDerivAt c c' x d : 𝔸 ⊢ HasStrictDerivAt (fun y => c y * d) (c' * d) x 
[PROOFSTEP] convert hc.mul (hasStrictDerivAt_const x d) using 1 [GOAL] case h.e'_7 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d✝ : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hc : HasStrictDerivAt c c' x d : 𝔸 ⊢ c' * d = c' * d + c x * 0 [PROOFSTEP] rw [mul_zero, add_zero] [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v✝ : 𝕜 → 𝕜' v : 𝕜' ⊢ deriv (fun y => u y * v) x = deriv u x * v [PROOFSTEP] by_cases hu : DifferentiableAt 𝕜 u x [GOAL] case pos 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v✝ : 𝕜 → 𝕜' v : 𝕜' hu : DifferentiableAt 𝕜 u x ⊢ deriv (fun y => u y * v) x = deriv u x * v [PROOFSTEP] exact deriv_mul_const hu v [GOAL] case neg 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v✝ : 𝕜 → 𝕜' v : 𝕜' hu : ¬DifferentiableAt 𝕜 u x ⊢ deriv (fun y => u y * v) x = deriv u x * v [PROOFSTEP] rw [deriv_zero_of_not_differentiableAt hu, zero_mul] [GOAL] case neg 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v✝ : 𝕜 → 𝕜' v : 𝕜' hu : ¬DifferentiableAt 𝕜 u x ⊢ deriv (fun y => u y * v) x = 0 [PROOFSTEP] rcases eq_or_ne v 0 with (rfl | hd) [GOAL] case neg.inl 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' hu : ¬DifferentiableAt 𝕜 u x ⊢ deriv (fun y => u y * 0) x = 0 [PROOFSTEP] simp only [mul_zero, deriv_const] [GOAL] case neg.inr 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' 
f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v✝ : 𝕜 → 𝕜' v : 𝕜' hu : ¬DifferentiableAt 𝕜 u x hd : v ≠ 0 ⊢ deriv (fun y => u y * v) x = 0 [PROOFSTEP] refine' deriv_zero_of_not_differentiableAt (mt (fun H => _) hu) [GOAL] case neg.inr 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u v✝ : 𝕜 → 𝕜' v : 𝕜' hu : ¬DifferentiableAt 𝕜 u x hd : v ≠ 0 H : DifferentiableAt 𝕜 (fun y => u y * v) x ⊢ DifferentiableAt 𝕜 u x [PROOFSTEP] simpa only [mul_inv_cancel_right₀ hd] using H.mul_const v⁻¹ [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c✝ d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' c : 𝔸 hd : HasDerivWithinAt d d' s x ⊢ HasDerivWithinAt (fun y => c * d y) (c * d') s x [PROOFSTEP] convert (hasDerivWithinAt_const x s c).mul hd using 1 [GOAL] case h.e'_7 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c✝ d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' c : 𝔸 hd : HasDerivWithinAt d d' s x ⊢ c * d' = 0 * d x + c * d' [PROOFSTEP] rw [zero_mul, zero_add] [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c✝ d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' c : 𝔸 hd : HasDerivAt d d' x ⊢ HasDerivAt (fun y => c * d y) (c * d') x [PROOFSTEP] rw [← hasDerivWithinAt_univ] at * [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c✝ d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' c : 𝔸 hd : HasDerivWithinAt d d' univ x ⊢ HasDerivWithinAt (fun y => c * d y) (c * d') univ x [PROOFSTEP] exact hd.const_mul c [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : 
NormedAlgebra 𝕜 𝔸 c✝ d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' c : 𝔸 hd : HasStrictDerivAt d d' x ⊢ HasStrictDerivAt (fun y => c * d y) (c * d') x [PROOFSTEP] convert (hasStrictDerivAt_const _ _).mul hd using 1 [GOAL] case h.e'_7 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c✝ d : 𝕜 → 𝔸 c' d' : 𝔸 u v : 𝕜 → 𝕜' c : 𝔸 hd : HasStrictDerivAt d d' x ⊢ c * d' = 0 * d x + c * d' [PROOFSTEP] rw [zero_mul, zero_add] [GOAL] 𝕜 : Type u inst✝⁸ : NontriviallyNormedField 𝕜 F : Type v inst✝⁷ : NormedAddCommGroup F inst✝⁶ : NormedSpace 𝕜 F E : Type w inst✝⁵ : NormedAddCommGroup E inst✝⁴ : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 𝔸 : Type u_2 inst✝³ : NormedField 𝕜' inst✝² : NormedRing 𝔸 inst✝¹ : NormedAlgebra 𝕜 𝕜' inst✝ : NormedAlgebra 𝕜 𝔸 c d : 𝕜 → 𝔸 c' d' : 𝔸 u✝ v : 𝕜 → 𝕜' u : 𝕜' ⊢ deriv (fun y => u * v y) x = u * deriv v x [PROOFSTEP] simp only [mul_comm u, deriv_mul_const_field] [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝¹ : NontriviallyNormedField 𝕜' inst✝ : NormedAlgebra 𝕜 𝕜' c d✝ : 𝕜 → 𝕜' c' d' : 𝕜' hc : HasDerivAt c c' x d : 𝕜' ⊢ HasDerivAt (fun x => c x / d) (c' / d) x [PROOFSTEP] simpa only [div_eq_mul_inv] using hc.mul_const d⁻¹ [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝¹ : NontriviallyNormedField 𝕜' inst✝ : NormedAlgebra 𝕜 𝕜' c d✝ : 𝕜 → 𝕜' c' d' : 𝕜' hc : HasDerivWithinAt c c' s x d : 𝕜' ⊢ HasDerivWithinAt (fun x => c x / d) (c' / d) s x [PROOFSTEP] simpa only [div_eq_mul_inv] using hc.mul_const d⁻¹ [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝¹ : NontriviallyNormedField 𝕜' inst✝ : NormedAlgebra 𝕜 𝕜' c d✝ : 𝕜 → 𝕜' c' d' : 𝕜' hc : HasStrictDerivAt c c' x d : 𝕜' ⊢ HasStrictDerivAt (fun x => c x / d) (c' / d) x [PROOFSTEP] simpa only [div_eq_mul_inv] using hc.mul_const d⁻¹ [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝¹ : NontriviallyNormedField 𝕜' inst✝ : NormedAlgebra 𝕜 𝕜' c d✝ : 𝕜 → 𝕜' c' d' : 𝕜' hc : DifferentiableWithinAt 𝕜 c s x d : 𝕜' hxs : UniqueDiffWithinAt 𝕜 s x ⊢ derivWithin (fun x => c x / d) s x = derivWithin c s x / d [PROOFSTEP] simp [div_eq_inv_mul, derivWithin_const_mul, hc, hxs] [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' 
f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 𝕜' : Type u_1 inst✝¹ : NontriviallyNormedField 𝕜' inst✝ : NormedAlgebra 𝕜 𝕜' c d✝ : 𝕜 → 𝕜' c' d' d : 𝕜' ⊢ deriv (fun x => c x / d) x = deriv c x / d [PROOFSTEP] simp only [div_eq_mul_inv, deriv_mul_const_field] [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasStrictDerivAt c c' x hd : HasStrictDerivAt d d' x ⊢ HasStrictDerivAt (fun y => comp (c y) (d y)) (comp c' (d x) + comp (c x) d') x [PROOFSTEP] have := (hc.hasStrictFDerivAt.clm_comp hd.hasStrictFDerivAt).hasStrictDerivAt [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasStrictDerivAt c c' x hd : HasStrictDerivAt d d' x this : HasStrictDerivAt (fun y => comp (c y) (d y)) (↑(comp (↑(compL 𝕜 E F G) (c x)) (smulRight 1 d') + comp (↑(ContinuousLinearMap.flip (compL 𝕜 E F G)) (d x)) (smulRight 1 c')) 1) x ⊢ HasStrictDerivAt (fun y => comp (c y) (d y)) (comp c' (d x) + comp (c x) d') x [PROOFSTEP] rwa [add_apply, comp_apply, comp_apply, smulRight_apply, smulRight_apply, one_apply, one_smul, one_smul, add_comm] at this [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasDerivWithinAt c c' s x hd : HasDerivWithinAt d d' s x ⊢ HasDerivWithinAt (fun y => comp (c y) (d y)) (comp c' (d x) + comp (c x) d') s x [PROOFSTEP] have := (hc.hasFDerivWithinAt.clm_comp hd.hasFDerivWithinAt).hasDerivWithinAt [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasDerivWithinAt c c' s x hd : HasDerivWithinAt d d' s x this : HasDerivWithinAt (fun y => comp (c y) (d y)) (↑(comp (↑(compL 𝕜 E F G) (c x)) (smulRight 1 d') + comp (↑(ContinuousLinearMap.flip (compL 𝕜 E F G)) (d x)) (smulRight 1 c')) 1) s x ⊢ HasDerivWithinAt (fun y => comp (c y) (d y)) (comp c' (d x) + comp (c x) d') s x [PROOFSTEP] rwa [add_apply, comp_apply, comp_apply, smulRight_apply, smulRight_apply, one_apply, one_smul, one_smul, add_comm] at this [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 
inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasDerivAt c c' x hd : HasDerivAt d d' x ⊢ HasDerivAt (fun y => comp (c y) (d y)) (comp c' (d x) + comp (c x) d') x [PROOFSTEP] rw [← hasDerivWithinAt_univ] at * [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasDerivWithinAt c c' univ x hd : HasDerivWithinAt d d' univ x ⊢ HasDerivWithinAt (fun y => comp (c y) (d y)) (comp c' (d x) + comp (c x) d') univ x [PROOFSTEP] exact hc.clm_comp hd [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasStrictDerivAt c c' x hu : HasStrictDerivAt u u' x ⊢ HasStrictDerivAt (fun y => ↑(c y) (u y)) (↑c' (u x) + ↑(c x) u') x [PROOFSTEP] have := (hc.hasStrictFDerivAt.clm_apply hu.hasStrictFDerivAt).hasStrictDerivAt [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasStrictDerivAt c c' x hu : HasStrictDerivAt u u' x this : HasStrictDerivAt (fun y => ↑(c y) (u y)) (↑(comp (c x) (smulRight 1 u') + ↑(ContinuousLinearMap.flip (smulRight 1 c')) (u x)) 1) x ⊢ HasStrictDerivAt (fun y => ↑(c y) (u y)) (↑c' (u x) + ↑(c x) u') x [PROOFSTEP] rwa [add_apply, comp_apply, flip_apply, smulRight_apply, smulRight_apply, one_apply, one_smul, one_smul, add_comm] at this [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasDerivWithinAt c c' s x hu : HasDerivWithinAt u u' s x ⊢ HasDerivWithinAt (fun y => ↑(c y) (u y)) (↑c' (u x) + ↑(c x) u') s x [PROOFSTEP] have := (hc.hasFDerivWithinAt.clm_apply hu.hasFDerivWithinAt).hasDerivWithinAt [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasDerivWithinAt c c' s x hu : HasDerivWithinAt u u' s x this : HasDerivWithinAt (fun y => ↑(c y) (u y)) (↑(comp (c x) (smulRight 1 u') + ↑(ContinuousLinearMap.flip (smulRight 
1 c')) (u x)) 1) s x ⊢ HasDerivWithinAt (fun y => ↑(c y) (u y)) (↑c' (u x) + ↑(c x) u') s x [PROOFSTEP] rwa [add_apply, comp_apply, flip_apply, smulRight_apply, smulRight_apply, one_apply, one_smul, one_smul, add_comm] at this [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasDerivAt c c' x hu : HasDerivAt u u' x ⊢ HasDerivAt (fun y => ↑(c y) (u y)) (↑c' (u x) + ↑(c x) u') x [PROOFSTEP] have := (hc.hasFDerivAt.clm_apply hu.hasFDerivAt).hasDerivAt [GOAL] 𝕜 : Type u inst✝⁶ : NontriviallyNormedField 𝕜 F : Type v inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F E : Type w inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E f f₀ f₁ g : 𝕜 → F f' f₀' f₁' g' : F x : 𝕜 s t : Set 𝕜 L L₁ L₂ : Filter 𝕜 G : Type u_1 inst✝¹ : NormedAddCommGroup G inst✝ : NormedSpace 𝕜 G c : 𝕜 → F →L[𝕜] G c' : F →L[𝕜] G d : 𝕜 → E →L[𝕜] F d' : E →L[𝕜] F u : 𝕜 → F u' : F hc : HasDerivAt c c' x hu : HasDerivAt u u' x this : HasDerivAt (fun y => ↑(c y) (u y)) (↑(comp (c x) (smulRight 1 u') + ↑(ContinuousLinearMap.flip (smulRight 1 c')) (u x)) 1) x ⊢ HasDerivAt (fun y => ↑(c y) (u y)) (↑c' (u x) + ↑(c x) u') x [PROOFSTEP] rwa [add_apply, comp_apply, flip_apply, smulRight_apply, smulRight_apply, one_apply, one_smul, one_smul, add_comm] at this
State Before: α : Type u_1 β : Type u_2 inst✝¹ : Preorder β inst✝ : DecidableRel fun x x_1 => x < x_1 f✝ : α → β l✝ : List α o : Option α a✝ m : α f : α → β a : α l : List α ⊢ argmax f (l ++ [a]) = Option.casesOn (argmax f l) (some a) fun c => if f c < f a then some a else some c State After: α : Type u_1 β : Type u_2 inst✝¹ : Preorder β inst✝ : DecidableRel fun x x_1 => x < x_1 f✝ : α → β l✝ : List α o : Option α a✝ m : α f : α → β a : α l : List α ⊢ foldl (argAux fun b c => f c < f b) none (l ++ [a]) = Option.casesOn (foldl (argAux fun b c => f c < f b) none l) (some a) fun c => if f c < f a then some a else some c Tactic: rw [argmax, argmax] State Before: α : Type u_1 β : Type u_2 inst✝¹ : Preorder β inst✝ : DecidableRel fun x x_1 => x < x_1 f✝ : α → β l✝ : List α o : Option α a✝ m : α f : α → β a : α l : List α ⊢ foldl (argAux fun b c => f c < f b) none (l ++ [a]) = Option.casesOn (foldl (argAux fun b c => f c < f b) none l) (some a) fun c => if f c < f a then some a else some c State After: no goals Tactic: simp [argAux]
!Copyright (c) 2013 Farhan J. Khan
!THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
!IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
!FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
!COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
!IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
!CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

!Main Program
!To compile:
!$ gfortran -c factory.f90
!$ gfortran factory_main.f90 factory.o -o factory_main
program factory_main
    !Shape Factory module
    use factory
    !Allocatable shape address
    class(shape), allocatable :: shapeInstance
    !!! allocate(shapeInstance, source=shape("shape")) !!!
    !!! Error: Can't construct ABSTRACT type 'shape' !!!
    !Get a rectangle and verify by printing
    call fromShapeFactoryGetShape("Rectangle", shapeInstance)
    call shapeInstance%printShape
    deallocate(shapeInstance)
    !Get a circle and verify by printing
    call fromShapeFactoryGetShape("Circle", shapeInstance)
    call shapeInstance%printShape
    deallocate(shapeInstance)
end program factory_main
ManagerSilverstone appears to be management for Silverstone Apartments.

20070328 17:57:00 Welcome to the Wiki/Business Owner Welcome to the Wiki. There is a lot of value in Importance of using your RealName using your real name when you edit. Users/JasonAller

20100921 11:38:40 I switched the email macro on the apartments page back to the {{{Mailto(silverstone AT capitalvalleyinvestments DOT com)}}} format. The email macro makes it appear correctly on the page, but protects you from spam by hiding the email address from bots that search the web for normal email addresses (or at least, that's my understanding). Anyone viewing the page will still see the correct email address, and clicking on it will open an email window like normal. Users/TomGarberson
Welcome to Conway's Car Care! Your first choice for most vehicle repairs. Conway’s was founded in 2004 at the site that was formerly Paul’s Car Care which took care of the Rockwood area for many years. We have maintained Paul’s commitment to good work, fair prices and putting our customer’s first.
[STATEMENT] lemma "neg\<cdot>(neq\<cdot>a\<cdot>b) = eq\<cdot>a\<cdot>b" [PROOF STATE] proof (prove) goal (1 subgoal): 1. neg\<cdot>(neq\<cdot>a\<cdot>b) = eq\<cdot>a\<cdot>b [PROOF STEP] by auto
# Spectral Estimation of Random Signals

*This jupyter/Python notebook is part of a [collection of notebooks](../index.ipynb) in the masters module [Digital Signal Processing](http://www.int.uni-rostock.de/Digitale-Signalverarbeitung.48.0.html), Communications Engineering, Universität Rostock. Please direct questions and suggestions to <mailto:[email protected]>.*

## Parametric Methods

### Motivation

Non-parametric methods for the estimation of the power spectral density (PSD), like the [periodogram](periodogram.ipynb) or [Welch's method](welch_method.ipynb), don't rely on a-priori information about the process generating the random signal. Often some a-priori information is available that can be used to formulate a parametric model of the random process. The goal is then to estimate these parameters in order to characterize the random signal. Such techniques are known as *[parametric methods](https://en.wikipedia.org/wiki/Spectral_density_estimation#Parametric_estimation)* or *model-based methods*. The incorporation of a-priori knowledge can improve the estimation of the PSD significantly, as long as the underlying model is a valid description of the random process. The parametric model of the random process can also be used to generate random signals with a desired PSD.

### Process Models

For the remainder we assume wide-sense stationary real-valued random processes. For many applications the process can be modeled by a linear-time invariant (LTI) system, where $n[k]$ is [white noise](../random_signals/white_noise.ipynb) and $H(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ denotes the transfer function of the system. In general, the random signal $x[k]$ will be correlated as a result of the processing of the uncorrelated input signal $n[k]$ by the system $H(\mathrm{e}^{\,\mathrm{j}\,\Omega})$. Due to the white noise assumption $\Phi_{nn}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = N_0$, the PSD of the random process is given as

\begin{equation}
\Phi_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = N_0 \cdot | H(\mathrm{e}^{\,\mathrm{j}\,\Omega}) |^2
\end{equation}

Parametric methods model the system $H(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ by a limited number of parameters. These parameters are then estimated from $x[k]$, providing an estimate $\hat{H}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ of the transfer function. This estimate is then used to calculate the desired estimate $\hat{\Phi}_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ of the PSD.

#### Autoregressive model

The [autoregressive](https://en.wikipedia.org/wiki/Autoregressive_model) (AR) model assumes a recursive system with a direct path. Its output relation is given as

\begin{equation}
x[k] = \sum_{n=1}^{N} a_n \cdot x[k-n] + n[k]
\end{equation}

where $a_n$ denote the coefficients of the recursive path and $N$ the order of the model. Its system function $H(z)$ is derived by $z$-transformation of the output relation

\begin{equation}
H(z) = \frac{1}{1 - \sum_{n=1}^{N} a_n z^{-n}}
\end{equation}

Hence, the AR model is a pole-only model of the system.

#### Moving average model

The [moving average](https://en.wikipedia.org/wiki/Moving-average_model) (MA) model assumes a non-recursive system. The output relation is given as

\begin{equation}
x[k] = \sum_{m=0}^{M-1} b_m \cdot n[k-m] = h[k] * n[k]
\end{equation}

with the impulse response of the system $h[k] = [ b_0, b_1, \dots, b_{M-1} ]$. The MA model is a finite impulse response (FIR) model of the random process. Its system function is given as

\begin{equation}
H(z) = \mathcal{Z} \{ h[k] \} = \sum_{m=0}^{M-1} b_m \; z^{-m}
\end{equation}
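Both models can be synthesized with a single call to `scipy.signal.lfilter`. The following minimal sketch is illustrative only (the coefficients $b_m$ and $a_n$ are chosen arbitrarily), but it mirrors the sign conventions of the two output relations above:

```python
import numpy as np
from scipy.signal import lfilter

# example coefficients, chosen arbitrarily for illustration
b = np.array([1, .5, .25])  # MA coefficients b_m
a = np.array([1, -1, .5])  # AR coefficients a_n (sign convention as above)

np.random.seed(0)
n = np.random.normal(size=1024)  # white noise n[k] with N_0 = 1

# MA model: x[k] = sum_m b_m * n[k-m], i.e. FIR filtering of n[k]
x_ma = lfilter(b, [1], n)
# AR model: all-pole filter H(z) = 1 / (1 - sum_n a_n z^-n)
x_ar = lfilter([1], np.insert(-a, 0, 1), n)
```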
#### Autoregressive moving average model

The [autoregressive moving average](https://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model) (ARMA) model is a combination of the AR and MA model. It constitutes a general linear process model. Its output relation is given as

\begin{equation}
x[k] = \sum_{n=1}^{N} a_n \cdot x[k-n] + \sum_{m=0}^{M-1} b_m \cdot n[k-m]
\end{equation}

Its system function reads

\begin{equation}
H(z) = \frac{\sum_{m=0}^{M-1} b_m \; z^{-m}}{1 - \sum_{n=1}^{N} a_n z^{-n}}
\end{equation}

### Parametric Spectral Estimation

The models above describe the synthesis of the samples $x[k]$ from the white noise $n[k]$. For spectral estimation only the random signal $x[k]$ is known and we are aiming at estimating the parameters of the model. This can be achieved by determining an analyzing system $G(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ that decorrelates the signal $x[k]$, such that its output $e[k]$ becomes white noise. Due to its desired operation, the filter $G(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ is also denoted as *whitening filter*. The optimal filter $G(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ is given by the inverse system $\frac{1}{H(\mathrm{e}^{\,\mathrm{j}\,\Omega})}$. Although $H(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ is in general not known, this implies that our linear process model of $H(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ also applies to $G(\mathrm{e}^{\,\mathrm{j}\,\Omega})$.

Various techniques have been developed to estimate the parameters of the filter $G(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ such that $e[k]$ becomes decorrelated, for instance by expressing the auto-correlation function (ACF) $\varphi_{xx}[\kappa]$ in terms of the model parameters and solving with respect to these. The underlying set of equations is known as the [Yule-Walker equations](https://en.wikipedia.org/wiki/Autoregressive_model#Yule-Walker_equations). Once the model parameters have been estimated, these can be used to calculate an estimate $\hat{G}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ of the analysis system. The desired estimate of the PSD is then given as

\begin{equation}
\hat{\Phi}_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \frac{\Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega})}{|\hat{G}(\mathrm{e}^{\,\mathrm{j}\,\Omega})|^2}
\end{equation}

where $\Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = N_0$ if $e[k]$ is white noise.
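For an AR($N$) model the Yule-Walker equations take a particularly simple form: multiplying the output relation by $x[k-\kappa]$ and taking expectations yields $\varphi_{xx}[\kappa] = \sum_{n=1}^{N} a_n \, \varphi_{xx}[\kappa-n]$ for $\kappa \geq 1$, a Toeplitz system of linear equations in the coefficients $a_n$, while $\kappa = 0$ gives $N_0 = \varphi_{xx}[0] - \sum_{n=1}^{N} a_n \, \varphi_{xx}[n]$. A minimal sketch of this procedure, assuming the biased sample estimator of the ACF and the sign convention of the AR model above:

```python
import numpy as np
from scipy.linalg import solve, toeplitz

def yule_walker_ar(x, N):
    '''Sketch: estimate AR(N) coefficients a_n and noise power N_0.'''
    x = x - np.mean(x)
    K = len(x)
    # biased sample estimate of the ACF phi_xx[kappa] for kappa = 0, ..., N
    phi = np.array([np.dot(x[:K - kappa], x[kappa:]) / K
                    for kappa in range(N + 1)])
    # Yule-Walker equations: toeplitz(phi[0:N]) @ a = phi[1:N+1]
    a = solve(toeplitz(phi[:N]), phi[1:])
    N0 = phi[0] - a @ phi[1:]  # estimated power of the white noise n[k]
    return a, N0
```

Up to the choice of the ACF estimator, the result coincides with `statsmodels.api.regression.yule_walker`, which is used in the example below.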
### Example

In the following example $n[k]$ is drawn from normally distributed white noise with $N_0 = 1$. The Yule-Walker equations are used to estimate the parameters of an AR model of $H(\mathrm{e}^{\,\mathrm{j}\,\Omega})$. The implementation provided by `statsmodels.api.regression.yule_walker` returns the estimated AR coefficients of the system $H(\mathrm{e}^{\,\mathrm{j}\,\Omega})$. These parameters are then used to numerically evaluate the estimated transfer function, resulting in $\hat{\Phi}_{xx}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = 1 \cdot \vert \hat{H}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \vert^2$.

```python
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import scipy.signal as sig

K = 4096  # length of random signal
N = 3  # order of AR model
a = np.array((1, -1, .5))  # coefficients of AR model

# generate random signal n[k]
np.random.seed(2)
n = np.random.normal(size=K)

# AR model for random signal x[k]
x = np.zeros(K)
for k in np.arange(3, K):
    x[k] = a[0]*x[k-1] + a[1]*x[k-2] + a[2]*x[k-3] + n[k]

# estimate AR parameters by Yule-Walker method
rho, sigma = sm.regression.yule_walker(x, order=N, method='mle')

# compute true and estimated transfer function
Om, H = sig.freqz(1, np.insert(-a, 0, 1), worN=256)
Om, He = sig.freqz(1, np.insert(-rho, 0, 1), worN=256)

# compute PSD by Welch method
Om2, Pxx = sig.welch(x, nperseg=511, return_onesided=True)

# plot PSDs
plt.figure(figsize=(10, 5))
plt.plot(Om, np.abs(H)**2, label=r'$\Phi_{xx}(e^{j\Omega})$')
plt.plot(Om2*2*np.pi, .5*Pxx, 'k-', alpha=.3,
         label=r'$\hat{\Phi}_{xx}(e^{j\Omega})$ (Welch)')
plt.plot(Om, np.abs(He)**2, label=r'$\hat{\Phi}_{xx}(e^{j\Omega})$ (parametric)')
plt.xlabel(r'$\Omega$')
plt.axis([0, np.pi, 0, 20])
plt.legend()
plt.grid()

# compute bias/variance of the estimators
print('Bias of the Welch estimate: \t\t {0:1.4f}'.format(
    np.mean(Pxx-np.abs(H)**2)))
print('Variance of the Welch estimate: \t {0:1.4f}'.format(np.var(Pxx)))
print('Bias of the parametric estimate: \t {0:1.4f}'.format(
    np.mean(np.abs(H)**2-np.abs(He)**2)))
print('Variance of the parametric estimate: \t {0:1.4f}'.format(
    np.var(np.abs(He)**2)))
```

    Bias of the Welch estimate: 		 2.8932
    Variance of the Welch estimate: 	 62.0748
    Bias of the parametric estimate: 	 -0.0291
    Variance of the parametric estimate: 	 15.3476

**Exercise**

* Change the order `N` of the AR model used for estimation by the Yule-Walker equations. What happens if the order is smaller or higher than the order of the true system? Why?
* Change the number of samples `K`. Is the estimator consistent?

Solution: Choosing the order of the estimated AR model differently from the true process results in a model mismatch with the consequence of potentially large deviations between the estimated PSD and the true PSD. These deviations are typically larger when choosing the order smaller, since the model then definitely does not fit the true process. However, choosing the order larger is typically not that problematic, since some of the AR coefficients are estimated as approximately zero due to the lower order of the process generating the random signal. Increasing the number of samples seems to lower the bias and variance of the estimated PSD; the estimator can therefore be assumed to be consistent.

**Copyright**

This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples*.
\chapter{Lipsum} Demo text but of course: TODO \lipsum[2-4]
State Before: α : Type u_1 a b c d : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hac : a ≤ b hbd : c ≤ d a0 : 0 < a d0 : 0 < d h : a * c = b * d ⊢ a = b ∧ c = d State After: case inl α : Type u_1 a c d : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hbd : c ≤ d a0 : 0 < a d0 : 0 < d hac : a ≤ a h : a * c = a * d ⊢ a = a ∧ c = d case inr α : Type u_1 a b c d : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hac✝ : a ≤ b hbd : c ≤ d a0 : 0 < a d0 : 0 < d h : a * c = b * d hac : a < b ⊢ a = b ∧ c = d Tactic: rcases hac.eq_or_lt with (rfl | hac) State Before: case inr α : Type u_1 a b c d : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hac✝ : a ≤ b hbd : c ≤ d a0 : 0 < a d0 : 0 < d h : a * c = b * d hac : a < b ⊢ a = b ∧ c = d State After: case inr.inl α : Type u_1 a b c : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hac✝ : a ≤ b a0 : 0 < a hac : a < b hbd : c ≤ c d0 : 0 < c h : a * c = b * c ⊢ a = b ∧ c = c case inr.inr α : Type u_1 a b c d : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hac✝ : a ≤ b hbd✝ : c ≤ d a0 : 0 < a d0 : 0 < d h : a * c = b * d hac : a < b hbd : c < d ⊢ a = b ∧ c = d Tactic: rcases eq_or_lt_of_le hbd with (rfl | hbd) State Before: case inr.inr α : Type u_1 a b c d : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hac✝ : a ≤ b hbd✝ : c ≤ d a0 : 0 < a d0 : 0 < d h : a * c = b * d hac : a < b hbd : c < d ⊢ a = b ∧ c = d State After: no goals Tactic: exact ((mul_lt_mul_of_pos_of_pos hac hbd a0 d0).ne h).elim State Before: case inl α : Type u_1 a c d : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hbd : c ≤ d a0 : 0 < a d0 : 0 < d hac : a ≤ a h : a * c = a * d ⊢ a = a ∧ c = d State After: no goals Tactic: exact ⟨rfl, (mul_left_cancel_iff_of_pos a0).mp h⟩ State Before: case inr.inl α : Type u_1 a b c : α inst✝⁵ : MulZeroClass α inst✝⁴ : PartialOrder α inst✝³ : PosMulStrictMono α inst✝² : MulPosStrictMono α inst✝¹ : PosMulMonoRev α inst✝ : MulPosMonoRev α hac✝ : a ≤ b a0 : 0 < a hac : a < b hbd : c ≤ c d0 : 0 < c h : a * c = b * c ⊢ a = b ∧ c = c State After: no goals Tactic: exact ⟨(mul_right_cancel_iff_of_pos d0).mp h, rfl⟩
(* Title: HOL/Auth/n_germanSymIndex_lemma_on_inv__23.thy Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences *) header{*The n_germanSymIndex Protocol Case Study*} theory n_germanSymIndex_lemma_on_inv__23 imports n_germanSymIndex_base begin section{*All lemmas on causal relation between inv__23 and some rule r*} lemma n_SendInvAckVsinv__23: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__23 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_RecvInvAckVsinv__23: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__23 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P1 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_RecvGntSVsinv__23: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__23 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const GntS)) (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_RecvGntEVsinv__23: assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and a2: "(\<exists> p__Inv2. 
p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s") proof - from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__23 p__Inv2" apply fastforce done have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done moreover { assume b1: "(i=p__Inv2)" have "?P3 s" apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv2) ''Cmd'')) (Const InvAck)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const GntE))))" in exI, auto) done then have "invHoldForRule s f r (invariants N)" by auto } moreover { assume b1: "(i~=p__Inv2)" have "?P2 s" proof(cut_tac a1 a2 b1, auto) qed then have "invHoldForRule s f r (invariants N)" by auto } ultimately show "invHoldForRule s f r (invariants N)" by satx qed lemma n_SendReqE__part__1Vsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_StoreVsinv__23: assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendGntSVsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendGntS i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvReqEVsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendInv__part__0Vsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendReqE__part__0Vsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendInv__part__1Vsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendReqSVsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_SendGntEVsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendGntE N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done lemma n_RecvReqSVsinv__23: assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__23 p__Inv2)" shows "invHoldForRule s f r (invariants N)" apply (rule noEffectOnRule, cut_tac a1 a2, auto) done end
c>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> c ACKNOWLEDGE THE USE OF THIS PACKAGE c WHEN YOU PUBLISH YOUR RESULTS !!! c>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> c c************ Sample main program #3 ************** c 1d Heisenberg antiferromagnet with 16 spins c Eigenvectors of excited states by lncv1 c********************************************* parameter (n=16,idim=12870,ibond=n) parameter (nvec=3,nbond=1) implicit real*8 (a-h,o-z) dimension E(4) dimension list1(idim),list2(2,0:2**15) dimension bondwt(ibond),ipair(2*ibond),zrtio(ibond) dimension wk(idim,2) dimension x(idim,nvec) c data bondwt/ibond*-1.0d0/ data zrtio/ibond*1.0d0/ data ipair/1,2, 2,3, 3,4, 4,5, 5,6, 6,7, 7,8, 8,9, & 9,10, 10,11, 11,12, 12,13, 13,14, 14,15, 15,16, 16,1/ iv=idim/3 call sz(n,idim,0.0d0,list1,list2) c- You may alternatively use szdy or sztn for faster processing - c call szdy(n,idim,0.0d0,list1,list2) c or c call sztn(n,idim,0.0d0,list1,list2) c------------------------------------------------------------ c c*** Eigenvalues call lnc1(n,idim,ipair,bondwt,zrtio,ibond, & nvec,iv,E,itr,wk,idim,list1,list2) print 100,e,itr 100 format(/' [Eigenvalues] '/2x,4f14.8 & /' [Iteration number]'/i8) c c*** Eigenvectors call lncv1(n,idim,ipair,bondwt,zrtio,ibond, & nvec,iv,x,itr,wk,idim,list1,list2) print *,'[Eigenvector components (selected)]' print 120,(x(j,nvec),j=13,idim,idim/20) 120 format(4d18.9) c c*** Precision check and correlation functions call check1(n,idim,ipair,bondwt,zrtio,ibond, & x(1,nvec),wk,Hexpec,list1,list2) end
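c -----------------------------------------------------------------
c Build sketch (an addition, not part of the original sample; the
c file names are hypothetical). The subroutines called above
c (sz, lnc1, lncv1, check1) come from the TITPACK library and must
c be compiled and linked together with this main program, e.g.
c   gfortran -O2 sample3.f <titpack sources> -o sample3
c -----------------------------------------------------------------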
[STATEMENT] lemma cap_positive: "e \<in> E \<Longrightarrow> c e > 0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. e \<in> E \<Longrightarrow> (0::'capacity) < c e [PROOF STEP] unfolding E_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. e \<in> {(u, v). c (u, v) \<noteq> (0::'capacity)} \<Longrightarrow> (0::'capacity) < c e [PROOF STEP] using cap_non_negative le_neq_trans [PROOF STATE] proof (prove) using this: \<forall>u v. (0::'capacity) \<le> c (u, v) \<lbrakk>?a \<le> ?b; ?a \<noteq> ?b\<rbrakk> \<Longrightarrow> ?a < ?b goal (1 subgoal): 1. e \<in> {(u, v). c (u, v) \<noteq> (0::'capacity)} \<Longrightarrow> (0::'capacity) < c e [PROOF STEP] by fastforce
State Before: C✝ : Type u inst✝³ : Category C✝ inst✝² : HasColimits C✝ X✝ Y Z : TopCat C : Type u_1 inst✝¹ : Category C inst✝ : HasColimits C X : TopCat F : Presheaf C X x y z : ↑X h : x ⤳ y h' : y ⤳ z x✝¹ : Opens ↑X x✝ : z ∈ x✝¹ ⊢ germ F { val := z, property := x✝ } ≫ stalkSpecializes F h' ≫ stalkSpecializes F h = germ F { val := z, property := x✝ } ≫ stalkSpecializes F (_ : x ⤳ z) State After: no goals Tactic: simp
#' BED to GRanges #' #' This function loads a BED-like file and stores it as a GRanges object. #' The tab-delimited file must be ordered as 'chr', 'start', 'end', 'id', 'score', 'strand'. #' The minimal BED file must have the 'chr', 'start', 'end' columns. #' Any columns after the strand column are ignored. #' #' This is a forked version of the function written by Dave Tang #' (https://github.com/davetang/bedr), with added support for bed files with headers #' #' @param file Location of your file #' @param hd logical; indicates whether or not bed file has a header #' @keywords BED GRanges #' @importFrom GenomicRanges GRanges #' @importFrom IRanges IRanges #' @importFrom utils read.table #' @export #' @examples #' \dontrun{bed_to_granges('my_bed_file.bed')} bed_to_granges <- function(file, hd=FALSE){ df <- read.table(file, header=hd, stringsAsFactors=F) if(length(df) > 6){ df <- df[,-c(7:length(df))] } if(length(df)<3){ stop("File has less than 3 columns") } header <- c('chr','start','end','id','score','strand') names(df) <- header[1:length(names(df))] if('strand' %in% colnames(df)){ df$strand <- gsub(pattern="[^+-]+", replacement = '*', x = df$strand) } if(length(df)==3){ gr <- with(df, GenomicRanges::GRanges(chr, IRanges(start, end))) } else if (length(df)==4){ gr <- with(df, GenomicRanges::GRanges(chr, IRanges(start, end), id=id)) } else if (length(df)==5){ gr <- with(df, GenomicRanges::GRanges(chr, IRanges(start, end), id=id, score=score)) } else if (length(df)==6){ gr <- with(df, GenomicRanges::GRanges(chr, IRanges(start, end), id=id, score=score, strand=strand)) } return(gr) } #' Get recombination rate at each site #' @param sites A dataframe with 10 columns (CHR, POS, Sequence, Dummy Variables for the 6 mutation types, and read depth) #' @param rcrfile Path to file containing recombination rates #' @importFrom bedr bedr.sort.region bedr.join.region #' @importFrom dplyr select mutate arrange #' @import magrittr #' @return Vector containing recombination rates in order matching input dataset #' @export rcrCol <- function(sites, rcrfile){ rcr <- read.table(rcrfile, header=T, stringsAsFactors=F) names(rcr) <- c("CHROM", "s", "e", "rate") rcr$CHROM <- as.character(rcr$CHROM) rcr <- bedr.sort.region(rcr, check.chr=FALSE, check.merge=FALSE, check.zero.based=FALSE, check.valid=FALSE, verbose=FALSE) site_ranges <- sites %>% mutate(CHR=paste0("chr", CHR), START=POS-1, END=POS) %>% dplyr::select(CHR, START, END) site_ranges <- bedr.sort.region(site_ranges, check.chr=FALSE, check.merge=FALSE, check.zero.based=FALSE, check.valid=FALSE, verbose=FALSE) tmp <- bedr.join.region(site_ranges, rcr, check.chr=FALSE, check.merge=FALSE, check.zero.based=FALSE, check.sort=FALSE, check.valid=FALSE, verbose=FALSE) %>% dplyr::select(CHR, POS=END, rate) %>% mutate(CHR=as.numeric(gsub("chr", "", CHR)), rate=as.numeric(rate)) %>% arrange(CHR, POS) return(tmp$rate) } #' Get replication timing rate for each site #' @param sites A dataframe with 10 columns (CHR, POS, Sequence, Dummy Variables for the 6 mutation types, and read depth) #' @param repfile Path to file containing replication timing #' @import magrittr #' @return Vector with replication timing to be appended as column to input data.frame #' @importFrom bedr bedr.sort.region bedr.join.region #' @importFrom dplyr select mutate arrange filter #' @importFrom utils read.table #' @export repCol <- function(sites, repfile){ reptime <- read.table(repfile, header=F, stringsAsFactors=F, sep="\t") names(reptime) <- c("CHR", "END", "TIME") reptime <- 
reptime[!duplicated(reptime[,1:2]),] %>% filter(CHR<=22) %>% arrange(CHR, END) %>% mutate(CHR=paste0("chr", CHR), START=imputeStart(END)) %>% mutate(START=ifelse(START>END, 0, START)) %>% dplyr::select(CHROM=CHR, s=START, e=END, TIME) reptime <- bedr.sort.region(reptime, check.chr=FALSE, check.merge=FALSE, check.zero.based=FALSE, check.valid=FALSE, verbose=FALSE) site_ranges <- sites %>% mutate(CHR=paste0("chr", CHR), START=POS-1, END=POS) %>% dplyr::select(CHR, START, END) site_ranges <- bedr.sort.region(site_ranges, check.chr=FALSE, check.merge=FALSE, check.zero.based=FALSE, check.valid=FALSE, verbose=FALSE) tmp <- bedr.join.region(site_ranges, reptime, check.chr=FALSE, check.merge=FALSE, check.zero.based=FALSE, check.sort=FALSE, check.valid=FALSE, verbose=FALSE) %>% dplyr::select(CHR, POS=END, TIME) %>% mutate(CHR=as.numeric(gsub("chr", "", CHR)), TIME=as.numeric(TIME)) %>% arrange(CHR, POS) return(tmp$TIME) } #' Check if site in bed file; returns 0 or 1 #' @param sites A dataframe with 10 columns (CHR, POS, Sequence, Dummy Variables for the 6 mutation types, and read depth) #' @param bedfile Path to bedfile #' @return Vector with binary indicators to be appended as column to input data.frame #' @importFrom GenomicRanges GRanges findOverlaps #' @importFrom IRanges IRanges #' @export binaryCol <- function(sites, bedfile){ feat_ranges <- bed_to_granges(bedfile, hd=F) site_ranges <- GRanges(seqnames=paste0("chr",sites$CHR), ranges=IRanges(start=sites$POS, end=sites$POS)) out <- findOverlaps(site_ranges, feat_ranges, type="within", select="first") out[is.na(out)] <- 0 return(as.integer(as.logical(out))) } #' Get GC content in 10kb window #' @param sites A dataframe with 10 columns (CHR, POS, Sequence, Dummy Variables for the 6 mutation types, and read depth) #' @param gcfile Path to file containing percent gc content #' @return Vector with gc content to be appended as column to input data.frame #' @importFrom utils read.table #' @import magrittr #' @importFrom dplyr select mutate arrange #' @export gcCol <- function(sites, gcfile){ gcbins <- read.table(gcfile, header=F, stringsAsFactors=F)[,1:4] names(gcbins) <- c("CHR", "start", "end", "prop_GC") gcbins$CHR <- as.character(gcbins$CHR) site_tmp <- sites %>% dplyr::select(CHR, POS) %>% mutate(start=floor(POS/10000)*10000, end=ceiling(POS/10000)*10000) out1 <- merge(site_tmp, gcbins, by=c("CHR", "start")) %>% arrange(CHR, POS) return(out1$prop_GC) } #' Function determines start of interval from value in previous row #' @param ends vector containing positions #' @return Vector of starting positions #' @export imputeStart <- function(ends){ starts<-c(0, ends[-(length(ends))]) return(starts) }
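## -----------------------------------------------------------------
## Minimal usage sketch (not part of the package; the file paths
## below are hypothetical placeholders). Each helper returns a vector
## aligned with the rows of `sites`:
# sites <- read.table("sites_anno.txt", header=TRUE, stringsAsFactors=FALSE)
# sites$RCR  <- rcrCol(sites, "recomb_rate.bed")      # recombination rate
# sites$TIME <- repCol(sites, "reptiming.bed")        # replication timing
# sites$GC   <- gcCol(sites, "gc_10kb.txt")           # 10kb GC content
# sites$DHS  <- binaryCol(sites, "dhs_regions.bed")   # 0/1 overlap indicator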
%ADABOOSTC % % [W,V,ALF] = ADABOOSTC(A,CLASSF,N,RULE,VERBOSE); % % INPUT % A Dataset % CLASSF Untrained weak classifier % N Number of classifiers to be trained % RULE Combining rule (default: weighted voting) % VERBOSE Suppress progress report if 0 (default) % % OUTPUT % W Combined trained classifier % V Cell array of all classifiers % Use VC = stacked(V) for combining % ALF Weights % % DESCRIPTION % % Computation of a combined classifier according to adaboost. % % In total N weighted versions of the training set A are generated % iteratively and used for the training of the specified classifier. % Weights, used as the sampling probabilities of the objects in the % training set, are updated according to the AdaBoost rule. % % The entire set of generated classifiers is given in V. % The set of classifier weights, according to AdaBoost, is returned in ALF % % Various aggregating possibilities can be given in % the final parameter rule: % []: WVOTEC, weighted voting. % VOTEC voting % MEANC sum rule % AVERAGEC averaging of coefficients (for linear combiners) % PRODC product rule % MAXC maximum rule % MINC minimum rule % MEDIANC median rule % % REFERENCE % Ji Zhu, Saharon Rosset, Hui Zou and Trevor Hastie, % Multiclass Adaboost. A multiclass generalization of the Adaboost % algorithm, based on a generalization of the exponential loss. % http://www-stat.stanford.edu/~hastie/Papers/samme.pdf % % SEE ALSO (<a href="http://37steps.com/prtools">PRTools Guide</a>) % MAPPINGS, DATASETS % Copyright: R.P.W. Duin, [email protected] % Faculty EWI, Delft University of Technology % P.O. Box 5031, 2600 GA Delft, The Netherlands % (Multiclass correction by Marcin Budka, Bournemouth Univ., UK) %function [W,V,alf] = adaboostc(a,clasf,n,rule,verbose) function [out,V,alf] = adaboostc(varargin) %% INITIALISATION argin = setdefaults(varargin,[],nmc,100,[],0); if mapping_task(argin,'definition') out = define_mapping(argin,'untrained','Adaboost'); %% TRAINING elseif mapping_task(argin,'training') [a,clasf,n,rule,verbose] = deal(argin{:}); [m,k,c] = getsize(a); V = []; laba = getlab(a); u = ones(m,1)/m; % initialise object weights alf = zeros(1,n); % space for classifier weights isseparable = 0; % check if we can make 0 error if verbose && k == 2 figure(verbose); scatterd(a); end %% generate n classifiers for i = 1:n b = gendatw(a,u,m); % sample training set b = setprior(b,getprior(a)); % use original priors w = b*clasf; % train weak classifier ra = a*w; % test weak classifier if verbose && k == 2 plotc(w,1); drawnow end labc = labeld(ra); diff = sum(labc~=laba,2)~=0; % objects erroneously classified erra = sum((diff).*u); % weighted error on original dataset if (erra==0) isseparable = 1; V = w; break; end if (erra < (1-1/c)) % if classifier better than random guessing... alf(i) = 0.5*(log((1-erra)/erra) + log(c-1)); correct = find(diff==0); % find correctly classified objects wrong = find(diff==1); % find incorrectly classified objects u(correct) = u(correct)*exp(-alf(i)); % give them the ... 
u(wrong) = u(wrong)*exp(alf(i)); % proper weights u = u./sum(u); % normalize weights else alf(i) = 0; end if verbose disp([erra alf(i) sum(alf)]) end V = [V w]; % store all classifiers end %% combine and return if isseparable W = V; W = setname(W,['Boosted ',getname(V)]); else if isempty(rule) W = wvotec(V,alf); % default is weighted combiner else W = traincc(a,V,rule); % otherwise, use user supplied combiner end end if verbose > 0 && k == 2 plotc(W,'r',3) ee = a*W*testc; title(['Error: ', num2str(ee)]); end out = W; else error('Illegal call') end return
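%% Usage sketch (an addition, not part of the original file; assumes the
%% PRTools functions GENDATB, STUMPC and TESTC are on the path):
% a = gendatb([100 100]); % two-class banana set
% [w,v,alf] = adaboostc(a,stumpc,50,[],1); % 50 boosting rounds, verbose
% e = a*w*testc; % apparent error of the boosted combiner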
Joe Logon, my former co-worker/manager, briefly opined about the TSA blog. I decided to take a stab in the dark and share my thoughts on the new blog that our government is running to help address the concerns about the stealth-like image and alleged secrecy of that part of the government. Layout: The blog is very clean and simple, using Blogger under the hood. The lack of widgets and other distracting devices places focus solely on the content, which should be the goal of a blog. The headlines stand out, but require you to scroll down the page to see them. Unlike content above the fold, anything below the fold gets minimal attention, so I suggest they place a “Most Commented” widget on the sidebar so users can find popular content without scrolling down. Blog’s Purpose: The true intent is to openly discuss concerns and policy changes within the TSA: “…facilitate an ongoing dialogue on innovations in security, technology and the checkpoint screening process.” Despite this, frustrated users often drive the discussion into negative feedback targeted against the TSA and the blog author(s). “Meet Our Bloggers”: I’ll echo what Joe said: the blog should list the bloggers’ full names, or at least a lead editor. Blogging under first-name-only pseudonyms removes some of the trust (and accessibility) from the bloggers. Blogging is about communication, trust and being straightforward, not just sweetening press releases, which is what I could see the site eventually scaling back to. Videos: Why aren’t they using YouTube, Metacafe, Revver or some other online video service to play videos? Not only will the videos play much better, they can also reach a much larger audience and draw people to the blog. Windows Media just isn’t a great way for your audience to watch them. Header Image: Although I like the image, it seems too generic. They should use a photo montage of the American flag and a picture of an airport (or a security screening), with official branding on it. In addition, linking the header image to the main blog page is apropos, as it can subconsciously function as part of the navigation. Blogging Activity: Overall, I appreciate the candid nature of the bloggers there. They seem to be doing a pretty darn good job embracing their diverse audience and have been answering questions from the comments. However, I suggest they break big posts up into chunks and post each point with full details. This is mainly because few people have the patience to read a full blog entry; most just skim for the finer, important points. Aside from that, I hope they keep it up! Additionally, I think it’s a good move by the government to embrace social media. I believe that other departments of the government could actually benefit from a blog, not just the TSA. What other functions should the government be blogging about? Share your thoughts (or disagreements) in the comments.
lemma orthogonal_commute: "orthogonal x y \<longleftrightarrow> orthogonal y x"
## config dataset = "data_std" # switch to "data_per_year" if yearly data is used lags = 8 # switch to "4" if time series is short (when fishing mortality is included) is_robust_each_lag = FALSE # switch to "TRUE" if time series is short time_ccm = 200 detrend_fun = detrend_sig is_full = FALSE library_var = "CV.CPUE" ## path for the results of ccm ccm_path = paste0("output\\ccm\\", library_var, "_sig_detrend\\") dir.create(file.path(wd, ccm_path), showWarnings = FALSE) ## path for the results of smap smap_path = paste0("output\\smap\\", library_var, "_sig_detrend\\") dir.create(file.path(wd, smap_path), showWarnings = FALSE) ## path for the results of the robustness test robust_path = paste0("output\\robustness_test\\", library_var, "_sig_detrend\\") dir.create(file.path(wd, robust_path), showWarnings = FALSE)
If a reduced labelling of a graph with $n+1$ vertices is not equal to $n+1$, then it is equal to the reduced labelling of the same graph with $n$ vertices.
(* Author: Benedikt Seidl License: BSD *) section \<open>Code export to Standard ML\<close> theory Code_Export imports "LTL_to_DRA/DRA_Instantiation" LTL.Code_Equations "HOL-Library.Code_Target_Numeral" begin subsection \<open>Hashing Sets\<close> global_interpretation comp_fun_commute "plus o cube o hashcode :: ('a :: hashable) \<Rightarrow> hashcode \<Rightarrow> hashcode" by unfold_locales (auto simp: cube_def) lemma [code]: "hashcode (abs_ltln\<^sub>P \<phi>) = hashcode (min_dnf \<phi>)" by simp lemma [code]: "hashcode (abs_ltln\<^sub>Q \<phi>) = hashcode (min_dnf (Unf \<phi>))" by simp subsection \<open>LTL to DRA\<close> declare ltl_to_dra\<^sub>P.af_letter\<^sub>F_lifted_semantics [code] declare ltl_to_dra\<^sub>P.af_letter\<^sub>G_lifted_semantics [code] declare ltl_to_dra\<^sub>P.af_letter\<^sub>\<nu>_lifted_semantics [code] declare ltl_to_dra\<^sub>Q.af_letter\<^sub>F_lifted_semantics [code] declare ltl_to_dra\<^sub>Q.af_letter\<^sub>G_lifted_semantics [code] declare ltl_to_dra\<^sub>Q.af_letter\<^sub>\<nu>_lifted_semantics [code] definition atoms_ltlc_list_literals :: "String.literal ltlc \<Rightarrow> String.literal list" where "atoms_ltlc_list_literals = atoms_ltlc_list" definition ltlc_to_draei_literals :: "equiv \<Rightarrow> String.literal ltlc \<Rightarrow> (String.literal set, nat) draei" where "ltlc_to_draei_literals = ltlc_to_draei" definition sort_transitions :: "(nat \<times> String.literal set \<times> nat) list \<Rightarrow> (nat \<times> String.literal set \<times> nat) list" where "sort_transitions = sort_key fst" export_code True_ltlc Iff_ltlc ltlc_to_draei_literals Prop PropUnfold alphabetei initialei transitionei conditionei integer_of_nat atoms_ltlc_list_literals sort_transitions set in SML module_name LTL file_prefix LTL_to_DRA subsection \<open>LTL to NBA\<close> (* TODO *) subsection \<open>LTL to LDBA\<close> (* TODO *) end
startingval(e::GenericMomentBasedEstimator) = e.x0 startingval(g::MomentBasedEstimator) = startingval(g.e) npar(e::GenericMomentBasedEstimator) = e.npar nmom(e::GenericMomentBasedEstimator) = e.nmom StatsBase.nobs(e::GenericMomentBasedEstimator) = e.nobs Base.size(e::GenericMomentBasedEstimator) = (nobs(e), npar(e), nmom(e)) StatsBase.nobs(g::MomentBasedEstimator) = nobs(g.e) npar(g::MomentBasedEstimator) = npar(g.e) nmom(g::MomentBasedEstimator) = nmom(g.e) Base.size(g::MomentBasedEstimator) = (nobs(g.e), npar(g.e), nmom(g.e)) objval(e::MomentBasedEstimator) = e.r.objval # StatsBase.nobs(m::MomentFunction) = m.nobs # npar(m::MomentFunction) = m.npar # nmom(m::MomentFunction) = m.nmom # Base.size(m::MomentFunction) = (nobs(m), npar(m), nmom(m)) ################################################################################ ## Constructor with function and x0 ################################################################################ function GMMEstimator(f::Function, theta::Vector; grad = nothing, data = nothing, initialW = nothing, wts = nothing, mgr::IterationManager = TwoStepGMM(), constraints = Unconstrained()) ## Set Moment Function g(theta) = data == nothing ? f(theta) : f(theta, data) ## Evaluate Moment Function g₀ = g(theta) n, m, p = (size(g₀)..., length(theta)) ## Initial Weighting Matrix W₀ = initialW == nothing ? Matrix(I, m, m) : initialW W = setW0(mgr, m); W[1] = W₀ ## Weighting w = wts == nothing ? Unweighted() : Weighted(float(wts)) ## Set Default Bounds lb = [-Inf for j=1:p] ub = [+Inf for j=1:p] nf = Float64[] ni = 0::Int64 if grad == nothing mf = make_fad_mom_fun(g, IdentitySmoother()) else ∇f(theta) = data == nothing ? grad(theta) : grad(theta, data) mf = make_ana_mom_fun(GMMEstimator, g, ∇f) end MomentBasedEstimator(GMMEstimator(mf, constraints, theta, lb, ub, nf, nf, mgr, IterationState([1], [10.0], theta), W, w, ni, ni, n, p, m)) end const GradTuple = Union{Nothing, Tuple{Function, Function, Function}, Tuple{Function, Function, Function, Function}} function MDEstimator(f::Function, theta::Vector; grad::GradTuple = nothing, data = nothing, wts = nothing, div::Divergence = DEFAULT_DIVERGENCE, kernel::SmoothingKernel = IdentitySmoother()) ## Set Moment Function g(theta) = data == nothing ? f(theta) : f(theta, data) ## Evaluate Moment Function g₀ = g(theta) n, m, p = (size(g₀)..., length(theta)) ## Weighting w = wts == nothing ? 
Unweighted() : Weighted(float(wts)) #= Set Default bounds =# ## -- Bounds on theta -- ## lb = [-Inf for j=1:p] ub = [+Inf for j=1:p] ## -- Bounds on mdweights -- ## wlb = zeros(Float64, n) wub = ones(Float64, n)*n ## -- Bounds on constraints -- ## glb = [zeros(m); n]; gub = [zeros(m); n]; ni = 0::Int64 if grad == nothing mf = make_fad_mom_fun(g, kernel) else if data != nothing ## wrap each user-supplied gradient so that it closes over the data ff = Vector{Function}(undef, length(grad)) for (i, fi) in enumerate(grad) ff[i] = theta -> fi(theta, data) end grad = (ff...,) end mf = make_ana_mom_fun(MDEstimator, g, grad) end MomentBasedEstimator(MDEstimator(mf, Unconstrained(), theta, lb, ub, glb, gub, wlb, wub, div, w, ni, ni, n, p, m)) end ################################################################################ ## Solve methods ################################################################################ # function solve!(g::MomentBasedEstimator) # if status(g) == :Uninitialized # initialize!(g) # end # end function initialize!(g::MomentBasedEstimator{MDEstimator{M, V, S, T}}) where {M<:MomentFunction, V<:Divergence, S<:Unconstrained, T<:Weighting} n, p, m = size(g) ξ₀ = [ones(n); startingval(g)] g.e.gele = Int((n+p)*(m+1)-p) g.e.hele = Int(n*p + n + (p+1)*p/2) g_L = getmfLB(g) g_U = getmfUB(g) u_L = [getwtsLB(g); getparLB(g)] u_U = [getwtsUB(g); getparUB(g)] MathProgBase.loadproblem!(g.m, n+p, m+1, u_L, u_U, g_L, g_U, :Min, g.e) MathProgBase.setwarmstart!(g.m, ξ₀) g.status[1] = :Initialized end function initialize!(g::MomentBasedEstimator{GMMEstimator{M, V, S, T}}) where {M<:MomentFunction, V<:IterationManager, S<:Unconstrained, T<:Weighting} n, p, m = size(g) ξ₀ = startingval(g) g.e.gele = @compat Int(p) g.e.hele = @compat Int(2*p) g_L = Float64[] g_U = Float64[] u_L = getparLB(g) u_U = getparUB(g) MathProgBase.loadproblem!(g.m, p, 0, u_L, u_U, g_L, g_U, :Min, g.e) MathProgBase.setwarmstart!(g.m, ξ₀) g.status[1] = :Initialized end function initialize!(g::MomentBasedEstimator{GMMEstimator{M, V, S, T}}) where {M<:MomentFunction, V<:IterationManager, S<:Constrained, T<:Weighting} n, p, m = size(g) ξ₀ = MomentBasedEstimators.startingval(g) g.e.gele = @compat Int(g.e.c.nc*p) g.e.hele = @compat Int(0) g_L = g.e.c.hlb g_U = g.e.c.hub u_L = getparLB(g) u_U = getparUB(g) MathProgBase.loadproblem!(g.m, p, g.e.c.nc, u_L, u_U, g_L, g_U, :Min, g.e) MathProgBase.setwarmstart!(g.m, ξ₀) g.status[1] = :Initialized end ################################################################################ ## Getters ################################################################################ getparLB(g::MomentBasedEstimator) = g.e.lb getparUB(g::MomentBasedEstimator) = g.e.ub getmfLB(g::MomentBasedEstimator) = g.e.glb getmfUB(g::MomentBasedEstimator) = g.e.gub getwtsLB(g::MomentBasedEstimator{MDEstimator{M, V, T, S}}) where {M, V, T, S} = g.e.wlb getwtsUB(g::MomentBasedEstimator{MDEstimator{M, V, T, S}}) where {M, V, T, S} = g.e.wub ################################################################################ ## Set constraint on parameters ################################################################################ function check_constraint_sanity(k, x0, h::Function, hlb, hub) h0 = h(x0); nc = length(h0) typeof(h0) <: Vector{Float64} || error("Constraint function must be ::Vector{Float64}") nc == length(hub) && nc == length(hlb) || error("Constraint bounds of wrong dimension") typeof(hlb) <: Vector{Float64} || (hlb = float(hlb)) typeof(hub) <: Vector{Float64} || (hub = float(hub)) (hlb, hub, nc) end ## This returns a constrained version of 
MomentBasedEstimator function constrained(h::Function, hlb::Vector, hub::Vector, g::MomentBasedEstimator) p = npar(g); chk = check_constraint_sanity(p, startingval(g), h, hlb, hub) r = MomentBasedEstimatorResults(:Uninitialized, 0., Array{Float64}(undef, p), Array{Float64}(undef, p, p)) if typeof(g.e.mf) == MomentBasedEstimators.FADMomFun mf = make_fad_mom_fun(g.e.mf.g, IdentitySmoother()) else mf = make_ana_mom_fun(GMMEstimator, g.e.mf.g, g.e.mf.∇g) end ce = GMMEstimator(mf, Constrained(h, chk...), g.e.x0, g.e.lb, g.e.ub, g.e.glb, g.e.gub, g.e.mgr, g.e.ist, g.e.W, g.e.wtg, g.e.gele, g.e.hele, size(g)...) MomentBasedEstimator(ce, r, g.s, g.m, :Uninitialized) end ################################################################################ ## Update solver ################################################################################ function solver!(g::MomentBasedEstimator, s::MathProgBase.SolverInterface.AbstractMathProgSolver) g.s = s g.m = deepcopy(MathProgBase.NonlinearModel(s)) end ################################################################################ ## Update lb and ub on g(θ), default: (0,...,0) ################################################################################ function setmfLB!(g::MomentBasedEstimator{MDEstimator}, lb::Vector) nmom(g) == length(lb) || error("Dimension error") g.e.glb[:] = lb end function setmfUB!(g::MomentBasedEstimator{MDEstimator}, ub::Vector) nmom(g) == length(ub) || error("Dimension error") g.e.gub[:] = ub end function setmfbounds!(g::MomentBasedEstimator{MDEstimator}, lb::Vector, ub::Vector) setmfLB!(g, lb) setmfUB!(g, ub) end ################################################################################ ## Update initial lb and ub on parameters (default -Inf, +Inf) ################################################################################ function setparLB!(g::MomentBasedEstimator{T}, lb::Vector) where T npar(g) == length(lb) || error("Dimension error") copyto!(g.e.lb, lb) end function setparUB!(g::MomentBasedEstimator{T}, ub::Vector) where T npar(g) == length(ub) || error("Dimension error") copyto!(g.e.ub, ub) end function setparbounds!(g::MomentBasedEstimator{T}, lb::Vector, ub::Vector) where T setparLB!(g, lb) setparUB!(g, ub) end ################################################################################ ## Update initial lb and ub on mdweights (default 0, n) ################################################################################ function setwtsLB!(g::MomentBasedEstimator{T}, lb::Vector) where T <: MDEstimator nobs(g) == length(lb) || error("Dimension error") copyto!(g.e.wlb, lb) end function setwtsUB!(g::MomentBasedEstimator{T}, ub::Vector) where T <: MDEstimator nobs(g) == length(ub) || error("Dimension error") copyto!(g.e.wub, ub) end function setwtsbounds!(g::MomentBasedEstimator{T}, lb::Vector, ub::Vector) where T <: MDEstimator setwtsLB!(g, lb) setwtsUB!(g, ub) end ################################################################################ ## Update initial weighting matrix (default is I(m)) ################################################################################ ## TODO: This should depend on the Iteration Manager setW0!(g::MomentBasedEstimator{GMMEstimator}, W::Array{Float64, 2}) = copyto!(g.e.W , W) ################################################################################ ## Iteration ################################################################################ function set_iteration_manager!(g::MomentBasedEstimator{GMMEstimator}, mgr::IterationManager) g.e.mgr = mgr end 
################################################################################ ## estimate! ################################################################################ function setx0!(g::MomentBasedEstimator{S}, x0::Vector{Float64}) where S<:GMMEstimator ## For GMM x0 is the parameter length(x0) == npar(g) || throw(DimensionMismatch("")) copyto!(g.e.x0, x0) MathProgBase.setwarmstart!(g.m, x0) end function setx0!(m::MomentBasedEstimator{S}, x0::Vector{Float64}) where S<:MDEstimator ## For MD the warm start stacks the current implied weights on top of the parameter length(x0) == npar(m) || throw(DimensionMismatch("")) copyto!(m.e.x0, x0) x00 = [m.m.inner.x[1:nobs(m)]; x0] MathProgBase.setwarmstart!(m.m, x00) end function estimate!(g::MomentBasedEstimator) ## There are three possible states of g.status ## :Uninitialized ## :Initialized ## :Solved(Success|Failure) initialize!(g) solve!(g, g.m) fill_in_results!(g) g end function fill_in_results!(me::MomentBasedEstimator{T}) where T<:MDEstimator me.r.status = MathProgBase.status(me.m) n, p, m = size(me) ss = MathProgBase.getsolution(me.m) copyto!(me.r.coef, ss[n+1:end]) ## The ϕ-value is equal to 2*objval*k2/(S*k1^2) v = MathProgBase.getobjval(me.m) k1 = κ₁(me) k2 = κ₂(me) S = bw(me) me.r.objval = 2*k2*v/(S*k1^2) me.status[1] = :Solved end function fill_in_results!(g::MomentBasedEstimator{S}) where S<:GMMEstimator g.r.status = MathProgBase.status(g.m) n, p, m = size(g) copyto!(g.r.coef, MathProgBase.getsolution(g.m)) g.r.objval = MathProgBase.getobjval(g.m) g.status[1] = :Solved end # function solve!(g::MomentBasedEstimator{S}, s::KNITRO.KnitroMathProgModel) where S<:MDEstimator # # KNITRO.restartProblem(g.m.inner, startingval(g), g.m.inner.numConstr) # # KNITRO.solveProblem(g.m.inner) # MathProgBase.optimize!(g.m) # end solve!(g::MomentBasedEstimator{S}, s::Ipopt.IpoptMathProgModel) where S<:MDEstimator = MathProgBase.optimize!(g.m) function solve!(g::MomentBasedEstimator{S}, s::MathProgBase.SolverInterface.AbstractMathProgModel) where S<:GMMEstimator reset_iteration_state!(g) n, p, m = size(g) theta = zeros(p) while !(finished(g.e.mgr, g.e.ist)) if g.e.ist.n[1]>1 g.e.W[g.e.ist.n[1]][:,:] .= optimal_W(g.e.mf, theta, g.e.mgr.k, g.e.mgr.demean) end MathProgBase.optimize!(g.m) theta = MathProgBase.getsolution(g.m) update!(g.e.ist, theta) next!(g.e.ist) if !(finished(g.e.mgr, g.e.ist)) MathProgBase.setwarmstart!(g.m, theta) end end fill_in_results!(g) g.status[1] = :Solved g end next!(x::IterationState) = x.n[1] += 1 function update!(x::IterationState, v::Vector) x.change[:] .= maximum(abs, x.prev - v) x.prev[:] .= v end reset_iteration_state!(g::MomentBasedEstimator) = g.e.ist = deepcopy(IterationState([1], [10.0], startingval(g))) function optimal_W(g::Array{Float64, 2}, theta::Vector, k::RobustVariance, demean::Bool) h = demean ? g .- mean(g, dims=1) : g n = size(h, 1) S = CovarianceMatrices.vcov(h, k) * n pinv(S) end function optimal_W(mf::MomentFunction, theta::Vector, k::RobustVariance, demean::Bool) g = mf.s(theta) optimal_W(g, theta, k, demean) end function optimal_W(mf::Function, theta::Vector, k::RobustVariance, demean::Bool) g = mf(theta) optimal_W(g, theta, k, demean) end function optimal_W(e::MomentBasedEstimator, k::RobustVariance, demean::Bool = false) optimal_W(e.e.mf, coef(e), k, demean) end
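################################################################################
## Usage sketch (an addition, not part of the package; the data and the
## instrument arrays below are hypothetical, and an installed MathProgBase
## NLP solver such as Ipopt is assumed)
################################################################################
# Linear IV moments g(θ) = z .* (y .- x*θ), with y (n), x (n×k), z (n×m):
# g(theta) = z .* (y .- x * theta)
# est = GMMEstimator(g, zeros(k); mgr = TwoStepGMM())  # two-step GMM
# solver!(est, IpoptSolver())                          # pick the NLP solver
# estimate!(est)                                       # initialize! + solve!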
We saw this car and decided on the spot... We MUST have it. So cool and modern looking it could be a prop on the Star Wars set. Very roomy inside with all the creature comforts you want. Of course the real reason to consider this amazing Mazda 3 is the widely known ZOOM, ZOOM factor. That is the feeling you get when you press the accelerator to the ground and feel all those ponies going to work. You owe yourself a thrill ride. Come make it happen today!
Formal statement is: lemma measure_eq_AE: assumes iff: "AE x in M. x \<in> A \<longleftrightarrow> x \<in> B" assumes A: "A \<in> sets M" and B: "B \<in> sets M" shows "measure M A = measure M B" Informal statement is: If two sets $A$ and $B$ are equal almost everywhere, then they have the same measure.
from typing import NamedTuple, Callable class Primitive(NamedTuple): name: str add_p = Primitive('add') mul_p = Primitive('mul') neg_p = Primitive("neg") sin_p = Primitive("sin") cos_p = Primitive("cos") reduce_sum_p = Primitive("reduce_sum") greater_p = Primitive("greater") less_p = Primitive("less") transpose_p = Primitive("transpose") broadcast_p = Primitive("broadcast") def add(x, y): return bind1(add_p, x, y) def mul(x, y): return bind1(mul_p, x, y) def neg(x): return bind1(neg_p, x) def sin(x): return bind1(sin_p, x) def cos(x): return bind1(cos_p, x) def reduce_sum(x, axis=None): return bind1(reduce_sum_p, x, axis=axis) def greater(x, y): return bind1(greater_p, x, y) def less(x, y): return bind1(less_p, x, y) def transpose(x, perm): return bind1(transpose_p, x, perm=perm) def broadcast(x, shape, axes): return bind1(broadcast_p, x, shape=shape, axes=axes) def bind1(prim, *args, **params): out, = bind(prim, *args, **params) return out from contextlib import contextmanager from typing import Type, List, Tuple, Sequence, Optional, Any class MainTrace(NamedTuple): level: int trace_type: Type['Trace'] global_data: Optional[Any] trace_stack: List[MainTrace] = [] dynamic_trace: Optional[MainTrace] = None # to be employed in Part 3 @contextmanager def new_main(trace_type: Type['Trace'], global_data=None): level = len(trace_stack) main = MainTrace(level, trace_type, global_data) trace_stack.append(main) try: yield main finally: trace_stack.pop() class Trace: main: MainTrace def __init__(self, main: MainTrace) -> None: self.main = main def pure(self, val): assert False # must override def lift(self, val): assert False # must override def process_primitive(self, primitive, tracers, params): assert False # must override import numpy as np class Tracer: _trace: Trace __array_priority__ = 1000 @property def aval(self): assert False # must override def full_lower(self): return self # default implementation def __neg__(self): return self.aval._neg(self) def __add__(self, other): return self.aval._add(self, other) def __radd__(self, other): return self.aval._radd(self, other) def __mul__(self, other): return self.aval._mul(self, other) def __rmul__(self, other): return self.aval._rmul(self, other) def __gt__(self, other): return self.aval._gt(self, other) def __lt__(self, other): return self.aval._lt(self, other) def __bool__(self): return self.aval._bool(self) def __nonzero__(self): return self.aval._nonzero(self) def __getattr__(self, name): try: return getattr(self.aval, name) except AttributeError: raise AttributeError(f"{self.__class__.__name__} has no attribute {name}") def swap(f): return lambda x, y: f(y, x) class ShapedArray: array_abstraction_level = 1 shape: Tuple[int] dtype: np.dtype def __init__(self, shape, dtype): self.shape = shape self.dtype = dtype @property def ndim(self): return len(self.shape) _neg = staticmethod(neg) _add = staticmethod(add) _radd = staticmethod(swap(add)) _mul = staticmethod(mul) _rmul = staticmethod(swap(mul)) _gt = staticmethod(greater) _lt = staticmethod(less) @staticmethod def _bool(tracer): raise Exception("ShapedArray can't be unambiguously converted to bool") @staticmethod def _nonzero(tracer): raise Exception("ShapedArray can't be unambiguously converted to bool") def str_short(self): return f'{self.dtype.name}[{",".join(str(d) for d in self.shape)}]' def __hash__(self): return hash((self.shape, self.dtype)) def __eq__(self, other): return (type(self) is type(other) and self.shape == other.shape and self.dtype == other.dtype) def __repr__(self): return 
f"ShapedArray(shape={self.shape}, dtype={self.dtype})" class ConcreteArray(ShapedArray): array_abstraction_level = 2 val: np.ndarray def __init__(self, val): self.val = val self.shape = val.shape self.dtype = val.dtype @staticmethod def _bool(tracer): return bool(tracer.aval.val) @staticmethod def _nonzero(tracer): return bool(tracer.aval.val) def get_aval(x): if isinstance(x, Tracer): return x.aval elif type(x) in jax_types: return ConcreteArray(np.asarray(x)) else: raise TypeError(x) jax_types = {bool, int, float, np.bool_, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.float64, np.ndarray} def bind(prim, *args, **params): top_trace = find_top_trace(args) tracers = [full_raise(top_trace, arg) for arg in args] outs = top_trace.process_primitive(prim, tracers, params) return [full_lower(out) for out in outs] import operator as op def find_top_trace(xs) -> Trace: top_main = max((x._trace.main for x in xs if isinstance(x, Tracer)), default=trace_stack[0], key=op.attrgetter('level')) if dynamic_trace and dynamic_trace.level > top_main.level: top_main = dynamic_trace return top_main.trace_type(top_main) def full_lower(val: Any): if isinstance(val, Tracer): return val.full_lower() else: return val def full_raise(trace: Trace, val: Any) -> Tracer: if not isinstance(val, Tracer): assert type(val) in jax_types return trace.pure(val) level = trace.main.level if val._trace.main is trace.main: return val elif val._trace.main.level < level: return trace.lift(val) elif val._trace.main.level > level: raise Exception(f"Can't lift level {val._trace.main.level} to {level}.") else: # val._trace.level == level raise Exception(f"Different traces at same level: {val._trace}, {trace}.") # Evaluation interpreter class EvalTrace(Trace): pure = lift = lambda self, x: x # no boxing in Tracers needed def process_primitive(self, primitive, tracers, params): return impl_rules[primitive](*tracers, **params) trace_stack.append(MainTrace(0, EvalTrace, None)) # special bottom of the stack # NB: in JAX, instead of a dict we attach impl rules to the Primitive instance impl_rules = {} impl_rules[add_p] = lambda x, y: [np.add(x, y)] impl_rules[mul_p] = lambda x, y: [np.multiply(x, y)] impl_rules[neg_p] = lambda x: [np.negative(x)] impl_rules[sin_p] = lambda x: [np.sin(x)] impl_rules[cos_p] = lambda x: [np.cos(x)] impl_rules[reduce_sum_p] = lambda x, *, axis: [np.sum(x, axis)] impl_rules[greater_p] = lambda x, y: [np.greater(x, y)] impl_rules[less_p] = lambda x, y: [np.less(x, y)] impl_rules[transpose_p] = lambda x, *, perm: [np.transpose(x, perm)] def broadcast_impl(x, *, shape, axes): for axis in sorted(axes): x = np.expand_dims(x, axis) return [np.broadcast_to(x, shape)] impl_rules[broadcast_p] = broadcast_impl # Pytrees #from .util import unzip2 def unzip2(pairs): lst1, lst2 = [], [] for x1, x2 in pairs: lst1.append(x1) lst2.append(x2) return lst1, lst2 map_ = map def map(f, *xs): return list(map_(f, *xs)) zip_ = zip def zip(*args): fst, *rest = args = map(list, args) n = len(fst) for arg in rest: assert len(arg) == n return list(zip_(*args)) from jax.tree_util import tree_flatten, tree_unflatten, tree_map, PyTreeDef def flatten_fun(f, in_tree): store = Store() def flat_fun(*args_flat): pytree_args = tree_unflatten(in_tree, args_flat) out = f(*pytree_args) out_flat, out_tree = tree_flatten(out) store.set_value(out_tree) return out_flat return flat_fun, store class Empty: pass empty = Empty() class Store: val = empty def set_value(self, val): assert self.val is 
empty self.val = val def __call__(self): return self.val # Vectorized batching with vmap def mapped_aval(batch_dim, aval): shape = list(aval.shape) del shape[batch_dim] return ShapedArray(tuple(shape), aval.dtype) def move_batch_axis(axis_size, src, dst, x): if src is not_mapped: target_shape = list(np.shape(x)) target_shape.insert(dst, axis_size) return broadcast(x, target_shape, [dst]) elif src == dst: return x else: return moveaxis(x, src, dst) def moveaxis(x, src: int, dst: int): perm = [i for i in range(np.ndim(x)) if i != src] perm.insert(dst, src) return transpose(x, perm) from typing import Union class NotMapped: pass not_mapped = NotMapped() BatchAxis = Union[NotMapped, int] class BatchTracer(Tracer): def __init__(self, trace, val, batch_dim: BatchAxis): self._trace = trace self.val = val self.batch_dim = batch_dim @property def aval(self): if self.batch_dim is not_mapped: return get_aval(self.val) else: return mapped_aval(self.batch_dim, get_aval(self.val)) def full_lower(self): if self.batch_dim is not_mapped: return full_lower(self.val) else: return self class BatchTrace(Trace): pure = lift = lambda self, val: BatchTracer(self, val, not_mapped) def process_primitive(self, primitive, tracers, params): vals_in, bdims_in = unzip2((t.val, t.batch_dim) for t in tracers) vmap_rule = vmap_rules[primitive] val_outs, bdim_outs = vmap_rule(self.axis_size, vals_in, bdims_in, **params) return [BatchTracer(self, x, bd) for x, bd in zip(val_outs, bdim_outs)] @property def axis_size(self): return self.main.global_data vmap_rules = {} from functools import partial def binop_batching_rule(op, axis_size, vals_in, dims_in): (x, y), (x_bdim, y_bdim) = vals_in, dims_in if x_bdim != y_bdim: if x_bdim is not_mapped: x = move_batch_axis(axis_size, x_bdim, y_bdim, x) x_bdim = y_bdim else: y = move_batch_axis(axis_size, y_bdim, x_bdim, y) return [op(x, y)], [x_bdim] vmap_rules[add_p] = partial(binop_batching_rule, add) vmap_rules[mul_p] = partial(binop_batching_rule, mul) def vectorized_unop_batching_rule(op, axis_size, vals_in, dims_in): (x,), (x_bdim,) = vals_in, dims_in return [op(x)], [x_bdim] vmap_rules[sin_p] = partial(vectorized_unop_batching_rule, sin) vmap_rules[cos_p] = partial(vectorized_unop_batching_rule, cos) vmap_rules[neg_p] = partial(vectorized_unop_batching_rule, neg) def reduce_sum_batching_rule(axis_size, vals_in, dims_in, *, axis): (x,), (x_bdim,) = vals_in, dims_in new_axis = axis + (x_bdim <= axis) out_bdim = x_bdim - (new_axis < x_bdim) return [reduce_sum(x, new_axis)], [out_bdim] vmap_rules[reduce_sum_p] = reduce_sum_batching_rule def vmap_flat(f, in_axes, *args): axis_size, = {x.shape[ax] for x, ax in zip(args, in_axes) if ax is not not_mapped} with new_main(BatchTrace, axis_size) as main: trace = BatchTrace(main) tracers_in = [BatchTracer(trace, x, ax) if ax is not None else x for x, ax in zip(args, in_axes)] outs = f(*tracers_in) tracers_out = [full_raise(trace, out) for out in outs] vals_out, bdims_out = unzip2((t.val, t.batch_dim) for t in tracers_out) outs_transposed = [move_batch_axis(axis_size, bdim, 0, val_out) for val_out, bdim in zip(vals_out, bdims_out)] return outs_transposed def vmap(f, in_axes): def batched_f(*args): args_flat, in_tree = tree_flatten(args) in_axes_flat, in_tree2 = tree_flatten(in_axes) if in_tree != in_tree2: raise TypeError f_flat, out_tree = flatten_fun(f, in_tree) outs_flat = vmap_flat(f_flat, in_axes_flat, *args_flat) return tree_unflatten(out_tree(), outs_flat) return batched_f # Part 2: Jaxprs from typing import Set, Dict class Var: aval: 
ShapedArray def __init__(self, aval): self.aval = aval class Lit: val: Any aval: ShapedArray def __init__(self, val): self.aval = aval = raise_to_shaped(get_aval(val)) self.val = np.array(val, aval.dtype) Atom = Union[Var, Lit] class JaxprEqn(NamedTuple): primitive: Primitive inputs: List[Atom] params: Dict[str, Any] out_binders: List[Var] class Jaxpr(NamedTuple): in_binders: List[Var] eqns: List[JaxprEqn] outs: List[Atom] def __hash__(self): return id(self) __eq__ = op.is_ def raise_to_shaped(aval): return ShapedArray(aval.shape, aval.dtype) class JaxprType(NamedTuple): in_types: List[ShapedArray] out_types: List[ShapedArray] def __repr__(self): in_types = ', '.join(aval.str_short() for aval in self.in_types) out_types = ', '.join(aval.str_short() for aval in self.out_types) return f'({in_types}) -> ({out_types})' def typecheck_jaxpr(jaxpr: Jaxpr) -> JaxprType: env: Set[Var] = set() for v in jaxpr.in_binders: if v in env: raise TypeError env.add(v) for eqn in jaxpr.eqns: in_types = [typecheck_atom(env, x) for x in eqn.inputs] out_types = abstract_eval_rules[eqn.primitive](*in_types, **eqn.params) for out_binder, out_type in zip(eqn.out_binders, out_types): if not out_type == out_binder.aval: raise TypeError for out_binder in eqn.out_binders: if out_binder in env: raise TypeError env.add(out_binder) in_types = [v.aval for v in jaxpr.in_binders] out_types = [typecheck_atom(env, x) for x in jaxpr.outs] return JaxprType(in_types, out_types) def typecheck_atom(env: Set[Var], x: Atom) -> ShapedArray: if isinstance(x, Var): if x not in env: raise TypeError("unbound variable") return x.aval elif isinstance(x, Lit): return raise_to_shaped(get_aval(x.val)) else: assert False def eval_jaxpr(jaxpr: Jaxpr, args: List[Any]) -> List[Any]: env: Dict[Var, Any] = {} def read(x: Atom) -> Any: return env[x] if type(x) is Var else x.val def write(v: Var, val: Any) -> None: assert v not in env # single-assignment env[v] = val map(write, jaxpr.in_binders, args) for eqn in jaxpr.eqns: in_vals = map(read, eqn.inputs) outs = bind(eqn.primitive, *in_vals, **eqn.params) map(write, eqn.out_binders, outs) return map(read, jaxpr.outs) def jaxpr_as_fun(jaxpr: Jaxpr): return lambda *args: eval_jaxpr(jaxpr, args) # Building jaxprs with tracing #from .util import (split_list, partition_list) #from .util import partition_list #from . 
import util from jax._src.util import partition_list from jax import util def split_list(lst: List[Any], n: Union[int, List[int]]) -> Tuple[List[Any], List[Any]]: if isinstance(n, tuple): n = list(n) if not isinstance(n, list): n = [n] return util.split_list(lst, n) # def split_list(lst: List[Any], n: int) -> Tuple[List[Any], List[Any]]: # assert 0 <= n <= len(lst) # return lst[:n], lst[n:] # # def partition_list(bs: List[bool], l: List[Any]) -> Tuple[List[Any], List[Any]]: # assert len(bs) == len(l) # lists = lst1, lst2 = [], [] # for b, x in zip(bs, l): # lists[b].append(x) # return lst1, lst2 # NB: the analogous class in JAX is called 'DynamicJaxprTracer' class JaxprTracer(Tracer): __slots__ = ['aval'] aval: ShapedArray def __init__(self, trace, aval): self._trace = trace self.aval = aval # NB: the analogous class in JAX is called 'DynamicJaxprTrace' class JaxprTrace(Trace): def new_arg(self, aval: ShapedArray) -> JaxprTracer: aval = raise_to_shaped(aval) tracer = self.builder.new_tracer(self, aval) self.builder.tracer_to_var[id(tracer)] = Var(aval) return tracer def get_or_make_const_tracer(self, val: Any) -> JaxprTracer: tracer = self.builder.const_tracers.get(id(val)) if tracer is None: tracer = self.builder.new_tracer(self, raise_to_shaped(get_aval(val))) self.builder.add_const(tracer, val) return tracer pure = lift = get_or_make_const_tracer def process_primitive(self, primitive, tracers, params): avals_in = [t.aval for t in tracers] avals_out = abstract_eval_rules[primitive](*avals_in, **params) out_tracers = [self.builder.new_tracer(self, a) for a in avals_out] inputs = [self.builder.getvar(t) for t in tracers] outvars = [self.builder.add_var(t) for t in out_tracers] self.builder.add_eqn(JaxprEqn(primitive, inputs, params, outvars)) return out_tracers @property def builder(self): return self.main.global_data # NB: in JAX, we instead attach abstract eval rules to Primitive instances abstract_eval_rules = {} class JaxprBuilder: eqns: List[JaxprEqn] tracer_to_var: Dict[int, Var] const_tracers: Dict[int, JaxprTracer] constvals: Dict[Var, Any] tracers: List[JaxprTracer] def __init__(self): self.eqns = [] self.tracer_to_var = {} self.const_tracers = {} self.constvals = {} self.tracers = [] def new_tracer(self, trace: JaxprTrace, aval: ShapedArray) -> JaxprTracer: tracer = JaxprTracer(trace, aval) self.tracers.append(tracer) return tracer def add_eqn(self, eqn: JaxprEqn) -> None: self.eqns.append(eqn) def add_var(self, tracer: JaxprTracer) -> Var: assert id(tracer) not in self.tracer_to_var var = self.tracer_to_var[id(tracer)] = Var(tracer.aval) return var def getvar(self, tracer: JaxprTracer) -> Var: var = self.tracer_to_var.get(id(tracer)) assert var is not None return var def add_const(self, tracer: JaxprTracer, val: Any) -> Var: var = self.add_var(tracer) self.const_tracers[id(val)] = tracer self.constvals[var] = val return var def build(self, in_tracers: List[JaxprTracer], out_tracers: List[JaxprTracer] ) -> Tuple[Jaxpr, List[Any]]: constvars, constvals = unzip2(self.constvals.items()) t2v = lambda t: self.tracer_to_var[id(t)] in_binders = constvars + [t2v(t) for t in in_tracers] out_vars = [t2v(t) for t in out_tracers] jaxpr = Jaxpr(in_binders, self.eqns, out_vars) typecheck_jaxpr(jaxpr) jaxpr, constvals = _inline_literals(jaxpr, constvals) return jaxpr, constvals def _inline_literals(jaxpr: Jaxpr, consts: List[Any]) -> Tuple[Jaxpr, List[Any]]: const_binders, other_binders = split_list(jaxpr.in_binders, len(consts)) scalars = [type(x) in jax_types and not get_aval(x).shape for x 
in consts] new_const_binders, lit_binders = partition_list(scalars, const_binders) new_consts, lit_vals = partition_list(scalars, consts) literals = dict(zip(lit_binders, map(Lit, lit_vals))) new_eqns = [JaxprEqn(eqn.primitive, [literals.get(x, x) for x in eqn.inputs], eqn.params, eqn.out_binders) for eqn in jaxpr.eqns] new_outs = [literals.get(x, x) for x in jaxpr.outs] new_jaxpr = Jaxpr(new_const_binders + other_binders, new_eqns, new_outs) typecheck_jaxpr(new_jaxpr) return new_jaxpr, new_consts def binop_abstract_eval(x: ShapedArray, y: ShapedArray) -> List[ShapedArray]: if not isinstance(x, ShapedArray) or not isinstance(y, ShapedArray): raise TypeError if raise_to_shaped(x) != raise_to_shaped(y): raise TypeError return [ShapedArray(x.shape, x.dtype)] abstract_eval_rules[add_p] = binop_abstract_eval abstract_eval_rules[mul_p] = binop_abstract_eval def compare_abstract_eval(x: ShapedArray, y: ShapedArray) -> List[ShapedArray]: if not isinstance(x, ShapedArray) or not isinstance(y, ShapedArray): raise TypeError if x.shape != y.shape: raise TypeError return [ShapedArray(x.shape, np.dtype('bool'))] abstract_eval_rules[greater_p] = compare_abstract_eval abstract_eval_rules[less_p] = compare_abstract_eval def vectorized_unop_abstract_eval(x: ShapedArray) -> List[ShapedArray]: return [ShapedArray(x.shape, x.dtype)] abstract_eval_rules[sin_p] = vectorized_unop_abstract_eval abstract_eval_rules[cos_p] = vectorized_unop_abstract_eval abstract_eval_rules[neg_p] = vectorized_unop_abstract_eval def reduce_sum_abstract_eval(x: ShapedArray, *, axis: int) -> List[ShapedArray]: new_shape = [d for i, d in enumerate(x.shape) if i != axis] return [ShapedArray(tuple(new_shape), x.dtype)] abstract_eval_rules[reduce_sum_p] = reduce_sum_abstract_eval def broadcast_abstract_eval(x: ShapedArray, *, shape: Sequence[int], axes: Sequence[int]) -> List[ShapedArray]: return [ShapedArray(tuple(shape), x.dtype)] abstract_eval_rules[broadcast_p] = broadcast_abstract_eval from functools import lru_cache # @lru_cache() # ShapedArrays are hashable def make_jaxpr_v1(f, *avals_in): avals_in, in_tree = tree_flatten(avals_in) f, out_tree = flatten_fun(f, in_tree) builder = JaxprBuilder() with new_main(JaxprTrace, builder) as main: trace = JaxprTrace(main) tracers_in = [trace.new_arg(aval) for aval in avals_in] outs = f(*tracers_in) tracers_out = [full_raise(trace, out) for out in outs] jaxpr, consts = builder.build(tracers_in, tracers_out) return jaxpr, consts, out_tree() @contextmanager def new_dynamic(main: MainTrace): global dynamic_trace prev_dynamic_trace, dynamic_trace = dynamic_trace, main try: yield finally: dynamic_trace = prev_dynamic_trace @lru_cache() def make_jaxpr(f: Callable, *avals_in: ShapedArray, ) -> Tuple[Jaxpr, List[Any], PyTreeDef]: avals_in, in_tree = tree_flatten(avals_in) f, out_tree = flatten_fun(f, in_tree) builder = JaxprBuilder() with new_main(JaxprTrace, builder) as main: with new_dynamic(main): trace = JaxprTrace(main) tracers_in = [trace.new_arg(aval) for aval in avals_in] outs = f(*tracers_in) tracers_out = [full_raise(trace, out) for out in outs] jaxpr, consts = builder.build(tracers_in, tracers_out) return jaxpr, consts, out_tree() # Part 3: jit def jit(f): def f_jitted(*args): avals_in = [raise_to_shaped(get_aval(x)) for x in args] jaxpr, consts, out_tree = make_jaxpr(f, *avals_in) outs = bind(xla_call_p, *consts, *args, jaxpr=jaxpr, num_consts=len(consts)) return tree_unflatten(out_tree, outs) return f_jitted xla_call_p = Primitive('xla_call') class IDHashable: val: Any def __init__(self, 
val): self.val = val def __hash__(self) -> int: return id(self.val) def __eq__(self, other): return type(other) is IDHashable and id(self.val) == id(other.val) #from jax._src.lib import xla_bridge as xb from jaxdax._src.lib import xla_bridge as xb from jax._src.lib import xla_client as xc xe = xc._xla xops = xc._xla.ops def xla_call_impl(*args, jaxpr: Jaxpr, num_consts: int): consts, args = args[:num_consts], args[num_consts:] hashable_consts = tuple(map(IDHashable, consts)) execute = xla_callable(IDHashable(jaxpr), hashable_consts) return execute(*args) impl_rules[xla_call_p] = xla_call_impl backend_name = None backend_name = 'dax' @lru_cache() def xla_callable(hashable_jaxpr: IDHashable, hashable_consts: Tuple[IDHashable]): jaxpr: Jaxpr = hashable_jaxpr.val typecheck_jaxpr(jaxpr) consts = [x.val for x in hashable_consts] in_avals = [v.aval for v in jaxpr.in_binders[len(consts):]] c = xc.XlaBuilder('xla_call') xla_consts = _xla_consts(c, consts) xla_params = _xla_params(c, in_avals) outs = jaxpr_subcomp(c, jaxpr, xla_consts + xla_params) out = xops.Tuple(c, outs) compiled = xb.get_backend(backend_name).compile(c.build(out)) return partial(execute_compiled, compiled, [v.aval for v in jaxpr.outs]) def _xla_consts(c: xe.XlaBuilder, consts: List[Any]) -> List[xe.XlaOp]: unique_consts = {id(cnst): cnst for cnst in consts} xla_consts = { id_: xops.ConstantLiteral(c, cnst) for id_, cnst in unique_consts.items()} return [xla_consts[id(cnst)] for cnst in consts] def _xla_params(c: xe.XlaBuilder, avals_in: List[ShapedArray]) -> List[xe.XlaOp]: return [xb.parameter(c, i, _xla_shape(a)) for i, a in enumerate(avals_in)] def _xla_shape(aval: ShapedArray) -> xe.Shape: return xc.Shape.array_shape(xc.dtype_to_etype(aval.dtype), aval.shape) def jaxpr_subcomp(c: xe.XlaBuilder, jaxpr: Jaxpr, args: List[xe.XlaOp] ) -> xe.XlaOp: env: Dict[Var, xe.XlaOp] = {} def read(x: Atom) -> xe.XlaOp: return env[x] if type(x) is Var else xops.Constant(c, np.asarray(x.val)) def write(v: Var, val: xe.XlaOp) -> None: env[v] = val map(write, jaxpr.in_binders, args) for eqn in jaxpr.eqns: in_avals = [x.aval for x in eqn.inputs] in_vals = map(read, eqn.inputs) rule = xla_translations[eqn.primitive] out_vals = rule(c, in_avals, in_vals, **eqn.params) map(write, eqn.out_binders, out_vals) return map(read, jaxpr.outs) def execute_compiled(compiled, out_avals, *args): input_bufs = [input_handlers[type(x)](x) for x in args] out_bufs = compiled.execute(input_bufs) return [handle_result(aval, buf) for aval, buf in zip(out_avals, out_bufs)] default_input_handler = xb.get_backend(backend_name).buffer_from_pyval input_handlers = {ty: default_input_handler for ty in [bool, int, float, np.ndarray, np.float64, np.float32]} def handle_result(aval: ShapedArray, buf): del aval # Unused for now return buf.to_py() xla_translations = {} def direct_translation(op, c, in_avals, in_vals): del c, in_avals return [op(*in_vals)] xla_translations[add_p] = partial(direct_translation, xops.Add) xla_translations[mul_p] = partial(direct_translation, xops.Mul) xla_translations[neg_p] = partial(direct_translation, xops.Neg) xla_translations[sin_p] = partial(direct_translation, xops.Sin) xla_translations[cos_p] = partial(direct_translation, xops.Cos) xla_translations[greater_p] = partial(direct_translation, xops.Gt) xla_translations[less_p] = partial(direct_translation, xops.Lt) def reduce_sum_translation(c, in_avals, in_vals, *, axis): (x_aval,), (x,) = in_avals, in_vals zero = xops.ConstantLiteral(c, np.array(0, x_aval.dtype)) subc = xc.XlaBuilder('add') 
shape = _xla_shape(ShapedArray((), x_aval.dtype)) xops.Add(xops.Parameter(subc, 0, shape), xops.Parameter(subc, 1, shape)) return [xops.Reduce(c, [x], [zero], subc.build(), [axis])] xla_translations[reduce_sum_p] = reduce_sum_translation def broadcast_translation(c, in_avals, in_vals, *, shape, axes): x, = in_vals dims_complement = [i for i in range(len(shape)) if i not in axes] return [xops.BroadcastInDim(x, shape, dims_complement)] xla_translations[broadcast_p] = broadcast_translation if 'jvp_rules' in globals(): def xla_call_jvp_rule(primals, tangents, *, jaxpr, num_consts): del num_consts # Unused new_jaxpr, new_consts = jvp_jaxpr(jaxpr) outs = bind(xla_call_p, *new_consts, *primals, *tangents, jaxpr=new_jaxpr, num_consts=len(new_consts)) n = len(outs) // 2 primals_out, tangents_out = outs[:n], outs[n:] return primals_out, tangents_out jvp_rules[xla_call_p] = xla_call_jvp_rule @lru_cache() def jvp_jaxpr(jaxpr: Jaxpr) -> Tuple[Jaxpr, List[Any]]: def jvp_traceable(*primals_and_tangents): n = len(primals_and_tangents) // 2 primals, tangents = primals_and_tangents[:n], primals_and_tangents[n:] return jvp(jaxpr_as_fun(jaxpr), primals, tangents) in_avals = [v.aval for v in jaxpr.in_binders] new_jaxpr, new_consts, _ = make_jaxpr(jvp_traceable, *in_avals, *in_avals) return new_jaxpr, new_consts def xla_call_vmap_rule(axis_size, vals_in, dims_in, *, jaxpr, num_consts): del num_consts # Unused new_jaxpr, new_consts = vmap_jaxpr(jaxpr, axis_size, tuple(dims_in)) outs = bind(xla_call_p, *new_consts, *vals_in, jaxpr=new_jaxpr, num_consts=len(new_consts)) return outs, [0] * len(outs) vmap_rules[xla_call_p] = xla_call_vmap_rule @lru_cache() def vmap_jaxpr(jaxpr: Jaxpr, axis_size: int, bdims_in: Tuple[BatchAxis, ...] ) -> Tuple[Jaxpr, List[Any]]: vmap_traceable = vmap(jaxpr_as_fun(jaxpr), tuple(bdims_in)) in_avals = [unmapped_aval(axis_size, d, v.aval) for v, d in zip(jaxpr.in_binders, bdims_in)] new_jaxpr, new_consts, _ = make_jaxpr(vmap_traceable, *in_avals) return new_jaxpr, new_consts def unmapped_aval(axis_size: int, batch_dim: BatchAxis, aval: ShapedArray ) -> ShapedArray: if batch_dim is not_mapped: return aval else: shape = list(aval.shape) shape.insert(batch_dim, axis_size) return ShapedArray(tuple(shape), aval.dtype) def xla_call_abstract_eval_rule(*in_types, jaxpr, num_consts): del num_consts # Unused jaxpr_type = typecheck_jaxpr(jaxpr) if not all(t1 == t2 for t1, t2 in zip(jaxpr_type.in_types, in_types)): raise TypeError return jaxpr_type.out_types abstract_eval_rules[xla_call_p] = xla_call_abstract_eval_rule def xla_call_translation(c, in_avals, in_vals, *, jaxpr, num_consts): del num_consts # Only used at top-level. # Calling jaxpr_subcomp directly would inline. We generate a Call HLO instead. 
subc = xc.XlaBuilder('inner xla_call') xla_params = _xla_params(subc, in_avals) outs = jaxpr_subcomp(subc, jaxpr, xla_params) subc = subc.build(xops.Tuple(subc, outs)) return destructure_tuple(c, xops.Call(c, subc, in_vals)) xla_translations[xla_call_p] = xla_call_translation def destructure_tuple(c, tup): num_elements = len(c.get_shape(tup).tuple_shapes()) return [xops.GetTupleElement(tup, i) for i in range(num_elements)] # DeviceArrays def handle_result(aval: ShapedArray, buf): # noqa: F811 return DeviceArray(aval, buf) class DeviceArray: buf: Any aval: ShapedArray def __init__(self, aval, buf): self.aval = aval self.buf = buf dtype = property(lambda self: self.aval.dtype) shape = property(lambda self: self.aval.shape) ndim = property(lambda self: self.aval.ndim) def __array__(self): return self.buf.to_py() def __repr__(self): return repr(self.buf.to_py()) def __str__(self): return str(self.buf.to_py()) _neg = staticmethod(neg) _add = staticmethod(add) _radd = staticmethod(add) _mul = staticmethod(mul) _rmul = staticmethod(mul) _gt = staticmethod(greater) _lt = staticmethod(less) input_handlers[DeviceArray] = lambda x: x.buf jax_types.add(DeviceArray) # Part 5: control flow primitives def cond(pred, true_fn, false_fn, *operands): avals_in = [raise_to_shaped(get_aval(x)) for x in operands] true_jaxpr, true_consts, out_tree = make_jaxpr(true_fn, *avals_in) false_jaxpr, false_consts, out_tree_ = make_jaxpr(false_fn, *avals_in) if out_tree != out_tree_: raise TypeError true_jaxpr, false_jaxpr = _join_jaxpr_consts( true_jaxpr, false_jaxpr, len(true_consts), len(false_consts)) if typecheck_jaxpr(true_jaxpr) != typecheck_jaxpr(false_jaxpr): raise TypeError outs = bind_cond(pred, *true_consts, *false_consts, *operands, true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr) return tree_unflatten(out_tree, outs) cond_p = Primitive('cond') def _join_jaxpr_consts(jaxpr1: Jaxpr, jaxpr2: Jaxpr, n1: int, n2: int ) -> Tuple[Jaxpr, Jaxpr]: jaxpr1_type, jaxpr2_type = typecheck_jaxpr(jaxpr1), typecheck_jaxpr(jaxpr2) assert jaxpr1_type.in_types[n1:] == jaxpr2_type.in_types[n2:] consts1, rest1 = split_list(jaxpr1.in_binders, n1) consts2, rest2 = split_list(jaxpr2.in_binders, n2) new_jaxpr1 = Jaxpr(consts1 + consts2 + rest1, jaxpr1.eqns, jaxpr1.outs) new_jaxpr2 = Jaxpr(consts1 + consts2 + rest2, jaxpr2.eqns, jaxpr2.outs) return new_jaxpr1, new_jaxpr2 def bind_cond(pred, *args, true_jaxpr, false_jaxpr): assert len(args) == len(true_jaxpr.in_binders) == len(false_jaxpr.in_binders) return bind(cond_p, pred, *args, true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr) def cond_impl(pred, *operands, true_jaxpr, false_jaxpr): if pred: return eval_jaxpr(true_jaxpr, operands) else: return eval_jaxpr(false_jaxpr, operands) impl_rules[cond_p] = cond_impl if 'jvp_rules' in globals(): def cond_jvp_rule(primals, tangents, *, true_jaxpr, false_jaxpr): pred, *primals = primals _ , *tangents = tangents true_jaxpr , true_consts = jvp_jaxpr(true_jaxpr) false_jaxpr, false_consts = jvp_jaxpr(false_jaxpr) true_jaxpr, false_jaxpr = _join_jaxpr_consts( true_jaxpr, false_jaxpr, len(true_consts), len(false_consts)) assert typecheck_jaxpr(true_jaxpr) == typecheck_jaxpr(false_jaxpr) outs = bind_cond(pred, *true_consts, *false_consts, *primals, *tangents, true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr) primals_out, tangents_out = split_half(outs) return primals_out, tangents_out jvp_rules[cond_p] = cond_jvp_rule def cond_vmap_rule(axis_size, vals_in, dims_in, *, true_jaxpr, false_jaxpr): pred , *vals_in = vals_in pred_dim, *dims_in = dims_in if 
pred_dim is not not_mapped: raise NotImplementedError # TODO true_jaxpr, true_consts = vmap_jaxpr(true_jaxpr, axis_size, tuple(dims_in)) false_jaxpr, false_consts = vmap_jaxpr(false_jaxpr, axis_size, tuple(dims_in)) true_jaxpr, false_jaxpr = _join_jaxpr_consts( true_jaxpr, false_jaxpr, len(true_consts), len(false_consts)) assert typecheck_jaxpr(true_jaxpr) == typecheck_jaxpr(false_jaxpr) outs = bind_cond(pred, *true_consts, *false_consts, *vals_in, true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr) return outs, [0] * len(outs) vmap_rules[cond_p] = cond_vmap_rule def cond_abstract_eval(pred_type, *in_types, true_jaxpr, false_jaxpr): if pred_type != ShapedArray((), np.dtype('bool')): raise TypeError jaxpr_type = typecheck_jaxpr(true_jaxpr) if jaxpr_type != typecheck_jaxpr(false_jaxpr): raise TypeError if not all(t1 == t2 for t1, t2 in zip(jaxpr_type.in_types, in_types)): raise TypeError return jaxpr_type.out_types abstract_eval_rules[cond_p] = cond_abstract_eval def cond_translation(c, in_avals, in_vals, *, true_jaxpr, false_jaxpr): del in_avals # Unused pred, *in_vals = in_vals flat_vals, in_tree = tree_flatten(in_vals) operand = xops.Tuple(c, flat_vals) operand_shape = c.get_shape(operand) def make_comp(name: str, jaxpr: Jaxpr) -> xe.XlaComputation: c = xc.XlaBuilder(name) operand = xb.parameter(c, 0, operand_shape) operands = tree_unflatten(in_tree, destructure_tuple(c, operand)) outs = jaxpr_subcomp(c, jaxpr, operands) return c.build(xops.Tuple(c, outs)) true_comp = make_comp('true_fn', true_jaxpr) false_comp = make_comp('false_fn', false_jaxpr) int_etype = xc.dtype_to_etype(np.dtype('int32')) out = xops.Conditional(xops.ConvertElementType(pred, int_etype), [false_comp, true_comp], [operand] * 2) return destructure_tuple(c, out) xla_translations[cond_p] = cond_translation
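A minimal usage sketch for the two pieces above (the xla_call machinery and the cond primitive). It assumes the wrappers built in earlier parts of the tutorial — jit, jvp, sin, and the > overload routed through greater_p — are in scope; it is illustrative, not part of the file. @jit # dispatches through xla_call_impl / xla_callable defined above def f(x): # branch selection goes through the cond primitive defined above return cond(x > 0., lambda x: 2. * sin(x), lambda x: -x, x) print(f(1.0)) # compiled true branch: 2*sin(1) print(jvp(f, (1.0,), (1.0,))) # differentiates via xla_call_jvp_rule and cond_jvp_rule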
lemma degree_monom_eq: "a \<noteq> 0 \<Longrightarrow> degree (monom a n) = n"
## Setup R error handling to go to stderr options( show.error.messages=F, error = function () { cat( geterrmessage(), file=stderr() ); q( "no", 1, F ) } ) library(RColorBrewer) library(lattice) library(latticeExtra) library(grid) library(gridExtra) library(optparse) # Parse arguments option_list <- list( make_option(c("-r", "--readmap_tab"), type="character", help="Path to file with tabular readmap"), make_option(c("-s", "--size_distribution_tab"), type="character", help="Path to file with tabular size distribution"), make_option("--readmap_pdf", type="character", help="Path to file with readmap plot"), make_option("--size_distribution_pdf", type="character", help="Path to file with size distribution plot"), make_option("--combi_pdf", type="character", help="Path to file with size distribution and readmap plot"), make_option("--title", type="character", help="Title for readmaps and size distribution"), make_option("--xlabel", type="character", help="xlabel for readmaps and size distribution"), make_option("--ylabel", type="character", help="ylabel for readmaps and size distribution"), make_option("--yrange", type="integer", help="Y-axis range"), make_option("--rows_per_page", type="integer", help="rows_per_page") ) parser <- OptionParser(usage = "%prog [options] file", option_list=option_list) args = parse_args(parser) ## data frames implementation rm=read.delim(args$readmap_tab, header=T, row.names=NULL) n_samples=length(unique(rm$sample)) genes=unique(levels(rm$gene)) per_gene_readmap=lapply(genes, function(x) subset(rm, gene==x)) n_genes=length(per_gene_readmap) size=read.delim(args$size_distribution_tab, header=T, row.names=NULL) per_gene_size=lapply(genes, function(x) subset(size, gene==x)) ## end of data frames implementation ## functions plot_readmap=function(df, ...) { combineLimits(xyplot(count~coord|factor(sample, levels=unique(sample))+reorder(gene, count, function(x) -sum(abs(x))), data=df, type='h', scales= list(relation="free", x=list(rot=0, cex=0.7, axs="i", tck=0.5), y=list(tick.number=4, rot=90, cex=0.7)), xlab=NULL, main=NULL, ylab=NULL, as.table=T, origin = 0, horizontal=FALSE, group=polarity, col=c("red","blue"), par.strip.text = list(cex=0.7), ...)) } plot_size_distribution= function(df, ...) { smR.prepanel=function(x,y,...){; yscale=c(-max(abs(y)), max(abs(y)));list(ylim=yscale);} bc= barchart(count~as.factor(size)|factor(sample, levels=unique(sample))+gene, data = df, origin = 0, horizontal=FALSE, group=polarity, stack=TRUE, col=c('red', 'blue'), cex=0.75, scales=list(y=list(tick.number=4, rot=90, relation="free", cex=0.7), x=list(cex=0.7) ), prepanel=smR.prepanel, xlab = NULL, ylab = NULL, main = NULL, as.table=TRUE, newpage = T, par.strip.text = list(cex=0.7), ...) 
combineLimits(bc) } ## end of functions ## function parameters' par.settings.readmap=list(layout.heights=list(top.padding=0, bottom.padding=-2.5), strip.background = list(col=c("lightblue","lightgreen")) ) par.settings.size=list(layout.heights=list(top.padding=-1, bottom.padding=-2.5), strip.background = list(col=c("lightblue","lightgreen")) ) par.settings.combination.readmap=list(layout.heights=list(top.padding=0, bottom.padding=-3), strip.background=list(col=c("lightblue","lightgreen")) ) par.settings.combination.size=list(layout.heights=list(top.padding=-2, bottom.padding=-0.5), strip.background=list(col=c("lightblue", "lightgreen")) ) ## end of function parameters' ## GRAPHS if (n_genes > 7) {page_height_simple = 11.69; page_height_combi=11.69; rows_per_page=args$rows_per_page} else { rows_per_page= n_genes; page_height_simple = 2.5*n_genes; page_height_combi=page_height_simple*2 } if (n_samples > 4) {page_width = 8.2677*n_samples/4} else {page_width = 8.2677*n_samples/3} # to test pdf(file=args$readmap_pdf, paper="special", height=page_height_simple, width=page_width) for (i in seq(1,n_genes,rows_per_page)) { start=i end=i+rows_per_page-1 if (end>n_genes) {end=n_genes} if (args$yrange == 0) { readmap_plot.list=lapply(per_gene_readmap[start:end], function(x) plot_readmap(x, par.settings=par.settings.readmap)) } else { readmap_plot.list=lapply(per_gene_readmap[start:end], function(x) plot_readmap(x, ylim=c(-args$yrange, args$yrange) , par.settings=par.settings.readmap)) } args_list=c(readmap_plot.list, list(nrow=rows_per_page, ncol=1, top=textGrob("Read Maps (nucleotide coordinates)", gp=gpar(cex=1), just="top"), left=textGrob(args$ylabel, gp=gpar(cex=1), vjust=1, rot=90) ) ) do.call(grid.arrange, args_list) } devname=dev.off() pdf(file=args$size_distribution_pdf, paper="special", height=page_height_simple, width=page_width) for (i in seq(1,n_genes,rows_per_page)) { start=i end=i+rows_per_page-1 if (end>n_genes) {end=n_genes} plot.list=lapply(per_gene_size[start:end], function(x) plot_size_distribution(x, par.settings=par.settings.size) ) args_list=c(plot.list, list(nrow=rows_per_page, ncol=1, top=textGrob("Size distributions (in nucleotides)", gp=gpar(cex=1), just="top"), left=textGrob(args$ylabel, gp=gpar(cex=1), vjust=1, rot=90) ) ) do.call(grid.arrange, args_list) } devname=dev.off() pdf(file=args$combi_pdf, paper="special", height=page_height_combi, width=page_width) if (rows_per_page %% 2 != 0) { rows_per_page = rows_per_page + 1} for (i in seq(1,n_genes,rows_per_page/2)) { start=i end=i+rows_per_page/2-1 if (end>n_genes) {end=n_genes} if (args$yrange == 0) {readmap_plot.list=lapply(per_gene_readmap[start:end], function(x) plot_readmap(x, par.settings=par.settings.readmap)) } else { readmap_plot.list=lapply(per_gene_readmap[start:end], function(x) plot_readmap(x, ylim=c(-args$yrange, args$yrange), par.settings=par.settings.readmap)) } size_plot.list=lapply(per_gene_size[start:end], function(x) plot_size_distribution(x, strip=FALSE, par.settings=par.settings.combination.size)) plot.list=rbind(readmap_plot.list, size_plot.list ) args_list=c(plot.list, list(nrow=rows_per_page+1, ncol=1, top=textGrob(args$title, gp=gpar(cex=1), just="top"), left=textGrob(args$ylabel, gp=gpar(cex=1), vjust=1, rot=90), sub=textGrob(args$xlabel, gp=gpar(cex=1), just="bottom") ) ) do.call(grid.arrange, args_list) } devname=dev.off()
-- ---------------------------------------------------------------- [ Eval.idr ] -- Module : Eval.idr -- Copyright : (c) Jan de Muijnck-Hughes -- License : see LICENSE -- --------------------------------------------------------------------- [ EOH ] module Frigg.Eval import Data.AVL.Dict import Readability import Readability.Process.XML import Config.INI import XML.DOM import XML.XPath import XML.XPath.Query import Freyja.Utils import Frigg.Options import Frigg.Error import Frigg.Effs import Frigg.Config -- -------------------------------------------------------------- [ Directives ] %access private -- ------------------------------------------------------------------- [ Begin ] -- @TODO Make nicers getScore : (heading : String) -> (gradings : Dict String Double) -> (doc : XMLDoc) -> Either FriggError Double getScore t gsc doc = do case query (concat ["//", toLower t, "/@score"]) doc of Left err => Left $ ExtractionError err Right xs => case getText xs of Nil => Left $ EvalError $ unwords ["Node Not Found for:", show t] (x::xs) => case getNodeValue x of Nothing => Left $ EvalError $ unwords ["Score Not Given for:", show t] Just v => case Dict.lookup (toLower v) gsc of Nothing => Left $ EvalError $ unwords ["Weighting not found for:", show v] Just s => Right s calcTemplateAdherence : Dict String Double -> Dict String Double -> Document DOCUMENT -> Either FriggError Double calcTemplateAdherence ws gsc doc = do ss <- mapEither (\(k,v) => doCalc k v) $ Dict.toList ws pure $ foldl (+) 0.0 ss where doCalc : String -> Double -> Either FriggError Double doCalc t weight = do res <- getScore t gsc doc pure (weight * res) export evalTemplate : Frigg (Either FriggError Double) evalTemplate = do doc <- getXMLDoc scale <- getGradingScale weight <- getTemplateWeightings case (isEmpty scale, isEmpty weight) of (False, False) => pure $ calcTemplateAdherence weight scale doc otherwise => pure $ Left InvalidMapping export evalTemplateAndReport : Frigg () evalTemplateAndReport = do (Right res) <- evalTemplate | Left err => printLn err printLn res -- --------------------------------------------------------------------- [ EOF ]
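For comparison, a rough Python analogue of getScore/calcTemplateAdherence above, using lxml. The per-heading score attribute and the grade-to-weight dictionary mirror the XPath query and Dict.lookup in the Idris code; the helper names here are hypothetical. from lxml import etree def get_score(heading, gradings, doc): # mirrors getScore: look up //<heading>/@score, then map the grade word # to its numeric weight via the gradings dict vals = doc.xpath(f"//{heading.lower()}/@score") if not vals: raise ValueError(f"Node Not Found for: {heading}") grade = vals[0].lower() if grade not in gradings: raise ValueError(f"Weighting not found for: {grade}") return gradings[grade] def calc_template_adherence(weights, gradings, doc): # weighted sum over all headings, as in calcTemplateAdherence return sum(w * get_score(h, gradings, doc) for h, w in weights.items())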
function xran1(idum) dimension r(97) parameter (m1=259200,ia1=7141,ic1=54773,rm1=3.8580247e-6) parameter (m2=134456,ia2=8121,ic2=28411,rm2=7.4373773e-6) parameter (m3=243000,ia3=4561,ic3=51349) data iff /0/ save r, ix1,ix2,ix3 if (idum.lt.0.or.iff.eq.0) then iff=1 ix1=mod(ic1-idum,m1) ix1=mod(ia1*ix1+ic1,m1) ix2=mod(ix1,m2) ix1=mod(ia1*ix1+ic1,m1) ix3=mod(ix1,m3) do 11 j=1,97 ix1=mod(ia1*ix1+ic1,m1) ix2=mod(ia2*ix2+ic2,m2) r(j)=(float(ix1)+float(ix2)*rm2)*rm1 11 continue idum=1 endif ix1=mod(ia1*ix1+ic1,m1) ix2=mod(ia2*ix2+ic2,m2) ix3=mod(ia3*ix3+ic3,m3) j=1+(97*ix3)/m3 if(j.gt.97.or.j.lt.1)then write(*,*) 'j is bad in ran1.f',j, 97d0*ix3/m3 STOP endif xran1=r(j) r(j)=(float(ix1)+float(ix2)*rm2)*rm1 return end
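What xran1 implements is the Numerical Recipes "ran1" scheme: three linear congruential generators, the first two combined into a float in [0,1) and the third used only to index a 97-entry shuffle table that decorrelates successive outputs. A Python sketch of the same scheme (constants copied from the Fortran above; table index shifted to 0-based): M1, IA1, IC1, RM1 = 259200, 7141, 54773, 3.8580247e-6 M2, IA2, IC2, RM2 = 134456, 8121, 28411, 7.4373773e-6 M3, IA3, IC3 = 243000, 4561, 51349 def make_ran1(seed): # initialization mirrors the iff==0 branch of the Fortran ix1 = (IC1 - seed) % M1 ix1 = (IA1 * ix1 + IC1) % M1 ix2 = ix1 % M2 ix1 = (IA1 * ix1 + IC1) % M1 ix3 = ix1 % M3 table = [] for _ in range(97): ix1 = (IA1 * ix1 + IC1) % M1 ix2 = (IA2 * ix2 + IC2) % M2 table.append((ix1 + ix2 * RM2) * RM1) def ran1(): nonlocal ix1, ix2, ix3 ix1 = (IA1 * ix1 + IC1) % M1 ix2 = (IA2 * ix2 + IC2) % M2 ix3 = (IA3 * ix3 + IC3) % M3 j = (97 * ix3) // M3 # shuffle-table index (0-based here) out = table[j] table[j] = (ix1 + ix2 * RM2) * RM1 # refill the slot just used return out return ran1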
(* Title: HOL/UNITY/Comp/TimerArray.thy Author: Lawrence C Paulson, Cambridge University Computer Laboratory Copyright 1998 University of Cambridge A trivial example of reasoning about an array of processes *) theory TimerArray imports "../UNITY_Main" begin type_synonym 'a state = "nat * 'a" (*second component allows new variables*) definition count :: "'a state => nat" where "count s = fst s" definition decr :: "('a state * 'a state) set" where "decr = (UN n uu. {((Suc n, uu), (n,uu))})" definition Timer :: "'a state program" where "Timer = mk_total_program (UNIV, {decr}, UNIV)" declare Timer_def [THEN def_prg_Init, simp] declare count_def [simp] decr_def [simp] (*Demonstrates induction, but not used in the following proof*) lemma Timer_leadsTo_zero: "Timer : UNIV leadsTo {s. count s = 0}" apply (rule_tac f = count in lessThan_induct, simp) apply (case_tac "m") apply (force intro!: subset_imp_leadsTo) apply (unfold Timer_def, ensures_tac "decr") done lemma Timer_preserves_snd [iff]: "Timer : preserves snd" apply (rule preservesI) apply (unfold Timer_def, safety) done declare PLam_stable [simp] lemma TimerArray_leadsTo_zero: "finite I ==> (plam i: I. Timer) : UNIV leadsTo {(s,uu). ALL i:I. s i = 0}" apply (erule_tac A'1 = "%i. lift_set i ({0} \<times> UNIV)" in finite_stable_completion [THEN leadsTo_weaken]) apply auto (*Safety property, already reduced to the single Timer case*) prefer 2 apply (simp add: Timer_def, safety) (*Progress property for the array of Timers*) apply (rule_tac f = "sub i o fst" in lessThan_induct) apply (case_tac "m") (*Annoying need to massage the conditions to have the form (... \<times> UNIV)*) apply (auto intro: subset_imp_leadsTo simp add: insert_absorb lift_set_Un_distrib [symmetric] lessThan_Suc [symmetric] Times_Un_distrib1 [symmetric] Times_Diff_distrib1 [symmetric]) apply (rename_tac "n") apply (rule PLam_leadsTo_Basis) apply (auto simp add: lessThan_Suc [symmetric]) apply (unfold Timer_def mk_total_program_def, safety) apply (rule_tac act = decr in totalize_transientI, auto) done end
On May 20, 2018, the Bolivarian Republic of Venezuela held Presidential, Municipal and State Legislative elections. By far the biggest result was the reelection of President Nicolás Maduro for a new term (2019-2025) with over 6,192,000 votes, or almost 68% of all valid votes. The Venezuelan Electoral Council reported participation at 46%. The Bolivarian Republic of Venezuela repudiates and protests the decision by the government of the United States of America to impose sanctions against Venezuela’s Head of State, Nicolás Maduro Moros, consummating a serious violation of International Law, an infringement upon his human rights, and an unfriendly action against Venezuela. Likewise, it rejects the insolent and distempered remarks of the National Security Adviser to the White House, H.R. McMaster. These sanctions constitute retaliation against President Nicolas Maduro for having convened the electoral process of greatest magnitude in recent times in Venezuela, for deepening democracy and sovereignty through the activation of the original constituent power, for being a defender of the dignity of Latin America and the Caribbean, and for fiercely opposing white supremacy in the U.S. presidency and its racism that vindicates the Ku Klux Klan. The event organized by the Venezuela Solidarity Committee on July 29, 2017, was a successful display of solidarity with the Bolivarian Revolution. The event was entitled “Affirming Life in Venezuela, Seeds of Solidarity” and was co-sponsored by the following organizations: the International Action Center, CISPES Boston, the July 26 Coalition, Encuentro5, Chelsea Uniting Against the War, Socialist Party, Party for Socialism and Liberation, Communist Party – Boston Branch, Boston May Day Coalition, United for Justice with Peace, Alliance for a Secular and Democratic South Asia, Workers World Party Boston, and Democratic Socialists of America. Their statements of solidarity with Venezuela can be read here. As part of the program there were songs by Ali Primera, interpreted by Sergio Reyes; a report on the current situation in Venezuela by Omar Sierra; a video recorded in Venezuela; a report from the recipients of our seeds of solidarity already sent (the video can be seen on YouTube here); and a presentation by Jorge Marin, member of the VSC, about our work and the need to demand that our country, the United States, stop its intervention in Venezuela in support of the right-wing opposition. There was also a rich discussion during the question-and-answer period. In general, all present expressed their support for our people-to-people solidarity campaign and for the social change campaigns started by the Government of President Hugo Chavez and continued by Nicolas Maduro. A generous amount of money was collected towards buying and sending organic, non-GMO seeds to our friends in Venezuela. Our thanks to all who attended and supported this event. To download the flyer, click here. In their feverish attempts to overthrow the democratically elected government of President Nicolas Maduro in Venezuela, the right-wing opposition is resorting to criminal terrorism. Their criminal actions show an utter disregard for human life. Among other acts of terrorism, they are using lynching, complete with racist and classist intent. On June 3, 2017, Orlando Figueras, 21, an Afro-Venezuelan, was attacked on his way to work in Caracas. He was punched, stabbed with knives, sprayed with gasoline, and set on fire by masked right-wing protesters.
This horrific act of racism was captured on video. Orlando, unfortunately for him, looked “Chavista”. The truth is that the victim had no political affiliation. This was, plain and simple, murder by lynching.
------------------------------------------------------------------------ -- The Agda standard library -- -- Some derivable properties ------------------------------------------------------------------------ open import Algebra module Algebra.Properties.Ring {r₁ r₂} (R : Ring r₁ r₂) where import Algebra.Properties.AbelianGroup as AGP open import Data.Product open import Function import Relation.Binary.EqReasoning as EqR open Ring R open EqR setoid open AGP +-abelianGroup public renaming ( ⁻¹-involutive to -‿involutive ; left-identity-unique to +-left-identity-unique ; right-identity-unique to +-right-identity-unique ; identity-unique to +-identity-unique ; left-inverse-unique to +-left-inverse-unique ; right-inverse-unique to +-right-inverse-unique ; ⁻¹-∙-comm to -‿+-comm ) -‿*-distribˡ : ∀ x y → - x * y ≈ - (x * y) -‿*-distribˡ x y = begin - x * y ≈⟨ sym $ proj₂ +-identity _ ⟩ - x * y + 0# ≈⟨ refl ⟨ +-cong ⟩ sym (proj₂ -‿inverse _) ⟩ - x * y + (x * y + - (x * y)) ≈⟨ sym $ +-assoc _ _ _ ⟩ - x * y + x * y + - (x * y) ≈⟨ sym (proj₂ distrib _ _ _) ⟨ +-cong ⟩ refl ⟩ (- x + x) * y + - (x * y) ≈⟨ (proj₁ -‿inverse _ ⟨ *-cong ⟩ refl) ⟨ +-cong ⟩ refl ⟩ 0# * y + - (x * y) ≈⟨ proj₁ zero _ ⟨ +-cong ⟩ refl ⟩ 0# + - (x * y) ≈⟨ proj₁ +-identity _ ⟩ - (x * y) ∎ -‿*-distribʳ : ∀ x y → x * - y ≈ - (x * y) -‿*-distribʳ x y = begin x * - y ≈⟨ sym $ proj₁ +-identity _ ⟩ 0# + x * - y ≈⟨ sym (proj₁ -‿inverse _) ⟨ +-cong ⟩ refl ⟩ - (x * y) + x * y + x * - y ≈⟨ +-assoc _ _ _ ⟩ - (x * y) + (x * y + x * - y) ≈⟨ refl ⟨ +-cong ⟩ sym (proj₁ distrib _ _ _) ⟩ - (x * y) + x * (y + - y) ≈⟨ refl ⟨ +-cong ⟩ (refl ⟨ *-cong ⟩ proj₂ -‿inverse _) ⟩ - (x * y) + x * 0# ≈⟨ refl ⟨ +-cong ⟩ proj₂ zero _ ⟩ - (x * y) + 0# ≈⟨ proj₂ +-identity _ ⟩ - (x * y) ∎
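In conventional notation, -‿*-distribˡ is the standard ring identity (-x)y = -(xy), and the Agda equational chain above is exactly this calculation: \[ (-x)y = (-x)y + 0 = (-x)y + \bigl(xy - xy\bigr) = \bigl((-x)y + xy\bigr) - xy = \bigl((-x)+x\bigr)y - xy = 0\cdot y - xy = -(xy). \] The second lemma, -‿*-distribʳ, is the mirror image, padding with 0 on the left and distributing x over (y + -y).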
#ifndef SUFFIXTREE_ITERATOR_HPP_ #define SUFFIXTREE_ITERATOR_HPP_ #include <scitbx/suffixtree/edge.hpp> #include <boost/mpl/if.hpp> #include <iterator> #include <deque> namespace scitbx { namespace suffixtree { namespace iterator { template< typename Edge > struct IteratorTraits { typedef Edge edge_type; typedef typename edge_type::const_iterator underlying_const_iterator; typedef typename std::forward_iterator_tag iterator_category; typedef typename edge_type::ptr_type ptr_type; typedef ptr_type const value_type; typedef std::ptrdiff_t difference_type; typedef value_type& reference; typedef value_type* pointer; }; template< typename Edge > class PreOrder { public: typedef Edge edge_type; typedef IteratorTraits< Edge > traits_type; typedef typename traits_type::iterator_category iterator_category; typedef typename traits_type::value_type value_type; typedef typename traits_type::difference_type difference_type; typedef typename traits_type::reference reference; typedef typename traits_type::pointer pointer; typedef typename traits_type::underlying_const_iterator underlying_const_iterator; typedef typename traits_type::ptr_type ptr_type; typedef PreOrder iterator; private: ptr_type root_; bool at_top_; underlying_const_iterator pos_; std::deque< underlying_const_iterator > underlying_iterators_deque_; private: PreOrder(ptr_type const& root, bool at_top); public: static iterator begin(ptr_type const& root); static iterator end(ptr_type const& root); public: ~PreOrder(); reference operator *() const; pointer operator ->() const; iterator& operator ++(); iterator operator ++(int); template< typename E2 > friend bool operator ==(PreOrder< E2 > const& lhs, PreOrder< E2 > const& rhs); }; template< typename Edge > bool operator !=(PreOrder< Edge > const& lhs, PreOrder< Edge > const& rhs); template< typename Edge > class PostOrder { public: typedef Edge edge_type; typedef IteratorTraits< Edge > traits_type; typedef typename traits_type::iterator_category iterator_category; typedef typename traits_type::value_type value_type; typedef typename traits_type::difference_type difference_type; typedef typename traits_type::reference reference; typedef typename traits_type::pointer pointer; typedef typename traits_type::underlying_const_iterator underlying_const_iterator; typedef typename traits_type::ptr_type ptr_type; typedef PostOrder iterator; private: ptr_type root_; bool at_top_; underlying_const_iterator pos_; std::deque< underlying_const_iterator > underlying_iterators_deque_; private: PostOrder(ptr_type const& root, bool at_top); public: static iterator begin(ptr_type const& root); static iterator end(ptr_type const& root); public: ~PostOrder(); reference operator *() const; pointer operator ->() const; iterator& operator ++(); iterator operator ++(int); private: void descend(); template< typename E2 > friend bool operator ==(PostOrder< E2 > const& lhs, PostOrder< E2 > const& rhs); }; template< typename Edge > bool operator !=(PostOrder< Edge > const& lhs, PostOrder< Edge > const& rhs); #include "iterator.hxx" } // namespace iterator } // namespace suffixtree } // namespace scitbx #endif // SUFFIXTREE_ITERATOR_HPP_
module Main %default total -- An expensive function. qib : Nat -> Nat qib Z = 1 qib (S Z) = 2 qib (S (S n)) = qib n * qib (S n) -- An equality whose size reflects the size of numbers. data Equals : Nat -> Nat -> Type where EqZ : Z `Equals` Z EqS : m `Equals` n -> S m `Equals` S n eq_refl : {n : Nat} -> n `Equals` n eq_refl {n = Z} = EqZ eq_refl {n = S n} = EqS eq_refl -- Here, the proof is very expensive to compute. -- We add a recursive argument to prevent Idris from inlining the function. f : (r, n : Nat) -> Subset Nat (\k => qib n `Equals` qib k) f Z n = Element n eq_refl f (S r) n = f r n -- A (contrived) relation, just to have something to show. data Represents : Nat -> Nat -> Type where Axiom : (n : Nat) -> qib n `Represents` n -- Here, the witness is very expensive to compute. -- We add a recursive argument to prevent Idris from inlining the function. g : (r, n : Nat) -> Exists (\k : Nat => k `Represents` n) g Z n = Evidence (qib n) (Axiom n) g (S r) n = g r n fmt : qib n `Represents` n -> String fmt (Axiom n) = "Axiom " ++ show n main : IO () main = do n <- map (const (the Nat 10000)) (putStrLn "*oink*") putStrLn . show $ getWitness (f 4 n) putStrLn . fmt $ getProof (g 4 n)
If $p$ is a prime number, then $d$ divides $p^k$ if and only if $d = p^i$ for some $i \leq k$.
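Assuming the statement is over the natural numbers, the same fact is available in Lean 4 via Mathlib; the lemma name Nat.dvd_prime_pow and the import path below are Mathlib's and may vary between versions. -- Sketch; `Nat.dvd_prime_pow` is Mathlib's name for this fact over ℕ. import Mathlib.Data.Nat.Prime.Basic example {p k d : ℕ} (hp : p.Prime) : d ∣ p ^ k ↔ ∃ i ≤ k, d = p ^ i := Nat.dvd_prime_pow hp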
function check_valid_code(code) occursin(r"^[\sa-zA-Z0-9_]+$", code) || throw(ErrorException("`$code` is not a valid series code")) end function check_valid_date(date) occursin(r"\d{4}-\d{2}-\d{2}", date) && length(date)==10 end function check_valid_as_at_date(date) check_valid_date(date) || throw(ErrorException("Invalid as_at_date `$date`. Date must be in form YYYY-MM-DD.")) end function check_valid_vintage(vintage) (vintage == "current" || vintage == "latest" || vintage == "previous" || check_valid_date(vintage)) || throw(ErrorException("Invalid vintage `$vintage`. Options are a date (in form YYYY-MM-DD) or one of `current` (alias `latest`) or `previous`")) end
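A Python rendition of the same validation logic, for comparison; it mirrors the Julia functions above rather than any particular library API. Note that in check_valid_date the length check makes the unanchored regex effectively anchored, since a 10-character string containing the pattern must match it exactly. import re def check_valid_code(code: str) -> None: # same character class as the Julia version, anchored over the whole string if not re.fullmatch(r"[\sa-zA-Z0-9_]+", code): raise ValueError(f"`{code}` is not a valid series code") def check_valid_date(date: str) -> bool: return bool(re.search(r"\d{4}-\d{2}-\d{2}", date)) and len(date) == 10 def check_valid_vintage(vintage: str) -> None: if not (vintage in ("current", "latest", "previous") or check_valid_date(vintage)): raise ValueError( f"Invalid vintage `{vintage}`. Options are a date (YYYY-MM-DD) " "or one of `current` (alias `latest`) or `previous`" )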
{-# OPTIONS --universe-polymorphism #-} open import Categories.Category open import Categories.Support.Equivalence module Categories.Object.Indexed {o ℓ e c q} (C : Category o ℓ e) (B : Setoid c q) where open import Categories.Support.SetoidFunctions open Category C open _⟶_ public using () renaming (cong to cong₀; _⟨$⟩_ to _!_) Objoid = set→setoid Obj Dust = B ⟶ Objoid dust-setoid = B ⇨ Objoid
/** * testAbst.cpp * * Test abstraction by using a car kinematics model. * * Created by Yinan Li on Nov. 14, 2020. * Hybrid Systems Group, University of Waterloo. */ #include <iostream> #include <fstream> #include <sstream> #include <string> #include <algorithm> #include <cmath> #include <sys/stat.h> #include <boost/numeric/odeint.hpp> #include "src/grid.h" #include "src/definitions.h" #include "src/abstraction.hpp" #include "src/hdf5io.h" /* user defined dynamics */ struct car_ode { rocs::Rn u; car_ode (const rocs::Rn param): u (param) {} /** * ODE model * @param x system state: [x,y,theta], n=3 * @param dxdt vector field * @param t time */ void operator() (rocs::Rn &x, rocs::Rn &dxdt, double t) const { dxdt[0] = u[0]*std::cos(x[2]); dxdt[1] = u[0]*std::sin(x[2]); dxdt[2] = u[1]; } }; const double h = 0.3; // sampling time const double dt = 0.001; //integration step size for odeint struct carde { // discrete-time model (difference equation) static const int n = 3; // system dimension static const int m = 2; /** * Discrete-time dynamics * @param h sampling time * @param x system state: [x,y,theta], n=3 * @param u control array (size of 2, velocity and steering angle) * @param nu the number of different control values */ template<typename S> carde(S &dx, const S &x, rocs::Rn u) { if (std::fabs(u[0]) < 1e-6) { //v=0 dx[0] = x[0]; dx[1] = x[1]; dx[2] = x[2] + u[1] * h; } else if (std::fabs(u[1]) < 1e-6) { //w=0 dx[0] = x[0] + u[0]* cos(x[2])*h; dx[1] = x[1] + u[0]* sin(x[2])*h; dx[2] = x[2]; } else { //v,w not 0 dx[0] = x[0] + u[0]/u[1]*2*sin(u[1]*h/2.)*cos(x[2]+u[1]*h/2.); dx[1] = x[1] + u[0]/u[1]*2*sin(u[1]*h/2.)*sin(x[2]+u[1]*h/2.); dx[2] = x[2] + u[1] * h; } } }; // struct carde struct twoagent { static const int n = 3; // system dimension static const int nu = 2; // control dimension rocs::ivec d{rocs::interval(-0.8, 0.8), rocs::interval(-0.8, 0.8)}; /* template constructor * @param[out] dx * @param[in] x = [xr, yr, psir] * @param u = [v, w] * @param d = [v', w'] */ template<typename S> twoagent(S *dx, const S *x, rocs::Rn u) { dx[0] = -u[0] + d[0]*cos(x[2]) + u[1]*x[1]; dx[1] = d[0]*sin(x[2]) - u[1]*x[0]; dx[2] = d[1] - u[1]; } }; int main() { /* Config */ clock_t tb, te; boost::numeric::odeint::runge_kutta_cash_karp54<rocs::Rn> rk45; /** * Case I */ /* Set the state and control space */ const int xdim = 3; const int udim = 2; double xlb[] = {-3, -3, -M_PI}; double xub[] = {3, 3, M_PI}; double eta[] = {0.2, 0.2, 0.2}; double ulb[] = {-1.0, -1.0}; double uub[] = {1.0, 1.0}; double mu[] = {0.3, 0.3}; /** * Define the two-agent system */ double t = 0.3; double delta = 0.01; /* parameters for computing the flow */ int kmax = 5; double tol = 0.01; double alpha = 0.5; double beta = 2; rocs::params controlparams(kmax, tol, alpha, beta); rocs::CTCntlSys<twoagent> safety("collision-free", t, twoagent::n, twoagent::nu, delta, &controlparams); safety.init_workspace(xlb, xub); safety.init_inputset(mu, ulb, uub); safety.allocate_flows(); rocs::abstraction< rocs::CTCntlSys<twoagent> > abst(&safety); abst.init_state(eta, xlb, xub); std::cout << "# of in-domain nodes: " << abst._x._nv << '\n'; /** * Assign 1 to the target invariant set and 0 to others. * Mark 0 for any box intersect or inside the cylinder: x^2+y^2<=rmin^2, any phi. * The invariant set is the region outside of the cylinder. 
*/ auto inv_set = [&abst, &eta](size_t i) { const double rmin = 1.21; std::vector<double> x(abst._x._dim); abst._x.id_to_val(x, i); double xl = x[0] - eta[0]/2.; double xr = x[0] + eta[0]/2.; double yl = x[1] - eta[1]/2.; double yr = x[1] + eta[1]/2.; double xsqr = (xr*xr) > (xl*xl) ? (xl*xl) : (xr*xr); double ysqr = (yr*yr) > (yl*yl) ? (yl*yl) : (yr*yr); if(xsqr + ysqr < rmin*rmin) return 0; else return 1; }; abst.assign_labels(inv_set); abst.assign_label_outofdomain(1); //out of domain is safe std::string transfile = "abstca_0.2-0.2-0.2.h5"; struct stat buffer; if(stat(transfile.c_str(), &buffer) == 0) { /* Read from a file */ std::cout << "Reading transitions...\n"; rocs::h5FileHandler transRdr(transfile, H5F_ACC_RDONLY); tb = clock(); transRdr.read_transitions(abst._ts); te = clock(); } else { std::cout << "No transition file found. Computing transitions...\n"; /* Robustness margins */ double e1[] = {0,0,0}; double e2[] = {0,0,0}; tb = clock(); abst.assign_transitions(e1, e2); te = clock(); /* Write transitions to file */ rocs::h5FileHandler transWtr(transfile, H5F_ACC_TRUNC); transWtr.write_transitions(abst._ts); } float time = (float)(te - tb)/CLOCKS_PER_SEC; std::cout << "Time of reading/computing abstraction: " << time << '\n'; std::cout << "# of all nodes: " << abst._ts._nx << '\n'; std::cout << "# of actions: " << abst._ts._nu << '\n'; std::cout << "# of transitions: " << abst._ts._ntrans << '\n'; /** * Case II */ // /* Set the state space */ // const int xdim = 3; // const int udim = 2; // const double theta = 3.5; // double xlb[] = {0, 0, -theta}; // double xub[] = {10, 10, theta}; // double eta[] = {0.2, 0.2, 0.2}; // /* Set the control values */ // double ulb[] = {-1.0, -1.0}; // double uub[] = {1.0, 1.0}; // double mu[] = {0.3, 0.3}; // /* Define the control system */ // rocs::DTCntlSys<carde> car("DBA", h, carde::n, carde::m); // car.init_workspace(xlb, xub); // car.init_inputset(mu, ulb, uub); // rocs::abstraction< rocs::DTCntlSys<carde> > abst(&car); // abst.init_state(eta, xlb, xub); // std::cout << "# of in-domain nodes: " << abst._x._nv << '\n'; // /* Assign the label of avoid area to -1 */ // rocs::UintSmall nAvoid = 4; // double obs[4][4] = { // {1.6, 5.7, 4.0, 5.0}, // {3.0, 5.0, 5.0, 8.0}, // {4.3, 5.7, 1.8, 4.0}, // {5.7, 8.5, 1.8, 2.5} // }; // auto label_avoid = [&obs, &nAvoid, &abst, &eta](size_t i) { // std::vector<double> x(abst._x._dim); // abst._x.id_to_val(x, i); // double c1= eta[0]/2.0+1e-10; // double c2= eta[1]/2.0+1e-10; // for(size_t i = 0; i < nAvoid; ++i) { // if ((obs[i][0]-c1) <= x[0] && x[0] <= (obs[i][1]+c1) && // (obs[i][2]-c2) <= x[1] && x[1] <= (obs[i][3]+c2)) // return -1; // } // return 0; // }; // abst.assign_labels(label_avoid); // abst.assign_label_outofdomain(-1); // std::vector<size_t> obstacles; // for (size_t i = 0; i < abst._x._nv; ++i) { // if (abst._labels[i] < 0) // obstacles.push_back(i); // } // /* Compute/Read abstraction */ // float tabst; // std::string transfile = "abstfull_0.2-0.2-0.2.h5"; // struct stat buffer; // if(stat(transfile.c_str(), &buffer) == 0) { // /* Read from a file */ // std::cout << "Reading transitions...\n"; // rocs::h5FileHandler transRdr(transfile, H5F_ACC_RDONLY); // tb = clock(); // transRdr.read_transitions(abst._ts); // te = clock(); // } else { // std::cout << "No transition file found. 
Computing transitions...\n"; // /* Robustness margins */ // double e1[] = {0,0,0}; // double e2[] = {0,0,0}; // tb = clock(); // abst.assign_transitions(e1, e2); // te = clock(); // /* Write abstraction to file */ // rocs::h5FileHandler transWtr(transfile, H5F_ACC_TRUNC); // transWtr.write_transitions(abst._ts); // } // tabst = (float)(te - tb)/CLOCKS_PER_SEC; // std::cout << "Time of reading/computing abstraction: " << tabst << '\n'; // std::cout << "# of all nodes: " << abst._ts._nx << '\n'; // std::cout << "# of actions: " << abst._ts._nu << '\n'; // std::cout << "# of transitions: " << abst._ts._ntrans << '\n'; /* Test */ size_t na = abst._ts._nu; size_t nx = abst._ts._nx; size_t si, sk, k; bool suc = 0; int np; /* Test post-pre consistency */ std::cout << "Checking post-pre consistency...\n"; for(size_t i = 0; i < nx; ++i) { for(size_t j = 0; j < na; ++j) { si = abst._ts._ptrpost[i*na+j]; for(size_t p=si; p<si+abst._ts._npost[i*na+j]; ++p) { k = abst._ts._idpost[p]; /* Test if the pre of post by j contains i */ suc = 0; sk = abst._ts._ptrpre[k*na+j]; // /********** logging **********/ // if(i == 0 && j == 16 && k == 0) { // std::cout << "The predecessors of " << k << " with " << j << ": "; // } // /********** logging **********/ for(size_t pp=sk; pp<sk+abst._ts._npre[k*na+j]; ++pp) { // /********** logging **********/ // if(i == 0 && j == 16 && k == 0) { // std::cout << "idpre["<< pp << "]=" // << abst._ts._idpre[pp] << '\n'; // } // /********** logging **********/ if(abst._ts._idpre[pp] == i) { suc = 1; break; } } if(i == 0 && j == 16 && k == 0) { std::cout << '\n'; } if(!suc) {//two cases: npre(k,j)=0 or no i in npre(k, j) std::cout << "Post and pre transitions are inconsistent " << i << "->(" << j << ")->" << k << '\n'; return -1; } } } } std::cout << "Every post transition has its corresponding pre transition.\n"; for(size_t i = 0; i < nx; ++i) { for(size_t j = 0; j < na; ++j) { si = abst._ts._ptrpre[i*na+j]; for(size_t p=si; p<si+abst._ts._npre[i*na+j]; ++p) { k = abst._ts._idpre[p]; /* Test if the post of pre by j contains i */ suc = 0; sk = abst._ts._ptrpost[k*na+j]; for(size_t pp=sk; pp<sk+abst._ts._npost[k*na+j]; ++pp) { if(abst._ts._idpost[pp] == i) { suc = 1; break; } } if(!suc) {//two cases: npost(k,j)=0 or no i in npost(k, j) std::cout << "Post and pre transitions are inconsistent " << k << "->(" << j << ")->" << i << '\n'; return -1; } } } } std::cout << "Every pre transition has its corresponding post transition.\n"; // /* Test reachable set computation */ // std::cout << "Checking post transitions by rechable set computation...\n"; // rocs::Rn x(xdim); // rocs::Rn u(udim); // rocs::Rn xpost(xdim); // rocs::ivec box(xdim); // std::vector<rocs::ivec> reachset(na, rocs::ivec(xdim)); // // std::vector<rocs::Rn> corners(std::pow(2, xdim), rocs::Rn(xdim)); // rocs::Rn corner(xdim); // int quo, rem; // rocs::ivec margin{rocs::interval(-rocs::EPSIVAL, rocs::EPSIVAL), // rocs::interval(-rocs::EPSIVAL, rocs::EPSIVAL), // rocs::interval(-rocs::EPSIVAL, rocs::EPSIVAL)}; // rocs::ivec yt(xdim); // for(size_t i = 0; i < nx; ++i) { // // std::cout << "State x= " << '(' << x[0] << ',' << x[1] << ',' << x[2] << "):\n"; // if(i < abst._x._nv) { //belongs to xgrid // /* Compute the reachable set */ // abst._x.id_to_val(x, i); //x is the center of the box i // for(int d = 0; d < xdim; ++d) // box.setval(d, rocs::interval(x[d]-eta[d]/2., x[d]+eta[d]/2.)); // car.get_reach_set(reachset, box); // /* Test valid control inputs */ // for(size_t j = 0; j < na; ++j) { // 
if(abst._ts._npost[i*na+j] > 0) { // car._ugrid.id_to_val(u, j); //get control values // /* Test if the reachable set covers ode solutions of all corners */ // for(int k = 0; k < std::pow(2, xdim); ++k) { // quo = k; // for(int d = 0; d < xdim; ++d) { // if(quo % 2) { // corner[d] = x[d]+eta[d]/2.; //upper bound // } else { // corner[d] = x[d]-eta[d]/2.; //lower bound // } // quo /= 2; // } // // std::cout << "Corner " // // << '(' << corner[0] << ',' << corner[1] << ',' << corner[2] << ")\n"; // boost::numeric::odeint::integrate_const(rk45, car_ode(u), corner, 0.0, h, dt); // yt = reachset[j] + margin; // if(!yt.isin(corner)) { // std::cout << "The reachable set is incorrect with u=" // << '(' << u[0] << ',' << u[1] << "):" // << '(' << corner[0] << ',' << corner[1] << ',' << corner[2] << ')' // << " is not in " << yt << '\n' // << "Test terminates.\n"; // return -1; // } // } // /* Test if all post nodes are in the reachable set (soundness) */ // si = abst._ts._ptrpost[i*na+j]; // for(size_t p = si; p<si+abst._ts._npost[i*na+j]; ++p) { // abst._x.id_to_val(xpost, abst._ts._idpost[p]); //xpost: post node center // for(int d = 0; d < xdim; ++d) //box: post interval centered at xpost // box.setval(d, rocs::interval(xpost[d]-eta[d]/2., xpost[d]+eta[d]/2.)); // if(reachset[j].isout(box)) { //box and reachset[j] should intersect // std::cout << "Post transition for xid,uid=" << i << ',' << j // << " is incorrect.\n" // << "Test terminates.\n"; // return -1; // } // } // } // }//end for control values // } else { //out-of-domain node // std::cout << "Checking the out-of-domain node...\n"; // } // } return 0; }
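The two loops above verify that the post and pre encodings of the transition relation are mutual transposes. Stripped of the C++ bookkeeping, the invariant being checked is the following (a Python sketch; the field names follow abst._ts in the code above, and the symmetric pre-to-post direction is analogous): def check_post_pre_consistent(ts, nx, na): # every post edge i --u--> k must appear as a pre edge of k under u for i in range(nx): for u in range(na): s = ts.ptrpost[i * na + u] for p in range(s, s + ts.npost[i * na + u]): k = ts.idpost[p] sk = ts.ptrpre[k * na + u] pres = ts.idpre[sk : sk + ts.npre[k * na + u]] if i not in pres: return False # npre(k,u)=0, or i missing from it return True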
State Before: R : Type u_2 B : Type u_1 F : Type u_3 E : B → Type ?u.414625 inst✝⁸ : NontriviallyNormedField R inst✝⁷ : (x : B) → AddCommMonoid (E x) inst✝⁶ : (x : B) → Module R (E x) inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace R F inst✝³ : TopologicalSpace B inst✝² : TopologicalSpace (TotalSpace E) inst✝¹ : (x : B) → TopologicalSpace (E x) inst✝ : FiberBundle F E ι : Type u_4 Z : VectorBundleCore R B F ι b✝ : B a : F i j : ι b : B hb : b ∈ baseSet Z i ∩ baseSet Z j v : F ⊢ ↑(Trivialization.coordChangeL R (localTriv Z i) (localTriv Z j) b) v = ↑(coordChange Z i j b) v State After: case a R : Type u_2 B : Type u_1 F : Type u_3 E : B → Type ?u.414625 inst✝⁸ : NontriviallyNormedField R inst✝⁷ : (x : B) → AddCommMonoid (E x) inst✝⁶ : (x : B) → Module R (E x) inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace R F inst✝³ : TopologicalSpace B inst✝² : TopologicalSpace (TotalSpace E) inst✝¹ : (x : B) → TopologicalSpace (E x) inst✝ : FiberBundle F E ι : Type u_4 Z : VectorBundleCore R B F ι b✝ : B a : F i j : ι b : B hb : b ∈ baseSet Z i ∩ baseSet Z j v : F ⊢ { fst := (b, v).fst, snd := ↑(coordChange Z i (indexAt Z (b, v).fst) (b, v).fst) (b, v).snd }.fst ∈ baseSet Z i ∩ baseSet Z (indexAt Z { fst := (b, v).fst, snd := ↑(coordChange Z i (indexAt Z (b, v).fst) (b, v).fst) (b, v).snd }.fst) ∩ baseSet Z j case hb R : Type u_2 B : Type u_1 F : Type u_3 E : B → Type ?u.414625 inst✝⁸ : NontriviallyNormedField R inst✝⁷ : (x : B) → AddCommMonoid (E x) inst✝⁶ : (x : B) → Module R (E x) inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace R F inst✝³ : TopologicalSpace B inst✝² : TopologicalSpace (TotalSpace E) inst✝¹ : (x : B) → TopologicalSpace (E x) inst✝ : FiberBundle F E ι : Type u_4 Z : VectorBundleCore R B F ι b✝ : B a : F i j : ι b : B hb : b ∈ baseSet Z i ∩ baseSet Z j v : F ⊢ b ∈ (localTriv Z i).baseSet ∩ (localTriv Z j).baseSet Tactic: rw [Trivialization.coordChangeL_apply', localTriv_symm_fst, localTriv_apply, coordChange_comp] State Before: case a R : Type u_2 B : Type u_1 F : Type u_3 E : B → Type ?u.414625 inst✝⁸ : NontriviallyNormedField R inst✝⁷ : (x : B) → AddCommMonoid (E x) inst✝⁶ : (x : B) → Module R (E x) inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace R F inst✝³ : TopologicalSpace B inst✝² : TopologicalSpace (TotalSpace E) inst✝¹ : (x : B) → TopologicalSpace (E x) inst✝ : FiberBundle F E ι : Type u_4 Z : VectorBundleCore R B F ι b✝ : B a : F i j : ι b : B hb : b ∈ baseSet Z i ∩ baseSet Z j v : F ⊢ { fst := (b, v).fst, snd := ↑(coordChange Z i (indexAt Z (b, v).fst) (b, v).fst) (b, v).snd }.fst ∈ baseSet Z i ∩ baseSet Z (indexAt Z { fst := (b, v).fst, snd := ↑(coordChange Z i (indexAt Z (b, v).fst) (b, v).fst) (b, v).snd }.fst) ∩ baseSet Z j case hb R : Type u_2 B : Type u_1 F : Type u_3 E : B → Type ?u.414625 inst✝⁸ : NontriviallyNormedField R inst✝⁷ : (x : B) → AddCommMonoid (E x) inst✝⁶ : (x : B) → Module R (E x) inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace R F inst✝³ : TopologicalSpace B inst✝² : TopologicalSpace (TotalSpace E) inst✝¹ : (x : B) → TopologicalSpace (E x) inst✝ : FiberBundle F E ι : Type u_4 Z : VectorBundleCore R B F ι b✝ : B a : F i j : ι b : B hb : b ∈ baseSet Z i ∩ baseSet Z j v : F ⊢ b ∈ (localTriv Z i).baseSet ∩ (localTriv Z j).baseSet State After: no goals Tactic: exacts [⟨⟨hb.1, Z.mem_baseSet_at b⟩, hb.2⟩, hb]
/*! \file \brief A constraint. Copyright (C) 2019-2022 kaoru https://www.tetengo.org/ */ #if !defined(TETENGO_LATTICE_CONSTRAINT_HPP) #define TETENGO_LATTICE_CONSTRAINT_HPP #include <memory> #include <vector> #include <boost/core/noncopyable.hpp> namespace tetengo::lattice { class constraint_element; class node; /*! \brief A constraint. */ class constraint : private boost::noncopyable { public: // constructors and destructor /*! \brief Creates an empty constraint. It matches any path. */ constraint(); /*! \brief Creates a constraint. \param pattern A pattern. */ explicit constraint(std::vector<std::unique_ptr<constraint_element>>&& pattern); /*! \brief Destroys the constraint. */ ~constraint(); // functions /*! \brief Returns true when the path matches the pattern. \param reverse_path A path in reverse order. \retval true When the path matches the pattern. \retval false Otherwise. */ [[nodiscard]] bool matches(const std::vector<node>& reverse_path) const; /*! \brief Returns true when the tail path matches the tail of the pattern. \param reverse_tail_path A tail path in reverse order. \retval true When the tail path matches the tail of the pattern. \retval false Otherwise. */ [[nodiscard]] bool matches_tail(const std::vector<node>& reverse_tail_path) const; private: // types class impl; // variables const std::unique_ptr<impl> m_p_impl; }; } #endif