module Circuits.Idealised.Terms
import Decidable.Equality
import Data.Nat
import Data.List.Elem
import Data.List.Quantifiers
import Toolkit.Data.Whole
import Circuits.Idealised.Types
import Circuits.Split
%default total
namespace Circuits
public export
data Term : (old : List (Ty, Usage))
-> (type : Ty)
-> (new : List (Ty, Usage))
-> Type
where
Var : (prf : Elem (type, FREE) old)
-> Use old prf new
-> Term old type new
NewSignal : (flow : Direction)
-> (datatype : DType)
-> (body : Term ((TyPort (flow, datatype), FREE) :: old)
TyUnit
Nil)
-> Term old TyUnit Nil
Wire : (datatype : DType)
-> (body : Term ((TyPort (OUTPUT,datatype), FREE)::(TyPort (INPUT,datatype), FREE)::old)
TyUnit
Nil)
-> Term old TyUnit Nil
Mux : (output : Term a (TyPort (OUTPUT, datatype)) b)
-> (control : Term b (TyPort (INPUT, LOGIC)) c)
-> (inputA : Term c (TyPort (INPUT, datatype)) d)
-> (inputB : Term d (TyPort (INPUT, datatype)) e)
-> Term a TyGate e
Dup : (outA : Term a (TyPort (OUTPUT, datatype)) b)
-> (outB : Term b (TyPort (OUTPUT, datatype)) c)
-> (input : Term c (TyPort (INPUT, datatype)) d)
-> Term a TyGate d
Seq : Term a TyGate b
-> Term b TyUnit Nil
-> Term a TyUnit Nil
Not : (output : Term a (TyPort (OUTPUT, LOGIC)) b)
-> (input : Term b (TyPort (INPUT, LOGIC)) c)
-> Term a TyGate c
Gate : (kind : GateKind)
-> (output : Term a (TyPort (OUTPUT, LOGIC)) b)
-> (inputA : Term b (TyPort (INPUT, LOGIC)) c)
-> (inputB : Term c (TyPort (INPUT, LOGIC)) d)
-> Term a TyGate d
IndexSingleton : (output : Term a (TyPort (OUTPUT, datatype)) b)
-> (input : Term b (TyPort (INPUT, (BVECT (W (S Z) ItIsSucc) datatype))) c)
-> Term a TyGate c
IndexEdge : (pivot : Nat)
-> (idx : Index size pivot free)
-> (outu : Term a (TyPort (OUTPUT, datatype)) b)
-> (outf : Term b (TyPort (OUTPUT, (BVECT free datatype))) c)
-> (input : Term c (TyPort (INPUT, (BVECT size datatype))) d)
-> Term a TyGate d
IndexSplit : (pivot : Nat)
-> (idx : Index Z pivot size sizeA sizeB)
-> (usedA : Term a (TyPort (OUTPUT, datatype)) b)
-> (freeA : Term b (TyPort (OUTPUT, (BVECT sizeA datatype))) c)
-> (freeB : Term c (TyPort (OUTPUT, (BVECT sizeB datatype))) d)
-> (input : Term d (TyPort (INPUT, (BVECT size datatype))) e)
-> Term a TyGate e
Merge2L2V : (output : Term a (TyPort (OUTPUT, BVECT (W (S (S Z)) ItIsSucc) LOGIC)) b)
-> (inputA : Term b (TyPort (INPUT, LOGIC)) c)
-> (inputB : Term c (TyPort (INPUT, LOGIC)) d)
-> Term a TyGate d
Merge2V2V : (prf : Plus sizeA sizeB sizeC)
-> (output : Term a (TyPort (OUTPUT, BVECT sizeC datatype)) b)
-> (inputA : Term b (TyPort (INPUT, BVECT sizeA datatype)) c)
-> (inputB : Term c (TyPort (INPUT, BVECT sizeB datatype)) d)
-> Term a TyGate d
MergeSingleton : (output : Term a (TyPort (OUTPUT, (BVECT (W (S Z) ItIsSucc) datatype))) b)
-> (input : Term b (TyPort (INPUT, datatype)) c)
-> Term a TyGate c
Stop : All Used old -> Term old TyUnit Nil
-- [ EOF ]
|
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from nums.core.array.application import ArrayApplication
from nums.core.array.blockarray import BlockArray
from nums.core.grid.grid import ArrayGrid
from nums.core.grid.grid import DeviceID
from nums.core.storage.storage import BimodalGaussian
from nums.core.systems import utils as systems_utils
import common # pylint: disable=import-error, wrong-import-order
def test_scalar_op(app_inst: ArrayApplication):
app_inst.scalar(1)
app_inst.scalar(False)
app_inst.scalar(2.0)
app_inst.scalar(np.float32(1))
app_inst.scalar(np.complex64(1))
with pytest.raises(ValueError):
app_inst.scalar(np.array(1))
def test_device_id_hashing(app_inst: ArrayApplication):
assert app_inst is not None
d1 = DeviceID(0, "node:localhost1", "cpu", 0)
d2 = DeviceID(1, "node:localhost2", "cpu", 0)
x = {}
x[d1] = "one"
x[d2] = "two"
assert x[d1] == "one"
assert x[d2] == "two"
def test_array_integrity(app_inst: ArrayApplication):
shape = 12, 21
npX = np.arange(np.prod(shape)).reshape(*shape)
X = app_inst.array(npX, block_shape=(6, 7))
common.check_block_integrity(X)
def test_transpose(app_inst: ArrayApplication):
real_X, _ = BimodalGaussian.get_dataset(100, 9)
X = app_inst.array(real_X, block_shape=(100, 1))
assert np.allclose(X.T.get(), real_X.T)
# Identity.
assert np.allclose(X.T.T.get(), X.get())
assert np.allclose(X.T.T.get(), real_X)
def test_reshape(app_inst: ArrayApplication):
real_X, _ = BimodalGaussian.get_dataset(1000, 9)
X = app_inst.array(real_X, block_shape=(100, 9))
X = X.reshape((1000, 9), block_shape=(1000, 1))
assert np.allclose(X.get(), real_X)
def test_concatenate(app_inst: ArrayApplication):
axis = 1
real_X, _ = BimodalGaussian.get_dataset(1000, 9)
real_ones = np.ones(shape=(1000, 1))
X = app_inst.array(real_X, block_shape=(100, 9))
ones = app_inst.ones((1000, 1), (100, 1), dtype=X.dtype)
X_concated = app_inst.concatenate(
[X, ones], axis=axis, axis_block_size=X.block_shape[axis]
)
real_X_concated = np.concatenate([real_X, real_ones], axis=axis)
assert np.allclose(X_concated.get(), real_X_concated)
real_X2 = np.random.random_sample(1000 * 17).reshape(1000, 17)
X2 = app_inst.array(real_X2, block_shape=(X.block_shape[0], 3))
X_concated = app_inst.concatenate(
[X, ones, X2], axis=axis, axis_block_size=X.block_shape[axis]
)
real_X_concated = np.concatenate([real_X, real_ones, real_X2], axis=axis)
assert np.allclose(X_concated.get(), real_X_concated)
def test_split(app_inst: ArrayApplication):
# TODO (hme): Implement a split leveraging block_shape param in reshape op.
x = app_inst.array(np.array([1.0, 2.0, 3.0, 4.0]), block_shape=(4,))
syskwargs = {
"grid_entry": x.blocks[0].grid_entry,
"grid_shape": x.blocks[0].grid_shape,
"options": {"num_returns": 2},
}
res1, res2 = x.cm.split(
x.blocks[0].oid, 2, axis=0, transposed=False, syskwargs=syskwargs
)
ba = BlockArray(ArrayGrid((4,), (2,), x.dtype.__name__), x.cm)
ba.blocks[0].oid = res1
ba.blocks[1].oid = res2
assert np.allclose([1.0, 2.0, 3.0, 4.0], ba.get())
def test_touch(app_inst: ArrayApplication):
ones = app_inst.ones((123, 456), (12, 34))
assert ones.touch() is ones
def test_num_cores(app_inst: ArrayApplication):
assert np.allclose(app_inst.cm.num_cores_total(), systems_utils.get_num_cores())
def ideal_tall_skinny_shapes(size, dtype):
assert dtype in (np.float32, np.float64)
denom = 2 if dtype is np.float64 else 1
num_cols = 2 ** 8
if size == "1024GB":
# Approximately 1 TB, 1024 blocks, 1 GB / block.
num_rows = 2 ** 30 // denom
grid_shape = (2 ** 10, 1)
elif size == "512GB":
# 512GB, 512 blocks, 1 GB / block.
# Perfect fit on 8 nodes.
num_rows = 2 ** 29 // denom
grid_shape = (2 ** 9, 1)
elif size == "256GB":
# 256GB, 256 blocks, 1 GB / block.
# Perfect fit on 4 nodes.
num_rows = 2 ** 28 // denom
grid_shape = (2 ** 8, 1)
elif size == "128GB":
# 128GB, 128 blocks, 1 GB / block.
# Perfect fit on 2 nodes.
num_rows = 2 ** 27 // denom
grid_shape = (2 ** 7, 1)
elif size == "64GB":
# Approximately 64GB, 64 blocks, 1 GB / block.
# Perfect fit on 1 node.
num_rows = 2 ** 26 // denom
grid_shape = (2 ** 6, 1)
elif size == "32GB":
num_rows = 2 ** 25 // denom
grid_shape = (2 ** 6, 1)
elif size == "16GB":
num_rows = 2 ** 24 // denom
grid_shape = (2 ** 6, 1)
elif size == "8GB":
num_rows = 2 ** 23 // denom
grid_shape = (2 ** 6, 1)
elif size == "4GB":
num_rows = 2 ** 22 // denom
grid_shape = (2 ** 6, 1)
elif size == "2GB":
num_rows = 2 ** 21 // denom
grid_shape = (2 ** 6, 1)
elif size == "1GB":
# Approximately 1GB, 64 blocks, 16 MB / block.
num_rows = 2 ** 20 // denom
grid_shape = (2 ** 6, 1)
else:
raise ValueError("Unsupported size: %s" % size)
shape = (num_rows, num_cols)
block_shape = (num_rows // grid_shape[0], num_cols // grid_shape[1])
return shape, block_shape, grid_shape
def ideal_square_shapes(size, dtype):
assert dtype in (np.float32, np.float64)
denom = 2 if dtype is np.float64 else 1
# Assume 4 bytes, and start with a 1GB square array.
shape = np.array([2 ** 14, 2 ** 14], dtype=int)
if size == "4GB":
shape *= 1 // denom
grid_shape = (8, 8)
elif size == "16GB":
shape *= 4 // denom
grid_shape = (8, 8)
elif size == "64GB":
shape *= 8 // denom
grid_shape = (8, 8)
elif size == "256GB":
shape *= 16 // denom
grid_shape = (16, 16)
elif size == "1024GB":
shape *= 32 // denom
grid_shape = (32, 32)
else:
raise ValueError("Unsupported size: %s" % size)
block_shape = tuple(shape // grid_shape)
shape = tuple(shape)
return shape, block_shape, grid_shape
def test_compute_block_shape(app_inst: ArrayApplication):
dtype = np.float32
cores_per_node = 64
# Tall-skinny.
for size in [64, 128, 256, 512, 1024]:
size_str = "%sGB" % size
num_nodes = size // 64
cluster_shape = (16, 1)
shape, expected_block_shape, expected_grid_shape = ideal_tall_skinny_shapes(
size_str, dtype
)
block_shape = app_inst.cm.compute_block_shape(
shape, dtype, cluster_shape, num_nodes * cores_per_node
)
grid: ArrayGrid = ArrayGrid(shape, block_shape, dtype.__name__)
print(
"tall-skinny",
"cluster_shape=%s" % str(cluster_shape),
"grid_shape=%s" % str(expected_grid_shape),
"size=%s" % size_str,
"bytes computed=%s" % (grid.nbytes() / 10 ** 9),
)
assert expected_grid_shape == grid.grid_shape
assert expected_block_shape == block_shape
# Square.
for size in [4, 16, 64, 256, 1024]:
size_str = "%sGB" % size
num_nodes = 1 if size < 64 else size // 64
cluster_shape = int(np.sqrt(num_nodes)), int(np.sqrt(num_nodes))
shape, expected_block_shape, expected_grid_shape = ideal_square_shapes(
size_str, dtype
)
block_shape = app_inst.cm.compute_block_shape(
shape, dtype, cluster_shape, num_nodes * cores_per_node
)
grid: ArrayGrid = ArrayGrid(shape, block_shape, dtype.__name__)
print(
"square",
"cluster_shape=%s" % str(cluster_shape),
"grid_shape=%s" % str(expected_grid_shape),
"size=%s" % size_str,
"bytes computed=%s" % (grid.nbytes() / 10 ** 9),
)
assert expected_grid_shape == grid.grid_shape, "%s != %s" % (
expected_grid_shape,
grid.grid_shape,
)
assert expected_block_shape == block_shape, "%s != %s" % (
expected_block_shape,
block_shape,
)
if __name__ == "__main__":
# pylint: disable=import-error, no-member
import conftest
app_inst = conftest.get_app("serial")
test_scalar_op(app_inst)
test_array_integrity(app_inst)
test_concatenate(app_inst)
test_touch(app_inst)
test_split(app_inst)
test_compute_block_shape(app_inst)
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.DStructures.Structures.Type where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.HLevels
open import Cubical.Functions.FunExtEquiv
open import Cubical.Foundations.Univalence
open import Cubical.Data.Sigma
open import Cubical.Data.Unit
open import Cubical.Relation.Binary
open import Cubical.DStructures.Base
open import Cubical.DStructures.Meta.Properties
private
variable
ℓ ℓ' ℓ'' ℓ₁ ℓ₁' ℓ₁'' ℓ₂ ℓA ℓ≅A ℓB ℓ≅B ℓ≅ᴰ ℓP : Level
-- a type is a URGStr with the relation given by its identity type
𝒮-type : (A : Type ℓ) → URGStr A ℓ
𝒮-type A = make-𝒮 {_≅_ = _≡_} (λ _ → refl) isContrSingl
𝒮ᴰ-type : {A : Type ℓA} (B : A → Type ℓB)
→ URGStrᴰ (𝒮-type A) B ℓB
𝒮ᴰ-type {A = A} B = make-𝒮ᴰ (λ b p b' → PathP (λ i → B (p i)) b b')
(λ _ → refl)
λ _ b → isContrSingl b
-- subtypes are displayed structures
𝒮ᴰ-subtype : {A : Type ℓ} (P : A → hProp ℓ')
→ URGStrᴰ (𝒮-type A)
(λ a → P a .fst)
ℓ-zero
𝒮ᴰ-subtype P
= make-𝒮ᴰ (λ _ _ _ → Unit)
(λ _ → tt)
λ a p → isContrRespectEquiv (invEquiv (Σ-contractSnd (λ _ → isContrUnit)))
(inhProp→isContr p (P a .snd))
-- a subtype induces a URG structure on itself
Subtype→Sub-𝒮ᴰ : {A : Type ℓA} (P : A → hProp ℓP)
(StrA : URGStr A ℓ≅A)
→ URGStrᴰ StrA (λ a → P a .fst) ℓ-zero
Subtype→Sub-𝒮ᴰ P StrA =
make-𝒮ᴰ (λ _ _ _ → Unit)
(λ _ → tt)
(λ a p → isContrRespectEquiv (invEquiv (Σ-contractSnd (λ _ → isContrUnit)))
(inhProp→isContr p (P a .snd)))
-- uniqueness of small URG structures
private
module _ {A : Type ℓA} (𝒮 : URGStr A ℓA) where
open URGStr
𝒮' = 𝒮-type A
≅-≡ : _≅_ 𝒮' ≡ _≅_ 𝒮
≅-≡ = funExt₂ (λ a a' → ua (isUnivalent→isUnivalent' (_≅_ 𝒮) (ρ 𝒮) (uni 𝒮) a a'))
ρ-≡ : PathP (λ i → isRefl (≅-≡ i)) (ρ 𝒮') (ρ 𝒮)
ρ-≡ = funExt (λ a → toPathP (p a))
where
p : (a : A) → transport (λ i → ≅-≡ i a a) refl ≡ (ρ 𝒮 a)
p a = uaβ (isUnivalent→isUnivalent' (_≅_ 𝒮) (ρ 𝒮) (uni 𝒮) a a) refl ∙ transportRefl (ρ 𝒮 a)
u : (a : A) → (transport (λ i → ≅-≡ i a a) refl) ≡ (subst (λ a' → (_≅_ 𝒮) a a') refl (ρ 𝒮 a))
u a = uaβ (isUnivalent→isUnivalent' (_≅_ 𝒮) (ρ 𝒮) (uni 𝒮) a a) refl
uni-≡ : PathP (λ i → isUnivalent (≅-≡ i) (ρ-≡ i)) (uni 𝒮') (uni 𝒮)
uni-≡ = isProp→PathP (λ i → isPropΠ2 (λ a a' → isPropIsEquiv (≡→R (≅-≡ i) (ρ-≡ i)))) (uni 𝒮') (uni 𝒮)
𝒮-uniqueness : (A : Type ℓA) → isContr (URGStr A ℓA)
𝒮-uniqueness A .fst = 𝒮-type A
𝒮-uniqueness A .snd 𝒮 = sym (η-URGStr (𝒮-type A)) ∙∙ (λ i → p i) ∙∙ η-URGStr 𝒮
where
p = λ (i : I) → urgstr (≅-≡ 𝒮 i) (ρ-≡ 𝒮 i) (uni-≡ 𝒮 i)
|
Every sport brings its own requirements for eyewear, including winter sports. Simon Briggs from Extreme-Eyewear, a specialist sports eyewear company, explains what the main issues are when choosing the right eyewear for your sport.
“Sports eyewear requirements can be broken down into two main areas, eyewear fit and eyewear lenses.
“A sports person's eyewear needs to fit securely and comfortably, ensuring optimal performance in any situation. Each sport has its own requirements for sport sunglasses/eyewear. Many sports are played all year round, so it's important to have a sunglass lens in your sport sunglasses that will cope with a wide variety of sunlight conditions (from low light to heavy sunshine), or to have interchangeable lenses.
“Some sports are played on reflective surfaces such as water or snow where it is beneficial to have polarised lenses in your sport sunglasses to reduce the sun glare.
"The most popular correction comes in the form of an optical insert which is fitted with your prescription. The insert clips onto the back of the sport sunglasses enabling you to still interchange the ‘main sunglass’ lenses for different colours. As the insert can be removed this concept is also great for contact lens wearers."
"Cosmetically you can often tell there is an insert behind the sunglasses which makes them less than perfect for fashion prescription sunglasses but their functionality makes them idea for sport."
"The second form of correction is 'direct glazed' lenses, where your prescription is fitted into the eyewear frame. Two brands that excel are Oakley and Maui Jim, both manufacturing wrap-around lenses tailored to your prescription.
"Getting your ski goggles right is essential, when skiing or snowboarding a goggle has to be able to deliver good peripheral view, air circulation (to combat fogging), comfort and due to the popularity of helmets now be ‘helmet compatible’.
"There are a number of different sunglass and goggle hybrids available. These clever pieces of kit mean that you can buy your sunglasses and goggles at once. You simply buy a pair of sunglasses with sides that can be clicked off of the frame and replaced with a headband for a more secure fit.
"Some of these models include a padding system, which gives a more comfortable fit when worn as a goggle. The padding simply clicks into place on the back of the sunglass, and turns the sunglass into a full goggle."
"The prescription part of the correction is in the form of an optical insert that sits inside the skiing goggle, behind the main lens. The optical insert has no contact with your face making a perfect fit and allowing the goggle to fit as intended by the manufacturer.
Are There Alternative Corrections Available?"An alternative to optical inserts is to purchase 'Over the glass' ski goggles. These goggles are specifically designed to sit over the top of prescription glasses, having increased ventilation and a wide fit enabling your glasses to comfortably sit underneath."
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Some derivable properties
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
open import Algebra
module Algebra.Properties.Ring {r₁ r₂} (R : Ring r₁ r₂) where
import Algebra.Properties.AbelianGroup as AGP
open import Function
import Relation.Binary.Reasoning.Setoid as EqR
open Ring R
open EqR setoid
open AGP +-abelianGroup public
renaming ( ⁻¹-involutive to -‿involutive
; left-identity-unique to +-left-identity-unique
; right-identity-unique to +-right-identity-unique
; identity-unique to +-identity-unique
; left-inverse-unique to +-left-inverse-unique
; right-inverse-unique to +-right-inverse-unique
; ⁻¹-∙-comm to -‿+-comm
)
-‿*-distribˡ : ∀ x y → - x * y ≈ - (x * y)
-‿*-distribˡ x y = begin
- x * y ≈⟨ sym $ +-identityʳ _ ⟩
- x * y + 0# ≈⟨ +-congˡ $ sym (-‿inverseʳ _) ⟩
- x * y + (x * y + - (x * y)) ≈⟨ sym $ +-assoc _ _ _ ⟩
- x * y + x * y + - (x * y) ≈⟨ +-congʳ $ sym (distribʳ _ _ _) ⟩
(- x + x) * y + - (x * y) ≈⟨ +-congʳ $ *-congʳ $ -‿inverseˡ _ ⟩
0# * y + - (x * y) ≈⟨ +-congʳ $ zeroˡ _ ⟩
0# + - (x * y) ≈⟨ +-identityˡ _ ⟩
- (x * y) ∎
-‿*-distribʳ : ∀ x y → x * - y ≈ - (x * y)
-‿*-distribʳ x y = begin
x * - y ≈⟨ sym $ +-identityˡ _ ⟩
0# + x * - y ≈⟨ +-congʳ $ sym (-‿inverseˡ _) ⟩
- (x * y) + x * y + x * - y ≈⟨ +-assoc _ _ _ ⟩
- (x * y) + (x * y + x * - y) ≈⟨ +-congˡ $ sym (distribˡ _ _ _) ⟩
- (x * y) + x * (y + - y) ≈⟨ +-congˡ $ *-congˡ $ -‿inverseʳ _ ⟩
- (x * y) + x * 0# ≈⟨ +-congˡ $ zeroʳ _ ⟩
- (x * y) + 0# ≈⟨ +-identityʳ _ ⟩
- (x * y) ∎
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
C=======================================================================
SUBROUTINE ORDSTR (NORD, IXORD, LOLD, NAME, ISCR)
C=======================================================================
C --*** ORDSTR *** (GJOIN) Order a list of strings according to indices
C -- Written by Greg Sjaardema - revised 07/11/90
C -- Modified from ORDIX Written by Amy Gilkey
C --
C --ORDSTR orders a list of strings according to a list of indices.
C --
C --Parameters:
C -- NORD - IN - the number of indices
C -- IXORD - IN - the indices of the ordered items
C -- LOLD - IN - the length of NAME
C --   NAME - IN/OUT - the string list; returned in the new order
C --   ISCR - SCRATCH - size = LOLD
INTEGER IXORD(*)
CHARACTER*(*) NAME(*)
CHARACTER*(*) ISCR(*)
DO 100 I = 1, LOLD
ISCR(I) = NAME(I)
100 CONTINUE
DO 110 I = 1, NORD
NAME(I) = ISCR(IXORD(I))
110 CONTINUE
RETURN
END
|
\documentclass[conference]{IEEEtran}
\IEEEoverridecommandlockouts
% The preceding line is only needed to identify funding in the first footnote. If that is unneeded, please comment it out.
\usepackage{cite}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{xcolor}
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
\begin{document}
\title{Predicting New York City Crime Using Weather\\}
\author{\IEEEauthorblockN{Vaibhav A Gadodia}
\IEEEauthorblockA{\textit{Courant Institute of Mathematical Sciences} \\
\textit{New York University}\\
New York, New York \\
[email protected]}
\and
\IEEEauthorblockN{Jinze Qi}
\IEEEauthorblockA{\textit{Courant Institute of Mathematical Sciences} \\
\textit{New York University}\\
New York, New York \\
[email protected]}
}
\maketitle
\begin{abstract}
While there has been extensive work on the effect of individual weather conditions such as temperature, precipitation, etc. on the occurrence of crime and on the correlation between climate change and criminal activity, only a little research is available that attempts to forecast criminal activity using a more holistic set of weather conditions.
In this work, we aim to correlate weather combinations of temperature, relative humidity, and the presence of rain, snow, and fog with crime, and to predict percentile ranks of the crime risk associated with such weather combinations in New York City.
\end{abstract}
\begin{IEEEkeywords}
Analytic, Application, Big Data, Crime, Spark, Weather
\end{IEEEkeywords}
\section{Introduction}
\subsection{Overview}
Crime is an unfortunate part of urban life and often millions are spent in efforts to reduce criminal activity and to safeguard citizens.
In New York City, there is a significant amount of criminal activity, and the New York Police Department's attempts to protect the city's residents are a big part of why the city continues to be a relatively safe place to reside despite all the crime.
However, we believe that NYPD's efforts can be enhanced by applying predictive analytics on big data and leveraging the predictive power of weather on crime.
A small body of research has shown that external factors such as temperature, precipitation, etc. influence criminal behavior significantly.
However, most of such past work has either been heavily focused on understanding the effect of individual factors on crime or has been about the correlation between weather and crime on a monthly or yearly basis.
With this work, we attempt to both explore the effect of weather as a whole on criminal behavior and study this effect on a more granular time scale of hours and minutes.
\subsection{Approach}
We explore the weather-crime correlation and attempt to forecast criminal activity from weather conditions by studying local climatological data and NYPD complaint data spanning a period of 12 years, from the beginning of 2006 to the end of 2017.
We preprocess the data to discard irrelevant feature columns and then we combine the two cleaned datasets on the basis of date and time.
This ensures that for every complaint filed with the NYPD, we have the related weather recordings.
We then use this combined data to compute the frequency of criminal activity for different weather conditions and use that in a linear regression - with polynomial terms for modelling quadratic relationships.
We compute and report the R$^{2}$ score for each regression and use the models for crime predictions.
Our work also involves the development of an easy-to-use web application that we have built specifically for the NYPD and other NYC law enforcement agencies.
This application can be queried by the officers to obtain the expected criminal activity based on a few weather-based inputs.
\subsection{Paper Organization}
This paper is organized as follows: in Section II, we describe the motivation behind this work.
In Section III, we discuss related works.
In Section IV, we briefly describe the data used here.
In Section V, we describe our analytic, discussing preprocessing, combining data, regression analysis, and prediction.
In Section VI, we provide an overview of our application design.
In Section VII, we discuss potential actuation or remediation based on the insights gained through this analysis.
In Section VIII, we provide details on the analysis, including regression, predictions, as well as challenges and limitations of the application.
\section{Motivation}
We wanted to give the NYPD and other law enforcement agencies operating in NYC an easy-to-use tool for exploring the likelihood of criminal activity in the city in general, and of assault, burglary and rape in particular, at any given time of day.
Our application will assist the law enforcement officers by classifying the time of day, using weather conditions, in percentiles of expected criminal activity.
While the application can be used by the city residents to inquire and act upon their personal safety, it has been designed with the law enforcement agencies in mind who can use the application’s output to inform their officers’ beats and take necessary action to protect citizens.
\section{Related Work}
In recent work, Alamo et al. \cite{b1} have studied the relationship between crime in Orlando, Florida and Orlando's weather and Twitter presence.
They collected crime data as reported by the Orlando Police Department daily, which gave dates, crime categories, location, etc.
They also collected weather data from the National Oceanic and Atmospheric Administration and used a Twitter developer account to collect tweets pertaining to crime in the Orlando area.
They filtered the tweets using specific keywords such as "crime", "larceny", etc. to isolate those with any reference to crime, and used similarity measures and regression analysis to explore the relationship between crime and tweets.
Using chi-squared tests, they further concluded that high crime rates are associated with average daily temperatures over 60$^{\circ}$F and that precipitation discouraged crime.
Recently, Mullins and White \cite{b2} have explored the relation between ambient temperature and mental health and found that higher temperatures increase emergency department visits for mental illness and self-reported days of poor mental health.
They specifically concluded that cold temperatures reduce negative mental health outcomes while hot temperatures increase them.
Interestingly, they also conclude that there is no evidence of any adaptation and that this temperature relationship remains stable across time, base climate, air conditioning penetration rates, and other factors.
Given that mental health is a strong influence on criminal activity, these results are very significant and indicate a relation between weather and crime.
In a related work, Ranson \cite{b3} studies the impact of climate change on the prevalence of criminal activity in the United States.
The study uses a 30-year panel of monthly crime and weather data for 2,997 US counties and identifies the effect of weather on monthly crime by using a semi-parametric bin estimator and controlling for state-by-month and county-by-year fixed effects.
The results show that increasing temperature has a strong positive effect on criminal activity, and the study predicts that between 2010 and 2099, climate change will cause more than a million additional crimes.
These works are very interesting and suggest a significant external effect on criminal behavior.
However, our work attempts to understand the effect of weather conditions as a composite, instead of studying temperature, precipitation, etc. as isolated features, and we aim to study these effects at the much more granular level of hourly observations.
\section{Datasets}
Our weather data was a part of National Center for Environmental Information's Local Climatological Data obtained at their Central Park station.
The data, which came in at about 80 MB, was downloaded once from the following link:
\underline{https://www.ncdc.noaa.gov/cdo-web/datatools/lcd}
It included entries from January 1, 2006 to December 31, 2017 and consisted of hourly recordings of dry bulb temperature, relative humidity, weather type, etc.
After cleaning, the data schema was as follows:
\begin{itemize}
\item Year: Int - The year when the weather measurement was recorded.
\item Month: Int - The month when the weather measurement was recorded.
\item Day: Int - The day when the weather measurement was recorded.
\item Minute: Int - The time of day when the weather measurement was recorded, in minutes after midnight.
\item Temperature: Int - The dry bulb temperature recorded.
\item Rain: Int - 0/1 entry indicating absence/presence of rain.
\item Snow: Int - 0/1 entry indicating absence/presence of snow.
\item Fog: Int - 0/1 entry indicating absence/presence of fog.
\item Humidity: Int - The relative humidity recorded.
\end{itemize}
The second data that we used was the historical complaint data made available by the NYPD.
This data, which came in at about 2 GB was collected once from the following link:
\underline{https://data.cityofnewyork.us/Public-Safety/NYPD- Complaint}
\underline{-Data-Historic/qgea-i56i}
It had entries from January 1, 2006 to December 31, 2017 and tracked all complaints filed with the NYPD over this 12 year period and consisted of crime location, latitude, longitude, crime type, etc.
After cleaning, the data schema here was as follows:
\begin{itemize}
\item Year: Int - The year of the occurrence of the reported crime.
\item Month: Int - The month of the occurrence of the reported crime.
\item Day: Int - The day of the occurrence of the reported crime.
\item Minute: Int - The time of day of the occurrence of the reported crime, in minutes after midnight.
\item Crime Type: Int - The type of crime reported. Each unique type was indicated by a numeric code, for instance, assault was 9, burglary had 3 as its numeric code, rape was number 45, etc.
\end{itemize}
\section{Description of Analytic}
\begin{figure*}[htbp]
\centerline{\scalebox{0.5}{\includegraphics{design_diagram.png}}}
\caption{Analytic Design}
\label{design}
\end{figure*}
\subsection{Preprocessing}
We preprocessed the weather data using an Apache Spark pipeline.
We filtered out all feature columns that were either deemed to be non-essential based on domain knowledge or those that did not have any recorded readings for multiple years in the period spanned by our data.
As a consequence of this filtering, our data was left with four feature columns that were further processed and are described below.
\begin{itemize}
\item DATE: The values here indicated the date and time when the weather measurements were recorded. The values included the date - in YYYY-MM-DD format - concatenated with the time - in HH:MM format - with a "T".
We further processed this column and split it into four different features indicating year, month, day and time (in minutes based on a 24-hour clock, starting at 0 minutes at 00:00 hours).
We rounded the minutes to the nearest hour, i.e., the nearest multiple of 60.
\item HourlyDryBulbTemperature: This feature column had the dry bulb temperature readings recorded by the automated sensors.
The values were in whole degrees Fahrenheit and ranged from low negatives to over $100^{\circ}$F.
\item HourlyPresentWeatherType: The values here indicated observed weather types using AU, AW and MW codes.
For instance, "RA" indicated rain, "FG" indicated fog, etc. We processed this column and split it into three separate categorical columns for rain, snow and fog, with binary 0/1 values indicating the absence/presence of the relevant weather type.
\item HourlyRelativeHumidity: This was the observed relative humidity given to the closest whole percentage.
\end{itemize}
After filtering out irrelevant feature columns, the weather data was further processed and the observations with irregular data entries were discarded.
This included entries where either the temperature or relative humidity readings were left out or those that had irrelevant wildcard characters in their recordings such as "*", "s", etc.
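A minimal sketch of the weather date handling in PySpark follows; the variable and column names are illustrative rather than our exact pipeline code, and the DATE strings are assumed to have been parsed into timestamps:
\begin{verbatim}
from pyspark.sql import functions as F

weather = (raw_weather
    .withColumn("Year", F.year("DATE"))
    .withColumn("Month", F.month("DATE"))
    .withColumn("Day", F.dayofmonth("DATE"))
    # minutes after midnight, rounded to the nearest hour
    .withColumn("Minute", F.round(
        (F.hour("DATE") * 60 + F.minute("DATE")) / 60.0) * 60))
\end{verbatim}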
For the crime data, we used a similar Apache Spark pipeline for preprocessing.
We began by filtering out non-essential feature columns, which left us with three that were further processed and are described below.
\begin{itemize}
\item CMPLNT\_FR\_DT: The values here indicated the date of the occurrence of the reported crime.
The values were in MM/DD/YYYY format and we split these into three separate columns for year, month and day.
\item CMPLNT\_FR\_TM: This column had the time of the occurrence of the reported crime in HH:MM:SS format, which we converted into minutes like in the weather data.
\item OFNS\_DESC: The values here indicated the type of crime reported.
These were names of different crime types, which we converted into integers by assigning each one of the 71 types a numerical code from 0 to 70.
\end{itemize}
After the aforementioned processing, we removed all rows with irregular or missing entries.
\subsection{Combining Data}
In order to relate the crime data with the weather conditions when the complaint was filed, we generated two SparkSQL DataFrames for the preprocessed weather and crime datasets.
These were then combined using an inner join on the two dataframes' year, month, day and minutes columns.
This generated a combined dataset for analysis and ensured that every crime reported was associated with the relevant weather conditions.
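A minimal sketch of this step, assuming the cleaned column names listed in Section IV:
\begin{verbatim}
combined = crime.join(weather,
    on=["Year", "Month", "Day", "Minute"], how="inner")
\end{verbatim}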
\subsection{Regression}
We used the newly combined dataset to compute the total complaints filed for each weather condition - which was a combination of the temperature, relative humidity, rain, snow and fog - and the complaints filed for each weather condition that were specifically related to assault, burglary and rape.
This way we ended up with four input datasets that were used in a linear regression for analysis of the impact of weather conditions on criminal activity and to predict future crime rate.
For our regression analysis, we used Apache Spark's MLlib's LinearRegression model.
For all four regression models, the weather conditions were treated as the independent variables and included temperature and relative humidity as continuous variables and rain, snow and fog as categorical dummy variables.
Additionally, two polynomial terms, the squared temperature and the squared relative humidity, were also included as independent variables.
These additional polynomial terms gave the regressions the ability to model the non-linear, quadratic relationship between weather and crime that was observed in preliminary exploration and visualization of the data.
For our models, the dependent variables were one of the number of overall crimes, number of assaults, number of burglaries, or the number of rapes.
Therefore, the models took the following form.
\begin{multline}\label{eq}
\#crimes = \theta_{0}temp + \theta_{1}temp^{2} + \theta_{2}rain + \theta_{3}snow + \\
\theta_{4}fog + \theta_{5}humidity + \theta_{6}humidity^{2}
\end{multline}
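A minimal sketch of fitting one of these models with MLlib follows; the input dataframe \texttt{counts\_df} of per-condition crime counts and the column names are illustrative:
\begin{verbatim}
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression

assembler = VectorAssembler(
    inputCols=["temp", "temp_sq", "rain", "snow", "fog",
               "humidity", "humidity_sq"],
    outputCol="features")
lr = LinearRegression(featuresCol="features", labelCol="num_crimes")
model = lr.fit(assembler.transform(counts_df))
print(model.summary.r2)  # coefficient of determination
\end{verbatim}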
\subsection{Prediction}
Given these four regression models, we are able to predict the number of expected crimes for every possible weather condition and then convert those values into percentiles to determine criminal risk.
\section{Application Design}
\begin{figure*}[htbp]
\centerline{\scalebox{0.25}{\includegraphics{app_in.png}}}
\caption{Application - Input}
\label{design}
\end{figure*}
\begin{figure*}[htbp]
\centerline{\scalebox{0.25}{\includegraphics{app_out.png}}}
\caption{Application - Output}
\label{design}
\end{figure*}
The workflow for the backend of this project has been illustrated in Fig. 1.
We began the work by gathering, analyzing and cleaning the data, which was followed by the generation of a combined dataset and performing regression analysis on it.
Finally, we used the regression models to predict crime rates.
The primary goal of this project was to enable the NYPD in keeping the city safer.
As a consequence, we wanted to provide them with an easy-to-use application powered by a strong analytic.
Thus, in addition to the backend, we also created a simple Flask based web application.
The NYPD can query this application to forecast criminal risk by entering the observed weather conditions as shown in Fig. 2, and the application provides them with a straightforward forecast as shown in Fig. 3.
\section{Actuation or Remediation}
When a law enforcement official queries our application inputting the observed weather conditions, our application returns a percentile ranking of the conditions' criminal risk.
The officers can use this ranking to inform their department's beats and to increase/decrease police presence in high risk neighborhoods.
Furthermore, on a longer time scale, this application's insights can be used by the law enforcement agencies to inform their budgeting and recruiting efforts.
\section{Analysis}
\begin{table*}[htbp]
\caption{Regression R$^{2}$ Scores}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
& \textbf{Overall} & \textbf{Assault} & \textbf{Burglary} & \textbf{Rape} \\
\hline
\textbf{R$^{2}$} & 0.4037 & 0.4185 & 0.3717 & 0.2796 \\
\hline
\end{tabular}
\label{r2}
\end{center}
\end{table*}
\subsection{Regression}
Given the combined weather and crime dataframe, we computed the total number of crimes for each observed weather combination of temperature, relative humidity, rain, snow, and fog that was available in the data.
We repeated this step for the other three regression models as well where we calculated the total number of assaults, burglaries, and rapes for each observed weather combination present in the data.
After including the additional polynomial terms in our regression models, we performed linear regression analysis on all four dataframes.
The trained models had coefficients of determination - R$^{2}$ scores - that are shown in TABLE~\ref{r2}.
While the R$^{2}$ scores might appear to be low, one must take into account the effect of non-weather-related factors on human activity in general and on crime in particular.
These scores suggest a reasonable correlation between weather conditions as a whole and criminal activity.
Furthermore, our analysis suggests that among assault, burglary, and rape, assault has the highest correlation with weather and it is even higher than the overall crime rate's correlation with weather conditions.
On the other hand, both burglary and rape have a lower correlation with weather, with rape being the least strongly affected by it.
\subsection{Prediction}
We used our trained models to predict criminal activity for all possible weather combinations.
In order to do so in a reasonable manner, we rounded off the temperature and relative humidity values to the nearest ten, thus limiting the total possible weather combinations to 880.
This does not significantly alter the model predictions as there is no significant change within a few degrees of temperature or percentages of relative humidity.
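A minimal sketch of this enumeration; the exact temperature and humidity ranges are illustrative, the point being that $11 \times 10 \times 2^3 = 880$:
\begin{verbatim}
import itertools

combos = list(itertools.product(
    range(0, 110, 10),        # temperature, nearest ten
    range(10, 110, 10),       # relative humidity, nearest ten
    (0, 1), (0, 1), (0, 1)))  # rain, snow, fog flags
assert len(combos) == 880
\end{verbatim}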
\subsection{Challenges and Limitations}
Our regression models provide a reliable framework for forecasting crimes using weather conditions.
However, due to the nature of regression analysis, there were challenges in how the predictions were to be interpreted and presented to the user.
\begin{itemize}
\item Due to the nature of regression analysis, some predictions tend to be negative.
However, this is not an indicator of a complete lack of criminal activity.
Rather, this is an artifact of regression based predictions and therefore, was treated as such because we converted the predictions into percentiles.
\item Another closely related limitation of the application was that the regression prediction shows the expected number of crimes over a 12-year period.
This is an artifact of the data used for analysis as it spanned 12 years from January 1, 2006 to December 31, 2017.
\end{itemize}
For the aforementioned reasons, our application does not present the user with the raw regression prediction.
Instead, we present the user with a percentile rating of the input weather conditions' criminal risk based on the regression predictions of all possible weather conditions.
This comes with its own set of challenges as not all combinations of temperature, humidity, rain, snow, and fog are physically possible.
However, this is a reasonable approximation that helps prevent misinterpretation of the application output.
The crime data used for regression analysis was organized by the date and time of the crime that was reported in the complaints filed with the NYPD.
This presents a significant challenge, as there is no way of telling our analysis whether the times of crimes as reported in the complaints were accurate or not.
This raises the possibility of entries in our data where the wrong weather conditions are matched to a crime, which ultimately affects our models and their predictions.
\section{Conclusion}
Crime is an unfortunate reality of urban life and NYC has seen more than its fair share of criminal activity over the years.
A lot of work has been done by the law enforcement agencies and city government to protect vulnerable citizens.
Our results show that these efforts could be further enhanced by using the predictive power of weather conditions on NYC crime.
We found that temperature, relative humidity, rain, snow, and fog have a significant influence on crime.
Our regression models further isolated assault, burglary, and rape and we found that the aforementioned weather conditions significantly affected these crimes, albeit to varying degrees.
In addition to data analysis, we also developed an easy-to-use web application that can be employed by the law enforcement arm of the government to inform their work.
\section{Future Work}
\textit{1) Taking correctness of complaints into account:} While our models are reasonably functional in their current form, they do not take into account the possibility of false or incorrect complaints, which could cause discrepancies in important information such as the date and time of actual crimes versus when the complaints indicated they happened.
Since our analysis is informed by the weather conditions at the moment of the crimes, there is a possibility that our predictions could be further improved if such discrepancies were taken into account.
\textit{2) More complex modelling techniques:} Given the structure of our data, linear regression with polynomial terms was the best way of modelling the relationship between weather and crime.
However, it would be interesting to further explore this relationship using advanced modern techniques such as Deep Learning, random forest analysis, etc.
\textit{3) Predicting type of crime with highest probability:} We currently predict the number of crimes that are expected for a given weather combination over a span of 12 years.
However, it would be very interesting and useful to develop a model to predict the types of crimes with the highest probabilities of occurring in particular weather conditions.
\begin{thebibliography}{00}
\bibitem{b1}J. Alamo, C. Fortes, N. Occhiogrosso, and C.-Y. Huang, “Mining the Relationship between Crimes, Weather and Tweets,” Proceedings of the 2019 the International Conference on Pattern Recognition and Artificial Intelligence - PRAI 19, 2019.
\bibitem{b2}J. T. Mullins and C. White, “Temperature and mental health: Evidence from the spectrum of mental health outcomes,” Journal of Health Economics, vol. 68, Dec. 2019.
\bibitem{b3}M. Ranson, “Crime, weather, and climate change,” Journal of Environmental Economics and Management, vol. 67, no. 3, pp. 274–302, May 2014.
\end{thebibliography}
\end{document}
|
If $S$ is a retract of a path-connected space $T$, then $S$ is path-connected.
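Proof sketch: let $r : T \to S$ be a retraction, so $r$ is continuous and restricts to the identity on $S$. Given $s_0, s_1 \in S \subseteq T$, path-connectedness of $T$ gives a path $\gamma : [0,1] \to T$ from $s_0$ to $s_1$, and $r \circ \gamma$ is then a path in $S$ from $r(s_0) = s_0$ to $r(s_1) = s_1$.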
|
[STATEMENT]
lemma bangSC:
fixes P
shows "!P \<approx> P \<parallel> !P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. !P \<approx> P \<parallel> !P
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. !P \<approx> P \<parallel> !P
[PROOF STEP]
have "!P \<sim>\<^sub>e P \<parallel> !P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. !P \<sim>\<^sub>e P \<parallel> !P
[PROOF STEP]
by(rule Strong_Early_Bisim_SC.bangSC)
[PROOF STATE]
proof (state)
this:
!P \<sim>\<^sub>e P \<parallel> !P
goal (1 subgoal):
1. !P \<approx> P \<parallel> !P
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
!P \<sim>\<^sub>e P \<parallel> !P
goal (1 subgoal):
1. !P \<approx> P \<parallel> !P
[PROOF STEP]
by(rule strongBisimWeakBisim)
[PROOF STATE]
proof (state)
this:
!P \<approx> P \<parallel> !P
goal:
No subgoals!
[PROOF STEP]
qed
|
function [map2] = changeNodesArea(map, metList, areaHeight, areaWidth)
% Change the area size of a list of metabolites (standardize the map)
%
% USAGE:
%
% [map2] = changeNodesArea(map,metList,areaHeight,areaWidth)
%
% INPUTS:
% map: A parsed model structure generated by
% 'transformXML2Map' function
% metList: list of metabolites whose area is to be changed
%
% OPTIONAL INPUTS:
% areaHeight: new node height (default 25.0)
% areaWidth: new node width (default 70.0)
%
% OUTPUTS:
% map2: New parsed map structure with the changed node areas
%
% .. Authors:
% - J. Modamio, Belval, Luxembourg, 18/07/2017.
% - N.Sompairac - Institut Curie, Paris, 11/10/2017. (Code checking)
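%
% EXAMPLE (hypothetical map and metabolite names):
%
%    map2 = changeNodesArea(map, {'atp', 'adp'}, 25.0, 70.0);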
if nargin < 3
    areaHeight = 25.0;
end
if nargin < 4
    areaWidth = 70.0;
end
% Rename map
map2 = map;
% Find mets in the map
id = map2.specID(ismember(map2.specName, metList));
idx = find(ismember(map2.molID, id));
% Change areaHeight
map2.molHeight(idx, 1) = {areaHeight};
% Change areaWidth
map2.molWidth(idx, 1) = {areaWidth};
end
|
\chapter{Content}
\label{cha:content}
This part usually contains multiple chapters (and thus files) from your papers \cite{Xing:2014:APR,Xing:2015:AHA}.
|
r=0.28
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7x30h/media/images/d7x30h-015/svc:tesseract/full/full/0.28/default.jpg Accept:application/hocr+xml
|
using Documenter, Seismic
makedocs(
modules = [Seismic],
doctest = true)
deploydocs(
deps = Deps.pip("mkdocs", "python-markdown-math"),
repo = "github.com/msacchi/Seismic.jl.git",
julia = "0.5",
osname = "osx")
|
Sanjeev Kumar as Thakur Baldev Singh, usually addressed as "Thakur"
|
Require Export assetmapping_spl_def.
Require Export featuremodel_spl_def.
Require Export cktrans_spl_def.
Require Export spl_int.
Require Export assetmapping_spl_int.
Require Export assetmapping_spl_inst.
Require Export featuremodel_spl_int.
Require Export featuremodel_spl_inst.
Require Export featuremodel_spl_proofs.
Require Export cktrans_spl_int.
Require Export cktrans_spl_proofs.
Require Export cktrans_spl_inst.
Require Export maps_proofs.
Require Export maps_int.
Require Export maps_def.
Require Export maps_inst.
Require Export spl_def.
Require Export spl_proofs.
Require Import Coq.Lists.ListSet.
Require Import Coq.Arith.Arith.
Require Import Coq.Init.Specif.
Require Export Coq.Lists.List.
Import Maps.
Import CKTransSPL.
Import FeatureModelSPL.
Import AssetMappingSPL.
Import SPL.
Program Instance Ins_SPL {FMs: FeatureModel FM Conf}
{AssetM : AssetMapping Asset AssetName AM}
{ckTrans: CKTrans FM Asset AM CK Conf}
{spl: SPL Asset Conf FM AM CK ArbitrarySPL} :
SPL Asset Conf FM AM CK PL :=
{
getFM:= getFM_func;
getAM:= getAM_func;
getCK:= getCK_func;
getCk:= getCk_func;
genPL:= genPL_func;
genPLCK := genPLCK_func;
gerPL:= gerPL_func;
wfPL := wfPL_func;
plRefinement:= plRefinement_func;
products:= products_func;
plRefinementAlt:= plRefinementAlt_func;
subsetProducts:= subsetProducts_func;
plWeakRefinement:= plWeakRefinement_func;
strongerPLRefinement:= strongerPLrefinement_func;
}.
Next Obligation. {(*fmEquivalenceCompositionality*)
intros.
split.
+ unfold plRefinement_func. intros. exists c1. split.
- unfold getFM_func. simpl. unfold getFM_func in H0. destruct pl in H0.
simpl in H0. destruct pls0 in H0. simpl in H0. destruct p. simpl in H0.
rewrite f in H0. rewrite fm. apply H0.
- unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
+ unfold wfPL_func. intros.
unfold equivalentFMs in H. intuition.
} Qed. Next Obligation. {(*weakFMcompositionality*)
intros.
unfold plRefinement_func.
intros. exists c1. split.
+ unfold getFM_func. simpl. unfold getFM_func in H1. destruct pl in H1.
simpl in H1. destruct pls0 in H1. simpl in H1. destruct p. simpl in H1. rewrite f in H1.
rewrite fm. apply H1.
+ unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
} Qed. Next Obligation. {(*ckEquivalenceCompositionality*)
intros.
split.
+ unfold plRefinement_func. intros. exists c1. split.
- unfold getFM_func. simpl. unfold getFM_func in H0. destruct pl in H0.
simpl in H0. destruct pls0 in H0. simpl in H0. destruct p. simpl in H0.
destruct fm, f. apply H0.
- unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
+ unfold wfPL_func. intros.
unfold equivalentFMs in H.
intuition.
} Qed. Next Obligation. {(*weakerCKcompositionality*)
intros.
split.
+ unfold plRefinement_func. intros. exists c1. split.
- unfold getFM_func. simpl. unfold getFM_func in H0. destruct pl in H0.
simpl in H0. destruct pls0 in H0. simpl in H0. destruct p. simpl in H0. rewrite f in H0.
rewrite fm. apply H0.
- unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
+ unfold wfPL_func. intros.
unfold equivalentFMs in H.
intuition.
} Qed. Next Obligation. {(*amRefinementCompositionality*)
intros.
split.
+ unfold plRefinement_func. intros. exists c1. split.
- unfold getFM_func. simpl. unfold getFM_func in H0. destruct pl in H0.
simpl in H0. destruct pls0 in H0. simpl in H0. destruct p. simpl in H0. rewrite f in H0.
rewrite fm. apply H0.
- unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
+ unfold wfPL_func. intros.
unfold equivalentFMs in H.
intuition.
} Qed. Next Obligation. {(*fullCompositionality*)
intros.
split.
+ unfold plRefinement_func. intros. exists c1. split.
- unfold getFM_func. simpl. unfold getFM_func in H2.
simpl in H2. destruct pl in H2. simpl in H2.
destruct pls0 in H2. simpl in H2. destruct p. simpl in H2.
destruct fm. rewrite f in H2. apply H2.
- unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
+ unfold wfPL_func. intros.
unfold equivalentFMs in H.
intuition.
} Qed. Next Obligation.
{(*weakFullCompositionality*)
intros.
unfold plRefinement_func.
intros. exists c1. split.
+ unfold getFM_func. simpl. unfold getFM_func in H2. destruct pl in H2.
simpl in H2. destruct pls0 in H2. simpl in H2. destruct p.
simpl in H2. rewrite f in H2.
rewrite fm. apply H2.
+ unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
} Qed. Next Obligation.
{(*fullCompositionality2*)
intros.
split.
+ unfold plRefinement_func. intros. exists c1. split.
- unfold getFM_func. simpl. unfold getFM_func in H2.
simpl in H2. destruct pl in H2. simpl in H2. destruct pls0 in H2. simpl in H2. destruct p.
simpl in H2. destruct fm. rewrite f in H2. apply H2.
- unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
+ unfold wfPL_func. intros.
unfold equivalentFMs in H.
intuition.
} Qed. Next Obligation. { (*weakFullCompositionality2*)
intros.
unfold plRefinement_func.
intros. exists c1. split.
+ unfold getFM_func. simpl. unfold getFM_func in H2. destruct pl in H2.
simpl in H2. destruct pls0 in H2. simpl in H2. destruct p.
simpl in H2. rewrite f in H2.
rewrite fm. apply H2.
+ unfold getCK_func. unfold getAM_func. simpl.
apply assetRefinementReflexivity_axiom.
} Qed. Next Obligation.
{(*plRefAlt*)
split.
+ intros. apply equalsRefinementAlt. reflexivity.
+ intros. destruct H. unfold plRefinementAlt in H. unfold plRefinementAlt_func.
intros p3 H1.
specialize (H p3). apply H in H1. destruct H1. unfold plRefinementAlt_func in H0.
specialize (H0 x). destruct H1. apply H0 in H1.
destruct H1. destruct H1. exists x0. split.
- apply H1.
- generalize H2, H3. apply assetRefinementTranstivity_axiom.
} Qed. Next Obligation.
{(*strongerPLref*)
intros.
split.
+ apply equalsStrongerPL. reflexivity.
+ unfold strongerPLrefinement_func. intros. destruct H. specialize (H c). specialize (H1 c).
destruct c. apply H in H0. destruct H0.
split.
- apply H1 in H0. destruct H0. apply H0.
- apply H1 in H0. destruct H0. generalize H3. generalize H2. apply assetRefinementTranstivity_axiom.
} Qed. Next Obligation.
{(*plRef*)
intros.
split.
+ apply equalsPL. reflexivity.
+ unfold plRefinement_func. intros. destruct H. specialize (H c1).
specialize (H1 c1). destruct c1. apply H in H0. destruct H0.
destruct H0. destruct x. apply H1 in H0. destruct H0. destruct H0.
exists x. split.
- apply H0.
- generalize H3. generalize H2. apply assetRefinementTranstivity_axiom.
} Qed.
|
function r = issparse(c)
%ISSPARSE Returns 1 if c is sparse
%
% r = issparse(c);
%
% written 10/16/98 S.M. Rump
% modified 04/04/04 S.M. Rump set round to nearest for safety
% modified 04/06/05 S.M. Rump rounding unchanged
%
if c.complex
r = issparse(c.mid);
else
r = issparse(c.inf);
end
|
{-# OPTIONS --without-K --safe #-}
module Data.List.Kleene where
open import Data.List.Kleene.Base public
|
# Advanced: Extending lambeq
## Creating readers
### Reader example: "Comb" reader
In this example we will create a reader that, given a sentence, generates the following tensor network:
<center>
<em>(Figure: the comb-shaped tensor network generated by this reader, with one wire per word feeding into a single layer box.)</em>
</center>
Note that the particular compositional model is not appropriate for classical experiments, since the tensor that implements the layer can become very large for long sentences. However, the model can be implemented without problems on a quantum computer.
```python
from lambeq import AtomicType, Reader
from discopy import Box, Id, Word
N = AtomicType.NOUN
class CombReader(Reader):
def sentence2diagram(self, sentence):
words = Id().tensor(*[Word(w, N) for w in sentence.split()])
layer = Box('LAYER', words.cod, N)
return words >> layer
diagram = CombReader().sentence2diagram('John gave Mary a flower')
diagram.draw()
```
```python
Id().tensor(*[Word(w, N) for w in ['John', 'gave', 'Mary', 'a', 'flower']]).draw()
```
## Creating rewrite rules
```python
from lambeq import BobcatParser
parser = BobcatParser(verbose='text')
d = parser.sentence2diagram('The food is fresh')
```
### SimpleRewriteRule example: Negation functor
```python
from lambeq import AtomicType, SimpleRewriteRule
from discopy.rigid import Box, Id
N = AtomicType.NOUN
S = AtomicType.SENTENCE
adj = N @ N.l
NOT = Box('NOT', S, S)
negation_rewrite = SimpleRewriteRule(
cod=N.r @ S @ S.l @ N,
template=SimpleRewriteRule.placeholder(N.r @ S @ S.l @ N) >> Id(N.r) @ NOT @ Id(S.l @ N),
words=['is', 'was', 'has', 'have'])
```
```python
from lambeq import Rewriter
from discopy import drawing
not_d = Rewriter([negation_rewrite])(d)
drawing.equation(d, not_d, symbol='->', figsize=(14, 4))
```
### RewriteRule example: "Past" functor
```python
from lambeq import RewriteRule
class PastRewriteRule(RewriteRule):
mapping = {
'is': 'was',
'are': 'were',
'has': 'had'
}
def matches(self, box):
return box.name in self.mapping
def rewrite(self, box):
new_name = self.mapping[box.name]
return type(box)(name=new_name, dom=box.dom, cod=box.cod)
```
```python
past_d = Rewriter([PastRewriteRule()])(d)
drawing.equation(d, past_d, symbol='->', figsize=(14, 4))
```
## Creating ansätze
```python
d = parser.sentence2diagram('We will go')
```
### CircuitAnsatz example: "Real-valued" ansatz
```python
from discopy.quantum.circuit import Functor, Id
from discopy.quantum.gates import Bra, CX, Ket, Ry
from lambeq import CircuitAnsatz
from lambeq.ansatz import Symbol
class RealAnsatz(CircuitAnsatz):
def __init__(self, ob_map, n_layers):
super().__init__(ob_map=ob_map, n_layers=n_layers)
self.n_layers = n_layers
self.functor = Functor(ob=self.ob_map, ar=self._ar)
def _ar(self, box):
# step 1: obtain label
label = self._summarise_box(box)
# step 2: map domain and codomain
dom, cod = self._ob(box.dom), self._ob(box.cod)
n_qubits = max(dom, cod)
n_layers = self.n_layers
# step 3: construct and return ansatz
if n_qubits == 1:
circuit = Ry(Symbol(f'{label}_0'))
else:
# this also deals with the n_qubits == 0 case correctly
circuit = Id(n_qubits)
for i in range(n_layers):
offset = i * n_qubits
syms = [Symbol(f'{label}_{offset + j}') for j in range(n_qubits)]
# adds a ladder of CNOTs
for j in range(n_qubits - 1):
circuit >>= Id(j) @ CX @ Id(n_qubits - j - 2)
# adds a layer of Y rotations
circuit >>= Id().tensor(*[Ry(sym) for sym in syms])
if cod <= dom:
circuit >>= Id(cod) @ Bra(*[0]*(dom - cod))
else:
circuit <<= Id(dom) @ Ket(*[0]*(cod - dom))
return circuit
```
```python
real_d = RealAnsatz({N: 1, S: 1}, n_layers=2)(d)
real_d.draw(figsize=(12, 10))
```
### TensorAnsatz example: "Positive" ansatz
```python
from lambeq import TensorAnsatz
from discopy import rigid, tensor
from functools import reduce
class PositiveAnsatz(TensorAnsatz):
def _ar(self, box):
# step 1: obtain label
label = self._summarise_box(box)
# step 2: map domain and codomain
dom, cod = self._ob(box.dom), self._ob(box.cod)
# step 3: construct and return ansatz
name = self._summarise_box(box)
n_params = reduce(lambda x, y: x * y, dom @ cod, 1)
syms = Symbol(name, size=n_params)
return tensor.Box(box.name, dom, cod, syms ** 2)
```
```python
from discopy import Dim
ansatz = PositiveAnsatz({N: Dim(2), S: Dim(2)})
positive_d = ansatz(d)
positive_d.draw()
```
```python
import numpy as np
from sympy import default_sort_key
syms = sorted(positive_d.free_symbols, key=default_sort_key)
sym_dict = {k: -np.ones(k.size) for k in syms}
subbed_diagram = positive_d.lambdify(*syms)(*sym_dict.values())
subbed_diagram.eval()
```
Tensor(dom=Dim(1), cod=Dim(2), array=[8., 8.])
## Contributions
|
import Data.IOArray
main : IO ()
main
    = do x <- newArray 20
         ignore $ writeArray x 10 "Hello"
         ignore $ writeArray x 11 "World"
         printLn !(toList x)
         y <- fromList (map Just [1,2,3,4,5])
         printLn !(toList y)
|
# Notes on current version:
If the table of contents is missing, try running `jupyter nbextensions_configurator enable`
from the command line, then toggle the nbextensions and restart.
1. 1.9.2020 Converted the ODE models for the economic extension to transition models ready for stochastic simulation, using a separate birth-death list.
See the section on the SC2UIR model. Not yet done for the other two economic extensions.
2. 1.9.2020 Implemented stochastic simulation (tau-leaping method) using PyGom's built-in capabilities: so far for the SCIR simulation only.
Need to use an integer N>>1, not 1.0, for stochastic simulation. It calculates in a few minutes for N=10000 with ICUfrac rescaled to 0.02 (x10); N=100000 did not finish in 10 minutes.
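As a minimal sketch of how such a stochastic run can be invoked (assuming pygom's `SimulateOde.simulate_jump` API and the `SCIR_modelS` object built in the SCIR section below; parameter values are illustrative only):
```python
# minimal sketch: tau-leap stochastic simulation with pygom's SimulateOde
# assumes SCIR_modelS as built in the SCIR section below; parameters illustrative only
import numpy as np

N = 10000                                # integer population, N >> 1
x0 = [N - 1, 1, 0, 0, 0]                 # S, I, R, D, S_c
t = np.linspace(0, 150, 151)
SCIR_modelS.parameters = {'beta': 0.25/N, 'gamma': 0.1, 'mu': 0.002,
                          'c_0': 0.2, 'c_1': 0.05, 'c_2': 2.0/N, 'N': N}
SCIR_modelS.initial_values = (x0, t[0])
# one stochastic trajectory per iteration; full_output also returns the jump times
solutions, times = SCIR_modelS.simulate_jump(t[1:], iteration=5, full_output=True)
```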
# Model Definitions
## Utilities for custom extension of PyGom
```python
# import required packages
import os
import csv
from sympy import symbols, init_printing
import numpy as np
import matplotlib
%matplotlib inline
import seaborn as sb
from matplotlib import pyplot as plt
import sympy
import itertools
import scipy
import datetime
import matplotlib.dates as mdates
from pygom import DeterministicOde, Transition, SimulateOde, TransitionType, SquareLoss
from scipy.optimize import minimize
import pickle as pk
import jsonpickle as jpk
from cycler import cycler
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import pwlf
```
```python
# This cell adds two methods to the DeterministicODE class of pygom
# dumpparams: stores params in a file './params/Model_Name.pk'
# loadparams: loads params from same file. returns None if any problem finding the file.
# e.g. will be accessed by SCIR.dumparams() or SCIR.loadparams()
# These functions need the modules os and pickle (imported as pk).
def dumpparams(self, run_id=''):  # needs self since this will become a method
    mname = self.modelname
    dirnm = os.getcwd()
    pfile = dirnm+'/params/'+mname+'.pk'
    try:
        params = self.params.copy()
        with open(pfile, 'wb') as fp:
            pk.dump(params, fp, protocol=pk.HIGHEST_PROTOCOL)
        print('dumped params to', pfile)
        if run_id != '':
            pfile2 = dirnm+'/params/'+run_id+'.pk'
            with open(pfile2, 'wb') as fp:
                pk.dump(params, fp, protocol=pk.HIGHEST_PROTOCOL)
            print('dumped params to', pfile2)
    except:
        print('problem dumping params to', pfile)

def loadparams(self, run_id=''):  # needs self since this will become a method
    mname = self.modelname
    dirnm = os.getcwd()
    if run_id == '':
        pfile = dirnm+'/params/'+mname+'.pk'
    else:
        pfile = dirnm+'/params/'+run_id+'.pk'
    try:
        with open(pfile, 'rb') as fp:
            params = pk.load(fp)
        print('loaded params from', pfile, ':')
    except:
        print('problem loading', pfile)
        return None
    try:
        self.params = params.copy()
        self.parameters = params.copy()
    except:
        print('problem loading the params; none loaded')
        return None
    return True
OdeClass = DeterministicOde().__class__
setattr(OdeClass,'dumpparams', dumpparams)
setattr(OdeClass,'loadparams', loadparams)
```
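A minimal usage sketch for the patched methods (assuming a model such as `SIR_model` defined below, with a `./params/` directory already present):
```python
# minimal usage sketch; assumes SIR_model (defined below) and an existing ./params/ directory
SIR_model.params = {'beta': 0.25, 'gamma': 0.1, 'mu': 0.002, 'N': 10000}
SIR_model.dumpparams()     # writes ./params/SIR.pk
SIR_model.loadparams()     # reads it back and resets the model parameters
```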
```python
def Float(x):
    try:
        rtn = float(x)
    except:
        rtn = float('NaN')
    return rtn
```
```python
def print_ode2(self):
    '''
    Prints the ode in symbolic form onto the screen/console in actual
    symbols rather than the word of the symbol.
    Based on the PyGOM built-in but adapted for Jupyter.
    Corrected by John McCaskill to avoid a subscript format error.
    '''
    A = self.get_ode_eqn()
    B = sympy.zeros(A.rows, 2)
    for i in range(A.shape[0]):
        B[i, 0] = sympy.symbols('d' + '{' + str(self._stateList[i]) + '}' + '/dt=')
        B[i, 1] = A[i]
    return B
```
```python
# Jupyter Specifics
from IPython.display import display, HTML
from ipywidgets.widgets import interact, interactive, IntSlider, FloatSlider, Layout, ToggleButton, ToggleButtons, fixed
display(HTML("<style>.container { width:100% !important; }</style>"))
style = {'description_width': '100px'}
slider_layout = Layout(width='99%')
```
## Caution Extensions to SIR Model
### SIR model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -\beta I S\\
\dot{I} &= \beta I S - \gamma I - \mu I\\
\dot{R} & = \gamma I \\
\dot{D} & = \mu I
\end{split}
\end{equation}
#### Variables
* $S$: Susceptible individuals
* $I$: Infected individuals
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+I+R+D$ Total population size (constant)
#### Parameters
* $\beta$ rate at which infected individuals contact susceptibles and infect them
* $\gamma$ rate at which infected individuals recover from disease and become immune
* $\mu$ death rate for infected individuals
#### Implementation
Using PyGOM, we will set up the SIR model ODE system (here SIRD, including deaths).
PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
```python
# set up the symbolic SIR model, actually SIRD including deaths
state = ['S', 'I', 'R', 'D']
param_list = ['beta', 'gamma','mu','N']
transition = [
Transition(origin='S', destination='I', equation='beta*I*S',
transition_type=TransitionType.T),
Transition(origin='I', destination='R', equation='gamma*I',
transition_type=TransitionType.T),
Transition(origin='I', destination='D', equation='mu*I',
transition_type=TransitionType.T)
]
SIR_model = DeterministicOde(state, param_list, transition=transition)
SIR_model.modelname='SIR'
SIR_model.ei=1
SIR_model.confirmed=slice(1,4) # cases 1-3 i.e. I, R and D
SIR_model.recovered=slice(2,3)
SIR_model.deaths=slice(3,4)
```
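As a minimal sketch of integrating this model (pygom exposes the right-hand side as the `.ode` method, usable directly with scipy's `odeint`; parameter values here are illustrative only):
```python
# minimal integration sketch; parameter values are illustrative only
import numpy as np
from scipy.integrate import odeint

N = 10000
x0 = [N - 1, 1, 0, 0]          # S, I, R, D
t = np.linspace(0, 150, 151)
# beta is scaled by 1/N because S and I are absolute numbers, not fractions
SIR_model.parameters = {'beta': 0.25/N, 'gamma': 0.1, 'mu': 0.002, 'N': N}
solution = odeint(SIR_model.ode, x0, t)
```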
```python
# display equations
print_ode2(SIR_model)
```
$\displaystyle \left[\begin{matrix}d{S}/dt= & - I S \beta\\d{I}/dt= & I S \beta - I \gamma - I \mu\\d{R}/dt= & I \gamma\\d{D}/dt= & I \mu\end{matrix}\right]$
```python
# display graphical representation of the model
SIR_model.get_transition_graph()
```
##### Derived equations, Jacobian and gradient
```python
SIR_model.get_ode_eqn()
```
$\displaystyle \left[\begin{matrix}- I S \beta\\I S \beta - I \gamma - I \mu\\I \gamma\\I \mu\end{matrix}\right]$
```python
SIR_model.get_jacobian_eqn()
```
$\displaystyle \left[\begin{matrix}- I \beta & - S \beta & 0 & 0\\I \beta & S \beta - \gamma - \mu & 0 & 0\\0 & \gamma & 0 & 0\\0 & \mu & 0 & 0\end{matrix}\right]$
```python
SIR_model.get_grad_eqn()
```
$\displaystyle \left[\begin{matrix}- I S & 0 & 0 & 0\\I S & - I & - I & 0\\0 & I & 0 & 0\\0 & 0 & I & 0\end{matrix}\right]$
#### R0
```python
from pygom.model.epi_analysis import R0
```
```python
transition_ode = [
Transition(origin='S', equation='-beta*I*S'),
Transition(origin='I', equation='beta*I*S-gamma*I-mu*I'),
Transition(origin='R', equation='gamma*I'),
Transition(origin='D', equation='mu*I')
]
ode = SimulateOde(state, param_list, ode=transition_ode)
ode = ode.get_unrolled_obj()
ode.get_transition_graph()
```
```python
R0(ode,['I'])
```
$\displaystyle 0$
### SCIR model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -\beta I S + c_1 S_c - c_2 S I\\
\dot{S_c} &= - c_0 \beta I S_c - c_1 S_c + c_2 S I\\
\dot{I} &= \beta I (S + c_0 S_c) - \gamma I - \mu I\\
\dot{R} & = \gamma I \\
\dot{D} & = \mu I
\end{split}
\end{equation}
The use of I as a state variable triggering susceptibles to execute caution is just one choice. In contrast with deaths, it does not accumulate over time and so retains the property of an active threat to society, rather than an historical one. Alternatively, one could use the daily death rate which is proportional to it.
Actually, the effect of caution may be quadratic, since both the individual doing the infection and individual potentially being infected may be executing caution. The current version assumes that infectives do not change their precautionary measures in response to I. To implement this we would need to further distinguish careful and non careful infectives. This is done in the SC2IR model.
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $I$: Infected individuals
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+S_c+I+R+D$ Total population size (constant)
#### Parameters
* $\beta$ rate at which infected individuals contact susceptibles and infect them
* $\gamma$ rate at which infected individuals recover from disease and become immune
* $\mu$ death rate for infected individuals
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- c_0 : reduction factor for exposure for cautioned susceptibles
    - c_1 : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- c_2 : rate constant for transition from uncautioned to cautioned susceptible
#### Implementation
Using PyGOM, we will set up the SCIR model ODE system
PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
```python
# set up symbolic model
state = ['S', 'I', 'R', 'D', 'S_c']
param_list = ['beta', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'N']
transition = [
Transition(origin='S', destination='I', equation='beta*I*S',
transition_type=TransitionType.T),
Transition(origin='S', destination='S_c', equation='c_2*I*S',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='S', equation='c_1*S_c',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='I', equation='c_0*beta*I*S_c',
transition_type=TransitionType.T),
Transition(origin='I', destination='R', equation='gamma*I',
transition_type=TransitionType.T),
Transition(origin='I', destination='D', equation='mu*I',
transition_type=TransitionType.T)
]
SCIR_model = DeterministicOde(state, param_list, transition=transition)
SCIR_modelS = SimulateOde(state, param_list, transition=transition)
SCIR_model.modelname='SCIR'
SCIR_model.ei=1
SCIR_model.confirmed=slice(1,4) # cases 1-3 i.e. I, R and D
SCIR_model.recovered=slice(2,3)
SCIR_model.deaths=slice(3,4)
```
```python
# display equations
print_ode2(SCIR_model)
```
$\displaystyle \left[\begin{matrix}d{S}/dt= & - I S \beta - I S c_{2} + S_{c} c_{1}\\d{I}/dt= & I S \beta + I S_{c} \beta c_{0} - I \gamma - I \mu\\d{R}/dt= & I \gamma\\d{D}/dt= & I \mu\\d{S_c}/dt= & I S c_{2} - I S_{c} \beta c_{0} - S_{c} c_{1}\end{matrix}\right]$
```python
# display graphical representation of the model
SCIR_model.get_transition_graph()
```
### SC2IR model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -\beta (I + c_0 I_c) S + c_1 S_c - c_2 S (I + I_c)\\
\dot{S_c} &= - c_0 \beta (I + c_0 I_c) S_c - c_1 S_c + c_2 S (I + I_c)\\
\dot{I} &= \beta (I + c_0 I_c) S - \gamma I - \mu I + c_1 I_c - c_2 I (I + I_c)\\
\dot{I_c} &= c_0 \beta (I + c_0 I_c) S_c - \gamma I_c - \mu I_c - c_1 I_c + c_2 I (I + I_c)\\
\dot{R} & = \gamma (I + I_c)\\
\dot{D} & = \mu (I + I_c)
\end{split}
\end{equation}
The use of I as a state variable triggering susceptibles to execute caution is just one choice. In contrast with deaths, it does not accumulate over time and so retains the property of an active threat to society, rather than an historical one. Alternatively, one could use the daily death rate which is proportional to it.
The effect of caution may be quadratic, since both the individual doing the infection and individual potentially being infected may be executing caution. To implement this we distinguish careful and non careful infectives. We ignore infectives making the transition to caution or relaxing it.
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $I$: Infected individuals not exercising pandemic precautions
* $I_c$: Infected individuals exercising pandemic precautions
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+S_c+I+I_c+R+D$ Total population size (constant)
#### Parameters
* $\beta$ rate at which infected individuals contact susceptibles and infect them
* $\gamma$ rate at which infected individuals recover from disease and become immune
* $\mu$ death rate for infected individuals
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- c_0 : reduction factor for exposure for cautioned susceptibles
    - c_1 : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- c_2 : rate constant for transition from uncautioned to cautioned susceptible
#### Implementation
Using PyGOM, we will set up the SC2IR model ODE system
PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
```python
# set up symbolic model
state = ['S', 'I', 'R', 'D', 'I_c', 'S_c']
param_list = ['beta', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'N']
transition = [
Transition(origin='S', destination='I', equation='beta*(I+c_0*I_c)*S',
transition_type=TransitionType.T),
Transition(origin='S', destination='S_c', equation='c_2*(I+I_c)*S',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='S', equation='c_1*S_c',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='I_c', equation='c_0*beta*(I+c_0*I_c)*S_c',
transition_type=TransitionType.T),
Transition(origin='I', destination='R', equation='gamma*I',
transition_type=TransitionType.T),
Transition(origin='I', destination='D', equation='mu*I',
transition_type=TransitionType.T),
Transition(origin='I', destination='I_c', equation='c_2*(I+I_c)*I',
transition_type=TransitionType.T),
Transition(origin='I_c', destination='R', equation='gamma*I_c',
transition_type=TransitionType.T),
Transition(origin='I_c', destination='I', equation='c_1*I_c',
transition_type=TransitionType.T),
Transition(origin='I_c', destination='D', equation='mu*I_c',
transition_type=TransitionType.T)
]
SC2IR_model = DeterministicOde(state, param_list, transition=transition)
SC2IR_model.modelname='SC2IR'
SC2IR_model.ei=1
SC2IR_model.confirmed=slice(1,5) # cases 1-4 i.e. I, R, D and I_c
SC2IR_model.recovered=slice(2,3)
SC2IR_model.deaths=slice(3,4)
```
```python
# display equations
print_ode2(SC2IR_model)
```
$\displaystyle \left[\begin{matrix}d{S}/dt= & - S \beta \left(I + I_{c} c_{0}\right) - S c_{2} \left(I + I_{c}\right) + S_{c} c_{1}\\d{I}/dt= & - I c_{2} \left(I + I_{c}\right) - I \gamma - I \mu + I_{c} c_{1} + S \beta \left(I + I_{c} c_{0}\right)\\d{R}/dt= & I \gamma + I_{c} \gamma\\d{D}/dt= & I \mu + I_{c} \mu\\d{I_c}/dt= & I c_{2} \left(I + I_{c}\right) - I_{c} c_{1} - I_{c} \gamma - I_{c} \mu + S_{c} \beta c_{0} \left(I + I_{c} c_{0}\right)\\d{S_c}/dt= & S c_{2} \left(I + I_{c}\right) - S_{c} \beta c_{0} \left(I + I_{c} c_{0}\right) - S_{c} c_{1}\end{matrix}\right]$
```python
# display graphical representation of the model
SC2IR_model.get_transition_graph()
```
## Caution Extensions to SEIR Model
### SEIR model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -\beta I S\\
\dot{E} &= \beta I S - \alpha E\\
\dot{I} &= \alpha E - \gamma I - \mu I\\
\dot{R} & = \gamma I \\
\dot{D} & = \mu I
\end{split}
\end{equation}
#### Variables
* $S$: Susceptible individuals
* $E$: Exposed individuals
* $I$: Infected individuals
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+E+I+R+D$ Total population size (constant)
#### Parameters
* $\beta$ rate at which infected individuals contact susceptibles and expose them to infection
* $\alpha$ rate at which exposed individuals become infected (1/(incubation time))
* $\gamma$ rate at which infected individuals recover from disease and become immune
* $\mu$ death rate for infected individuals
#### Implementation
Using PyGOM, we will set up the SEIR model ODE system
PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
```python
# set up the symbolic model
state = ['S', 'E', 'I', 'R', 'D']
param_list = ['beta', 'alpha', 'gamma', 'mu', 'N']
transition = [
Transition(origin='S', destination='E', equation='beta*I*S',
transition_type=TransitionType.T),
Transition(origin='E', destination='I', equation='alpha*E',
transition_type=TransitionType.T),
Transition(origin='I', destination='R', equation='gamma*I',
transition_type=TransitionType.T),
Transition(origin='I', destination='D', equation='mu*I',
transition_type=TransitionType.T)
]
SEIR_model = DeterministicOde(state, param_list, transition=transition)
SEIR_model.modelname='SEIR'
SEIR_model.ei=slice(1,3) # cases 1,2 i.e. E and I
SEIR_model.confirmed=slice(2,5) # cases 2-4 i.e. I, R and D, not E
SEIR_model.recovered=slice(3,4)
SEIR_model.deaths=slice(4,5)
```
```python
# display equations
print_ode2(SEIR_model)
```
```python
# display graphical representation of the model
SEIR_model.get_transition_graph()
```
### SCEIR model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -\beta I S + c_1 S_c - c_2 S I\\
\dot{S_c} &= - c_0 \beta I S_c - c_1 S_c + c_2 S I\\
\dot{E} &= \beta I (S + c_0 S_c) - \alpha E\\
\dot{I} &= \alpha E - \gamma I - \mu I\\
\dot{R} & = \gamma I \\
\dot{D} & = \mu I
\end{split}
\end{equation}
The use of I as a state variable triggering susceptibles to execute caution is just one choice. In contrast with deaths, it does not accumulate over time and so retains the property of an active threat to society, rather than an historical one. Alternatively, one could use the daily death rate which is proportional to it.
Actually, the effect of caution may be quadratic, since both the individual doing the infecting and the individual potentially being infected may be exercising caution. The current version assumes that infectives do not change their precautionary measures in response to I. To implement this we would need to further distinguish careful and non-careful infectives. This is done in the SC3EIR model.
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $E$: Exposed individuals
* $I$: Infected individuals
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+S_c+E+I+R+D$ Total population size (constant)
#### Parameters
* $\beta$ rate at which infected individuals contact susceptibles and infect them
* $\alpha$ rate at which exposed individuals become infected (1/(incubation time))
* $\gamma$ rate at which infected individuals recover from disease and become immune
* $\mu$ death rate for infected individuals
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- c_0 : reduction factor for exposure for cautioned susceptibles
    - c_1 : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- c_2 : rate constant for transition from uncautioned to cautioned susceptible
#### Implementation
Using PyGOM, we will set up the SCEIR model ODE system
PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
```python
# set up the symbolic model
state = ['S', 'E', 'I', 'R', 'D', 'S_c']
param_list = ['beta', 'alpha', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'N']
transition = [
Transition(origin='S', destination='E', equation='beta*I*S',
transition_type=TransitionType.T),
Transition(origin='S', destination='S_c', equation='c_2*I*S',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='S', equation='c_1*S_c',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='E', equation='c_0*beta*I*S_c',
transition_type=TransitionType.T),
Transition(origin='E', destination='I', equation='alpha*E',
transition_type=TransitionType.T),
Transition(origin='I', destination='R', equation='gamma*I',
transition_type=TransitionType.T),
Transition(origin='I', destination='D', equation='mu*I',
transition_type=TransitionType.T)
]
SCEIR_model = DeterministicOde(state, param_list, transition=transition)
SCEIR_model.modelname='SCEIR'
SCEIR_model.ei=slice(1,3) # cases 1,2 i.e. E,I
SCEIR_model.confirmed=slice(2,5) # cases 2-4 i.e. I, R and D, not E
SCEIR_model.recovered=slice(3,4)
SCEIR_model.deaths=slice(4,5)
```
```python
# display equations
print_ode2(SCEIR_model)
```
```python
# display graphical representation of the model
SCEIR_model.get_transition_graph()
```
### SC3EIR model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -\beta (I + c_0 I_c) S + c_1 S_c - c_2 S (I + I_c)\\
\dot{S_c} &= - c_0 \beta (I + c_0 I_c) S_c - c_1 S_c + c_2 S (I + I_c)\\
\dot{E} &= \beta (I + c_0 I_c) S - \alpha E + c_1 E_c - c_2 E (I + I_c)\\
\dot{E_c} &= c_0 \beta (I + c_0 I_c) S_c - \alpha E_c - c_1 E_c + c_2 E (I + I_c)\\
\dot{I} &= \alpha E - \gamma I - \mu I + c_1 I_c - c_2 I (I + I_c)\\
\dot{I_c} &= \alpha E_c - \gamma I_c - \mu I_c - c_1 I_c + c_2 I (I + I_c)\\
\dot{R} & = \gamma (I + I_c) \\
\dot{D} & = \mu (I + I_c)
\end{split}
\end{equation}
The use of I as a state variable triggering susceptibles to execute caution is just one choice. In contrast with deaths, it does not accumulate over time and so retains the property of an active threat to society, rather than an historical one. Alternatively, one could use the daily death rate which is proportional to it.
Here, the effect of caution is quadratic, since both the individual doing the infecting and the individual potentially being infected may be exercising caution. To implement this we distinguish careful and non-careful exposed and infected individuals, as in the SC2IR model.
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $E$: Exposed individuals
* $E_c$: Exposed individuals exercising caution
* $I$: Infected individuals
* $I_c$: Infected individuals exercising caution
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+S_c+E+E_c+I+I_c+R+D$ Total population size (constant)
#### Parameters
* $\beta$ rate at which infected individuals contact susceptibles and infect them
* $\alpha$ rate at which exposed individuals become infected (1/(incubation time))
* $\gamma$ rate at which infected individuals recover from disease and become immune
* $\mu$ death rate for infected individuals
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- c_0 : reduction factor for exposure for cautioned susceptibles
    - c_1 : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- c_2 : rate constant for transition from uncautioned to cautioned susceptible
#### Implementation
Using PyGOM, we will set up the SC3EIR model ODE system
PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
```python
# set up the symbolic model
state = ['S', 'E', 'I', 'R', 'D', 'I_c', 'S_c', 'E_c']
param_list = ['beta', 'alpha', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'N']
transition = [
Transition(origin='S', destination='E', equation='beta*(I+c_0*I_c)*S',
transition_type=TransitionType.T),
Transition(origin='S', destination='S_c', equation='c_2*(I+I_c)*S',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='S', equation='c_1*S_c',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='E_c', equation='c_0*beta*(I+c_0*I_c)*S_c',
transition_type=TransitionType.T),
Transition(origin='E', destination='I', equation='alpha*E',
transition_type=TransitionType.T),
Transition(origin='E', destination='E_c', equation='c_2*(I+I_c)*E',
transition_type=TransitionType.T),
Transition(origin='E_c', destination='I_c', equation='alpha*E_c',
transition_type=TransitionType.T),
Transition(origin='E_c', destination='E', equation='c_1*E_c',
transition_type=TransitionType.T),
Transition(origin='I', destination='R', equation='gamma*I',
transition_type=TransitionType.T),
Transition(origin='I', destination='I_c', equation='c_2*(I+I_c)*I',
transition_type=TransitionType.T),
Transition(origin='I', destination='D', equation='mu*I',
transition_type=TransitionType.T),
Transition(origin='I_c', destination='R', equation='gamma*I_c',
transition_type=TransitionType.T),
Transition(origin='I_c', destination='I', equation='c_1*I_c',
transition_type=TransitionType.T),
Transition(origin='I_c', destination='D', equation='mu*I_c',
transition_type=TransitionType.T)
]
SC3EIR_model = DeterministicOde(state, param_list, transition=transition)
SC3EIR_model.modelname='SC3EIR'
SC3EIR_model.ei=slice(1,3) # cases 1,2 i.e. E,I # note E_c and I_c not included
SC3EIR_model.confirmed=slice(2,6) # cases 2-5 i.e. I, R, D, and I_c, not E, E_c
SC3EIR_model.recovered=slice(3,4)
SC3EIR_model.deaths=slice(4,5)
```
```python
# display equations
print_ode2(SC3EIR_model)
```
```python
# display graphical representation of the model
SC3EIR_model.get_transition_graph()
```
## Caution Extensions to SEI3R Model
### SEI3R model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -(\beta_1 I_1 +\beta_2 I_2 + \beta_3 I_3) S\\
\dot{E} &=(\beta_1 I_1 +\beta_2 I_2 + \beta_3 I_3 ) S - a E \\
\dot{I_1} &= a E - \gamma_1 I_1 - p_1 I_1 \\
\dot{I_2} &= p_1 I_1 -\gamma_2 I_2 - p_2 I_2 \\
\dot{I_3} & = p_2 I_2 -\gamma_3 I_3 - \mu I_3 \\
\dot{R} & = \gamma_1 I_1 + \gamma_2 I_2 + \gamma_3 I_3 \\
\dot{D} & = \mu I_3
\end{split}
\end{equation}
This model (as used by Dr. Alison Hill, for example) involves exposed but not yet infectious individuals and three classes of infective states with increasing severity.
The latter two involve hospitalization, with the last in the ICU.
#### Variables
* $S$: Susceptible individuals living as normal
* $E$: Exposed individuals - infected but not yet infectious or symptomatic
* $I_i$: Infected individuals in severity class $i$. Severity increases with $i$ and we assume individuals must pass through all previous classes
* $I_1$: Mild infection (hospitalization not required)
* $I_2$: Severe infection (hospitalization required)
* $I_3$: Critical infection (ICU required)
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+E+I_1+I_2+I_3+R+D$ Total population size (constant)
#### Parameters
* $\beta_i$ rate at which infected individuals in class $I_i$ contact susceptibles and infect them
* $a$ rate of progression from the exposed to infected class
* $\gamma_i$ rate at which infected individuals in class $I_i$ recover from disease and become immune
* $p_i$ rate at which infected individuals in class $I_i$ progress to class $I_{i+1}$
* $\mu$ death rate for individuals in the most severe stage of disease
#### Implementation
Using PyGOM, we will set up the model ODE system
PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
```python
# set up the symbolic SEI3R model
state = ['S', 'E', 'I_1', 'I_2','I_3','R','D']
param_list = ['beta_1', 'beta_2','beta_3','alpha', 'gamma_1', 'gamma_2', 'gamma_3',
'p_1','p_2','mu','N']
transition = [
Transition(origin='S', destination='E', equation='(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S',
transition_type=TransitionType.T),
Transition(origin='E', destination='I_1', equation='alpha*E',
transition_type=TransitionType.T),
Transition(origin='I_1', destination='R', equation='gamma_1*I_1',
transition_type=TransitionType.T),
Transition(origin='I_2', destination='R', equation='gamma_2*I_2',
transition_type=TransitionType.T),
Transition(origin='I_3', destination='R', equation='gamma_3*I_3',
transition_type=TransitionType.T),
Transition(origin='I_1', destination='I_2', equation='p_1*I_1',
transition_type=TransitionType.T),
Transition(origin='I_2', destination='I_3', equation='p_2*I_2',
transition_type=TransitionType.T),
Transition(origin='I_3', destination='D', equation='mu*I_3',
transition_type=TransitionType.T)
]
SEI3R_model = DeterministicOde(state, param_list, transition=transition)
SEI3R_model.modelname='SEI3R'
SEI3R_model.ei=slice(1,5)
SEI3R_model.confirmed=slice(2,7) # cases 2-6 i.e. I1, I2, I3, R and D
SEI3R_model.recovered=slice(5,6)
SEI3R_model.deaths=slice(6,7)
```
```python
# display equations
print_ode2(SEI3R_model)
```
```python
# display graphical representation of the model
SEI3R_model.get_transition_graph()
```
### SCEI3R model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -(\beta_1 I_1 +\beta_2 I_2 + \beta_3 I_3) S + c_1 S_c - c_2 S I_3\\
\dot{S_c} &= - c_0(\beta_1 I_1 +\beta_2 I_2 + \beta_3 I_3) S_c - c_1 S_c + c_2 S I_3\\
\dot{E} &=(\beta_1 I_1 +\beta_2 I_2 + \beta_3 I_3 ) (S + c_0 S_c) - a E \\
\dot{I_1} &= a E - \gamma_1 I_1 - p_1 I_1 \\
\dot{I_2} &= p_1 I_1 -\gamma_2 I_2 - p_2 I_2 \\
\dot{I_3} & = p_2 I_2 -\gamma_3 I_3 - \mu I_3 \\
\dot{R} & = \gamma_1 I_1 + \gamma_2 I_2 + \gamma_3 I_3 \\
\dot{D} & = \mu I_3
\end{split}
\end{equation}
The use of $I_3$ as a state variable triggering susceptibles to execute caution is just one choice. In contrast with deaths, it does not accumulate over time and so retains the property of an active threat to society, rather than an historical one.
Actually, the effect of caution may be quadratic, since both the individual doing the infecting and the individual potentially being infected may be exercising caution. The current version assumes that infectives do not change their precautionary measures in response to $I_3$. To implement this we would need to further distinguish careful and non-careful infectives at least up to the $I_1$ level. This is done in the SC3EI3R model.
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $E$: Exposed individuals - infected but not yet infectious or symptomatic
* $I_i$: Infected individuals in severity class $i$. Severity increases with $i$ and we assume individuals must pass through all previous classes
* $I_1$: Mild infection (hospitalization not required)
* $I_2$: Severe infection (hospitalization required)
* $I_3$: Critical infection (ICU required)
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+S_c+E+I_1+I_2+I_3+R+D$ Total population size (constant)
#### Parameters
* $\beta_i$ rate at which infected individuals in class $I_i$ contact susceptibles and infect them
* $a$ rate of progression from the exposed to infected class
* $\gamma_i$ rate at which infected individuals in class $I_i$ recover from disease and become immune
* $p_i$ rate at which infected individuals in class $I_i$ progress to class $I_{i+1}$
* $\mu$ death rate for individuals in the most severe stage of disease
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- c_0 : reduction factor for exposure for cautioned susceptibles
    - c_1 : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- c_2 : rate constant for transition from uncautioned to cautioned susceptible
#### Implementation
Using PyGOM, we will set up the model ODE system
PyGOM – A Python Package for Simplifying Modelling with Systems of Ordinary Differential Equations https://arxiv.org/pdf/1803.06934.pdf
```python
# set up the symbolic model
state = ['S', 'E', 'I_1', 'I_2','I_3','R','D','S_c']
param_list = ['beta_1', 'beta_2','beta_3','alpha', 'gamma_1', 'gamma_2', 'gamma_3',
'p_1','p_2','mu','c_0','c_1','c_2','N']
transition = [
Transition(origin='S', destination='E', equation='(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S',
transition_type=TransitionType.T),
Transition(origin='S', destination='S_c', equation='c_2*I_3*S',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='S', equation='c_1*S_c',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='E', equation='c_0*(beta_1*I_1+beta_2*I_2+beta_3*I_3)*S_c',
transition_type=TransitionType.T),
Transition(origin='E', destination='I_1', equation='alpha*E',
transition_type=TransitionType.T),
Transition(origin='I_1', destination='R', equation='gamma_1*I_1',
transition_type=TransitionType.T),
Transition(origin='I_2', destination='R', equation='gamma_2*I_2',
transition_type=TransitionType.T),
Transition(origin='I_3', destination='R', equation='gamma_3*I_3',
transition_type=TransitionType.T),
Transition(origin='I_1', destination='I_2', equation='p_1*I_1',
transition_type=TransitionType.T),
Transition(origin='I_2', destination='I_3', equation='p_2*I_2',
transition_type=TransitionType.T),
Transition(origin='I_3', destination='D', equation='mu*I_3',
transition_type=TransitionType.T)
]
SCEI3R_model = DeterministicOde(state, param_list, transition=transition)
SCEI3R_model.modelname='SCEI3R'
SCEI3R_model.ei=slice(1,5)
SCEI3R_model.confirmed=slice(2,7) # cases 2-6 i.e. I1, I2, I3, R and D
SCEI3R_model.recovered=slice(5,6)
SCEI3R_model.deaths=slice(6,7)
```
```python
# display equations
print_ode2(SCEI3R_model)
```
```python
# display graphical representation of the model
SCEI3R_model.get_transition_graph()
```
### SC3EI3R model with caution-distinguished $E$ and $I_1$
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -(\beta_1 (I_1 + c_0 I_{1c}) +\beta_2 I_2 + \beta_3 I_3) S + c_1 S_c - c_2 S I_3\\
\dot{S_c} &= - c_0(\beta_1 (I_1 + c_0 I_{1c}) +\beta_2 I_2 + \beta_3 I_3) S_c - c_1 S_c + c_2 S I_3\\
\dot{E} &=(\beta_1 (I_1 + c_0 I_{1c}) +\beta_2 I_2 + \beta_3 I_3 ) S - a E + c_1 E_c - c_2 E I_3\\
\dot{E_c} &=(\beta_1 (I_1 + c_0 I_{1c}) +\beta_2 I_2 + \beta_3 I_3 ) c_0 S_c - a E_c - c_1 E_c + c_2 E I_3\\
\dot{I_1} &= a E - \gamma_1 I_1 - p_1 I_1 + c_1 I_{1c} - c_2 I_1 I_3\\
\dot{I_{1c}} &= a E_c - \gamma_1 I_{1c} - p_1 I_{1c} - c_1 I_{1c} + c_2 I_1 I_3\\
\dot{I_2} &= p_1 (I_1 + I_{1c}) -\gamma_2 I_2 - p_2 I_2 \\
\dot{I_3} & = p_2 I_2 -\gamma_3 I_3 - \mu I_3 \\
\dot{R} & = \gamma_1 (I_1 + I_{1c}) + \gamma_2 I_2 + \gamma_3 I_3 \\
\dot{D} & = \mu I_3
\end{split}
\end{equation}
The use of $I_3$ as a state variable triggering susceptibles to execute caution is just one choice. In contrast with deaths, it does not accumulate over time and so retains the property of an active threat to society, rather than an historical one.
Here, the effect of caution is quadratic, since both the individual doing the infecting and the individual potentially being infected may be exercising caution. To implement this we distinguish careful and non-careful exposed and infectives up to the $I_1$ level. Once in hospital there is no difference, since all caution is exercised with respect to infected patients.
We ignore caution transitions for the hospitalized infected classes as a second-order effect; they could be included as in the SC2IR model.
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $E$: Exposed individuals living as normal - infected but not yet infectious or symptomatic
* $E_c$: Exposed individuals exercising pandemic precautions
* $I_i$: Infected individuals in severity class $i$. Severity increases with $i$ and we assume individuals must pass through all previous classes. Non-hospital cases are split by caution.
* $I_1$: Mild infection (hospitalization not required), living as normal
* $I_{1c}$: Mild infection (hospitalization not required), exercising caution
* $I_2$: Severe infection (hospitalization required)
* $I_3$: Critical infection (ICU required)
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+S_c+E+E_c+I_{1c}+I_1+I_2+I_3+R+D$ Total population size (constant)
#### Parameters
* $\beta_i$ rate at which infected individuals in class $I_i$ contact susceptibles and infect them
* $a$ rate of progression from the exposed to infected class
* $\gamma_i$ rate at which infected individuals in class $I_i$ recover from disease and become immune
* $p_i$ rate at which infected individuals in class $I_i$ progress to class $I_{i+1}$
* $\mu$ death rate for individuals in the most severe stage of disease
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- c_0 : reduction factor for exposure for cautioned susceptibles
    - c_1 : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- c_2 : rate constant for transition from uncautioned to cautioned susceptible
#### Implementation
```python
# set up the symbolic model
state = ['S', 'E', 'I_1', 'I_2','I_3', 'R', 'D', 'I_c', 'S_c', 'E_c'] # I_c plays the role of I_1c in the equations above
param_list = ['beta_1', 'beta_2','beta_3','alpha', 'gamma_1', 'gamma_2', 'gamma_3',
'p_1','p_2','mu','c_0','c_1','c_2','N']
transition = [
Transition(origin='S', destination='E', equation='(beta_1*I_1+beta_2*I_2+beta_3*I_3+c_0*beta_1*I_c)*S',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='E_c', equation='c_0*(beta_1*I_1+beta_2*I_2+beta_3*I_3+c_0*beta_1*I_c)*S_c',
transition_type=TransitionType.T),
Transition(origin='S', destination='S_c', equation='c_2*I_3*S',
transition_type=TransitionType.T),
Transition(origin='S_c', destination='S', equation='c_1*S_c',
transition_type=TransitionType.T),
Transition(origin='E', destination='I_1', equation='alpha*E',
transition_type=TransitionType.T),
Transition(origin='E', destination='E_c', equation='c_2*I_3*E',
transition_type=TransitionType.T),
Transition(origin='E_c', destination='I_c', equation='alpha*E_c',
transition_type=TransitionType.T),
Transition(origin='E_c', destination='E', equation='c_1*E_c',
transition_type=TransitionType.T),
Transition(origin='I_1', destination='R', equation='gamma_1*I_1',
transition_type=TransitionType.T),
Transition(origin='I_1', destination='I_c', equation='c_2*I_3*I_1', # error corrected I_1, mistakenly was I_c
transition_type=TransitionType.T),
Transition(origin='I_c', destination='R', equation='gamma_1*I_c',
transition_type=TransitionType.T),
Transition(origin='I_c', destination='I_1', equation='c_1*I_c',
transition_type=TransitionType.T),
Transition(origin='I_2', destination='R', equation='gamma_2*I_2',
transition_type=TransitionType.T),
Transition(origin='I_3', destination='R', equation='gamma_3*I_3',
transition_type=TransitionType.T),
Transition(origin='I_1', destination='I_2', equation='p_1*I_1',
transition_type=TransitionType.T),
Transition(origin='I_c', destination='I_2', equation='p_1*I_c',
transition_type=TransitionType.T),
Transition(origin='I_2', destination='I_3', equation='p_2*I_2',
transition_type=TransitionType.T),
Transition(origin='I_3', destination='D', equation='mu*I_3',
transition_type=TransitionType.T)
]
SC3EI3R_model = DeterministicOde(state, param_list, transition=transition)
SC3EI3R_model.modelname='SC3EI3R'
SC3EI3R_model.ei=slice(1,5) # 1,2,3,4 i.e. E,I_1,I_2,I_3 – not E_c and I_c
SC3EI3R_model.confirmed=slice(2,8) # cases 2-7 i.e. I1, I2, I3, R, D and I_c
SC3EI3R_model.recovered=slice(5,6)
SC3EI3R_model.deaths=slice(6,7)
```
```python
# display equations
print_ode2(SC3EI3R_model)
```
```python
# display graphical representation of the model
SC3EI3R_model.get_transition_graph()
```
```python
# set up the symbolic model directly as ODEs to allow confirmed cases as state for fitting
state = ['S', 'E', 'I_1', 'I_2','I_3', 'R', 'D', 'I_c', 'S_c', 'E_c', 'C_f']
param_list = ['beta_1', 'beta_2','beta_3','alpha', 'gamma_1', 'gamma_2', 'gamma_3',
'p_1','p_2','mu','c_0','c_1','c_2','N']
transition = [
Transition('S', '-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S - c_2*I_3*S + c_1*S_c','ODE'),
Transition('S_c', '-c_0*(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_c+c_2*I_3*S-c_1*S_c','ODE'),
Transition('E', '(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S-alpha*E-c_2*I_3*E+c_1*E_c','ODE'),
Transition('E_c', 'c_0*(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_c-alpha*E_c+c_2*I_3*E-c_1*E_c','ODE'),
Transition('I_1', 'alpha*E-gamma_1*I_1-p_1*I_1-c_2*I_3*I_1+c_1*I_c','ODE'),
Transition('I_c', 'alpha*E_c-gamma_1*I_c-p_1*I_c+c_2*I_3*I_1-c_1*I_c','ODE'),
Transition('I_2', 'p_1*(I_1+I_c)-gamma_2*I_2-p_2*I_2','ODE'),
Transition('I_3', 'p_2*I_2-gamma_3*I_3-mu*I_3','ODE'),
Transition('R', 'gamma_1*(I_1+I_c)+gamma_2*I_2+gamma_3*I_3','ODE'),
Transition('D', 'mu*I_3','ODE'),
Transition('C_f', 'alpha*(E+E_c)','ODE')
]
SC3EI3R_model = DeterministicOde(state, param_list, ode=transition)
SC3EI3R_model.modelname='SC3EI3R'
SC3EI3R_model.ei=slice(1,5) # 1,2,3,4 i.e. E,I_1,I_2,I_3 – not E_c and I_c
SC3EI3R_model.confirmed=slice(10,11)
SC3EI3R_model.recovered=slice(5,6)
SC3EI3R_model.deaths=slice(6,7)
```
```python
# display equations
print_ode2(SC3EI3R_model)
```
## Caution Extensions to SEIR Model with Economic Supression
This model is an extension of the cautionary model to include a class of susceptibles $S_u$ who are impervious to caution.
The main influence on this class is the economy, which we introduce as a new state variable $W$, normalized to 1 in the absence of pandemic.
The model assumption is that fractional depression of the economy induces some susceptibles (both cautioned and uncautioned) to become uncautionable,
with a rate coefficient proportional to the economic depression $(1-W)$. The economy itself is modelled with logistic growth towards the state $W=1$ in the absence of pandemic,
and towards $1-\kappa S_c$ with pandemic, i.e. individuals exercising caution are the main correlate of economic depression (but also the only suppressor of the pandemic).
As for the cautioned class, uncautionable individuals also return to normal susceptibles with exponential decay at rate $k_1$.
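Setting $\dot{W}=0$ makes the intended equilibria of the logistic economy explicit:
\begin{equation}
\dot{W} = k_w W (1 - \kappa S_c - W) = 0 \quad \Rightarrow \quad W = 0 \;\;\mathrm{or}\;\; W = 1 - \kappa S_c ,
\end{equation}
so with no cautioned individuals the economy relaxes to $W=1$, while a standing cautioned population $S_c$ depresses it to $1-\kappa S_c$.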
### SC2UIR model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -\beta (I + c_0 I_c) S + c_1 S_c - c_2 S (I + I_c) - k_u (1 - W) S + k_1 S_u\\
\dot{S_c} &= - c_0 \beta (I + c_0 I_c) S_c - c_1 S_c + c_2 S (I + I_c) - k_u (1 - W) S_c\\
\dot{I} &= \beta (I + c_0 I_c) (S + S_u) - \gamma I - \mu I + c_1 I_c - c_2 I (I + I_c)\\
\dot{I_c} &= c_0 \beta (I + c_0 I_c) S_c - \gamma I_c - \mu I_c - c_1 I_c + c_2 I (I + I_c)\\
\dot{R} & = \gamma (I + I_c) \\
\dot{D} & = \mu (I + I_c) \\
\dot{S_u} & = -\beta (I + c_0 I_c) S_u + k_u (1 - W) (S + S_c) - k_1 S_u \\
\dot{W} & = k_w W (1 - \kappa S_c - W)
\end{split}
\end{equation}
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $S_u$: Susceptible individuals immune to caution because of economic downturn
* $I$: Infected individuals
* $I_c$: Infected individuals exercising caution
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $W$: Economic status obeying a logistic law with caution individuals downturning
* $N=S+S_c+S_u+I+I_c+R+D$ Total population size (constant)
#### Parameters
* $\beta$ rate at which infected individuals contact susceptibles and infect them
* $\gamma$ rate at which infected individuals recover from disease and become immune
* $\mu$ death rate for infected individuals
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- c_0 : reduction factor for exposure for cautioned susceptibles
    - c_1 : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- c_2 : rate constant for transition from uncautioned to cautioned susceptible
* four parameters coupling to economy and uncautionable individuals
- k_w : rate coefficient of economy equilibration
- k_u : rate coefficient of transition from uncautioned to uncautionable
- k_1 : inverse duration of uncautionable state
    - $\kappa$ : fractional economic downturn due to caution (proportional to the number cautious)
#### Implementation
```python
# set up the symbolic model
state = ['S', 'I', 'R', 'D', 'I_c', 'S_c', 'S_u', 'W']
param_list = ['beta', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'k_u', 'k_1', 'k_w','kappa', 'N']
transition = [
Transition(origin='S', equation='-beta*(I+c_0*I_c)*S+c_1*S_c-c_2*(I+I_c)*S-k_u*(1-W)*S+k_1*S_u'),
Transition(origin='S_c', equation='-c_0*beta*(I+c_0*I_c)*S_c-c_1*S_c+c_2*(I+I_c)*S-k_u*(1-W)*S_c'),
Transition(origin='S_u', equation='-beta*(I+c_0*I_c)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u'),
Transition(origin='I', equation='beta*(I+c_0*I_c)*S-gamma*I-mu*I+c_1*I_c-c_2*(I+I_c)*I'),
Transition(origin='I_c', equation='c_0*beta*(I+c_0*I_c)*S_c-gamma*I_c-mu*I_c-c_1*I_c+c_2*(I+I_c)*I'),
Transition(origin='R', equation='gamma*(I+I_c)'),
Transition(origin='D', equation='mu*(I+I_c)'),
Transition(origin='W', equation='k_w*W*(1-kappa*S_c-W)')
]
SC2UIR_model = DeterministicOde(state, param_list, ode=transition)
SC2UIR_model.modelname='SC2UIR'
SC2UIR_model.ei=1 # case 1 i.e. I # note I_c not included
SC2UIR_model.confirmed=slice(1,5) # cases 1-4 i.e. I, R, D, and I_c
SC2UIR_model.recovered=slice(2,3)
SC2UIR_model.deaths=slice(3,4)
# display equations
print_ode2(SC2UIR_model)
```
$\displaystyle \left[\begin{matrix}d{S}/dt= & - S \beta \left(I + I_{c} c_{0}\right) - S c_{2} \left(I + I_{c}\right) - S k_{u} \left(1 - W\right) + S_{c} c_{1} + S_{u} k_{1}\\d{I}/dt= & - I c_{2} \left(I + I_{c}\right) - I \gamma - I \mu + I_{c} c_{1} + S \beta \left(I + I_{c} c_{0}\right)\\d{R}/dt= & \gamma \left(I + I_{c}\right)\\d{D}/dt= & \mu \left(I + I_{c}\right)\\d{I_c}/dt= & I c_{2} \left(I + I_{c}\right) - I_{c} c_{1} - I_{c} \gamma - I_{c} \mu + S_{c} \beta c_{0} \left(I + I_{c} c_{0}\right)\\d{S_c}/dt= & S c_{2} \left(I + I_{c}\right) - S_{c} \beta c_{0} \left(I + I_{c} c_{0}\right) - S_{c} c_{1} - S_{c} k_{u} \left(1 - W\right)\\d{S_u}/dt= & - S_{u} \beta \left(I + I_{c} c_{0}\right) - S_{u} k_{1} + k_{u} \left(1 - W\right) \left(S + S_{c}\right)\\d{W}/dt= & W k_{w} \left(- S_{c} \kappa - W + 1\right)\end{matrix}\right]$
```python
# set up the symbolic model from transitions, works using separate birth and death list
state = ['S', 'I', 'R', 'D', 'I_c', 'S_c', 'S_u', 'W']
param_list = ['beta', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'k_u', 'k_1', 'k_w','kappa', 'N']
transition = [
Transition(origin='S', destination='I', equation='beta*(I+c_0*I_c)*S', transition_type=TransitionType.T),
Transition(origin='S', destination='S_c', equation='c_2*(I+I_c)*S', transition_type=TransitionType.T),
Transition(origin='S', destination='S_u', equation='k_u*(1-W)*S', transition_type=TransitionType.T),
Transition(origin='S_c', destination='S', equation='c_1*S_c', transition_type=TransitionType.T),
Transition(origin='S_c', destination='I_c', equation='c_0*beta*(I+c_0*I_c)*S_c', transition_type=TransitionType.T),
Transition(origin='S_c', destination='S_u', equation='k_u*(1-W)*S_c', transition_type=TransitionType.T),
Transition(origin='S_u', destination='S', equation='k_1*S_u', transition_type=TransitionType.T),
Transition(origin='S_u', destination='I', equation='beta*(I+c_0*I_c)*S_u', transition_type=TransitionType.T),
Transition(origin='I', destination='I_c', equation='c_2*(I+I_c)*I', transition_type=TransitionType.T),
Transition(origin='I', destination='R', equation='gamma*I', transition_type=TransitionType.T),
Transition(origin='I', destination='D', equation='mu*I', transition_type=TransitionType.T),
Transition(origin='I_c', destination='I', equation='c_1*I_c', transition_type=TransitionType.T),
Transition(origin='I_c', destination='R', equation='gamma*I_c', transition_type=TransitionType.T),
Transition(origin='I_c', destination='D', equation='mu*I_c', transition_type=TransitionType.T),
Transition(origin='W', destination='D', equation='0*W', transition_type=TransitionType.T) # dummy zero-rate transition so that W is included in the transition model
]
bdlist = [Transition(origin='W',equation='k_w*W*(1-kappa*S_c-W)', transition_type=TransitionType.B)
]
SC2UIR_model = DeterministicOde(state, param_list, transition=transition)
SC2UIR_model.birth_death_list = bdlist
SC2UIR_model.modelname='SC2UIR'
SC2UIR_model.ei=1 # case 1 i.e. I # note I_c not included
SC2UIR_model.confirmed=slice(1,5) # cases 1-4 i.e. I, R, D, and I_c
SC2UIR_model.recovered=slice(2,3)
SC2UIR_model.deaths=slice(3,4)
```
```python
# display equations
print_ode2(SC2UIR_model)
```
$\displaystyle \left[\begin{matrix}d{S}/dt= & - S \beta \left(I + I_{c} c_{0}\right) - S c_{2} \left(I + I_{c}\right) - S k_{u} \left(1 - W\right) + S_{c} c_{1} + S_{u} k_{1}\\d{I}/dt= & - I c_{2} \left(I + I_{c}\right) - I \gamma - I \mu + I_{c} c_{1} + S \beta \left(I + I_{c} c_{0}\right) + S_{u} \beta \left(I + I_{c} c_{0}\right)\\d{R}/dt= & I \gamma + I_{c} \gamma\\d{D}/dt= & I \mu + I_{c} \mu\\d{I_c}/dt= & I c_{2} \left(I + I_{c}\right) - I_{c} c_{1} - I_{c} \gamma - I_{c} \mu + S_{c} \beta c_{0} \left(I + I_{c} c_{0}\right)\\d{S_c}/dt= & S c_{2} \left(I + I_{c}\right) - S_{c} \beta c_{0} \left(I + I_{c} c_{0}\right) - S_{c} c_{1} - S_{c} k_{u} \left(1 - W\right)\\d{S_u}/dt= & S k_{u} \left(1 - W\right) + S_{c} k_{u} \left(1 - W\right) - S_{u} \beta \left(I + I_{c} c_{0}\right) - S_{u} k_{1}\\d{W}/dt= & W k_{w} \left(- S_{c} \kappa - W + 1\right)\end{matrix}\right]$
```python
SC2UIR_model.get_transition_graph()
```
```python
ode = SimulateOde(state, param_list, ode=transition)
ode = ode.get_unrolled_obj()
# R0(ode, ['I','I_c']) # produces error, no valid subset found
```
### SC3UEIR model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &= -\beta (I + c_0 I_c) S + c_1 S_c - c_2 S (I + I_c) - k_u (1 - W) S + k_1 S_u\\
\dot{S_c} &= - c_0 \beta (I + c_0 I_c) S_c - c_1 S_c + c_2 S (I + I_c) - k_u (1 - W) S_c\\
\dot{E} &= \beta (I + c_0 I_c) (S + S_u) - \alpha E + c_1 E_c - c_2 E (I + I_c)\\
\dot{E_c} &= c_0 \beta (I + c_0 I_c) S_c - \alpha E_c - c_1 E_c + c_2 E (I + I_c)\\
\dot{I} &= \alpha E - \gamma I - \mu I + c_1 I_c - c_2 I (I + I_c)\\
\dot{I_c} &= \alpha E_c - \gamma I_c - \mu I_c - c_1 I_c + c_2 I (I + I_c)\\
\dot{R} & = \gamma (I + I_c) \\
\dot{D} & = \mu (I + I_c) \\
\dot{S_u} & = -\beta (I + c_0 I_c) S_u + k_u (1 - W) (S + S_c) - k_1 S_u \\
\dot{W} & = k_w W (1 - \kappa S_c - W)
\end{split}
\end{equation}
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $S_u$: Susceptible individuals immune to caution because of economic downturn
* $E$: Exposed individuals
* $E_c$: Exposed individuals exercising caution
* $I$: Infected individuals
* $I_c$: Infected individuals exercising caution
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $W$: Economic status obeying a logistic law with caution individuals downturning
* $N=S+S_c+S_u+E+E_c+I+I_c+R+D$ Total population size (constant)
#### Parameters
* $\beta$ rate at which infected individuals contact susceptibles and infect them
* $\alpha$ rate at which exposed individuals become infected (1/(incubation time))
* $\gamma$ rate at which infected individuals recover from disease and become immune
* $\mu$ death rate for infected individuals
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- c_0 : reduction factor for exposure for cautioned susceptibles
    - c_1 : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- c_2 : rate constant for transition from uncautioned to cautioned susceptible
* four parameters coupling to economy and uncautionable individuals
- k_w : rate coefficient of economy equilibration
- k_u : rate coefficient of transition from uncautioned to uncautionable
- k_1 : inverse duration of uncautionable state
    - $\kappa$ : fractional economic downturn due to caution (proportional to the number cautious)
#### Implementation
```python
# set up the symbolic model
state = ['S', 'E', 'I', 'R', 'D', 'I_c', 'S_c', 'E_c', 'S_u', 'W']
param_list = ['beta', 'alpha', 'gamma', 'mu', 'c_0', 'c_1', 'c_2', 'k_u', 'k_1', 'k_w','kappa', 'N']
transition = [
Transition(origin='S', equation='-beta*(I+c_0*I_c)*S+c_1*S_c-c_2*(I+I_c)*S-k_u*(1-W)*S+k_1*S_u'),
Transition(origin='S_c', equation='-c_0*beta*(I+c_0*I_c)*S_c-c_1*S_c+c_2*(I+I_c)*S-k_u*(1-W)*S_c'),
Transition(origin='S_u', equation='-beta*(I+c_0*I_c)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u'),
Transition(origin='E', equation='beta*(I+c_0*I_c)*(S+S_u)-alpha*E+c_1*E_c-c_2*(I+I_c)*E'),
Transition(origin='E_c', equation='c_0*beta*(I+c_0*I_c)*S_c-alpha*E_c-c_1*E_c+c_2*(I+I_c)*E'),
Transition(origin='I', equation='alpha*E-gamma*I-mu*I+c_1*I_c-c_2*(I+I_c)*I'),
Transition(origin='I_c', equation='alpha*E_c-gamma*I_c-mu*I_c-c_1*I_c+c_2*(I+I_c)*I'),
Transition(origin='R', equation='gamma*(I+I_c)'),
Transition(origin='D', equation='mu*(I+I_c)'),
Transition(origin='W', equation='k_w*W*(1-kappa*S_c-W)')
]
SC3UEIR_model = DeterministicOde(state, param_list, ode=transition)
SC3UEIR_model.modelname='SC3UEIR'
SC3UEIR_model.ei=slice(1,3) # cases 1,2 i.e. E,I # note E_c and I_c not included
SC3UEIR_model.confirmed=slice(2,6) # cases 2-5 i.e. I, R, D, and I_c, not E, E_c
SC3UEIR_model.recovered=slice(3,4)
SC3UEIR_model.deaths=slice(4,5)
```
```python
# display equations
print_ode2(SC3UEIR_model)
```
### SC3UEI3R model
#### Equations
\begin{equation}
\begin{split}
\dot{S} &={} -(\beta_1 (I_1 + c_0 I_c) + \beta_2 I_2 + \beta_3 I_3) S + c_1 S_c - c_2 S I_3 - S k_u (1-W) + k_1 S_u\\
\dot{S_c} &={} - c_0 (\beta_1 (I_1 + c_0 I_c) + \beta_2 I_2 + \beta_3 I_3) S_c - c_1 S_c + c_2 S I_3 - k_u (1 - W) S_c \\
\dot{E} &= \beta_1 (I_1 + c_0 I_c) (S + S_u) - \alpha E + c_1 E_c - c_2 I_3 E\\
\dot{E_c} &= c_0 \beta_1 (I_1 + c_0 I_c) S_c - \alpha E_c - c_1 E_c + c_2 I_3 E\\
\dot{I_1} &= \alpha E - \gamma_1 I_1 - p_1 I_1 + c_1 I_c - c_2 I_3 I_1\\
\dot{I_c} &= \alpha E_c - \gamma_1 I_c - p_1 I_c - c_1 I_c + c_2 I_3 I_1\\
\dot{I_2} &= p_1 (I_1 + I_c) -\gamma_2 I_2 - p_2 I_2 \\
\dot{I_3} & = p_2 I_2 -\gamma_3 I_3 - \mu I_3 \\
\dot{R} & = \gamma_1 (I_1 + I_c) +\gamma_2 I_2 + \gamma_3 I_3\\
\dot{D} & = \mu I_3 \\
\dot{S_u} & = -(\beta_1 (I_1 + c_0 I_c)+\beta_2 I_2 + \beta_3 I_3) S_u + k_u (1 - W)(S+ S_c) - k_1 S_u \\
\dot{W} & = k_w W (1 - \kappa S_c - W)\\
\end{split}
\end{equation}
#### Variables
* $S$: Susceptible individuals living as normal
* $S_c$: Susceptible individuals exercising pandemic precautions
* $S_u$: Susceptible individuals immune to caution because of economic downturn
* $W$: Economic status obeying a logistic law with caution individuals downturning
* $E$: Exposed individuals
* $E_c$: Exposed individuals exercising caution
* $I_i$: Infected individuals in severity class $i$. Severity increases with $i$ and we assume individuals must pass through all previous classes. Non-hospital cases are split by caution.
* $I_1$: Mild infection (hospitalization not required), living as normal
* $I_c$: Mild infection (hospitalization not required), exercising caution
* $I_2$: Severe infection (hospitalization required)
* $I_3$: Critical infection (ICU required)
* $R$: individuals who have recovered from disease and are now immune
* $D$: Dead individuals
* $N=S+S_c+S_u+E+E_c+I_1+I_c+I_2+I_3+R+D$ Total population size (constant)
#### Parameters
* $\beta_i$ rate at which infected individuals in class $I_i$ contact susceptibles and infect them
* $\alpha$ rate at which exposed individuals become infected (1/(incubation time))
* $\gamma_i$ rate at which infected individuals in class $I_i$ recover from disease and become immune
* $\mu$ death rate for individuals in the most severe stage of disease
* $p_i$ rate at which infected individuals in class $I_i$ progress to class $I_{i+1}$
* $c_i$ three parameters characterizing cautionary response of population via class $S_c$
- $c_0$ : reduction factor for exposure for cautioned susceptibles
    - $c_1$ : inverse duration of caution (reciprocal of the exponential decay time constant in days)
- $c_2$ : rate constant for transition from uncautioned to cautioned susceptible
* four parameters coupling to economy and uncautionable individuals
- $k_w$ : rate coefficient of economy equilibration
- $k_u$ : rate coefficient of transition from uncautioned to uncautionable
- $k_1$ : inverse duration of uncautionable state
    - $\kappa$ : fractional economic downturn due to caution (proportional to the number cautious)
#### Implementation
```python
# set up the symbolic model
state = ['S', 'E', 'I_1', 'I_2', 'I_3', 'R', 'D', 'I_c', 'S_c', 'E_c', 'S_u', 'W'] # order important to allow correct plot groupings
param_list = ['beta_1', 'beta_2', 'beta_3', 'p_1', 'p_2', 'alpha',
'gamma_1', 'gamma_2', 'gamma_3','mu', 'c_0', 'c_1', 'c_2', 'k_u', 'k_1', 'k_w', 'kappa', 'N'] # order also important
transition = [
Transition(origin='S', equation='-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S+c_1*S_c-c_2*(I_3)*S-k_u*(1-W)*S+k_1*S_u'),
Transition(origin='S_c', equation='-c_0*(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_c-c_1*S_c+c_2*(I_3)*S-k_u*(1-W)*S_c'),
Transition(origin='S_u', equation='-(beta_1*(I_1+c_0*I_c)+beta_2*I_2+beta_3*I_3)*S_u+k_u*(1-W)*(S+S_c)-k_1*S_u'),
Transition(origin='W', equation='k_w*W*(1-kappa*S_c-W)'),
Transition(origin='E', equation='beta_1*(I_1+c_0*I_c)*(S+S_u)-alpha*E-c_2*(I_3)*E+c_1*E_c'),
Transition(origin='E_c', equation='c_0*beta_1*(I_1+c_0*I_c)*S_c-alpha*E_c+c_2*(I_3)*E-c_1*E_c'),
Transition(origin='I_1', equation='alpha*E-gamma_1*I_1-p_1*I_1-c_2*(I_3)*I_1+c_1*I_c'),
Transition(origin='I_c', equation='alpha*E_c-gamma_1*I_c-p_1*I_c+c_2*(I_3)*I_1-c_1*I_c'), # changed to I_c, prints better
Transition(origin='I_2', equation='p_1*(I_1+I_c)-gamma_2*I_2-p_2*I_2'),
Transition(origin='I_3', equation='p_2*I_2-gamma_3*I_3-mu*I_3'), # error corrected, this is equation for I_3 not I_2
Transition(origin='R', equation='gamma_1*(I_1+I_c)+gamma_2*I_2+gamma_3*I_3'),
Transition(origin='D', equation='mu*I_3')
]
SC3UEI3R_model = DeterministicOde(state, param_list, ode=transition)
SC3UEI3R_model.modelname='SC3UEI3R' # following needs to be adjusted for new models, NB add new species at end to preserve slice subsets
SC3UEI3R_model.ei=slice(1,5) # 1,2,3,4 i.e. E,I_1,I_2,I_3 – not E_c and I_c
SC3UEI3R_model.confirmed=slice(2,8) # cases 2-7 i.e. I1, I2, I3, R, D and I_c
SC3UEI3R_model.recovered=slice(5,6) # case 5 R
SC3UEI3R_model.deaths=slice(6,7) # case 6 D
```
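For orientation, a short sketch (not from the original notebook) of how the slice attributes defined above map a solution array to observables; `sol` is a placeholder array with columns in `state` order:

```python
import numpy as np

# Sketch: aggregate solution columns into observables via the slice attributes
sol = np.zeros((5, len(state)))                               # placeholder solution
ei_total = np.sum(sol[:, SC3UEI3R_model.ei], axis=1)          # E + I_1 + I_2 + I_3
confirmed = np.sum(sol[:, SC3UEI3R_model.confirmed], axis=1)  # I_1, I_2, I_3, R, D, I_c
```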
```python
# display equations
print_ode2(SC3UEI3R_model) # name needs to be that of current model
```
# Extract data from the Johns Hopkins database
## Definition of data extraction functions get_data and get_country_data
```python
import numpy as np
import csv
import itertools
import matplotlib
%matplotlib inline
import seaborn as sb
from matplotlib import pyplot as plt
from cycler import cycler
import datetime
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import pwlf
import sys
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
```
```python
def get_data(jhu_file):
dat = []
with open(jhu_file, newline='') as csvfile:
myreader = csv.reader(csvfile, delimiter=',')
popdat = []
i = 0
for row in myreader:
if i != 0:
poplist = []
j = 0
for elt in row:
if j >= 4:
poplist.append(int(elt))
elif j == 0:
poplist.append(elt)
elif j == 1:
poplist[0]=(elt,poplist[0])
j = j+1
popdat.append(poplist)
else:
popdat.append(row)
# print(popdat[i])
            i = i + 1
# dates
popdat0=['dates']
for elt in popdat[0][4:]:
popdat0.append(elt)
popdat[0] = [pop for pop in popdat0]
# print('popdat[0]',popdat[0])
# totals over all countries
totals = np.zeros(len(popdat[0])-1,dtype=int)
for row in popdat[1:]:
totals = totals + np.array(row[1:])
totals = list(np.asarray(totals))
# print(totals)
popkeyed = {poplist[0]: poplist[1:] for poplist in popdat}
popkeyed.update({'dates':popdat[0][1:]})
popkeyed.update({('World',''):totals})
# del popkeyed[('d','a')]
# assemble totals for countries with multiple regions
total = np.zeros(len(popkeyed['dates']),dtype=int)
poptotkeyed = {}
for country,tseries in popkeyed.items():
if country!='dates' and country[1] != '': # it seems that UK is single exception with both '' and non '' regions, UK total is then UK overseas
countrytotal = (country[0],'Total')
if countrytotal in poptotkeyed:
# print(country,popkeyed[country],poptotkeyed[countrytotal])
total = np.array(tseries)[:]+np.array(poptotkeyed[countrytotal])[:]
else:
total = np.array(tseries)
poptotkeyed.update({countrytotal:list(total)})
for countrytotal,tseries in poptotkeyed.items():
total = np.array(tseries)
popkeyed.update({countrytotal:list(total)})
return popkeyed
```
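A minimal usage sketch (assuming the JHU CSV layout and the `base` path used in the JHU data cell below); the returned dictionary is keyed by `(country, region)` tuples plus a `'dates'` entry:

```python
# Hypothetical quick check of the structure returned by get_data
base = '../../covid-19-JH/csse_covid_19_data/csse_covid_19_time_series/'
popkeyed = get_data(base + 'time_series_covid19_confirmed_global.csv')
print(popkeyed['dates'][:3])          # first three date strings
print(popkeyed[('Germany', '')][:3])  # first three cumulative counts for Germany
print(popkeyed[('World', '')][-1])    # latest worldwide total
```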
```python
def Float(x):
    # convert to float, mapping unparseable entries (e.g. '' or ' ') to NaN
    try:
        rtn = float(x)
    except (ValueError, TypeError):
        rtn = float('NaN')
    return rtn
```
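`Float` exists so that blank padding entries survive conversion; a two-line check of the intended behaviour:

```python
import math

assert Float('3.5') == 3.5     # normal numeric strings convert as usual
assert math.isnan(Float(' '))  # blanks and other unparseable entries become NaN
```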
```python
# from covid_data_explore-jhu-j
def get_country_data(country_s='World', datatype='confirmed', firstdate=None, lastdate=None):
if isinstance(country_s,str):
country = (country_s,'')
else: # single ('country','reg') entry
country = country_s
popkeyed = covid_ts[datatype]
dates = popkeyed['dates']
fmt = '%m/%d/%y'
xx = [datetime.datetime.strptime(dd,fmt) for dd in dates ]
if firstdate:
firstdate_d = datetime.datetime.strptime(firstdate,fmt)
else:
firstdate_d = datetime.datetime.strptime(dates[0],fmt)
if lastdate:
lastdate_d = datetime.datetime.strptime(lastdate,fmt)
else:
lastdate_d = datetime.datetime.strptime(dates[-1],fmt)
daystart = (firstdate_d-xx[0]).days
daystop = (lastdate_d-xx[-1]).days
try:
yy = popkeyed[country]
# print(country)
    except KeyError:
print('country data not found',country)
return None,None,None
yyf = [Float(y) for y in yy]
if daystart <0:
xx0 = [xx[0]+datetime.timedelta(days=i) for i in range(daystart,0)]
yy0 = [0.]*(-daystart)
else:
xx0 = []
yy0 = []
if daystop > 0:
xx1 = [xx[-1]+datetime.timedelta(days=i) for i in range(daystop)]
yy1 = [0.]*(daystop)
else:
xx1 = []
yy1 = []
xx = xx0 + xx + xx1
xxf = [Float((x-firstdate_d).days) for x in xx ]
yy = yy0 + yyf + yy1
return xx,xxf,yy
```
```python
def get_country_data_nyw(country_s='World', datatype='confirmed', firstdate=None, lastdate=None):
if isinstance(country_s,str):
country = (country_s,'')
else: # single ('country','reg') entry
country = country_s
popkeyed = covid_ts[datatype]
dates = popkeyed['dates']
fmt = '%m/%d/%y'
xx = [datetime.datetime.strptime(dd,fmt) for dd in dates ]
if firstdate:
firstdate_d = datetime.datetime.strptime(firstdate,fmt)
else:
firstdate_d = datetime.datetime.strptime(dates[0],fmt)
if lastdate:
lastdate_d = datetime.datetime.strptime(lastdate,fmt)
else:
lastdate_d = datetime.datetime.strptime(dates[-1],fmt)
daystart = (firstdate_d-xx[0]).days
daystop = (lastdate_d-xx[-1]).days
try:
yy = popkeyed[country]
# print(country)
    except KeyError:
print('country data not found',country)
return None,None
yyf = [Float(y) for y in yy]
yy0 = []
yy1 = []
if daystart>len(yyf):
print('Error: start date does not overlap with available data')
return None,None
elif daystart>0:
yyf = yyf[daystart:]
elif daystart <0:
yy0 = [0.]*(-daystart)
if daystop < 0:
yyf = yyf[:daystop]
elif daystop > 0:
yy1 = [0.]*(daystop)
yyf = yy0 + yyf + yy1
xxf = [float(x) for x in range(len(yyf))]
return xxf,yyf
```
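The two extractors differ mainly in how they handle the time axis; a sketch of the contrast (assuming `covid_ts` has been assembled as in the JHU data cell below):

```python
# get_country_data keeps the full date range, padding with zeros outside it,
# and returns datetime plus float day axes relative to firstdate;
# get_country_data_nyw truncates to [firstdate, lastdate] and renumbers days 0..n-1.
xx, xxf, yy = get_country_data('Germany', 'confirmed', firstdate='01/25/20')
xxf2, yy2 = get_country_data_nyw('Germany', 'confirmed', firstdate='01/25/20')
print(len(xxf), len(xxf2))
```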
## JHU data
```python
base = '../../covid-19-JH/csse_covid_19_data/csse_covid_19_time_series/'
confirmed = get_data(base+'time_series_covid19_confirmed_global.csv')
deaths = get_data(base+'time_series_covid19_deaths_global.csv')
recovered = get_data(base+'time_series_covid19_recovered_global.csv')
covid_ts = {'confirmed':confirmed,'deaths':deaths,'recovered':recovered}
countries_jhu = [(row[0],row[1]) for row in confirmed][1:]
print("number of countries listed",len(countries_jhu))
i=0
for country in countries_jhu:
# print(i,country)
i = i + 1
```
number of countries listed 274
## Get data for one country
```python
# assumed data starting on firstdate
test_country='Germany'
N = 80000000
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..., 216.0, 217.0, 218.0]
days 0 to 222 data stored in y_jhu
## OWID data
```python
import csv
owid_file = '../../covid-19-owid/public/data/owid-covid-data.csv'
covid_owid = []
with open(owid_file,'r',newline='') as fp:
myreader = csv.DictReader(fp,delimiter=',')
# rows = list(itertools.islice(myreader,4))
for row in myreader:
covid_owid.append(row)
covid_owid[0].keys()
```
odict_keys(['iso_code', 'continent', 'location', 'date', 'total_cases', 'new_cases', 'new_cases_smoothed', 'total_deaths', 'new_deaths', 'new_deaths_smoothed', 'total_cases_per_million', 'new_cases_per_million', 'new_cases_smoothed_per_million', 'total_deaths_per_million', 'new_deaths_per_million', 'new_deaths_smoothed_per_million', 'new_tests', 'total_tests', 'total_tests_per_thousand', 'new_tests_per_thousand', 'new_tests_smoothed', 'new_tests_smoothed_per_thousand', 'tests_per_case', 'positive_rate', 'tests_units', 'stringency_index', 'population', 'population_density', 'median_age', 'aged_65_older', 'aged_70_older', 'gdp_per_capita', 'extreme_poverty', 'cardiovasc_death_rate', 'diabetes_prevalence', 'female_smokers', 'male_smokers', 'handwashing_facilities', 'hospital_beds_per_thousand', 'life_expectancy'])
```python
def get_data_owid(owid_file,database='owid',datatype='confirmed',dataaccum = 'cumulative'):
import numpy as np
import datetime
import matplotlib.dates as mdates
global covid_owid
    if not covid_owid:
        with open(owid_file, 'r', newline='') as csvfile:
            myreader = csv.DictReader(csvfile, delimiter=',')
            for row in myreader:
                covid_owid.append(row)
        # the with-block closes the file automatically; the old close(owid_file) call was a bug
# for key in covid_owid[0].keys(): # to loop through all keys
if datatype == 'confirmed':
if dataaccum == 'cumulative':
key = 'total_cases'
elif dataaccum == 'weekly':
key = 'new_cases_smoothed'
else:
key = 'new_cases'
    elif datatype == 'recovered':
        print('data for recovered cases not available in OWID database')
        return None
elif datatype == 'deaths':
if dataaccum == 'cumulative':
key = 'total_deaths'
elif dataaccum == 'weekly':
key = 'new_deaths_smoothed'
else:
key = 'new_deaths'
elif datatype == 'tests':
if dataaccum == 'cumulative': # reporting intervals often sporadic so better to use smoothed weekly
# key = 'total_tests'
key = 'new_tests_smoothed' # will adjust to cumulative below
elif dataaccum == 'weekly':
key = 'new_tests_smoothed'
else:
key = 'new_tests' # reporting intervals often sporadic so better to use smoothed weekly
elif datatype =='stringency':
key = 'stringency_index'
else:
print('data for ', datatype,'not available or not yet translated in OWID database')
key = None
return
countries = np.unique(np.array([dd['location'] for dd in covid_owid]))
dates = np.unique(np.array([dd['date'] for dd in covid_owid]))
dates.sort()
fmt = '%Y-%m-%d'
dates_t = [datetime.datetime.strptime(dd,fmt) for dd in dates ]
firstdate = dates[0]
lastdate = dates[-1]
firstdate_t = dates_t[0]
lastdate_t = dates_t[-1]
daystart = 0
daystop = (lastdate_t-firstdate_t).days
popkeyed = {country: np.zeros(daystop+1,dtype=float) for country in countries}
for dd in covid_owid:
country = dd['location']
day = (datetime.datetime.strptime(dd['date'],fmt)-firstdate_t).days
popkeyed[country][day] = float(dd[key]) if not dd[key]=='' else 0.0
# popkeyed = {country: np.transpose(np.array([[dd['date'],dd[key]] for dd in covid_owid if dd['location'] == country])) for country in countries}
# popkeyed = {country: np.array([float(dd[key]) if not dd[key]=='' else 0.0 for dd in covid_owid if dd['location'] == country]) for country in countries}
    if datatype == 'tests' and dataaccum == 'cumulative': # assemble cumulative tests from smoothed daily tests
        for country in countries:
            popkeyed.update({country: np.cumsum(popkeyed[country])})
fmt_jhu = '%m/%d/%y'
popkeyed.update({'dates': [date.strftime(fmt_jhu) for date in dates_t]}) # dates are set to strings in jhu date format for compatibility
return popkeyed
```
```python
owid_file = '../../covid-19-owid/public/data/owid-covid-data.csv'
confirmed_owid=get_data_owid(owid_file,database='owid',datatype='confirmed',dataaccum = 'cumulative')
recovered_owid = None
deaths_owid=get_data_owid(owid_file,database='owid',datatype='deaths',dataaccum = 'cumulative')
tests_owid=get_data_owid(owid_file,database='owid',datatype='tests',dataaccum = 'cumulative')
stringency_owid=get_data_owid(owid_file,database='owid',datatype='stringency',dataaccum = 'daily')
covid_owid_ts= {'confirmed':confirmed_owid,'deaths':deaths_owid,'recovered':recovered_owid, 'tests': tests_owid , 'stringency': stringency_owid}
```
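Unlike the JHU dictionaries, the OWID dictionaries are keyed by plain country names; a quick access sketch:

```python
print(confirmed_owid['Germany'][-1])  # latest cumulative confirmed count
print(confirmed_owid['dates'][-1])    # last date, already converted to JHU m/d/y format
```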
```python
def truncx(xx,daystart,daystop):
"""truncate array xx to run from daystart to daystop
do this before trying to extend the arrays if required"""
daymin = max(daystart,0)
daymax = min(daystop,(xx[-1]-xx[0]).days)
return xx[daymin:daymax+1]
def truncy(xx,yy,daystart,daystop):
"""truncate arrays xx and yy to run from daystart to daystop
do this before trying to extend the arrays if required"""
daymin = max(daystart,0)
daymax = min(daystop,(xx[-1]-xx[0]).days)
return yy[daymin:daymax+1]
def plotCountry_(country_s, datatype='confirmed', dataaccum='cumulative', fittype=None, ax=None, ax2=False,
symbol='o--', step=None, firstdate=None, lastdate=None, intdates=False, linecolor=None, maxyval=None, minconfirmed=0,nsegments=3,database='jhu'):
""" plots selected data for a list of countries or single country
datatypes allowed are 'confirmed','deaths','recovered'
dataaccum specifies either 'cumulative' or 'daily' or averaged over 7 days 'cum_av_weekly' or 'daily_av_weekly'
fittypes allowed are currently None, 'piecewise-linear'
ax graphical axes to use for plot: default None -> new axes
ax2 true if second axes as twin axes for overlay plotting
symbol to use for plotting
step whether to use step plotting instead of points: default None -> points
    firstdate to plot (may be before the first date in the data - padded with 0)
    lastdate to plot (may be after the last date in the data - padded with 0)
    intdates : whether to plot dates as integers for compatibility (default as dates)
    linecolor : default color to use for a single trace, instead of the listed set
"""
global covid_ts, covid_ts_owid
import math
import warnings
# extract list of countries in [(country,region),...] format from first parameter
countries = []
if isinstance(country_s,list):
for country in country_s:
if isinstance(country,str) and database == 'jhu':
country = (country,'')
countries.append(country)
elif isinstance(country_s,str):
if database == 'jhu':
countries = [( country_s,'')]
else:
countries = [country_s]
else: # single ('country','reg') entry
countries = [country_s]
    # get data with datatype and extend dates to pad the desired interval specified by firstdate,lastdate
if database == 'jhu':
popkeyed = covid_ts[datatype]
dates = popkeyed['dates']
fmt = '%m/%d/%y'
elif database == 'owid':
popkeyed = covid_owid_ts[datatype]
dates = popkeyed['dates']
fmt = '%m/%d/%y'
# fmt = '%Y-%m-%d' the owid date format was converted to the jhu date format in get_data_owid
xxd = [datetime.datetime.strptime(dd,fmt) for dd in dates ]
if firstdate:
firstdate_d = datetime.datetime.strptime(firstdate,fmt)
else:
firstdate_d = datetime.datetime.strptime(dates[0],fmt)
if lastdate:
lastdate_d = datetime.datetime.strptime(lastdate,fmt)
else:
lastdate_d = datetime.datetime.strptime(dates[-1],fmt)
daystart = (firstdate_d-xxd[0]).days
daystop = (lastdate_d-xxd[0]).days
    xx = truncx(xxd,daystart,daystop)
# print('1 len xx',len(xx))
if daystart <0:
xx0 = [xx[0]+datetime.timedelta(days=i) for i in range(daystart,0)]
yy0 = [0.]*(-daystart)
else:
xx0 = []
yy0 = []
if daystop > (xxd[-1]-xxd[0]).days:
xx1 = [xxd[-1]+datetime.timedelta(days=i) for i in range(daystop-(xxd[-1]-xxd[0]).days)]
yy1 = [' ']*(daystop-(xxd[-1]-xxd[0]).days)
else:
xx1 = []
yy1 = []
xx = xx0 + xx + xx1
# print('2 len xx',len(xx))
#print('len xx1 yy1',len(xx1),len(yy1))
# print('len xx',len(xx))
if fittype == 'piecewise-linear':
xxi = [Float((x-xx[0]).days) for x in xx ]
# print(xxi)
# print('len xxi',len(xxi))
# locator = mdates.MonthLocator()
locator = mdates.AutoDateLocator(minticks=5, maxticks=13)
formatter= mdates.ConciseDateFormatter(locator)
if not ax:
fig,ax = plt.subplots(1,1,figsize=(9,6))
ax2 = ax
elif ax2:
ax2 = ax.twinx()
else:
ax2 = ax
colors = ['k', 'b', 'c', 'm', 'y', 'g', 'olive', 'chocolate']
i = 0
j = 0
for country in countries:
try:
yyd = popkeyed[country]
if np.max(yyd) >= minconfirmed:
j = j+1
else:
i = i + 1
continue
        except KeyError:
print('country not found',country)
i = i + 1
continue
yy = truncy(xxd,yyd,daystart,daystop)
# print(country,'1 len yy yyd',len(yy),len(yyd))
yyf = [Float(y) for y in yy]
yy = yy0 + yyf + yy1
# print(country,'2 len yy',len(yy))
# ymax=np.max(np.array(yy))
yyf = [Float(y) for y in yy]
if dataaccum == 'daily':
yy = [0.]*len(yy)
yy[0] = yyf[0]
for k in range(1,len(yy)):
yy[k] = yyf[k]-yyf[k-1]
elif dataaccum == 'cum_av_weekly':
yy = [0.]*len(yy)
moving_av = 0.
for k in range(len(yy)):
if k-7 >= 0:
moving_av = moving_av - yyf[k-7]
moving_av = moving_av + yyf[k]
yy[k] = moving_av/min(7.0,float(k+1))
elif dataaccum == 'daily_av_weekly':
yy = [0.]*len(yyf)
yy[0] = yyf[0]
for k in range(1,len(yy)):
yy[k] = yyf[k]-yyf[k-1]
yyf = [y for y in yy]
yy = [0.]*len(yy)
moving_av = 0.
for k in range(len(yy)):
if k-7 >= 0:
moving_av = moving_av - yyf[k-7]
moving_av = moving_av + yyf[k]
yy[k] = moving_av/min(7.0,float(k+1))
if intdates:
xx = range(len(xx))
if step:
ax2.step(xx,yy,label = country[0])
else:
# print(ax,ax2)
# ax2.set_ylim(ymax,0)
if linecolor:
color = linecolor
else:
color = colors[i]
ax2.plot(xx, yy, symbol, markersize=3, color = color, alpha=0.8, label = country[0])
if maxyval: ax.set_ylim(0,maxyval)
if maxyval: ax2.set_ylim(0,maxyval)
plt.title(country[0]+'-'+country[1]) # +' '+datatype)
if fittype == 'piecewise-linear':
warnings.filterwarnings("ignore", message="Warning: zero length interval encountered in pwlf.py calc_slopes")
# initialize piecewise linear fit with your x and y data
# yyf = [Float(y) for y in yy]
yyf = [Float(y) if not math.isnan(y) else 0.0 for y in yy]
# print(yyf)
my_pwlf = pwlf.PiecewiseLinFit(xxi, yyf)
# fit the data for three line segments
res = my_pwlf.fit(nsegments)
ppp = my_pwlf.p_values(method='non-linear', step_size=1e-4)
se = my_pwlf.se # standard errors
parameters = np.concatenate((my_pwlf.beta,
my_pwlf.fit_breaks[1:-1]))
header = ['Parameter type', 'Parameter value', 'Standard error', 't ', 'P > np.abs(t) (p-value)']
print(*header, sep=' | ')
values = np.zeros((parameters.size, 5), dtype=np.object_)
values[:, 1] = np.around(parameters, decimals=3)
values[:, 2] = np.around(se, decimals=3)
values[:, 3] = np.around(parameters / se, decimals=3)
values[:, 4] = np.around(ppp, decimals=3)
for iii, row in enumerate(values):
if iii < my_pwlf.beta.size:
row[0] = 'Slope '
print(*row, sep=' | ')
else:
row[0] = 'Breakpoint'
print(*row, sep=' | ')
print("")
# predict for the determined points
xHat = np.linspace(min(xxi), max(xxi), num=len(xx))
# print(len(xHat),len(xxi))
yHat = my_pwlf.predict(xHat)
ax2.plot(xx, yHat, color = colors[i], alpha=0.5, label = country[0]+' fit')
i = i+1
if j==0:
ax.axis("off")
else:
if j > 1:
plt.legend(loc="upper left")
plt.title('countries '+datatype+dataaccum)
if not intdates:
ax2.xaxis.set_major_formatter(formatter)
ax2.xaxis.set_major_locator(locator)
for tick in ax2.get_xticklabels():
tick.set_rotation(40)
```
## Plots of data for Cautionary Model comparison
To suppress the repeated warning `Warning: zero length interval encountered in pwlf.py calc_slopes`, which does not seem to harm the final result, comment out line 1110 in pwlf.py (in the /usr/local/lib/python3.7/site-packages/pwlf directory), i.e. the statement `print("Warning: zero length interval encountered in pwlf.py calc_slopes")`.
```python
plotCountry_(['Italy','Spain','Germany','France','United Kingdom','Sweden','Turkey'],
'confirmed','cum_av_weekly',firstdate='02/15/20',lastdate='09/1/20',fittype='piecewise-linear',nsegments=4)
#plt.savefig("covid-19-caution/figures/fig1a.pdf",bbox_inches='tight')
```
```python
plotCountry_(['Italy','Spain','Germany','France','United Kingdom','Sweden','Turkey'],
'confirmed','daily_av_weekly',firstdate='02/15/20',lastdate='08/31/20',database='owid')
plt.title("");
# plt.plot(xx,[450000*d[1] for d in dat],linewidth=6,color='salmon',alpha=0.5,linestyle='--');
# plt.savefig("covid-19-caution/figures/fig1b.pdf",bbox_inches='tight')
```
# Simulation
Note: there is a problem with setting parameters in the model.
The DeterministicOde class's `parameters` setter converts a dictionary (or list of tuples) of parameters to a dictionary keyed by sympy symbols, not strings.
So attempts to modify parameter values by accessing this dictionary fail; copying the dictionary, modifying it and writing it back also fails.
Instead we additionally store the parameters as a dictionary with string keys, under `model.params`. When we modify these values, they can be copied back to the parameters setter using `model.parameters = model.params`.
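A minimal sketch of this workaround (assuming `model.params` has been initialised as in the simulation cells below):

```python
# edit the string-keyed copy, then push it back through the pygom setter,
# which re-keys the dictionary by sympy symbols internally
SCIR_model.params['beta'] = 0.3
SCIR_model.parameters = SCIR_model.params
```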
## Simulation of SCIR model
```python
# setup time points for simulation, initial conditions and parameters
t = np.linspace(0, lastday -1, lastday)
# initial conditions assuming there is no natural immunity
I_0 = 0.00003
x0_SCIR = [1.0-I_0, I_0, 0.0, 0.0, 0.0]
# Define parameters based on clinical observations Dr. Alison
Exposure=0.25 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurInf=10 #Duration of mild infections, days
FracCritical=0.1 #Fraction of infections that are critical
CFR=0.05 #Case fatality rate (fraction of infections resulting in death)
TimeDeath=DurInf+7 #Time from infection to death, days
# Model extension by John McCaskill to include caution
CautionFactor= 0.4 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.25 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.125 # Fraction of infections that are confirmed (detected)
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# convert above parameters to model parameters
params = {'beta' : Exposure/sum(x0_SCIR),
'gamma': (1.0/DurInf),
'mu' : (1.0/TimeDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SCIR)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SCIR)}
print(params)
# assign x0 and params to the model, integrate over t and plot
SCIR_model.initial_values = (x0_SCIR, t[0])
SCIR_model.parameters = params
SCIR_model.params = params.copy()
solution = SCIR_model.integrate(t[1::])
SCIR_model.plot()
# calculate time point when maximum number of people are infectious
peak_i = np.argmax(solution[:,1])
print('Peak infection (days)', t[peak_i])
```
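As a rough side-calculation (not in the original), the SIR-type basic reproduction number implied by these rate constants, ignoring the caution mechanism, is $R_0 = \beta/(\gamma+\mu)$:

```python
# Hedged estimate of R_0 from the parameter dictionary above
R0 = params['beta'] / (params['gamma'] + params['mu'])
print('approximate R0:', round(R0, 2))  # ~1.93 for the values used here
```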
### Integration and plot using scipy and matplotlib directly
```python
solution1 = scipy.integrate.odeint(SCIR_model.ode, x0_SCIR, t[1::])
ys = solution1.copy()
plt.figure(figsize=(15,7))
plt.subplot(1,2,1)
plt.xlabel("Time (days)")
plt.ylabel("Number per 1000000 People")
plt.ylim([0,0.001])
plt.plot(t[1::],ys,label=("S","I","R","D","Sc"))
plt.legend(("S","I","R","D","Sc"))
plt.subplot(1,2,2)
plt.xlabel("Time (days)")
plt.ylabel("Number per 1000000 People")
plt.ylim([0.000001,1])
plt.semilogy()
plt.plot(t[1::],ys,label=("S","I","R","D","Sc"))
plt.legend(("S","I","R","D","Sc"));
```
### Compare data with SCIR simulation
```python
# model with generating parameters
params1 = SCIR_model.params.copy()
params1['c_0']=0.85
params1['beta']=0.15
SCIR_model.parameters = params1
print('parameters',SCIR_model.parameters)
x0_fit = x0_SCIR.copy()
print('initial conditions',x0_fit)
t_fit = t
SCIR_model.initial_values = (x0_fit, t_fit[0])
sol_fit = scipy.integrate.odeint(SCIR_model.ode, x0_fit, t_fit[1::])
plt.figure(figsize=(15,10))
plt.plot(t,y_jhu[test_country][:,1]/FracRecoveredDet, 'bo',label='R') # recovered
plt.plot(t,y_jhu[test_country][:,2], 'ro',label='D') # died
plt.gca().set_prop_cycle(color=['grey','orange','green','green','green','blue','red', 'black'])
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0,0.02])
plt.legend()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
### Stochastic simulation
```python
N=10000
I_0 = 10
x0_SCIR_S = [N-I_0, I_0, 0, 0, 0]
# Define parameters based on clinical observations Dr. Alison
Exposure=0.25 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurInf=10 #Duration of mild infections, days
FracCritical=0.1 #Fraction of infections that are critical
CFR=0.05 #Case fatality rate (fraction of infections resulting in death)
TimeDeath=DurInf+7 #Time from infection to death, days
# Model extension by John McCaskill to include caution
CautionFactor= 0.4 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.25 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.02 # Fraction of ICUs relative to population size N # increased 10X for low pop simulation
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.125 # Fraction of infections that are confirmed (detected)
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
params_S = {'beta' : Exposure/sum(x0_SCIR_S),
'gamma': (1.0/DurInf),
'mu' : (1.0/TimeDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SCIR_S)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SCIR_S)}
SCIR_modelS.initial_values = (x0_SCIR_S, t[0])
SCIR_modelS.parameters = params_S
SCIR_modelS.params = params_S.copy()
t_jump = np.linspace(0,100,50)
simX, simT =SCIR_modelS.simulate_jump(t_jump, iteration=5, full_output=True)
```
```python
plt.figure(figsize=(25,15))
for iter in range(5):
plt.subplot(2,5,iter+1)
plt.xlabel("Time (days)")
plt.ylabel("Population")
plt.plot(simT,simX[iter],label=("S","I","R","D","Sc"))
plt.legend(("S","I","R","D","Sc"))
for iter in range(5):
plt.subplot(2,5,5+iter+1)
plt.xlabel("Time (days)")
plt.ylabel("Population")
plt.semilogy()
plt.plot(simT,simX[iter],label=("S","I","R","D","Sc"))
plt.legend(("S","I","R","D","Sc"));
```
## Simulation of SC2IR model
```python
# setup time points for simulation, initial conditions and parameters
t = np.linspace(0, lastday -1, lastday)
# initial conditions assuming there is no natural immunity
I_0 = 0.00003
x0 = [1.0-I_0, I_0, 0.0, 0.0, 0.0, 0.0]
# Define parameters based on clinical observations Dr. Alison
Exposure=0.25 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurInf=10 #Duration of mild infections, days
FracCritical=0.1 #Fraction of infections that are critical
CFR=0.05 #Case fatality rate (fraction of infections resulting in death)
TimeDeath=DurInf+7 #Time from ICU admission to death, days
# Model extension by John McCaskill to include caution
CautionFactor= 0.4 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.25 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.125 # Fraction of infections that are confirmed (detected)
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# convert above parameters to model parameters
params = {'beta' : Exposure/sum(x0),
'gamma': (1.0/DurInf),
'mu' : (1.0/TimeDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0)*ICUFrac*CautionICUFrac),
'N' : sum(x0)}
print('parameters',params)
# assign x0 and params to the model, integrate over t and plot
SC2IR_model.initial_values = (x0, t[0])
SC2IR_model.parameters = params
SC2IR_model.params = params.copy()
solution = SC2IR_model.integrate(t[1::])
SC2IR_model.plot()
# calculate time point when maximum number of people are infectious
peak_i = np.argmax(solution[:,1])
print('Peak infection (days)', t[peak_i])
```
### Integration and plot using scipy and matplotlib directly
```python
solution1 = scipy.integrate.odeint(SC2IR_model.ode, x0, t[1::])
ys = solution1.copy()
plt.figure(figsize=(15,7))
plt.subplot(1,2,1)
plt.xlabel("Time (days)")
plt.ylabel("Fraction of population")
plt.ylim([0,1])
plt.gca().set_prop_cycle(color=['grey','green','blue','red','darkgreen', 'black'])
plt.plot(t[1::],ys,label=("S","I","R","D",'Ic',"Sc"))
plt.legend(("S","I","R","D","Ic","Sc"))
plt.title(SC2IR_model.modelname)
plt.subplot(1,2,2)
plt.xlabel("Time (days)")
plt.ylabel("Fraction of population")
plt.ylim([0.000001,1])
plt.semilogy()
plt.gca().set_prop_cycle(color=['grey','green','blue','red','darkgreen', 'black'])
plt.plot(t[1::],ys,label=("S","I","R","D",'Ic',"Sc"))
plt.legend(("S","I","R","D","Ic","Sc"))
plt.title(SC2IR_model.modelname + ' - semilog');
```
### Compare data with SC2IR simulation
```python
# model with generating parameters
def isolveplot(beta,gamma,mu,c_0,c_1,c_2,logI_0):
# saveparams=SC2IR_model.parameters.copy() # backup model current parameters
# saveICs = SC2IR_model.initial_values # back model ICs
I_0 = 10.**logI_0
x0 = [1.0-I_0, I_0, 0.0, 0.0, 0.0, 0.0]
params = {'beta' : beta,
'gamma': gamma,
'mu' : mu,
'c_0' : c_0,
'c_1' : c_1,
'c_2' : c_2,
'N' : sum(x0)}
SC2IR_model.initial_values = (x0, t[0])
SC2IR_model.parameters = params.copy()
SC2IR_model.params = params.copy()
SC2IR_model.dumpparams()
sol_fit = scipy.integrate.odeint(SC2IR_model.ode, x0, t[1::])
#
plt.figure(figsize=(15,10))
plt.plot(t,y_jhu[test_country][:,1]/FracRecoveredDet, 'bo',label='R') # recovered
plt.semilogy()
plt.plot(t,y_jhu[test_country][:,2], 'ro',label='D') # died
plt.semilogy()
plt.gca().set_prop_cycle(color=['grey','green','blue','red','darkgreen','black'])
plt.plot(t[1::], sol_fit)
plt.ylim([0.000001,1])
plt.semilogy()
plt.legend(('R','D','S','I','R','D','I_c','S_c'))
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
# SC2IR_model.parameters=saveparams.copy()
# SC2IR_model.initial_values=saveICs
```
```python
if SC2IR_model.loadparams():
params = SC2IR_model.params.copy()
else:
params = {'beta' : 0.25,
'gamma': 0.1,
'mu' : 0.05,
'c_0' : 0.3,
'c_1' : 1./14.,
'c_2' : 2000.,
'N' : 1.}
interact(isolveplot,
beta=FloatSlider(min=0,max=1,step=0.01,value=params['beta'],description='beta',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
gamma=FloatSlider(min=0,max=1,step=0.01,value=params['gamma'],description='gamma',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.1,step=0.001,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=5000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=np.log10(x0[1]),description='log I_0',
style=style,layout=slider_layout,continuous_update=False))
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC2IR.pk :
```python
slide_params=SC2IR_model.parameters.copy()
print(slide_params)
theta = [0.4,0.11,0.007,0.33,0.228,275.]
```
{beta: 0.46, gamma: 0.1, mu: 0.045, c_0: 0.3, c_1: 0.07142857142857142, c_2: 153.0, N: 1.0}
```python
SC2IR_model.params
```
{'beta': 0.25,
'gamma': 0.1,
'mu': 0.029411764705882353,
'c_0': 0.4,
'c_1': 0.047619047619047616,
'c_2': 2000.0,
'N': 1.0}
## Simulation of SCEI3R model
```python
# setup time points for simulation, initial conditions and parameters
t = np.linspace(0, lastday -1, lastday)
# initial conditions assuming there is no natural immunity
E_0 = 0.00003
x0_SCEI3R = [1.0-E_0, E_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# Define parameters based on clinical observations Dr. Alison
Exposure=0.25 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=10 #Duration of mild infections, days
FracMild=0.70 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.1 #Fraction of infections that are critical
CFR=0.07 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=7 #Time from ICU admission to death, days
DurHosp=11 #Duration of hospitalization, days
# Model extension by John McCaskill to include caution
CautionFactor= 0.2 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.125 # Fraction of infections that are confirmed (detected)
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# convert above parameters to model parameters
params = {'beta_1' : Exposure/sum(x0_SCEI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SCEI3R)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SCEI3R)}
print(params)
# assign x0 and params to the model, integrate over t and plot
SCEI3R_model.initial_values = (x0_SCEI3R, t[0])
SCEI3R_model.parameters = params
SCEI3R_model.params = params.copy()
solution = SCEI3R_model.integrate(t[1::])
SCEI3R_model.plot()
# calculate time point when maximum number of people are infectious
peak_i = np.argmax(solution[:,2])
print('Peak infection (days)', t[peak_i])
```
### Compare data with SCEI3R simulation
```python
# model with generating parameters
params1 = SCEI3R_model.params.copy()
params1['c_0']=0.7
SCEI3R_model.parameters = params1
print(SCEI3R_model.parameters)
x0_fit = x0_SCEI3R.copy()
# x0_fit[2] = 0.00001
#t_fit = numpy.linspace(0, 150, 1000)
print(x0_fit)
t_fit = t
print(len(t))
SCEI3R_model.initial_values = (x0_fit, t_fit[0])
# %timeit sol_fit =SCEI3R_model.integrate(t_fit[1::]) # use magic %timeit to time
#sol_fit =SCEI3R_model.integrate(t_fit[1::])
# sol_fit = SCEI3R_model.integrate(t_fit)
sol_fit = scipy.integrate.odeint(SCEI3R_model.ode, x0_fit, t_fit[1::])
# print(len(sol_fit[0]))
#
plt.figure(figsize=(15,10))
#plt.plot(t,y_jhu[test_country][:,0], 'go',label='I_1') # infected observations
#plt.plot(t,y_jhu[test_country][:,1], 'go',label='I_2') # infected observations
#plt.plot(t,y_jhu[test_country][:,2], 'go',label='I_3') # infected observations
plt.plot(t,y_jhu[test_country][:,1]/FracRecoveredDet, 'bo',label='R') # recovered
plt.plot(t,y_jhu[test_country][:,2], 'ro',label='D') # died
plt.gca().set_prop_cycle(color=['grey','orange','green','green','green','blue','red','black',])
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0,0.06])
plt.legend()
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
### Integration and plot using scipy and matplotlib directly
```python
# solution = scipy.integrate.odeint(SCEI3R_model.ode, x0, t)
# print(len(t))
solution1 = scipy.integrate.odeint(SCEI3R_model.ode, x0_SCEI3R, t[1::])
ys = solution1.copy()
plt.figure(figsize=(15,7))
plt.subplot(1,2,1)
plt.xlabel("Time (days)")
plt.ylabel("Number per 1000000 People")
plt.ylim([0,1])
plt.plot(t[1::],ys)
plt.legend(("S","E","I1","I2","I3","R","D","Sc"))
plt.subplot(1,2,2)
plt.xlabel("Time (days)")
plt.ylabel("Number per 1000000 People")
plt.ylim([0.000001,1])
plt.semilogy()
plt.plot(t[1::],ys)
plt.legend(("S","E","I1","I2","I3","R","D","Sc"));
```
## Simulation of SC3EI3R model
```python
len(params)
```
```python
# setup time points for simulation, initial conditions and parameters
t = np.linspace(0, lastday -1, lastday)
tmax=lastday-1
# initial conditions assuming there is no natural immunity
E_0 = 0.00003
x0_SC3EI3R = [1.0-E_0, E_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # 10 states: S,E,I1,I2,I3,R,D,Ic,Sc,Ec
# Define parameters based on clinical observations Dr. Alison
Exposure=0.25 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=10 #Duration of mild infections, days
FracMild=0.70 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.1 #Fraction of infections that are critical
CFR=0.07 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=7 #Time from ICU admission to death, days
DurHosp=11 #Duration of hospitalization, days
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.125 # Fraction of infections that are confirmed (detected)
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.2 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# convert above parameters to model parameters
params = {'beta_1' : Exposure/sum(x0_SC3EI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SC3EI3R)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SC3EI3R)}
print(params)
# assign x0 and params to the model, integrate over t and plot
SC3EI3R_model.initial_values = (x0_SC3EI3R, t[0])
SC3EI3R_model.parameters = params
SC3EI3R_model.params = params.copy()
solution = SC3EI3R_model.integrate(t[1::])
SC3EI3R_model.plot()
# calculate time point when maximum number of people are infectious
peak_i = np.argmax(solution[:,2])
print('Peak infection (days)', t[peak_i])
```
```python
x0_SC3EI3R
```
### Compare data with SC3EI3R simulation
```python
# model with generating parameters
params1 = SC3EI3R_model.params.copy()
params1['c_0']=0.35
SC3EI3R_model.parameters = params1
print(SC3EI3R_model.parameters)
x0_fit = x0_SC3EI3R.copy()
# x0_fit[2] = 0.00001
#t_fit = numpy.linspace(0, 150, 1000)
print(x0_fit)
t_fit = t
print(len(t))
SC3EI3R_model.initial_values = (x0_fit, t_fit[0])
# %timeit sol_fit =SC3EI3R_model.integrate(t_fit[1::]) # use magic %timeit to time
sol_fit =SC3EI3R_model.integrate(t_fit[1::])
# sol_fit = SC3EI3R_model.integrate(t_fit)
# sol_fit = scipy.integrate.odeint(SC3EI3R_model(params_fit).ode, x0_fit, t_fit[1::])
# print(len(sol_fit[0]))
#
plt.figure(figsize=(15,10))
plt.plot(t,y_jhu[test_country][:,1]/FracRecoveredDet, 'bo',label='R') # recovered
plt.plot(t,y_jhu[test_country][:,2]/FracDeathsDet, 'ro',label='D') # died
plt.gca().set_prop_cycle(color=['grey','orange','green','green','green','blue','red','darkgreen', 'black'])
#plt.plot(t_fit[1::], sol_fit)
plt.plot(t_fit, sol_fit)
plt.ylim([0,0.06])
plt.legend()
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
## Simulation models for range of caution parameters
### SCIR, SC2IR, SCEIR, SC3EIR, SCEI3R, SC3EI3R, SC2UIR, SC3UEIR, SC3UEI3R simulations
```python
def param_copy(model):
params = model.parameters
newparams = {}
pkeys1 = list(model.params.keys())
pkeys2 = list(model.parameters.keys())
for i in range(len(pkeys1)):
newparams[pkeys1[i]] = params[pkeys2[i]]
print(newparams)
model.parameters=newparams
def param_modify(model,param,value):
params = model.parameters
newparams = {}
pkeys1 = list(model.params.keys())
pkeys2 = list(model.parameters.keys())
for i in range(len(pkeys1)):
newparams[pkeys1[i]] = params[pkeys2[i]]
newparams[param]=value
print(newparams)
model.parameters=newparams
param_modify(SCIR_model,'beta',0.721)
```
```python
SCIR_model.parameters = {'gamma':0.4}
SCIR_model.parameters
```
{beta: 0.721,
gamma: 0.4,
mu: 0.029411764705882353,
c_0: 0.85,
c_1: 0.047619047619047616,
c_2: 2000.0,
N: 1.0}
```python
def vector2params_old(b,a,g,p,u,c,k,N,modelname):
if 'I3' in modelname: # models with hospitalization
params = {
'beta_1' : b[1],
'beta_2' : b[2],
'beta_3' : b[3],
'alpha' : a,
'gamma_1': g[1],
'gamma_2': g[2],
'gamma_3': g[3],
'p_1' : p[1],
'p_2' : p[2],
'mu' : u}
elif 'E' in modelname:
params = {
'beta' : b[1], # see above for explanations
'alpha' : a,
'gamma': g[1]+g[2]*(p[1]/(g[2]+p[2]))+g[3]*(p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u)),
'mu' : u*(p[1]/(g[2]+p[2])*(p[2]/(g[3]+u)))}
else:
params = {
'beta' : b[1], # see above for explanations
'gamma': g[1]+g[2]*(p[1]/(g[2]+p[2]))+g[3]*(p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u)),
'mu' : u*(p[1]/(g[2]+p[2])*(p[2]/(g[3]+u)))}
if 'C' in modelname: # models with caution
params['c_0'] = c[0]
params['c_1'] = c[1]
if 'I3' in modelname: # models with hospitalization
params['c_2'] = c[2]
else:
params['c_2'] = c[2]*FracCritical
if 'U' in modelname: # models with economic correction to caution
params['k_u'] = k[0]
params['k_1'] = k[1]
params['k_w'] = k[2]
params['kappa'] = k[3]
params['N'] = N
return params
def params2vector_old(params):
    # lists sized to allow index 3; the previous version was one element short (IndexError)
    b = [None]*4
    g = [None]*4
    p = [None]*3
    c = [None]*3
    b[0]=0.0
    b[1]=params['beta_1']
    b[2]=params['beta_2']
    b[3]=params['beta_3']
    g[0]=0.0
    g[1]=params['gamma_1']
    g[2]=params['gamma_2']
    g[3]=params['gamma_3']
    p[0]=0.0
    p[1]=params['p_1']
    p[2]=params['p_2']
    c[0]=params['c_0']   # caution keys are c_0..c_2; 'c_3' does not exist
    c[1]=params['c_1']
    c[2]=params['c_2']
    a=params['alpha']
    u=params['mu']
    N=params['N']
    return (b,a,g,p,u,c,N)
```
```python
def vector2params(b,a,g,p,u,c,k,N,modelname):
global FracCritical
if 'I3' in modelname: # models with hospitalization
params = {
'beta_1' : b[1],
'beta_2' : b[2],
'beta_3' : b[3],
'alpha' : a,
'gamma_1': g[1],
'gamma_2': g[2],
'gamma_3': g[3],
'p_1' : p[1],
'p_2' : p[2],
'mu' : u}
elif 'E' in modelname:
irat = 1 + p[1]/(g[2]+p[2]) + p[2]/(g[3]+u)
#irat = 1
params = {
'beta' : b[1], # see above for explanations
'alpha' : a,
'gamma': (g[1]+g[2]*(p[1]/(g[2]+p[2]))+g[3]*(p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u)))/irat,
'mu' : u*(p[1]/(g[2]+p[2])*(p[2]/(g[3]+u))/irat)}
else:
irat = 1 + p[1]/(g[2]+p[2]) + p[2]/(g[3]+u)
#irat = 1
params = {
#'beta' : np.sqrt(b[1]*a), # see above for explanations
'beta' : b[1], # see above for explanations
'gamma': (g[1]+g[2]*(p[1]/(g[2]+p[2]))+g[3]*(p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u)))/irat,
'mu' : u*(p[1]/(g[2]+p[2])*(p[2]/(g[3]+u))/irat)}
if 'C' in modelname: # models with caution
params['c_0'] = c[0]
params['c_1'] = c[1]
if 'I3' in modelname: # models with hospitalization
params['c_2'] = c[2]
else:
params['c_2'] = c[2]*FracCritical
if 'U' in modelname: # models with economic correction to caution
params['k_u'] = k[0]
params['k_1'] = k[1]
params['k_w'] = k[2]
params['kappa'] = k[3]
params['N'] = N
return params
def params2vector(params):
    # lists sized to allow index 3; the previous version was one element short (IndexError)
    b = [None]*4
    g = [None]*4
    p = [None]*3
    c = [None]*3
    b[0]=0.0
    b[1]=params['beta_1']
    b[2]=params['beta_2']
    b[3]=params['beta_3']
    g[0]=0.0
    g[1]=params['gamma_1']
    g[2]=params['gamma_2']
    g[3]=params['gamma_3']
    p[0]=0.0
    p[1]=params['p_1']
    p[2]=params['p_2']
    c[0]=params['c_0']   # caution keys are c_0..c_2; 'c_3' does not exist
    c[1]=params['c_1']
    c[2]=params['c_2']
    a=params['alpha']
    u=params['mu']
    N=params['N']
    return (b,a,g,p,u,c,N)
```
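A hedged consistency check (not in the original): for an I3-type caution model the mapping should round-trip the scalar rates exactly. This assumes the parameter vectors `b, a, g, p, u, c, k, N` set up in the multimodel cell below:

```python
params_chk = vector2params(b, a, g, p, u, c, k, N, 'SC3EI3R')
b2, a2, g2, p2, u2, c2, N2 = params2vector(params_chk)
assert a2 == a and u2 == u and N2 == N
assert list(c2) == list(c)  # caution parameters c_0..c_2 round-trip unchanged
```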
```python
count = 0
def difference(datain):
dataout = np.zeros(np.shape(datain))
for i in range(1,len(datain)):
dataout[i,...] = datain[i,...]-datain[i-1,...]
return dataout
def rolling_average(datain,period):
    (tmax,n) = np.shape(datain)
    dataout = np.zeros((tmax,n),dtype=float)
    moving_av = np.zeros(n,dtype=float)
    for k in range(len(datain)):
        if k-period >= 0:
            moving_av[:] = moving_av[:] - datain[k-period,...]  # drop value leaving the window (was hard-coded k-7)
        moving_av[:] = moving_av[:] + datain[k,...]
        dataout[k] = moving_av/min(float(period),float(k+1))
    return dataout
axes = [None]
def solveplot(smodels=['SIR','SCIR','SC2IR','SEIR','SCEIR','SC3EIR','SEI3R','SCEI3R','SC3EI3R'],species='EI',tmax=100,summing='daily',averaging='weekly',fitdata = None,scale='linear',plottitle= '',label='',
newplot = True, gbrcolors=False, figsize = None):
"""
solve ODEs and plot for set of models indicated
params: dictionary of simulation parameters
scale: alternative 'linear' or 'log'
species alternatives 'all', 'EI', 'confirmed', 'deaths', 'daily confirmed', 'daily deaths'
plottitle : title for plot
label : label for curve when called as part of multicurve plot
newplot : whether to open new plot True/False
models : list of models to include, default all three of those possible
"""
global count
global axes
global FracConfirmedDet,FracRecoveredDet,FracDeathsDet
tvec=np.arange(0,tmax,1)
tvec1 = tvec[1:]
if not fitdata is None:
tmaxf = len(fitdata)
if fitdata.ndim != 2:
print("error in number of dimensions of array")
else:
print("fit data ",np.shape(fitdata))
tvecf=np.arange(0,tmaxf,1)
tvecf1 = tvecf[1:]
nmodels = len(smodels)
nm = 0
count = count+1
if newplot:
axes = [None]*nmodels
        if figsize is None:
figsize=(nmodels*8,6)
plt.figure(figsize=figsize)
# fig, axeslist = plt.subplots(1, nmodels, figsize=(nmodels*8,6))
solns = []
for smodel in smodels:
model = cmodels[smodel]
nm = nm + 1
soln = scipy.integrate.odeint(model.ode, model.initial_values[0], tvec[1::])
#Plot
# ax = axeslist[nm]
        if axes[nm-1] is None:
ax = axes[nm-1] = plt.subplot(1,nmodels,nm)
else:
ax = axes[nm-1]
        if scale == 'log': #Plot on log scale
            ax.semilogy()
            ax.set_ylim([0.00000001,1.0])
if not isinstance(species,list):
lspecies = [species]
else:
lspecies = species
if summing == 'daily':
ssoln = difference(soln)
if not fitdata is None:
sfit = difference(fitdata)
else:
ssoln = soln
if not fitdata is None:
sfit = fitdata
if averaging == 'weekly':
srsoln = rolling_average(ssoln,7)
if not fitdata is None:
srfit = rolling_average(sfit,7)
else:
srsoln = ssoln
if not fitdata is None:
srfit = sfit
for species in lspecies:
if species == 'confirmed':
suma = np.sum(srsoln[:,model.confirmed],axis=1)
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='green')
fita = srfit[1::,0]/FracConfirmedDet # confirmed cases data, corrected by FracConfirmedDet
ax.plot(tvecf1,fita,'o',label=label,color='green')
else:
ax.plot(tvec1,suma,label=label)
if species == 'recovered':
suma = np.sum(srsoln[:,model.recovered],axis=1)
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='blue')
fita = srfit[1::,1]/FracRecoveredDet # recovered cases data, corrected by FracRecoveredDet
ax.plot(tvecf1,fita,'o',label=label,color='blue')
else:
ax.plot(tvec1,suma,label=label)
elif species == 'deaths':
suma = np.sum(srsoln[:,model.deaths],axis=1)
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='red')
fita = srfit[1::,2]/FracDeathsDet # deaths cases data, corrected by FracDeathsDet
ax.plot(tvecf1,fita,'o',label=label,color='red')
else:
ax.plot(tvec1,suma,label=label)
elif species == 'deaths_x10':
suma = np.sum(srsoln[:,model.deaths],axis=1)*10
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='red')
fita = srfit[1::,2]*10/FracDeathsDet # deaths cases data, corrected by FracDeathsDet
ax.plot(tvecf1,fita,'o',label=label,color='red')
else:
ax.plot(tvec1,suma,label=label)
elif species == 'EI':
ax.plot(tvec1,soln[:,model.ei],label=label)
# ax.plot(tvec1,soln[:,model.ei],label="%s" % count)
if 'I3' in model.modelname:
plt.legend(("E","I1","I2","I3"))
elif 'E' in model.modelname:
plt.legend(("E","I"))
else:
plt.legend(("I"))
elif species == 'all':
ax.plot(tvec1,soln,label=label)
if 'I3' in model.modelname:
if 'C3'in model.modelname:
pspecies=("S","E","I1","I2","I3","R","D","Ic","Sc","Ec")
elif 'C' in model.modelname:
pspecies=("S","E","I1","I2","I3","R","D","Sc")
else:
pspecies=("S","E","I1","I2","I3","R","D")
elif 'E' in model.modelname:
if 'C3'in model.modelname:
pspecies=("S","E","I","R","D","Ic","Sc","Ec")
else:
pspecies=("S","E","I","R","D","Sc")
else:
if 'C2'in model.modelname:
pspecies=("S","I","R","D","Ic","Sc")
else:
pspecies=("S","I","R","D","Sc")
plt.legend(pspecies)
plt.xlabel("Time (days)")
plt.ylabel("Fraction of population")
plt.title(model.modelname +' '+plottitle)
solns.append(soln)
return solns
```
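A toy check of `rolling_average` on a linear ramp (period 7, as used throughout); this sketch is not from the original notebook:

```python
toy = np.arange(20, dtype=float).reshape(-1, 1)
print(rolling_average(toy, 7)[-1])  # mean of the last 7 ramp values -> [16.]
```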
```python
# Set up multimodel consistent sets of parameters
Exposure=0.25 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=10 #Duration of mild infections, days
FracMild=0.8 #Fraction of infections that are mild
FracSevere=0.15 #Fraction of infections that are severe
FracCritical=0.05 #Fraction of infections that are critical
CFR=0.02 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=7 #Time from ICU admission to death, days
DurHosp=11 #Duration of hospitalization, days
# Model extension by John McCaskill to include caution
CautionFactor= 0.3 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 14. # Duration of cautionary state of susceptibles (2 weeks)
CautionICUFrac= 0.25 # Fraction of ICUs occupied leading to 90% of susceptibles in caution
ICUFrac= 0.001 # Fraction of ICUs relative to population size N
EconomicCostOfCaution = 0.5 # Cost to economy of individual exercising caution
N=1
b=np.zeros(4) # beta
g=np.zeros(4) # gamma
p=np.zeros(3) # progression
c=np.zeros(3) # caution
k=np.zeros(4) # economic caution
a=1/IncubPeriod # transition rate from exposed to infected
b=Exposure*np.array([0,1,0,0])/N # hospitalized cases don't transmit
u=(1/TimeICUDeath)*(CFR/FracCritical) # death rate from ICU
g[3]=(1/TimeICUDeath)-u # recovery rate
p[2]=(1/DurHosp)*(FracCritical/(FracCritical+FracSevere))
g[2]=(1/DurHosp)-p[2]
g[1]=(1/DurMildInf)*FracMild
p[1]=(1/DurMildInf)-g[1]
c[0]=CautionFactor
c[1]=1/CautionRetention
c[2]=1/(N*ICUFrac*CautionICUFrac) # this is the rate coefficient giving 1/day at I3 = denominator
k[0]=c[1]
k[1]=c[1]
k[2]=c[1]
k[3]=EconomicCostOfCaution
cmodels = {'SIR':SIR_model,'SCIR':SCIR_model,'SC2IR':SC2IR_model,
'SEIR':SEIR_model,'SCEIR':SCEIR_model,'SC3EIR':SC3EIR_model,
'SEI3R':SEI3R_model,'SCEI3R':SCEI3R_model,'SC3EI3R':SC3EI3R_model,
'SC2UIR':SC2UIR_model,'SC3UEIR':SC3UEIR_model,'SC3UEI3R':SC3UEI3R_model}
smodels = ['SIR','SCIR','SC2IR','SEIR','SCEIR','SC3EIR','SEI3R','SCEI3R','SC3EI3R','SC2UIR','SC3UEIR','SC3UEI3R']
for smodel in smodels:
params_in=vector2params(b,a,g,p,u,c,k,N,smodel)
# print(smodel,params_in)
cmodels[smodel].parameters = params_in
I_0 = 0.00003
x0_SIR = [1.0-I_0, I_0, 0.0, 0.0]
x0_SCIR = [1.0-I_0, I_0, 0.0, 0.0, 0.0]
x0_SC2IR = [1.0-I_0, I_0, 0.0, 0.0, 0.0, 0.0]
SIR_model.initial_values = (x0_SIR, t[0])
SCIR_model.initial_values = (x0_SCIR, t[0])
SC2IR_model.initial_values = (x0_SC2IR, t[0])
x0_SEIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0]
x0_SCEIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0]
x0_SC3EIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0]
SEIR_model.initial_values = (x0_SEIR, t[0])
SCEIR_model.initial_values = (x0_SCEIR, t[0])
SC3EIR_model.initial_values = (x0_SC3EIR, t[0])
x0_SEI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0]
x0_SCEI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0]
x0_SC3EI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # 10 states: S,E,I1,I2,I3,R,D,Ic,Sc,Ec
SEI3R_model.initial_values = (x0_SEI3R, t[0])
SCEI3R_model.initial_values = (x0_SCEI3R, t[0])
SC3EI3R_model.initial_values = (x0_SC3EI3R, t[0])
x0_SC2UIR = [1.0-I_0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
x0_SC3UEIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
x0_SC3UEI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
SC2UIR_model.initial_values = (x0_SC2UIR, t[0])
SC3UEIR_model.initial_values = (x0_SC3UEIR, t[0])
SC3UEI3R_model.initial_values = (x0_SC3UEI3R, t[0])
```
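A hedged sanity check (not in the original) that each initial-condition vector describes a unit population; the U-models carry $W(0)=1$ as an extra, non-population state, and pygom's `initial_values` getter is assumed to return the `(x0, t0)` pair set above:

```python
for name in smodels:
    x0m = cmodels[name].initial_values[0]
    pop = sum(x0m) - (1.0 if 'U' in name else 0.0)  # subtract the economic state W
    assert abs(pop - 1.0) < 1e-9, name
```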
```python
import os
os.getcwd()
```
```python
smodels1 = ['SIR','SEIR','SEI3R']
smodels2 = ['SC2IR','SC3EIR','SC3EI3R']
# tmax = lastday-1
tmax = 300
# caution standard parameters
CautionFactor= 0.2 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Sensitivity scans
CautionFactors= [1.0,0.9,0.8,0.7,0.6,0.5] # Fractional reduction of exposure rate for cautioned individuals
CautionFactors2= [0.5,0.4,0.3,0.2,0.1,0.0] # Fractional reduction of exposure rate for cautioned individuals
CautionRetentions= [7.,14.,28.,56.,112.,224.] # Duration of cautionary state of susceptibles
CautionICUFracs= [1.0,0.75,0.5,0.25,0.125,0.0625] # Fraction of ICUs occupied leading to 90% of susceptibles in caution
import os
cwd=os.getcwd()
newplot = True
# solveplot iterates over the whole model list itself, so a single call suffices
label_c = ''
plottitle = 'Without Caution'
solns=solveplot(smodels1,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
irat = 1 + p[1]/(g[2]+p[2]) + p[2]/(g[3]+u)
drat = (p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u))/irat
newplot = True
for s in range(6):
for smodel in smodels2:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactors[s],'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionFactor %s' % CautionFactors[s]
plottitle = 'Caution Factor'
solns=solveplot(smodels2,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig3a.pdf",bbox_inches='tight')
newplot = True
for s in range(6):
for smodel in smodels2:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactors2[s],'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionFactor %s' % CautionFactors2[s]
plottitle = 'Caution Factor'
solns=solveplot(smodels2,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig3b.pdf",bbox_inches='tight')
newplot = True
#for i in reversed(range(5)):
for s in range(6):
for smodel in smodels2:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetentions[s]}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionRetention %s'% CautionRetentions[s]
plottitle = 'Caution Retention'
solns=solveplot(smodels2,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig3c.pdf",bbox_inches='tight')
newplot = True
for s in range(6):
for smodel in smodels2:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetentions[s]}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFracs[s])}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFracs[s])}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFracs[s])}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionICUFrac %s'% CautionICUFracs[s]
plottitle = 'Caution ICUFrac'
solns=solveplot(smodels2,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig3d.pdf",bbox_inches='tight')
# return parameters to standard set
for smodel in smodels2:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1/CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFrac)}
```
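The caution-parameter assignment above repeats verbatim in each scan. A small helper in the spirit of the following sketch (a hypothetical `set_caution_params`, assuming the `cmodels` dictionary and the `N`, `ICUFrac` and `drat` values defined above; for the model sets scanned here the nested `'E'`/`'I3'` tests reduce to a single `'I3'` test) would remove the duplication:
```python
def set_caution_params(smodel, c0, retention, icufrac):
    """Sketch of the repeated caution-parameter logic above (not part of
    the original notebook). Models with explicit I3 compartments take c_2
    directly; the others rescale the ICU threshold by drat."""
    model = cmodels[smodel]
    if 'C' not in model.modelname:
        print("ERROR")  # mirrors the error reporting used in the scans
        return
    model.parameters = {'c_0': c0, 'c_1': 1. / retention}
    if 'I3' in model.modelname:
        model.parameters = {'c_2': 1. / (N * ICUFrac * icufrac)}
    else:
        model.parameters = {'c_2': drat / (N * ICUFrac * icufrac)}
```
Each scan body then reduces to a single call such as `set_caution_params(smodel, CautionFactors[s], CautionRetention, CautionICUFrac)`.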
```python
smodels = ['SCIR','SC2IR','SCEIR','SC3EIR','SCEI3R','SC3EI3R']
# tmax = lastday-1
tmax = 300
# caution standard parameters
CautionFactor= 0.2 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Sensitivity scans
CautionFactors= [1.0,0.9,0.8,0.7,0.6,0.5] # Fractional reduction of exposure rate for cautioned individuals
CautionFactors2= [0.5,0.4,0.3,0.2,0.1,0.0] # Fractional reduction of exposure rate for cautioned individuals
CautionRetentions= [7.,14.,28.,56.,112.,224.] # Duration of cautionary state of susceptibles
CautionICUFracs= [1.0,0.75,0.5,0.25,0.125,0.0625] # Fraction of ICUs occupied leading to 90% of susceptibles in caution
irat = 1 + p[1]/(g[2]+p[2]) + p[2]/(g[3]+u)
drat = (p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u))/irat
newplot = True
for s in range(6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactors[s],'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionFactor %s' % CautionFactors[s]
plottitle = 'Caution Factor'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
#plt.savefig(cwd+"/figures/fig3sa.pdf",bbox_inches='tight')
newplot = True
for s in range(6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactors2[s],'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionFactor %s' % CautionFactors2[s]
plottitle = 'Caution Factor'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
#plt.savefig(cwd+"/figures/fig3sb.pdf",bbox_inches='tight')
newplot = True
for s in range(6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetentions[s]}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionRetention %s'% CautionRetentions[s]
plottitle = 'Caution Retention'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
#plt.savefig(cwd+"/figures/fig3sc.pdf",bbox_inches='tight')
newplot = True
for s in range(6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetentions[s]}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFracs[s])}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFracs[s])}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFracs[s])}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionICUFrac %s'% CautionICUFracs[s]
plottitle = 'Caution ICUFrac'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
#plt.savefig(cwd+"/figures/fig3sd.pdf",bbox_inches='tight')
# return parameters to standard set
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1/CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFrac)}
```
```python
smodels = ['SCEIR','SC3EIR','SCEI3R','SC3EI3R']
# tmax = lastday-1
tmax = 300
# caution standard parameters
CautionFactor= 0.25 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 45. # Duration of cautionary state of susceptibles (45 days)
CautionICUFrac= 0.05 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Sensitivity scans
CautionFactors= [1.0,0.9,0.75,0.5,0.25,0.0] # Fractional reduction of exposure rate for cautioned individuals
CautionFactors2= [0.4,0.3,0.2,0.1,0.0] # Fractional reduction of exposure rate for cautioned individuals
CautionRetentions= [7.,14.,28.,56.,112.,224.] # Duration of cautionary state of susceptibles
CautionICUFracs= [1.0,0.75,0.5,0.25,0.125,0.0625] # Fraction of ICUs occupied leading to 90% of susceptibles in caution
newplot = True
for s in range(6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
if 'E' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactors[s],'c_1':1./CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_0':CautionFactors[s],'c_1':1./CautionRetention,'c_2':FracSevere*FracCritical/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
else:
print("ERROR")
label_c = 'CautionFactor %s' % CautionFactors[s]
plottitle = 'Caution Factor'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
newplot = True
for s in range(5):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
if 'E' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactors2[s],'c_1':1./CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_0':CautionFactors2[s],'c_1':1./CautionRetention,'c_2':FracSevere*FracCritical/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
else:
print("ERROR")
label_c = 'CautionFactor %s' % CautionFactors2[s]
plottitle = 'Caution Factor'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
newplot = True
#for i in reversed(range(5)):
for s in range(5):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
if 'E' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetentions[s],'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetentions[s],'c_2':FracSevere*FracCritical/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionRetention %s'% CautionRetentions[s]
plottitle = 'Caution Retention'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
newplot = True
for s in range(5):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1/CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFracs[s])}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionICUFrac %s'% CautionICUFracs[s]
plottitle = 'Caution ICUFrac'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
# return parameters to standard set
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1/CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFrac)}
```
```python
smodels = ['SCIR','SC2IR','SCEIR','SC3EIR','SCEI3R','SC3EI3R']
# tmax = lastday-1
tmax = 300
# caution standard parameters
CautionFactor= 0.2 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Sensitivity scans
CautionFactors= [1.0,0.9,0.75,0.5,0.25,0.0] # Fractional reduction of exposure rate for cautioned individuals
CautionRetentions= [7.,14.,28.,56.,112.,224.] # Duration of cautionary state of susceptibles
CautionICUFracs= [1.0,0.75,0.5,0.25,0.125,0.0625] # Fraction of ICUs occupied leading to 90% of susceptibles in caution
newplot = True
for s in range(6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
if 'E' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactors[s],'c_1':1./CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_0':CautionFactors[s],'c_1':1./CautionRetention,'c_2':FracSevere*FracCritical/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
else:
print("ERROR")
label_c = 'CautionFactor %s' % CautionFactors[s]
plottitle = 'Caution Factor'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
newplot = True
for s in range(6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
if 'E' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetentions[s],'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetentions[s],'c_2':FracSevere*FracCritical/(N*ICUFrac*CautionICUFrac)}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionRetention %s'% CautionRetentions[s]
plottitle = 'Caution Retention'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
newplot = True
for s in range(6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1/CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFracs[s])}
# print(smodel,cmodels[smodel].parameters)
label_c = 'CautionICUFrac %s'% CautionICUFracs[s]
plottitle = 'Caution ICUFrac'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
# return parameters to standard set
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1/CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFrac)}
```
```python
# smodels = ['SC2IR','SC2UIR','SC3EIR','SC3UEIR','SC3EI3R','SC3UEI3R']
smodels = ['SC2UIR','SC3UEIR','SC3UEI3R']
# tmax = lastday-1
tmax = 300
# caution standard parameters
CautionFactor= 0.4 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Sensitivity scans
CautionFactors= [1.0,0.9,0.8,0.7,0.6,0.5] # Fractional reduction of exposure rate for cautioned individuals
CautionFactors2= [0.5,0.4,0.3,0.2,0.1,0.0] # Fractional reduction of exposure rate for cautioned individuals
CautionRetentions= [7.,14.,28.,56.,112.,224.][::-1] # Duration of cautionary state of susceptibles, scanned in reverse (list.reverse() mutates in place and returns None)
CautionICUFracs= [1.0,0.75,0.5,0.25,0.125,0.0625] # Fraction of ICUs occupied leading to 90% of susceptibles in caution
ktime = 56.
ktimes= [1., 7.,14.,28.,56.,112.] # Timescales (days) scanned for the economic rates k_u, k_1 and k_w
kappas = [1.,0.8,0.6,0.4,0.2,0.] # Economic cost of caution
kappa = 0.5
irat = 1 + p[1]/(g[2]+p[2]) + p[2]/(g[3]+u)
drat = (p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u))/irat
newplot = True
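# s == -1 is a baseline run with the economic influence switched off
# (k_u = 0, kappa = 0); s >= 0 scans kappa, the fractional economic cost
# of caution, at fixed rates k_u = k_1 = k_w = 1/ktime.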
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktime,'k_w':1./ktime,'kappa':kappas[s]}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'kappa %s' % kappas[s]
plottitle = 'Cost of caution kappa'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig5sa.pdf",bbox_inches='tight')
newplot = True
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktimes[s],'k_1':1./ktime,'k_w':1./ktime, 'kappa':kappa}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'k_u time %s' % ktimes[s]
plottitle = 'Uncautionable decay k_u'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig5sb.pdf",bbox_inches='tight')
newplot = True
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktimes[s],'k_w':1./ktime, 'kappa':kappa}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'k_1 time %s' % ktimes[s]
plottitle = 'Uncautionable decay k_1'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig5sc.pdf",bbox_inches='tight')
newplot = True
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktime,'k_w':1./ktimes[s], 'kappa':kappa}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'k_w time %s' % ktimes[s]
plottitle = 'Economic relaxation k_w'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig5sd.pdf",bbox_inches='tight')
# return parameters to standard set
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
if 'U' in model.modelname:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktime,'k_w':1./ktime, 'kappa':kappa}
```
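The `plt.savefig` calls above write into `cwd + "/figures"`; if that directory may not yet exist, a standard-library guard such as this avoids a `FileNotFoundError`:
```python
import os
# create the figures directory if needed; a no-op when it already exists
os.makedirs(os.path.join(cwd, "figures"), exist_ok=True)
```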
```python
# smodels = ['SC2IR','SC2UIR','SC3EIR','SC3UEIR','SC3EI3R','SC3UEI3R']
smodels = ['SC2UIR','SC3UEIR','SC3UEI3R']
# tmax = lastday-1
tmax = 600
# caution standard parameters
CautionFactor= 0.2 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (about two months)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Sensitivity scans
CautionFactors= [1.0,0.9,0.8,0.7,0.6,0.5] # Fractional reduction of exposure rate for cautioned individuals
CautionFactors2= [0.5,0.4,0.3,0.2,0.1,0.0] # Fractional reduction of exposure rate for cautioned individuals
CautionRetentions= [7.,14.,28.,56.,112.,224.][::-1] # Duration of cautionary state of susceptibles, scanned in reverse (list.reverse() mutates in place and returns None)
CautionICUFracs= [1.0,0.75,0.5,0.25,0.125,0.0625] # Fraction of ICUs occupied leading to 90% of susceptibles in caution
ktime = 56.
ktimes= [1., 7.,14.,28.,56.,112.] # Timescales (days) scanned for the economic rates k_u, k_1 and k_w
kappas = [1.,0.8,0.6,0.4,0.2,0.] # Economic cost of caution
kappa = 0.5
irat = 1 + p[1]/(g[2]+p[2]) + p[2]/(g[3]+u)
drat = (p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u))/irat
newplot = True
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktime,'k_w':1./ktime,'kappa':kappas[s]}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'kappa %s' % kappas[s]
plottitle = 'Cost of caution kappa'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig6sa.pdf",bbox_inches='tight')
newplot = True
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktimes[s],'k_1':1./ktime,'k_w':1./ktime, 'kappa':kappa}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'k_u time %s' % ktimes[s]
plottitle = 'Uncautionable decay k_u'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig6sb.pdf",bbox_inches='tight')
newplot = True
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktimes[s],'k_w':1./ktime, 'kappa':kappa}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'k_1 time %s' % ktimes[s]
plottitle = 'Uncautionable decay k_1'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig6sc.pdf",bbox_inches='tight')
newplot = True
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktime,'k_w':1./ktimes[s], 'kappa':kappa}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'k_w time %s' % ktimes[s]
plottitle = 'Economic relaxation k_w'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
plt.savefig(cwd+"/figures/fig6sd.pdf",bbox_inches='tight')
# return parameters to standard set
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
if 'U' in model.modelname:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktime,'k_w':1./ktime, 'kappa':kappa}
```
```python
smodels = ['SC3UEI3R']
# tmax = lastday-1
tmax = 300
# caution standard parameters
CautionFactor= 0.4 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (about two months)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Sensitivity scans
CautionFactors= [1.0,0.9,0.8,0.7,0.6,0.5] # Fractional reduction of exposure rate for cautioned individuals
CautionFactors2= [0.5,0.4,0.3,0.2,0.1,0.0] # Fractional reduction of exposure rate for cautioned individuals
CautionRetentions= [7.,14.,28.,56.,112.,224.][::-1] # Duration of cautionary state of susceptibles, scanned in reverse (list.reverse() mutates in place and returns None)
CautionICUFracs= [1.0,0.75,0.5,0.25,0.125,0.0625] # Fraction of ICUs occupied leading to 90% of susceptibles in caution
ktime = 56.
ktimes= [2.5, 5.,10.,20.,40.,80.] # Timescales (days) scanned for the economic rates k_u, k_1 and k_w
kappas = [1.,0.8,0.6,0.4,0.2,0.] # Economic cost of caution
kappa = 0.5
irat = 1 + p[1]/(g[2]+p[2]) + p[2]/(g[3]+u)
drat = (p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u))/irat
newplot = True
for s in range(-1,6):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
print("ERROR")
if 'U' in smodel:
if s == -1:
cmodels[smodel].parameters = {'k_u':0.,'k_1':1.,'k_w':1.,'kappa':0.}
else:
cmodels[smodel].parameters = {'k_u':1./ktimes[s],'k_1':1./90.,'k_w':1./90., 'kappa':kappa}
if s == -1:
label_c = 'no economic influence'
else:
label_c = 'k_u 1/%s' % ktimes[s]
plottitle = 'Uncautionable decay k_u'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
#plt.savefig(cwd+"/figures/fig5a.pdf",bbox_inches='tight')
# return parameters to standard set
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention}
if 'E' in model.modelname:
if 'I3' in model.modelname:
cmodels[smodel].parameters = {'c_2':1./(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
else:
cmodels[smodel].parameters = {'c_2':drat/(N*ICUFrac*CautionICUFrac)}
if 'U' in model.modelname:
cmodels[smodel].parameters = {'k_u':1./ktime,'k_1':1./ktime,'k_w':1./ktime, 'kappa':kappa}
```
```python
# more extensive parameter screen for one model
smodels = ['SCEI3R']
# tmax = lastday-1
tmax = 300
# caution standard parameters
CautionFactor= 0.2 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 21. # Duration of cautionary state of susceptibles (3 weeks)
CautionICUFrac= 0.3 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
# Sensitivity scans
CautionFactors= [1.0,0.5,0.1] # Fractional reduction of exposure rate for cautioned individuals
CautionRetentions= [7.,28.,112.] # Durations of cautionary state of susceptibles scanned
CautionICUFracs= [0.0625,0.25,0.75] # Fraction of ICUs occupied leading to 90% of susceptibles in caution
for s2 in range(3):
newplot = True
for s3 in range(3):
newplot = True
for s1 in range(3):
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactors[s1],'c_1':1./CautionRetentions[s2],'c_2':1./(N*ICUFrac*CautionICUFracs[s3])}
else:
print("ERROR")
label_c = ('CF %s' % CautionFactors[s1]) + (' CICUF %s'% CautionICUFracs[s3])
plottitle = 'Caution Factor'
solns=solveplot(smodels,'confirmed',tmax,'daily','daily',None,'linear',plottitle,label_c,newplot)
plt.legend()
newplot = False
# return parameters to standard set
for smodel in smodels:
model = cmodels[smodel]
if 'C' in model.modelname:
cmodels[smodel].parameters = {'c_0':CautionFactor,'c_1':1./CautionRetention,'c_2':1./(N*ICUFrac*CautionICUFrac)}
```
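The triple nesting above can be written more compactly with `itertools.product`; a minimal sketch assuming the same scan lists, `cmodels`, and the `solveplot` signature used throughout:
```python
from itertools import product

# scan retention (s2) and ICU fraction (s3) as separate figures, with the
# caution factor (s1) varied within each figure, as in the nested loops above
for s2, s3 in product(range(3), range(3)):
    newplot = True
    for s1 in range(3):
        cmodels['SCEI3R'].parameters = {
            'c_0': CautionFactors[s1],
            'c_1': 1. / CautionRetentions[s2],
            'c_2': 1. / (N * ICUFrac * CautionICUFracs[s3])}
        label_c = 'CF %s CICUF %s' % (CautionFactors[s1], CautionICUFracs[s3])
        solveplot(['SCEI3R'], 'confirmed', tmax, 'daily', 'daily', None,
                  'linear', 'Caution Factor', label_c, newplot)
        newplot = False
    plt.legend()
```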
```python
# the parameter keys are stored not as strings but as sympy symbols,
# so they cannot be looked up externally by string name
# from sympy import Symbol
# cmodels['SCIR'].parameters[Symbol('c_0')] # produces KeyError: c_0
types= [type(k) for k in cmodels['SCIR'].parameters.keys()]
print(types)
```
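If a value must be looked up by name anyway, comparing against the string form of each Symbol key works; a small sketch assuming the `cmodels` dictionary above:
```python
def get_param(smodel, name):
    """Return a parameter value from a sympy-Symbol-keyed dict by string name."""
    for k, v in cmodels[smodel].parameters.items():
        if str(k) == name:
            return v
    raise KeyError(name)

# e.g. get_param('SCIR', 'c_0')
```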
# Parameter fitting
## Fitting via sliders
### SC3EIR Model
```python
len(t)
```
```python
model = 'SC3EIR'
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params={'beta':0.25,'alpha':1./5.,'gamma':0.1,'mu':0.05,'c_0':0.3, 'c_1':1/14., 'c_2':2000}
def slidefitplot(beta,alpha,gamma,mu,c_0,c_1,c_2,logI_0):
params={ 'beta':beta, 'alpha':alpha, 'gamma':gamma, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2}
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.]
cmodels[model].initial_values = (x0,t[0])
solveplot(smodels=[model],species=['confirmed','recovered','deaths'],tmax=len(t),summing='daily',fitdata=y_jhu[test_country],scale='linear',plottitle= '',label='confirmed',newplot = True)
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EIR.pk :
```python
interact(slidefitplot,
beta=FloatSlider(min=0,max=1,step=0.01,value=params['beta'],description='beta',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
alpha=FloatSlider(min=0,max=1,step=0.01,value=params['alpha'],description='alpha',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
gamma=FloatSlider(min=0,max=1,step=0.01,value=params['gamma'],description='gamma',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=5000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False))
```
interactive(children=(FloatSlider(value=0.67, continuous_update=False, description='beta', layout=Layout(width…
<function __main__.slidefitplot(beta, alpha, gamma, mu, c_0, c_1, c_2, logI_0)>
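`interact` displays the sliders but returns only the wrapped function, so the final slider settings cannot be recovered from its return value. The cells below therefore switch to `interactive` with `display(w)`, whose return value retains them:
```python
# pattern used below: w = interactive(slidefitplot, ...); display(w)
# after adjusting the sliders, the manually fitted values are recovered with
params = w.kwargs
```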
### SC3EI3R Model
#### Germany
```python
# assumed data starting on firstdate
test_country='Germany'
N = 80000000
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
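# stack confirmed/recovered/deaths and normalise by the population N,
# giving per-capita fractions that match the model state variables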
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
```python
len(t)
```
```python
(1.0/TimeICUDeath)*(CFR/FracCritical)
```
```python
model = 'SC3EI3R'
# Define parameters based on clinical observations Dr. Alison
Exposure=0.4 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=10 #Duration of mild infections, days : includes time for reg. of recovery
FracMild=0.7 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.1 #Fraction of infections that are critical
CFR=0.05 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=5 #Time from ICU admission to death, days
DurHosp=4 #Duration of hospitalization, days : includes 4 day reg of recovery
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=1.0 # Fraction of infected individuals confirmed : plots made with this parameter NYI
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.1 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (about two months)
CautionICUFrac= 0.1 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.001 # Fraction of ICUs relative to population size N
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params = {'beta_1' : Exposure/sum(x0_SC3EI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SC3EI3R)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SC3EI3R)}
print(params)
SC3EI3R_model.parameters = params
def slidefitplot(beta_1,mu,c_0,c_1,c_2,logI_0):
params={ 'beta_1':beta_1, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2}
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,0.]
cmodels[model].initial_values = (x0,t[0])
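    # equal relative weights for the confirmed, recovered and deaths columns
    # when overlaying the fit data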
weights=np.array([1.,1.,1.])
solveplot(smodels=[model],species=['confirmed','recovered','deaths_x10'],tmax=len(t),summing='daily',averaging='weekly',fitdata=y_jhu[test_country]*weights,scale='linear',plottitle= '',label='confirmed',newplot = True, figsize = (15,15))
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk :
{'beta_1': 0.4, 'mu': 0.1, 'c_0': 0.1, 'c_1': 0.016666666666666666, 'c_2': 10000.0}
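For reference, the clinical parameters above fix the staged rates as follows (a worked check of the formulas in the `else:` branch, using the values for Germany):
```python
# from IncubPeriod=5, DurMildInf=10, FracMild=0.7, FracSevere=0.2,
# FracCritical=0.1, CFR=0.05, TimeICUDeath=5, DurHosp=4
alpha   = 1/5                    # 0.2    /day, leaving E
gamma_1 = (1/10)*0.7             # 0.07   /day, mild recovery
p_1     = (1/10)*(1-0.7)         # 0.03   /day, mild -> severe
gamma_2 = (1/4)*(1-0.1/0.3)      # ~0.167 /day, severe recovery
p_2     = (1/4)*(0.1/0.3)        # ~0.083 /day, severe -> critical
gamma_3 = (1/5)*(1-0.05/0.1)     # 0.1    /day, critical recovery
mu      = (1/5)*(0.05/0.1)       # 0.1    /day, critical -> death, matching the printed mu above
```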
```python
w =interactive(slidefitplot,
beta_1=FloatSlider(min=0,max=1,step=0.01,value=params['beta_1'],description='beta_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=20000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False)
)
display(w)
```
interactive(children=(FloatSlider(value=0.4, continuous_update=False, description='beta_1', layout=Layout(widt…
```python
params=w.kwargs
print(params)
```
```python
```
#### Spain
```python
# assumed data starting on firstdate
test_country='Spain'
N = 47000000 # approximate population of Spain
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0, 107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, 128.0, 129.0, 130.0, 131.0, 132.0, 133.0, 134.0, 135.0, 136.0, 137.0, 138.0, 139.0, 140.0, 141.0, 142.0, 143.0, 144.0, 145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 153.0, 154.0, 155.0, 156.0, 157.0, 158.0, 159.0, 160.0, 161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 169.0, 170.0, 171.0, 172.0, 173.0, 174.0, 175.0, 176.0, 177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0, 193.0, 194.0, 195.0, 196.0, 197.0, 198.0, 199.0, 200.0, 201.0, 202.0, 203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 209.0, 210.0, 211.0, 212.0, 213.0, 214.0, 215.0, 216.0, 217.0, 218.0]
days 0 to 222 data stored in y_jhu
```python
len(t)
```
222
```python
(1.0/TimeICUDeath)*(CFR/FracCritical)
```
0.1
```python
model = 'SC3EI3R'
# Define parameters based on clinical observations Dr. Alison
Exposure=0.4 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=10 #Duration of mild infections, days : includes time for reg. of recovery
FracMild=0.7 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.1 #Fraction of infections that are critical
CFR=0.05 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=5 #Time from ICU admission to death, days
DurHosp=4 #Duration of hospitalization, days : includes 4 day reg of recovery
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.5 # Fraction of confirmed individuals measured : plots made with this parameter NYI
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.1 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (about two months)
CautionICUFrac= 0.1 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.001 # Fraction of ICUs relative to population size N
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params = {'beta_1' : Exposure/sum(x0_SC3EI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SC3EI3R)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SC3EI3R)}
print(params)
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
def slidefitplot(beta_1,mu,c_0,c_1,c_2,logI_0):
params={ 'beta_1':beta_1, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2}
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,0.]
cmodels[model].initial_values = (x0,t[0])
weights=np.array([1.,1.,1.])
solveplot(smodels=[model],species=['confirmed','recovered','deaths_x10'],tmax=len(t),summing='daily',averaging='weekly',fitdata=y_jhu[test_country]*weights,scale='linear',plottitle= '',label='confirmed',newplot = True, figsize = (15,15))
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk :
{'beta_1': 0.48, 'mu': 0.1, 'c_0': 0.1, 'c_1': 0.016, 'c_2': 3819.0}
dumped params to /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk
```python
w =interactive(slidefitplot,
beta_1=FloatSlider(min=0,max=1,step=0.01,value=params['beta_1'],description='beta_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=20000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False)
)
display(w)
```
interactive(children=(FloatSlider(value=0.48, continuous_update=False, description='beta_1', layout=Layout(wid…
```python
params=w.kwargs
print(params)
```
#### Italy
```python
# assumed data starting on firstdate
test_country='Italy'
N = 60360000 # approximate population of Italy
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
```python
model = 'SC3EI3R'
# Define parameters based on clinical observations Dr. Alison
Exposure=0.4 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=8 #Duration of mild infections, days
FracMild=0.65 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.15 #Fraction of infections that are critical
CFR=0.1 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=4 #Time from ICU admission to death, days
DurHosp=4 #Duration of hospitalization, days
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.5 # Fraction of infected individuals confirmed
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.1 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (about two months)
CautionICUFrac= 0.1 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.001 # Fraction of ICUs relative to population size N
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params = {'beta_1' : Exposure/sum(x0_SC3EI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SC3EI3R)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SC3EI3R)}
print(params)
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
def slidefitplot(beta_1,mu,c_0,c_1,c_2,logI_0):
params={ 'beta_1':beta_1, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2}
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,0.]
cmodels[model].initial_values = (x0,t[0])
weights=np.array([1.,1.,1.])
solveplot(smodels=[model],species=['confirmed','recovered','deaths_x10'],tmax=len(t),summing='daily',averaging='weekly',fitdata=y_jhu[test_country]*weights,scale='linear',plottitle= '',label='confirmed',newplot = True, figsize = (15,15))
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk :
{'beta_1': 0.48, 'mu': 0.1, 'c_0': 0.1, 'c_1': 0.016, 'c_2': 3819.0}
dumped params to /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk
```python
w =interactive(slidefitplot,
beta_1=FloatSlider(min=0,max=1,step=0.01,value=params['beta_1'],description='beta_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=20000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False)
)
display(w)
```
interactive(children=(FloatSlider(value=0.48, continuous_update=False, description='beta_1', layout=Layout(wid…
```python
params=w.kwargs
print(params)
```
Note that the manual fit assumes 50% detection of confirmed and recovered cases and 100% detection of deaths.
It appears that Italy's registration of recoveries, although of the right overall magnitude, is markedly delayed; the reporting delays should be checked.
Italy also had at least two successive regional infection waves, visible as the dual peaks in the confirmed data, so the epidemic is not easy to fit with a single model.
See below for a simulation of the second peak.
```python
w =interactive(slidefitplot,
beta_1=FloatSlider(min=0,max=1,step=0.01,value=params['beta_1'],description='beta_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=20000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False)
)
display(w)
```
#### Brazil
```python
# assumed data starting on firstdate
test_country='Brazil'
N = 210000000
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
```python
model = 'SC3EI3R'
# Define parameters based on clinical observations Dr. Alison
Exposure=0.4 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=8 #Duration of mild infections, days
FracMild=0.65 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.15 #Fraction of infections that are critical
CFR=0.1 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=4 #Time from ICU admission to death, days
DurHosp=8 #Duration of hospitalization, days
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0. # Fraction of infected individuals confirmed
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.1 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (about two months)
CautionICUFrac= 0.1 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.001 # Fraction of ICUs relative to population size N
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params = {'beta_1' : Exposure/sum(x0_SC3EI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SC3EI3R)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SC3EI3R)}
print(params)
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
def slidefitplot(beta_1,mu,c_0,c_1,c_2,logI_0):
params={ 'beta_1':beta_1, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2}
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,0.]
cmodels[model].initial_values = (x0,t[0])
weights=np.array([1.,1.,1.])
solveplot(smodels=[model],species=['confirmed','recovered','deaths_x10'],tmax=len(t),summing='cumulative',averaging='weekly',fitdata=y_jhu[test_country]*weights,scale='linear',plottitle= '',label='confirmed',newplot = True, figsize = (15,15))
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk :
{'beta_1': 0.48, 'mu': 0.1, 'c_0': 0.1, 'c_1': 0.016, 'c_2': 3819.0}
dumped params to /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk
```python
w =interactive(slidefitplot,
beta_1=FloatSlider(min=0,max=1,step=0.01,value=params['beta_1'],description='beta_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=20000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False)
)
display(w)
```
interactive(children=(FloatSlider(value=0.48, continuous_update=False, description='beta_1', layout=Layout(wid…
The Brazil data show that deaths are not as delayed as assumed; the process of disease progression there is perhaps less clearly documented.
#### Russia
```python
# assumed data starting on firstdate
test_country='Russia'
N = 144500000
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
```python
model = 'SC3EI3R'
# Define parameters based on clinical observations Dr. Alison
Exposure=0.4 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=8 #Duration of mild infections, days
FracMild=0.65 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.15 #Fraction of infections that are critical
CFR=0.1 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=4 #Time from ICU admission to death, days
DurHosp=8 #Duration of hospitalization, days
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0. # Fraction of infected individuals confirmed
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.1 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (about two months)
CautionICUFrac= 0.1 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.002 # Fraction of ICUs relative to population size N
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params = {'beta_1' : Exposure/sum(x0_SC3EI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SC3EI3R)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SC3EI3R)}
print(params)
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
def slidefitplot(beta_1,mu,c_0,c_1,c_2,logI_0):
params={ 'beta_1':beta_1, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2}
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,0.]
cmodels[model].initial_values = (x0,t[0])
weights=np.array([1.,1.,1.])
solveplot(smodels=[model],species=['confirmed','recovered','deaths_x10'],tmax=len(t),summing='cumulative',averaging='weekly',fitdata=y_jhu[test_country]*weights,scale='linear',plottitle= '',label='confirmed',newplot = True, figsize = (15,15))
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk :
{'beta_1': 0.48, 'mu': 0.1, 'c_0': 0.1, 'c_1': 0.016, 'c_2': 3819.0}
dumped params to /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3EI3R.pk
```python
w =interactive(slidefitplot,
beta_1=FloatSlider(min=0,max=1,step=0.01,value=params['beta_1'],description='beta_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=20000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False)
)
display(w)
```
interactive(children=(FloatSlider(value=0.48, continuous_update=False, description='beta_1', layout=Layout(wid…
### SC3UEIR Model
```python
# assumed data starting on firstdate
test_country='US'
N = 66650000
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
```python
model = 'SC3UEIR'
I_0 = 0.00003
x0_SC3UEIR = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
SC3UEIR_model.initial_values = (x0_SC3UEIR, t[0])
# Define parameters based on clinical observations by Dr. Alison
Exposure=0.4 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=8 #Duration of mild infections, days
FracMild=0.65 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.15 #Fraction of infections that are critical
CFR=0.1 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=4 #Time from ICU admission to death, days
DurHosp=8 #Duration of hospitalization, days
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.5 # Fraction of infected individuals confirmed (detected)
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.1 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (days)
CautionICUFrac= 0.1 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.001 # Fraction of ICUs relative to population size N
# Model extension by John McCaskill to include economic influence on caution
EconomicCostOfCaution= 0.5 # Fractional reduction of economic contribution for cautioned individuals
p = [0,(1.0/DurMildInf)-(1.0/DurMildInf)*FracMild, (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere))]
g = [0,(1.0/DurMildInf)*FracMild, (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
(1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical)]
u = (1.0/TimeICUDeath)*(CFR/FracCritical)
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params = {'beta' : Exposure/sum(x0_SC3UEIR),
'alpha' : 1.0/IncubPeriod,
'gamma' : g[1]+g[2]*(p[1]/(g[2]+p[2]))+g[3]*(p[1]/(g[2]+p[2]))*(p[2]/(g[3]+u)),
'mu' : u*(p[1]/(g[2]+p[2])*(p[2]/(g[3]+u))),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(sum(x0_SC3UEIR)*ICUFrac*CautionICUFrac),
'N' : sum(x0_SC3UEIR),
'k_u' : 1.0/CautionRetention,
'k_1' : 1.0/CautionRetention,
'k_w' : 1.0/CautionRetention,
'kappa' : EconomicCostOfCaution}
print(params)
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
# solution = SCIR_model.integrate(t[1::])
def slidefitplot(beta,alpha,gamma,mu,c_0,c_1,c_2,logI_0,k_u,k_1,k_w,kappa):
params={ 'beta':beta, 'alpha':alpha, 'gamma':gamma, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2, 'k_u':k_u, 'k_1':k_1, 'k_w':k_w, 'kappa':kappa}
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,1.]
cmodels[model].initial_values = (x0,t[0])
weights=np.array([1.,1.,1.])
solveplot(smodels=[model],species=['confirmed','recovered','deaths_x10'],tmax=len(t),summing='daily',averaging='weekly',fitdata=y_jhu[test_country]*weights,scale='linear',plottitle= '',label='confirmed',newplot = True, figsize = (15,15))
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3UEIR.pk :
{'beta': 0.48, 'alpha': 0.2, 'gamma': 0.11249999999999999, 'mu': 0.0125, 'c_0': 0.1, 'c_1': 0.016666666666666666, 'c_2': 110.0, 'k_u': 0.016666666666666666, 'k_1': 0.016666666666666666, 'k_w': 0.016666666666666666, 'kappa': 0.5}
dumped params to /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3UEIR.pk
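The single effective rates `gamma` and `mu` above collapse the three infection stages into one compartment by composing the staged rates through the branching probabilities `p[1]/(g[2]+p[2])` (progressing from mild to severe) and `p[2]/(g[3]+u)` (progressing from severe to critical). A small illustrative check, reusing `p`, `g` and `u` from the cell above, which here reproduces the loaded values:
```python
# Illustrative recomputation of the effective one-compartment rates from
# the staged rates p, g, u defined in the cell above.
frac12 = p[1]/(g[2]+p[2])   # probability of progressing I_1 -> I_2
frac23 = p[2]/(g[3]+u)      # probability of progressing I_2 -> I_3
gamma_eff = g[1] + g[2]*frac12 + g[3]*frac12*frac23
mu_eff = u*frac12*frac23
print(gamma_eff, mu_eff)    # 0.1125 and 0.0125 with the defaults above
```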
```python
w = interactive(slidefitplot,
beta=FloatSlider(min=0,max=1,step=0.01,value=params['beta'],description='beta',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
alpha=FloatSlider(min=0,max=1,step=0.01,value=params['alpha'],description='alpha',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
gamma=FloatSlider(min=0,max=1,step=0.01,value=params['gamma'],description='gamma',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=5000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False),
k_u=FloatSlider(min=0,max=1,step=0.001,value=params['k_u'],description='k_u',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
k_1=FloatSlider(min=0,max=1,step=0.001,value=params['k_1'],description='k_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
k_w=FloatSlider(min=0,max=1,step=0.001,value=params['k_w'],description='k_w',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
kappa=FloatSlider(min=0,max=1,step=0.001,value=params['kappa'],description='kappa',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'))
display(w)
```
interactive(children=(FloatSlider(value=0.48, continuous_update=False, description='beta', layout=Layout(width…
```python
params=w.kwargs
# not a good fit yet, did better last week
print(params)
```
{'beta': 0.48, 'alpha': 0.2, 'gamma': 0.11249999999999999, 'mu': 0.0125, 'c_0': 0.1, 'c_1': 0.016666666666666666, 'c_2': 110.0, 'logI_0': -6.0, 'k_u': 0.016666666666666666, 'k_1': 0.016666666666666666, 'k_w': 0.016666666666666666, 'kappa': 0.5}
### SC3UEI3R Model
#### USA
```python
# assumed data starting on firstdate
test_country='US'
N = 66650000
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
```python
model = 'SC3UEI3R'
I_0 = 0.00003
x0_SC3UEI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
SC3UEI3R_model.initial_values = (x0_SC3UEI3R, t[0])
# Define parameters based on clinical observations by Dr. Alison
Exposure=0.4 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=8 #Duration of mild infections, days
FracMild=0.65 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.15 #Fraction of infections that are critical
CFR=0.1 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=4 #Time from ICU admission to death, days
DurHosp=5 #Duration of hospitalization, days
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.5 # Fraction of infected individuals confirmed (detected)
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.1 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (days)
CautionICUFrac= 0.1 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.001 # Fraction of ICUs relative to population size N
# Model extension by John McCaskill to include economic influence on caution
EconomicCostOfCaution= 0.5 # Fractional reduction of economic contribution for cautioned individuals
p = [0,(1.0/DurMildInf)-(1.0/DurMildInf)*FracMild, (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere))]
g = [0,(1.0/DurMildInf)*FracMild, (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
(1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical)]
u = (1.0/TimeICUDeath)*(CFR/FracCritical)
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params = {'beta_1' : Exposure/sum(x0_SC3UEI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(ICUFrac*CautionICUFrac),
'k_u' : 1.0/5.,
'k_1' : 1.0/90,
'k_w' : 1.0/90,
'kappa' : EconomicCostOfCaution,
'N' : sum(x0_SC3UEI3R)}
print(params)
cmodels[model].parameters = params
cmodels[model].params = params
cmodels[model].dumpparams()
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3UEI3R.pk :
{'beta_1': 0.2, 'beta_2': 0.0, 'beta_3': 0.0, 'alpha': 0.2, 'gamma_1': 0.08125, 'gamma_2': 0.11428571428571428, 'gamma_3': 0.08333333333333331, 'p_1': 0.04375, 'p_2': 0.08571428571428573, 'mu': 0.16666666666666669, 'c_0': 0.1, 'c_1': 0.016666666666666666, 'c_2': 10000.0, 'k_u': 0.2, 'k_1': 0.011111111111111112, 'k_w': 0.011111111111111112, 'kappa': 0.5, 'N': 2.0}
dumped params to /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3UEI3R.pk
```python
def slidefitplot(beta_1,alpha,mu,c_0,c_1,c_2,logI_0,k_u,k_1,k_w,kappa):
params={ 'beta_1':beta_1, 'alpha':alpha, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2, 'k_u':k_u, 'k_1':k_1, 'k_w':k_w, 'kappa':kappa}
cmodels[model].parameters = params
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,0.,0.,1.]
cmodels[model].initial_values = (x0,t[0])
cmodels[model].params = params
cmodels[model].dumpparams()
weights=np.array([1.,1.,1.])
solveplot(smodels=[model],species=['confirmed','recovered','deaths_x10'],tmax=len(t),summing='daily',averaging='weekly',fitdata=y_jhu[test_country]*weights,scale='linear',plottitle= '',label='confirmed',newplot = True, figsize = (15,15))
```
```python
w=interactive(slidefitplot,
beta_1=FloatSlider(min=0,max=1,step=0.01,value=params['beta_1'],description='beta_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
alpha=FloatSlider(min=0,max=1,step=0.01,value=params['alpha'],description='alpha',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=5000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False),
k_u=FloatSlider(min=0,max=1,step=0.001,value=params['k_u'],description='k_u',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
k_1=FloatSlider(min=0,max=1,step=0.001,value=params['k_1'],description='k_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
k_w=FloatSlider(min=0,max=1,step=0.001,value=params['k_w'],description='k_w',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
kappa=FloatSlider(min=0,max=1,step=0.001,value=params['kappa'],description='kappa',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'))
display(w)
```
interactive(children=(FloatSlider(value=0.2, continuous_update=False, description='beta_1', layout=Layout(widt…
```python
params=w.kwargs
print(params)
```
#### Spain
```python
# assumed data starting on firstdate
test_country='Spain'
N = 66650000
firstdate = '01/25/20'
lastdate = '01/08/20'
xx,xxf,yy0 = get_country_data(test_country,'confirmed',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy1 = get_country_data(test_country,'recovered',firstdate=firstdate,lastdate=lastdate)
xx,xxf,yy2 = get_country_data(test_country,'deaths',firstdate=firstdate,lastdate=lastdate)
print(xxf)
y_jhu={}
y_jhu[test_country] = np.array([[yy0[i],yy1[i],yy2[i]] for i in range(0,len(yy0))])/N
# data = np.array([[xxf[i],yy0[i],yy1[i],yy2[i]] for i in range(len(yy))])
# print(data)
lastday = len(y_jhu[test_country])
print('days 0 to',lastday,'data stored in y_jhu')
```
[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, ..., 217.0, 218.0]  (output truncated: day indices from -3.0 to 218.0 in steps of 1.0)
days 0 to 222 data stored in y_jhu
```python
model = 'SC3UEI3R'
I_0 = 0.00003
x0_SC3UEI3R = [1.0-I_0, 0.0, I_0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
SC3UEI3R_model.initial_values = (x0_SC3UEI3R, t[0])
# Define parameters based on clinical observations by Dr. Alison
Exposure=0.4 # Rate coefficient for exposure per individual in contact per day
IncubPeriod=5 #Incubation period, days
DurMildInf=8 #Duration of mild infections, days
FracMild=0.65 #Fraction of infections that are mild
FracSevere=0.20 #Fraction of infections that are severe
FracCritical=0.15 #Fraction of infections that are critical
CFR=0.1 #Case fatality rate (fraction of infections resulting in death)
TimeICUDeath=4 #Time from ICU admission to death, days
DurHosp=5 #Duration of hospitalization, days
# Model fitting extension to allow for incomplete detection
FracConfirmedDet=0.5 # Fraction of infected individuals confirmed (detected)
FracRecoveredDet=FracConfirmedDet # Fraction of recovered individuals measured
FracDeathsDet=1.0
# Model extension by John McCaskill to include caution
CautionFactor= 0.1 # Fractional reduction of exposure rate for cautioned individuals
CautionRetention= 60. # Duration of cautionary state of susceptibles (days)
CautionICUFrac= 0.1 # Fraction of ICUs occupied leading to transition to caution @ 1/day
ICUFrac= 0.001 # Fraction of ICUs relative to population size N
# Model extension by John McCaskill to include economic influence on caution
EconomicCostOfCaution= 0.5 # Fractional reduction of economic contribution for cautioned individuals
p = [0,(1.0/DurMildInf)-(1.0/DurMildInf)*FracMild, (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere))]
g = [0,(1.0/DurMildInf)*FracMild, (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
(1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical)]
u = (1.0/TimeICUDeath)*(CFR/FracCritical)
if cmodels[model].loadparams():
params = cmodels[model].params.copy()
else:
params = {'beta_1' : Exposure/sum(x0_SC3UEI3R),
'beta_2' : 0.0,
'beta_3' : 0.0,
'alpha' : 1.0/IncubPeriod,
'gamma_1': (1.0/DurMildInf)*FracMild,
'gamma_2': (1.0/DurHosp)-(1/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'gamma_3': (1.0/TimeICUDeath)-(1/TimeICUDeath)*(CFR/FracCritical),
'p_1' : (1.0/DurMildInf)-(1.0/DurMildInf)*FracMild,
'p_2' : (1.0/DurHosp)*(FracCritical/(FracCritical+FracSevere)),
'mu' : (1.0/TimeICUDeath)*(CFR/FracCritical),
'c_0' : CautionFactor,
'c_1' : 1.0/CautionRetention,
'c_2' : 1.0/(ICUFrac*CautionICUFrac),
'k_u' : 1.0/5.,
'k_1' : 1.0/90,
'k_w' : 1.0/90,
'kappa' : EconomicCostOfCaution,
'N' : sum(x0_SC3UEI3R)}
print(params)
cmodels[model].parameters = params
cmodels[model].params = params
run_id = '{}_{}_logI0=-6'.format(model,test_country)
cmodels[model].dumpparams(run_id)
```
loaded params from /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3UEI3R.pk :
{'beta_1': 0.35, 'alpha': 0.2, 'mu': 0.16666666666666669, 'c_0': 0.1, 'c_1': 0.016666666666666666, 'c_2': 882.0, 'k_u': 0.084, 'k_1': 0.011111111111111112, 'k_w': 0.011111111111111112, 'kappa': 0.553}
dumped params to /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3UEI3R.pk
dumped params to /Users/n/Projects/covid-recovery/Notebooks/covid-19-caution/params/SC3UEI3R_Spain_logI0=-6.pk
```python
def slidefitplot(beta_1,alpha,mu,c_0,c_1,c_2,logI_0,k_u,k_1,k_w,kappa):
params={ 'beta_1':beta_1, 'alpha':alpha, 'mu':mu, 'c_0':c_0, 'c_1':c_1, 'c_2':c_2, 'k_u':k_u, 'k_1':k_1, 'k_w':k_w, 'kappa':kappa}
cmodels[model].parameters = params
cmodels[model].params = params
run_id = '{}_{}_logI0={}'.format(model,test_country,logI_0)
cmodels[model].dumpparams(run_id)
I0 = 10**logI_0
x0 = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,0.,0.,1.]
cmodels[model].initial_values = (x0,t[0])
weights=np.array([1.,1.,1.])
solveplot(smodels=[model],species=['confirmed','recovered','deaths_x10'],tmax=len(t),summing='daily',averaging='weekly',fitdata=y_jhu[test_country]*weights,scale='linear',plottitle= '',label='confirmed',newplot = True, figsize = (15,15))
```
```python
w=interactive(slidefitplot,
beta_1=FloatSlider(min=0,max=1,step=0.01,value=params['beta_1'],description='beta_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
alpha=FloatSlider(min=0,max=1,step=0.01,value=params['alpha'],description='alpha',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
mu=FloatSlider(min=0,max=0.2,step=0.002,value=params['mu'],description='mu',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_0=FloatSlider(min=0,max=1,step=0.01,value=params['c_0'],description='c_0',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_1=FloatSlider(min=0,max=1,step=0.001,value=params['c_1'],description='c_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
c_2=FloatSlider(min=0,max=5000,step=1,value=params['c_2'],description='c_2',
style=style,layout=slider_layout,continuous_update=False,readout_format='.1f'),
logI_0=FloatSlider(min=-10,max=0,step=0.01,value=-6,description='log I_0',
style=style,layout=slider_layout,continuous_update=False),
k_u=FloatSlider(min=0,max=1,step=0.001,value=params['k_u'],description='k_u',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
k_1=FloatSlider(min=0,max=1,step=0.001,value=params['k_1'],description='k_1',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
k_w=FloatSlider(min=0,max=1,step=0.001,value=params['k_w'],description='k_w',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'),
kappa=FloatSlider(min=0,max=1,step=0.001,value=params['kappa'],description='kappa',
style=style,layout=slider_layout,continuous_update=False,readout_format='.3f'))
display(w)
```
interactive(children=(FloatSlider(value=0.35, continuous_update=False, description='beta_1', layout=Layout(wid…
```python
run_id = '{}_{}_logI0=-6.28'.format(model,test_country)
run_id
```
'SC3UEI3R_Spain_logI0=-6.28'
```python
```
```python
params=w.kwargs
print(params)
```
## Fit SC3EI3R parameters to jhu data based on square_loss
### Fit c_0, c_1 and c_2 as well as initial value of I_1
```python
SC3EI3R_model.parameters
```
```python
# Initial guess of parameters and initial condition, and bounding constraints
I0 =10**-6.43
x0_SC3EI3R = [1.-I0,0.,I0,0.,0.,0.,0.,0.,0.,0.,0.]
SC3EI3R_model.parameters={'beta_1': 0.41, 'mu': 0.079, 'c_0': 0.1, 'c_1': 0.030303030303030304, 'c_2': 11170.0}
cautionparams = list(params.values())[-4:-1]
theta = [0.1,0.07,8000.] # cautionparams
boxBounds = [(0.05,0.4),(0.05,0.15),(1000.,200000.)]
# set up optimization function with cost and sensitivity (Jacobian)
objSC3EI3R = SquareLoss(theta=theta, ode=SC3EI3R_model, x0=x0_SC3EI3R, t0=t[0], t=t[1::], y=y_jhu[test_country][1::,:],
state_weight=[1.,1.,10.],state_name=['C_f','R','D'],
target_param=['c_0','c_1','c_2'],target_state=['I_1'])
# perform optimization
res = minimize(fun=objSC3EI3R.costIV,
jac=objSC3EI3R.sensitivityIV,
x0=theta+[I0],
bounds=boxBounds+[(0.0000001,0.0001)],
#method='BFGS',
method='SLSQP',
#options={'disp':True,'maxiter':1000,'eps':0.01,'gtol':0.01})
#options={'disp':True})
options={'disp':True,'maxiter':1000,'eps':0.001,'ftol':0.001})
print(res)
```
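For orientation, the objective minimized here is a state-weighted sum of squared residuals between the model trajectory and the data, with the weights `[1.,1.,10.]` emphasizing deaths. A minimal sketch of that cost (an illustration of the idea only, not the pygom `SquareLoss` internals, which additionally supply gradients via sensitivity equations):
```python
import numpy as np

def weighted_square_loss(sol, data, weights):
    # sol, data: arrays of shape (n_times, n_states); weights: (n_states,).
    # Returns sum over times i and states k of weights[k]*(sol[i,k]-data[i,k])**2.
    resid = np.asarray(sol) - np.asarray(data)
    return float(np.sum(np.asarray(weights)*resid**2))
```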
```python
# model with fitted parameters
print(params)
print(x0_SC3EI3R)
params_fit = params.copy()
#params_fit['c_0'] = res.x[0]
#params_fit['c_1'] = res.x[1]
SC3EI3R_model.params = params_fit
print(SC3EI3R_model.params)
#ode_fit = common_models.SEI3R({'beta':res.x[0], 'gamma':res.x[1],'alpha':res.x[2]})
#x0_fit = [1-1.27e-6, 1.27e-6, 0]
x0_fit = x0_SC3EI3R.copy()
#x0_fit[2] = res.x[2]
#t_fit = numpy.linspace(0, 150, 1000)
t_fit = t
SC3EI3R_model.initial_values = (x0_fit, t_fit[0])
# %timeit sol_fit =SCEI3R_model.integrate(t_fit[1::]) # use magic %timeit to time
# sol_fit =SCEI3R_model.integrate(t_fit[0::])
sol_fit = scipy.integrate.odeint(SC3EI3R_model.ode, x0_fit, t_fit[1::])
#
plt.figure(figsize=(15,10))
plt.plot(t,ynoise[:,0], 'o',color='lightgreen') # infected observations
plt.plot(t,ynoise[:,1], 'o',color='green') # infected observations
plt.plot(t,ynoise[:,2], 'o',color='darkgreen') # infected observations
plt.plot(t,ynoise[:,3], 'bo') # recovered
plt.plot(t,ynoise[:,4], 'ro') # died
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0,0.004])
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
## Testing fitting
### Generate test data based on SCEI3R simulation
```python
# Add noise
y = solution[:,2:7].copy()
#print('len(y)',len(y),'t',len(t),t[0],t[1],'...',t[-1])
np.random.seed(seed=6)
noise = np.random.normal(0,1.e-2,[len(t),5])
# ynoise = y *(1+noise)
ynoise = y *(1.0 + noise)
ynoise[ynoise<0] = 0
plt.figure(figsize=(15,10))
plt.plot(t,ynoise[:,0], 'go', label='I_1')
plt.plot(t,ynoise[:,1], 'go', label='I_2')
plt.plot(t,ynoise[:,2], 'go', label='I_3')
plt.plot(t,ynoise[:,3], 'bo', label='R')
plt.plot(t,ynoise[:,4], 'ro', label='D')
plt.plot(t,y[:,0], 'g', label='I_1')
plt.plot(t,y[:,1], 'g', label='I_2')
plt.plot(t,y[:,2], 'g', label='I_3')
plt.plot(t,y[:,3], 'b', label='R')
plt.plot(t,y[:,4], 'r', label='D')
plt.legend()
plt.ylim(0,0.003)
plt.show()
```
```python
# model with generating parameters
print(params)
params_fit = params.copy()
print(params_fit['c_0'],params_fit['c_1'])
SCEI3R_model.params = params_fit
x0_fit = x0_SCEI3R.copy()
print(x0_fit)
#t_fit = numpy.linspace(0, 150, 1000)
t_fit = t
SCEI3R_model.initial_values = (x0_fit, t_fit[0])
# %timeit sol_fit =SCEI3R_model.integrate(t_fit[1::]) # use magic %timeit to time
sol_fit = scipy.integrate.odeint(SCEI3R_model.ode, x0_fit, t_fit[1::])
# print(len(sol_fit[0]))
#
plt.figure(figsize=(15,10))
plt.plot(t,ynoise[:,0], 'go',label='I_1') # infected observations
plt.plot(t,ynoise[:,1], 'go',label='I_2') # infected observations
plt.plot(t,ynoise[:,2], 'go',label='I_3') # infected observations
plt.plot(t,ynoise[:,3], 'bo',label='R') # recovered
plt.plot(t,ynoise[:,4], 'ro',label='D') # died
plt.gca().set_prop_cycle(color=['grey','orange','green','green','green','blue','red', 'black'])
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0,0.004])
plt.legend()
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
```python
params # use list(...) to convert to list
```
### Fit parameters to randomized simulation data based on square_loss
#### Fit c_0 and c_1 only
```python
# Initial guess of parameters, and bounding constraints
cautionparams = list(params.values())[-4:-2]
theta = [value for value in cautionparams]
theta = [0.21,0.08]
boxBounds = [(0.2,0.4),(0.05,0.15)]
objSCEI3R = SquareLoss(theta=theta, ode=SCEI3R_model, x0=x0_SCEI3R, t0=t[0], t=t[1::], y=ynoise[1::,:],
state_weight=[1.,1.,1.,1.0,1.0],state_name=['I_1','I_2','I_3','R','D'],
target_param=['c_0','c_1'])
# perform optimization
res = minimize(fun=objSCEI3R.cost,
jac=objSCEI3R.sensitivity,
x0=theta,
#bounds=boxBounds,
method='BFGS',
options={'disp':True,'maxiter':1000,'eps':0.00001})# ,'ftol':0.01}) #not BFGS
print(res)
```
#### Fit c_0 and c_1 as well as initial value of E
##### Fit c_0 and c_1 as well as initial value of E with 'SLSQP'
This approach does not work well.
Note the use of the special IV methods (`costIV`, `sensitivityIV`) for initial-value fitting of the `target_state`.
```python
# Initial guess of parameters and initial condition, and bounding constraints
cautionparams = list(params.values())[-4:-2]
theta = [0.25,0.08]
boxBounds = [(0.2,0.4),(0.05,0.15)]
objSCEI3R = SquareLoss(theta=theta, ode=SCEI3R_model, x0=x0_SCEI3R, t0=t[0], t=t[1::], y=ynoise[1::,:],
state_weight=[1.,1.,1.,1.,1.],state_name=['I_1','I_2','I_3','R','D'],
target_param=['c_0','c_1'],target_state=['E'])
# perform optimization
res = minimize(fun=objSCEI3R.costIV,
jac=objSCEI3R.sensitivityIV,
x0=theta+[0.00005],
#bounds=boxBounds+[(0.0000001,0.001)],
method='SLSQP',
options={'disp':True,'maxiter':1000,'eps':0.01,'ftol':0.01})
print(res)
```
##### Fit c_0 and c_1 as well as initial value of E with BFGS
This works well: no constraints, and `gtol` rather than `ftol`.
```python
# Initial guess of parameters and initial condition, and bounding constraints
cautionparams = list(params.values())[-4:-2]
theta = [0.25,0.08]
boxBounds = [(0.2,0.4),(0.05,0.15)]
objSCEI3R = SquareLoss(theta=theta, ode=SCEI3R_model, x0=x0_SCEI3R, t0=t[0], t=t[1::], y=ynoise[1::,:],
state_weight=[1.,1.,1.,1.,1.],state_name=['I_1','I_2','I_3','R','D'],
target_param=['c_0','c_1'],target_state=['E'])
# perform optimization
res = minimize(fun=objSCEI3R.costIV,
jac=objSCEI3R.sensitivityIV,
x0=theta+[0.00005],
#bounds=boxBounds+[(0.0000001,0.001)],
method='BFGS',
options={'disp':True,'maxiter':1000,'eps':0.01,'gtol':0.01})
print(res)
```
```python
# model with fitted parameters
print(params)
print(x0_SCEI3R)
params_fit = params.copy()
#params_fit['c_0'] = res.x[0]
#params_fit['c_1'] = res.x[1]
SCEI3R_model.params = params_fit
print(SCEI3R_model.params)
#ode_fit = common_models.SEI3R({'beta':res.x[0], 'gamma':res.x[1],'alpha':res.x[2]})
#x0_fit = [1-1.27e-6, 1.27e-6, 0]
x0_fit = x0.copy()
#x0_fit[2] = res.x[2]
#t_fit = numpy.linspace(0, 150, 1000)
t_fit = t
SCEI3R_model.initial_values = (x0_fit, t_fit[0])
# %timeit sol_fit =SCEI3R_model.integrate(t_fit[1::]) # use magic %timeit to time
# sol_fit =SCEI3R_model.integrate(t_fit[0::])
sol_fit = scipy.integrate.odeint(SCEI3R_model.ode, x0_fit, t_fit[1::])
#
plt.figure(figsize=(15,10))
plt.plot(t,ynoise[:,0], 'go') # infected observations
plt.plot(t,ynoise[:,1], 'go') # infected observations
plt.plot(t,ynoise[:,2], 'go') # infected observations
plt.plot(t,ynoise[:,3], 'bo') # recovered
plt.plot(t,ynoise[:,4], 'ro') # died
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0,0.004])
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
##### Fit c_0 and c_1 as well as initial value of E using L-BFGS-B
This method does not work well.
```python
# Initial guess of parameters and initial condition, and bounding constraints
cautionparams = list(params.values())[-4:-2]
theta = [0.25,0.08]
boxBounds = [(0.2,0.4),(0.05,0.15)]
objSCEI3R = SquareLoss(theta=theta, ode=SCEI3R_model, x0=x0_SCEI3R, t0=t[0], t=t[1::], y=ynoise[1::,:],
state_weight=[1.,1.,1.,1.,1.],state_name=['I_1','I_2','I_3','R','D'],
target_param=['c_0','c_1'],target_state=['E'])
# perform optimization
res = minimize(fun=objSCEI3R.costIV,
jac=objSCEI3R.sensitivityIV,
x0=theta+[0.00005],
bounds=boxBounds+[(0.0000001,0.001)],
method='L-BFGS-B',
options={'disp':True,'maxiter':1000,'eps':0.0001,'ftol':0.001})
print(res)
```
```python
objSCEI3R.residual()
```
##### Fit c_0 and c_1 as well as initial value of E with Nelder-Mead
No use of the Jacobian and no constraints.
```python
# Initial guess of parameters and initial condition, and bounding constraints
cautionparams = list(params.values())[-4:-2]
theta = [0.25,0.08]
boxBounds = [(0.2,0.4),(0.05,0.15)]
objSCEI3R = SquareLoss(theta=theta, ode=SCEI3R_model, x0=x0_SCEI3R, t0=t[0], t=t[1::], y=ynoise[1::,:],
state_weight=[1.,1.,1.,1.,1.],state_name=['I_1','I_2','I_3','R','D'],
target_param=['c_0','c_1'],target_state=['E'])
# perform optimization
res = minimize(fun=objSCEI3R.costIV,
#jac=objSCEI3R.sensitivityIV,
x0=theta+[0.00005],
#bounds=boxBounds+[(0.0000001,0.001)],
method='Nelder-Mead',
options={'disp':True,'maxiter':1000}) #,'eps':0.0001,'ftol':0.01}) #not NM
print(res)
```
```python
# model with fitted parameters
print(params)
print(x0_SCEI3R)
params_fit = params.copy()
#params_fit['c_0'] = res.x[0]
#params_fit['c_1'] = res.x[1]
SCEI3R_model.params = params_fit
print(SCEI3R_model.params)
#ode_fit = common_models.SEI3R({'beta':res.x[0], 'gamma':res.x[1],'alpha':res.x[2]})
#x0_fit = [1-1.27e-6, 1.27e-6, 0]
x0_fit = x0_SCEI3R.copy()
#x0_fit[2] = res.x[2]
#t_fit = numpy.linspace(0, 150, 1000)
t_fit = t
SCEI3R_model.initial_values = (x0_fit, t_fit[0])
# %timeit sol_fit =SCEI3R_model.integrate(t_fit[1::]) # use magic %timeit to time
# sol_fit =SCEI3R_model.integrate(t_fit[0::])
sol_fit = scipy.integrate.odeint(SCEI3R_model.ode, x0_fit, t_fit[1::])
#
plt.figure(figsize=(15,10))
plt.plot(t,ynoise[:,0], 'go') # infected observations
plt.plot(t,ynoise[:,1], 'go') # infected observations
plt.plot(t,ynoise[:,2], 'go') # infected observations
plt.plot(t,ynoise[:,3], 'bo') # recovered
plt.plot(t,ynoise[:,4], 'ro') # died
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0,0.004])
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
## Fit SC2IR parameters to jhu data based on square_loss
```python
params=SC2IR_model.parameters
print(params)
```
```python
# Initial guess of parameters and initial condition, and bounding constraints
theta = [0.4,0.11,0.007,0.33,0.228,275.]
boxBounds = [(0.2,0.5),(0.05,0.15),(0.005,0.015),(0.25,0.55),(0.15,0.4),(5.,2000.)]
# setup cost function and Jacobian with target parameters and initial states
objSC2IR = SquareLoss(theta=theta, ode=SC2IR_model, x0=x0, t0=t[0], t=t[1::], y=y_jhu[test_country][1::,1:3],
state_weight=[0.2,1.],state_name=['R','D'],
target_param=['beta','gamma','mu','c_0','c_1','c_2'],
target_state=['I'])
# perform optimization
res = minimize(fun=objSC2IR.costIV,
jac=objSC2IR.sensitivityIV,
x0=theta+[0.000000001],
bounds=boxBounds+[(0.0000000001,0.000001)],
# method='L-BFGS-B',
# method='Nelder-Mead',
#options={'disp':True,'maxiter':1000,'eps':0.01,'gtol':0.01})
options={'disp':True,'maxiter':1000,'eps':0.000001,'ftol':0.000000001})
print(res)
```
```python
# model with fitted parameters
startparams = SC2IR_model.parameters.copy() # save starting parameters (not fit)
print(params)
print(x0)
params_fit = params.copy()
params_fit['beta'] = res.x[0]
params_fit['gamma'] = res.x[1]
params_fit['mu'] = res.x[2]
params_fit['c_0'] = res.x[3]
params_fit['c_1'] = res.x[4]
params_fit['c_2'] = res.x[5]
SC2IR_model.params = params_fit
print(SC2IR_model.params)
x0_fit = x0.copy()
x0_fit[1] = res.x[6]
t_fit = t
SC2IR_model.initial_values = (x0_fit, t_fit[0])
sol_fit = scipy.integrate.odeint(SC2IR_model.ode, x0_fit, t_fit[1::])
#
plt.figure(figsize=(15,10))
plt.semilogy()
plt.ylim([0.000001,1])
plt.plot(t,y_jhu[test_country][:,1], 'bo',label='R') # recovered
plt.semilogy()
plt.ylim([0.000001,1])
plt.plot(t,y_jhu[test_country][:,2], 'ro',label='D') # died
plt.semilogy()
plt.ylim([0.000001,1])
plt.gca().set_prop_cycle(color=['grey','green','blue','red', 'black'])
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0.000001,1])
plt.legend(('R','D','S','I','R','D','S_c','I_c'))
plt.semilogy()
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,1])
print('Peak infection (days)', t_fit[peak_i])
SC2IR_model.parameters = startparams
```
## Fit SC3EI3R parameters to jhu data based on square_loss
### Fit c_0, c_1 and c_2 only
```python
# Initial guess of parameters, and bounding constraints
cautionparams = list(params.values())[-4:-1]
theta = [value for value in cautionparams]
print(theta)
theta = [0.3,0.08,2500.]
boxBounds = [(0.2,0.8),(0.05,0.15),(100.,10000.)]
objSC3EI3R = SquareLoss(theta=theta, ode=SC3EI3R_model, x0=x0_SC3EI3R, t0=t[0], t=t[1::], y=y_jhu[test_country][1::,1:3],
state_weight=[1.,1.],state_name=['R','D'],
target_param=['c_0','c_1','c_2'])
# perform optimization
res = minimize(fun=objSC3EI3R.cost,
#jac=objSC3EI3R.sensitivity,
x0=theta,
#bounds=boxBounds,
method='L-BFGS-B',
# method='Nelder-Mead',
options={'disp':True,'maxiter':1000,'eps':0.00001})# ,'ftol':0.01}) #not BFGS
print(res)
```
### Fit c_0, c_1 and c_2 as well as initial value of I_1
#### Fit c_0, c_1 and c_2 as well as initial value of I_1 with 'L-BFGS-B'
This approach does not work well.
Note the use of the special IV methods (`costIV`, `sensitivityIV`) for initial-value fitting of the `target_state`.
```python
# Initial guess of parameters and initial condition, and bounding constraints
cautionparams = list(params.values())[-4:-3]
theta = [value for value in cautionparams]
theta = [0.21,0.08,2500.]
objSC3EI3R = SquareLoss(theta=theta, ode=SC3EI3R_model, x0=x0_SC3EI3R, t0=t[0], t=t[1::], y=y_jhu[test_country][1::,1:3],
state_weight=[1.,1.],state_name=['R','D'],
target_param=['c_0','c_1','c_2'],target_state=['I_1'])
# perform optimization
res = minimize(fun=objSC3EI3R.costIV,
jac=objSC3EI3R.sensitivityIV,
x0=theta+[0.00005],
bounds=boxBounds+[(0.0000001,0.001)],
# method='BFGS',
method='L-BFGS-B',
options={'disp':True,'maxiter':1000,'eps':0.01,'gtol':0.01})
print(res)
```
#### Fit c_0 and c_1 as well as initial value of E with BFGS
This works well: no constraints, and `gtol` rather than `ftol`.
```python
# Initial guess of parameters and initial condition, and bounding constraints
cautionparams = list(params.values())[-4:-2]
theta = [0.25,0.08]
boxBounds = [(0.2,0.4),(0.05,0.15)]
objSCEI3R = SquareLoss(theta=theta, ode=SCEI3R_model, x0=x0_SCEI3R, t0=t[0], t=t[1::], y=ynoise[1::,:],
state_weight=[1.,1.,1.,1.,1.],state_name=['I_1','I_2','I_3','R','D'],
target_param=['c_0','c_1'],target_state=['E'])
# perform optimization
res = minimize(fun=objSCEI3R.costIV,
jac=objSCEI3R.sensitivityIV,
x0=theta+[0.00005],
#bounds=boxBounds+[(0.0000001,0.001)],
method='BFGS',
options={'disp':True,'maxiter':1000,'eps':0.01,'gtol':0.01})
print(res)
```
```python
# model with fitted parameters
print(params)
print(x0_SC3EI3R)
params_fit = params.copy()
#params_fit['c_0'] = res.x[0]
#params_fit['c_1'] = res.x[1]
SC3EI3R_model.params = params_fit
print(SC3EI3R_model.params)
#ode_fit = common_models.SEI3R({'beta':res.x[0], 'gamma':res.x[1],'alpha':res.x[2]})
#x0_fit = [1-1.27e-6, 1.27e-6, 0]
x0_fit = x0_SC3EI3R.copy()
#x0_fit[2] = res.x[2]
#t_fit = numpy.linspace(0, 150, 1000)
t_fit = t
SC3EI3R_model.initial_values = (x0_fit, t_fit[0])
# %timeit sol_fit =SCEI3R_model.integrate(t_fit[1::]) # use magic %timeit to time
# sol_fit =SCEI3R_model.integrate(t_fit[0::])
sol_fit = scipy.integrate.odeint(SC3EI3R_model.ode, x0_fit, t_fit[1::])
#
plt.figure(figsize=(15,10))
plt.plot(t,ynoise[:,0], 'o',color='lightgreen') # infected observations
plt.plot(t,ynoise[:,1], 'o',color='green') # infected observations
plt.plot(t,ynoise[:,2], 'o',color='darkgreen') # infected observations
plt.plot(t,ynoise[:,3], 'bo') # recovered
plt.plot(t,ynoise[:,4], 'ro') # died
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0,0.004])
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
#### Fit c_0 and c_1 as well as initial value of E using L-BFGS-B
This method does not work well.
```python
# Initial guess of parameters and initial condition, and bounding constraints
cautionparams = list(params.values())[-4:-2]
theta = [0.25,0.08]
boxBounds = [(0.2,0.4),(0.05,0.15)]
objSCEI3R = SquareLoss(theta=theta, ode=SCEI3R_model, x0=x0_SCEI3R, t0=t[0], t=t[1::], y=ynoise[1::,:],
state_weight=[1.,1.,1.,1.,1.],state_name=['I_1','I_2','I_3','R','D'],
target_param=['c_0','c_1'],target_state=['E'])
# perform optimization
res = minimize(fun=objSCEI3R.costIV,
jac=objSCEI3R.sensitivityIV,
x0=theta+[0.00005],
bounds=boxBounds+[(0.0000001,0.001)],
method='L-BFGS-B',
options={'disp':True,'maxiter':1000,'eps':0.0001,'ftol':0.001})
print(res)
```
```python
objSCEI3R.residual()
```
#### Fit c_0 and c_1 as well as initial value of E with Nelder-Mead
No use of the Jacobian and no constraints.
```python
# Initial guess of parameters and initial condition, and bounding constraints
cautionparams = list(params.values())[-4:-2]
theta = [0.25,0.08]
boxBounds = [(0.2,0.4),(0.05,0.15)]
objSCEI3R = SquareLoss(theta=theta, ode=SCEI3R_model, x0=x0_SCEI3R, t0=t[0], t=t[1::], y=ynoise[1::,:],
state_weight=[1.,1.,1.,1.,1.],state_name=['I_1','I_2','I_3','R','D'],
target_param=['c_0','c_1'],target_state=['E'])
# perform optimization
res = minimize(fun=objSCEI3R.costIV,
#jac=objSCEI3R.sensitivityIV,
x0=theta+[0.00005],
#bounds=boxBounds+[(0.0000001,0.001)],
method='Nelder-Mead',
options={'disp':True,'maxiter':1000}) #,'eps':0.0001,'ftol':0.01}) #not NM
print(res)
```
```python
# model with fitted parameters
print(params)
print(x0_SC3EI3R)
params_fit = params.copy()
params_fit['c_0'] = res.x[0]
params_fit['c_1'] = res.x[1]
SC3EI3R_model.params = params_fit
print(SC3EI3R_model.params)
x0_fit = x0_SC3EI3R.copy()
#x0_fit[2] = res.x[2]
#t_fit = numpy.linspace(0, 150, 1000)
t_fit = t
SC3EI3R_model.initial_values = (x0_fit, t_fit[0])
# %timeit sol_fit =SC3EI3R_model.integrate(t_fit[1::]) # use magic %timeit to time
# sol_fit =SC3EI3R_model.integrate(t_fit[0::])
sol_fit = scipy.integrate.odeint(SC3EI3R_model.ode, x0_fit, t_fit[1::])
#
plt.figure(figsize=(15,10))
plt.plot(t,y_jhu[test_country][:,1], 'bo') # recovered
plt.plot(t,y_jhu[test_country][:,2], 'ro') # died
plt.plot(t_fit[1::], sol_fit)
plt.ylim([0,0.004])
#plt.show(())
#ode_fit.plot()
peak_i = np.argmax(sol_fit[:,2])
print('Peak infection (days)', t_fit[peak_i])
```
### Information on method options
```python
scipy.optimize.show_options(solver='minimize', method='SLSQP', disp=True)
print(' ')
scipy.optimize.show_options(solver='minimize', method='L-BFGS-B', disp=True)
```
## Plot using full control
```python
def plotmodel(solns,t,scale='linear',species='no_susc',plottitle= '',label='',
newplot = True,models=['SEI3R','SCEI3R','SC3EI3R']):
"""
plot solns over
times t interpreted as models indicated in models parameter
scale: alternative 'linear' or 'log'
species alternatives 'all', 'confirmed', 'deaths', 'daily confirmed', 'daily deaths'
plottitle : title for plot
label : label for curve when called as part of multicurve plot
newplot : whether to open new plot True/False
models : list of models to include, default all three of those possible
"""
nmodels = len(models)
if len(solns) != len(models):
print("Error: number of models must match number of solutions")
return None
nm = 0
if newplot == True:
plt.figure(figsize=(nmodels*8,6))
for nm in range(nmodels):
soln = solns[nm]
if models[nm] == 'SEI3R': #SEI3R
plt.subplot(1,nmodels,nm+1)
if scale == 'log': #Plot on log scale
plt.semilogy()
plt.ylim([1,10000])
elif species != 'daily confirmed': # Plot on normal linear scale
#plt.ylim([0,10000])
pass
if species == 'no_susc':
plt.plot(t,soln[:,1:5],label=label)
plt.legend(("E","I1","I2","I3"))
elif species == 'confirmed' or species == 'daily confirmed':
suma = np.sum(soln[:,2:7],axis=1)
# print('length=',len(suma))
if species == 'daily confirmed':
sumd = np.zeros(len(suma))
for i in range(1,len(suma)):
sumd[i] = suma[i]-suma[i-1]
#plt.ylim([0,1000])
plt.plot(t,sumd,label=label)
else:
#plt.ylim([0,200000])
plt.plot(t,suma,label=label)
elif species == 'all':
plt.plot(t,soln,label=label)
plt.legend(("S","E","I1","I2","I3","R","D"))
plt.xlabel("Time (days)")
plt.ylabel("Portion of population N")
plt.title('SEI3R %s' % plottitle)
elif models[nm] == 'SCEI3R': #SCEI3R
#Plot
plt.subplot(1,nmodels,nm+1)
if scale == 'log': #Plot on log scale
plt.semilogy()
plt.ylim([1,10000])
elif species != 'daily confirmed': # Plot on normal linear scale
#plt.ylim([0,10000])
pass
if species == 'no_susc':
plt.plot(t,soln[:,1:5],label=label)
plt.legend(("E","I1","I2","I3"))
elif species == 'confirmed' or species == 'daily confirmed':
suma = np.sum(soln[:,2:7],axis=1)
# print('length=',len(suma))
if species == 'daily confirmed':
sumd = np.zeros(len(suma))
for i in range(1,len(suma)):
sumd[i] = suma[i]-suma[i-1]
#plt.ylim([0,1000])
plt.plot(t,sumd,label=label)
else:
#plt.ylim([0,200000])
plt.plot(t,suma,label=label)
elif species == 'all':
plt.plot(t,soln,label=label)
plt.legend(("S","E","I1","I2","I3","R","D","Sc"))
plt.xlabel("Time (days)")
plt.ylabel("Portion of population N")
plt.title('SCEI3R %s' % plottitle)
elif models[nm] == 'SC3EI3R': #SC3EI3R
plt.subplot(1,nmodels,nm+1)
if scale == 'log': #Plot on log scale
plt.semilogy()
plt.ylim([1,10000])
elif species != 'daily confirmed': # Plot on normal linear scale
#plt.ylim([0,10000])
pass
if species == 'no_susc':
plt.plot(t,soln[:,1:5],label=label)
plt.legend(("E","I1","I2","I3"))
elif species == 'confirmed' or species == 'daily confirmed':
suma = np.sum(soln[:,2:7],axis=1) + soln[:,9]
if species == 'daily confirmed':
sumd = np.zeros(len(suma))
for i in range(1,len(suma)):
sumd[i] = suma[i]-suma[i-1]
# plt.ylim([0,1000])
plt.plot(t,sumd,label=label)
else:
# plt.ylim([0,200000])
plt.plot(t,suma,label=label)
elif species == 'all':
plt.plot(t,soln,label=label)
plt.legend(("S","E","I1","I2","I3","R","D","Sc","Ec","I1c"))
plt.xlabel("Time (days)")
plt.ylabel("Portion of population N")
plt.title('SC3EI3R %s' % plottitle)
return True
```
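As a side note, the explicit per-day difference loops in `plotmodel` could be replaced with `np.diff`; a minimal equivalent sketch (with `suma` the cumulative series, as in the function above):
```python
import numpy as np

def daily_from_cumulative(suma):
    # Daily increments of a cumulative series, with a leading zero so the
    # output has the same length as the input (as in plotmodel).
    sumd = np.zeros(len(suma))
    sumd[1:] = np.diff(suma)
    return sumd
```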
```python
plotmodel([sol_fit],t_fit[1:],scale='linear',species='no_susc',plottitle= 'test',label='',
newplot = True,models=['SCEI3R'])
```
|
import .love03_forward_proofs_demo
/- # LoVe Exercise 4: Functional Programming -/
set_option pp.beta true
set_option pp.generalized_field_notation false
namespace LoVe
/- ## Question 1: Reverse of a List
We define a new accumulator-based version of `reverse`. The first argument,
`as`, serves as the accumulator. This definition is __tail-recursive__, meaning
that compilers and interpreters can easily optimize the recursion away,
resulting in more efficient code. -/
def accurev {α : Type} : list α → list α → list α
| as [] := as
| as (x :: xs) := accurev (x :: as) xs
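/- A quick added sanity check (illustrative): the accumulator collects the
elements in reverse order. -/
#eval accurev ([] : list ℕ) [1, 2, 3]   -- expected: [3, 2, 1]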
/- 1.1. Our intention is that `accurev [] xs` should be equal to `reverse xs`.
But if we start an induction, we quickly see that the induction hypothesis is
not strong enough. Start by proving the following generalization (using the
`induction'` tactic or pattern matching): -/
lemma accurev_eq_reverse_append {α : Type} :
∀as xs : list α, accurev as xs = reverse xs ++ as
| as [] := by refl
| as (x :: xs) := by simp [reverse, accurev, accurev_eq_reverse_append _ xs]
/- 1.2. Derive the desired equation. -/
lemma accurev_eq_reverse {α : Type} (xs : list α) :
accurev [] xs = reverse xs :=
by simp [accurev_eq_reverse_append]
/- 1.3. Prove the following property.
Hint: A one-line inductionless proof is possible. -/
lemma accurev_accurev {α : Type} (xs : list α) :
accurev [] (accurev [] xs) = xs :=
by simp [accurev_eq_reverse, reverse_reverse]
/- 1.4. Prove the following lemma by structural induction, as a "paper" proof.
This is a good exercise to develop a deeper understanding of how structural
induction works (and is good practice for the final exam).
lemma accurev_eq_reverse_append {α : Type} :
∀as xs : list α, accurev as xs = reverse xs ++ as
Guidelines for paper proofs:
We expect detailed, rigorous, mathematical proofs. You are welcome to use
standard mathematical notation or Lean structured commands (e.g., `assume`,
`have`, `show`, `calc`). You can also use tactical proofs (e.g., `intro`,
`apply`), but then please indicate some of the intermediate goals, so that we
can follow the chain of reasoning.
Major proof steps, including applications of induction and invocation of the
induction hypothesis, must be stated explicitly. For each case of a proof by
induction, you must list the inductive hypotheses assumed (if any) and the goal
to be proved. Minor proof steps corresponding to `refl`, `simp`, or `cc` need
not be justified if you think they are obvious (to humans), but you should say
which key lemmas they depend on. You should be explicit whenever you use a
function definition or an introduction rule for an inductive predicate. -/
/- We perform the proof by structural induction on `xs`, generalizing `as`.
Case `[]`: The goal is `accurev as [] = reverse [] ++ as`. The left-hand side
is `as` by definition of `accurev`. The right-hand side is `as` by definition
of `reverse` and `++`.
Case `x :: xs`: The goal is `accurev as (x :: xs) = reverse (x :: xs) ++ as`.
The induction hypothesis is `∀as, accurev as xs = reverse xs ++ as`.
Let us simplify the goal's left-hand side:
accurev as (x :: xs)
= accurev (x :: as) xs -- by definition of `accurev`
= reverse xs ++ (x :: as) -- by the induction hypothesis
Now let us massage the right-hand side so that it matches the simplified
left-hand side:
reverse (x :: xs) ++ as
= (reverse xs ++ [x]) ++ as -- by definition of `reverse`
= reverse xs ++ ([x] ++ as) -- by associativity of `++`
= reverse xs ++ (x :: as) -- by definition of `++`
The two sides are equal. QED -/
/- ## Question 2: Drop and Take
The `drop` function removes the first `n` elements from the front of a list. -/
def drop {α : Type} : ℕ → list α → list α
| 0 xs := xs
| (_ + 1) [] := []
| (m + 1) (x :: xs) := drop m xs
/- Its relative `take` returns a list consisting of the first `n` elements
at the front of a list.
2.1. Define `take`.
To avoid unpleasant surprises in the proofs, we recommend that you follow the
same recursion pattern as for `drop` above. -/
def take {α : Type} : ℕ → list α → list α
| 0 _ := []
| (_ + 1) [] := []
| (m + 1) (x :: xs) := x :: take m xs
#eval take 0 [3, 7, 11] -- expected: []
#eval take 1 [3, 7, 11] -- expected: [3]
#eval take 2 [3, 7, 11] -- expected: [3, 7]
#eval take 3 [3, 7, 11] -- expected: [3, 7, 11]
#eval take 4 [3, 7, 11] -- expected: [3, 7, 11]
#eval take 2 ["a", "b", "c"] -- expected: ["a", "b"]
/- 2.2. Prove the following lemmas, using `induction'` or pattern matching.
Notice that they are registered as simplification rules thanks to the `@[simp]`
attribute. -/
@[simp] lemma drop_nil {α : Type} :
∀n : ℕ, drop n ([] : list α) = []
| 0 := by refl
| (_ + 1) := by refl
@[simp] lemma take_nil {α : Type} :
∀n : ℕ, take n ([] : list α) = []
| 0 := by refl
| (_ + 1) := by refl
/- 2.3. Follow the recursion pattern of `drop` and `take` to prove the
following lemmas. In other words, for each lemma, there should be three cases,
and the third case will need to invoke the induction hypothesis.
The first case is shown for `drop_drop`. Beware of the fact that there are three
variables in the `drop_drop` lemma (but only two arguments to `drop`).
Hint: The `refl` tactic might be useful in the third case of `drop_drop`. -/
lemma drop_drop {α : Type} :
∀(m n : ℕ) (xs : list α), drop n (drop m xs) = drop (n + m) xs
| 0 n xs := by refl
| (_ + 1) _ [] := by simp [drop]
| (m + 1) n (x :: xs) :=
begin
simp [drop, drop_drop m n xs],
refl
end
lemma take_take {α : Type} :
∀(m : ℕ) (xs : list α), take m (take m xs) = take m xs
| 0 _ := by refl
| (_ + 1) [] := by refl
| (m + 1) (x :: xs) := by simp [take, take_take m xs]
lemma take_drop {α : Type} :
∀(n : ℕ) (xs : list α), take n xs ++ drop n xs = xs
| 0 _ := by refl
| (_ + 1) [] := by refl
| (m + 1) (x :: xs) := by simp [take, drop, take_drop m]
/- ## Question 3: A Type of λ-Terms
3.1. Define an inductive type corresponding to the untyped λ-terms, as given
by the following context-free grammar:
term ::= 'var' string -- variable (e.g., `x`)
| 'lam' string term -- λ-expression (e.g., `λx, t`)
| 'app' term term -- application (e.g., `t u`) -/
inductive term : Type
| var : string → term
| lam : string → term → term
| app : term → term → term
/- 3.2. Register a textual representation of the type `term` as an instance of
the `has_repr` type class. Make sure to supply enough parentheses to guarantee
that the output is unambiguous. -/
def term.repr : term → string
| (term.var s) := s
| (term.lam s t) := "(λ" ++ s ++ ", " ++ term.repr t ++ ")"
| (term.app t u) := "(" ++ term.repr t ++ " " ++ term.repr u ++ ")"
@[instance] def term.has_repr : has_repr term :=
{ repr := term.repr }
/- 3.3. Test your textual representation: -/
#eval (term.lam "x" (term.app (term.app (term.var "y") (term.var "x"))
(term.var "x")))
-- should print something like `(λx, ((y x) x))`
end LoVe
|
\documentclass[a4paper,12pt]{article}
\usepackage{xcolor}
\usepackage[colorlinks = true,
linkcolor = blue,
urlcolor = blue,
citecolor = blue,
anchorcolor = blue]{hyperref}
\newcommand{\lnk}[3][blue]{\href{#2}{\color{#1}{#3}}}%
\newcommand{\sectionbreak}{\clearpage}
\begin{document}
% Title of the project
\title{Steem Times White Paper}
% Authors
\author{revisesociology,\\ gabriellecd,\\ victorcovrig,\\ warpedpoetic,\\ superoo7,\\ karmachela,\\ livinguktaiwan}
% Created date / last updated date
\date{June 8, 2018}
\maketitle
\sectionbreak
% 1 Rationale
\section{Rationale}
The steem blockchain is an innovation that stands out within the broader innovation that is blockchain technology. Unlike other blockchain-related platforms, the steem blockchain is the only one of its kind that offers the average user an opportunity to step through the rabbit hole that is blockchain technology. It enables the noob, the mom, the grandpa, the dude on the street who knows next to nothing about cryptocurrency, blockchain technology or programming to have access to the innovation, head down the rabbit hole to make money out of it, and to build communities out of it. This is something that all other active blockchain-related projects lack.
\\\\
The steem blockchain’s flagship project, steemit.com, is a social medium unlike any other out there. It offers users not only the opportunity to actively market their skills and ideas, but also the chance to interact with a wider base of dedicated and invested users who are mutually interested in growing the platform and bringing much more energy, beauty, and artistry to it and to the lives of the users themselves. It is a community-driven project that seeks to bridge the gaps between content creators and content consumers, between project creators and investors, and between cultures, arts and sciences, and people.
\\\\
To achieve this, a lot has to be done, but for those who have been on the platform for a long time, it is obvious that there are lapses in the process of bridging these gaps. It is obvious that, due to the financial benefits inherent in the platform’s core process of interaction, abuse cannot be avoided; and as human beings, it is a sad commentary on our evolution that we know how to take, destroy and kill, but do not know how to give, how to maintain, how to manage, how to grow things.
\\\\
A lot has been said about the problems on steemit: abuse of the reward pool, the dismal state of the trending page, the increasing number of inactive accounts, the inactive users with huge SP, the delegation to bidbots and other issues that the platform faces, but this is not the time or place for a debate. This is the place where we put our foot forward in bridging the gap between content creators and content consumers, between cultures, languages and ideas, and between project coordinators and investors. For we can no longer wait for the whales to come to our aid if we are going to keep the platform alive. It is our duty to keep this beautiful dream alive and to make it bud, grow and flower into something we will all cherish.
\\\\
In order to further this goal, we have gathered a team of individuals from different cultures and different areas of specialization to form a newspaper for the steem blockchain.
\\\\
We believe that the steem blockchain as a whole is underreported, and as a result many of the users do not know about, or do not have access to, the issues on the platform, the consequences of their actions, the beauty to be found on the platform, or the activities of those elected to stand for us as witnesses.
\\\\
This will be the first steem newspaper, and it intends to focus on the steem blockchain in such a way that these gaps can be bridged and the idea of community that propels the platform is achieved.
\\\\
% 2 members
\section{Members of SteemTimes}
\begin{center}
\begin{tabular} {|p{7em}|p{4em}|p{7em}|p{7.5em}|p{7.5em}|}
\hline
Steemit Name & Country & Skills & Role & Additional Info \\
\hline
@revisesociology & UK & Procrastination/ native English speaker & Updates on ‘what’s occurring’ in the steem ecosystem & Best beard on the blockchain \\
\hline
@gabriellecd & Venezuela & & & \\
\hline
@victorcovrig & Romania & & & \\
\hline
@warpedpoetic & Nigeria & & & \\
\hline
@superoo7 & Australia / Malaysia & Software Development & Creating the whitepaper github repository & High enthusiasm undergraduate student \\
\hline
@karmachela & Indonesia & & & \\
\hline
@livinguktaiwan & Taiwan & & & \\
\hline
\end{tabular}
\end{center}
\section {Official account}
SteemTimes has only one official account on the Steem blockchain, which is \textbf{\lnk{https://steemit.com/@steem-times}{@steem-times}}.
\sectionbreak
\section {Aims and Objectives}
The @steem-times has the following aims and objectives:
\begin{enumerate}
\item To be the first port of call for all significant developments within the steem ecosystem: from hard forks, DApp developments, whale movements and witness chats, to economic analysis and community projects.
\item To analyse the issues presented, as well as ongoing discussions on witness chats and discord shows, and posts by those who have direct access to the flow of information.
\item To showcase projects that are ongoing on the steem blockchain through interviews with project developers and testers who may be engaged in lots of ‘behind the scenes’ work but not actually be that active on steemit’s front end. With this in mind, we hope to partner with utopian.io as well as project developers all over the steem blockchain in order to gain access to first-hand information on the progress of projects.
\item To identify quality posts and present them for our readers and subscribers to read and follow. This is to create an avenue for content consumers to connect with content creators who have quality content on their blogs.
\item To identify newbies who show promise and encourage them through interviews, a guest contributor role for a period yet to be determined, and curation.
\item To connect with curators from @OCD, @Curie, @sndbox-alpha, etc., as well as mentors and editors on the different discord servers, in order to get first-hand information on the growth of newbies as well as other issues such as curating quality posts and the difficulties in teaching newbies.
\end{enumerate}
\sectionbreak
\section {Posting and Categories}
There will be 3 weekly issues of the @steem-times, published on Monday, Wednesday and Friday, and each will have three sections:
\begin{enumerate}
\item (30\% of content, 600 words) Steem Blockchain news which will appear in every issue - this will contain a brief overview of some of the most recent developments on the steem blockchain, such as releases by @steemitblog on major technical developments, new SMT and DApp launches and progress, Witness and Whale movements and comments, and details of new curation and projects launched on steemit itself.
\item (20\% of content, 400 words (133 per sub-section)) - Links, with brief summaries, to some of the best posts produced in the last week under the following categories of steemit (subject to change):
\begin{enumerate}
\item Monday: (gabriellecd \& livinguktaiwan)
\begin{enumerate}
\item Art and Culture
\item Writing and Literature
\item Sports
\end{enumerate}
\item Wednesday (revisesociology \& warpedpoetic)
\begin{enumerate}
\item Philosophy and Religion
\item World Politics
\item Science and Technology
\end{enumerate}
\item Friday (superoo7 \& karmachela)
\begin{enumerate}
\item Gossip and Lifestyle
\item Travel
\item Gaming
\end{enumerate}
\end{enumerate}
\item (Max 1000 words, 500 words per piece) The third section will be 2 columns: our (the team’s) opinions or views on the state of things.
\end{enumerate}
\section {Template}
The template is designed so that a comfortable number of sections appear in each post. This makes it possible for every subscriber to find something to his or her enjoyment in each and every issue we publish.
\\\\
Sample view of the template at \lnk{https://steemit.com/esteem/@johnsonlai/say-no-more-to-microsoft-word-fe73aa1ca8232}{steemit} and \lnk{https://busy.org/esteem/@johnsonlai/say-no-more-to-microsoft-word-fe73aa1ca8232}{busy.org}.\\
The markdown version is available at Official Github repo of SteemTimes: \lnk{https://github.com/superoo7/SteemTimes/blame/master/format.md}{format.md}.
\sectionbreak
\section {Rewards and Payment}
We have 100 SBD as a start-up budget for @steem-times, and it is proposed that we will use it in the following ways:
\begin{enumerate}
\item \textbf{Post using the \#steem-times tag}\\Encourage Steemians to post articles about their own opinions on the state of things on Steemit using the tag \#steem-times. Since this is a newspaper and not a curation project we may not reward posts using \#steem-times in the long run, but we will do so in the initial few months as a way to promote the newspaper and create awareness. The reward will be nominal, SBD0.3 to SBD1. The number of rewarded posts each day will vary depending on the volume and quality. We estimate to spend no more than 25\% of our budget in this area.
\item \textbf{Guest columnists}\\If we identify any Steemians from the above program who provide good quality insight and opinions, we will invite them as guest contributors to Section 3 of the @steem-times column. There will likely be no more than 2 guest contributors each week, and they will be paid a fee of SBD3 to SBD5 per article. This will take up approximately 40\% of the budget.
\item \textbf{Weekly review contest}\\Each weekend we will organise a weekly @steem-times review contest and ask Steemians questions about content from the three issues during the week. This can serve as a reminder to read @steem-times if they haven't had time to do so during the week. There will be around 10 questions, and the answers will be submitted via Google form. The weekly prize pool will be around SBD10, split between the three Steemians who get the most correct answers, or decided by a lucky draw if there are more than three. This will take up approximately 35\% of the budget.
\end{enumerate}
\noindent
Members of the project will share 50\% of the SBD post payout; the rest will be sold on the internal market for Steem and then powered up so as to grow the @steem-times account. As a result the paper will have more stake on the platform as well as encourage quality content creators.
\sectionbreak
\section{Conclusion}
This is not a perfect rendition of all that this paper seeks to achieve and each and every segment is open for correction, amendment and approval from the project members. Since this is a project that will be on the steemit platform for a long time, room for improvement is definitely encouraged.
\\\\
We believe that this project is something the steem blockchain needs and we believe that we have the wherewithal to propel this project into something that the platform users would enjoy and those who support the project would definitely be glad that they are a part of it.
\end{document}
|
module LoadFromMonai
using PythonCall
"""
Reverses the first axis of every (i, j) column of a 3D array; this flips
the image orientation (e.g., to match Julia-side conventions after loading
through MONAI).
"""
function permuteAndReverseFromMonai(pixels)
    sizz = size(pixels)
    for i in 1:sizz[2]
        for j in 1:sizz[3]
            pixels[:, i, j] = reverse(pixels[:, i, j])
        end
    end
    return pixels
end # permuteAndReverseFromMonai
"""
given file paths it loads
imagePath - path to main image
labelPath - path to label
transforms - monai.transforms.Compose object -important Load imaged should not be in this list it is added separately
default transforms standardize orientation, voxel dimensions crops unnecessary background reducing array size
ensure type of the images and labels so it will be easily convertible to for example numpy and then Julia
more in https://docs.monai.io/en/stable/transforms.html
"""
function loadByMonaiFromImageAndLabelPaths(
imagePath
,labelPath
,trAnsforms=[])
monai=pyimport("monai")
#default transforms
if(length(trAnsforms)==0)
trAnsforms= [
#monai.transforms.LoadImaged(keys=["image", "label"]),
monai.transforms.EnsureChannelFirstd(keys=["image", "label"]),
monai.transforms.Spacingd(keys=["image", "label"], pixdim=( 1.0, 1.0, 1.0), mode=("bilinear", "nearest")),
monai.transforms.Orientationd(keys=["image", "label"], axcodes="RAS"),
monai.transforms.CropForegroundd(keys=["image", "label"], source_key="image"),
monai.transforms.EnsureTyped(keys=["image", "label"])
]
end
trAnsformsComposed= monai.transforms.Compose(trAnsforms)
dicttt= pydict(Dict([("image", imagePath),( "label", labelPath )]))
loadObj=monai.transforms.LoadImaged(keys=["image", "label"])(dicttt)
metaData= pyconvert(Dict,pyconvert(Dict,loadObj)["image_meta_dict"])
loadObj = trAnsformsComposed(loadObj)
image = permuteAndReverseFromMonai(pyconvert(Array,loadObj["image"].detach().numpy()[0]))
label =permuteAndReverseFromMonai(pyconvert(Array,loadObj["label"].detach().numpy()[0]))
return (image,label,metaData)
end
end
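# Minimal usage sketch (non-authoritative; the NIfTI paths below are
# placeholders, and MONAI must be importable from the Python environment
# that PythonCall is configured to use):
#
#   using .LoadFromMonai
#   image, label, meta = LoadFromMonai.loadByMonaiFromImageAndLabelPaths(
#       "path/to/image.nii.gz", "path/to/label.nii.gz")
#   size(image)  # plain Julia array, ready for further processing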
|
\chapter{Dielectric Haloscopes for Detecting Axions and Dark Photons}
This chapter describes how ADMX uses microwave cavities to search for $\mathcal{O}(\SI{1}{\mu eV})$ axions, how the same technique gets harder to apply at higher axion masses, and how dielectric haloscopes can be used for higher mass searches.
This section focuses heavily on axions because that is the context for which Orpheus was developed. Most of the following discussion also applies to dark photons; the main difference between detecting the two is in the polarization of the detected photons. I will talk about the detection of axions and remark on dark photons when appropriate.
This chapter assumes the reader understands resonant cavities.
\section{Haloscopes to search for wavelike dark matter}\label{sec:haloscope_search}
An axion can be detected by detecting its feeble coupling to the SM photon. Axions mix with photons in a magnetic field (similar to how neutrinos of different flavors mix in free space). If this mixing happens inside a resonant cavity, and if that resonant frequency matches the photon frequency, the axion signal power is resonantly enhanced\cite{PhysRevLett.51.1415}.
The Lagrangian that describes an axion coupling to two photons is
\begin{align}
\mathcal{L}_{a\gamma\gamma} = \frac{1}{4} \gagg a F_{\mu\nu}\tilde{F}_{\mu\nu} = -\gagg a \vb{E}\vdot\vb{B}
\end{align}
From the Lagrangian, the derived axion modified Maxwell's equations are~\cite{2017MADMAXtheory, PhysRevD.99.055010}
\begin{align}
&\div{\vb{E}} = \rho - \gagg \vb{B}\vdot \grad{a},\\
&\div{\vb{B}} = 0,\\
&\curl{\vb{E}} = -\dot{\vb{B}},\\
&\curl{\vb{B}} = \dot{\vb{E}} + \vb{J} - \gagg(\vb{E}\cross \grad{a} - \dot{a}\vb{B}),\\
&\ddot{a} - \laplacian{a} + m_a^2 a = \gagg \vb{E} \vdot \vb{B}.
\end{align}
This can be simplified by noting that $\grad{a}\approx 0$. This approximation works because the de~Broglie wavelength of the axion is much larger than the size of the detector. For an axion with $m_a = \SI{80}{\mu eV}$ and $\langle v^2 \rangle^{1/2} = \SI{270}{km/s}$, the de~Broglie wavelength is $\ldb \approx \SI{17}{m}$. Orpheus is about \SI{15}{cm} long, so the axion field is approximately constant throughout the detector volume.
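As a quick check on the numbers quoted above (using $hc \approx \SI{1.24e-6}{eV.m}$),
\begin{align}
\ldb = \frac{h}{m_a v} = \frac{hc}{(m_a c^2)(v/c)} \approx \frac{\SI{1.24e-6}{eV.m}}{(\SI{80e-6}{eV})(9\times 10^{-4})} \approx \SI{17}{m}.
\end{align}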
Also note that $\gagg \dot{a}\vb{B}$ is on equal footing with $\vb{J}$. So the axion field can act like a current source that excites electromagnetic fields. Treatments of currents exciting cavity modes can be found in Jackson Chapter 8\cite{Jackson} and Pozar 4th ed. Chapter 6\cite{pozar}.
This coupling is feeble, but the resulting signal can be made detectable if the interacting magnetic field is very strong and if the signal is resonantly enhanced by a high-Q cavity\footnote{The cavity allows for the axion signal to build up (think of a photon of having about Q bounces before it dissipates).}. This signal can be detected with low noise electronics as a spectrally narrow power excess over the noise background, as shown in Figure~\ref{fig:haloscope}.
\begin{figure}
\centering
\includegraphics[width=\textwidth]{admx_haloscope.png}
\caption{Haloscope search for axions. An axion, when it sees a strong magnetic field, can convert to a photon. If the photon frequency matches the resonant frequency of the cavity, the axion signal power is resonantly enhanced. A haloscope search for dark photons works the same way, except that a dark photon can convert to an SM photon without the presence of a magnetic field. Figure courtesy of Christian Boutan.}
\label{fig:haloscope}
\end{figure}
I will outline the steps to deriving the equation for the axion signal power inside a cavity. First, derive the wave equation from Maxwell's equations. Assume harmonic time dependence and write $\vb{E}$ as the sum of cavity eigenmodes, i.e., rewrite the wave equation in k-space such that $\vb{E} = \sum_i \vb{E}_i \exp(-i(\omega t - \vb{k}_i\vdot\vb{x}))$. The k-space wave equation should have a damping term that's related to the cavity Q. Solve for the electric field amplitude of the relevant cavity mode and find the time-averaged energy stored $U = \int dV \frac{1}{2} \epsilon |E_0|^2$.
For a signal to be detected, the cavity has to be coupled to an external receiver. From the energy stored in the cavity, the unloaded Q of the cavity, and the coupling of the receiver to the cavity, one can derive the power dissipated in the receiver, i.e., the signal power. Let
\begin{align}
\frac{1}{Q_L} = \frac{1}{Q_0} +\frac{1}{Q_e}
\end{align}
where $Q_L$ is the loaded Q of the cavity, $Q_0$ is the unloaded Q, and $Q_e$ characterizes how much energy gets dissipated in the external load. The cavity coupling is $\beta \equiv \frac{Q_0}{Q_e}$. The power drawn into the external load is $P_{a}=\omega \frac{U}{Q_e}$. Putting all this together, the resulting signal power is (in natural units)
\begin{align}
& P_a = \frac{\gagg^2}{m_a} \rho_a B_0^2 V_{eff} Q_L \betaterm L(f, f_0, Q_L) \\
& L(f, f_0, Q_L) = \frac{1}{1+4\Delta^2} \hspace{1cm} \Delta \equiv Q_L \frac{f-f_0}{f_0}\\
& V_{eff} = \frac{\left |\int dV \vb{B}_0\vdot \vb{E}_a \right |^2}{B_0^2 \int dV \epsilon_r |E_a|^2}
\end{align}
The derivation was merely verbally outlined, and the math is left as an exercise to the reader. The reader is encouraged to read other sources like~\cite{brubaker2018results} for a more thorough treatment.
Axion searches look for a power excess over a thermal noise floor $P_n$. $P_n$ is the average of thousands of power spectrum measurements and consequently follows the Central Limit Theorem, so $\sigma_{P_n} = \frac{P_n}{\sqrt{N}}$, where N is the number of averaged spectra. The number of spectra is $N = b \Delta t$, where $b$ is the frequency bin width and $\Delta t$ is the total digitization time. The SNR of a haloscope signal is
\begin{align}
\snr &= \frac{Ps}{\sigma_{P_n}} = \frac{Ps}{P_n}\sqrt{N} = \frac{Ps}{P_n}\sqrt{b\Delta t }
\end{align}
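For a concrete sense of scale (numbers chosen purely for illustration): with a bin width $b = \SI{100}{Hz}$ and a total digitization time $\Delta t = \SI{100}{s}$, $N = b\Delta t = 10^4$, so the noise fluctuation is beaten down to $\sigma_{P_n} = P_n/100$, and a signal with $P_s = P_n/100$ is detected at $\snr = 1$.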
Keeping in mind that the cavity bandwidth is $\Delta f = \frac{f_0}{Q_L}$, the instantaneous scan rate is
\begin{align}
\dv{f}{t} = \frac{\Delta f}{\Delta t} = \frac{f_0 Q_L}{b}\left (\frac{\gagg^2 \rho_a B_0^2 \veff \beta}{\snr m_a T_n(\beta+1)}\right )^2
\end{align}
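Note that because $Q_L = Q_0/(1+\beta)$, the scan rate scales as $\beta^2/(1+\beta)^3$, which is maximized at $\beta = 2$: the optimal receiver coupling for scanning is overcoupled rather than critical.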
ADMX has successfully applied this technique to search for DFSZ axions with masses between \SI{2.7}{\mu eV} (\SI{650}{MHz}) and \SI{3.3}{\mu eV} (\SI{800}{MHz}) and continues to push the search to higher masses~\cite{PhysRevLett.120.151301, PhysRevLett.124.101303, admxcollaboration2021search}. For Run 1B, ADMX employed a \SI{136}{L} cylindrical cavity. The cavity was immersed in a \SI{7.6}{T} magnetic field and operated at the TM$_{010}$ mode such that the axion's electric field maximally aligned with the external magnetic field (the axion's electric field looks like that of a parallel plate capacitor). For Run 1B, $Q_L = 30000$ and $\dfdt = \SI{543}{MHz/yr}$.
\section{Haloscopes for Dark Photons}
The dark photon story is similar but with some key differences.
The dark photon interacts with the SM photon through kinetic mixing. The Lagrangian for this interaction is shown in Equation~\ref{eqn:dp_mixing_lagrangian}.
The consequence is that the dark photon and SM photon will oscillate into each other. One can derive the electromagnetic field produced by the dark photon to be~\cite{caputo2021dark}
\begin{align}
|\vb{E_0}| = \left | \frac{\chi m_{X}}{\epsilon} \vb{X}_0 \right |
\end{align}
where $\vb{X}$ is the dark photon field. One should note that the SM photon polarization is determined by the dark photon polarization rather than some external magnetic field. This polarization can be misaligned with the probing mode of the cavity.
The resulting dark photon signal power in a cavity is~\cite{Ghosh:2021ard} (in natural units)
\begin{align}
& P_{S} = \eta \chi^2 m_{\ap} \rho_{\ap} V_{eff} Q_L \betaterm L(f, f_0, Q_L) \\
& V_{eff} = \frac{\left (\int dV \vec{E}(\vec{x}) \vdot \vec{X}(\vec{x})\right )^2}{\int dV \epsilon_r |\vec{E}(\vec{x})|^2|\vec{X}(\vec{x})|^2}
\end{align}
Let $\theta$ be the angle between the cavity field and the dark photon field, such that $\vec{X}\vdot\vec{E} = |\vec{X}||\vec{E}|\cos\theta$. The $\veff$ can be rewritten as
\begin{align}
V_{eff} = \frac{\left (\int dV |\vec{E}(\vec{x})| | \vec{X}(\vec{x})|\right )^2 }{\int dV \epsilon_r |\vec{E}(\vec{x})|^2|\vec{X}(\vec{x})|^2}\langle \cos^2\theta \rangle_T
\end{align}
where $\cost$ is the time-averaged $\cos^2\theta$ value. The actual value of $\cost$ depends on the dark photon cosmology and the detector design and orientation. Often I will write $\veff = V_{eff,max} \cost$ because it allows me to rescale my dark photon limits to any cosmology.
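For example, in the commonly considered scenario where the dark photon polarization is random, $\cost = 1/3$; for a fixed-polarization scenario, $\cost$ depends on the detector orientation and the measurement duration~\cite{caputo2021dark}.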
\FloatBarrier
\section{Scaling the Haloscope Concept to Higher Frequencies}
ADMX is currently using this haloscope method to look for axions around a few \SI{}{\mu eV} with great success. Unfortunately, this haloscope design becomes increasingly difficult to implement at higher frequencies. Increasing mass corresponds to higher-frequency photons. Operating at the TM$_{010}$ mode would require smaller-diameter cavities, and a smaller cavity volume reduces the signal strength. The volume would scale as $V_{eff} \propto f^{-3}$ if the aspect ratio were kept the same, or $V_{eff} \propto f^{-2}$ if only the diameter were shrunk. Furthermore, the decreased volume-to-surface ratio decreases Q, further decreasing the signal. Q also falls with frequency because of the anomalous skin effect, so $Q_u \propto f^{-2/3}$. The smaller $Q_u$ also makes it more difficult for the receiver to couple critically. The quantum noise limit also increases linearly with $f$. Putting all these effects together, for a single closed cavity operating at the lowest-order mode with a quantum-noise-limited amplifier, the axion signal power scales optimistically as $P_s \propto f^{-2.66}$ and the scan rate scales optimistically as $\dv{f}{t} \propto f^{-7.66}$. This unfavorable frequency scaling motivates the design of more sophisticated resonators\footnote{Someone at a workshop called axions above \SI{10}{GHz} axions from hell. Orpheus came out of hell but lost someone he loved in the process.}.
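One way to see where these exponents come from is to combine the individual scalings quoted above (taking the optimistic $V_{eff} \propto f^{-2}$ and quantum-limited $T_n \propto f$):
\begin{align}
P_s \propto V_{eff} Q_u \propto f^{-2} f^{-2/3} = f^{-8/3} \approx f^{-2.66}, \qquad
\dv{f}{t} \propto f_0 Q_L \left(\frac{V_{eff}}{m_a T_n}\right)^2 \propto f^{1/3} \left(f^{-4}\right)^2 = f^{-23/3} \approx f^{-7.66}.
\end{align}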
\begin{figure}[h]
\centering
\subfloat[]{\input{small_resonator.tikz}}\hfil
\subfloat[]{\input{multimode_resonator.tikz}}
\caption{Traditional haloscope signal power scales poorly with frequency. (a) Small cavity operating at the fundamental mode, e.g., the TM$_{010}$ mode. The volume of such a cavity scales as $\sim f^{-3}$ for the same aspect ratio. (b) Operating a large cavity at higher frequencies requires operating at higher-order modes. The effective volume $\veff \propto \int dV \vb{E}_a \vdot \vb{B}_o \approx 0$. }
\end{figure}
One can think about keeping a large volume and operating at a mode higher than the TM$_{010}$. But then portions of $\va{E}_a$ are anti-aligned with $\va{B}_o$, and $\qty|\int dV {\va{B}_o\vdot \va{E}_a }| \approx 0$. The effective volume approaches zero even though the physical volume is large. Thus, there is little benefit to operating an empty cylindrical cavity at a higher-order mode.
One can also combine many cylindrical cavities. This is the plan for future ADMX runs. However, once the frequency approaches \SI{15}{GHz}, the wavelength is about \SI{2}{cm} and the corresponding fundamental-mode cavity volume is about \SI{1}{cm^3}. To have $\veff = \SI{1}{L}$, one would need to coherently power combine about 2000 cylindrical cavities. It would be difficult to instrument a \SI{1}{cm^3} cavity, and it would be unfeasibly complex to instrument 2000 cavities in a coordinated way.
\section{Multimode Dielectric Haloscopes and Orpheus Conceptual Design}
High-order modes can couple to the axion when dielectrics are placed inside of the resonator. Dielectrics suppress electric fields. If dielectrics can be placed where the electric field is anti-aligned with the magnetic field, then the overlap between the axion's electric field and the external magnetic field is greater than zero (see Figure~\ref{fig:dielectric_haloscope}). Thus the effective volume can become arbitrarily large, and the axion signal power is greater than what it would have been for a cylindrical cavity operating at the TM$_{010}$ mode. Overall, dielectric resonators can be operated at higher-order modes while maintaining coupling to the axion and can be made arbitrarily large, making them suited for higher-frequency searches.
Orpheus will implement this dielectric haloscope concept to search for axions around \SI{70}{\mu eV}. Orpheus\footnote{Orpheus was originally designed to have a spatially alternating magnetic field rather than a periodic dielectric structure\cite{PhysRevD.91.011701}. However, this alternating magnetic field design is difficult to scale to many Tesla.} is a dielectrically loaded Fabry-Perot open cavity placed inside of a dipole magnet. Dielectrics are placed every fourth\footnote{I could have also designed Orpheus so that the dielectrics were at every other half-wavelength. Intuitively, this would have resulted in a much greater $\veff$. But that didn't happen for various practical reasons that will be addressed in the next few sections. To summarize, it's because it would have been harder to design a cryo-compatible mechanical structure with the dielectric plates that close together, and I was having a hard time simulating the mode of interest\footnote{The axion-coupling mode or the dark photon coupling mode, depending on the context.} for the entire tuning range. I managed to make Orpheus work when the dielectrics were spaced every fourth of a half-wavelength and kept forging ahead with what worked. Orpheus could have worked if I placed dielectrics at every other half-wavelength, but it would have been harder to implement, and the tuning range would have likely been more limited.} of a half-wavelength to suppress the electric field where it is anti-aligned with the dipole field. Orpheus is designed to search for axions around \SI{16}{GHz} with over \SI{1}{GHz} of tuning range. The cavity tunes by changing its length, and the dielectrics are automatically adjusted to maintain even spacing throughout the cavity.
There are several benefits to the open resonator design. Fewer metallic walls lead to lower ohmic losses and a higher Q. Fewer metallic walls also mean fewer resonating modes. This leads to a sparse spectrum and fewer mode crossings, making it easier to maintain the mode of interest.
However, this experiment has many challenges. First, the optics must be designed to maintain good axion coupling for over \SI{1}{GHz} of tuning range. This includes choosing the right radius of curvature for the Fabry-Perot mirrors and appropriate dielectric thicknesses. Another challenge is that a dipole magnet is required to have $B_{0}$ aligned with the TEM electric fields. Dipoles are about ten times more expensive than solenoids. In addition, the mechanical design for such a cavity is complicated because there are many moving parts that have to work in a cryogenic environment. Finally, both diffraction and dielectric losses will decrease the resonator Q.
\begin{figure}[h]
\centering
\input{dielectric_resonator.tikz}
\caption{A multimode dielectrically-loaded cavity. Dielectrics are placed where the electric field is anti-aligned with the external magnetic field. Dielectrics suppress electric fields, so $V_{eff} \propto \int dV \vec{E} \vdot \vec{B} > 0$.}
\label{fig:dielectric_haloscope}
\end{figure}
\section{Other related experiments}
Dielectric haloscopes are a growing field. Without going into detail, there is MADMAX, which uses a very similar periodic dielectric structure~\cite{Brun2019}. Baryakhtar et~al.\ have developed a similar concept for the infrared and optical range~\cite{PhysRevD.98.035006}. Quiskamp et~al.\ have also developed a cylindrical cavity with a periodic dielectric structure~\cite{PhysRevApplied.14.044051}. This design has the advantage of working in a solenoid rather than a dipole magnet. RADES is not a dielectric haloscope but is a multimode cavity designed to work in a dipole magnet~\cite{melcon2021results}.
|
[STATEMENT]
lemma cross_rotoinversion_matrix: "rotoinversion_matrix A \<Longrightarrow> (A *v x) \<times> (A *v y) = - A *v (x \<times> y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rotoinversion_matrix A \<Longrightarrow> (A *v x) \<times> (A *v y) = - A *v x \<times> y
[PROOF STEP]
by (simp add: rotoinversion_matrix_def cross_orthogonal_matrix scaleR_matrix_vector_assoc)
|
If the degree of a polynomial $p$ is less than or equal to $n$, and the degree of a polynomial $q$ is less than or equal to $n$, then the degree of $p - q$ is less than or equal to $n$.
|
(* Title: HOL/Auth/n_g2kAbsAfter_lemma_inv__7_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_g2kAbsAfter Protocol Case Study*}
theory n_g2kAbsAfter_lemma_inv__7_on_rules imports n_g2kAbsAfter_lemma_on_inv__7
begin
section{*All lemmas on causal relation between inv__7*}
lemma lemma_inv__7_on_rules:
assumes b1: "r \<in> rules N" and b2: "(f=inv__7 )"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)\<or>
(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)\<or>
(r=n_n_SendReqS_j1 )\<or>
(r=n_n_SendReqEI_i1 )\<or>
(r=n_n_SendReqES_i1 )\<or>
(r=n_n_RecvReq_i1 )\<or>
(r=n_n_SendInvE_i1 )\<or>
(r=n_n_SendInvS_i1 )\<or>
(r=n_n_SendInvAck_i1 )\<or>
(r=n_n_RecvInvAck_i1 )\<or>
(r=n_n_SendGntS_i1 )\<or>
(r=n_n_SendGntE_i1 )\<or>
(r=n_n_RecvGntS_i1 )\<or>
(r=n_n_RecvGntE_i1 )\<or>
(r=n_n_ASendReqIS_j1 )\<or>
(r=n_n_ASendReqSE_j1 )\<or>
(r=n_n_ASendReqEI_i1 )\<or>
(r=n_n_ASendReqES_i1 )\<or>
(r=n_n_SendReqEE_i1 )\<or>
(r=n_n_ARecvReq_i1 )\<or>
(r=n_n_ASendInvE_i1 )\<or>
(r=n_n_ASendInvS_i1 )\<or>
(r=n_n_ASendInvAck_i1 )\<or>
(r=n_n_ARecvInvAck_i1 )\<or>
(r=n_n_ASendGntS_i1 )\<or>
(r=n_n_ASendGntE_i1 )\<or>
(r=n_n_ARecvGntS_i1 )\<or>
(r=n_n_ARecvGntE_i1 )"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_Store_i1Vsinv__7) done
}
moreover {
assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_AStore_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendReqS_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqS_j1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendReqEI_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqEI_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendReqES_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqES_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_RecvReq_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvReq_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendInvE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvE_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendInvS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvS_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvAck_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_RecvInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvInvAck_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendGntS_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendGntE_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_RecvGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvGntS_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_RecvGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvGntE_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendReqIS_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqIS_j1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendReqSE_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqSE_j1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendReqEI_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqEI_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendReqES_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqES_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_SendReqEE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqEE_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ARecvReq_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvReq_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendInvE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvE_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendInvS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvS_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvAck_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ARecvInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvInvAck_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendGntS_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ASendGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendGntE_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ARecvGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvGntS_i1Vsinv__7) done
}
moreover {
assume d1: "(r=n_n_ARecvGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvGntE_i1Vsinv__7) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
using MAT
using Printf
include("../core/layers.jl")
include("../core/blocks.jl")
include("../../configs.jl")
struct _InitLayer conv_bn; end
function _InitLayer(;input_dim=3, output_dim=64, dtype=Array{Float32}, pdrop=0, bias=true)
return _InitLayer(
ConvBn(7, 7, input_dim, output_dim, bias=bias, padding=3, stride=2, f=relu, dtype=dtype, pdrop=pdrop)
)
end
function (il::_InitLayer)(x; train=true)
return pool(il.conv_bn(x, train=train), window=3, stride=2, padding=1)
end
"""
Required ResNet structures are implemented here.
"""
mutable struct ResNet50 layer1; layer2; layer3; layer4; layer5; fc; dtype; classes; end
function ResNet50(;input_dim=3, dtype=Array{Float32}, pdrop=0, bias=false, include_top=true)
layer1 = _InitLayer(input_dim=input_dim, dtype=dtype, pdrop=pdrop)
layer2 = Chain([
Residual_1x3x1(64, [64, 64, 256], dtype=dtype, pdrop=pdrop, downsample=true),
Residual_1x3x1(256, [64, 64, 256], dtype=dtype, pdrop=pdrop),
Residual_1x3x1(256, [64, 64, 256], dtype=dtype, pdrop=pdrop)
])
layer3 = Chain([
Residual_1x3x1(256, [128, 128, 512], dtype=dtype, pdrop=pdrop, downsample=true, ds_3x3_stride=2),
Residual_1x3x1(512, [128, 128, 512], dtype=dtype, pdrop=pdrop),
Residual_1x3x1(512, [128, 128, 512], dtype=dtype, pdrop=pdrop),
Residual_1x3x1(512, [128, 128, 512], dtype=dtype, pdrop=pdrop)
])
layer4 = Chain([
Residual_1x3x1(512, [256, 256, 1024], dtype=dtype, pdrop=pdrop, downsample=true, ds_3x3_stride=2),
Residual_1x3x1(1024, [256, 256, 1024], dtype=dtype, pdrop=pdrop),
Residual_1x3x1(1024, [256, 256, 1024], dtype=dtype, pdrop=pdrop),
Residual_1x3x1(1024, [256, 256, 1024], dtype=dtype, pdrop=pdrop),
Residual_1x3x1(1024, [256, 256, 1024], dtype=dtype, pdrop=pdrop),
Residual_1x3x1(1024, [256, 256, 1024], dtype=dtype, pdrop=pdrop)
])
layer5 = Chain([
Residual_1x3x1(1024, [512, 512, 2048], dtype=dtype, pdrop=pdrop, downsample=true, ds_3x3_stride=2),
Residual_1x3x1(2048, [512, 512, 2048], dtype=dtype, pdrop=pdrop),
Residual_1x3x1(2048, [512, 512, 2048], dtype=dtype, pdrop=pdrop),
])
fc = nothing
if include_top fc = Dense(2048, 1000) end
return ResNet50(layer1, layer2, layer3, layer4, layer5, fc, dtype, nothing)
end
function (rn::ResNet50)(x; train=true, return_intermediate=true)
c1 = rn.layer1(x, train=train)
c2 = rn.layer2(c1, train=train)
c3 = rn.layer3(c2, train=train)
c4 = rn.layer4(c3, train=train)
c5 = rn.layer5(c4, train=train)
if rn.fc === nothing
if return_intermediate return c2, c3, c4, c5, nothing
else return c5
end
else
h, w, c, n = size(c5)
# assuming image is square shaped
p = mat(pool(c5, window=h, mode=2))
res = rn.fc(p, train=train)
if return_intermediate return c2, c3, c4, c5, res
else return res
end
end
end
function evaluate_model(rn::ResNet50, x, top=5)
res = rn(convert(rn.dtype, x), train=false, return_intermediate=false)
z1 = vec(Array(res))
s1 = sortperm(z1,rev=true)
p1 = exp.(logp(z1))
for ind in s1[1:top]
print("$(rn.classes[ind]): $(@sprintf("%.2f",p1[ind]*100))%\n")
end
end
function load_mat_weights(rn::ResNet50, path; pre_weights=nothing)
conv_w=nothing; conv_b=nothing; fc_w=nothing; fc_b=nothing;
bn_mom=nothing; bn_b=nothing; bn_mult=nothing; rn.classes=nothing;
if pre_weights === nothing
conv_w, conv_b, fc_w, fc_b, bn_mom, bn_b, bn_mult, rn.classes = _get_params(path, dtype=rn.dtype)
else
conv_w, conv_b, fc_w, fc_b, bn_mom, bn_b, bn_mult = pre_weights
end
# setting fc layer parameters
if rn.fc !== nothing && fc_w !== nothing
rn.fc.w = convert(rn.dtype, fc_w)
rn.fc.b = convert(rn.dtype, fc_b)
end
idx = 1
mom_idx = 1
# setting layer 1
rn.layer1.conv_bn.conv.w = Param(conv_w[1])
rn.layer1.conv_bn.conv.b = Param(conv_b)
rn.layer1.conv_bn.bn.bn_moments = bnmoments(mean=bn_mom[1], var=bn_mom[2])
rn.layer1.conv_bn.bn.bn_params = Param(vcat(vec(bn_mult[1]), vec(bn_b[1])))
idx += 1; mom_idx += 2;
# setting layer 2
rn.layer2.layers[1] = load_mat_weights(
rn.layer2.layers[1], conv_w[idx:idx+3], bn_mom[mom_idx:mom_idx+7], bn_b[idx:idx+3], bn_mult[idx:idx+3]
)
idx += 4; mom_idx += 8;
for i in 2:size(rn.layer2.layers, 1)
rn.layer2.layers[i] = load_mat_weights(
rn.layer2.layers[i], conv_w[idx:idx+2], bn_mom[mom_idx:mom_idx+5], bn_b[idx:idx+2], bn_mult[idx:idx+2]
)
idx += 3; mom_idx += 6;
end
# setting layer 3
rn.layer3.layers[1] = load_mat_weights(
rn.layer3.layers[1], conv_w[idx:idx+3], bn_mom[mom_idx:mom_idx+7], bn_b[idx:idx+3], bn_mult[idx:idx+3]
)
idx += 4; mom_idx += 8;
for i in 2:size(rn.layer3.layers, 1)
rn.layer3.layers[i] = load_mat_weights(
rn.layer3.layers[i], conv_w[idx:idx+2], bn_mom[mom_idx:mom_idx+5], bn_b[idx:idx+2], bn_mult[idx:idx+2]
)
idx += 3; mom_idx += 6;
end
# setting layer 4
rn.layer4.layers[1] = load_mat_weights(
rn.layer4.layers[1], conv_w[idx:idx+3], bn_mom[mom_idx:mom_idx+7], bn_b[idx:idx+3], bn_mult[idx:idx+3]
)
idx += 4; mom_idx += 8;
for i in 2:size(rn.layer4.layers, 1)
rn.layer4.layers[i] = load_mat_weights(
rn.layer4.layers[i], conv_w[idx:idx+2], bn_mom[mom_idx:mom_idx+5], bn_b[idx:idx+2], bn_mult[idx:idx+2]
)
idx += 3; mom_idx += 6;
end
# setting layer 5
rn.layer5.layers[1] = load_mat_weights(
rn.layer5.layers[1], conv_w[idx:idx+3], bn_mom[mom_idx:mom_idx+7], bn_b[idx:idx+3], bn_mult[idx:idx+3]
)
idx += 4; mom_idx += 8;
for i in 2:size(rn.layer5.layers, 1)
rn.layer5.layers[i] = load_mat_weights(
rn.layer5.layers[i], conv_w[idx:idx+2], bn_mom[mom_idx:mom_idx+5], bn_b[idx:idx+2], bn_mult[idx:idx+2]
)
idx += 3; mom_idx += 6;
end
return rn
end
function _get_params(path; dtype=Array{Float32})
wr50 = matread(path)
params = wr50["params"]
total_params = size(params["value"], 2)
classes_imagenet = wr50["meta"]["classes"]["description"]
avg_img = wr50["meta"]["normalization"]["averageImage"]
conv_w = []; conv_b = [];
fc_w = nothing; fc_b = nothing;
bn_mom = []; bn_b = []; bn_mult = [];
for k in 1:total_params
name = params["name"][k]
value = convert(dtype, params["value"][k])
if startswith(name, "bn") && endswith(name, "moments")
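# matconvnet stores batch-norm moments as [mean, sigma] columns; sigma
# appears to be sqrt(var + eps), hence the .^2 .- 1e-5 conversion back
# to a variance below.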
push!(bn_mom, reshape(value[:,1], (1,1,size(value,1),1)))
push!(bn_mom, (reshape(value[:,2], (1,1,size(value,1),1)).^2) .- 1e-5)
elseif startswith(name, "bn") && endswith(name, "bias")
push!(bn_b, reshape(value, (1,1,length(value),1)))
elseif startswith(name, "bn")
push!(bn_mult, reshape(value, (1,1,length(value),1)))
elseif startswith(name, "fc") && endswith(name, "filter")
fc_w = reshape(value,(size(value,3),size(value,4)))'
elseif startswith(name, "fc") && endswith(name, "bias")
fc_b = value
elseif endswith(name, "filter")
push!(conv_w, value)
elseif endswith(name, "bias")
conv_b = reshape(value, (1,1,length(value),1))
end
end
return conv_w, conv_b, fc_w, fc_b, bn_mom, bn_b, bn_mult, classes_imagenet
end
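# Minimal usage sketch (non-authoritative; the .mat path is a placeholder
# that assumes MatConvNet's pretrained ResNet-50 weight file, and the input
# layout is HWCN as used throughout this file):
#
#   rn = ResNet50(include_top=true)
#   rn = load_mat_weights(rn, "weights/imagenet-resnet-50-dag.mat")
#   x = rand(Float32, 224, 224, 3, 1)
#   evaluate_model(rn, x)   # prints the top-5 class probabilities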
|
\documentclass[a4paper, 11pt]{article}
\usepackage{lipsum} %This package just generates Lorem Ipsum filler text.
\usepackage{fullpage} % changes the margin
\usepackage{mathpazo}
\usepackage{multicol}
\usepackage{graphicx, float}
\usepackage{enumerate}
\usepackage{pythonhighlight}
\usepackage{booktabs}
\usepackage{listings}
\usepackage[T1]{fontenc}
\usepackage[english]{babel}
\usepackage{amsmath,amsfonts,amsthm} % Math packages
\begin{document}
%Header-Make sure you update this information!!!!
\noindent
\large\textbf{Chapter 2} \hfill \textbf{Siyuan Feng (516030910575)} \\
\normalsize {\bf CS 391 Computer Networking} \hfill ACM Class, Zhiyuan College, SJTU\\
Prof.~{\bf Yanmin Zhu} \hfill Due Date: October 18, 2018\\
TA.~{\bf Haobing Liu} \hfill Submit Date: \today
\section*{P7}
\begin{enumerate}[a.]
\item $t_{total} = TT_l + 3RTT_r$
\item $t_{total} = TT_l + 3RTT_r$ (suppose RTT between other DNS servers is also $RTT_r$)
\item Since the cache exists, the IP address can be sent to the client directly from the local DNS server, so $t_{total} = TT_l$.
\end{enumerate}
\section*{P8}
Suppose the time of client send message to the server and get response is $RTT_0$.
\begin{enumerate}[a.]
\item $T = TT_l + 3RTT_r + 2RTT_0 + 8*2RTT_0 = 18RTT_0 + TT_l + 3RTT_r$
\item $T = TT_l + 3RTT_r + 2RTT_0 + 2*2RTT_0 = 6RTT_0 + TT_l + 3RTT_r$
\item $T = TT_l + 3RTT_r + 2RTT_0 + RTT_0 = 3RTT_0 + TT_l + 3RTT_r$
\end{enumerate}
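\noindent (For (a), non-persistent HTTP with serial connections means each of the 8 referenced objects costs $2RTT_0$ on top of the $2RTT_0$ for the base page. For (b), non-persistent HTTP with up to five parallel connections, the textbook convention, fetches the 8 objects in two rounds of $2RTT_0$. For (c), persistent HTTP with pipelining retrieves all 8 objects in a single additional $RTT_0$.)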
\section*{P14}
Because the destination, whether it is a client or another SMTP server, may be offline or unable to receive the message, the sending server must keep retrying to transmit the message until the destination receives it.
\section*{P18}
\begin{enumerate}[a.]
\item The whois database stores the registered users or assignees of an Internet resource, such as a domain name, an IP address block or an autonomous system.
\item I used $Aliyun WHOIS$ to obtain two DNS servers: $GoogleDNS$ (8.8.8.8) and $OpenDNS$ (208.67.222.222).
\item The screenshot is shown below. \newline \includegraphics[width=\linewidth]{figure}
\item Many popular websites have multiple IP addresses, such as $www.baidu.com$.
\item The IP range of SJTU is $202.120.0.0 - 202.120.63.255$, sourced from $APNIC$.
\item An attacker can use the whois database and the $nslookup$ tool to determine the IP address ranges, DNS server addresses, etc.\ for the target institution.
\item If a victim under attack can analyze the source addresses of the packets, the victim can then use whois to obtain information about the domain from which the attack is coming and possibly inform the administrators of that origin domain.
\end{enumerate}
\section*{P19}
\begin{enumerate}[a.]
\item The following delegation chain is used for www.sjtu.edu.cn
\begin{enumerate}[1)]
\item a.root-servers.net
\item a.dns.cn
\item dns.edu.cn
\item dns.sjtu.edu.cn
\item www.sjtu.edu.cn
\end{enumerate}
\item The following delegation chain is used for www.google.com
\begin{enumerate}[1)]
\item a.gtld-servers.net
\item ns2.google.com
\item www.google.com
\end{enumerate}
\end{enumerate}
\section*{P27}
\begin{enumerate}[a.]
\item If the TCP client runs before the server, the connection will be refused, since establishing a TCP connection requires a handshake with a running server.
\item If the UDP client runs before the server, no error occurs, but the packets sent before the server launches are lost. Once the server is up, communication works normally.
\item If the client tries to connect to the server with the wrong port number, the same situation occurs as when the server launches after the client.
\end{enumerate}
\section*{P30}
\begin{itemize}
\item For an application such as remote login (telnet and ssh), a byte-stream-oriented protocol is very natural, since there is no notion of message boundaries in the application. When a user types a character, we simply drop the character into the TCP connection.
\item In other applications, we may be sending a series of messages that have inherent boundaries between them. For example, one SMTP mail server may send another SMTP mail server several email messages back to back. Since TCP does not have a mechanism to indicate the boundaries, the application must add the indications itself so that the receiving side can distinguish one message from the next; SMTP, for instance, marks the end of a message body with a line containing only a period. If each message were instead put into a distinct UDP segment, the receiving end would be able to distinguish the various messages without any indications added by the sending side of the application.
\end{itemize}
\end{document}
|
// ---------------------------------------------------------------------
// pion: a Boost C++ framework for building lightweight HTTP interfaces
// ---------------------------------------------------------------------
// Copyright (C) 2007-2012 Cloudmeter, Inc. (http://www.cloudmeter.com)
//
// Distributed under the Boost Software License, Version 1.0.
// See http://www.boost.org/LICENSE_1_0.txt
//
#include <pion/config.hpp>
#include <pion/http/response.hpp>
#include <boost/test/unit_test.hpp>
using namespace pion;
class NewHTTPResponse_F : public http::response {
public:
NewHTTPResponse_F() {
}
~NewHTTPResponse_F() {
}
};
BOOST_FIXTURE_TEST_SUITE(NewHTTPResponse_S, NewHTTPResponse_F)
BOOST_AUTO_TEST_CASE(checkClear) {
prepare_headers_for_send(true, false);
BOOST_CHECK(!get_headers().empty());
clear();
BOOST_CHECK(get_headers().empty());
}
BOOST_AUTO_TEST_CASE(checkStatusCodeAccessors) {
set_status_code(http::types::RESPONSE_CODE_NOT_FOUND);
BOOST_CHECK_EQUAL(get_status_code(), http::types::RESPONSE_CODE_NOT_FOUND);
set_status_code(http::types::RESPONSE_CODE_CREATED);
BOOST_CHECK_EQUAL(get_status_code(), http::types::RESPONSE_CODE_CREATED);
}
BOOST_AUTO_TEST_CASE(checkStatusMessageAccessors) {
set_status_message(http::types::RESPONSE_MESSAGE_NOT_FOUND);
BOOST_CHECK_EQUAL(get_status_message(), http::types::RESPONSE_MESSAGE_NOT_FOUND);
set_status_message(http::types::RESPONSE_MESSAGE_CREATED);
BOOST_CHECK_EQUAL(get_status_message(), http::types::RESPONSE_MESSAGE_CREATED);
}
BOOST_AUTO_TEST_CASE(checkSetLastModified) {
set_last_modified(0);
BOOST_CHECK_EQUAL(get_header(HEADER_LAST_MODIFIED), get_date_string(0));
set_last_modified(100000000);
BOOST_CHECK_EQUAL(get_header(HEADER_LAST_MODIFIED), get_date_string(100000000));
set_last_modified(1000000000);
BOOST_CHECK_EQUAL(get_header(HEADER_LAST_MODIFIED), get_date_string(1000000000));
}
BOOST_AUTO_TEST_SUITE_END()
|
[STATEMENT]
lemma cos_0_iff_canon:
assumes "cos \<phi> = 0" and "-pi < \<phi>" and "\<phi> \<le> pi"
shows "\<phi> = pi/2 \<or> \<phi> = -pi/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<phi> = pi / 2 \<or> \<phi> = - pi / 2
[PROOF STEP]
by (smt (verit, best) arccos_0 arccos_cos assms cos_minus divide_minus_left)
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj13eqsynthconj2 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus (plus (mult lv0 lv1) lv1) lv2) (plus lv1 (plus lv2 (mult lv1 lv0)))).
Admitted.
QuickChick conj13eqsynthconj2.
|
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : HasStrictInitialObjects C
I : C
hI : IsInitial I
A : C
f g : A ⟶ I
⊢ f = g
[PROOFSTEP]
haveI := hI.isIso_to f
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : HasStrictInitialObjects C
I : C
hI : IsInitial I
A : C
f g : A ⟶ I
this : IsIso f
⊢ f = g
[PROOFSTEP]
haveI := hI.isIso_to g
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : HasStrictInitialObjects C
I : C
hI : IsInitial I
A : C
f g : A ⟶ I
this✝ : IsIso f
this : IsIso g
⊢ f = g
[PROOFSTEP]
exact eq_of_inv_eq_inv (hI.hom_ext (inv f) (inv g))
[GOAL]
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasStrictInitialObjects C
I X : C
inst✝ : HasBinaryProduct X I
hI : IsInitial I
⊢ X ⨯ I ≅ I
[PROOFSTEP]
have := hI.isIso_to (prod.snd : X ⨯ I ⟶ I)
[GOAL]
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasStrictInitialObjects C
I X : C
inst✝ : HasBinaryProduct X I
hI : IsInitial I
this : IsIso prod.snd
⊢ X ⨯ I ≅ I
[PROOFSTEP]
exact asIso prod.snd
[GOAL]
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasStrictInitialObjects C
I X : C
inst✝ : HasBinaryProduct I X
hI : IsInitial I
⊢ I ⨯ X ≅ I
[PROOFSTEP]
have := hI.isIso_to (prod.fst : I ⨯ X ⟶ I)
[GOAL]
C : Type u
inst✝² : Category.{v, u} C
inst✝¹ : HasStrictInitialObjects C
I X : C
inst✝ : HasBinaryProduct I X
hI : IsInitial I
this : IsIso prod.fst
⊢ I ⨯ X ≅ I
[PROOFSTEP]
exact asIso prod.fst
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : HasInitial C
h : ∀ (A : C) (f : A ⟶ ⊥_ C), IsIso f
I A : C
f : A ⟶ I
hI : IsInitial I
this : IsIso (f ≫ IsInitial.to hI (⊥_ C))
⊢ f ≫ IsInitial.to hI (⊥_ C) ≫ inv (f ≫ IsInitial.to hI (⊥_ C)) = 𝟙 A
[PROOFSTEP]
rw [← assoc, IsIso.hom_inv_id]
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : HasStrictTerminalObjects C
I : C
hI : IsTerminal I
A : C
f g : I ⟶ A
⊢ f = g
[PROOFSTEP]
haveI := hI.isIso_from f
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : HasStrictTerminalObjects C
I : C
hI : IsTerminal I
A : C
f g : I ⟶ A
this : IsIso f
⊢ f = g
[PROOFSTEP]
haveI := hI.isIso_from g
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : HasStrictTerminalObjects C
I : C
hI : IsTerminal I
A : C
f g : I ⟶ A
this✝ : IsIso f
this : IsIso g
⊢ f = g
[PROOFSTEP]
exact eq_of_inv_eq_inv (hI.hom_ext (inv f) (inv g))
[GOAL]
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ IsIso (limit.π F i)
[PROOFSTEP]
classical
refine' ⟨⟨limit.lift _ ⟨_, ⟨_, _⟩⟩, _, _⟩⟩
· exact fun j => dite (j = i) (fun h => eqToHom (by cases h; rfl)) fun h => (H _ h).from _
· intro j k f
split_ifs with h h_1 h_1
· cases h
cases h_1
obtain rfl : f = 𝟙 _ := Subsingleton.elim _ _
simp
· cases h
erw [Category.comp_id]
haveI : IsIso (F.map f) := (H _ h_1).isIso_from _
rw [← IsIso.comp_inv_eq]
apply (H _ h_1).hom_ext
· cases h_1
apply (H _ h).hom_ext
· apply (H _ h).hom_ext
· ext
rw [assoc, limit.lift_π]
dsimp only
split_ifs with h
· cases h
rw [id_comp, eqToHom_refl]
exact comp_id _
· apply (H _ h).hom_ext
· rw [limit.lift_π]
simp
[GOAL]
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ IsIso (limit.π F i)
[PROOFSTEP]
refine' ⟨⟨limit.lift _ ⟨_, ⟨_, _⟩⟩, _, _⟩⟩
[GOAL]
case refine'_1
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ (X : J) → ((Functor.const J).obj (F.toPrefunctor.1 i)).obj X ⟶ F.obj X
[PROOFSTEP]
exact fun j => dite (j = i) (fun h => eqToHom (by cases h; rfl)) fun h => (H _ h).from _
[GOAL]
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j : J
h : j = i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j
[PROOFSTEP]
cases h
[GOAL]
case refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i
[PROOFSTEP]
rfl
[GOAL]
case refine'_2
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ ∀ ⦃X Y : J⦄ (f : X ⟶ Y),
(((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
if h : Y = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj Y = F.obj Y)
else IsTerminal.from (H Y h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj Y)) =
(if h : X = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj X = F.obj X)
else IsTerminal.from (H X h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj X)) ≫
F.map f
[PROOFSTEP]
intro j k f
[GOAL]
case refine'_2
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j k : J
f : j ⟶ k
⊢ (((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
if h : k = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj k = F.obj k)
else IsTerminal.from (H k h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj k)) =
(if h : j = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j)
else IsTerminal.from (H j h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j)) ≫
F.map f
[PROOFSTEP]
split_ifs with h h_1 h_1
[GOAL]
case pos
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j k : J
f : j ⟶ k
h : k = i
h_1 : j = i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj k = F.obj k) =
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j) ≫ F.map f
[PROOFSTEP]
cases h
[GOAL]
case pos.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j : J
h_1 : j = i
f : j ⟶ i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i) =
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j) ≫ F.map f
[PROOFSTEP]
cases h_1
[GOAL]
case pos.refl.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
f : i ⟶ i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i) =
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i) ≫ F.map f
[PROOFSTEP]
obtain rfl : f = 𝟙 _ := Subsingleton.elim _ _
[GOAL]
case pos.refl.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map (𝟙 i) ≫
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i) =
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i) ≫ F.map (𝟙 i)
[PROOFSTEP]
simp
[GOAL]
case neg
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j k : J
f : j ⟶ k
h : k = i
h_1 : ¬j = i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj k = F.obj k) =
IsTerminal.from (H j h_1) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) ≫ F.map f
[PROOFSTEP]
cases h
[GOAL]
case neg.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j : J
h_1 : ¬j = i
f : j ⟶ i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i) =
IsTerminal.from (H j h_1) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) ≫ F.map f
[PROOFSTEP]
erw [Category.comp_id]
[GOAL]
case neg.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j : J
h_1 : ¬j = i
f : j ⟶ i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f =
IsTerminal.from (H j h_1) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) ≫ F.map f
[PROOFSTEP]
haveI : IsIso (F.map f) := (H _ h_1).isIso_from _
[GOAL]
case neg.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j : J
h_1 : ¬j = i
f : j ⟶ i
this : IsIso (F.map f)
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f =
IsTerminal.from (H j h_1) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) ≫ F.map f
[PROOFSTEP]
rw [← IsIso.comp_inv_eq]
[GOAL]
case neg.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j : J
h_1 : ¬j = i
f : j ⟶ i
this : IsIso (F.map f)
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫ inv (F.map f) =
IsTerminal.from (H j h_1) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j)
[PROOFSTEP]
apply (H _ h_1).hom_ext
[GOAL]
case pos
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j k : J
f : j ⟶ k
h : ¬k = i
h_1 : j = i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
IsTerminal.from (H k h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj k) =
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j) ≫ F.map f
[PROOFSTEP]
cases h_1
[GOAL]
case pos.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
k : J
h : ¬k = i
f : i ⟶ k
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
IsTerminal.from (H k h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj k) =
eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i) ≫ F.map f
[PROOFSTEP]
apply (H _ h).hom_ext
[GOAL]
case neg
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j k : J
f : j ⟶ k
h : ¬k = i
h_1 : ¬j = i
⊢ ((Functor.const J).obj (F.toPrefunctor.1 i)).map f ≫
IsTerminal.from (H k h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj k) =
IsTerminal.from (H j h_1) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) ≫ F.map f
[PROOFSTEP]
apply (H _ h).hom_ext
[GOAL]
case refine'_3
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ limit.π F i ≫
limit.lift F
{ pt := F.toPrefunctor.1 i,
π :=
NatTrans.mk fun j =>
if h : j = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j)
else IsTerminal.from (H j h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) } =
𝟙 (limit F)
[PROOFSTEP]
ext
[GOAL]
case refine'_3.w
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j✝ : J
⊢ (limit.π F i ≫
limit.lift F
{ pt := F.toPrefunctor.1 i,
π :=
NatTrans.mk fun j =>
if h : j = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j)
else IsTerminal.from (H j h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) }) ≫
limit.π F j✝ =
𝟙 (limit F) ≫ limit.π F j✝
[PROOFSTEP]
rw [assoc, limit.lift_π]
[GOAL]
case refine'_3.w
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j✝ : J
⊢ limit.π F i ≫
NatTrans.app
{ pt := F.toPrefunctor.1 i,
π :=
NatTrans.mk fun j =>
if h : j = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j)
else IsTerminal.from (H j h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) }.π
j✝ =
𝟙 (limit F) ≫ limit.π F j✝
[PROOFSTEP]
dsimp only
[GOAL]
case refine'_3.w
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j✝ : J
⊢ (limit.π F i ≫
if h : j✝ = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j✝ = F.obj j✝)
else IsTerminal.from (H j✝ h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j✝)) =
𝟙 (limit F) ≫ limit.π F j✝
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j✝ : J
h : j✝ = i
⊢ limit.π F i ≫ eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j✝ = F.obj j✝) =
𝟙 (limit F) ≫ limit.π F j✝
[PROOFSTEP]
cases h
[GOAL]
case pos.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ limit.π F i ≫ eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj i = F.obj i) = 𝟙 (limit F) ≫ limit.π F i
[PROOFSTEP]
rw [id_comp, eqToHom_refl]
[GOAL]
case pos.refl
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ limit.π F i ≫ 𝟙 (((Functor.const J).obj (F.toPrefunctor.1 i)).obj i) = limit.π F i
[PROOFSTEP]
exact comp_id _
[GOAL]
case neg
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
j✝ : J
h : ¬j✝ = i
⊢ limit.π F i ≫ IsTerminal.from (H j✝ h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j✝) =
𝟙 (limit F) ≫ limit.π F j✝
[PROOFSTEP]
apply (H _ h).hom_ext
[GOAL]
case refine'_4
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ limit.lift F
{ pt := F.toPrefunctor.1 i,
π :=
NatTrans.mk fun j =>
if h : j = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j)
else IsTerminal.from (H j h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) } ≫
limit.π F i =
𝟙 (F.obj i)
[PROOFSTEP]
rw [limit.lift_π]
[GOAL]
case refine'_4
C : Type u
inst✝⁴ : Category.{v, u} C
inst✝³ : HasStrictTerminalObjects C
I : C
J : Type v
inst✝² : SmallCategory J
F : J ⥤ C
inst✝¹ : HasLimit F
i : J
H : (j : J) → j ≠ i → IsTerminal (F.obj j)
inst✝ : Subsingleton (i ⟶ i)
⊢ NatTrans.app
{ pt := F.toPrefunctor.1 i,
π :=
NatTrans.mk fun j =>
if h : j = i then eqToHom (_ : ((Functor.const J).obj (F.toPrefunctor.1 i)).obj j = F.obj j)
else IsTerminal.from (H j h) (((Functor.const J).obj (F.toPrefunctor.1 i)).obj j) }.π
i =
𝟙 (F.obj i)
[PROOFSTEP]
simp
[GOAL]
C : Type u
inst✝ : Category.{v, u} C
I : C
h : ∀ (A : C) (f : I ⟶ A), IsIso f
I' A : C
f : I' ⟶ A
hI' : IsTerminal I'
this : IsIso (IsTerminal.from hI' I ≫ f)
⊢ (inv (IsTerminal.from hI' I ≫ f) ≫ IsTerminal.from hI' I) ≫ f = 𝟙 A
[PROOFSTEP]
rw [assoc, IsIso.inv_hom_id]
|
% -------------------------------------------------------- %
% BloomFilter
% by: Isai Barajas Cicourel
% -------------------------------------------------------- %
% Document Start
\section{\textbf{Restricted Bloom Filter}}
% -------------------------------------------------------- %
% Particular Case
\subsection{Particular Case}
\par
In this experiment we implement a Bloom filter to reduce expensive disk lookups for non-existent keys.
If the element is not in the Bloom filter, then we know for sure that we do not need to perform the expensive lookup. On the other hand, if it is in the Bloom filter, we perform the lookup, accepting the occasional false positive.
\par
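A minimal sketch of this read path is shown below; the \textit{Value} type and the \textit{diskLookup()} helper are hypothetical stand-ins for the expensive disk access.
\begin{lstlisting}[frame=single,breaklines=true]
public Value read(Object key) {
    // Definitely absent: skip the expensive disk access entirely.
    if (!filter.contains(key)) {
        return null;
    }
    // Possibly present (false positives are allowed): do the lookup.
    return diskLookup(key);
}
\end{lstlisting}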
% -------------------------------------------------------- %
% Solution Information
\subsection{Solution}
\par
The basic Bloom filter supports two operations, \textit{contains()} and \textit{add()}.
\textit{Contains()} is used to check whether a given element may be in the set; a negative answer is always correct, while a positive answer may be a false positive.
\par
\begin{lstlisting}[frame=single,breaklines=true]
public boolean contains(Object x) {
return filter.get(hash0(x)) && filter.get(hash1(x));
}
\end{lstlisting}
Add simply adds an element to the set. Removal is impossible without introducing false negatives.
\begin{lstlisting}[frame=single,breaklines=true]
public void add(Object x) {
filter.set(hash0(x));
filter.set(hash1(x));
}
\end{lstlisting}
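The two hash functions themselves are not shown in the excerpt above. A minimal sketch of what \textit{hash0()} and \textit{hash1()} could look like is given below; the bodies and the \textit{filterSize} field are hypothetical, and the actual TinyTM implementation may differ.
\begin{lstlisting}[frame=single,breaklines=true]
// Hypothetical sketch: two cheap, roughly independent hashes
// over a filter of filterSize bits.
private int hash0(Object x) {
    return (x.hashCode() & 0x7fffffff) % filterSize;
}
private int hash1(Object x) {
    return ((x.hashCode() * 31 + 17) & 0x7fffffff) % filterSize;
}
\end{lstlisting}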
% -------------------------------------------------------- %
% Experiment
\subsection{Experiment Description}
\par
The test creates three values and adds two of them to the filter. We then check that the filter contains the two added values, and the test fails only if the assertion fails, that is, if the non-existing value is reported as present.
\par
% -------------------------------------------------------- %
% Results
\subsection{Observations and Interpretations}
\par
The tests executed as expected and no errors were found.
\begin{lstlisting}[frame=single,breaklines=true]
Testsuite: TinyTM.locking.BloomFilterTest
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.393 sec
test-single:
BUILD SUCCESSFUL (total time: 0 seconds)
\end{lstlisting}
|
There’s a trick question that rabbis and religious school teachers sometimes like to ask in order to stump their congregants and their students. But I bet there are plenty of you who will get the right answer even though it is sort of tricky. Ready?
Yes, some of you got it – the answer is Shabbat.
Of course, Yom Kippur is very important too. And, our little quiz notwithstanding, we all know very well that Yom Kippur --- and especially the Kol Nidre service that inaugurates it --- is when many synagogues will have their biggest attendance.
What is it that draws us here on this night of all nights?
My sense is that what draws us to shul for Yom Kippur is that this is the day each year when, as a Jewish community, we most powerfully confront our sense of mortality.
Indeed, the language of the ritual confession or “vidui” that is recited at the bedside of a person who is near death is similar to the language of the vidui prayer of the minchah (afternoon service) for Erev Yom Kippur, which I personally recited just a few hours ago.
And the white kittel that I’m wearing to lead services is intended to remind us of a burial shroud.
Kol Nidre night may pull us into shul because it reminds us that we have a limited time on this earth. We can never know with absolute certainty what tomorrow will bring.
So it is today/hayom that we must rededicate ourselves to being better partners with God in the healing and repair of our world; and in the healing and repair of ourselves.
In Hebrew the same word “avodah” means both “worship” and “service.” The two are inextricably linked. Our worship – in prayer and song and public reading of scripture – should lead us to service. That is the message of this awesome day.
I’m hoping we will pay particular attention this year to a verse from the Yom Kippur afternoon Torah portion, Leviticus 19:16 to be precise: There we learn -- “lo ta’amod al dam reyekha”/ “do not stand idly by your neighbor’s blood.” (Lev. 19:16).
The Talmud explains that the commandment “do not stand idly by your neighbor’s blood” means that we must not stand and watch a person die when we can do something to save them.
This is a lesson that too few people understood as the Shoah unfolded in the 1930’s and 40’s. This is a lesson that too few people understood as genocide took place in Rwanda in the 1990’s. And nine years ago on Kol Nidre night I was speaking to my former congregation in Plattsburgh, NY about not standing idly by while the people of Darfur in western Sudan were being massacred by the central government of their own country.
Growing up, and inculcated with the values I absorbed in my own Jewish upbringing, I accepted the notion that the Shoah was totally unique in world history. I no longer agree with that idea. Genocide is genocide, and it makes no difference that in the particular case of the Jewish people during the Nazi era it was the work of white Europeans….
I feel I need to mention this because a congregant who I respect very much challenged me a couple of weeks ago when I first mentioned from the bima that I was opposed to President Obama’s plan to bomb Syria. One of the offhand comments I had made at the time was that this was a Syrian civil war, and that Assad was not attacking or threatening to attack the United States (or Israel for that matter).
But then he said to me: Well, if the Syrians who were being gassed were Jews would that be different? My instinctive, even reflexive, response to him at that moment was that, yes, that would be different because we, as Jews, have a special responsibility to other Jews. So, in that sense, it would be as if we ourselves were, in fact, being attacked so that this would no longer be just a Syrian internal civil conflict. He then responded that our responsibility really should be to all of humanity.
Okay, so let me clarify the reflexive answer I gave to him two weeks ago. And that congregant – he told me I could identify him – was none other than our Temple president Tom Griggs, who I do indeed respect very much.
So I want to state, “for the record” that yes, I do believe that “lo ta’amod al dam reyekha” / “do not stand idly by the blood of your neighbor” DOES apply to all of our neighbors on this planet of ours, and that includes the innocent dead of Syria.
“[…] Congressional delegations, special envoys and humanitarian agencies send back or bring back horror-filled reports from the scene. A million human beings, young and old, have been uprooted, deported. Scores of women are being raped every day, children are dying of disease hunger and violence.
“How can a citizen of a free country not pay attention? How can anyone, anywhere not feel outraged? How can a person, whether religious or secular, not be moved by compassion? And above all, how can anyone who remembers remain silent?
“As a Jew who does not compare any event to the Holocaust, I feel concerned and challenged by the Sudanese tragedy. We must be involved. How can we reproach the indifference of non-Jews to Jewish suffering if we remain indifferent to another people's plight?
“It happened in Cambodia, then in former Yugoslavia, and in Rwanda, now in Sudan. Asia, Europe, Africa: Three continents have become prisons, killing fields and cemeteries for countless innocent, defenseless populations. Will the plague be allowed to spread?
"Lo taamod al dam réakha" is a Biblical commandment. "Thou shall not stand idly by the shedding of the blood of thy fellow man." The word is not "akhikha," thy Jewish brother, but "réakha," thy fellow human being, be he or she Jewish or not. All are entitled to live with dignity and hope. All are entitled to live without fear and pain.
“Not to assist Sudan's victims today would for me be unworthy of what I have learned from my teachers, my ancestors and my friends, namely that God alone is alone: His creatures must not be.
“What pains and hurts me most now is the simultaneity of events. While we sit here and discuss how to behave morally, both individually and collectively, over there, in Darfur and elsewhere in Sudan, human beings kill and die.
“Should the Sudanese victims feel abandoned and neglected, it would be our fault - and perhaps our guilt.
“That's why we must intervene.
The bloodshed has been going on in Syria for two years already --- or decades – if you want to think of the larger picture of the violent repressive nature of the State led now by Bashar Assad and formerly by his father Hafez Assad.
But even if we limit ourselves to the current civil conflict there, we are faced with the fact that atrocities are being committed on both sides, and great moral ambiguity exists regarding the pros and cons of any particular solutions to the conflict. What we do know is that over 100,000 Syrians have died in the last two years of fighting, and that MILLIONS of Syrians have fled the country as refugees.
And the question remains: What is to be done?
But on this Yom Kippur 5774, I’m sensing other ways in which today feels like Yom Ke-Purim/ A Day Like Purim. In Megillat Esther there are dizzying twists and turns of plot---- so too this High Holiday season. We’ve gone from the brink of attacking Syria to a new attempt at diplomacy in ways we would not have anticipated less than a week ago. And, depending on your interpretation of recent events, President Obama and Secretary of State Kerry have been as inept as King Achashverosh or as astute as Mordechai in their handling of the situation.
But that’s the subject of another sermon….
In any event, all the Purimesque plot twists of the past couple of weeks have been particularly challenging for rabbis like me who have been trying to write our High Holiday sermons.
My first reaction when President Obama started pushing the idea of attacking Syria was impatience with him for getting tripped up in a “red line” of his own making. But then again, I too find myself tripped up by my own self-imposed “red line” of wanting to do a current-events oriented talk from the bima tonight.
In truth, I often feel that way when trying to make connections between Jewish teachings, liturgy and scripture and political issues of the day. From my perspective, Judaism is multi-voiced and the Jewish people are politically diverse. And my general sense is that any of you can read the New York Times or listen to NPR just as well as I can. And this week in particular, I’ve been dealing with data overload – TMI – too much information --- as I’ve been seizing onto every new blog post, news report and op-ed essay to try to help me understand what’s going on in Syria and what should be done about it. It has gotten to the point where I have so many news articles and listserve posts from colleagues weighing in on Syria that all these resources have just blurred together and have pretty much become useless to me. So, I’ll just have to try to give you an impressionistic account of my thinking on the subject and see if I can try to couch it in some Jewish teaching appropriate for the holiday.
Viscerally, I’m reminded of the events of 9-11 and their aftermath. We marked the anniversary of that awful day just yesterday. Back then I found myself glued to media reports of the ongoing rush of new developments in the same way that I’ve been doing so in the last couple of weeks about the possibility of the US attacking Syria.
The attacks on New York City and Washington, DC and the plane crash in Shanksville, Pennsylvania that was headed to Washington --- took place 12 years ago, but we still, as a nation, carry within ourselves the trauma of that day. If we are reluctant to confront Assad militarily over his use of chemical weapons, the reluctance comes in part from our knowing that Assad’s opponents include significant numbers of extremists allied to the terrorist group that targeted us on 9-11.
And our reluctance comes from remembering the costly and destabilizing legacy of the Iraq War that President George W. Bush instigated in large part by using the events of 9-11 as a phony pretext.
And our reluctance comes from remembering how that war was sold to us as a limited action and so we don’t trust the current administration when it says it too wants authorization only for a limited action.
And we’re confused about what this supposedly limited action is supposed to entail --- more than a pinprick but less than a regime change --- with no real sense of what that would mean in practice.
And how can it not turn out to be a slippery slope to being fully mired in a civil war that is not our own?
And our reluctance comes from knowing that there are so many “nation building” needs right here at home that compete for our attention and our dollars.
What is there to say? My first instinct has been the same as that of many others, both Republicans and Democrats: that President Obama’s plan to attack Syria has been more about proving our toughness than about achieving cogent goals. Assad is a brutal dictator, but a US attack on his forces would be an act of aggression against a nation that has neither attacked us nor threatened to attack us.
All for what? To defend a “rule of warfare” not to use chemical weapons. Yes, the President made a poignant case on national television and radio Wednesday night about the horrors of poison gas. But to my mind he didn’t manage to make a sensible case for how this is qualitatively worse than the horrors of conventional warfare that has killed over 100 times as many Syrians. And to my mind he didn’t make a convincing case as to how adding American attacks to the mix would increase the likelihood of peace or even how it would decrease the likelihood of Assad using chemical weapons if in fact we’re NOT looking to oust Assad altogether and we’re not attempting to destroy the chemical weapons themselves – for we know that any such attempt would simply release them into the air and cause the very harms we’re trying to prevent.
I’m very thankful that there is now a reasonable possibility that the current impasse over chemical weapons and red lines will be resolved without sending American bombs into the midst of the Syrian Civil War. The Russians have given us a diplomatic way out. I’m sure that President Putin’s motives include a general desire to increase Russian influence in the World in general and the Middle East in particular. But, however Machiavellian his motives may be, my gut sense is that --- yes – he, with his Foreign Minister Sergey Lavrov are very much serving the cause of peace.
And I give a lot of credit to both President Obama and Secretary of State Kerry for switching course to give the diplomatic effort the best possible shot at success.
Of course, a number of pundits have suggested that this course change suits President Obama just fine. The resolution of support he was seeking in Congress was poised to fail, and there is that suspicion all around that he was backed into a corner by his own red lines.
Whatever the motivations, and whether all this was planned or serendipitous -- Let us hope and pray for the success of these diplomatic efforts.
And further, let us hope and pray for the welfare of all of the innocent civilians caught in the crossfire.
Meanwhile, there are various organizations working to assist Syrian refugees. Many of us just a few months ago attended a fundraising dinner organized by the Islamic Center of the Twin Ports and the Unitarian Universalist Congregation of Duluth for Syrian refugee relief.
Better than bombs, we can still contribute to such organizations as: CARE, Unicef, Doctors Without Borders or other groups that are trying to address what the World Health Organization has called the worst humanitarian crisis in the world today. I hope you will consider doing so as I have personally done earlier this week.
Just remember that even if Syria signs on to the international convention of chemical weapons, and even if those weapons are put under international control and destroyed --- that doesn’t mean that the Syrian crisis will have been resolved. Far from it.
Psalm 34, one of the biblical passages that we often include in our Shabbat morning service charges us:בַּקֵּשׁ שָׁלוֹם וְרָדְפֵהוּ. – Seek peace and chase after it. May those who are pursuing peace be crowned with success in their efforts.
And, in the meantime, amidst all the work that needs to be done in our own city, our own state, and our own country, may we nevertheless not stand idly by as the crisis that engulfs Syria continues to unfold.
Gmar chatimah tovah v’tzom kal/ I wish you all a good sealing in the Book of Life, and an easy fast.
|
```python
"""
================================
Data pre-processing
Angle correction
================================
(See the project documentation for more info)
The goal is to process data before using it to train ML algorithms :
1. Extraction of accelerations for activity 1 (rest activity)
2. Transitory regime suppression on activity 1
3. Calculation of theta angle between Z' and Z (ground's normal axis)
4. System rotation towards Z earth axis
5. Offset removal
Note that the solver fails to find a solution at step 3
"""
print(__doc__)
```
================================
Data pre-processing
Angle correction
================================
(See the project documentation for more info)
The goal is to process data before using it to train ML algorithms :
1. Extraction of accelerations for activity 1 (rest activity)
2. Transitory regime suppression on activity 1
3. Calculation of theta angle between Z' and Z (ground's normal axis)
4. System rotation towards Z earth axis
5. Offset removal
Note that the solver fails to find a solution at step 3
```python
# Imports statements
import pandas as pd
import numpy as np
# from math import cos, sin
from utils.colorpalette import black, red, blue, green, yellow, pink, brown, violet
from utils.activities import activities_labels
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
```
```python
# Import data into memory
raw_data = pd.read_csv('../data/1.csv',header=None,delimiter=',').astype(int)
raw_data.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0.0</td>
<td>1502</td>
<td>2215</td>
<td>2153</td>
<td>1</td>
</tr>
<tr>
<th>1</th>
<td>1.0</td>
<td>1667</td>
<td>2072</td>
<td>2047</td>
<td>1</td>
</tr>
<tr>
<th>2</th>
<td>2.0</td>
<td>1611</td>
<td>1957</td>
<td>1906</td>
<td>1</td>
</tr>
<tr>
<th>3</th>
<td>3.0</td>
<td>1601</td>
<td>1939</td>
<td>1831</td>
<td>1</td>
</tr>
<tr>
<th>4</th>
<td>4.0</td>
<td>1643</td>
<td>1965</td>
<td>1879</td>
<td>1</td>
</tr>
</tbody>
</table>
</div>
```python
# Prepare further plotting activities
color_map = np.array([black, red, blue, green, yellow, pink, brown, violet])
axe_name = ['X', 'Y', 'Z']
activities = np.array(raw_data[4]-1) # -1 is here to shift the table correctly to work with the color_map
x_min, x_max = raw_data[0].min() - 1, raw_data[0].max() + 1
```
```python
# Show data before processing
y_min, y_max, xx, yy, fig, subplot = [], [], [], [], [], []
legend = []
for activity, color in zip(activities_labels, color_map):
legend.append(Line2D([0], [0], marker='o', label=activity, ls='None', markerfacecolor=color, markeredgecolor='k'))
for k in range(0,3):
y_min.append(raw_data[k+1].min() - 1)
y_max.append(raw_data[k+1].max() + 1)
xx_tmp, yy_tmp = np.meshgrid(np.arange(x_min, x_max, 1000),np.arange(y_min[k], y_max[k], 100))
xx.append(xx_tmp)
yy.append(yy_tmp)
fig.append(plt.figure())
subplot.append(fig[k].add_subplot(111))
subplot[k].scatter(raw_data[0], raw_data[k+1], s=1,c=color_map[activities])
subplot[k].set_title('Acceleration on ' + axe_name[k])
legend = plt.legend(handles=legend, loc='upper center', bbox_to_anchor=(1, 2), title='Activities')
plt.show()
```
```python
#Prepare for processing
clean_data = []
clean_data.append(raw_data[0])
```
```python
# Transitory regime suppression on activity 1
np_raw_data = np.array(raw_data, dtype=object)
bool_mask_on_act_1 = np_raw_data[:, 4] == 1 # Boolean mask to only select rows concerning activity 1
bool_mask_on_permanent_regime = (np_raw_data[:, 0] >= 3200) & (np_raw_data[:, 0] <= 16000)
act_1_data_permanent_regime = np_raw_data[bool_mask_on_act_1 & bool_mask_on_permanent_regime]
```
```python
# Show activity 1 data after transitory regime suppression on activity 1
activities = np.array(act_1_data_permanent_regime[:,4]-1, dtype=int) # -1 is here to shift the table correctly to work with the color_map
x_min, x_max = act_1_data_permanent_regime[0].min() - 1, act_1_data_permanent_regime[0].max() + 1
y_min, y_max, xx, yy, fig, subplot = [], [], [], [], [], []
legend = []
for activity, color in zip(activities_labels, color_map):
legend.append(Line2D([0], [0], marker='o', label=activity, ls='None', markerfacecolor=color, markeredgecolor='k'))
for k in range(0,3):
y_min.append(act_1_data_permanent_regime[k+1].min() - 1)
y_max.append(act_1_data_permanent_regime[k+1].max() + 1)
xx_tmp, yy_tmp = np.meshgrid(np.arange(x_min, x_max, 1000),np.arange(y_min[k], y_max[k], 100))
xx.append(xx_tmp)
yy.append(yy_tmp)
fig.append(plt.figure())
subplot.append(fig[k].add_subplot(111))
subplot[k].scatter(act_1_data_permanent_regime[:,0], act_1_data_permanent_regime[:,k+1], s=1,c=color_map[activities])
subplot[k].set_title('Acceleration on ' + axe_name[k])
legend = plt.legend(handles=legend, loc='upper center', bbox_to_anchor=(1, 2), title='Activities')
plt.show()
```
```python
index_mean, xp_mean, yp_mean, zp_mean, activity_mean = act_1_data_permanent_regime.mean(axis=0)
```
```python
# Look for theta value :
from sympy.solvers import solve
from sympy import Symbol, sin, cos
from math import sqrt
index_mean, xp_mean, yp_mean, zp_mean, activity_mean = act_1_data_permanent_regime.mean(axis=0)
abs_gamma_mean = sqrt(xp_mean**2+yp_mean**2+zp_mean**2)
theta = Symbol('theta')
equation = sin(theta) * yp_mean + cos(theta) * zp_mean - abs_gamma_mean
theta_solutions = solve(equation, theta, dict=True)
# TODO : Find a way that this equation returns results !
```
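A hedged alternative, assuming the intent of step 3 is the rotation about the X axis that aligns the mean acceleration with the Z axis: that angle has a closed form via `atan2`, so no symbolic solver is needed.
```python
# Closed-form sketch (geometric assumption, not from the original notebook):
# rotating by theta = atan2(yp_mean, zp_mean) about X sends the mean
# (yp_mean, zp_mean) component onto the Z axis.
from math import atan2

theta_numeric = atan2(yp_mean, zp_mean)
print(theta_numeric)
```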
```python
# System rotation towards Z earth axis - see the report for documentation
# Assumes the numeric angle theta_numeric from the closed-form sketch above;
# np.cos/np.sin keep the matrix numeric rather than symbolic.
rotation_matrix = np.array([[1, 0, 0],
                            [0, np.cos(theta_numeric), -np.sin(theta_numeric)],
                            [0, np.sin(theta_numeric), np.cos(theta_numeric)]])
for row_index in range(len(raw_data)):
    Gamma_xp_yp_zp = raw_data.iloc[row_index, 1:4].to_numpy(dtype=float)
    Gamma_x_y_z = np.matmul(rotation_matrix, Gamma_xp_yp_zp)
    raw_data.iloc[row_index, 1:4] = Gamma_x_y_z
```
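For reference, a vectorized sketch of the same rotation, applied to all rows at once; run it instead of the loop above, not in addition to it.
```python
# Vectorized equivalent of the row-by-row loop: for row-stacked vectors A,
# the rotated rows are A @ R.T.
acc = raw_data[[1, 2, 3]].to_numpy(dtype=float)
raw_data[[1, 2, 3]] = acc @ rotation_matrix.T
```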
```python
# Offset suppression
# TODO :
# Should we really delete the offset, though? Maybe it just corresponds to gravity, so the change of coordinate system should come first!
# At rest, Gamma is expected to be 1 g, but it is measured at around 3.7 g.
# So there may indeed be offsets, but along which direction?
mean_acc_by_act = raw_data[[1, 2, 3, 4]].groupby([4], as_index=False).mean().sort_values(by=4, ascending=True)
mean_acc_at_act_1 = mean_acc_by_act.iloc[0] # Offset is calculated at rest (activity 1)
for k in range(1,4):
clean_data.append(raw_data[k] - mean_acc_at_act_1[k])
```
```python
# Show changes after offset suppression
legend = []
for activity, color in zip(activities_labels, color_map):
legend.append(Line2D([0], [0], marker='o', label=activity, ls='None', markerfacecolor=color, markeredgecolor='k'))
y_min, y_max, xx, yy, fig, subplot = [], [], [], [], [], []
for k in range(0,3):
y_min.append(clean_data[k+1].min() - 1)
y_max.append(clean_data[k+1].max() + 1)
xx_tmp, yy_tmp = np.meshgrid(np.arange(x_min, x_max, 1000),np.arange(y_min[k], y_max[k], 100))
xx.append(xx_tmp)
yy.append(yy_tmp)
fig.append(plt.figure())
subplot.append(fig[k].add_subplot(111))
subplot[k].scatter(clean_data[0], clean_data[k+1], s=1,c=color_map[activities])
subplot[k].set_title('Acceleration on ' + axe_name[k])
legend = plt.legend(handles=legend, loc='upper center', bbox_to_anchor=(1, 2), title='Activities')
plt.show()
```
```python
# Push data changes into new csv file
df = pd.DataFrame(clean_data)
df.to_csv("../../data/cleaned_data/projected_on_z_axis_1.csv",index=False)
```
|
[STATEMENT]
lemma cfifi:"f^-1 O f^-1 \<subseteq> f^-1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f\<inverse> O f\<inverse> \<subseteq> f\<inverse>
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
fix x::"'a\<times>'a"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
assume "x \<in> f^-1 O f^-1"
[PROOF STATE]
proof (state)
this:
x \<in> f\<inverse> O f\<inverse>
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> f\<inverse> O f\<inverse>
[PROOF STEP]
obtain p q z where x:"x = (p, q)" and "(p,z) \<in> f^-1" and "(z,q) \<in> f^-1"
[PROOF STATE]
proof (prove)
using this:
x \<in> f\<inverse> O f\<inverse>
goal (1 subgoal):
1. (\<And>p q z. \<lbrakk>x = (p, q); (p, z) \<in> f\<inverse>; (z, q) \<in> f\<inverse>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x = (p, q)
(p, z) \<in> f\<inverse>
(z, q) \<in> f\<inverse>
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
from \<open>(p,z) \<in> f^-1\<close>
[PROOF STATE]
proof (chain)
picking this:
(p, z) \<in> f\<inverse>
[PROOF STEP]
obtain k l u where kp:"k\<parallel>p" and kl:"k\<parallel>l" and lz:"l\<parallel>z" and pu:"p\<parallel>u" and zu:"z\<parallel>u"
[PROOF STATE]
proof (prove)
using this:
(p, z) \<in> f\<inverse>
goal (1 subgoal):
1. (\<And>k l u. \<lbrakk>k \<parallel> p; k \<parallel> l; l \<parallel> z; p \<parallel> u; z \<parallel> u\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using f
[PROOF STATE]
proof (prove)
using this:
(p, z) \<in> f\<inverse>
((?p, ?q) \<in> f) = (\<exists>k l u. k \<parallel> l \<and> l \<parallel> ?p \<and> ?p \<parallel> u \<and> k \<parallel> ?q \<and> ?q \<parallel> u)
goal (1 subgoal):
1. (\<And>k l u. \<lbrakk>k \<parallel> p; k \<parallel> l; l \<parallel> z; p \<parallel> u; z \<parallel> u\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
k \<parallel> p
k \<parallel> l
l \<parallel> z
p \<parallel> u
z \<parallel> u
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
from \<open>(z,q) \<in> f^-1\<close>
[PROOF STATE]
proof (chain)
picking this:
(z, q) \<in> f\<inverse>
[PROOF STEP]
obtain k' u' l' where kpz:"k'\<parallel>z" and kplp:"k'\<parallel>l'" and lpq:"l'\<parallel>q" and qup:"q\<parallel>u'" and zup:"z\<parallel>u'"
[PROOF STATE]
proof (prove)
using this:
(z, q) \<in> f\<inverse>
goal (1 subgoal):
1. (\<And>k' l' u'. \<lbrakk>k' \<parallel> z; k' \<parallel> l'; l' \<parallel> q; q \<parallel> u'; z \<parallel> u'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using f
[PROOF STATE]
proof (prove)
using this:
(z, q) \<in> f\<inverse>
((?p, ?q) \<in> f) = (\<exists>k l u. k \<parallel> l \<and> l \<parallel> ?p \<and> ?p \<parallel> u \<and> k \<parallel> ?q \<and> ?q \<parallel> u)
goal (1 subgoal):
1. (\<And>k' l' u'. \<lbrakk>k' \<parallel> z; k' \<parallel> l'; l' \<parallel> q; q \<parallel> u'; z \<parallel> u'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
k' \<parallel> z
k' \<parallel> l'
l' \<parallel> q
q \<parallel> u'
z \<parallel> u'
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
from zu zup pu
[PROOF STATE]
proof (chain)
picking this:
z \<parallel> u
z \<parallel> u'
p \<parallel> u
[PROOF STEP]
have "p\<parallel>u'"
[PROOF STATE]
proof (prove)
using this:
z \<parallel> u
z \<parallel> u'
p \<parallel> u
goal (1 subgoal):
1. p \<parallel> u'
[PROOF STEP]
using M1
[PROOF STATE]
proof (prove)
using this:
z \<parallel> u
z \<parallel> u'
p \<parallel> u
\<lbrakk>?p \<parallel> ?q; ?p \<parallel> ?s; ?r \<parallel> ?q\<rbrakk> \<Longrightarrow> ?r \<parallel> ?s
goal (1 subgoal):
1. p \<parallel> u'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
p \<parallel> u'
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
from lz kpz kplp
[PROOF STATE]
proof (chain)
picking this:
l \<parallel> z
k' \<parallel> z
k' \<parallel> l'
[PROOF STEP]
have "l\<parallel>l'"
[PROOF STATE]
proof (prove)
using this:
l \<parallel> z
k' \<parallel> z
k' \<parallel> l'
goal (1 subgoal):
1. l \<parallel> l'
[PROOF STEP]
using M1
[PROOF STATE]
proof (prove)
using this:
l \<parallel> z
k' \<parallel> z
k' \<parallel> l'
\<lbrakk>?p \<parallel> ?q; ?p \<parallel> ?s; ?r \<parallel> ?q\<rbrakk> \<Longrightarrow> ?r \<parallel> ?s
goal (1 subgoal):
1. l \<parallel> l'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
l \<parallel> l'
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
with kl lpq
[PROOF STATE]
proof (chain)
picking this:
k \<parallel> l
l' \<parallel> q
l \<parallel> l'
[PROOF STEP]
obtain ll where "k\<parallel>ll" and "ll\<parallel>q"
[PROOF STATE]
proof (prove)
using this:
k \<parallel> l
l' \<parallel> q
l \<parallel> l'
goal (1 subgoal):
1. (\<And>ll. \<lbrakk>k \<parallel> ll; ll \<parallel> q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using M5exist_var
[PROOF STATE]
proof (prove)
using this:
k \<parallel> l
l' \<parallel> q
l \<parallel> l'
\<lbrakk>?x \<parallel> ?y; ?y \<parallel> ?z; ?z \<parallel> ?w\<rbrakk> \<Longrightarrow> \<exists>t. ?x \<parallel> t \<and> t \<parallel> ?w
goal (1 subgoal):
1. (\<And>ll. \<lbrakk>k \<parallel> ll; ll \<parallel> q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
k \<parallel> ll
ll \<parallel> q
goal (1 subgoal):
1. \<And>x. x \<in> f\<inverse> O f\<inverse> \<Longrightarrow> x \<in> f\<inverse>
[PROOF STEP]
with kp \<open>p\<parallel>u'\<close> qup
[PROOF STATE]
proof (chain)
picking this:
k \<parallel> p
p \<parallel> u'
q \<parallel> u'
k \<parallel> ll
ll \<parallel> q
[PROOF STEP]
show "x \<in> f^-1"
[PROOF STATE]
proof (prove)
using this:
k \<parallel> p
p \<parallel> u'
q \<parallel> u'
k \<parallel> ll
ll \<parallel> q
goal (1 subgoal):
1. x \<in> f\<inverse>
[PROOF STEP]
using x f
[PROOF STATE]
proof (prove)
using this:
k \<parallel> p
p \<parallel> u'
q \<parallel> u'
k \<parallel> ll
ll \<parallel> q
x = (p, q)
((?p, ?q) \<in> f) = (\<exists>k l u. k \<parallel> l \<and> l \<parallel> ?p \<and> ?p \<parallel> u \<and> k \<parallel> ?q \<and> ?q \<parallel> u)
goal (1 subgoal):
1. x \<in> f\<inverse>
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
x \<in> f\<inverse>
goal:
No subgoals!
[PROOF STEP]
qed
|
Formal statement is: lemma setdist_eq_infdist: "setdist A B = (if A = {} then 0 else INF a\<in>A. infdist a B)" Informal statement is: The distance between two sets A and B is 0 when A is empty, and otherwise equals the infimum, over the points a of A, of the distance from a to B.
|
##### Dataprev 2016 public exam final results, area 312RJ
install.packages("SnowballC")
install.packages("tm") # only need to do once
install.packages("pdftools")
library(tm)
library(pdftools)
data_raw=pdf_text("edital_divulgao_resultado_final_20_02-2017_-_dataprev.pdf")
data_raw[2]
#https://github.com/ropensci/tabulizer
#https://www.r-bloggers.com/extracting-tables-from-pdfs-in-r-using-the-tabulizer-package/
install.packages("ghit")
install.packages("plyr")
# on Linux
##("https://cran.r-project.org/src/contrib/stringi_1.1.5.tar.gz", type="source")
#install.packages("stringr")
#install.packages("evaluate")
#install.packages("knitr")
install.packages("rJava")
ghit::install_github(c("ropensci/tabulizerjars", "ropensci/tabulizer"), INSTALL_opts = "--no-multiarch")
library("tabulizer")
library(plyr)
#f <- system.file("edital_divulgao_resultado_final_20_02-2017_-_dataprev.pdf", package = "tabulizer")
#dataprev_array <- extract_tables("data/edital_divulgao_resultado_final_20_02-2017_-_dataprev.pdf", pages = c(1050:1082))
load("data/dataprev.RData")
summary(dataprev_array[1])
# pages with extraction errors:
# 8, 29, 30, 31, 32, 33
dataprev_list=do.call("rbind",dataprev_array[c(1:7,9:28)])
#rm(dataprev_array)
dataprev=as.data.frame(dataprev_list,stringsAsFactors = F)
#rm(dataprev_list)
names(dataprev)
nome=c("INSCRICAO","NOME","DOCUMENTO", "NASCIM.", "LP_ACERTO","LP_NOTA","LEST_ACERTO","LEST_NOTA","CG_ACERTO", "CG_NOTA", "CE_ACERTO","CE_NOTA","AC.TOT.","NOTA P.O.","TIT.","RED.","NT.GER.","CLASS","SITUACAO")
names(dataprev)=nome
summary(dataprev)
indicies=which(dataprev$LP_ACERTO=='')
dataprev[indicies,'LP_ACERTO']='0'
dataprev[indicies,'LP_NOTA']='0,0'
dataprev$LP_ACERTO=as.numeric(dataprev$LP_ACERTO)
dataprev$LP_NOTA=gsub(",", ".", dataprev$LP_NOTA)
dataprev$LP_NOTA=as.numeric(dataprev$LP_NOTA)
indicies=which(dataprev$LEST_ACERTO=='')
dataprev[indicies,'LEST_ACERTO']='0'
dataprev[indicies,'LEST_NOTA']='0,0'
dataprev$LEST_ACERTO=as.numeric(dataprev$LEST_ACERTO)
dataprev$LEST_NOTA=gsub(",", ".", dataprev$LEST_NOTA)
dataprev$LEST_NOTA=as.numeric(dataprev$LEST_NOTA)
indicies=which(dataprev$CG_ACERTO=='')
dataprev[indicies,'CG_ACERTO']='0'
dataprev[indicies,'CG_NOTA']='0,0'
dataprev$CG_ACERTO=as.numeric(dataprev$CG_ACERTO)
dataprev$CG_NOTA=as.numeric(gsub(",", ".", dataprev$CG_NOTA))
indicies=which(dataprev$CE_ACERTO=='')
dataprev[indicies,'CE_ACERTO']='0'
dataprev[indicies,'CE_NOTA']='0,0'
dataprev$CE_ACERTO=as.numeric(dataprev$CE_ACERTO)
dataprev$CE_NOTA=as.numeric(gsub(",", ".", dataprev$CE_NOTA))
indicies=which(dataprev$AC.TOT.=='')
dataprev[indicies,'AC.TOT.']='0'
dataprev$AC.TOT.=as.numeric(gsub(",", ".",dataprev$AC.TOT.))
indicies=which(dataprev$`NOTA P.O.`=='')
dataprev[indicies,'NOTA P.O.']='0'
dataprev$`NOTA P.O.`=as.numeric(gsub(",", ".",dataprev$`NOTA P.O.`))
indicies=which(dataprev$TIT.=='')
dataprev[indicies,'TIT.']=NA
dataprev$TIT.=gsub(",", ".",dataprev$TIT.)
dataprev$TIT.=gsub("NE", NA,dataprev$TIT.)
dataprev$TIT.=as.numeric(gsub(",", ".",dataprev$TIT.))
indicies=which(dataprev$RED.=='')
dataprev[indicies,'RED.']='0'
dataprev$RED.=as.numeric(gsub(",", ".",dataprev$RED.))
indicies=which(dataprev$NT.GER.=='')
dataprev[indicies,'NT.GER.']='0'
dataprev$NT.GER.=as.numeric(gsub(",", ".",dataprev$NT.GER.))
indicies=which(dataprev$CLASS=='')
dataprev[indicies,'CLASS']=NA
dataprev$CLASS=as.numeric(gsub("o", "",dataprev$CLASS))
rm(indicies)
rm(nome)
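# Hedged refactor sketch: the blank-to-"0" / comma-to-dot / as.numeric pattern
# above repeats for every score column; a small helper would remove the duplication.
to_numeric_score <- function(column, blank_value = "0") {
  column[column == ""] <- blank_value
  as.numeric(gsub(",", ".", column))
}
# Example, equivalent to the RED. block above:
# dataprev$RED. <- to_numeric_score(dataprev$RED.)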
dataprev_habilitados=dataprev[which(dataprev$SITUACAO=='Habilitado'),]
dataprev_habilitados[which(dataprev_habilitados$DOCUMENTO==""),]
dataprev_habilitados[which(dataprev_habilitados$INSCRICAO=="22090023645"),c('NOME','DOCUMENTO')]=c('RODRIGO AUGUSTO DE OLIVEIRA PAES BORGES BIONE','0122706393')
dataprev_habilitados[which(dataprev_habilitados$INSCRICAO=="22090068021"),c('NOME','DOCUMENTO')]=c('RODRIGO CAETANO FILGUEIRA','13386104')
dataprev_habilitados[which(dataprev_habilitados$INSCRICAO=="22090029777"),c('NOME','DOCUMENTO')]=c('RODRIGO GOMES MARCELO','219011319')
dataprev_habilitados[which(dataprev_habilitados$INSCRICAO=="22090061216"),c('NOME','DOCUMENTO')]=c('RODRIGO S MORAES','97679539')
dataprev_habilitados[which(dataprev_habilitados$DOCUMENTO==""),]
dataprev_habilitados$CHAMADO=''
summary(dataprev_habilitados)
hist(dataprev_habilitados$NT.GER.)
dataprev_habilitados[which(dataprev_habilitados$INSCRICAO=="22090029777"),]
dataprev_habilitados[which(dataprev_habilitados$INSCRICAO=="22090029777"),'CHAMADO']='03/02/2017'
#https://portal.dataprev.gov.br/situacao-concursados/2016
#https://portal.dataprev.gov.br/situacao-concursados/2016?field_candidato_value=&field_cargo_value=&field_perfil_value=&field_lotacao_value=&field_cadastro_reserva_value=ADMITIDO&field_inscricao_value=&field_cpf_value=&op-concurso=Pesquisar
|
open import Relation.Binary.PropositionalEquality using (_≡_; refl; subst)
open import Data.Sum
import SingleSorted.AlgebraicTheory as SS
module SingleSorted.Combinators where
module Sum {𝓈} (Σ₁ Σ₂ : SS.Signature) (T₁ : SS.Theory 𝓈 Σ₁) (T₂ : SS.Theory 𝓈 Σ₂) where
-- disjoint sum of signatures
S : SS.Signature
S = record { oper = SS.Signature.oper Σ₁ ⊎ SS.Signature.oper Σ₂
; oper-arity = [ SS.Signature.oper-arity Σ₁ , SS.Signature.oper-arity Σ₂ ]
}
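-- terms over each summand signature embed into terms over the sum signature S
-- by tagging every operation with inj₁ (respectively inj₂)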
inj-term-l : ∀ {Γ : SS.Context} → SS.Signature.Term Σ₁ Γ → SS.Signature.Term S Γ
inj-term-l {Γ} (SS.Signature.tm-var x) = SS.Signature.tm-var x
inj-term-l {Γ} (SS.Signature.tm-oper f ts) = SS.Signature.tm-oper (inj₁ f) λ{ i → inj-term-l (ts i)}
inj-term-r : ∀ {Γ : SS.Context} → SS.Signature.Term Σ₂ Γ → SS.Signature.Term S Γ
inj-term-r {Γ} (SS.Signature.tm-var x) = SS.Signature.tm-var x
inj-term-r {Γ} (SS.Signature.tm-oper f ts) = SS.Signature.tm-oper (inj₂ f) λ{ i → inj-term-r (ts i)}
coerce₁ : SS.Signature.Equation Σ₁ → SS.Signature.Equation S
coerce₁ eq = record { eq-ctx = SS.Signature.Equation.eq-ctx eq
; eq-lhs = inj-term-l (SS.Signature.Equation.eq-lhs eq)
; eq-rhs = inj-term-l (SS.Signature.Equation.eq-rhs eq)
}
coerce₂ : SS.Signature.Equation Σ₂ → SS.Signature.Equation S
coerce₂ eq = record { eq-ctx = SS.Signature.Equation.eq-ctx eq
; eq-lhs = inj-term-r (SS.Signature.Equation.eq-lhs eq)
; eq-rhs = inj-term-r (SS.Signature.Equation.eq-rhs eq)
}
-- define a theory with the set of axioms a union of the axioms of both theories
T : SS.Theory 𝓈 S
T = record { ax = SS.Theory.ax T₁ ⊎ SS.Theory.ax T₂
; ax-eq = [ (λ a → coerce₁ (SS.Theory.ax-eq T₁ a)) , (λ a → coerce₂ (SS.Theory.ax-eq T₂ a)) ]
}
|
theory flash111Rev imports flashPub
begin
section{*Main definitions*}
lemma NI_FAckVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_FAck ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma NI_InvVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Inv iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_InvAck_1VsInv111:
(*Rule2VsPInv0*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv111 ) (NI_InvAck_1 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1 a2 a3,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_InvAck_1_HomeVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_InvAck_1_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_InvAck_2VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_InvAck_2 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_GetXVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_GetX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_Nak1VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_Nak2VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_Nak2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_Nak3VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_Nak3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX1VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX1 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX2VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX2 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX3VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX3 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX4VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX4 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX5VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX5 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX6VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX6 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX7VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX7 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX8VsInv111:
(*Rule2VsPInv0*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX8 N iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1 a2 a3,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX8_homeVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX8_home N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX9VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX9 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX10VsInv111:
(*Rule2VsPInv0*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX10 N iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1 a2 a3,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX10_homeVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX10_home N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_GetX_PutX11VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_GetX_PutX11 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_Get_GetVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_Get_Get iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_Get_Nak1VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_Get_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_Get_Nak2VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_Get_Nak2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_Get_Nak3VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_Get_Nak3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_Get_Put1VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_Get_Put1 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_Get_Put2VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_Get_Put2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_Get_Put3VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Local_Get_Put3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Local_PutVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_Local_Put ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply( auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_Local_PutXAcksDoneVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_Local_PutXAcksDone ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply( auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_NakVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Nak iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Nak_ClearVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_Nak_Clear ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma NI_Nak_HomeVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_Nak_Home ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply( auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_Remote_GetX_NakVsInv111:
(*Rule2VsPInv0*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv111 ) (NI_Remote_GetX_Nak iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1 a2 a3,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Remote_GetX_Nak_HomeVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Remote_GetX_Nak_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply(cut_tac a1 , auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_Remote_GetX_PutXVsInv111:
(*Rule2VsPInv0*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv111 ) (NI_Remote_GetX_PutX iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1 a2 a3,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Remote_GetX_PutX_HomeVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Remote_GetX_PutX_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply(cut_tac a1 , auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_Remote_Get_Nak1VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Remote_Get_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply(cut_tac a1 , auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_Remote_Get_Nak2VsInv111:
(*Rule2VsPInv0*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv111 ) (NI_Remote_Get_Nak2 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1 a2 a3,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Remote_Get_Put1VsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Remote_Get_Put1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply(cut_tac a1 , auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_Remote_Get_Put2VsInv111:
(*Rule2VsPInv0*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv111 ) (NI_Remote_Get_Put2 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1 a2 a3,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Remote_PutVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Remote_Put iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Remote_PutXVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Remote_PutX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_ReplaceVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_Replace iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_ReplaceHomeVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_ReplaceHome ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma NI_ReplaceHomeShrVldVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_ReplaceHomeShrVld ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma NI_ReplaceShrVldVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (NI_ReplaceShrVld iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_ShWbVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_ShWb N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma NI_WbVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (NI_Wb ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma PI_Local_GetX_GetX1VsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_GetX_GetX1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply( auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_GetX_GetX2VsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_GetX_GetX2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply( auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_GetX_PutX1VsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_GetX_PutX1 N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P3 s"
apply( simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' Home) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' Home) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_GetX_PutX2VsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_GetX_PutX2 N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma PI_Local_GetX_PutX3VsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_GetX_PutX3 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P3 s"
apply( simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' Home) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' Home) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_GetX_PutX4VsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_GetX_PutX4 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P3 s"
apply( simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' Home) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' Home) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_Get_GetVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_Get_Get ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply( auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_Get_PutVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_Get_Put ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P3 s"
apply( simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''procCmd'' Home) ) ( Const NODE_None )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' Home) ) ( Const UNI_GetX )) ) ) " in exI,auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_PutXVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_PutX ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma PI_Local_ReplaceVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (PI_Local_Replace ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
lemma PI_Remote_GetVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (PI_Remote_Get iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma PI_Remote_GetXVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (PI_Remote_GetX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma PI_Remote_PutXVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (PI_Remote_PutX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma PI_Remote_ReplaceVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (PI_Remote_Replace iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma StoreVsInv111:
(*Rule1VsPInv0*)
assumes a1:"iRule1 \<le> N"
shows "invHoldForRule' s (inv111 ) (Store iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by (cut_tac a1,auto )
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma StoreHomeVsInv111:
(*Rule0VsPInv0*)
shows "invHoldForRule' s (inv111 ) (StoreHome ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by( auto)
end
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* This software may be distributed and modified according to the terms of
* the GNU General Public License version 2. Note that NO WARRANTY is provided.
* See "LICENSE_GPLv2.txt" for details.
*
* @TAG(GD_GPL)
*)
theory Finalise_AI
imports
"./$L4V_ARCH/ArchIpcCancel_AI"
"./$L4V_ARCH/ArchInterruptAcc_AI"
"./$L4V_ARCH/ArchRetype_AI"
begin
definition
fst_cte_ptrs :: "cap \<Rightarrow> cslot_ptr set"
where
"fst_cte_ptrs cap \<equiv> (case cap of
cap.CNodeCap r bits guard \<Rightarrow> {(r, replicate bits False)}
| cap.ThreadCap r \<Rightarrow> {(r, tcb_cnode_index 0)}
| cap.Zombie r zb n \<Rightarrow> {(r, replicate (zombie_cte_bits zb) False)}
| _ \<Rightarrow> {})"
context begin interpretation Arch .
requalify_consts
vs_cap_ref
unmap_page
clearMemory
requalify_facts
final_cap_lift
no_irq_clearMemory
valid_global_refsD
valid_global_refsD2
end
locale Finalise_AI_1 =
fixes state_ext_type1 :: "('a :: state_ext) itself"
fixes state_ext_type2 :: "('b :: state_ext) itself"
assumes replaceable_cdt_update[simp]:
"\<And>f (s :: 'a state). replaceable (cdt_update f s) = replaceable s"
assumes replaceable_revokable_update[simp]:
"\<And> f (s :: 'a state).replaceable (is_original_cap_update f s) = replaceable s"
assumes replaceable_more_update[simp]:
"\<And> (f :: 'a \<Rightarrow> 'b) (s :: 'a state) sl cap cap'. replaceable (trans_state f s) sl cap cap'
= replaceable s sl cap cap'"
assumes obj_ref_ofI: "\<And> cap x. obj_refs cap = {x} \<Longrightarrow> obj_ref_of cap = x"
assumes empty_slot_invs:
"\<And>sl irqopt. \<lbrace>\<lambda> (s :: 'a state). invs s \<and> cte_wp_at (replaceable s sl cap.NullCap) sl s \<and>
emptyable sl s \<and>
(\<forall>irq. irqopt = Some irq \<longrightarrow>
cap.IRQHandlerCap irq \<notin>
ran ((caps_of_state s) (sl \<mapsto> cap.NullCap)))\<rbrace>
empty_slot sl irqopt
\<lbrace>\<lambda>rv. invs\<rbrace>"
assumes dom_tcb_cap_cases_lt:
"dom tcb_cap_cases = {xs. length xs = 3 \<and> unat (of_bl xs :: machine_word) < 5}"
assumes unbind_notification_final[wp]:
"\<And> cap t.\<lbrace>is_final_cap' cap :: 'a state \<Rightarrow> bool\<rbrace>
unbind_notification t
\<lbrace> \<lambda>rv. is_final_cap' cap\<rbrace>"
assumes deleting_irq_handler_final:
"\<And> cap slot irq. \<lbrace>(is_final_cap' cap :: 'a state \<Rightarrow> bool) and cte_wp_at (op = cap) slot
and K (\<not> can_fast_finalise cap)\<rbrace>
deleting_irq_handler irq
\<lbrace>\<lambda>rv. is_final_cap' cap\<rbrace>"
assumes finalise_cap_cases1:
"\<And> cap final slot. \<lbrace>\<lambda>(s :: 'a state). final \<longrightarrow> is_final_cap' cap s
\<and> cte_wp_at (op = cap) slot s\<rbrace>
finalise_cap cap final
\<lbrace>\<lambda>rv (s :: 'a state). fst rv = cap.NullCap
\<and> snd rv = (if final then cap_irq_opt cap else None)
\<and> (snd rv \<noteq> None \<longrightarrow> is_final_cap' cap s)
\<or>
is_zombie (fst rv) \<and> is_final_cap' cap s
\<and> snd rv = None
\<and> appropriate_cte_cap (fst rv) = appropriate_cte_cap cap
\<and> cte_refs (fst rv) = cte_refs cap
\<and> obj_refs (fst rv) = obj_refs cap
\<and> obj_size (fst rv) = obj_size cap
\<and> cap_irqs (fst rv) = cap_irqs cap
\<and> fst_cte_ptrs (fst rv) = fst_cte_ptrs cap
\<and> vs_cap_ref cap = None\<rbrace>"
assumes arch_finalise_cap_typ_at[wp]:
"\<And> P T p a b.
\<lbrace>\<lambda>(s :: 'a state). P (typ_at T p s)\<rbrace> arch_finalise_cap a b \<lbrace>\<lambda>_ s. P (typ_at T p s)\<rbrace>"
assumes prepare_thread_delete_typ_at[wp]:
"\<And> P T p a.
\<lbrace>\<lambda>(s :: 'a state). P (typ_at T p s)\<rbrace> prepare_thread_delete a \<lbrace>\<lambda>_ s. P (typ_at T p s)\<rbrace>"
assumes finalise_cap_new_valid_cap[wp]:
"\<And> cap x. \<lbrace>valid_cap cap :: 'a state \<Rightarrow> bool\<rbrace> finalise_cap cap x \<lbrace>\<lambda>rv. valid_cap (fst rv)\<rbrace>"
assumes arch_finalise_cap_invs[wp]:
"\<And> cap final.\<lbrace>invs and (valid_cap (ArchObjectCap cap) :: 'a state \<Rightarrow> bool)\<rbrace>
arch_finalise_cap cap final
\<lbrace>\<lambda>rv. invs\<rbrace>"
assumes obj_at_not_live_valid_arch_cap_strg:
"\<And>(s :: 'a state) cap r. (s \<turnstile> ArchObjectCap cap \<and> aobj_ref cap = Some r)
\<longrightarrow> obj_at (\<lambda>ko. \<not> live ko) r s"
assumes deleting_irq_handler_slot_not_irq_node:
"\<And> irq sl.
\<lbrace>if_unsafe_then_cap and valid_global_refs
and cte_wp_at (\<lambda>cp. cap_irqs cp \<noteq> {}) sl\<rbrace>
deleting_irq_handler irq
\<lbrace>\<lambda>rv (s :: 'a state). (interrupt_irq_node s irq, []) \<noteq> sl\<rbrace>"
assumes no_cap_to_obj_with_diff_ref_finalI:
"\<And>p (s :: 'a state) cap cap'. \<lbrakk> cte_wp_at (op = cap) p s; is_final_cap' cap s;
obj_refs cap' = obj_refs cap \<rbrakk>
\<Longrightarrow> no_cap_to_obj_with_diff_ref cap' {p} s"
assumes suspend_no_cap_to_obj_ref[wp]:
"\<And> S t cap.
\<lbrace>no_cap_to_obj_with_diff_ref cap S :: 'a state \<Rightarrow> bool\<rbrace>
suspend t
\<lbrace>\<lambda>rv. no_cap_to_obj_with_diff_ref cap S\<rbrace>"
assumes finalise_cap_replaceable:
"\<And> cap x sl.
\<lbrace>\<lambda>(s :: 'a state). s \<turnstile> cap \<and> x = is_final_cap' cap s \<and> valid_mdb s
\<and> cte_wp_at (op = cap) sl s \<and> valid_objs s \<and> sym_refs (state_refs_of s)
\<and> (cap_irqs cap \<noteq> {} \<longrightarrow> if_unsafe_then_cap s \<and> valid_global_refs s)
\<and> (is_arch_cap cap \<longrightarrow> pspace_aligned s \<and>
valid_arch_objs s \<and>
valid_arch_state s)\<rbrace>
finalise_cap cap x
\<lbrace>\<lambda>rv s. replaceable s sl (fst rv) cap\<rbrace>"
assumes deleting_irq_handler_cte_preserved:
"\<And> P p irq.\<lbrakk> \<And>cap. P cap \<Longrightarrow> \<not> can_fast_finalise cap \<rbrakk>
\<Longrightarrow> \<lbrace>cte_wp_at P p\<rbrace>
deleting_irq_handler irq :: 'a state \<Rightarrow> (unit \<times> 'a state) set \<times> bool
\<lbrace>\<lambda>rv. cte_wp_at P p\<rbrace>"
assumes arch_finalise_cap_cte_wp_at[wp]:
"\<And> P P' p a b.
\<lbrace>\<lambda>(s :: 'a state). P (cte_wp_at P' p s)\<rbrace> arch_finalise_cap a b \<lbrace>\<lambda>_ s. P (cte_wp_at P' p s)\<rbrace>"
assumes prepare_thread_delete_cte_wp_at[wp]:
"\<And> P p a.
\<lbrace>\<lambda>(s :: 'a state). P (cte_wp_at P' p s)\<rbrace> prepare_thread_delete a \<lbrace>\<lambda>_ s. P (cte_wp_at P' p s)\<rbrace>"
assumes prepare_thread_delete_caps_of_state:
"\<And>P t. \<lbrace>\<lambda>(s :: 'a state). P (caps_of_state s)\<rbrace> prepare_thread_delete t \<lbrace>\<lambda>_ s. P (caps_of_state s)\<rbrace>"
text {* Properties about empty_slot *}
definition
"halted_if_tcb \<equiv> \<lambda>t s. tcb_at t s \<longrightarrow> st_tcb_at halted t s"
lemma halted_emptyable:
"\<And>ref. halted_if_tcb t s \<Longrightarrow> emptyable (t, ref) s"
by (simp add: halted_if_tcb_def emptyable_def)
lemma tcb_cap_valid_NullCapD:
"\<And>cap sl. \<lbrakk> tcb_cap_valid cap sl s; \<not> is_master_reply_cap cap \<rbrakk> \<Longrightarrow>
tcb_cap_valid cap.NullCap sl s"
apply (clarsimp simp: tcb_cap_valid_def valid_ipc_buffer_cap_def
elim!: pred_tcb_weakenE split: option.splits)
apply (rename_tac get set restr)
apply (subgoal_tac "(get, set, restr) \<in> ran tcb_cap_cases")
apply (fastforce simp: ran_tcb_cap_cases is_cap_simps
split: Structures_A.thread_state.split)
apply (simp add: ranI)
done
lemma emptyable_valid_NullCapD:
"\<lbrakk> emptyable sl s; valid_objs s \<rbrakk> \<Longrightarrow> tcb_cap_valid cap.NullCap sl s"
apply (clarsimp simp: emptyable_def tcb_cap_valid_def
valid_ipc_buffer_cap_def)
apply (clarsimp simp: pred_tcb_at_def obj_at_def is_tcb split: option.split)
apply (erule(1) valid_objsE)
apply (clarsimp simp: valid_obj_def valid_tcb_def tcb_cap_cases_def
split: Structures_A.thread_state.split)
done
lemma emptyable_valid_NullCap_strg:
"emptyable sl s \<and> valid_objs s \<longrightarrow> tcb_cap_valid cap.NullCap sl s"
by (simp add: emptyable_valid_NullCapD)
lemma tcb_cap_valid_pspaceI[intro]:
"\<lbrakk> tcb_cap_valid cap sl s; kheap s = kheap s' \<rbrakk> \<Longrightarrow> tcb_cap_valid cap sl s'"
by (clarsimp simp: tcb_cap_valid_def obj_at_def pred_tcb_at_def)
crunch valid_objs[wp]: deleted_irq_handler "valid_objs"
lemma emptyable_rvk[simp]:
"emptyable sl (is_original_cap_update f s) = emptyable sl s"
by (simp add: emptyable_def)
lemma set_cdt_emptyable[wp]:
"\<lbrace>emptyable sl\<rbrace> set_cdt m \<lbrace>\<lambda>rv. emptyable sl\<rbrace>"
by (simp add: set_cdt_def emptyable_def | wp)+
lemma emptyable_more_update[simp]:
"emptyable sl (trans_state f s) = emptyable sl s"
by (simp add: emptyable_def)
lemma tcb_cp_valid_trans_state_update[simp]: "tcb_cap_valid cap sl
(trans_state f s) = tcb_cap_valid cap sl s"
apply (simp add: tcb_cap_valid_def)
done
lemma empty_slot_valid_objs[wp]:
"\<lbrace>valid_objs and emptyable sl\<rbrace> empty_slot sl irqopt \<lbrace>\<lambda>rv. valid_objs\<rbrace>"
apply (simp add: empty_slot_def)
apply (rule hoare_pre)
apply (wp set_cap_valid_objs set_cdt_valid_objs set_cdt_valid_cap
| simp add: trans_state_update[symmetric] del: trans_state_update| wpcw
| strengthen emptyable_valid_NullCap_strg
| wp_once hoare_drop_imps)+
done
lemmas empty_slot_valid_cap[wp] = valid_cap_typ [OF empty_slot_typ_at]
locale mdb_empty_abs = vmdb_abs +
fixes slot
fixes n::cdt
defines "n \<equiv> (\<lambda>p. (if m p = Some slot then m slot else m p)) (slot := None)"
lemma (in mdb_empty_abs) parency:
"n \<Turnstile> p \<rightarrow> p' = (p \<noteq> slot \<and> p' \<noteq> slot \<and> m \<Turnstile> p \<rightarrow> p')"
proof
assume n: "n \<Turnstile> p \<rightarrow> p'"
from n
have "p \<noteq> slot"
by (clarsimp dest!: tranclD simp: n_def cdt_parent_of_def
split: if_split_asm)
moreover
from n
have "p' \<noteq> slot"
by (clarsimp dest!: tranclD2 simp: n_def cdt_parent_of_def )
moreover
from n
have "m \<Turnstile> p \<rightarrow> p'"
proof induct
case (base x)
thus ?case
apply (clarsimp simp: cdt_parent_of_def n_def split: if_split_asm)
apply (rule trancl_trans)
apply (fastforce simp: cdt_parent_of_def)+
done
next
case (step y z)
thus ?case
apply (clarsimp simp: cdt_parent_of_def n_def split: if_split_asm)
apply (erule trancl_trans)
apply (rule trancl_trans)
apply (fastforce simp: cdt_parent_of_def)
apply (fastforce simp: cdt_parent_of_def)
apply (erule trancl_trans)
apply (fastforce simp: cdt_parent_of_def)
done
qed
ultimately
show "p \<noteq> slot \<and> p' \<noteq> slot \<and> m \<Turnstile> p \<rightarrow> p'" by simp
next
assume asm: "p \<noteq> slot \<and> p' \<noteq> slot \<and> m \<Turnstile> p \<rightarrow> p'"
from asm have p: "p \<noteq> slot" ..
from asm have p': "p' \<noteq> slot" by simp
from asm
have m: "m \<Turnstile> p \<rightarrow> p'" by simp
hence neq: "p \<noteq> p'" by clarsimp
from m
have "if p' = slot then
\<exists>p''. (p, p'') \<in> (cdt_parent_rel m)^* \<and> m \<Turnstile> p'' \<leadsto> p' \<and> (p, p'') \<in> (cdt_parent_rel n)^*
else
n \<Turnstile> p \<rightarrow> p'"
proof induct
case (base y)
thus ?case
apply (clarsimp simp: cdt_parent_of_def simp del: split_paired_Ex)
apply (fastforce simp: cdt_parent_of_def n_def p)
done
next
case (step y z)
thus ?case
apply (clarsimp simp: cdt_parent_of_def simp del: split_paired_Ex)
apply (rule conjI)
apply (clarsimp simp del: split_paired_Ex)
apply (cases "y = slot", simp)
apply fastforce
apply (clarsimp simp del: split_paired_Ex)
apply (cases "y = slot")
apply (simp del: split_paired_Ex)
apply (elim exE conjE)
apply (drule rtranclD [where R="cdt_parent_rel n"])
apply (erule disjE)
apply simp
apply (rule r_into_trancl)
apply (clarsimp simp: cdt_parent_of_def n_def)
apply clarsimp
apply (erule trancl_trans)
apply (fastforce simp: cdt_parent_of_def n_def)
apply simp
apply (erule trancl_trans)
apply (fastforce simp: cdt_parent_of_def n_def)
done
qed
with p'
show "n \<Turnstile> p \<rightarrow> p'" by simp
qed
lemma (in mdb_empty_abs) descendants:
"descendants_of p n =
(if p = slot then {} else descendants_of p m - {slot})"
by (auto simp add: descendants_of_def parency)
lemma (in mdb_empty_abs) no_mloop_n:
"no_mloop n"
by (simp add: no_mloop_def parency)
lemma final_mdb_update[simp]:
"is_final_cap' cap (cdt_update f s) = is_final_cap' cap s"
by (clarsimp simp: is_final_cap'_def2)
lemma no_cap_to_obj_with_diff_cdt_update[simp]:
"no_cap_to_obj_with_diff_ref cap S (cdt_update f s)
= no_cap_to_obj_with_diff_ref cap S s"
by (simp add: no_cap_to_obj_with_diff_ref_def)
lemma no_cap_to_obj_with_diff_rvk_update[simp]:
"no_cap_to_obj_with_diff_ref cap S (is_original_cap_update f s)
= no_cap_to_obj_with_diff_ref cap S s"
by (simp add: no_cap_to_obj_with_diff_ref_def)
lemma zombies_final_cdt_update[simp]:
"zombies_final (cdt_update f s) = zombies_final s"
by (fastforce elim!: zombies_final_pspaceI)
lemma opt_deleted_irq_handler_invs:
"\<lbrace>\<lambda>s. invs s \<and> (\<forall>irq. opt = Some irq \<longrightarrow> cap.IRQHandlerCap irq \<notin> ran (caps_of_state s))\<rbrace>
case opt of Some irq \<Rightarrow> deleted_irq_handler irq | _ \<Rightarrow> return ()
\<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: deleted_irq_handler_def cong: option.case_cong)
apply (rule hoare_pre)
apply (wp | wpc)+
apply clarsimp
done
lemma emptyable_no_reply_cap:
assumes e: "emptyable sl s"
and mdb: "reply_caps_mdb (mdb s) (caps_of_state s)"
and vr: "valid_reply_caps s"
and vm: "valid_reply_masters s"
and vo: "valid_objs s"
and rc: "caps_of_state s sl' = Some (cap.ReplyCap t False)"
and rp: "mdb s sl' = Some sl"
shows "False"
proof -
have rm:
"caps_of_state s sl = Some (cap.ReplyCap t True)"
using mdb rc rp unfolding reply_caps_mdb_def
by fastforce
have tcb_slot:
"sl = (t, tcb_cnode_index 2)"
using vm rm unfolding valid_reply_masters_def
by (fastforce simp: cte_wp_at_caps_of_state)
have tcb_halted:
"st_tcb_at halted t s"
using vo rm tcb_slot e unfolding emptyable_def
by (fastforce dest: caps_of_state_valid_cap simp: valid_cap_def)
have tcb_not_halted:
"st_tcb_at (Not \<circ> halted) t s"
using vr rc unfolding valid_reply_caps_def
by (fastforce simp add: has_reply_cap_def cte_wp_at_caps_of_state
simp del: split_paired_Ex
elim!: pred_tcb_weakenE)
show ?thesis
using tcb_halted tcb_not_halted
by (clarsimp simp: st_tcb_def2)
qed
lemmas (in Finalise_AI_1) obj_ref_ofI' = obj_ref_ofI[OF obj_ref_elemD]
crunch cte_wp_at[wp]: deleted_irq_handler "cte_wp_at P p"
lemma empty_slot_deletes[wp]:
"\<lbrace>\<top>\<rbrace> empty_slot sl opt \<lbrace>\<lambda>rv. cte_wp_at (\<lambda>c. c = cap.NullCap) sl\<rbrace>"
apply (simp add: empty_slot_def)
apply (wp set_cap_sets get_cap_wp opt_return_pres_lift|simp)+
apply (clarsimp elim!: cte_wp_at_weakenE)
done
crunch caps_of_state[wp]: deleted_irq_handler "\<lambda>s. P (caps_of_state s)"
lemma empty_slot_final_cap_at:
"\<lbrace>(\<lambda>s. cte_wp_at (\<lambda>c. obj_refs c \<noteq> {} \<and> is_final_cap' c s) p s) and K (p \<noteq> p')\<rbrace>
empty_slot p' opt \<lbrace>\<lambda>rv s. cte_wp_at (\<lambda>c. is_final_cap' c s) p s\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: empty_slot_def final_cap_at_eq cte_wp_at_conj cte_wp_at_caps_of_state)
apply (wpsimp wp: opt_return_pres_lift get_cap_wp)
done
crunch pred_tcb_at[wp]: empty_slot "pred_tcb_at proj P t"
lemma set_cap_revokable_update:
"((),s') \<in> fst (set_cap c p s) \<Longrightarrow>
((),is_original_cap_update f s') \<in> fst (set_cap c p (is_original_cap_update f s))"
apply (cases p)
apply (clarsimp simp add: set_cap_def in_monad get_object_def)
apply (case_tac y)
apply (auto simp add: in_monad set_object_def split: if_split_asm)
done
lemma set_cap_cdt_update:
"((),s') \<in> fst (set_cap c p s) \<Longrightarrow> ((),cdt_update f s') \<in> fst (set_cap c p (cdt_update f s))"
apply (cases p)
apply (clarsimp simp add: set_cap_def in_monad get_object_def)
apply (case_tac y)
apply (auto simp add: in_monad set_object_def split: if_split_asm)
done
lemma tcb_cap_cases_lt:
"n < 5 \<Longrightarrow> tcb_cap_cases (nat_to_cref 3 n) \<noteq> None"
unfolding tcb_cnode_index_def2[symmetric]
by (simp add: tcb_cap_cases_def
| erule less_handy_casesE)+
lemma cte_refs_CNode_Zombie_helper[simp]:
"{xs. length xs = n \<and> unat (of_bl xs :: word32) < 2 ^ n}
= {xs. length xs = n}"
apply safe
apply (rule unat_of_bl_length)
done
lemma empty_slot_caps_of_state:
"\<lbrace>\<lambda>s. P ((caps_of_state s) (slot \<mapsto> cap.NullCap))\<rbrace>
empty_slot slot opt
\<lbrace>\<lambda>rv s. P (caps_of_state s)\<rbrace>"
apply (simp add: empty_slot_def set_cdt_def)
apply (wp get_cap_wp opt_return_pres_lift | simp)+
apply (clarsimp simp: cte_wp_at_caps_of_state
fun_upd_def[symmetric]
fun_upd_idem)
done
crunch caps_of_state[wp]: cancel_all_ipc "\<lambda>s. P (caps_of_state s)"
(wp: mapM_x_wp' crunch_wps)
crunch caps_of_state[wp]: fast_finalise, unbind_notification "\<lambda>s. P (caps_of_state s)"
(wp: mapM_x_wp' crunch_wps thread_set_caps_of_state_trivial
simp: tcb_cap_cases_def)
lemma cap_delete_one_caps_of_state:
"\<lbrace>\<lambda>s. cte_wp_at can_fast_finalise p s
\<longrightarrow> P ((caps_of_state s) (p \<mapsto> cap.NullCap))\<rbrace>
cap_delete_one p
\<lbrace>\<lambda>rv s. P (caps_of_state s)\<rbrace>"
apply (simp add: cap_delete_one_def unless_def
is_final_cap_def)
apply (rule hoare_seq_ext [OF _ get_cap_sp])
apply (case_tac "can_fast_finalise cap")
apply (wp empty_slot_caps_of_state get_cap_wp)
apply (clarsimp simp: cte_wp_at_caps_of_state
fun_upd_def[symmetric]
fun_upd_idem)
apply (simp add: fast_finalise_def2)
apply wp
apply (clarsimp simp: can_fast_finalise_def)
done
crunch caps_of_state[wp]: blocked_cancel_ipc, cancel_signal
"\<lambda>s. P (caps_of_state s)"
lemma cancel_ipc_caps_of_state:
"\<lbrace>\<lambda>s. (\<forall>p. cte_wp_at can_fast_finalise p s
\<longrightarrow> P ((caps_of_state s) (p \<mapsto> cap.NullCap)))
\<and> P (caps_of_state s)\<rbrace>
cancel_ipc t
\<lbrace>\<lambda>rv s. P (caps_of_state s)\<rbrace>"
apply (simp add: cancel_ipc_def reply_cancel_ipc_def
cong: Structures_A.thread_state.case_cong)
apply (wpsimp wp: cap_delete_one_caps_of_state select_wp)
apply (rule_tac Q="\<lambda>_ s. (\<forall>p. cte_wp_at can_fast_finalise p s
\<longrightarrow> P ((caps_of_state s) (p \<mapsto> cap.NullCap)))
\<and> P (caps_of_state s)"
in hoare_post_imp)
apply (clarsimp simp: fun_upd_def[symmetric] split_paired_Ball)
apply (simp add: cte_wp_at_caps_of_state)
apply (wpsimp wp: hoare_vcg_all_lift hoare_convert_imp thread_set_caps_of_state_trivial
simp: tcb_cap_cases_def)+
prefer 2
apply assumption
apply (rule hoare_strengthen_post [OF gts_sp])
apply (clarsimp simp: fun_upd_def[symmetric] cte_wp_at_caps_of_state)
done
lemma suspend_caps_of_state:
"\<lbrace>\<lambda>s. (\<forall>p. cte_wp_at can_fast_finalise p s
\<longrightarrow> P ((caps_of_state s) (p \<mapsto> cap.NullCap)))
\<and> P (caps_of_state s)\<rbrace>
suspend t
\<lbrace>\<lambda>rv s. P (caps_of_state s)\<rbrace>"
unfolding suspend_def
by (wpsimp wp: cancel_ipc_caps_of_state simp: fun_upd_def[symmetric])
lemma suspend_final_cap:
"\<lbrace>\<lambda>s. is_final_cap' cap s \<and> \<not> can_fast_finalise cap
\<and> cte_wp_at (op = cap) sl s\<rbrace>
suspend t
\<lbrace>\<lambda>rv s. is_final_cap' cap s\<rbrace>"
apply (simp add: is_final_cap'_def2 cte_wp_at_caps_of_state
del: split_paired_Ex split_paired_All)
apply (wp suspend_caps_of_state)
apply (clarsimp simp del: split_paired_Ex split_paired_All)
apply (rule_tac x=sl in exI)
apply (intro allI impI conjI)
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (subgoal_tac "(aa, ba) = sl")
apply clarsimp
apply (frule_tac x="(aa, ba)" in spec)
apply (drule_tac x=sl in spec)
apply clarsimp
done
lemma cap_delete_one_final_cap:
"\<lbrace>\<lambda>s. cte_wp_at (op = cap) slot s
\<and> \<not> can_fast_finalise cap
\<and> is_final_cap' cap s\<rbrace>
cap_delete_one slot'
\<lbrace>\<lambda>rv s. is_final_cap' cap s\<rbrace>"
apply (simp add: is_final_cap'_def2 cte_wp_at_caps_of_state
del: split_paired_All split_paired_Ex)
apply (wp cap_delete_one_caps_of_state)
apply (clarsimp simp: cte_wp_at_caps_of_state
simp del: split_paired_Ex split_paired_All)
apply (subgoal_tac "slot = (a, b)")
apply (rule_tac x=slot in exI)
apply clarsimp
apply (frule_tac x=slot in spec)
apply (drule_tac x="(a, b)" in spec)
apply clarsimp
done
lemma unbind_notification_cte_wp_at[wp]:
"\<lbrace>\<lambda>s. cte_wp_at P slot s\<rbrace> unbind_notification t \<lbrace>\<lambda>rv s. cte_wp_at P slot s\<rbrace>"
by (wp thread_set_cte_wp_at_trivial hoare_drop_imp | wpc | simp add: unbind_notification_def tcb_cap_cases_def)+
lemma (in Finalise_AI_1) finalise_cap_cases:
"\<lbrace>\<lambda>(s :: 'a state). final \<longrightarrow> is_final_cap' cap s
\<and> cte_wp_at (op = cap) slot s\<rbrace>
finalise_cap cap final
\<lbrace>\<lambda>rv (s :: 'a state). fst rv = cap.NullCap
\<and> snd rv = (if final then cap_irq_opt cap else None)
\<and> (snd rv \<noteq> None \<longrightarrow> is_final_cap' cap s)
\<or>
is_zombie (fst rv) \<and> is_final_cap' cap s
\<and> is_final_cap' (fst rv) s
\<and> snd rv = None
\<and> appropriate_cte_cap (fst rv) = appropriate_cte_cap cap
\<and> cte_refs (fst rv) = cte_refs cap
\<and> obj_refs (fst rv) = obj_refs cap
\<and> obj_size (fst rv) = obj_size cap
\<and> cap_irqs (fst rv) = cap_irqs cap
\<and> fst_cte_ptrs (fst rv) = fst_cte_ptrs cap
\<and> vs_cap_ref cap = None\<rbrace>"
apply (rule hoare_strengthen_post,
rule finalise_cap_cases1)
apply (erule disjEI)
apply (auto simp: is_final_cap'_def)
done
lemma is_final_cap'_objrefsE:
"\<lbrakk> is_final_cap' cap s; obj_refs cap = obj_refs cap';
cap_irqs cap = cap_irqs cap' \<rbrakk>
\<Longrightarrow> is_final_cap' cap' s"
by (simp add: is_final_cap'_def)
crunch typ_at[wp]: deleting_irq_handler "\<lambda>s. P (typ_at T p s)"
(wp:crunch_wps simp:crunch_simps unless_def assertE_def)
context Finalise_AI_1 begin
context begin
declare if_cong[cong]
crunch typ_at[wp]: finalise_cap "\<lambda>(s :: 'a state). P (typ_at T p s)"
end
end
lemma valid_cap_Null_ext:
"valid_cap cap.NullCap = \<top>"
by (rule ext) simp
lemma unbind_notification_valid_cap[wp]:
"\<lbrace>valid_cap cap\<rbrace> unbind_notification t \<lbrace>\<lambda>rv. valid_cap cap\<rbrace>"
unfolding unbind_notification_def
by (wp abs_typ_at_lifts hoare_drop_imps | wpc | clarsimp)+
lemma refs_in_ntfn_q_refs:
"(x, ref) \<in> ntfn_q_refs_of ntfn \<Longrightarrow> ref = NTFNSignal"
by (clarsimp simp: ntfn_q_refs_of_def split: ntfn.splits)
lemma ntfn_q_refs_no_TCBSignal:
"(x, TCBSignal) \<notin> ntfn_q_refs_of ntfn"
by (clarsimp simp: ntfn_q_refs_of_def split: ntfn.splits)
lemma tcb_st_refs_no_TCBBound:
"(x, TCBBound) \<notin> tcb_st_refs_of ts"
by (clarsimp simp: tcb_st_refs_of_def split: thread_state.splits)
lemma (in Finalise_AI_1) unbind_maybe_notification_invs:
"\<lbrace>invs\<rbrace> unbind_maybe_notification ntfnptr \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (simp add: unbind_maybe_notification_def invs_def valid_state_def valid_pspace_def)
apply (rule hoare_seq_ext [OF _ get_ntfn_sp])
apply (rule hoare_pre)
apply (wp valid_irq_node_typ set_ntfn_valid_objs | wpc | simp)+
apply safe
defer 3 defer 6
apply (auto elim!: obj_at_weakenE obj_at_valid_objsE if_live_then_nonz_capD2
simp: valid_ntfn_set_bound_None is_ntfn valid_obj_def)[6]
apply (rule delta_sym_refs, assumption)
apply (fastforce simp: obj_at_def is_tcb
dest!: pred_tcb_at_tcb_at ko_at_state_refs_ofD
split: if_split_asm)
apply (clarsimp split: if_split_asm)
apply (subst (asm) ko_at_state_refs_ofD, assumption)
apply (fastforce simp: ntfn_q_refs_no_NTFNBound symreftype_inverse' is_tcb refs_of_rev
dest!: refs_in_ntfn_q_refs)
apply (rule delta_sym_refs, assumption)
apply (clarsimp split: if_split_asm)
apply (subst (asm) ko_at_state_refs_ofD, assumption)
apply (frule refs_in_ntfn_q_refs)
apply (fastforce)
apply (clarsimp split: if_split_asm)
apply (frule_tac P="op = (Some ntfnptr)" in ntfn_bound_tcb_at, simp_all add: obj_at_def)[1]
apply (fastforce simp: ntfn_q_refs_no_NTFNBound tcb_at_no_ntfn_bound tcb_ntfn_is_bound_def
obj_at_def tcb_st_refs_no_TCBBound
dest!: pred_tcb_at_tcb_at bound_tcb_at_state_refs_ofD)
apply (subst (asm) ko_at_state_refs_ofD, assumption)
apply (fastforce simp: ntfn_q_refs_no_NTFNBound symreftype_inverse' is_tcb refs_of_rev
dest!: refs_in_ntfn_q_refs)
done
crunch (in Finalise_AI_1) invs[wp]: fast_finalise "invs"
lemma cnode_at_unlive[elim!]:
"s \<turnstile> cap.CNodeCap ptr bits gd \<Longrightarrow> obj_at (\<lambda>ko. \<not> live ko) ptr s"
by (clarsimp simp: valid_cap_def is_cap_table
elim!: obj_at_weakenE)
lemma set_thread_state_final_cap[wp]:
"\<lbrace>is_final_cap' cap\<rbrace> set_thread_state st t \<lbrace>\<lambda>rv. is_final_cap' cap\<rbrace>"
by (simp add: is_final_cap'_def2 cte_wp_at_caps_of_state, wp)
lemma tcb_cap_valid_imp':
"((\<forall>(get, set, restr)\<in>ran tcb_cap_cases.
\<forall>ptr st. restr ptr st cap \<longrightarrow> restr ptr st newcap)
\<and> (\<forall>ptr. valid_ipc_buffer_cap cap ptr
\<longrightarrow> valid_ipc_buffer_cap newcap ptr))
\<longrightarrow> (tcb_cap_valid cap sl s \<longrightarrow> tcb_cap_valid newcap sl s)"
by (fastforce simp: tcb_cap_valid_def elim!: pred_tcb_weakenE
split: option.split)
lemma tcb_cap_valid_imp_NullCap:
"(\<not> is_master_reply_cap cap)
\<longrightarrow> (tcb_cap_valid cap sl s \<longrightarrow> tcb_cap_valid cap.NullCap sl s)"
apply (strengthen tcb_cap_valid_imp')
apply (clarsimp simp: ran_tcb_cap_cases valid_ipc_buffer_cap_def
split: Structures_A.thread_state.split_asm)
done
lemma a_type_arch_live:
"a_type ko = AArch tp \<Longrightarrow> \<not> live ko"
by (simp add: a_type_def
split: Structures_A.kernel_object.split_asm)
lemma pred_tcb_at_def2:
"pred_tcb_at proj P t \<equiv> \<lambda>s. \<exists>tcb. ko_at (TCB tcb) t s \<and> P (proj (tcb_to_itcb tcb))"
by (rule eq_reflection, rule ext) (fastforce simp: pred_tcb_at_def obj_at_def)
(* sseefried: 'st_tcb_at_def2' only exists to make existing proofs go through. Can use 'pred_tcb_at_def2' instead *)
lemmas st_tcb_at_def2 = pred_tcb_at_def2[where proj=itcb_state,simplified]
lemma imp_and_strg: "Q \<and> C \<longrightarrow> (A \<longrightarrow> Q \<and> C) \<and> C" by blast
(* FIXME: move *)
lemma cases_conj_strg: "A \<and> B \<longrightarrow> (P \<and> A) \<or> (\<not> P \<and> B)"
by simp
(* FIXME: move *)
lemma and_not_not_or_imp: "(~ A & ~ B | C) = ((A | B) \<longrightarrow> C)" by blast
lemmas tcb_cap_valid_imp = mp [OF mp [OF tcb_cap_valid_imp'], rotated]
crunch irq_node[wp]: cancel_all_ipc "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps simp: crunch_simps unless_def)
crunch irq_node[wp]: cancel_all_signals, fast_finalise "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps simp: crunch_simps unless_def)
crunch irq_node[wp]: cap_delete_one "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps simp: crunch_simps unless_def)
lemma deleting_irq_handler_empty:
"\<lbrace>\<top>\<rbrace>
deleting_irq_handler irq
\<lbrace>\<lambda>rv s. cte_wp_at (op = cap.NullCap) (interrupt_irq_node s irq, []) s\<rbrace>"
apply (simp add: deleting_irq_handler_def cte_wp_at_caps_of_state
get_irq_slot_def)
apply (wp hoare_use_eq_irq_node [OF cap_delete_one_irq_node cap_delete_one_caps_of_state])
apply clarsimp
done
lemmas obj_irq_refs_empty2 = trans [OF eq_commute obj_irq_refs_empty]
lemma cnode_zombie_thread_appropriate[simp]:
"appropriate_cte_cap cp (cap.CNodeCap a b c)"
"appropriate_cte_cap cp (cap.ThreadCap f)"
"appropriate_cte_cap cp (cap.Zombie h i j)"
by (simp add: appropriate_cte_cap_def split: cap.splits)+
lemma unbind_notification_not_bound:
"\<lbrace>\<lambda>s. obj_at (\<lambda>ko. \<exists>ntfn. ko = Notification ntfn \<and> ntfn_bound_tcb ntfn = Some tcbptr) ntfnptr s
\<and> valid_objs s \<and> sym_refs (state_refs_of s)\<rbrace>
unbind_notification tcbptr
\<lbrace>\<lambda>_. obj_at (\<lambda>ko. \<exists>ntfn. ko = Notification ntfn \<and> ntfn_bound_tcb ntfn = None) ntfnptr\<rbrace>"
apply (simp add: unbind_notification_def)
apply (rule hoare_pre)
apply (rule hoare_seq_ext[OF _ gbn_wp[where P="\<lambda>ptr _. ptr = (Some ntfnptr)"]])
apply (rule hoare_gen_asm[where P'=\<top>, simplified])
apply (wp sbn_obj_at_impossible set_notification_obj_at | wpc | simp)+
apply (clarsimp simp: obj_at_def)
apply (rule valid_objsE, simp+)
apply (drule_tac P="op = (Some ntfnptr)" in ntfn_bound_tcb_at, simp+)
apply (auto simp: obj_at_def valid_obj_def is_tcb valid_ntfn_def pred_tcb_at_def)
done
lemma unbind_maybe_notification_not_bound:
"\<lbrace>\<lambda>s. ntfn_at ntfnptr s \<and> valid_objs s \<and> sym_refs (state_refs_of s)\<rbrace>
unbind_maybe_notification ntfnptr
\<lbrace>\<lambda>_. obj_at (\<lambda>ko. \<exists>ntfn. ko = Notification ntfn \<and> ntfn_bound_tcb ntfn = None) ntfnptr\<rbrace>"
apply (simp add: unbind_maybe_notification_def)
apply (rule hoare_pre)
apply (wp get_ntfn_wp sbn_obj_at_impossible set_notification_obj_at | wpc | simp)+
apply (clarsimp simp: obj_at_def)
done
lemma unbind_notification_bound_tcb_at[wp]:
"\<lbrace>\<top>\<rbrace> unbind_notification tcbptr \<lbrace>\<lambda>_. bound_tcb_at (op = None) tcbptr\<rbrace>"
apply (simp add: unbind_notification_def)
apply (wpsimp wp: sbn_bound_tcb_at')
apply (rule gbn_bound_tcb[THEN hoare_strengthen_post])
apply clarsimp
apply assumption
done
crunch valid_mdb[wp]: unbind_notification "valid_mdb"
crunch tcb_at[wp]: unbind_notification "tcb_at t"
lemma unbind_notification_no_cap_to_obj_ref[wp]:
"\<lbrace>no_cap_to_obj_with_diff_ref cap S\<rbrace>
unbind_notification tcbptr
\<lbrace>\<lambda>_. no_cap_to_obj_with_diff_ref cap S\<rbrace>"
apply (simp add: no_cap_to_obj_with_diff_ref_def cte_wp_at_caps_of_state)
apply (wp unbind_notification_caps_of_state)
done
lemma empty_slot_cte_wp_elsewhere:
"\<lbrace>(\<lambda>s. cte_wp_at P p s) and K (p \<noteq> p')\<rbrace> empty_slot p' opt \<lbrace>\<lambda>rv s. cte_wp_at P p s\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: empty_slot_def cte_wp_at_caps_of_state)
apply (wp opt_return_pres_lift | simp split del: if_split)+
done
lemma fast_finalise_lift:
assumes ep:"\<And>r. \<lbrace>P\<rbrace>cancel_all_ipc r \<lbrace>\<lambda>r s. P s\<rbrace>"
and ntfn:"\<And>r. \<lbrace>P\<rbrace>cancel_all_signals r \<lbrace>\<lambda>r s. P s\<rbrace>"
and unbind:"\<And>r. \<lbrace>P\<rbrace> unbind_notification r \<lbrace> \<lambda>r s. P s\<rbrace>"
and unbind2: "\<And>r. \<lbrace>P\<rbrace> unbind_maybe_notification r \<lbrace> \<lambda>r s. P s\<rbrace>"
shows "\<lbrace>P\<rbrace> fast_finalise cap final \<lbrace>\<lambda>r s. P s\<rbrace>"
apply (case_tac cap,simp_all)
apply (wp ep ntfn unbind unbind2 hoare_drop_imps | clarsimp | wpc)+
done
crunch cte_wp_at[wp]: fast_finalise "cte_wp_at P p"
(wp:fast_finalise_lift)
lemma cap_delete_one_cte_wp_at_preserved:
assumes x: "\<And>cap flag. P cap \<Longrightarrow> \<not> can_fast_finalise cap"
shows "\<lbrace>cte_wp_at P p\<rbrace> cap_delete_one ptr \<lbrace>\<lambda>rv s. cte_wp_at P p s\<rbrace>"
apply (simp add: cte_wp_at_caps_of_state)
apply (wp cap_delete_one_caps_of_state)
apply (clarsimp simp: cte_wp_at_caps_of_state x)
done
interpretation delete_one_pre
by (unfold_locales, wp cap_delete_one_cte_wp_at_preserved)
lemma (in Finalise_AI_1) finalise_cap_equal_cap[wp]:
"\<lbrace>cte_wp_at (op = cap) sl :: 'a state \<Rightarrow> bool\<rbrace>
finalise_cap cap fin
\<lbrace>\<lambda>rv. cte_wp_at (op = cap) sl\<rbrace>"
apply (cases cap, simp_all split del: if_split)
apply (wp suspend_cte_wp_at_preserved
deleting_irq_handler_cte_preserved
hoare_drop_imp thread_set_cte_wp_at_trivial
| clarsimp simp: can_fast_finalise_def unbind_maybe_notification_def unbind_notification_def
tcb_cap_cases_def
| wpc )+
done
lemma emptyable_lift:
assumes typ_at: "\<And>P T t. \<lbrace>\<lambda>s. P (typ_at T t s)\<rbrace> f \<lbrace>\<lambda>_ s. P (typ_at T t s)\<rbrace>"
assumes st_tcb: "\<And>t. \<lbrace>st_tcb_at halted t\<rbrace> f \<lbrace>\<lambda>_. st_tcb_at halted t\<rbrace>"
shows "\<lbrace>emptyable t\<rbrace> f \<lbrace>\<lambda>_. emptyable t\<rbrace>"
unfolding emptyable_def
apply (subst imp_conv_disj)+
apply (rule hoare_vcg_disj_lift)
apply (simp add: tcb_at_typ)
apply (rule typ_at)
apply (rule st_tcb)
done
crunch emptyable[wp]: set_endpoint, set_notification "emptyable sl"
(wp: emptyable_lift)
lemma sts_emptyable:
"\<lbrace>emptyable sl and st_tcb_at (\<lambda>st. \<not> halted st) t\<rbrace>
set_thread_state t st
\<lbrace>\<lambda>rv. emptyable sl\<rbrace>"
apply (simp add: emptyable_def)
apply (subst imp_conv_disj)+
apply (wp hoare_vcg_disj_lift sts_st_tcb_at_cases | simp add: tcb_at_typ)+
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
done
lemma cancel_all_emptyable_helper:
"\<lbrace>emptyable sl and (\<lambda>s. \<forall>t \<in> set q. st_tcb_at (\<lambda>st. \<not> halted st) t s)\<rbrace>
mapM_x (\<lambda>t. do y \<leftarrow> set_thread_state t Structures_A.Restart;
do_extended_op (tcb_sched_enqueue_ext t) od) q
\<lbrace>\<lambda>rv. emptyable sl\<rbrace>"
apply (rule hoare_strengthen_post)
apply (rule mapM_x_wp [where S="set q", simplified])
apply (wp, simp, wp hoare_vcg_const_Ball_lift sts_emptyable sts_st_tcb_at_cases)
apply simp+
done
lemma unbind_notification_emptyable[wp]:
"\<lbrace> emptyable sl \<rbrace> unbind_notification t \<lbrace> \<lambda>rv. emptyable sl\<rbrace>"
unfolding unbind_notification_def
apply (wp emptyable_lift hoare_drop_imps thread_set_no_change_tcb_state | wpc |simp)+
done
lemma unbind_maybe_notification_emptyable[wp]:
"\<lbrace> emptyable sl \<rbrace> unbind_maybe_notification r \<lbrace> \<lambda>rv. emptyable sl\<rbrace>"
unfolding unbind_maybe_notification_def
apply (wp emptyable_lift hoare_drop_imps thread_set_no_change_tcb_state | wpc |simp)+
done
lemma cancel_all_signals_emptyable[wp]:
"\<lbrace>invs and emptyable sl\<rbrace> cancel_all_signals ptr \<lbrace>\<lambda>_. emptyable sl\<rbrace>"
unfolding cancel_all_signals_def unbind_maybe_notification_def
apply (rule hoare_seq_ext[OF _ get_ntfn_sp])
apply (rule hoare_pre)
apply (wp cancel_all_emptyable_helper
hoare_vcg_const_Ball_lift
| wpc
| simp)+
apply (auto elim: ntfn_queued_st_tcb_at)
done
lemma cancel_all_ipc_emptyable[wp]:
"\<lbrace>invs and emptyable sl\<rbrace> cancel_all_ipc ptr \<lbrace>\<lambda>_. emptyable sl\<rbrace>"
apply (simp add: cancel_all_ipc_def)
apply (rule hoare_seq_ext [OF _ get_endpoint_sp])
apply (case_tac ep, simp_all)
apply (wp, simp)
apply (wp cancel_all_emptyable_helper hoare_vcg_const_Ball_lift
| simp add: get_ep_queue_def
| clarsimp simp: invs_def valid_state_def valid_pspace_def
ep_queued_st_tcb_at)+
done
lemma (in Finalise_AI_1) fast_finalise_emptyable[wp]:
"\<lbrace>invs and emptyable sl\<rbrace> fast_finalise cap fin \<lbrace>\<lambda>rv. emptyable sl\<rbrace>"
apply (simp add: fast_finalise_def2)
apply (case_tac cap, simp_all add: can_fast_finalise_def)
apply (wp unbind_maybe_notification_invs hoare_drop_imps | simp add: o_def | wpc)+
done
locale Finalise_AI_2 = Finalise_AI_1 a b
for a :: "('a :: state_ext) itself"
and b :: "('b :: state_ext) itself" +
assumes cap_delete_one_invs[wp]:
"\<And> ptr. \<lbrace>invs and emptyable ptr\<rbrace> cap_delete_one ptr \<lbrace>\<lambda>rv. invs :: 'a state \<Rightarrow> bool\<rbrace>"
lemma cap_delete_one_deletes[wp]:
"\<lbrace>\<top>\<rbrace> cap_delete_one ptr \<lbrace>\<lambda>rv. cte_wp_at (\<lambda>c. c = cap.NullCap) ptr\<rbrace>"
apply (simp add: cap_delete_one_def unless_def)
apply (wp get_cap_wp)
apply (clarsimp elim!: cte_wp_at_weakenE)
done
context Finalise_AI_2 begin
sublocale delete_one_abs a' for a' :: "('a :: state_ext) itself"
by (unfold_locales; wp cap_delete_one_deletes cap_delete_one_caps_of_state)
end
lemma cap_delete_one_deletes_reply:
"\<lbrace>cte_wp_at (op = (cap.ReplyCap t False)) slot and valid_reply_caps\<rbrace>
cap_delete_one slot
\<lbrace>\<lambda>rv s. \<not> has_reply_cap t s\<rbrace>"
apply (simp add: cap_delete_one_def unless_def is_final_cap_def)
apply wp
apply (rule_tac Q="\<lambda>rv s. \<forall>sl'. if (sl' = slot)
then cte_wp_at (\<lambda>c. c = cap.NullCap) sl' s
else caps_of_state s sl' \<noteq> Some (cap.ReplyCap t False)"
in hoare_post_imp)
apply (clarsimp simp add: has_reply_cap_def cte_wp_at_caps_of_state
simp del: split_paired_All split_paired_Ex
split: if_split_asm elim!: allEI)
apply (rule hoare_vcg_all_lift)
apply simp
apply (wp static_imp_wp empty_slot_deletes empty_slot_caps_of_state get_cap_wp)+
apply (fastforce simp: cte_wp_at_caps_of_state valid_reply_caps_def
is_cap_simps unique_reply_caps_def
simp del: split_paired_All)
done
lemma cap_delete_one_reply_st_tcb_at:
"\<lbrace>pred_tcb_at proj P t and cte_wp_at (op = (cap.ReplyCap t' False)) slot\<rbrace>
cap_delete_one slot
\<lbrace>\<lambda>rv. pred_tcb_at proj P t\<rbrace>"
apply (simp add: cap_delete_one_def unless_def is_final_cap_def)
apply (rule hoare_seq_ext [OF _ get_cap_sp])
apply (rule hoare_assume_pre)
apply (clarsimp simp: cte_wp_at_caps_of_state when_def)
apply wpsimp
done
lemma get_irq_slot_emptyable[wp]:
"\<lbrace>invs\<rbrace> get_irq_slot irq \<lbrace>emptyable\<rbrace>"
apply (rule hoare_strengthen_post)
apply (rule get_irq_slot_real_cte)
apply (clarsimp simp: emptyable_def is_cap_table is_tcb elim!: obj_atE)
done
crunch (in Finalise_AI_2) invs[wp]: deleting_irq_handler "invs :: 'a state \<Rightarrow> bool"
crunch tcb_at[wp]: unbind_notification "tcb_at t"
locale Finalise_AI_3 = Finalise_AI_2 a b
for a :: "('a :: state_ext) itself"
and b :: "('b :: state_ext) itself" +
fixes replaceable_or_arch_update :: "'a state \<Rightarrow> machine_word \<times> bool list \<Rightarrow> cap \<Rightarrow> cap \<Rightarrow> bool"
fixes c :: "'c itself"
assumes finalise_cap_invs:
"\<And> cap slot x.
\<lbrace>invs and cte_wp_at (op = cap) slot\<rbrace>
finalise_cap cap x
\<lbrace>\<lambda>rv. invs :: 'a state \<Rightarrow> bool\<rbrace>"
assumes finalise_cap_irq_node:
"\<And>P a b.
\<lbrace>\<lambda>(s :: 'a state). P (interrupt_irq_node s)\<rbrace>
finalise_cap a b
\<lbrace>\<lambda>_ s. P (interrupt_irq_node s)\<rbrace>"
assumes arch_finalise_cte_irq_node[wp]:
"\<And>P P' p a b.
\<lbrace>\<lambda>(s :: 'a state). P (interrupt_irq_node s)
(cte_wp_at (P' (interrupt_irq_node s)) (p (interrupt_irq_node s)) s)\<rbrace>
arch_finalise_cap a b
\<lbrace>\<lambda>rv s. P (interrupt_irq_node s)
(cte_wp_at (P' (interrupt_irq_node s)) (p (interrupt_irq_node s)) s)\<rbrace>"
assumes deleting_irq_handler_st_tcb_at:
"\<And>P t irq.\<lbrace>st_tcb_at P t and K (\<forall>st. simple st \<longrightarrow> P st)\<rbrace>
deleting_irq_handler irq
\<lbrace>\<lambda>rv. st_tcb_at P t :: 'a state \<Rightarrow> bool\<rbrace>"
assumes irq_node_global_refs:
"\<And>(s :: 'a state) irq. interrupt_irq_node s irq \<in> global_refs s"
assumes get_irq_slot_fast_finalisable[wp]:
"\<And> irq. \<lbrace>invs :: 'a state \<Rightarrow> bool\<rbrace> get_irq_slot irq \<lbrace>cte_wp_at can_fast_finalise\<rbrace>"
assumes replaceable_or_arch_update_same:
"\<And> s slot cap. replaceable_or_arch_update s slot cap cap"
assumes replace_cap_invs_arch_update:
"\<And> cap p. \<lbrace>\<lambda>s. cte_wp_at (replaceable_or_arch_update s p cap) p s
\<and> invs s
\<and> cap \<noteq> cap.NullCap
\<and> ex_cte_cap_wp_to (appropriate_cte_cap cap) p s
\<and> s \<turnstile> cap\<rbrace>
set_cap cap p
\<lbrace>\<lambda>rv s. invs s\<rbrace>"
assumes dmo_tcb_cap_valid:
"\<And>P cap ptr mop.
\<lbrace>\<lambda>(s :: 'a state). P (tcb_cap_valid cap ptr s)\<rbrace>
do_machine_op (mop :: 'c machine_monad)
\<lbrace>\<lambda>_ s. P (tcb_cap_valid cap ptr s)\<rbrace>"
assumes dmo_replaceable_or_arch_update [wp]:
"\<And> slot cap cap' mo.
\<lbrace>\<lambda>s. replaceable_or_arch_update s slot cap cap'\<rbrace>
do_machine_op (mo :: 'c machine_monad)
\<lbrace>\<lambda>r s. replaceable_or_arch_update s slot cap cap'\<rbrace>"
assumes prepare_thread_delete_irq_node[wp]:
"\<And>t. \<lbrace>\<lambda>(s :: 'a state). P (interrupt_irq_node s)\<rbrace>
prepare_thread_delete t
\<lbrace>\<lambda>_ s. P (interrupt_irq_node s)\<rbrace>"
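(* Finalise_AI_3 additionally fixes an architecture-specific replaceability
   relation (replaceable_or_arch_update) and the machine-monad type used by
   do_machine_op, together with the facts about them assumed above. *)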
crunch irq_node[wp]: suspend, unbind_maybe_notification, unbind_notification "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps select_wp simp: crunch_simps)
crunch irq_node[wp]: deleting_irq_handler "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps select_wp simp: crunch_simps)
lemmas cancel_all_ipc_cte_irq_node[wp]
= hoare_use_eq_irq_node [OF cancel_all_ipc_irq_node cancel_all_ipc_cte_wp_at]
lemmas cancel_all_signals_cte_irq_node[wp]
= hoare_use_eq_irq_node [OF cancel_all_signals_irq_node cancel_all_signals_cte_wp_at]
lemmas suspend_cte_irq_node[wp]
= hoare_use_eq_irq_node [OF suspend_irq_node suspend_cte_wp_at_preserved]
lemmas unbind_notification_cte_irq_node[wp]
= hoare_use_eq_irq_node [OF unbind_notification_irq_node unbind_notification_cte_wp_at]
lemmas unbind_maybe_notification_cte_irq_node[wp]
= hoare_use_eq_irq_node [OF unbind_maybe_notification_irq_node unbind_maybe_notification_cte_wp_at]
lemmas (in Finalise_AI_3) deleting_irq_handler_cte_preserved_irqn
= hoare_use_eq_irq_node [OF deleting_irq_handler_irq_node
deleting_irq_handler_cte_preserved]
lemmas (in Finalise_AI_3) prepare_thread_delete_cte_preserved_irqn
= hoare_use_eq_irq_node [OF prepare_thread_delete_irq_node
prepare_thread_delete_cte_wp_at]
lemma unbind_notification_cte_cap_to[wp]:
"\<lbrace>ex_cte_cap_wp_to P sl\<rbrace> unbind_notification t \<lbrace>\<lambda>rv. ex_cte_cap_wp_to P sl\<rbrace>"
by (wp ex_cte_cap_to_pres)
lemma unbind_maybe_notification_cte_cap_to[wp]:
"\<lbrace>ex_cte_cap_wp_to P sl\<rbrace> unbind_maybe_notification t \<lbrace>\<lambda>rv. ex_cte_cap_wp_to P sl\<rbrace>"
by (wp ex_cte_cap_to_pres)
lemma (in Finalise_AI_3) finalise_cap_cte_cap_to[wp]:
"\<lbrace>ex_cte_cap_wp_to P sl :: 'a state \<Rightarrow> bool\<rbrace> finalise_cap cap fin \<lbrace>\<lambda>rv. ex_cte_cap_wp_to P sl\<rbrace>"
apply (cases cap, simp_all add: ex_cte_cap_wp_to_def split del: if_split)
apply (wp hoare_vcg_ex_lift hoare_drop_imps
deleting_irq_handler_cte_preserved_irqn
prepare_thread_delete_cte_preserved_irqn
| simp
| clarsimp simp: can_fast_finalise_def
split: cap.split_asm | wpc)+
done
lemma (in Finalise_AI_3) finalise_cap_zombie_cap[wp]:
"\<lbrace>cte_wp_at (\<lambda>cp. is_zombie cp \<and> P cp) sl :: 'a state \<Rightarrow> bool\<rbrace>
finalise_cap cap fin
\<lbrace>\<lambda>rv. cte_wp_at (\<lambda>cp. is_zombie cp \<and> P cp) sl\<rbrace>"
apply (cases cap, simp_all split del: if_split)
apply (wp deleting_irq_handler_cte_preserved
| clarsimp simp: is_cap_simps can_fast_finalise_def)+
done
lemma fast_finalise_st_tcb_at:
"\<lbrace>st_tcb_at P t and K (\<forall>st. active st \<longrightarrow> P st)\<rbrace>
fast_finalise cap fin
\<lbrace>\<lambda>rv. st_tcb_at P t\<rbrace>"
apply (rule hoare_gen_asm)
apply (cases cap; wpsimp wp: cancel_all_ipc_st_tcb_at cancel_all_signals_st_tcb_at)
done
lemma cap_delete_one_st_tcb_at:
"\<lbrace>st_tcb_at P t and K (\<forall>st. active st \<longrightarrow> P st)\<rbrace>
cap_delete_one ptr
\<lbrace>\<lambda>rv. st_tcb_at P t\<rbrace>"
apply (simp add: cap_delete_one_def unless_def is_final_cap_def)
apply (wpsimp wp: fast_finalise_st_tcb_at get_cap_wp)
done
lemma can_fast_finalise_Null:
"can_fast_finalise cap.NullCap"
by (simp add: can_fast_finalise_def)
lemmas (in Finalise_AI_3) finalise_cap_cte_at[wp] = valid_cte_at_typ [OF finalise_cap_typ_at]
lemma finalise_cap_fast_Null:
"\<lbrace>\<lambda>s. can_fast_finalise cap\<rbrace> finalise_cap cap final \<lbrace>\<lambda>rv s. rv = (cap.NullCap, None)\<rbrace>"
apply (cases cap, simp_all add: can_fast_finalise_def)
apply (wp | simp only: o_def simp_thms cases_simp if_cancel fst_conv)+
done
lemmas cases_simp_option[simp] = cases_simp[where P="x = None" for x, simplified]
lemma replaceable_same:
"replaceable s slot cap cap"
by (simp add: replaceable_def)
lemma hoare_pre_disj':
"\<lbrakk>\<lbrace>\<lambda>s. P s \<and> R s\<rbrace> f \<lbrace>T\<rbrace>;
\<lbrace>\<lambda>s. Q s \<and> R s\<rbrace> f \<lbrace>T\<rbrace> \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. (P s \<or> Q s) \<and> R s\<rbrace> f \<lbrace>T\<rbrace>"
apply (rule hoare_pre)
apply (erule (1) hoare_pre_disj)
apply simp
done
(* FIXME: move *)
lemma invs_pspace_alignedI:
"invs s \<Longrightarrow> pspace_aligned s"
apply (simp add: invs_def valid_state_def valid_pspace_def)
done
lemma cte_wp_at_disj:
"cte_wp_at (\<lambda>c. P c \<or> P' c) sl s =
(cte_wp_at (\<lambda>c. P c) sl s \<or> cte_wp_at (\<lambda>c. P' c) sl s)"
unfolding cte_wp_at_def
by fastforce
lemmas thread_set_final_cap =
final_cap_lift [OF thread_set_caps_of_state_trivial]
schematic_goal no_cap_to_obj_with_diff_ref_lift:
"\<lbrace>\<lambda>s. ?P (caps_of_state s)\<rbrace> f \<lbrace>\<lambda>rv s. ?P (caps_of_state s)\<rbrace>
\<Longrightarrow> \<lbrace>no_cap_to_obj_with_diff_ref cap S\<rbrace>
f
\<lbrace>\<lambda>rv. no_cap_to_obj_with_diff_ref cap S\<rbrace>"
by (simp add: no_cap_to_obj_with_diff_ref_def
cte_wp_at_caps_of_state)
lemmas thread_set_no_cap_obj_ref_trivial
= no_cap_to_obj_with_diff_ref_lift [OF thread_set_caps_of_state_trivial]
lemma cap_not_in_valid_global_refs:
"\<lbrakk>invs s; caps_of_state s p = Some cap\<rbrakk> \<Longrightarrow>
obj_refs cap \<inter> global_refs s = {}"
apply (drule invs_valid_global_refs)
apply (simp add: valid_global_refs_def valid_refs_def)
apply (case_tac p, simp)
apply (erule_tac x=a in allE, erule_tac x=b in allE)
apply (clarsimp simp: cte_wp_at_caps_of_state cap_range_def)
apply blast
done
(* FIXME: move *)
lemma gts_wp:
"\<lbrace>\<lambda>s. \<forall>st. st_tcb_at (op = st) t s \<longrightarrow> P st s\<rbrace> get_thread_state t \<lbrace>P\<rbrace>"
unfolding get_thread_state_def
apply (wp thread_get_wp')
apply clarsimp
apply (drule spec, erule mp)
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
done
lemma gbn_wp:
"\<lbrace>\<lambda>s. \<forall>ntfn. bound_tcb_at (op = ntfn) t s \<longrightarrow> P ntfn s\<rbrace> get_bound_notification t \<lbrace>P\<rbrace>"
unfolding get_bound_notification_def
apply (wp thread_get_wp')
apply (clarsimp)
apply (drule spec, erule mp)
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
done
locale Finalise_AI_4 = Finalise_AI_3 a b _ c
for a :: "('a :: state_ext) itself"
and b :: "('b :: state_ext) itself"
and c :: "'c itself"
locale Finalise_AI_5 = Finalise_AI_4 _ a b c
for a :: "('a :: state_ext) itself"
and b :: "('b :: state_ext) itself"
and c :: "'c itself" +
assumes clearMemory_invs[wp]:
"\<And> w sz. \<lbrace>invs\<rbrace> do_machine_op (clearMemory w sz) \<lbrace>\<lambda>_. invs :: 'a state \<Rightarrow> bool\<rbrace>"
assumes valid_idle_has_null_cap:
"\<And> cap (s :: 'a state) v.
\<lbrakk> if_unsafe_then_cap s; valid_global_refs s; valid_idle s; valid_irq_node s\<rbrakk>
\<Longrightarrow> caps_of_state s (idle_thread s, v) = Some cap
\<Longrightarrow> cap = NullCap"
assumes zombie_cap_two_nonidles:
"\<And> (s :: 'a state) ptr ptr' zbits n.
\<lbrakk> caps_of_state s ptr = Some (Zombie ptr' zbits n); invs s \<rbrakk>
\<Longrightarrow> fst ptr \<noteq> idle_thread s \<and> ptr' \<noteq> idle_thread s"
lemma valid_irq_node_arch [iff]:
"valid_irq_node (arch_state_update f s) = valid_irq_node s"
by (simp add: valid_irq_node_def)
(* FIXME: move *)
lemma vms_arch_state_update[simp]:
"valid_machine_state (arch_state_update f s) = valid_machine_state s"
by (simp add: valid_machine_state_def)
(* FIXME: move *)
lemma dmo_bind_return:
"\<lbrace>P\<rbrace> do_machine_op f \<lbrace>\<lambda>_. Q\<rbrace> \<Longrightarrow>
\<lbrace>P\<rbrace> do_machine_op (do _ \<leftarrow> f; return x od) \<lbrace>\<lambda>_. Q\<rbrace>"
by (simp add: do_machine_op_def bind_def return_def valid_def select_f_def
split_def)
lemma st_tcb_at_idle_thread:
"\<lbrakk> st_tcb_at P (idle_thread s) s; valid_idle s \<rbrakk>
\<Longrightarrow> P Structures_A.IdleThreadState"
by (clarsimp simp: valid_idle_def st_tcb_def2 pred_tcb_def2)
lemma tcb_state_merge_tcb_state_default:
"tcb_state (tcb_registers_caps_merge tcb tcb') = tcb_state tcb"
"tcb_state default_tcb = Structures_A.Inactive"
by (auto simp add: tcb_registers_caps_merge_def default_tcb_def)
lemma tcb_bound_notification_merge_tcb_state_default:
"tcb_bound_notification (tcb_registers_caps_merge tcb tcb') = tcb_bound_notification tcb"
"tcb_bound_notification default_tcb = None"
by (auto simp add: tcb_registers_caps_merge_def default_tcb_def)
(*Lift hoare triples from an instantiation to the nondeterministic hoare triple version.
Since bcorres states that f refines g with respect to the non_extended state,
we can prove the hoare triple over the more abstract g and put undefined
values into the extended_state*)
lemma use_bcorres: "bcorres f g \<Longrightarrow> (\<And>f f'.
\<lbrace>P o (trans_state f)\<rbrace> g \<lbrace>\<lambda>r s. Q r (trans_state f' s)\<rbrace>)\<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
apply (clarsimp simp add: bcorres_underlying_def s_bcorres_underlying_def valid_def)
apply (drule_tac x="\<lambda>_.exst s" in meta_spec)
apply (drule_tac x="\<lambda>_.exst b" in meta_spec)
apply (drule_tac x="truncate_state s" in spec)
apply (simp add: trans_state_update')
apply (drule_tac x="(a,truncate_state b)" in bspec)
apply force
apply (simp add: trans_state_update')
done
lemma dxo_noop: "do_extended_op f = (return () :: (unit,unit) s_monad)"
apply (clarsimp simp add: do_extended_op_def bind_def gets_def get_def return_def
select_f_def modify_def put_def mk_ef_def wrap_ext_op_unit_def)
apply force
done
(*FIXME: move *)
lemma corres_option_split:
"\<lbrakk>v = v'; corres_underlying sr nf nf' r P P' a c; (\<And>x. v = Some x \<Longrightarrow> corres_underlying sr nf nf' r (Q x) (Q' x) (b x) (d x))\<rbrakk>
\<Longrightarrow> corres_underlying sr nf nf' r (case_option P Q v) (case_option P' Q' v') (case_option a b v) (case_option c d v')"
by (cases v', simp_all)
lemma hoare_post_case_option_ext:
"\<lbrace>R\<rbrace> f \<lbrace>\<lambda>rv s. case_option (P s) (\<lambda>rv'. Q rv' s) rv\<rbrace> \<Longrightarrow> \<lbrace>R\<rbrace> f \<lbrace>case_option P Q\<rbrace>"
by (erule hoare_post_imp [rotated], simp split: option.splits)
lemma hoare_when_weak_wp:
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>_. P\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> when G f \<lbrace>\<lambda>_. P\<rbrace>"
by wp simp+
lemma zombie_not_ex_cap_to:
"\<lbrakk> cte_wp_at (op = (cap.Zombie ptr zbits n)) slot s;
zombies_final s \<rbrakk>
\<Longrightarrow> \<not> ex_nonz_cap_to ptr s"
apply (clarsimp simp: ex_nonz_cap_to_def )
apply (frule(1) zombies_finalD3[where P="op = c" and P'="\<lambda>c. x \<in> S c" for c x S])
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply assumption
apply (rule notI, drule_tac a=ptr in equals0D)
apply (clarsimp simp add: zobj_refs_to_obj_refs)
apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps)
apply fastforce
done
lemma is_cap_tableE:
"\<lbrakk> is_cap_table sz ko; \<And>cs. \<lbrakk> ko = kernel_object.CNode sz cs; well_formed_cnode_n sz cs\<rbrakk> \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P"
unfolding is_cap_table_def
by (auto split: Structures_A.kernel_object.split_asm)
lemma cap_table_at_length:
"\<lbrakk> cap_table_at bits oref s; valid_objs s \<rbrakk>
\<Longrightarrow> bits < (word_bits - cte_level_bits)"
apply (erule(1) obj_at_valid_objsE)
apply (case_tac ko, simp_all add: is_cap_table_def)
apply (clarsimp simp: valid_obj_def valid_cs_def
valid_cs_size_def well_formed_cnode_n_def
length_set_helper)
done
lemma emptyable_cte_wp_atD: "\<lbrakk> cte_wp_at P sl s; valid_objs s;
\<forall>cap. P cap \<longrightarrow> \<not> is_master_reply_cap cap \<rbrakk>
\<Longrightarrow> emptyable sl s"
apply (clarsimp simp: emptyable_def pred_tcb_at_def obj_at_def
is_tcb cte_wp_at_cases)
apply (erule(1) pspace_valid_objsE)
apply (clarsimp simp: valid_obj_def valid_tcb_def ran_tcb_cap_cases)
done
lemma thread_set_emptyable:
assumes z: "\<And>tcb. tcb_state (f tcb) = tcb_state tcb"
shows "\<lbrace>emptyable sl\<rbrace> thread_set f t \<lbrace>\<lambda>rv. emptyable sl\<rbrace>"
by (wp emptyable_lift thread_set_no_change_tcb_state z)
end
|
Ticks are another common external parasite of hedgehogs. An individual usually carries only a couple of ticks, though occasionally hedgehogs are found with heavier burdens. Ticks often attach themselves to the underside, behind the ears or on the flanks, but they can occur elsewhere as well. They are generally harmless to hedgehogs; however, a high parasite load can be a sign of underlying sickness.
Hedgehogs often get ringworm, and around a quarter of the national population is thought to be affected. Most show no visible symptoms, and even severely infected animals may show little sign of skin disease and can feed normally. Dry, crusty ears are among the most common symptoms of a ringworm infection.
|
\section{Interests and Extra-Curricular Activities}
\cvlistitem{I frequently take part in machine learning competitions to reinforce my knowledge of data analysis and pattern classification, while also learning new algorithms and ways to make them more efficient.}
\cvlistitem{I am also an avid runner, competing in various road and trail races in Toronto.}
|
function [y,fs,wrd,phn,ffx]=v_readsph(filename,mode,nmax,nskip)
%V_READSPH Read a SPHERE/TIMIT format sound file [Y,FS,WRD,PHN,FFX]=(FILENAME,MODE,NMAX,NSKIP)
%
% Input Parameters:
%
% FILENAME gives the name of the file (with optional .SPH extension) or alternatively
% can be the FFX output from a previous call to READSPH having the 'f' mode option
% MODE specifies the following (*=default):
%
% Scaling: 's' Auto scale to make data peak = +-1 (use with caution if reading in chunks)
% 'r' Raw unscaled data (integer values)
% 'p' * Scaled to make +-1 equal full scale
% 'o' Scale to bin centre rather than bin edge (e.g. 127 rather than 127.5 for 8 bit values)
% (can be combined with n+p,r,s modes)
% 'n' Scale to negative peak rather than positive peak (e.g. 128.5 rather than 127.5 for 8 bit values)
% (can be combined with o+p,r,s modes)
% Format 'l' Little endian data (Intel,DEC) (overrides indication in file)
% 'b' Big endian data (non Intel/DEC) (overrides indication in file)
%
% File I/O: 'f' Do not close file on exit
% 'd' Look in data directory: v_voicebox('dir_data')
% 'w' Also read the annotation file *.wrd if present (as in TIMIT)
% 't' Also read the phonetic transcription file *.phn if present (as in TIMIT)
% Each line of the annotation and transcription files is of the form: m n token
% where m and n are start and end times in samples and token is a word or phoneme text descriptor
% The corresponding cell arrays WRD and PHN contain two elements per row: {[m n]/fs 'token'}
% These outputs are only present if the corresponding 'w' and 't' options are selected
%
% NMAX maximum number of samples to read (or -1 for unlimited [default])
% NSKIP number of samples to skip from start of file
% (or -1 to continue from previous read when FFX is given instead of FILENAME [default])
%
% Output Parameters:
%
% Y data matrix of dimension (samples,channels)
% FS sample frequency in Hz
% WRD{*,2} cell array with word annotations: WRD{*,:}={[t_start t_end],'text'} where times are in seconds
% with the first sample at t=0 [only present if 'w' option is selected]
% PHN{*,2} cell array with phoneme annotations: PHN{*,:}={[t_start t_end],'phoneme'} where times are in seconds
% with the first sample at t=0 [only present if 't' option is selected]
% FFX Cell array containing
%
% {1} filename
% {2} header information
% {1} first header field name
% {2} first header field value
% {3} format string (e.g. NIST_1A)
% {4}(1) file id
% (2) current position in file
% (3) dataoff byte offset in file to start of data
% (4) order byte order (l or b)
% (5) nsamp number of samples
% (6) number of channels
% (7) nbytes bytes per data value
% (8) bits number of bits of precision
% (9) fs sample frequency
% (10) min value
% (11) max value
% (12) coding: 0=PCM,1=uLAW + 0=no compression,10=shorten,20=wavpack,30=shortpack
% (13) file not yet decompressed
% {5} temporary filename
%
% If no output parameters are specified, header information will be printed.
% To decode shorten-encoded files, the program shorten.exe must be in the same directory as this m-file
%
% Usage Examples:
%
% (a) Draw an annotated spectrogram of a TIMIT file
% filename='....TIMIT/TEST/DR1/FAKS0/SA1.WAV';
% [s,fs,wrd,phn]=v_readsph(filename,'wt');
% v_spgrambw(s,fs,'Jwcpta',[],[],[],[],wrd);
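%
%  (b) Read at most the first 16000 samples of a file as raw (unscaled) integers.
%      This is only a sketch using the documented 'r' mode and NMAX argument;
%      the filename is hypothetical.
%          [s,fs]=v_readsph('speech.sph','r',16000);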
% Copyright (C) Mike Brookes 1998
% Version: $Id: v_readsph.m 10865 2018-09-21 17:22:45Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
persistent BYTEORDER
codes={'sample_count'; 'channel_count'; 'sample_n_bytes';'sample_sig_bits'; 'sample_rate'; 'sample_min'; 'sample_max'};
codings={'pcm'; 'ulaw'};
compressions={',embedded-shorten-';',embedded-wavpack-'; ',embedded-shortpack-'};
if isempty(BYTEORDER), BYTEORDER='l'; end
if nargin<1, error('Usage: [y,fs,wrd,phn,ffx]=V_READSPH(filename,mode,nmax,nskip)'); end
if nargin<2, mode='p';
else mode = [mode(:).' 'p'];
end
k=find((mode>='p') & (mode<='s'));
mno=all(mode~='o'); % scale to input limits not output limits
sc=mode(k(1));
if any(mode=='l'), BYTEORDER='l';
elseif any(mode=='b'), BYTEORDER='b';
end
if nargout
ffx=cell(5,1);
if ischar(filename)
if any(mode=='d')
filename=fullfile(v_voicebox('dir_data'),filename);
end
fid=fopen(filename,'rb',BYTEORDER);
if fid == -1
fn=[filename,'.sph'];
fid=fopen(fn,'rb',BYTEORDER);
if fid ~= -1, filename=fn; end
end
if fid == -1
error('Can''t open %s for input',filename);
end
ffx{1}=filename;
else
if iscell(filename)
ffx=filename;
else
fid=filename;
end
end
if isempty(ffx{4})
fseek(fid,0,-1);
str=char(fread(fid,16)');
if str(8) ~= 10 || str(16) ~= 10, fclose(fid); error('File does not begin with a SPHERE header'); end
ffx{3}=str(1:7);
hlen=str2double(str(9:15));
hdr={};
while 1
str=fgetl(fid);
if str(1) ~= ';'
[tok,str]=strtok(str);
if strcmp(tok,'end_head'), break; end
hdr(end+1,1)={tok};
[tok,str]=strtok(str);
if tok(1) ~= '-', error('Missing ''-'' in SPHERE header'); end
if tok(2)=='s'
hdr(end,2)={str(2:str2num(tok(3:end))+1)};
elseif tok(2)=='i'
hdr(end,2)={sscanf(str,'%d',1)};
else
hdr(end,2)={sscanf(str,'%f',1)};
end
end
end
i=find(strcmp(hdr(:,1),'sample_byte_format'));
if ~isempty(i)
bord=char('b'+('l'-'b')*(hdr{i,2}(1)=='0'));
if bord ~= BYTEORDER && all(mode~='b') && all(mode ~='l')
BYTEORDER=bord;
fclose(fid);
fid=fopen(filename,'rb',BYTEORDER);
end
end
i=find(strcmp(hdr(:,1),'sample_coding'));
icode=0; % initialize to PCM coding
if ~isempty(i)
icode=-1; % unknown code
scode=hdr{i,2};
nscode=length(scode);
for j=1:length(codings)
lenj=length(codings{j});
if strcmp(scode(1:min(nscode,lenj)),codings{j})
if nscode>lenj
for k=1:length(compressions)
lenk=length(compressions{k});
if strcmp(scode(lenj+1:min(lenj+lenk,nscode)),compressions{k})
icode=10*k+j-1;
break;
end
end
else
icode=j-1;
end
break;
end
end
end
info=[fid; 0; hlen; double(BYTEORDER); 0; 1; 2; 16; 1 ; 1; -1; icode];
for j=1:7
i=find(strcmp(hdr(:,1),codes{j}));
if ~isempty(i)
info(j+4)=hdr{i,2};
end
end
if ~info(5)
fseek(fid,0,1);
info(5)=floor((ftell(fid)-info(3))/(info(6)*info(7)));
end
ffx{2}=hdr;
ffx{4}=info;
end
info=ffx{4};
if nargin<4, nskip=info(2);
elseif nskip<0, nskip=info(2);
end
ksamples=info(5)-nskip;
if nargin>2
if nmax>=0
ksamples=min(nmax,ksamples);
end
end
if ksamples>0
fid=info(1);
if icode>=10 && isempty(ffx{5})
fclose(fid);
dirt=v_voicebox('dir_temp');
filetemp=fullfile(dirt,'shorten.wav');
cmdtemp=fullfile(dirt,'shorten.bat'); % batch file needed to convert to short filenames
% if ~exist(cmdtemp,'file') % write out the batch file if it doesn't exist
cmdfid=fopen(cmdtemp,'wt');
fprintf(cmdfid,'@"%s" -x -a %%1 "%%~s2" "%%~s3"\n',v_voicebox('shorten'));
fclose(cmdfid);
% end
if exist(filetemp,'file') % need to explicitly delete old file since shorten makes read-only
doscom=['del /f "' filetemp '"'];
if dos(doscom) % run the program
error('Error running DOS command: %s',doscom);
end
end
if floor(icode/10)==1 % shorten
doscom=['"' cmdtemp '" ' num2str(info(3)) ' "' filename '" "' filetemp '"'];
% fprintf(1,'Executing: %s\n',doscom);
if dos(doscom) % run the program
error('Error running DOS command: %s',doscom);
end
else
error('unknown compression format');
end
ffx{5}=filetemp;
fid=fopen(filetemp,'r',BYTEORDER);
if fid<0, error('Cannot open decompressed file %s',filetemp); end
info(1)=fid; % update fid
end
info(2)=nskip+ksamples;
pk=pow2(0.5,8*info(7))*(1+(mno/2-all(mode~='n'))/pow2(0.5,info(8))); % use modes o and n to determine effective peak
fseek(fid,info(3)+info(6)*info(7)*nskip,-1);
nsamples=info(6)*ksamples;
if info(7)<3
if info(7)<2
y=fread(fid,nsamples,'uchar');
if mod(info(12),10)==1
y=v_pcmu2lin(y);
pk=2.005649;
elseif mod(info(12),10)==2,
y=v_pcma2lin(y);
pk=2.005649;
else
y=y-128;
end
else
y=fread(fid,nsamples,'short');
end
else
if info(7)<4
y=fread(fid,3*nsamples,'uchar');
y=reshape(y,3,nsamples);
y=[1 256 65536]*y-pow2(fix(pow2(y(3,:),-7)),24);
else
y=fread(fid,nsamples,'long');
end
end
if sc ~= 'r'
if sc=='s'
if info(10)>info(11)
info(10)=min(y);
info(11)=max(y);
end
sf=1/max(max(abs(info(10:11))),1);
else sf=1/pk;
end
y=sf*y;
end
if info(6)>1, y = reshape(y,info(6),ksamples).'; end
else
y=[];
end
if all(mode~='f')
fclose(fid);
info(1)=-1;
if ~isempty(ffx{5})
doscom=['del /f ' ffx{5}];
if dos(doscom) % run the program
error('Error running DOS command: %s',doscom);
end
ffx{5}=[];
end
end
ffx{4}=info;
fs=info(9);
wrd=ffx; % copy ffx into the other arguments in case 'w' and/or 't' are not specified
phn=ffx;
if any(mode=='w')
wrd=cell(0,0);
fidw=fopen([filename(1:end-3) 'wrd'],'r');
if fidw>0
while 1
tline = fgetl(fidw); % read an input line
if ~ischar(tline)
break
end
[wtim, ntim, ee, nix] = sscanf(tline,'%d%d',2);
if ntim==2
wrd{end+1,1}=wtim(:)'/fs;
wrd{end,2}=strtrim(tline(nix:end));
end
end
fclose(fidw);
end
end
if any(mode=='t')
ph=cell(0,0);
fidw=fopen([filename(1:end-3) 'phn'],'r');
if fidw>0
while 1
tline = fgetl(fidw); % read an input line
if ~ischar(tline)
break
end
[wtim, ntim, ee, nix] = sscanf(tline,'%d%d',2);
if ntim==2
ph{end+1,1}=wtim(:)'/fs;
ph{end,2}=strtrim(tline(nix:end));
end
end
fclose(fidw);
end
if any(mode=='w')
phn=ph; % copy into 4th argument
else
wrd=ph; % copy into 3rd argument
end
end
else
[y1,fs,ffx]=v_readsph(filename,mode,0);
info=ffx{4};
icode=info(12); % could convert this into text
if ~isempty(ffx{1}), fprintf(1,'Filename: %s\n',ffx{1}); end
fprintf(1,'Sphere file type: %s, coding %d\n',ffx{3}, icode);
fprintf(1,'Duration = %ss: %d channel * %d samples @ %sHz\n',v_sprintsi(info(5)/info(9)),info(6),info(5),v_sprintsi(info(9)));
end
|
In 2001, Professor Takashi Asano was awarded the Stockholm Water Prize for his outstanding contributions to the efficient use of water in the domain of wastewater reclamation, recycling and reuse, through theoretical developments, practical research, and worldwide adaptation and promotion.
|
#include <boost/mpl/multiset/aux_/multiset0.hpp>
|
module Algebra.Solver.Prod
import public Data.List.Elem
%default total
||| A product of variables, each represented by the exponent
||| to which it is raised.
|||
||| When normalizing arithmetic expressions, they often
||| get converted to (sums of) products of variables
||| (listed in index `as`), each raised to a certain
||| exponent. This is the case for commutative monoids
||| (a single product) as well as commutative (semi)rings
||| (a sum of products).
public export
data Prod : (a : Type) -> (as : List a) -> Type where
Nil : Prod a []
(::) : (exp : Nat) -> Prod a xs -> Prod a (x :: xs)
||| Multiplying two products means adding all
||| exponents pairwise.
public export
mult : Prod a as -> Prod a as -> Prod a as
mult [] [] = []
mult (x :: xs) (y :: ys) = (x + y) :: mult xs ys
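-- Example (a sketch): over the variable list [x, y], the products
-- x * y^2 and x^3 are encoded as [1, 2] and [3, 0] respectively, and
-- `mult [1, 2] [3, 0]` yields [4, 2], i.e. x^4 * y^2.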
||| We sort products by lexicographically comparing
||| the exponents.
public export
compProd : Prod a as -> Prod a as -> Ordering
compProd [] [] = EQ
compProd (x :: xs) (y :: ys) = case compare x y of
LT => LT
GT => GT
EQ => compProd xs ys
||| The neutral product where all exponents are zero.
public export
one : {as : List a} -> Prod a as
one {as = []} = []
one {as = x :: xs} = 0 :: one
||| Convert a single variable to a product of variables.
public export
fromVar : {as : List a} -> Elem x as -> Prod a as
fromVar {as = x :: xs} Here = 1 :: one
fromVar {as = x :: xs} (There y) = 0 :: fromVar y
fromVar {as = []} Here impossible
fromVar {as = []} (There y) impossible
--------------------------------------------------------------------------------
-- Proofs
--------------------------------------------------------------------------------
Uninhabited (LT = EQ) where
uninhabited _ impossible
Uninhabited (GT = EQ) where
uninhabited _ impossible
export
0 pcompNat : (x,y : Nat) -> (compare x y === EQ) -> x === y
pcompNat 0 0 prf = Refl
pcompNat (S k) (S j) prf = cong S $ pcompNat k j prf
pcompNat 0 (S k) Refl impossible
pcompNat (S k) 0 Refl impossible
export
0 pcompProd : (x,y : Prod a as)
-> (compProd x y === EQ)
-> x === y
pcompProd [] [] prf = Refl
pcompProd (x :: xs) (y :: ys) prf with (compare x y) proof eq
_ | EQ = cong2 (::) (pcompNat x y eq) (pcompProd xs ys prf)
_ | LT = absurd prf
_ | GT = absurd prf
pcompProd [] (_ :: _) Refl impossible
pcompProd (_ :: _) [] Refl impossible
|
/*
Copyright 2009-2021 Nicolas Colombe
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <math/mathexp.hpp>
#include <core/containers.hpp>
#include <math/vector2.hpp>
#include <math/segment.hpp>
#include <boost/container/flat_map.hpp>
namespace eXl
{
struct PolyVertex
{
PolyVertex(Vector2i const& iPos, Vector2f const& iPosf)
: position(iPos)
, positionf(iPosf)
{
}
Vector2f positionf;
Vector2i position;
int32_t firstEdge = -1;
};
struct PolyHalfEdge
{
uint32_t srcVtx;
uint32_t dstVtx;
uint32_t sibling;
int32_t userId = -1;
int32_t nextEdge = -1;
Vector2f normDir;
float length;
};
inline std::size_t hash_value(Vector2i const& iPt)
{
if (sizeof(Vector2i) == sizeof(std::size_t))
{
return reinterpret_cast<size_t const&>(iPt);
}
else
{
size_t seed = iPt.X();
boost::hash_combine(seed, iPt.Y());
return seed;
}
}
struct EXL_MATH_API PolyMesh
{
Vector<PolyVertex> vertices;
Vector<PolyHalfEdge> edges;
// boost::container::flat_map<Vector2i, uint32_t, std::less<Vector2i>, eXl::Allocator<std::pair<Vector2i, uint32_t>>> pointMap;
// Need a way to keep capacity.
UnorderedMap<Vector2i, uint32_t> pointMap;
// leftmost/bottommost point
boost::optional<std::pair<Vector2i, uint32_t>> smallerPoint;
void UpdateSmallerPoint(std::pair<Vector2i, uint32_t> const& iNewPoint);
PolyHalfEdge const* GetNextIncomingEdge(uint32_t curVtx, PolyHalfEdge const* curEdge) const;
bool FindInsertionPoint(uint32_t srcVtx, uint32_t dstVtx, uint32_t firstEdge, Vector2f const& iOutgoingDir, int32_t& prevInsertPt, int32_t& nextInsertPt) const;
boost::optional<uint32_t> InsertEdge(Segmenti const& iSeg, Segmentf const& iFltSeg, int32_t edgeId = -1);
void Clear();
};
}
|
# 1. Set up the notebook
Import modules.
```python
import numpy as np
import sympy as sym
import json
import matplotlib.pyplot as plt
from scipy import linalg
from scipy.interpolate import interp1d
```
Define a function to load data from a hardware flight test and resample it at 100 Hz using linear interpolation. If `only_in_flight=True`, then only data for which the desired height (`o_z_des`) was positive (i.e., "trying to fly" rather than "sitting on the ground") will be returned.
```python
def load_hardware_data(filename, t_min_offset=0, t_max_offset=0, only_in_flight=False):
# load raw data
with open(filename, 'r') as f:
data = json.load(f)
# convert lists to numpy arrays
for val in data.values():
for key in val.keys():
val[key] = np.array(val[key])
# create an array of times at which to subsample
t_min = -np.inf
t_max = np.inf
for key, val in data.items():
t_min = max(t_min, val['time'][0])
t_max = min(t_max, val['time'][-1])
t_min += t_min_offset * 1000
t_max -= t_max_offset * 1000
nt = int(1 + np.floor((t_max - t_min) / 10.))
t = np.arange(0, 10 * nt, 10) / 1000.
resampled_data = {'time': t}
# resample raw data with linear interpolation
for k, v in data.items():
f = interp1d((v['time'] - t_min) / 1000., v['data'])
resampled_data[k] = f(t)
# truncate to times when o_z_des is positive
if only_in_flight:
if 'ae483log.o_z_des' not in resampled_data.keys():
raise Exception('"ae483log.o_z_des" must be logged')
i = np.argwhere(resampled_data['ae483log.o_z_des'] > 0).flatten()
if len(i) == 0:
raise Exception('o_z_des was never positive')
if len(i) < 2:
raise Exception('o_z_des was only positive for one time step')
for key in resampled_data.keys():
resampled_data[key] = resampled_data[key][i[0]:i[-1]]
# return the resampled data
return resampled_data
```
Define a function to solve the linear quadratic regulator (LQR) problem - i.e., that finds the matrix $K$ for which
$$u(t) = - K x(t)$$
is the optimal solution to
$$
\begin{align*}
\underset{u_{[t_{0}, \infty)}}{\text{minimize}} &\qquad \int_{t_{0}}^{\infty}\left( x(t)^{T}Qx(t)+u(t)^{T}Ru(t)\right)dt \\
\text{subject to} &\qquad \dot{x}(t) = Ax(t)+Bu(t), \quad x(t_{0})=x_{0}.
\end{align*}
$$
```python
def lqr(A, B, Q, R):
P = linalg.solve_continuous_are(A, B, Q, R)
K = linalg.inv(R) @ B.T @ P
return K
```
Define a function to create the observability matrix
$$ W_o = \begin{bmatrix} C \\ CA \\ CA^2 \\ \vdots \\ CA^{n-1} \end{bmatrix} $$
where $A$ has size $n \times n$.
```python
def obsv(A, C):
W = C
for i in range(1, A.shape[0]):
W = np.vstack([W, C @ np.linalg.matrix_power(A, i)])
return W
```
# 2. Design observer
## 2.1 Define state-space model
Here are the matrices that describe the state-space model
$$\begin{align*} \dot{x} &= Ax + Bu \\ y &= Cx + Du \end{align*}$$
that was derived in Lab 7 (copy/pasted from that week's analysis notebook).
```python
# FIXME (copy/paste definition of A, B, C, D)
```
## 2.2 Show that not all states are observable
Find the observability matrix
$$ W_o = \begin{bmatrix} C \\ CA \\ CA^2 \\ \vdots \\ CA^{n-1} \end{bmatrix} $$
where $A$ is $n \times n$.
```python
W_o = obsv(A, C)
```
Find the rank of the observability matrix using [numpy.linalg.matrix_rank](https://numpy.org/doc/stable/reference/generated/numpy.linalg.matrix_rank.html). The system is **observable** if and only if $W_o$ is **full rank**, that is, if its rank is equal to the number of states $n$.
```python
print(f' rank of W_o is: {np.linalg.matrix_rank(W_o)}')
print(f'"full rank" would be: {A.shape[0]}')
```
If the system is *not* observable, then it is impossible to design a stable observer - that is, an observer that makes the error in your estimate of each state converge to zero over time.
In particular, the following code would produce an error if you tried to use it:
```python
# Choose weights
Q = np.eye(3) # <-- one diagonal entry for each output
R = np.eye(9) # <-- one diagonal entry for each state
# Find gain matrix
L = lqr(A.T, C.T, linalg.inv(R), linalg.inv(Q)).T
```
It does not matter what method you use - if it is impossible to design a stable observer, that's it, you're out of luck!
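One way to see *which* states make $W_o$ rank-deficient is to look at its null space, whose columns span the unobservable subspace. Below is a minimal sketch (assuming `A` and `C` are defined as in Section 2.1): a state whose row of the null-space basis is nonzero participates in an unobservable direction, which can guide the choice of states to drop in the next section.
```python
from scipy.linalg import null_space

# Columns of N form a basis for the null space of W_o (the unobservable subspace)
N = null_space(obsv(A, C))

# States with a nonzero component in some basis vector of the unobservable subspace
candidates = np.argwhere(~np.isclose(N, 0.).all(axis=1)).flatten()
print(f'states that participate in an unobservable direction: {candidates}')
```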
## 2.3 Choose a subset of states that are observable
List the index of each state to include.
```python
# FIXME: delete the index of each state that is not observable!
s_obs_index = [
0, # o_x
1, # o_y
2, # o_z
3, # psi
4, # theta
5, # phi
6, # v_x
7, # v_y
8, # v_z
]
```
Define a state-space model
$$
\begin{align*}
\dot{x}_\text{obs} &= A_\text{obs} x_\text{obs} + B_\text{obs} u \\
y &= C_\text{obs} x_\text{obs} + D_\text{obs} u
\end{align*}
$$
with only these states.
```python
A_obs = A[s_obs_index, :][:, s_obs_index]
B_obs = B[s_obs_index, :]
C_obs = C[:, s_obs_index]
D_obs = D
```
Show the matrices that describe this state-space model.
```python
print(f'A_obs:\n{A_obs}\n')
print(f'B_obs:\n{B_obs}\n')
print(f'C_obs:\n{C_obs}\n')
print(f'D_obs:\n{D_obs}\n')
```
Check that this subsystem is observable:
```python
print(f' rank of W_o is: {np.linalg.matrix_rank(obsv(A_obs, C_obs))}')
print(f'"full rank" would be: {A_obs.shape[0]}')
```
## 2.4 Choose gain matrix for the observable subsystem
### 2.4.1 With equal weights
Choose weights $Q$ and $R$ as identity matrices.
```python
# FIXME: change the size of each identity matrix to match the
# number of states and outputs for your observable subsystem
Q = np.eye(3) # <-- one diagonal entry for each output
R = np.eye(9) # <-- one diagonal entry for each state
```
Find gain matrix $L$ for the chosen $Q$ and $R$ by solving an LQR problem.
```python
L = lqr(A_obs.T, C_obs.T, linalg.inv(R), linalg.inv(Q)).T
```
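As an optional sanity check (a sketch, assuming `A_obs`, `C_obs`, and `L` are defined as above): the estimation error evolves according to $A_\text{obs} - L C_\text{obs}$, so every eigenvalue of that matrix should have negative real part for a stable observer.
```python
# All eigenvalues of A_obs - L @ C_obs should have negative real part
eigs = np.linalg.eigvals(A_obs - L @ C_obs)
print(f'largest real part of error dynamics: {eigs.real.max():.6f} (should be negative)')
```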
Show $L$ (formatted nicely).
```python
L_str = np.array2string(L,
formatter={'float_kind': lambda x: f'{x:12.6f}'},
prefix=' ',
max_line_width=np.inf)
print(f'L = {L_str}')
```
### 2.4.2 With weights chosen by model error variance
Choose weights $Q$ and $R$ based on variance of model error.
```python
# FIXME:
#
# (1) Change the size of each matrix to match the number of
# states and outputs for your observable subsystem
#
# (2) Change the value of each diagonal entry based on your
# results from Lab 7.
Q = np.diag([
1., # n_x
1., # n_y
1., # r
])
R = np.diag([
1., # o_x
1., # o_y
1., # o_z
1., # psi
1., # theta
1., # phi
1., # v_x
1., # v_y
1., # v_z
])
```
Find gain matrix $L$ for the chosen $Q$ and $R$ by solving an LQR problem.
```python
L = lqr(A_obs.T, C_obs.T, linalg.inv(R), linalg.inv(Q)).T
```
Show $L$ (formatted nicely).
```python
L_str = np.array2string(L,
formatter={'float_kind': lambda x: f'{x:12.6f}'},
prefix=' ',
max_line_width=np.inf)
print(f'L = {L_str}')
```
# 3. Implement and test observer (offline)
## 3.1 Do a flight test
#### Describe the flight test
Replace this cell with the following information, at minimum:
* A description of the flight trajectory, both in words and (if you like) a snippet of code from `flight.py`.
* A description of the flight conditions (e.g., where was the flight conducted, did you power cycle the drone just before flying, were you using the positioning system or only the onboard sensors, etc.).
Also fill in the following table, which describes the point at which the drone landed, as measured with a ruler:
| | $o_x$ (meters) | $o_y$ (meters) |
| ---: | :---: | :---: |
| landing position | 1.000 | 2.000 |
#### Show a video of the flight test
Replace this cell with a video of your flight. Here are two ways to do this. (What you should *not* do is drag-and-drop your video into this notebook.)
##### Markdown
Put your video in the same directory as this notebook. Suppose this video is called `hardware_video.mov`. Then put the following code in a cell of type `Markdown` and evaluate it:
```
```
##### HTML
Put your video in the same directory as this notebook. Suppose this video is called `hardware_video.mov`. Then put the following code in a cell of type `Code` and evaluate it:
```
%%HTML
```
You can change the `width` parameter to resize your video.
## 3.2 Load and parse data from flight test
Load flight test data.
```python
data = load_hardware_data(
'hardware_data.json', # <-- FIXME: replace with name of file with data
t_min_offset=0.,
t_max_offset=0.,
only_in_flight=True,
)
```
Parse flight test data.
```python
# time
t = data['time']
# states
o_x_true = data['ae483log.o_x']
o_y_true = data['ae483log.o_y']
o_z_true = data['ae483log.o_z']
psi_true = data['ae483log.psi']
theta_true = data['ae483log.theta']
phi_true = data['ae483log.phi']
v_x_true = data['ae483log.v_x']
v_y_true = data['ae483log.v_y']
v_z_true = data['ae483log.v_z']
# inputs
w_x_true = data['ae483log.w_x']
w_y_true = data['ae483log.w_y']
w_z_true = data['ae483log.w_z']
a_z_true = data['ae483log.a_z']
# outputs
n_x_true = data['ae483log.n_x']
n_y_true = data['ae483log.n_y']
r_true = data['ae483log.r']
```
Define parameter values.
```python
# Acceleration of gravity
g = 9.81
# Optical flow constant (do not change)
k_flow = 0.01 * 30.0 / np.deg2rad(4.2)
# Equilibrium value of o_z
o_z_eq = 0. # <-- FIXME: replace with the equilibrium height you chose in Lab 7
# Time step (should be 0.01)
dt = t[1] - t[0]
print(f'dt = {dt:.4f} (should be 0.01)')
```
## 3.3 Apply observer for gain matrix chosen by equal weights
Implement and run your observer in the following code cell. For each state, the result will be a numpy array (e.g., `o_x_hat`) that contains a state estimate at every time step.
```python
# Initialize estimates
o_x = 0.
o_y = 0.
o_z = 0.
psi = 0.
theta = 0.
phi = 0.
v_x = 0.
v_y = 0.
v_z = 0.
# Initialize storage (do not change)
o_x_hat = np.zeros(len(t))
o_y_hat = np.zeros(len(t))
o_z_hat = np.zeros(len(t))
psi_hat = np.zeros(len(t))
theta_hat = np.zeros(len(t))
phi_hat = np.zeros(len(t))
v_x_hat = np.zeros(len(t))
v_y_hat = np.zeros(len(t))
v_z_hat = np.zeros(len(t))
# Iterate over all time steps
for i in range(len(t)):
# Get measurements (do not change)
w_x = w_x_true[i]
w_y = w_y_true[i]
w_z = w_z_true[i]
a_z = a_z_true[i]
n_x = n_x_true[i]
n_y = n_y_true[i]
r = r_true[i]
# Compute each element of:
#
# C x + D u - y
#
n_x_err = 0. # <-- FIXME
n_y_err = 0. # <-- FIXME
r_err = 0. # <-- FIXME
# Update estimates
o_x += dt * (0.) # <-- FIXME
o_y += dt * (0.) # <-- FIXME
o_z += dt * (0.) # <-- FIXME
psi += dt * (0.) # <-- FIXME
theta += dt * (0.) # <-- FIXME
phi += dt * (0.) # <-- FIXME
v_x += dt * (0.) # <-- FIXME
v_y += dt * (0.) # <-- FIXME
v_z += dt * (0.) # <-- FIXME
# Store estimates (do not change)
o_x_hat[i] = o_x
o_y_hat[i] = o_y
o_z_hat[i] = o_z
psi_hat[i] = psi
theta_hat[i] = theta
phi_hat[i] = phi
v_x_hat[i] = v_x
v_y_hat[i] = v_y
v_z_hat[i] = v_z
```
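For reference, the update this loop is meant to implement can be written compactly as a standard Luenberger observer (a sketch; the element-wise `FIXME` lines correspond to the components of this equation, with input $u = (w_x, w_y, w_z, a_z)$ and output $y = (n_x, n_y, r)$):
$$\hat{x} \leftarrow \hat{x} + \Delta t \Big( A_\text{obs} \hat{x} + B_\text{obs} u - L \big( C_\text{obs} \hat{x} + D_\text{obs} u - y \big) \Big)$$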
Compute error in each state estimate (relative to "ground truth" from the stock code).
```python
o_x_err = o_x_hat - o_x_true
o_y_err = o_y_hat - o_y_true
o_z_err = o_z_hat - o_z_true
psi_err = psi_hat - psi_true
theta_err = theta_hat - theta_true
phi_err = phi_hat - phi_true
v_x_err = v_x_hat - v_x_true
v_y_err = v_y_hat - v_y_true
v_z_err = v_z_hat - v_z_true
```
Compare estimates with ground truth.
```python
def comparison_plot(t, s_hat, s_true, name, ax):
ax.plot(t, s_hat, label=f'{name} (estimated)', linewidth=3)
ax.plot(t, s_true, '--', label=f'{name} (true)', linewidth=2)
ax.legend()
ax.grid()
fig, (ax_o_x,
ax_o_y,
ax_o_z,
ax_psi,
ax_theta,
ax_phi,
ax_v_x,
ax_v_y,
ax_v_z) = plt.subplots(9, 1, figsize=(15, 25), sharex=True)
comparison_plot(t, o_x_hat, o_x_true, 'o_x', ax_o_x)
comparison_plot(t, o_y_hat, o_y_true, 'o_y', ax_o_y)
comparison_plot(t, o_z_hat, o_z_true, 'o_z', ax_o_z)
comparison_plot(t, psi_hat, psi_true, 'psi', ax_psi)
comparison_plot(t, theta_hat, theta_true, 'theta', ax_theta)
comparison_plot(t, phi_hat, phi_true, 'phi', ax_phi)
comparison_plot(t, v_x_hat, v_x_true, 'v_x', ax_v_x)
comparison_plot(t, v_y_hat, v_y_true, 'v_y', ax_v_y)
comparison_plot(t, v_z_hat, v_z_true, 'v_z', ax_v_z)
ax_v_z.set_xlabel('time (s)')
plt.show()
```
Plot error in each state estimate with respect to ground truth.
```python
def error_plot(t, s_err, name, ax):
ax.plot(t, s_err, label=f'{name} (error)', linewidth=3)
ax.legend()
ax.grid()
fig, (ax_o_x,
ax_o_y,
ax_o_z,
ax_psi,
ax_theta,
ax_phi,
ax_v_x,
ax_v_y,
ax_v_z) = plt.subplots(9, 1, figsize=(15, 25), sharex=True)
error_plot(t, o_x_err, 'o_x', ax_o_x)
error_plot(t, o_y_err, 'o_y', ax_o_y)
error_plot(t, o_z_err, 'o_z', ax_o_z)
error_plot(t, psi_err, 'psi', ax_psi)
error_plot(t, theta_err, 'theta', ax_theta)
error_plot(t, phi_err, 'phi', ax_phi)
error_plot(t, v_x_err, 'v_x', ax_v_x)
error_plot(t, v_y_err, 'v_y', ax_v_y)
error_plot(t, v_z_err, 'v_z', ax_v_z)
ax_v_z.set_xlabel('time (s)')
plt.show()
```
Plot histogram of errors in each state estimate.
```python
def histogram_plot(t, s_err, name, ax):
label = f'Error in estimate of {name} ' + \
f'(RMSE = {np.sqrt(np.mean(s_err**2)):6.3f}, ' + \
f'mean = {np.mean(s_err):6.3f}, ' + \
f'std = {np.std(s_err):6.3f})'
ax.hist(s_err, 50, label=label)
ax.legend(fontsize=14)
fig, (ax_o_x,
ax_o_y,
ax_o_z,
ax_psi,
ax_theta,
ax_phi,
ax_v_x,
ax_v_y,
ax_v_z) = plt.subplots(9, 1, figsize=(15, 25))
histogram_plot(t, o_x_err, 'o_x', ax_o_x)
histogram_plot(t, o_y_err, 'o_y', ax_o_y)
histogram_plot(t, o_z_err, 'o_z', ax_o_z)
histogram_plot(t, psi_err, 'psi', ax_psi)
histogram_plot(t, theta_err, 'theta', ax_theta)
histogram_plot(t, phi_err, 'phi', ax_phi)
histogram_plot(t, v_x_err, 'v_x', ax_v_x)
histogram_plot(t, v_y_err, 'v_y', ax_v_y)
histogram_plot(t, v_z_err, 'v_z', ax_v_z)
fig.tight_layout()
plt.show()
```
## 3.4 Apply observer for gain matrix chosen by weights from model error variance
Implement and run your observer in the following code cell. For each state, the result will be a numpy array (e.g., `o_x_hat`) that contains a state estimate at every time step.
```python
# Initialize estimates
o_x = 0.
o_y = 0.
o_z = 0.
psi = 0.
theta = 0.
phi = 0.
v_x = 0.
v_y = 0.
v_z = 0.
# Initialize storage (do not change)
o_x_hat = np.zeros(len(t))
o_y_hat = np.zeros(len(t))
o_z_hat = np.zeros(len(t))
psi_hat = np.zeros(len(t))
theta_hat = np.zeros(len(t))
phi_hat = np.zeros(len(t))
v_x_hat = np.zeros(len(t))
v_y_hat = np.zeros(len(t))
v_z_hat = np.zeros(len(t))
# Iterate over all time steps
for i in range(len(t)):
# Get measurements (do not change)
w_x = w_x_true[i]
w_y = w_y_true[i]
w_z = w_z_true[i]
a_z = a_z_true[i]
n_x = n_x_true[i]
n_y = n_y_true[i]
r = r_true[i]
# Compute each element of:
#
# C x + D u - y
#
n_x_err = 0. # <-- FIXME
n_y_err = 0. # <-- FIXME
r_err = 0. # <-- FIXME
# Update estimates
o_x += dt * (0.) # <-- FIXME
o_y += dt * (0.) # <-- FIXME
o_z += dt * (0.) # <-- FIXME
psi += dt * (0.) # <-- FIXME
theta += dt * (0.) # <-- FIXME
phi += dt * (0.) # <-- FIXME
v_x += dt * (0.) # <-- FIXME
v_y += dt * (0.) # <-- FIXME
v_z += dt * (0.) # <-- FIXME
# Store estimates (do not change)
o_x_hat[i] = o_x
o_y_hat[i] = o_y
o_z_hat[i] = o_z
psi_hat[i] = psi
theta_hat[i] = theta
phi_hat[i] = phi
v_x_hat[i] = v_x
v_y_hat[i] = v_y
v_z_hat[i] = v_z
```
Compute error in each state estimate (relative to "ground truth" from the stock code).
```python
o_x_err = o_x_hat - o_x_true
o_y_err = o_y_hat - o_y_true
o_z_err = o_z_hat - o_z_true
psi_err = psi_hat - psi_true
theta_err = theta_hat - theta_true
phi_err = phi_hat - phi_true
v_x_err = v_x_hat - v_x_true
v_y_err = v_y_hat - v_y_true
v_z_err = v_z_hat - v_z_true
```
Compare estimates with ground truth.
```python
def comparison_plot(t, s_hat, s_true, name, ax):
ax.plot(t, s_hat, label=f'{name} (estimated)', linewidth=3)
ax.plot(t, s_true, '--', label=f'{name} (true)', linewidth=2)
ax.legend()
ax.grid()
fig, (ax_o_x,
ax_o_y,
ax_o_z,
ax_psi,
ax_theta,
ax_phi,
ax_v_x,
ax_v_y,
ax_v_z) = plt.subplots(9, 1, figsize=(15, 25), sharex=True)
comparison_plot(t, o_x_hat, o_x_true, 'o_x', ax_o_x)
comparison_plot(t, o_y_hat, o_y_true, 'o_y', ax_o_y)
comparison_plot(t, o_z_hat, o_z_true, 'o_z', ax_o_z)
comparison_plot(t, psi_hat, psi_true, 'psi', ax_psi)
comparison_plot(t, theta_hat, theta_true, 'theta', ax_theta)
comparison_plot(t, phi_hat, phi_true, 'phi', ax_phi)
comparison_plot(t, v_x_hat, v_x_true, 'v_x', ax_v_x)
comparison_plot(t, v_y_hat, v_y_true, 'v_y', ax_v_y)
comparison_plot(t, v_z_hat, v_z_true, 'v_z', ax_v_z)
ax_v_z.set_xlabel('time (s)')
plt.show()
```
Plot error in each state estimate with respect to ground truth.
```python
def error_plot(t, s_err, name, ax):
ax.plot(t, s_err, label=f'{name} (error)', linewidth=3)
ax.legend()
ax.grid()
fig, (ax_o_x,
ax_o_y,
ax_o_z,
ax_psi,
ax_theta,
ax_phi,
ax_v_x,
ax_v_y,
ax_v_z) = plt.subplots(9, 1, figsize=(15, 25), sharex=True)
error_plot(t, o_x_err, 'o_x', ax_o_x)
error_plot(t, o_y_err, 'o_y', ax_o_y)
error_plot(t, o_z_err, 'o_z', ax_o_z)
error_plot(t, psi_err, 'psi', ax_psi)
error_plot(t, theta_err, 'theta', ax_theta)
error_plot(t, phi_err, 'phi', ax_phi)
error_plot(t, v_x_err, 'v_x', ax_v_x)
error_plot(t, v_y_err, 'v_y', ax_v_y)
error_plot(t, v_z_err, 'v_z', ax_v_z)
ax_v_z.set_xlabel('time (s)')
plt.show()
```
Plot histogram of errors in each state estimate.
```python
def histogram_plot(t, s_err, name, ax):
label = f'Error in estimate of {name} ' + \
f'(RMSE = {np.sqrt(np.mean(s_err**2)):6.3f}, ' + \
f'mean = {np.mean(s_err):6.3f}, ' + \
f'std = {np.std(s_err):6.3f})'
ax.hist(s_err, 50, label=label)
ax.legend(fontsize=14)
fig, (ax_o_x,
ax_o_y,
ax_o_z,
ax_psi,
ax_theta,
ax_phi,
ax_v_x,
ax_v_y,
ax_v_z) = plt.subplots(9, 1, figsize=(15, 25))
histogram_plot(t, o_x_err, 'o_x', ax_o_x)
histogram_plot(t, o_y_err, 'o_y', ax_o_y)
histogram_plot(t, o_z_err, 'o_z', ax_o_z)
histogram_plot(t, psi_err, 'psi', ax_psi)
histogram_plot(t, theta_err, 'theta', ax_theta)
histogram_plot(t, phi_err, 'phi', ax_phi)
histogram_plot(t, v_x_err, 'v_x', ax_v_x)
histogram_plot(t, v_y_err, 'v_y', ax_v_y)
histogram_plot(t, v_z_err, 'v_z', ax_v_z)
fig.tight_layout()
plt.show()
```
## 3.5 Summary and discussion
The following table reports the RMSE in each state estimate:
| | $o_x$ | $o_y$ | $o_z$ | $\psi$ | $\theta$ | $\phi$ | $v_x$ | $v_y$ | $v_z$ |
| --: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: |
| RMSE with equal weights | 0.001 | 0.002 | 0.003 | 0.004 | 0.005 | 0.006 | 0.007 | 0.008 | 0.009 |
| RMSE with weights from error model variance | 0.001 | 0.002 | 0.003 | 0.004 | 0.005 | 0.006 | 0.007 | 0.008 | 0.009 |
**Modify the text in this cell** to answer the following questions:
* Which observer performs better, the one derived from equal weights or the one derived from weights based on error model variance? Why? (Make reference to your table of results when answering this question.)
* Is there a difference in what happens to the error in your estimate of (1) observable states and (2) non-observable states over time? Why? (Make reference to your plots when answering this question.)
* We refer to the state estimates that are computed on-board by the stock code as "true." Are they really? For example, compare the landing position that you measured with a ruler (Section 3.1) to the final position that was estimated by the stock code - to what extent are they the same? (You may want to distinguish between observable and non-observable states when answering this question.)
|
/**
* Interface definitions for the postscript output routines
*/
#ifndef _SPCE_OUTPUT_H
#define _SPCE_OUTPUT_H
#include "aper_conf.h"
#include <gsl/gsl_matrix.h>
/*
* Structure to store all information on
* a stamp image which is going to be
* created. This information is steps in x
* start coos in x,y and image dimensions.
*/
typedef struct
{
double resolution;
double xstart;
double ystart;
long xsize;
long ysize;
}
drzstamp_dim;
/*
* Quadrangle structure to be filled with the
* corners of a pixel in the drizzled coo-system.
* Those corners are then passed to the boxer
* subroutine to determine the overlap for a
* particular pixel
*/
typedef struct
{
double xmax; // the maximum value in x[]
double xmin; // the minimum value in x[]
double ymax; // the maximum value in y[]
double ymin; // the minimum value in y[]
// (x[0],y[0]), (x[1],y[1])
double x[4]; // (x[2],y[2]) and (x[3],y[3])
double y[4]; // are the corner points of a pixel
// in the coo-system of the drizzled image
}
quadrangle;
typedef struct
{
gsl_matrix *counts;
gsl_matrix *weight;
}
drzstamp;
/*
* Structure for all stamp images
* of a beam in 'drzprep'
*/
typedef struct
{
gsl_matrix *counts;
gsl_matrix *error;
gsl_matrix *cont;
gsl_matrix *model;
gsl_matrix *vari;
}
drzprep;
extern gsl_vector_int *
get_trace_inds (const ap_pixel * const ap_p);
extern gsl_matrix *
stamp_img (const ap_pixel * const ap_p, float width, d_point *stp_min);
extern drzprep *
stamp_img_drzprep (const int opt_extr, const ap_pixel * const ap_p, const ap_pixel * const se_p,
float width, float nullval, int usemode,
drzstamp_dim dimension, gsl_matrix *drzcoeffs,
double exptime, double sky_cps, double rdnoise,
const int bckmode);
extern void
free_drzprep(drzprep *drzprep_stamps);
extern gsl_matrix *
rectified_stamp_img (const ap_pixel * const ap_p, float width, d_point *stp_min);
extern d_point
get_minxy_from_PET(const ap_pixel * const ap_p);
extern d_point
get_maxxy_from_PET(const ap_pixel * const ap_p);
extern drzstamp *
drizzled_stamp_img (const ap_pixel * const ap_p, double width,
const double orient, const drzstamp_dim dimension);
extern drzstamp_dim
get_drzprep_dim(const ap_pixel *const ap_p, float width,
int boxwidth, int boxheight);
extern drzstamp_dim
get_stamp_dim(const ap_pixel * const ap_p, float width,
aperture_conf *conf, const int beamID, d_point *stp_min);
extern quadrangle
get_quad_from_pixel(const ap_pixel *cur_p, const double orient, const drzstamp_dim dimension);
extern gsl_matrix *
drizzled_stamp_img_orig (const ap_pixel * const ap_p, float width,
aperture_conf *conf);
extern void
interpolate_over_NaN (gsl_matrix *data);
extern void
free_stamp_img(gsl_matrix *stamp);
extern void
free_drzstamp(drzstamp *stamp);
#endif
|
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _CHART2_ACCESSIBLEBASE_HXX_
#define _CHART2_ACCESSIBLEBASE_HXX_
#include "ObjectIdentifier.hxx"
#include <com/sun/star/chart2/XChartDocument.hpp>
#include <com/sun/star/accessibility/XAccessible.hpp>
#include <com/sun/star/accessibility/XAccessibleContext.hpp>
#include <com/sun/star/accessibility/XAccessibleComponent.hpp>
#include <com/sun/star/lang/XServiceInfo.hpp>
#include <com/sun/star/document/XEventListener.hpp>
#include <com/sun/star/lang/XEventListener.hpp>
#include <com/sun/star/lang/DisposedException.hpp>
#include <com/sun/star/accessibility/XAccessibleEventBroadcaster.hpp>
#include <com/sun/star/view/XSelectionSupplier.hpp>
#include <comphelper/accessibleeventnotifier.hxx>
#include <cppuhelper/compbase6.hxx>
#include <cppuhelper/interfacecontainer.hxx>
#include <unotools/accessiblestatesethelper.hxx>
#include <vector>
#include <map>
#include <boost/shared_ptr.hpp>
#include "MutexContainer.hxx"
class SfxItemSet;
class SdrObject;
class SdrView;
namespace accessibility
{
class IAccessibleViewForwarder;
}
namespace chart
{
class AccessibleBase;
class ObjectHierarchy;
typedef ObjectIdentifier AccessibleUniqueId;
struct AccessibleElementInfo
{
AccessibleUniqueId m_aOID;
::com::sun::star::uno::WeakReference<
::com::sun::star::chart2::XChartDocument > m_xChartDocument;
::com::sun::star::uno::WeakReference<
::com::sun::star::view::XSelectionSupplier > m_xSelectionSupplier;
::com::sun::star::uno::WeakReference<
::com::sun::star::uno::XInterface > m_xView;
::com::sun::star::uno::WeakReference<
::com::sun::star::awt::XWindow > m_xWindow;
::boost::shared_ptr< ObjectHierarchy > m_spObjectHierarchy;
AccessibleBase * m_pParent;
SdrView* m_pSdrView;
::accessibility::IAccessibleViewForwarder* m_pViewForwarder;
};
namespace impl
{
typedef ::cppu::WeakComponentImplHelper6<
::com::sun::star::accessibility::XAccessible,
::com::sun::star::accessibility::XAccessibleContext,
::com::sun::star::accessibility::XAccessibleComponent,
::com::sun::star::accessibility::XAccessibleEventBroadcaster,
::com::sun::star::lang::XServiceInfo,
::com::sun::star::lang::XEventListener
> AccessibleBase_Base;
}
/** Base class for all Chart Accessibility objects
*/
class AccessibleBase :
public MutexContainer,
public impl::AccessibleBase_Base
{
public:
enum EventType
{
OBJECT_CHANGE,
GOT_SELECTION,
LOST_SELECTION,
PROPERTY_CHANGE
};
AccessibleBase( const AccessibleElementInfo & rAccInfo,
bool bMayHaveChildren,
bool bAlwaysTransparent = false );
virtual ~AccessibleBase();
protected:
// for all calls to protected methods it is assumed that the mutex is locked
// unless calls outside via UNO, e.g. event notification, are done
/** @param bThrowException if true, a DisposedException is thrown if the
object is already disposed
@return true, if the component is already disposed and bThrowException is false,
false otherwise
*/
bool CheckDisposeState( bool bThrowException = true ) const throw (::com::sun::star::lang::DisposedException);
/** Events coming from the core have to be processed in this method. The
default implementation returns false, which indicates that the object is
not interested in the event. To react on events you have to implement
this method in derived classes.
The default implementation iterates over all children and forwards the
event until the first child returns true.
@param nObjId contains the object id of chart objects. If the object is
no chart object, the event is not broadcast.
@return If an object is the addressee of the event it should return
true, false otherwise.
*/
virtual bool NotifyEvent( EventType eType, const AccessibleUniqueId & rId );
/** Adds a state to the set.
*/
void AddState( sal_Int16 aState ) throw (::com::sun::star::uno::RuntimeException);
/** Removes a state from the set if the set contains the state, otherwise
nothing is done.
*/
void RemoveState( sal_Int16 aState ) throw (::com::sun::star::uno::RuntimeException);
/** has to be overloaded by derived classes that support child elements.
With this method a rescan is initiated that should result in a correct
list of children.
This method is called when access to any methods concerning children is
invoked for the first time.
*/
bool UpdateChildren();
/** Is called by UpdateChildren. This method is only called if an update is
really necessary.
*/
virtual bool ImplUpdateChildren();
/** adds a child to the end of the internal vector of children. As a
result, the child-count increases by one, but all existing children keep
their indices.
Important: as the implementation is needed, this should remain the only
method for adding children (i.e. there mustn't be an AddChild( Reference<
XAccessible > ) or the like).
*/
void AddChild( AccessibleBase* pChild );
/** removes a child from the internal vector. All children with index
greater than the index of the removed element get an index one less than
before.
*/
void RemoveChildByOId( const ObjectIdentifier& rOId );
/** Retrieve the pixel coordinates of logical coordinates (0,0) of the
current logic coordinate system. This can be used for
getLocationOnScreen, if the coordinates of an object are not relative to
its direct parent, but a parent higher up in hierarchy.
@return the (x,y) pixel coordinates of the upper left corner
*/
virtual ::com::sun::star::awt::Point GetUpperLeftOnScreen() const;
/** This method creates an AccessibleEventObject and sends it to all
listeners that are currently listening to this object.
If bSendGlobally is true, the event is also broadcast via
vcl::unohelper::NotifyAccessibleStateEventGlobally()
*/
void BroadcastAccEvent( sal_Int16 nId,
const ::com::sun::star::uno::Any & rNew,
const ::com::sun::star::uno::Any & rOld,
bool bSendGlobally = false ) const;
/** Removes all children from the internal lists and broadcasts child remove
events.
This method cares about mutex locking, and thus should be called without
the mutex locked.
*/
virtual void KillAllChildren();
/** Is called from getAccessibleChild(). Before this method is called, an
update of children is done if necessary.
*/
virtual ::com::sun::star::uno::Reference<
::com::sun::star::accessibility::XAccessible >
ImplGetAccessibleChildById( sal_Int32 i ) const
throw (::com::sun::star::lang::IndexOutOfBoundsException,
::com::sun::star::uno::RuntimeException);
/** Is called from getAccessibleChildCount(). Before this method is called,
an update of children is done if necessary.
*/
virtual sal_Int32 ImplGetAccessibleChildCount() const
throw (::com::sun::star::uno::RuntimeException);
AccessibleElementInfo GetInfo() const;
void SetInfo( const AccessibleElementInfo & rNewInfo );
AccessibleUniqueId GetId() const;
// ________ WeakComponentImplHelper (XComponent::dispose) ________
virtual void SAL_CALL disposing();
// ________ XAccessible ________
virtual ::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessibleContext > SAL_CALL getAccessibleContext()
throw (::com::sun::star::uno::RuntimeException);
// ________ XAccessibleContext ________
virtual sal_Int32 SAL_CALL getAccessibleChildCount()
throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessible > SAL_CALL
getAccessibleChild( sal_Int32 i )
throw (::com::sun::star::lang::IndexOutOfBoundsException,
::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessible > SAL_CALL
getAccessibleParent()
throw (::com::sun::star::uno::RuntimeException);
virtual sal_Int32 SAL_CALL getAccessibleIndexInParent()
throw (::com::sun::star::uno::RuntimeException);
/// @return AccessibleRole.SHAPE
virtual sal_Int16 SAL_CALL getAccessibleRole()
throw (::com::sun::star::uno::RuntimeException);
// has to be implemented by derived classes
// virtual ::rtl::OUString SAL_CALL getAccessibleName()
// throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessibleRelationSet > SAL_CALL
getAccessibleRelationSet()
throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessibleStateSet > SAL_CALL
getAccessibleStateSet()
throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::lang::Locale SAL_CALL getLocale()
throw (::com::sun::star::accessibility::IllegalAccessibleComponentStateException,
::com::sun::star::uno::RuntimeException);
// has to be implemented by derived classes
// virtual ::rtl::OUString SAL_CALL getAccessibleDescription()
// throw (::com::sun::star::uno::RuntimeException);
// ________ XAccessibleComponent ________
virtual sal_Bool SAL_CALL containsPoint(
const ::com::sun::star::awt::Point& aPoint )
throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessible > SAL_CALL
getAccessibleAtPoint( const ::com::sun::star::awt::Point& aPoint )
throw (::com::sun::star::uno::RuntimeException);
// has to be defined in derived classes
virtual ::com::sun::star::awt::Rectangle SAL_CALL getBounds()
throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::awt::Point SAL_CALL getLocation()
throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::awt::Point SAL_CALL getLocationOnScreen()
throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::awt::Size SAL_CALL getSize()
throw (::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL grabFocus()
throw (::com::sun::star::uno::RuntimeException);
virtual sal_Int32 SAL_CALL getForeground()
throw (::com::sun::star::uno::RuntimeException);
virtual sal_Int32 SAL_CALL getBackground()
throw (::com::sun::star::uno::RuntimeException);
// ________ XServiceInfo ________
virtual ::rtl::OUString SAL_CALL getImplementationName()
throw (::com::sun::star::uno::RuntimeException);
virtual sal_Bool SAL_CALL supportsService(
const ::rtl::OUString& ServiceName )
throw (::com::sun::star::uno::RuntimeException);
virtual ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getSupportedServiceNames()
throw (::com::sun::star::uno::RuntimeException);
// ________ XEventListener ________
virtual void SAL_CALL disposing(
const ::com::sun::star::lang::EventObject& Source )
throw (::com::sun::star::uno::RuntimeException);
using ::cppu::WeakComponentImplHelperBase::addEventListener;
using ::cppu::WeakComponentImplHelperBase::removeEventListener;
// ________ XAccessibleEventBroadcaster ________
virtual void SAL_CALL addEventListener(
const ::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessibleEventListener >& xListener )
throw (::com::sun::star::uno::RuntimeException);
virtual void SAL_CALL removeEventListener(
const ::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessibleEventListener >& xListener )
throw (::com::sun::star::uno::RuntimeException);
private:
enum eColorType
{
ACC_BASE_FOREGROUND,
ACC_BASE_BACKGROUND
};
sal_Int32 getColor( eColorType eColType );
private:
typedef ::com::sun::star::uno::Reference<
::com::sun::star::accessibility::XAccessible > tAccessible;
/** type of the vector containing the accessible children
*/
typedef ::std::vector< tAccessible > ChildListVectorType;
/** type of the hash containing a vector index for every AccessibleUniqueId
of the object in the child list
*/
typedef ::std::map< ObjectIdentifier, tAccessible > ChildOIDMap;
bool m_bIsDisposed;
const bool m_bMayHaveChildren;
bool m_bChildrenInitialized;
ChildListVectorType m_aChildList;
ChildOIDMap m_aChildOIDMap;
::comphelper::AccessibleEventNotifier::TClientId m_nEventNotifierId;
/** Implementation helper for getAccessibleStateSet()
Note: This member must come before m_aStateSet!
*/
::utl::AccessibleStateSetHelper * m_pStateSetHelper;
/** this is returned in getAccessibleStateSet().
The implementation is an ::utl::AccessibleStateSetHelper. To access
implementation methods use m_pStateSetHelper.
Note: Keeping this reference ensures, that the helper object is only
destroyed after this object has been disposed().
*/
::com::sun::star::uno::Reference< ::com::sun::star::accessibility::XAccessibleStateSet >
m_aStateSet;
AccessibleElementInfo m_aAccInfo;
const bool m_bAlwaysTransparent;
/** denotes if the state-set is initialized. On initialization the selected
state is checked.
This variable is monitored by the solar mutex!
Note: declared volatile to enable double-check-locking
*/
volatile bool m_bStateSetInitialized;
};
} // namespace chart
#endif
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
SUBROUTINE INITDG (MCOM, ICOM, JCOM, CIN, RIN, IIN, KIN, IDUMP,
& XX1, YY1, SCALE, CT, ST, X1, X2, Y1, Y2, DRWTAB, SNAP)
C***********************************************************************
C SUBROUTINE INITDG = INITIALIZES THE DIGITIZING TABLET
C***********************************************************************
DIMENSION KIN (MCOM), IIN (MCOM), RIN (MCOM)
CHARACTER * 72 CIN (MCOM), BUTTON * 1
LOGICAL DRWTAB, IANS, SNAP
IZ = 0
C CHECK TO MAKE SURE THAT THE DRAWING IS NOT BEING TOGGLED
IF (DRWTAB) THEN
CALL MESAGE ('DRAWING INITIALIZATION IS ALREADY ACTIVE')
CALL INTRUP ('TOGGLE ALL DRAWING INITIALIZATION OFF',
& IANS, MCOM, ICOM, JCOM, CIN, IIN, RIN, KIN)
IF (IANS) THEN
DRWTAB = .FALSE.
CALL TABINT (X1, X2, Y1, Y2, CT, ST, SCALE, XX1, YY1, XX2,
& YY2, DRWTAB)
RETURN
ENDIF
ENDIF
C GET THE ZOOM LIMITS
CALL MESAGE (' ')
IF (ICOM .GT. JCOM) THEN
CALL FREFLD (IZ, IZ, 'ENTER DRAWING XMIN, XMAX, YMIN, YMAX:',
& MCOM, IOSTAT, JCOM, KIN, CIN, IIN, RIN)
ICOM = 1
ENDIF
IF ( (JCOM - ICOM + 1) .GE. 4) THEN
SNAP = .TRUE.
X1 = RIN (ICOM)
X2 = RIN (ICOM + 1)
Y1 = RIN (ICOM + 2)
Y2 = RIN (ICOM + 3)
ICOM = ICOM + 4
ELSE
CALL MESAGE ('NOT ENOUGH INFORMATION DEFINED TO SPECIFY'//
& ' DRAWING LIMITS')
CALL MESAGE ('INITIALIZATION ABORTED')
CALL MESAGE (' ')
RETURN
ENDIF
C GET THE DIGITIZING POINTS
CALL MESAGE ('NOW DIGITIZE THOSE 2 POINTS')
CALL MESAGE (' PUSH "PUCK - 1" FOR LOWER LEFT')
CALL MESAGE (' PUSH "PUCK - 2" FOR UPPER RIGHT')
CALL MESAGE (' PUSH "PUCK - E" TO END')
100 CONTINUE
CALL DPREAD (X, Y, BUTTON)
IF (BUTTON .EQ. '1') THEN
XX1 = X
YY1 = Y
CALL MESAGE ('LOWER LEFT INPUT')
GOTO 100
ELSEIF (BUTTON .EQ. '2') THEN
XX2 = X
YY2 = Y
CALL MESAGE ('UPPER RIGHT INPUT')
GOTO 100
ELSEIF (BUTTON .EQ. 'E') THEN
CALL PLTBEL
CALL PLTFLU
ENDIF
IF ( ( (YY2 - YY1 .EQ. 0.) .AND. (XX2 - XX1 .EQ. 0.))
& .OR. ( (Y2 - Y1 .EQ. 0.) .AND. (X2 - X1 .EQ. 0.))) THEN
CALL MESAGE ('BAD INITIALIZATION - INITIALIZATION ABORTED')
CALL MESAGE (' ')
CALL PLTBEL
CALL PLTFLU
RETURN
ENDIF
DRWTAB = .TRUE.
CALL TABINT (X1, X2, Y1, Y2, CT, ST, SCALE, XX1, YY1, XX2, YY2,
& DRWTAB)
CALL MESAGE ('INITIALIZATION COMPLETE')
CALL MESAGE (' ')
RETURN
END
|
#include <mex.h>
#include <matrix.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <blas.h> // dgemv
#include <lapack.h> // dstegr
#define abs1(a) ((a) < 0.0 ? -(a) : (a))
#define sign1(a) (((a)==0) ? 0 : (((a)>0.0)?1:(-1)))
#define max1(a,b) ((a) > (b) ? (a) : (b))
#define min1(a,b) ((a) < (b) ? (a) : (b))
double dot1(const double*a,const double*b,const ptrdiff_t n)
{
double ret = 0;
ptrdiff_t i;
for(i=0;i<n;i++)
ret +=a[i]*b[i];
return ret;
}
void AxSym(const double*A, const double*x, double*result, const ptrdiff_t n)
{
/*
* input:
* A: n x n
* x: n x 1
* output:
* result = A*x: n x 1
* NOTE: A is a symmetric matrix, only the the lower triangular part of A is to be referenced
* x and the result can not be the same space
*/
char *UPLO="L";ptrdiff_t N;double ALPHA; ptrdiff_t LDA; ptrdiff_t INCX; double BETA;ptrdiff_t INCY;
N = n; ALPHA = 1; LDA = n; INCX=1; BETA=0; INCY=1;
dsymv(UPLO,&N,&ALPHA,A,&LDA,x,&INCX,&BETA,result,&INCY);
}
double computeObj(double s, double u0, double u1, double u2,double d0,double d1,double d2)
{
return (u2*s*s + u1*s + u0) / (d2*s*s + d1*s + d0);
}
double quadfrac2(double u0, double u1, double u2,double d0,double d1,double d2)
{
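    /*
     * Minimize the 1-D ratio f(s) = (u2*s^2 + u1*s + u0) / (d2*s^2 + d1*s + d0).
     * Setting the numerator of f'(s) to zero,
     *   (2*u2*s + u1)*(d2*s^2 + d1*s + d0) - (u2*s^2 + u1*s + u0)*(2*d2*s + d1) = 0,
     * reduces to the quadratic c2*s^2 + c1*s + c0 = 0 with the coefficients below;
     * of the (at most) two roots, the one with the smaller objective value is returned.
     */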
double x1;double x2;
double c2 = u2*d1 - d2*u1;
double c1 = 2*u2*d0 - 2*d2*u0;
double c0 = u1*d0 - d1*u0;
double dd = sqrt(c1*c1 - 4*c2*c0);
if(c2==0)
{
x1 = -c0/c1;
x2 = x1;
}
else
{
x1 = (-c1+dd)/(2*c2);
x2 = (-c1-dd)/(2*c2);
}
if(computeObj(x1,u0,u1,u2,d0,d1,d2)<computeObj(x2,u0,u1,u2,d0,d1,d2))
{
return x1;
}
else
{
return x2;
}
}
ptrdiff_t pos_max(const double*a,const ptrdiff_t n)
{
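    /* Return the index of the entry of a with the largest absolute value. */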
double val = abs1(a[0]);
ptrdiff_t pos=0;
ptrdiff_t i ;
for (i=1;i<n;i++)
{
if(abs1(a[i]) > val)
{
val = abs1(a[i]);
pos = i;
}
}
return pos;
}
void pvec(const double*a,const ptrdiff_t n)
{
// print the vector a
ptrdiff_t i;
printf("[ ");
for (i=0;i<n;i++)
{
printf("%f ",a[i]);
}
printf("]\n");
}
void solve(double *x,const double *in_x, const double*A, const double*b, const double c, const double *Q, const double *r,const double s,const ptrdiff_t n,const ptrdiff_t max_iter)
{
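    /*
     * Greedy coordinate descent for minimizing the ratio of quadratics
     *   f(x) = (0.5*x'*A*x + b'*x + c) / (0.5*x'*Q*x + r'*x + s).
     * Each iteration forms the gradient of the ratio, picks the coordinate with
     * the largest absolute gradient entry, solves the exact 1-D subproblem with
     * quadfrac2, and updates the cached products Ax, Qx, x'Ax, x'Qx, b'x and r'x
     * incrementally in O(n) instead of recomputing them from scratch.
     */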
double *Ax = (double*)malloc(n*sizeof(double));
double *Qx = (double*)malloc(n*sizeof(double));
double *up_g = (double*)malloc(n*sizeof(double));
double *down_g = (double*)malloc(n*sizeof(double));
double *grad = (double*)malloc(n*sizeof(double));
double xAx = 0;double bx = 0; double xQx = 0; double rx = 0;
ptrdiff_t iter, i,j;
memcpy(x,in_x,n*sizeof(double));
AxSym(A, x, Ax, n);
xAx = dot1(x,Ax,n);
bx = dot1(x,b,n);
AxSym(Q,x,Qx,n);
xQx = dot1(x,Qx,n);
rx = dot1(r,x,n);
for ( iter=1;iter<=max_iter;iter++)
{
double u0,u1,u2,d0,d1,d2,alpha;
double up,down,fobj;
up = 0.5*xAx + bx + c;
down = 0.5*xQx + rx + s;
fobj = up / down;
for (j=0;j<n;j++)
{
up_g[j] = Ax[j]+b[j];
down_g[j] = Qx[j]+r[j];
grad[j] = up_g[j]/down - fobj * down_g[j]/down ;
}
i = pos_max(grad,n);
u0 = 0.5*xAx + bx + c;
u1 = Ax[i] + b[i];
u2 = 0.5*A[i*n+i];
d0 = 0.5*xQx + rx + s;
d1 = Qx[i] + r[i];
d2 = 0.5*Q[i*n+i];
alpha = quadfrac2(u0,u1,u2,d0,d1,d2);
x[i] = x[i] + alpha;
xAx = xAx + 2*alpha*Ax[i] + alpha*alpha*A[i*n+i];
xQx = xQx + 2*alpha*Qx[i] + alpha*alpha*Q[i*n+i];
for (j=0;j<n;j++)
{
Ax[j] = Ax[j] + alpha*A[i*n+j];
Qx[j] = Qx[j] + alpha*Q[i*n+j];
}
bx = bx + alpha*b[i];
rx = rx + alpha*r[i];
}
for (i=0; i<n; i++) if (x[i]==0) x[i] = 1e-100; /* replace exact zeros with a tiny nonzero value */
free(Ax);
free(Qx);
free(up_g);
free(down_g);
free(grad);
}
void mexFunction (int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
/*set up input arguments */
double* x = mxGetPr(prhs[0]);
double* A = mxGetPr(prhs[1]);
double* b = mxGetPr(prhs[2]);
double c = mxGetScalar(prhs[3]);
double* D = mxGetPr(prhs[4]);
double* e = mxGetPr(prhs[5]);
double f = mxGetScalar(prhs[6]);
ptrdiff_t n = mxGetScalar(prhs[7]);
ptrdiff_t max_iter = mxGetScalar(prhs[8]);
double *out_x;
plhs[0] = mxCreateDoubleMatrix(n,1,mxREAL);
out_x = mxGetPr(plhs[0]);
solve(out_x,x,A,b,c,D,e,f,n,max_iter);
}
|
{- 2010-09-28 Andreas, see issue 336 -}
module WhyWeNeedUntypedLambda where
IdT = ({A : Set} -> A -> A)
data _==_ {A : Set2}(a : A) : A -> Set where
refl : a == a
-- Untyped lambda succeeds, because checking \ x -> x : X is postponed,
-- then the solution X = IdT is found, and upon revisiting the tc problem
-- a hidden lambda \ {A} is inserted.
foo : ({X : Set1} -> X -> X == IdT -> Set) -> Set
foo k = k (\ x -> x) refl -- succeeds
{-
-- Typed lambda fails, because \ (x : _) -> x has inferred type ?A -> ?A
-- but then unification with IdT fails.
foo' : ({X : Set1} -> X -> X == IdT -> Set) -> Set
foo' k = k (\ (x : _) -> x) refl -- fails
-}
|
/*
@copyright Louis Dionne 2014
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#include <boost/hana/ext/boost/fusion.hpp>
#include <boost/hana/detail/constexpr.hpp>
#include "../helper.hpp"
#include <cassert>
#include <tuple>
using namespace boost::hana;
BOOST_HANA_CONSTEXPR_LAMBDA auto f = [](auto x, auto xs) {
return std::tuple_cat(std::make_tuple(x), xs);
};
int main() {
constexpr std::tuple<> s;
with_nonassociative_forward_sequences([=](auto container) {
assert(foldr(container(), s, f) == s);
assert(foldr(container(1), s, f) == f(1, s));
assert(foldr(container(1, '2'), s, f) == f(1, f('2', s)));
assert(foldr(container(1, '2', 3.3), s, f) == f(1, f('2', f(3.3, s))));
assert(foldr(container(1, '2', 3.3, 4.4f), s, f) == f(1, f('2', f(3.3, f(4.4f, s)))));
});
}
|
$\lambda \cdot 0 = 0$ for all $\lambda \in \mathbb{R}$.
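A one-line derivation: by distributivity, $\lambda \cdot 0 = \lambda \cdot (0 + 0) = \lambda \cdot 0 + \lambda \cdot 0$; subtracting $\lambda \cdot 0$ from both sides yields $\lambda \cdot 0 = 0$.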
|
(* Title: HOL/Quickcheck_Examples/Quickcheck_Lattice_Examples.thy
Author: Lukas Bulwahn
Copyright 2010 TU Muenchen
*)
theory Quickcheck_Lattice_Examples
imports Main
begin
declare [[quickcheck_finite_type_size=5]]
text \<open>We show how other default types help to find counterexamples to propositions if
the standard default type \<^typ>\<open>int\<close> is insufficient.\<close>
notation
less_eq (infix "\<sqsubseteq>" 50) and
less (infix "\<sqsubset>" 50) and
top ("\<top>") and
bot ("\<bottom>") and
inf (infixl "\<sqinter>" 70) and
sup (infixl "\<squnion>" 65)
declare [[quickcheck_narrowing_active = false, quickcheck_timeout = 3600]]
subsection \<open>Distributive lattices\<close>
lemma sup_inf_distrib2:
"((y :: 'a :: distrib_lattice) \<sqinter> z) \<squnion> x = (y \<squnion> x) \<sqinter> (z \<squnion> x)"
quickcheck[expect = no_counterexample]
by(simp add: inf_sup_aci sup_inf_distrib1)
lemma sup_inf_distrib2_1:
"((y :: 'a :: lattice) \<sqinter> z) \<squnion> x = (y \<squnion> x) \<sqinter> (z \<squnion> x)"
quickcheck[expect = counterexample]
oops
lemma sup_inf_distrib2_2:
"((y :: 'a :: distrib_lattice) \<sqinter> z') \<squnion> x = (y \<squnion> x) \<sqinter> (z \<squnion> x)"
quickcheck[expect = counterexample]
oops
lemma inf_sup_distrib1_1:
"(x :: 'a :: distrib_lattice) \<sqinter> (y \<squnion> z) = (x \<sqinter> y) \<squnion> (x' \<sqinter> z)"
quickcheck[expect = counterexample]
oops
lemma inf_sup_distrib2_1:
"((y :: 'a :: distrib_lattice) \<squnion> z) \<sqinter> x = (y \<sqinter> x) \<squnion> (y \<sqinter> x)"
quickcheck[expect = counterexample]
oops
subsection \<open>Bounded lattices\<close>
lemma inf_bot_left [simp]:
"\<bottom> \<sqinter> (x :: 'a :: bounded_lattice_bot) = \<bottom>"
quickcheck[expect = no_counterexample]
by (rule inf_absorb1) simp
lemma inf_bot_left_1:
"\<bottom> \<sqinter> (x :: 'a :: bounded_lattice_bot) = x"
quickcheck[expect = counterexample]
oops
lemma inf_bot_left_2:
"y \<sqinter> (x :: 'a :: bounded_lattice_bot) = \<bottom>"
quickcheck[expect = counterexample]
oops
lemma inf_bot_left_3:
"x \<noteq> \<bottom> ==> y \<sqinter> (x :: 'a :: bounded_lattice_bot) \<noteq> \<bottom>"
quickcheck[expect = counterexample]
oops
lemma inf_bot_right [simp]:
"(x :: 'a :: bounded_lattice_bot) \<sqinter> \<bottom> = \<bottom>"
quickcheck[expect = no_counterexample]
by (rule inf_absorb2) simp
lemma inf_bot_right_1:
"x \<noteq> \<bottom> ==> (x :: 'a :: bounded_lattice_bot) \<sqinter> \<bottom> = y"
quickcheck[expect = counterexample]
oops
lemma inf_bot_right_2:
"(x :: 'a :: bounded_lattice_bot) \<sqinter> \<bottom> ~= \<bottom>"
quickcheck[expect = counterexample]
oops
lemma sup_bot_right [simp]:
"(x :: 'a :: bounded_lattice_bot) \<squnion> \<bottom> = \<bottom>"
quickcheck[expect = counterexample]
oops
lemma sup_bot_left [simp]:
"\<bottom> \<squnion> (x :: 'a :: bounded_lattice_bot) = x"
quickcheck[expect = no_counterexample]
by (rule sup_absorb2) simp
lemma sup_bot_right_2 [simp]:
"(x :: 'a :: bounded_lattice_bot) \<squnion> \<bottom> = x"
quickcheck[expect = no_counterexample]
by (rule sup_absorb1) simp
lemma sup_eq_bot_iff [simp]:
"(x :: 'a :: bounded_lattice_bot) \<squnion> y = \<bottom> \<longleftrightarrow> x = \<bottom> \<and> y = \<bottom>"
quickcheck[expect = no_counterexample]
by (simp add: eq_iff)
lemma sup_top_left [simp]:
"\<top> \<squnion> (x :: 'a :: bounded_lattice_top) = \<top>"
quickcheck[expect = no_counterexample]
by (rule sup_absorb1) simp
lemma sup_top_right [simp]:
"(x :: 'a :: bounded_lattice_top) \<squnion> \<top> = \<top>"
quickcheck[expect = no_counterexample]
by (rule sup_absorb2) simp
lemma inf_top_left [simp]:
"\<top> \<sqinter> x = (x :: 'a :: bounded_lattice_top)"
quickcheck[expect = no_counterexample]
by (rule inf_absorb2) simp
lemma inf_top_right [simp]:
"x \<sqinter> \<top> = (x :: 'a :: bounded_lattice_top)"
quickcheck[expect = no_counterexample]
by (rule inf_absorb1) simp
lemma inf_eq_top_iff [simp]:
"(x :: 'a :: bounded_lattice_top) \<sqinter> y = \<top> \<longleftrightarrow> x = \<top> \<and> y = \<top>"
quickcheck[expect = no_counterexample]
by (simp add: eq_iff)
no_notation
less_eq (infix "\<sqsubseteq>" 50) and
less (infix "\<sqsubset>" 50) and
inf (infixl "\<sqinter>" 70) and
sup (infixl "\<squnion>" 65) and
top ("\<top>") and
bot ("\<bottom>")
end
|
[STATEMENT]
lemma mult_isor: "x \<le> y \<Longrightarrow> x \<cdot> z \<le> y \<cdot> z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<le> y \<Longrightarrow> x \<cdot> z \<le> y \<cdot> z
[PROOF STEP]
by (metis distrib_right less_eq_def)
|
options(max.print = 1000)# {{{
rm(list = ls())
today <- format(Sys.Date(), "%Y%m%d")
# }}}
# all data
project_dir <- "H:\\gabilan-turbidity-tmdl\\"# {{{
data_all <- read.csv(paste0(project_dir,
"data\\summary-stats-all-20191203.csv"))
data_dry <- read.csv(paste0(project_dir,
"data\\summary-stats-dry-20191203.csv"))
data_wet <- read.csv(paste0(project_dir,
"data\\summary-stats-wet-20191203.csv"))
rest_codes <- c("309GAB", "309NAD",
"309ALG", "309ALD",
"309JON", "309TEH",
"309TEM", "309TDW",
"309OLD", "309ASB",
"309MER", "309ESP",
"309RTA")
rest_names <- c("Gabilan Creek", "Natividad Creek",
"Reclamation Canal/Alisal Creek", "Reclamation Canal",
"Reclamation Canal", "Tembladero Slough",
"Tembladero Slough", "Tembladero Slough",
"Old Salinas River Channel", "Alisal Slough",
"Merrill Ditch", "Espinosa Slogh",
"Santa Rita Creek")
ref_codes <- c("305LCS", "305CAN",
"305CAR", "305WSA",
"305CHI", "305COR",
"305SJA", "305SJN",
"306WAC", "310CCC",
"310TWB", "310PRE",
"310SLB", "310PIS",
"312OFN", "312ORI",
"313SAI", "314SYN",
"314SYF", "314SYL",
"315DEV")
ref_names <- c("Lower Llagas Creek", "Lower Uvas Creek",
"Lower Uvas Creek", "Watsonville Slough",
"Lower Pajaro River", "Salsipuedes Creek",
"San Juan Creek", "San Juan Creek",
"Elkhorn Slough (Watsonville Creek)", "Chorro Creek",
"Chorro Creek", "San Luis Obispo Creek",
"San Luis Obispo Creek", "Pismo Creek",
"Oso Flaco Creek", "Orcutt Creek (at Highway 1)",
"Lower San Antonio Creek", "Santa Lucia Canyon-Santa Ynez River",
"Santa Lucia Canyon-Santa Ynez River", "Santa Miguelito Canyon-Santa Ynez River",
"Dos Pueblos Canyon (Devereux Slough)")
# }}}
# exceedance table{{{
exceed_data <- data_all[match(rest_codes, data_all$StationCode),
c("StationCode", "N", "pct.gte.25", "pct.gte.40")]
exceed_tab <- cbind(rest_names, exceed_data)
exceed_out <- paste0(project_dir, "data\\rest-site-exceedances.csv")
# write.csv(exceed_tab, exceed_out)
# iqr table
iqr_data <- data_all[match(rest_codes, data_all$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
iqr <- iqr_data$turb_quan75 - iqr_data$turb_quan25
iqr_tab <- cbind(rest_names, iqr_data, iqr)
iqr_out <- paste0(project_dir, "data\\rest-site-iqr.csv")
# write.csv(iqr_tab, iqr_out)
# }}}
# seasonal iqr table{{{
seas_wet_data <- data_wet[match(rest_codes, data_wet$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
seas_dry_data <- data_dry[match(rest_codes, data_dry$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
seas_wet_iqr <- seas_wet_data$turb_quan75 - seas_wet_data$turb_quan25
seas_dry_iqr <- seas_dry_data$turb_quan75 - seas_dry_data$turb_quan25
seas_wet_tab <- cbind(rest_names, "season" = "wet", seas_wet_data,
"iqr" = seas_wet_iqr, "order" = 1:length(rest_codes))
seas_dry_tab <- cbind(rest_names, "season" = "dry", seas_dry_data,
"iqr" = seas_dry_iqr, "order" = 1:length(rest_codes))
seas_tab <- rbind(seas_dry_tab, seas_wet_tab)
seas_tab <- seas_tab[order(seas_tab$order), ]
seas_out <- paste0(project_dir, "data\\seasonal-iqr.csv")
write.csv(seas_tab, seas_out)
# }}}
# ref iqr table---all seasons{{{
# Table 17. Turbidity data summary at perennial riverine reference sites
ref_iqr_data <- data_all[match(ref_codes, data_all$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
ref_iqr <- ref_iqr_data$turb_quan75 - ref_iqr_data$turb_quan25
ref_iqr_tab <- cbind(ref_names, ref_iqr_data, ref_iqr)
ref_iqr_tab <- ref_iqr_tab[order(ref_iqr_tab$StationCode), ]
all <- ref_iqr_tab
all_out <- paste0(project_dir, "data\\ref-site-iqr-all-", today, ".csv")
# write.csv(all, all_out)
# }}}
# ref iqr table---dry season{{{
# Table 17. Turbidity data summary at perennial riverine reference sites
ref_iqr_data <- data_dry[match(ref_codes, data_dry$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
ref_iqr <- ref_iqr_data$turb_quan75 - ref_iqr_data$turb_quan25
ref_iqr_tab <- cbind(ref_names, ref_iqr_data, ref_iqr)
ref_iqr_tab <- ref_iqr_tab[order(ref_iqr_tab$StationCode), ]
dry <- ref_iqr_tab
dry_out <- paste0(project_dir, "data\\ref-site-iqr-dry-", today, ".csv")
# write.csv(dry, dry_out)
# }}}
# ref iqr table---wet season{{{
# Table 17. Turbidity data summary at perennial riverine reference sites
ref_iqr_data <- data_wet[match(ref_codes, data_wet$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
ref_iqr <- ref_iqr_data$turb_quan75 - ref_iqr_data$turb_quan25
ref_iqr_tab <- cbind(ref_names, ref_iqr_data, ref_iqr)
ref_iqr_tab <- ref_iqr_tab[order(ref_iqr_tab$StationCode), ]
wet <- ref_iqr_tab
wet_out <- paste0(project_dir, "data\\ref-site-iqr-wet-", today, ".csv")
# write.csv(wet, wet_out)
# }}}
# only CMP data
project_dir <- "H:\\gabilan-turbidity-tmdl\\"# {{{
data_all <- read.csv(paste0(project_dir,
"data\\summary-stats-all-cmp-20200115.csv"))
data_dry <- read.csv(paste0(project_dir,
"data\\summary-stats-dry-cmp-20200115.csv"))
data_wet <- read.csv(paste0(project_dir,
"data\\summary-stats-wet-cmp-20200115.csv"))
rest_codes <- c("309GAB", "309NAD",
"309ALG", "309ALD",
"309JON", "309TEH",
"309TEM", "309TDW",
"309OLD", "309ASB",
"309MER", "309ESP",
"309RTA")
rest_names <- c("Gabilan Creek", "Natividad Creek",
"Reclamation Canal/Alisal Creek", "Reclamation Canal",
"Reclamation Canal", "Tembladero Slough",
"Tembladero Slough", "Tembladero Slough",
"Old Salinas River Channel", "Alisal Slough",
"Merrill Ditch", "Espinosa Slogh",
"Santa Rita Creek")
ref_codes <- c("305LCS", "305CAN",
"305CAR", "305WSA",
"305CHI", "305COR",
"305SJA", "305SJN",
"306WAC", "310CCC",
"310TWB", "310PRE",
"310SLB", "310PIS",
"312OFN", "312ORI",
"313SAI", "314SYN",
"314SYF", "314SYL",
"315DEV")
ref_names <- c("Lower Llagas Creek", "Lower Uvas Creek",
"Lower Uvas Creek", "Watsonville Slough",
"Lower Pajaro River", "Salsipuedes Creek",
"San Juan Creek", "San Juan Creek",
"Elkhorn Slough (Watsonville Creek)", "Chorro Creek",
"Chorro Creek", "San Luis Obispo Creek",
"San Luis Obispo Creek", "Pismo Creek",
"Oso Flaco Creek", "Orcutt Creek (at Highway 1)",
"Lower San Antonio Creek", "Santa Lucia Canyon-Santa Ynez River",
"Santa Lucia Canyon-Santa Ynez River", "Santa Miguelito Canyon-Santa Ynez River",
"Dos Pueblos Canyon (Devereux Slough)")
# }}}
# ref iqr table---all seasons{{{
# Table 17. Turbidity data summary at perennial riverine reference sites
ref_iqr_data <- data_all[match(ref_codes, data_all$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
ref_iqr <- ref_iqr_data$turb_quan75 - ref_iqr_data$turb_quan25
ref_iqr_tab <- cbind(ref_names, ref_iqr_data, ref_iqr)
ref_iqr_tab <- ref_iqr_tab[order(ref_iqr_tab$StationCode), ]
all <- ref_iqr_tab
all_out <- paste0(project_dir, "data\\ref-site-iqr-all-cmp-", today, ".csv")
write.csv(all, all_out)
# }}}
# ref iqr table---dry season{{{
# Table 17. Turbidity data summary at perennial riverine reference sites
ref_iqr_data <- data_dry[match(ref_codes, data_dry$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
ref_iqr <- ref_iqr_data$turb_quan75 - ref_iqr_data$turb_quan25
ref_iqr_tab <- cbind(ref_names, ref_iqr_data, ref_iqr)
ref_iqr_tab <- ref_iqr_tab[order(ref_iqr_tab$StationCode), ]
dry <- ref_iqr_tab
dry_out <- paste0(project_dir, "data\\ref-site-iqr-dry-cmp-", today, ".csv")
write.csv(dry, dry_out)
# }}}
# ref iqr table---wet season{{{
# Table 17. Turbidity data summary at perennial riverine reference sites
ref_iqr_data <- data_wet[match(ref_codes, data_wet$StationCode),
c("StationCode", "N", "turb_quan25",
"turb_quan5", "turb_quan75")]
ref_iqr <- ref_iqr_data$turb_quan75 - ref_iqr_data$turb_quan25
ref_iqr_tab <- cbind(ref_names, ref_iqr_data, ref_iqr)
ref_iqr_tab <- ref_iqr_tab[order(ref_iqr_tab$StationCode), ]
wet <- ref_iqr_tab
wet_out <- paste0(project_dir, "data\\ref-site-iqr-wet-cmp-", today, ".csv")
write.csv(wet, wet_out)
# }}}
|
import numpy as np
import tensorflow as tf
import time, random
import matplotlib.pyplot as plt
from cnn_model import nn_model,loss,optimizer,accuracy
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
warnings.simplefilter("ignore", FutureWarning)
tf.logging.set_verbosity(tf.logging.INFO)
def get_random_rotation_angle():
return random.randint(-45,45)
def get_random():
return random.randint(0,2)/10.0
def get_new_size():
new_size = 96 + random.choice([24,16])
return [new_size,new_size]
def get_random_augmentation_combinations(length):
out = [True,False]
return [random.choice(out) for i in xrange(length)]
def get_all_images(img_file):
images = np.fromfile(img_file,dtype=np.uint8).astype(np.float32)
images = np.reshape(images,(-1,3,96,96))
images = np.transpose(images,(0,3,2,1))
print 'Normalizing Inputs...'
rmean = np.mean(images[:,:,:,0])
gmean = np.mean(images[:,:,:,1])
bmean = np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
images[:,:,:,0] = (images[:,:,:,0] - rmean)#/rstd
images[:,:,:,1] = (images[:,:,:,1] - gmean)#/gstd
images[:,:,:,2] = (images[:,:,:,2] - bmean)#/bstd
print 'R_mean:',rmean,'G_mean:',gmean,'B_mean:',bmean
print 'R_stddev:',rstd,'G_stddev:',gstd,'B_stddev:',bstd
return images,rmean,gmean,bmean
def get_all_labels(label_file):
labels = np.fromfile(label_file,dtype=np.uint8)
#print labels.shape
return labels
def get_val_images(img_file,rmean,gmean,bmean):
images = np.fromfile(img_file,dtype=np.uint8).astype(np.float32)
images = np.reshape(images,(-1,3,96,96))
images = np.transpose(images,(0,3,2,1))
print 'Normalizing Validation Images...'
images[:,:,:,0] = (images[:,:,:,0] - rmean)#/rstd
images[:,:,:,1] = (images[:,:,:,1] - gmean)#/gstd
images[:,:,:,2] = (images[:,:,:,2] - bmean)#/bstd
return images
#Create dataset
#Getting the dataset
print 'Getting the data...'
train_data_path = '/floyd/train_X.bin' #/media/siladittya/fdc481ce-9355-46a9-b381-9001613e3422/siladittya/StudyMaterials/ISI/code/ds/stl10_binary
train_label_path = '/floyd/train_y.bin'
train_img_file = open(train_data_path,'rb')
train_label_file = open(train_label_path,'rb')
train_x,rmean,gmean,bmean = get_all_images(train_img_file)
train_y = get_all_labels(train_label_file)
#Getting Validation Dataset
val_img_path = '/floyd/test_X.bin'
val_label_path = '/floyd/test_y.bin'
val_img_file = open(val_img_path,'rb')
val_label_file = open(val_label_path,'rb')
val_x = get_val_images(val_img_file,rmean,gmean,bmean)
val_y = get_all_labels(val_label_file)
print 'Getting Validation set from Test set...'
val_x = val_x[:200]
val_y = val_y[:200] - 1 # labels are stored 1-indexed; shift to match the 0-indexed logits (as done for y_iter in training)
index = np.arange(train_x.shape[0])
#Set seed placeholder for setting a different seed in each epoch
seedin = tf.placeholder(tf.int64,shape=())
#Keep count
count = 0
#........ This part will used to get training data for each epoch during training
init_count = 0
num_epochs = 100
batch_size = 40
numiter = 125
ne = 0
valacc = []
#Create session
feed_images = tf.placeholder(tf.float32,shape=(None,96,96,3))
feed_labels = tf.placeholder(tf.float32,shape=(None,))
aug_img = tf.placeholder(tf.float32,shape=(96,96,3))
logits = nn_model(feed_images,training = True)
cost = loss(logits,feed_labels)
lr = 0.01
opt_mom = optimizer(lr=lr)
opt = opt_mom.minimize(cost)
acc = accuracy(logits,feed_labels)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
img_scale_crop = tf.random_crop(tf.image.resize_images(aug_img,get_new_size()),[96,96,3])
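# note: get_new_size() is evaluated once here at graph-construction time, so the
# resize target is fixed for the whole session (only the crop position stays random)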
img_rand_flip_lr = tf.image.random_flip_left_right(aug_img)
img_rand_flip_ud = tf.image.random_flip_up_down(aug_img)
builder = tf.saved_model.builder.SavedModelBuilder("/output/cnn_model")
while(ne<num_epochs):
stime = time.time()
print 'epoch::',ne+1,'...'
if ne != 0:
np.random.shuffle(index)
train_x = train_x[index]
train_y = train_y[index]
for niter in xrange(numiter):
print 'iter..',niter+1
offset = niter*batch_size
x_iter, y_iter = np.array(train_x[offset:offset+batch_size,:,:,:]), np.array(train_y[offset:offset+batch_size])
y_iter=y_iter-1
print 'Data Augmenting...'
augtime = time.time()
for n in xrange(batch_size):
args = get_random_augmentation_combinations(3)
if args[0]:
x_iter[n] = sess.run(img_scale_crop,feed_dict={aug_img:x_iter[n]})
if args[1]:
x_iter[n] = sess.run(img_rand_flip_lr,feed_dict={aug_img:x_iter[n]})
if args[2]:
x_iter[n] = sess.run(img_rand_flip_ud,feed_dict={aug_img:x_iter[n]})
print 'Time for augmentation:: ',time.time()-augtime,' seconds...'
#print 'Labels::',nl.reshape([-1])
feed_trdict={feed_images:x_iter,feed_labels:y_iter}
#Train
sess.run(opt,feed_dict=feed_trdict)
#Calculate accuracy of Validation set
if (ne+1)%10==0:
val_acc = sess.run(acc,feed_dict={feed_images:val_x,feed_labels:val_y})
print 'Epoch',ne+1,' Validation accuracy::',val_acc
valacc.append(val_acc)
if len(valacc)>=3 and (valacc[-1]-valacc[-2])-(valacc[-2]-valacc[-3]) < 1e-3:
print 'Change in Learning Rate applied...'
lr = lr/10.0
opt_mom = optimizer(lr)
opt = opt_mom.minimize(cost)
# the new optimizer creates fresh momentum slot variables that must be initialized
sess.run(tf.variables_initializer(opt_mom.variables()))
cc = sess.run(cost,feed_dict=feed_trdict)
tr_acc = sess.run(acc,feed_dict = feed_trdict)
print 'Epoch..',ne+1,
print 'Training cost::',cc,
print 'Training accuracy::',tr_acc*100,'%'
print 'Time reqd.::',(time.time()-stime)/60,'mins...'
init_count+=1
ne+=1
builder.add_meta_graph_and_variables(sess, ["EVALUATING"])
builder.save()
#close session
sess.close()
|
Kitchens and Baths by Briggs is proud to present this PVD brass-finished item by Watermark. The 310-8-F-PVD is made from premium materials, and offers great function and value for your home. This fixture is part of Watermark's decorative Hampshire Collection, so make sure to check out other styles of fixtures to accessorize your room.
|
Formal statement is: lemma LIMSEQ_const_iff: "(\<lambda>n. k) \<longlonglongrightarrow> l \<longleftrightarrow> k = l" for k l :: "'a::t2_space" Informal statement is: A sequence converges to a constant if and only if the constant is the limit.
|
# Information about code:
# This code corresponds to data quality checks for my MSc thesis.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Set up
source('2019-06-19-jsa-type/subset.r')
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Section - data quality
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
print(paste0(Sys.time(), " --- data quality"))
spp <- get_df1(write=F)
spp2 <- get_df2(write=F)
table(spp$duplicated.row)
table(spp2$duplicated.row)
flag <- rbind(
data.frame(table(spp[date.n <= 2018 & duplicated.row=="FALSE"]$source.of.latlon.n)),
data.frame(table(spp2[status=="Synonym" & date.n <= 2018 & duplicated.row=="FALSE"]$source.of.latlon.n)))
write.csv(flag,
paste0(dir_data_raw, "test/2019-09-22-type-data-quality2.csv"),
na='', row.names=F, fileEncoding="UTF-8")
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Section - lat and lon
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
print(paste0(Sys.time(), " --- lat and lon"))
df_ll <- rbind(spp[, c("idx", "lat", "lon", "status")],
spp2[, c("idx", "lat", "lon", "status")])
x <- dim(df_ll)
df_ll <- df_ll[!(is.na(lat) | is.na(lon)) &
status %in% c("Valid species", "Valid Species", "Synonym")]
y <- dim(df_ll); round(y/x*100, 2)
write.csv(df_ll[order(as.numeric(idx))],
paste0(dir_data_raw, "test/2019-09-26-lat-lon.csv"),
na='', row.names=F, fileEncoding="UTF-8")
|
module radtrans_integrate
use odepack, only: lsoda_basic
use interpolate, only: get_weight, locate
use math, only: tsum
implicit none
! global data to avoid needing to pass arguments through external integration routines (e.g., lsoda)
integer :: lindx=25,nequations,nptstot,npts,nptsout,iflag
real(kind=8), dimension(:,:), allocatable :: KK,jj,intensity,PP
real(kind=8), dimension(:), allocatable :: s,ss,s0,tau,stokesq,stokesu,stokesv
real(kind=8), dimension(:,:,:), allocatable :: QQ,imm,OO
integer :: IS_LINEAR_STOKES = 1
integer(kind=4) :: maxsteps = 100000
integer, dimension(4) :: stats
real(kind=8) :: ortol = 1d-6, oatol = 1d-8, hmax = 10, MAX_TAU, MAX_TAU_DEFAULT = 10d0, thin = 1d-2, sphtolfac=1d0
!$omp threadprivate(ss,tau,s0,jj,KK,intensity,lindx,nptstot,npts,nptsout,s,stats,stokesq,stokesu,stokesv)
!$omp threadprivate(ortol,oatol,hmax,thin,MAX_TAU,QQ,PP,imm,OO,IS_LINEAR_STOKES,sphtolfac)
interface integrate
module procedure integrate
end interface
interface calc_delo_Q
module procedure calc_delo_Q_single
end interface
interface calc_delo_P
module procedure calc_delo_P_single
end interface
interface imatrix_4
module procedure imatrix_4_single
end interface
interface invert_delo_matrix
module procedure invert_delo_matrix_single
end interface
interface calc_delo_Q_thin
module procedure calc_delo_Q_thin_single
end interface
interface calc_delo_P_thin
module procedure calc_delo_P_thin_single
end interface
interface invert_delo_matrix_thin
module procedure invert_delo_matrix_thin_single
end interface
interface calc_O
module procedure calc_O
end interface
interface radtrans_integrate_formal
module procedure radtrans_integrate_formal
end interface
contains
subroutine init_radtrans_integrate_data(riflag,rneq,gnpts,rnpts,maxtau,hm,oa,ort,th,maxst)
integer, intent(in) :: riflag,rneq,gnpts,rnpts
! real(kind=8), intent(in), optional :: maxtau,hm,oa,ort
real(kind=8), intent(in), optional :: maxtau,hm,oa,ort,th
integer(kind=4), intent(in), optional :: maxst
nequations = rneq; npts = gnpts; iflag = riflag; nptsout = rnpts
if(present(maxtau)) then
! MAX_TAU = maxtau; hmax = hm; oatol = oa; ortol = ort
MAX_TAU = maxtau
else
MAX_TAU=MAX_TAU_DEFAULT
endif
if(present(hm)) then
hmax = hm
else
hmax = 0.1d0
endif
if(present(ort)) then
ortol = ort
else
ortol = 1d-6
endif
if(present(oa)) then
oatol = oa
else
oatol = 1d-8
endif
if(present(th)) then
thin = th
else
thin = 1d-2
endif
if(present(maxst)) then
maxsteps = maxst
else
maxsteps = 100000
endif
allocate(tau(npts))
allocate(s0(npts))
allocate(jj(npts,nequations))
allocate(KK(npts,1+(nequations)*(nequations-1)/2))
allocate(intensity(nequations,npts))
allocate(s(npts)); allocate(ss(npts))
s(:) = 0d0
ss(:) = 0d0
intensity(:,:) = 0d0
jj(:,:) = 0d0
tau(:) = 0d0
KK(:,:) = 0d0
s0(:) = 0d0
if(iflag==1) then
allocate(QQ(npts,nequations,nequations)); allocate(PP(npts,nequations))
allocate(imm(npts,nequations,nequations))
QQ = 0d0; PP = 0d0; imm = 0d0
elseif(iflag==2) then
allocate(OO(nequations,nequations,npts))
OO = 0d0
elseif(iflag==3) then
allocate(stokesq(npts))
allocate(stokesu(npts))
allocate(stokesv(npts))
endif
end subroutine init_radtrans_integrate_data
subroutine integrate(sinput,ej,eK,tauin,rnpts)
! these array sizes are really all known in terms of npts, nequations...
integer, intent(inout) :: rnpts
! real(kind=8), dimension(:,:), intent(inout) :: rI
real(kind=8), dimension(:,:), intent(in) :: ej,eK
real(kind=8), dimension(:), intent(in) :: sinput
real(kind=8), dimension(:), intent(in) :: tauin
! real(kind=8), dimension(:), intent(inout) :: I0
! real(kind=8), intent(in), optional :: maxt
! Subroutine to call integration routine for RT equation
! JAD 2/28/2011 moved from grtrans_driver 8/7/2014
s0=sinput; s=sinput; ss=sinput; intensity=0d0
tau = tauin
jj=ej; KK=eK
if(any(isnan(jj))) then
!write(6,*) 'nan in radtrans_integrate j'
! write(6,*) 'radtrans_integrate j1: ',jj(:,1)
! write(6,*) 'radtrans_integrate j2: ',jj(:,2)
endif
if(any(isnan(KK))) then
!write(6,*) 'nan in radtrans_integrate K'
! write(6,*) 'radtrans_integrate K1: ',KK(:,1)
! write(6,*) 'radtrans_integrate K2: ',KK(:,2)
! write(6,*) 'radtrans_integrate K4: ',KK(:,4)
! write(6,*) 'radtrans_integrate K5: ',KK(:,5)
! write(6,*) 'radtrans_integrate K7: ',KK(:,7)
endif
! spherical stokes case
if (iflag==0) then
IS_LINEAR_STOKES=1
call radtrans_integrate_lsoda()
elseif (iflag==3) then
IS_LINEAR_STOKES=0
call radtrans_integrate_lsoda()
elseif (iflag==1) then
if(nequations==4) then
call radtrans_integrate_delo(s,tau,jj,KK(:,1:4),KK(:,5:7),PP,QQ,imm)
else
call radtrans_integrate_quadrature(s,jj(:,1),KK(:,1))
endif
elseif (iflag==2) then
if(nequations==4) then
call radtrans_integrate_formal(s,jj,KK(:,1:4),KK(:,5:7),OO)
else
call radtrans_integrate_quadrature(s,jj(:,1),KK(:,1))
endif
endif
! write(6,*) 'assign intensity', size(rI,1), size(rI,2), size(intensity,1), size(intensity,2), nptsout
! rI(1:nequations,1:nptsout) = intensity(1:nequations,1:nptsout);
rnpts = nptsout
! write(6,*) 'intensity: ', rnpts, rI(rnpts,1)
if(isnan(intensity(1,rnpts))) then
write(6,*) 'NaN in integrate ej: '!,ej
! write(6,*) 'NaN in integrate jj 2: ',jj
! write(6,*) 'NaN in integrate eK: ',eK
endif
return
end subroutine integrate
subroutine radtrans_integrate_lsoda()
real(kind=8), dimension(4) :: I0
real(kind=8), dimension(npts) :: dummy
real(kind=8), dimension(:,:), allocatable :: tau_arr
real(kind=8), dimension(:), allocatable :: tau_temp,intvals,q,u,v
integer, dimension(:), allocatable :: inds
real(kind=8) :: weight
integer :: lamdex,i,ii,i1,i2,taudex
i1=1; i2=nptstot
I0 = 0d0
if(maxval(tau).le.MAX_TAU) then
lamdex=npts
else
call locate(tau,MAX_TAU,lamdex)
lamdex=lamdex+1
! write(6,*) 'locate', lamdex
! if(lamdex==2) then
! call get_weight(tau,MAX_TAU,lamdex,weight)
! need to re-work this. just trying to interpolate between s0(1) and s0(2).
! write(6,*) 'weight: ',lamdex,weight,s(lamdex),s(lamdex+1)
! s=(/(s(1)-s(2))*(1d0-weight),0d0/)
! lamdex=lamdex+1
! write(6,*) 'radtrans integrate s: ',s(1:lamdex),lamdex
! endif
endif
! Only use parts of ray where emissivity is non-zero:
if (jj(1,1).eq.0d0) then
! Find first non-zero element:
do ii=2,npts
if (jj(ii,1).ne.0d0) then
i1=ii
exit
endif
enddo
else
i1=1
endif
! i1=1
if (jj(lamdex,1).eq.0d0) then
! Find last non-zero element:
do ii=1,lamdex-1
if (jj(lamdex-ii,1).ne.0d0) then
i2=lamdex-ii
exit
endif
enddo
else
i2=lamdex
endif
! write(6,*) 'i1i2: ',i1,i2,npts, jj(i2,1), jj(i2-1,1)
nptsout=i2
! write(6,*) 's: ',s(lamdex),neq,npts,tau(lamdex),lamdex
! write(6,*) 'I0: ',s(npts), tau(npts)
! write(6,*) 'lam: ',s0
! try to figure out hmax:
ss(1:npts)=s0(npts:1:-1)
s(i1:i2)=s(i2:i1:-1)
! write(6,*) 'integrate s: ',minval(s), maxval(s), i1, i2, lamdex
! should make this the start of grtrans_integrate_lsoda and put everything else in general integrate subroutine since it's generic to all methods
if(nequations==4) then
if(IS_LINEAR_STOKES==1) then
call lsoda_basic(radtrans_lsoda_calc_rhs,I0(1:nequations), &
s(i1:i2),oatol, &
ortol,radtrans_lsoda_calc_jac,intensity(:,i1:i2), &
1,maxsteps,stats,hmax=hmax)
else
! spherical stokes under development
! I0=I0+I0sphmin
! I0(1)=1d-64; I0(2)=1d-64
I0(1)=1d-8; I0(2)=1d-8
I0(3)=0.1; I0(4)=-0.1
! I0=0d0
! testing higher error tolerance for sph stokes to converge on difficult problems
call lsoda_basic(radtrans_lsoda_calc_rhs_sph,I0(1:nequations), &
s(i1:i2),oatol*sphtolfac, &
ortol*sphtolfac,radtrans_lsoda_calc_jac_sph,intensity(:,i1:i2), &
1,maxsteps,stats,hmax=hmax)
! convert to linear stokes parameters
! allocate(q(npts));allocate(u(npts))
! allocate(v(npts))
! write(6,*) 'sph stokes intensity: ',maxval(intensity(1,i1:i2)), &
! maxval(intensity(2,i1:i2))
stokesq(i1:i2)=intensity(2,i1:i2)*sin(intensity(4,i1:i2)) &
*cos(intensity(3,i1:i2))
stokesu(i1:i2)=intensity(2,i1:i2)*sin(intensity(4,i1:i2)) &
*sin(intensity(3,i1:i2))
stokesv(i1:i2)=intensity(2,i1:i2)*cos(intensity(4,i1:i2))
intensity(2,i1:i2)=stokesq(i1:i2); intensity(3,i1:i2)=stokesu(i1:i2)
intensity(4,i1:i2)=stokesv(i1:i2)
! write(6,*) 'sph stokes intensity: ',maxval(q),maxval(v)
! write(6,*) 'sph stokes intensity: ',maxval(intensity(2,i1:i2))
! write(6,*) 'sph stokes intensity: ',maxval(intensity(1,i1:i2))
! deallocate(q); deallocate(u); deallocate(v)
endif
else
call lsoda_basic(radtrans_lsoda_calc_rhs_npol, &
I0(1:nequations),s(i1:i2),oatol, &
ortol,radtrans_lsoda_calc_jac_npol,intensity(:,i1:i2), &
1,maxsteps,stats,hmax=hmax)
endif
if(isnan(intensity(1,i2))) then
! write(6,*) 'NaN in integrate: ',i1,i2,s(i1:i2)
! write(6,*) 'NaN in integrate intensity: ',intensity(1,i1:i2)
! write(6,*) 'NaN in integrate j: ',jj(i1:i2,:)
! write(6,*) 'NaN in integrate K: ',KK(i1:i2,:)
endif
return
end subroutine radtrans_integrate_lsoda
! subroutine convert_sph_lin_stokes()
! convert spherical stokes output to normal (I,Q,U,V)
! intensity(2,i1:i2
subroutine radtrans_lsoda_calc_rhs(neq,lam,I,dIdlam)
! Compute RHS dIdlam for LSODA
integer, intent(in) :: neq
real(kind=8), intent(in) :: lam
real(kind=8), intent(in), dimension(neq) :: I
real(kind=8), intent(out), dimension(neq) :: dIdlam
! real(kind=8), intent(out), dimension(neq,neq) :: jac
real(kind=8), dimension(neq) :: j
real(kind=8), dimension(1+neq*(neq-1)/2) :: K
call radtrans_aux(neq,lam,j,K)
call radtrans_rhs_form(neq,j,K,dIdlam,I)
! write(6,*) 'dIdlam: ',lam,dIdlam
! write(6,*) 'jk: ',jj(1),jj(2),j(4)
! write(6,*) 'K: ',K(1),K(4),
! & K(5),K(7)
return
end subroutine radtrans_lsoda_calc_rhs
subroutine radtrans_lsoda_calc_rhs_npol(neq,lam,I,dIdlam)
! Compute RHS dIdlam for LSODA
integer, intent(in) :: neq
real(kind=8), intent(in) :: lam
real(kind=8), intent(in), dimension(neq) :: I
real(kind=8), intent(out), dimension(neq) :: dIdlam
! real(kind=8), intent(out), dimension(neq,neq) :: jac
real(kind=8), dimension(neq) :: j
real(kind=8), dimension(1+neq*(neq-1)/2) :: K
call radtrans_aux(neq,lam,j,K)
call radtrans_rhs_form_npol(neq,j,K,dIdlam,I)
! didlam=didlam*1e25
! write(6,*) 'dIdlam: ',lam,dIdlam
! write(6,*) 'jk: ',j(1),j(2),j(4)
! write(6,*) 'K: ',K(1),K(4),
! & K(5),K(7)
end subroutine radtrans_lsoda_calc_rhs_npol
subroutine radtrans_rhs_form(neq,j,K,dIdlam,I)
integer, intent(in) :: neq
real(kind=8), intent(in), dimension(neq) :: j
real(kind=8), intent(in), dimension(1+neq*(neq-1)/2) :: K
real(kind=8), intent(out), dimension(neq) :: dIdlam
real(kind=8), intent(in), dimension(neq) :: I
! write(6,*) 'rhs: ',IS_LINEAR_STOKES,size(I),size(K),size(J)
! if (IS_LINEAR_STOKES==1) then
dIdlam(1)=j(1)-(K(1)*I(1)+K(2)*I(2)+K(3)*I(3)+K(4)*I(4))
dIdlam(2)=j(2)-(K(2)*I(1)+K(1)*I(2)+K(7)*I(3)-K(6)*I(4))
dIdlam(3)=j(3)-(K(3)*I(1)-K(7)*I(2)+K(1)*I(3)+K(5)*I(4))
dIdlam(4)=j(4)-(K(4)*I(1)+K(6)*I(2)-K(5)*I(3)+K(1)*I(4))
! endif
end subroutine radtrans_rhs_form
subroutine radtrans_rhs_form_npol(neq,j,K,dIdlam,I)
integer, intent(in) :: neq
real(kind=8), intent(in), dimension(neq) :: j
real(kind=8), intent(in), dimension(1+neq*(neq-1)/2) :: K
real(kind=8), intent(out), dimension(neq) :: dIdlam
real(kind=8), intent(in), dimension(neq) :: I
! write(6,*) 'rhs npol: ',size(I),size(K),size(J)
! dIdlam(1)=maxval((/j(1)-K(1)*I(1),0d0/))
dIdlam(1)=j(1)-K(1)*I(1)
! write(6,*) 'rhs: ',dIdlam(1),j(1),K(1),I(1)
return
end subroutine radtrans_rhs_form_npol
subroutine radtrans_jac_form_npol(neq,j,K,nrowpd,pd)
integer, intent(in) :: neq, nrowpd
real(kind=8), intent(in), dimension(neq) :: j
real(kind=8), intent(in), dimension(1+neq*(neq-1)/2) :: K
real(kind=8), intent(out), dimension(nrowpd,neq) :: pd
! write(6,*) 'jac: ',nrowpd,neq,size(K),K(1)
pd(1,1)=-K(1)
! write(6,*) 'pd: ',pd
return
end subroutine radtrans_jac_form_npol
subroutine radtrans_jac_form(neq,j,K,nrowpd,pd)
integer, intent(in) :: neq, nrowpd
real(kind=8), intent(in), dimension(neq) :: j
real(kind=8), intent(in), dimension(1+neq*(neq-1)/2) :: K
real(kind=8), intent(out), dimension(nrowpd,neq) :: pd
! write(6,*) 'jac: ',nrowpd,neq,size(K)
! if (IS_LINEAR_STOKES==1) then
pd(1,1)=K(1)
pd(1,2)=K(2)
pd(1,3)=K(3)
pd(1,4)=K(4)
pd(2,1)=K(2)
pd(2,2)=K(1)
pd(2,3)=K(7)
pd(2,4)=-K(6)
pd(3,1)=K(3)
pd(3,2)=-K(7)
pd(3,3)=K(1)
pd(3,4)=K(5)
pd(4,1)=K(4)
pd(4,2)=K(6)
pd(4,3)=-K(5)
pd(4,4)=K(1)
pd=-1d0*pd
! endif
! write(6,*) 'pd: ',pd
return
end subroutine radtrans_jac_form
subroutine radtrans_lsoda_calc_jac(neq,lam,I,ml &
,mu,pd,nrowpd)
! Compute Jacobian for LSODA
integer, intent(in) :: neq, nrowpd
real(kind=8), intent(in) :: lam
real(kind=8), intent(in), dimension(neq) :: I
real(kind=8), intent(in) :: ml
real(kind=8), intent(in) :: mu
real(kind=8), intent(out), dimension(nrowpd,neq) :: pd
real(kind=8), dimension(neq) :: j
real(kind=8), dimension(1+neq*(neq-1)/2) :: K
! write(6,*) 'jac: ',nrowpd
call radtrans_aux(neq,lam,j,K)
call radtrans_jac_form(neq,j,K,nrowpd,pd)
! write(6,*) 'pd: ', pd
return
end subroutine radtrans_lsoda_calc_jac
subroutine radtrans_lsoda_calc_jac_npol(neq,lam,I,ml &
,mu,pd,nrowpd)
! Compute Jacobian for LSODA
integer, intent(in) :: neq, nrowpd
real(kind=8), intent(in) :: lam
real(kind=8), intent(in), dimension(neq) :: I
real(kind=8), intent(in) :: ml
real(kind=8), intent(in) :: mu
real(kind=8), intent(out), dimension(nrowpd,neq) :: pd
real(kind=8), dimension(neq) :: j
real(kind=8), dimension(1+neq*(neq-1)/2) :: K
! write(6,*) 'jac: ',nrowpd
call radtrans_aux(neq,lam,j,K)
call radtrans_jac_form_npol(neq,j,K,nrowpd,pd)
! write(6,*) 'pd: ', pd
return
end subroutine radtrans_lsoda_calc_jac_npol
subroutine radtrans_aux(neq,lam,j,K)
integer, intent(in) :: neq
real(kind=8), intent(in) :: lam
real(kind=8), intent(out), dimension(neq) :: j
real(kind=8), intent(out), dimension(1+neq*(neq-1)/2) :: K
real(kind=8) :: weight
integer :: indx,uindx
! here is where we would need to change things: have a new routine that calculates new j, K with new rel. factors at point lam
call get_weight(ss,lam,lindx,weight)
indx=npts-lindx+1; uindx=minval((/indx+1,npts/))
j=(1d0-weight)*jj(indx,:)+weight*jj(uindx,:)
K=(1d0-weight)*KK(indx,:)+weight*KK(uindx,:)
end subroutine radtrans_aux
! spherical stokes stuff in development
subroutine radtrans_lsoda_calc_rhs_sph(neq,lam,I,dIdlam)
! Compute RHS dIdlam for LSODA
integer, intent(in) :: neq
real(kind=8), intent(in) :: lam
real(kind=8), intent(in), dimension(neq) :: I
real(kind=8), intent(out), dimension(neq) :: dIdlam
! real(kind=8), intent(out), dimension(neq,neq) :: jac
real(kind=8), dimension(neq) :: j
real(kind=8), dimension(1+neq*(neq-1)/2) :: K
call radtrans_aux(neq,lam,j,K)
call radtrans_rhs_form_sph(neq,j,K,dIdlam,I)
! write(6,*) 'dIdlam: ',lam,dIdlam
! write(6,*) 'jk: ',jj(1),jj(2),j(4)
! write(6,*) 'K: ',K(1),K(4),
! & K(5),K(7)
return
end subroutine radtrans_lsoda_calc_rhs_sph
subroutine radtrans_rhs_form_sph(neq,j,K,dIdlam,I)
integer, intent(in) :: neq
real(kind=8), intent(in), dimension(neq) :: j
real(kind=8), intent(in), dimension(1+neq*(neq-1)/2) :: K
real(kind=8), intent(out), dimension(neq) :: dIdlam
real(kind=8), intent(in), dimension(neq) :: I
real(kind=8) :: sphi,spsi,cphi,cpsi
! write(6,*) 'rhs: ',IS_LINEAR_STOKES,size(I),size(K),size(J)
! if (IS_LINEAR_STOKES==1) then
! dIdlam(1)=j(1)-(K(1)*I(1)+K(2)*I(2)+K(3)*I(3)+K(4)*I(4))
! dIdlam(2)=j(2)-(K(2)*I(1)+K(1)*I(2)+K(7)*I(3)-K(6)*I(4))
! dIdlam(3)=j(3)-(K(3)*I(1)-K(7)*I(2)+K(1)*I(3)+K(5)*I(4))
! dIdlam(4)=j(4)-(K(4)*I(1)+K(6)*I(2)-K(5)*I(3)+K(1)*I(4))
sphi=sin(I(3)); cphi=cos(I(3)); spsi=sin(I(4)); cpsi=cos(I(4))
dIdlam(1)=j(1)-K(1)*I(1)-(cphi*spsi*K(2)+sphi*spsi*K(3)+cpsi*K(4))*I(2)
dIdlam(2)=-K(1)*I(2)-cphi*spsi*K(2)*I(1)-sphi*spsi*K(3)*I(1)+ &
spsi*(cphi*j(2)+sphi*j(3))+cpsi*(-I(1)*K(4)+j(4))
! dIdlam(3)=1d0/I(2)*(-cphi/spsi*(I(1)*K(3)-j(3)+I(2)*cpsi*K(5))- &
! sphi/spsi*(-I(1)*K(2)+j(2)+I(2)*cpsi*K(6))+I(2)*K(7))
dIdlam(3)=1d0/I(2)/spsi*(cphi*(j(3)-I(1)*K(3))+sphi*(-j(2)+I(1)*K(2))) &
-cpsi/spsi*(cphi*K(5)+sphi*K(6))+K(7)
! dIdlam(4)=1d0/I(2)*(-I(1)*cphi*cpsi*K(2)+spsi*(-j(4)+I(1)*K(4))- &
! sphi*(I(1)*cpsi*K(3)-cpsi*j(3)+I(2)*K(5))+cphi*cpsi*j(2))+cphi*K(6)
dIdlam(4)=1d0/I(2)*(spsi*(-j(4)+I(1)*K(4))+cpsi*(cphi*(j(2)-K(2)*I(1)) &
+sphi*(-I(1)*K(3)+j(3))))+cphi*K(6)-sphi*K(5)
! endif
end subroutine radtrans_rhs_form_sph
subroutine radtrans_jac_form_sph(neq,j,K,I,nrowpd,pd)
integer, intent(in) :: neq, nrowpd
real(kind=8), intent(in), dimension(neq) :: j,I
real(kind=8), intent(in), dimension(1+neq*(neq-1)/2) :: K
real(kind=8), intent(out), dimension(nrowpd,neq) :: pd
real(kind=8) :: cphi,sphi,cpsi,spsi
! write(6,*) 'jac: ',nrowpd,neq,size(K)
! if (IS_LINEAR_STOKES==1) then
cphi=cos(I(3)); sphi=sin(I(3)); cpsi=cos(I(4)); spsi=sin(I(4))
pd(1,1)=-K(1)
pd(1,2)=-cphi*spsi*K(2)-sphi*spsi*K(3)-cpsi*K(4)
pd(1,3)=-I(2)*(-sphi*spsi*K(2)+cphi*spsi*K(3))
pd(1,4)=-I(2)*(cphi*cpsi*K(2)+cpsi*sphi*K(3)-spsi*K(4))
pd(2,1)=pd(1,2)
pd(2,2)=-K(1)
pd(2,3)=I(1)*spsi*(sphi*K(2)-cphi*K(3))+spsi*(cphi*j(3)-sphi*j(2))
pd(2,4)=-I(1)*cpsi*(cphi*K(2)+sphi*K(3))+cpsi*(cphi*j(2)+sphi*j(3))-spsi*(j(4)-I(1)*K(4))
! pd(3,1)=1d0/I(2)*(sphi/spsi*K(2)-cphi/spsi*K(3))
pd(3,1)=1d0/spsi/I(2)*(sphi*K(2)-cphi*K(3))
! pd(3,2)=(-cphi*cpsi/spsi*K(5)-cpsi/spsi*sphi*K(6)+K(7))/I(2)-(-cphi/spsi*(I(1)*K(3)-&
! j(3)+I(2)*cpsi*K(5))-1d0/spsi*sphi*(-I(1)*K(2)+j(2)+I(2)*cpsi*K(6))+I(2)*K(7))/I(2)**2.
pd(3,2)=1d0/spsi/I(2)**2d0*(cphi*(I(1)*K(3)-j(3))+sphi*(j(2)-I(1)*K(2)))
! pd(3,3)=(1d0/spsi*sphi*(I(1)*K(3)-j(3)+I(2)*cpsi*K(5))-cphi/spsi*(-I(1)*K(2)+j(2)+I(2)*cpsi*K(6)))/I(2)
pd(3,3)=1d0/I(2)/spsi*(sphi*(I(1)*K(3)-j(3))+cphi*(I(1)*K(2)-j(2)))+1d0/spsi*(sphi*K(5)-cphi*K(6))
! pd(3,4)=(I(2)*cphi*K(5)+cphi*cpsi/spsi/spsi*(I(1)*K(3)-j(3)+I(2)*cpsi*K(5))+I(2)*sphi*K(6)+ &
! cpsi/spsi/spsi*sphi*(-I(1)*K(2)+j(2)+I(2)*cpsi*K(6)))/I(2)
pd(3,4)=cpsi/spsi/spsi/I(2)*(cphi*(I(1)*K(3)-j(3))-sphi*(I(1)*K(2)-j(2)))+1d0/spsi/spsi*(cphi*K(5)+sphi*K(6))
pd(4,1)=(-cphi*cpsi*K(2)-cpsi*sphi*K(3)+spsi*K(4))/I(2)
! pd(4,2)=(-sphi*K(5)+cphi*K(6))/I(2)-(-I(1)*cphi*cpsi*K(2)+spsi*(I(1)*K(4)-j(4))-sphi*(I(1)*cpsi*K(3) &
! -cpsi*j(3)+I(2)*K(5))+cphi*(cpsi*j(2)+I(2)*K(6)))/I(2)**2d0
pd(4,2)=1d0/I(2)**2d0*(cphi*cpsi*(I(1)*K(2)-j(2))-spsi*(I(1)*K(4)-j(4))+sphi*cpsi*(I(1)*K(3)-j(3)))
! pd(4,3)=(I(1)*cpsi*sphi*K(2)-cphi*(I(1)*cpsi*K(3)-cpsi*j(3)+I(2)*K(5))-sphi*(cpsi*j(2)+I(2)*K(6)))/I(2)
pd(4,3)=1d0/I(2)*(cpsi*sphi*(I(1)*K(2)-j(2))-cpsi*cphi*(I(1)*K(3)-j(3)))-cphi*K(5)-sphi*K(6)
! pd(4,4)=(I(1)*cphi*spsi*K(2)-cphi*spsi*j(2)-sphi*(-I(1)*spsi*K(3)+spsi*j(3))+cpsi*(I(1)*K(4)-j(4)))/I(2)
pd(4,4)=1d0/I(2)*(cphi*spsi*(I(1)*K(2)-j(2))+sphi*spsi*(I(1)*K(3)-j(3))+cpsi*(I(1)*K(4)-j(4)))
! pd=-1d0*pd
! endif
! write(6,*) 'pd: ',pd
return
end subroutine radtrans_jac_form_sph
subroutine radtrans_jac_form_sph_old(neq,j,K,I,nrowpd,pd)
integer, intent(in) :: neq, nrowpd
real(kind=8), intent(in), dimension(neq) :: j,I
real(kind=8), intent(in), dimension(1+neq*(neq-1)/2) :: K
real(kind=8), intent(out), dimension(nrowpd,neq) :: pd
real(kind=8) :: cphi,sphi,cpsi,spsi
! write(6,*) 'jac: ',nrowpd,neq,size(K)
! if (IS_LINEAR_STOKES==1) then
cphi=cos(I(3)); sphi=sin(I(3)); cpsi=cos(I(4)); spsi=sin(I(4))
pd(1,1)=-K(1)
pd(1,2)=-cphi*spsi*K(2)-sphi*spsi*K(3)-cpsi*K(4)
pd(1,3)=-I(2)*(-sphi*spsi*K(2)+cphi*spsi*K(3))
pd(1,4)=-I(2)*(cphi*cpsi*K(2)+cpsi*sphi*K(3)-spsi*K(4))
pd(2,1)=pd(1,2)
pd(2,2)=-K(1)
pd(2,3)=I(1)*spsi*(sphi*K(2)-cphi*K(3))+spsi*(cphi*j(3)-sphi*j(2))
pd(2,4)=-I(1)*cpsi*(cphi*K(2)+sphi*K(3))+cpsi*(cphi*j(2)+sphi*j(3))-spsi*(j(4)-I(1)*K(4))
pd(3,1)=1d0/I(2)*(sphi/spsi*K(2)-cphi/spsi*K(3))
! pd(3,1)=1d0/spsi/I(2)*(sphi*K(2)-cphi*K(3))
pd(3,2)=(-cphi*cpsi/spsi*K(5)-cpsi/spsi*sphi*K(6)+K(7))/I(2)-(-cphi/spsi*(I(1)*K(3)-&
j(3)+I(2)*cpsi*K(5))-1d0/spsi*sphi*(-I(1)*K(2)+j(2)+I(2)*cpsi*K(6))+I(2)*K(7))/I(2)**2.
! pd(3,2)=1d0/spsi/I(2)**2d0*(cphi*(I(1)*K(3)-j(3))+sphi*(j(2)-I(1)*K(2)))
pd(3,3)=(1d0/spsi*sphi*(I(1)*K(3)-j(3)+I(2)*cpsi*K(5))-cphi/spsi*(-I(1)*K(2)+j(2)+I(2)*cpsi*K(6)))/I(2)
! pd(3,3)=1d0/I(2)/spsi*(sphi*(I(1)*K(3)-j(3))+cphi*(I(1)*K(2)-j(2)))+1d0/spsi*(sphi*K(5)-cphi*K(6))
pd(3,4)=(I(2)*cphi*K(5)+cphi*cpsi/spsi/spsi*(I(1)*K(3)-j(3)+I(2)*cpsi*K(5))+I(2)*sphi*K(6)+ &
cpsi/spsi/spsi*sphi*(-I(1)*K(2)+j(2)+I(2)*cpsi*K(6)))/I(2)
! pd(3,4)=cpsi/spsi/spsi/I(2)*(cphi*(I(1)*K(3)-j(3))-sphi*(I(1)*K(2)-j(2)))+1d0/spsi/spsi*(cphi*K(5)+sphi*K(6))
pd(4,1)=(-cphi*cpsi*K(2)-cpsi*sphi*K(3)+spsi*K(4))/I(2)
pd(4,2)=(-sphi*K(5)+cphi*K(6))/I(2)-(-I(1)*cphi*cpsi*K(2)+spsi*(I(1)*K(4)-j(4))-sphi*(I(1)*cpsi*K(3) &
-cpsi*j(3)+I(2)*K(5))+cphi*(cpsi*j(2)+I(2)*K(6)))/I(2)**2d0
! pd(4,2)=1d0/I(2)**2d0*(cphi*cpsi*(I(1)*K(2)-j(2))-spsi*(I(1)*K(4)-j(4))+sphi*cpsi*(I(1)*K(3)-j(3)))
pd(4,3)=(I(1)*cpsi*sphi*K(2)-cphi*(I(1)*cpsi*K(3)-cpsi*j(3)+I(2)*K(5))-sphi*(cpsi*j(2)+I(2)*K(6)))/I(2)
! pd(4,3)=1d0/I(2)*(cpsi*sphi*(I(1)*K(2)-j(2))-cpsi*cphi*(I(1)*K(3)-j(3)))-cphi*K(5)-sphi*K(6)
pd(4,4)=(I(1)*cphi*spsi*K(2)-cphi*spsi*j(2)-sphi*(-I(1)*spsi*K(3)+spsi*j(3))+cpsi*(I(1)*K(4)-j(4)))/I(2)
! pd(4,4)=1d0/I(2)*(cphi*spsi*(I(1)*K(2)-j(2))+sphi*spsi*(I(1)*K(3)-j(3))+cpsi*(I(1)*K(4)-j(4)))
! pd=-1d0*pd
! endif
! write(6,*) 'pd: ',pd
return
end subroutine radtrans_jac_form_sph_old
subroutine radtrans_lsoda_calc_jac_sph(neq,lam,I,ml &
,mu,pd,nrowpd)
! Compute Jacobian for LSODA
integer, intent(in) :: neq, nrowpd
real(kind=8), intent(in) :: lam
real(kind=8), intent(in), dimension(neq) :: I
real(kind=8), intent(in) :: ml
real(kind=8), intent(in) :: mu
real(kind=8), intent(out), dimension(nrowpd,neq) :: pd
real(kind=8), dimension(neq) :: j
real(kind=8), dimension(1+neq*(neq-1)/2) :: K
! write(6,*) 'jac: ',nrowpd
call radtrans_aux(neq,lam,j,K)
! changed from above to add I as argument since not linear eqns any more
call radtrans_jac_form_sph(neq,j,K,I,nrowpd,pd)
! write(6,*) 'pd: ', pd
return
end subroutine radtrans_lsoda_calc_jac_sph
subroutine calc_O(a,rho,dx,identity,O,M1,M2,M3,M4)
real(kind=8), dimension(4), intent(in) :: a
real(kind=8), dimension(3), intent(in) :: rho
real(kind=8), dimension(4,4) :: identity,onopol!,M1,M2,M3,M4
real(kind=8), dimension(4,4), intent(out) :: M1,M2,M3,M4,O
real(kind=8) :: lam1,lam2,ap,theta,sig,a2,p2
real(kind=8) :: aq,au,av,rhoq,rhou,rhov
real(kind=8) :: dx
onopol = exp(-a(1)*dx)
aq = a(2); au = a(3); av = a(4)
rhoq = rho(1); rhou = rho(2); rhov = rho(3)
a2 = aq**2d0+au**2d0+av**2d0
p2 = rhoq**2d0+rhou**2d0+rhov**2d0
if(a2.eq.0d0.and.p2.eq.0d0) then
O = identity*onopol
else
ap = aq*rhoq+au*rhou+av*rhov
lam1 = sqrt(sqrt((a2-p2)**2d0/4d0+ap**2d0)+(a2-p2)/2d0)
lam2 = sqrt(sqrt((a2-p2)**2d0/4d0+ap**2d0)-(a2-p2)/2d0)
theta = lam1**2d0+lam2**2d0
sig = sign(1d0,ap)
M1 = identity
M2(:,1) = (/0d0,lam2*aq-sig*lam1*rhoq,lam2*au-sig*lam1*rhou,lam2*av-sig*lam1*rhov/)
M2(:,2) = (/lam2*aq-sig*lam1*rhoq,0d0,-sig*lam1*av-lam2*rhov,sig*lam1*au+lam2*rhou/)
M2(:,3) = (/lam2*au-sig*lam1*rhou,sig*lam1*av+lam2*rhov,0d0,-sig*lam1*aq-lam2*rhoq/)
M2(:,4) = (/lam2*av-sig*lam1*rhov,-sig*lam1*au-lam2*rhou,sig*lam1*aq+lam2*rhoq,0d0/)
M2 = 1d0/theta*M2
M3(:,1) = (/0d0,lam1*aq+sig*lam2*rhoq,lam1*au+sig*lam2*rhou,lam1*av+sig*lam2*rhov/)
M3(:,2) = (/lam1*aq+sig*lam2*rhoq,0d0,sig*lam2*av-lam1*rhov,-sig*lam2*au+lam1*rhou/)
M3(:,3) = (/lam1*au+sig*lam2*rhou,-sig*lam2*av+lam1*rhov,0d0,sig*lam2*aq-lam1*rhoq/)
M3(:,4) = (/lam1*av+sig*lam2*rhov,sig*lam2*au-lam1*rhou,-sig*lam2*aq+lam1*rhoq,0d0/)
M3=1d0/theta*M3
M4(:,1) = (/(a2+p2)/2d0,au*rhov-av*rhou,av*rhoq-aq*rhov,aq*rhou-au*rhoq/)
M4(:,2) = (/av*rhou-au*rhov,aq*aq+rhoq*rhoq-(a2+p2)/2d0,aq*au+rhoq*rhou,av*aq+rhov*rhoq/)
M4(:,3) = (/aq*rhov-av*rhoq,aq*au+rhoq*rhou,au*au+rhou*rhou-(a2+p2)/2d0,au*av+rhou*rhov/)
M4(:,4) = (/au*rhoq-aq*rhou,av*aq+rhov*rhoq,au*av+rhou*rhov,av*av+rhov*rhov-(a2+p2)/2d0/)
M4=2d0/theta*M4
O = onopol*(1d0/2d0*(cosh(lam1*dx)+cos(lam2*dx))*M1 - sin(lam2*dx)*M2-sinh(lam1*dx)*M3+1d0/2d0 &
*(cosh(lam1*dx)-cos(lam2*dx))*M4)
endif
end subroutine calc_O
subroutine imatrix_4_single(a,b)
real(kind=8), dimension(4,4), intent(in) :: a
real(kind=8), dimension(4,4), intent(out) :: b
real(kind=8) :: detA,a11,a21,a31,a41,a12,a22, &
a32,a42,a13,a23,a33,a43,a14,a24,a34,a44
b = 0d0
a11 = a(1,1); a12 = a(1,2); a13 = a(1,3); a14 = a(1,4)
a21 = a(2,1); a22 = a(2,2); a23 = a(2,3); a24 = a(2,4)
a31 = a(3,1); a32 = a(3,2); a33 = a(3,3); a34 = a(3,4)
a41 = a(4,1); a42 = a(4,2); a43 = a(4,3); a44 = a(4,4)
b(1,1) = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 &
- a22*a34*a43 - a23*a32*a44 - a24*a33*a42
b(1,2) = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 &
- a12*a33*a44 - a13*a34*a42 - a14*a32*a43
b(1,3) = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 &
- a12*a24*a43 - a13*a22*a44 - a14*a23*a42
b(1,4) = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 &
- a12*a23*a34 - a13*a24*a32 - a14*a22*a33
b(2,1) = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 &
- a21*a33*a44 - a23*a34*a41 - a24*a31*a43
b(2,2) = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 &
- a11*a34*a43 - a13*a31*a44 - a14*a33*a41
b(2,3) = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 &
- a11*a23*a44 - a13*a24*a41 - a14*a21*a43
b(2,4) = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 &
- a11*a24*a33 - a13*a21*a34 - a14*a23*a31
b(3,1) = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 &
- a21*a34*a42 - a22*a31*a44 - a24*a32*a41
b(3,2) = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 &
- a11*a32*a44 - a12*a34*a41 - a14*a31*a42
b(3,3) = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 &
- a11*a24*a42 - a12*a21*a44 - a14*a22*a41
b(3,4) = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 &
- a11*a22*a34 - a12*a24*a31 - a14*a21*a32
b(4,1) = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 &
- a21*a32*a43 - a22*a33*a41 - a23*a31*a42
b(4,2) = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 &
- a11*a33*a42 - a12*a31*a43 - a13*a32*a41
b(4,3) = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 &
- a11*a22*a43 - a12*a23*a41 - a13*a21*a42
b(4,4) = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 &
- a11*a23*a32 - a12*a21*a33 - a13*a22*a31
detA = a(1,1)*b(1,1) + a(2,1)*b(1,2) + &
a(3,1)*b(1,3) + a(4,1)*b(1,4)
b = b/detA
end subroutine imatrix_4_single
subroutine opacity_matrix(a,p,Karr)
real(kind=8), dimension(:,:), intent(in) :: a
real(kind=8), dimension(:,:), intent(in) :: p
real(kind=8), dimension(size(a,1),4,4), intent(out) :: Karr
Karr(:,1,1) = a(:,1); Karr(:,2,2) = a(:,1); Karr(:,3,3) = a(:,1); Karr(:,4,4) = a(:,1)
Karr(:,2,1) = a(:,2); Karr(:,3,1) = a(:,3); Karr(:,4,1) = a(:,4)
Karr(:,1,2) = a(:,2); Karr(:,1,3) = a(:,3); Karr(:,1,4) = a(:,4)
Karr(:,3,2) = -p(:,3); Karr(:,4,2) = p(:,2); Karr(:,2,3) = p(:,3); Karr(:,2,4) = -p(:,2)
Karr(:,4,3) = -p(:,1); Karr(:,3,4) = p(:,1)
end subroutine opacity_matrix
subroutine invert_delo_matrix_thin_single(dx,K0,ki,delta,identity,matrix,imatrix)
real(kind=8), intent(in) :: dx,ki,delta
real(kind=8), intent(in), dimension(4,4) :: identity,K0
real(kind=8), intent(out), dimension(4,4) :: matrix,imatrix
matrix = (1d0-delta/2d0+delta**2d0/6d0)*identity &
+(0.5d0*dx-1d0/6d0*dx**2d0*ki)*K0
call imatrix_4(matrix,imatrix)
end subroutine invert_delo_matrix_thin_single
subroutine calc_delo_P_thin_single(imatrix,dx,j,j1,ki,ki1,P)
real(kind=8), intent(in), dimension(4,4) :: imatrix
real(kind=8), intent(in) :: dx,ki,ki1
real(kind=8), intent(in), dimension(4) :: j,j1
real(kind=8), intent(out), dimension(4) :: P
P = matmul(imatrix,(0.5d0*dx*j-1d0/6d0*dx**2d0*ki*j)+ &
(0.5d0*j1*dx-1d0/3d0*dx**2d0*ki*j1))
end subroutine calc_delo_P_thin_single
subroutine calc_delo_Q_thin_single(imatrix,dx,ki,ki1,K1,identity,Q)
real(kind=8), intent(in), dimension(4,4) :: imatrix,K1,identity
real(kind=8), intent(in) :: dx,ki,ki1
real(kind=8), intent(out), dimension(4,4) :: Q
Q = matmul(imatrix,identity*(1d0-0.5d0*dx*ki+1d0/6d0*dx**2d0*ki**2d0) &
-(0.5d0*dx-1d0/3d0*dx**2d0)*K1)
end subroutine calc_delo_Q_thin_single
subroutine invert_delo_matrix_single(F,G,Kp,identity,matrix,imatrix)
real(kind=8), intent(in) :: F,G
real(kind=8), intent(in), dimension(4,4) :: Kp,identity
real(kind=8), intent(out), dimension(4,4) :: matrix,imatrix
matrix = identity+(F-G)*Kp
call imatrix_4(matrix,imatrix)
end subroutine invert_delo_matrix_single
subroutine calc_delo_P_single(imatrix,F,G,Sp,Sp1,P)
real(kind=8), dimension(4,4), intent(in) :: imatrix
real(kind=8), intent(in) :: F,G
real(kind=8), intent(in), dimension(4) :: Sp,Sp1
real(kind=8), intent(out), dimension(4) :: P
P = matmul(imatrix,(F-G)*Sp+G*Sp1)
end subroutine calc_delo_P_single
subroutine calc_delo_Q_single(imatrix,E,F,G,Kp1,identity,Q)
real(kind=8), dimension(4,4), intent(in) :: imatrix,Kp1,identity
real(kind=8), intent(in) :: E,F,G
real(kind=8), dimension(4,4), intent(out) :: Q
Q = matmul(imatrix,identity*E-G*Kp1)
end subroutine calc_delo_Q_single
subroutine radtrans_integrate_delo(x,tau,j,a,rho,P,Q,im)
real(kind=8), dimension(:), intent(in) :: x,tau
real(kind=8), dimension(:,:), intent(in) :: j,a,rho
real(kind=8), dimension(size(x),4), intent(inout) :: P
real(kind=8), dimension(size(x),4,4), intent(inout) :: Q,im
real(kind=8), dimension(size(x)-1) :: delta,dx
real(kind=8) :: E,F,G
real(kind=8), dimension(4) :: iprev,Sp,Sp1,pt,I0
real(kind=8), dimension(4,4) :: identity,Kp,Kp1,K0,K1,qt,imatrix,matrix
real(kind=8), dimension(size(x),4,4) :: Karr
integer :: k
I0=0d0
identity = reshape((/1d0,0d0,0d0,0d0,0d0,1d0,0d0,0d0,0d0,0d0, &
1d0,0d0,0d0,0d0,0d0,1d0/),(/4,4/))
delta = tau(2:npts) - tau(1:npts-1)
call opacity_matrix(a,rho,Karr)
dx = x(1:npts-1) - x(2:npts)
! write(6,*) 'delo integrate: ',thin,minval(dx),maxval(tau),minval(delta),maxval(delta)
! integration is from deepest point out for starting intensity I0
intensity(:,1) = I0; iprev = I0
! alternative way to write this: where(delta.gt.thin) and calculate pt, qt, elsewhere() and calculate ptt, qtt and then do loop free of if statements
do k=npts-1,1,-1
K0 = Karr(k,:,:)
K1 = Karr(k+1,:,:)
if (delta(k).gt.thin) then
E = exp(-delta(k))
F = 1d0-E
G = (1d0-(1d0+delta(k))*E)/delta(k)
Sp = j(k,:)/a(k,1)
Sp1 = j(k+1,:)/a(k+1,1)
Kp = K0/a(k,1)-identity; Kp1 = K1/a(k+1,1)-identity
call invert_delo_matrix(F,G,Kp,identity,matrix,imatrix)
call calc_delo_P(imatrix,F,G,Sp,Sp1,pt)
call calc_delo_Q(imatrix,E,F,G,Kp1,identity,qt)
else
call invert_delo_matrix_thin(dx(k),K0,a(k,1),delta(k),identity,matrix,imatrix)
call calc_delo_P_thin(imatrix,dx(k),j(k,:),j(k+1,:),a(k,1),a(k+1,1),pt)
call calc_delo_Q_thin(imatrix,dx(k),a(k,1),a(k+1,1),K1,identity,qt)
endif
intensity(:,npts-k+1) = pt + matmul(qt,iprev)
P(k,:) = pt
Q(k,:,:) = qt
im(k,:,:) = imatrix
iprev = intensity(:,npts-k+1)
end do
! write(6,*) 'delo intensity: ',intensity(1,:)
! write(6,*) 'delo intensity P: ',P(:,1)
end subroutine radtrans_integrate_delo
subroutine radtrans_integrate_formal(x,j,a,rho,O)
real(kind=8), dimension(:), intent(in) :: x
real(kind=8), dimension(size(x),4), intent(in) :: j,a
real(kind=8), dimension(size(x),3), intent(in) :: rho
real(kind=8), dimension(4,4,size(x)), intent(inout) :: O
real(kind=8), dimension(size(x)-1) :: dx
real(kind=8), dimension(4) :: I0,iprev
real(kind=8), dimension(4,4) :: identity,M1,M2,M3,M4,Ot
integer :: k
I0=0d0
identity = reshape((/1d0,0d0,0d0,0d0,0d0,1d0,0d0,0d0,0d0,0d0, &
1d0,0d0,0d0,0d0,0d0,1d0/),(/4,4/))
dx = x(1:npts-1) - x(2:npts)
intensity(:,1) = I0; iprev = I0
do k=npts-1,1,-1
call calc_O(a(k,:),rho(k,:),dx(k),identity,Ot,M1,M2,M3,M4)
intensity(:,npts-k+1) = matmul(Ot,j(k,:))*dx(k)+matmul(Ot,iprev)
iprev = intensity(:,npts-k+1)
O(:,:,k)=Ot
end do
return
end subroutine radtrans_integrate_formal
subroutine radtrans_integrate_quadrature(s,j,K)
real(kind=8), dimension(:), intent(in) :: s,j,K
! quadrature solution assuming I0=0, - sign from s,j,tau being tabulated backwards:
intensity(1,:)=-tsum(s,j*exp(-tau))
end subroutine radtrans_integrate_quadrature
subroutine del_radtrans_integrate_data()
deallocate(jj); deallocate(KK)
deallocate(s); deallocate(ss); deallocate(s0); deallocate(tau)
deallocate(intensity)
if(iflag==1) then
deallocate(PP); deallocate(QQ); deallocate(imm)
elseif(iflag==2) then
deallocate(OO)
elseif(iflag==3) then
deallocate(stokesq);deallocate(stokesu);deallocate(stokesv)
endif
end subroutine del_radtrans_integrate_data
end module radtrans_integrate
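! Minimal usage sketch (an illustrative assumption about a typical driver,
! not part of the original module): integrate unpolarized emission along a
! tabulated ray with iflag=0 and a single equation.
!
! use radtrans_integrate
! integer :: nout
! real(kind=8) :: svals(100), tauvals(100), jvals(100,1), kvals(100,1)
! ! ... fill svals, jvals, kvals, tauvals along the ray (the module
! ! assumes they are tabulated backwards, from observer inward) ...
! call init_radtrans_integrate_data(0,1,100,100)
! call integrate(svals,jvals,kvals,tauvals,nout)
! ! intensity(1,1:nout) now holds I along the ray
! call del_radtrans_integrate_data()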
|
[STATEMENT]
lemma vifunion_vrange_VLambda: "(\<Union>\<^sub>\<circ>i\<in>\<^sub>\<circ>I. f i) = \<Union>\<^sub>\<circ>(\<R>\<^sub>\<circ> (\<lambda>a\<in>\<^sub>\<circ>I. f a))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Union>\<^sub>\<circ> (VLambda I f `\<^sub>\<circ> I) = \<Union>\<^sub>\<circ> (\<R>\<^sub>\<circ> (VLambda I f))
[PROOF STEP]
using vimage_VLambda_vrange_rep
[PROOF STATE]
proof (prove)
using this:
VLambda ?A ?f `\<^sub>\<circ> ?A = \<R>\<^sub>\<circ> (VLambda ?A ?f)
goal (1 subgoal):
1. \<Union>\<^sub>\<circ> (VLambda I f `\<^sub>\<circ> I) = \<Union>\<^sub>\<circ> (\<R>\<^sub>\<circ> (VLambda I f))
[PROOF STEP]
by auto
|
\section{\module{code} ---
Code object services.}
\declaremodule{standard}{code}
\modulesynopsis{Code object services.}
The \code{code} module defines operations pertaining to Python code
objects. It defines the following function:
\begin{funcdesc}{compile_command}{source, \optional{filename\optional{, symbol}}}
This function is useful for programs that want to emulate Python's
interpreter main loop (a.k.a. the read-eval-print loop). The tricky
part is to determine when the user has entered an incomplete command
that can be completed by entering more text (as opposed to a complete
command or a syntax error). This function \emph{almost} always makes
the same decision as the real interpreter main loop.
Arguments: \var{source} is the source string; \var{filename} is the
optional filename from which source was read, defaulting to
\code{'<input>'}; and \var{symbol} is the optional grammar start
symbol, which should be either \code{'single'} (the default) or
\code{'eval'}.
Return a code object (the same as \code{compile(\var{source},
\var{filename}, \var{symbol})}) if the command is complete and valid;
return \code{None} if the command is incomplete; raise
\exception{SyntaxError} if the command is a syntax error.
\end{funcdesc}
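As a sketch of the intended use (illustrative only, with error handling
kept minimal), a toy read-eval-print loop might be written as:

\begin{verbatim}
import code
buf = []
while 1:
    buf.append(raw_input('>>> '))
    try:
        co = code.compile_command('\n'.join(buf))
    except SyntaxError, e:
        print e
        buf = []
        continue
    if co is None:
        continue        # command incomplete: keep reading
    exec co             # command complete: run it
    buf = []
\end{verbatim}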
|
cube := Group([(2, 3, 4, 5), (1, 2, 6, 4)]);
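# cube is the rotation group of a cube acting on its six faces: the orbit
# of face 1 is all six faces, and the stabilizer of a face is the cyclic
# group of the four rotations about the axis through that face.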
Print("Order(cube) = ", Order(cube), "\n");
Print("Size(Orbit(cube, 1)) = ", Size(Orbit(cube, 1)), "\n");
stab := Stabilizer(cube, 1);
Print("IsCyclic(stab) = ", IsCyclic(stab), "\n");
Print("Order(stab) = ", Order(stab), "\n");
|
# Handwritten Music Symbol Recognition with Deep Ensemble
In ancient times, there was no system for recording or documenting music. Later, musical pieces from the classical and post-classical periods of European music were documented as scores using western staff notation. These notations are used by most modern genres of music due to their versatility. Hence, it is very important to develop a method that can digitally store such sheets of handwritten music scores. Optical Music Recognition (OMR) is a system which automatically interprets scanned handwritten music scores. In this work, we have proposed a classifier ensemble of deep transfer learning models with a Support Vector Machine (SVM) as the aggregation function for handwritten music symbol recognition. We have applied three pre-trained deep learning models, namely ResNet50, GoogleNet and DenseNet161 (each pre-trained on ImageNet), and fine-tuned them on our target datasets, i.e., music symbol image datasets. The proposed ensemble is able to capture a more complex association of the base learners, thus improving the overall performance. We have evaluated the proposed model on three publicly available standard datasets, namely Handwritten Online Music Symbols (HOMUS), Capitan_Score_Non-uniform and Rebelo_real, and achieved state-of-the-art results for all three datasets.
<br></br>
Hyperparameter Initialization
```python
#hyper params
lr = 1e-4
bs = 32
val_split = 0.85
num_epoch = 20
num_classes = 32
```
We use PyTorch to implement the project. Here we import the relevant modules and check for a GPU.
```python
#imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils import data
import numpy as np
import torchvision
from numpy import exp,absolute
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import math
from sklearn import svm
import sklearn.model_selection as model_selection
from sklearn.metrics import accuracy_score,f1_score,precision_score ,recall_score
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```
This function returns the training, validation and test sets, taking the path to the data folder as input. The folder must be arranged as per the `torchvision.datasets.ImageFolder` specification, as illustrated after the code.
```python
def get_TVT(path):
data_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
dataset = datasets.ImageFolder(path+'train/',transform=data_transforms)
train_size = math.floor(len(dataset)*val_split)
val_size = len(dataset) - train_size
trainset, valset = data.random_split(dataset,lengths=[train_size,val_size])
testset = datasets.ImageFolder(path+'test/',transform=data_transforms)
return trainset,valset,testset
```
This function trains a model, tracking the best validation accuracy and restoring the corresponding weights before returning.
```python
def train_model(trainset, valset, model, criterion, optimizer, scheduler, num_epochs):
dataloaders = {
'train': data.DataLoader(trainset,batch_size=bs,shuffle=True),
'val' : data.DataLoader(valset,batch_size=bs,shuffle=True)
}
dataset_sizes = {'train':len(trainset),'val':len(valset)}
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
```
This function calculates the model accuracy on the test set.
```python
def test_acc(model, testset):
    model.eval()  # ensure layers like batch-norm/dropout are in inference mode
    running_corrects = 0
testloader = data.DataLoader(testset,batch_size=bs,shuffle=True)
for inputs, labels in testloader:
inputs = inputs.to(device)
labels = labels.to(device)
with torch.set_grad_enabled(False):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
running_corrects += torch.sum(preds == labels.data)
    return running_corrects.double() / len(testset)
```
This function returns a pair (X, Y) of data and labels. Each row of X is the concatenation of the score vectors produced by the trained models for one sample. If the dataset size is N, the number of classes is c and the number of trained models is k, then X has shape (N, ck). Samples are also weighted based on the total number of unique predictions made on them (explained later).
```python
def get_weighted_score_ft(models,dataset):
num_models = len(models)
X = np.empty((0,num_models*num_classes))
Y = np.empty((0),dtype=int)
dataloader = data.DataLoader(dataset,batch_size=1,shuffle=True)
for inputs,labels in dataloader:
inputs,labels = inputs.to(device),labels.to(device)
predictions = set()
with torch.set_grad_enabled(False):
x = models[0](inputs)
_, preds = torch.max(x, 1)
            predictions.add(preds.item())  # .item() so the set compares label values, not tensor objects
for i in range(1,num_models):
x1 = models[i](inputs)
_, preds = torch.max(x1, 1)
                predictions.add(preds.item())
x = torch.cat((x,x1),dim=1)
        if len(predictions) > 1:
            # base classifiers disagree on this sample: scale its scores by W = 3
            X = np.append(X,x.cpu().numpy()*3,axis=0)
else:
X = np.append(X,x.cpu().numpy(),axis=0)
Y = np.append(Y,labels.cpu().numpy(),axis=0)
return X,Y
```
We load three models pretrained on ImageNet and replace their final classification layers so that each outputs `num_classes` scores.
```python
def get_models():
googlenet = torchvision.models.googlenet(pretrained=True)
resnet = torchvision.models.resnet50(pretrained=True)
densenet = torchvision.models.densenet161(pretrained=True)
densenet.classifier = nn.Linear(2208,num_classes)
resnet.fc = nn.Linear(2048,num_classes)
googlenet.fc = nn.Linear(1024,num_classes)
densenet = densenet.to(device)
resnet = resnet.to(device)
googlenet = googlenet.to(device)
return [densenet,googlenet,resnet]
```
This is the main code cell where all the functions are utilised together. Now let us consider that there are $K$ base classifiers $\{CF_1, CF_2, \dots, CF_K\}$ to deal with an $n$-class classification problem. Hence, the output of any classifier (say, $CF_i$) is an $n$-dimensional vector $O_i = \{s_1^i, s_2^i, \dots, s_n^i\}$. Here, $s_j^i$ is the confidence score produced by the $i$-th classifier for the $j$-th class. We concatenate all the output vectors produced by the classifiers $\{CF_1, CF_2, \dots, CF_K\}$ to get a vector $S$ of length $nK$. $S$ is represented by
\begin{equation}
\label{equ:final_vector}
S = \{s_1^1, s_2^1, \dots, s_n^1, s_1^2, s_2^2, \dots, s_n^K\}
\end{equation}
One such vector $S$ is produced for every sample of the dataset. Let us consider that we have $N$ such samples with corresponding labels $y_i$ in the dataset to be used for training. We thus obtain the set $\{(S_1, y_1), (S_2, y_2), \dots, (S_N, y_N)\}$ on which we train the SVM model. To introduce weights on samples, we consider the total number of unique predictions made on a sample by the base classifiers. For example, if there are three base classifiers and, for some sample, two of the classifiers predict the label 'class-x' while the remaining one predicts the label 'class-y', then the total number of unique predictions for that sample is $2$. If the total number of unique predictions is greater than $1$, there is a conflict among the classifiers over the correct class, so we propose that the SVM should put more emphasis on these samples in order to approximate a better decision boundary.
A sample is assigned $\mathcal{W}$ times more weight if the number of unique predictions for the corresponding image is greater than $\lambda$, an integer whose value lies in $[1, K]$. In this work, we use $K = 3$ base classifiers and $\lambda = 1$, matching the condition `len(predictions) > 1` in the code above. The value of $\mathcal{W}$ is taken as 3, which was decided experimentally.
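As an aside, the same emphasis could also be expressed through scikit-learn's per-sample weights, rather than by scaling the concatenated score vectors as `get_weighted_score_ft` does. A minimal sketch, assuming a hypothetical boolean array `disagreement_mask` that marks samples with more than one unique base-classifier prediction:
```python
# Hypothetical alternative to feature scaling: per-sample SVM weights.
# `disagreement_mask` (assumed) is True for samples the base classifiers
# disagree on; such samples receive W = 3 times more weight.
w = np.where(disagreement_mask, 3.0, 1.0)
clf = svm.SVC(kernel='rbf', break_ties=True)
clf.fit(train_X, train_Y, sample_weight=w)
```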
At test time, an image is first passed through all three DL models and the three output vectors are obtained. These output vectors are then concatenated, maintaining the same model order as during training. We pass this vector through the trained SVM classifier, which predicts the final class of our test image.
```python
criterion = nn.CrossEntropyLoss()
ensemble_accuracy=[]
for fold in ['Fold_1','Fold_2','Fold_3','Fold_4','Fold_5']:
    for folder in ['HOMUS']: # other datasets: 'Capitan_Score_Non-uniform', 'Capitan_Score_Uniform', 'Fornes_Music_Symbols_labelled', 'Rebelo_Syn_labelled'
        trainset,valset,testset = get_TVT('/content/homus/'+fold+'/') # get_TVT takes a single root path; adjust it per dataset (folder) as needed
models = get_models()
for model in models:
optimizer = optim.Adam(model.parameters(),lr=lr)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=9, gamma=0.1)
model = train_model(trainset, valset, model, criterion, optimizer, exp_lr_scheduler,num_epoch)
print(test_acc(model,testset))
train_X, train_Y = get_weighted_score_ft(models,trainset)
test_X, test_Y = get_weighted_score_ft(models,testset)
clf = svm.SVC(kernel='rbf',break_ties=True).fit(train_X, train_Y)
pred = clf.predict(test_X)
acc = accuracy_score(test_Y, pred)
ensemble_accuracy.append(acc)
print('Ensemble on '+fold+': '+str(acc))
print("Average Ensemble Accuracy:",sum(ensemble_accuracy)/len(ensemble_accuracy))
```
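To make the test-time procedure concrete, here is a minimal sketch (the function `predict_single` is our illustration, not part of the original code) of classifying one image with the trained ensemble:
```python
def predict_single(models, clf, image):
    # image: a normalised tensor of shape (3, H, W), as produced by get_TVT's transforms
    inputs = image.unsqueeze(0).to(device)  # add a batch dimension
    with torch.no_grad():
        # concatenate the score vectors in the same model order as during training
        scores = torch.cat([m(inputs) for m in models], dim=1)
    return clf.predict(scores.cpu().numpy())[0]
```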
|
/-
Copyright (c) 2021 David Wärn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Wärn
! This file was ported from Lean 3 source module combinatorics.quiver.symmetric
! leanprover-community/mathlib commit c3291da49cfa65f0d43b094750541c0731edc932
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Combinatorics.Quiver.Basic
import Mathbin.Combinatorics.Quiver.Path
import Mathbin.Combinatorics.Quiver.Push
import Mathbin.Data.Sum.Basic
/-!
## Symmetric quivers and arrow reversal
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file contains constructions related to symmetric quivers:
* `symmetrify V` adds formal inverses to each arrow of `V`.
* `has_reverse` is the class of quivers where each arrow has an assigned formal inverse.
* `has_involutive_reverse` extends `has_reverse` by requiring that the reverse of the reverse
is equal to the original arrow.
* `prefunctor.preserve_reverse` is the class of prefunctors mapping reverses to reverses.
* `symmetrify.of`, `symmetrify.lift`, and the associated lemmas witness the universal property
of `symmetrify`.
-/
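/- For example, an arrow `f : a ⟶ b` in `V` gives two arrows in `symmetrify V`:
`f.to_pos : a ⟶ b` and `f.to_neg : b ⟶ a`, and (since reversal on `symmetrify V`
is `sum.swap`) `reverse f.to_pos = f.to_neg`. -/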
universe v u w v'
namespace Quiver
#print Quiver.Symmetrify /-
/-- A type synonym for the symmetrized quiver (with an arrow both ways for each original arrow).
NB: this does not work for `Prop`-valued quivers. It requires `[quiver.{v+1} V]`. -/
@[nolint has_nonempty_instance]
def Symmetrify (V : Type _) :=
V
#align quiver.symmetrify Quiver.Symmetrify
-/
#print Quiver.symmetrifyQuiver /-
instance symmetrifyQuiver (V : Type u) [Quiver V] : Quiver (Symmetrify V) :=
⟨fun a b : V => Sum (a ⟶ b) (b ⟶ a)⟩
#align quiver.symmetrify_quiver Quiver.symmetrifyQuiver
-/
variable (U V W : Type _) [Quiver.{u + 1} U] [Quiver.{v + 1} V] [Quiver.{w + 1} W]
#print Quiver.HasReverse /-
/-- A quiver `has_reverse` if we can reverse an arrow `p` from `a` to `b` to get an arrow
`p.reverse` from `b` to `a`.-/
class HasReverse where
reverse' : ∀ {a b : V}, (a ⟶ b) → (b ⟶ a)
#align quiver.has_reverse Quiver.HasReverse
-/
#print Quiver.reverse /-
/-- Reverse the direction of an arrow. -/
def reverse {V} [Quiver.{v + 1} V] [HasReverse V] {a b : V} : (a ⟶ b) → (b ⟶ a) :=
HasReverse.reverse'
#align quiver.reverse Quiver.reverse
-/
#print Quiver.HasInvolutiveReverse /-
/-- A quiver `has_involutive_reverse` if reversing twice is the identity. -/
class HasInvolutiveReverse extends HasReverse V where
inv' : ∀ {a b : V} (f : a ⟶ b), reverse (reverse f) = f
#align quiver.has_involutive_reverse Quiver.HasInvolutiveReverse
-/
variable {U V W}
/- warning: quiver.reverse_reverse -> Quiver.reverse_reverse is a dubious translation:
lean 3 declaration is
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u1, u2} V] [h : Quiver.HasInvolutiveReverse.{u1, u2} V _inst_2] {a : V} {b : V} (f : Quiver.Hom.{succ u1, u2} V _inst_2 a b), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} V _inst_2 a b) (Quiver.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 h) b a (Quiver.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 h) a b f)) f
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u2, u1} V] [h : Quiver.HasInvolutiveReverse.{u2, u1} V _inst_2] {a : V} {b : V} (f : Quiver.Hom.{succ u2, u1} V _inst_2 a b), Eq.{succ u2} (Quiver.Hom.{succ u2, u1} V _inst_2 a b) (Quiver.reverse.{u2, u1} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u1} V _inst_2 h) b a (Quiver.reverse.{u2, u1} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u1} V _inst_2 h) a b f)) f
Case conversion may be inaccurate. Consider using '#align quiver.reverse_reverse Quiver.reverse_reverseₓ'. -/
@[simp]
theorem reverse_reverse [h : HasInvolutiveReverse V] {a b : V} (f : a ⟶ b) :
reverse (reverse f) = f :=
h.inv' f
#align quiver.reverse_reverse Quiver.reverse_reverse
/- warning: quiver.reverse_inj -> Quiver.reverse_inj is a dubious translation:
lean 3 declaration is
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u1, u2} V] [_inst_4 : Quiver.HasInvolutiveReverse.{u1, u2} V _inst_2] {a : V} {b : V} (f : Quiver.Hom.{succ u1, u2} V _inst_2 a b) (g : Quiver.Hom.{succ u1, u2} V _inst_2 a b), Iff (Eq.{succ u1} (Quiver.Hom.{succ u1, u2} V _inst_2 b a) (Quiver.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 _inst_4) a b f) (Quiver.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 _inst_4) a b g)) (Eq.{succ u1} (Quiver.Hom.{succ u1, u2} V _inst_2 a b) f g)
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u2, u1} V] [_inst_4 : Quiver.HasInvolutiveReverse.{u2, u1} V _inst_2] {a : V} {b : V} (f : Quiver.Hom.{succ u2, u1} V _inst_2 a b) (g : Quiver.Hom.{succ u2, u1} V _inst_2 a b), Iff (Eq.{succ u2} (Quiver.Hom.{succ u2, u1} V _inst_2 b a) (Quiver.reverse.{u2, u1} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u1} V _inst_2 _inst_4) a b f) (Quiver.reverse.{u2, u1} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u1} V _inst_2 _inst_4) a b g)) (Eq.{succ u2} (Quiver.Hom.{succ u2, u1} V _inst_2 a b) f g)
Case conversion may be inaccurate. Consider using '#align quiver.reverse_inj Quiver.reverse_injₓ'. -/
@[simp]
theorem reverse_inj [HasInvolutiveReverse V] {a b : V} (f g : a ⟶ b) :
reverse f = reverse g ↔ f = g := by
constructor
· rintro h
simpa using congr_arg Quiver.reverse h
· rintro h
congr
assumption
#align quiver.reverse_inj Quiver.reverse_inj
/- warning: quiver.eq_reverse_iff -> Quiver.eq_reverse_iff is a dubious translation:
lean 3 declaration is
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u1, u2} V] [_inst_4 : Quiver.HasInvolutiveReverse.{u1, u2} V _inst_2] {a : V} {b : V} (f : Quiver.Hom.{succ u1, u2} V _inst_2 a b) (g : Quiver.Hom.{succ u1, u2} V _inst_2 b a), Iff (Eq.{succ u1} (Quiver.Hom.{succ u1, u2} V _inst_2 a b) f (Quiver.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 _inst_4) b a g)) (Eq.{succ u1} (Quiver.Hom.{succ u1, u2} V _inst_2 b a) (Quiver.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 _inst_4) a b f) g)
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u2, u1} V] [_inst_4 : Quiver.HasInvolutiveReverse.{u2, u1} V _inst_2] {a : V} {b : V} (f : Quiver.Hom.{succ u2, u1} V _inst_2 a b) (g : Quiver.Hom.{succ u2, u1} V _inst_2 b a), Iff (Eq.{succ u2} (Quiver.Hom.{succ u2, u1} V _inst_2 a b) f (Quiver.reverse.{u2, u1} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u1} V _inst_2 _inst_4) b a g)) (Eq.{succ u2} (Quiver.Hom.{succ u2, u1} V _inst_2 b a) (Quiver.reverse.{u2, u1} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u1} V _inst_2 _inst_4) a b f) g)
Case conversion may be inaccurate. Consider using '#align quiver.eq_reverse_iff Quiver.eq_reverse_iffₓ'. -/
theorem eq_reverse_iff [HasInvolutiveReverse V] {a b : V} (f : a ⟶ b) (g : b ⟶ a) :
f = reverse g ↔ reverse f = g := by rw [← reverse_inj, reverse_reverse]
#align quiver.eq_reverse_iff Quiver.eq_reverse_iff
section MapReverse
variable [HasReverse U] [HasReverse V] [HasReverse W]
#print Prefunctor.MapReverse /-
/-- A prefunctor preserving reversal of arrows -/
class Prefunctor.MapReverse (φ : U ⥤q V) where
map_reverse' : ∀ {u v : U} (e : u ⟶ v), φ.map (reverse e) = reverse (φ.map e)
#align prefunctor.map_reverse Prefunctor.MapReverse
-/
/- warning: prefunctor.map_reverse' -> Prefunctor.map_reverse is a dubious translation:
lean 3 declaration is
forall {U : Type.{u3}} {V : Type.{u4}} [_inst_1 : Quiver.{succ u2, u3} U] [_inst_2 : Quiver.{succ u1, u4} V] [_inst_4 : Quiver.HasReverse.{u2, u3} U _inst_1] [_inst_5 : Quiver.HasReverse.{u1, u4} V _inst_2] (φ : Prefunctor.{succ u2, succ u1, u3, u4} U _inst_1 V _inst_2) [_inst_7 : Prefunctor.MapReverse.{u1, u2, u3, u4} U V _inst_1 _inst_2 _inst_4 _inst_5 φ] {u : U} {v : U} (e : Quiver.Hom.{succ u2, u3} U _inst_1 u v), Eq.{succ u1} (Quiver.Hom.{succ u1, u4} V _inst_2 (Prefunctor.obj.{succ u2, succ u1, u3, u4} U _inst_1 V _inst_2 φ v) (Prefunctor.obj.{succ u2, succ u1, u3, u4} U _inst_1 V _inst_2 φ u)) (Prefunctor.map.{succ u2, succ u1, u3, u4} U _inst_1 V _inst_2 φ v u (Quiver.reverse.{u2, u3} U _inst_1 _inst_4 u v e)) (Quiver.reverse.{u1, u4} V _inst_2 _inst_5 (Prefunctor.obj.{succ u2, succ u1, u3, u4} U _inst_1 V _inst_2 φ u) (Prefunctor.obj.{succ u2, succ u1, u3, u4} U _inst_1 V _inst_2 φ v) (Prefunctor.map.{succ u2, succ u1, u3, u4} U _inst_1 V _inst_2 φ u v e))
but is expected to have type
forall {U : Type.{u2}} {V : Type.{u1}} [_inst_1 : Quiver.{succ u4, u2} U] [_inst_2 : Quiver.{succ u3, u1} V] [_inst_4 : Quiver.HasReverse.{u4, u2} U _inst_1] [_inst_5 : Quiver.HasReverse.{u3, u1} V _inst_2] (φ : Prefunctor.{succ u4, succ u3, u2, u1} U _inst_1 V _inst_2) [_inst_7 : Prefunctor.MapReverse.{u3, u4, u2, u1} U V _inst_1 _inst_2 _inst_4 _inst_5 φ] {u : U} {v : U} (e : Quiver.Hom.{succ u4, u2} U _inst_1 u v), Eq.{succ u3} (Quiver.Hom.{succ u3, u1} V _inst_2 (Prefunctor.obj.{succ u4, succ u3, u2, u1} U _inst_1 V _inst_2 φ v) (Prefunctor.obj.{succ u4, succ u3, u2, u1} U _inst_1 V _inst_2 φ u)) (Prefunctor.map.{succ u4, succ u3, u2, u1} U _inst_1 V _inst_2 φ v u (Quiver.reverse.{u4, u2} U _inst_1 _inst_4 u v e)) (Quiver.reverse.{u3, u1} V _inst_2 _inst_5 (Prefunctor.obj.{succ u4, succ u3, u2, u1} U _inst_1 V _inst_2 φ u) (Prefunctor.obj.{succ u4, succ u3, u2, u1} U _inst_1 V _inst_2 φ v) (Prefunctor.map.{succ u4, succ u3, u2, u1} U _inst_1 V _inst_2 φ u v e))
Case conversion may be inaccurate. Consider using '#align prefunctor.map_reverse' Prefunctor.map_reverseₓ'. -/
@[simp]
theorem Prefunctor.map_reverse (φ : U ⥤q V) [φ.MapReverse] {u v : U} (e : u ⟶ v) :
φ.map (reverse e) = reverse (φ.map e) :=
Prefunctor.MapReverse.map_reverse' e
#align prefunctor.map_reverse' Prefunctor.map_reverse
#print Prefunctor.mapReverseComp /-
instance Prefunctor.mapReverseComp (φ : U ⥤q V) (ψ : V ⥤q W) [φ.MapReverse] [ψ.MapReverse] :
(φ ⋙q ψ).MapReverse
where map_reverse' u v e := by simp only [Prefunctor.comp_map, Prefunctor.map_reverse]
#align prefunctor.map_reverse_comp Prefunctor.mapReverseComp
-/
#print Prefunctor.mapReverseId /-
instance Prefunctor.mapReverseId : (Prefunctor.id U).MapReverse where map_reverse' u v e := rfl
#align prefunctor.map_reverse_id Prefunctor.mapReverseId
-/
end MapReverse
instance : HasReverse (Symmetrify V) :=
⟨fun a b e => e.symm⟩
instance : HasInvolutiveReverse (Symmetrify V)
where
reverse' _ _ e := e.symm
inv' _ _ e := congr_fun Sum.swap_swap_eq e
/- warning: quiver.symmetrify_reverse -> Quiver.symmetrify_reverse is a dubious translation:
lean 3 declaration is
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u1, u2} V] {a : Quiver.Symmetrify.{u2} V} {b : Quiver.Symmetrify.{u2} V} (e : Quiver.Hom.{succ u1, u2} (Quiver.Symmetrify.{u2} V) (Quiver.symmetrifyQuiver.{u2, u1} V _inst_2) a b), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Quiver.Symmetrify.{u2} V) (Quiver.symmetrifyQuiver.{u2, u1} V _inst_2) b a) (Quiver.reverse.{u1, u2} (Quiver.Symmetrify.{u2} V) (Quiver.symmetrifyQuiver.{u2, u1} V _inst_2) (Quiver.Symmetrify.hasReverse.{u1, u2} V _inst_2) a b e) (Sum.swap.{u1, u1} (Quiver.Hom.{succ u1, u2} V _inst_2 a b) (Quiver.Hom.{succ u1, u2} V _inst_2 b a) e)
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u2, u1} V] {a : Quiver.Symmetrify.{u1} V} {b : Quiver.Symmetrify.{u1} V} (e : Quiver.Hom.{succ u2, u1} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u2} V _inst_2) a b), Eq.{succ u2} (Quiver.Hom.{succ u2, u1} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u2} V _inst_2) b a) (Quiver.reverse.{u2, u1} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u2} V _inst_2) (Quiver.instHasReverseSymmetrifySymmetrifyQuiver.{u2, u1} V _inst_2) a b e) (Sum.swap.{u2, u2} (Quiver.Hom.{succ u2, u1} V _inst_2 a b) (Quiver.Hom.{succ u2, u1} V _inst_2 b a) e)
Case conversion may be inaccurate. Consider using '#align quiver.symmetrify_reverse Quiver.symmetrify_reverseₓ'. -/
@[simp]
theorem symmetrify_reverse {a b : Symmetrify V} (e : a ⟶ b) : reverse e = e.symm :=
rfl
#align quiver.symmetrify_reverse Quiver.symmetrify_reverse
#print Quiver.Hom.toPos /-
/-- Shorthand for the "forward" arrow corresponding to `f` in `symmetrify V` -/
abbrev Hom.toPos {X Y : V} (f : X ⟶ Y) : (Quiver.symmetrifyQuiver V).Hom X Y :=
Sum.inl f
#align quiver.hom.to_pos Quiver.Hom.toPos
-/
#print Quiver.Hom.toNeg /-
/-- Shorthand for the "backward" arrow corresponding to `f` in `symmetrify V` -/
abbrev Hom.toNeg {X Y : V} (f : X ⟶ Y) : (Quiver.symmetrifyQuiver V).Hom Y X :=
Sum.inr f
#align quiver.hom.to_neg Quiver.Hom.toNeg
-/
#print Quiver.Path.reverse /-
/-- Reverse the direction of a path. -/
@[simp]
def Path.reverse [HasReverse V] {a : V} : ∀ {b}, Path a b → Path b a
| a, path.nil => Path.nil
| b, path.cons p e => (reverse e).toPath.comp p.reverse
#align quiver.path.reverse Quiver.Path.reverse
-/
/- warning: quiver.path.reverse_to_path -> Quiver.Path.reverse_toPath is a dubious translation:
lean 3 declaration is
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u1, u2} V] [_inst_4 : Quiver.HasReverse.{u1, u2} V _inst_2] {a : V} {b : V} (f : Quiver.Hom.{succ u1, u2} V _inst_2 a b), Eq.{max (succ u2) (succ u1)} (Quiver.Path.{succ u1, u2} V _inst_2 b a) (Quiver.Path.reverse.{u1, u2} V _inst_2 _inst_4 a b (Quiver.Hom.toPath.{u2, succ u1} V _inst_2 a b f)) (Quiver.Hom.toPath.{u2, succ u1} V _inst_2 b a (Quiver.reverse.{u1, u2} V _inst_2 _inst_4 a b f))
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u2, u1} V] [_inst_4 : Quiver.HasReverse.{u2, u1} V _inst_2] {a : V} {b : V} (f : Quiver.Hom.{succ u2, u1} V _inst_2 a b), Eq.{max (succ u2) (succ u1)} (Quiver.Path.{succ u2, u1} V _inst_2 b a) (Quiver.Path.reverse.{u2, u1} V _inst_2 _inst_4 a b (Quiver.Hom.toPath.{u1, succ u2} V _inst_2 a b f)) (Quiver.Hom.toPath.{u1, succ u2} V _inst_2 b a (Quiver.reverse.{u2, u1} V _inst_2 _inst_4 a b f))
Case conversion may be inaccurate. Consider using '#align quiver.path.reverse_to_path Quiver.Path.reverse_toPathₓ'. -/
@[simp]
theorem Path.reverse_toPath [HasReverse V] {a b : V} (f : a ⟶ b) :
f.toPath.reverse = (reverse f).toPath :=
rfl
#align quiver.path.reverse_to_path Quiver.Path.reverse_toPath
/- warning: quiver.path.reverse_comp -> Quiver.Path.reverse_comp is a dubious translation:
lean 3 declaration is
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u1, u2} V] [_inst_4 : Quiver.HasReverse.{u1, u2} V _inst_2] {a : V} {b : V} {c : V} (p : Quiver.Path.{succ u1, u2} V _inst_2 a b) (q : Quiver.Path.{succ u1, u2} V _inst_2 b c), Eq.{max (succ u2) (succ u1)} (Quiver.Path.{succ u1, u2} V _inst_2 c a) (Quiver.Path.reverse.{u1, u2} V _inst_2 _inst_4 a c (Quiver.Path.comp.{u2, succ u1} V _inst_2 a b c p q)) (Quiver.Path.comp.{u2, succ u1} V _inst_2 c b a (Quiver.Path.reverse.{u1, u2} V _inst_2 _inst_4 b c q) (Quiver.Path.reverse.{u1, u2} V _inst_2 _inst_4 a b p))
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u2, u1} V] [_inst_4 : Quiver.HasReverse.{u2, u1} V _inst_2] {a : V} {b : V} {c : V} (p : Quiver.Path.{succ u2, u1} V _inst_2 a b) (q : Quiver.Path.{succ u2, u1} V _inst_2 b c), Eq.{max (succ u2) (succ u1)} (Quiver.Path.{succ u2, u1} V _inst_2 c a) (Quiver.Path.reverse.{u2, u1} V _inst_2 _inst_4 a c (Quiver.Path.comp.{u1, succ u2} V _inst_2 a b c p q)) (Quiver.Path.comp.{u1, succ u2} V _inst_2 c b a (Quiver.Path.reverse.{u2, u1} V _inst_2 _inst_4 b c q) (Quiver.Path.reverse.{u2, u1} V _inst_2 _inst_4 a b p))
Case conversion may be inaccurate. Consider using '#align quiver.path.reverse_comp Quiver.Path.reverse_compₓ'. -/
@[simp]
theorem Path.reverse_comp [HasReverse V] {a b c : V} (p : Path a b) (q : Path b c) :
(p.comp q).reverse = q.reverse.comp p.reverse :=
by
induction q
· simp
· simp [q_ih]
#align quiver.path.reverse_comp Quiver.Path.reverse_comp
/- warning: quiver.path.reverse_reverse -> Quiver.Path.reverse_reverse is a dubious translation:
lean 3 declaration is
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u1, u2} V] [_inst_4 : Quiver.HasInvolutiveReverse.{u1, u2} V _inst_2] {a : V} {b : V} (p : Quiver.Path.{succ u1, u2} V _inst_2 a b), Eq.{max (succ u2) (succ u1)} (Quiver.Path.{succ u1, u2} V _inst_2 a b) (Quiver.Path.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 _inst_4) b a (Quiver.Path.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 _inst_4) a b p)) p
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u2, u1} V] [_inst_4 : Quiver.HasInvolutiveReverse.{u2, u1} V _inst_2] {a : V} {b : V} (p : Quiver.Path.{succ u2, u1} V _inst_2 a b), Eq.{max (succ u2) (succ u1)} (Quiver.Path.{succ u2, u1} V _inst_2 a b) (Quiver.Path.reverse.{u2, u1} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u1} V _inst_2 _inst_4) b a (Quiver.Path.reverse.{u2, u1} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u1} V _inst_2 _inst_4) a b p)) p
Case conversion may be inaccurate. Consider using '#align quiver.path.reverse_reverse Quiver.Path.reverse_reverseₓ'. -/
@[simp]
theorem Path.reverse_reverse [HasInvolutiveReverse V] {a b : V} (p : Path a b) :
p.reverse.reverse = p := by
induction p
· simp
· simp only [path.reverse, path.reverse_comp, path.reverse_to_path, reverse_reverse, p_ih]
rfl
#align quiver.path.reverse_reverse Quiver.Path.reverse_reverse
namespace Symmetrify
#print Quiver.Symmetrify.of /-
/-- The inclusion of a quiver in its symmetrification -/
@[simps]
def of : V ⥤q Symmetrify V where
obj := id
map X Y f := Sum.inl f
#align quiver.symmetrify.of Quiver.Symmetrify.of
-/
variable {V' : Type _} [Quiver.{v' + 1} V']
#print Quiver.Symmetrify.lift /-
/-- Given a quiver `V'` with reversible arrows, a prefunctor to `V'` can be lifted to one from
`symmetrify V` to `V'` -/
def lift [HasReverse V'] (φ : V ⥤q V') : Symmetrify V ⥤q V'
where
obj := φ.obj
map X Y f := Sum.rec (fun fwd => φ.map fwd) (fun bwd => reverse (φ.map bwd)) f
#align quiver.symmetrify.lift Quiver.Symmetrify.lift
-/
/- warning: quiver.symmetrify.lift_spec -> Quiver.Symmetrify.lift_spec is a dubious translation:
lean 3 declaration is
forall {V : Type.{u3}} [_inst_2 : Quiver.{succ u1, u3} V] {V' : Type.{u4}} [_inst_4 : Quiver.{succ u2, u4} V'] [_inst_5 : Quiver.HasReverse.{u2, u4} V' _inst_4] (φ : Prefunctor.{succ u1, succ u2, u3, u4} V _inst_2 V' _inst_4), Eq.{max (max (succ u3) (succ u1) (succ u2)) (succ u3) (succ u4)} (Prefunctor.{succ u1, succ u2, u3, u4} V _inst_2 V' _inst_4) (Prefunctor.comp.{u3, succ u1, u3, succ u1, u4, succ u2} V _inst_2 (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4 (Quiver.Symmetrify.of.{u1, u3} V _inst_2) (Quiver.Symmetrify.lift.{u1, u2, u3, u4} V _inst_2 V' _inst_4 _inst_5 φ)) φ
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u3, u1} V] {V' : Type.{u2}} [_inst_4 : Quiver.{succ u4, u2} V'] [_inst_5 : Quiver.HasReverse.{u4, u2} V' _inst_4] (φ : Prefunctor.{succ u3, succ u4, u1, u2} V _inst_2 V' _inst_4), Eq.{max (max (max (succ u3) (succ u4)) (succ u1)) (succ u2)} (Prefunctor.{succ u3, succ u4, u1, u2} V _inst_2 V' _inst_4) (Prefunctor.comp.{u1, succ u3, u1, succ u3, u2, succ u4} V _inst_2 (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 (Quiver.Symmetrify.of.{u3, u1} V _inst_2) (Quiver.Symmetrify.lift.{u3, u4, u1, u2} V _inst_2 V' _inst_4 _inst_5 φ)) φ
Case conversion may be inaccurate. Consider using '#align quiver.symmetrify.lift_spec Quiver.Symmetrify.lift_specₓ'. -/
theorem lift_spec [HasReverse V'] (φ : V ⥤q V') : of ⋙q lift φ = φ :=
by
fapply Prefunctor.ext
· rintro X
rfl
· rintro X Y f
rfl
#align quiver.symmetrify.lift_spec Quiver.Symmetrify.lift_spec
/- warning: quiver.symmetrify.lift_reverse -> Quiver.Symmetrify.lift_reverse is a dubious translation:
lean 3 declaration is
forall {V : Type.{u3}} [_inst_2 : Quiver.{succ u1, u3} V] {V' : Type.{u4}} [_inst_4 : Quiver.{succ u2, u4} V'] [h : Quiver.HasInvolutiveReverse.{u2, u4} V' _inst_4] (φ : Prefunctor.{succ u1, succ u2, u3, u4} V _inst_2 V' _inst_4) {X : Quiver.Symmetrify.{u3} V} {Y : Quiver.Symmetrify.{u3} V} (f : Quiver.Hom.{succ u1, u3} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) X Y), Eq.{succ u2} (Quiver.Hom.{succ u2, u4} V' _inst_4 (Prefunctor.obj.{succ u1, succ u2, u3, u4} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u1, u2, u3, u4} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u4} V' _inst_4 h) φ) Y) (Prefunctor.obj.{succ u1, succ u2, u3, u4} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u1, u2, u3, u4} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u4} V' _inst_4 h) φ) X)) (Prefunctor.map.{succ u1, succ u2, u3, u4} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u1, u2, u3, u4} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u4} V' _inst_4 h) φ) Y X (Quiver.reverse.{u1, u3} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) (Quiver.Symmetrify.hasReverse.{u1, u3} V _inst_2) X Y f)) (Quiver.reverse.{u2, u4} V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u4} V' _inst_4 h) (Prefunctor.obj.{succ u1, succ u2, u3, u4} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u1, u2, u3, u4} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u4} V' _inst_4 h) φ) X) (Prefunctor.obj.{succ u1, succ u2, u3, u4} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u1, u2, u3, u4} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u4} V' _inst_4 h) φ) Y) (Prefunctor.map.{succ u1, succ u2, u3, u4} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u1, u2, u3, u4} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u2, u4} V' _inst_4 h) φ) X Y f))
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u3, u1} V] {V' : Type.{u2}} [_inst_4 : Quiver.{succ u4, u2} V'] [h : Quiver.HasInvolutiveReverse.{u4, u2} V' _inst_4] (φ : Prefunctor.{succ u3, succ u4, u1, u2} V _inst_2 V' _inst_4) {X : Quiver.Symmetrify.{u1} V} {Y : Quiver.Symmetrify.{u1} V} (f : Quiver.Hom.{succ u3, u1} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) X Y), Eq.{succ u4} (Quiver.Hom.{succ u4, u2} V' _inst_4 (Prefunctor.obj.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u3, u4, u1, u2} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u4, u2} V' _inst_4 h) φ) Y) (Prefunctor.obj.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u3, u4, u1, u2} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u4, u2} V' _inst_4 h) φ) X)) (Prefunctor.map.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u3, u4, u1, u2} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u4, u2} V' _inst_4 h) φ) Y X (Quiver.reverse.{u3, u1} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) (Quiver.instHasReverseSymmetrifySymmetrifyQuiver.{u3, u1} V _inst_2) X Y f)) (Quiver.reverse.{u4, u2} V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u4, u2} V' _inst_4 h) (Prefunctor.obj.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u3, u4, u1, u2} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u4, u2} V' _inst_4 h) φ) X) (Prefunctor.obj.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u3, u4, u1, u2} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u4, u2} V' _inst_4 h) φ) Y) (Prefunctor.map.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 (Quiver.Symmetrify.lift.{u3, u4, u1, u2} V _inst_2 V' _inst_4 (Quiver.HasInvolutiveReverse.toHasReverse.{u4, u2} V' _inst_4 h) φ) X Y f))
Case conversion may be inaccurate. Consider using '#align quiver.symmetrify.lift_reverse Quiver.Symmetrify.lift_reverseₓ'. -/
theorem lift_reverse [h : HasInvolutiveReverse V'] (φ : V ⥤q V') {X Y : Symmetrify V} (f : X ⟶ Y) :
(lift φ).map (Quiver.reverse f) = Quiver.reverse ((lift φ).map f) :=
by
dsimp [lift]; cases f
· simp only
rfl
· simp only [reverse_reverse]
rfl
#align quiver.symmetrify.lift_reverse Quiver.Symmetrify.lift_reverse
/- warning: quiver.symmetrify.lift_unique -> Quiver.Symmetrify.lift_unique is a dubious translation:
lean 3 declaration is
forall {V : Type.{u3}} [_inst_2 : Quiver.{succ u1, u3} V] {V' : Type.{u4}} [_inst_4 : Quiver.{succ u2, u4} V'] [_inst_5 : Quiver.HasReverse.{u2, u4} V' _inst_4] (φ : Prefunctor.{succ u1, succ u2, u3, u4} V _inst_2 V' _inst_4) (Φ : Prefunctor.{succ u1, succ u2, u3, u4} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4), (Eq.{max (max (succ u3) (succ u1) (succ u2)) (succ u3) (succ u4)} (Prefunctor.{succ u1, succ u2, u3, u4} V _inst_2 V' _inst_4) (Prefunctor.comp.{u3, succ u1, u3, succ u1, u4, succ u2} V _inst_2 (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4 (Quiver.Symmetrify.of.{u1, u3} V _inst_2) Φ) φ) -> (forall [hΦrev : Prefunctor.MapReverse.{u2, u1, u3, u4} (Quiver.Symmetrify.{u3} V) V' (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) _inst_4 (Quiver.Symmetrify.hasReverse.{u1, u3} V _inst_2) _inst_5 Φ], Eq.{max (max (succ u3) (succ u1) (succ u2)) (succ u3) (succ u4)} (Prefunctor.{succ u1, succ u2, u3, u4} (Quiver.Symmetrify.{u3} V) (Quiver.symmetrifyQuiver.{u3, u1} V _inst_2) V' _inst_4) Φ (Quiver.Symmetrify.lift.{u1, u2, u3, u4} V _inst_2 V' _inst_4 _inst_5 φ))
but is expected to have type
forall {V : Type.{u1}} [_inst_2 : Quiver.{succ u3, u1} V] {V' : Type.{u2}} [_inst_4 : Quiver.{succ u4, u2} V'] [_inst_5 : Quiver.HasReverse.{u4, u2} V' _inst_4] (φ : Prefunctor.{succ u3, succ u4, u1, u2} V _inst_2 V' _inst_4) (Φ : Prefunctor.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4), (Eq.{max (max (max (succ u3) (succ u4)) (succ u1)) (succ u2)} (Prefunctor.{succ u3, succ u4, u1, u2} V _inst_2 V' _inst_4) (Prefunctor.comp.{u1, succ u3, u1, succ u3, u2, succ u4} V _inst_2 (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 (Quiver.Symmetrify.of.{u3, u1} V _inst_2) Φ) φ) -> (forall {X : Quiver.Symmetrify.{u1} V} {Y : Quiver.Symmetrify.{u1} V} (f : Quiver.Hom.{succ u3, u1} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) X Y), Eq.{succ u4} (Quiver.Hom.{succ u4, u2} V' _inst_4 (Prefunctor.obj.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 Φ Y) (Prefunctor.obj.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 Φ X)) (Prefunctor.map.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 Φ Y X (Quiver.reverse.{u3, u1} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) (Quiver.instHasReverseSymmetrifySymmetrifyQuiver.{u3, u1} V _inst_2) X Y f)) (Quiver.reverse.{u4, u2} V' _inst_4 _inst_5 (Prefunctor.obj.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 Φ X) (Prefunctor.obj.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 Φ Y) (Prefunctor.map.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4 Φ X Y f))) -> (Eq.{max (max (max (succ u3) (succ u4)) (succ u1)) (succ u2)} (Prefunctor.{succ u3, succ u4, u1, u2} (Quiver.Symmetrify.{u1} V) (Quiver.symmetrifyQuiver.{u1, u3} V _inst_2) V' _inst_4) Φ (Quiver.Symmetrify.lift.{u3, u4, u1, u2} V _inst_2 V' _inst_4 _inst_5 φ))
Case conversion may be inaccurate. Consider using '#align quiver.symmetrify.lift_unique Quiver.Symmetrify.lift_uniqueₓ'. -/
/-- `lift φ` is the only prefunctor extending `φ` and preserving reverses. -/
theorem lift_unique [HasReverse V'] (φ : V ⥤q V') (Φ : Symmetrify V ⥤q V') (hΦ : of ⋙q Φ = φ)
[hΦrev : Φ.MapReverse] : Φ = lift φ := by
subst_vars
fapply Prefunctor.ext
· rintro X
rfl
· rintro X Y f
cases f
· rfl
· dsimp [lift, of]
simp only [← Prefunctor.map_reverse, symmetrify_reverse, Sum.swap_inl]
#align quiver.symmetrify.lift_unique Quiver.Symmetrify.lift_unique
/-- A prefunctor canonically defines a prefunctor of the symmetrifications. -/
@[simps]
def Prefunctor.symmetrify (φ : U ⥤q V) : Symmetrify U ⥤q Symmetrify V
where
obj := φ.obj
map X Y := Sum.map φ.map φ.map
#align prefunctor.symmetrify Prefunctor.symmetrify
instance Prefunctor.symmetrifyMapReverse (φ : U ⥤q V) : Prefunctor.MapReverse φ.Symmetrify :=
⟨fun u v e => by cases e <;> rfl⟩
#align prefunctor.symmetrify_map_reverse Prefunctor.symmetrifyMapReverse
end Symmetrify
namespace Push
variable {V' : Type _} (σ : V → V')
instance [HasReverse V] : HasReverse (Push σ)
where reverse' a b F := by
cases F
constructor
apply reverse
exact F_f
instance [HasInvolutiveReverse V] : HasInvolutiveReverse (Push σ)
where
reverse' a b F := by
cases F
constructor
apply reverse
exact F_f
inv' a b F := by
cases F
dsimp [reverse]
congr
apply reverse_reverse
/- warning: quiver.push.of_reverse -> Quiver.Push.of_reverse is a dubious translation:
lean 3 declaration is
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u1, u2} V] {V' : Type.{u3}} (σ : V -> V') [h : Quiver.HasInvolutiveReverse.{u1, u2} V _inst_2] (X : V) (Y : V) (f : Quiver.Hom.{succ u1, u2} V _inst_2 X Y), Eq.{succ (max u2 u3 (succ u1))} (Quiver.Hom.{succ (max u2 u3 (succ u1)), u3} (Quiver.Push.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.{u2, succ u1, u3} V _inst_2 V' σ) (Prefunctor.obj.{succ u1, succ (max u2 u3 (succ u1)), u2, u3} V _inst_2 (Quiver.Push.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u1, u3} V _inst_2 V' σ) Y) (Prefunctor.obj.{succ u1, succ (max u2 u3 (succ u1)), u2, u3} V _inst_2 (Quiver.Push.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u1, u3} V _inst_2 V' σ) X)) (Quiver.reverse.{max u2 u3 (succ u1), u3} (Quiver.Push.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.hasReverse.{u1, u2, u3} V _inst_2 V' σ (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 h)) (Prefunctor.obj.{succ u1, succ (max u2 u3 (succ u1)), u2, u3} V _inst_2 (Quiver.Push.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u1, u3} V _inst_2 V' σ) X) (Prefunctor.obj.{succ u1, succ (max u2 u3 (succ u1)), u2, u3} V _inst_2 (Quiver.Push.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u1, u3} V _inst_2 V' σ) Y) (Prefunctor.map.{succ u1, succ (max u2 u3 (succ u1)), u2, u3} V _inst_2 (Quiver.Push.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u1, u3} V _inst_2 V' σ) X Y f)) (Prefunctor.map.{succ u1, succ (max u2 u3 (succ u1)), u2, u3} V _inst_2 (Quiver.Push.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.quiver.{u2, succ u1, u3} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u1, u3} V _inst_2 V' σ) Y X (Quiver.reverse.{u1, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u1, u2} V _inst_2 h) X Y f))
but is expected to have type
forall {V : Type.{u2}} [_inst_2 : Quiver.{succ u3, u2} V] {V' : Type.{u1}} (σ : V -> V') [h : Quiver.HasInvolutiveReverse.{u3, u2} V _inst_2] (X : V) (Y : V) (f : Quiver.Hom.{succ u3, u2} V _inst_2 X Y), Eq.{max (max (succ (succ u3)) (succ u2)) (succ u1)} (Quiver.Hom.{succ (max (max (succ u3) u2) u1), u1} (Quiver.Push.{u2, u1} V V' σ) (Quiver.instQuiverPush.{u2, succ u3, u1} V _inst_2 V' σ) (Prefunctor.obj.{succ u3, max (max (succ (succ u3)) (succ u2)) (succ u1), u2, u1} V _inst_2 (Quiver.Push.{u2, u1} V V' σ) (Quiver.instQuiverPush.{u2, succ u3, u1} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u3, u1} V _inst_2 V' σ) Y) (Prefunctor.obj.{succ u3, max (max (succ (succ u3)) (succ u2)) (succ u1), u2, u1} V _inst_2 (Quiver.Push.{u2, u1} V V' σ) (Quiver.instQuiverPush.{u2, succ u3, u1} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u3, u1} V _inst_2 V' σ) X)) (Quiver.reverse.{max (max (succ u3) u2) u1, u1} (Quiver.Push.{u2, u1} V V' σ) (Quiver.instQuiverPush.{u2, succ u3, u1} V _inst_2 V' σ) (Quiver.Push.instHasReversePushInstQuiverPush.{u3, u2, u1} V _inst_2 V' σ (Quiver.HasInvolutiveReverse.toHasReverse.{u3, u2} V _inst_2 h)) (Prefunctor.obj.{succ u3, max (max (succ (succ u3)) (succ u2)) (succ u1), u2, u1} V _inst_2 (Quiver.Push.{u2, u1} V V' σ) (Quiver.instQuiverPush.{u2, succ u3, u1} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u3, u1} V _inst_2 V' σ) X) (Prefunctor.obj.{succ u3, max (max (succ (succ u3)) (succ u2)) (succ u1), u2, u1} V _inst_2 (Quiver.Push.{u2, u1} V V' σ) (Quiver.instQuiverPush.{u2, succ u3, u1} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u3, u1} V _inst_2 V' σ) Y) (Prefunctor.map.{succ u3, max (max (succ (succ u3)) (succ u2)) (succ u1), u2, u1} V _inst_2 (Quiver.Push.{u2, u1} V V' σ) (Quiver.instQuiverPush.{u2, succ u3, u1} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u3, u1} V _inst_2 V' σ) X Y f)) (Prefunctor.map.{succ u3, max (max (succ (succ u3)) (succ u2)) (succ u1), u2, u1} V _inst_2 (Quiver.Push.{u2, u1} V V' σ) (Quiver.instQuiverPush.{u2, succ u3, u1} V _inst_2 V' σ) (Quiver.Push.of.{u2, succ u3, u1} V _inst_2 V' σ) Y X (Quiver.reverse.{u3, u2} V _inst_2 (Quiver.HasInvolutiveReverse.toHasReverse.{u3, u2} V _inst_2 h) X Y f))
Case conversion may be inaccurate. Consider using '#align quiver.push.of_reverse Quiver.Push.of_reverseₓ'. -/
theorem of_reverse [h : HasInvolutiveReverse V] (X Y : V) (f : X ⟶ Y) :
(reverse <| (Push.of σ).map f) = (Push.of σ).map (reverse f) :=
rfl
#align quiver.push.of_reverse Quiver.Push.of_reverse
#print Quiver.Push.ofMapReverse /-
instance ofMapReverse [h : HasInvolutiveReverse V] : (Push.of σ).MapReverse :=
⟨by simp [of_reverse]⟩
#align quiver.push.of_map_reverse Quiver.Push.ofMapReverse
-/
end Push
#print Quiver.IsPreconnected /-
/-- A quiver is preconnected iff there exists a path between any pair of
vertices.
Note that if `V` doesn't have a `has_reverse` instance, then the definition is stronger than
simply having a preconnected underlying `simple_graph`, since a path in one
direction doesn't induce one in the other.
-/
def IsPreconnected (V) [Quiver.{u + 1} V] :=
∀ X Y : V, Nonempty (Path X Y)
#align quiver.is_preconnected Quiver.IsPreconnected
-/
end Quiver
|
#!/usr/bin/env Rscript
source("/tmp/class-libs.R")
class_name = "2020 Spring Env Econ C118"
class_libs = c(
"margins", "0.3.23"
)
class_libs_install_version(class_name, class_libs)
|
[STATEMENT]
lemma independent_mono: "independent A \<Longrightarrow> B \<subseteq> A \<Longrightarrow> independent B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>independent A; B \<subseteq> A\<rbrakk> \<Longrightarrow> independent B
[PROOF STEP]
by (auto intro: dependent_mono)
|
\section{Predicting point correspondences}
Before delving into methods for 3D reconstruction, it is first necessary to discuss techniques for identifying \emph{point correspondences}. Point correspondences have a long history in computer vision for associating the same real-world location as it is represented in multiple camera views or on a 3D model surface. In a multi-image scenario, determining reliable correspondences between image pairs can greatly reduce the ambiguity when reconstructing 3D scenes from 2D images. Even with only a single image available, correspondences can be predicted between the image and a representative 3D template mesh. This 3D-to-2D correspondence type is important for constraining the class of model fitting algorithms (discussed in depth later) which operate by aligning a 3D template mesh to a given 2D image. Of course, determining point correspondences is made more difficult in the presence of particular nuisance factors. In the case of animal imagery, we must associate points on a non-rigid object with independently moving (articulated) parts, and deal with frequent self-occlusion in which limbs overlap each other from the perspective of the camera, occlusion caused by environmental factors (e.g. trees, fences and humans), varied and unknown backgrounds, and a range of complex lighting conditions (including shadows). Throughout this section, the methods highlighted will be appraised against their suitability in this complex setting.
% https://link.springer.com/article/10.1007/s13735-019-00183-w
% https://arxiv.org/pdf/1603.09114.pdf
\subsection{Relating separate views of the same object/scene}
The first class of techniques focuses on classical approaches for determining corresponding image points taken of precisely the same (and almost always rigid) object. Early techniques focused on stereo~\cite{corres-stereo} or optical flow~\cite{corres-optflow} imagery, and matched image points by finding regions with similar pixel intensities. Due to the adverse effects that changeable environmental factors (e.g. lighting) have on the appearance of a real-world location captured in separate images, attention moved towards designing schemes with improved robustness. Improvements were achieved by matching points based on local \emph{mid-level features} such as edges and corners, which have greater invariance to colour changes caused by lighting effects. Typical pipelines would first identify \emph{interest points} (typically corners~\cite{corner-moravec,corner-harris,corner-susan} or blobs~\cite{sift}), around which local image patches could be compared according to either a sum of squared intensity differences (SSD) or a cross-correlation (CC) scheme. Steady improvements were then made through the design of ever-better feature descriptors, which encode local image information around points and aim for invariance against common transformations (e.g. viewpoint, rotation and scaling). Progress in this field arguably reached maturity with the advent of SIFT~\cite{sift}, which encodes points according to local histograms of gradient orientations and was later sped up by SURF~\cite{surf} and DAISY~\cite{daisy}. There have been modern attempts to learn sophisticated feature representations using convolutional neural network architectures~\cite{lift, matchnet}, which are shown to offer still further improvement.
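For concreteness, the SSD matching cost between a point $\mathbf{p}$ in image $I_1$ and a candidate point $\mathbf{q}$ in image $I_2$, compared over a local window $W$, takes the form
\begin{equation}
\mathrm{SSD}(\mathbf{p}, \mathbf{q}) = \sum_{\mathbf{u} \in W} \big( I_1(\mathbf{p} + \mathbf{u}) - I_2(\mathbf{q} + \mathbf{u}) \big)^2,
\end{equation}
with the cross-correlation scheme instead accumulating the (typically normalised) product of the two patch intensities, so that lower SSD or higher correlation indicates a better match.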
The primary aim of these systems is to derive point correspondences between multiple views of the same object, usually as depicted in stereo images or between successive frames of a video. Unfortunately, by matching points based on local geometric features learnt from few image examples, these techniques do not readily extend to identifying correspondences between different instances of the same category. For example, matching SIFT features is likely to result in poor quality correspondences if tested on two dogs of different breeds, due to their differing appearance and body geometry. For similar reasons, this class of techniques tends to deteriorate when tested on articulated objects, since the object's structure can change and cause self-occlusion between views. The techniques are also known to suffer in scenarios with significant viewpoint changes (e.g. images of the front/back of an animal), since there are few corresponding points available for matching. Finally, these techniques do not directly offer a method for identifying correspondences between an image and a representative 3D mesh. Although some work exists that extends the aforementioned feature descriptors to 3D (e.g. 3D-SIFT~\cite{sift-3d}), matching typically requires a photorealistic 3D scan of the 2D subject, which we cannot assume as input for our problem.
\subsection{Predicting semantically-meaningful keypoints}
This section will explore an alternative class of methods for identifying point correspondences. So far, the approaches described do not detect correspondences with any semantic meaning; in other words, the returned points cannot truly be `named' and there is no guarantee that the same points (or even the same number of points) will be identified in different test images. Instead, this section will focus on techniques which predict a set of keypoint locations specified in a pre-defined list (for example: nose, tail tip, toe). In general, data-driven machine learning algorithms are used to learn an association between image appearance information and semantic keypoint labels. The techniques fall into two general categories: \emph{supervised techniques}, which rely on large image datasets manually annotated with keypoint locations, and \emph{unsupervised techniques}, which learn the association through other means.
\subsubsection{Supervised techniques}
Early work in the supervised prediction of landmarks began through the refinement of object detection methods to predict fine-grained object part labels, and eventually progressed to keypoint locations. Perhaps the earliest techniques in this category made use of face part annotations (referred to as fiducial points) to align target faces and improve face recognition accuracy. Human detection and pose estimation methods progressed from simple bounding box representations~\cite{hog}, to object part prediction~\cite{xxx,xxx}, poselets~\cite{pose-kposelets} and subsequently 2D keypoint localization~\cite{xxx,xxx}. Most commonly, methods aim to predict the location of important 2D human joints (such as the shoulders and wrists) in order to roughly approximate the subject's skeletal pose. For this reason, this task is commonly referred to as \emph{2D human pose estimation}. The earliest techniques represented humans as a graph of parts~\cite{human-rep-parts} and fit shape primitives (e.g. cylinders~\cite{pose-hogg}) to detected edges. Tree-based graphical models known as pictorial structures~\cite{pictorial-structures} were adopted and later made efficient~\cite{pose-felzen}. Improvements were made with models capable of expressing complex relationships between joints, such as flexible part mixtures~\cite{yang2013articulated,pose-johnson-mixtureparts}.
Before the popularization of modern deep learning architectures, various methods made use of features computed underneath predicted 2D landmark locations for fine-grained image classification tasks. As a result, there exist a limited number of keypoint datasets for animal categories such as dogs~\cite{liu2012dog} and birds~\cite{WelinderEtal2010}. \Cref{chap:wldo} of this thesis will discuss StanfordExtra, a new dataset complete with annotated keypoint locations and segmentation masks for 12,000 dog images, encompassing 120 different breeds. At the time of publication, StanfordExtra is the largest annotated animal dataset of its kind.
% https://arxiv.org/pdf/2012.13392.pdf
Recent works in 2D pose estimation typically employ convolutional neural networks (CNNs) due to the complex feature representations that can be learnt for joints which, when applied discriminatively, enable accurate recognition. An early example~\cite{pose-embedding} learnt a pose embedding space with a CNN, and employed a nearest neighbour search algorithm to regress a pose. Later, deeper CNN models were used to regress facial~\cite{pose-face-earlycnn} and full-body~\cite{toshev2014deeppose} landmarks. More recent works improve robustness by regressing keypoint confidence maps~\cite{joint-training} rather than 2D keypoints directly, enabling spatial priors to be applied to remove outliers~\cite{cao2018openpose,Pfister15,Pfister14a,Charles16,joint-training,viewpoints-keypoints,pishchulin2016deepcut}. More recent methods are able to directly produce accurate confidence maps through a multi-stage pipeline~\cite{wei2016cpm}. Of particular note are hourglass~\cite{newell2016stacked} (relied upon in \Cref{chap:cgas} of this thesis) and multi-level~\cite{sun2019deep,Xiao_2018_ECCV} structures, which combine global reasoning about full-body attributes with fine-grained detail. A related class of methods~\cite{guler2018densepose, taylor2012vitruvian} focuses on \emph{dense} human pose estimation, which relates all 2D image pixels to a representative 3D surface of the human body.
Modern techniques in 2D human pose estimation demonstrate impressive accuracy on in-the-wild datasets, and deal with parsing multiple subjects in challenging poses and in the presence of various occluders. However, part of what enables these achievements is the prevalence of large 2D keypoint datasets which can be used for training. Further discussion of available 2D keypoint datasets has been left for \Cref{chap:3dmulti}, in which they are considered in depth. The history of and advances in 2D human pose estimation are comprehensively reviewed in~\cite{2dpose-survey-1, 2dpose-survey-2}.
\subsubsection{Unsupervised learning}
As this thesis focuses on developing methods for animal reconstruction, it is useful to review techniques which operate without large 2D keypoint training datasets, which are scarce for animal subjects. Note that the methods in this section all describe approaches for determining point correspondences between different scenes. Under consideration are methods based on transfer learning, unsupervised learning and weak supervision.
Early correspondence techniques include dense alignment methods such as SIFT-flow~\cite{siftflow}, which employed optical flow methods to match images using SIFT features, and the work of Bristow et al.~\cite{Bristow2015DenseSC}, who demonstrate a method for learning per-pixel semantic correspondences using geometric priors and show examples on various animal categories. Recent unsupervised techniques learn \emph{category-specific} semantic priors by employing deep networks on large image collections.
Zhou et al.~\cite{flowweb-efros} demonstrate a method for solving correspondences across an image collection by enforcing cycle consistency. Kanazawa et al.~\cite{kanazawa2016warpnet} introduce WarpNet, which predicts a dense 2D deformation field for bird images by learning from synthetic thin-plate spline warps generated on extracted silhouettes. Thewlis et al.~\cite{thewlis-unsup-sphere} apply a similar trick by ensuring a consistent mapping of warped facial images to a spherical coordinate frame, and show results on humans and cats. Jakab et al.~\cite{unsup-articulated-objects} show they can estimate 2D human pose without keypoint training data by leveraging the fact that, between two frames of a simple video sequence, human body shape and texture remain reasonably similar while the pose (including global rotation) varies. They therefore construct an architecture that, given a pair of frames $(I, J)$, defines a network $f$ which predicts a 2D location vector $y$ from frame $I$. The system then combines this vector $y$ with the second frame $J$ and trains a secondary network $g$ to reconstruct the original frame $I$. Due to the limited capacity of $y$, and the fact that, apart from the pose, most of the information necessary for reconstruction is already available in $J$, the network eventually learns to encode 2D pose coordinates in $y$.
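Schematically, training minimises a reconstruction objective of the form
\begin{equation}
\min_{f,\, g} \; \sum_{(I, J)} \big\lVert I - g\big(J, f(I)\big) \big\rVert^2,
\end{equation}
so that the low-capacity bottleneck $y = f(I)$ is forced to carry only the pose information that distinguishes $I$ from $J$.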
Transfer learning describes a family of methods in which a machine learning model is first \emph{pre-trained} to solve a related task (often making use of a secondary dataset which may be larger in size) in order to accumulate knowledge that offers an advantage when solving the original task. DeepLabCut~\cite{mathis2018deeplabcut}, LEAP~\cite{leap-animal-pose} and DeepPoseKit~\cite{graving2019deepposekit} exemplify such techniques, in which existing architectures~\cite{pishchulin2016deepcut,newell2016stacked,densenet,mobilenetv2} are first trained to predict 2D human pose (making use of the large available datasets), and are then repurposed to predict 2D animal keypoints using few (generally hundreds of) training examples. Cao et al.~\cite{animalpose} demonstrate a cross-domain adaptation technique, which transfers knowledge gained from a modestly-sized animal dataset to unseen animal types. There are also dense estimation techniques, which extend DensePose~\cite{guler2018densepose}, described above, to proximal animal classes~\cite{DenseposeEvo20}, such as chimpanzees, by aligning the geometry of the animal category to that of humans, for which data is plentiful.
% Also under consideration are methods which learn from yet lesser sources of supervision.
%https://www.robots.ox.ac.uk/~vedaldi/assets/pubs/jakab18unsupervised.pdf
%https://people.csail.mit.edu/celiu/SIFTflow/SIFTflow.pdf
%https://people.eecs.berkeley.edu/~tinghuiz/papers/cvpr15_flow.pdf
%https://www.robots.ox.ac.uk/~vedaldi/assets/pubs/thewlis18modelling.pdf
%https://www.robots.ox.ac.uk/~vedaldi/assets/pubs/thewlis17dense.pdf
%https://www.robots.ox.ac.uk/~vedaldi/assets/pubs/thewlis16fully-trainable.pdf
% https://reader.elsevier.com/reader/sd/pii/S0959438819301151?token=7A13D081FA0EE09BD23EDE5D517D499C7678CD59082C5B225CE01EC8063089BDC30D740FA31FFA6330F7FF6D2FEF2D89
% DeepLabCut: ResNet
% https://www.nature.com/articles/s41592-018-0234-5 (LEAP, StackedHourglass, recently sped up with MobileNet2)
% https://github.com/jgraving/DeepPoseKit
% https://elifesciences.org/articles/47994
% 3D DeepLabCut: https://www.nature.com/articles/s41596-019-0176-0
% AniPose: https://anipose.readthedocs.io/en/latest/
% DeepFly3D: https://elifesciences.org/articles/48571
% TODO: Find out a bit more about sun2019deep, Xiao 2018 etc.
|
\documentclass[14pt,compress,english,utf8,t]{beamer}
\usepackage{etex}
\usepackage[english]{babel}
\usepackage{tikz}
\usepackage{booktabs}
\usepackage{ragged2e}
\usepackage{mathtools}
\usepackage{wasysym}
\usepackage{minted}
\usemintedstyle{tango}
\usetikzlibrary{calc,shapes.callouts,shapes.arrows}
\definecolor{darkred}{RGB}{220,0,0}
\newcommand{\hcancel}[5]{%
\tikz[baseline=(tocancel.base)]{
\node[inner sep=0pt,outer sep=0pt] (tocancel) {#1};
\draw[darkred, line width=1mm] ($(tocancel.south west)+(#2,#3)$) -- ($(tocancel.north east)+(#4,#5)$);
}%
}%
\usepackage[protrusion=true,expansion=true]{microtype}
\title[10 modern programming concepts]{10 modern programming concepts \\ which
your favourite programming language is missing\footnote{unless your favourite
language is \ldots}}
\author[Curry Club Augsburg]{\includegraphics[scale=0.6]{logo-heller-hintergrund}}
\institute{32nd Chaos Communication Congress}
\date{December 29th, 2015}
\usetheme{Warsaw}
\usecolortheme{seahorse}
\definecolor{mypurple}{RGB}{150,0,255}
\setbeamercolor{structure}{fg=mypurple}
\usefonttheme{serif}
\usepackage[T1]{fontenc}
\usepackage{libertine}
\useinnertheme{rectangles}
\setbeamercovered{invisible}
\setbeamertemplate{title page}[default][colsep=-1bp,rounded=false,shadow=false]
\setbeamertemplate{frametitle}[default][colsep=-2bp,rounded=false,shadow=false,center]
\setbeamertemplate{navigation symbols}{}
\setbeamertemplate{headline}{}
\newcommand*\oldmacro{}%
\let\oldmacro\insertshorttitle%
\renewcommand*\insertshorttitle{%
\oldmacro\hfill\insertframenumber\,/\,\inserttotalframenumber\hfill}
\newcommand{\defeq}{\vcentcolon=}
\newcommand{\hil}[1]{{\usebeamercolor[fg]{item}{\textbf{#1}}}}
\newcommand{\atpos}[1]{%
\begin{tikzpicture}[remember picture, overlay]%
\node[anchor=south east] at (current page.south east) {#1};
\end{tikzpicture}%
}
\newcommand{\centeredpar}[2]{%
\begin{center}
\colorbox{white}{\parbox{#1\textwidth}{%
#2%
}}%
\end{center}%
}
\newcommand{\icfrac}[4]{#1 + \dfrac{1}{#2 + \dfrac{1}{#3 + \dfrac{1}{#4 + \ddots}}}}
\newcommand{\icfracc}[3]{\dfrac{1}{#1 + \dfrac{1}{#2 + \dfrac{1}{#3 + \ddots}}}}
\newcommand{\icfraccc}[2]{\dfrac{1}{#1 + \dfrac{1}{#2 + \ddots}}}
\newcommand{\icfracccc}[5]{#1 + \dfrac{1}{#2 + \dfrac{1}{#3 + \dfrac{1}{#4 + \dfrac{1}{#5 + \ddots}}}}}
% Gonzalo Medina, http://tex.stackexchange.com/a/228198
\makeatletter
\def\Mdescription#1{%
\advance\beamer@descdefault by \labelsep%
\list
{}
{\labelwidth\beamer@descdefault%
\leftmargin\beamer@descdefault%
\let\makelabel\beamer@descriptionitem
\settowidth\labelwidth{\beamer@descriptionitem{#1}}%
\setlength\leftmargin{\labelwidth}%
\addtolength\leftmargin{\labelsep}%
}%
\beamer@cramped%
\raggedright
\beamer@firstlineitemizeunskip%
}
\def\endMdescription{\ifhmode\unskip\fi\endlist}
\long\def\beamer@descriptionitem#1{%
\def\insertdescriptionitem{#1}%
{\usebeamertemplate**{description item}}\hfil}
\makeatother
\setbeameroption{show notes}
\setbeamertemplate{note page}[plain]
\begin{document}
\frame{
\titlepage
}
\section{Overloaded semicolon}
\begin{frame}[fragile]
\vspace*{-1.5em}
\begin{center}
\Huge \hil{Callback hell \frownie}
\end{center}
\begin{minted}{javascript}
getData(function(a) {
getMoreData(a, function(b) {
getYetMoreData(b, function(c) {
getMoreFoo(c, function(d) {
...
});
});
});
});
\end{minted}
\pause
And this is even without error handling!
\end{frame}
\begin{frame}[fragile]
\vspace*{-1.5em}
\begin{center}
\Huge \hil{Overloaded semicolon \smiley}
\end{center}
\begin{minted}{haskell}
do a <- getData
b <- getMoreData a
c <- getYetMoreData b
   d <- getMoreFoo c
   ...
\end{minted}
Simple \& easy. You can pretend that you're using blocking I/O.
\medskip
\pause
NB: This is called ``monads''. There are also monads for non-determinism,
parsing, \ldots
\end{frame}
\begin{frame}[c]
\centering
\includegraphics[height=\textheight]{hell-yeah}
\par
\end{frame}
\begin{frame}[fragile]
\begin{center}
\vspace*{-1em}\Huge\hil{QuickCheck}
\end{center}
\begin{minted}{perl}
is(sqrt(4), 2, "sqrt(4) is working");
is(sqrt(16), 4, "sqrt(16) is working");
# ...
\end{minted}
This does not scale! Property-checking is a useful addition:
\begin{minted}{haskell}
> quickCheck $ \x -> sqrt(x*x) == x
*** Failed! Falsifiable (after 2 tests):
-0.269864
> quickCheck $ \x -> sqrt(x*x) == abs x
+++ OK, passed 100 tests.
\end{minted}
Automatic test case generation and counterexample simplification.
\end{frame}
\begin{frame}[fragile]
\vspace*{-1.5em}
\begin{center}
\huge Quiz time! Spot the error.
\end{center}
\begin{minted}{c}
#include <stdlib.h>
int main(int argc, char *argv[]) {
...;
user_input = ...;
if(abs(user_input) > ...) {
exit(1);
}
...;
}
\end{minted}
\pause
Also: Billion Dollar Mistake by Tony Hoare.
\url{http://lambda-the-ultimate.org/node/3186}
\end{frame}
\begin{frame}[fragile]
\begin{center}
\huge
\hil{Solution: Option types.}
\end{center}
\medskip
\pause
A value of type \mintinline{haskell}{Maybe Int} is
\medskip
\begin{enumerate}
\item either \mintinline{haskell}{Nothing}
\item or a value of the form \mintinline{haskell}{Just x}, where~\mintinline{haskell}{x} is
an~\mintinline{haskell}{Int}.
\end{enumerate}
\medskip
Type signature of \texttt{abs}:
\mintinline{haskell}{Int -> Maybe Int}
\medskip
Use option types when you cannot return a meaningful result and don't want to
raise a proper exception, when you have optional arguments, or when you have
optional entries in data structures.
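\medskip
For example, a division that cannot crash:
\begin{minted}{haskell}
safeDiv :: Int -> Int -> Maybe Int
safeDiv _ 0 = Nothing
safeDiv x y = Just (x `div` y)
\end{minted}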
\end{frame}
\begin{frame}[c]
\centering
\includegraphics[height=\textheight]{haskell_call_me_maybe}
\par
\end{frame}
\begin{frame}[fragile]
\begin{center}
\vspace*{-1em}
\huge\hil{Pattern matching}
\scalebox{0.5}{\input{baum.pspdftex}}
\end{center}
\small
\begin{minted}[fontsize=\footnotesize]{haskell}
data Tree = Leaf Int | Fork Tree Tree
ex = Fork
(Fork (Leaf 17) (Leaf 37))
(Fork (Fork (Leaf 42) (Leaf 0)) (Leaf 41))
inorder :: Tree -> [Int]
inorder (Leaf x) = [x]
inorder (Fork l r) = inorder l ++ inorder r
\end{minted}
% Declare custom datatypes in just one line of code and write functions using
% them with pattern matching.
\end{frame}
\begin{frame}[fragile]
\begin{center}
\vspace*{-1em}
\Huge
\hil{Typing}
\end{center}
\begin{minted}[fontsize=\small]{java}
BufferedReader in =
new BufferedReader(
new InputStreamReader(System.in)
);
\end{minted}
\begin{center}
\scalebox{3}{\Huge \frownie}
\includegraphics[scale=0.2]{EnterpriseReady_trans}
\end{center}
\end{frame}
\begin{frame}[fragile]
\begin{center}
\vspace*{-1em}\Huge\hil{Types \smiley}
\end{center}
A good type system provides:
\medskip
\begin{itemize}
\item inference: you don't have to type those types!
\item safety: no NullPointerException
\item ``algebraic data types'' and function types
\item parametricity: generics on steroids
\item higher-kinded types
\end{itemize}
Great for prototyping and refactoring!
\end{frame}
\begin{frame}[c]
\centering
\includegraphics[height=\textheight]{doge_enterprise_wow}
\par
\end{frame}
\begin{frame}%\frametitle{Units of Measure}
\begin{minipage}{0.45 \textwidth}
\includegraphics[width=0.9 \textwidth]{Mars_Climate_Orbiter_2.jpg}
\end{minipage}
\begin{minipage}{0.53 \textwidth}
Mars Climate Orbiter (1998) \\[0.4cm]
Cost of the mission: \\
\$327 million \\[0.4cm]
Failed due to a unit error \\
{\small (newton-secs vs. pound-secs)}
\end{minipage}
\begin{center}
\scalebox{3}{\Huge \frownie}
\end{center}
\end{frame}
\begin{frame}[fragile]%\frametitle{Units of Measure}
\begin{center}
\vspace*{-1em}\Large\hil{Units of Measure Types \smiley}
\end{center}
\begin{minted}{text}
[<Measure>] type N = (kg * m) / sec^2
let fireThrusters (x:float<N * sec>) = ...
let duration = 2.0<sec>
let force = 1000.0<N>
fireThrusters (duration * force)
let diag (x:float<'u>) (y:float<'u>)
    = sqrt (x*x + y*y) // Pythagoras
\end{minted}
\end{frame}
\begin{frame}%\frametitle{Units of Measure}
\begin{center}
\includegraphics[width=11cm]{population_explosion.jpg}
\end{center}
\end{frame}
\begin{frame}
\begin{center}
\vspace*{-1em}\Huge\hil{Time-traveling debugger}
\medskip
\includegraphics[scale=0.7]{time-travel}
% http://blogs.discovermagazine.com/crux/files/2014/01/time-travel-300x202.jpg
\end{center}
\end{frame}
\begin{frame}
\begin{center}
\vspace*{-1em}\Huge\hil{Dependent types}
\medskip
\end{center}
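\centeredpar{0.85}{%
Types may depend on \emph{values}. A classic sketch (in Idris-like syntax): \\[0.3em]
\texttt{append : Vect n a -> Vect m a -> Vect (n + m) a} \\[0.3em]
The compiler itself checks that the lengths add up.%
}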
\end{frame}
\end{document}
Brainstorm:
* "overloaded semicolon" to avoid callback hell (and mention monads) (Ingo)
* Maybe (Ingo)
* data structures & pattern matching (Ingo)
* Type system with type inference (Ingo)
* QuickCheck (Tim)
* time traveling debugger (Tim)
* Units of measure types (Tim)
* Regions from Rust
* STM
* DSLs
* meta programming
* Purity
* REPL with tab completion
* MonadFix?
* List monad?
* Type classes?
* List Fusion?
* LINQ
* Concurrency features
* probabilistic programming?
* dependent types?
|
module Core.CaseBuilder
import Core.CaseTree
import Core.Context
import Core.Context.Log
import Core.Core
import Core.Env
import Core.Normalise
import Core.Options
import Core.TT
import Core.Value
import Libraries.Data.LengthMatch
import Data.List
import Data.Vect
import Decidable.Equality
import Libraries.Text.PrettyPrint.Prettyprinter
%default covering
public export
data Phase = CompileTime RigCount | RunTime
Eq Phase where
CompileTime r == CompileTime r' = r == r'
RunTime == RunTime = True
_ == _ = False
data ArgType : List Name -> Type where
Known : RigCount -> (ty : Term vars) -> ArgType vars -- arg has type 'ty'
Stuck : (fty : Term vars) -> ArgType vars
-- ^ arg will have argument type of 'fty' when we know enough to
-- calculate it
Unknown : ArgType vars
-- arg's type is not yet known due to a previously stuck argument
HasNames (ArgType vars) where
full gam (Known c ty) = Known c <$> full gam ty
full gam (Stuck ty) = Stuck <$> full gam ty
full gam Unknown = pure Unknown
resolved gam (Known c ty) = Known c <$> resolved gam ty
resolved gam (Stuck ty) = Stuck <$> resolved gam ty
resolved gam Unknown = pure Unknown
{ns : _} -> Show (ArgType ns) where
show (Known c t) = "Known " ++ show c ++ " " ++ show t
show (Stuck t) = "Stuck " ++ show t
show Unknown = "Unknown"
record PatInfo (pvar : Name) (vars : List Name) where
constructor MkInfo
{idx : Nat}
{name : Name}
pat : Pat
0 loc : IsVar name idx vars
argType : ArgType vars -- Type of the argument being inspected (i.e.
-- *not* refined by this particular pattern)
{vars : _} -> Show (PatInfo n vars) where
show pi = show (pat pi) ++ " : " ++ show (argType pi)
HasNames (PatInfo n vars) where
full gam (MkInfo pat loc argType)
= do pat <- full gam pat
argType <- full gam argType
pure $ MkInfo pat loc argType
resolved gam (MkInfo pat loc argType)
= do pat <- resolved gam pat
argType <- resolved gam argType
pure $ MkInfo pat loc argType
{-
NamedPats is a list of patterns on the LHS of a clause. Each entry in
the list gives a pattern, and a proof that there is a variable we can
inspect to see if it matches the pattern.
A definition consists of a list of clauses, which are a 'NamedPats' and
a term on the RHS. There is an assumption that corresponding positions in
NamedPats always have the same 'Elem' proof, though this isn't expressed in
a type anywhere.
-}
data NamedPats : List Name -> -- variables in scope
                 List Name -> -- the pattern variables still to process,
                              -- in order
Type where
Nil : NamedPats vars []
(::) : PatInfo pvar vars ->
-- ^ a pattern, where its variable appears in the vars list,
-- and its type. The type has no variable names; any names it
-- refers to are explicit
NamedPats vars ns -> NamedPats vars (pvar :: ns)
getPatInfo : NamedPats vars todo -> List Pat
getPatInfo [] = []
getPatInfo (x :: xs) = pat x :: getPatInfo xs
updatePats : {vars, todo : _} ->
{auto c : Ref Ctxt Defs} ->
Env Term vars ->
NF vars -> NamedPats vars todo -> Core (NamedPats vars todo)
updatePats env nf [] = pure []
updatePats {todo = pvar :: ns} env (NBind fc _ (Pi _ c _ farg) fsc) (p :: ps)
= case argType p of
Unknown =>
do defs <- get Ctxt
empty <- clearDefs defs
pure (record { argType = Known c !(quote empty env farg) } p
:: !(updatePats env !(fsc defs (toClosure defaultOpts env (Ref fc Bound pvar))) ps))
_ => pure (p :: ps)
updatePats env nf (p :: ps)
= case argType p of
Unknown =>
do defs <- get Ctxt
empty <- clearDefs defs
pure (record { argType = Stuck !(quote empty env nf) } p :: ps)
_ => pure (p :: ps)
substInPatInfo : {pvar, vars, todo : _} ->
{auto c : Ref Ctxt Defs} ->
FC -> Name -> Term vars -> PatInfo pvar vars ->
NamedPats vars todo ->
Core (PatInfo pvar vars, NamedPats vars todo)
substInPatInfo {pvar} {vars} fc n tm p ps
= case argType p of
Known c ty => pure (record { argType = Known c (substName n tm ty) } p, ps)
Stuck fty =>
do defs <- get Ctxt
empty <- clearDefs defs
let env = mkEnv fc vars
case !(nf defs env (substName n tm fty)) of
NBind pfc _ (Pi _ c _ farg) fsc =>
pure (record { argType = Known c !(quote empty env farg) } p,
!(updatePats env
!(fsc defs (toClosure defaultOpts env
(Ref pfc Bound pvar))) ps))
_ => pure (p, ps)
Unknown => pure (p, ps)
-- Substitute the name with a term in the pattern types, and reduce further
-- (this aims to resolve any 'Stuck' pattern types)
substInPats : {vars, todo : _} ->
{auto c : Ref Ctxt Defs} ->
FC -> Name -> Term vars -> NamedPats vars todo ->
Core (NamedPats vars todo)
substInPats fc n tm [] = pure []
substInPats fc n tm (p :: ps)
= do (p', ps') <- substInPatInfo fc n tm p ps
pure (p' :: !(substInPats fc n tm ps'))
getPat : {idx : Nat} ->
(0 el : IsVar nm idx ps) -> NamedPats ns ps -> PatInfo nm ns
getPat First (x :: xs) = x
getPat (Later p) (x :: xs) = getPat p xs
dropPat : {idx : Nat} ->
(0 el : IsVar nm idx ps) ->
NamedPats ns ps -> NamedPats ns (dropVar ps el)
dropPat First (x :: xs) = xs
dropPat (Later p) (x :: xs) = x :: dropPat p xs
HasNames (NamedPats vars todo) where
full gam [] = pure []
full gam (x::xs) = [| (::) (full gam x) (full gam xs) |]
resolved gam [] = pure []
resolved gam (x::xs) = [| (::) (resolved gam x) (resolved gam xs) |]
{vars : _} -> {todo : _} -> Show (NamedPats vars todo) where
show xs = "[" ++ showAll xs ++ "]"
where
showAll : {vs, ts : _} -> NamedPats vs ts -> String
showAll [] = ""
showAll {ts = t :: _ } [x]
= show t ++ " " ++ show (pat x) ++ " [" ++ show (argType x) ++ "]"
showAll {ts = t :: _ } (x :: xs)
= show t ++ " " ++ show (pat x) ++ " [" ++ show (argType x) ++ "]"
++ ", " ++ showAll xs
{vars : _} -> {todo : _} -> Pretty (NamedPats vars todo) where
pretty xs = hsep $ prettyAll xs
where
prettyAll : {vs, ts : _} -> NamedPats vs ts -> List (Doc ann)
prettyAll [] = []
prettyAll {ts = t :: _ } (x :: xs)
= parens (pretty t <++> pretty "=" <++> pretty (pat x))
:: prettyAll xs
Weaken ArgType where
weaken (Known c ty) = Known c (weaken ty)
weaken (Stuck fty) = Stuck (weaken fty)
weaken Unknown = Unknown
weakenNs s (Known c ty) = Known c (weakenNs s ty)
weakenNs s (Stuck fty) = Stuck (weakenNs s fty)
weakenNs s Unknown = Unknown
Weaken (PatInfo p) where
weaken (MkInfo p el fty) = MkInfo p (Later el) (weaken fty)
-- FIXME: perhaps 'vars' should be second argument so we can use Weaken interface
weaken : {x, vars : _} ->
NamedPats vars todo -> NamedPats (x :: vars) todo
weaken [] = []
weaken (p :: ps) = weaken p :: weaken ps
weakenNs : SizeOf ns ->
NamedPats vars todo ->
NamedPats (ns ++ vars) todo
weakenNs ns [] = []
weakenNs ns (p :: ps)
= weakenNs ns p :: weakenNs ns ps
(++) : NamedPats vars ms -> NamedPats vars ns -> NamedPats vars (ms ++ ns)
(++) [] ys = ys
(++) (x :: xs) ys = x :: xs ++ ys
tail : NamedPats vars (p :: ps) -> NamedPats vars ps
tail (x :: xs) = xs
take : (as : List Name) -> NamedPats vars (as ++ bs) -> NamedPats vars as
take [] ps = []
take (x :: xs) (p :: ps) = p :: take xs ps
data PatClause : (vars : List Name) -> (todo : List Name) -> Type where
MkPatClause : List Name -> -- names matched so far (from original lhs)
NamedPats vars todo ->
Int -> (rhs : Term vars) -> PatClause vars todo
getNPs : PatClause vars todo -> NamedPats vars todo
getNPs (MkPatClause _ lhs pid rhs) = lhs
{vars : _} -> {todo : _} -> Show (PatClause vars todo) where
show (MkPatClause _ ps pid rhs)
= show ps ++ " => " ++ show rhs
{vars : _} -> {todo : _} -> Pretty (PatClause vars todo) where
pretty (MkPatClause _ ps _ rhs)
= pretty ps <++> "=>" <++> pretty rhs
HasNames (PatClause vars todo) where
full gam (MkPatClause ns nps i rhs)
= [| MkPatClause (traverse (full gam) ns) (full gam nps) (pure i) (full gam rhs) |]
resolved gam (MkPatClause ns nps i rhs)
= [| MkPatClause (traverse (resolved gam) ns) (resolved gam nps) (pure i) (resolved gam rhs) |]
substInClause : {a, vars, todo : _} ->
{auto c : Ref Ctxt Defs} ->
FC -> PatClause vars (a :: todo) ->
Core (PatClause vars (a :: todo))
substInClause {vars} {a} fc (MkPatClause pvars (MkInfo pat pprf fty :: pats) pid rhs)
= do pats' <- substInPats fc a (mkTerm vars pat) pats
pure (MkPatClause pvars (MkInfo pat pprf fty :: pats') pid rhs)
data Partitions : List (PatClause vars todo) -> Type where
ConClauses : {todo, vars, ps : _} ->
(cs : List (PatClause vars todo)) ->
Partitions ps -> Partitions (cs ++ ps)
VarClauses : {todo, vars, ps : _} ->
(vs : List (PatClause vars todo)) ->
Partitions ps -> Partitions (vs ++ ps)
NoClauses : Partitions []
{ps : _} -> Show (Partitions ps) where
show (ConClauses cs rest)
= unlines ("CON" :: map ((" " ++) . show) cs)
++ "\n, " ++ show rest
show (VarClauses vs rest)
= unlines ("VAR" :: map ((" " ++) . show) vs)
++ "\n, " ++ show rest
show NoClauses = "NONE"
data ClauseType = ConClause | VarClause
namesIn : List Name -> Pat -> Bool
namesIn pvars (PAs _ n p) = (n `elem` pvars) && namesIn pvars p
namesIn pvars (PCon _ _ _ _ ps) = all (namesIn pvars) ps
namesIn pvars (PTyCon _ _ _ ps) = all (namesIn pvars) ps
namesIn pvars (PArrow _ _ s t) = namesIn pvars s && namesIn pvars t
namesIn pvars (PDelay _ _ t p) = namesIn pvars t && namesIn pvars p
namesIn pvars (PLoc _ n) = n `elem` pvars
namesIn pvars _ = True
namesFrom : Pat -> List Name
namesFrom (PAs _ n p) = n :: namesFrom p
namesFrom (PCon _ _ _ _ ps) = concatMap namesFrom ps
namesFrom (PTyCon _ _ _ ps) = concatMap namesFrom ps
namesFrom (PArrow _ _ s t) = namesFrom s ++ namesFrom t
namesFrom (PDelay _ _ t p) = namesFrom t ++ namesFrom p
namesFrom (PLoc _ n) = [n]
namesFrom _ = []
clauseType : Phase -> PatClause vars (a :: as) -> ClauseType
-- If it's irrelevant, a constructor, and there are no names we haven't seen yet
-- and don't see later, treat it as a variable
-- Or, if we're compiling for runtime we won't be able to split on it, so
-- also treat it as a variable
-- Or, if it's an under-applied constructor then do NOT attempt to split on it!
clauseType phase (MkPatClause pvars (MkInfo arg _ ty :: rest) pid rhs)
= getClauseType phase arg ty
where
-- used when we are tempted to split on a constructor: is
-- this actually a fully applied one?
splitCon : Nat -> List Pat -> ClauseType
splitCon arity xs
= if arity == length xs then ConClause else VarClause
-- used to get the remaining clause types
clauseType' : Pat -> ClauseType
clauseType' (PCon _ _ _ a xs) = splitCon a xs
clauseType' (PTyCon _ _ a xs) = splitCon a xs
clauseType' (PConst _ x) = ConClause
clauseType' (PArrow _ _ s t) = ConClause
clauseType' (PDelay _ _ _ _) = ConClause
clauseType' _ = VarClause
getClauseType : Phase -> Pat -> ArgType vars -> ClauseType
getClauseType (CompileTime cr) (PCon _ _ _ a xs) (Known r t)
= if (isErased r && not (isErased cr) &&
all (namesIn (pvars ++ concatMap namesFrom (getPatInfo rest))) xs)
then VarClause
else splitCon a xs
getClauseType phase (PAs _ _ p) t = getClauseType phase p t
getClauseType phase l (Known r t) = if isErased r
then VarClause
else clauseType' l
getClauseType phase l _ = clauseType' l
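-- Split the clauses into consecutive blocks of constructor clauses and
-- variable clauses, preserving the top-to-bottom match order.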
partition : {a, as, vars : _} ->
Phase -> (ps : List (PatClause vars (a :: as))) -> Partitions ps
partition phase [] = NoClauses
partition phase (x :: xs) with (partition phase xs)
partition phase (x :: (cs ++ ps)) | (ConClauses cs rest)
= case clauseType phase x of
ConClause => ConClauses (x :: cs) rest
VarClause => VarClauses [x] (ConClauses cs rest)
partition phase (x :: (vs ++ ps)) | (VarClauses vs rest)
= case clauseType phase x of
ConClause => ConClauses [x] (VarClauses vs rest)
VarClause => VarClauses (x :: vs) rest
partition phase (x :: []) | NoClauses
= case clauseType phase x of
ConClause => ConClauses [x] NoClauses
VarClause => VarClauses [x] NoClauses
data ConType : Type where
CName : Name -> (tag : Int) -> ConType
CDelay : ConType
CConst : Constant -> ConType
conTypeEq : (x, y : ConType) -> Maybe (x = y)
conTypeEq (CName x tag) (CName x' tag')
= do Refl <- nameEq x x'
case decEq tag tag' of
Yes Refl => Just Refl
No contra => Nothing
conTypeEq CDelay CDelay = Just Refl
conTypeEq (CConst x) (CConst y) = cong CConst <$> constantEq x y
conTypeEq _ _ = Nothing
data Group : List Name -> -- variables in scope
List Name -> -- pattern variables still to process
Type where
ConGroup : {newargs : _} ->
Name -> (tag : Int) ->
List (PatClause (newargs ++ vars) (newargs ++ todo)) ->
Group vars todo
DelayGroup : {tyarg, valarg : _} ->
List (PatClause (tyarg :: valarg :: vars)
(tyarg :: valarg :: todo)) ->
Group vars todo
ConstGroup : Constant -> List (PatClause vars todo) ->
Group vars todo
{vars : _} -> {todo : _} -> Show (Group vars todo) where
show (ConGroup c t cs) = "Con " ++ show c ++ ": " ++ show cs
show (DelayGroup cs) = "Delay: " ++ show cs
show (ConstGroup c cs) = "Const " ++ show c ++ ": " ++ show cs
data GroupMatch : ConType -> List Pat -> Group vars todo -> Type where
ConMatch : LengthMatch ps newargs ->
GroupMatch (CName n tag) ps
(ConGroup {newargs} n tag (MkPatClause pvs pats pid rhs :: rest))
DelayMatch : GroupMatch CDelay []
(DelayGroup {tyarg} {valarg} (MkPatClause pvs pats pid rhs :: rest))
ConstMatch : GroupMatch (CConst c) []
(ConstGroup c (MkPatClause pvs pats pid rhs :: rest))
NoMatch : GroupMatch ct ps g
checkGroupMatch : (c : ConType) -> (ps : List Pat) -> (g : Group vars todo) ->
GroupMatch c ps g
checkGroupMatch (CName x tag) ps (ConGroup {newargs} x' tag' (MkPatClause pvs pats pid rhs :: rest))
= case checkLengthMatch ps newargs of
Nothing => NoMatch
Just prf => case (nameEq x x', decEq tag tag') of
(Just Refl, Yes Refl) => ConMatch prf
_ => NoMatch
checkGroupMatch (CName x tag) ps _ = NoMatch
checkGroupMatch CDelay [] (DelayGroup (MkPatClause pvs pats pid rhs :: rest))
= DelayMatch
checkGroupMatch (CConst c) [] (ConstGroup c' (MkPatClause pvs pats pid rhs :: rest))
= case constantEq c c' of
Nothing => NoMatch
Just Refl => ConstMatch
checkGroupMatch _ _ _ = NoMatch
data PName : Type where
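-- PName above is used only as a label for the fresh-name counter threaded
-- through the match compiler.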
nextName : {auto i : Ref PName Int} ->
String -> Core Name
nextName root
= do x <- get PName
put PName (x + 1)
pure (MN root x)
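-- Generate a fresh machine name for each pattern in the list, recording the
-- argument type of each (when it can be computed from the function type).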
nextNames : {vars : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
FC -> String -> List Pat -> Maybe (NF vars) ->
Core (args ** (SizeOf args, NamedPats (args ++ vars) args))
nextNames fc root [] fty = pure ([] ** (zero, []))
nextNames {vars} fc root (p :: pats) fty
= do defs <- get Ctxt
empty <- clearDefs defs
n <- nextName root
let env = mkEnv fc vars
fa_tys <- the (Core (Maybe (NF vars), ArgType vars)) $
case fty of
Nothing => pure (Nothing, Unknown)
Just (NBind pfc _ (Pi _ c _ fargc) fsc) =>
do farg <- evalClosure defs fargc
case farg of
NErased _ _ =>
pure (Just !(fsc defs (toClosure defaultOpts env (Ref pfc Bound n))),
Unknown)
_ => pure (Just !(fsc defs (toClosure defaultOpts env (Ref pfc Bound n))),
Known c !(quote empty env farg))
Just t =>
pure (Nothing, Stuck !(quote empty env t))
(args ** (l, ps)) <- nextNames {vars} fc root pats (fst fa_tys)
let argTy = case snd fa_tys of
Unknown => Unknown
Known rig t => Known rig (weakenNs (suc l) t)
Stuck t => Stuck (weakenNs (suc l) t)
pure (n :: args ** (suc l, MkInfo p First argTy :: weaken ps))
-- replace the prefix of patterns with 'pargs'
newPats : (pargs : List Pat) -> LengthMatch pargs ns ->
NamedPats vars (ns ++ todo) ->
NamedPats vars ns
newPats [] NilMatch rest = []
newPats (newpat :: xs) (ConsMatch w) (pi :: rest)
= record { pat = newpat} pi :: newPats xs w rest
updateNames : List (Name, Pat) -> List (Name, Name)
updateNames = mapMaybe update
where
update : (Name, Pat) -> Maybe (Name, Name)
update (n, PLoc fc p) = Just (p, n)
update _ = Nothing
updatePatNames : List (Name, Name) -> NamedPats vars todo -> NamedPats vars todo
updatePatNames _ [] = []
updatePatNames ns (pi :: ps)
= record { pat $= update } pi :: updatePatNames ns ps
where
update : Pat -> Pat
update (PAs fc n p)
= case lookup n ns of
Nothing => PAs fc n (update p)
Just n' => PAs fc n' (update p)
update (PCon fc n i a ps) = PCon fc n i a (map update ps)
update (PTyCon fc n a ps) = PTyCon fc n a (map update ps)
update (PArrow fc x s t) = PArrow fc x (update s) (update t)
update (PDelay fc r t p) = PDelay fc r (update t) (update p)
update (PLoc fc n)
= case lookup n ns of
Nothing => PLoc fc n
Just n' => PLoc fc n'
update p = p
groupCons : {a, vars, todo : _} ->
{auto i : Ref PName Int} ->
{auto ct : Ref Ctxt Defs} ->
FC -> Name ->
List Name ->
List (PatClause vars (a :: todo)) ->
Core (List (Group vars todo))
groupCons fc fn pvars cs
= gc [] cs
where
addConG : {vars', todo' : _} ->
Name -> (tag : Int) ->
List Pat -> NamedPats vars' todo' ->
Int -> (rhs : Term vars') ->
(acc : List (Group vars' todo')) ->
Core (List (Group vars' todo'))
-- Group all the clauses that begin with the same constructor, and
-- add new pattern arguments for each of that constructor's arguments.
-- The type of 'ConGroup' ensures that we refer to the arguments by
-- the same name in each of the clauses
addConG {vars'} {todo'} n tag pargs pats pid rhs []
= do cty <- if n == UN (Basic "->")
then pure $ NBind fc (MN "_" 0) (Pi fc top Explicit (MkNFClosure defaultOpts (mkEnv fc vars') (NType fc))) $
(\d, a => pure $ NBind fc (MN "_" 1) (Pi fc top Explicit (MkNFClosure defaultOpts (mkEnv fc vars') (NErased fc False)))
(\d, a => pure $ NType fc))
else do defs <- get Ctxt
Just t <- lookupTyExact n (gamma defs)
| Nothing => pure (NErased fc False)
nf defs (mkEnv fc vars') (embed t)
(patnames ** (l, newargs)) <- nextNames {vars=vars'} fc "e" pargs (Just cty)
-- Update non-linear names in remaining patterns (to keep
-- explicit dependencies in types accurate)
let pats' = updatePatNames (updateNames (zip patnames pargs))
(weakenNs l pats)
let clause = MkPatClause {todo = patnames ++ todo'}
pvars
(newargs ++ pats')
pid (weakenNs l rhs)
pure [ConGroup n tag [clause]]
addConG {vars'} {todo'} n tag pargs pats pid rhs (g :: gs) with (checkGroupMatch (CName n tag) pargs g)
addConG {vars'} {todo'} n tag pargs pats pid rhs
((ConGroup {newargs} n tag ((MkPatClause pvars ps tid tm) :: rest)) :: gs)
| (ConMatch {newargs} lprf)
= do let newps = newPats pargs lprf ps
let l = mkSizeOf newargs
let pats' = updatePatNames (updateNames (zip newargs pargs))
(weakenNs l pats)
let newclause : PatClause (newargs ++ vars') (newargs ++ todo')
= MkPatClause pvars
(newps ++ pats')
pid
(weakenNs l rhs)
-- put the new clause at the end of the group, since we
-- match the clauses top to bottom.
pure ((ConGroup n tag (MkPatClause pvars ps tid tm :: rest ++ [newclause]))
:: gs)
addConG n tag pargs pats pid rhs (g :: gs) | NoMatch
= do gs' <- addConG n tag pargs pats pid rhs gs
pure (g :: gs')
-- This rather ugly special case is to deal with laziness, where Delay
-- is like a constructor, but with a special meaning that it forces
-- evaluation when case analysis reaches it (dealt with in the evaluator
-- and compiler)
addDelayG : {vars', todo' : _} ->
Pat -> Pat -> NamedPats vars' todo' ->
Int -> (rhs : Term vars') ->
(acc : List (Group vars' todo')) ->
Core (List (Group vars' todo'))
addDelayG {vars'} {todo'} pty parg pats pid rhs []
= do let dty = NBind fc (MN "a" 0) (Pi fc erased Explicit (MkNFClosure defaultOpts (mkEnv fc vars') (NType fc))) $
(\d, a =>
do a' <- evalClosure d a
pure (NBind fc (MN "x" 0) (Pi fc top Explicit a)
(\dv, av => pure (NDelayed fc LUnknown a'))))
([tyname, argname] ** (l, newargs)) <- nextNames {vars=vars'} fc "e" [pty, parg]
(Just dty)
| _ => throw (InternalError "Error compiling Delay pattern match")
let pats' = updatePatNames (updateNames [(tyname, pty),
(argname, parg)])
(weakenNs l pats)
let clause = MkPatClause {todo = tyname :: argname :: todo'}
pvars (newargs ++ pats')
pid (weakenNs l rhs)
pure [DelayGroup [clause]]
addDelayG {vars'} {todo'} pty parg pats pid rhs (g :: gs) with (checkGroupMatch CDelay [] g)
addDelayG {vars'} {todo'} pty parg pats pid rhs
((DelayGroup {tyarg} {valarg} ((MkPatClause pvars ps tid tm) :: rest)) :: gs)
| (DelayMatch {tyarg} {valarg})
= do let l = mkSizeOf [tyarg, valarg]
let newps = newPats [pty, parg] (ConsMatch (ConsMatch NilMatch)) ps
let pats' = updatePatNames (updateNames [(tyarg, pty),
(valarg, parg)])
(weakenNs l pats)
let newclause : PatClause (tyarg :: valarg :: vars')
(tyarg :: valarg :: todo')
= MkPatClause pvars (newps ++ pats') pid
(weakenNs l rhs)
pure ((DelayGroup (MkPatClause pvars ps tid tm :: rest ++ [newclause]))
:: gs)
addDelayG pty parg pats pid rhs (g :: gs) | NoMatch
= do gs' <- addDelayG pty parg pats pid rhs gs
pure (g :: gs')
addConstG : {vars', todo' : _} ->
Constant -> NamedPats vars' todo' ->
Int -> (rhs : Term vars') ->
(acc : List (Group vars' todo')) ->
Core (List (Group vars' todo'))
addConstG c pats pid rhs []
= pure [ConstGroup c [MkPatClause pvars pats pid rhs]]
addConstG {todo'} {vars'} c pats pid rhs (g :: gs) with (checkGroupMatch (CConst c) [] g)
addConstG {todo'} {vars'} c pats pid rhs
((ConstGroup c ((MkPatClause pvars ps tid tm) :: rest)) :: gs) | ConstMatch
= let newclause : PatClause vars' todo'
= MkPatClause pvars pats pid rhs in
pure ((ConstGroup c
(MkPatClause pvars ps tid tm :: rest ++ [newclause])) :: gs)
addConstG c pats pid rhs (g :: gs) | NoMatch
= do gs' <- addConstG c pats pid rhs gs
pure (g :: gs')
addGroup : {vars, todo, idx : _} ->
Pat -> (0 p : IsVar nm idx vars) ->
NamedPats vars todo -> Int -> Term vars ->
List (Group vars todo) ->
Core (List (Group vars todo))
-- In 'As' replace the name on the RHS with a reference to the
-- variable we're doing the case split on
addGroup (PAs fc n p) pprf pats pid rhs acc
= addGroup p pprf pats pid (substName n (Local fc (Just True) _ pprf) rhs) acc
addGroup (PCon cfc n t a pargs) pprf pats pid rhs acc
= if a == length pargs
then addConG n t pargs pats pid rhs acc
else throw (CaseCompile cfc fn (NotFullyApplied n))
addGroup (PTyCon cfc n a pargs) pprf pats pid rhs acc
= if a == length pargs
then addConG n 0 pargs pats pid rhs acc
else throw (CaseCompile cfc fn (NotFullyApplied n))
addGroup (PArrow _ _ s t) pprf pats pid rhs acc
= addConG (UN $ Basic "->") 0 [s, t] pats pid rhs acc
-- Go inside the delay; we'll flag the case as needing to force its
-- scrutinee (need to check in 'caseGroups' below)
addGroup (PDelay _ _ pty parg) pprf pats pid rhs acc
= addDelayG pty parg pats pid rhs acc
addGroup (PConst _ c) pprf pats pid rhs acc
= addConstG c pats pid rhs acc
addGroup _ pprf pats pid rhs acc = pure acc -- Can't happen, not a constructor
-- -- FIXME: Is this possible to rule out with a type? Probably.
gc : {a, vars, todo : _} ->
List (Group vars todo) ->
List (PatClause vars (a :: todo)) ->
Core (List (Group vars todo))
gc acc [] = pure acc
gc {a} acc ((MkPatClause pvars (MkInfo pat pprf fty :: pats) pid rhs) :: cs)
= do acc' <- addGroup pat pprf pats pid rhs acc
gc acc' cs
getFirstPat : NamedPats ns (p :: ps) -> Pat
getFirstPat (p :: _) = pat p
getFirstArgType : NamedPats ns (p :: ps) -> ArgType ns
getFirstArgType (p :: _) = argType p
||| Store scores alongside rows of named patterns. These scores are used to determine
||| which column of patterns to switch on first. One score per column.
data ScoredPats : List Name -> List Name -> Type where
Scored : List (NamedPats ns (p :: ps)) -> Vect (length (p :: ps)) Int -> ScoredPats ns (p :: ps)
{ps : _} -> Show (ScoredPats ns ps) where
show (Scored xs ys) = (show ps) ++ "//" ++ (show ys)
zeroedScore : {ps : _} -> List (NamedPats ns (p :: ps)) -> ScoredPats ns (p :: ps)
zeroedScore nps = Scored nps (replicate (S $ length ps) 0)
||| Proof that a value `v` inserted in the middle of a list with
||| prefix `ps` and suffix `qs` can equivalently be snoced with
||| `ps` or consed with `qs` before appending `qs` to `ps`.
elemInsertedMiddle : (v : a) -> (ps,qs : List a) -> (ps ++ (v :: qs)) = ((ps `snoc` v) ++ qs)
elemInsertedMiddle v [] qs = Refl
elemInsertedMiddle v (x :: xs) qs = rewrite elemInsertedMiddle v xs qs in Refl
||| Helper to find a single highest scoring name (or none at all) while
||| retaining the context of all names processed.
highScore : {prev : List Name} ->
(names : List Name) ->
(scores : Vect (length names) Int) ->
(highVal : Int) ->
(highIdx : (n ** NVar n (prev ++ names))) ->
(duped : Bool) ->
Maybe (n ** NVar n (prev ++ names))
highScore [] [] high idx True = Nothing
highScore [] [] high idx False = Just idx
highScore (x :: xs) (y :: ys) high idx duped =
let next = highScore {prev = prev `snoc` x} xs ys
prf = elemInsertedMiddle x prev xs
in rewrite prf in
case compare y high of
LT => next high (rewrite sym $ prf in idx) duped
EQ => next high (rewrite sym $ prf in idx) True
GT => next y (x ** rewrite sym $ prf in weakenNVar (mkSizeOf prev) (MkNVar First)) False
||| Get the index of the highest scoring column if there is one.
||| If no column has a higher score than all other columns then
||| the result is Nothing indicating we need to apply more scoring
||| to break the tie.
||| Suggested heuristic application order: f, b, a.
highScoreIdx : {p : _} -> {ps : _} -> ScoredPats ns (p :: ps) -> Maybe (n ** NVar n (p :: ps))
highScoreIdx (Scored xs (y :: ys)) = highScore {prev = []} (p :: ps) (y :: ys) (y - 1) (p ** MkNVar First) False
||| Apply the penalty function to the head constructor's
||| arity. Produces 0 for all non-head-constructors.
headConsPenalty : (penalty : Nat -> Int) -> Pat -> Int
headConsPenalty p (PAs _ _ w) = headConsPenalty p w
headConsPenalty p (PCon _ n _ arity pats) = p arity
headConsPenalty p (PTyCon _ _ arity _) = p arity
headConsPenalty _ (PConst _ _) = 0
headConsPenalty _ (PArrow _ _ _ _) = 0
headConsPenalty p (PDelay _ _ _ w) = headConsPenalty p w
headConsPenalty _ (PLoc _ _) = 0
headConsPenalty _ (PUnmatchable _ _) = 0
||| Apply the given function that scores a pattern to all patterns and then
||| sum up the column scores and add to the ScoredPats passed in.
consScoreHeuristic : {ps : _} -> (scorePat : Pat -> Int) -> ScoredPats ns ps -> ScoredPats ns ps
consScoreHeuristic _ sps@(Scored [] _) = sps -- can't update scores without any patterns
consScoreHeuristic scorePat (Scored xs ys) =
let columnScores = sum <$> scoreColumns xs
ys' = zipWith (+) ys columnScores
in Scored xs ys'
where
-- also returns NamedPats of remaining columns while it's in there
-- scoring the first column.
scoreFirstColumn : (nps : List (NamedPats ns (p' :: ps'))) -> (res : List (NamedPats ns ps') ** (LengthMatch nps res, Vect (length nps) Int))
scoreFirstColumn [] = ([] ** (NilMatch, []))
scoreFirstColumn ((w :: ws) :: nps) =
let (ws' ** (prf, scores)) = scoreFirstColumn nps
in (ws :: ws' ** (ConsMatch prf, scorePat (pat w) :: scores))
scoreColumns : {ps' : _} -> (nps : List (NamedPats ns ps')) -> Vect (length ps') (Vect (length nps) Int)
scoreColumns {ps' = []} nps = []
scoreColumns {ps' = (w :: ws)} nps =
let (rest ** (prf, firstColScore)) = scoreFirstColumn nps
in firstColScore :: (rewrite lengthsMatch prf in scoreColumns rest)
||| Add 1 to each non-default pat in the first row.
||| This favors constructive matching first and reduces tree depth on average.
heuristicF : {ps : _} -> ScoredPats ns (p :: ps) -> ScoredPats ns (p :: ps)
heuristicF sps@(Scored [] _) = sps
heuristicF (Scored (x :: xs) ys) =
let columnScores = scores x
ys' = zipWith (+) ys columnScores
in Scored (x :: xs) ys'
where
isBlank : Pat -> Bool
isBlank (PLoc _ _) = True
isBlank _ = False
scores : NamedPats ns' ps' -> Vect (length ps') Int
scores [] = []
scores (y :: ys) = let score : Int = if isBlank (pat y) then 0 else 1
in score :: scores ys
||| Subtract 1 from each column for each pat that represents a head constructor.
||| This favors pats that produce less branching.
heuristicB : {ps : _} -> ScoredPats ns ps -> ScoredPats ns ps
heuristicB = consScoreHeuristic (headConsPenalty (\arity => if arity == 0 then 0 else -1))
||| Subtract the sum of the arities of constructors in each column.
heuristicA : {ps : _} -> ScoredPats ns ps -> ScoredPats ns ps
heuristicA = consScoreHeuristic (headConsPenalty (negate . cast))
applyHeuristics : {p : _} ->
{ps : _} ->
ScoredPats ns (p :: ps) ->
List (ScoredPats ns (p :: ps) -> ScoredPats ns (p :: ps)) ->
Maybe (n ** NVar n (p :: ps))
applyHeuristics x [] = highScoreIdx x
applyHeuristics x (f :: fs) = highScoreIdx x <|> applyHeuristics (f x) fs
||| Based only on the heuristic-score of columns, get the index of
||| the column that should be processed next.
|||
||| The scoring is inspired by results from the paper:
||| http://moscova.inria.fr/~maranget/papers/ml05e-maranget.pdf
nextIdxByScore : {p : _} ->
{ps : _} ->
(useHeuristics : Bool) ->
Phase ->
List (NamedPats ns (p :: ps)) ->
(n ** NVar n (p :: ps))
nextIdxByScore False _ _ = (_ ** (MkNVar First))
nextIdxByScore _ (CompileTime _) _ = (_ ** (MkNVar First))
nextIdxByScore True RunTime xs =
fromMaybe (_ ** (MkNVar First)) $
applyHeuristics (zeroedScore xs) [heuristicF, heuristicB, heuristicA]
-- Check whether all the initial patterns have the same concrete, known
-- and matchable type, which is multiplicity > 0.
-- If so, it's okay to match on it
sameType : {ns : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
FC -> Phase -> Name ->
Env Term ns -> List (NamedPats ns (p :: ps)) ->
Core ()
sameType fc phase fn env [] = pure ()
sameType {ns} fc phase fn env (p :: xs)
= do defs <- get Ctxt
case getFirstArgType p of
Known _ t => sameTypeAs phase
!(nf defs env t)
(map getFirstArgType xs)
ty => throw (CaseCompile fc fn DifferingTypes)
where
firstPat : NamedPats ns (np :: nps) -> Pat
firstPat (pinf :: _) = pat pinf
headEq : NF ns -> NF ns -> Phase -> Bool
headEq (NBind _ _ (Pi _ _ _ _) _) (NBind _ _ (Pi _ _ _ _) _) _ = True
headEq (NTCon _ n _ _ _) (NTCon _ n' _ _ _) _ = n == n'
headEq (NPrimVal _ c) (NPrimVal _ c') _ = c == c'
headEq (NType _) (NType _) _ = True
headEq (NApp _ (NRef _ n) _) (NApp _ (NRef _ n') _) RunTime = n == n'
headEq (NErased _ _) _ RunTime = True
headEq _ (NErased _ _) RunTime = True
headEq _ _ _ = False
sameTypeAs : Phase -> NF ns -> List (ArgType ns) -> Core ()
sameTypeAs _ ty [] = pure ()
sameTypeAs ph ty (Known r t :: xs) =
do defs <- get Ctxt
if headEq ty !(nf defs env t) phase
then sameTypeAs ph ty xs
else throw (CaseCompile fc fn DifferingTypes)
sameTypeAs p ty _ = throw (CaseCompile fc fn DifferingTypes)
-- Check whether all the initial patterns are the same, or are all a variable.
-- If so, we'll match it to refine later types and move on
samePat : List (NamedPats ns (p :: ps)) -> Bool
samePat [] = True
samePat (pi :: xs)
= samePatAs (dropAs (getFirstPat pi))
(map (dropAs . getFirstPat) xs)
where
dropAs : Pat -> Pat
dropAs (PAs _ _ p) = p
dropAs p = p
samePatAs : Pat -> List Pat -> Bool
samePatAs p [] = True
samePatAs (PTyCon fc n a args) (PTyCon _ n' _ _ :: ps)
= n == n' && samePatAs (PTyCon fc n a args) ps
samePatAs (PCon fc n t a args) (PCon _ n' t' _ _ :: ps)
= n == n' && t == t' && samePatAs (PCon fc n t a args) ps
samePatAs (PConst fc c) (PConst _ c' :: ps)
= c == c' && samePatAs (PConst fc c) ps
samePatAs (PArrow fc x s t) (PArrow _ _ s' t' :: ps)
= samePatAs (PArrow fc x s t) ps
samePatAs (PDelay fc r t p) (PDelay _ _ _ _ :: ps)
= samePatAs (PDelay fc r t p) ps
samePatAs (PLoc fc n) (PLoc _ _ :: ps) = samePatAs (PLoc fc n) ps
samePatAs x y = False
getFirstCon : NamedPats ns (p :: ps) -> Pat
getFirstCon (p :: _) = pat p
-- Count the number of distinct constructors in the initial pattern
countDiff : List (NamedPats ns (p :: ps)) -> Nat
countDiff xs = length (distinct [] (map getFirstCon xs))
where
isVar : Pat -> Bool
isVar (PAs _ _ p) = isVar p
isVar (PCon _ _ _ _ _) = False
isVar (PTyCon _ _ _ _) = False
isVar (PConst _ _) = False
isVar (PArrow _ _ _ _) = False
isVar (PDelay _ _ _ p) = False
isVar _ = True
-- Return whether two patterns would lead to the same match
sameCase : Pat -> Pat -> Bool
sameCase (PAs _ _ p) p' = sameCase p p'
sameCase p (PAs _ _ p') = sameCase p p'
sameCase (PCon _ _ t _ _) (PCon _ _ t' _ _) = t == t'
sameCase (PTyCon _ t _ _) (PTyCon _ t' _ _) = t == t'
sameCase (PConst _ c) (PConst _ c') = c == c'
sameCase (PArrow _ _ _ _) (PArrow _ _ _ _) = True
sameCase (PDelay _ _ _ _) (PDelay _ _ _ _) = True
sameCase x y = isVar x && isVar y
distinct : List Pat -> List Pat -> List Pat
distinct acc [] = acc
distinct acc (p :: ps)
= if elemBy sameCase p acc
then distinct acc ps
else distinct (p :: acc) ps
getScore : {ns : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
FC -> Phase -> Name ->
List (NamedPats ns (p :: ps)) ->
Core (Either CaseError ())
getScore fc phase name npss
= do catch (do sameType fc phase name (mkEnv fc ns) npss
pure (Right ()))
$ \case
CaseCompile _ _ err => pure $ Left err
err => throw err
||| Pick the leftmost matchable thing with all constructors in the
||| same family, or all variables, or all the same type constructor.
pickNextViable : {p, ns, ps : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
FC -> Phase -> Name -> List (NamedPats ns (p :: ps)) ->
Core (n ** NVar n (p :: ps))
-- last possible variable
pickNextViable {ps = []} fc phase fn npss
= if samePat npss
then pure (_ ** MkNVar First)
else do Right () <- getScore fc phase fn npss
| Left err => throw (CaseCompile fc fn err)
pure (_ ** MkNVar First)
pickNextViable {ps = q :: qs} fc phase fn npss
= if samePat npss
then pure (_ ** MkNVar First)
else case !(getScore fc phase fn npss) of
Right () => pure (_ ** MkNVar First)
_ => do (_ ** MkNVar var) <- pickNextViable fc phase fn (map tail npss)
pure (_ ** MkNVar (Later var))
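-- Bring the pattern at the given position to the front of a row, removing
-- it from its original position.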
moveFirst : {idx : Nat} -> (0 el : IsVar nm idx ps) -> NamedPats ns ps ->
NamedPats ns (nm :: dropVar ps el)
moveFirst el nps = getPat el nps :: dropPat el nps
shuffleVars : {idx : Nat} -> (0 el : IsVar nm idx todo) -> PatClause vars todo ->
PatClause vars (nm :: dropVar todo el)
shuffleVars First orig@(MkPatClause pvars lhs pid rhs) = orig -- no-op
shuffleVars el (MkPatClause pvars lhs pid rhs)
= MkPatClause pvars (moveFirst el lhs) pid rhs
mutual
{- 'PatClause' contains a list of patterns still to process (that's the
"todo") and a right hand side with the variables we know about "vars".
So "match" builds the remainder of the case tree for
the unprocessed patterns. "err" is the tree for when the patterns don't
cover the input (i.e. the "fallthrough" pattern, which at the top
level will be an error). -}
match : {vars, todo : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
FC -> Name -> Phase ->
List (PatClause vars todo) -> (err : Maybe (CaseTree vars)) ->
Core (CaseTree vars)
-- Before 'partition', reorder the arguments so that the one we
-- inspect next has a concrete type that is the same in all cases, and
-- has the most distinct constructors (via pickNextViable)
match {todo = (_ :: _)} fc fn phase clauses err
= do let nps = getNPs <$> clauses
let (_ ** (MkNVar next)) = nextIdxByScore (caseTreeHeuristics !getSession) phase nps
let prioritizedClauses = shuffleVars next <$> clauses
(n ** MkNVar next') <- pickNextViable fc phase fn (getNPs <$> prioritizedClauses)
log "compile.casetree.pick" 25 $ "Picked " ++ show n ++ " as the next split"
let clauses' = shuffleVars next' <$> prioritizedClauses
log "compile.casetree.clauses" 25 $
unlines ("Using clauses:" :: map ((" " ++) . show) clauses')
let ps = partition phase clauses'
log "compile.casetree.partition" 25 $ "Got Partition:\n" ++ show ps
mix <- mixture fc fn phase ps err
case mix of
Nothing =>
do log "compile.casetree.intermediate" 25 "match: No clauses"
pure (Unmatched "No clauses")
Just m =>
do log "compile.casetree.intermediate" 25 $ "match: new case tree " ++ show m
Core.pure m
match {todo = []} fc fn phase [] err
= maybe (pure (Unmatched "No patterns"))
pure err
match {todo = []} fc fn phase ((MkPatClause pvars [] pid (Erased _ True)) :: _) err
= pure Impossible
match {todo = []} fc fn phase ((MkPatClause pvars [] pid rhs) :: _) err
= pure $ STerm pid rhs
caseGroups : {pvar, vars, todo : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
FC -> Name -> Phase ->
{idx : Nat} -> (0 p : IsVar pvar idx vars) -> Term vars ->
List (Group vars todo) -> Maybe (CaseTree vars) ->
Core (CaseTree vars)
caseGroups {vars} fc fn phase el ty gs errorCase
= do g <- altGroups gs
pure (Case _ el (resolveNames vars ty) g)
where
altGroups : List (Group vars todo) -> Core (List (CaseAlt vars))
altGroups [] = maybe (pure [])
(\e => pure [DefaultCase e])
errorCase
altGroups (ConGroup {newargs} cn tag rest :: cs)
= do crest <- match fc fn phase rest (map (weakenNs (mkSizeOf newargs)) errorCase)
cs' <- altGroups cs
pure (ConCase cn tag newargs crest :: cs')
altGroups (DelayGroup {tyarg} {valarg} rest :: cs)
= do crest <- match fc fn phase rest (map (weakenNs (mkSizeOf [tyarg, valarg])) errorCase)
cs' <- altGroups cs
pure (DelayCase tyarg valarg crest :: cs')
altGroups (ConstGroup c rest :: cs)
= do crest <- match fc fn phase rest errorCase
cs' <- altGroups cs
pure (ConstCase c crest :: cs')
conRule : {a, vars, todo : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
FC -> Name -> Phase ->
List (PatClause vars (a :: todo)) ->
Maybe (CaseTree vars) ->
Core (CaseTree vars)
conRule fc fn phase [] err = maybe (pure (Unmatched "No constructor clauses")) pure err
-- ASSUMPTION, not expressed in the type, that the patterns all have
-- the same variable (pprf) for the first argument. If not, the result
-- will be a broken case tree... so we should find a way to express this
-- in the type if we can.
conRule {a} fc fn phase cs@(MkPatClause pvars (MkInfo pat pprf fty :: pats) pid rhs :: rest) err
= do refinedcs <- traverse (substInClause fc) cs
groups <- groupCons fc fn pvars refinedcs
ty <- case fty of
Known _ t => pure t
_ => throw (CaseCompile fc fn UnknownType)
caseGroups fc fn phase pprf ty groups err
varRule : {a, vars, todo : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
FC -> Name -> Phase ->
List (PatClause vars (a :: todo)) ->
Maybe (CaseTree vars) ->
Core (CaseTree vars)
varRule {vars} {a} fc fn phase cs err
= do alts' <- traverse updateVar cs
match fc fn phase alts' err
where
updateVar : PatClause vars (a :: todo) -> Core (PatClause vars todo)
-- replace the name with the relevant variable on the rhs
updateVar (MkPatClause pvars (MkInfo (PLoc pfc n) prf fty :: pats) pid rhs)
= pure $ MkPatClause (n :: pvars)
!(substInPats fc a (Local pfc (Just False) _ prf) pats)
pid (substName n (Local pfc (Just False) _ prf) rhs)
-- If it's an as pattern, replace the name with the relevant variable on
-- the rhs then continue with the inner pattern
updateVar (MkPatClause pvars (MkInfo (PAs pfc n pat) prf fty :: pats) pid rhs)
= do pats' <- substInPats fc a (mkTerm _ pat) pats
let rhs' = substName n (Local pfc (Just True) _ prf) rhs
updateVar (MkPatClause pvars (MkInfo pat prf fty :: pats') pid rhs')
-- match anything, name won't appear in rhs but need to update
-- LHS pattern types based on what we've learned
updateVar (MkPatClause pvars (MkInfo pat prf fty :: pats) pid rhs)
= pure $ MkPatClause pvars
!(substInPats fc a (mkTerm vars pat) pats) pid rhs
mixture : {a, vars, todo : _} ->
{auto i : Ref PName Int} ->
{auto c : Ref Ctxt Defs} ->
{ps : List (PatClause vars (a :: todo))} ->
FC -> Name -> Phase ->
Partitions ps ->
Maybe (CaseTree vars) ->
Core (Maybe (CaseTree vars))
mixture fc fn phase (ConClauses cs rest) err
= do fallthrough <- mixture fc fn phase rest err
pure (Just !(conRule fc fn phase cs fallthrough))
mixture fc fn phase (VarClauses vs rest) err
= do fallthrough <- mixture fc fn phase rest err
pure (Just !(varRule fc fn phase vs fallthrough))
mixture fc fn {a} {todo} phase NoClauses err
= pure err
export
mkPat : {auto c : Ref Ctxt Defs} -> List Pat -> ClosedTerm -> ClosedTerm -> Core Pat
mkPat args orig (Ref fc Bound n) = pure $ PLoc fc n
mkPat args orig (Ref fc (DataCon t a) n) = pure $ PCon fc n t a args
mkPat args orig (Ref fc (TyCon t a) n) = pure $ PTyCon fc n a args
mkPat args orig (Ref fc Func n)
= do prims <- getPrimitiveNames
mtm <- normalisePrims (const True) isPConst True prims n args orig []
case mtm of
Just tm => if tm /= orig -- check we made progress; if there's an
-- unresolved interface, we might be stuck
-- and we'd loop forever
then mkPat [] tm tm
else -- Possibly this should be an error instead?
pure $ PUnmatchable (getLoc orig) orig
Nothing =>
do log "compile.casetree" 10 $
"Unmatchable function: " ++ show n
pure $ PUnmatchable (getLoc orig) orig
mkPat args orig (Bind fc x (Pi _ _ _ s) t)
= let t' = subst (Erased fc False) t in
pure $ PArrow fc x !(mkPat [] s s) !(mkPat [] t' t')
mkPat args orig (App fc fn arg)
= do parg <- mkPat [] arg arg
mkPat (parg :: args) orig fn
mkPat args orig (As fc _ (Ref _ Bound n) ptm)
= pure $ PAs fc n !(mkPat [] ptm ptm)
mkPat args orig (As fc _ _ ptm)
= mkPat [] orig ptm
mkPat args orig (TDelay fc r ty p)
= pure $ PDelay fc r !(mkPat [] orig ty) !(mkPat [] orig p)
mkPat args orig (PrimVal fc c)
= pure $ if constTag c == 0
then PConst fc c
else PTyCon fc (UN (Basic $ show c)) 0 []
mkPat args orig (TType fc) = pure $ PTyCon fc (UN $ Basic "Type") 0 []
mkPat args orig tm
= do log "compile.casetree" 10 $
"Catchall: marking " ++ show tm ++ " as unmatchable"
pure $ PUnmatchable (getLoc orig) orig
export
argToPat : {auto c : Ref Ctxt Defs} -> ClosedTerm -> Core Pat
argToPat tm = mkPat [] tm tm
mkPatClause : {auto c : Ref Ctxt Defs} ->
FC -> Name ->
(args : List Name) -> ClosedTerm ->
Int -> (List Pat, ClosedTerm) ->
Core (PatClause args args)
mkPatClause fc fn args ty pid (ps, rhs)
= maybe (throw (CaseCompile fc fn DifferingArgNumbers))
(\eq =>
do defs <- get Ctxt
nty <- nf defs [] ty
ns <- mkNames args ps eq (Just nty)
log "compile.casetree" 20 $
"Make pat clause for names " ++ show ns
++ " in LHS " ++ show ps
pure (MkPatClause [] ns pid
(rewrite sym (appendNilRightNeutral args) in
(weakenNs (mkSizeOf args) rhs))))
(checkLengthMatch args ps)
where
mkNames : (vars : List Name) -> (ps : List Pat) ->
LengthMatch vars ps -> Maybe (NF []) ->
Core (NamedPats vars vars)
mkNames [] [] NilMatch fty = pure []
mkNames (arg :: args) (p :: ps) (ConsMatch eq) fty
= do defs <- get Ctxt
empty <- clearDefs defs
fa_tys <- the (Core (Maybe _, ArgType _)) $
case fty of
Nothing => pure (Nothing, CaseBuilder.Unknown)
Just (NBind pfc _ (Pi _ c _ farg) fsc) =>
pure (Just !(fsc defs (toClosure defaultOpts [] (Ref pfc Bound arg))),
Known c (embed {more = arg :: args}
!(quote empty [] farg)))
Just t =>
pure (Nothing,
Stuck (embed {more = arg :: args}
!(quote empty [] t)))
pure (MkInfo p First (Builtin.snd fa_tys)
:: weaken !(mkNames args ps eq (Builtin.fst fa_tys)))
export
patCompile : {auto c : Ref Ctxt Defs} ->
FC -> Name -> Phase ->
ClosedTerm -> List (List Pat, ClosedTerm) ->
Maybe (CaseTree []) ->
Core (args ** CaseTree args)
patCompile fc fn phase ty [] def
= maybe (pure ([] ** Unmatched "No definition"))
(\e => pure ([] ** e))
def
patCompile fc fn phase ty (p :: ps) def
= do let (ns ** n) = getNames 0 (fst p)
pats <- mkPatClausesFrom 0 ns (p :: ps)
-- low verbosity level: pretty print fully resolved names
logC "compile.casetree" 5 $ do
pats <- traverse toFullNames pats
pure $ "Pattern clauses:\n"
++ show (indent 2 $ vcat $ pretty {ann = ()} <$> pats)
-- higher verbosity: dump the raw data structure
log "compile.casetree" 10 $ show pats
i <- newRef PName (the Int 0)
cases <- match fc fn phase pats
(rewrite sym (appendNilRightNeutral ns) in
map (TT.weakenNs n) def)
pure (_ ** cases)
where
mkPatClausesFrom : Int -> (args : List Name) ->
List (List Pat, ClosedTerm) ->
Core (List (PatClause args args))
mkPatClausesFrom i ns [] = pure []
mkPatClausesFrom i ns (p :: ps)
= do p' <- mkPatClause fc fn ns ty i p
ps' <- mkPatClausesFrom (i + 1) ns ps
pure (p' :: ps')
getNames : Int -> List Pat -> (ns : List Name ** SizeOf ns)
getNames i [] = ([] ** zero)
getNames i (x :: xs) =
let (ns ** n) = getNames (i + 1) xs
in (MN "arg" i :: ns ** suc n)
toPatClause : {auto c : Ref Ctxt Defs} ->
FC -> Name -> (ClosedTerm, ClosedTerm) ->
Core (List Pat, ClosedTerm)
toPatClause fc n (lhs, rhs)
= case getFnArgs lhs of
(Ref ffc Func fn, args)
=> do defs <- get Ctxt
(np, _) <- getPosition n (gamma defs)
(fnp, _) <- getPosition fn (gamma defs)
if np == fnp
then pure (!(traverse argToPat args), rhs)
else throw (GenericMsg ffc ("Wrong function name in pattern LHS " ++ show (n, fn)))
(f, args) => throw (GenericMsg fc "Not a function name in pattern LHS")
-- Assumption (given 'ClosedTerm') is that the pattern variables are
-- explicitly named. We'll assign de Bruijn indices when we're done, and
-- the names of the top level variables we created are returned in 'args'
export
simpleCase : {auto c : Ref Ctxt Defs} ->
FC -> Phase -> Name -> ClosedTerm -> (def : Maybe (CaseTree [])) ->
(clauses : List (ClosedTerm, ClosedTerm)) ->
Core (args ** CaseTree args)
simpleCase fc phase fn ty def clauses
= do logC "compile.casetree" 5 $
do cs <- traverse (\ (c,d) => [| MkPair (toFullNames c) (toFullNames d) |]) clauses
pure $ "simpleCase: Clauses:\n" ++ show (
indent {ann = ()} 2 $ vcat $ flip map cs $ \ (lrhs) =>
pretty {ann = ()} (fst lrhs) <++> pretty "=" <++> pretty (snd lrhs))
ps <- traverse (toPatClause fc fn) clauses
defs <- get Ctxt
patCompile fc fn phase ty ps def
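-- Collect the identifiers of the clauses that appear in a case tree, so that
-- clauses which never made it into the tree can be reported as unreachable.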
findReached : CaseTree ns -> List Int
findReached (Case _ _ _ alts) = concatMap findRAlts alts
where
findRAlts : CaseAlt ns' -> List Int
findRAlts (ConCase _ _ _ t) = findReached t
findRAlts (DelayCase _ _ t) = findReached t
findRAlts (ConstCase _ t) = findReached t
findRAlts (DefaultCase t) = findReached t
findReached (STerm i _) = [i]
findReached _ = []
-- Returns the case tree, and a list of the clauses that aren't reachable
export
getPMDef : {auto c : Ref Ctxt Defs} ->
FC -> Phase -> Name -> ClosedTerm -> List Clause ->
Core (args ** (CaseTree args, List Clause))
-- If there's no clauses, make a definition with the right number of arguments
-- for the type, which we can use in coverage checking to ensure that one of
-- the arguments has an empty type
getPMDef fc phase fn ty []
= do log "compile.casetree.getpmdef" 20 "getPMDef: No clauses!"
defs <- get Ctxt
pure (!(getArgs 0 !(nf defs [] ty)) ** (Unmatched "No clauses", []))
where
getArgs : Int -> NF [] -> Core (List Name)
getArgs i (NBind fc x (Pi _ _ _ _) sc)
= do defs <- get Ctxt
sc' <- sc defs (toClosure defaultOpts [] (Erased fc False))
pure (MN "arg" i :: !(getArgs i sc'))
getArgs i _ = pure []
getPMDef fc phase fn ty clauses
= do defs <- get Ctxt
let cs = map (toClosed defs) (labelPat 0 clauses)
(_ ** t) <- simpleCase fc phase fn ty Nothing cs
logC "compile.casetree.getpmdef" 20 $
pure $ "Compiled to: " ++ show !(toFullNames t)
let reached = findReached t
pure (_ ** (t, getUnreachable 0 reached clauses))
where
getUnreachable : Int -> List Int -> List Clause -> List Clause
getUnreachable i is [] = []
getUnreachable i is (c :: cs)
= if i `elem` is
then getUnreachable (i + 1) is cs
else c :: getUnreachable (i + 1) is cs
labelPat : Int -> List a -> List (String, a)
labelPat i [] = []
labelPat i (x :: xs) = ("pat" ++ show i ++ ":", x) :: labelPat (i + 1) xs
mkSubstEnv : Int -> String -> Env Term vars -> SubstEnv vars []
mkSubstEnv i pname [] = Nil
mkSubstEnv i pname (v :: vs)
= Ref fc Bound (MN pname i) :: mkSubstEnv (i + 1) pname vs
close : {vars : _} ->
Env Term vars -> String -> Term vars -> ClosedTerm
close {vars} env pname tm
= substs (mkSubstEnv 0 pname env)
(rewrite appendNilRightNeutral vars in tm)
toClosed : Defs -> (String, Clause) -> (ClosedTerm, ClosedTerm)
toClosed defs (pname, MkClause env lhs rhs)
= (close env pname lhs, close env pname rhs)
|
// Wrapper of the GSL interpolation functions that lets you define a
// 1D function by points and then query points on that function.
//
// Author: Mark Desnoyer ([email protected])
// Date: Oct 2012
#ifndef __INTERPOLATION_H__
#define __INTERPOLATION_H__
#include <vector>
#include <gsl/gsl_spline.h>
#include <assert.h>
#include <algorithm>
#include <limits>
#include <exception>
template <typename T>
class SortOrder {
public:
SortOrder(const std::vector<T>& sortArray) : sortArray_(sortArray) {}
bool operator()(int lhs, int rhs) const {
return sortArray_[lhs] < sortArray_[rhs];
}
private:
const std::vector<T>& sortArray_;
};
// Classes that inherit this abstract class must initialize spline_ in
// their constructors.
class Interpolator {
public:
class out_of_bounds : public std::exception {};
template <typename InputIterator>
Interpolator(InputIterator xFirst, InputIterator xLast,
InputIterator yFirst, InputIterator yLast)
: x_(), y_(), acc_(gsl_interp_accel_alloc()) {
std::vector<double> x(xFirst, xLast);
std::vector<double> y(yFirst, yLast);
std::vector<int> idx;
for (unsigned int i = 0u; i < x.size(); ++i) {
idx.push_back(i);
}
assert(x.size() == y.size());
    // Get the sorting indices based on the x values
std::sort(idx.begin(), idx.end(), SortOrder<double>(x));
// Populate the data in ascending x order skipping equal values
double curX = std::numeric_limits<double>::quiet_NaN();
for (unsigned int i = 0u; i < idx.size(); ++i) {
if (curX != x[idx[i]]) {
x_.push_back(x[idx[i]]);
y_.push_back(y[idx[i]]);
}
curX = x[idx[i]];
}
}
virtual ~Interpolator();
// Evaluate the interpolation at xi
double operator()(double xi) const {
if (xi < x_[0] || xi > x_.back()) {
throw out_of_bounds();
}
return gsl_spline_eval(spline_, xi, acc_);
}
// Evaluate the derivative at xi
double EvalDeriv(double xi) const {
if (xi < x_[0] || xi > x_.back()) {
throw out_of_bounds();
}
return gsl_spline_eval_deriv(spline_, xi, acc_);
}
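  // Note: y_ is sorted by ascending x, so the two accessors below return
  // y at the smallest/largest x; they equal the true min/max of y only
  // when the sampled function is monotonically increasing.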
double minY() const { return y_.front(); }
double maxY() const { return y_.back(); }
protected:
std::vector<double> x_;
std::vector<double> y_;
gsl_interp_accel* acc_;
gsl_spline* spline_;
};
class LinearInterpolator : public Interpolator {
public:
LinearInterpolator(const std::vector<double>& x, const std::vector<double>& y)
: Interpolator(x.begin(), x.end(), y.begin(), y.end()) {
InitSpline();
}
template <typename InputIterator>
LinearInterpolator(InputIterator xFirst, InputIterator xLast,
InputIterator yFirst, InputIterator yLast)
: Interpolator(xFirst, xLast, yFirst, yLast) {
InitSpline();
}
virtual ~LinearInterpolator();
private:
void InitSpline();
};
class SplineInterpolator : public Interpolator {
public:
SplineInterpolator(const std::vector<double>& x, const std::vector<double>& y)
: Interpolator(x.begin(), x.end(), y.begin(), y.end()) {
InitSpline();
}
template <typename InputIterator>
SplineInterpolator(InputIterator xFirst, InputIterator xLast,
InputIterator yFirst, InputIterator yLast)
: Interpolator(xFirst, xLast, yFirst, yLast) {
InitSpline();
}
virtual ~SplineInterpolator();
private:
void InitSpline();
};
#endif // __INTERPOLATION_H__
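// Example usage -- a minimal sketch, assuming this header is saved as
// "interpolation.h", that the out-of-line definitions (InitSpline and the
// destructors) live in a companion source file, and that the program links
// against GSL (e.g. g++ main.cc interpolation.cc -lgsl -lgslcblas):
//
//   #include <iostream>
//   #include <vector>
//   #include "interpolation.h"
//
//   int main() {
//     std::vector<double> x = {0.0, 1.0, 2.0, 3.0};
//     std::vector<double> y = {0.0, 1.0, 4.0, 9.0};   // y = x^2, sampled
//     LinearInterpolator interp(x, y);
//     std::cout << interp(1.5) << "\n";            // piecewise-linear estimate
//     std::cout << interp.EvalDeriv(1.5) << "\n";  // slope of that segment
//     return 0;
//   }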
|
The scalar product of a polynomial with a sum of polynomials is equal to the sum of the scalar products of the polynomial with each of the summands.
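In symbols, writing $\langle\cdot,\cdot\rangle$ for the scalar product (a sketch of the asserted identity, for a polynomial $p$ and summands $q_1,\dots,q_n$):
\begin{equation*}
\Bigl\langle p,\ \sum_{i=1}^{n} q_i \Bigr\rangle = \sum_{i=1}^{n} \bigl\langle p,\ q_i \bigr\rangle .
\end{equation*}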
|
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall O E Eprime U A B C : Universe, ((wd_ U O /\ (wd_ U E /\ (wd_ O E /\ (wd_ O Eprime /\ (wd_ E Eprime /\ (wd_ O U /\ (wd_ U Eprime /\ (wd_ A O /\ (wd_ B O /\ (wd_ A O /\ (wd_ U Eprime /\ (col_ O E U /\ (col_ O E A /\ (col_ O E B /\ (col_ O E C /\ (col_ O U A /\ (col_ O U B /\ (col_ O U C /\ (col_ A U Eprime /\ (col_ O U Eprime /\ col_ O Eprime O)))))))))))))))))))) -> col_ O E Eprime)).
Proof.
time tac.
Qed.
End FOFProblem.
|
# Solution {-}
Given the following spectral density function:
\begin{equation*}
S(j\omega)=\delta(\omega)+\frac{1}{2}\delta(\omega-\omega_0)+\frac{1}{2}\delta(\omega+\omega_0)+\frac{2}{\omega^2+1}
\end{equation*}
1. Find the autocorrelation function
\begin{align*}
R(\tau)=&\frac{1}{2\pi}+\frac{1}{4\pi}e^{j\omega_0\tau}+\frac{1}{4\pi}e^{-j\omega_0\tau}+e^{-|\tau|}\\
=&\frac{1}{2\pi}+\frac{1}{2\pi}\cos \omega_0\tau+e^{-|\tau|}
\end{align*}
```python
from sympy import symbols, inverse_fourier_transform, DiracDelta, pi
t, w, w0 = symbols('tau omega omega_0')
S = DiracDelta(w) + 1/2*DiracDelta(w - w0) + 1/2*DiracDelta(w + w0) + 2/(w**2 + 1)
S
```
$\displaystyle \delta\left(\omega\right) + 0.5 \delta\left(\omega - \omega_{0}\right) + 0.5 \delta\left(\omega + \omega_{0}\right) + \frac{2}{\omega^{2} + 1}$
```python
R = 1/(2*pi)*inverse_fourier_transform(S, w, t)
R
```
$\displaystyle \frac{0.5 e^{2 i \pi \omega_{0} \tau} + 1 + 0.5 e^{- 2 i \pi \omega_{0} \tau} + 2 \pi e^{- 2 \pi \tau}}{2 \pi}$
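The extra $2\pi$ factors in the SymPy output come from a convention mismatch: `inverse_fourier_transform` uses the $e^{2\pi i x k}$ kernel, whereas the manual derivation uses $R(\tau)=\frac{1}{2\pi}\int_{-\infty}^{\infty}S(\omega)e^{j\omega\tau}\,d\omega$. The delta terms can be cross-checked directly under the angular-frequency convention (a sketch; the rational term is covered by the standard pair $\frac{2}{\omega^2+1}\leftrightarrow e^{-|\tau|}$):
```python
from sympy import symbols, integrate, exp, I, oo, pi, DiracDelta, simplify

tau = symbols('tau', real=True)
w, w0 = symbols('omega omega_0', real=True)

# Delta part of S(j*omega) only; integrate against exp(I*w*tau)/(2*pi).
S_deltas = DiracDelta(w) + DiracDelta(w - w0)/2 + DiracDelta(w + w0)/2
R_deltas = integrate(S_deltas * exp(I*w*tau), (w, -oo, oo)) / (2*pi)
simplify(R_deltas)  # expected: (cos(omega_0*tau) + 1)/(2*pi)
```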
|
[STATEMENT]
lemma subst_app: "subst x v (EApp e1 e2) = EApp (subst x v e1) (subst x v e2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subst x v (EApp e1 e2) = EApp (subst x v e1) (subst x v e2)
[PROOF STEP]
by auto
|
!==============================================================================!
module Grid_Mod
!------------------------------------------------------------------------------!
! Grids module is used throughout all programs !
!   (that means in "Generate", "Divide", "Convert", and "Process").           !
!------------------------------------------------------------------------------!
!----------------------------------[Modules]-----------------------------------!
use Const_Mod
use Math_Mod
use Comm_Mod
use File_Mod
use Grid_Level_Mod
use Bnd_Cond_Mod
use Metis_Options_Mod
use Sort_Mod
!------------------------------------------------------------------------------!
implicit none
!==============================================================================!
!---------------!
! !
! Grid type !
! !
!---------------!
type Grid_Type
! Stores the name of this domain
character(len=80) :: name
! Number of ...
integer :: n_nodes ! ... nodes
integer :: n_cells ! ... cells
integer :: n_faces ! ... faces
integer :: n_bnd_cells ! ... boundary cells
integer :: n_per_faces ! ... periodic faces (shadows)
integer :: n_bnd_cond ! ... boundary conditions
integer :: n_shadows ! ... shadow faces
integer :: n_levels ! ... multigrid levels
! Periodic span
real :: per_x, per_y, per_z
! Minimum, Maximum and total volumes
real :: min_vol, max_vol, tot_vol
!-------------------------!
! Cell-based variables !
!-------------------------!
! Cell center coordinates
real, allocatable :: xc(:), yc(:), zc(:)
! Cell volumes
real, allocatable :: vol(:)
! Wall distance - distance from the nearest wall
real, allocatable :: wall_dist(:)
    ! True if cell is near wall. Used in Process for some turbulence models.
logical, allocatable :: cell_near_wall(:)
! Number of nodes at each cell (determines cell's shape really)
integer, allocatable :: cells_n_nodes(:)
! Number of faces surrounding each cell
integer, allocatable :: cells_n_faces(:)
! Number of cells surrounding each cell
integer, allocatable :: cells_n_cells(:)
    ! Cells' nodes, faces and neighboring cells
integer, allocatable :: cells_n(:,:)
integer, allocatable :: cells_f(:,:)
integer, allocatable :: cells_c(:,:)
! For boundary cells, store corresponding face
integer, allocatable :: cells_bnd_face(:)
! For each cell; type of the boundary condition in a given direction
integer, allocatable :: cells_bnd_color(:,:)
! Coarser levels for the grid
type(Grid_Level_Type) :: level(MAX_MG_LEVELS)
!-------------------------!
! Face-based variables !
!-------------------------!
! Number of nodes at each face (determines face's shape really)
integer, allocatable :: faces_n_nodes(:)
    ! Faces' nodes, neighboring cells and shadows
integer, allocatable :: faces_n(:,:)
integer, allocatable :: faces_c(:,:)
integer, allocatable :: faces_s(:)
! Periodic faces
integer, allocatable :: per_faces(:)
! Face surface areas (si), total surface (s)
! and distances between cells (di)
real, allocatable :: sx(:), sy(:), sz(:), s(:)
real, allocatable :: dx(:), dy(:), dz(:)
! Face coordinates
real, allocatable :: xf(:), yf(:), zf(:)
! Face weight-factors: purely geometrical (f) and
! adapted to near wall cells in the fluid phase (fw)
real, allocatable :: f(:)
real, allocatable :: fw(:)
!-------------------------!
! Node-based variables !
!-------------------------!
! Node coordinates
real, allocatable :: xn(:), yn(:), zn(:)
type(Bnd_Cond_Type) :: bnd_cond
! Maximum number of cells, boundary cells and faces
! (Used for tentative memory allocation in Generator)
integer :: max_n_nodes
integer :: max_n_bnd_cells
integer :: max_n_faces
! New numbers for nodes, cells and faces
integer, allocatable :: new_n(:)
integer, allocatable :: new_c(:)
integer, allocatable :: new_f(:)
! Old numbers for cells and faces
integer, allocatable :: old_c(:)
integer, allocatable :: old_f(:)
! Number of cells surrounding each node
integer, allocatable :: nodes_n_cells(:)
! List of cells surrounding each node
integer, allocatable :: nodes_c(:,:)
!------------------------------------------!
! Variables important for parallel run !
!------------------------------------------!
type(Comm_Type) :: comm
    ! User arrays. I am not sure whether this is the ...
    ! ... best place for them, or whether I need them at all.
integer :: n_user_arrays
real, allocatable :: user_array(:,:)
end type
contains
include 'Grid_Mod/Allocate_Cells.f90'
include 'Grid_Mod/Allocate_Faces.f90'
include 'Grid_Mod/Allocate_Levels.f90'
include 'Grid_Mod/Allocate_New_Numbers.f90'
include 'Grid_Mod/Allocate_Nodes.f90'
include 'Grid_Mod/Bnd_Cond_Name.f90'
include 'Grid_Mod/Bnd_Cond_Type.f90'
include 'Grid_Mod/Bnd_Cond_Ranges.f90'
include 'Grid_Mod/Calculate_Face_Geometry.f90'
include 'Grid_Mod/Calculate_Global_Volumes.f90'
include 'Grid_Mod/Calculate_Wall_Distance.f90'
include 'Grid_Mod/Check_Levels.f90'
include 'Grid_Mod/Coarsen.f90'
include 'Grid_Mod/Correction_Periodicity.f90'
include 'Grid_Mod/Create_Levels.f90'
include 'Grid_Mod/Decompose.f90'
include 'Grid_Mod/Estimate_Big_And_Small.f90'
include 'Grid_Mod/Exchange_Cells_Int.f90'
include 'Grid_Mod/Exchange_Cells_Log.f90'
include 'Grid_Mod/Exchange_Cells_Real.f90'
include 'Grid_Mod/Exchange_Nodes_Int.f90'
include 'Grid_Mod/Exchange_Nodes_Log.f90'
include 'Grid_Mod/Exchange_Nodes_Real.f90'
include 'Grid_Mod/Find_Cells_Faces.f90'
include 'Grid_Mod/Find_Nodes_Cells.f90'
include 'Grid_Mod/Find_Periodic_Faces.f90'
include 'Grid_Mod/Form_Cells_Comm.f90'
include 'Grid_Mod/Form_Maps.f90'
include 'Grid_Mod/Form_Nodes_Comm.f90'
include 'Grid_Mod/Get_C1_And_C2_At_Level.f90'
include 'Grid_Mod/Load_Cns.f90'
include 'Grid_Mod/Load_Geo.f90'
include 'Grid_Mod/Print_Bnd_Cond_Info.f90'
include 'Grid_Mod/Save_Cns.f90'
include 'Grid_Mod/Save_Geo.f90'
include 'Grid_Mod/Sort_Cells_By_Index.f90'
include 'Grid_Mod/Sort_Cells_Smart.f90'
include 'Grid_Mod/Sort_Faces_By_Index.f90'
include 'Grid_Mod/Sort_Faces_Smart.f90'
end module
|
/-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import measure_theory.group.fundamental_domain
import measure_theory.integral.interval_integral
/-!
# Integrals of periodic functions
In this file we prove that `∫ x in b..b + a, f x = ∫ x in c..c + a, f x` for any (not necessarily
measurable) periodic function with period `a`.
-/
open set function measure_theory measure_theory.measure topological_space
open_locale measure_theory
lemma is_add_fundamental_domain_Ioc {a : ℝ} (ha : 0 < a) (b : ℝ) (μ : measure ℝ . volume_tac) :
is_add_fundamental_domain (add_subgroup.zmultiples a) (Ioc b (b + a)) μ :=
begin
refine is_add_fundamental_domain.mk' measurable_set_Ioc (λ x, _),
have : bijective (cod_restrict (λ n : ℤ, n • a) (add_subgroup.zmultiples a) _),
from (equiv.of_injective (λ n : ℤ, n • a) (zsmul_strict_mono_left ha).injective).bijective,
refine this.exists_unique_iff.2 _,
simpa only [add_comm x] using exists_unique_add_zsmul_mem_Ioc ha x b
end
variables {E : Type*} [normed_group E] [normed_space ℝ E] [measurable_space E] [borel_space E]
[complete_space E] [second_countable_topology E]
namespace function
namespace periodic
/-- An auxiliary lemma for a more general `function.periodic.interval_integral_add_eq`. -/
lemma interval_integral_add_eq_of_pos {f : ℝ → E} {a : ℝ} (hf : periodic f a)
(ha : 0 < a) (b c : ℝ) : ∫ x in b..b + a, f x = ∫ x in c..c + a, f x :=
begin
haveI : encodable (add_subgroup.zmultiples a) := (countable_range _).to_encodable,
simp only [interval_integral.integral_of_le, ha.le, le_add_iff_nonneg_right],
haveI : vadd_invariant_measure (add_subgroup.zmultiples a) ℝ volume :=
⟨λ c s hs, real.volume_preimage_add_left _ _⟩,
exact (is_add_fundamental_domain_Ioc ha b).set_integral_eq
(is_add_fundamental_domain_Ioc ha c) hf.map_vadd_zmultiples
end
/-- If `f` is a periodic function with period `a`, then its integral over `[b, b + a]` does not
depend on `b`. -/
lemma interval_integral_add_eq {f : ℝ → E} {a : ℝ} (hf : periodic f a)
(b c : ℝ) : ∫ x in b..b + a, f x = ∫ x in c..c + a, f x :=
begin
rcases lt_trichotomy 0 a with (ha|rfl|ha),
{ exact hf.interval_integral_add_eq_of_pos ha b c },
{ simp },
{ rw [← neg_inj, ← interval_integral.integral_symm, ← interval_integral.integral_symm],
simpa only [← sub_eq_add_neg, add_sub_cancel]
using (hf.neg.interval_integral_add_eq_of_pos (neg_pos.2 ha) (b + a) (c + a)) }
end
end periodic
end function
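-- A usage sketch (not checked here; it assumes mathlib's `real.cos_periodic :
-- periodic real.cos (2 * π)`): `real.cos_periodic.interval_integral_add_eq 0 π`
-- rewrites `∫ x in 0..(0 + 2 * π), cos x` as `∫ x in π..(π + 2 * π), cos x`.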
|
Formal statement is: lemma bounded_norm_comp: "bounded ((\<lambda>x. norm (f x)) ` S) = bounded (f ` S)" Informal statement is: The set of norms $\{\lVert f(x)\rVert : x \in S\}$ is bounded if and only if the image $f(S)$ is bounded.
|
\section*{Introduction}
The goal of the GPS Toolkit (GPSTk) project is to provide an open
source library and suite of applications to the satellite navigation
community---to free researchers to focus on research, not lower level
coding. In this paper, we explain the organization of the GPSTk project
and its software suite. Because the GPSTk is a collaboration, it grows
over time with new capabilities. Capabilities developed over the last
two years, some of which are still under test, are described.
For exchange of observation data, the GPS community has relied on the
{\bf R}eceiver {\bf IN}dependent {\bf EX}change
format~\cite{rinex1format,rinex2format,rinex211format}. Since its
initial release, the GPSTk has specifically supported RINEX version 2
(R2). To support multi-GNSS receivers and data analysis, RINEX has
evolved from RINEX version 2 to version 3.00, which includes coherent
schemes for multi-GNSS data, as well as greatly enhanced data records
specifically designed for kinematic
applications~\cite{rinex300format}. Some of the RINEX formats had to
be radically restructured to do so. The GPSTk has developed support
for reading and writing R3 files and mechanisms for storing the
additional data defined by the new standard. Existing applications have
been upgraded to use R3.
A key underlying challenge in truly supporting R3 is the ability to
integrate observations from multiple Global Navigation Satellite
Systems (GNSSs). Each GNSS defines its own coordinate and time
systems. Reconciling disparate systems could be accomplished on a
case-by-case basis. In the GPSTk, this translates to modifying
processes at the application level. A more seamless solution was
sought for the GPSTk, one that could exist at the library level. The
R3 code transparently provides the translation of coordinate and time
systems during ephemeris evaluation. However, this convenience has
implications. The design of the R3 implementation and its implications
are a subject of this paper. Also discussed is how the R3 implementation
enables the integrated processing of GPS and GLONASS observations.
A topic common to all GNSS processing is clock stability.
When satellite observations are used to support time transfer or
orbit determination, clock stability is a key factor. The new
GPSTk application \gpstkapp{Clock Tools} provides the GPS and precise time
communities free access to basic clock stability analyses. Current
\gpstkapp{Clock Tools} capabilities include the computation of Allan and Hadamard
stability metrics, along with data parsing, grooming, and plotting. We
present several typical applications.
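For reference (background only, not a statement about the specific
implementation), the standard non-overlapping Allan variance estimator over
$M$ averaged fractional-frequency values $\bar{y}_i$ at averaging time
$\tau$ is
\begin{equation*}
\sigma_y^2(\tau) = \frac{1}{2(M-1)} \sum_{i=1}^{M-1}
  \left( \bar{y}_{i+1} - \bar{y}_i \right)^2 .
\end{equation*}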
Fundamental contributions have been made to the \mbox{GPSTk} library as well.
A new auxiliary library has been added that supports precise point
positioning (PPP). Another library has been added to provide customizable
plotting in \LaTeX\ and HTML.
|
MODULE KPP_ROOT_Integrator
USE KPP_ROOT_Parameters, ONLY : NVAR, NFIX, NREACT
USE KPP_ROOT_Global, ONLY : TIME, RCONST, Volume
USE KPP_ROOT_Stoichiom
USE KPP_ROOT_Stochastic
USE KPP_ROOT_Rates
IMPLICIT NONE
CONTAINS
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SUBROUTINE Gillespie(Nevents, T, SCT, NmlcV, NmlcF)
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
!
! Gillespie stochastic integration
! INPUT:
! Nevents = no. of individual reaction events to be simulated
! SCT = stochastic rate constants
! T = time
! NmlcV, NmlcF = no. of molecules for variable and fixed species
! OUTPUT:
! T = updated time (after Nevents reactions)
! NmlcV = updated no. of molecules for variable species
!
!
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
IMPLICIT NONE
KPP_REAL:: T
INTEGER :: Nevents
INTEGER :: NmlcV(NVAR), NmlcF(NFIX)
INTEGER :: i, m, issa
REAL :: r1, r2
KPP_REAL :: A(NREACT), SCT(NREACT), x
DO issa = 1, Nevents
! Uniformly distributed random numbers
CALL RANDOM_NUMBER(r1)
CALL RANDOM_NUMBER(r2)
! Avoid log of zero
r2 = MAX(r2,1.e-14)
! Propensity vector
CALL Propensity ( NmlcV, NmlcF, SCT, A )
! Cumulative sum of propensities
DO i = 2, NREACT
A(i) = A(i-1)+A(i);
END DO
! Index of next reaction
x = r1*A(NREACT)
DO i = 1, NREACT
IF (A(i)>=x) THEN
m = i;
EXIT
END IF
END DO
! Update time with time to next reaction
T = T - LOG(r2)/A(NREACT);
! Update state vector
CALL MoleculeChange( m, NmlcV )
END DO
CONTAINS
SUBROUTINE PropensityTemplate( T, NmlcV, NmlcF, Prop )
KPP_REAL, INTENT(IN) :: T
INTEGER, INTENT(IN) :: NmlcV(NVAR), NmlcF(NFIX)
KPP_REAL, INTENT(OUT) :: Prop(NREACT)
KPP_REAL :: Tsave, SCT(NREACT)
! Update the stochastic reaction rates, which may be time dependent
Tsave = TIME
TIME = T
CALL Update_RCONST()
CALL StochasticRates( RCONST, Volume, SCT )
CALL Propensity ( NmlcV, NmlcF, SCT, Prop )
TIME = Tsave
END SUBROUTINE PropensityTemplate
END SUBROUTINE Gillespie
END MODULE KPP_ROOT_Integrator
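! A minimal driver sketch (an illustration only: it assumes the KPP_ROOT
! placeholders have been expanded to a concrete model name, that KPP_REAL
! is a real kind, and that SCT has been filled via Update_RCONST and
! StochasticRates before the call):
!
!   PROGRAM gillespie_driver
!     USE mymodel_Integrator
!     IMPLICIT NONE
!     KPP_REAL :: T, SCT(NREACT)
!     INTEGER  :: NmlcV(NVAR), NmlcF(NFIX)
!     T = 0.0
!     ! ... initialize NmlcV, NmlcF and SCT here ...
!     CALL Gillespie(10000, T, SCT, NmlcV, NmlcF)
!     PRINT *, 'time after 10000 reaction events: ', T
!   END PROGRAM gillespie_driver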
|
module Control.Effect.State
import Control.EffectAlgebra
import Control.HigherOrder
import Control.Monad.State
import public Control.Effect.Reader
import public Control.Effect.Writer
||| State effect - the union of Reader and Writer effects.
public export
StateE : Type -> (Type -> Type) -> (Type -> Type)
StateE s = ReaderE s :+: WriterE s
public export
Inj (StateE s) sig => Inj (ReaderE s) sig where
inj = inj {sub = StateE s, sup = sig} . Inl
public export
Inj (StateE s) sig => Inj (WriterE s) sig where
inj = inj {sub = StateE s, sup = sig} . Inr
-- public export
-- get : Algebra sig m => Inj (StateE s) sig => m s
-- get = send {sig} {eff = StateE s} (Inl Ask)
-- public export
-- put : Algebra sig m => Inj (StateE s) sig => s -> m ()
-- put x = send {sig} {eff = StateE s} (Inr (Tell x))
namespace Algebra
public export
[State] Algebra sig m => Algebra (StateE s :+: sig) (StateT s m) where
alg ctxx hdl (Inl (Inl Ask)) =
ST $ \s => pure {f = m} (s, (s <$ ctxx))
alg ctxx hdl (Inl (Inr (Tell s))) =
ST $ \_ => pure {f = m} (s, ctxx)
alg ctxx hdl (Inr x) = ST $ \r => do
res <- alg
{f = Functor.Compose}
(r, ctxx) h x
pure res
where
h : Handler ((s,) . ctx) n m
h =
(~<~)
{ctx1 = (s,), ctx2 = ctx}
{l = n}
{m = StateT s m} {n = m} (uncurry runStateT) hdl
%hint public export
HintState : Algebra sig m => Algebra (StateE s :+: sig) (StateT s m)
HintState = State
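-- A usage sketch (hypothetical, mirroring the commented-out helpers above;
-- it assumes `send` from Control.EffectAlgebra):
--
--   tick : Algebra sig m => Inj (StateE Nat) sig => m Nat
--   tick = do n <- send {sig} {eff = StateE Nat} (Inl Ask)
--             send {sig} {eff = StateE Nat} (Inr (Tell (S n)))
--             pure n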
|
State Before: E : ℕ → Type u_1
inst✝¹ : (n : ℕ) → TopologicalSpace (E n)
inst✝ : ∀ (n : ℕ), DiscreteTopology (E n)
s : Set ((n : ℕ) → E n)
hs : IsClosed s
x y : (n : ℕ) → E n
hx : ¬x ∈ s
hy : y ∈ s
⊢ firstDiff x y ≤ longestPrefix x s State After: E : ℕ → Type u_1
inst✝¹ : (n : ℕ) → TopologicalSpace (E n)
inst✝ : ∀ (n : ℕ), DiscreteTopology (E n)
s : Set ((n : ℕ) → E n)
hs : IsClosed s
x y : (n : ℕ) → E n
hx : ¬x ∈ s
hy : y ∈ s
⊢ firstDiff x y + 1 ≤ shortestPrefixDiff x s
E : ℕ → Type u_1
inst✝¹ : (n : ℕ) → TopologicalSpace (E n)
inst✝ : ∀ (n : ℕ), DiscreteTopology (E n)
s : Set ((n : ℕ) → E n)
hs : IsClosed s
x y : (n : ℕ) → E n
hx : ¬x ∈ s
hy : y ∈ s
⊢ 1 ≤ shortestPrefixDiff x s Tactic: rw [longestPrefix, le_tsub_iff_right] State Before: E : ℕ → Type u_1
inst✝¹ : (n : ℕ) → TopologicalSpace (E n)
inst✝ : ∀ (n : ℕ), DiscreteTopology (E n)
s : Set ((n : ℕ) → E n)
hs : IsClosed s
x y : (n : ℕ) → E n
hx : ¬x ∈ s
hy : y ∈ s
⊢ firstDiff x y + 1 ≤ shortestPrefixDiff x s State After: no goals Tactic: exact firstDiff_lt_shortestPrefixDiff hs hx hy State Before: E : ℕ → Type u_1
inst✝¹ : (n : ℕ) → TopologicalSpace (E n)
inst✝ : ∀ (n : ℕ), DiscreteTopology (E n)
s : Set ((n : ℕ) → E n)
hs : IsClosed s
x y : (n : ℕ) → E n
hx : ¬x ∈ s
hy : y ∈ s
⊢ 1 ≤ shortestPrefixDiff x s State After: no goals Tactic: exact shortestPrefixDiff_pos hs ⟨y, hy⟩ hx
|
r=0.87
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d73w2r/media/images/d73w2r-009/svc:tesseract/full/full/0.87/default.jpg Accept:application/hocr+xml
|
Formal statement is: lemma prime_power_exp_nat: fixes p::nat assumes p: "prime p" and n: "n \<noteq> 0" and xn: "x^n = p^k" shows "\<exists>i. x = p^i" Informal statement is: If $p$ is a prime number, $n$ is a positive integer, and $x^n = p^k$, then $x = p^i$ for some integer $i$.
|
Formal statement is: lemma homotopic_paths_linear: fixes g h :: "real \<Rightarrow> 'a::real_normed_vector" assumes "path g" "path h" "pathstart h = pathstart g" "pathfinish h = pathfinish g" "\<And>t. t \<in> {0..1} \<Longrightarrow> closed_segment (g t) (h t) \<subseteq> S" shows "homotopic_paths S g h" Informal statement is: If $g$ and $h$ are paths with the same endpoints, and the line segments between $g(t)$ and $h(t)$ are contained in $S$ for all $t$, then $g$ and $h$ are homotopic in $S$.
|
module VariableName where
open import OscarPrelude
record VariableName : Set
where
constructor ⟨_⟩
field
name : Nat
open VariableName public
instance EqVariableName : Eq VariableName
Eq._==_ EqVariableName _ = decEq₁ (cong name) ∘ (_≟_ on name $ _)
|
(* Tree on Ssreflect *)
Require Import
Ssreflect.ssreflect
Ssreflect.ssrfun
Ssreflect.ssrbool
Ssreflect.eqtype
Ssreflect.ssrnat
Ssreflect.seq.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Delimit Scope tree_scope with tree.
Open Scope tree_scope.
(** * Definition of [tree] and [forest] *)
Inductive tree (T: Type) :=
| node (x: T)(c: forest T)
with
forest (T: Type) :=
| leaf
| sibl (t: tree T)(f: forest T).
Arguments leaf {T}.
(** ** Notations *)
Infix "-:" := node (at level 60, no associativity): tree_scope.
Notation "[*]" := leaf: tree_scope.
Infix "~+" := sibl (at level 60, right associativity): tree_scope.
Notation "[~ X + .. + Y ~]" := (X ~+ ( .. (Y ~+ [*]) ..)) (at level 60, right associativity): tree_scope.
(** ** Induction principles *)
Scheme tree_forest_ind := Induction for tree Sort Prop
with forest_tree_ind := Induction for forest Sort Prop.
Scheme tree_forest_rec := Induction for tree Sort Set
with forest_tree_rec := Induction for forest Sort Set.
Scheme tree_forest_rect := Induction for tree Sort Type
with forest_tree_rect := Induction for forest Sort Type.
(** *** Mutual Induction Principle *)
Combined Scheme tree_forest_mut_ind from
tree_forest_ind, forest_tree_ind.
(** ** map function *)
Fixpoint map_tree {A B: Type}(F: A -> B)(t: tree A): tree B :=
let (x, f) := t in F x -: map_forest F f with
map_forest {A B: Type}(F: A -> B)(f: forest A): forest B :=
if f is t ~+ f' then map_tree F t ~+ map_forest F f' else [*].
(** * Traverse [tree] and [forest] by pre- and post- order *)
Section Traverse.
Variables (T: Type).
Implicit Type t : tree T.
Implicit Type f : forest T.
Fixpoint size_tree t: nat :=
match t with
| x -: f => (size_forest f).+1
end with
size_forest f: nat :=
match f with
| [*] => 0
| t ~+ f => size_tree t + size_forest f
end.
Lemma size_tree_ltn0 t:
0 < size_tree t.
Proof.
by move: t => [x f] //.
Qed.
Lemma size_forest0leaf f:
size_forest f = 0 -> f = [*].
Proof.
move: f => [| t f] //= Heq.
by move: Heq (ltn_addr (size_forest f) (size_tree_ltn0 t)) => <-; rewrite ltnn.
Qed.
Fixpoint cat_forest f1 f2 :=
match f1 with
| [*] => f2
| t ~+ f => t ~+ (cat_forest f f2)
end.
Infix "+++" := cat_forest (at level 50, left associativity).
Lemma cat_forest_leaf f:
f +++ [*] = f.
Proof.
by elim: f => [| t f /= ->] //.
Qed.
Lemma cat_forest_app f1 f2 f3:
(f1 +++ f2) +++ f3 = f1 +++ (f2 +++ f3).
Proof.
by elim: f1 => [| t f /= ->] //.
Qed.
Fixpoint rev_tree t :=
match t with
| x -: f => x -: rev_forest f
end with
rev_forest f :=
match f with
| [*] => [*]
| t ~+ f => rev_forest f +++ (rev_tree t ~+[*])
end.
Lemma cat_rev_forest f1 f2:
rev_forest (f1 +++ f2) = rev_forest f2 +++ rev_forest f1.
Proof.
by elim: f1 => [/= | t f /= ->] //;
rewrite ?cat_forest_leaf ?cat_forest_app.
Qed.
Lemma rev_rev_id:
(forall t, rev_tree (rev_tree t) = t)
/\(forall f, rev_forest (rev_forest f) = f).
Proof.
apply: tree_forest_mut_ind.
by move => a c //= ->.
done.
by move => t Heqt f Heqf //=; rewrite cat_rev_forest /= Heqt Heqf.
Qed.
Fixpoint preorder_tree t :=
let: x -: f := t in [:: x & preorder_forest f ]
with preorder_forest f :=
if f is t ~+ f
then preorder_tree t ++ preorder_forest f
else [::].
Lemma preorder_f_forest_cat f1 f2:
preorder_forest (f1 +++ f2) = preorder_forest f1 ++ preorder_forest f2.
Proof.
by elim: f1 => [| t f /= ->] //; rewrite catA.
Qed.
Fixpoint postorder_tree t :=
let: x -: f := t in rcons (postorder_forest f) x
with postorder_forest f :=
if f is t ~+ f
then postorder_tree t ++ postorder_forest f
else [::].
Lemma pre_rev_eq_rev_post:
(forall t, preorder_tree (rev_tree t) = rev (postorder_tree t))/\
(forall f, preorder_forest (rev_forest f) = rev (postorder_forest f)).
Proof.
apply: tree_forest_mut_ind => [x c /= -> || /= t Heqt f Heqf] //;
first by rewrite rev_rcons.
by rewrite rev_cat -Heqt -Heqf preorder_f_forest_cat //= catA cats0.
Qed.
Lemma pre_rev_eq_rev_post_tree t:
preorder_tree (rev_tree t) = rev (postorder_tree t).
Proof.
apply pre_rev_eq_rev_post.
Qed.
Lemma pre_rev_eq_rev_post_forest f:
preorder_forest (rev_forest f) = rev (postorder_forest f).
Proof.
apply pre_rev_eq_rev_post.
Qed.
End Traverse.
Infix "+++" := cat_forest (at level 50, left associativity).
Lemma tree_forest_mut_rect:
forall (T: Type)(P : tree T -> Type) (Q : forest T -> Type),
(forall (x : T) (c : forest T), Q c -> P (node x c)) ->
Q leaf ->
(forall t, P t -> forall f, Q f -> Q (sibl t f)) ->
(forall t, P t) * (forall f, Q f).
Proof.
move => T P Q IHn IHl IHs.
exact (tree_forest_rect IHn IHl IHs, forest_tree_rect IHn IHl IHs).
Qed.
Section EqTree.
Variable T: eqType.
Implicit Type t : tree T.
Implicit Type f : forest T.
Fixpoint eqtree t1 t2 :=
match t1, t2 with
| x1-:c1, x2-:c2 => (x1 == x2) && (eqforest c1 c2)
end with
eqforest f1 f2 :=
match f1, f2 with
| [*], [*] => true
| t1 ~+ f1, t2 ~+ f2 => (eqtree t1 t2) && (eqforest f1 f2)
| _, _ => false
end.
Lemma eqtreeP_eqforestP:
(Equality.axiom eqtree)*(Equality.axiom eqforest).
Proof.
apply: tree_forest_mut_rect
=> [x f IHf [x' f'] | [| t f] | t IHt f IHf [|t' f']] /=; try by constructor.
- case: (x =P x') => [<- | Hneq]; last by right; case.
by case: (IHf f') => [<- | Hneq] /=; [ left | right; case ].
- move: (IHt t') => [<- | Hneq]; last by right; case.
by move: (IHf f') => [<- | Hneq]; [left | right; case].
Qed.
Definition eqtreeP: Equality.axiom eqtree := fst eqtreeP_eqforestP.
Definition eqforestP: Equality.axiom eqforest := snd eqtreeP_eqforestP.
Canonical tree_eqMixin := EqMixin eqtreeP.
Canonical tree_eqType := Eval hnf in EqType (tree T) (EqMixin eqtreeP).
Lemma eqtreeE : eqtree = eq_op.
Proof.
by [].
Qed.
Canonical forest_eqMixin := EqMixin eqforestP.
Canonical forest_eqType := Eval hnf in EqType (forest T) forest_eqMixin.
Lemma eqforestE : eqforest = eq_op.
Proof.
by [].
Qed.
Lemma eqtree_node x1 x2 f1 f2:
(x1 -: f1 == x2 -: f2) = (x1 == x2) && (f1 == f2).
Proof.
by [].
Qed.
Lemma eqforest_sibl t1 t2 f1 f2:
(t1 ~+ f1 == t2 ~+ f2) = (t1 == t2) && (f1 == f2).
Proof.
by [].
Qed.
Fixpoint mem_tree (t: tree T): pred T :=
match t with
| node y f => xpredU1 y (mem_forest f)
end
with mem_forest (f: forest T): pred T :=
match f with
| leaf => xpred0
| sibl t f => xpredU (mem_tree t) (mem_forest f)
end.
Definition eqtree_class := tree T.
Identity Coercion tree_of_eqtree : eqtree_class >-> tree.
Coercion pred_of_eq_tree (t : eqtree_class) : pred_class := [eta mem_tree t].
Canonical tree_predType := @mkPredType T (tree T) pred_of_eq_tree.
Canonical mem_tree_predType := mkPredType mem_tree.
Definition eqforest_class := forest T.
Identity Coercion forest_of_eqforest : eqforest_class >-> forest.
Coercion pred_of_eq_forest (f : eqforest_class) : pred_class := [eta mem_forest f].
Canonical forest_predType := @mkPredType T (forest T) pred_of_eq_forest.
Canonical mem_forest_predType := mkPredType mem_forest.
Lemma in_node y f x:
(x \in node y f) = (x == y) || (x \in f).
Proof.
by [].
Qed.
Lemma in_leaf x:
(x \in leaf) = false.
Proof.
by [].
Qed.
Lemma in_sibl x t f:
(x \in sibl t f) = (x \in t) || (x \in f).
Proof.
by [].
Qed.
Lemma mem_tree1 x y:
(x \in node y leaf) = (x == y).
Proof.
by rewrite in_node orbF //.
Qed.
Let inE := (mem_tree1, in_node, inE).
Lemma mem_f_cat x f1 f2 :
(x \in f1 +++ f2) = (x \in f1) || (x \in f2).
Proof.
elim: f1 => [//=| t f /= Heq].
by rewrite in_sibl in_sibl Heq orbA //=.
Qed.
End EqTree.
Definition inE := (mem_tree1, in_node, inE).
Section TravIn.
Variables (T: eqType).
Implicit Type t : tree T.
Implicit Type f : forest T.
Lemma traverse_correct_pre:
(forall t x, (x \in t) == (x \in preorder_tree t))/\
(forall f x, (x \in f) == (x \in preorder_forest f)).
Proof.
apply tree_forest_mut_ind.
- move=> x f Heq y.
by rewrite in_node in_cons; move: (Heq y) => /eqP ->.
- move=> x //=.
- move=> t Heqt f Heqf x /=.
by rewrite in_sibl mem_cat; move: (Heqt x) (Heqf x) => /eqP -> /eqP ->.
Qed.
Theorem traverse_correct_pre_tree t x:
(x \in t) == (x \in preorder_tree t).
Proof.
move: t x; apply traverse_correct_pre.
Qed.
Lemma traverse_correct_post:
(forall t x, (x \in t) == (x \in postorder_tree t))/\
(forall f x, (x \in f) == (x \in postorder_forest f)).
Proof.
apply tree_forest_mut_ind.
- move=> x f Heq y.
by rewrite in_node mem_rcons; move: (Heq y) => /eqP ->.
- move=> x //=.
- move=> t Heqt f Heqf x.
by rewrite in_sibl mem_cat; move: (Heqt x) (Heqf x) => /eqP -> /eqP ->.
Qed.
Theorem traverse_correct_post_tree t x:
(x \in t) == (x \in postorder_tree t).
Proof.
move: t x; apply traverse_correct_post.
Qed.
End TravIn.
|
During the post-Civil-War period, leaders such as George T. Ruby and Norris Wright Cuney, who headed the Texas Republican Party and promoted civil rights for freedmen, helped to dramatically improve educational and employment opportunities for blacks in Galveston and in Texas. Cuney established his own business of stevedores and a union of black dockworkers to break the white monopoly on dock jobs. Galveston was a cosmopolitan city and one of the more successful during Reconstruction; the Freedmen's Bureau was headquartered here. German families sheltered teachers from the North, and hundreds of freedmen were taught to read. Its business community promoted progress, and immigrants continued to stay after arriving at this port of entry.
|
!==============================================================================!
module Var_Mod
!------------------------------------------------------------------------------!
use Grid_Mod
!------------------------------------------------------------------------------!
implicit none
!------------------------------------------------------------------------------!
!--------------!
! Var type !
!--------------!
type Var_Type
type(Grid_Type), pointer :: pnt_grid ! grid for which it is defined
character(len=4) :: name ! variable name, always
! uppercase and very short
character(len=4) :: flux_name ! variable flux name, always
! uppercase and very short
real, allocatable :: n(:) ! new value
real, allocatable :: o(:), oo(:) ! old and older then old
real, allocatable :: a(:) ! advection fluxes
real, allocatable :: b(:) ! boundary value
real, allocatable :: c(:) ! cross-difusion fluxes
real, allocatable :: x(:), y(:), z(:) ! gradient components
real, allocatable :: q(:) ! flux of a variable
real :: sigma ! sigma
real :: res ! residual after lin. solver
real :: units(5) ! mass, length, time,
! temperature, angle
! Boundary cell type (important for scalars, since they
! can have different boundary conditions at the walls)
integer, allocatable :: bnd_cond_type(:)
! Parameters for numerical solution of the variable
character(len=80) :: precond
integer :: adv_scheme ! advection scheme
real :: blend ! blending (1.0 central; 0.0 upwind)
    integer           :: td_scheme   ! time-discretization
real :: tol ! linear solver tolerance
real :: urf ! under-relaxation factor
integer :: niter ! number of iterations
real, allocatable :: max(:) ! max and min around a face ...
real, allocatable :: min(:) ! important for advection schemes
end type
contains
include 'Var_Mod/Allocate_New_Only.f90'
include 'Var_Mod/Allocate_Solution.f90'
include 'Var_Mod/Bnd_Cond_Name.f90'
include 'Var_Mod/Bnd_Cond_Type.f90'
end module
|
// This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
/** python_procedure_test.cc -*- C++ -*-
Francois Maillet, 9 mars 2015
Copyright (c) 2015 mldb.ai inc. All rights reserved.
*/
#include "mldb/server/mldb_server.h"
#include "mldb/http/http_rest_proxy.h"
#include "mldb/builtin/plugin_resource.h"
#include "mldb/core/procedure.h"
#include "mldb/types/value_description.h"
#define BOOST_TEST_MAIN
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
using namespace std;
using namespace MLDB;
BOOST_AUTO_TEST_CASE( test_two_members )
{
MldbServer server;
server.init();
string httpBoundAddress = server.bindTcp(PortRange(17000,18000), "127.0.0.1");
cerr << "http listening on " << httpBoundAddress << endl;
server.start();
HttpRestProxy proxy(httpBoundAddress);
PolyConfig pluginConfig;
pluginConfig.type = "python";
PluginResource plugRes;
plugRes.source.main = R"foo(
def doTrain(mldb, trainingConfig):
mldb.log(str(trainingConfig))
print "wahou"
return {"status": "OK"}
mldb.create_procedure("my_procedure", "description of my procedure", doTrain)
print "pwet";
)foo";
pluginConfig.params = plugRes;
auto putResult = proxy.put("/v1/plugins/myplugin",
jsonEncode(pluginConfig));
cerr << putResult << endl;
BOOST_CHECK_EQUAL(putResult.code(), 201);
// Check procedure was added successfully
auto getResult = proxy.get("/v1/types/procedures");
cerr << "getResult = " << getResult << endl;
BOOST_REQUIRE(getResult.body().find("my_procedure") != string::npos);
// BOOST_CHECK_EQUAL(getResult.jsonBody()["how"].asString(), "are you");
Json::Value training;
training["id"] = "my_procedure_train";
training["type"] = "my_procedure";
Json::Value customConf;
customConf["param"] = 5;
training["params"] = customConf;
putResult = proxy.put("/v1/procedures/my_procedure_train",
jsonEncode(training));
cerr << putResult << endl;
BOOST_CHECK_EQUAL(putResult.code(), 201);
ProcedureRunConfig trainConf;
trainConf.id = "1";
putResult = proxy.put("/v1/procedures/my_procedure_train/runs/1", jsonEncode(trainConf));
cerr << putResult << endl;
BOOST_CHECK_EQUAL(putResult.code(), 201);
}
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.montgomery64_2e336m17_6limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition opp :
{ opp : feBW_small -> feBW_small
| forall a, phiM_small (opp a) = F.opp (phiM_small a) }.
Proof.
Set Ltac Profiling.
Time synthesize_opp ().
Show Ltac Profile.
Time Defined.
Print Assumptions opp.
|
#!/usr/bin/python3
import rospy
import numpy
import scipy.spatial
from matplotlib import pyplot
from hdl_localization.msg import *
class Plotter(object):
def __init__(self):
pyplot.ion()
pyplot.show(block=False)
self.status_buffer = []
self.timer = rospy.Timer(rospy.Duration(0.1), self.timer_callback)
self.status_sub = rospy.Subscriber('/status', ScanMatchingStatus, self.status_callback)
def status_callback(self, status_msg):
self.status_buffer.append(status_msg)
if len(self.status_buffer) > 50:
self.status_buffer = self.status_buffer[-50:]
def timer_callback(self, event):
if len(self.status_buffer) < 2:
return
errors = {}
for status in self.status_buffer:
for label, error in zip(status.prediction_labels, status.prediction_errors):
if label.data not in errors:
errors[label.data] = []
quat = [error.rotation.x, error.rotation.y, error.rotation.z, error.rotation.w]
trans = [error.translation.x, error.translation.y, error.translation.z]
t = status.header.stamp.secs + status.header.stamp.nsecs / 1e9
t_error = numpy.linalg.norm(trans)
r_error = numpy.linalg.norm(scipy.spatial.transform.Rotation.from_quat(quat).as_rotvec())
if len(errors[label.data]) and abs(errors[label.data][-1][0] - t) > 1.0:
errors[label.data] = []
errors[label.data].append((t, t_error, r_error))
pyplot.clf()
for label in errors:
errs = numpy.float64(errors[label])
            pyplot.subplot(211)
pyplot.plot(errs[:, 0], errs[:, 1], label=label)
            pyplot.subplot(212)
pyplot.plot(errs[:, 0], errs[:, 2], label=label)
        pyplot.subplot(211)
pyplot.ylabel('trans error')
        pyplot.subplot(212)
pyplot.ylabel('rot error')
pyplot.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=len(errors))
pyplot.gcf().canvas.flush_events()
# pyplot.pause(0.0001)
def main():
rospy.init_node('status_plotter')
node = Plotter()
rospy.spin()
if __name__ == '__main__':
main()
|
open import Signature
import Program
module Rewrite (Σ : Sig) (V : Set) (P : Program.Program Σ V) where
open import Terms Σ
open import Program Σ V
open import Data.Empty renaming (⊥ to ∅)
open import Data.Unit
open import Data.Product as Prod renaming (Σ to ⨿)
open import Data.Sum as Sum
open import Data.Fin
open import Relation.Nullary
open import Relation.Unary
open import Relation.Binary.PropositionalEquality using (_≡_; refl; subst)
data _⟿_ (t : T V) : T V → Set where
rew-step : (cl : dom P) (i : dom (getb P cl)) {σ : Subst V V} →
matches (geth P cl) t σ → -- (mgm t (geth P cl) σ) →
t ⟿ app σ (get (getb P cl) i)
Val : Pred (T V) _
Val t = (cl : dom P) (i : dom (getb P cl)) {σ : Subst V V} →
¬ (matches (geth P cl) t σ)
no-rewrite-on-vals : (t : T V) → Val t → (s : T V) → ¬ (t ⟿ s)
no-rewrite-on-vals t p ._ (rew-step cl i q) = p cl i q
data _↓_ (t : T V) : T V → Set where
val : Val t → t ↓ t
step : (r s : T V) → r ↓ s → t ⟿ r → t ↓ s
{- Strongly normalising terms w.r.t. P are either in normal form, i.e. values,
or for every clause that matches, every rewrite step must be SN.
This is an adaption of the usual (constructive) definition.
-}
data SN (t : T V) : Set where
val-sn : Val t → SN t
steps-sn : (cl : dom P) (i : dom (getb P cl)) {σ : Subst V V} →
(matches t (geth P cl) σ) →
SN (app σ (get (getb P cl) i)) →
SN t
-- | Determines whether a term t is derivable from an axiom, i.e., whether
-- there is a clause " ⇒ p" such that p matches t.
Axiom : Pred (T V) _
Axiom t = ∃₂ λ cl σ → (mgm t (geth P cl) σ) × (domEmpty (getb P cl))
-- | An inductively valid term is derivable in finitely many steps from
-- axioms.
data Valid (t : T V) : Set where
val-sn : Axiom t → Valid t
steps-sn : (cl : dom P) (i : dom (getb P cl)) {σ : Subst V V} →
(matches t (geth P cl) σ) →
Valid (app σ (get (getb P cl) i)) →
Valid t
{-
⊥ : {X : Set} → X ⊎ ⊤
⊥ = inj₂ tt
record Rew-Branch (F : T V → Set) (t : T V) : Set where
constructor prf-branch
field
clause : dom P
matcher : Subst V V
isMgm : mgm t (geth P clause) matcher
next : (i : dom (getb P clause)) →
F (app matcher (get (getb P clause) i))
-- | Set of rewrite trees starting in t that use the rules given in P.
-- If the tree is ⊥, then t cannot be rewritten by any of the rules of P.
data Rew (t : T V) : Set where
in-prf : Rew-Branch Rew t ⊎ ⊤ → Rew t
-- | Just as Rew, only that we also allow infinite rewriting sequences.
record Rew∞ (t : T V) : Set where
coinductive
field out-prf : Rew-Branch Rew∞ t ⊎ ⊤
open Rew∞
out-prf⁻¹ : ∀{t} → Rew-Branch Rew∞ t ⊎ ⊤ → Rew∞ t
out-prf (out-prf⁻¹ b) = b
-- | Finite rewriting trees are included in the set of the possibly infinite
-- ones.
χ-prf : ∀{t} → Rew t → Rew∞ t
χ-prf (in-prf (inj₁ (prf-branch c m isMgm next))) =
out-prf⁻¹ (inj₁ (prf-branch c m isMgm (λ i → χ-prf (next i))))
χ-prf (in-prf (inj₂ tt)) = out-prf⁻¹ ⊥
Rew-Step : (F : {s : T V} → Rew∞ s → Set) → {t : T V} (R : Rew∞ t) → Set
Rew-Step F R with out-prf R
Rew-Step F R | inj₁ (prf-branch clause matcher isMgm next) = {!!}
Rew-Step F R | inj₂ tt = ∅
data Path {t : T V} (R : Rew∞ t) : Set where
root : Path R
step : {!!} → Path R
-}
|
(* Title:  Heap.thy
   Author: Danijela Petrovi\'c, Faculty of Mathematics, University of Belgrade *)
header {* Verification of Heap Sort *}
theory Heap
imports RemoveMax
begin
subsection {* Defining tree and properties of heap *}
datatype 'a Tree = "E" | "T" 'a "'a Tree" "'a Tree"
text{* The empty tree is represented by {\em E}, and {\em T\ \ \ 'a\ \ \ 'a
Tree\ \ \ 'a Tree} represents a node whose root element is of
type {\em 'a} and whose left and right branches are again trees of
type {\em 'a}. *}
primrec size :: "'a Tree \<Rightarrow> nat" where
"size E = 0"
| "size (T v l r) = 1 + size l + size r"
text{* Definition of the function that makes a multiset from the given tree: *}
primrec multiset where
"multiset E = {#}"
| "multiset (T v l r) = multiset l + {#v#} + multiset r"
primrec val where
"val (T v _ _) = v"
text{* Definition of the function that has the value {\em True} if the tree is
a heap, and {\em False} otherwise: *}
fun is_heap :: "'a::linorder Tree \<Rightarrow> bool" where
"is_heap E = True"
| "is_heap (T v E E) = True"
| "is_heap (T v E r) = (v \<ge> val r \<and> is_heap r)"
| "is_heap (T v l E) = (v \<ge> val l \<and> is_heap l)"
| "is_heap (T v l r) = (v \<ge> val r \<and> is_heap r \<and> v \<ge> val l \<and> is_heap l)"
lemma heap_top_geq:
assumes "a \<in># multiset t" "is_heap t"
shows "val t \<ge> a"
using assms
by (induct t rule: is_heap.induct) (auto split: split_if_asm)
lemma heap_top_max:
assumes "t \<noteq> E" "is_heap t"
shows "val t = Max (set_of (multiset t))"
proof (rule Max_eqI[symmetric])
fix y
assume "y \<in> set_of (multiset t)"
thus "y \<le> val t"
using heap_top_geq[of t y] `is_heap t`
by simp
next
show "val t \<in> set_of (multiset t)"
using `t \<noteq> E`
by (cases t) auto
qed simp
text{* The next step is to define the function {\em remove\_max}, but the
question is whether the implementation of {\em remove\_max} depends on
the implementations of the functions {\em is\_heap} and {\em multiset}. The
answer is negative. This suggests that another step of refinement
could be added before the definition of the function {\em
remove\_max}. Additionally, there are other reasons why this should
be done: for example, the function {\em remove\_max} could be implemented
in a functional or in an imperative manner.
*}
locale Heap = Collection empty is_empty of_list multiset for
empty :: "'b" and
is_empty :: "'b \<Rightarrow> bool" and
of_list :: "'a::linorder list \<Rightarrow> 'b" and
multiset :: "'b \<Rightarrow> 'a::linorder multiset" +
fixes as_tree :: "'b \<Rightarrow> 'a::linorder Tree"
  -- {* This function is not very important, but it is needed in order to avoid problems with types and to detect that the observed object is a tree. *}
fixes remove_max :: "'b \<Rightarrow> 'a \<times> 'b"
assumes multiset: "multiset l = Heap.multiset (as_tree l)"
assumes is_heap_of_list: "is_heap (as_tree (of_list i))"
assumes as_tree_empty: "as_tree t = E \<longleftrightarrow> is_empty t"
assumes remove_max_multiset':
"\<lbrakk>\<not> is_empty l; (m, l') = remove_max l\<rbrakk> \<Longrightarrow> multiset l' + {#m#} = multiset l"
assumes remove_max_is_heap:
"\<lbrakk>\<not> is_empty l; is_heap (as_tree l); (m, l') = remove_max l\<rbrakk> \<Longrightarrow>
is_heap (as_tree l')"
assumes remove_max_val:
"\<lbrakk> \<not> is_empty t; (m, t') = remove_max t\<rbrakk> \<Longrightarrow> m = val (as_tree t)"
text{* It is very easy to prove that the locale {\em Heap} is a sublocale of the locale {\em RemoveMax}: *}
sublocale Heap <
RemoveMax empty is_empty of_list multiset remove_max "\<lambda> t. is_heap (as_tree t)"
proof
fix x
show "is_heap (as_tree (of_list x))"
by (rule is_heap_of_list)
next
fix l m l'
assume "\<not> is_empty l" "(m, l') = remove_max l"
thus "multiset l' + {#m#} = multiset l"
by (rule remove_max_multiset')
next
fix l m l'
assume "\<not> is_empty l" "is_heap (as_tree l)" "(m, l') = remove_max l"
thus "is_heap (as_tree l')"
by (rule remove_max_is_heap)
next
fix l m l'
assume "\<not> is_empty l" "is_heap (as_tree l)" "(m, l') = remove_max l"
thus "m = Max (set l)"
unfolding set_def
using heap_top_max[of "as_tree l"] remove_max_val[of l m l']
using multiset is_empty_inj as_tree_empty
by auto
qed
primrec in_tree where
"in_tree v E = False"
| "in_tree v (T v' l r) \<longleftrightarrow> v = v' \<or> in_tree v l \<or> in_tree v r"
lemma is_heap_max:
assumes "in_tree v t" "is_heap t"
shows "val t \<ge> v"
using assms
apply (induct t rule:is_heap.induct)
by auto
end
|
module ElabImplLambda
import Language.Reflection
%language ElabReflection
derive : {ty : Type} -> Elab ty
derive = check $
ILam EmptyFC MW ExplicitArg (Just `{x}) `(Nat) $
ILam EmptyFC MW AutoImplicit Nothing `(List Nat) $
`(x + 1)
derived : Nat -> List Nat => Nat
derived = %runElab derive
|