Datasets: AI4M
#!/usr/bin/env julia
using Gtk, Gtk.ShortNames, Printf
using Random: rand!

row, column, space = 400, 600, 20

mutable struct GOL
    tmpmat::Matrix{Bool}
    now::Matrix{Bool}
    row::Int
    column::Int
    function GOL()
        row_cell = div(row, space)
        column_cell = div(column, space)
        new(falses(row_cell, column_cell), falses(row_cell, column_cell), row_cell, column_cell)
    end
end

gol = GOL()

c = @GtkCanvas(column, row)
win = GtkWindow("Game of Life")
hbox = GtkBox(:h)
vbox = GtkBox(:v)
button_reset = GtkButton("Reset")
button_random = GtkButton("Random")
button_start = GtkButton("Start")
push!(vbox, button_reset)
push!(vbox, button_random)
push!(vbox, button_start)
push!(win, hbox)
push!(hbox, c)
push!(hbox, vbox)
set_gtk_property!(hbox, :expand, c, true)
showall(win)

"white background"
function makebroad!(c)
    @guarded draw(c) do widget
        ctx = getgc(c)
        rectangle(ctx, 0, 0, column, row)
        set_source_rgb(ctx, 1, 1, 1)
        fill(ctx)
    end
end

"make grid"
function grid!(c)
    @guarded draw(c) do widget
        n = 8
        ctx = getgc(c)
        h = height(c)
        w = width(c)
        for x in 0:space:w
            move_to(ctx, x, 0)
            line_to(ctx, x, h)
        end
        for y in 0:space:h
            move_to(ctx, 0, y)
            line_to(ctx, w, y)
        end
        set_source_rgb(ctx, 0, 0, 0)
        stroke(ctx)
        reveal(widget)
    end
    show(c)
end
# grid!(c)

function initilize!(c)
    makebroad!(c)
    grid!(c)
end

initilize!(c)

function paintcell!(c, x, y, red=0, green=1, blue=1)
    @guarded draw(c) do widget
        ctx = getgc(c)
        set_source_rgb(ctx, red, green, blue)
        rectangle(ctx, x*space, y*space, space, space)
        fill(ctx)
        reveal(widget)
    end
    # show(c)
end

function board_draw!(c, gol)
    row, column = gol.row, gol.column
    # grid!(c)
    for x in 0:column-1
        for y in 0:row-1
            if gol.now[y+1, x+1]
                paintcell!(c, x, y)
            else
                paintcell!(c, x, y, 1, 1, 1)
            end
        end
    end
    # grid!(c)
end

c.mouse.button1press = @guarded (widget, event) -> begin
    x, y = Int(div(event.x, space)), Int(div(event.y, space))
    println(x, " ", y)
    paintcell!(c, x, y)
    grid!(c)
    gol.now[y+1, x+1] = true
end

function board_reset!()
    initilize!(c)
    row, column = gol.row, gol.column
    fill!(gol.now, false)
    fill!(gol.tmpmat, false)
    # show(c)
    return
end
signal_connect(x -> board_reset!(), button_reset, "clicked")

function board_rand!()
    # initilize!(c)
    row, column = gol.row, gol.column
    rand!(gol.now)
    fill!(gol.tmpmat, false)
    board_draw!(c, gol)
    println("rand")
    # show(c)
    return
end
signal_connect(x -> board_rand!(), button_random, "clicked")

function next_gen_state(gol, x, y)
    row, column = gol.row, gol.column
    nowboard = gol.now
    num = 0
    for i in -1:1
        for j in -1:1
            num += nowboard[mod1(x+i, row), mod1(y+j, column)]
        end
    end
    # println(num)
    if !nowboard[x, y]  # the cell was dead
        if num == 3
            return true
        else
            return false
        end
    else
        if num <= 2 || num >= 5
            return false
        else
            return true
        end
    end
end

function next_gen!(gol)
    row, column = gol.row, gol.column
    for x in 1:row
        for y in 1:column
            gol.tmpmat[x, y] = next_gen_state(gol, x, y)
        end
    end
    copyto!(gol.now, gol.tmpmat)
    return
end

ngen = 0

function board_start!()
    # initilize!(c)
    row, column = gol.row, gol.column
    for i in 1:50
        sleep(0.1)
        next_gen!(gol)
        # fill!(gol.tmpmat, false)
        board_draw!(c, gol)
        global ngen += 1
        @printf("%d\r", ngen)
        # show(c)
    end
    return
end
signal_connect(x -> board_start!(), button_start, "clicked")

showall(win)

if !isinteractive()
    cond = Condition()
    signal_connect(win, :destroy) do widget
        notify(cond)
    end
    wait(cond)
end
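A quick sanity check of the same toroidal update rule, detached from the Gtk GUI above. This is a sketch, not part of the program; `life_step` is an illustrative name:

# Count the 3x3 block around each cell (including the cell itself),
# wrapping at the edges with mod1, exactly as next_gen_state does.
function life_step(board::AbstractMatrix{Bool})
    r, c = size(board)
    next = falses(r, c)
    for x in 1:r, y in 1:c
        n = sum(board[mod1(x + i, r), mod1(y + j, c)] for i in -1:1, j in -1:1)
        # live cell: survives iff the block count is 3 or 4 (2 or 3 neighbours);
        # dead cell: born iff the block count is exactly 3.
        next[x, y] = board[x, y] ? (3 <= n <= 4) : (n == 3)
    end
    return next
end

board = falses(5, 5)
board[2, 1:3] .= true        # a horizontal blinker
println(life_step(board))    # one step turns it into a vertical blinker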
-- ------------------------------------------------------------- [ Strings.idr ]
-- Module      : Lightyear.Strings
-- Description : String-related parsers.
--
-- This code is distributed under the BSD 2-clause license.
-- See the file LICENSE in the root directory for its full text.
-- --------------------------------------------------------------------- [ EOH ]
module Lightyear.Strings

import public Data.Vect
import public Data.Fin
import public Control.Monad.Identity

import Lightyear.Core
import Lightyear.Combinators
import Lightyear.Errmsg
import Lightyear.Char

%access export

-- -------------------------------------------------------- [ Helper Functions ]

private
nat2int : Nat -> Int
nat2int Z     = 0
nat2int (S x) = 1 + nat2int x

implementation Layout String where
  lineLengths = map (nat2int . Prelude.Strings.length) . lines

-- --------------------------------------------------------- [ A String Parser ]

||| Parsers, specialised to Strings
public export
Parser : Type -> Type
Parser = ParserT String Identity

||| Run a parser against an input string
parse : Parser a -> String -> Either String a
parse f s = let Id r = execParserT f s in
  case r of
    Success _ x => Right x
    Failure es  => Left $ formatError s es

implementation Stream Char String where
  uncons s with (strM s)
    uncons ""             | StrNil       = Nothing
    uncons (strCons x xs) | StrCons x xs = Just (x, xs)

-- ---------------------------------------------------------- [ Reserved Stuff ]

||| A parser that matches a particular string
string : Monad m => String -> ParserT String m String
string s = pack <$> (traverse char $ unpack s) <?> "string " ++ show s

-- ------------------------------------------------------------------- [ Space ]

||| A simple lexer that strips white space from tokens
lexeme : Monad m => ParserT String m a -> ParserT String m a
lexeme p = p <* spaces

-- ------------------------------------------------------------------ [ Tokens ]

||| A parser that matches a specific string, then skips following whitespace
token : Monad m => String -> ParserT String m ()
token s = lexeme (skip (string s)) <?> "token " ++ show s

||| Parses ',' and trailing whitespace.
comma : Monad m => ParserT String m ()
comma = token "," <?> "Comma"

||| Parses '=' and trailing whitespace.
equals : Monad m => ParserT String m ()
equals = token "=" <?> "equals"

||| Parses '.' and trailing whitespace.
dot : Monad m => ParserT String m ()
dot = token "." <?> "dot"

||| Parses ':' and trailing whitespace.
colon : Monad m => ParserT String m ()
colon = token ":" <?> "colon"

||| Parses ';' and trailing whitespace.
semi : Monad m => ParserT String m ()
semi = token ";" <?> "semi colon"

-- -------------------------------------------------- [ Delineated Expressions ]

||| Parses `p` enclosed in parentheses and returns the result of `p`.
parens : Monad m => ParserT String m a -> ParserT String m a
parens p = between (token "(") (token ")") p

||| Parses `p` enclosed in brackets and returns the result of `p`.
brackets : Monad m => ParserT String m a -> ParserT String m a
brackets p = between (token "[") (token "]") p

||| Parses `p` enclosed in braces and returns the result of `p`.
braces : Monad m => ParserT String m a -> ParserT String m a
braces p = between (token "{") (token "}") p

||| Parses `p` enclosed in angles and returns the result of `p`.
angles : Monad m => ParserT String m a -> ParserT String m a
angles p = between (token "<") (token ">") p

||| Parses `p` enclosed in single quotes and returns the result of `p`.
||| Not to be used for `charLiterals`.
squote : Monad m => ParserT String m a -> ParserT String m a
squote p = between (char '\'') (lexeme $ char '\'') p

||| Parses `p` enclosed in double quotes and returns the result of `p`.
||| Not to be used for `stringLiterals`.
dquote : Monad m => ParserT String m a -> ParserT String m a
dquote p = between (char '\"') (lexeme $ char '\"') p

||| Collect the literal string contained between two characters
quoted' : Monad m => Char -> Char -> ParserT String m String
quoted' l r = map pack $ between (char l) (lexeme $ char r) (some (satisfy (/= r)))

||| Literal string between two identical characters
quoted : Monad m => Char -> ParserT String m String
quoted c = quoted' c c

-- --------------------------------------------------- [ Separated Expressions ]

||| Parses /one/ or more occurrences of `p` separated by `comma`.
commaSep1 : Monad m => ParserT String m a -> ParserT String m (List a)
commaSep1 p = p `sepBy1` comma

||| Parses /zero/ or more occurrences of `p` separated by `comma`.
commaSep : Monad m => ParserT String m a -> ParserT String m (List a)
commaSep p = p `sepBy` comma

||| Parses /one/ or more occurrences of `p` separated by `semi`.
semiSep1 : Monad m => ParserT String m a -> ParserT String m (List a)
semiSep1 p = p `sepBy1` semi

||| Parses /zero/ or more occurrences of `p` separated by `semi`.
semiSep : Monad m => ParserT String m a -> ParserT String m (List a)
semiSep p = p `sepBy` semi

||| Run some parser `p` until the second parser is encountered,
||| collecting a list of successes for `p`; the result of the second
||| parser is dropped.
|||
||| Primarily useful for collecting single line comments and other
||| similar verbatim environments.
manyTill : Monad m => ParserT String m a -> ParserT String m b
                   -> ParserT String m (List a)
manyTill p end = scan
  where
    scan : Monad m => ParserT String m (List a)
    scan = do { end; return List.Nil } <|>
           do { x <- p; xs <- scan; return (x::xs) }

-- -------------------------------------------------------- [ Testing Function ]

testParser : Parser a -> String -> IO (Maybe a)
testParser p s = case parse p s of
  Left  e => putStrLn e *> pure Nothing
  Right x => pure (Just x)

-- --------------------------------------------------------------------- [ EOF ]
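A small usage sketch for the combinators above, assuming the Lightyear package is installed; `Main` and `items` are illustrative names, not part of the library:

module Main

import Lightyear.Char
import Lightyear.Strings

-- parse a parenthesised, comma-separated list of 'a' characters
items : Parser (List Char)
items = parens (commaSep (lexeme (char 'a')))

main : IO ()
main = do _ <- testParser items "(a, a, a)"
          pure ()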
Require Import ClassDatatypesIface.
Require Import CertRuntimeTypesIface.
Require Import JVMState.
Require Import List.
Require Import NativeMethods.
Require Import BasicMachineTypes.
Require Import ClasspoolIface.
Require Import AssignabilityIface.
Require Import ResolutionIface.
Require Import VirtualMethodLookupIface.
Require Import ResourceAlgebra.
Require Import AnnotationIface.

Module Type FileImpl.

Axiom nint : Set.

(* Not sure how much I can rely on evaluation order... *)
Axiom open_file : nint -> nint.
Axiom read_int : nint -> nint.
Axiom close_file : nint -> nint.

Axiom classnames : Set.
Axiom methodnames : Set.

Axiom fileclass : classnames.
Axiom openmeth : methodnames.
Axiom readmeth : methodnames.
Axiom closemeth : methodnames.

End FileImpl.

Module FileNativeMethods
  (B : BASICS)
  (RA : RESOURCE_ALGEBRA B)
  (ANN : ANNOTATION B)
  (C : CLASSDATATYPES B ANN)
  (CP : CLASSPOOL B ANN C)
  (A : ASSIGNABILITY B ANN C CP)
  (R : RESOLUTION B ANN C CP A)
  (VM : VIRTUALMETHODLOOKUP B ANN C CP A)
  (RDT : CERTRUNTIMETYPES B ANN C CP A)
  (JVM : JVMSTATE B RA ANN C CP A R VM RDT)
  (F : FileImpl with Definition nint := B.Int32.t
                with Definition classnames := B.Classname.t
                with Definition methodnames := B.Methodname.t)
  <: NATIVE_METHODS B RA ANN C CP A R VM RDT JVM.

Inductive resultval : Type :=
| void : resultval
| val : RDT.rt_val -> resultval
| exn : Heap.addr_t -> resultval.

Record result : Type :=
  { resval   : resultval
  ; classes  : CP.cert_classpool
  ; heap     : RDT.cert_heap classes
  ; static   : RDT.cert_fieldstore classes heap
  ; res      : RA.res
  ; reslimit : RA.res
  }.

Definition native_invoke := fun cls mth args state =>
  if B.Classname.eq_dec (C.class_name cls) F.fileclass then
    if B.Methodname.eq_dec (C.method_name mth) F.openmeth then
      match args with
      | RDT.rt_int file::nil =>
        match state with
        | RDT.mkState fs classes heap static res reslimit =>
          Some (Build_result (val (RDT.rt_int (F.open_file file))) classes heap static res reslimit)
        end
      | _ => None (* type error *)
      end
    else if B.Methodname.eq_dec (C.method_name mth) F.readmeth then
      match args with
      | RDT.rt_int file::nil =>
        match state with
        | RDT.mkState fs classes heap static res reslimit =>
          Some (Build_result (val (RDT.rt_int (F.read_int file))) classes heap static res reslimit)
        end
      | _ => None (* type error *)
      end
    else if B.Methodname.eq_dec (C.method_name mth) F.closemeth then
      match args with
      | RDT.rt_int file::nil =>
        match state with
        | RDT.mkState fs classes heap static res reslimit =>
          Some (Build_result (val (RDT.rt_int (F.close_file file))) classes heap static res reslimit)
        end
      | _ => None (* type error *)
      end
    else None (* unknown method, maybe should throw exception *)
  else None (* unknown class, maybe should throw exception *).

End FileNativeMethods.
open import Level using (Level)
open import Function using (_∘_; id; flip)
open import Data.Fin as Fin using (fromℕ)
open import Data.Nat as Nat using (ℕ; suc; zero; pred; _+_; _⊔_)
open import Data.Nat.Properties using (≤-decTotalOrder)
open import Data.List as List using (List; []; _∷_; [_]; concatMap; _++_; length; map)
open import Data.Vec as Vec using (Vec; []; _∷_; _∷ʳ_; reverse; initLast; toList)
open import Data.Product as Prod using (∃; _×_; _,_; proj₁; proj₂)
open import Data.Maybe as Maybe using (Maybe; just; nothing; maybe)
open import Data.Sum as Sum using (_⊎_; inj₁; inj₂)
open import Data.Integer as Int using (ℤ; -[1+_]; +_) renaming (_≟_ to _≟-Int_)
open import Relation.Nullary using (Dec; yes; no)
open import Relation.Nullary.Decidable using (map′)
open import Relation.Binary using (module DecTotalOrder)
open import Relation.Binary.PropositionalEquality using (_≡_; refl; cong; sym)
open import Reflection renaming (Term to AgTerm; _≟_ to _≟-AgTerm_)

module Auto.Core where

  open DecTotalOrder ≤-decTotalOrder using (total)

  private
    ∃-syntax : ∀ {a b} {A : Set a} → (A → Set b) → Set (b Level.⊔ a)
    ∃-syntax = ∃

    syntax ∃-syntax (λ x → B) = ∃[ x ] B

  -- define error messages that may occur when the `auto` function is
  -- called.
  data Message : Set where
    searchSpaceExhausted : Message
    unsupportedSyntax    : Message

  -- define our own instance of the error functor based on the either
  -- monad, and use it to propagate one of several error messages
  private
    Error : ∀ {a} (A : Set a) → Set a
    Error A = Message ⊎ A

    _⟨$⟩_ : ∀ {a b} {A : Set a} {B : Set b} (f : A → B) → Error A → Error B
    f ⟨$⟩ inj₁ x = inj₁ x
    f ⟨$⟩ inj₂ y = inj₂ (f y)

  -- define term names for the term language we'll be using for proof
  -- search; we use standard Agda names, together with term-variables
  -- and Agda implications/function types.
  data TermName : Set₀ where
    name : (n : Name) → TermName
    var  : (i : ℤ)    → TermName
    impl : TermName

  tname-injective : ∀ {x y} → TermName.name x ≡ TermName.name y → x ≡ y
  tname-injective refl = refl

  tvar-injective : ∀ {i j} → TermName.var i ≡ TermName.var j → i ≡ j
  tvar-injective refl = refl

  _≟-TermName_ : (x y : TermName) → Dec (x ≡ y)
  (name x) ≟-TermName (name y) with x ≟-Name y
  (name x) ≟-TermName (name .x) | yes refl = yes refl
  (name x) ≟-TermName (name y)  | no x≠y   = no (x≠y ∘ tname-injective)
  (name _) ≟-TermName (var _)  = no (λ ())
  (name _) ≟-TermName (impl )  = no (λ ())
  (var _)  ≟-TermName (name _) = no (λ ())
  (var i)  ≟-TermName (var j) with i ≟-Int j
  (var i)  ≟-TermName (var .i) | yes refl = yes refl
  (var i)  ≟-TermName (var j)  | no i≠j   = no (i≠j ∘ tvar-injective)
  (var _)  ≟-TermName (impl )  = no (λ ())
  (impl )  ≟-TermName (name _) = no (λ ())
  (impl )  ≟-TermName (var _)  = no (λ ())
  (impl )  ≟-TermName (impl )  = yes refl

  -- define rule names for the proof terms/rules that our proof search will
  -- return/use; we'll use standard Agda names, together with rule-variables.
  data RuleName : Set where
    name : Name → RuleName
    var  : ℕ    → RuleName

  name-injective : ∀ {x y} → RuleName.name x ≡ name y → x ≡ y
  name-injective refl = refl

  rvar-injective : ∀ {x y} → RuleName.var x ≡ var y → x ≡ y
  rvar-injective refl = refl

  _≟-RuleName_ : (x y : RuleName) → Dec (x ≡ y)
  name x ≟-RuleName name y = map′ (cong name) name-injective (x ≟-Name y)
  name x ≟-RuleName var  y = no (λ ())
  var  x ≟-RuleName name y = no (λ ())
  var  x ≟-RuleName var  y = map′ (cong var) rvar-injective (x Nat.≟ y)

  -- now we can load the definitions from proof search
  open import ProofSearch RuleName TermName _≟-TermName_ Literal _≟-Lit_
    as PS public renaming (Term to PsTerm; module Extensible to PsExtensible)

  -- next up, converting the terms returned by Agda's reflection
  -- mechanism to terms in our proof search's language!

  -- dictionary for the treatment of variables in conversion from Agda
  -- terms to terms to be used in proof search.
  ConvertVar : Set
  ConvertVar = (depth index : ℕ) → ∃ PsTerm

  -- conversion dictionary for rule-terms, which turns every variable
  -- that is within the scope of the term (i.e. is defined within the
  -- term by lambda abstraction) into a variable, and every variable
  -- which is defined out of scope into a Skolem constant (which
  -- blocks unification).
  convertVar4Term : ConvertVar
  convertVar4Term = fromVar
    where
      fromVar : (depth index : ℕ) → ∃ PsTerm
      fromVar d i with total i d
      fromVar d i | inj₁ i≤d = (suc (Δ i≤d) , var (fromℕ (Δ i≤d)))
      fromVar d i | inj₂ i>d = (0 , con (var (-[1+ Δ i>d ])) [])

  -- conversion dictionary for goal-terms, which turns all variables
  -- into Skolem constants which block all unification.
  convertVar4Goal : ConvertVar
  convertVar4Goal = fromVar
    where
      fromVar : (depth index : ℕ) → ∃ PsTerm
      fromVar d i with total i d
      fromVar d i | inj₁ i≤d = (0 , con (var (+ Δ i≤d)) [])
      fromVar d i | inj₂ i>d = (0 , con (var (-[1+ Δ i>d ])) [])

  -- helper function for converting definitions or constructors to
  -- proof terms.
  fromDefOrCon : (s : Name) → ∃[ n ] List (PsTerm n) → ∃ PsTerm
  fromDefOrCon f (n , ts) = n , con (name f) ts

  -- specialised function to convert literals of natural numbers
  -- (since they have a representation using Agda names)
  convertℕ : ∀ {k} → ℕ → PsTerm k
  convertℕ zero    = con (name (quote zero)) []
  convertℕ (suc n) = con (name (quote suc)) (convertℕ n ∷ [])

  -- convert an Agda term to a term, abstracting over the treatment of
  -- variables with an explicit dictionary of the type `ConvertVar`---
  -- passing in `ConvertVar4Term` or `ConvertVar4Goal` will result in
  -- rule-terms or goal-terms, respectively.
  convert : ConvertVar → (depth : ℕ) → AgTerm → Error (∃ PsTerm)
  convertChildren : ConvertVar → ℕ → List (Arg AgTerm) → Error (∃[ n ] List (PsTerm n))

  convert cv d (lit (nat n)) = inj₂ (0 , convertℕ n)
  convert cv d (lit l)       = inj₂ (0 , lit l)
  convert cv d (var i [])    = inj₂ (cv d i)
  convert cv d (var i args)  = inj₁ unsupportedSyntax
  convert cv d (con c args)  = fromDefOrCon c ⟨$⟩ convertChildren cv d args
  convert cv d (def f args)  = fromDefOrCon f ⟨$⟩ convertChildren cv d args
  convert cv d (pi (arg (arg-info visible _) t₁) (abs _ t₂))
    with convert cv d t₁ | convert cv (suc d) t₂
  ... | inj₁ msg       | _              = inj₁ msg
  ... | _              | inj₁ msg       = inj₁ msg
  ... | inj₂ (n₁ , p₁) | inj₂ (n₂ , p₂)
    with match p₁ p₂
  ... | (p₁′ , p₂′) = inj₂ (n₁ ⊔ n₂ , con impl (p₁′ ∷ p₂′ ∷ []))
  convert cv d (pi (arg _ _) (abs _ t₂)) = convert cv (suc d) t₂
  convert cv d (lam _ _)     = inj₁ unsupportedSyntax
  convert cv d (pat-lam _ _) = inj₁ unsupportedSyntax
  convert cv d (sort _)      = inj₁ unsupportedSyntax
  convert cv d unknown       = inj₁ unsupportedSyntax
  convert cv d (meta x args) = inj₁ unsupportedSyntax

  convertChildren cv d [] = inj₂ (0 , [])
  convertChildren cv d (arg (arg-info visible _) t ∷ ts)
    with convert cv d t | convertChildren cv d ts
  ... | inj₁ msg      | _             = inj₁ msg
  ... | _             | inj₁ msg      = inj₁ msg
  ... | inj₂ (m , p)  | inj₂ (n , ps)
    with match p ps
  ... | (p′ , ps′) = inj₂ (m ⊔ n , p′ ∷ ps′)
  convertChildren cv d (arg _ _ ∷ ts) = convertChildren cv d ts

  -- convert an Agda term to a rule-term.
  agda2term : AgTerm → Error (∃ PsTerm)
  agda2term t = convert convertVar4Term 0 t

  -- split a term at every occurrence of the `impl` constructor---
  -- equivalent to splitting at every occurrence of the _→_ symbol in
  -- an Agda term.
  split : ∀ {n} → PsTerm n → ∃[ k ] Vec (PsTerm n) (suc k)
  split (con impl (t₁ ∷ t₂ ∷ [])) = Prod.map suc (λ ts → t₁ ∷ ts) (split t₂)
  split t = zero , t ∷ []

  -- convert an Agda term to a goal-term, together with a `HintDB`
  -- representing the premises of the rule---this means that for a
  -- term of the type `A → B` this function will generate a goal of
  -- type `B` and a premise of type `A`.
  agda2goal×premises : AgTerm → Error (∃ PsTerm × Rules)
  agda2goal×premises t with convert convertVar4Goal 0 t
  ... | inj₁ msg = inj₁ msg
  ... | inj₂ (n , p) with split p
  ... | (k , ts) with initLast ts
  ... | (prems , goal , _) = inj₂ ((n , goal) , toPremises (pred k) prems)
    where
      toPremises : ∀ {k} → ℕ → Vec (PsTerm n) k → Rules
      toPremises i []       = []
      toPremises i (t ∷ ts) = (n , rule (var i) t []) ∷ toPremises (pred i) ts

  -- convert an Agda name to a rule-term.
  name2term : Name → TC (Error (∃ PsTerm))
  name2term nm = bindTC (getType nm) (λ tp → returnTC (agda2term tp))

  -- convert an Agda name to a rule.
  name2ruleHelper : Name → (Error (∃ PsTerm)) → TC (Error (∃ Rule))
  name2ruleHelper nm name2term_nm with name2term_nm
  ... | inj₁ msg = returnTC (inj₁ msg)
  ... | inj₂ (n , t) with split t
  ... | (k , ts) with initLast ts
  ... | (prems , concl , _) = returnTC (inj₂ (n , rule (name nm) concl (toList prems)))

  -- convert an Agda name to a rule.
  name2rule : Name → TC (Error (∃ Rule))
  name2rule nm = bindTC (name2term nm) (name2ruleHelper nm)

  -- function which reifies untyped proof terms (from the
  -- `ProofSearch` module) to untyped Agda terms.
  reify : Proof → TC AgTerm
  reifyChildren : List Proof → TC (List (Arg AgTerm))

  reify (con (var i) ps)  = returnTC (var i [])
  reify (con (name n) ps) =
    bindTC (getDefinition n)
      (λ { (function x)        → bindTC (reifyChildren ps) (λ rc → returnTC (def n rc))
         ; (data-type pars cs) → bindTC (reifyChildren ps) (λ rc → returnTC (con n rc))
         ; (record′ c _)       → returnTC unknown
         ; (constructor′ d)    → returnTC unknown
         ; axiom               → returnTC unknown
         ; primitive′          → returnTC unknown })

  reifyChildren []       = returnTC []
  reifyChildren (p ∷ ps) =
    bindTC (reify p)
      (λ rp → bindTC (reifyChildren ps)
        (λ rcps → returnTC (toArg rp ∷ rcps)))
    where
      toArg : AgTerm → Arg AgTerm
      toArg = arg (arg-info visible relevant)

  -- data-type `Exception` which is used to unquote error messages to
  -- the type-level so that `auto` can generate descriptive type-errors.
  data Exception : Message → Set where
    throw : (msg : Message) → Exception msg

  quoteError : Message → AgTerm
  quoteError (searchSpaceExhausted) = quoteTerm (throw searchSpaceExhausted)
  quoteError (unsupportedSyntax)    = quoteTerm (throw unsupportedSyntax)
\documentclass{report}

% for dvips color names such as JungleGreen
% MUST come before \usepackage{tikz}
\usepackage[usenames, dvipsnames]{color}

% for \begin{lstlisting}
\usepackage{listings}

\usepackage{tikz}
\usetikzlibrary{arrows}
\usetikzlibrary{decorations.markings}
\usetikzlibrary{decorations.pathreplacing}

% For <>| See "Special Characters.rtfd"
\usepackage[T1]{fontenc}

% For being able to name paths and finding their intersection.
% For example: "\path [name path=upward line]".
\usetikzlibrary{intersections}

\begin{document}

\section*{}

\vspace{30pt}

\section*{Grid}

\begin{tikzpicture}
  \draw[help lines] (0,0) grid (5,5);
\end{tikzpicture}

{\color{RubineRed} \rule{\linewidth}{0.5mm}}

The background colour of some text can also be \textcolor{red}{easily} set. For
instance, you can change to orange the background of
\colorbox{BurntOrange}{this text} and then continue typing.

\end{document}
-- Andreas, 2020-05-18, issue #3933
--
-- Duplicate imports of the same modules should be cumulative,
-- rather than overwriting the previous scope.

{-# OPTIONS -v scope.import:10 #-}
{-# OPTIONS -v scope:clash:20 #-}

open import Agda.Builtin.Nat using ()

Nat  = Agda.Builtin.Nat.Nat
zero = Agda.Builtin.Nat.zero

import Agda.Builtin.Nat using ()

works : Nat
works = zero

test : Agda.Builtin.Nat.Nat
test = Agda.Builtin.Nat.zero

-- Used to fail since the second import emptied
-- the contents of module Agda.Builtin.Nat.
{-# OPTIONS --no-termination-check #-}

module Pi-abstract-machine where

open import Data.Empty
open import Data.Unit
open import Data.Sum hiding (map)
open import Data.Product hiding (map)

infixr 30 _⟷_
infixr 30 _⟺_
infixr 20 _◎_

------------------------------------------------------------------------------
-- A universe of our value types

data B : Set where
  ZERO  : B
  ONE   : B
  PLUS  : B → B → B
  TIMES : B → B → B

data VB : (b : B) → Set where
  unitB : VB ONE
  inlB  : {b₁ b₂ : B} → VB b₁ → VB (PLUS b₁ b₂)
  inrB  : {b₁ b₂ : B} → VB b₂ → VB (PLUS b₁ b₂)
  pairB : {b₁ b₂ : B} → VB b₁ → VB b₂ → VB (TIMES b₁ b₂)

------------------------------------------------------------------------------
-- Primitive isomorphisms

data _⟷_ : B → B → Set where
  -- (+,0) commutative monoid
  unite₊  : { b : B } → PLUS ZERO b ⟷ b
  uniti₊  : { b : B } → b ⟷ PLUS ZERO b
  swap₊   : { b₁ b₂ : B } → PLUS b₁ b₂ ⟷ PLUS b₂ b₁
  assocl₊ : { b₁ b₂ b₃ : B } → PLUS b₁ (PLUS b₂ b₃) ⟷ PLUS (PLUS b₁ b₂) b₃
  assocr₊ : { b₁ b₂ b₃ : B } → PLUS (PLUS b₁ b₂) b₃ ⟷ PLUS b₁ (PLUS b₂ b₃)
  -- (*,1) commutative monoid
  unite⋆  : { b : B } → TIMES ONE b ⟷ b
  uniti⋆  : { b : B } → b ⟷ TIMES ONE b
  swap⋆   : { b₁ b₂ : B } → TIMES b₁ b₂ ⟷ TIMES b₂ b₁
  assocl⋆ : { b₁ b₂ b₃ : B } → TIMES b₁ (TIMES b₂ b₃) ⟷ TIMES (TIMES b₁ b₂) b₃
  assocr⋆ : { b₁ b₂ b₃ : B } → TIMES (TIMES b₁ b₂) b₃ ⟷ TIMES b₁ (TIMES b₂ b₃)
  -- * distributes over +
  dist    : { b₁ b₂ b₃ : B } → TIMES (PLUS b₁ b₂) b₃ ⟷ PLUS (TIMES b₁ b₃) (TIMES b₂ b₃)
  factor  : { b₁ b₂ b₃ : B } → PLUS (TIMES b₁ b₃) (TIMES b₂ b₃) ⟷ TIMES (PLUS b₁ b₂) b₃
  -- id
  id⟷    : { b : B } → b ⟷ b

adjointP : { b₁ b₂ : B } → (b₁ ⟷ b₂) → (b₂ ⟷ b₁)
adjointP unite₊  = uniti₊
adjointP uniti₊  = unite₊
adjointP swap₊   = swap₊
adjointP assocl₊ = assocr₊
adjointP assocr₊ = assocl₊
adjointP unite⋆  = uniti⋆
adjointP uniti⋆  = unite⋆
adjointP swap⋆   = swap⋆
adjointP assocl⋆ = assocr⋆
adjointP assocr⋆ = assocl⋆
adjointP dist    = factor
adjointP factor  = dist
adjointP id⟷    = id⟷

evalP : { b₁ b₂ : B } → (b₁ ⟷ b₂) → VB b₁ → VB b₂
evalP unite₊ (inlB ())
evalP unite₊ (inrB v) = v
evalP uniti₊ v = inrB v
evalP swap₊ (inlB v) = inrB v
evalP swap₊ (inrB v) = inlB v
evalP assocl₊ (inlB v) = inlB (inlB v)
evalP assocl₊ (inrB (inlB v)) = inlB (inrB v)
evalP assocl₊ (inrB (inrB v)) = inrB v
evalP assocr₊ (inlB (inlB v)) = inlB v
evalP assocr₊ (inlB (inrB v)) = inrB (inlB v)
evalP assocr₊ (inrB v) = inrB (inrB v)
evalP unite⋆ (pairB unitB v) = v
evalP uniti⋆ v = (pairB unitB v)
evalP swap⋆ (pairB v₁ v₂) = pairB v₂ v₁
evalP assocl⋆ (pairB v₁ (pairB v₂ v₃)) = pairB (pairB v₁ v₂) v₃
evalP assocr⋆ (pairB (pairB v₁ v₂) v₃) = pairB v₁ (pairB v₂ v₃)
evalP dist (pairB (inlB v₁) v₃) = inlB (pairB v₁ v₃)
evalP dist (pairB (inrB v₂) v₃) = inrB (pairB v₂ v₃)
evalP factor (inlB (pairB v₁ v₃)) = pairB (inlB v₁) v₃
evalP factor (inrB (pairB v₂ v₃)) = pairB (inrB v₂) v₃
evalP id⟷ v = v

-- Backwards evaluator
bevalP : { b₁ b₂ : B } → (b₁ ⟷ b₂) → VB b₂ → VB b₁
bevalP c v = evalP (adjointP c) v

------------------------------------------------------------------------------
-- Closure combinators

data _⟺_ : B → B → Set where
  iso : { b₁ b₂ : B } → (b₁ ⟷ b₂) → (b₁ ⟺ b₂)
  sym : { b₁ b₂ : B } → (b₁ ⟺ b₂) → (b₂ ⟺ b₁)
  _◎_ : { b₁ b₂ b₃ : B } → (b₁ ⟺ b₂) → (b₂ ⟺ b₃) → (b₁ ⟺ b₃)
  _⊕_ : { b₁ b₂ b₃ b₄ : B } → (b₁ ⟺ b₃) → (b₂ ⟺ b₄) → (PLUS b₁ b₂ ⟺ PLUS b₃ b₄)
  _⊗_ : { b₁ b₂ b₃ b₄ : B } → (b₁ ⟺ b₃) → (b₂ ⟺ b₄) → (TIMES b₁ b₂ ⟺ TIMES b₃ b₄)

--
adjoint : { b₁ b₂ : B } → (b₁ ⟺ b₂) → (b₂ ⟺ b₁)
adjoint (iso c)   = iso (adjointP c)
adjoint (sym c)   = c
adjoint (c₁ ◎ c₂) = adjoint c₂ ◎ adjoint c₁
adjoint (c₁ ⊕ c₂) = adjoint c₁ ⊕ adjoint c₂
adjoint (c₁ ⊗ c₂) = adjoint c₁ ⊗ adjoint c₂

------------------------------------------------------------------------------
-- Operational semantics

-- (Context a b c d) represents a combinator (c <=> d) with a hole
-- requiring something of type (a <=> b). When we use these contexts,
-- it is always the case that the (c <=> a) part of the computation
-- has ALREADY been done and that we are about to evaluate (a <=> b)
-- using a given 'a'. The continuation takes the output 'b' and
-- produces a 'd'.
data Context : B → B → B → B → Set where
  emptyC : {a b : B} → Context a b a b
  seqC₁  : {a b c i o : B} → (b ⟺ c) → Context a c i o → Context a b i o
  seqC₂  : {a b c i o : B} → (a ⟺ b) → Context a c i o → Context b c i o
  leftC  : {a b c d i o : B} → (c ⟺ d) → Context (PLUS a c) (PLUS b d) i o → Context a b i o
  rightC : {a b c d i o : B} → (a ⟺ b) → Context (PLUS a c) (PLUS b d) i o → Context c d i o
  -- the (i <-> a) part of the computation is completely done; so we must store
  -- the value of type [[ c ]] as part of the context
  fstC   : {a b c d i o : B} → VB c → (c ⟺ d) → Context (TIMES a c) (TIMES b d) i o → Context a b i o
  -- the (i <-> c) part of the computation and the (a <-> b) part of
  -- the computation are completely done; so we must store the value
  -- of type [[ b ]] as part of the context
  sndC   : {a b c d i o : B} → (a ⟺ b) → VB b → Context (TIMES a c) (TIMES b d) i o → Context c d i o

-- Small-step evaluation
-- A computation (c <=> d) is split into:
--   - a history (c <==> a)
--   - a current computation in focus (a <==> b)
--   - a future (b <==> d)

record BState (a b c d : B) : Set where
  constructor <_!_!_>
  field
    comb    : a ⟺ b
    val     : VB a
    context : Context a b c d

record AState (a b c d : B) : Set where
  constructor [_!_!_]
  field
    comb    : a ⟺ b
    val     : VB b
    context : Context a b c d

data State (d : B) : Set where
  before : {a b c : B} → BState a b c d → State d
  after  : {a b c : B} → AState a b c d → State d
  final  : VB d → State d

-- The (c <=> a) part of the computation has been done.
-- We are about to perform the (a <=> b) part of the computation.
beforeStep : {a b c d : B} → BState a b c d → State d
beforeStep < iso f ! v ! C > = after [ iso f ! (evalP f v) ! C ]
beforeStep < sym c ! v ! C > = before < adjoint c ! v ! C >
beforeStep < _◎_ {b₂ = _} f g ! v ! C > = before < f ! v ! seqC₁ g C >
beforeStep < _⊕_ {b₁} {b₂} {b₃} {b₄} f g ! inlB v ! C > = before < f ! v ! leftC g C >
beforeStep < _⊕_ {b₁} {b₂} {b₃} {b₄} f g ! inrB v ! C > = before < g ! v ! rightC f C >
beforeStep < _⊗_ f g ! (pairB v₁ v₂) ! C > = before < f ! v₁ ! fstC v₂ g C >

-- The (c <=> a) part of the computation has been done.
-- The (a <=> b) part of the computation has been done.
-- We need to examine the context to get the 'd'.
-- We rebuild the combinator on the way out.
afterStep : {a b c d : B} → AState a b c d → State d
afterStep {d = d} [ f ! v ! emptyC ] = final v
afterStep [ f ! v ! seqC₁ g C ] = before < g ! v ! seqC₂ f C >
afterStep [ g ! v ! seqC₂ f C ] = after [ f ◎ g ! v ! C ]
afterStep [ f ! v ! leftC g C ] = after [ f ⊕ g ! inlB v ! C ]
afterStep [ g ! v ! rightC f C ] = after [ f ⊕ g ! inrB v ! C ]
afterStep [ f ! v₁ ! fstC v₂ g C ] = before < g ! v₂ ! sndC f v₁ C >
afterStep [ g ! v₂ ! sndC f v₁ C ] = after [ f ⊗ g ! (pairB v₁ v₂) ! C ]

-- Backwards evaluator
-- Re-use AState and BState, but use them 'backwards'
-- this one is different as it produces a 'c' rather than a 'd'
data Stateb (c : B) : Set where
  before : {a b d : B} → AState a b c d → Stateb c
  after  : {a b d : B} → BState a b c d → Stateb c
  final  : VB c → Stateb c

-- The (d <=> b) part of the computation has been done.
-- We have a 'b' and we are about to do the (a <=> b) computation backwards.
-- We get an 'a' and examine the context to get the 'c'
beforeStepb : { a b c d : B } → AState a b c d → Stateb c
beforeStepb [ iso f ! v ! C ] = after < iso f ! bevalP f v ! C >
beforeStepb [ sym c ! v ! C ] = before [ adjoint c ! v ! C ]
beforeStepb [ f ◎ g ! v ! C ] = before [ g ! v ! seqC₂ f C ]
beforeStepb [ f ⊕ g ! inlB v ! C ] = before [ f ! v ! leftC g C ]
beforeStepb [ f ⊕ g ! inrB v ! C ] = before [ g ! v ! rightC f C ]
beforeStepb [ f ⊗ g ! pairB v₁ v₂ ! C ] = before [ g ! v₂ ! sndC f v₁ C ]

-- The (d <-> b) part of the computation has been done.
-- The (a <-> b) backwards computation has been done.
-- We have an 'a' and examine the context to get the 'c'
afterStepb : { a b c d : B } → BState a b c d → Stateb c
afterStepb < f ! v ! emptyC > = final v
afterStepb < g ! v ! seqC₂ f C > = before [ f ! v ! seqC₁ g C ]
afterStepb < f ! v ! seqC₁ g C > = after < f ◎ g ! v ! C >
afterStepb < f ! v ! leftC g C > = after < f ⊕ g ! inlB v ! C >
afterStepb < g ! v ! rightC f C > = after < f ⊕ g ! inrB v ! C >
afterStepb < g ! v₂ ! sndC f v₁ C > = before [ f ! v₁ ! fstC v₂ g C ]
afterStepb < f ! v₁ ! fstC v₂ g C > = after < f ⊗ g ! pairB v₁ v₂ ! C >

------------------------------------------------------------------------------
-- A single step of a machine

step : {d : B} → State d → State d
step (before st) = beforeStep st
step (after st)  = afterStep st
step (final v)   = final v

stepb : {c : B} → Stateb c → Stateb c
stepb (before st) = beforeStepb st
stepb (after st)  = afterStepb st
stepb (final v)   = final v

-- Forward and backwards evaluators loop until final state

eval : {a b : B} → (a ⟺ b) → VB a → VB b
eval f v = loop (before < f ! v ! emptyC >)
  where
    loop : {b : B} → State b → VB b
    loop (final v) = v
    loop st = loop (step st)

evalb : {a b : B} → (a ⟺ b) → VB b → VB a
evalb f v = loop (before [ f ! v ! emptyC ])
  where
    loop : {b : B} → Stateb b → VB b
    loop (final v) = v
    loop st = loop (stepb st)

------------------------------------------------------------------------------
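A tiny sanity check for the machine, assuming it is appended inside the module above: booleans encoded as ONE + ONE, with negation given by the primitive swap₊ isomorphism.

-- Booleans as a sum type; negation is swap₊ lifted into the closure combinators.
BOOL : B
BOOL = PLUS ONE ONE

true′ false′ : VB BOOL
true′  = inlB unitB
false′ = inrB unitB

notB : BOOL ⟺ BOOL
notB = iso swap₊

-- Running the machine forwards: eval notB true′ normalises to inrB unitB,
-- i.e. false′; running it backwards with evalb recovers true′.
notTrue : VB BOOL
notTrue = eval notB true′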
Require Export Bool.
Require Export ZArith.

Open Scope Z_scope.

Inductive Z_inf_branch_tree : Set :=
  Z_inf_leaf : Z_inf_branch_tree
| Z_inf_node : Z -> (nat -> Z_inf_branch_tree) -> Z_inf_branch_tree.

Fixpoint any_true (n:nat) (f:nat->bool) {struct n} : bool :=
  match n with
    0%nat => f 0%nat
  | S p => orb (f (S p)) (any_true p f)
  end.

Fixpoint izero_present (n:nat) (t:Z_inf_branch_tree) {struct t} : bool :=
  match t with
  | Z_inf_leaf => false
  | Z_inf_node v f =>
      match v with
        0 => true
      | _ => any_true n (fun p => izero_present n (f p))
      end
  end.
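A small sanity check for izero_present, assuming it is placed after the definitions above; `sample` is an illustrative name. The root carries 1, so the search descends into the children up to the given width:

(* every child of the root is a node carrying 0 *)
Definition sample : Z_inf_branch_tree :=
  Z_inf_node 1 (fun _ => Z_inf_node 0 (fun _ => Z_inf_leaf)).

(* evaluates to true: a 0 is found at depth one *)
Eval compute in izero_present 2%nat sample.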
(*
 Copyright (C) 2017 M.A.L. Marques

 This Source Code Form is subject to the terms of the Mozilla Public
 License, v. 2.0. If a copy of the MPL was not distributed with this
 file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)

op_a1 := 1.5214:
op_a2 := 0.5764:
op_b1 := 1.1284:
op_b2 := 0.3183:

(* This wrapper is to avoid overflows in the OP functionals. The energy
   is not affected, since the value is only changed for densities that
   are screened away. *)
op_b88_zab := (f_x, rs, z, xs0, xs1) -> my_piecewise3(
  b88_zab(1, op_enhancement, rs, z, xs0, xs1) = 0, DBL_EPSILON,
  b88_zab(1, op_enhancement, rs, z, xs0, xs1)
):

op_beta := (rs, z, xs0, xs1) ->
  op_qab/op_b88_zab(op_enhancement, rs, z, xs0, xs1):

op_f_s := (rs, z, xt, xs0, xs1) ->
  - (1 - z^2)*n_total(rs)/4.0
  * (op_a1*op_beta(rs, z, xs0, xs1) + op_a2)
  / (op_beta(rs, z, xs0, xs1)^4 + op_b1*op_beta(rs, z, xs0, xs1)^3
     + op_b2*op_beta(rs, z, xs0, xs1)^2):

op_f := (rs, z, xt, xs0, xs1) -> my_piecewise3(
  1 - abs(z) <= p_a_zeta_threshold or (screen_dens(rs,z) and screen_dens(rs,-z)),
  0, op_f_s(rs, z_thr(z), xt, xs0, xs1)):

f := (rs, z, xt, xs0, xs1) -> op_f(rs, z, xt, xs0, xs1):
[STATEMENT]
lemma strict_mono_o: "strict_mono r \<Longrightarrow> strict_mono s \<Longrightarrow> strict_mono (r \<circ> s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>strict_mono r; strict_mono s\<rbrakk> \<Longrightarrow> strict_mono (r \<circ> s)
[PROOF STEP]
unfolding strict_mono_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>\<forall>x y. x < y \<longrightarrow> r x < r y; \<forall>x y. x < y \<longrightarrow> s x < s y\<rbrakk> \<Longrightarrow> \<forall>x y. x < y \<longrightarrow> (r \<circ> s) x < (r \<circ> s) y
[PROOF STEP]
by simp
lemma sums_cnj: "((\<lambda>x. cnj(f x)) sums cnj l) \<longleftrightarrow> (f sums l)"
theory Simple_Separation_Example
  imports
    "Extended_Separation_Algebra"
    "Hoare_Sep_Tactics"
    "../Monad_WP/WhileLoopRules"
    "Sep_SP"
begin

declare [[syntax_ambiguity_warning = false]]

type_synonym heap = "((nat \<Rightarrow> nat option))"

definition maps_to :: "nat \<Rightarrow> nat \<Rightarrow> heap \<Rightarrow> bool" ("_ \<mapsto> _" [56,51] 56)
  where "x \<mapsto> y \<equiv> \<lambda>h. h = [x \<mapsto> y]"

notation pred_ex (binder "\<exists>" 10)

definition maps_to_ex :: "nat \<Rightarrow> heap \<Rightarrow> bool" ("_ \<mapsto> -" [56] 56)
  where "x \<mapsto> - \<equiv> (\<lambda>s. \<exists>y. (x \<mapsto> y) s)"

lemma maps_to_maps_to_ex [elim!]: "(p \<mapsto> v) s \<Longrightarrow> (p \<mapsto> -) s"
  by (auto simp: maps_to_ex_def)

declare maps_to_maps_to_ex[sep_cancel]

lemma precise_weaken_pre:
  "precise P \<Longrightarrow> \<lbrace>P \<leadsto>* R\<rbrace> f \<lbrace>\<lambda>_. Q \<and>* R\<rbrace> \<Longrightarrow> \<lbrace>P \<and>* R\<rbrace> f \<lbrace>\<lambda>_. Q \<and>* R\<rbrace>"
  apply (rule hoare_weaken_pre, assumption)
  by (simp add: precise_conj_coimpl)

definition "delete_ptr p = do
  x <- gets (\<lambda>s. s p);
  assert (x \<noteq> None);
  (modify (\<lambda>s x. if x = p then None else s x))
od"

definition "get_ptr p = do
  x <- gets (\<lambda>s. s p);
  assert (x \<noteq> None);
  (return (the x))
od"

definition "set_ptr p v = do
  x <- gets (\<lambda>s. s p);
  assert (x \<noteq> None);
  (modify (\<lambda>s. s(p \<mapsto> v)))
od"

definition "new_ptr = do
  ptrs <- gets dom;
  p <- (select ptrs);
  x <- gets (\<lambda>s. s p);
  assert (x = None);
  (modify (\<lambda>s. s(p \<mapsto> 0)));
  return p
od"

lemma new_ptr_sp: "\<lbrace>R\<rbrace> new_ptr \<lbrace>\<lambda>rv. (rv \<mapsto> - \<and>* R)\<rbrace>"
  apply (clarsimp simp: new_ptr_def, wp select_wp)
  apply (simp add: Ball_def, intro allI impI)
  apply (clarsimp simp: sep_conj_def)
  done

lemma delete_ptr_sp: "\<lbrace>(p \<mapsto> - \<leadsto>* R)\<rbrace> delete_ptr p \<lbrace>\<lambda>_. R\<rbrace>"
  apply (clarsimp simp: delete_ptr_def, wp)
  apply (intro allI impI)
  apply (clarsimp simp: sep_conj_def sep_coimpl_def pred_neg_def)
  apply (erule_tac x="[p \<mapsto> y] :: heap" in allE)
  apply (drule mp)
  apply (clarsimp simp: maps_to_ex_def maps_to_def, fastforce)
  apply (erule_tac x=" (\<lambda>a. if a = p then None else s a)" in allE)
  apply (drule mp)
  apply (rule ext, clarsimp simp: plus_fun_def plus_option_def)
  apply (drule mp)
  apply (clarsimp simp: sep_disj_fun_def sep_disj_option_def)
  apply (assumption)
  done

lemma set_ptr_sp: "\<lbrace>(p \<mapsto> - \<leadsto>* R)\<rbrace> set_ptr p v \<lbrace>\<lambda>_. (p \<mapsto> v \<and>* R)\<rbrace>"
  apply (clarsimp simp: set_ptr_def, wp)
  apply (intro allI impI)
  apply (clarsimp simp: sep_conj_def sep_coimpl_def pred_neg_def)
  apply (erule_tac x="[p \<mapsto> y] :: heap" in allE)
  apply (drule mp)
  apply (clarsimp simp: maps_to_ex_def maps_to_def, fastforce)
  apply (erule_tac x=" (\<lambda>a. if a = p then None else s a)" in allE)
  apply (drule mp)
  apply (rule ext, clarsimp simp: plus_fun_def plus_option_def)
  apply (drule mp)
  apply (clarsimp simp: sep_disj_fun_def sep_disj_option_def)
  apply (rule_tac x="[p \<mapsto> v] :: heap" in exI)
  apply (rule_tac x=" (\<lambda>a. if a = p then None else s a)" in exI)
  apply (clarsimp, intro conjI)
  apply (clarsimp simp: sep_disj_fun_def sep_disj_option_def)
  apply (rule ext, clarsimp simp: plus_fun_def plus_option_def)
  apply (clarsimp simp: maps_to_ex_def maps_to_def)
  done

lemma set_ptr_sp': "\<lbrace>(R)\<rbrace> set_ptr p v \<lbrace>\<lambda>rv. (p \<mapsto> v \<and>* (p \<mapsto> - -* R))\<rbrace>"
  apply (rule hoare_chain, rule set_ptr_sp)
  apply (erule (1) sep_snake_septraction, assumption)
  done

lemma get_ptr_wp: "\<lbrace>EXS x. (p \<mapsto> x \<and>* (p \<mapsto> x \<longrightarrow>* R x))\<rbrace> get_ptr p \<lbrace>R\<rbrace>"
  apply (clarsimp simp: get_ptr_def, wp)
  apply (intro allI impI)
  apply (clarsimp simp: sep_coimpl_def sep_conj_def pred_neg_def sep_impl_def maps_to_def)
  apply (clarsimp simp: sep_disj_commute sep_add_commute)
  apply (subgoal_tac "x=y")
  apply (clarsimp)
  apply (clarsimp simp: plus_fun_def plus_option_def split: option.splits)
  done

lemma get_ptr_sp_weak: "\<lbrace>(R)\<rbrace> get_ptr p \<lbrace>\<lambda>rv. R and (ALLS x. (p \<mapsto> x \<leadsto>* (\<lambda>s. rv = x)))\<rbrace>"
  apply (clarsimp simp: get_ptr_def, wp)
  apply (intro allI impI)
  apply (clarsimp simp: sep_conj_def)
  apply (clarsimp simp: sep_coimpl_def pred_neg_def sep_conj_def)
  apply (clarsimp simp: septraction_def plus_fun_def plus_option_def pred_neg_def sep_impl_def
                        maps_to_def sep_disj_fun_def sep_disj_option_def
                 split: option.splits if_split_asm)
  done

lemma get_ptr_sp: "\<lbrace>(R)\<rbrace> get_ptr p \<lbrace>\<lambda>rv. (p \<mapsto> rv \<and>* (p \<mapsto> rv -* R))\<rbrace>"
  apply (clarsimp simp: get_ptr_def, wp)
  apply (intro allI impI)
  apply (clarsimp simp: sep_conj_def)
  apply (rule_tac x="[p \<mapsto> y] :: heap" in exI)
  apply (rule_tac x=" (\<lambda>a. if a = p then None else s a)" in exI)
  apply (intro conjI)
  apply (clarsimp simp: sep_disj_fun_def sep_disj_option_def)
  apply (rule ext, clarsimp simp: plus_fun_def plus_option_def)
  apply (clarsimp simp: maps_to_ex_def maps_to_def)
  apply (clarsimp simp: sep_coimpl_def sep_conj_def pred_neg_def sep_impl_def septraction_def)
  apply (rule_tac x="[p \<mapsto> y] :: heap" in exI)
  apply (clarsimp simp: maps_to_ex_def maps_to_def, rule conjI)
  apply (clarsimp simp: sep_disj_fun_def sep_disj_option_def split: option.splits)
  apply (erule back_subst[where P=R])
  apply (rule ext, clarsimp simp: plus_fun_def plus_option_def split: option.splits)
  done

lemma get_ptr_sp': "\<lbrace>\<lambda>s. (R (the (s p)) s)\<rbrace> get_ptr p \<lbrace>\<lambda>rv. (p \<mapsto> rv \<and>* (p \<mapsto> rv -* R rv))\<rbrace>"
  apply (clarsimp simp: get_ptr_def, wp)
  apply (intro allI impI)
  apply (clarsimp simp: sep_conj_def)
  apply (rule_tac x="[p \<mapsto> y] :: heap" in exI)
  apply (rule_tac x=" (\<lambda>a. if a = p then None else s a)" in exI)
  apply (intro conjI)
  apply (clarsimp simp: sep_disj_fun_def sep_disj_option_def)
  apply (rule ext, clarsimp simp: plus_fun_def plus_option_def)
  apply (clarsimp simp: maps_to_ex_def maps_to_def)
  apply (clarsimp simp: sep_coimpl_def sep_conj_def pred_neg_def sep_impl_def septraction_def)
  apply (rule_tac x="[p \<mapsto> y] :: heap" in exI)
  apply (clarsimp simp: maps_to_ex_def maps_to_def, rule conjI)
  apply (clarsimp simp: sep_disj_fun_def sep_disj_option_def split: option.splits)
  apply (erule_tac P="R y" in back_subst)
  apply (rule ext, clarsimp simp: plus_fun_def plus_option_def split: option.splits)
  done

lemma extract_forall_septract: "(P -* (ALLS x. R x)) s \<Longrightarrow> \<forall>x. (P -* (R x)) s"
  apply (clarsimp simp: septraction_def pred_neg_def sep_impl_def)
  apply (erule_tac x=x in allE)
  apply (fastforce)
  done

lemma septraction_snake_trivial_alls: "(P x -* (ALLS x. P x \<leadsto>* R x)) s \<Longrightarrow> R x s"
  apply (clarsimp simp: septraction_def)
  apply (clarsimp simp: pred_neg_def)
  apply (erule contrapos_np)
  apply (erule sep_curry[rotated])
  apply (rule_tac x=x in exI)
  apply (clarsimp simp: sep_coimpl_def)
  apply (sep_cancel)
  apply (clarsimp simp: pred_neg_def)
  done

lemma get_ptr_valid: "\<lbrace>ALLS x. (p \<mapsto> x \<leadsto>* R x)\<rbrace> get_ptr p \<lbrace>\<lambda>rv. (p \<mapsto> rv \<and>* R rv)\<rbrace>"
  apply (rule hoare_strengthen_post, rule get_ptr_sp)
  apply (sep_cancel)
  apply (erule septraction_snake_trivial_alls)
  done

definition "copy_ptr p p' = do
  x <- get_ptr p;
  set_ptr p' x
od"

lemma precise_maps_to[precise]: "precise (p \<mapsto> v)"
  apply (clarsimp simp: precise_def maps_to_def)
  done

lemma precise_maps_to_ex[precise]: "precise (p \<mapsto> -)"
  apply (clarsimp simp: precise_def maps_to_def maps_to_ex_def)
  apply (rule ext, clarsimp simp: sep_substate_def)
  apply (clarsimp simp: plus_fun_def plus_option_def)
  apply (drule fun_cong[where x=p], clarsimp split: option.splits)
  by (metis (full_types) fun_upd_same option.distinct(2) option.simps(5)
        sep_disj_fun_def sep_disj_option_def)

lemma septract_maps_to: "(p \<mapsto> v -* (p \<mapsto> v' \<and>* R)) s \<Longrightarrow> R s \<and> v = v'"
  apply (clarsimp simp: septraction_def pred_neg_def sep_impl_def maps_to_def sep_conj_def)
  apply (rule conjI)
  apply (erule back_subst[where P=R])
  apply (rule ext)
  apply (drule_tac x=x in fun_cong)
  apply (clarsimp simp: plus_fun_def plus_option_def sep_disj_fun_def sep_disj_option_def)
  apply (erule_tac x=x in allE)
  apply (erule_tac x=x in allE)
  apply (clarsimp split: option.splits if_split_asm)
  apply (drule_tac x=p in fun_cong)
  apply (clarsimp simp: plus_fun_def plus_option_def sep_disj_fun_def sep_disj_option_def)
  apply (erule_tac x=p in allE)
  apply (erule_tac x=p in allE)
  apply (clarsimp split: option.splits)
  done

lemma precise_conj_coimpl': "precise P \<Longrightarrow> (\<And>s R. (P \<and>* R) s \<Longrightarrow> (P \<leadsto>* R) s)"
  by (clarsimp simp: precise_conj_coimpl)

lemma septraction_precise_conj: "precise P \<Longrightarrow> (P -* (P \<and>* R)) s \<Longrightarrow> R s"
  apply (drule septraction_impl2)
  apply (erule (1) precise_conj_coimpl')
  by (erule septraction_snake_trivial)

lemma septract_lift_pure[simp]: "(P -* (\<lambda>s. p \<and> Q s)) s \<longleftrightarrow> (P -* Q) s \<and> p"
  apply (rule iffI, rule conjI)
  using septraction_impl2 apply blast
  apply (clarsimp simp: septraction_def pred_neg_def sep_impl_def)
  apply (clarsimp simp: septraction_def pred_neg_def sep_impl_def)
  done

lemma copy_ptr_wp:
  "\<lbrace>EXS x. (p \<mapsto> x \<and>* (p \<mapsto> x \<longrightarrow>* p' \<mapsto> - \<and>* (p' \<mapsto> x \<longrightarrow>* R)))\<rbrace> copy_ptr p p' \<lbrace>\<lambda>rv. R\<rbrace>"
  apply (clarsimp simp: copy_ptr_def)
  apply (rule hoare_seq_ext)
  apply (rule hoare_strengthen_post, rule precise_weaken_pre[OF precise_maps_to_ex], rule set_ptr_sp)
  apply (sep_erule (direct) sep_mp)
  apply (rule get_ptr_wp)
  done

lemma sep_All_mp: "(P x \<and>* (ALLS v. P v \<longrightarrow>* R v)) s \<Longrightarrow> R x s"
  apply (clarsimp simp: sep_conj_def sep_impl_def)
  apply (erule_tac x=x in allE, erule_tac x=xa in allE)
  by (simp add: sep_add_commute sep_disj_commuteI)

(* new_rules can be run in both directions *)

lemma copy_ptr_wp':
  "\<lbrace>ALLS x. (p \<mapsto> x \<leadsto>* (ALLS x. (p \<mapsto> x \<longrightarrow>* (p' \<mapsto> - \<leadsto>* p' \<mapsto> x \<longrightarrow>* R))))\<rbrace> copy_ptr p p' \<lbrace>\<lambda>rv. R\<rbrace>"
  apply (clarsimp simp: copy_ptr_def)
  apply (rule hoare_seq_ext)
  apply (rule hoare_strengthen_post, rule set_ptr_sp)
  apply (sep_erule (direct) sep_mp)
  apply (rule hoare_strengthen_post, rule get_ptr_valid)
  apply (erule sep_All_mp)
  done

lemma copy_ptr_sp:
  "\<lbrace>R\<rbrace> copy_ptr p p' \<lbrace>\<lambda>rv. \<exists>x. (p' \<mapsto> x \<and>* (p' \<mapsto> - -* (p \<mapsto> x \<and>* (p \<mapsto> x -* R))))\<rbrace>"
  apply (clarsimp simp: copy_ptr_def)
  apply (rule seq_ext)
  apply (rule get_ptr_sp)
  apply (rule hoare_chain)
  apply (rule set_ptr_sp, erule (1) sep_snake_septraction)
  apply (rule_tac x=x in exI, clarsimp)
  done

lemma copy_ptr_valid:
  "\<lbrace>(p \<mapsto> x \<and>* p' \<mapsto> - \<and>* R)\<rbrace> copy_ptr p p' \<lbrace>\<lambda>_. (p \<mapsto> x \<and>* p' \<mapsto> x \<and>* R)\<rbrace>"
  apply (wp copy_ptr_wp)
  apply (rule_tac x=x in exI)
  apply (sep_solve)
  done

lemma copy_ptr_valid'':
  "\<lbrace>(p \<mapsto> x \<and>* p' \<mapsto> - \<and>* (p \<mapsto> x \<and>* p' \<mapsto> x \<longrightarrow>* R))\<rbrace> copy_ptr p p' \<lbrace>\<lambda>_. (R)\<rbrace>"
  apply (rule hoare_chain, rule copy_ptr_valid)
  apply (assumption)
  apply (sep_solve)
  done

lemma copy_ptr_wp'':
  "\<lbrace>EXS x. (p \<mapsto> x \<and>* p' \<mapsto> - \<and>* (p \<mapsto> x \<and>* p' \<mapsto> x \<longrightarrow>* R))\<rbrace> copy_ptr p p' \<lbrace>\<lambda>_. (R)\<rbrace>"
  apply (rule hoare_weaken_pre, rule copy_ptr_wp, clarsimp)
  apply (rule_tac x=x in exI)
  apply (sep_cancel)+
  apply (sep_solve)
  done

lemma copy_ptr_valid':
  "\<lbrace>(p \<mapsto> x \<and>* p' \<mapsto> - \<and>* R)\<rbrace> copy_ptr p p' \<lbrace>\<lambda>_. (p \<mapsto> x \<and>* p' \<mapsto> x \<and>* R)\<rbrace>"
  apply (rule hoare_strengthen_post[OF copy_ptr_sp], elim exE)
  apply (sep_drule septraction_impl2)
  apply (sep_drule septract_maps_to, assumption, clarsimp simp: sep_conj_assoc)
  apply (sep_drule septraction_impl2, erule precise_conj_coimpl'[OF precise_maps_to_ex])
  apply (sep_drule septraction_snake_trivial)
  apply (sep_solve)
  done

definition "swap_ptr p p' = do
  np <- new_ptr;
  copy_ptr p np;
  copy_ptr p' p;
  copy_ptr np p';
  delete_ptr np
od"

lemma delete_ptr_sp': "\<lbrace>R\<rbrace> delete_ptr p \<lbrace>\<lambda>_. (p \<mapsto> - -* R)\<rbrace>"
  apply (rule hoare_weaken_pre[OF delete_ptr_sp])
  apply (erule (1) sep_snake_septraction)
  done

lemma extract_exs_septraction_simp[simp]: "(P -* (EXS v. R v)) = (EXS v. (P -* R v))"
  apply (rule ext, rule iffI)
  apply (fastforce simp: septraction_def sep_impl_def pred_neg_def)
  apply (fastforce simp: septraction_def sep_impl_def pred_neg_def)
  done

lemma extract_exs_septraction_simp'[simp]: "((EXS v. R v) -* P) = (EXS v. (R v -* P))"
  apply (rule ext, rule iffI; fastforce simp: septraction_def sep_impl_def pred_neg_def)
  done

lemma new_ptr_wp: "\<lbrace>ALLS p. (p \<mapsto> - \<longrightarrow>* R p)\<rbrace> new_ptr \<lbrace>R\<rbrace>"
  apply (rule hoare_chain, rule new_ptr_sp, assumption)
  using sep_conj_commuteI sep_conj_sep_impl2 by blast

lemma delete_ptr_valid: "\<lbrace>(p \<mapsto> - \<and>* R)\<rbrace> delete_ptr p \<lbrace>\<lambda>_. R\<rbrace>"
  apply (rule hoare_weaken_pre[OF delete_ptr_sp])
  by (erule precise_conj_coimpl'[OF precise_maps_to_ex])

lemma swap_ptr_valid:
  "\<lbrace>(p \<mapsto> v \<and>* p' \<mapsto> v' \<and>* R)\<rbrace> swap_ptr p p' \<lbrace>\<lambda>_. (p \<mapsto> v' \<and>* p' \<mapsto> v \<and>* R)\<rbrace>"
  apply (clarsimp simp: swap_ptr_def)
  apply (wp delete_ptr_valid)
  apply (wp copy_ptr_wp new_ptr_wp)+
  apply (clarsimp simp: sep_conj_exists)
  apply (sep_cancel)
  apply (rule_tac x=v in exI)
  apply (sep_cancel)+
  apply (rule_tac x=v' in exI)
  apply (sep_cancel add: maps_to_maps_to_ex)+
  apply (rule exI)
  by (sep_solve add: maps_to_maps_to_ex)

lemma septraction_lens:
  "((P -* Q) \<and>* R) s \<Longrightarrow> (\<And>s. Q' s = Q s) \<Longrightarrow> ((P -* Q') \<and>* R) s"
  apply (sep_cancel)
  using septract_cancel by blast

lemmas septraction_lens' = septraction_lens[where R=\<box>, simplified]

ML{*
fun septraction_drule thms ctxt =
  let
    val lens = dresolve0_tac [@{thm septraction_lens}]
    val r = rotator' ctxt
  in
    sep_drule_tac (dresolve0_tac thms |> r lens) ctxt
  end;

fun septraction_drule_method thms ctxt = SIMPLE_METHOD' (septraction_drule thms ctxt)

fun septraction_drule' thms ctxt =
  let
    val lens = dresolve0_tac [@{thm septraction_lens'}]
    val r = rotator' ctxt
  in
    (dresolve0_tac thms |> r lens)
  end;

fun septraction_drule_method' thms ctxt = SIMPLE_METHOD' (septraction_drule' thms ctxt)
*}

method_setup septract_drule = {* (Attrib.thms) >> septraction_drule_method *}
method_setup septract_drule' = {* (Attrib.thms) >> septraction_drule_method' *}

lemma septract_maps_to1:
  "((p \<mapsto> v -* (p \<mapsto> v' \<and>* R)) \<and>* Q) s \<Longrightarrow> ((R \<and>* Q) and K(v = v')) s"
  by (sep_drule septract_maps_to, clarsimp)

lemma septract_maps_to2:
  "((p \<mapsto> v -* (p \<mapsto> - \<and>* R)) \<and>* Q) s \<Longrightarrow> (R \<and>* Q) s"
  apply (sep_cancel)
  using precise_maps_to_ex septraction_impl1 septraction_precise_conj by fastforce

lemma septract_maps_to3:
  "((p \<mapsto> - -* (p \<mapsto> v' \<and>* R)) \<and>* Q) s \<Longrightarrow> (R \<and>* Q) s"
  apply (sep_cancel)
  by (smt maps_to_maps_to_ex precise_conj_coimpl precise_maps_to_ex sep_rule sep_septraction_snake)

lemma septract_maps_to4:
  "((p \<mapsto> - -* (p \<mapsto> - \<and>* R)) \<and>* Q) s \<Longrightarrow> (R \<and>* Q) s"
  apply (sep_cancel)
  using precise_maps_to_ex septraction_precise_conj by blast

lemmas septract_maps_to_set = septract_maps_to1 septract_maps_to2 septract_maps_to3 septract_maps_to4
lemmas septract_maps_to_set' = septract_maps_to_set[where Q=sep_empty, simplified]

lemma septraction_extract_pure[simp]: "(P -* (\<lambda>s. R s \<and> r)) = ((P -* R) and K r)"
  by (rule ext, rule iffI; fastforce simp: septraction_def sep_impl_def pred_neg_def)

(*
method septract_cancel =
  ((septract_drule septract_maps_to_set | septract_drule' septract_maps_to_set') |
   ((sep_drule septraction_impl2 | drule septraction_impl2), septract_cancel, assumption))
(* now forwards *)
*)

lemma maps_to_pointer[precise]: "pointer (maps_to p)"
  apply (clarsimp simp: pointer_def maps_to_def sep_conj_def sep_coimpl_def pred_neg_def)
  apply (subgoal_tac "x=y", simp)
  apply (metis sep_add_cancelD sep_add_commute sep_disj_commuteI)
  apply (drule_tac x=p in fun_cong)
  apply (clarsimp simp: plus_fun_def plus_option_def split: option.splits)
  apply (clarsimp simp: sep_disj_fun_def)
  apply (erule_tac x=p in allE)
  apply (erule_tac x=p in allE)
  apply (clarsimp simp: sep_disj_fun_def sep_disj_option_def)
  done

lemma swap_ptr_valid':
  "\<lbrace>(p \<mapsto> v \<and>* p' \<mapsto> v' \<and>* R)\<rbrace> swap_ptr p p' \<lbrace>\<lambda>_. (p \<mapsto> v' \<and>* p' \<mapsto> v \<and>* R)\<rbrace>"
  apply (clarsimp simp: swap_ptr_def, intro seq_ext)
  apply (rule new_ptr_sp)
  apply (rule copy_ptr_sp)+
  apply (rule hoare_strengthen_post[OF delete_ptr_sp'])
  apply (clarsimp simp: sep_conj_exists2 sep_conj_assoc)
  apply (sep_invert, sep_forward+, simp, sep_solve)
  done

primrec list :: "nat \<Rightarrow> nat list \<Rightarrow> heap \<Rightarrow> bool" where
  "list i [] = (\<langle>i=0\<rangle> and \<box>)"
| "list i (x#xs) = (\<langle>i=x \<and> i\<noteq>0\<rangle> and (EXS j. i \<mapsto> j ** list j xs))"

lemma list_empty [simp]:
  shows "list 0 xs = (\<lambda>s. xs = [] \<and> \<box> s)"
  by (cases xs) auto

lemma list_nonempty:
  shows "0 < i \<Longrightarrow> list i xs =
    (EXS h t. (\<langle>i= (h) \<and> i\<noteq>0 \<and> xs = h#t\<rangle> and (EXS j. i \<mapsto> j ** list j (t))))"
  apply (cases xs; clarsimp)
  defer
  apply (rule ext, rule iffI)
  apply (clarsimp simp: pred_conj_def)
  apply (clarsimp simp: pred_conj_def)
  done

lemma "\<lbrace>((p \<mapsto> - and (\<lambda>s. p \<noteq>0)) \<and>* list q qs)\<rbrace> set_ptr p q \<lbrace>\<lambda>_. list p (p#qs)\<rbrace>"
  apply (rule hoare_chain, rule precise_weaken_pre[OF precise_maps_to_ex], rule set_ptr_sp)
  defer
  apply (sep_erule (direct) sep_mp)
  apply (clarsimp simp: pred_conj_def)
  apply (sep_cancel)
  apply (sep_lift)
  apply (rule_tac x=q in exI, sep_solve)
  done

lemma set_ptr_wp: "\<lbrace>(p \<mapsto> - \<and>* (p \<mapsto> v \<longrightarrow>* R))\<rbrace> set_ptr p v \<lbrace>\<lambda>_. R\<rbrace>"
  apply (rule hoare_chain, rule set_ptr_sp)
  apply (erule precise_conj_coimpl'[OF precise_maps_to_ex])
  apply (sep_solve)
  done

definition "NULL \<equiv> 0::nat"
declare NULL_def[simp]

definition list_rev where
  "list_rev p = do
     (hd_ptr, rev) <- whileLoop (\<lambda>(hd_ptr, rev) s. hd_ptr \<noteq> NULL)
       (\<lambda>(hd_ptr, rev). do
          next_ptr <- get_ptr hd_ptr;
          set_ptr hd_ptr rev;
          return (next_ptr, hd_ptr)
        od) (p, NULL);
     return rev
   od"

definition "reverse_inv xs list' rev' =
  (EXS ys zs. (list list' ys \<and>* list rev' zs) and (\<lambda>s. rev xs = rev ys @ zs))"

lemma list_rev_valid_wp: "\<lbrace>(list p ps)\<rbrace> list_rev p \<lbrace>\<lambda>rv. list (rv) (rev ps)\<rbrace>"
  apply (clarsimp simp: list_rev_def, wpsimp)
  apply (subst whileLoop_add_inv
    [where I="\<lambda>(list', rev'). (EXS ys zs. (list list' ys \<and>* list rev' zs) and
                               (\<lambda>s. rev ps = rev ys @ zs))", unfolded reverse_inv_def])
  apply (wp whileLoop_wp_inv; clarsimp)
  apply (wp set_ptr_wp get_ptr_wp)
  apply (clarsimp simp: pred_conj_def sep_conj_exists sep_conj_assoc)
  apply (case_tac x; clarsimp simp: pred_conj_def sep_conj_exists sep_conj_assoc)
  apply (rule_tac exI)
  apply (sep_cancel add: maps_to_maps_to_ex)+
  apply (rule_tac x=lista in exI)
  apply (rule_tac x="aa # xa" in exI)
  apply (intro conjI)
  apply (clarsimp)
  apply (sep_cancel)
  apply (rule_tac x=b in exI)
  apply (sep_cancel)
  apply (clarsimp)
  apply (clarsimp split: prod.splits)
  done

lemma septract_extra_pure1[simp]: "(P -* (\<lambda>s. Q s \<and> q)) = ((P -* Q) and (\<lambda>s. q))"
  by (rule ext, rule iffI; fastforce simp: pred_conj_def septraction_def sep_impl_def pred_neg_def)

lemma septract_extra_pure2[simp]: "(P -* (\<lambda>s. q \<and> Q s)) = ((P -* Q) and (\<lambda>s. q))"
  by (rule ext, rule iffI; fastforce simp: pred_conj_def septraction_def sep_impl_def pred_neg_def)

lemma septract_false[simp]: "(P -* (sep_false)) = sep_false"
  using sep_septraction_snake by blast

lemma whileLoop_sp_inv:
  "\<lbrakk> \<And>r. \<lbrace>\<lambda>s. I r s \<and> C r s\<rbrace> B r \<lbrace>I\<rbrace>; \<And>s. P s \<Longrightarrow> I r s \<rbrakk> \<Longrightarrow>
   \<lbrace> P \<rbrace> whileLoop_inv C B r I M \<lbrace>\<lambda>rv s. I rv s \<and> \<not> C rv s \<rbrace>"
  apply (clarsimp simp: whileLoop_inv_def)
  apply (rule hoare_weaken_pre)
  apply (rule valid_whileLoop [where P=I and I=I]; fastforce)
  apply (assumption)
  done

lemma sep_conj_coimpl_mp: "(P \<and>* R) s \<Longrightarrow> (P \<leadsto>* Q) s \<Longrightarrow> (P \<and>* (Q and R)) s"
  apply (drule (2) sep_coimpl_mp_gen, clarsimp simp: pred_conj_def conj_commute)
  done

lemma list_rev_valid_sp: "\<lbrace>(list p ps \<and>* R)\<rbrace> list_rev p \<lbrace>\<lambda>rv. (list (rv) (rev ps) \<and>* R)\<rbrace>"
  apply (clarsimp simp: list_rev_def)
  apply (subst whileLoop_add_inv
    [where I="\<lambda>(list', rev') s. (reverse_inv ps list' rev' \<and>* R) s", unfolded reverse_inv_def])
  apply (sp sp: whileLoop_sp_inv)
  apply (clarsimp)
  apply (sp sp: get_ptr_sp set_ptr_sp')
  apply (clarsimp)
  apply (clarsimp simp: pred_conj_def sep_conj_exists sep_conj_assoc,
         case_tac x; clarsimp simp: pred_conj_def sep_conj_exists sep_conj_assoc)
  apply (sep_invert; sep_forward+)
  apply (clarsimp)
  apply (rule exI, rule exI, intro conjI, fastforce)
  apply (clarsimp simp: pred_conj_def sep_conj_exists)
  apply (rule_tac exI)
  apply (sep_solve)
  apply (clarsimp)+
  apply (sp, clarsimp)
  done

lemma "P s \<Longrightarrow> Q s \<Longrightarrow> (P \<and>* (Q -* (P and Q))) s"
  by (metis disjoint_zero_sym pred_andI pred_neg_def sep_add_zero_sym sep_conjI
        sep_conj_commuteI sep_mp septraction_def)

end
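As a small usage sketch of how the specifications above compose (placed before the closing end of the theory; it relies only on seq_ext and swap_ptr_valid, both already used above, and `swap_ptr_twice` is an illustrative name): swapping twice restores the original heap, since the two swap_ptr_valid instances chain directly through seq_ext.

lemma swap_ptr_twice:
  "\<lbrace>(p \<mapsto> v \<and>* p' \<mapsto> v' \<and>* R)\<rbrace>
     do swap_ptr p p'; swap_ptr p p' od
   \<lbrace>\<lambda>_. (p \<mapsto> v \<and>* p' \<mapsto> v' \<and>* R)\<rbrace>"
  apply (rule seq_ext)
  apply (rule swap_ptr_valid)
  apply (rule swap_ptr_valid)
  done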
[STATEMENT] lemma seq_invariant_ctermI: assumes wf: "wellformed \<Gamma>" and cw: "control_within \<Gamma> (init A)" and sl: "simple_labels \<Gamma>" and sp: "trans A = seqp_sos \<Gamma>" and init: "\<And>\<xi> p l. \<lbrakk> (\<xi>, p) \<in> init A; l\<in>labels \<Gamma> p \<rbrakk> \<Longrightarrow> P (\<xi>, l)" and step: "\<And>p l \<xi> a q l' \<xi>' pp. \<lbrakk> p\<in>cterms \<Gamma>; l\<in>labels \<Gamma> p; P (\<xi>, l); ((\<xi>, p), a, (\<xi>', q)) \<in> seqp_sos \<Gamma>; ((\<xi>, p), a, (\<xi>', q)) \<in> trans A; l'\<in>labels \<Gamma> q; (\<xi>, pp)\<in>reachable A I; p\<in>sterms \<Gamma> pp; (\<xi>', q)\<in>reachable A I; I a \<rbrakk> \<Longrightarrow> P (\<xi>', l')" shows "A \<TTurnstile> (I \<rightarrow>) onl \<Gamma> P" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A \<TTurnstile> (I \<rightarrow>) onl \<Gamma> P [PROOF STEP] proof [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>\<xi> p l. \<lbrakk>(\<xi>, p) \<in> init A; l \<in> labels \<Gamma> p\<rbrakk> \<Longrightarrow> P (\<xi>, l) 2. \<And>\<xi> p a \<xi>' p' l'. \<lbrakk>(\<xi>, p) \<in> reachable A I; \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l); ((\<xi>, p), a, \<xi>', p') \<in> automaton.trans A; l' \<in> labels \<Gamma> p'; I a\<rbrakk> \<Longrightarrow> P (\<xi>', l') [PROOF STEP] fix \<xi> p l [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>\<xi> p l. \<lbrakk>(\<xi>, p) \<in> init A; l \<in> labels \<Gamma> p\<rbrakk> \<Longrightarrow> P (\<xi>, l) 2. \<And>\<xi> p a \<xi>' p' l'. \<lbrakk>(\<xi>, p) \<in> reachable A I; \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l); ((\<xi>, p), a, \<xi>', p') \<in> automaton.trans A; l' \<in> labels \<Gamma> p'; I a\<rbrakk> \<Longrightarrow> P (\<xi>', l') [PROOF STEP] assume "(\<xi>, p) \<in> init A" and *: "l \<in> labels \<Gamma> p" [PROOF STATE] proof (state) this: (\<xi>, p) \<in> init A l \<in> labels \<Gamma> p goal (2 subgoals): 1. \<And>\<xi> p l. \<lbrakk>(\<xi>, p) \<in> init A; l \<in> labels \<Gamma> p\<rbrakk> \<Longrightarrow> P (\<xi>, l) 2. \<And>\<xi> p a \<xi>' p' l'. \<lbrakk>(\<xi>, p) \<in> reachable A I; \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l); ((\<xi>, p), a, \<xi>', p') \<in> automaton.trans A; l' \<in> labels \<Gamma> p'; I a\<rbrakk> \<Longrightarrow> P (\<xi>', l') [PROOF STEP] with init [PROOF STATE] proof (chain) picking this: \<lbrakk>(?\<xi>, ?p) \<in> init A; ?l \<in> labels \<Gamma> ?p\<rbrakk> \<Longrightarrow> P (?\<xi>, ?l) (\<xi>, p) \<in> init A l \<in> labels \<Gamma> p [PROOF STEP] show "P (\<xi>, l)" [PROOF STATE] proof (prove) using this: \<lbrakk>(?\<xi>, ?p) \<in> init A; ?l \<in> labels \<Gamma> ?p\<rbrakk> \<Longrightarrow> P (?\<xi>, ?l) (\<xi>, p) \<in> init A l \<in> labels \<Gamma> p goal (1 subgoal): 1. P (\<xi>, l) [PROOF STEP] by auto [PROOF STATE] proof (state) this: P (\<xi>, l) goal (1 subgoal): 1. \<And>\<xi> p a \<xi>' p' l'. \<lbrakk>(\<xi>, p) \<in> reachable A I; \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l); ((\<xi>, p), a, \<xi>', p') \<in> automaton.trans A; l' \<in> labels \<Gamma> p'; I a\<rbrakk> \<Longrightarrow> P (\<xi>', l') [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>\<xi> p a \<xi>' p' l'. \<lbrakk>(\<xi>, p) \<in> reachable A I; \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l); ((\<xi>, p), a, \<xi>', p') \<in> automaton.trans A; l' \<in> labels \<Gamma> p'; I a\<rbrakk> \<Longrightarrow> P (\<xi>', l') [PROOF STEP] fix \<xi> p a \<xi>' q l' [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>\<xi> p a \<xi>' p' l'. 
\<lbrakk>(\<xi>, p) \<in> reachable A I; \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l); ((\<xi>, p), a, \<xi>', p') \<in> automaton.trans A; l' \<in> labels \<Gamma> p'; I a\<rbrakk> \<Longrightarrow> P (\<xi>', l') [PROOF STEP] assume sr: "(\<xi>, p) \<in> reachable A I" and pl: "\<forall>l\<in>labels \<Gamma> p. P (\<xi>, l)" and tr: "((\<xi>, p), a, (\<xi>', q)) \<in> trans A" and A6: "l' \<in> labels \<Gamma> q" and "I a" [PROOF STATE] proof (state) this: (\<xi>, p) \<in> reachable A I \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l) ((\<xi>, p), a, \<xi>', q) \<in> automaton.trans A l' \<in> labels \<Gamma> q I a goal (1 subgoal): 1. \<And>\<xi> p a \<xi>' p' l'. \<lbrakk>(\<xi>, p) \<in> reachable A I; \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l); ((\<xi>, p), a, \<xi>', p') \<in> automaton.trans A; l' \<in> labels \<Gamma> p'; I a\<rbrakk> \<Longrightarrow> P (\<xi>', l') [PROOF STEP] from this(3) and \<open>trans A = seqp_sos \<Gamma>\<close> [PROOF STATE] proof (chain) picking this: ((\<xi>, p), a, \<xi>', q) \<in> automaton.trans A automaton.trans A = seqp_sos \<Gamma> [PROOF STEP] have tr': "((\<xi>, p), a, (\<xi>', q)) \<in> seqp_sos \<Gamma>" [PROOF STATE] proof (prove) using this: ((\<xi>, p), a, \<xi>', q) \<in> automaton.trans A automaton.trans A = seqp_sos \<Gamma> goal (1 subgoal): 1. ((\<xi>, p), a, \<xi>', q) \<in> seqp_sos \<Gamma> [PROOF STEP] by simp [PROOF STATE] proof (state) this: ((\<xi>, p), a, \<xi>', q) \<in> seqp_sos \<Gamma> goal (1 subgoal): 1. \<And>\<xi> p a \<xi>' p' l'. \<lbrakk>(\<xi>, p) \<in> reachable A I; \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l); ((\<xi>, p), a, \<xi>', p') \<in> automaton.trans A; l' \<in> labels \<Gamma> p'; I a\<rbrakk> \<Longrightarrow> P (\<xi>', l') [PROOF STEP] show "P (\<xi>', l')" [PROOF STATE] proof (prove) goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] from sr and tr and \<open>I a\<close> [PROOF STATE] proof (chain) picking this: (\<xi>, p) \<in> reachable A I ((\<xi>, p), a, \<xi>', q) \<in> automaton.trans A I a [PROOF STEP] have A7: "(\<xi>', q) \<in> reachable A I" [PROOF STATE] proof (prove) using this: (\<xi>, p) \<in> reachable A I ((\<xi>, p), a, \<xi>', q) \<in> automaton.trans A I a goal (1 subgoal): 1. (\<xi>', q) \<in> reachable A I [PROOF STEP] .. [PROOF STATE] proof (state) this: (\<xi>', q) \<in> reachable A I goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] from tr' [PROOF STATE] proof (chain) picking this: ((\<xi>, p), a, \<xi>', q) \<in> seqp_sos \<Gamma> [PROOF STEP] obtain p' where "p' \<in> sterms \<Gamma> p" and "((\<xi>, p'), a, (\<xi>', q)) \<in> seqp_sos \<Gamma>" [PROOF STATE] proof (prove) using this: ((\<xi>, p), a, \<xi>', q) \<in> seqp_sos \<Gamma> goal (1 subgoal): 1. (\<And>p'. \<lbrakk>p' \<in> sterms \<Gamma> p; ((\<xi>, p'), a, \<xi>', q) \<in> seqp_sos \<Gamma>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (blast dest: trans_from_sterms [OF _ wf]) [PROOF STATE] proof (state) this: p' \<in> sterms \<Gamma> p ((\<xi>, p'), a, \<xi>', q) \<in> seqp_sos \<Gamma> goal (1 subgoal): 1. 
P (\<xi>', l') [PROOF STEP] from wf cw sp sr this(1) [PROOF STATE] proof (chain) picking this: wellformed \<Gamma> control_within \<Gamma> (init A) automaton.trans A = seqp_sos \<Gamma> (\<xi>, p) \<in> reachable A I p' \<in> sterms \<Gamma> p [PROOF STEP] have A1: "p'\<in>cterms \<Gamma>" [PROOF STATE] proof (prove) using this: wellformed \<Gamma> control_within \<Gamma> (init A) automaton.trans A = seqp_sos \<Gamma> (\<xi>, p) \<in> reachable A I p' \<in> sterms \<Gamma> p goal (1 subgoal): 1. p' \<in> cterms \<Gamma> [PROOF STEP] by (rule seq_reachable_in_cterms) [PROOF STATE] proof (state) this: p' \<in> cterms \<Gamma> goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] from labels_not_empty [OF wf] [PROOF STATE] proof (chain) picking this: labels \<Gamma> ?p \<noteq> {} [PROOF STEP] obtain ll where A2: "ll\<in>labels \<Gamma> p'" [PROOF STATE] proof (prove) using this: labels \<Gamma> ?p \<noteq> {} goal (1 subgoal): 1. (\<And>ll. ll \<in> labels \<Gamma> p' \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by blast [PROOF STATE] proof (state) this: ll \<in> labels \<Gamma> p' goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] with \<open>p'\<in>sterms \<Gamma> p\<close> [PROOF STATE] proof (chain) picking this: p' \<in> sterms \<Gamma> p ll \<in> labels \<Gamma> p' [PROOF STEP] have "ll\<in>labels \<Gamma> p" [PROOF STATE] proof (prove) using this: p' \<in> sterms \<Gamma> p ll \<in> labels \<Gamma> p' goal (1 subgoal): 1. ll \<in> labels \<Gamma> p [PROOF STEP] by (rule labels_sterms_labels [OF wf]) [PROOF STATE] proof (state) this: ll \<in> labels \<Gamma> p goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] with pl [PROOF STATE] proof (chain) picking this: \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l) ll \<in> labels \<Gamma> p [PROOF STEP] have A3: "P (\<xi>, ll)" [PROOF STATE] proof (prove) using this: \<forall>l\<in>labels \<Gamma> p. P (\<xi>, l) ll \<in> labels \<Gamma> p goal (1 subgoal): 1. P (\<xi>, ll) [PROOF STEP] by simp [PROOF STATE] proof (state) this: P (\<xi>, ll) goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] from \<open>((\<xi>, p'), a, (\<xi>', q)) \<in> seqp_sos \<Gamma>\<close> and sp [PROOF STATE] proof (chain) picking this: ((\<xi>, p'), a, \<xi>', q) \<in> seqp_sos \<Gamma> automaton.trans A = seqp_sos \<Gamma> [PROOF STEP] have A5: "((\<xi>, p'), a, (\<xi>', q)) \<in> trans A" [PROOF STATE] proof (prove) using this: ((\<xi>, p'), a, \<xi>', q) \<in> seqp_sos \<Gamma> automaton.trans A = seqp_sos \<Gamma> goal (1 subgoal): 1. ((\<xi>, p'), a, \<xi>', q) \<in> automaton.trans A [PROOF STEP] by simp [PROOF STATE] proof (state) this: ((\<xi>, p'), a, \<xi>', q) \<in> automaton.trans A goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] with sp [PROOF STATE] proof (chain) picking this: automaton.trans A = seqp_sos \<Gamma> ((\<xi>, p'), a, \<xi>', q) \<in> automaton.trans A [PROOF STEP] have A4: "((\<xi>, p'), a, (\<xi>', q)) \<in> seqp_sos \<Gamma>" [PROOF STATE] proof (prove) using this: automaton.trans A = seqp_sos \<Gamma> ((\<xi>, p'), a, \<xi>', q) \<in> automaton.trans A goal (1 subgoal): 1. ((\<xi>, p'), a, \<xi>', q) \<in> seqp_sos \<Gamma> [PROOF STEP] by simp [PROOF STATE] proof (state) this: ((\<xi>, p'), a, \<xi>', q) \<in> seqp_sos \<Gamma> goal (1 subgoal): 1. 
P (\<xi>', l') [PROOF STEP] from sr \<open>p'\<in>sterms \<Gamma> p\<close> [PROOF STATE] proof (chain) picking this: (\<xi>, p) \<in> reachable A I p' \<in> sterms \<Gamma> p [PROOF STEP] obtain pp where A7: "(\<xi>, pp)\<in>reachable A I" and A8: "p'\<in>sterms \<Gamma> pp" [PROOF STATE] proof (prove) using this: (\<xi>, p) \<in> reachable A I p' \<in> sterms \<Gamma> p goal (1 subgoal): 1. (\<And>pp. \<lbrakk>(\<xi>, pp) \<in> reachable A I; p' \<in> sterms \<Gamma> pp\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by auto [PROOF STATE] proof (state) this: (\<xi>, pp) \<in> reachable A I p' \<in> sterms \<Gamma> pp goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] from sr tr \<open>I a\<close> [PROOF STATE] proof (chain) picking this: (\<xi>, p) \<in> reachable A I ((\<xi>, p), a, \<xi>', q) \<in> automaton.trans A I a [PROOF STEP] have A9: "(\<xi>', q) \<in> reachable A I" [PROOF STATE] proof (prove) using this: (\<xi>, p) \<in> reachable A I ((\<xi>, p), a, \<xi>', q) \<in> automaton.trans A I a goal (1 subgoal): 1. (\<xi>', q) \<in> reachable A I [PROOF STEP] .. [PROOF STATE] proof (state) this: (\<xi>', q) \<in> reachable A I goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] from A1 A2 A3 A4 A5 A6 A7 A8 A9 \<open>I a\<close> [PROOF STATE] proof (chain) picking this: p' \<in> cterms \<Gamma> ll \<in> labels \<Gamma> p' P (\<xi>, ll) ((\<xi>, p'), a, \<xi>', q) \<in> seqp_sos \<Gamma> ((\<xi>, p'), a, \<xi>', q) \<in> automaton.trans A l' \<in> labels \<Gamma> q (\<xi>, pp) \<in> reachable A I p' \<in> sterms \<Gamma> pp (\<xi>', q) \<in> reachable A I I a [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: p' \<in> cterms \<Gamma> ll \<in> labels \<Gamma> p' P (\<xi>, ll) ((\<xi>, p'), a, \<xi>', q) \<in> seqp_sos \<Gamma> ((\<xi>, p'), a, \<xi>', q) \<in> automaton.trans A l' \<in> labels \<Gamma> q (\<xi>, pp) \<in> reachable A I p' \<in> sterms \<Gamma> pp (\<xi>', q) \<in> reachable A I I a goal (1 subgoal): 1. P (\<xi>', l') [PROOF STEP] by (rule step) [PROOF STATE] proof (state) this: P (\<xi>', l') goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: P (\<xi>', l') goal: No subgoals! [PROOF STEP] qed
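(* Shape of the argument above, summarised informally: the step obligation
   for an arbitrary reachable term p is pushed down to a descendant
   p' \<in> sterms \<Gamma> p through which the transition factors
   (trans_from_sterms); seq_reachable_in_cterms places p' in cterms \<Gamma>,
   labels_sterms_labels transports the label hypothesis from p' back to p,
   and the step rule, which is only assumed for cterms, closes the goal. *)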
C ********************************************************* C * * C * TEST NUMBER: 11.02/03 * C * TEST TITLE : Normalize prototypes and typedefs * C * in phigs.h * C * * C * PHIGS Validation Tests, produced by NIST * C * * C ********************************************************* COMMON /GLOBNU/ CTLHND, ERRSIG, ERRFIL, IERRCT, UNERR, 1 TESTCT, IFLERR, PASSSW, ERRSW, MAXLIN, 2 CONID, MEMUN, WKID, WTYPE, GLBLUN, INDLUN, 3 DUMINT, DUMRL INTEGER CTLHND, ERRSIG, ERRFIL, IERRCT, UNERR, 1 TESTCT, IFLERR, PASSSW, ERRSW, MAXLIN, 2 CONID, MEMUN, WKID, WTYPE, GLBLUN, INDLUN, 3 DUMINT(20), ERRIND REAL DUMRL(20) COMMON /GLOBCH/ PIDENT, GLBERR, TSTMSG, FUNCID, 1 DUMCH CHARACTER PIDENT*40, GLBERR*60, TSTMSG*900, FUNCID*80, 1 DUMCH(20)*20 INTEGER IX, IY, UIN,UOUT, IOERR, NXTPOS, ITRIM, CC, BLEVEL INTEGER C1,C2,C3, NNB, BUFLEN, LEADNB, NG C NG must be greater than the length of INBUF to signify not-found PARAMETER (NG = 99999) CHARACTER INBUF*10000, MSG*300, INREC*500, RSTREC*500 CHARACTER THISCH*2, CHTOK(9)*1, ENDCH*1, CHBUF*10000 DATA CHTOK / '[', ']', '{', '}', '(', ')', ';', ',', '*' / 801 FORMAT (A) CALL INITGL ('11.02/03') CALL INMSG ('This program does not perform any test cases, ' // 1 'but merely normalizes the contents of phigs.hp ' // 2 'as phigs.hpn for later comparison.') UIN = MAX(20, INDLUN, GLBLUN) + 1 UOUT = UIN+1 C open output file OPEN (UNIT=UOUT, IOSTAT=IOERR, FILE='phigs.hpn', RECL=9000, 1 STATUS='UNKNOWN', FORM='FORMATTED') IF (IOERR .NE. 0) THEN WRITE (MSG, '(A,I6,A)') 'Error code opening output file ' // 1 'phigs.hpn = ', IOERR, '.' CALL UNMSG (MSG) ENDIF REWIND (UNIT=UOUT, IOSTAT=IOERR) IF (IOERR .NE. 0) THEN WRITE (MSG, '(A,I6,A)') 'Error code rewinding output file ' // 1 'phigs.hpn = ', IOERR, '.' CALL UNMSG (MSG) ENDIF C open input file OPEN (UNIT=UIN, IOSTAT=IOERR, FILE='phigs.hp', RECL=2000, 1 STATUS='OLD', FORM='FORMATTED') IF (IOERR .NE. 0) THEN WRITE (MSG, '(A,I6,A)') 'Error code opening input file ' // 1 'phigs.hp = ', IOERR, '. Create phigs.hp as ' // 2 'pre-processed version of phigs.h and re-execute.' CALL UNMSG (MSG) ENDIF REWIND (UNIT=UIN, IOSTAT=IOERR) IF (IOERR .NE. 0) THEN WRITE (MSG, '(A,I6,A)') 'Error code rewinding input file ' // 1 'phigs.hp = ', IOERR, '.' CALL UNMSG (MSG) ENDIF C record as read in from input file INREC = ' ' C the part of inbuf beyond the first statement RSTREC = ' ' C try to get next statement in inbuf 100 CONTINUE INBUF = RSTREC C search inbuf for zero-level semi-colon BLEVEL = 0 NXTPOS = 1 110 CONTINUE C1 = INDEX(INBUF(NXTPOS:), '{') IF (C1.EQ.0) C1 = NG C2 = INDEX(INBUF(NXTPOS:), '}') IF (C2.EQ.0) C2 = NG IF (BLEVEL.EQ.0) THEN C3 = INDEX(INBUF(NXTPOS:), ';') IF (C3.EQ.0) C3 = NG ELSE C3 = NG ENDIF CC = MIN(C1,C2,C3) IF (CC.GE.NG) GOTO 200 C get absolute position of next interesting character CC = CC + NXTPOS - 1 C where to start next search NXTPOS = CC+1 THISCH = INBUF(CC:CC) IF (THISCH .EQ. '{') THEN BLEVEL = BLEVEL+1 GOTO 110 ELSEIF (THISCH .EQ. '}') THEN BLEVEL = BLEVEL-1 GOTO 110 ELSEIF (THISCH .EQ. ';') THEN GOTO 300 ELSE CALL UNMSG ('Logic error searching for delimiter in: ' // 1 INBUF(1:300)) ENDIF C not found - need more records in inbuf 200 CONTINUE READ (UNIT=UIN, FMT=801, END=601, IOSTAT=IOERR) INREC IF (IOERR .EQ. 0) THEN C skip any records left with leading "#" or blanks IF (INREC(1:1).EQ.'#' .OR. INREC.EQ.' ') GOTO 200 C append INREC to INBUF NXTPOS = ITRIM(INBUF) IF (INBUF(NXTPOS:NXTPOS) .NE. 
'\') THEN C not C-continuation NXTPOS = NXTPOS+2 ENDIF INBUF(NXTPOS:) = INREC GOTO 110 ELSE WRITE (MSG, '(A,I6,A)') 'Error #', IOERR, 1 ' occurred reading phigs.hp.' CALL UNMSG (MSG) ENDIF 601 CONTINUE IF (INBUF .NE. ' ') THEN CALL UNMSG ('Hit end-of-file in phigs.hp with partial ' // 1 'statement: ' // INBUF(1:300)) ENDIF GOTO 666 C found zero-level semi-colon 300 CONTINUE C put rest of record in rstrec RSTREC = INBUF(CC+1:) INBUF(CC+1:) = ' ' C INBUF now has full statement CUSERMOD convert HT,LF,VT,FF,CR to space C convert all white space to spaces - we'll take C these to be ASCII 9-13 ... DO 310 CC = 9,13 315 CONTINUE NXTPOS = INDEX(INBUF, CHAR(CC)) IF (NXTPOS .NE. 0) THEN INBUF(NXTPOS:NXTPOS) = ' ' GOTO 315 ENDIF 310 CONTINUE C keep this statement? CC = LEADNB(INBUF) IF (INBUF(CC:CC+7) .EQ. 'typedef ' .OR. 1 INBUF(CC:CC+6) .EQ. 'extern ') THEN C keep it ELSE C toss it GOTO 100 ENDIF C delete all extra spaces NXTPOS = 1 320 CONTINUE C next non-blank NNB = LEADNB(INBUF(NXTPOS:)) IF (NNB .LE. 0) GOTO 350 C compress only if extra spaces found IF (NNB.GT.1) THEN CHBUF = INBUF(NXTPOS + NNB - 1:) INBUF(NXTPOS:) = CHBUF ENDIF C where is next blank? CC = INDEX(INBUF(NXTPOS:), ' ') C position one beyond next blank - next receiving position NXTPOS = NXTPOS + CC GOTO 320 350 CONTINUE C delete spaces adjacent to character-tokens: []{}();,* BUFLEN = ITRIM(INBUF) DO 340 IX = 1,9 DO 345 IY = 0,1 IF (IY .EQ. 0) THEN THISCH = ' ' // CHTOK(IX) ELSE THISCH = CHTOK(IX) // ' ' ENDIF 355 CONTINUE CC = INDEX(INBUF, THISCH) IF (CC.LE.0 .OR. CC.GE.BUFLEN) GOTO 345 INBUF(CC+IY:) = INBUF(CC+IY+1:) BUFLEN = BUFLEN-1 GOTO 355 345 CONTINUE 340 CONTINUE C INBUF now normalized BUFLEN = ITRIM(INBUF) C if typedef, swing type name up to front IF (INBUF(1:7) .NE. 'typedef') GOTO 400 C search backwards for "}", "*" or " " DO 370 IX = BUFLEN,1,-1 ENDCH = INBUF(IX:IX) IF (ENDCH.EQ.'}' .OR. ENDCH.EQ.'*' .OR. ENDCH.EQ.' ') GOTO 380 370 CONTINUE CALL UNMSG ('Could not find last "}", "*", or " " in typedef: ' // 1 INBUF(1:300)) 380 CONTINUE CHBUF = INBUF(IX+1:BUFLEN-1) // ' ' // INBUF(1:BUFLEN) BUFLEN = BUFLEN + BUFLEN - IX INBUF = CHBUF 400 CONTINUE WRITE (UNIT=UOUT, FMT=801) INBUF(1:BUFLEN) GOTO 100 666 CONTINUE CLOSE (UNIT=UOUT) CLOSE (UNIT=UIN) CALL WINDUP END
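C Worked example of the normalization performed above (illustrative only;
C the concrete declarations depend on the local pre-processed phigs.h):
C   input statement : typedef struct {
C                        Pint   num_ints;
C                     } Pint_list;
C   after collapsing white space and deleting spaces next to []{}();,* :
C                     typedef struct{Pint num_ints;}Pint_list;
C   after swinging the type name up to the front (typedef case only):
C                     Pint_list typedef struct{Pint num_ints;}Pint_list;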
(** ** Operations T and Tt on carriers of lB-systems and their properties TT and TTt. by Vladimir Voevodsky, file created on Jan. 6, 2015 *) Unset Automatic Introduction. Require Export lBsystems.lB_carriers . (** *** Operation(s) T. Including constructions related to their domains of definition. *) (** **** Domains of definition of operations of type T *) Definition T_dom { BB : lBsystem_carrier } ( X1 X2 : BB ) := dirprod ( ll X1 > 0 ) ( isabove X2 ( ft X1 ) ) . Definition T_dom_constr { BB : lBsystem_carrier } { X1 X2 : BB } ( gt0 : ll X1 > 0 ) ( isab : isabove X2 ( ft X1 ) ) : T_dom X1 X2 := tpair _ gt0 isab . Definition T_dom_gt0 { BB : lBsystem_carrier } { X1 X2 : BB } ( inn : T_dom X1 X2 ) : ll X1 > 0 := pr1 inn . Definition T_dom_gth { BB : lBsystem_carrier } { X1 X2 : BB } ( inn : T_dom X1 X2 ) : ll X2 > ll ( ft X1 ) := isabove_gth ( pr2 inn ) . Definition T_dom_isabove { BB : lBsystem_carrier } { X1 X2 : BB } ( inn : T_dom X1 X2 ) : isabove X2 ( ft X1 ) := pr2 inn . Definition T_dom_geh { BB : lBsystem_carrier } { X1 X2 : BB } ( inn : T_dom X1 X2 ) : ll X2 >= ll X1 . Proof . intros . assert ( gt := T_dom_gth inn ) . assert ( gte := natgthtogehsn _ _ gt ) . refine ( istransnatgeh _ _ _ gte _ ) . rewrite ll_ft . change ( 1 + ( ll X1 - 1 ) >= ll X1 ) . rewrite natpluscomm . exact ( minusplusnmmineq _ _ ) . Defined. Lemma T_dom_gt0_2 { BB : lBsystem_carrier } { X1 X2 : BB } ( inn : T_dom X1 X2 ) : ll X2 > 0 . Proof . intros . exact ( isabove_gt0 ( T_dom_isabove inn ) ) . Defined. Lemma isaprop_T_dom { BB : lBsystem_carrier } ( X1 X2 : BB ) : isaprop ( T_dom X1 X2 ) . Proof. intros . apply isapropdirprod . apply ( pr2 ( _ > _ ) ) . exact ( isaprop_isabove _ _ ) . Defined. Lemma noparts_T_dom { BB : lBsystem_carrier } { X1 X2 : BB } ( inn1 inn2 : T_dom X1 X2 ) : inn1 = inn2 . Proof . intros . apply ( proofirrelevance _ ( isaprop_T_dom X1 X2 ) ) . Defined . Definition T_dom_refl { BB : lBsystem_carrier } ( X : BB ) ( gt0 : ll X > 0 ) : T_dom X X := T_dom_constr gt0 ( isabove_X_ftX X gt0 ) . Definition T_dom_comp { BB : lBsystem_carrier } { X1 X2 X3 : BB } ( inn12 : T_dom X1 X2 ) ( inn23 : T_dom X2 X3 ) : T_dom X1 X3 . Proof. intros. assert ( gt0 := T_dom_gt0 inn12 ) . assert ( is21 := T_dom_isabove inn12 ) . assert ( is32 := T_dom_isabove inn23 ) . refine ( T_dom_constr _ _ ) . exact gt0 . exact ( isabov_trans is32 ( isover_ft' is21 ) ) . Defined. Lemma T_dom_ftn { BB : lBsystem_carrier } { X1 X2 : BB } ( n : nat ) ( inn : T_dom X1 X2 ) ( isab : isabove ( ftn n X2 ) ( ft X1 ) ) : T_dom X1 ( ftn n X2 ) . Proof . intros. exact ( T_dom_constr ( T_dom_gt0 inn ) isab ) . Defined. (** **** The type objects of which are candidates for operations T on an lB-system. *) Definition T_ops_type ( BB : lBsystem_carrier ) := forall ( X1 X2 : BB ) ( inn : T_dom X1 X2 ) , BB . Identity Coercion T_ops_to_Fun : T_ops_type >-> Funclass . Lemma T_equals_T { BB : lBsystem_carrier } { X1 X2 X2' : BB } ( T : T_ops_type BB ) ( eq : X2 = X2' ) ( inn : T_dom X1 X2 ) ( inn' : T_dom X1 X2' ) : T X1 X2 inn = T X1 X2' inn' . Proof. intros BB X1 X2 X2' T eq . rewrite eq . intros . rewrite ( noparts_T_dom inn inn' ) . apply idpath . Defined. (** **** The zeros property (later an axiom) of an operation of type T *) Definition T_ax0_type { BB : lBsystem_carrier } ( T : T_ops_type BB ) := forall ( X1 X2 : BB ) ( inn : T_dom X1 X2 ) , ll ( T X1 X2 inn ) = 1 + ( ll X2 ) . Identity Coercion T_ax0_to_Fun : T_ax0_type >-> Funclass . 
Lemma ll_T_gt0 { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) { X1 X2 : BB } ( inn : T_dom X1 X2 ) : ll ( T X1 X2 inn ) > 0 . Proof. intros . rewrite ax0 . exact ( natgthsn0 _ ) . Defined. (** **** The first property (later an axiom) of an operation of type T *) Lemma T_ax1a_dom { BB : lBsystem_carrier } { X1 X2 : BB } ( inn : T_dom X1 X2 ) ( isab : isabove ( ft X2 ) ( ft X1 ) ) : T_dom X1 ( ft X2 ) . Proof . intros. exact ( T_dom_constr ( T_dom_gt0 inn ) isab ) . Defined. Definition T_ax1a_type { BB : lBsystem_carrier } ( T : T_ops_type BB ) := forall ( X1 X2 : BB ) ( inn : T_dom X1 X2 ) ( isab : isabove ( ft X2 ) ( ft X1 ) ) , ft ( T X1 X2 inn ) = T X1 ( ft X2 ) ( T_ax1a_dom inn isab ) . Identity Coercion T_ax1a_to_Fun: T_ax1a_type >-> Funclass . Definition T_ax1b_type { BB : lBsystem_carrier } ( T : T_ops_type BB ) := forall ( X1 X2 : BB ) ( inn : T_dom X1 X2 ) , isabove ( T X1 X2 inn ) X1 . Identity Coercion T_ax1b_to_Fun: T_ax1b_type >-> Funclass . (** **** The computation of the iterated ft of ( T X1 X2 ) *) Lemma ftn_T { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax1a : T_ax1a_type T ) ( n : nat ) { X1 X2 : BB } ( isab : isabove ( ftn n X2 ) ( ft X1 ) ) ( inn : T_dom X1 X2 ) : ftn n ( T X1 X2 inn ) = T X1 ( ftn n X2 ) ( T_dom_ftn n inn isab ) . Proof . intros BB T ax1a n . induction n as [ | n IHn ] . intros . rewrite ( noparts_T_dom inn (T_dom_ftn 0 inn isab) ) . apply idpath . intros . change ( ftn (S n) (T X1 X2 inn) ) with ( ft ( ftn n (T X1 X2 inn) ) ) . assert ( isab' : isabove ( ftn n X2 ) ( ft X1 ) ) . exact ( isabove_ft_inv isab ) . rewrite ( IHn X1 X2 isab' inn ) . refine ( ax1a _ _ _ _ ) . Defined. Lemma ft_T { BB : lBsystem_carrier } { T : T_ops_type BB } { X1 X2 : BB } ( ax0 : T_ax0_type T ) ( ax1b : T_ax1b_type T ) ( iseq : ft X2 = ft X1 ) ( inn : T_dom X1 X2 ) : ft ( T X1 X2 inn ) = X1 . Proof. intros . assert ( isov := ax1b X1 X2 inn : isover (T X1 X2 inn) X1 ) . unfold isover in isov . rewrite ax0 in isov . rewrite ( natassocpmeq _ _ _ ( T_dom_geh inn ) ) in isov . assert ( eq : ll X2 = ll X1 ) . assert ( eq' : ll X2 - 1 = ll X1 - 1 ) . repeat rewrite <- ll_ft . rewrite iseq . apply idpath . assert ( eq1 : ( ll X1 - 1 ) + 1 = ll X1 ) . refine ( minusplusnmm _ _ _ ) . exact ( natgthtogehsn _ _ (T_dom_gt0 inn ) ) . assert ( eq2 : ( ll X2 - 1 ) + 1 = ll X2 ) . refine ( minusplusnmm _ _ _ ) . exact ( istransnatgeh _ _ _ ( T_dom_geh inn ) ( natgthtogehsn _ _ (T_dom_gt0 inn ) ) ) . assert ( eq'' := maponpaths ( fun n => n + 1 ) eq' ) . lazy beta in eq'' . rewrite eq1 in eq'' . rewrite eq2 in eq'' . exact eq'' . rewrite eq in isov . rewrite natminusnn in isov . exact ( ! isov ) . Defined. (** **** The isover and isabove properties of the expressions T X1 X2 *) Lemma isover_T_T_2 { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) ( ax1a : T_ax1a_type T ) { X1 X2 X2' : BB } ( inn : T_dom X1 X2 ) ( inn' : T_dom X1 X2' ) ( is : isover X2 X2' ) : isover ( T X1 X2 inn ) ( T X1 X2' inn' ) . Proof . intros . unfold isover in * . repeat rewrite ax0 . simpl . assert ( isab : isabove ( ftn ( ll X2 - ll X2') X2 ) ( ft X1 ) ) . rewrite <- is . Set Printing All . exact ( T_dom_isabove inn' ) . rewrite ( ftn_T ax1a _ isab inn ) . exact ( T_equals_T _ is _ _ ) . Defined. Lemma isabove_T_T_2 { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) ( ax1a : T_ax1a_type T ) { X1 X2 X2' : BB } ( inn : T_dom X1 X2 ) ( inn' : T_dom X1 X2' ) ( is : isabove X2 X2' ) : isabove ( T X1 X2 inn ) ( T X1 X2' inn' ) . Proof . intros . 
refine ( isabove_constr _ _ ) . repeat rewrite ax0 . exact ( isabove_gth is ) . exact ( isover_T_T_2 ax0 ax1a _ _ is ) . Defined. (** *** Operation(s) Tt . Including constructions related to their domains of definition. *) (** **** Domains of definition of operations of type Tt *) Definition Tt_dom { BB : lBsystem_carrier } ( X : BB ) ( s : Tilde BB ) := T_dom X ( dd s ) . (** **** The type objects of which are candidates for operations Tt on an lB-system. *) Definition Tt_ops_type ( BB : lBsystem_carrier ) := forall ( X : BB ) ( s : Tilde BB ) ( inn : Tt_dom X s ) , Tilde BB . Identity Coercion Tt_ops_to_Fun: Tt_ops_type >-> Funclass . (** **** The zeros property (later an axiom) of an operation of type Tt It will be shown to be a corollary of the first property of Tt and the zeros property of T. However it is convenient to have it separately for the use in the definition of a prelBsystem. *) Definition Tt_ax0_type { BB : lBsystem_carrier } ( Tt : Tt_ops_type BB ) := forall ( X : BB ) ( s : Tilde BB ) ( inn : Tt_dom X s ) , ll ( dd ( Tt X s inn ) ) = 1 + ll ( dd s ) . (** **** The first property (later an axiom) of an operation of type Tt *) Definition Tt_ax1_type { BB : lBsystem_carrier } ( T : T_ops_type BB ) ( Tt : Tt_ops_type BB ) := forall ( X : BB ) ( s : Tilde BB ) ( inn : Tt_dom X s ) , dd ( Tt X s inn ) = T X ( dd s ) inn . Identity Coercion Tt_ax1_to_Fun: Tt_ax1_type >-> Funclass . Lemma Tt_ax1_to_Tt_ax0 { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) { Tt : Tt_ops_type BB } ( ax1 : Tt_ax1_type T Tt ) : Tt_ax0_type Tt . Proof . intros . unfold Tt_ax0_type . intros . rewrite ax1 . exact ( ax0 _ _ _ ) . Defined. (** *** The properties TT and TTt *) (** **** Two implications of the zeros and first properties of operations of type T that are required for the formulation of the property TT *) Lemma T_dom_12_23_to_T12_T13 { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) ( ax1a : T_ax1a_type T ) ( ax1b : T_ax1b_type T ) { X1 X2 X3 : BB } ( inn12 : T_dom X1 X2 ) ( inn23 : T_dom X2 X3 ) : T_dom ( T X1 X2 inn12 ) ( T X1 X3 ( T_dom_comp inn12 inn23 ) ) . Proof . intros . assert ( is21 := T_dom_isabove inn12 ) . assert ( is32 := T_dom_isabove inn23 ) . refine ( T_dom_constr _ _ ) . rewrite ( ax0 _ _ inn12 ) . exact ( natgthsn0 _ ) . destruct ( isabove_choice is21 ) as [ isab | eq ] . rewrite ( ax1a _ _ _ isab ) . exact ( isabove_T_T_2 ax0 ax1a _ _ is32) . rewrite ( ft_T ax0 ax1b ( ! eq ) _ ) . apply ax1b . Defined. Lemma T_dom_12_23_to_T1T23 { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax1b : T_ax1b_type T ) { X1 X2 X3 : BB } ( inn12 : T_dom X1 X2 ) ( inn23 : T_dom X2 X3 ) : T_dom X1 ( T X2 X3 inn23 ) . Proof . intros . refine ( T_dom_constr _ _ ) . exact ( T_dom_gt0 inn12 ) . refine ( isabov_trans ( ax1b _ _ _ ) _ ) . exact ( T_dom_isabove inn12 ) . Defined. (** **** The property (later an axiom) TT *) Definition TT_type { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) ( ax1a : T_ax1a_type T ) ( ax1b : T_ax1b_type T ) := forall ( X1 X2 X3 : BB ) ( inn12 : T_dom X1 X2 ) ( inn23 : T_dom X2 X3 ) , T ( T X1 X2 inn12 ) ( T X1 X3 ( T_dom_comp inn12 inn23 ) ) ( T_dom_12_23_to_T12_T13 ax0 ax1a ax1b inn12 inn23 ) = T X1 ( T X2 X3 inn23 ) ( T_dom_12_23_to_T1T23 ax1b inn12 inn23 ) . Identity Coercion TT_to_Fun: TT_type >-> Funclass . 
(** **** Two implications of the zeros and first properties of operations of type T and Tt that are required for the formulation of the property TTt *) Lemma Tt_dom_12_2r_to_T12_Tt1r { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) ( ax1a : T_ax1a_type T ) ( ax1b : T_ax1b_type T ) { Tt : Tt_ops_type BB } ( ax1at : Tt_ax1_type T Tt ) { X1 X2 : BB } { r : Tilde BB } ( inn12 : T_dom X1 X2 ) ( inn2r : Tt_dom X2 r ) : Tt_dom ( T X1 X2 inn12 ) ( Tt X1 r ( T_dom_comp inn12 inn2r ) ) . Proof. intros . unfold Tt_dom . rewrite ax1at . apply ( T_dom_12_23_to_T12_T13 ax0 ax1a ax1b inn12 inn2r ) . Defined. Lemma Tt_dom_12_2r_to_Tt1Tt2r { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) ( ax1b : T_ax1b_type T ) { Tt : Tt_ops_type BB } ( ax1at : Tt_ax1_type T Tt ) { X1 X2 : BB } { r : Tilde BB } ( inn12 : T_dom X1 X2 ) ( inn2r : Tt_dom X2 r ) : Tt_dom X1 ( Tt X2 r inn2r ) . Proof. intros. unfold Tt_dom. rewrite ax1at . apply ( T_dom_12_23_to_T1T23 ax1b inn12 inn2r ) . Defined. (** **** The property TTt *) Definition TTt_type { BB : lBsystem_carrier } { T : T_ops_type BB } ( ax0 : T_ax0_type T ) ( ax1a : T_ax1a_type T ) ( ax1b : T_ax1b_type T ) { Tt : Tt_ops_type BB } ( ax1t : Tt_ax1_type T Tt ) := forall ( X1 X2 : BB ) ( r : Tilde BB ) ( inn12 : T_dom X1 X2 ) ( inn2r : Tt_dom X2 r ) , Tt ( T X1 X2 inn12 ) ( Tt X1 r ( T_dom_comp inn12 inn2r ) ) ( Tt_dom_12_2r_to_T12_Tt1r ax0 ax1a ax1b ax1t inn12 inn2r ) = Tt X1 ( Tt X2 r inn2r ) ( Tt_dom_12_2r_to_Tt1Tt2r ax0 ax1b ax1t inn12 inn2r ) . Identity Coercion TTt_to_Fun: TTt_type >-> Funclass . (* End of the file lBsystems_T_Tt.v *)
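(* Intuition, stated informally and not used by the formal development:
   T is the weakening operation of an lB-system, so T X1 X2 extends X2 by
   the top variable of X1 over their common base ft X1; this is reflected in
   the axioms ll (T X1 X2 inn) = 1 + ll X2 and isabove (T X1 X2 inn) X1.
   TT is then the interchange law T (T X1 X2) (T X1 X3) = T X1 (T X2 X3)
   for iterated weakening, with the T_dom_* lemmas supplying the required
   domain-of-definition proofs, and TTt is the same law with the innermost
   argument taken from the Tilde part, transported along Tt_ax1. *)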
SUBROUTINE inpd04 (task, iwrit, elsnam, nelms) ! READ control DATA for element number 04 (TL CST++) USE ele04_db IMPLICIT NONE ! dummy arguments CHARACTER(len=*),INTENT(IN):: elsnam ! element set name CHARACTER(len=*),INTENT(IN):: task ! requested task INTEGER (kind=4) :: nelms, & ! number of element sets iwrit ! flag to echo data input ! local variables LOGICAL :: oldset INTEGER (kind=4) :: nelem, nreqs, narch, nn TYPE (ele04_set), POINTER, SAVE :: elset, anter IF (TRIM(task) == 'INPUT ') THEN ! check if list of sets and set exists and initializes CALL srch_ele04 (head, anter, elset, elsnam, oldset) IF (oldset) THEN !if set exists CALL comm04 (1, nelem, nreqs, narch, elsnam, elset) elset%lside = .NOT.elset%nodvo !initializes flag to compute LSIDE ELSE !set ELSNAM does not exist ALLOCATE (elset) !reserve memory for set elset%gauss = .FALSE. !initializes flag to compute Gauss constants CALL listen('INPD04') !read a line nn = getint('NNODE ',4,' NUMBER OF ELEMENT NODES (4 ONLY)..') IF( nn /= nnode )CALL runend('TETRA: NNODE must be 4 ') nreqs=getint('NREQS ',0,' Gauss pt for stress time history..') elset%angdf(1) =getrea('ALPHA ',0d0,' First Euler Angle from X and Ortho') elset%angdf(2) =getrea('BETA ',0d0,' Second Euler Angle from X and Orth') elset%angdf(3) =getrea('GAMMA ',0d0,' Third Euler Angle from X and Ortho') elset%nodvo = .NOT.exists('STANDA') IF( elset%nodvo )THEN WRITE(lures,"(/,5X,'Volume Approximation based on neighbours will be used')") nodvo = .TRUE. stab =getrea('STABIL',stab,' Stabilization factor ') ELSE WRITE(lures,"(/,5X,'STANDARD Approximation (Strain) will be used')") END IF elset%btscal = getrea('BTSCAL',1d0,' Critical time increment scaler ') elset%lside = .NOT.elset%nodvo !initializes flag to compute LSIDE IF(exists('SMALL'))THEN elset%small = .TRUE. WRITE(lures,"(' Green strains will be used if possible')") ELSE elset%small = .FALSE. END IF IF( nreqs > 0 )NULLIFY( elset%ngrqs ) narch = 0 !to check nelem = 0 !new set, initializes number of elements !Initialize empty list Point both pointer to nothing CALL ini_ele04e (elset%head, elset%tail) END IF ! read new data or add to previous data CALL elmd04( nelem, elset%head, elset%tail, iwrit ) elset%plstr = 0 ! do not compute plastic strains IF (.NOT.oldset) CALL rdreqs (ngaus,nreqs, elset%ngrqs, iwrit ) CALL comm04(0, nelem, nreqs, narch, elsnam, elset) ! add to the list of sets IF (.NOT.oldset) THEN CALL add_ele04 (elset, head, tail) nelms = nelms + 1 ! increased set counter for this element type END IF ELSE IF (TRIM(task) == 'RESTAR') THEN ALLOCATE (elset) !initializes a list NULLIFY(elset%head) !nullify head pointer ! read control parameters elset%sname = elsnam READ (51) elset%nelem, elset%nreqs, elset%narch, elset%gauss, & elset%plstr, elset%angdf, elset%btscal, elset%small, elset%nodvo CALL rest04 (elset%nelem, elset%nreqs, elset%head, elset%tail, & elset%ngrqs) ! add to list of elements CALL add_ele04 (elset, head, tail) ELSE CALL runend('INPD04: NON-EXISTENT TASK . ') END IF RETURN END SUBROUTINE inpd04
||| SExpParser stolen from Idris2 sources' `Idris.IDEMode.Parser`. module Bautzen.REPL.SExpParser import Bautzen.SExp import Text.Parser import Text.Lexer import Text.Token import Data.Strings import Data.List %hide Text.Lexer.Core.strTail public export data SExpKind = Ident | Literal | StrLit | Symbol | Comment | EndInput Show a => Show (Token a) where show (Tok k t) = "Tok " ++ show k ++ " " ++ t Show SExpKind where show Ident = "Ident" show Literal = "Literal" show StrLit = "String" show Symbol = "Symbol" show Comment = "Comment" show EndInput = "EOF" TokenKind SExpKind where TokType Ident = String TokType Literal = Integer TokType StrLit = String TokType Symbol = String TokType Comment = () TokType EndInput = () tokValue Ident x = x tokValue StrLit x = x tokValue Literal x = cast x tokValue Symbol x = x tokValue Comment x = () tokValue EndInput x = () public export data ParseError = ParseFail String (Maybe (Int, Int)) (List (Token SExpKind)) | LexFail (Int, Int, String) | LitFail (List Int) export Show ParseError where show (ParseFail err loc toks) = "Parse error: " ++ err ++ " (next tokens: " ++ show (take 10 toks) ++ ")" show (LexFail (c, l, str)) = "Lex error at " ++ show (c, l) ++ " input: " ++ str show (LitFail l) = "Lit error(s) at " ++ show l symbols : List String symbols = ["(", ":", ")"] ident : Lexer ident = pred startIdent <+> many (pred validIdent) where startIdent : Char -> Bool startIdent '_' = True startIdent x = isAlpha x validIdent : Char -> Bool validIdent '_' = True validIdent '-' = True validIdent '\'' = True validIdent '?' = True validIdent '!' = True validIdent x = isAlphaNum x ideTokens : TokenMap (Token SExpKind) ideTokens = map (\x => (exact x, \s => Tok Symbol x)) symbols ++ [ (digits, \x => Tok Literal x) , (stringLit, \x => Tok StrLit (stripQuotes x)) , (ident, \x => Tok Ident x) , (space, \x => Tok Comment x) ] where stripQuotes : String -> String -- ASSUMPTION! Only total because we know we're getting quoted strings. stripQuotes = assert_total (strTail . reverse . strTail . reverse) idelex : String -> Either (Int, Int, String) (List (TokenData (Token SExpKind))) idelex str = case lex ideTokens str of -- Add the EndInput token so that we'll have a line and column -- number to read when storing spans in the file (tok, (l, c, "")) => Right (filter notComment tok ++ [MkToken l c (Tok EndInput "")]) (_, fail) => Left fail where notComment : TokenData (Token SExpKind) -> Bool notComment t = case tok t of Tok Comment _ => False _ => True Rule : Type -> Type Rule ty = Grammar (TokenData (Token SExpKind)) True ty EmptyRule : Type -> Type EmptyRule ty = Grammar (TokenData (Token SExpKind)) False ty eoi : EmptyRule () eoi = do nextIs "Expected end of input" (isEOI . 
tok) pure () where isEOI : Token SExpKind -> Bool isEOI (Tok EndInput _) = True isEOI _ = False intLit : Rule Integer intLit = terminal "Expected integer literal" (\x => case tok x of Tok Literal i => Just (cast i) _ => Nothing) strLit : Rule String strLit = terminal "Expected string literal" (\x => case tok x of Tok StrLit s => Just s _ => Nothing) symbol : String -> Rule () symbol req = terminal ("Expected '" ++ req ++ "'") (\x => case tok x of Tok Symbol s => if s == req then Just () else Nothing _ => Nothing) exactIdent : String -> Rule () exactIdent req = terminal ("Expected " ++ req) (\x => case tok x of Tok Ident s => if s == req then Just () else Nothing _ => Nothing) identPart : Rule String identPart = terminal "Expected name" (\x => case tok x of Tok Ident str => Just str _ => Nothing) sexp : Rule SExp sexp = do i <- fromInteger <$> intLit pure (SInt i) <|> do str <- strLit pure (SStr str) <|> do symbol ":"; x <- identPart pure (SSym x) <|> do symbol "(" xs <- many sexp symbol ")" pure (SList xs) ideParser : String -> Grammar (TokenData (Token SExpKind)) True ty -> Either String ty ideParser str p = case idelex str of Left err => Left $ show $ LexFail err Right toks => case parse p toks of Left (Error err []) => Left $ show $ ParseFail err Nothing [] Left (Error err (t :: ts)) => Left $ show $ ParseFail err (Just (line t, col t)) (map tok (t :: ts)) Right (val, _) => Right val export parseSExp : String -> Either String SExp parseSExp inp = ideParser inp (do c <- sexp; eoi; pure c)
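-- Usage sketch (illustrative; the inputs are made up, and the failure
-- message depends on the Show instances defined above):
--
--   parseSExp "(:load 42 \"map.json\")"
--     ==> Right (SList [SSym "load", SInt 42, SStr "map.json"])
--   parseSExp "(oops"
--     ==> Left <a ParseFail complaining about the missing ")">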
-- Andreas, 2016-09-28, Level meta below neutral level -- Agda previously simplified X <= a to X = a. -- This loses the solution X = lzero. -- {-# OPTIONS -v tc.constr.add:40 #-} open import Common.Level module _ (a : Level) where module WorksWithSwappedDeclarations where mutual X : Level X = _ data E : Set₁ where c : Set X → E -- constraint lsuc X <= 1 solves X = lzero data D : Set (lsuc a) where c : Set X → D -- fine since lzero <= a module WorksWithGivenSolution where mutual X : Level X = lzero data D : Set (lsuc a) where c : Set X → D data E : Set₁ where c : Set X → E module Test where mutual X : Level X = _ data D : Set (lsuc a) where c : Set X → D -- solved X (prematurely) since X <= a implies X = a ?? (Wrong!) data E : Set₁ where c : Set X → E -- constraint X <= 0 became contradictory constraint a <= 0 -- ERROR WAS: -- The type of the constructor does not fit in the sort of the -- datatype, since Set (lsuc a) is not less or equal than Set₁ -- when checking the constructor c in the declaration of E -- should succeed
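-- Informally, why the old simplification was unsound: X <= a has at least
-- two solutions, X = a and X = lzero, because lzero <= a holds for every
-- level a.  Committing to X = a discards X = lzero, the only assignment
-- that also satisfies the later constraint X <= 0 arising from E : Set₁.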
# Copyright (c) 2018-2021, Carnegie Mellon University # See LICENSE for details Class(IdFunc, DiagFunc, rec( range := self >> TInt )); #F fConst(<N>, <c>) - constant diagonal function, <N> values of <c> #F Class(idConst, IdFunc, rec( # def := (N, c) -> Checked(IsPosInt0(N), rec(size:=N)), def := (N, c) -> rec(size:=N), inline := true, lambda := self >> let(i:=Ind(self.size), Lambda(i, self.params[2])), isReal := self >> IsRealNumber(self.params[2]), domain := self >> self.params[1], )); Class(idId, IdFunc, rec( def := size -> rec(size := size), lambda := self >> let(i := Ind(self.size), Lambda(i,i)), #inverse := i->i, transpose := self >> self, domain := self >> self.params[1], )); #NOTE Derive the idTensor from diagTensor Class(idTensor, fTensorBase, rec( print := FuncClassOper.print, # range := self >> UnifyTypes(List(self.children(), x->x.range())), combine_op := (self, jv, split, f, g) >> f.relaxed_at(jv, split[1]) * g.relaxed_at(jv, split[2]), updateRange := self >> UnifyTypes(List(self.children(), x->x.range())), range := self >> self.updateRange(), # combine_op := (self, split, F, G) >> F.at(split[1]) * G.at(split[2]) # idTensor is something else again ??? # combine_op := (self, split, F, G) >> self.child(2).domain() * F.at(split[1]) + G.at(split[2]) )); #Class(idfTensor, fTensorBase, rec( # print := FuncClassOper.print, # updateRange := self >> UnifyTypes(List(self.children(), x->x.range())), # combine_op := (self, split, F, G) >> self.child(2).domain() * F.at(split[1]) + G.at(split[2]) #)); # Class(idCompose, fCompose); Class(Id, Diag, rec( toAMat := self >> IdentityMatAMat(self.element.domain()), domain := self >> self.element.domain() ));
module Cats.Category.One where open import Data.Unit using (⊤ ; tt) open import Level open import Cats.Category One : ∀ lo la l≈ → Category lo la l≈ One lo la l≈ = record { Obj = Lift lo ⊤ ; _⇒_ = λ _ _ → Lift la ⊤ ; _≈_ = λ _ _ → Lift l≈ ⊤ ; id = lift tt ; _∘_ = λ _ _ → lift tt ; equiv = record { refl = lift tt ; sym = λ _ → lift tt ; trans = λ _ _ → lift tt } ; ∘-resp = λ _ _ → lift tt ; id-r = lift tt ; id-l = lift tt ; assoc = lift tt }
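-- Remark (not needed by the code above): every component is the unique
-- inhabitant lift tt, so the single object has a single endomorphism and
-- each law (id-r, id-l, assoc, ∘-resp) holds by the same trivial witness;
-- One is the one-object, one-arrow category at the requested levels.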
(* * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause *) theory SimplRewrite imports "CTranslationNICTA" "Lib.SplitRule" "HOL-Eisbach.Eisbach" begin primrec add_statefn :: "('s \<Rightarrow> 's) \<Rightarrow> ('s, 'x, 'e) com \<Rightarrow> ('s, 'x, 'e) com" where "add_statefn f (Call x) = Call x" | "add_statefn f (Seq c d) = Seq (add_statefn f c) (add_statefn f d)" | "add_statefn f (Catch c d) = Catch (add_statefn f c) (add_statefn f d)" | "add_statefn f Throw = Throw" | "add_statefn f (Guard F S c) = Guard F {s. f s \<in> S} (add_statefn f c)" | "add_statefn f (DynCom c_fn) = DynCom (\<lambda>s. add_statefn f (c_fn (f s)))" | "add_statefn f (While S c) = While {s. f s \<in> S} (add_statefn f c)" | "add_statefn f (Cond S c c') = Cond {s. f s \<in> S} (add_statefn f c) (add_statefn f c')" | "add_statefn f (Spec R) = Spec {(a, b). (f a, f b) \<in> R}" | "add_statefn f (Basic g) = Basic (\<lambda>s. inv f (g (f s)))" | "add_statefn f Skip = Skip" lemma add_statefn_id1: "add_statefn id x = x" by (induct x) auto lemma add_statefn_id[simp]: "add_statefn id = id" by (rule ext, simp add: add_statefn_id1) lemma add_statefn_comp: "\<lbrakk> inv (g o f) = inv f o inv g \<rbrakk> \<Longrightarrow> add_statefn f (add_statefn g x) = add_statefn (g o f) x" by (induct x, simp_all add: o_def) definition "add_statefn_xstate f xs \<equiv> case xs of Normal s \<Rightarrow> Normal (f s) | Abrupt s \<Rightarrow> Abrupt (f s) | _ \<Rightarrow> xs" lemmas add_statefn_xstate_simps[simp] = add_statefn_xstate_def[split_simps xstate.split] lemma isAbr_add_statefn_xstate[simp]: "isAbr (add_statefn_xstate f xs) = isAbr xs" by (cases xs, simp_all) lemma add_statefn_xstate_comp: "add_statefn_xstate f (add_statefn_xstate g xs) = add_statefn_xstate (f o g) xs" by (cases xs, simp_all) lemma add_statefn_xstate_id[simp]: "add_statefn_xstate id = id" by (simp add: add_statefn_xstate_def fun_eq_iff split: xstate.split) lemma add_statefn_exec1: assumes bij: "bij f" shows "\<Gamma> \<turnstile> \<langle>c, xs\<rangle> \<Rightarrow> t \<Longrightarrow> (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>add_statefn (inv f) c, add_statefn_xstate f xs\<rangle> \<Rightarrow> add_statefn_xstate f t" proof (induct rule: exec.induct) case Basic show ?case apply simp apply (rule_tac P="exec G c xs" for G c xs in subst[rotated], rule exec.Basic) apply (simp add: inv_inv_eq bij inv_f_f bij_is_inj) done qed (auto intro: exec.intros simp: inv_f_f[OF bij_is_inj, OF bij] surj_f_inv_f[OF bij_is_surj, OF bij]) lemma add_statefn_exec: assumes bij: "bij f" shows "\<Gamma> \<turnstile> \<langle>add_statefn f c, xs\<rangle> \<Rightarrow> t = (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>c, add_statefn_xstate f xs\<rangle> \<Rightarrow> add_statefn_xstate f t" apply (rule iffI) apply (drule add_statefn_exec1[OF bij]) apply (simp add: add_statefn_comp surj_iff[THEN iffD1] bij_is_surj[OF bij] inv_inv_eq bij) apply (drule add_statefn_exec1[OF bij_imp_bij_inv, OF bij]) apply (simp add: inv_inv_eq bij add_statefn_xstate_comp bij_is_inj[OF bij]) apply (simp add: o_def option.map_comp add_statefn_comp surj_iff[THEN iffD1] bij_is_surj[OF bij]) apply (simp add: add_statefn_comp inj_iff[THEN iffD1] bij_is_inj[OF bij] inv_inv_eq bij) apply (simp add: map_option_case) done definition exec_simulates :: "'s set \<Rightarrow> 's set \<Rightarrow> ('s, 'x, 'e) com \<Rightarrow> ('s, 'x, 'e) com \<Rightarrow> bool" where "exec_simulates S T a b = (\<forall>s \<in> S. 
\<forall>\<Gamma> t. \<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t \<longrightarrow> \<Gamma> \<turnstile> \<langle>b, Normal s\<rangle> \<Rightarrow> t \<or> (\<exists>ft. \<Gamma> \<turnstile> \<langle>b, Normal s\<rangle> \<Rightarrow> Fault ft) \<or> (\<exists>t' \<in> - T. \<Gamma> \<turnstile> \<langle>b, Normal s\<rangle> \<Rightarrow> Normal t'))" lemma exec_simulates_refl: "exec_simulates S T c c" by (simp add: exec_simulates_def) lemma exec_simulatesD: "\<lbrakk> \<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t; exec_simulates S T a b; s \<in> S \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> \<langle>b, Normal s\<rangle> \<Rightarrow> t \<or> (\<exists>ft. \<Gamma> \<turnstile> \<langle>b, Normal s\<rangle> \<Rightarrow> Fault ft) \<or> (\<exists>t' \<in> - T. \<Gamma> \<turnstile> \<langle>b, Normal s\<rangle> \<Rightarrow> Normal t')" unfolding exec_simulates_def by auto definition spec_simulates :: "('x \<rightharpoonup> ('s, 'x, 'e) com) \<Rightarrow> ('x \<rightharpoonup> ('s, 'x, 'e) com) \<Rightarrow> bool" where "spec_simulates G G' = (\<forall>x. (G x = None) = (G' x = None) \<and> (\<forall>b b'. G x = Some b \<and> G' x = Some b' \<longrightarrow> exec_simulates UNIV UNIV b b'))" lemma spec_simulates_to_exec_simulates: "\<lbrakk> G \<turnstile> \<langle>a, xs\<rangle> \<Rightarrow> t; spec_simulates G G' \<rbrakk> \<Longrightarrow> G' \<turnstile> \<langle>a, xs\<rangle> \<Rightarrow> t \<or> (\<exists>ft. G' \<turnstile> \<langle>a, xs\<rangle> \<Rightarrow> Fault ft)" proof (induct rule: exec.induct) case (Call p bdy s t) show ?case using Call apply clarsimp apply (frule_tac x=p in spec_simulates_def[THEN iffD1, rule_format]) apply (clarsimp simp: exec_simulates_def) apply (rule exec.Call, simp) apply (blast intro: exec.intros) done next case (CallUndefined p) show ?case using CallUndefined apply clarsimp apply (frule_tac x=p in spec_simulates_def[THEN iffD1, rule_format]) apply (fastforce intro: exec.CallUndefined) done qed (auto intro: exec.intros, (force intro: exec.intros)+) theorem spec_simulates_refinement: "\<lbrakk> spec_simulates G G'; exec_simulates P Q a b; G' \<turnstile> P b Q, A \<rbrakk> \<Longrightarrow> G \<turnstile> P a Q, A" apply (drule hoare_sound) apply (rule hoare_complete) apply (clarsimp simp: HoarePartialDef.cvalid_def HoarePartialDef.valid_def) apply (rule ccontr) apply (drule(1) exec_simulatesD, simp) apply ((auto | drule(1) spec_simulates_to_exec_simulates)+) done definition exec_statefn_simulates :: "('s \<Rightarrow> 's) \<Rightarrow> 's set \<Rightarrow> 's set \<Rightarrow> ('s, 'x, 'e) com \<Rightarrow> ('s, 'x, 'e) com \<Rightarrow> bool" where "exec_statefn_simulates f S T a b = (\<forall>s \<in> S. \<forall>\<Gamma> t. \<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t \<longrightarrow> (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>b, Normal (f s)\<rangle> \<Rightarrow> add_statefn_xstate f t \<or> (\<exists>ft. (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>b, Normal (f s)\<rangle> \<Rightarrow> Fault ft) \<or> (\<exists>t' \<in> - T. 
(map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>b, Normal (f s)\<rangle> \<Rightarrow> Normal (f t')))" lemma exec_statefn_simulatesD: "\<lbrakk> \<Gamma> \<turnstile> \<langle>a, Normal s\<rangle> \<Rightarrow> t; exec_statefn_simulates f S T a b; s \<in> S \<rbrakk> \<Longrightarrow> (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>b, Normal (f s)\<rangle> \<Rightarrow> add_statefn_xstate f t \<or> (\<exists>ft. (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>b, Normal (f s)\<rangle> \<Rightarrow> Fault ft) \<or> (\<exists>t' \<in> - T. (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>b, Normal (f s)\<rangle> \<Rightarrow> Normal (f t'))" unfolding exec_statefn_simulates_def by auto lemmas exec_statefn_simulatesI = exec_statefn_simulates_def[THEN iffD2, rule_format] lemma exec_statefn_simulates_refl: "exec_statefn_simulates id S T c c" by (simp add: exec_statefn_simulates_def map_option.id) lemma exec_statefn_simulates_via_statefn: "bij f \<Longrightarrow> exec_statefn_simulates f S T a b = exec_simulates S T a (add_statefn f b)" apply (simp add: exec_statefn_simulates_def exec_simulates_def) apply (simp add: add_statefn_exec bij_imp_bij_inv) done definition "spec_statefn_simulates f G G' = (\<forall>x. (G x = None) = (G' x = None) \<and> (\<forall>b b'. G x = Some b \<and> G' x = Some b' \<longrightarrow> exec_statefn_simulates f UNIV UNIV b b'))" lemma spec_statefn_simulates_via_statefn: "bij f \<Longrightarrow> spec_statefn_simulates f G G' = spec_simulates G (map_option (add_statefn f) o G')" apply (simp add: spec_statefn_simulates_def spec_simulates_def) apply (rule arg_cong[where f=All, OF ext]) apply (rule HOL.conj_cong[OF refl]) apply (safe, simp_all add: exec_statefn_simulates_via_statefn) done theorem spec_statefn_simulates_refinement: "\<lbrakk> spec_statefn_simulates f G G'; exec_statefn_simulates f {s. f s \<in> P} {s. f s \<in> Q} a b; G' \<turnstile> P b Q, A; bij f \<rbrakk> \<Longrightarrow> G \<turnstile> {s. f s \<in> P} a {s. f s \<in> Q}, {s. 
f s \<in> A}" apply (simp add: spec_statefn_simulates_via_statefn exec_statefn_simulates_via_statefn) apply (erule spec_simulates_refinement) apply (simp add: Compl_Collect) apply (drule hoare_sound) apply (rule hoare_complete) apply (clarsimp simp: HoarePartialDef.cvalid_def HoarePartialDef.valid_def add_statefn_exec) apply (simp add: o_def option.map_comp) apply (simp add: add_statefn_comp surj_iff[THEN iffD1, OF bij_is_surj] inv_inv_eq) apply (simp add: map_option_case) apply (case_tac t, auto) done primrec com_initial_guards :: "('s, 'x, 'e) com \<Rightarrow> 's set" where "com_initial_guards (a ;; b) = com_initial_guards a" | "com_initial_guards (Guard F G c) = G \<inter> com_initial_guards c" | "com_initial_guards Skip = UNIV" | "com_initial_guards Throw = UNIV" | "com_initial_guards (Basic f) = UNIV" | "com_initial_guards (Spec r) = UNIV" | "com_initial_guards (Cond S a b) = UNIV" | "com_initial_guards (While S c) = UNIV" | "com_initial_guards (Call f) = UNIV" | "com_initial_guards (DynCom fn) = UNIV" | "com_initial_guards (Catch a b) = UNIV" lemma com_initial_guards_extra_simps[simp]: "com_initial_guards (whileAnno S I V c) = UNIV" "com_initial_guards (creturn exn_upd rv_upd rv) = UNIV" "com_initial_guards (creturn_void exn_upd) = UNIV" "com_initial_guards (call init f ret save) = UNIV" "com_initial_guards (cbreak exn_upd) = UNIV" "com_initial_guards (ccatchbrk exn) = UNIV" by (simp_all add: whileAnno_def creturn_def creturn_void_def call_def block_def cbreak_def ccatchbrk_def) lemmas com_initial_guards_all_simps = com_initial_guards.simps com_initial_guards_extra_simps primrec com_final_guards :: "'s set \<Rightarrow> ('s, 'x, 'e) com \<Rightarrow> 's set" where "com_final_guards S (a ;; b) = com_final_guards UNIV b" | "com_final_guards S (Guard F G c) = com_final_guards (S \<inter> G) c" | "com_final_guards S Skip = S" | "com_final_guards S Throw = UNIV" | "com_final_guards S (Basic f) = UNIV" | "com_final_guards S (Spec r) = UNIV" | "com_final_guards S (Cond C a b) = UNIV" | "com_final_guards S (While C c) = UNIV" | "com_final_guards S (Call f) = UNIV" | "com_final_guards S (DynCom fn) = UNIV" | "com_final_guards S (Catch a b) = UNIV" lemma com_final_guards_extra_simps[simp]: "com_final_guards S (whileAnno C I V c) = UNIV" "com_final_guards S (creturn exn_upd rv_upd rv) = UNIV" "com_final_guards S (creturn_void exn_upd) = UNIV" "com_final_guards S (call init f ret save) = UNIV" "com_final_guards S (cbreak exn_upd) = UNIV" "com_final_guards S (ccatchbrk exn) = UNIV" by (simp_all add: whileAnno_def creturn_def creturn_void_def call_def block_def cbreak_def ccatchbrk_def) lemmas com_final_guards_all_simps = com_final_guards.simps com_final_guards_extra_simps lemma exec_not_in_initial_guards: "\<lbrakk> s \<notin> com_initial_guards c \<rbrakk> \<Longrightarrow> \<exists>ft. \<Gamma> \<turnstile> \<langle>c, Normal s\<rangle> \<Rightarrow> Fault ft" apply (induct c, simp_all) apply clarsimp apply (blast intro: exec.Seq exec.FaultProp) apply (blast intro: exec.GuardFault exec.Guard) done lemma exec_in_final_guards_induct: "\<lbrakk> \<Gamma> \<turnstile> \<langle>c, x\<rangle> \<Rightarrow> y \<rbrakk> \<Longrightarrow> \<forall>s t S. 
x = Normal s \<and> y = Normal t \<and> s \<in> S \<longrightarrow> t \<in> com_final_guards S c" apply (induct rule: exec.induct, simp_all) apply (case_tac s', simp_all) apply (auto elim: exec_Normal_elim_cases) done lemma exec_in_final_guards: "\<lbrakk> \<Gamma> \<turnstile> \<langle>c, Normal s\<rangle> \<Rightarrow> Normal t \<rbrakk> \<Longrightarrow> t \<in> com_final_guards UNIV c" by (drule exec_in_final_guards_induct, simp) lemma exec_statefn_simulates_Seq: "\<lbrakk> exec_statefn_simulates f S {s. f s \<in> com_initial_guards d} a b; exec_statefn_simulates f UNIV T c d \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (a ;; c) (b ;; d)" apply (rule exec_statefn_simulatesI) apply (erule exec.cases, simp_all) apply clarsimp apply (drule(2) exec_statefn_simulatesD) apply (elim disjE exE) apply (case_tac s', simp_all)[1] apply (drule(1) exec_statefn_simulatesD, simp) apply (auto intro: exec.Seq)[1] apply ((force elim: exec.Seq exec.cases notE)+)[4] apply clarsimp apply (rule ccontr, frule_tac \<Gamma>="map_option (add_statefn (inv f)) \<circ> \<Gamma>" in exec_not_in_initial_guards, clarsimp) apply (blast intro: exec.Seq) done lemma exec_statefn_simulates_Cond: "\<lbrakk> \<And>s. s \<in> S \<Longrightarrow> (s \<in> C) = (f s \<in> C'); exec_statefn_simulates f (S \<inter> C) T a b; exec_statefn_simulates f (S \<inter> - C) T c d \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (Cond C a c) (Cond C' b d)" apply atomize apply (rule exec_statefn_simulatesI) apply (erule exec.cases, simp_all) apply clarsimp apply (drule spec, drule(1) mp, simp) apply (drule(1) exec_statefn_simulatesD, simp) apply (auto intro: exec.CondTrue)[1] apply clarsimp apply (drule spec, drule(1) mp, simp) apply (drule(1) exec_statefn_simulatesD, simp) apply (auto intro: exec.CondFalse)[1] done lemma exec_While_not_in_state_lemma: "\<lbrakk> \<forall>t'\<in>- T. \<not> \<Gamma> \<turnstile> \<langle>While C' b,Normal s\<rangle> \<Rightarrow> Normal (f t'); \<forall>ft. \<not> \<Gamma> \<turnstile> \<langle>While C' b,Normal s\<rangle> \<Rightarrow> Fault ft \<rbrakk> \<Longrightarrow> (s \<in> com_initial_guards b \<or> s \<notin> f ` (- T))" apply (rule ccontr, clarsimp) apply (drule_tac \<Gamma>=\<Gamma> in exec_not_in_initial_guards) apply (blast intro: exec.WhileTrue exec.WhileFalse) done lemma exec_statefn_simulates_While_lemma: assumes sim: "exec_statefn_simulates f C {s. f s \<in> S \<and> (f s \<in> com_initial_guards b \<or> f s \<notin> f ` (- T))} a b" assumes eq: "\<And>s. \<lbrakk> f s \<in> S; f s \<in> com_initial_guards b \<or> f s \<notin> f ` (- T) \<rbrakk> \<Longrightarrow> s \<in> C = (f s \<in> C')" assumes subs: "com_final_guards UNIV b \<subseteq> S" shows "\<lbrakk> \<Gamma> \<turnstile> \<langle>bdy, xs\<rangle> \<Rightarrow> t \<rbrakk> \<Longrightarrow> \<forall>s. bdy = While C a \<and> xs = Normal s \<and> f s \<in> S \<longrightarrow> (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>While C' b, Normal (f s)\<rangle> \<Rightarrow> add_statefn_xstate f t \<or> (\<exists>ft. (map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>While C' b,Normal (f s)\<rangle> \<Rightarrow> Fault ft) \<or> (\<exists>t' \<in> - T. 
(map_option (add_statefn (inv f)) o \<Gamma>) \<turnstile> \<langle>While C' b,Normal (f s)\<rangle> \<Rightarrow> Normal (f t'))" apply (induct rule: exec.induct, simp_all) apply clarsimp apply (rule ccontr) apply (frule exec_While_not_in_state_lemma, simp) apply (drule(1) eq[rotated]) apply (drule(1) exec_statefn_simulatesD[OF _ sim]) apply (simp add: o_def) apply (elim disjE exE) apply (case_tac s', simp_all) apply (blast intro: exec.WhileTrue exec_in_final_guards[THEN subsetD[OF subs]])[1] apply (erule exec.cases, simp_all)[1] apply (blast intro: exec.WhileTrue)[1] apply (erule exec.cases, simp_all)[1] apply (blast intro: exec.WhileTrue) apply (erule exec.cases, simp_all)[1] apply (blast intro: exec.WhileTrue)[1] apply (case_tac s', simp_all) apply (blast intro: exec.WhileTrue) apply (erule exec.cases, simp_all)[1] apply (blast intro: exec.WhileTrue) apply (erule exec.cases, simp_all) apply (blast intro: exec.WhileTrue) apply (erule exec.cases, simp_all)[1] apply (blast intro: exec.WhileTrue) apply (clarsimp simp: Bex_def exec_in_final_guards[THEN subsetD[OF subs]]) apply (drule_tac \<Gamma>="map_option (add_statefn (inv f)) \<circ> \<Gamma>" in exec_not_in_initial_guards) apply (clarsimp simp: o_def) apply (blast intro: exec.WhileTrue exec.WhileFalse) apply clarsimp apply (rule ccontr, frule exec_While_not_in_state_lemma, simp) apply (cut_tac s=s in eq) apply (auto intro: exec.WhileFalse) done lemma exec_statefn_simulates_While: assumes bij: "bij f" shows "\<lbrakk> \<And>s. \<lbrakk> s \<in> S \<or> f s \<in> com_final_guards UNIV b; f s \<in> com_initial_guards b \<or> s \<in> T \<rbrakk> \<Longrightarrow> s \<in> C = (f s \<in> C'); exec_statefn_simulates f C {s. (s \<in> S \<or> f s \<in> com_final_guards UNIV b) \<and> (f s \<in> com_initial_guards b \<or> s \<in> T)} a b \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (While C a) (While C' b)" apply (rule exec_statefn_simulatesI) apply (rule_tac S="f ` S \<union> com_final_guards UNIV b" in exec_statefn_simulates_While_lemma[rule_format]) apply (auto simp add: inj_image_mem_iff[OF bij_is_inj, OF bij]) done lemma exec_statefn_simulates_Catch: "\<lbrakk> exec_statefn_simulates f S UNIV a b; exec_statefn_simulates f UNIV T c d \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (Catch a c) (Catch b d)" apply (rule exec_statefn_simulatesI) apply (erule exec.cases, simp_all) apply clarsimp apply (drule(2) exec_statefn_simulatesD) apply (elim disjE exE) apply (drule(1) exec_statefn_simulatesD, simp) apply (auto intro: exec.intros)[1] apply (fastforce intro: exec.intros) apply (fastforce intro: exec.intros) apply (drule(2) exec_statefn_simulatesD) apply (fastforce intro: exec.intros) done lemma exec_statefn_simulates_Guard_rhs: "exec_statefn_simulates f (S \<inter> {s. f s \<in> G}) T a b \<Longrightarrow> exec_statefn_simulates f S T a (Guard E G b)" apply (rule exec_statefn_simulatesI) apply (case_tac "f s \<in> G") apply (drule(1) exec_statefn_simulatesD, simp) apply (auto intro: exec.intros) done lemma exec_statefn_simulates_Guard_lhs: "\<lbrakk> S \<subseteq> G; exec_statefn_simulates f S T a b \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (Guard E G a) b" apply (rule exec_statefn_simulatesI) apply (erule exec.cases, simp_all) apply (drule(1) exec_statefn_simulatesD, simp) apply (auto intro: exec.intros) done lemmas exec_statefn_simulates_whileAnno = exec_statefn_simulates_While[folded whileAnno_def[where I=I and V=V]] for I V lemma exec_statefn_simulates_Basic: "\<lbrakk> \<And>s. 
\<lbrakk> s \<in> S; g (fn s) \<notin> fn ` (- T) \<rbrakk> \<Longrightarrow> fn (f s) = g (fn s) \<rbrakk> \<Longrightarrow> exec_statefn_simulates fn S T (Basic f) (Basic g)" apply atomize apply (rule exec_statefn_simulatesI) apply (erule exec.cases, simp_all, clarsimp) apply (drule spec, drule(1) mp) apply (drule mp) apply clarsimp apply (metis exec.Basic Compl_iff) apply clarsimp apply (blast intro: exec.Basic) done lemma exec_statefn_simulates_Call: "bij f \<Longrightarrow> exec_statefn_simulates f S T (Call c) (Call c)" apply (rule exec_statefn_simulatesI) apply (intro disjI1) apply (erule exec.cases, simp_all) apply (rule exec.intros, simp) apply (simp add: add_statefn_exec bij_imp_bij_inv option.map_comp o_def inv_inv_eq) apply (simp add: add_statefn_comp inj_iff[THEN iffD1, OF bij_is_inj] inv_inv_eq bij_imp_bij_inv map_option_case inv_f_f[OF bij_is_inj] add_statefn_xstate_comp) apply (fastforce intro: exec.intros) done lemma exec_statefn_simulates_DynCom: "\<lbrakk> \<And>s. s \<in> S \<Longrightarrow> exec_statefn_simulates f S T (g s) (h (f s)) \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (DynCom g) (DynCom h)" apply atomize apply (clarsimp simp add: exec_statefn_simulates_def) apply (erule exec.cases, simp_all) apply (fastforce intro: exec.intros) done lemma exec_statefn_simulates_Skip_Throw: "exec_statefn_simulates f S T Skip Skip" "exec_statefn_simulates f S T Throw Throw" apply (simp_all add: exec_statefn_simulates_def) apply (fastforce elim: exec.cases intro: exec.intros)+ done lemma exec_statefn_simulates_call: "\<lbrakk> bij f; \<And>s. s \<in> S \<Longrightarrow> f (init1 s) = init2 (f s); \<And>s t. f (ret1 s t) = ret2 (f s) (f t); \<And>s t. exec_statefn_simulates f UNIV T (save1 s t) (save2 (f s) (f t)) \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (call init1 c ret1 save1) (call init2 c ret2 save2)" apply (simp add: call_def block_def) apply (intro exec_statefn_simulates_Seq exec_statefn_simulates_Catch exec_statefn_simulates_DynCom exec_statefn_simulates_Basic exec_statefn_simulates_Call exec_statefn_simulates_Skip_Throw) apply simp+ done lemma exec_statefn_simulates_creturn_void: "\<lbrakk> \<And>inn s. s \<in> S \<Longrightarrow> f (exn_upd inn s) = exn_upd' inn (f s) \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (creturn_void exn_upd) (creturn_void exn_upd')" apply (simp add: creturn_void_def) apply (intro exec_statefn_simulates_Seq exec_statefn_simulates_Basic exec_statefn_simulates_Skip_Throw | simp)+ done lemma exec_statefn_simulates_creturn: "\<lbrakk> \<And>inn s. f (exn_upd inn s) = exn_upd' inn (f s); \<And>inn s. s \<in> S \<Longrightarrow> f (rv_upd inn s) = rv_upd' inn (f s); \<And>inn s. s \<in> S \<Longrightarrow> rv s = rv' (f s) \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (creturn exn_upd rv_upd rv) (creturn exn_upd' rv_upd' rv')" apply (simp add: creturn_def) apply (intro exec_statefn_simulates_Seq exec_statefn_simulates_Basic exec_statefn_simulates_Skip_Throw | simp)+ done lemma exec_statefn_simulates_cbreak: "\<lbrakk> \<And>inn s. s \<in> S \<Longrightarrow> f (exn_upd inn s) = exn_upd' inn (f s) \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (cbreak exn_upd) (cbreak exn_upd')" apply (simp add: cbreak_def) apply (intro exec_statefn_simulates_Seq exec_statefn_simulates_Basic exec_statefn_simulates_Skip_Throw | simp)+ done lemma exec_statefn_simulates_ccatchbrk: "\<lbrakk> \<And>s. 
s \<in> S \<Longrightarrow> exn' (f s) = exn s \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (ccatchbrk exn) (ccatchbrk exn')" apply (simp add: ccatchbrk_def) apply (intro exec_statefn_simulates_Cond exec_statefn_simulates_Skip_Throw | simp)+ done lemma exec_statefn_simulates_Spec: "\<lbrakk> bij f; \<And>s. \<lbrakk> s \<in> S; \<forall>t. (f s, f t) \<in> R' \<longrightarrow> t \<in> T \<rbrakk> \<Longrightarrow> \<forall>t. ((s, t) \<in> R) = ((f s, f t) \<in> R') \<rbrakk> \<Longrightarrow> exec_statefn_simulates f S T (Spec R) (Spec R')" apply (rule exec_statefn_simulatesI) apply (erule exec_Normal_elim_cases, simp_all) apply (blast intro: exec.Spec) apply (case_tac "\<forall>t. (f s, f t) \<in> R' \<longrightarrow> t \<in> T") apply clarsimp apply (subgoal_tac "\<forall>t. (f s, f (inv f t)) \<notin> R'") apply (simp add: surj_f_inv_f bij_is_surj) apply (blast intro: exec.SpecStuck) apply clarsimp apply (blast intro: exec.Spec) done lemmas exec_statefn_simulates_comI = exec_statefn_simulates_refl exec_statefn_simulates_Seq exec_statefn_simulates_Cond exec_statefn_simulates_While exec_statefn_simulates_whileAnno exec_statefn_simulates_Catch exec_statefn_simulates_Guard_rhs exec_statefn_simulates_Guard_lhs exec_statefn_simulates_Call exec_statefn_simulates_call exec_statefn_simulates_Skip_Throw exec_statefn_simulates_Basic exec_statefn_simulates_creturn exec_statefn_simulates_creturn_void exec_statefn_simulates_cbreak exec_statefn_simulates_ccatchbrk exec_statefn_simulates_Spec lemma exec_statefn_simulates_additional_Guards: "exec_statefn_simulates f S T a (b ;; Guard F (G \<inter> G') c) \<Longrightarrow> exec_statefn_simulates f S T a (b ;; Guard F G (Guard F' G' c))" apply (rule exec_statefn_simulatesI) apply (drule(2) exec_statefn_simulatesD) apply (elim disjE exE) apply (erule exec_Normal_elim_cases) apply (case_tac s', auto elim!: exec_Normal_elim_cases, (blast intro: exec.Seq exec.Guard exec.GuardFault)+)[1] apply (erule exec_Normal_elim_cases) apply (case_tac s', auto elim!: exec_Normal_elim_cases, (blast intro: exec.Seq exec.Guard exec.GuardFault)+)[1] apply (clarsimp elim!: exec_Normal_elim_cases) apply (case_tac s', auto elim!: exec_Normal_elim_cases, (blast intro: exec.Seq exec.Guard exec.GuardFault)+)[1] done lemma exec_statefn_simulates_additional_Guarded_Skip: "exec_statefn_simulates f S (T \<inter> {s. f s \<in> G}) a b \<Longrightarrow> exec_statefn_simulates f S T a (b ;; Guard F G Skip)" apply (rule exec_statefn_simulatesI) apply (drule(2) exec_statefn_simulatesD) apply (elim disjE exE) apply (case_tac t, auto elim!: exec_Normal_elim_cases, (blast intro: exec.Seq exec.Skip exec.Guard exec.GuardFault)+)[1] apply (case_tac t, auto elim!: exec_Normal_elim_cases, (blast intro: exec.Seq exec.Skip exec.Guard exec.GuardFault)+)[1] apply (blast intro: exec.Seq exec.Skip exec.Guard exec.GuardFault) done lemmas exec_statefn_simulates_additionals = exec_statefn_simulates_additional_Guarded_Skip exec_statefn_simulates_additional_Guards inductive guards_adjust_by_invariant :: "'s set \<Rightarrow> 's set \<Rightarrow> ('s, 'x, 'e) com \<Rightarrow> ('s, 'x, 'e) com \<Rightarrow> bool" where gabi_Skip: "guards_adjust_by_invariant S T Skip Skip" | gabi_Guard: "\<lbrakk> S \<inter> T \<inter> G = S \<inter> T \<inter> G'; guards_adjust_by_invariant S (T \<inter> G) c c' \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (Guard F G c) (Guard F G' c')" | gabi_Basic: "\<lbrakk> \<And>s. 
\<lbrakk> s \<in> S; s \<in> T \<rbrakk> \<Longrightarrow> f s \<in> S \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (Basic f) (Basic f)" | gabi_Spec: "\<lbrakk> \<And>s t. \<lbrakk> s \<in> S; s \<in> T; (s, t) \<in> R \<rbrakk> \<Longrightarrow> t \<in> S \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (Spec R) (Spec R)" | gabi_Seq: "\<lbrakk> guards_adjust_by_invariant S T c d; guards_adjust_by_invariant S UNIV c' d' \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (Seq c c') (Seq d d')" | gabi_Cond: "\<lbrakk> guards_adjust_by_invariant S T c d; guards_adjust_by_invariant S T c' d' \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (Cond C c c') (Cond C d d')" | gabi_While: "\<lbrakk> guards_adjust_by_invariant S UNIV c d \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (While C c) (While C d)" | gabi_Call :"guards_adjust_by_invariant S T (Call proc) (Call proc)" | gabi_Dyncom :"\<lbrakk> \<And>s. \<lbrakk> s \<in> S; s \<in> T \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (f s) (f' s) \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (DynCom f) (DynCom f')" | gabi_Throw: "guards_adjust_by_invariant S T Throw Throw" | gabi_Catch: "\<lbrakk> guards_adjust_by_invariant S T c d; guards_adjust_by_invariant S UNIV c' d' \<rbrakk> \<Longrightarrow> guards_adjust_by_invariant S T (Catch c c') (Catch d d')" definition context_gabi :: "'s set \<Rightarrow> ('x \<rightharpoonup> ('s, 'x, 'e) com) \<Rightarrow> ('x \<rightharpoonup> ('s, 'x, 'e) com) \<Rightarrow> bool" where "context_gabi S G G' = (\<forall>x. (G x = None) = (G' x = None) \<and> (G x \<noteq> None \<longrightarrow> guards_adjust_by_invariant S UNIV (the (G x)) (the (G' x))))" definition xstate_inv_set :: "'s set \<Rightarrow> ('s, 'e) xstate set" where "xstate_inv_set S = {xs. case xs of Normal s \<Rightarrow> s \<in> S | Abrupt s \<Rightarrow> s \<in> S | _ \<Rightarrow> True}" lemmas xstate_inv_set_simps = xstate_inv_set_def[THEN eqset_imp_iff, simplified, split_simps xstate.split] lemma xstate_inv_set_UNIV: "xstate_inv_set UNIV = UNIV" by (simp add: xstate_inv_set_def split: xstate.split) method gs_simple_cases = (simp_all add: xstate_inv_set_simps, ((erule guards_adjust_by_invariant.cases, simp_all)[1], clarsimp simp: xstate_inv_set_simps, (fastforce intro: exec.intros guards_adjust_by_invariant.intros)[1])+) method gs_case methods m uses g_def = ((erule guards_adjust_by_invariant.cases; simp), clarsimp simp: g_def xstate_inv_set_simps, m, auto intro: exec.intros guards_adjust_by_invariant.intros)[1] lemma gabi_simulation: "\<lbrakk> G \<turnstile> \<langle>c, xs\<rangle> \<Rightarrow> xs'; guards_adjust_by_invariant S T c c'; xs \<in> xstate_inv_set (S \<inter> T); context_gabi S G G' \<rbrakk> \<Longrightarrow> G' \<turnstile> \<langle>c', xs\<rangle> \<Rightarrow> xs' \<and> xs' \<in> xstate_inv_set S" proof (induct arbitrary: c' T rule: exec.induct) case (Call proc bdy s t) show ?case using Call.prems Call.hyps by - (gs_case \<open>drule_tac x=proc in spec\<close> g_def: context_gabi_def) next case (CallUndefined proc s t) show ?case using CallUndefined.prems CallUndefined.hyps by - (gs_case \<open>drule_tac x=proc in spec\<close> g_def: context_gabi_def) next case (WhileTrue s S c s' t) show ?case using WhileTrue.prems WhileTrue.hyps by - (gs_case \<open>(erule_tac x=UNIV in meta_allE)+\<close>) qed gs_simple_cases end
import order.complete_lattice import order.fixed_points import data.set.lattice import syntax -- Definition 2, page 6 structure kripkeModel (W : Type) : Type := (val : W → char → Prop) (rel : W → W → Prop) -- Definition 3, page 6 def evaluate {W : Type} : (kripkeModel W × W) → formula → Prop | (M,w) ⊥ := false | (M,w) (· p) := M.val w p | (M,w) (~ φ) := ¬ evaluate (M,w) φ | (M,w) (φ ⋏ ψ) := evaluate (M,w) φ ∧ evaluate (M,w) ψ | (M,w) (□ φ) := ∀ v : W, (M.rel w v → evaluate (M,v) φ) def tautology (φ : formula) := ∀ W (M : kripkeModel W) w, evaluate (M,w) φ def contradiction (φ : formula) := ∀ W (M : kripkeModel W) w, ¬ evaluate (M,w) φ -- Definition 4, page 8 -- Definition 5, page 9 class has_sat (α : Type) := (satisfiable : α → Prop) open has_sat instance form_has_sat : has_sat formula := has_sat.mk (λ ϕ, ∃ W (M : kripkeModel W) w, evaluate (M,w) ϕ) instance set_has_sat : has_sat (finset formula) := has_sat.mk (λ X, ∃ W (M : kripkeModel W) w, (∀ φ ∈ X, evaluate (M,w) φ)) lemma notsatisfnotThenTaut : ∀ φ, ¬ satisfiable (~φ) → tautology φ := begin intro phi, unfold satisfiable, unfold tautology, simp, intro lhs, intros W M w, specialize lhs W M w, unfold evaluate at *, simp at lhs, exact lhs, end @[simp] lemma singletonSat_iff_sat : ∀ φ, satisfiable ({ φ } : finset formula) ↔ satisfiable φ := begin intro phi, unfold satisfiable, simp, end lemma tautImp_iff_comboNotUnsat {ϕ ψ} : tautology (ϕ ↣ ψ) ↔ ¬satisfiable ({ϕ, ~ψ} : finset formula) := begin unfold tautology, unfold satisfiable, simp, split ; { intro hyp, intros W M w, specialize hyp W M w, intro sat_phi, unfold evaluate at *, simp at *, tauto, }, end def semImplies_sets (X : finset formula) (Y : finset formula) := ∀ (W : Type) (M : kripkeModel W) w, (∀ φ ∈ X, evaluate (M,w) φ) → (∀ ψ ∈ Y, evaluate (M,w) ψ) -- Definition 5, page 9 class vDash {α : Type} {β : Type} := (semImplies : α → β → Prop) open vDash @[simp] instance model_canSemImply_form {W : Type} : vDash := vDash.mk (@evaluate W) @[simp] instance model_canSemImply_set {W : Type} : vDash := @vDash.mk (kripkeModel W × W) (finset formula) (λ Mw X, ∀ f ∈ X, @evaluate W Mw f) instance set_canSemImply_set : vDash := vDash.mk semImplies_sets instance set_canSemImply_form : vDash := vDash.mk (λ X ψ, semImplies_sets X {ψ}) instance form_canSemImply_set : vDash := vDash.mk (λ φ X, semImplies_sets {φ} X) instance form_canSemImply_form : vDash := vDash.mk (λ φ ψ, semImplies_sets {φ} {ψ}) infixl `⊨`:40 := semImplies infixl `⊭`:40 := λ a b, ¬ (a ⊨ b) -- useful lemmas to connect all the different ⊨ cases lemma forms_to_sets { φ ψ : formula } : φ ⊨ ψ → ({φ}: finset formula) ⊨ ({ψ} : finset formula):= begin intros impTaut, intros W M w lhs ψ1 psi1_in_setpsi, specialize impTaut W M w, simp at *, subst psi1_in_setpsi, apply impTaut, exact lhs end
lemma BfunE: assumes "Bfun f F" obtains B where "0 < B" and "eventually (\<lambda>x. norm (f x) \<le> B) F"
Load LFindLoad. From lfind Require Import LFind. From QuickChick Require Import QuickChick. From adtind Require Import goal33. Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed. Lemma conj29synthconj6 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (Succ (plus (mult lv0 lv1) (plus lv1 lv0))) (plus (mult lv2 lv0) lv2)). Admitted. QuickChick conj29synthconj6.
%% figuremax % Below is a demonstration of the features of the |figuremax| function %% clear; close all; clc; %% Syntax % |[varargout]=figuremax(varargin);| %% Description % UNDOCUMENTED %% Examples % %% % % <<gibbVerySmall.gif>> % % _*GIBBON*_ % <www.gibboncode.org> % % _Kevin Mattheus Moerman_, <[email protected]> %% % _*GIBBON footer text*_ % % License: <https://github.com/gibbonCode/GIBBON/blob/master/LICENSE> % % GIBBON: The Geometry and Image-based Bioengineering add-On. A toolbox for % image segmentation, image-based modeling, meshing, and finite element % analysis. % % Copyright (C) 2006-2022 Kevin Mattheus Moerman and the GIBBON contributors % % This program is free software: you can redistribute it and/or modify % it under the terms of the GNU General Public License as published by % the Free Software Foundation, either version 3 of the License, or % (at your option) any later version. % % This program is distributed in the hope that it will be useful, % but WITHOUT ANY WARRANTY; without even the implied warranty of % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the % GNU General Public License for more details. % % You should have received a copy of the GNU General Public License % along with this program. If not, see <http://www.gnu.org/licenses/>.
satis_distort = 0; while ~satis_distort, k_g_save = k_g; k_g = input(['Guess for distortion factor kc ([]=' num2str(k_g_save) '): ']); if isempty(k_g), k_g = k_g_save; end; x_n = (x - c_g(1))/f_g; y_n = (y - c_g(2))/f_g; [x_pn] = comp_fisheye_distortion([x_n' ; y_n'],[k_g;0;0;0]); % Compute the inside points through computation of the planar homography (collineation) a00 = [x_pn(1,1);x_pn(2,1);1]; a10 = [x_pn(1,2);x_pn(2,2);1]; a11 = [x_pn(1,3);x_pn(2,3);1]; a01 = [x_pn(1,4);x_pn(2,4);1]; % Compute the planar collineation: (return the normalization matrix as well) [Homo,Hnorm,inv_Hnorm] = compute_homography([a00 a10 a11 a01],[0 1 1 0;0 0 1 1;1 1 1 1]); % Build the grid using the planar collineation: x_l = ((0:n_sq_x)'*ones(1,n_sq_y+1))/n_sq_x; y_l = (ones(n_sq_x+1,1)*(0:n_sq_y))/n_sq_y; pts = [x_l(:) y_l(:) ones((n_sq_x+1)*(n_sq_y+1),1)]'; XXpn = Homo*pts; XXpn = XXpn(1:2,:) ./ (ones(2,1)*XXpn(3,:)); XX = apply_fisheye_distortion(XXpn,[k_g;0;0;0]); XX(1,:) = f_g*XX(1,:) + c_g(1); XX(2,:) = f_g*XX(2,:) + c_g(2); figure(2); image(I); colormap(map); zoom on; hold on; plot(XX(1,:),XX(2,:),'r+'); title('The red crosses should be on the grid corners...'); hold off; satis_distort = input('Satisfied with distortion? ([]=no, other=yes) '); satis_distort = ~isempty(satis_distort); end;
The product of any number and zero is zero.
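For concreteness, here is one way the statement could be formalized; this is a minimal sketch in Lean 4 over the natural numbers (the theorem names are my own choices, not part of the source):

```lean
-- n * 0 = 0 holds by definition of multiplication on Nat.
theorem mul_zero' (n : Nat) : n * 0 = 0 := rfl

-- 0 * n = 0 needs a short induction on n.
theorem zero_mul' (n : Nat) : 0 * n = 0 := by
  induction n with
  | zero => rfl
  | succ k ih => rw [Nat.mul_succ, ih]
```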
source("scRNA_func.r") library(Seurat) library(ggplot2) myoptions<-read.table(parSampleFile1, stringsAsFactors = F, sep="\t", header=F) myoptions<-split(myoptions$V1, myoptions$V2) newnames<-read.table(parSampleFile2, stringsAsFactors = F, sep="\t", header=F) newnames<-split(newnames$V1, newnames$V2) clusters<-read.csv(parFile3, stringsAsFactors = F, header=T) rownames(clusters)=clusters$seurat_clusters renamed_column = paste0("renamed_", myoptions$celltype_name) clusters[,renamed_column] = clusters[,myoptions$celltype_name] clusters[names(newnames),renamed_column] = unlist(newnames) seurat_renamed_column=paste0("seurat_", renamed_column) clusters[,seurat_renamed_column] = paste0(clusters$seurat_clusters, " : ", clusters[,renamed_column]) clusters[,seurat_renamed_column] = factor(clusters[,seurat_renamed_column], levels=clusters[,seurat_renamed_column]) write.csv(clusters, file=paste0(outFile, ".rename_celltype.csv"), row.names=F) cells<-read.csv(parFile2, stringsAsFactors = F, row.names=1, header=T) renames<-split(clusters[,renamed_column], clusters$seurat_clusters) cells[,renamed_column]=unlist(renames[as.character(cells$seurat_clusters)]) renames<-split(clusters[,seurat_renamed_column], clusters$seurat_clusters) cells[,seurat_renamed_column]=unlist(renames[as.character(cells$seurat_clusters)]) write.csv(cells, file=paste0(outFile, ".rename_cluster.csv")) finalList<-readRDS(parFile1) obj<-finalList$obj #make sure with same cell order cells<-cells[colnames(obj),] obj[[seurat_renamed_column]]<-cells[,seurat_renamed_column] png(file=paste0(outFile, ".rename_cluster.png"), width=4000, height=3000, res=300) g<-DimPlot(object = obj, reduction = 'umap', label=TRUE, group.by=seurat_renamed_column) + guides(colour = guide_legend(override.aes = list(size = 3), ncol=1)) print(g) dev.off() cellcounts<-data.frame(Sample=obj$orig.ident, Cluster=obj[[seurat_renamed_column]]) ctable<-t(table(cellcounts)) ctable_perThousand <- round(ctable / colSums(ctable) * 1000) colnames(ctable_perThousand)<-paste0(colnames(ctable), "_perThousand") final<-cbind(ctable, ctable_perThousand) write.csv(final, file=paste0(outFile, ".rename_cluster.summery.csv"))
JasonCash seems to be rather excited about Microsoft OneNote. He added a link to a graphic advertising OneNote to several pages on the wiki during the evening of Oct 13th 2005. According to wiki ethics this was questionable, and so his contributions were reverted. Me thinks this is a one-time bogus name never to be used again and probably doesn't even deserve this page. I doubt he'll be back... (Not to waste too much time on this, but he comes from 67.114.39.73 which is a Pacbell (Yahoo) DSL address.) This page was created in an effort to be friendly and helpful to those who may be unfamiliar with our site and the local etiquette. Most likely, you are right, but this page does little harm and is an excellent example of how the wiki deals with annoyances, which is to turn them into something positive.
(* Authors: Anthony Bordg, University of Cambridge, [email protected]; Yijun He, University of Cambridge, [email protected] *) theory No_Cloning imports Quantum Tensor begin section \<open>The Cauchy-Schwarz Inequality\<close> lemma inner_prod_expand: assumes "dim_vec a = dim_vec b" and "dim_vec a = dim_vec c" and "dim_vec a = dim_vec d" shows "\<langle>a + b|c + d\<rangle> = \<langle>a|c\<rangle> + \<langle>a|d\<rangle> + \<langle>b|c\<rangle> + \<langle>b|d\<rangle>" apply (simp add: inner_prod_def) using assms sum.cong by (simp add: sum.distrib algebra_simps) lemma inner_prod_distrib_left: assumes "dim_vec a = dim_vec b" shows "\<langle>c \<cdot>\<^sub>v a|b\<rangle> = cnj(c) * \<langle>a|b\<rangle>" using assms inner_prod_def by (simp add: algebra_simps mult_hom.hom_sum) lemma inner_prod_distrib_right: assumes "dim_vec a = dim_vec b" shows "\<langle>a|c \<cdot>\<^sub>v b\<rangle> = c * \<langle>a|b\<rangle>" using assms by (simp add: algebra_simps mult_hom.hom_sum) lemma cauchy_schwarz_ineq: assumes "dim_vec v = dim_vec w" shows "(cmod(\<langle>v|w\<rangle>))\<^sup>2 \<le> Re (\<langle>v|v\<rangle> * \<langle>w|w\<rangle>)" proof (cases "\<langle>v|v\<rangle> = 0") case c0:True then have "\<And>i. i < dim_vec v \<Longrightarrow> v $ i = 0" by(metis index_zero_vec(1) inner_prod_with_itself_nonneg_reals_non0) then have "(cmod(\<langle>v|w\<rangle>))\<^sup>2 = 0" by (simp add: assms inner_prod_def) moreover have "Re (\<langle>v|v\<rangle> * \<langle>w|w\<rangle>) = 0" by (simp add: c0) ultimately show ?thesis by simp next case c1:False have "dim_vec w = dim_vec (- \<langle>v|w\<rangle> / \<langle>v|v\<rangle> \<cdot>\<^sub>v v)" by (simp add: assms) then have "\<langle>w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = \<langle>w|w\<rangle> + \<langle>w|-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> + \<langle>-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w\<rangle> + \<langle>-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle>" using inner_prod_expand[of "w" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v" "w" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v"] by auto moreover have "\<langle>w|-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> * \<langle>w|v\<rangle>" using assms inner_prod_distrib_right[of "w" "v" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>"] by simp moreover have "\<langle>-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w\<rangle> = cnj(-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>) * \<langle>v|w\<rangle>" using assms inner_prod_distrib_left[of "v" "w" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>"] by simp moreover have "\<langle>-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = cnj(-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>) * (-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>) * \<langle>v|v\<rangle>" using inner_prod_distrib_left[of "v" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>"] inner_prod_distrib_right[of "v" "v" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>"] by simp ultimately have "\<langle>w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w + 
-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = \<langle>w|w\<rangle> - cmod(\<langle>v|w\<rangle>)^2 / \<langle>v|v\<rangle>" using assms inner_prod_cnj[of "w" "v"] inner_prod_cnj[of "v" "v"] complex_norm_square by simp moreover have "Re(\<langle>w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle>) \<ge> 0" using inner_prod_with_itself_Re by blast ultimately have "Re(\<langle>w|w\<rangle>) \<ge> cmod(\<langle>v|w\<rangle>)^2/Re(\<langle>v|v\<rangle>)" using inner_prod_with_itself_real by simp moreover have c2:"Re(\<langle>v|v\<rangle>) > 0" using inner_prod_with_itself_Re_non0 inner_prod_with_itself_eq0 c1 by auto ultimately have "Re(\<langle>w|w\<rangle>) * Re(\<langle>v|v\<rangle>) \<ge> cmod(\<langle>v|w\<rangle>)^2/Re(\<langle>v|v\<rangle>) * Re(\<langle>v|v\<rangle>)" using real_mult_le_cancel_iff1 by blast thus ?thesis using inner_prod_with_itself_Im c2 by (simp add: mult.commute) qed lemma cauchy_schwarz_eq [simp]: assumes "v = (l \<cdot>\<^sub>v w)" shows "(cmod(\<langle>v|w\<rangle>))\<^sup>2 = Re (\<langle>v|v\<rangle> * \<langle>w|w\<rangle>)" proof- have "cmod(\<langle>v|w\<rangle>) = cmod(cnj(l) * \<langle>w|w\<rangle>)" using assms inner_prod_distrib_left[of "w" "w" "l"] by simp then have "cmod(\<langle>v|w\<rangle>)^2 = cmod(l)^2 * \<langle>w|w\<rangle> * \<langle>w|w\<rangle>" using complex_norm_square inner_prod_cnj[of "w" "w"] by simp moreover have "\<langle>v|v\<rangle> = cmod(l)^2 * \<langle>w|w\<rangle>" using assms complex_norm_square inner_prod_distrib_left[of "w" "v" "l"] inner_prod_distrib_right[of "w" "w" "l"] by simp ultimately show ?thesis by (metis Re_complex_of_real) qed lemma cauchy_schwarz_col [simp]: assumes "dim_vec v = dim_vec w" and "(cmod(\<langle>v|w\<rangle>))\<^sup>2 = Re (\<langle>v|v\<rangle> * \<langle>w|w\<rangle>)" shows "\<exists>l. v = (l \<cdot>\<^sub>v w) \<or> w = (l \<cdot>\<^sub>v v)" proof (cases "\<langle>v|v\<rangle> = 0") case c0:True then have "\<And>i. 
i < dim_vec v \<Longrightarrow> v $ i = 0" by(metis index_zero_vec(1) inner_prod_with_itself_nonneg_reals_non0) then have "v = 0 \<cdot>\<^sub>v w" by (auto simp: assms) then show ?thesis by auto next case c1:False have f0:"dim_vec w = dim_vec (- \<langle>v|w\<rangle> / \<langle>v|v\<rangle> \<cdot>\<^sub>v v)" by (simp add: assms(1)) then have "\<langle>w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = \<langle>w|w\<rangle> + \<langle>w|-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> + \<langle>-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w\<rangle> + \<langle>-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle>" using inner_prod_expand[of "w" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v" "w" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v"] by simp moreover have "\<langle>w|-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> * \<langle>w|v\<rangle>" using assms(1) inner_prod_distrib_right[of "w" "v" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>"] by simp moreover have "\<langle>-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w\<rangle> = cnj(-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>) * \<langle>v|w\<rangle>" using assms(1) inner_prod_distrib_left[of "v" "w" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>"] by simp moreover have "\<langle>-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = cnj(-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>) * (-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>) * \<langle>v|v\<rangle>" using inner_prod_distrib_left[of "v" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>"] inner_prod_distrib_right[of "v" "v" "-\<langle>v|w\<rangle>/\<langle>v|v\<rangle>"] by simp ultimately have "\<langle>w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = \<langle>w|w\<rangle> - cmod(\<langle>v|w\<rangle>)^2 / \<langle>v|v\<rangle>" using inner_prod_cnj[of "w" "v"] inner_prod_cnj[of "v" "v"] assms(1) complex_norm_square by simp moreover have "\<langle>w|w\<rangle> = cmod(\<langle>v|w\<rangle>)^2 / \<langle>v|v\<rangle>" using assms(2) inner_prod_with_itself_real by(metis Reals_mult c1 nonzero_mult_div_cancel_left of_real_Re) ultimately have "\<langle>w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v|w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v\<rangle> = 0" by simp then have "\<And>i. i<dim_vec w \<Longrightarrow> (w + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v) $ i = 0" by (metis f0 index_add_vec(2) index_zero_vec(1) inner_prod_with_itself_nonneg_reals_non0) then have "\<And>i. i<dim_vec w \<Longrightarrow> w $ i + -\<langle>v|w\<rangle>/\<langle>v|v\<rangle> * v $ i = 0" by (metis assms(1) f0 index_add_vec(1) index_smult_vec(1)) then have "\<And>i. 
i<dim_vec w \<Longrightarrow> w $ i = \<langle>v|w\<rangle>/\<langle>v|v\<rangle> * v $ i" by simp then have "w = \<langle>v|w\<rangle>/\<langle>v|v\<rangle> \<cdot>\<^sub>v v" by (auto simp add: assms(1)) thus ?thesis by auto qed section \<open>The No-Cloning Theorem\<close> lemma eq_from_inner_prod [simp]: assumes "dim_vec v = dim_vec w" and "\<langle>v|w\<rangle> = 1" and "\<langle>v|v\<rangle> = 1" and "\<langle>w|w\<rangle> = 1" shows "v = w" proof- have "(cmod(\<langle>v|w\<rangle>))\<^sup>2 = Re (\<langle>v|v\<rangle> * \<langle>w|w\<rangle>)" by (simp add: assms) then have f0:"\<exists>l. v = (l \<cdot>\<^sub>v w) \<or> w = (l \<cdot>\<^sub>v v)" by (simp add: assms(1)) then show ?thesis proof (cases "\<exists>l. v = (l \<cdot>\<^sub>v w)") case True then have "\<exists>l. v = (l \<cdot>\<^sub>v w) \<and> \<langle>v|w\<rangle> = cnj(l) * \<langle>w|w\<rangle>" using inner_prod_distrib_left by auto then show ?thesis by (simp add: assms(2,4)) next case False then have "\<exists>l. w = (l \<cdot>\<^sub>v v) \<and> \<langle>v|w\<rangle> = l * \<langle>v|v\<rangle>" using f0 inner_prod_distrib_right by auto then show ?thesis by (simp add: assms(2,3)) qed qed lemma hermite_cnj_of_tensor: shows "(A \<Otimes> B)\<^sup>\<dagger> = (A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>)" proof show c0:"dim_row ((A \<Otimes> B)\<^sup>\<dagger>) = dim_row ((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>))" by simp show c1:"dim_col ((A \<Otimes> B)\<^sup>\<dagger>) = dim_col ((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>))" by simp show "\<And>i j. i < dim_row ((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>)) \<Longrightarrow> j < dim_col ((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>)) \<Longrightarrow> ((A \<Otimes> B)\<^sup>\<dagger>) $$ (i, j) = ((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>)) $$ (i, j)" proof- fix i j assume a0:"i < dim_row ((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>))" and a1:"j < dim_col ((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>))" then have "(A \<Otimes> B)\<^sup>\<dagger> $$ (i, j) = cnj((A \<Otimes> B) $$ (j, i))" by (simp add: dagger_def) also have "\<dots> = cnj(A $$ (j div dim_row(B), i div dim_col(B)) * B $$ (j mod dim_row(B), i mod dim_col(B)))" by (metis (mono_tags, lifting) a0 a1 c1 dim_row_tensor_mat dim_col_of_dagger dim_row_of_dagger index_tensor_mat less_nat_zero_code mult_not_zero neq0_conv) moreover have "((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>)) $$ (i, j) = (A\<^sup>\<dagger>) $$ (i div dim_col(B), j div dim_row(B)) * (B\<^sup>\<dagger>) $$ (i mod dim_col(B), j mod dim_row(B))" by (smt a0 a1 c1 dim_row_tensor_mat dim_col_of_dagger dim_row_of_dagger index_tensor_mat less_nat_zero_code mult_eq_0_iff neq0_conv) moreover have "(B\<^sup>\<dagger>) $$ (i mod dim_col(B), j mod dim_row(B)) = cnj(B $$ (j mod dim_row(B), i mod dim_col(B)))" proof- have "i mod dim_col(B) < dim_col(B)" using a0 gr_implies_not_zero mod_div_trivial by fastforce moreover have "j mod dim_row(B) < dim_row(B)" using a1 gr_implies_not_zero mod_div_trivial by fastforce ultimately show ?thesis by (simp add: dagger_def) qed moreover have "(A\<^sup>\<dagger>) $$ (i div dim_col(B), j div dim_row(B)) = cnj(A $$ (j div dim_row(B), i div dim_col(B)))" proof- have "i div dim_col(B) < dim_col(A)" using a0 dagger_def by (simp add: less_mult_imp_div_less) moreover have "j div dim_row(B) < dim_row(A)" using a1 dagger_def by (simp add: less_mult_imp_div_less) ultimately show ?thesis by (simp add: dagger_def) qed ultimately show "((A \<Otimes> B)\<^sup>\<dagger>) $$ (i, j) = 
((A\<^sup>\<dagger>) \<Otimes> (B\<^sup>\<dagger>)) $$ (i, j)" by simp qed qed locale quantum_machine = fixes n:: nat and s:: "complex Matrix.vec" and U:: "complex Matrix.mat" assumes dim_vec [simp]: "dim_vec s = 2^n" and dim_col [simp]: "dim_col U = 2^n * 2^n" and square [simp]: "square_mat U" and unitary [simp]: "unitary U" lemma inner_prod_of_unit_vec: fixes n i:: nat assumes "i < n" shows "\<langle>unit_vec n i| unit_vec n i\<rangle> = 1" apply (auto simp add: inner_prod_def unit_vec_def) by (simp add: assms sum.cong[of "{0..<n}" "{0..<n}" "\<lambda>j. cnj (if j = i then 1 else 0) * (if j = i then 1 else 0)" "\<lambda>j. (if j = i then 1 else 0)"]) theorem (in quantum_machine) no_cloning: assumes [simp]: "dim_vec v = 2^n" and [simp]: "dim_vec w = 2^n" and cloning1: "\<And>s. U * ( |v\<rangle> \<Otimes> |s\<rangle>) = |v\<rangle> \<Otimes> |v\<rangle>" and cloning2: "\<And>s. U * ( |w\<rangle> \<Otimes> |s\<rangle>) = |w\<rangle> \<Otimes> |w\<rangle>" and "\<langle>v|v\<rangle> = 1" and "\<langle>w|w\<rangle> = 1" shows "v = w \<or> \<langle>v|w\<rangle> = 0" proof- define s:: "complex Matrix.vec" where d0:"s = unit_vec (2^n) 0" have f0:"\<langle>|v\<rangle>| \<Otimes> \<langle>|s\<rangle>| = (( |v\<rangle> \<Otimes> |s\<rangle>)\<^sup>\<dagger>)" using hermite_cnj_of_tensor[of "|v\<rangle>" "|s\<rangle>"] bra_def dagger_def ket_vec_def by simp moreover have f1:"( |v\<rangle> \<Otimes> |v\<rangle>)\<^sup>\<dagger> * ( |w\<rangle> \<Otimes> |w\<rangle>) = (\<langle>|v\<rangle>| \<Otimes> \<langle>|s\<rangle>| ) * ( |w\<rangle> \<Otimes> |s\<rangle>)" proof- have "(U * ( |v\<rangle> \<Otimes> |s\<rangle>))\<^sup>\<dagger> = (\<langle>|v\<rangle>| \<Otimes> \<langle>|s\<rangle>| ) * (U\<^sup>\<dagger>)" using dagger_of_prod[of "U" "|v\<rangle> \<Otimes> |s\<rangle>"] f0 d0 by (simp add: ket_vec_def) then have "(U * ( |v\<rangle> \<Otimes> |s\<rangle>))\<^sup>\<dagger> * U * ( |w\<rangle> \<Otimes> |s\<rangle>) = (\<langle>|v\<rangle>| \<Otimes> \<langle>|s\<rangle>| ) * (U\<^sup>\<dagger>) * U * ( |w\<rangle> \<Otimes> |s\<rangle>)" by simp moreover have "(U * ( |v\<rangle> \<Otimes> |s\<rangle>))\<^sup>\<dagger> * U * ( |w\<rangle> \<Otimes> |s\<rangle>) = (( |v\<rangle> \<Otimes> |v\<rangle>)\<^sup>\<dagger>) * ( |w\<rangle> \<Otimes> |w\<rangle>)" using assms(2-4) d0 unit_vec_def by (smt Matrix.dim_vec assoc_mult_mat carrier_mat_triv dim_row_mat(1) dim_row_tensor_mat dim_col_of_dagger index_mult_mat(2) ket_vec_def square square_mat.elims(2)) moreover have "(U\<^sup>\<dagger>) * U = 1\<^sub>m (2^n * 2^n)" using unitary_def dim_col unitary by simp moreover have "(\<langle>|v\<rangle>| \<Otimes> \<langle>|s\<rangle>| ) * (U\<^sup>\<dagger>) * U = (\<langle>|v\<rangle>| \<Otimes> \<langle>|s\<rangle>| ) * ((U\<^sup>\<dagger>) * U)" using d0 assms(1) unit_vec_def by (smt Matrix.dim_vec assoc_mult_mat carrier_mat_triv dim_row_mat(1) dim_row_tensor_mat f0 dim_col_of_dagger dim_row_of_dagger ket_vec_def local.dim_col) moreover have "(\<langle>|v\<rangle>| \<Otimes> \<langle>|s\<rangle>| ) * 1\<^sub>m (2^n * 2^n) = (\<langle>|v\<rangle>| \<Otimes> \<langle>|s\<rangle>| )" using f0 ket_vec_def d0 by simp ultimately show ?thesis by simp qed then have f2:"(\<langle>|v\<rangle>| * |w\<rangle>) \<Otimes> (\<langle>|v\<rangle>| * |w\<rangle>) = (\<langle>|v\<rangle>| * |w\<rangle>) \<Otimes> (\<langle>|s\<rangle>| * |s\<rangle>)" proof- have "\<langle>|v\<rangle>| \<Otimes> \<langle>|v\<rangle>| = (( |v\<rangle> \<Otimes> |v\<rangle>)\<^sup>\<dagger>)" using hermite_cnj_of_tensor[of "|v\<rangle>" 
"|v\<rangle>"] bra_def dagger_def ket_vec_def by simp then show ?thesis using f1 d0 by (simp add: bra_def mult_distr_tensor ket_vec_def) qed then have "\<langle>v|w\<rangle> * \<langle>v|w\<rangle> = \<langle>v|w\<rangle> * \<langle>s|s\<rangle>" proof- have "((\<langle>|v\<rangle>| * |w\<rangle>) \<Otimes> (\<langle>|v\<rangle>| * |w\<rangle>)) $$ (0,0) = \<langle>v|w\<rangle> * \<langle>v|w\<rangle>" using assms inner_prod_with_times_mat[of "v" "w"] by (simp add: bra_def ket_vec_def) moreover have "((\<langle>|v\<rangle>| * |w\<rangle>) \<Otimes> (\<langle>|s\<rangle>| * |s\<rangle>)) $$ (0,0) = \<langle>v|w\<rangle> * \<langle>s|s\<rangle>" using inner_prod_with_times_mat[of "v" "w"] inner_prod_with_times_mat[of "s" "s"] by(simp add: bra_def ket_vec_def) ultimately show ?thesis using f2 by auto qed then have "\<langle>v|w\<rangle> = 0 \<or> \<langle>v|w\<rangle> = \<langle>s|s\<rangle>" by (simp add: mult_left_cancel) moreover have "\<langle>s|s\<rangle> = 1" by(simp add: d0 inner_prod_of_unit_vec) ultimately show ?thesis using assms(1,2,5,6) by auto qed end
import Network.Socket import Lightyear.Strings tcpConnect : String -> Port -> IO (Maybe Socket) tcpConnect host port = do sock <- socket AF_INET Stream 0 case sock of Left _ => return Nothing Right s => do conn <- connect s (parseIPv4 host) port case conn of 0 => return $ Just s ec => const Nothing <$> (close s *> (putStrLn $ "Connection failed, error: " ++ (show ec))) sendLine : Socket -> String -> IO (Either SocketError ByteLength) sendLine s str = send s (str ++ "\r\n") recvTill : Socket -> String -> IO (Either SocketError String) recvTill sock till = recvTill' "" where recvTill' : String -> IO (Either SocketError String) recvTill' prev = do r <- recv sock 1 case r of e@(Left err) => return e (Right (c, _)) => let s = prev ++ c in if isSuffixOf till s then return $ Right s else recvTill' s scrape : String -> Int -> String -> String -> Parser String -> IO (Maybe String) scrape host port req till parser = do ms <- tcpConnect host port case ms of Nothing => pure Nothing Just s => do sendLine s req d <- recvTill s till close s case d of Left err => pure Nothing Right str => processBody (parse parser str) where processBody : Either String String -> IO (Maybe String) processBody (Left err) = pure Nothing processBody (Right b) = do putStrLn b pure $ Just b
      LOGICAL FUNCTION CHKBUF3( FDUM )

C***********************************************************************
C EDSS/Models-3 I/O API.
C Copyright (C) 1992-2002 MCNC and Carlie J. Coats, Jr.,
C (C) 2003-2011 by Baron Advanced Meteorological Systems.
C Distributed under the GNU LESSER GENERAL PUBLIC LICENSE version 2.1
C See file "LGPL.txt" for conditions of use.
C.........................................................................
C  function body starts at line  95
C
C  FUNCTION:
C       Check consistency of BUFFERED file treatment between
C       libioapi.a and model-code
C
C  RETURN VALUE:
C       TRUE iff consistent
C
C  PRECONDITIONS REQUIRED:
C       call after INIT3()
C
C  REVISION HISTORY:
C       prototype 04/2011 by Carlie J. Coats, Jr.
C***********************************************************************

      IMPLICIT NONE

C...........   INCLUDES:

      INCLUDE 'PARMS3.EXT'
      INCLUDE 'STATE3.EXT'

C...........   ARGUMENTS and their descriptions:

      INTEGER, INTENT( OUT) :: FDUM    !  prevents excessive optimization

C.............................................................................
C   begin body of subroutine  CHKBUF3

      FDUM    = VGTYP3( 1 )
      CHKBUF3 = .TRUE.
      RETURN
      END FUNCTION CHKBUF3
-- Andreas, 2013-10-21 -- There was a bug in Rules/Builtin such that NATEQUALS' equations -- would be checked at type Nat instead of Bool. -- This bug surfaced only because of today's refactoring in Conversion, -- because then I got a strange unsolved constraint true == true : Nat. module NatEquals where import Common.Level data Bool : Set where true false : Bool {-# BUILTIN BOOL Bool #-} {-# BUILTIN TRUE true #-} {-# BUILTIN FALSE false #-} data Nat : Set where zero : Nat suc : Nat -> Nat {-# BUILTIN NATURAL Nat #-} infix 40 _=?=_ _=?=_ : Nat -> Nat -> Bool zero =?= zero = true zero =?= suc _ = false suc _ =?= zero = false suc n =?= suc m = n =?= m {-# BUILTIN NATEQUALS _=?=_ #-}
% composit.m September 2, 2013 % sun-synchronous, repeating ground track, % frozen orbit design - Kozai J2 perturbations % Orbital Mechanics with MATLAB %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% clear all; global j2 j3 mu req omega argper fi xldot xqp % astrodynamic and utility constants j2 = 0.00108263; j3 = -0.00000254; mu = 398600.4415; req = 6378.14; omega = 0.000072921151467; pi2 = 2.0 * pi; dtr = pi / 180.0; rtd = 180.0 / pi; x = zeros(3, 1); clc; home; fprintf('\n program composit'); fprintf('\n\n < sun-synchronous, repeating ground track, frozen orbits >\n\n'); % request initial guesses while(1) fprintf('\nplease input an initial guess for the semimajor axis (kilometers)\n'); x(1) = input('? '); if (x(1) > 0.0) break; end end while(1) fprintf('\nplease input an initial guess for the eccentricity (non-dimensional)'); fprintf('\n(0 <= eccentricity < 1)\n'); x(2) = input('? '); if (x(2) >= 0.0 && x(2) < 1.0) break; end end while(1) fprintf('\nplease input an initial guess for the inclination (degrees)'); fprintf('\n(90 < inclination <= 180)\n'); x(3) = input('? '); if (x(3) > 90.0 && x(3) <= 180.0) break; end end x(3) = x(3) * dtr; while(1) fprintf('\nplease input the number of integer orbits in the repeat cycle\n'); norbits = input('? '); if (norbits > 1) break; end end while(1) fprintf('\nplease input the number of integer days in the repeat cycle\n'); ndays = input('? '); if (ndays > 0) break; end end % compute repetition factor xqp = norbits / ndays; % fundamental interval fi = pi2 / xqp; % required nodal regression rate xldot = (360.0 / 365.25) * dtr / 86400.0; argper = 90.0 * dtr; % solve system of nonlinear equations n = 3; maxiter = 200; [xf, niter, icheck] = snle ('compfunc', x, n, maxiter); sma = xf(1); ecc = xf(2); inc = rtd * xf(3); % keplerian period tkepler = pi2 * sqrt(sma ^ 3 / mu); tnode = pi2 * (1.0 / xqp) / (omega - xldot); % print results clc; home; fprintf('\n program composit'); fprintf('\n\n < sun-synchronous, repeating ground track, frozen orbits >\n\n'); fprintf('mean semimajor axis %12.4f kilometers \n\n', sma); fprintf('mean eccentricity %12.10f \n\n', ecc); fprintf('mean orbital inclination %12.4f degrees \n\n', inc); fprintf('mean argument of perigee %12.4f degrees \n\n', argper*rtd); fprintf('keplerian period %12.4f minutes \n\n', tkepler/60); fprintf('nodal period %12.4f minutes \n\n', tnode/60); fprintf('number of orbits in repeat cycle %12.4f \n\n', norbits); fprintf('number of days in repeat cycle %12.4f \n\n', ndays); fprintf('ground trace repetition factor %12.4f \n\n', xqp);
import warnings

import numpy as np

# try to import z5py
try:
    import z5py
    WITH_Z5PY = True
except ImportError:
    WITH_Z5PY = False

# try to import h5py
try:
    import h5py
    WITH_H5PY = True
except ImportError:
    WITH_H5PY = False

# try to import zarr
try:
    import zarr
    WITH_ZARR = True
except ImportError:
    WITH_ZARR = False

# try to import dvid
try:
    from libdvid import DVIDNodeService
    from libdvid import ConnectionMethod
    WITH_DVID = True
except ImportError:
    WITH_DVID = False


class IoBase(object):
    """
    Base class for I/O with h5, n5 and zarr.
    Libraries need to largely follow h5 syntax.

    Arguments:
        path (str): path to h5 or n5 file
        keys (str or list[str]): key or list of keys to datasets in file
        channel_order (list[slice]): mapping of channels to output datasets (default: None)
        voxel_size (tuple or list[tuple]): voxel sizes of datasets stored in keys (same order)
    """
    def __init__(self, path, keys, channel_order=None, voxel_size=None):
        assert isinstance(keys, (tuple, list, str)), type(keys)
        self.path = path
        self.keys = keys if isinstance(keys, (list, tuple)) else [keys]
        self.ff = self.open(self.path)
        assert all(kk in self.ff for kk in self.keys), "%s, %s" % (self.path, self.keys)
        self.datasets = [self.ff[kk] for kk in self.keys]
        # we just assume that everything has the same shape and voxel size...
        try:
            self._voxel_size = tuple(np.array(self.datasets[0].attrs['resolution']).astype(int))
            if voxel_size is not None and voxel_size != self._voxel_size:
                warnings.warn("specified voxel size does not match voxel size saved in data")
        except KeyError:
            self._voxel_size = voxel_size
        assert self._voxel_size is not None
        self._shape_vc = self.datasets[0].shape
        self._shape = tuple(np.array(self._shape_vc) * np.array(self._voxel_size))

        # validate non-trivial channel orders
        if channel_order is not None:
            assert all(isinstance(cho, slice) for cho in channel_order)
            assert len(channel_order) == len(self.datasets)
            for ds, ch in zip(self.datasets, channel_order):
                n_chan = ch.stop - ch.start
                if ds.ndim == 4:
                    assert n_chan == ds.shape[0]
                elif ds.ndim == 3:
                    assert n_chan == 1
                else:
                    raise RuntimeError("Invalid dataset dimensionality")
            self.channel_order = channel_order
        else:
            assert len(self.datasets) == 1, "Need channel order if given more than one dataset"
            self.channel_order = None

    def open(self, path):
        raise NotImplementedError("open needs to be implemented by sub-classes")

    def read(self, starts_wc, stops_wc):
        # make sure that things align with the voxel grid
        assert all(start_wc % res == 0 for start_wc, res in zip(starts_wc, self.voxel_size))
        assert all(stop_wc % res == 0 for stop_wc, res in zip(stops_wc, self.voxel_size))
        assert len(self.datasets) == 1
        bb_vc = tuple(slice(int(start_wc / res), int(stop_wc / res))
                      for start_wc, stop_wc, res in zip(starts_wc, stops_wc, self._voxel_size))
        return self.read_vc(bb_vc)

    def read_vc(self, bounding_box_vc):
        assert len(self.datasets) == 1, "Use separate IOs for reading"
        return self.datasets[0][bounding_box_vc]

    def write_vc(self, out, out_bb_vc):
        if self.channel_order is None:
            ds = self.datasets[0]
            assert out.ndim == ds.ndim, "%i, %i" % (out.ndim, ds.ndim)
            if out.ndim == 4:
                ds[(slice(None),) + out_bb_vc] = out
            else:
                ds[out_bb_vc] = out
        else:
            for ds, ch in zip(self.datasets, self.channel_order):
                if ds.ndim == 3:
                    ds[out_bb_vc] = out[ch][0]
                else:
                    ds[(slice(None),) + out_bb_vc] = out[ch]

    def write(self, out, offsets_wc):
        assert all(offset_wc % res == 0 for offset_wc, res in zip(offsets_wc, self.voxel_size))
        stops_wc = tuple([offset_wc + out_sh * res for offset_wc, out_sh, res
                          in zip(offsets_wc, out.shape, self.voxel_size)])
        assert all(stop_wc % res == 0 for stop_wc, res in zip(stops_wc, self.voxel_size))
        bb_vc = tuple(slice(int(start_wc / res), int(stop_wc / res))
                      for start_wc, stop_wc, res in zip(offsets_wc, stops_wc, self.voxel_size))
        return self.write_vc(out, bb_vc)

    def verify_block_shape(self, offset_wc, arr):
        if arr.ndim == 4:
            stops_wc = tuple([off_wc + out_sh * res for off_wc, out_sh, res
                              in zip(offset_wc, arr.shape[1:], self.voxel_size)])
        else:
            stops_wc = tuple([off_wc + out_sh * res for off_wc, out_sh, res
                              in zip(offset_wc, arr.shape, self.voxel_size)])
        # test whether block is overhanging, then crop
        if any(stop_wc > sh_wc for stop_wc, sh_wc in zip(stops_wc, self.shape)):
            arr_stops_wc = [sh_wc - off_wc if stop_wc > sh_wc else None
                            for stop_wc, sh_wc, off_wc in zip(stops_wc, self.shape, offset_wc)]
            assert all(arr_stop_wc % res == 0 if arr_stop_wc is not None else True
                       for arr_stop_wc, res in zip(arr_stops_wc, self.voxel_size))
            arr_stops_vc = [int(arr_stop_wc / res) if arr_stop_wc is not None else None
                            for arr_stop_wc, res in zip(arr_stops_wc, self.voxel_size)]
            bb_vc = tuple(slice(0, arr_stop_vc) for arr_stop_vc in arr_stops_vc)
            if arr.ndim == 4:
                bb_vc = ((slice(None),) + bb_vc)
            arr = arr[bb_vc]
        return arr

    @property
    def voxel_size(self):
        return self._voxel_size

    @property
    def shape(self):
        return self._shape

    def close(self):
        pass


class IoHDF5(IoBase):
    def __init__(self, path, keys, channel_order=None, voxel_size=None):
        assert WITH_H5PY, "Need h5py"
        super(IoHDF5, self).__init__(path, keys, channel_order=channel_order, voxel_size=voxel_size)

    def open(self, path):
        # open in append mode so that both reading and writing work
        # (the historical h5py default before h5py 3.0)
        return h5py.File(path, "a")

    def close(self):
        self.ff.close()


class IoN5(IoBase):
    def __init__(self, path, keys, channel_order=None, voxel_size=None):
        assert WITH_Z5PY, "Need z5py"
        super(IoN5, self).__init__(path, keys, channel_order=channel_order, voxel_size=voxel_size)

    def open(self, path):
        return z5py.File(path)


class IoZarr(IoBase):
    def __init__(self, path, keys, channel_order=None, voxel_size=None):
        assert WITH_ZARR, "Need zarr"
        super(IoZarr, self).__init__(path, keys, channel_order=channel_order, voxel_size=voxel_size)

    def open(self, path):
        return zarr.open(path)


class IoDVID(object):
    def __init__(self, server_address, uuid, key):
        assert WITH_DVID, "Need dvid"
        self.ds = DVIDNodeService(server_address, uuid)
        self.key = key
        # get the shape the dvid way...
        endpoint = "/" + self.key + "/info"
        attributes = self.ds.custom_request(endpoint, "", ConnectionMethod.GET)
        # TODO do we need to increase by 1 here ?
        self._shape = tuple(mp + 1 for mp in attributes["MaxPoint"])

    def read(self, bb):
        offset = tuple(b.start for b in bb)
        shape = tuple(b.stop - b.start for b in bb)
        return self.ds.get_gray3D(self.key, shape, offset)

    def write(self, out, out_bb):
        raise NotImplementedError("Writing to DVID is not yet implemented!")

    @property
    def shape(self):
        return self._shape

    def close(self):
        pass
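

# To make the intended call pattern concrete, here is a minimal usage sketch.
# The file name, dataset key and voxel size below are hypothetical, chosen
# only for illustration; they are not part of the original module.
if __name__ == '__main__':
    io = IoHDF5("volume.h5", "raw", voxel_size=(4, 4, 4))
    # world-coordinate bounds must be multiples of the voxel size:
    block = io.read((0, 0, 0), (40, 40, 40))   # a (10, 10, 10) voxel array
    io.write(block, (40, 0, 0))                # write the block back at a new offset
    io.close()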
Formal statement is: lemma metric_LIM_compose2: fixes a :: "'a::metric_space" assumes f: "f \<midarrow>a\<rightarrow> b" and g: "g \<midarrow>b\<rightarrow> c" and inj: "\<exists>d>0. \<forall>x. x \<noteq> a \<and> dist x a < d \<longrightarrow> f x \<noteq> b" shows "(\<lambda>x. g (f x)) \<midarrow>a\<rightarrow> c" Informal statement is: If $f(x) \to b$ as $x \to a$, $g(y) \to c$ as $y \to b$, and there exists $d > 0$ such that $f(x) \neq b$ whenever $x \neq a$ and $\operatorname{dist}(x, a) < d$, then $g(f(x)) \to c$ as $x \to a$.
corollary\<^marker>\<open>tag unimportant\<close> Cauchy_theorem_disc: "\<lbrakk>finite K; continuous_on (cball a e) f; \<And>x. x \<in> ball a e - K \<Longrightarrow> f field_differentiable at x; valid_path g; path_image g \<subseteq> cball a e; pathfinish g = pathstart g\<rbrakk> \<Longrightarrow> (f has_contour_integral 0) g"
The Pentax K100D is an entry-class digital SLR camera (released on 23.05.06) featuring a 6.1 Megapixel sensor, the PENTAX-developed Shake Reduction (SR) system, a 2.5” LCD monitor, 11-point wide-frame AF, and a continuous shooting mode at a maximum speed of approximately 2.8 images per second. The Pentax K100D will be available in July 2006 with an expected street price of $699.

>> REVIEWS (last updated: 11.05.07) – Scroll down for sample images.

“If you’re looking for your first digital SLR, the Pentax K100D is well worth a look. The Shake Reduction, easy controls, SD card compatibility and lightweight design make this camera ideal for those looking for a step up from a compact digicam. Photographers who travel will also appreciate these features, as well as the ability to use AA batteries. Existing Pentax SLR owners may also wish to upgrade, whether it is from a film or an earlier digital SLR.

Pros: Great build quality for a lightweight digital SLR, Takes AA or CR-V3 batteries, Images are colourful and contrasty straight from the camera, Better dynamic range than previous Pentaxes.

Cons: Pause between focus and shooting in Single Servo AF, Shallow buffer, Flimsy battery door.

PENTAX Corporation is pleased to introduce the PENTAX K100D lens-interchangeable digital SLR camera. This new digital SLR camera combines 6.1 effective megapixels with a host of advanced technologies, including a PENTAX-original Shake Reduction (SR) system, to deliver high-quality digital SLR photography and user-friendly operations to all levels of photographers.

This entry was posted on Friday, May 11th, 2007 at 2:10 pm and is filed under Digital Camera, Pentax.
[STATEMENT] lemma finite_dom_graph: "finite (dom f) \<Longrightarrow> finite (map_graph f)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite (dom f) \<Longrightarrow> finite (map_graph f) [PROOF STEP] by (metis dom_map_graph finite_imageD fst_eq_Domain functional_def map_graph_functional)
= = = Cellular localization = = =
# Linear least square fitting

In the lectures we looked at linear least squares approximation for a linear fit. In this notebook we extend that to general polynomial fits and then to general linear fits. This will involve simultaneous linear equations which we solve via matrix methods.

A model is linear if it depends linearly on the parameters, i.e.,

$$f(x, \vec{\beta}) = \beta_0 x + \beta_1 \sin(x) $$

is a linear model whereas

$$f(x, \vec{\beta}) = \beta_0[x + \beta_1\sin(x)] $$

is a non-linear model due to the product $\beta_0 \beta_1$. In this notebook we will only look at fitting data with linear models.

## Setup and overview

For all the following algorithms let $(x_i,y_i)$ with $i=1\dots n$ be the data we want to fit to. Let the *residual* at each data point be given by

$$ r_i = f(x_i,\vec{\beta}) - y_i $$

The goal is to vary $\vec{\beta}$ in order to minimize the sum of the squares of the residuals, i.e., we want to minimize $S$, where

$$ S(\vec{\beta}) = \sum_{i=1}^n r_i^2 $$

As usual, to minimize a function we find where its partial derivatives with respect to the parameters are equal to zero.

## A quadratic example

Let's extend the linear model given in the notes to a quadratic model. Let our model be given by

$$ f(x, \vec{\beta}) = \beta_1 x + \beta_2 x^2$$

In this case

$$ \begin{align} \frac{\partial S}{\partial \beta_1} &= \sum_{i=1}^n 2(\beta_1 x_i +\beta_2 x_i^2 - y_i) x_i = 0 \\ \frac{\partial S}{\partial \beta_2} &= \sum_{i=1}^n 2(\beta_1 x_i +\beta_2 x_i^2 - y_i) x_i^2 =0 \end{align} $$

We can re-write this as a matrix equation:

$$ \begin{bmatrix}\sum_{i=1}^n x_i^2 & \sum_{i=1}^n x_i^3 \\ \sum_{i=1}^n x_i^3 & \sum_{i=1}^n x_i^4\end{bmatrix}\begin{bmatrix}\beta_1 \\ \beta_2 \end{bmatrix} = \begin{bmatrix} \sum_{i=1}^n y_i x_i \\ \sum_{i=1}^n y_i x_i^2 \end{bmatrix} $$

Writing this as a matrix equation in the form $X \vec{\beta} = \vec{\alpha}$ we can find the best fit parameters via $\vec{\beta} = X^{-1}\vec{\alpha}$. Let's see this in action in the code

```python
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as la
```

```python
# The below two lines set the default size and font size for matplotlib
plt.rcParams['figure.figsize'] = (16.0, 10.0)
plt.rcParams.update({'font.size': 22})
```

First let's prepare some data to test the algorithm on

```python
x = np.linspace(-4,4,100)
a = 3; b = -2
y = a*x**2 + b*x
```

Next prepare the matrix $X$ and vector $\vec{\alpha}$

```python
X = np.sum(np.array([[x**4, x**3],[x**3, x**2]]),2)
alpha = np.sum(np.array([y*x**2, y*x]),1)
```

Now solve for $\vec{\beta}$ (note that the rows are ordered so that the solution vector comes out as $[\beta_2, \beta_1]$, i.e., as $[a, b]$)

```python
la.inv(X)@alpha
```

array([ 3., -2.])

The algorithm worked! We recovered the coefficients $a$ and $b$.

## Fitting to a polynomial

Now let's generalize to fitting data using an $n^{th}$-order polynomial.
By making the natural extension of the above we have

$$ f(x,\vec{\beta}) = \beta_0 + \beta_1 x + \beta_2 x^2 +\dots + \beta_n x^n$$

The $(n+1)\times(n+1)$ system of equations we have to solve is now given by

$$\begin{bmatrix} X_0 & X_1 & \cdots & X_n \\ X_1 & X_2 & \cdots & X_{n+1} \\ \vdots & & & \vdots \\ X_n & X_{n+1} & \cdots & X_{2n} \end{bmatrix} \begin{bmatrix} \beta_0 \\ \beta_1 \\ \cdots \\ \beta_n \end{bmatrix} = \begin{bmatrix} \alpha_0 \\ \alpha_1 \\ \cdots \\ \alpha_n \end{bmatrix} $$

where, with $k$ running over the powers $0, \dots, 2n$ and $i$ over the data points,

$$ \begin{align} X_k &= \sum_{i=1}^n x_i^k \\ \alpha_k &= \sum_{i=1}^n y_i x_i^k \end{align} $$

Let's now write a function that implements this algorithm

```python
def PolynomialFit(xi, yi, n):
    X = np.zeros((n+1, n+1))
    alpha = np.zeros(n+1)

    for i in range(0,n+1):
        alpha[i] = np.sum(yi*xi**i)
        for j in range(0,n+1):
            X[i,j] = np.sum(xi**(i+j))

    return la.inv(X)@alpha
```

Let's make some data to test the function on

```python
xi = np.linspace(-4,4,100)
yi = 9 - 9*xi - xi**2 + xi**3
```

```python
PolynomialFit(xi,yi, 3)
```

array([ 9., -9., -1.,  1.])

For this smooth data we recover precisely the coefficients of the cubic. Let's look at fitting some noisy data and plotting the result. First let's generate some noisy data and the fit to it.

```python
yiNoisy = yi + 10*np.random.random(xi.size)

cubicFitCoeffs = PolynomialFit(xi, yiNoisy, 3)
cubicFit = cubicFitCoeffs[0] + cubicFitCoeffs[1]*xi + cubicFitCoeffs[2]*xi**2 + cubicFitCoeffs[3]*xi**3
```

```python
plt.grid(True)
plt.scatter(xi,yiNoisy);
plt.plot(xi, cubicFit, 'r');
```

## Fitting to a general linear model

If we have a general linear model we can still perform fits. We won't derive the algorithm below, but it is a generalization of the above methods. Instead we will just state it and show it in action.

$$ f(x, \vec{\beta}) = \beta_0 \phi_0(x) + \beta_1 \phi_1(x) + \dots + \beta_n \phi_n(x) $$

Then we can define the elements of a matrix $X$ via

$$ X_{ij} = \phi_j(x_i) $$

Then the coefficients in the fit can be calculated via

$$ \vec{\beta} = (X^T X)^{-1} X^T \vec{y} $$

where $\vec{y}$ is the vector of $y$-values of the data.

### Example

Let's look at the example where the data is given by

$$ y_i = 5\sin(x_i) - x_i + 4$$

In this case $\phi_0 = \sin(x)$, $\phi_1 = x$ and $\phi_2 = 4$. Let's now define the matrix $X$ and the vector $\vec{y}$.

```python
xi = np.linspace(-4,4,100)
yi = 5*np.sin(xi) - xi + 4

X = np.array([np.sin(xi), xi, 4*np.ones(xi.size)]).T
```

Applying the formula above we recover the coefficients:

```python
la.inv(X.T@X)@X.T@yi
```

array([ 5., -1.,  1.])

Let's look at a noisy data version

```python
yiNoisy = 5*np.sin(xi) - xi + 5*np.random.rand(xi.size)

fitCoeffs = la.inv(X.T@X)@X.T@yiNoisy
fit = fitCoeffs[0]*np.sin(xi) + fitCoeffs[1]*xi + fitCoeffs[2]*4
print(fitCoeffs)
```

[ 5.1905638  -0.950227    0.64035906]

```python
plt.scatter(xi, yiNoisy)
plt.grid(True)
plt.plot(xi, fit, 'r');
```

```python

```
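As a side note on the design choice: forming and inverting $X^TX$ (the normal equations) squares the condition number of the problem, so for larger models numpy's dedicated least-squares solver is preferable. A quick cross-check, reusing the `X` and `yiNoisy` arrays from the cells above:

```python
# np.linalg.lstsq solves the least-squares problem via SVD, avoiding the
# explicit inverse of X^T X; the coefficients should closely match fitCoeffs.
coeffs, residuals, rank, sv = np.linalg.lstsq(X, yiNoisy, rcond=None)
print(coeffs)
```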
module Data.Logic where open import Haskell.Prelude renaming (zero to Z; suc to S) open import Relation.Nullary.Decidable open import Data.Nat.DivMod open import Data.Nat.Properties open import Agda.Primitive {-# FOREIGN AGDA2HS import Data.Nat #-} ---- Equational reasoning useEq : {x y : Bool} -> x ≡ y -> .(IsTrue x) -> IsTrue y useEq {true} {true} eq is = IsTrue.itsTrue useEqFalse : {x y : Bool} -> x ≡ y -> .(IsFalse x) -> IsFalse y useEqFalse {false} {false} eq is = IsFalse.itsFalse isTrueToEquiv : {c : Bool} -> IsTrue c -> c ≡ true isTrueToEquiv {true} p = refl isFalseToEquiv : {c : Bool} -> IsFalse c -> c ≡ false isFalseToEquiv {false} p = refl -- symmetry of equality sym : {u : Level} {A : Set u} {x y : A} → x ≡ y → y ≡ x sym refl = refl -- transitivity of equality trans : {u : Level} {A : Set u} {x y z : A} → x ≡ y → y ≡ z → x ≡ z trans refl refl = refl -- congruence of equality cong : {u v : Level} {A : Set u} {B : Set v} {x y : A} → (f : A → B) → x ≡ y → f x ≡ f y cong f refl = refl cong2 : {A B T : Set} {a1 a2 : A} {b1 b2 : B} → (f : A → B -> T) → a1 ≡ a2 -> b1 ≡ b2 → f a1 b1 ≡ f a2 b2 cong2 f refl refl = refl cong3 : {A B C T : Set} {a1 a2 : A} {b1 b2 : B} {c1 c2 : C} → (f : A → B -> C -> T) → a1 ≡ a2 -> b1 ≡ b2 -> c1 ≡ c2 → f a1 b1 c1 ≡ f a2 b2 c2 cong3 f refl refl refl = refl cong4 : {A B C D T : Set} {a1 a2 : A} {b1 b2 : B} {c1 c2 : C} {d1 d2 : D} → (f : A → B -> C -> D -> T) → a1 ≡ a2 -> b1 ≡ b2 → c1 ≡ c2 -> d1 ≡ d2 -> f a1 b1 c1 d1 ≡ f a2 b2 c2 d2 cong4 f refl refl refl refl = refl begin_ : {u : Level} {A : Set u} → {x y : A} → x ≡ y → x ≡ y begin p = p _end : {u : Level} {A : Set u} → (x : A) → x ≡ x x end = refl _=⟨_⟩_ : {u : Level} {A : Set u} → (x : A) → {y z : A} → x ≡ y → y ≡ z → x ≡ z x =⟨ p ⟩ q = trans p q _=⟨⟩_ : {u : Level} {A : Set u} → (x : A) → {y : A} → x ≡ y → x ≡ y x =⟨⟩ q = x =⟨ refl ⟩ q infix 1 begin_ infix 3 _end infixr 2 _=⟨_⟩_ infixr 2 _=⟨⟩_ ---- General purpose proofs propZeroImpliesLtOne : (x : Nat) -> IsFalse (x == 0) -> IsFalse (x < 1) propZeroImpliesLtOne Z notZ = notZ propZeroImpliesLtOne (S x) notZ = IsFalse.itsFalse propFnIf : {a b : Set} -> {c : Bool} {x y : a} (f : a -> b) -> (if c then f x else f y) ≡ f (if c then x else y) propFnIf {c = false} f = refl propFnIf {c = true} f = refl propMaxSuc : (x y : Nat) -> max (S x) (S y) ≡ S (max x y) propMaxSuc Z Z = refl propMaxSuc Z (S y) = refl propMaxSuc (S x) Z = refl propMaxSuc (S x) (S y) = begin max (S $ S x) (S $ S y) =⟨⟩ (if x < y then (S $ S y) else (S $ S x)) =⟨ propFnIf S ⟩ (S $ (if x < y then (S y) else (S x))) =⟨⟩ S (max (S x) (S y)) end propMaxRefl : (x y : Nat) -> max x y ≡ max y x propMaxRefl Z Z = refl propMaxRefl Z (S y) = refl propMaxRefl (S x) Z = refl propMaxRefl (S x) (S y) = begin max (S x) (S y) =⟨ propMaxSuc x y ⟩ S (max x y) =⟨ cong S (propMaxRefl x y) ⟩ S (max y x) =⟨ sym $ propMaxSuc y x ⟩ max (S y) (S x) end propIsTrueCombine2 : {a b : Bool} -> IsTrue a -> IsTrue b -> IsTrue (a && b) propIsTrueCombine2 {true} {true} ta tb = IsTrue.itsTrue propIsTrueCombine4 : {a b c d : Bool} -> IsTrue a -> IsTrue b -> IsTrue c -> IsTrue d -> IsTrue ((a && b) && (c && d)) propIsTrueCombine4 {true} {true} {true} {true} ta tb tc td = IsTrue.itsTrue propIsTrueCombine4Alt : {a b c d : Bool} -> IsTrue a -> IsTrue b -> IsTrue c -> IsTrue d -> IsTrue (a && b && c && d) propIsTrueCombine4Alt {true} {true} {true} {true} ta tb tc td = IsTrue.itsTrue andRefl : (a b : Bool) -> (a && b) ≡ (b && a) andRefl false false = refl andRefl false true = refl andRefl true false = refl andRefl true true = refl 
orFst : {a b : Bool} -> IsTrue a -> IsTrue (a || b) orFst {true} {b} ap = IsTrue.itsTrue andFst : {a b : Bool} -> IsTrue (a && b) -> IsTrue a andFst {true} {true} ab = IsTrue.itsTrue andSnd : {a b : Bool} -> IsTrue (a && b) -> IsTrue b andSnd {true} {true} ab = IsTrue.itsTrue andSndI : {a b : Bool} -> .(IsTrue (a && b)) -> IsTrue b andSndI {true} {true} ab = IsTrue.itsTrue and1 : {a b c d : Bool} -> IsTrue (a && b && c && d) -> IsTrue a and1 {true} {true} {true} {true} abcd = IsTrue.itsTrue and2 : {a b c d : Bool} -> IsTrue (a && b && c && d) -> IsTrue b and2 {true} {true} {true} {true} abcd = IsTrue.itsTrue and3 : {a b c d : Bool} -> IsTrue (a && b && c && d) -> IsTrue c and3 {true} {true} {true} {true} abcd = IsTrue.itsTrue and4 : {a b c d : Bool} -> IsTrue (a && b && c && d) -> IsTrue d and4 {true} {true} {true} {true} abcd = IsTrue.itsTrue andCombine : {a b : Bool} -> IsTrue a -> IsTrue b -> IsTrue (a && b) andCombine {true} {true} ta tb = IsTrue.itsTrue boolAndTrue : (a : Bool) -> (a && true) ≡ a boolAndTrue false = refl boolAndTrue true = refl ifTrue : {u : Level} {t : Set u} {a b : t} (c : Bool) -> IsTrue c -> (if c then a else b) ≡ a ifTrue true tc = refl ifFalse : {t : Set} {a b : t} (c : Bool) -> IsFalse c -> (if c then a else b) ≡ b ifFalse false fc = refl infix -2 ifc_then_else_ ifc_then_else_ : {u : Level} {a : Set u} → (c : Bool) → ({{IsTrue c}} → a) → ({{IsFalse c}} → a) → a ifc false then x else y = y {{IsFalse.itsFalse}} ifc true then x else y = x {{IsTrue.itsTrue}} {-# COMPILE AGDA2HS ifc_then_else_ #-} ifcTrue : {u : Level} {t : Set u} -> (c : Bool) {a : {{.(IsTrue c)}} -> t} {b : {{.(IsFalse c)}} -> t} -> .(ct : IsTrue c) -> (ifc c then (λ {{p}} -> a) else (λ {{p}} -> b)) ≡ (a {{ct}}) ifcTrue true {a} {b} ct = refl ifcFalse : {u : Level} {t : Set u} -> (c : Bool) {a : {{.(IsTrue c)}} -> t} {b : {{.(IsFalse c)}} -> t} -> .(ct : IsFalse c) -> (ifc c then (λ {{p}} -> a) else (λ {{p}} -> b)) ≡ (b {{ct}}) ifcFalse false {a} {b} ct = refl propFnIfc : {a b : Set} -> (c : Bool) {x : {{IsTrue c}} -> a} {y : {{IsFalse c}} -> a} (f : a -> b) -> (ifc c then f x else f y) ≡ f (ifc c then x else y) propFnIfc false f = refl propFnIfc true f = refl propFnDistributeIfc2 : {a b : Set} -> (c1 c2 : Bool) {w x y z : a} (f : a -> b) -> f (ifc c1 then (ifc c2 then w else x) else (ifc c2 then y else z)) ≡ (ifc c1 then (ifc c2 then f w else f x) else (ifc c2 then f y else f z)) propFnDistributeIfc2 false false f = refl propFnDistributeIfc2 false true f = refl propFnDistributeIfc2 true false f = refl propFnDistributeIfc2 true true f = refl propIfcBranchesSame : {t : Set} -> {c : Bool} (v : t) -> (ifc c then v else v) ≡ v propIfcBranchesSame {c = false} v = refl propIfcBranchesSame {c = true} v = refl propIfBranchesSame : {t : Set} -> {c : Bool} (v : t) -> (if c then v else v) ≡ v propIfBranchesSame {c = false} v = refl propIfBranchesSame {c = true} v = refl ifToIfc : {t : Set} {c : Bool} {a b : t} -> (if c then a else b) ≡ (ifc c then a else b) ifToIfc {c = false} = refl ifToIfc {c = true} = refl ifTrueMap : {t : Set} -> {c : Bool} {a a2 b : t} -> (IsTrue c -> a ≡ a2) -> (if c then a else b) ≡ (if c then a2 else b) ifTrueMap {c = false} aa2 = refl ifTrueMap {c = true} {a} {a2} aa2 = begin a =⟨ aa2 IsTrue.itsTrue ⟩ a2 end ifcTrueMap : {t : Set} -> {c : Bool} {a a2 b : t} -> (IsTrue c -> a ≡ a2) -> (ifc c then a else b) ≡ (ifc c then a2 else b) ifcTrueMap {c = false} aa2 = refl ifcTrueMap {c = true} {a} {a2} aa2 = begin a =⟨ aa2 IsTrue.itsTrue ⟩ a2 end ifTrueNested : {t : Set} -> {c1 c2 : Bool} {a 
b c : t} -> (c1 ≡ c2) -> (if c1 then (if c2 then a else b) else c) ≡ (if c1 then a else c) ifTrueNested {t} {false} {false} {a} {b} {c} c1eqc2 = refl ifTrueNested {t} {true} {true} {a} {b} {c} c1eqc2 = refl ifFalseNested : {t : Set} -> {c1 c2 : Bool} {a b c : t} -> (c1 ≡ c2) -> (if c1 then a else (if c2 then b else c)) ≡ (if c1 then a else c) ifFalseNested {t} {false} {false} {a} {b} {c} c1eqc2 = refl ifFalseNested {t} {true} {true} {a} {b} {c} c1eqc2 = refl ---- Useful functions div : Nat -> (divisor : Nat) -> {≢0 : False (divisor ≟ 0)} -> Nat div a b {p} = _/_ a b {p} -- Does not need compile, since it is already defined in haskell mod : Nat -> (divisor : Nat) -> {≢0 : False (divisor ≟ 0)} -> Nat mod a b {p} = _%_ a b {p} -- Does not need compile, since it is already defined in haskell addLte : (a b c : Nat) -> IsTrue (c == a + b) -> IsTrue (a <= c) addLte Z Z Z abc = IsTrue.itsTrue addLte Z (S b) (S c) abc = IsTrue.itsTrue addLte (S a) Z (S c) abc = addLte a Z c abc addLte (S a) (S b) (S c) abc = addLte a (S b) c abc 0+n : (n : Nat) -> IsTrue (n == 0 + n) 0+n Z = IsTrue.itsTrue 0+n (S n) = 0+n n addSuc : (a b c : Nat) -> IsTrue (c == a + S b) -> IsTrue (c == S a + b) addSuc Z Z (S c) abc = abc addSuc Z (S b) (S c) abc = abc addSuc (S a) Z (S c) abc = addSuc a Z c abc addSuc (S a) (S b) (S c) abc = addSuc a (S b) c abc -- m = k + j ==> mod-helper k m n j = (n + k) mod (1 + m). modHelperLt : (k m n j : Nat) -> IsTrue (m == k + j) -> IsTrue (mod-helper k m n j <= m) modHelperLt k m Z j i1 = addLte k j m i1 modHelperLt k m (S n) Z i1 = modHelperLt 0 m n m (0+n m) modHelperLt k m (S n) (S j) i1 = modHelperLt (S k) m n j (addSuc k j m i1) lteToLt : (a b : Nat) -> (a <= b) ≡ (a < S b) lteToLt Z Z = refl lteToLt Z (S b) = refl lteToLt (S a) Z = refl lteToLt (S a) (S b) = lteToLt a b modLt : (a b : Nat) {≢0 : False (b ≟ 0)} -> IsTrue ((mod a b {≢0}) < b) modLt a b@(S bs) = let pgoal : IsTrue (mod a b <= bs) pgoal = modHelperLt 0 bs a bs (0+n bs) in useEq (lteToLt (mod a b) bs) pgoal pow : Nat -> Nat -> Nat pow b Z = 1 pow b (S e) = b * pow b e {-# COMPILE AGDA2HS pow #-} mul_not_zero : {a b : Nat} -> IsFalse (a == 0) -> IsFalse (b == 0) -> IsFalse (a * b == 0) mul_not_zero {S a} {S b} az bz = IsFalse.itsFalse pow_not_zero : (n : Nat) -> IsFalse (pow 2 n == 0) pow_not_zero Z = IsFalse.itsFalse pow_not_zero (S sn) = mul_not_zero {2} {pow 2 sn} IsFalse.itsFalse (pow_not_zero sn) false_convert : (n : Nat) -> IsFalse (n == 0) -> False (n ≟ 0) false_convert (S n) if = tt pow_not_zero_cv : (n : Nat) -> False (pow 2 n ≟ 0) pow_not_zero_cv n = false_convert (pow 2 n) $ pow_not_zero n zeroLteAny : (a : Nat) -> IsTrue (0 <= a) zeroLteAny Z = IsTrue.itsTrue zeroLteAny (S a) = IsTrue.itsTrue lteSum : (a b s : Nat) -> (a <= b) ≡ (s + a <= s + b) lteSum a b Z = refl lteSum a b (S s) = lteSum a b s lteSumOne : (a b s : Nat) -> IsTrue (a <= b) -> IsTrue (a <= s + b) lteSumOne a b Z ab = ab lteSumOne Z b (S n) ab = IsTrue.itsTrue lteSumOne (S Z) Z (S Z) ab = IsTrue.itsTrue lteSumOne (S (S n)) Z (S Z) ab = ab lteSumOne (S n₁) (S n) (S Z) ab = lteSumOne n₁ n 1 ab lteSumOne (S n₁) b (S (S n)) ab = lteSumOne n₁ (n + b) 1 (lteSumOne (S n₁) b (S n) ab) anyGteZero : (a : Nat) -> IsTrue (a >= 0) anyGteZero Z = IsTrue.itsTrue anyGteZero (S a) = IsTrue.itsTrue {-# TERMINATING #-} log2up : Nat -> Nat -- UNSAFE: This terminates since x/2 always decreases if x > 1 log2up 0 = 0 log2up 1 = 0 log2up (S (S x)) = S (log2up (div (S (S (S x))) 2)) {-# COMPILE AGDA2HS log2up #-} divHelperReduce : (x a b c : Nat) -> div-helper (S x) a 
b c ≡ S (div-helper x a b c) divHelperReduce x a Z c = refl divHelperReduce x a (S b) Z = begin div-helper (S (S x)) a b a =⟨ divHelperReduce (S x) a b a ⟩ S (div-helper (S x) a b a) end divHelperReduce x a (S b) (S c) = divHelperReduce x a b c div2Reduce : (x : Nat) -> div (2 + x) 2 ≡ S (div x 2) div2Reduce Z = refl div2Reduce x@(S sx) = begin div (2 + x) 2 =⟨⟩ div-helper 1 1 sx 0 =⟨ divHelperReduce 0 1 sx 0 ⟩ S (div-helper 0 1 sx 0) =⟨⟩ S (div x 2) end isTrueEquiv : {b : Bool} -> IsTrue b -> true ≡ b isTrueEquiv {true} t = refl plusGteOne : (a b : Nat) -> IsTrue (a >= 1) -> IsTrue (a + b >= 1) plusGteOne (S a) b p = anyGteZero (a + b) multGteOne : (a b : Nat) -> IsTrue (a >= 1) -> IsTrue (b >= 1) -> IsTrue (a * b >= 1) multGteOne (S a) (S b) pa pb = plusGteOne (S b) (a * (S b)) pb powGteOne : (n : Nat) -> IsTrue (pow 2 n >= 1) powGteOne Z = IsTrue.itsTrue powGteOne (S n) = multGteOne 2 (pow 2 n) IsTrue.itsTrue (powGteOne n) add-assoc : (a b c : Nat) → (a + b) + c ≡ a + (b + c) add-assoc Z b c = refl add-assoc (S a) b c = cong S (add-assoc a b c) add-n-zero : (n : Nat) → n + Z ≡ n add-n-zero Z = refl add-n-zero (S n) = cong S (add-n-zero n) add-n-suc : (m n : Nat) → m + (S n) ≡ S (m + n) add-n-suc Z n = refl add-n-suc (S m) n = cong S (add-n-suc m n) add-comm : (m n : Nat) → m + n ≡ n + m add-comm m Z = add-n-zero m add-comm m (S n) = begin m + (S n) =⟨ add-n-suc m n ⟩ S (m + n) =⟨ cong S (add-comm m n) ⟩ (S n) + m end mul-n-zero : (a : Nat) -> a * Z ≡ Z mul-n-zero Z = refl mul-n-zero (S a) = mul-n-zero a mul-n-suc : (a b : Nat) -> a * (S b) ≡ a + a * b mul-n-suc Z b = refl mul-n-suc (S a) b = begin (S a) * (S b) =⟨⟩ (S b) + a * (S b) =⟨ cong (λ q -> (S b) + q) (mul-n-suc a b) ⟩ S (b + (a + a * b)) =⟨ cong S (sym $ add-assoc b a (a * b)) ⟩ S ((b + a) + a * b) =⟨ cong (λ q -> S (q + a * b)) (add-comm b a) ⟩ S ((a + b) + a * b) =⟨ cong S (add-assoc a b (a * b)) ⟩ S (a + (b + a * b) ) =⟨⟩ (S a) + (S a) * b end mul-comm : (a b : Nat) -> a * b ≡ b * a mul-comm a Z = mul-n-zero a mul-comm a (S b) = begin a * (S b) =⟨ mul-n-suc a b ⟩ a + a * b =⟨ cong (λ q -> a + q) (mul-comm a b) ⟩ (S b) * a end gteDouble : (a b : Nat) -> (a >= b) ≡ (a + a >= b + b) gteDouble Z Z = refl gteDouble Z (S b) = refl gteDouble (S a) Z = refl gteDouble (S a) (S b) = begin a >= b =⟨ gteDouble a b ⟩ (S a) + a >= (S b) + b =⟨ cong (λ q -> q >= (S b) + b) (add-comm (S a) a) ⟩ a + S a >= S b + b =⟨ cong (λ q -> a + S a >= q) (add-comm (S b) b) ⟩ a + S a >= b + S b end gteMultBoth : (a b : Nat) -> (a >= b) ≡ (2 * a >= 2 * b) gteMultBoth a b = begin a >= b =⟨ gteDouble a b ⟩ a + a >= b + b =⟨ cong (λ q -> a + q >= b + b) (sym $ add-comm a 0) ⟩ a + (a + 0) >= b + b =⟨ cong (λ q -> a + (a + 0) >= b + q) (sym $ add-comm b 0) ⟩ a + (a + 0) >= b + (b + 0) end gteTransitive : (a b c : Nat) -> IsTrue (a >= b) -> IsTrue (b >= c) -> IsTrue (a >= c) gteTransitive Z Z Z ab bc = IsTrue.itsTrue gteTransitive (S a) Z Z ab bc = IsTrue.itsTrue gteTransitive (S a) (S b) Z ab bc = IsTrue.itsTrue gteTransitive (S a) (S b) (S c) ab bc = gteTransitive a b c ab bc mul-div : (x : Nat) -> IsTrue (2 * (div (1 + x) 2) >= x) mul-div Z = IsTrue.itsTrue mul-div (S Z) = IsTrue.itsTrue mul-div (S (S x)) = let p1 : IsTrue (2 + 2 * (div (1 + x) 2) >= 2 + x) p1 = mul-div x p2 : IsTrue (S (div (1 + x) 2) * 2 >= 2 + x) p2 = useEq (cong (λ q -> 2 + q >= 2 + x) (mul-comm 2 (div (1 + x) 2))) p1 p3 : IsTrue (2 * S (div (1 + x) 2) >= 2 + x) p3 = useEq (cong (λ q -> q >= 2 + x) (mul-comm (S (div (1 + x) 2)) 2)) p2 goal : IsTrue (2 * (div (3 + x) 2) >= 2 + x) goal = 
useEq (cong (λ q -> 2 * q >= 2 + x) (sym $ div2Reduce (1 + x))) p3 in goal log2upPow : (a b : Nat) -> .(IsTrue (a >= log2up b)) -> IsTrue (pow 2 a >= b) log2upPow Z Z p = IsTrue.itsTrue log2upPow Z (S Z) p = IsTrue.itsTrue log2upPow (S a) Z p = anyGteZero (pow 2 (S a)) log2upPow (S a) (S Z) p = useEq (begin (S a) >= 0 =⟨ isTrueEquiv $ anyGteZero (S a) ⟩ true =⟨ isTrueEquiv $ powGteOne (S a) ⟩ pow 2 (S a) >= 1 end) p log2upPow (S a) (S (S b)) p = let p1 : IsTrue ( pow 2 a >= div (3 + b) 2 ) p1 = log2upPow a (div (3 + b) 2) p p2 : IsTrue ( pow 2 (S a) >= 2 * (div (3 + b) 2) ) p2 = useEq (gteMultBoth (pow 2 a) (div (3 + b) 2)) p1 in gteTransitive (pow 2 (S a)) (2 * (div (3 + b) 2)) (2 + b) p2 ((mul-div (2 + b))) eqToGte : (a b : Nat) -> IsTrue (a == b) -> IsTrue (a >= b) eqToGte Z b ab = ab eqToGte (S a) (S b) ab = eqToGte a b ab gteInvert : (a b : Nat) -> IsTrue (a >= b) -> IsTrue (b <= a) gteInvert Z Z ab = IsTrue.itsTrue gteInvert (S a) Z ab = IsTrue.itsTrue gteInvert (S a) (S b) ab = gteInvert a b ab ltLteTransitive : (a b c : Nat) -> IsTrue (a < b) -> IsTrue (b <= c) -> IsTrue (a < c) ltLteTransitive Z (S b) (S c) ab bc = IsTrue.itsTrue ltLteTransitive (S a) (S b) (S c) ab bc = ltLteTransitive a b c ab bc lteTransitive : (a b c : Nat) -> IsTrue (a <= b) -> IsTrue (b <= c) -> IsTrue (a <= c) lteTransitive Z Z c ab bc = bc lteTransitive Z (S b) (S c) ab bc = IsTrue.itsTrue lteTransitive (S a) (S b) (S c) ab bc = lteTransitive a b c ab bc incrLte : (a b : Nat) -> IsTrue (a <= b) -> IsTrue (a <= S b) incrLte Z Z altb = IsTrue.itsTrue incrLte Z (S b) altb = IsTrue.itsTrue incrLte (S a) (S b) altb = incrLte a b altb natPlusMinNat : (x : Nat) -> {{p : IsFalse (x < 1)}} -> x ≡ (S (x - 1)) natPlusMinNat (S x) = refl transformLteRight : {a b c : Nat} -> b ≡ c -> IsTrue (a <= b) -> IsTrue (a <= c) transformLteRight {a} {b} {.b} refl ab = ab lteSelf : (v : Nat) -> IsTrue (v <= v) lteSelf Z = IsTrue.itsTrue lteSelf (S v) = lteSelf v falseToNotTrue : {b : Bool} -> IsFalse (b) -> IsTrue (not b) falseToNotTrue {false} if = IsTrue.itsTrue notFalseToTrue : {b : Bool} -> IsFalse (not b) -> IsTrue b notFalseToTrue {true} if = IsTrue.itsTrue trueToNotFalse : {b : Bool} -> IsTrue (b) -> IsFalse (not b) trueToNotFalse {true} if = IsFalse.itsFalse notTrueToFalse : {b : Bool} -> IsTrue (not b) -> IsFalse (b) notTrueToFalse {false} if = IsFalse.itsFalse -- Law of reflexivity for equality postulate eqReflexivity : {t : Set} {{eqT : Eq t}} (v : t) -> IsTrue (v == v) eqToEquiv : {t : Set} {{eqT : Eq t}} (a b : t) -> IsTrue (a == b) -> a ≡ b botToAny : {t : Set} -> ⊥ -> t botToAny () max4 : (a b c d : Nat) -> Nat max4 a b c d = max (max a b) (max c d) {-# COMPILE AGDA2HS max4 #-} sub : (a b : Nat) -> {{ .( IsTrue (b <= a) ) }} -> Nat sub a Z {{ab}} = a sub (S a) (S b) {{ab}} = sub a b {-# COMPILE AGDA2HS sub #-} diff : (a b : Nat) -> Nat diff a Z = a diff Z b = b diff (S a) (S b) = diff a b {-# COMPILE AGDA2HS diff #-}
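-- A small worked example (not part of the original file) showing the
-- equational-reasoning combinators from the top of this module in action,
-- reusing add-n-zero and add-comm proved above:
example-comm-zero : (a b : Nat) -> (a + b) + Z ≡ b + a
example-comm-zero a b =
  begin
    (a + b) + Z
  =⟨ add-n-zero (a + b) ⟩
    a + b
  =⟨ add-comm a b ⟩
    b + a
  end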
module Liu

using ModelingToolkit
using ..NeuronBuilder

import ..get_parameters, ..get_states, ..default_params, ..default_states

include("channels.jl")
include("calc_dynamics.jl")

export get_parameters, get_states, default_params, default_states

end

"""
Deliberately not exporting ion channel names so they don't conflict:
e.g. `Na` can only be accessed as `Liu.Na` from NeuronBuilder.

Deliberately exporting the other structs that act on components, because each
is one struct, not defined separately in different modules, that acts on
module-specific structs.
"""
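# A hedged usage sketch of what this namespacing buys. The channel name `Na`
# comes from the docstring above; the rest is illustrative, not confirmed by
# this file:
#
#   using NeuronBuilder
#   Liu.Na       # qualified access to the Liu-specific sodium channel
#   Na           # error: not exported, so bare channel names stay free
#                # for other neuron models to use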
(* ************************************************************************** *)
(** * Basic tactics *)
(* ************************************************************************** *)

(** This file collects a number of basic tactics for better proof automation,
    structuring large proofs, or rewriting. Many of the definitions have been
    ported from ss-reflect. *)

(** Symbols starting with [hahn__] are internal. *)

Require Import Bool Arith ZArith String.
Require ClassicalFacts.
Require Export ClassicalDescription FunctionalExtensionality.

Open Scope bool_scope.
Open Scope list_scope.

Set Implicit Arguments.
Unset Strict Implicit.

(** Set up hint databases *)
Create HintDb hahn discriminated.      (* General stuff, used by done *)
Create HintDb hahn_refl discriminated. (* Decidable equalities *)
Create HintDb hahn_full discriminated. (* Expensive lemmas *)

(** Shorthand for applying functional extensionality. *)
Ltac exten := apply functional_extensionality.

(** Notation for classical if-then-else *)
Notation "'ifP' c 'then' u 'else' v" :=
  (if excluded_middle_informative c then u else v) (at level 200).

(* ************************************************************************** *)
(** ** Coercion of [bool] into [Prop] *)
(* ************************************************************************** *)

(** Coercion of bools into Prop *)
Coercion is_true (b : bool) : Prop := b = true.

(** Hints for auto *)
Lemma hahn__true_is_true : true.
Proof. reflexivity. Qed.

Lemma hahn__not_false_is_true : ~ false.
Proof. discriminate. Qed.

Global Hint Resolve hahn__true_is_true hahn__not_false_is_true : core.

(* ************************************************************************** *)
(** ** Very basic automation *)
(* ************************************************************************** *)

(** Set up for basic simplification *)

(** Adaptation of the ss-reflect "[done]" tactic. *)

Ltac hahn__basic_done :=
  solve [trivial with hahn | simple apply sym_equal; trivial | discriminate | contradiction].

Ltac done := trivial with hahn; hnf; intros;
  solve [try hahn__basic_done; split;
         try hahn__basic_done; split;
         try hahn__basic_done; split;
         try hahn__basic_done; split;
         try hahn__basic_done; split; hahn__basic_done
        | match goal with H : ~ _ |- _ => solve [case H; trivial] end].

(** A variant of the ssr "done" tactic that performs "eassumption". *)

Ltac edone := try eassumption; trivial; hnf; intros;
  solve [try eassumption; try hahn__basic_done; split;
         try eassumption; try hahn__basic_done; split;
         try eassumption; try hahn__basic_done; split;
         try eassumption; try hahn__basic_done; split;
         try eassumption; try hahn__basic_done; split;
         try eassumption; hahn__basic_done
        | match goal with H : ~ _ |- _ => solve [case H; trivial] end].

Tactic Notation "by" tactic(tac) := (tac; done).
Tactic Notation "eby" tactic(tac) := (tac; edone).

(* ************************************************************************** *)
(** ** Equality types *)
(* ************************************************************************** *)

Module Equality.

Definition axiom T (e : T -> T -> bool) :=
  forall x y, reflect (x = y) (e x y).

Structure mixin_of T := Mixin {op : T -> T -> bool; _ : axiom op}.
Notation class_of := mixin_of (only parsing).

Section ClassDef.

Structure type := Pack {sort; _ : class_of sort; _ : Type}.

Definition class cT' :=
  match cT' return class_of (sort cT') with @Pack _ c _ => c end.

Definition pack (T: Type) c := @Pack T c T.

End ClassDef.

Module Exports.
Coercion sort : type >-> Sortclass.
Notation eqType := type.
Notation EqMixin := Mixin. Notation EqType T m := (@pack T m). End Exports. End Equality. Export Equality.Exports. Definition eq_op T := Equality.op (Equality.class T). Arguments eq_op {T}. Lemma eqE : forall T x, eq_op x = Equality.op (Equality.class T) x. Proof. done. Qed. Lemma eqP : forall T, Equality.axiom (@eq_op T). Proof. by unfold eq_op; destruct T as [? []]. Qed. Arguments eqP [T] x y. (* Notation "x == y" := (eq_op x y) (at level 70, no associativity) : bool_scope. Notation "x == y :> T" := ((x : T) == (y : T)) (at level 70, y at next level) : bool_scope. Notation "x != y" := (negb (x == y)) (at level 70, no associativity) : bool_scope. Notation "x != y :> T" := (negb (x == y :> T)) (at level 70, y at next level) : bool_scope. *) Lemma hahn__internal_eqP : forall (T: eqType) (x y : T), reflect (x = y) (eq_op x y). Proof. apply eqP. Qed. Lemma neqP : forall (T: eqType) (x y: T), reflect (x <> y) (negb (eq_op x y)). Proof. intros; case eqP; constructor; auto. Qed. Lemma beq_refl : forall (T : eqType) (x : T), eq_op x x. Proof. by intros; case eqP. Qed. Lemma beq_sym : forall (T : eqType) (x y : T), (eq_op x y) = (eq_op y x). Proof. intros; do 2 case eqP; congruence. Qed. Global Hint Resolve beq_refl : hahn. Hint Rewrite beq_refl : hahn_trivial. Notation eqxx := beq_refl. (** Comparison for [nat] *) Fixpoint eqn_rec (x y: nat) {struct x} := match x, y with | O, O => true | S x, S y => eqn_rec x y | _, _ => false end. Definition eqn := match tt with tt => eqn_rec end. Lemma eqnP: forall x y, reflect (x = y) (eqn x y). Proof. induction x; destruct y; try (constructor; done). change (eqn (S x) (S y)) with (eqn x y). case IHx; constructor; congruence. Qed. Canonical Structure nat_eqMixin := EqMixin eqnP. Canonical Structure nat_eqType := Eval hnf in EqType nat nat_eqMixin. Lemma eqnE : eqn = (@eq_op _). Proof. done. Qed. (* ************************************************************************** *) (** ** Basic simplification tactics *) (* ************************************************************************** *) Lemma hahn__negb_rewrite : forall b, negb b -> b = false. Proof. by intros []. Qed. Lemma hahn__andb_split : forall b1 b2, b1 && b2 -> b1 /\ b2. Proof. by intros [] []. Qed. Lemma hahn__nandb_split : forall b1 b2, b1 && b2 = false -> b1 = false \/ b2 = false. Proof. intros [] []; auto. Qed. Lemma hahn__orb_split : forall b1 b2, b1 || b2 -> b1 \/ b2. Proof. intros [] []; auto. Qed. Lemma hahn__norb_split : forall b1 b2, b1 || b2 = false -> b1 = false /\ b2 = false. Proof. intros [] []; auto. Qed. Lemma hahn__eqb_split : forall b1 b2 : bool, (b1 -> b2) -> (b2 -> b1) -> b1 = b2. Proof. intros [] [] H H'; unfold is_true in *; auto using sym_eq. Qed. Lemma hahn__beq_rewrite (T : eqType) (x1 x2 : T) : eq_op x1 x2 -> x1 = x2. Proof. by case eqP. Qed. (** Set up for basic simplification: database of reflection lemmas *) Global Hint Resolve hahn__internal_eqP neqP eqnP : hahn_refl. Global Hint Resolve Z.eqb_spec Z.leb_spec0 Z.ltb_spec0 : hahn_refl. Global Hint Resolve N.eqb_spec N.leb_spec0 N.ltb_spec0 : hahn_refl. Global Hint Resolve Pos.eqb_spec Pos.leb_spec0 Pos.ltb_spec0 : hahn_refl. Global Hint Resolve Nat.eqb_spec Nat.leb_spec0 Nat.ltb_spec0 : hahn_refl. Global Hint Resolve Ascii.eqb_spec String.eqb_spec : hahn_refl. Global Hint Resolve Bool.eqb_spec : hahn_refl. Ltac hahn__complaining_inj f H := let X := fresh in (match goal with | [|- ?P ] => set (X := P) end); injection H; clear H; intros; subst X; try subst. 
Ltac hahn__clarify1 := try subst; repeat match goal with | [H: is_true (andb _ _) |- _] => let H' := fresh H in case (hahn__andb_split H); clear H; intros H' H | [H: is_true (negb ?x) |- _] => rewrite (hahn__negb_rewrite H) in * | [H: is_true ?x |- _] => rewrite H in * | [H: ?x = true |- _] => rewrite H in * | [H: ?x = false |- _] => rewrite H in * | [H: is_true (eq_op _ _) |- _] => generalize (hahn__beq_rewrite H); clear H; intro H | [H: @existT _ _ _ _ = @existT _ _ _ _ |- _] => apply inj_pair2 in H; try subst | [H: ?f _ = ?f _ |- _] => hahn__complaining_inj f H | [H: ?f _ _ = ?f _ _ |- _] => hahn__complaining_inj f H | [H: ?f _ _ _ = ?f _ _ _ |- _] => hahn__complaining_inj f H | [H: ?f _ _ _ _ = ?f _ _ _ _ |- _] => hahn__complaining_inj f H | [H: ?f _ _ _ _ _ = ?f _ _ _ _ _ |- _] => hahn__complaining_inj f H | [H: ?f _ _ _ _ _ _ = ?f _ _ _ _ _ _ |- _] => hahn__complaining_inj f H | [H: ?f _ _ _ _ _ _ _ = ?f _ _ _ _ _ _ _ |- _] => hahn__complaining_inj f H end; try done. (** Perform injections & discriminations on all hypotheses *) Ltac clarify := hahn__clarify1; repeat match goal with | H1: ?x = Some _, H2: ?x = None |- _ => rewrite H2 in H1; discriminate | H1: ?x = Some _, H2: ?x = Some _ |- _ => rewrite H2 in H1; hahn__clarify1 end; (* autorewrite with hahn_trivial; *) try done. (** Kill simple goals that require up to two econstructor calls. *) Ltac vauto := (clarify; try edone; try [> econstructor; (solve [edone | [> econstructor; edone]])]). Ltac inv x := inversion x; clarify. Ltac simpls := simpl in *; try done. Ltac ins := simpl in *; try done; intros. Ltac hahn__clarsimp1 := clarify; (autorewrite with hahn_trivial hahn in * ); (autorewrite with hahn_trivial in * ); try done; clarify; auto 1 with hahn. Ltac clarsimp := intros; simpl in *; hahn__clarsimp1. Ltac autos := clarsimp; auto with hahn. Tactic Notation "econs" := econstructor. Tactic Notation "econs" int_or_var(x) := econstructor x. (* ************************************************************************** *) (** Destruct but give useful names *) (* ************************************************************************** *) Definition NW (P: unit -> Prop) : Prop := P tt. Notation "⟪ x : t ⟫" := (NW (fun x => t)) (at level 80, x ident, no associativity). Notation "<< x : t >>" := (NW (fun x => t)) (at level 80, x ident, no associativity, only parsing). Notation "⟪ t ⟫" := (NW (fun _ => t)) (at level 79, no associativity, format "⟪ t ⟫"). Ltac unnw := unfold NW in *. Ltac rednw := red; unnw. Global Hint Unfold NW : core. Ltac splits := intros; unfold NW; repeat match goal with | [ |- _ /\ _ ] => split end. Ltac esplits := intros; unfold NW; repeat match goal with | [ |- @ex _ _ ] => eexists | [ |- _ /\ _ ] => split | [ |- @sig _ _ ] => eexists | [ |- @sigT _ _ ] => eexists | [ |- @prod _ _ ] => split end. (** Destruct, but no case split *) Ltac desc := repeat match goal with | H: is_true (eq_op _ _) |- _ => generalize (hahn__beq_rewrite H); clear H; intro H | H : exists x, NW (fun y => _) |- _ => progress first [try (destruct H as [? H] ; fail 1) | (* Check it's not a Section Hypothesis *) let x' := fresh x in let y' := fresh y in destruct H as [x' y']; red in y'] | H : exists x, ?p |- _ => let x' := fresh x in destruct H as [x' H] | H : ?p /\ ?q |- _ => progress first [try (destruct H as [H ?] 
; fail 1) | (* Check it's not a Section Hypothesis *) let x' := match p with | NW (fun z => _) => fresh z | _ => H end in let y' := match q with | NW (fun z => _) => fresh z | _ => fresh H end in destruct H as [x' y']; match p with | NW _ => red in x' | _ => idtac end; match q with | NW _ => red in y' | _ => idtac end] | H : is_true (_ && _) |- _ => let H' := fresh H in case (hahn__andb_split H); clear H; intros H H' | H : (_ || _) = false |- _ => let H' := fresh H in case (hahn__norb_split H); clear H; intros H H' | H : ?x = ?x |- _ => clear H (* | H: is_true ?x |- _ => eapply elimT in H; [|solve [trivial with hahn_refl]] | H: ?x = true |- _ => eapply elimT in H; [|solve [trivial with hahn_refl]] | H: ?x = false |- _ => eapply elimFn in H; [|solve [trivial with hahn_refl]] | H: ?x = false |- _ => eapply elimF in H; [|solve [trivial with hahn_refl]] *) end. Ltac des := repeat match goal with | H: is_true (eq_op _ _) |- _ => generalize (hahn__beq_rewrite H); clear H; intro H | H : exists x, NW (fun y => _) |- _ => progress first [try (destruct H as [? H] ; fail 1) | (* Check it's not a Section Hypothesis *) let x' := fresh x in let y' := fresh y in destruct H as [x' y']; red in y'] | H : exists x, ?p |- _ => let x' := fresh x in destruct H as [x' H] | H : ?p /\ ?q |- _ => progress first [try (destruct H as [H ?] ; fail 1) | (* Check it's not a Section Hypothesis *) let x' := match p with | NW (fun z => _) => fresh z | _ => H end in let y' := match q with | NW (fun z => _) => fresh z | _ => fresh H end in destruct H as [x' y']; match p with | NW _ => red in x' | _ => idtac end; match q with | NW _ => red in y' | _ => idtac end] | H : is_true (_ && _) |- _ => let H' := fresh H in case (hahn__andb_split H); clear H; intros H H' | H : (_ || _) = false |- _ => let H' := fresh H in case (hahn__norb_split H); clear H; intros H H' | H : ?x = ?x |- _ => clear H | H : ?p <-> ?q |- _ => progress first [try (destruct H as [H ?] ; fail 1) | (* Check it's not a Section Hypothesis *) let x' := match p with | NW (fun z => _) => fresh z | _ => H end in let y' := match q with | NW (fun z => _) => fresh z | _ => fresh H end in destruct H as [x' y']; match p with | NW _ => unfold NW at 1 in x'; red in y' | _ => idtac end; match q with | NW _ => unfold NW at 1 in y'; red in x' | _ => idtac end] | H : ?p \/ ?q |- _ => progress first [try (destruct H as [H|H] ; fail 1) | (* Check it's not a Section Hypothesis *) let x' := match p with | NW (fun z => _) => fresh z | _ => H end in let y' := match q with | NW (fun z => _) => fresh z | _ => H end in destruct H as [x' | y']; [ match p with | NW _ => red in x' | _ => idtac end | match q with | NW _ => red in y' | _ => idtac end]] | H : is_true (_ || _) |- _ => case (hahn__orb_split H); clear H; intro H | H : (_ && _) = false |- _ => case (hahn__nandb_split H); clear H; intro H end. Ltac desc_section := repeat match goal with | H : exists x, NW (fun y => _) |- _ => try (destruct H as [? H] ; fail 1); (* Check it is a Section Hypothesis *) let x' := fresh x in let y' := fresh y in destruct H as [x' y']; clear H; red in y' | H : exists x, ?p |- _ => try (destruct H as [? H] ; fail 1); (* Check it is a Section Hypothesis *) let x' := fresh x in let y' := fresh H in destruct H as [x' y']; clear H; red in y' | H : ?p /\ ?q |- _ => try (destruct H as [H ?] 
; fail 1); (* Check it is a Section Hypothesis *) let x' := match p with | NW (fun z => _) => fresh z | _ => fresh H end in let y' := match q with | NW (fun z => _) => fresh z | _ => fresh H end in destruct H as [x' y']; clear H; match p with | NW _ => red in x' | _ => idtac end; match q with | NW _ => red in y' | _ => idtac end | H : ?x = ?x |- _ => clear H end; desc. Ltac cdes H := let H' := fresh H in assert (H' := H); try red in H'; desc. Ltac des_if_asm := clarify; repeat match goal with | H: context[ match ?x with _ => _ end ] |- _ => match (type of x) with | { _ } + { _ } => destruct x; clarify | bool => let Heq := fresh "Heq" in let P := fresh in evar(P: Prop); assert (Heq: reflect P x) by (subst P; trivial with hahn_refl); subst P; destruct Heq as [Heq|Heq] | _ => let Heq := fresh "Heq" in destruct x as [] eqn: Heq; clarify end end. Ltac des_if_goal := clarify; repeat match goal with | |- context[match ?x with _ => _ end] => match (type of x) with | { _ } + { _ } => destruct x; clarify | bool => let Heq := fresh "Heq" in let P := fresh in evar(P: Prop); assert (Heq: reflect P x) by (subst P; trivial with hahn_refl); subst P; destruct Heq as [Heq|Heq] | _ => let Heq := fresh "Heq" in destruct x as [] eqn: Heq; clarify end end. Ltac des_if := clarify; repeat match goal with | |- context[match ?x with _ => _ end] => match (type of x) with | { _ } + { _ } => destruct x; clarify | bool => let Heq := fresh "Heq" in let P := fresh in evar(P: Prop); assert (Heq: reflect P x) by (subst P; trivial with hahn_refl); subst P; destruct Heq as [Heq|Heq] | _ => let Heq := fresh "Heq" in destruct x as [] eqn: Heq; clarify end | H: context[ match ?x with _ => _ end ] |- _ => match (type of x) with | { _ } + { _ } => destruct x; clarify | bool => let Heq := fresh "Heq" in let P := fresh in evar(P: Prop); assert (Heq: reflect P x) by (subst P; trivial with hahn_refl); subst P; destruct Heq as [Heq|Heq] | _ => let Heq := fresh "Heq" in destruct x as [] eqn: Heq; clarify end end. Ltac des_eqrefl := match goal with | H: context[match ?X with _ => _ end Logic.eq_refl] |- _ => let EQ := fresh "EQ" in let id' := fresh "x" in revert H; generalize (Logic.eq_refl X); generalize X at 1 3; intros id' EQ; destruct id'; intros H | |- context[match ?X with _ => _ end Logic.eq_refl] => let EQ := fresh "EQ" in let id' := fresh "x" in generalize (Logic.eq_refl X); generalize X at 1 3; intros id' EQ; destruct id' end. Ltac desf_asm := clarify; des; des_if_asm. Ltac desf := clarify; des; des_if. Ltac clarassoc := clarsimp; autorewrite with hahn_trivial hahn hahnA in *; try done. Ltac hahn__hacksimp1 := clarsimp; match goal with | H: _ |- _ => solve [rewrite H; clear H; clarsimp |rewrite <- H; clear H; clarsimp] | _ => solve [f_equal; clarsimp] end. Ltac hacksimp := clarsimp; try match goal with | H: _ |- _ => solve [rewrite H; clear H; clarsimp |rewrite <- H; clear H; clarsimp] | |- context[match ?p with _ => _ end] => solve [destruct p; hahn__hacksimp1] | _ => solve [f_equal; clarsimp] end. Ltac clarify_not := repeat (match goal with | H : ~ False |- _ => clear H | H : ~ ~ _ |- _ => apply NNPP in H | H : ~ _ |- _ => apply imply_to_and in H; desc | H : ~ _ |- _ => apply not_or_and in H; desc | H : ~ _ |- _ => apply not_and_or in H; des | H : ~ _ |- _ => apply not_all_ex_not in H; desc end; clarify). Tactic Notation "tertium_non_datur" constr(P) := destruct (classic P); clarify_not. Tactic Notation "tertium_non_datur" constr(P) "as" simple_intropattern(pattern) := destruct (classic P) as pattern; clarify_not. 
(* ************************************************************************** *) (** ** Unification helpers *) (* ************************************************************************** *) Tactic Notation "pattern_lhs" uconstr(term) := match goal with |- _ ?lhs _ => let P := fresh in pose (P := lhs); pattern term in P; change lhs with P; subst P end. Tactic Notation "pattern_rhs" uconstr(term) := match goal with |- _ _ ?rhs => let P := fresh in pose (P := rhs); pattern term in P; change rhs with P; subst P end. (* ************************************************************************** *) (** ** Exploiting a hypothesis *) (* ************************************************************************** *) Tactic Notation "forward" tactic1(tac) := let foo := fresh in evar (foo : Prop); cut (foo); subst foo; cycle 1; [tac|]. Tactic Notation "forward" tactic1(tac) "as" simple_intropattern(H) := let foo := fresh in evar (foo : Prop); cut (foo); subst foo; cycle 1; [tac|intros H]. Tactic Notation "specialize_full" ident(H) := let foo := fresh in evar (foo : Prop); cut (foo); subst foo; cycle 1; [eapply H|try clear H; intro H]. (** Exploit an assumption (adapted from CompCert). *) Ltac exploit x := refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _ _) _) || refine ((fun x y => y x) (x _ _ _) _) || refine ((fun x y => y x) (x _ _) _) 
|| refine ((fun x y => y x) (x _) _).
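(* A hedged usage sketch, not part of the original file: [hahn__andb_split]
   from above splits a boolean conjunction hypothesis, and [done] closes the
   leftover side condition by assumption. *)
Goal forall b1 b2 : bool, b1 && b2 -> b1 /\ b2.
Proof. intros; apply hahn__andb_split; done. Qed.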
DragonPlate offers a wide range of carbon fiber tubes for many different applications. Our tubes are manufactured by us here in the USA, giving us complete control over their characteristics and quality. If you do not see what you need, contact us to discuss manufacturing tubes to your custom specifications. See also our Tube Connector Systems and Kevlar Tubes.

Roll-wrapped prepreg carbon fiber tubes are constructed using multiple wraps of twill and/or unidirectional fabrics. Rolled tubes are ideal for applications that require the highest bending stiffness and lowest weight, such as automation robotics, telescoping poles, idler rollers, and UAV components. Additionally, the longer length versions of these tubes have options for high and ultra-high modulus carbon fiber, as well as choices for outer fabrics, such as colored Kevlar fabrics. The standard finish is cello-wrapped gloss, but a smooth sanded finish is also available.

Braided carbon fiber tubes are made up of carbon fiber braid and unidirectional fabrics. They offer excellent torsional characteristics, as well as crush strength, and are ideal for high-torque applications. These carbon tubes are available in round, rectangular, and square profiles and can be axially optimized or made with higher modulus carbon fibers for increased bending stiffness. The standard for these tubes is our proprietary natural surface finish, with a wet, shiny appearance. Many can also be cello-wrapped for a glossier look, or given a peel-ply texture finish for improved bonding.

Our large diameter carbon fiber tubes are made of rolled bi-directional woven carbon fiber. This tubing is designed for large applications, such as telescope projects or lightweight drums. Our large carbon fiber tubes come with a texture finish on both the inside and outside for easy bonding and painting.
Require Import Crypto.Arithmetic.PrimeFieldTheorems. Require Import Crypto.Specific.solinas64_2e206m5_5limbs.Synthesis. (* TODO : change this to field once field isomorphism happens *) Definition carry : { carry : feBW_loose -> feBW_tight | forall a, phiBW_tight (carry a) = (phiBW_loose a) }. Proof. Set Ltac Profiling. Time synthesize_carry (). Show Ltac Profile. Time Defined. Print Assumptions carry.
[GOAL] R : Type u inst✝¹ : CommSemiring R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 A : Type v inst✝ : CommRing A φ : R →+* A ⊢ IsWeaklyEisensteinAt (Polynomial.map φ f) (Ideal.map φ 𝓟) [PROOFSTEP] refine' (IsWeaklyEisensteinAt_iff _ _).2 fun hn => _ [GOAL] R : Type u inst✝¹ : CommSemiring R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 A : Type v inst✝ : CommRing A φ : R →+* A n✝ : ℕ hn : n✝ < natDegree (Polynomial.map φ f) ⊢ coeff (Polynomial.map φ f) n✝ ∈ Ideal.map φ 𝓟 [PROOFSTEP] rw [coeff_map] [GOAL] R : Type u inst✝¹ : CommSemiring R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 A : Type v inst✝ : CommRing A φ : R →+* A n✝ : ℕ hn : n✝ < natDegree (Polynomial.map φ f) ⊢ ↑φ (coeff f n✝) ∈ Ideal.map φ 𝓟 [PROOFSTEP] exact mem_map_of_mem _ (hf.mem (lt_of_lt_of_le hn (natDegree_map_le _ _))) [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : ↑(aeval x) f = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) [PROOFSTEP] rw [aeval_def, Polynomial.eval₂_eq_eval_map, eval_eq_sum_range, range_add_one, sum_insert not_mem_range_self, sum_range, (hmo.map (algebraMap R S)).coeff_natDegree, one_mul] at hx [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) + ∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) [PROOFSTEP] replace hx := eq_neg_of_add_eq_zero_left hx [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) [PROOFSTEP] have : ∀ n < f.natDegree, p ∣ f.coeff n := by intro n hn refine' mem_span_singleton.1 (by simpa using hf.mem hn) [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i ⊢ ∀ (n : ℕ), n < natDegree f → p ∣ coeff f n [PROOFSTEP] intro n hn [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i n : ℕ hn : n < natDegree f ⊢ p ∣ coeff f n [PROOFSTEP] refine' mem_span_singleton.1 (by simpa using hf.mem hn) [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : 
IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i n : ℕ hn : n < natDegree f ⊢ coeff f n ∈ span {p} [PROOFSTEP] simpa using hf.mem hn [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i this : ∀ (n : ℕ), n < natDegree f → p ∣ coeff f n ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) [PROOFSTEP] choose! φ hφ using this [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) [PROOFSTEP] conv_rhs at hx => congr congr ·skip ext i rw [coeff_map, hφ i.1 (lt_of_lt_of_le i.2 (natDegree_map_le _ _)), RingHom.map_mul, mul_assoc] [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i [PROOFSTEP] congr congr ·skip ext i rw [coeff_map, hφ i.1 (lt_of_lt_of_le i.2 (natDegree_map_le _ _)), RingHom.map_mul, mul_assoc] [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i [PROOFSTEP] congr congr ·skip ext i rw [coeff_map, hφ i.1 (lt_of_lt_of_le i.2 (natDegree_map_le _ _)), RingHom.map_mul, mul_assoc] [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | -∑ i : Fin 
(natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i [PROOFSTEP] congr [GOAL] case a R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | ∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i [PROOFSTEP] congr [GOAL] case a.s R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | univ case a.f R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | fun i => coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i [PROOFSTEP] ·skip [GOAL] case a.s R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | univ [PROOFSTEP] skip [GOAL] case a.s R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | univ [PROOFSTEP] skip [GOAL] case a.s R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | univ [PROOFSTEP] skip [GOAL] case a.f R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map 
(algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n | fun i => coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i [PROOFSTEP] ext i [GOAL] case a.f.h R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i φ : ℕ → R hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n i : Fin (natDegree (Polynomial.map (algebraMap R S) f)) | coeff (Polynomial.map (algebraMap R S) f) ↑i * x ^ ↑i [PROOFSTEP] rw [coeff_map, hφ i.1 (lt_of_lt_of_le i.2 (natDegree_map_le _ _)), RingHom.map_mul, mul_assoc] [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) φ : ℕ → R hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), ↑(algebraMap R S) p * (↑(algebraMap R S) (φ ↑i) * x ^ ↑i) hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) [PROOFSTEP] rw [hx, ← mul_sum, neg_eq_neg_one_mul, ← mul_assoc (-1 : S), mul_comm (-1 : S), mul_assoc] [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) φ : ℕ → R hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), ↑(algebraMap R S) p * (↑(algebraMap R S) (φ ↑i) * x ^ ↑i) hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = ↑(algebraMap R S) p * (-1 * ∑ x_1 : Fin (natDegree (Polynomial.map (algebraMap R S) f)), ↑(algebraMap R S) (φ ↑x_1) * x ^ ↑x_1) [PROOFSTEP] refine' ⟨-1 * ∑ i : Fin (f.map (algebraMap R S)).natDegree, (algebraMap R S) (φ i.1) * x ^ i.1, _, rfl⟩ [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) φ : ℕ → R hx : x ^ natDegree (Polynomial.map (algebraMap R S) f) = -∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), ↑(algebraMap R S) p * (↑(algebraMap R S) (φ ↑i) * x ^ ↑i) hφ : ∀ (n : ℕ), n < natDegree f → coeff f n = p * φ n ⊢ -1 * ∑ i : Fin (natDegree (Polynomial.map (algebraMap R S) f)), ↑(algebraMap R S) (φ ↑i) * x ^ ↑i ∈ adjoin R {x} [PROOFSTEP] exact Subalgebra.mul_mem _ (Subalgebra.neg_mem _ (Subalgebra.one_mem _)) (Subalgebra.sum_mem _ fun i _ => Subalgebra.mul_mem _ (Subalgebra.algebraMap_mem _ _) (Subalgebra.pow_mem _ (subset_adjoin (Set.mem_singleton x)) _)) [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : ↑(aeval x) f = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) ⊢ ∀ (i : ℕ), natDegree (Polynomial.map (algebraMap R S) f) ≤ i → ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ i [PROOFSTEP] intro i hi [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt 
f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : ↑(aeval x) f = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ i [PROOFSTEP] obtain ⟨k, hk⟩ := exists_add_of_le hi [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : ↑(aeval x) f = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i k : ℕ hk : i = natDegree (Polynomial.map (algebraMap R S) f) + k ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ i [PROOFSTEP] rw [hk, pow_add] [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : ↑(aeval x) f = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i k : ℕ hk : i = natDegree (Polynomial.map (algebraMap R S) f) + k ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) * x ^ k [PROOFSTEP] obtain ⟨y, hy, H⟩ := exists_mem_adjoin_mul_eq_pow_natDegree hx hmo hf [GOAL] case intro.intro.intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : ↑(aeval x) f = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i k : ℕ hk : i = natDegree (Polynomial.map (algebraMap R S) f) + k y : S hy : y ∈ adjoin R {x} H : ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) ⊢ ∃ y, y ∈ adjoin R {x} ∧ ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) * x ^ k [PROOFSTEP] refine' ⟨y * x ^ k, _, _⟩ [GOAL] case intro.intro.intro.refine'_1 R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : ↑(aeval x) f = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i k : ℕ hk : i = natDegree (Polynomial.map (algebraMap R S) f) + k y : S hy : y ∈ adjoin R {x} H : ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) ⊢ y * x ^ k ∈ adjoin R {x} [PROOFSTEP] exact Subalgebra.mul_mem _ hy (Subalgebra.pow_mem _ (subset_adjoin (Set.mem_singleton x)) _) [GOAL] case intro.intro.intro.refine'_2 R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf✝ : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S p : R x : S hx : ↑(aeval x) f = 0 hmo : Monic f hf : IsWeaklyEisensteinAt f (Submodule.span R {p}) i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i k : ℕ hk : i = natDegree (Polynomial.map (algebraMap R S) f) + k y : S hy : y ∈ adjoin R {x} H : ↑(algebraMap R S) p * y = x ^ natDegree (Polynomial.map (algebraMap R S) f) ⊢ ↑(algebraMap R S) p * (y * x ^ k) = x ^ natDegree (Polynomial.map (algebraMap R S) f) * x ^ k [PROOFSTEP] rw [← mul_assoc _ y, H] [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : R hroot : IsRoot f x hmo : Monic f ⊢ ∀ (i : ℕ), natDegree f ≤ i → x ^ i ∈ 𝓟 [PROOFSTEP] intro i hi [GOAL] R : Type u inst✝² : 
CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : R hroot : IsRoot f x hmo : Monic f i : ℕ hi : natDegree f ≤ i ⊢ x ^ i ∈ 𝓟 [PROOFSTEP] obtain ⟨k, hk⟩ := exists_add_of_le hi [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : R hroot : IsRoot f x hmo : Monic f i : ℕ hi : natDegree f ≤ i k : ℕ hk : i = natDegree f + k ⊢ x ^ i ∈ 𝓟 [PROOFSTEP] rw [hk, pow_add] [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : R hroot : IsRoot f x hmo : Monic f i : ℕ hi : natDegree f ≤ i k : ℕ hk : i = natDegree f + k ⊢ x ^ natDegree f * x ^ k ∈ 𝓟 [PROOFSTEP] suffices x ^ f.natDegree ∈ 𝓟 by exact mul_mem_right (x ^ k) 𝓟 this [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : R hroot : IsRoot f x hmo : Monic f i : ℕ hi : natDegree f ≤ i k : ℕ hk : i = natDegree f + k this : x ^ natDegree f ∈ 𝓟 ⊢ x ^ natDegree f * x ^ k ∈ 𝓟 [PROOFSTEP] exact mul_mem_right (x ^ k) 𝓟 this [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : R hroot : IsRoot f x hmo : Monic f i : ℕ hi : natDegree f ≤ i k : ℕ hk : i = natDegree f + k ⊢ x ^ natDegree f ∈ 𝓟 [PROOFSTEP] rw [IsRoot.def, eval_eq_sum_range, Finset.range_add_one, Finset.sum_insert Finset.not_mem_range_self, Finset.sum_range, hmo.coeff_natDegree, one_mul] at * [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : R hroot : x ^ natDegree f + ∑ i : Fin (natDegree f), coeff f ↑i * x ^ ↑i = 0 hmo : Monic f i : ℕ hi : natDegree f ≤ i k : ℕ hk : i = natDegree f + k ⊢ x ^ natDegree f ∈ 𝓟 [PROOFSTEP] rw [eq_neg_of_add_eq_zero_left hroot, Ideal.neg_mem_iff] [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : R hroot : x ^ natDegree f + ∑ i : Fin (natDegree f), coeff f ↑i * x ^ ↑i = 0 hmo : Monic f i : ℕ hi : natDegree f ≤ i k : ℕ hk : i = natDegree f + k ⊢ ∑ i : Fin (natDegree f), coeff f ↑i * x ^ ↑i ∈ 𝓟 [PROOFSTEP] refine' Submodule.sum_mem _ fun i _ => mul_mem_right _ _ (hf.mem (Fin.is_lt i)) [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : S hx : ↑(aeval x) f = 0 hmo : Monic f ⊢ ∀ (i : ℕ), natDegree (Polynomial.map (algebraMap R S) f) ≤ i → x ^ i ∈ Ideal.map (algebraMap R S) 𝓟 [PROOFSTEP] suffices x ^ (f.map (algebraMap R S)).natDegree ∈ 𝓟.map (algebraMap R S) by intro i hi obtain ⟨k, hk⟩ := exists_add_of_le hi rw [hk, pow_add] refine' mul_mem_right _ _ this [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : S hx : ↑(aeval x) f = 0 hmo : Monic f this : x ^ natDegree (Polynomial.map (algebraMap R S) f) ∈ Ideal.map (algebraMap R S) 𝓟 ⊢ ∀ (i : ℕ), natDegree (Polynomial.map (algebraMap R S) f) ≤ i → x ^ i ∈ Ideal.map (algebraMap R S) 𝓟 [PROOFSTEP] intro i hi [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : S hx : ↑(aeval x) f = 0 hmo : Monic f 
this : x ^ natDegree (Polynomial.map (algebraMap R S) f) ∈ Ideal.map (algebraMap R S) 𝓟 i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i ⊢ x ^ i ∈ Ideal.map (algebraMap R S) 𝓟 [PROOFSTEP] obtain ⟨k, hk⟩ := exists_add_of_le hi [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : S hx : ↑(aeval x) f = 0 hmo : Monic f this : x ^ natDegree (Polynomial.map (algebraMap R S) f) ∈ Ideal.map (algebraMap R S) 𝓟 i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i k : ℕ hk : i = natDegree (Polynomial.map (algebraMap R S) f) + k ⊢ x ^ i ∈ Ideal.map (algebraMap R S) 𝓟 [PROOFSTEP] rw [hk, pow_add] [GOAL] case intro R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : S hx : ↑(aeval x) f = 0 hmo : Monic f this : x ^ natDegree (Polynomial.map (algebraMap R S) f) ∈ Ideal.map (algebraMap R S) 𝓟 i : ℕ hi : natDegree (Polynomial.map (algebraMap R S) f) ≤ i k : ℕ hk : i = natDegree (Polynomial.map (algebraMap R S) f) + k ⊢ x ^ natDegree (Polynomial.map (algebraMap R S) f) * x ^ k ∈ Ideal.map (algebraMap R S) 𝓟 [PROOFSTEP] refine' mul_mem_right _ _ this [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : S hx : ↑(aeval x) f = 0 hmo : Monic f ⊢ x ^ natDegree (Polynomial.map (algebraMap R S) f) ∈ Ideal.map (algebraMap R S) 𝓟 [PROOFSTEP] rw [aeval_def, eval₂_eq_eval_map, ← IsRoot.def] at hx [GOAL] R : Type u inst✝² : CommRing R 𝓟 : Ideal R f : R[X] hf : IsWeaklyEisensteinAt f 𝓟 S : Type v inst✝¹ : CommRing S inst✝ : Algebra R S x : S hx : IsRoot (Polynomial.map (algebraMap R S) f) x hmo : Monic f ⊢ x ^ natDegree (Polynomial.map (algebraMap R S) f) ∈ Ideal.map (algebraMap R S) 𝓟 [PROOFSTEP] refine' pow_natDegree_le_of_root_of_monic_mem (hf.map _) hx (hmo.map _) _ rfl.le [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A p : R[X] x : R P : Ideal R hP : x ∈ P ⊢ IsWeaklyEisensteinAt (scaleRoots p x) P [PROOFSTEP] refine' ⟨fun i => _⟩ [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A p : R[X] x : R P : Ideal R hP : x ∈ P n✝ : ℕ i : n✝ < natDegree (scaleRoots p x) ⊢ coeff (scaleRoots p x) n✝ ∈ P [PROOFSTEP] rw [coeff_scaleRoots] [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A p : R[X] x : R P : Ideal R hP : x ∈ P n✝ : ℕ i : n✝ < natDegree (scaleRoots p x) ⊢ coeff p n✝ * x ^ (natDegree p - n✝) ∈ P [PROOFSTEP] rw [natDegree_scaleRoots, ← tsub_pos_iff_lt] at i [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A p : R[X] x : R P : Ideal R hP : x ∈ P n✝ : ℕ i✝ : n✝ < natDegree p i : 0 < natDegree p - n✝ ⊢ coeff p n✝ * x ^ (natDegree p - n✝) ∈ P [PROOFSTEP] exact Ideal.mul_mem_left _ _ (Ideal.pow_mem_of_mem P hP _ i) [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A f : R →+* A hf : Function.Injective ↑f p : R[X] hp : Monic p x y : R z : A h : eval₂ f z p = 0 hz : ↑f x * z = ↑f y ⊢ x ∣ y ^ natDegree p [PROOFSTEP] rw [← natDegree_scaleRoots p x, ← Ideal.mem_span_singleton] [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A f : R →+* A hf : Function.Injective ↑f p : R[X] hp : Monic p x y : R z : A h : eval₂ f z p = 0 hz : ↑f x * z = ↑f y ⊢ y ^ natDegree (scaleRoots p x) ∈ span {x} [PROOFSTEP] refine' (scaleRoots.isWeaklyEisensteinAt _ (Ideal.mem_span_singleton.mpr <| dvd_refl x)).pow_natDegree_le_of_root_of_monic_mem _ 
((monic_scaleRoots_iff x).mpr hp) _ le_rfl [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A f : R →+* A hf : Function.Injective ↑f p : R[X] hp : Monic p x y : R z : A h : eval₂ f z p = 0 hz : ↑f x * z = ↑f y ⊢ IsRoot (scaleRoots p x) y [PROOFSTEP] rw [injective_iff_map_eq_zero'] at hf [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A f : R →+* A hf : ∀ (a : R), ↑f a = 0 ↔ a = 0 p : R[X] hp : Monic p x y : R z : A h : eval₂ f z p = 0 hz : ↑f x * z = ↑f y ⊢ IsRoot (scaleRoots p x) y [PROOFSTEP] have : eval₂ _ _ (p.scaleRoots x) = 0 := scaleRoots_eval₂_eq_zero f h [GOAL] R : Type u A : Type u_1 inst✝¹ : CommRing R inst✝ : CommRing A f : R →+* A hf : ∀ (a : R), ↑f a = 0 ↔ a = 0 p : R[X] hp : Monic p x y : R z : A h : eval₂ f z p = 0 hz : ↑f x * z = ↑f y this : eval₂ f (↑f x * z) (scaleRoots p x) = 0 ⊢ IsRoot (scaleRoots p x) y [PROOFSTEP] rwa [hz, Polynomial.eval₂_at_apply, hf] at this [GOAL] R : Type u inst✝ : CommSemiring R 𝓟 : Ideal R f : R[X] hf : IsEisensteinAt f 𝓟 n : ℕ hn : n ≠ natDegree f ⊢ coeff f n ∈ 𝓟 [PROOFSTEP] cases' ne_iff_lt_or_gt.1 hn with h₁ h₂ [GOAL] case inl R : Type u inst✝ : CommSemiring R 𝓟 : Ideal R f : R[X] hf : IsEisensteinAt f 𝓟 n : ℕ hn : n ≠ natDegree f h₁ : n < natDegree f ⊢ coeff f n ∈ 𝓟 [PROOFSTEP] exact hf.mem h₁ [GOAL] case inr R : Type u inst✝ : CommSemiring R 𝓟 : Ideal R f : R[X] hf : IsEisensteinAt f 𝓟 n : ℕ hn : n ≠ natDegree f h₂ : n > natDegree f ⊢ coeff f n ∈ 𝓟 [PROOFSTEP] rw [coeff_eq_zero_of_natDegree_lt h₂] [GOAL] case inr R : Type u inst✝ : CommSemiring R 𝓟 : Ideal R f : R[X] hf : IsEisensteinAt f 𝓟 n : ℕ hn : n ≠ natDegree f h₂ : n > natDegree f ⊢ 0 ∈ 𝓟 [PROOFSTEP] exact Ideal.zero_mem _
/- Copyright (c) 2018 Michael Jendrusch. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Michael Jendrusch, Scott Morrison, Bhavik Mehta ! This file was ported from Lean 3 source module category_theory.monoidal.functor ! leanprover-community/mathlib commit 3d7987cda72abc473c7cdbbb075170e9ac620042 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.CategoryTheory.Monoidal.Category import Mathlib.CategoryTheory.Adjunction.Basic import Mathlib.CategoryTheory.Products.Basic /-! # (Lax) monoidal functors A lax monoidal functor `F` between monoidal categories `C` and `D` is a functor between the underlying categories equipped with morphisms * `ε : 𝟙_ D ⟶ F.obj (𝟙_ C)` (called the unit morphism) * `μ X Y : (F.obj X) ⊗ (F.obj Y) ⟶ F.obj (X ⊗ Y)` (called the tensorator, or strength). satisfying various axioms. A monoidal functor is a lax monoidal functor for which `ε` and `μ` are isomorphisms. We show that the composition of (lax) monoidal functors gives a (lax) monoidal functor. See also `CategoryTheory.Monoidal.Functorial` for a typeclass decorating an object-level function with the additional data of a monoidal functor. This is useful when stating that a pre-existing functor is monoidal. See `CategoryTheory.Monoidal.NaturalTransformation` for monoidal natural transformations. We show in `CategoryTheory.Monoidal.Mon_` that lax monoidal functors take monoid objects to monoid objects. ## Future work * Oplax monoidal functors. ## References See <https://stacks.math.columbia.edu/tag/0FFL>. -/ open CategoryTheory universe v₁ v₂ v₃ u₁ u₂ u₃ open CategoryTheory.Category open CategoryTheory.Functor namespace CategoryTheory section open MonoidalCategory variable (C : Type u₁) [Category.{v₁} C] [MonoidalCategory.{v₁} C] (D : Type u₂) [Category.{v₂} D] [MonoidalCategory.{v₂} D] -- The direction of `left_unitality` and `right_unitality` as simp lemmas may look strange: -- remember the rule of thumb that component indices of natural transformations -- "weigh more" than structural maps. -- (However by this argument `associativity` is currently stated backwards!) /-- A lax monoidal functor is a functor `F : C ⥤ D` between monoidal categories, equipped with morphisms `ε : 𝟙 _D ⟶ F.obj (𝟙_ C)` and `μ X Y : F.obj X ⊗ F.obj Y ⟶ F.obj (X ⊗ Y)`, satisfying the appropriate coherences. -/ structure LaxMonoidalFunctor extends C ⥤ D where /-- unit morphism -/ ε : 𝟙_ D ⟶ obj (𝟙_ C) /-- tensorator -/ μ : ∀ X Y : C, obj X ⊗ obj Y ⟶ obj (X ⊗ Y) μ_natural : ∀ {X Y X' Y' : C} (f : X ⟶ Y) (g : X' ⟶ Y'), (map f ⊗ map g) ≫ μ Y Y' = μ X X' ≫ map (f ⊗ g) := by --Porting note: was `obviously` aesop_cat /-- associativity of the tensorator -/ associativity : ∀ X Y Z : C, (μ X Y ⊗ 𝟙 (obj Z)) ≫ μ (X ⊗ Y) Z ≫ map (α_ X Y Z).hom = (α_ (obj X) (obj Y) (obj Z)).hom ≫ (𝟙 (obj X) ⊗ μ Y Z) ≫ μ X (Y ⊗ Z) := by --Porting note: was `obviously` aesop_cat -- unitality left_unitality : ∀ X : C, (λ_ (obj X)).hom = (ε ⊗ 𝟙 (obj X)) ≫ μ (𝟙_ C) X ≫ map (λ_ X).hom := --Porting note: was `obviously` by aesop_cat right_unitality : ∀ X : C, (ρ_ (obj X)).hom = (𝟙 (obj X) ⊗ ε) ≫ μ X (𝟙_ C) ≫ map (ρ_ X).hom := --Porting note: was `obviously` by aesop_cat #align category_theory.lax_monoidal_functor CategoryTheory.LaxMonoidalFunctor -- Porting note: todo: remove this configuration and use the default configuration. -- We keep this to be consistent with Lean 3. 
initialize_simps_projections LaxMonoidalFunctor (+toFunctor, -obj, -map) --Porting note: was `[simp, reassoc.1]` attribute [reassoc (attr := simp)] LaxMonoidalFunctor.μ_natural attribute [simp] LaxMonoidalFunctor.left_unitality attribute [simp] LaxMonoidalFunctor.right_unitality --Porting note: was `[simp, reassoc.1]` attribute [reassoc (attr := simp)] LaxMonoidalFunctor.associativity -- When `rewrite_search` lands, add @[search] attributes to -- LaxMonoidalFunctor.μ_natural LaxMonoidalFunctor.left_unitality -- LaxMonoidalFunctor.right_unitality LaxMonoidalFunctor.associativity section variable {C D} --Porting note: was `[simp, reassoc.1]` @[reassoc (attr := simp)] theorem LaxMonoidalFunctor.left_unitality_inv (F : LaxMonoidalFunctor C D) (X : C) : (λ_ (F.obj X)).inv ≫ (F.ε ⊗ 𝟙 (F.obj X)) ≫ F.μ (𝟙_ C) X = F.map (λ_ X).inv := by rw [Iso.inv_comp_eq, F.left_unitality, Category.assoc, Category.assoc, ← F.toFunctor.map_comp, Iso.hom_inv_id, F.toFunctor.map_id, comp_id] #align category_theory.lax_monoidal_functor.left_unitality_inv CategoryTheory.LaxMonoidalFunctor.left_unitality_inv --Porting note: was `[simp, reassoc.1]` @[reassoc (attr := simp)] theorem LaxMonoidalFunctor.right_unitality_inv (F : LaxMonoidalFunctor C D) (X : C) : (ρ_ (F.obj X)).inv ≫ (𝟙 (F.obj X) ⊗ F.ε) ≫ F.μ X (𝟙_ C) = F.map (ρ_ X).inv := by rw [Iso.inv_comp_eq, F.right_unitality, Category.assoc, Category.assoc, ← F.toFunctor.map_comp, Iso.hom_inv_id, F.toFunctor.map_id, comp_id] #align category_theory.lax_monoidal_functor.right_unitality_inv CategoryTheory.LaxMonoidalFunctor.right_unitality_inv --Porting note: was `[simp, reassoc.1]` @[reassoc (attr := simp)] theorem LaxMonoidalFunctor.associativity_inv (F : LaxMonoidalFunctor C D) (X Y Z : C) : (𝟙 (F.obj X) ⊗ F.μ Y Z) ≫ F.μ X (Y ⊗ Z) ≫ F.map (α_ X Y Z).inv = (α_ (F.obj X) (F.obj Y) (F.obj Z)).inv ≫ (F.μ X Y ⊗ 𝟙 (F.obj Z)) ≫ F.μ (X ⊗ Y) Z := by rw [Iso.eq_inv_comp, ← F.associativity_assoc, ← F.toFunctor.map_comp, Iso.hom_inv_id, F.toFunctor.map_id, comp_id] #align category_theory.lax_monoidal_functor.associativity_inv CategoryTheory.LaxMonoidalFunctor.associativity_inv end /-- A monoidal functor is a lax monoidal functor for which the tensorator and unitor as isomorphisms. See <https://stacks.math.columbia.edu/tag/0FFL>. -/ structure MonoidalFunctor extends LaxMonoidalFunctor.{v₁, v₂} C D where ε_isIso : IsIso ε := by infer_instance μ_isIso : ∀ X Y : C, IsIso (μ X Y) := by infer_instance #align category_theory.monoidal_functor CategoryTheory.MonoidalFunctor initialize_simps_projections MonoidalFunctor (+toLaxMonoidalFunctor, -obj, -map, -ε, -μ) attribute [instance] MonoidalFunctor.ε_isIso MonoidalFunctor.μ_isIso variable {C D} /-- The unit morphism of a (strong) monoidal functor as an isomorphism. -/ noncomputable def MonoidalFunctor.εIso (F : MonoidalFunctor.{v₁, v₂} C D) : tensorUnit D ≅ F.obj (tensorUnit C) := asIso F.ε #align category_theory.monoidal_functor.ε_iso CategoryTheory.MonoidalFunctor.εIso /-- The tensorator of a (strong) monoidal functor as an isomorphism. -/ noncomputable def MonoidalFunctor.μIso (F : MonoidalFunctor.{v₁, v₂} C D) (X Y : C) : F.obj X ⊗ F.obj Y ≅ F.obj (X ⊗ Y) := asIso (F.μ X Y) #align category_theory.monoidal_functor.μ_iso CategoryTheory.MonoidalFunctor.μIso end open MonoidalCategory namespace LaxMonoidalFunctor variable (C : Type u₁) [Category.{v₁} C] [MonoidalCategory.{v₁} C] /-- The identity lax monoidal functor. 
-/ @[simps] def id : LaxMonoidalFunctor.{v₁, v₁} C C := { 𝟭 C with ε := 𝟙 _ μ := fun X Y => 𝟙 _ } #align category_theory.lax_monoidal_functor.id CategoryTheory.LaxMonoidalFunctor.id instance : Inhabited (LaxMonoidalFunctor C C) := ⟨id C⟩ end LaxMonoidalFunctor namespace MonoidalFunctor section variable {C : Type u₁} [Category.{v₁} C] [MonoidalCategory.{v₁} C] variable {D : Type u₂} [Category.{v₂} D] [MonoidalCategory.{v₂} D] variable (F : MonoidalFunctor.{v₁, v₂} C D) theorem map_tensor {X Y X' Y' : C} (f : X ⟶ Y) (g : X' ⟶ Y') : F.map (f ⊗ g) = inv (F.μ X X') ≫ (F.map f ⊗ F.map g) ≫ F.μ Y Y' := by simp #align category_theory.monoidal_functor.map_tensor CategoryTheory.MonoidalFunctor.map_tensor theorem map_leftUnitor (X : C) : F.map (λ_ X).hom = inv (F.μ (𝟙_ C) X) ≫ (inv F.ε ⊗ 𝟙 (F.obj X)) ≫ (λ_ (F.obj X)).hom := by simp only [LaxMonoidalFunctor.left_unitality] slice_rhs 2 3 => rw [← comp_tensor_id] simp simp #align category_theory.monoidal_functor.map_left_unitor CategoryTheory.MonoidalFunctor.map_leftUnitor theorem map_rightUnitor (X : C) : F.map (ρ_ X).hom = inv (F.μ X (𝟙_ C)) ≫ (𝟙 (F.obj X) ⊗ inv F.ε) ≫ (ρ_ (F.obj X)).hom := by simp only [LaxMonoidalFunctor.right_unitality] slice_rhs 2 3 => rw [← id_tensor_comp] simp simp #align category_theory.monoidal_functor.map_right_unitor CategoryTheory.MonoidalFunctor.map_rightUnitor /-- The tensorator as a natural isomorphism. -/ noncomputable def μNatIso : Functor.prod F.toFunctor F.toFunctor ⋙ tensor D ≅ tensor C ⋙ F.toFunctor := NatIso.ofComponents (by intros apply F.μIso) (by intros apply F.toLaxMonoidalFunctor.μ_natural) #align category_theory.monoidal_functor.μ_nat_iso CategoryTheory.MonoidalFunctor.μNatIso @[simp] theorem μIso_hom (X Y : C) : (F.μIso X Y).hom = F.μ X Y := rfl #align category_theory.monoidal_functor.μ_iso_hom CategoryTheory.MonoidalFunctor.μIso_hom --Porting note: was `[simp, reassoc.1]` @[reassoc (attr := simp)] theorem μ_inv_hom_id (X Y : C) : (F.μIso X Y).inv ≫ F.μ X Y = 𝟙 _ := (F.μIso X Y).inv_hom_id #align category_theory.monoidal_functor.μ_inv_hom_id CategoryTheory.MonoidalFunctor.μ_inv_hom_id @[simp] theorem μ_hom_inv_id (X Y : C) : F.μ X Y ≫ (F.μIso X Y).inv = 𝟙 _ := (F.μIso X Y).hom_inv_id #align category_theory.monoidal_functor.μ_hom_inv_id CategoryTheory.MonoidalFunctor.μ_hom_inv_id @[simp] theorem εIso_hom : F.εIso.hom = F.ε := rfl #align category_theory.monoidal_functor.ε_iso_hom CategoryTheory.MonoidalFunctor.εIso_hom --Porting note: was `[simp, reassoc.1]` @[reassoc (attr := simp)] theorem ε_inv_hom_id : F.εIso.inv ≫ F.ε = 𝟙 _ := F.εIso.inv_hom_id #align category_theory.monoidal_functor.ε_inv_hom_id CategoryTheory.MonoidalFunctor.ε_inv_hom_id @[simp] theorem ε_hom_inv_id : F.ε ≫ F.εIso.inv = 𝟙 _ := F.εIso.hom_inv_id #align category_theory.monoidal_functor.ε_hom_inv_id CategoryTheory.MonoidalFunctor.ε_hom_inv_id /-- Monoidal functors commute with left tensoring up to isomorphism -/ @[simps!] noncomputable def commTensorLeft (X : C) : F.toFunctor ⋙ tensorLeft (F.toFunctor.obj X) ≅ tensorLeft X ⋙ F.toFunctor := NatIso.ofComponents (fun Y => F.μIso X Y) @fun Y Z f => by convert F.μ_natural (𝟙 X) f using 2 simp #align category_theory.monoidal_functor.comm_tensor_left CategoryTheory.MonoidalFunctor.commTensorLeft /-- Monoidal functors commute with right tensoring up to isomorphism -/ @[simps!] 
noncomputable def commTensorRight (X : C) : F.toFunctor ⋙ tensorRight (F.toFunctor.obj X) ≅ tensorRight X ⋙ F.toFunctor := NatIso.ofComponents (fun Y => F.μIso Y X) @fun Y Z f => by convert F.μ_natural f (𝟙 X) using 2 simp #align category_theory.monoidal_functor.comm_tensor_right CategoryTheory.MonoidalFunctor.commTensorRight end section variable (C : Type u₁) [Category.{v₁} C] [MonoidalCategory.{v₁} C] /-- The identity monoidal functor. -/ @[simps] def id : MonoidalFunctor.{v₁, v₁} C C := { 𝟭 C with ε := 𝟙 _ μ := fun X Y => 𝟙 _ } #align category_theory.monoidal_functor.id CategoryTheory.MonoidalFunctor.id instance : Inhabited (MonoidalFunctor C C) := ⟨id C⟩ end end MonoidalFunctor variable {C : Type u₁} [Category.{v₁} C] [MonoidalCategory.{v₁} C] variable {D : Type u₂} [Category.{v₂} D] [MonoidalCategory.{v₂} D] variable {E : Type u₃} [Category.{v₃} E] [MonoidalCategory.{v₃} E] namespace LaxMonoidalFunctor variable (F : LaxMonoidalFunctor.{v₁, v₂} C D) (G : LaxMonoidalFunctor.{v₂, v₃} D E) -- The proofs here are horrendous; rewrite_search helps a lot. /-- The composition of two lax monoidal functors is again lax monoidal. -/ @[simps] def comp : LaxMonoidalFunctor.{v₁, v₃} C E := { F.toFunctor ⋙ G.toFunctor with ε := G.ε ≫ G.map F.ε μ := fun X Y => G.μ (F.obj X) (F.obj Y) ≫ G.map (F.μ X Y) μ_natural := @fun _ _ _ _ f g => by simp only [Functor.comp_map, assoc] rw [← Category.assoc, LaxMonoidalFunctor.μ_natural, Category.assoc, ← map_comp, ← map_comp, ← LaxMonoidalFunctor.μ_natural] associativity := fun X Y Z => by dsimp rw [id_tensor_comp] slice_rhs 3 4 => rw [← G.toFunctor.map_id, G.μ_natural] slice_rhs 1 3 => rw [← G.associativity] rw [comp_tensor_id] slice_lhs 2 3 => rw [← G.toFunctor.map_id, G.μ_natural] rw [Category.assoc, Category.assoc, Category.assoc, Category.assoc, Category.assoc, ← G.toFunctor.map_comp, ← G.toFunctor.map_comp, ← G.toFunctor.map_comp, ← G.toFunctor.map_comp, F.associativity] left_unitality := fun X => by dsimp rw [G.left_unitality, comp_tensor_id, Category.assoc, Category.assoc] apply congr_arg rw [F.left_unitality, map_comp, ← NatTrans.id_app, ← Category.assoc, ← LaxMonoidalFunctor.μ_natural, NatTrans.id_app, map_id, ← Category.assoc, map_comp] right_unitality := fun X => by dsimp rw [G.right_unitality, id_tensor_comp, Category.assoc, Category.assoc] apply congr_arg rw [F.right_unitality, map_comp, ← NatTrans.id_app, ← Category.assoc, ← LaxMonoidalFunctor.μ_natural, NatTrans.id_app, map_id, ← Category.assoc, map_comp] } #align category_theory.lax_monoidal_functor.comp CategoryTheory.LaxMonoidalFunctor.comp @[inherit_doc] infixr:80 " ⊗⋙ " => comp end LaxMonoidalFunctor namespace LaxMonoidalFunctor universe v₀ u₀ variable {B : Type u₀} [Category.{v₀} B] [MonoidalCategory.{v₀} B] variable (F : LaxMonoidalFunctor.{v₀, v₁} B C) (G : LaxMonoidalFunctor.{v₂, v₃} D E) attribute [local simp] μ_natural associativity left_unitality right_unitality /-- The cartesian product of two lax monoidal functors is lax monoidal. -/ @[simps] def prod : LaxMonoidalFunctor (B × D) (C × E) := { F.toFunctor.prod G.toFunctor with ε := (ε F, ε G) μ := fun X Y => (μ F X.1 Y.1, μ G X.2 Y.2) } #align category_theory.lax_monoidal_functor.prod CategoryTheory.LaxMonoidalFunctor.prod end LaxMonoidalFunctor namespace MonoidalFunctor variable (C) /-- The diagonal functor as a monoidal functor. 
-/ @[simps] def diag : MonoidalFunctor C (C × C) := { Functor.diag C with ε := 𝟙 _ μ := fun X Y => 𝟙 _ } #align category_theory.monoidal_functor.diag CategoryTheory.MonoidalFunctor.diag end MonoidalFunctor namespace LaxMonoidalFunctor variable (F : LaxMonoidalFunctor.{v₁, v₂} C D) (G : LaxMonoidalFunctor.{v₁, v₃} C E) /-- The cartesian product of two lax monoidal functors starting from the same monoidal category `C` is lax monoidal. -/ def prod' : LaxMonoidalFunctor C (D × E) := (MonoidalFunctor.diag C).toLaxMonoidalFunctor ⊗⋙ F.prod G #align category_theory.lax_monoidal_functor.prod' CategoryTheory.LaxMonoidalFunctor.prod' @[simp] theorem prod'_toFunctor : (F.prod' G).toFunctor = F.toFunctor.prod' G.toFunctor := rfl #align category_theory.lax_monoidal_functor.prod'_to_functor CategoryTheory.LaxMonoidalFunctor.prod'_toFunctor @[simp] theorem prod'_ε : (F.prod' G).ε = (F.ε, G.ε) := by dsimp [prod'] simp #align category_theory.lax_monoidal_functor.prod'_ε CategoryTheory.LaxMonoidalFunctor.prod'_ε @[simp] end LaxMonoidalFunctor namespace MonoidalFunctor variable (F : MonoidalFunctor.{v₁, v₂} C D) (G : MonoidalFunctor.{v₂, v₃} D E) /-- The composition of two monoidal functors is again monoidal. -/ @[simps] def comp : MonoidalFunctor.{v₁, v₃} C E := { F.toLaxMonoidalFunctor.comp G.toLaxMonoidalFunctor with ε_isIso := by dsimp infer_instance μ_isIso := by dsimp infer_instance } #align category_theory.monoidal_functor.comp CategoryTheory.MonoidalFunctor.comp @[inherit_doc] infixr:80 " ⊗⋙ " =>-- We overload notation; potentially dangerous, but it seems to work. comp end MonoidalFunctor namespace MonoidalFunctor universe v₀ u₀ variable {B : Type u₀} [Category.{v₀} B] [MonoidalCategory.{v₀} B] variable (F : MonoidalFunctor.{v₀, v₁} B C) (G : MonoidalFunctor.{v₂, v₃} D E) /-- The cartesian product of two monoidal functors is monoidal. -/ @[simps] def prod : MonoidalFunctor (B × D) (C × E) := { F.toLaxMonoidalFunctor.prod G.toLaxMonoidalFunctor with ε_isIso := (isIso_prod_iff C E).mpr ⟨ε_isIso F, ε_isIso G⟩ μ_isIso := fun X Y => (isIso_prod_iff C E).mpr ⟨μ_isIso F X.1 Y.1, μ_isIso G X.2 Y.2⟩ } #align category_theory.monoidal_functor.prod CategoryTheory.MonoidalFunctor.prod end MonoidalFunctor namespace MonoidalFunctor variable (F : MonoidalFunctor.{v₁, v₂} C D) (G : MonoidalFunctor.{v₁, v₃} C E) /-- The cartesian product of two monoidal functors starting from the same monoidal category `C` is monoidal. -/ def prod' : MonoidalFunctor C (D × E) := diag C ⊗⋙ F.prod G #align category_theory.monoidal_functor.prod' CategoryTheory.MonoidalFunctor.prod' @[simp] theorem prod'_toLaxMonoidalFunctor : (F.prod' G).toLaxMonoidalFunctor = F.toLaxMonoidalFunctor.prod' G.toLaxMonoidalFunctor := rfl #align category_theory.monoidal_functor.prod'_to_lax_monoidal_functor CategoryTheory.MonoidalFunctor.prod'_toLaxMonoidalFunctor end MonoidalFunctor /-- If we have a right adjoint functor `G` to a monoidal functor `F`, then `G` has a lax monoidal structure as well. 
-/ @[simps] noncomputable def monoidalAdjoint (F : MonoidalFunctor C D) {G : D ⥤ C} (h : F.toFunctor ⊣ G) : LaxMonoidalFunctor D C where toFunctor := G ε := h.homEquiv _ _ (inv F.ε) μ X Y := h.homEquiv _ (X ⊗ Y) (inv (F.μ (G.obj X) (G.obj Y)) ≫ (h.counit.app X ⊗ h.counit.app Y)) μ_natural := @fun X Y X' Y' f g => by rw [← h.homEquiv_naturality_left, ← h.homEquiv_naturality_right, Equiv.apply_eq_iff_eq, assoc, IsIso.eq_inv_comp, ← F.toLaxMonoidalFunctor.μ_natural_assoc, IsIso.hom_inv_id_assoc, ← tensor_comp, Adjunction.counit_naturality, Adjunction.counit_naturality, tensor_comp] associativity X Y Z := by dsimp only rw [← h.homEquiv_naturality_right, ← h.homEquiv_naturality_left, ← h.homEquiv_naturality_left, ← h.homEquiv_naturality_left, Equiv.apply_eq_iff_eq, ← cancel_epi (F.toLaxMonoidalFunctor.μ (G.obj X ⊗ G.obj Y) (G.obj Z)), ← cancel_epi (F.toLaxMonoidalFunctor.μ (G.obj X) (G.obj Y) ⊗ 𝟙 (F.obj (G.obj Z))), F.toLaxMonoidalFunctor.associativity_assoc (G.obj X) (G.obj Y) (G.obj Z), ← F.toLaxMonoidalFunctor.μ_natural_assoc, assoc, IsIso.hom_inv_id_assoc, ← F.toLaxMonoidalFunctor.μ_natural_assoc, IsIso.hom_inv_id_assoc, ← tensor_comp, ← tensor_comp, id_comp, Functor.map_id, Functor.map_id, id_comp, ← tensor_comp_assoc, ← tensor_comp_assoc, id_comp, id_comp, h.homEquiv_unit, h.homEquiv_unit, Functor.map_comp, assoc, assoc, h.counit_naturality, h.left_triangle_components_assoc, Functor.map_comp, assoc, h.counit_naturality, h.left_triangle_components_assoc] simp left_unitality X := by rw [← h.homEquiv_naturality_right, ← h.homEquiv_naturality_left, ← Equiv.symm_apply_eq, h.homEquiv_counit, F.map_leftUnitor, h.homEquiv_unit, assoc, assoc, assoc, F.map_tensor, assoc, assoc, IsIso.hom_inv_id_assoc, ← tensor_comp_assoc, Functor.map_id, id_comp, Functor.map_comp, assoc, h.counit_naturality, h.left_triangle_components_assoc, ← leftUnitor_naturality, ← tensor_comp_assoc, id_comp, comp_id] simp right_unitality X := by rw [← h.homEquiv_naturality_right, ← h.homEquiv_naturality_left, ← Equiv.symm_apply_eq, h.homEquiv_counit, F.map_rightUnitor, assoc, assoc, ← rightUnitor_naturality, ← tensor_comp_assoc, comp_id, id_comp, h.homEquiv_unit, F.map_tensor, assoc, assoc, assoc, IsIso.hom_inv_id_assoc, Functor.map_comp, Functor.map_id, ← tensor_comp_assoc, assoc, h.counit_naturality, h.left_triangle_components_assoc, id_comp] simp #align category_theory.monoidal_adjoint CategoryTheory.monoidalAdjoint /-- If a monoidal functor `F` is an equivalence of categories then its inverse is also monoidal. -/ @[simps] noncomputable def monoidalInverse (F : MonoidalFunctor C D) [IsEquivalence F.toFunctor] : MonoidalFunctor D C where toLaxMonoidalFunctor := monoidalAdjoint F (asEquivalence _).toAdjunction ε_isIso := by dsimp [Equivalence.toAdjunction] infer_instance μ_isIso X Y := by dsimp [Equivalence.toAdjunction] infer_instance #align category_theory.monoidal_inverse CategoryTheory.monoidalInverse end CategoryTheory
(* Title: Inductive definition of termination Author: Tobias Nipkow, 2001/2006 Maintainer: Tobias Nipkow *) theory PTermi imports PLang begin subsection{*Termination*} inductive termi :: "com \<Rightarrow> state \<Rightarrow> bool" (infixl "\<down>" 50) where Do[iff]: "f s \<noteq> {} \<Longrightarrow> Do f \<down> s" | Semi[intro!]: "\<lbrakk> c1 \<down> s0; \<And>s1. s0 -c1\<rightarrow> s1 \<Longrightarrow> c2 \<down> s1 \<rbrakk> \<Longrightarrow> (c1;c2) \<down> s0" | IfTrue[intro,simp]: "\<lbrakk> b s; c1 \<down> s \<rbrakk> \<Longrightarrow> IF b THEN c1 ELSE c2 \<down> s" | IfFalse[intro,simp]: "\<lbrakk> \<not>b s; c2 \<down> s \<rbrakk> \<Longrightarrow> IF b THEN c1 ELSE c2 \<down> s" | WhileFalse: "\<not>b s \<Longrightarrow> WHILE b DO c \<down> s" | WhileTrue: "\<lbrakk> b s; c \<down> s; \<And>t. s -c\<rightarrow> t \<Longrightarrow> WHILE b DO c \<down> t \<rbrakk> \<Longrightarrow> WHILE b DO c \<down> s" | "body \<down> s \<Longrightarrow> CALL \<down> s" | Local: "c \<down> f s \<Longrightarrow> LOCAL f;c;g \<down> s" lemma [iff]: "((c1;c2) \<down> s0) = (c1 \<down> s0 \<and> (\<forall>s1. s0 -c1\<rightarrow> s1 \<longrightarrow> c2 \<down> s1))" apply(rule iffI) prefer 2 apply(best intro:termi.intros) apply(erule termi.cases) apply blast+ done lemma [iff]: "(IF b THEN c1 ELSE c2 \<down> s) = ((if b s then c1 else c2) \<down> s)" apply simp apply(rule conjI) apply(rule impI) apply(rule iffI) prefer 2 apply(blast intro:termi.intros) apply(erule termi.cases) apply blast+ apply(rule impI) apply(rule iffI) prefer 2 apply(blast intro:termi.intros) apply(erule termi.cases) apply blast+ done lemma [iff]: "(CALL \<down> s) = (body \<down> s)" by(fast elim: termi.cases intro:termi.intros) lemma [iff]: "(LOCAL f;c;g \<down> s) = (c \<down> f s)" by(fast elim: termi.cases intro:termi.intros) lemma termi_while_lemma[rule_format]: "w\<down>fk \<Longrightarrow> (\<forall>k b c. fk = f k \<and> w = WHILE b DO c \<and> (\<forall>i. f i -c\<rightarrow> f(Suc i)) \<longrightarrow> (\<exists>i. \<not>b(f i)))" apply(erule termi.induct) apply simp_all apply blast apply blast done lemma termi_while: "\<lbrakk> (WHILE b DO c) \<down> f k; \<forall>i. f i -c\<rightarrow> f(Suc i) \<rbrakk> \<Longrightarrow> \<exists>i. \<not>b(f i)" by(blast intro:termi_while_lemma) lemma wf_termi: "wf {(t,s). WHILE b DO c \<down> s \<and> b s \<and> s -c\<rightarrow> t}" apply(subst wf_iff_no_infinite_down_chain) apply(rule notI) apply clarsimp apply(insert termi_while) apply blast done end
# Frequentist & Bayesian Statistics With Py4J & PyMC3
-----

__[1. Introduction](#first-bullet)__

__[2. Sampling A Distribution Written In Scala Using Py4J](#second-bullet)__

__[3. The Maximum Likelihood Estimator](#third-bullet)__

__[4. Confidence Intervals From Fisher Information](#fourth-bullet)__

__[5. Bayesian Estimators & Credible Intervals With PyMC3](#fifth-bullet)__

__[6. Connecting The Two Methods](#sixth-bullet)__

__[7. Conclusions](#seventh-bullet)__

## 1. Introduction <a class="anchor" id="first-bullet"></a>
-------------

In this post I want to go back to the basics of statistics, but with an advanced spin on things. By "advanced spin" I mean both in terms of mathematics and computational techniques. The topic I'll dive into is: estimating a single parameter value from a distribution and then quantifying the uncertainty in the estimate. I will take two approaches to quantifying that uncertainty, the first of which is [frequentist](https://en.wikipedia.org/wiki/Frequentist_inference) and the second of which is [Bayesian](https://en.wikipedia.org/wiki/Bayesian_statistics). I was originally inspired by [Jake VanderPlas' post](http://jakevdp.github.io/blog/2014/03/11/frequentism-and-bayesianism-a-practical-intro/) and admit that I am not very seasoned with Bayesian methods. That's why I'll be sticking to a simple example of estimating the mean rate 𝜆 of a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution#) from sampled data.

From the computational perspective, I wanted to do something different and decided to write the probability distribution for generating the data in [Scala](https://www.scala-lang.org/), but then use it with Python. Why did I do this? Well, I like Scala and enjoyed the challenge of writing a Poisson distribution using a functional approach. I also wanted to learn more about how to use [Py4J](https://www.py4j.org/), which can be used to work with functions and objects in the [JVM](https://en.wikipedia.org/wiki/Java_virtual_machine) from Python. [Apache Spark](https://spark.apache.org/) actually uses Py4J in PySpark to write Python wrappers for their Scala API. I've used both PySpark and Spark in Scala extensively in the past and doing this project gave me an opportunity to understand how PySpark works better. The source code for this project can be found [here](https://github.com/mdh266/BayesMLE).

Let's get into how I wrote the Poisson distribution in Scala and used it from Python to sample data.

## 2. Sampling A Distribution Written In Scala Using Py4J <a class="anchor" id="second-bullet"></a>
---------------

I wrote a [Poisson distribution in Scala](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/PoissonDistribution.scala) so that I could sample data from it to estimate the mean rate $\lambda$ of that distribution. The Poisson distribution is a probability distribution for a random variable $y \, \in \, \mathbb{Z}^{+}$ that represents some count phenomenon, i.e. a number of non-negative integer occurrences in some fixed time frame. For example the number of trains passing through a station per day or the number of customers that visit a website per hour can be modeled with a Poisson distribution. The mathematical form of the distribution is,

$$ p(y \, = \, k) \; = \; \frac{\lambda^{k} e^{-\lambda} }{k!} $$

The parameter $\lambda \, \in \, \mathbb{R}^{+}$ is the rate variable, i.e. the true number of customers that visit the website per hour, and can be any non-negative real valued number.
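To make the formula concrete, here is a minimal pure-Python sketch of the Poisson probability mass function. The helper `poisson_pmf` is purely illustrative and is not part of this project's code; the Scala implementation used for the actual sampling is described next.

```python
from math import exp, factorial

def poisson_pmf(k: int, lam: float) -> float:
    """P(y = k) for a Poisson distribution with rate lam."""
    return lam ** k * exp(-lam) / factorial(k)

# e.g. the probability of exactly 2 arrivals in an hour when lam = 3
poisson_pmf(2, 3.0)  # ~0.224
```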
The first step in this project was to create the Poisson class. I did this in a previous [project](https://github.com/mdh266/PoissonDistributionInScala), however, one key difference is that for Py4J the return value of any public function in Scala/Java needs to be a Java object. Specifically for me the [sample](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/Poisson.scala) method needs to return a Java List of integers ([java.util.List[Int]](https://www.javatpoint.com/java-list)). I originally tried returning a [Scala List](https://www.scala-lang.org/api/current/scala/collection/immutable/List.html) which worked fine in pure Scala, but when returning the list to Python I got a generic "Java Object" and realized Py4J was only able to serialize specific datatypes between Python and the JVM.

In order to use [this class](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/PoissonDistribution.scala) from Python with Py4J I needed to do three things:

1. Create a [Gateway Server](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/Main.scala)
2. Create a [class entrypoint](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/PoissonEntryPoint.scala) to allow for setting the Poisson attributes
3. Package the code as a jar using a build tool such as [Maven](https://maven.apache.org/) or [SBT](https://www.scala-sbt.org/)

The first step is pretty straightforward from the [Py4J Documentation](https://www.py4j.org/getting_started.html) and is in the [Main.Scala](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/Main.scala) object. However, in order to accommodate the use of Docker I had to adapt the address for the [GatewayServer](https://www.py4j.org/_static/javadoc/index.html?py4j/GatewayServer.html) based on this [discussion on GitHub](https://github.com/bartdag/py4j/issues/360):

    import java.net.InetAddress
    import py4j.GatewayServer

    object Main {
        def main(args: Array[String]) = {
            System.setProperty("java.net.preferIPv4Stack", "true");
            val addr = InetAddress.getByName("0.0.0.0")
            val app = new PoissonEntryPoint()
            val builder = new GatewayServer.GatewayServerBuilder(app)
            builder.javaAddress(addr);
            val server = builder.build();
            server.start()
            println("Gateway Server Started")
        }
    }

The [GatewayServer](https://www.py4j.org/_static/javadoc/py4j/GatewayServer.html), in the author's own words, *allows Python programs to communicate with the JVM through a local network socket.* The GatewayServer takes an *entrypoint* as a parameter which can be any object (see [here](https://www.py4j.org/getting_started.html#writing-the-python-program) for more info). However, the entrypoint doesn't really offer a way for us to pass the $\lambda$ value from [Python](https://www.py4j.org/getting_started.html#writing-the-python-program) to the Poisson constructor in Scala. To get around this issue I created a [PoissonEntryPoint](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonEntryPoint.scala) case class:

    case class PoissonEntryPoint() {

        def Poisson(lambda : Double) : PoissonDistribution = {
            new PoissonDistribution(lambda)
        }
    }

This case class really just acts as a [Singleton](https://docs.scala-lang.org/tour/singleton-objects.html), but is a class instead of an object. The point of the `PoissonEntryPoint` class is simply to be able to create a Poisson class with a specific $\lambda$ value after starting the GatewayServer. Now let's talk about how the project is structured and how to package it for use.
The project structure is:

    src/
       main/
          scala/
             Main.scala
             PoissonDistribution.scala
             PoissonEntryPoint.scala
    pom.xml

The `pom.xml` file is called the [project object model](https://maven.apache.org/guides/introduction/introduction-to-the-pom.html) and is a file that contains all the instructions for [Maven](https://maven.apache.org/). I won't go into the details here, but I will say that Maven is a Java build tool to compile and package code and [SBT](https://www.scala-sbt.org/) is the Scala equivalent build tool. Since Scala is a [JVM language](https://en.wikipedia.org/wiki/List_of_JVM_languages) we can use either build tool; I went with Maven since I'm more familiar with it and because it was much easier to find examples of Py4J with Maven than with SBT. To package the code into an [uber jar](https://stackoverflow.com/questions/11947037/what-is-an-uber-jar), use the command:

    mvn package

Then we can start our Py4J web server with the command:

    java -jar target/poisson-1.0-jar-with-dependencies.jar

We can test that the server is running on the default port 25333 on your local machine with the command,

    nc -vz 0.0.0.0 25333

and you should see,

    Connection to 0.0.0.0 port 25333 [tcp/*] succeeded!

Now we can start up our Jupyter notebook and connect Python to the JVM with the following code, taken directly from [Py4J's](https://www.py4j.org/index.html#) home page. This involves setting up the [JavaGateway](https://www.py4j.org/py4j_java_gateway.html), which is the *main interaction point between a Python VM and a JVM*. When running on your local machine this is simple; however, in order to use my Poisson distribution and Jupyter Lab within [Docker Compose](https://docs.docker.com/compose/) I had to pass the appropriate [GatewayParameters](https://www.py4j.org/py4j_java_gateway.html#py4j.java_gateway.GatewayParameters), which specify the address for the Scala [GatewayServer](https://www.py4j.org/_static/javadoc/py4j/GatewayServer.html) (the `py4jserver` service in Docker Compose) and the port it uses. In addition, I had to pass the [CallbackServerParameters](https://www.py4j.org/py4j_java_gateway.html#py4j.java_gateway.CallbackServerParameters), which specify the address for this notebook (the `jupyter` service in Docker Compose) as well as the port it uses. The callback server allows the JVM to call back Python objects as discussed [here](https://www.py4j.org/advanced_topics.html#implementing-java-interfaces-from-python-callback). I definitely had to have a friend that knows DevOps help me figure this one out, but it doesn't add too much complexity to the basic Py4J example:

```python
from py4j.java_gateway import JavaGateway, GatewayParameters, CallbackServerParameters

gateway = JavaGateway(
    gateway_parameters=GatewayParameters(address='py4jserver', port=25333),
    callback_server_parameters=CallbackServerParameters(address='jupyter', port=25334)
)

app = gateway.entry_point
```

The `app` is now the instantiated [PoissonEntryPoint](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/PoissonEntryPoint.scala) class. We can see the class type in Python

```python
type(app)
```

    py4j.java_gateway.JavaObject

As well as looking at the methods for the class:

```python
dir(app)
```

    ['Poisson', 'apply', 'canEqual', 'copy', 'equals', 'getClass', 'hashCode',
     'notify', 'notifyAll', 'productArity', 'productElement', 'productIterator',
     'productPrefix', 'toString', 'unapply', 'wait']

We can see the `Poisson` class method!
Since PoissonEntryPoint is a [case class](https://docs.scala-lang.org/tour/case-classes.html) it comes with a number of default methods just like a [data class](https://realpython.com/python-data-classes/) in Python. We can then create a Poisson class instance and see that the value of $\lambda$ is 1.0:

```python
p1 = app.Poisson(1.0)
```

We can then instantiate another Poisson object:

```python
p2 = app.Poisson(3.0)
```

Note that the [PoissonEntryPoint](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonEntryPoint.scala) class has a function `Poisson` that returns a specific [PoissonDistribution](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonDistribution.scala) object that was initialized with the value $\lambda$. It is important that $\lambda$ is not an attribute of the [PoissonEntryPoint](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonEntryPoint.scala), otherwise we would not get the separate values of $\lambda$'s:

```python
p1.getLambda()
```

    1.0

```python
p2.getLambda()
```

    3.0

The really nice thing about Py4J *is that you can treat objects in the JVM as if they are Python objects.* For instance we can see the methods in the object:

```python
dir(p1)
```

    ['$anonfun$cdf$1', '$anonfun$getSum$1', '$anonfun$invCDF$1', '$anonfun$invCDF$2',
     '$anonfun$invCDF$3', '$anonfun$invCDF$4', '$anonfun$sample$1', '$anonfun$uniform$1',
     '$lessinit$greater$default$1', 'cdf', 'equals', 'getClass', 'getLambda', 'getSum',
     'hashCode', 'invCDF', 'notify', 'notifyAll', 'prob', 'sample', 'setLambda',
     'toString', 'uniform', 'wait']

We can then just use the methods in the [PoissonDistribution](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonDistribution.scala) object just like they would be used directly in Scala. For instance we can get the probability of $y=1$ when $\lambda = 1$:

```python
p1.prob(1)
```

    0.36787944117144233

Now let's generate a random sample from the Poisson object:

```python
sample = p1.sample(1000)
```

```python
sample[:3]
```

    [2, 1, 1]

Py4J returns a `JavaList` wrapper for the `java.util.List[Int]` returned by the [PoissonDistribution class](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonDistribution.scala):

```python
type(sample)
```

    py4j.java_collections.JavaList

We can then convert it to a Python list

```python
type(list(sample))
```

    list

As mentioned previously, Py4J can only serialize specific Java objects back to Python, but I think that's still awesome! This is also why I needed to convert from a Scala `List[Int]` to a `java.util.List[Int]`; without it the returned object would just be a generic `Java Object` and I wouldn't be able to access its contents. Now let's visualize the Poisson distribution for different values of $\lambda$

```python
import pandas as pd
import seaborn as sns

df = pd.melt(
        pd.DataFrame({
           '1': list(app.Poisson(1.0).sample(100)),
           '2': list(app.Poisson(2.0).sample(100)),
           '3': list(app.Poisson(3.0).sample(100)),
           '4': list(app.Poisson(4.0).sample(100)),
           '5': list(app.Poisson(5.0).sample(100)),
           '6': list(app.Poisson(6.0).sample(100))
        }),
        var_name=["lambda"]
)

sns.displot(df, x="value", hue="lambda", kind='kde', height=5)
```

Note that the negative values are not real, but an artifact caused by interpolation with [Kernel Density Estimation](https://en.wikipedia.org/wiki/Kernel_density_estimation). The same is true of the wiggles in the distribution.
We can verify the former:

```python
df.query("value < 0")
```

    Empty DataFrame
    Columns: [lambda, value]
    Index: []

The query returns an empty DataFrame, so none of the sampled values are actually negative. Now let's get into the Maximum Likelihood Estimator for $\lambda$ using the distribution `p1`.

## 3. The Maximum Likelihood Estimator <a class="anchor" id="third-bullet"></a>
----------

First, what is the difference between a statistic and an estimator? A **statistic** is any function of a sample. An **estimator** is any function of a sample that is used to estimate a population parameter. The **maximum likelihood estimator** is the value of the population parameter $\lambda$ that maximizes the probability of observing the sample. We can find the MLE from an independent, identically distributed sample $y_1, y_2, \, \ldots \,, y_{n}$ from $f(y \, \vert \, \lambda)$ by defining the **likelihood function**,

$$ L(\lambda \, \vert \, y_1, \, y_2, \, \ldots, \, y_n) \; = \; \prod_{i=1}^{n}f(y_{i} \, \vert \, \lambda) $$

Then the MLE $\widehat{\lambda}$ is the maximizer,

$$ \hat{\lambda}_{n} \; = \; \arg\max_{\lambda} \, L(\lambda \, \vert \, y_1, y_2, \ldots, \, y_n) $$

From calculus we know that we can find the maximum (or minimum) of any differentiable function by solving,

$$ \frac{\partial L(\lambda \, \vert y_1, y_2, \ldots, y_n)}{\partial \lambda} \; = \; 0 $$

for $\lambda$. The MLE has many important properties; the most important in my mind are:

1. It is a consistent estimator.

2. It is invariant, so that if $\widehat{\lambda}$ is the MLE for $\lambda$, then for any function $\tau(\lambda)$, the MLE for $\tau(\lambda)$ is $\tau(\widehat{\lambda})$.

3. The MLE is an asymptotically normal estimator. That is $\widehat{\lambda} \; \sim \; N(\lambda, \, \mathcal{I}^{-1})$.

To explain the first property, we must note that since an estimator is a function of the sample space, it is also a [random variable](https://en.wikipedia.org/wiki/Random_variable). Let $X_1, \, \ldots, \, X_n$ be a sequence of random variables; then we say $X_{n} \; \xrightarrow{\mathcal{P}} \; X$, i.e. the sequence [converges in probability](https://en.wikipedia.org/wiki/Convergence_of_random_variables), if

$$\forall \epsilon > 0, \; \lim\limits_{n \rightarrow \infty} P(\vert X_n - X \vert > \epsilon ) \; = \; 0$$

For an estimator this property of convergence is called **consistency**. Consistency is a necessary condition of any estimator in statistics and basically signifies that the estimator eventually settles down to a constant or some distribution of values.

Since the logarithm is a strictly increasing function, maximizing the log-likelihood is equivalent to maximizing the likelihood, and the log-likelihood is often much easier to calculate the MLE with. That is, the MLE $\hat{\lambda}$ will also satisfy,

$$ \frac{\partial \log(L(\lambda \, \vert y_1, \, \ldots, y_n ))}{\partial \lambda} \; = \; 0 $$

The third property of the MLE, asymptotic normality, is helpful in modeling since your standardized residuals are normal. Hence the sum of squares of the residuals is $\chi^2$ distributed. This allows us to define confidence intervals around our estimates. The term $\mathcal{I}$ is the [Fisher Information](https://en.wikipedia.org/wiki/Fisher_information) and will be discussed in the next section.
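As a quick numerical illustration of consistency, we can estimate $P(\vert \hat{\lambda}_{n} \, - \, \lambda \vert > \epsilon)$ for growing $n$ by simulation. The sketch below uses NumPy's built-in Poisson sampler rather than the Scala class purely for speed, and it anticipates the result derived just below, namely that the Poisson MLE is the sample mean $\bar{y}_{n}$.

```python
import numpy as np

rng = np.random.default_rng(42)

lam, eps = 3.0, 0.1
for n in [10, 100, 1000, 10000]:
    # 500 replications of the MLE (the sample mean) at sample size n
    mles = rng.poisson(lam, size=(500, n)).mean(axis=1)
    print(n, np.mean(np.abs(mles - lam) > eps))
```

The printed frequencies shrink toward zero as $n$ grows, which is exactly what convergence in probability demands.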
For the Poisson distribution the likelihood function is,

$$ \begin{align} L(\lambda \, \vert y_1, \, y_2, \, \ldots, \, y_n) \; &= \; \prod_{i=1}^{n} \frac{e^{-\lambda} \, \lambda^{y_i}} {y_i!} \newline &= \; e^{-n \, \lambda} \lambda^{n \, \bar{y}_{n}} \frac{1} {\prod_{i=1}^{n} y_i!} \end{align} $$

Taking the log of both sides and setting the derivative equal to zero we find,

$$ - n \, + \, \frac{n \, \bar{y}_{n}}{\widehat{\lambda}} \, = \, 0 $$

Then solving for $\hat{\lambda}$ we find the MLE is,

$$ \widehat{\lambda} \, = \, \bar{y}_{n} $$

```python
from typing import List

def mle(sample: List[int]) -> float:
    converted = list(sample)
    return sum(converted) / len(converted)

lam = mle(p2.sample(1000))

print(f"lambda = {lam}")
```

    lambda = 2.97

Our estimate for $\lambda$ is pretty close to the true value of 3, which is correct for `p2`! Now, since the maximum likelihood estimator is the sample mean, we know it satisfies the [Central Limit Theorem](https://en.wikipedia.org/wiki/Central_limit_theorem),

$$ \hat{\lambda}_{n} \, = \, \bar{y}_{n} \; \xrightarrow{\mathcal{D}} \; N(\lambda, \lambda/n) $$

Hence we can repeatedly sample `p2` and compute the distribution of the MLE for various values of the sample size $n$ to show how the MLE converges in distribution.

```python
import numpy as np
```

```python
# sample the MLE 100 times for each n = 10, 20, 50, 100, 200, 500, 1000
samples = [
    [mle(p2.sample(n)) for k in range(100)]
    for n in [10, 20, 50, 100, 200, 500, 1000]]
```

```python
sample_df = pd.melt(
                pd.DataFrame(np.array(samples).T,
                             columns=['10', '20', '50', '100', '200', '500', '1000']),
                var_name=["n"]
)

# plot the MLE for various values of the sample size
sns.displot(sample_df, x="value", hue="n", kind="kde", height=5,)
```

As $n \rightarrow \infty$ we see the MLE $\bar{y}_{n}$ has a distribution that is more sharply peaked around $3$, which shows that the estimator is converging to the true value!

We have seen that the MLE $\hat{\lambda}_{n}$ converges to the true value of $\lambda$, but for any finite value of $n$ the estimator can be incorrect. How do we measure our confidence in our estimate $\hat{\lambda}_{n}$? The answer is using [confidence intervals](https://en.wikipedia.org/wiki/Confidence_interval).

## 4. Confidence Intervals From Fisher Information <a class="anchor" id="fourth-bullet"></a>
-------------------

Given a distribution $y_{i} \, \sim \, f(y \, \vert \, \alpha)$ for $i \, = \, 1, \ldots , n$ and the likelihood function,

$$ L(\alpha \, \vert \, y_1, y_2, \ldots, y_n) \; = \; \prod_{i=1}^{n} \, f(y_i \, \vert \, \alpha) $$

we define the [score statistic](https://en.wikipedia.org/wiki/Score_(statistics)) $S(\alpha)$ to be,

$$ S(\alpha) \; = \; \frac{d\log \left(L(\alpha \, \vert \, y_1, y_2, \ldots, y_n) \right)}{d\alpha} $$

For a distribution from the [exponential family](https://en.wikipedia.org/wiki/Exponential_family) the score function satisfies,

$$ E_{y}[S] \; = \; 0 $$

Another important property of the score statistic is that it satisfies,

$$ S(\widehat{\alpha}) \; = \; 0$$

for the MLE $\widehat{\alpha}$. This property can be used to compute the MLE using the so-called [scoring algorithm](https://en.wikipedia.org/wiki/Scoring_algorithm), which is equivalent to the [Newton–Raphson method](https://en.wikipedia.org/wiki/Newton%27s_method). The latter method is more frequently used in calculations as it is a general optimization method and has many efficient implementations.
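To make the scoring idea concrete, here is a minimal sketch of Newton–Raphson applied to the Poisson log-likelihood; `newton_raphson_mle` is an illustrative helper and not part of this project's code. For the Poisson we have $S(\lambda) \, = \, -n \, + \, n \, \bar{y}_{n} / \lambda$ and $S^{\prime}(\lambda) \, = \, -n \, \bar{y}_{n} / \lambda^{2}$, and we iterate $\lambda \, \leftarrow \, \lambda \, - \, S(\lambda)/S^{\prime}(\lambda)$ until the score is numerically zero:

```python
from typing import List

def newton_raphson_mle(sample: List[int], lam0: float = 1.0, tol: float = 1e-10) -> float:
    """Solve S(lam) = 0 with Newton-Raphson; for the Poisson this converges to the sample mean."""
    ys = list(sample)
    n, total = len(ys), sum(ys)
    lam = lam0  # needs a sensible positive start, e.g. 0 < lam0 < 2 * (total / n)
    while abs(-n + total / lam) > tol:
        score = -n + total / lam           # S(lam)
        score_prime = -total / lam ** 2    # S'(lam)
        lam -= score / score_prime
    return lam

newton_raphson_mle(p2.sample(1000))  # ~3.0, agreeing with mle above
```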
The [Fisher information](https://en.wikipedia.org/wiki/Fisher_information) is defined as,

\begin{align}
\mathcal{I} \; &= \; \text{Cov}[S] \\
& = \; E_{y}[ S S^{T}] \\
& = \; E_{y}[ - S'] \\
&= \; - E_{y} \left[ \frac{d^{2}\log \left(L(\alpha \, | \, y_1, y_2, \ldots, y_n) \right)}{d\alpha^{2}} \right]
\end{align}

where the second equality holds since $E_{y}[S] \, = \, 0$. One can show that the standard error for the maximum likelihood estimate $(\widehat{\alpha})$ will then be,

$$ \text{S.E.}(\widehat{\alpha}) \; = \; \mathcal{I}^{-1/2}$$

The curvature of the log-likelihood at the MLE is dictated by the Fisher information. If $L$ is flat at the MLE then $\mathcal{I}$ is small and the MLE is not stable or well-defined. Higher Fisher information at the MLE means the distribution is highly peaked and implies the MLE is well defined and stable.

As previously mentioned the MLE is asymptotically normal, which tells us mathematically that,

\begin{equation} \widehat{\alpha} \; \sim \; N(\alpha, \, \mathcal{I}^{-1}) \end{equation}

These facts can be used to calculate confidence intervals for the MLE (taking $\mathcal{I}$ here to be the information in a single observation),

$$\text{CI}_{\alpha} \, = \, [ \widehat{\alpha} - Z_{\alpha/2} \, \mathcal{I}^{-1/2}/\sqrt{n}, \, \widehat{\alpha} + Z_{\alpha/2} \, \mathcal{I}^{-1/2} /\sqrt{n}]$$

The Fisher information for a single observation from a Poisson distribution is,

$$ \mathcal{I} \, = \, 1/\lambda $$

This means for our MLE of the Poisson distribution the confidence interval will be:

$$\text{CI}_{\hat{\lambda}} \, = \, [ \bar{y}_{n} - Z_{\alpha/2} \, \sqrt{\bar{y}_{n} / n}, \, \bar{y}_{n} + Z_{\alpha/2} \, \sqrt{\bar{y}_{n}/ n}]$$

We can then write a function to compute the 94% confidence interval (most people choose 95%, but to be consistent with [PyMC3](https://docs.pymc.io/) we use 94%) for the sample:

```python
from typing import Tuple

def ci(sample: List[int]) -> Tuple[float, float]:
    """
    Computes the 94% confidence interval for sampled
    data from a Poisson distribution
    """
    z = 1.88
    m = mle(sample)
    n = len(sample)
    return (m - z * np.sqrt(m / n), m + z * np.sqrt(m / n))
```

We can then get the MLE for the sampled data from `Poisson(1.0)`,

```python
mle(sample)
```

    0.995

The 94% confidence interval is then,

```python
ci(sample)
```

    (0.9356979932885909, 1.054302006711409)

We can see that the confidence interval does contain the true $\lambda \, = \, 1$.

*Many people think a 94% confidence interval, $\text{CI}_{94}$, can be used to say that there is a 94% probability that the true $\lambda$ is in the confidence interval $\text{CI}_{94}$. This interpretation is wrong; in frequentist methods, the parameter $\lambda$ is assumed to be an unknown fixed value. One cannot make probability statements about fixed values.*

*The confidence interval is a function of the sample space and therefore a random variable. One can make probability statements about the confidence intervals.*
*Indeed the correct interpretation is that if you are able to repeatedly re-sample the population distribution $f(y \, \vert \, \lambda)$ to form many confidence intervals $\text{CI}_{94}$, 94% of them would contain the true population parameter $\lambda$.*

We can test this by creating a function which returns a boolean indicating whether or not the parameter `lam` for $\lambda$ is contained in the 94% confidence interval from the data `sample`:

```python
def in_ci(lam: float, sample: List[int]) -> bool:
    interval = ci(sample)
    return (lam >= interval[0] and lam <= interval[1])
```

We can then test this function,

```python
in_ci(1, sample)
```

    True

```python
in_ci(3, sample)
```

    False

We can loop over 1,000 confidence intervals to see how many times they capture the true rate parameter,

```python
count_in_ci = [1 if in_ci(1, p1.sample(1000)) else 0 for i in range(1000)]

print("Confidence interval captures true rate {}% of times".format(
    100 * sum(count_in_ci) / len(count_in_ci)))
```

    Confidence interval captures true rate 94.3% of times

This is nearly spot on to what the theory says! Let's now move on to Bayesian methods!

## 5. Bayesian Estimators & Credible Intervals With PyMC3 <a class="anchor" id="fifth-bullet"></a>
-----------

In the frequentist approach the parameter we wish to estimate, $\lambda$, is fixed but unknown. The observed data $\left\{ y_1, y_2, \ldots, y_n \right\}$ is assumed to be from a population $f(y \, \vert \, \lambda)$ and estimates of the population parameter $\lambda$ are obtained using maximum likelihood. As we discussed above, probability statements about the unknown rate constant $\lambda$ don't make sense as it's a fixed value and not a random variable. However, probability statements can be made about a confidence interval for $\lambda$. In the maximum likelihood method, asymptotic normality allows us to use confidence intervals as a way to quantify the uncertainty in our estimator.

In contrast, in Bayesian statistics, $\lambda$ is not a fixed value, but is assumed to have values coming from a probability distribution called the [prior](https://en.wikipedia.org/wiki/Prior_probability) $P(\lambda)$. This is often subjective and the choice of distribution for the prior often comes from domain knowledge. The observed data $\left\{ y_1, y_2, \ldots, y_n \right\}$ and samples from the prior are used to evaluate the likelihood and form the [posterior distribution](https://en.wikipedia.org/wiki/Posterior_distribution), $P(\lambda \, \vert \, y_1, y_2, \ldots, y_n )$. **Now we can formulate estimators for $\lambda$ and quantify the uncertainty in those estimates by directly using the posterior distribution.**

Let $P(y_1, y_2, \ldots, y_n \, \vert \, \lambda )$ be the sampling distribution; then [Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem) states,

$$ P(\lambda \, \vert \, y_1, y_2, \ldots, y_n) \; = \; \frac{ P ( y_1, y_2, \ldots, y_n \, \vert \, \lambda) \, P(\lambda)}{m(y_1, y_2, \ldots, y_n)}, \quad \text{and} \qquad m(y_1, y_2, \ldots, y_n) \; = \; \int P ( y_1, y_2, \ldots, y_n\, \vert \, \lambda) \, P(\lambda) \, d\lambda $$

where $m(y_1, y_2, \ldots, y_n)$ is called the marginal distribution and is used for normalization.
Another way to rewrite Bayes' formula is in terms of the likelihood function, $L(\lambda \, \vert \, y_1, y_2, \ldots, y_n)$,

$$ P(\lambda \, \vert \, y_1, y_2, \ldots, y_n) \; = \; \frac{ L(\lambda \, \vert \, y_1, y_2, \ldots, y_n) \, P(\lambda)}{m(y_1, y_2, \ldots, y_n)} $$

The [Bayesian estimator](https://en.wikipedia.org/wiki/Bayes_estimator) (often called the posterior mean) is taken to be the expected value of $\lambda$ under the posterior,

$$ \widehat{\lambda} \; = \; E_{\lambda \, \vert \, y}(\lambda \, \vert \, y_1, y_2, \ldots, y_n)$$

Until the advent of computers, statisticians were stuck using [conjugate priors](https://en.wikipedia.org/wiki/Conjugate_prior) with Bayesian methods, since these admit analytic solutions for the posterior distribution. The conjugate prior for a Poisson distribution is a [Gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution), which for $\alpha, \beta > 0$ takes the form,

$$ P(\lambda \, \vert \, \alpha, \beta ) \; = \; \frac{\beta^{\alpha }}{\Gamma(\alpha) } \, \lambda^{\alpha-1} \, e^{-\beta \lambda} $$

A depiction of the Gamma distribution for various values of $\alpha$ and $\beta$ can be seen on [PyMC3's website](https://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Gamma). Then the posterior is [again a Gamma distribution](https://people.stat.sc.edu/Hitchcock/slides535day5spr2014.pdf), but with $\alpha^{\prime} \, = \, \alpha \, + \, n \, \bar{y}_{n}$ and $\beta^{\prime} \, = \, \beta \, + \, n$. This leads to a posterior mean,

$$\hat{\lambda} \; = \; \frac{\alpha}{\beta \, + \, n} \, + \, \frac{ \bar{y}_{n}}{1 \, + \, \beta \, / \, n\, }$$

We can see that with little data (small $n$) our estimate is closer to the prior mean ($\frac{\alpha}{\beta}$), while with lots of data (large $n$) it moves towards the sample average $\bar{y}_{n}$. Let's see this convergence for ourselves! We can define the posterior mean function:


```python
def posterior_mean(alpha: float, beta: float, sample: List[int]) -> float:
    n = len(sample)
    m = sum(sample) / n
    return alpha / (beta + n) + m / (1 + beta / n)
```

Then define 1,000 random sample sizes:


```python
from random import randint

nums_samples = [randint(2,1000) for i in range(1000)]
nums_samples.sort()
```

We can then calculate and plot the posterior mean for the [Poisson-Gamma model](https://people.stat.sc.edu/Hitchcock/slides535day5spr2014.pdf) using the data with $\alpha \, = \, 3$ and $\beta \, = \, 1$,


```python
alpha = 3.0
beta = 1.0
```


```python
samples = [ posterior_mean(alpha=alpha, beta=beta, sample = p1.sample(n))
            for n in nums_samples]

(pd.DataFrame({
    "posterior_mean":samples,
    "sample_size":nums_samples
}).plot(x="sample_size",
        y="posterior_mean",
        title="Convergence of Posterior Mean",
        ylim=(0,1.75)))
```

Now let's talk about the confidence of this estimate. The analog of the confidence interval in Bayesian estimation is called the [credible interval](https://en.wikipedia.org/wiki/Credible_interval), which requires the full posterior. I wrote a function to plot the posterior for the sample from `p1` below,


```python
from gamma import posterior_distribution

posterior_distribution(alpha, beta, sample)
```

As we saw, the Bayesian estimator requires the full posterior distribution. Without a conjugate prior, Bayesian methods require a numerical approximation to the posterior, which is computationally expensive. Despite the added complexity, Bayesian methods allow us to handle situations where we might not have much data and can often lead to estimates with smaller variance.
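Before moving on to numerical approximation: the `gamma` module imported above is the author's own helper, which isn't shown in the post. As a rough stand-in (an assumption on my part, not the original implementation), the analytic conjugate posterior could be plotted directly with SciPy:


```python
# Hypothetical stand-in for the `posterior_distribution` helper above:
# plots the analytic Gamma(alpha + n*ybar, beta + n) posterior with SciPy.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gamma

def plot_gamma_posterior(alpha: float, beta: float, sample) -> None:
    n = len(sample)
    alpha_post = alpha + sum(sample)  # alpha' = alpha + n * ybar_n
    beta_post = beta + n              # beta'  = beta + n
    lam = np.linspace(0.5, 1.5, 400)
    plt.plot(lam, gamma.pdf(lam, a=alpha_post, scale=1.0/beta_post))
    plt.xlabel("λ")
    plt.ylabel("density")
    plt.title("Analytic Gamma posterior")
    plt.show()

plot_gamma_posterior(alpha, beta, sample)
```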
One approach to approximating the posterior distribution is to randomly sample the prior distribution and then evaluate the likelihood of that prior value and the data using Bayes' rule,

$$ P(\lambda \, \vert \, y_1, y_2, \ldots, y_n) \; = \; \frac{ L(\lambda \, \vert \, y_1, y_2, \ldots, y_n) \, P(\lambda)}{m(y_1, y_2, \ldots, y_n)} $$

Repeatedly sampling the prior and evaluating the likelihood gives us a good approximation to the posterior distribution. Once we have the posterior distribution we can then evaluate the expected value of $\lambda$. A common method for generating the random samples of the prior is through [Markov Chain Monte Carlo methods](https://en.wikipedia.org/wiki/Markov_chain_Monte_Carlo).

Bayesian methods with MCMC can be used easily with [PyMC](https://docs.pymc.io/)! We begin by importing the library,


```python
import pymc3 as pm
```

Then define the same Poisson-Gamma model as above, and sample it 5,000 times to get the expected mean:


```python
with pm.Model() as model_1:
    λ = pm.Gamma('λ', alpha=3, beta=1)
    y = pm.Poisson('y', mu=λ, observed=list(sample))
    trace = pm.sample(5000, tune=2000, return_inferencedata=True)
```

    Auto-assigning NUTS sampler...
    Initializing NUTS using jitter+adapt_diag...
    Sequential sampling (2 chains in 1 job)
    NUTS: [λ]
    100.00% [7000/7000 00:14<00:00 Sampling chain 0, 0 divergences]
    100.00% [7000/7000 00:14<00:00 Sampling chain 1, 0 divergences]
    Sampling 2 chains for 2_000 tune and 5_000 draw iterations (4_000 + 10_000 draws total) took 29 seconds.

We can then view the posterior distribution using the [ArviZ](https://arviz-devs.github.io/arviz/) library,


```python
import arviz as az

az.plot_posterior(trace)
```

The results are the same as before with the analytical posterior. *We should note that **Bayesian estimators are ALWAYS biased due to their choice of prior**, however, they can reduce the variance in our estimators.*

This will become evident in the next example, which shows another area where Bayesian methods shine: when you have a limited amount of data, but a lot of domain knowledge. Say we only have 20 sample points; we can calculate the MLE,


```python
mle(sample[-20:])
```

    1.0526315789473684

Not too bad!
However, the confidence interval is quite large,


```python
ci(sample[-20:])
```

    (0.6101254949789889, 1.4951376629157478)

Let's define our model to be a Poisson-Exponential model where the prior distribution is an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution), pictured below from [PyMC3's site](https://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Exponential):

It's unfortunate that these two distributions both use $\lambda$ for their parameters, but I will do my best to make it clear which $\lambda$ I refer to. Using a larger rate $\lambda$ in the exponential prior concentrates the prior mass near zero, while a smaller rate $\lambda$ spreads it over a larger range of values. Let's define the prior as $\text{Exponential}(1)$ and sample it using MCMC methods 500 times. We can then plot the posterior and the sampling space using the [plot_trace](https://arviz-devs.github.io/arviz/api/generated/arviz.plot_trace.html) method,


```python
with pm.Model() as model_2:
    λ = pm.Exponential('λ', 1)
    y = pm.Poisson('y', mu=λ, observed=list(sample[-20:-1]))
    trace2 = pm.sample(500, tune=2000, cores=2, return_inferencedata=True)

az.plot_trace(trace2, var_names=['λ'])
```

We can then calculate the expected value of the posterior and the credible region,


```python
az.summary(trace2, kind="stats")
```

|   | mean  | sd    | hdi_3% | hdi_97% |
|---|-------|-------|--------|---------|
| λ | 1.037 | 0.231 | 0.662  | 1.488   |

The values are nearly the same as the MLE. One thing to note is that the posterior isn't very well defined using only 500 MCMC steps. We can also see that there seems to be some [autocorrelation in the sample space](https://www.coursera.org/lecture/introduction-to-pymc3/autocorrelation-and-effective-sample-size-YSW3x),


```python
az.plot_autocorr(trace2)
```

Let's bump the number of samples up to 10,000 to see how the posterior distribution looks,


```python
with model_2:
    trace3 = pm.sample(10000, tune=2000, cores=2, return_inferencedata=True)

az.plot_trace(trace3, var_names=['λ'])
```

We can see the posterior is pretty well defined, with a little right skew. Let's get the expected mean and credible interval,


```python
az.summary(trace3, kind="stats")
```

|   | mean  | sd    | hdi_3% | hdi_97% |
|---|-------|-------|--------|---------|
| λ | 1.048 | 0.229 | 0.62   | 1.468   |

The Bayesian method in this case isn't much better than the MLE, but the credible interval is narrower than the confidence interval. We can also see that the mode of the posterior distribution is nearly directly over 1.0, which is the correct value for our parameter.
Using the posterior mode as an estimator is called [maximum a posteriori (MAP)](https://en.wikipedia.org/wiki/Maximum_a_posteriori_estimation) estimation, and we can see the calculated value below,


```python
pm.find_MAP(model=model_2)
```

    100.00% [6/6 00:00<00:00 logp = -26.451, ||grad|| = 6.1371]

    {'λ_log__': array(-1.70517194e-07), 'λ': array(0.99999983)}

That's really good for only 20 data points!!

## 6. Connecting The Two Methods <a class="anchor" id="sixth-bullet"></a>
------

One way we can connect Bayesian methods with the MLE is by choosing a constant prior $C$ or a uniform prior ($U(\theta)$, so long as $\theta \, \geq \, \lambda$). Then the MAP estimate is the same as the MLE:

$$ \max_{\lambda} \, P(\lambda \, \vert \, y_1, y_2, \ldots, y_n) \; = \; \frac{C}{m(y_1, y_2, \ldots, y_n)} \, \max_{\lambda} \, L(\lambda \, \vert \, y_1, y_2, \ldots, y_n) $$

We can show this with PyMC3 by choosing the prior to be a $U(0, 10)$:


```python
with pm.Model() as model_3:
    λ = pm.Uniform('λ', lower=0, upper=10)
    y = pm.Poisson('y', mu=λ, observed=list(sample[-20:-1]))

pm.find_MAP(model=model_3)
```

    100.00% [8/8 00:00<00:00 logp = -70.371, ||grad|| = 37.5]

    {'λ_interval__': array(-2.14006616), 'λ': array(1.05263158)}

This is the same value as the MLE! The [Bernstein–von Mises theorem](https://en.wikipedia.org/wiki/Bernstein%E2%80%93von_Mises_theorem) shows rigorously that in the limit of large data Bayesian estimators and maximum likelihood estimators converge to the same thing.

## 7. Conclusions <a class="anchor" id="seventh-bullet"></a>

In this post I discussed frequentist and Bayesian estimation techniques applied to data from a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) and covered how to quantify the uncertainty in each method. I showed how to sample a probability distribution written in Scala from Python using [Py4J](https://www.py4j.org/). For frequentist methods I covered [maximum likelihood estimation](https://en.wikipedia.org/wiki/Maximum_likelihood_estimation), its convergence, and quantifying uncertainty with confidence intervals using the [Fisher information](https://en.wikipedia.org/wiki/Fisher_information). I also covered Bayesian estimators using [PyMC3](https://docs.pymc.io/) and quantifying their uncertainty with [credible intervals](https://en.wikipedia.org/wiki/Credible_interval) using [ArviZ](https://arviz-devs.github.io/arviz/).
Finally, we showed the connection between maximum likelihood estimators and Bayesian estimators by choosing a [flat prior](https://stats.stackexchange.com/questions/124753/what-are-examples-of-flat-priors#:~:text=The%20term%20%22flat%22%20in%20reference,c%20over%20the%20real%20line.).

I learned a lot in creating this post and hope you enjoyed it!
module Client.Skeleton.Skeleton import Client.Skeleton.BaseWithTest.InigoTOML import Client.Skeleton.BaseWithTest.Package import Client.Skeleton.BaseWithTest.Test.Suite import Client.Skeleton.BaseWithTest.Test.Test public export data Skeleton : Type where BaseWithTest : Skeleton export toString : Skeleton -> String toString BaseWithTest = "BaseWithTest" export describe : Skeleton -> String describe BaseWithTest = "base skeleton with tests" export fromString : String -> Maybe Skeleton fromString "BaseWithTest" = Just BaseWithTest fromString _ = Nothing export getFiles : Skeleton -> (String, String) -> List (List String, String) getFiles BaseWithTest vars = [ ( Client.Skeleton.BaseWithTest.InigoTOML.name vars , Client.Skeleton.BaseWithTest.InigoTOML.build vars ) , ( Client.Skeleton.BaseWithTest.Package.name vars , Client.Skeleton.BaseWithTest.Package.build vars ) , ( Client.Skeleton.BaseWithTest.Test.Suite.name vars , Client.Skeleton.BaseWithTest.Test.Suite.build vars ) , ( Client.Skeleton.BaseWithTest.Test.Test.name vars , Client.Skeleton.BaseWithTest.Test.Test.build vars ) ]
State Before: α : Sort u β : Sort v γ : Sort w α' : Type u_1 β' : Type u_2 e : α' ≃ β' p p' : Perm α' ⊢ (↑(permCongr e) p).trans (↑(permCongr e) p') = ↑(permCongr e) (p.trans p') State After: case H α : Sort u β : Sort v γ : Sort w α' : Type u_1 β' : Type u_2 e : α' ≃ β' p p' : Perm α' x✝ : β' ⊢ ↑((↑(permCongr e) p).trans (↑(permCongr e) p')) x✝ = ↑(↑(permCongr e) (p.trans p')) x✝ Tactic: ext State Before: case H α : Sort u β : Sort v γ : Sort w α' : Type u_1 β' : Type u_2 e : α' ≃ β' p p' : Perm α' x✝ : β' ⊢ ↑((↑(permCongr e) p).trans (↑(permCongr e) p')) x✝ = ↑(↑(permCongr e) (p.trans p')) x✝ State After: no goals Tactic: simp only [trans_apply, comp_apply, permCongr_apply, symm_apply_apply]
[STATEMENT] lemma nnvs_finite: "n_nearest_verts w u n U \<Longrightarrow> finite U" [PROOF STATE] proof (prove) goal (1 subgoal): 1. n_nearest_verts w u n U \<Longrightarrow> finite U [PROOF STEP] by (induction rule: n_nearest_verts.induct) auto
% This LaTeX was auto-generated from an M-file by MATLAB. % To make changes, update the M-file and republish this document. \subsection*{gTrigT.m} \begin{par} \textbf{Summary:} Test the gTrig function, which computes (at least) the mean and the variance of the transformed variable for a Gaussian distributed input $x\sim\mathcal N(m,v)$. Check the outputs using Monte Carlo, and the derivatives using finite differences. \end{par} \vspace{1em} \begin{verbatim}function gTrigT(m, v, i, e)\end{verbatim} \begin{par} \textbf{Input arguments:} \end{par} \vspace{1em} \begin{verbatim}m mean vector of Gaussian [ d ] v covariance matrix [ d x d ] i vector of indices of elements to augment [ I x 1 ] e (optional) scale vector; default: 1 [ I x 1 ]\end{verbatim} \begin{par} Copyright (C) 2008-2013 by Marc Deisenroth, Andrew McHutchon, Joe Hall, and Carl Edward Rasmussen. \end{par} \vspace{1em} \begin{par} Last modified: 2013-03-25 \end{par} \vspace{1em} \begin{lstlisting} function gTrigT(m, v, i, e) \end{lstlisting} \subsection*{Code} \begin{lstlisting} % create a default test if no input arguments are given if ~nargin D = 4; m = randn(D,1); v = randn(D); v = v*v'+eye(D); i = [2; 4]; I = 2*length(i); e = exp(randn(size(i))); else D = length(m); end n = 1e6; % Monte Carlo sample size delta = 1e-4; % for finite difference approx x = bsxfun(@plus, m, chol(v)'*randn(D,n)); y = bsxfun(@times, [e; e], [sin(x(i,:)); cos(x(i,:))]); y = y(reshape(1:I,I/2,2)',:); % reorder rows [M, V, C] = gTrig(m, v, i, e); Q = cov([x' y']); Qv = Q(D+1:end,D+1:end); Qc = v\Q(1:D,D+1:end); disp(['mean: gTrig Monte Carlo']) disp([M mean(y,2)]); disp([' ']); disp(['var: gTrig Monte Carlo']) disp([V(:) Qv(:)]); disp([' ']); disp(['cov: gTrig Monte Carlo']) disp([C(:) Qc(:)]); disp(' '); disp('dMdm') for j = 1:I checkgrad(@gTrigT0, m, delta, v, i, e, j); disp(['this was element # ' num2str(j) '/' num2str(I)]); end disp(' '); disp('dVdm') for j = 1:I*I checkgrad(@gTrigT1, m, delta, v, i, e, j); disp(['this was element # ' num2str(j) '/' num2str(I*I)]); end disp(' '); disp('dCdm') for j = 1:I*D checkgrad(@gTrigT2, m, delta, v, i, e, j); disp(['this was element # ' num2str(j) '/' num2str(I*D)]); end disp(' '); disp('dMdv') for j = 1:I checkgrad(@gTrigT3, v(tril(ones(length(v)))==1), delta, m, i, e, j); disp(['this was element # ' num2str(j) '/' num2str(I)]); end disp(' '); disp('dVdv') for j = 1:I*I checkgrad(@gTrigT4, v(tril(ones(length(v)))==1), delta, m, i, e, j); disp(['this was element # ' num2str(j) '/' num2str(I*I)]); end disp(' '); disp('dCdv') for j = 1:I*D checkgrad(@gTrigT5, v(tril(ones(length(v)))==1), delta, m, i, e, j); disp(['this was element # ' num2str(j) '/' num2str(I*D)]); end \end{lstlisting} \begin{lstlisting} function [f, df] = gTrigT0(m, v, i, e, j) [M, V, C, dMdm] = gTrig(m, v, i, e); f = M(j); df = dMdm(j,:); function [f, df] = gTrigT1(m, v, i, e, j) [M, V, C, dMdm, dVdm] = gTrig(m, v, i, e); dVdm = reshape(dVdm,[size(V) length(m)]); dd = length(M); p = fix((j+dd-1)/dd); q = j-(p-1)*dd; f = V(p,q); df = squeeze(dVdm(p,q,:)); function [f, df] = gTrigT2(m, v, i, e, j) [M, V, C, dMdm, dVdm, dCdm] = gTrig(m, v, i, e); dCdm = reshape(dCdm,[size(C) length(m)]); dd = length(M); p = fix((j+dd-1)/dd); q = j-(p-1)*dd; f = C(p,q); df = squeeze(dCdm(p,q,:)); function [f, df] = gTrigT3(v, m, i, e, j) d = length(m); vv(tril(ones(d))==1) = v; vv = reshape(vv,d,d); vv = vv + vv' - diag(diag(vv)); [M, V, C, dMdm, dVdm, dCdm, dMdv] = gTrig(m, vv, i, e); dMdv = reshape(dMdv,[length(M) size(v)]); f = M(j); df = squeeze(dMdv(j,:,:)); 
df = df+df'-diag(diag(df)); df = df(tril(ones(d))==1); function [f, df] = gTrigT4(v, m, i, e, j) d = length(m); vv(tril(ones(d))==1) = v; vv = reshape(vv,d,d); vv = vv + vv' - diag(diag(vv)); [M, V, C, dMdm, dVdm, dCdm, dMdv, dVdv] = gTrig(m, vv, i, e); dVdv = reshape(dVdv,[size(V) size(v)]); dd = length(M); p = fix((j+dd-1)/dd); q = j-(p-1)*dd; f = V(p,q); df = squeeze(dVdv(p,q,:,:)); df = df+df'-diag(diag(df)); df = df(tril(ones(d))==1); function [f, df] = gTrigT5(v, m, i, e, j) d = length(m); vv(tril(ones(d))==1) = v; vv = reshape(vv,d,d); vv = vv + vv' - diag(diag(vv)); [M, V, C, dMdm, dVdm, dCdm, dMdv, dVdv, dCdv] = gTrig(m, vv, i, e); dCdv = reshape(dCdv,[size(C) size(v)]); dd = length(M); p = fix((j+dd-1)/dd); q = j-(p-1)*dd; f = C(p,q); df = squeeze(dCdv(p,q,:,:)); df = df+df'-diag(diag(df)); df = df(tril(ones(d))==1); \end{lstlisting}
[STATEMENT] lemma inv_pair_P_D1: "P m1" if "inv_pair (Pair_Storage k1 k2 m1 m2)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. P m1 [PROOF STEP] using that [PROOF STATE] proof (prove) using this: inv_pair (Pair_Storage k1 k2 m1 m2) goal (1 subgoal): 1. P m1 [PROOF STEP] unfolding inv_pair_def [PROOF STATE] proof (prove) using this: case Pair_Storage k1 k2 m1 m2 of Pair_Storage k1 k2 m1 m2 \<Rightarrow> key ` dom (local.map_of m1) \<subseteq> {k1} \<and> key ` dom (local.map_of m2) \<subseteq> {k2} \<and> k1 \<noteq> k2 \<and> P m1 \<and> P m2 goal (1 subgoal): 1. P m1 [PROOF STEP] by auto
From Coq Require Import String List ZArith. From compcert Require Import Coqlib Integers Floats AST Ctypes Cop Clight Clightdefs. Local Open Scope Z_scope. Module Info. Definition version := "3.7"%string. Definition build_number := ""%string. Definition build_tag := ""%string. Definition arch := "x86"%string. Definition model := "32sse2"%string. Definition abi := "macosx"%string. Definition bitsize := 32. Definition big_endian := false. Definition source_file := "abs.c"%string. Definition normalized := true. End Info. Definition ___builtin_annot : ident := 9%positive. Definition ___builtin_annot_intval : ident := 10%positive. Definition ___builtin_bswap : ident := 2%positive. Definition ___builtin_bswap16 : ident := 4%positive. Definition ___builtin_bswap32 : ident := 3%positive. Definition ___builtin_bswap64 : ident := 1%positive. Definition ___builtin_clz : ident := 35%positive. Definition ___builtin_clzl : ident := 36%positive. Definition ___builtin_clzll : ident := 37%positive. Definition ___builtin_ctz : ident := 38%positive. Definition ___builtin_ctzl : ident := 39%positive. Definition ___builtin_ctzll : ident := 40%positive. Definition ___builtin_debug : ident := 51%positive. Definition ___builtin_fabs : ident := 5%positive. Definition ___builtin_fmadd : ident := 43%positive. Definition ___builtin_fmax : ident := 41%positive. Definition ___builtin_fmin : ident := 42%positive. Definition ___builtin_fmsub : ident := 44%positive. Definition ___builtin_fnmadd : ident := 45%positive. Definition ___builtin_fnmsub : ident := 46%positive. Definition ___builtin_fsqrt : ident := 6%positive. Definition ___builtin_membar : ident := 11%positive. Definition ___builtin_memcpy_aligned : ident := 7%positive. Definition ___builtin_read16_reversed : ident := 47%positive. Definition ___builtin_read32_reversed : ident := 48%positive. Definition ___builtin_sel : ident := 8%positive. Definition ___builtin_va_arg : ident := 13%positive. Definition ___builtin_va_copy : ident := 14%positive. Definition ___builtin_va_end : ident := 15%positive. Definition ___builtin_va_start : ident := 12%positive. Definition ___builtin_write16_reversed : ident := 49%positive. Definition ___builtin_write32_reversed : ident := 50%positive. Definition ___compcert_i64_dtos : ident := 20%positive. Definition ___compcert_i64_dtou : ident := 21%positive. Definition ___compcert_i64_sar : ident := 32%positive. Definition ___compcert_i64_sdiv : ident := 26%positive. Definition ___compcert_i64_shl : ident := 30%positive. Definition ___compcert_i64_shr : ident := 31%positive. Definition ___compcert_i64_smod : ident := 28%positive. Definition ___compcert_i64_smulh : ident := 33%positive. Definition ___compcert_i64_stod : ident := 22%positive. Definition ___compcert_i64_stof : ident := 24%positive. Definition ___compcert_i64_udiv : ident := 27%positive. Definition ___compcert_i64_umod : ident := 29%positive. Definition ___compcert_i64_umulh : ident := 34%positive. Definition ___compcert_i64_utod : ident := 23%positive. Definition ___compcert_i64_utof : ident := 25%positive. Definition ___compcert_va_composite : ident := 19%positive. Definition ___compcert_va_float64 : ident := 18%positive. Definition ___compcert_va_int32 : ident := 16%positive. Definition ___compcert_va_int64 : ident := 17%positive. Definition _abs : ident := 53%positive. Definition _main : ident := 54%positive. Definition _x : ident := 52%positive. 
Definition f_abs := {| fn_return := tint; fn_callconv := cc_default; fn_params := ((_x, tint) :: nil); fn_vars := nil; fn_temps := nil; fn_body := (Sifthenelse (Ebinop Olt (Etempvar _x tint) (Econst_int (Int.repr 0) tint) tint) (Sreturn (Some (Eunop Oneg (Etempvar _x tint) tint))) (Sreturn (Some (Etempvar _x tint)))) |}. Definition composites : list composite_definition := nil. Definition global_definitions : list (ident * globdef fundef type) := ((___builtin_bswap64, Gfun(External (EF_builtin "__builtin_bswap64" (mksignature (AST.Tlong :: nil) AST.Tlong cc_default)) (Tcons tulong Tnil) tulong cc_default)) :: (___builtin_bswap, Gfun(External (EF_builtin "__builtin_bswap" (mksignature (AST.Tint :: nil) AST.Tint cc_default)) (Tcons tuint Tnil) tuint cc_default)) :: (___builtin_bswap32, Gfun(External (EF_builtin "__builtin_bswap32" (mksignature (AST.Tint :: nil) AST.Tint cc_default)) (Tcons tuint Tnil) tuint cc_default)) :: (___builtin_bswap16, Gfun(External (EF_builtin "__builtin_bswap16" (mksignature (AST.Tint :: nil) AST.Tint16unsigned cc_default)) (Tcons tushort Tnil) tushort cc_default)) :: (___builtin_fabs, Gfun(External (EF_builtin "__builtin_fabs" (mksignature (AST.Tfloat :: nil) AST.Tfloat cc_default)) (Tcons tdouble Tnil) tdouble cc_default)) :: (___builtin_fsqrt, Gfun(External (EF_builtin "__builtin_fsqrt" (mksignature (AST.Tfloat :: nil) AST.Tfloat cc_default)) (Tcons tdouble Tnil) tdouble cc_default)) :: (___builtin_memcpy_aligned, Gfun(External (EF_builtin "__builtin_memcpy_aligned" (mksignature (AST.Tint :: AST.Tint :: AST.Tint :: AST.Tint :: nil) AST.Tvoid cc_default)) (Tcons (tptr tvoid) (Tcons (tptr tvoid) (Tcons tuint (Tcons tuint Tnil)))) tvoid cc_default)) :: (___builtin_sel, Gfun(External (EF_builtin "__builtin_sel" (mksignature (AST.Tint :: nil) AST.Tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) (Tcons tbool Tnil) tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) :: (___builtin_annot, Gfun(External (EF_builtin "__builtin_annot" (mksignature (AST.Tint :: nil) AST.Tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) (Tcons (tptr tschar) Tnil) tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) :: (___builtin_annot_intval, Gfun(External (EF_builtin "__builtin_annot_intval" (mksignature (AST.Tint :: AST.Tint :: nil) AST.Tint cc_default)) (Tcons (tptr tschar) (Tcons tint Tnil)) tint cc_default)) :: (___builtin_membar, Gfun(External (EF_builtin "__builtin_membar" (mksignature nil AST.Tvoid cc_default)) Tnil tvoid cc_default)) :: (___builtin_va_start, Gfun(External (EF_builtin "__builtin_va_start" (mksignature (AST.Tint :: nil) AST.Tvoid cc_default)) (Tcons (tptr tvoid) Tnil) tvoid cc_default)) :: (___builtin_va_arg, Gfun(External (EF_builtin "__builtin_va_arg" (mksignature (AST.Tint :: AST.Tint :: nil) AST.Tvoid cc_default)) (Tcons (tptr tvoid) (Tcons tuint Tnil)) tvoid cc_default)) :: (___builtin_va_copy, Gfun(External (EF_builtin "__builtin_va_copy" (mksignature (AST.Tint :: AST.Tint :: nil) AST.Tvoid cc_default)) (Tcons (tptr tvoid) (Tcons (tptr tvoid) Tnil)) tvoid cc_default)) :: (___builtin_va_end, Gfun(External (EF_builtin "__builtin_va_end" (mksignature (AST.Tint :: nil) AST.Tvoid cc_default)) (Tcons (tptr tvoid) Tnil) tvoid cc_default)) :: (___compcert_va_int32, Gfun(External (EF_external "__compcert_va_int32" (mksignature (AST.Tint :: nil) AST.Tint cc_default)) (Tcons (tptr tvoid) Tnil) tuint cc_default)) :: (___compcert_va_int64, Gfun(External (EF_external "__compcert_va_int64" (mksignature 
(AST.Tint :: nil) AST.Tlong cc_default)) (Tcons (tptr tvoid) Tnil) tulong cc_default)) :: (___compcert_va_float64, Gfun(External (EF_external "__compcert_va_float64" (mksignature (AST.Tint :: nil) AST.Tfloat cc_default)) (Tcons (tptr tvoid) Tnil) tdouble cc_default)) :: (___compcert_va_composite, Gfun(External (EF_external "__compcert_va_composite" (mksignature (AST.Tint :: AST.Tint :: nil) AST.Tint cc_default)) (Tcons (tptr tvoid) (Tcons tuint Tnil)) (tptr tvoid) cc_default)) :: (___compcert_i64_dtos, Gfun(External (EF_runtime "__compcert_i64_dtos" (mksignature (AST.Tfloat :: nil) AST.Tlong cc_default)) (Tcons tdouble Tnil) tlong cc_default)) :: (___compcert_i64_dtou, Gfun(External (EF_runtime "__compcert_i64_dtou" (mksignature (AST.Tfloat :: nil) AST.Tlong cc_default)) (Tcons tdouble Tnil) tulong cc_default)) :: (___compcert_i64_stod, Gfun(External (EF_runtime "__compcert_i64_stod" (mksignature (AST.Tlong :: nil) AST.Tfloat cc_default)) (Tcons tlong Tnil) tdouble cc_default)) :: (___compcert_i64_utod, Gfun(External (EF_runtime "__compcert_i64_utod" (mksignature (AST.Tlong :: nil) AST.Tfloat cc_default)) (Tcons tulong Tnil) tdouble cc_default)) :: (___compcert_i64_stof, Gfun(External (EF_runtime "__compcert_i64_stof" (mksignature (AST.Tlong :: nil) AST.Tsingle cc_default)) (Tcons tlong Tnil) tfloat cc_default)) :: (___compcert_i64_utof, Gfun(External (EF_runtime "__compcert_i64_utof" (mksignature (AST.Tlong :: nil) AST.Tsingle cc_default)) (Tcons tulong Tnil) tfloat cc_default)) :: (___compcert_i64_sdiv, Gfun(External (EF_runtime "__compcert_i64_sdiv" (mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong cc_default)) :: (___compcert_i64_udiv, Gfun(External (EF_runtime "__compcert_i64_udiv" (mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong cc_default)) :: (___compcert_i64_smod, Gfun(External (EF_runtime "__compcert_i64_smod" (mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong cc_default)) :: (___compcert_i64_umod, Gfun(External (EF_runtime "__compcert_i64_umod" (mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong cc_default)) :: (___compcert_i64_shl, Gfun(External (EF_runtime "__compcert_i64_shl" (mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tlong cc_default)) (Tcons tlong (Tcons tint Tnil)) tlong cc_default)) :: (___compcert_i64_shr, Gfun(External (EF_runtime "__compcert_i64_shr" (mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tlong cc_default)) (Tcons tulong (Tcons tint Tnil)) tulong cc_default)) :: (___compcert_i64_sar, Gfun(External (EF_runtime "__compcert_i64_sar" (mksignature (AST.Tlong :: AST.Tint :: nil) AST.Tlong cc_default)) (Tcons tlong (Tcons tint Tnil)) tlong cc_default)) :: (___compcert_i64_smulh, Gfun(External (EF_runtime "__compcert_i64_smulh" (mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong cc_default)) (Tcons tlong (Tcons tlong Tnil)) tlong cc_default)) :: (___compcert_i64_umulh, Gfun(External (EF_runtime "__compcert_i64_umulh" (mksignature (AST.Tlong :: AST.Tlong :: nil) AST.Tlong cc_default)) (Tcons tulong (Tcons tulong Tnil)) tulong cc_default)) :: (___builtin_clz, Gfun(External (EF_builtin "__builtin_clz" (mksignature (AST.Tint :: nil) AST.Tint cc_default)) (Tcons tuint Tnil) tint cc_default)) :: (___builtin_clzl, Gfun(External (EF_builtin "__builtin_clzl" (mksignature (AST.Tint :: nil) AST.Tint cc_default)) (Tcons tuint Tnil) tint 
cc_default)) :: (___builtin_clzll, Gfun(External (EF_builtin "__builtin_clzll" (mksignature (AST.Tlong :: nil) AST.Tint cc_default)) (Tcons tulong Tnil) tint cc_default)) :: (___builtin_ctz, Gfun(External (EF_builtin "__builtin_ctz" (mksignature (AST.Tint :: nil) AST.Tint cc_default)) (Tcons tuint Tnil) tint cc_default)) :: (___builtin_ctzl, Gfun(External (EF_builtin "__builtin_ctzl" (mksignature (AST.Tint :: nil) AST.Tint cc_default)) (Tcons tuint Tnil) tint cc_default)) :: (___builtin_ctzll, Gfun(External (EF_builtin "__builtin_ctzll" (mksignature (AST.Tlong :: nil) AST.Tint cc_default)) (Tcons tulong Tnil) tint cc_default)) :: (___builtin_fmax, Gfun(External (EF_builtin "__builtin_fmax" (mksignature (AST.Tfloat :: AST.Tfloat :: nil) AST.Tfloat cc_default)) (Tcons tdouble (Tcons tdouble Tnil)) tdouble cc_default)) :: (___builtin_fmin, Gfun(External (EF_builtin "__builtin_fmin" (mksignature (AST.Tfloat :: AST.Tfloat :: nil) AST.Tfloat cc_default)) (Tcons tdouble (Tcons tdouble Tnil)) tdouble cc_default)) :: (___builtin_fmadd, Gfun(External (EF_builtin "__builtin_fmadd" (mksignature (AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil) AST.Tfloat cc_default)) (Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble cc_default)) :: (___builtin_fmsub, Gfun(External (EF_builtin "__builtin_fmsub" (mksignature (AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil) AST.Tfloat cc_default)) (Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble cc_default)) :: (___builtin_fnmadd, Gfun(External (EF_builtin "__builtin_fnmadd" (mksignature (AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil) AST.Tfloat cc_default)) (Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble cc_default)) :: (___builtin_fnmsub, Gfun(External (EF_builtin "__builtin_fnmsub" (mksignature (AST.Tfloat :: AST.Tfloat :: AST.Tfloat :: nil) AST.Tfloat cc_default)) (Tcons tdouble (Tcons tdouble (Tcons tdouble Tnil))) tdouble cc_default)) :: (___builtin_read16_reversed, Gfun(External (EF_builtin "__builtin_read16_reversed" (mksignature (AST.Tint :: nil) AST.Tint16unsigned cc_default)) (Tcons (tptr tushort) Tnil) tushort cc_default)) :: (___builtin_read32_reversed, Gfun(External (EF_builtin "__builtin_read32_reversed" (mksignature (AST.Tint :: nil) AST.Tint cc_default)) (Tcons (tptr tuint) Tnil) tuint cc_default)) :: (___builtin_write16_reversed, Gfun(External (EF_builtin "__builtin_write16_reversed" (mksignature (AST.Tint :: AST.Tint :: nil) AST.Tvoid cc_default)) (Tcons (tptr tushort) (Tcons tushort Tnil)) tvoid cc_default)) :: (___builtin_write32_reversed, Gfun(External (EF_builtin "__builtin_write32_reversed" (mksignature (AST.Tint :: AST.Tint :: nil) AST.Tvoid cc_default)) (Tcons (tptr tuint) (Tcons tuint Tnil)) tvoid cc_default)) :: (___builtin_debug, Gfun(External (EF_external "__builtin_debug" (mksignature (AST.Tint :: nil) AST.Tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) (Tcons tint Tnil) tvoid {|cc_vararg:=true; cc_unproto:=false; cc_structret:=false|})) :: (_abs, Gfun(Internal f_abs)) :: nil). 
Definition public_idents : list ident := (_abs :: ___builtin_debug :: ___builtin_write32_reversed :: ___builtin_write16_reversed :: ___builtin_read32_reversed :: ___builtin_read16_reversed :: ___builtin_fnmsub :: ___builtin_fnmadd :: ___builtin_fmsub :: ___builtin_fmadd :: ___builtin_fmin :: ___builtin_fmax :: ___builtin_ctzll :: ___builtin_ctzl :: ___builtin_ctz :: ___builtin_clzll :: ___builtin_clzl :: ___builtin_clz :: ___compcert_i64_umulh :: ___compcert_i64_smulh :: ___compcert_i64_sar :: ___compcert_i64_shr :: ___compcert_i64_shl :: ___compcert_i64_umod :: ___compcert_i64_smod :: ___compcert_i64_udiv :: ___compcert_i64_sdiv :: ___compcert_i64_utof :: ___compcert_i64_stof :: ___compcert_i64_utod :: ___compcert_i64_stod :: ___compcert_i64_dtou :: ___compcert_i64_dtos :: ___compcert_va_composite :: ___compcert_va_float64 :: ___compcert_va_int64 :: ___compcert_va_int32 :: ___builtin_va_end :: ___builtin_va_copy :: ___builtin_va_arg :: ___builtin_va_start :: ___builtin_membar :: ___builtin_annot_intval :: ___builtin_annot :: ___builtin_sel :: ___builtin_memcpy_aligned :: ___builtin_fsqrt :: ___builtin_fabs :: ___builtin_bswap16 :: ___builtin_bswap32 :: ___builtin_bswap :: ___builtin_bswap64 :: nil). Definition prog : Clight.program := mkprogram composites global_definitions public_idents _main Logic.I.
theory Lift_Merge imports "../Lifter" begin (* * merge *) definition merge_l :: "('x, 'a1, 'b) lifting \<Rightarrow> ('x, 'a2, 'b) lifting \<Rightarrow> ('x, 'a1 * 'a2, 'b) lifting" where "merge_l t1 t2 = LMake (\<lambda> s a b . (case a of (a1, a2) \<Rightarrow> LUpd t1 s a1 (LUpd t2 s a2 b ))) (\<lambda> s b . (LOut t1 s b, LOut t2 s b)) (\<lambda> s . LBase t1 s)" locale merge_l_valid_weak' = fixes l1 :: "('x, 'a1, 'b) lifting" fixes l2 :: "('x, 'a2, 'b) lifting" locale merge_l_valid_weak = merge_l_valid_weak' + l_ortho + in1 : lifting_valid_weak l1 S1 + in2 : lifting_valid_weak l2 S2 sublocale merge_l_valid_weak \<subseteq> out : lifting_valid_weak "merge_l l1 l2" "\<lambda> x . S1 x \<inter> S2 x" proof fix s fix a :: "'b * 'd" fix b :: "'c" obtain a1 a2 where A: "a = (a1, a2)" by(cases a; auto) have "LUpd l2 s a2 (LUpd l1 s a1 b) \<in> S1 s" unfolding sym[OF compat] using in1.put_S by auto then show "LUpd (merge_l l1 l2) s a b \<in> S1 s \<inter> S2 s" using A in2.put_S by(simp add: merge_l_def compat) next fix s b fix a :: "'b * 'd" obtain a1 a2 where A: "a = (a1, a2)" by(cases a; auto) have "LOut l2 s (LUpd l1 s a1 (LUpd l2 s a2 b)) = a2" unfolding compat using in2.put_get by auto then show "LOut (merge_l l1 l2) s (LUpd (merge_l l1 l2) s a b) = a" using A in1.put_get by(auto simp add: merge_l_def) next fix s fix b :: "'c" assume "b \<in> S1 s \<inter> S2 s" then have B1 : "b \<in> S1 s" and B2 : "b \<in> S2 s" by auto have Leq1 : "b <[ (LUpd l2 s (LOut l2 s b) b)" using in2.get_put_weak[OF B2] by auto have Eq : "LOut l1 s (LUpd l2 s (LOut l2 s b) b) = LOut l1 s b" using put2_get1 by auto have Upd_in : "LUpd l2 s (LOut l2 s b) b \<in> S1 s" using put2_S1[OF B1] by auto have "b <[ LUpd l1 s (LOut l1 s b) b" using in1.get_put_weak[OF B1] by auto show "b <[ LUpd (merge_l l1 l2) s (LOut (merge_l l1 l2) s b) b" using leq_trans[OF Leq1 in1.get_put_weak[OF Upd_in]] by(auto simp add: merge_l_def Eq) qed lemma (in merge_l_valid_weak) ax : shows "lifting_valid_weak (merge_l l1 l2) (\<lambda> x . S1 x \<inter> S2 x)" using out.lifting_valid_weak_axioms by auto lemma (in merge_l_valid_weak) ax_g : assumes H : "\<And> x . S' x = (\<lambda> x . S1 x \<inter> S2 x) x" shows "lifting_valid_weak (merge_l l1 l2) S'" proof- have "S' = (\<lambda> x . S1 x \<inter> S2 x)" using assms by auto then show ?thesis using out.lifting_valid_weak_axioms by auto qed locale merge_l_valid_ext = merge_l_valid_weak' + in1 : lifting_valid_ext l1 + in2 : lifting_valid_ext l2 sublocale merge_l_valid_ext \<subseteq> out : lifting_valid_ext "merge_l l1 l2" proof fix s fix a :: "'b * 'd" fix b :: "'c" obtain a1 a2 where A: "a = (a1, a2)" by(cases a; auto) have Leq1 : "b <[ LUpd l2 s a2 b" using in2.get_put by auto have Leq2 : "LUpd l2 s a2 b <[ LUpd l1 s a1 (LUpd l2 s a2 b)" using in1.get_put by auto show "b <[ LUpd (merge_l l1 l2) s a b" using A leq_trans[OF Leq1 Leq2] by(auto simp add: merge_l_def) qed lemma (in merge_l_valid_ext) ax : shows "lifting_valid_ext (merge_l l1 l2)" using out.lifting_valid_ext_axioms by auto locale merge_l_valid_base_ext = l_ortho_base + in1 : lifting_valid_base_ext l1 S1 + in2 : lifting_valid_base_ext l2 S2 sublocale merge_l_valid_base_ext \<subseteq> out : lifting_valid_base_ext "merge_l l1 l2" "\<lambda> x . 
S1 x \<inter> S2 x" proof fix s :: "'a" show "LBase (merge_l l1 l2) s = \<bottom>" using in1.base by(auto simp add: merge_l_def) qed lemma (in merge_l_valid_base_ext) ax : shows "lifting_valid_base_ext (merge_l l1 l2)" using out.lifting_valid_base_ext_axioms by auto locale merge_l_valid_ok_ext = l_ortho_ok + in1 : lifting_valid_ok_ext l1 S1 + in2 : lifting_valid_ok_ext l2 S2 sublocale merge_l_valid_ok_ext \<subseteq> out : lifting_valid_ok_ext "merge_l l1 l2" "\<lambda> x . S1 x \<inter> S2 x" proof fix s show "ok_S \<subseteq> S1 s \<inter> S2 s" using in1.ok_S_valid in2.ok_S_valid by auto next fix s fix a :: "'b * 'd" fix b :: "'c" assume B_ok : "b \<in> ok_S" obtain a1 a2 where A: "a = (a1, a2)" by(cases a; auto) show "LUpd (merge_l l1 l2) s a b \<in> ok_S" using A in1.ok_S_put in2.ok_S_put B_ok by(auto simp add: merge_l_def) qed lemma (in merge_l_valid_ok_ext) ax : shows "lifting_valid_ok_ext (merge_l l1 l2) (\<lambda> x . S1 x \<inter> S2 x)" using out.lifting_valid_ok_ext_axioms by auto lemma (in merge_l_valid_ok_ext) ax_g : assumes H : "\<And> x . S' x = (\<lambda> x . S1 x \<inter> S2 x) x" shows "lifting_valid_ok_ext (merge_l l1 l2) S'" proof- have "S' = (\<lambda> x . S1 x \<inter> S2 x)" using assms by auto then show ?thesis using out.lifting_valid_ok_ext_axioms by auto qed (* TODO: make sure we don't actually need this. *) (* locale merge_l_valid_pres_ext = merge_l_valid_weak + l_ortho_pres + in1 : lifting_valid_pres_ext l1 S1 + in2 : lifting_valid_pres_ext l2 S2 sublocale merge_l_valid_pres_ext \<subseteq> out: lifting_valid_pres_ext "merge_l l1 l2" "\<lambda> x . S1 x \<inter> S2 x" proof fix v supr :: "'c" fix V fix f :: "'a \<Rightarrow> 'b * 'd \<Rightarrow> 'b * 'd" fix s :: 'a assume Vin : "v \<in> V" assume Vsub : "V \<subseteq> S1 s \<inter> S2 s" then have Vsub1 : "V \<subseteq> S1 s" and Vsub2 : "V \<subseteq> S2 s" by auto assume Supr : "is_sup V supr" assume Supr_in : "supr \<in> S1 s \<inter> S2 s" then have Supr_in1 : "supr \<in> S1 s" and Supr_in2 : "supr \<in> S2 s" by auto (* obtain f1 f2 where F: "f = (f1, f2)" by(cases f; auto) *) (* show "is_sup (LMap (merge_l l1 l2) f s ` V) (LMap (merge_l l1 l2) f s supr)" apply(simp add: merge_l_def) *) obtain x1 x2 where X12 : "f s (LOut l1 s supr, LOut l2 s supr) = (x1, x2)" by(fastforce) have Supr' : "is_sup {LUpd l1 s x1 supr, LUpd l2 s x2 supr} (LUpd l1 s x1 (LUpd l2 s x2 supr))" using compat_pres_sup by auto show "is_sup (LMap (merge_l l1 l2) f s ` V) (LMap (merge_l l1 l2) f s supr)" (* using compat_pres_pair[OF Vin Vsub1 Vsub2 Supr Supr_in, of f] by(auto simp add: merge_l_def) *) proof(rule is_supI) fix x assume X: "x \<in> LMap (merge_l l1 l2) f s ` V" then obtain xo where Xo : "xo \<in> V" "LMap (merge_l l1 l2) f s xo = x" by auto show "x <[ LMap (merge_l l1 l2) f s supr" using Xo apply(auto simp add: merge_l_def split: prod.splits) qed *) locale merge_l_valid_pairwise_ext = l_ortho + in1 : lifting_valid_pairwise_ext S1 + in2 : lifting_valid_pairwise_ext S2 sublocale merge_l_valid_pairwise_ext \<subseteq> out : lifting_valid_pairwise_ext "(\<lambda> x . S1 x \<inter> S2 x)" proof show "\<And>x1 x2 x3 s s12 s23 s13 s123. 
x1 \<in> S1 s \<inter> S2 s \<Longrightarrow> x2 \<in> S1 s \<inter> S2 s \<Longrightarrow> x3 \<in> S1 s \<inter> S2 s \<Longrightarrow> is_sup {x1, x2} s12 \<Longrightarrow> s12 \<in> S1 s \<inter> S2 s \<Longrightarrow> is_sup {x2, x3} s23 \<Longrightarrow> s23 \<in> S1 s \<inter> S2 s \<Longrightarrow> is_sup {x1, x3} s13 \<Longrightarrow> s13 \<in> S1 s \<inter> S2 s \<Longrightarrow> is_sup {x1, x2, x3} s123 \<Longrightarrow> s123 \<in> S1 s \<inter> S2 s" using in1.pairwise_S in2.pairwise_S by blast qed lemma (in merge_l_valid_pairwise_ext) ax : shows "lifting_valid_pairwise_ext (\<lambda> x . S1 x \<inter> S2 x)" using out.lifting_valid_pairwise_ext_axioms by auto lemma (in merge_l_valid_pairwise_ext) ax_g : assumes H: "\<And> x . S' x = (\<lambda> x . S1 x \<inter> S2 x) x" shows "lifting_valid_pairwise_ext S'" proof- have "S' = (\<lambda> x . S1 x \<inter> S2 x)" using assms by auto then show ?thesis using out.lifting_valid_pairwise_ext_axioms by auto qed locale merge_l_ortho' = fixes l1 :: "('a, 'b1, 'c :: {Mergeable, Pordps}) lifting" fixes S1 :: "'a \<Rightarrow> 'c1 set" fixes l2 :: "('a, 'b2, 'c) lifting" fixes S2 :: "'a \<Rightarrow> 'c2 set" fixes l3 :: "('a, 'b3, 'c) lifting" fixes S3 :: "'a \<Rightarrow> 'c3 set" locale merge_l_ortho = merge_l_ortho' + orth1_2 : l_ortho l1 S1 l2 S2 + orth1_3 : l_ortho l1 S1 l3 S3 + orth2_3 : l_ortho l2 S2 l3 S3 (* + valid3 : lifting_valid_weak_pres l3 S3*) (* TODO may need more validity assumptions. *) sublocale merge_l_ortho \<subseteq> out : l_ortho "merge_l l1 l2" "\<lambda> x . S1 x \<inter> S2 x" l3 S3 proof fix s have Supr : "is_sup {LBase l2 s, LBase l2 s} (LBase l2 s)" using sup_singleton[of "LBase l2 s"] by auto have Supr' : "is_sup {LBase l2 s, LBase l2 s} [^ LBase l2 s, LBase l2 s ^]" using bsup_sup[OF Supr bsup_spec] by auto then have "[^ LBase l2 s, LBase l2 s ^] = LBase l2 s" using is_sup_unique[OF Supr Supr'] by auto then show "LBase (merge_l l1 l2) s = LBase l3 s" using orth1_3.eq_base[of s] orth2_3.eq_base[of s] by(auto simp add: merge_l_def) next fix b :: 'c fix a1_2 :: "'b * 'd" fix a3 :: 'e fix s obtain a1 a2 where A1_2 : "a1_2 = (a1, a2)" by(cases a1_2; auto) show "LUpd (merge_l l1 l2) s a1_2 (LUpd l3 s a3 b) = LUpd l3 s a3 (LUpd (merge_l l1 l2) s a1_2 b)" using A1_2 by(auto simp add: merge_l_def orth1_3.compat orth2_3.compat) next fix b :: 'c fix a1_2 :: "'b * 'd" fix a3 :: 'e fix s obtain a1 a2 where A1_2 : "a1_2 = (a1, a2)" by(cases a1_2; auto) show "LOut l3 s (LUpd (merge_l l1 l2) s a1_2 b) = LOut l3 s b" using A1_2 by(auto simp add: merge_l_def orth1_3.put1_get2 orth2_3.put1_get2) next fix b :: 'c fix a1_2 :: "'b * 'd" fix a3 :: 'e fix s obtain a1 a2 where A1_2 : "a1_2 = (a1, a2)" by(cases a1_2; auto) show "LOut (merge_l l1 l2) s (LUpd l3 s a3 b) = LOut (merge_l l1 l2) s b" using A1_2 by(auto simp add: merge_l_def orth1_3.put2_get1 orth2_3.put2_get1) next fix b s fix a1_2 :: "'b * 'd" assume B: "b \<in> S3 s" obtain a1 a2 where A1_2 : "a1_2 = (a1, a2)" by(cases a1_2; auto) have Up2 : "(LUpd l2 s a2 b) \<in> S3 s" using orth2_3.put1_S2[OF B] by auto have Up1 : "(LUpd l1 s a1 (LUpd l2 s a2 b)) \<in> S3 s" using orth1_3.put1_S2[OF Up2] by auto then show "LUpd (merge_l l1 l2) s a1_2 b \<in> S3 s" using A1_2 by(auto simp add: merge_l_def) next fix b s fix a3 :: 'e assume B: "b \<in> S1 s \<inter> S2 s" then have B1 : "b \<in> S1 s" and B2 : "b \<in> S2 s" by auto have Conc1 : "LUpd l3 s a3 b \<in> S1 s" using orth1_3.put2_S1[OF B1] by auto have Conc2 : "LUpd l3 s a3 b \<in> S2 s" using orth2_3.put2_S1[OF B2] by auto 
show "LUpd l3 s a3 b \<in> S1 s \<inter> S2 s" using Conc1 Conc2 by auto qed lemma (in merge_l_ortho) ax : shows "l_ortho (merge_l l1 l2) (\<lambda> x . S1 x \<inter> S2 x) l3 S3" using out.l_ortho_axioms by auto lemma (in merge_l_ortho) ax_g : assumes H1_2 : "\<And> x . S1_2' x = S1 x \<inter> S2 x" assumes H3 : "\<And> x . S3' x = S3 x" shows "l_ortho (merge_l l1 l2) S1_2' l3 S3'" proof- have H1_2' : "S1_2' = (\<lambda> x . S1 x \<inter> S2 x)" using H1_2 by auto have H3' : "S3' = S3" using H3 by auto show ?thesis using ax unfolding H1_2' H3' by auto qed lemma (in merge_l_ortho) ax_g' : assumes H1 : "\<And> x . S1' x = S1 x" assumes H2 : "\<And> x . S2' x = S2 x" assumes H3 : "\<And> x . S3' x = S3 x" shows "l_ortho (merge_l l1 l2) (\<lambda> x . S1' x \<inter> S2' x) l3 S3'" proof- have H1' : "S1' = S1" using H1 by auto have H2' : "S2' = S2" using H2 by auto have H3' : "S3' = S3" using H3 by auto show ?thesis using ax unfolding H1' H2' H3' by auto qed lemma (in merge_l_ortho) ax_comm : shows "l_ortho l3 S3 (merge_l l1 l2) (\<lambda> x . S1 x \<inter> S2 x)" using out.comm.l_ortho_axioms by auto lemma (in merge_l_ortho) ax_g_comm : assumes H1_2 : "\<And> x . S1_2' x = S1 x \<inter> S2 x" assumes H3 : "\<And> x . S3' x = S3 x" shows "l_ortho l3 S3' (merge_l l1 l2) S1_2' " proof- have H1_2' : "S1_2' = (\<lambda> x . S1 x \<inter> S2 x)" using H1_2 by auto have H3' : "S3' = S3" using H3 by auto show ?thesis using ax_comm unfolding H1_2' H3' by auto qed lemma (in merge_l_ortho) ax_g'_comm : assumes H1 : "\<And> x . S1' x = S1 x" assumes H2 : "\<And> x . S2' x = S2 x" assumes H3 : "\<And> x . S3' x = S3 x" shows "l_ortho l3 S3' (merge_l l1 l2) (\<lambda> x . S1' x \<inter> S2' x) " proof- have H1' : "S1' = S1" using H1 by auto have H2' : "S2' = S2" using H2 by auto have H3' : "S3' = S3" using H3 by auto show ?thesis using ax_comm unfolding H1' H2' H3' by auto qed locale merge_l_ortho_base_ext = merge_l_ortho' + orth1_2 : l_ortho_base_ext l1 l2 + orth1_3 : l_ortho_base_ext l1 l3 + orth2_3 : l_ortho_base_ext l2 l3 (* locale merge_l_ortho_base = merge_l_ortho + orth1_2 : l_ortho_base l1 S1 l2 S2 + orth1_3 : l_ortho_base l1 S1 l3 S3 + orth2_3 : l_ortho_base l2 S2 l3 S3 *) sublocale merge_l_ortho_base_ext \<subseteq> out : l_ortho_base_ext "merge_l l1 l2" l3 proof fix s show "LBase (merge_l l1 l2) s = \<bottom>" using orth1_2.compat_base1 by(auto simp add: merge_l_def) next fix s show "LBase l3 s = \<bottom>" using orth1_3.compat_base2 by(auto) qed lemma (in merge_l_ortho_base_ext) ax : shows "l_ortho_base_ext (merge_l l1 l2) l3" using out.l_ortho_base_ext_axioms by auto lemma (in merge_l_ortho_base_ext) ax_comm : shows "l_ortho_base_ext l3 (merge_l l1 l2)" using out.comm.l_ortho_base_ext_axioms by auto locale merge_l_ortho_ok_ext = merge_l_ortho' + orth1_2 : l_ortho_ok_ext l1 l2 + orth1_3 : l_ortho_ok_ext l1 l3 + orth2_3 : l_ortho_ok_ext l2 l3 sublocale merge_l_ortho_ok_ext \<subseteq> out : l_ortho_ok_ext "merge_l l1 l2" l3 . (* locale merge_l_ortho_pres = merge_l_ortho + orth1_2 : l_ortho_pres l1 S1 l2 S2 + orth1_3 : l_ortho_pres l1 S1 l3 S3 + orth2_3 : l_ortho_pres l2 S2 l3 S3 + (* see if we can avoid this presonly assumptions - i don't think we can though. *) in1 : lifting_valid_pres l1 S1 + in2 : lifting_valid_pres l2 S2 + in3 : lifting_valid_pres l3 S3 sublocale merge_l_ortho_pres \<subseteq> l_ortho_pres "merge_l l1 l2" "\<lambda> x . 
S1 x \<inter> S2 x" l3 S3 proof fix a1_2 :: "'b * 'd" fix a3 s fix x :: 'c obtain a1 a2 where A1_2 : "a1_2 = (a1, a2)" by(cases a1_2; auto) have Merge_eq : "LUpd (merge_l l1 l2) s a1_2 (LUpd l3 s a3 x) = LUpd l1 s a1 (LUpd l2 s a2 (LUpd l3 s a3 x))" using A1_2 by(auto simp add: merge_l_def) have Sup23 : "is_sup {LUpd l2 s a2 x, LUpd l3 s a3 x} (LUpd l2 s a2 (LUpd l3 s a3 x))" using orth2_3.compat_pres_sup by auto have Sup123 : "is_sup {LUpd l1 s a1 (LUpd l3 s a3 x), LUpd l2 s a2 (LUpd l3 s a3 x)} (LUpd l1 s a1 (LUpd l2 s a2 (LUpd l3 s a3 x)))" using orth1_2.compat_pres_sup by auto have Sup13 : "is_sup {LUpd l1 s a1 x, LUpd l3 s a3 x} (LUpd l1 s a1 (LUpd l3 s a3 x))" using orth1_3.compat_pres_sup by auto have Eq_123 : "({LUpd l1 s a1 x, LUpd l3 s a3 x} \<union> {LUpd l2 s a2 x, LUpd l3 s a3 x}) = {LUpd l1 s a1 x, LUpd l2 s a2 x, LUpd l3 s a3 x}" by auto have Sup123' : "is_sup ({LUpd l1 s a1 x, LUpd l2 s a2 x, LUpd l3 s a3 x}) (LUpd l1 s a1 (LUpd l2 s a2 (LUpd l3 s a3 x)))" using sup_union1[OF Sup13 Sup23 Sup123] unfolding Eq_123 by auto have Sup12 : "is_sup {LUpd l1 s a1 x, LUpd l2 s a2 x} (LUpd l1 s a1 (LUpd l2 s a2 x))" using orth1_2.compat_pres_sup by auto have Sup3 : "is_sup {LUpd l3 s a3 x} (LUpd l3 s a3 x)" using sup_singleton by auto have Eq_123' : "{LUpd l1 s a1 x, LUpd l2 s a2 x, LUpd l3 s a3 x} = {LUpd l1 s a1 x, LUpd l2 s a2 x} \<union> {LUpd l3 s a3 x}" by auto have Sup123'' : "is_sup ({LUpd l1 s a1 x, LUpd l2 s a2 x} \<union> {LUpd l3 s a3 x}) (LUpd l1 s a1 (LUpd l2 s a2 (LUpd l3 s a3 x)))" using Sup123' unfolding Eq_123' by auto have Conc' : "is_sup {(LUpd l1 s a1 (LUpd l2 s a2 x)), LUpd l3 s a3 x} (LUpd l1 s a1 (LUpd l2 s a2 (LUpd l3 s a3 x)))" using sup_union2[OF Sup12 Sup3 Sup123''] by auto have Merge_eq' : "LUpd (merge_l l1 l2) s a1_2 x = LUpd l1 s a1 (LUpd l2 s a2 x)" using A1_2 by(auto simp add: merge_l_def) show "is_sup {LUpd (merge_l l1 l2) s a1_2 x, LUpd l3 s a3 x} (LUpd (merge_l l1 l2) s a1_2 (LUpd l3 s a3 x))" using Conc' unfolding Merge_eq Merge_eq' by simp qed *) end
=== Return to football part-time ===
lemma connected_ivt_component: fixes x::"'a::euclidean_space" shows "connected S \<Longrightarrow> x \<in> S \<Longrightarrow> y \<in> S \<Longrightarrow> x\<bullet>k \<le> a \<Longrightarrow> a \<le> y\<bullet>k \<Longrightarrow> (\<exists>z\<in>S. z\<bullet>k = a)"
! ************************************************************* ! rdatav.f read data for the data from the code > ver.32.3 ! 14 Jan. 2016 written by D.Kawata ! ************************************************************** !70******************************************************************* ! flago!=0 case is not done yet. ! subroutine rdata(step,ngt,ng,ndmt,ndm,ndm1t,ndm1,nst,ns & ,ai,tn,flagr) use gcdp_const use gcdp_system use gcdp_baryon use gcdp_dm include 'mpif.h' integer i,j,flagr integer step,ngt,ng,ndmt,ndm,nst,ns,ndm1t,ndm1,npt,np,npst integer ngr,ndmr,nsr,ndm1r,nagr,nadmr,nasr integer npr,nc,flagf,isend,pn integer nof,ifn,ip,cndmt,cngt,cnst,iv integer ndval,nival,nprocr,ndbhyd,ndbmet,ndbsf double precision ai,tn integer ng0,ns0 character filei*60 ! *** for work *** integer invali,invald,nval integer,allocatable :: idisp(:),jjlen(:) integer,allocatable :: npstp(:),npenp(:),jstas(:),jends(:) integer,allocatable :: tivr(:),tivs(:),lists(:) double precision,allocatable :: tdvr(:),tdvs(:) allocate(jstas(0:nprocs-1)) allocate(jends(0:nprocs-1)) allocate(idisp(0:nprocs-1)) allocate(jjlen(0:nprocs-1)) write(6,*) ' step,flagr=',step,flagr ! number of particle for each proc ng=0 ndm=0 ndm1=0 ns=0 np=0 npt=0 ngt=0 nst=0 ifn=0 npst=0 cndmt=0 cngt=0 cnst=0 70 flagf=0 if(myrank.eq.0) then write(filei,'(a21,i6.6,a1,i4.4)') '../output/data/bdvals',step,'n',ifn write(6,*) ' reading ',filei open(50,file=filei,status='old',form='unformatted',err=90) read(50) npt,ndmt,ndm1t,ai,tn read(50) nprocr,nof,invali,invald if(ifn.eq.0) then write(6,*) ' npt,ndmt,ndm1t,nprocr,nof=',npt,ndmt,ndm1t,nprocr,nof write(6,*) ' nvali,nvald=',invali,invald endif goto 91 90 flagf=1 endif 91 call MPI_BCAST(flagf,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) if(flagf.ne.0) then ! no DM data goto 92 endif nval=6 allocate(tivr(0:nval-1)) if(myrank.eq.0) then tivr(0)=ndmt tivr(1)=ndm1t tivr(2)=nprocr tivr(3)=nof tivr(4)=invali tivr(5)=invald endif call MPI_BCAST(tivr,6,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) if(myrank.ne.0) then ndmt=tivr(0) ndm1t=tivr(1) nprocr=tivr(2) nof=tivr(3) invali=tivr(4) invald=tivr(5) endif deallocate(tivr) ! nprocr: number of proc used in simualtion. if(allocated(npstp)) then deallocate(npstp) deallocate(npenp) endif allocate(npstp(0:nprocr-1)) allocate(npenp(0:nprocr-1)) ! number of particles for each proc if(myrank.eq.0) then do i=0,nprocr-1 npstp(i)=0 npenp(i)=0 enddo endif ! *** get start and end id particles for each core recieve *** do i=0,nprocs-1 call para_range(0,ndmt-1,nprocs,i,jsta,jend) jstas(i)=jsta jends(i)=jend enddo if(myrank.eq.0) then do ip=ifn,nprocr-1,nof read(50) ngr,ndmr,nsr,ndm1r,nagr,nadmr,nasr ! write(6,*) ifn,ip,' ng,ndm,ns=',ngr,ndmr,nsr,cndmt cndmt=cndmt+ndmr npstp(ip)=npst npenp(ip)=npst+ndmr-1 npst=npenp(ip)+1 enddo endif call MPI_BCAST(npstp,nprocr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) call MPI_BCAST(npenp,nprocr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) nc=ndm do ip=ifn,nprocr-1,nof ! *** set idisp and jjlen *** do i=0,nprocs-1 idisp(i)=0 jjlen(i)=0 enddo ! *** count number of particles read for each proc do i=npstp(ip),npenp(ip) do j=0,nprocs-1 if(i.ge.jstas(j).and.i.le.jends(j)) then if(jjlen(j).eq.0) then idisp(j)=isend endif jjlen(j)=jjlen(j)+1 endif enddo enddo nc=nc+jjlen(myrank) enddo ndm=ndm+nc ifn=ifn+1 ! if(myrank.eq.0) then ! write(6,*) ifn,'file, cndmt,ndm,nc,jjlen=',cndmt,ndm,nc,jjlen(myrank) ! endif close(50) if(ifn.lt.nof) then goto 70 endif call allocate_dm(ndm) ! if(myrank.eq.0) then ! write(6,*) ' for counting: cndmt,ndm=',cndmt,ndm,nc,jjlen(myrank) ! endif ! 
reset ndm and etc. ndm=0 cndmt=0 npst=0 ifn=0 72 if(myrank.eq.0) then write(filei,'(a21,i6.6,a1,i4.4)') '../output/data/bdvals',step,'n',ifn ! write(6,*) ' reading ',filei open(50,file=filei,status='old',form='unformatted') read(50) npt,ndmt,ndm1t,ai,tn read(50) nprocr,nof,invali,invald endif nval=6 allocate(tivr(0:nval-1)) if(myrank.eq.0) then tivr(0)=ndmt tivr(1)=ndm1t tivr(2)=nprocr tivr(3)=nof tivr(4)=invali tivr(5)=invald endif call MPI_BCAST(tivr,6,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) if(myrank.ne.0) then ndmt=tivr(0) ndm1t=tivr(1) nprocr=tivr(2) nof=tivr(3) invali=tivr(4) invald=tivr(5) endif deallocate(tivr) ! nprocr: number of proc used in simualtion. if(allocated(npstp)) then deallocate(npstp) deallocate(npenp) endif allocate(npstp(0:nprocr-1)) allocate(npenp(0:nprocr-1)) ! number of particles for each proc if(myrank.eq.0) then do i=0,nprocr-1 npstp(i)=0 npenp(i)=0 enddo endif ! *** get start and end id particles for each core recieve *** do i=0,nprocs-1 call para_range(0,ndmt-1,nprocs,i,jsta,jend) jstas(i)=jsta jends(i)=jend enddo if(myrank.eq.0) then do ip=ifn,nprocr-1,nof read(50) ngr,ndmr,nsr,ndm1r,nagr,nadmr,nasr ! write(6,*) ifn,ip,' ng,ndm,ns=',ngr,ndmr,nsr,cndmt cndmt=cndmt+ndmr npstp(ip)=npst npenp(ip)=npst+ndmr-1 npst=npenp(ip)+1 enddo endif call MPI_BCAST(npstp,nprocr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) call MPI_BCAST(npenp,nprocr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) ! *** reading integer data *** do iv=1,invali nc=ndm do ip=ifn,nprocr-1,nof npr=npenp(ip)-npstp(ip)+1 allocate(tivs(0:npr-1)) if(myrank.eq.0) then read(50) (tivs(i),i=0,npr-1) endif ! *** set idisp and jjlen *** do i=0,nprocs-1 idisp(i)=0 jjlen(i)=0 enddo ! *** count number of particles read for each proc isend=0 do i=npstp(ip),npenp(ip) do j=0,nprocs-1 if(i.ge.jstas(j).and.i.le.jends(j)) then if(jjlen(j).eq.0) then idisp(j)=isend endif jjlen(j)=jjlen(j)+1 endif enddo isend=isend+1 enddo npr=jjlen(myrank) allocate(tivr(0:npr-1)) call MPI_SCATTERV(tivs,jjlen,idisp,MPI_INTEGER & ,tivr,npr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) if(iv.eq.1) then do i=0,jjlen(myrank)-1 id_dm(i+nc)=tivr(i) enddo else if(iv.eq.2) then do i=0,jjlen(myrank)-1 list_adm(i+nc)=tivr(i) enddo endif nc=nc+jjlen(myrank) deallocate(tivs) deallocate(tivr) enddo enddo ! *** reading double precision data *** do iv=1,invald nc=ndm do ip=ifn,nprocr-1,nof npr=npenp(ip)-npstp(ip)+1 allocate(tdvs(0:npr-1)) if(myrank.eq.0) then read(50) (tdvs(i),i=0,npr-1) endif ! *** set idisp and jjlen *** do i=0,nprocs-1 idisp(i)=0 jjlen(i)=0 enddo ! *** count number of particles read for each proc isend=0 do i=npstp(ip),npenp(ip) do j=0,nprocs-1 if(i.ge.jstas(j).and.i.le.jends(j)) then if(jjlen(j).eq.0) then idisp(j)=isend endif jjlen(j)=jjlen(j)+1 endif enddo isend=isend+1 enddo ! write(6,*) ' idisp,jjlen=',idisp(0),jjlen(0) & ! 
,jstas(0),jends(0),npstp(ip),npenp(ip),ip
npr=jjlen(myrank)
allocate(tdvr(0:npr-1))
call MPI_SCATTERV(tdvs,jjlen,idisp,MPI_DOUBLE_PRECISION &
,tdvr,npr,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
if(iv.eq.1) then
do i=0,jjlen(myrank)-1
x_dm(i+nc)=tdvr(i)
enddo
else if(iv.eq.2) then
do i=0,jjlen(myrank)-1
y_dm(i+nc)=tdvr(i)
enddo
else if(iv.eq.3) then
do i=0,jjlen(myrank)-1
z_dm(i+nc)=tdvr(i)
enddo
else if(iv.eq.4) then
do i=0,jjlen(myrank)-1
vx_dm(i+nc)=tdvr(i)
enddo
else if(iv.eq.5) then
do i=0,jjlen(myrank)-1
vy_dm(i+nc)=tdvr(i)
enddo
else if(iv.eq.6) then
do i=0,jjlen(myrank)-1
vz_dm(i+nc)=tdvr(i)
enddo
else if(iv.eq.7) then
do i=0,jjlen(myrank)-1
m_dm(i+nc)=tdvr(i)
enddo
else if(iv.eq.8) then
do i=0,jjlen(myrank)-1
rho_dm(i+nc)=tdvr(i)
enddo
else if(iv.eq.9) then
do i=0,jjlen(myrank)-1
h_dm(i+nc)=tdvr(i)
enddo
endif
nc=nc+jjlen(myrank)
deallocate(tdvs)
deallocate(tdvr)
enddo
enddo
ndm=ndm+nc
ifn=ifn+1
! if(myrank.eq.0) then
! write(6,*) ' cndmt,ndm=',cndmt,ndm,nc,jjlen(myrank)
! endif
close(50)
if(ifn.lt.nof) then
goto 72
endif
if(myrank.eq.0.and.cndmt.ne.ndmt) then
write(6,*) ' Number of total DM particles is inconsistent'
write(6,*) ' ndmt (read)=',cndmt
write(6,*) ' ndmt (file header) =',ndmt
call MPI_FINALIZE(ierr)
stop
endif
if(ndm.ne.jends(myrank)-jstas(myrank)+1) then
if(myrank.eq.0) then
write(6,*) ' NDM particle in each proc is inconsistent'
write(6,*) ' ndm (read)=',ndm
write(6,*) ' ndm (from npt)=',jends(myrank)-jstas(myrank)+1
endif
call MPI_FINALIZE(ierr)
stop
endif
! *** read baryon data ***
92 ifn=0
ngt=0
npst=0
np=0
! count baryon particles in each core
71 flagf=0
if(myrank.eq.0) then
write(filei,'(a21,i6.6,a1,i4.4)') &
'../output/data/bbvals',step,'n',ifn
open(50,file=filei,status='old',form='unformatted',err=93)
read(50) npt,ndmt,ndm1t,ai,tn
read(50) nprocr,nof,invali,invald
if(ifn.eq.0) then
write(6,*) ' bbvals*: nvali,nvald=',invali,invald
endif
goto 94
93 flagf=1
endif
94 call MPI_BCAST(flagf,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
if(flagf.ne.0) then
! no baryon data
goto 95
endif
allocate(tivr(0:5))
if(myrank.eq.0) then
tivr(0)=npt
tivr(1)=ndmt
tivr(2)=nprocr
tivr(3)=nof
tivr(4)=invali
tivr(5)=invald
endif
call MPI_BCAST(tivr,6,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
if(myrank.ne.0) then
npt=tivr(0)
ndmt=tivr(1)
nprocr=tivr(2)
nof=tivr(3)
invali=tivr(4)
invald=tivr(5)
endif
deallocate(tivr)
! nprocr: number of proc used in simulation.
if(allocated(npstp)) then
deallocate(npstp)
deallocate(npenp)
endif
allocate(npstp(0:nprocr-1))
allocate(npenp(0:nprocr-1))
! number of particles for each proc
if(myrank.eq.0) then
do i=0,nprocr-1
npstp(i)=0
npenp(i)=0
enddo
endif
! *** get start and end id particles for each core receive ***
do i=0,nprocs-1
call para_range(0,npt-1,nprocs,i,jsta,jend)
jstas(i)=jsta
jends(i)=jend
enddo
if(myrank.eq.0) then
do ip=ifn,nprocr-1,nof
read(50) ngr,ndmr,nsr,ndm1r,nagr,nadmr,nasr
! write(6,*) ifn,ip,' ng,ndm,ns=',ng,ndm,ns
npstp(ip)=npst
npenp(ip)=npst+ngr+nsr-1
npst=npenp(ip)+1
enddo
endif
call MPI_BCAST(npstp,nprocr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
call MPI_BCAST(npenp,nprocr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
nc=np
do ip=ifn,nprocr-1,nof
! *** set idisp and jjlen ***
do i=0,nprocs-1
idisp(i)=0
jjlen(i)=0
enddo
! *** count number of particles read for each proc
isend=0
do i=npstp(ip),npenp(ip)
do j=0,nprocs-1
if(i.ge.jstas(j).and.i.le.jends(j)) then
if(jjlen(j).eq.0) then
idisp(j)=isend
endif
jjlen(j)=jjlen(j)+1
endif
enddo
isend=isend+1
enddo
nc=nc+jjlen(myrank)
enddo
np=np+nc
ifn=ifn+1
close(50)
if(ifn.lt.nof) then
goto 71
endif
call allocate_baryon(np)
! 
if(myrank.eq.0) then
! write(6,*) ' for counting np(rank=0)=',np,myrank
! endif
ifn=0
ngt=0
npst=0
np=0
73 if(myrank.eq.0) then
write(filei,'(a21,i6.6,a1,i4.4)') &
'../output/data/bbvals',step,'n',ifn
open(50,file=filei,status='old',form='unformatted')
read(50) npt,ndmt,ndm1t,ai,tn
read(50) nprocr,nof,invali,invald
if(ifn.eq.0) then
write(6,*) ' bbvals*: nvali,nvald=',invali,invald
endif
endif
if(flagr.gt.0) then
! reading extra data
write(filei,'(a21,i6.6,a1,i4.4)') &
'../output/data/bbhyds',step,'n',ifn
open(51,file=filei,status='old',form='unformatted',err=96)
read(51) npt,ndmt,ndm1t,ai,tn
read(51) nprocr,nof,ndbhyd
write(filei,'(a21,i6.6,a1,i4.4)') &
'../output/data/bbmets',step,'n',ifn
open(52,file=filei,status='old',form='unformatted',err=96)
read(52) npt,ndmt,ndm1t,ai,tn
read(52) nprocr,nof,ndbmet
write(filei,'(a21,i6.6,a1,i4.4)') &
'../output/data/bbsfis',step,'n',ifn
open(54,file=filei,status='old',form='unformatted',err=96)
read(54) npt,ndmt,ndm1t,ai,tn
read(54) nprocr,nof,ndbsf
else
ndbhyd=0
ndbmet=0
ndbsf=0
endif
96 allocate(tivr(0:6))
if(myrank.eq.0) then
tivr(0)=npt
tivr(1)=ndmt
tivr(2)=nprocr
tivr(3)=nof
tivr(4)=invali
tivr(5)=invald
tivr(6)=ndbhyd
endif
call MPI_BCAST(tivr,7,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
if(myrank.ne.0) then
npt=tivr(0)
ndmt=tivr(1)
nprocr=tivr(2)
nof=tivr(3)
invali=tivr(4)
invald=tivr(5)
ndbhyd=tivr(6)
endif
deallocate(tivr)
! nprocr: number of proc used in simulation.
if(allocated(npstp)) then
deallocate(npstp)
deallocate(npenp)
endif
allocate(npstp(0:nprocr-1))
allocate(npenp(0:nprocr-1))
! number of particles for each proc
if(myrank.eq.0) then
do i=0,nprocr-1
npstp(i)=0
npenp(i)=0
enddo
endif
! *** get start and end id particles for each core receive ***
do i=0,nprocs-1
call para_range(0,npt-1,nprocs,i,jsta,jend)
jstas(i)=jsta
jends(i)=jend
enddo
if(myrank.eq.0) then
do ip=ifn,nprocr-1,nof
read(50) ngr,ndmr,nsr,ndm1r,nagr,nadmr,nasr
npstp(ip)=npst
npenp(ip)=npst+ngr+nsr-1
npst=npenp(ip)+1
enddo
endif
call MPI_BCAST(npstp,nprocr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
call MPI_BCAST(npenp,nprocr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
! *** initialization ***
do i=0,np-1
flagfd_p(i)=0
enddo
do iv=1,invali
nc=np
do ip=ifn,nprocr-1,nof
npr=npenp(ip)-npstp(ip)+1
allocate(tivs(0:npr-1))
if(myrank.eq.0) then
read(50) (tivs(i),i=0,npr-1)
endif
! *** set idisp and jjlen ***
do i=0,nprocs-1
idisp(i)=0
jjlen(i)=0
enddo
! *** count number of particles read for each proc
isend=0
do i=npstp(ip),npenp(ip)
do j=0,nprocs-1
if(i.ge.jstas(j).and.i.le.jends(j)) then
if(jjlen(j).eq.0) then
idisp(j)=isend
endif
jjlen(j)=jjlen(j)+1
endif
enddo
isend=isend+1
enddo
npr=jjlen(myrank)
allocate(tivr(0:npr-1))
call MPI_SCATTERV(tivs,jjlen,idisp,MPI_INTEGER &
,tivr,npr,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
if(iv.eq.1) then
do i=0,jjlen(myrank)-1
id_p(i+nc)=tivr(i)
enddo
else if(iv.eq.2) then
do i=0,jjlen(myrank)-1
flagc_p(i+nc)=tivr(i)
enddo
else if(iv.eq.3) then
do i=0,jjlen(myrank)-1
list_ap(i+nc)=tivr(i)
enddo
else if(iv.eq.4) then
do i=0,jjlen(myrank)-1
flagfd_p(i+nc)=tivr(i)
enddo
endif
nc=nc+jjlen(myrank)
deallocate(tivs)
deallocate(tivr)
enddo
enddo
! *** reading double precision data
do iv=1,invald
nc=np
do ip=ifn,nprocr-1,nof
npr=npenp(ip)-npstp(ip)+1
allocate(tdvs(0:npr-1))
if(myrank.eq.0) then
read(50) (tdvs(i),i=0,npr-1)
endif
! *** set idisp and jjlen ***
do i=0,nprocs-1
idisp(i)=0
jjlen(i)=0
enddo
! 
*** count number of particles read for each proc isend=0 do i=npstp(ip),npenp(ip) do j=0,nprocs-1 if(i.ge.jstas(j).and.i.le.jends(j)) then if(jjlen(j).eq.0) then idisp(j)=isend endif jjlen(j)=jjlen(j)+1 endif enddo isend=isend+1 enddo npr=jjlen(myrank) allocate(tdvr(0:npr-1)) call MPI_SCATTERV(tdvs,jjlen,idisp,MPI_DOUBLE_PRECISION & ,tdvr,npr,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr) if(iv.eq.1) then do i=0,jjlen(myrank)-1 x_p(i+nc)=tdvr(i) enddo else if(iv.eq.2) then do i=0,jjlen(myrank)-1 y_p(i+nc)=tdvr(i) enddo else if(iv.eq.3) then do i=0,jjlen(myrank)-1 z_p(i+nc)=tdvr(i) enddo else if(iv.eq.4) then do i=0,jjlen(myrank)-1 vx_p(i+nc)=tdvr(i) enddo else if(iv.eq.5) then do i=0,jjlen(myrank)-1 vy_p(i+nc)=tdvr(i) enddo else if(iv.eq.6) then do i=0,jjlen(myrank)-1 vz_p(i+nc)=tdvr(i) enddo else if(iv.eq.7) then do i=0,jjlen(myrank)-1 m_p(i+nc)=tdvr(i) enddo else if(iv.eq.8) then do i=0,jjlen(myrank)-1 rho_p(i+nc)=tdvr(i) enddo else if(iv.eq.9) then do i=0,jjlen(myrank)-1 u_p(i+nc)=tdvr(i) enddo endif nc=nc+jjlen(myrank) deallocate(tdvs) deallocate(tdvr) enddo enddo if(flagr.gt.0) then ! *** hyd vals do iv=1,ndbhyd nc=np do ip=ifn,nprocr-1,nof npr=npenp(ip)-npstp(ip)+1 allocate(tdvs(0:npr-1)) if(myrank.eq.0) then read(51) (tdvs(i),i=0,npr-1) endif ! *** set idisp and jjlen *** do i=0,nprocs-1 idisp(i)=0 jjlen(i)=0 enddo ! *** count number of particles read for each proc isend=0 do i=npstp(ip),npenp(ip) do j=0,nprocs-1 if(i.ge.jstas(j).and.i.le.jends(j)) then if(jjlen(j).eq.0) then idisp(j)=isend endif jjlen(j)=jjlen(j)+1 endif enddo isend=isend+1 enddo npr=jjlen(myrank) allocate(tdvr(0:npr-1)) call MPI_SCATTERV(tdvs,jjlen,idisp,MPI_DOUBLE_PRECISION & ,tdvr,npr,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr) if(iv.eq.1) then do i=0,jjlen(myrank)-1 h_p(i+nc)=tdvr(i) enddo else if(iv.eq.2) then do i=0,jjlen(myrank)-1 div_v_p(i+nc)=tdvr(i) enddo else if(iv.eq.3) then do i=0,jjlen(myrank)-1 alpv_p(i+nc)=tdvr(i) enddo else if(iv.eq.4) then do i=0,jjlen(myrank)-1 alpu_p(i+nc)=tdvr(i) enddo else if(iv.eq.5) then do i=0,jjlen(myrank)-1 myu_p(i+nc)=tdvr(i) enddo endif nc=nc+jjlen(myrank) deallocate(tdvs) deallocate(tdvr) enddo enddo ! metal values do iv=1,ndbmet nc=np do ip=ifn,nprocr-1,nof npr=npenp(ip)-npstp(ip)+1 allocate(tdvs(0:npr-1)) if(myrank.eq.0) then read(52) (tdvs(i),i=0,npr-1) endif ! *** set idisp and jjlen *** do i=0,nprocs-1 idisp(i)=0 jjlen(i)=0 enddo ! *** count number of particles read for each proc isend=0 do i=npstp(ip),npenp(ip) do j=0,nprocs-1 if(i.ge.jstas(j).and.i.le.jends(j)) then if(jjlen(j).eq.0) then idisp(j)=isend endif jjlen(j)=jjlen(j)+1 endif enddo isend=isend+1 enddo npr=jjlen(myrank) allocate(tdvr(0:npr-1)) call MPI_SCATTERV(tdvs,jjlen,idisp,MPI_DOUBLE_PRECISION & ,tdvr,npr,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr) if(iv.eq.1) then do i=0,jjlen(myrank)-1 mzHe_p(i+nc)=tdvr(i) enddo else if(iv.eq.2) then do i=0,jjlen(myrank)-1 mzZ_p(i+nc)=tdvr(i) enddo else if(iv.eq.3) then do i=0,jjlen(myrank)-1 mzC_p(i+nc)=tdvr(i) enddo else if(iv.eq.4) then do i=0,jjlen(myrank)-1 mzN_p(i+nc)=tdvr(i) enddo else if(iv.eq.5) then do i=0,jjlen(myrank)-1 mzO_p(i+nc)=tdvr(i) enddo else if(iv.eq.6) then do i=0,jjlen(myrank)-1 mzNe_p(i+nc)=tdvr(i) enddo else if(iv.eq.7) then do i=0,jjlen(myrank)-1 mzMg_p(i+nc)=tdvr(i) enddo else if(iv.eq.8) then do i=0,jjlen(myrank)-1 mzSi_p(i+nc)=tdvr(i) enddo else if(iv.eq.9) then do i=0,jjlen(myrank)-1 mzFe_p(i+nc)=tdvr(i) enddo endif nc=nc+jjlen(myrank) deallocate(tdvs) deallocate(tdvr) enddo enddo ! 
star data
do iv=1,ndbsf
nc=np
do ip=ifn,nprocr-1,nof
npr=npenp(ip)-npstp(ip)+1
allocate(tdvs(0:npr-1))
if(myrank.eq.0) then
read(54) (tdvs(i),i=0,npr-1)
endif
! *** set idisp and jjlen ***
do i=0,nprocs-1
idisp(i)=0
jjlen(i)=0
enddo
! *** count number of particles read for each proc
isend=0
do i=npstp(ip),npenp(ip)
do j=0,nprocs-1
if(i.ge.jstas(j).and.i.le.jends(j)) then
if(jjlen(j).eq.0) then
idisp(j)=isend
endif
jjlen(j)=jjlen(j)+1
endif
enddo
isend=isend+1
enddo
npr=jjlen(myrank)
allocate(tdvr(0:npr-1))
call MPI_SCATTERV(tdvs,jjlen,idisp,MPI_DOUBLE_PRECISION &
,tdvr,npr,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
if(iv.eq.1) then
do i=0,jjlen(myrank)-1
ts_p(i+nc)=tdvr(i)
enddo
endif
nc=nc+jjlen(myrank)
deallocate(tdvs)
deallocate(tdvr)
enddo
enddo
close(51)
close(52)
close(54)
endif
! update np and ifn
np=np+nc
ifn=ifn+1
close(50)
if(ifn.lt.nof) then
goto 73
endif
if(np.ne.jends(myrank)-jstas(myrank)+1) then
if(myrank.eq.0) then
write(6,*) ' Nbaryon particle in each proc is inconsistent',myrank
write(6,*) ' np (read)=',np,myrank
write(6,*) ' np (from npt)=',jends(myrank)-jstas(myrank)+1,myrank
endif
call MPI_FINALIZE(ierr)
stop
endif
! *** re-construct list_ap ***
95 cnst=0
cngt=0
allocate(lists(0:np-1))
do i=0,np-1
if(flagc_p(i).le.0) then
list_ap(cngt)=i
cngt=cngt+1
else
lists(cnst)=i
cnst=cnst+1
endif
enddo
ng=cngt
ns=cnst
do i=0,ns-1
list_ap(ng+i)=lists(i)
enddo
deallocate(lists)
! *** get total number of star and gas particles
allocate(tivs(0:1))
allocate(tivr(0:1))
tivs(0)=ng
tivs(1)=ns
call MPI_ALLREDUCE(tivs,tivr,2,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
ngt=tivr(0)
nst=tivr(1)
deallocate(tivs)
deallocate(tivr)
if(myrank.eq.0) then
write(6,*) ' ngt,nst,npt=',ngt,nst,npt
endif
! write(6,*) ' myrank,ng,ns,np=',myrank,ng,ns,np
! *** re-construct list_adm ***
allocate(lists(0:ndm-1))
cnst=0
cngt=0
do i=0,ndm-1
if(id_dm(i).lt.ndm1t) then
list_adm(cngt)=i
cngt=cngt+1
else
lists(cnst)=i
cnst=cnst+1
endif
enddo
ndm1=cngt
do i=0,ndm-ndm1-1
list_adm(ndm1+i)=lists(i)
enddo
! write(6,*) ' myrank,ndm,ndm1=',myrank,ndm,ndm1,ndm1t
deallocate(lists)
! *** send ai,tn
allocate(tdvr(0:1))
tdvr(0)=ai
tdvr(1)=tn
call MPI_BCAST(tdvr,2,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,ierr)
ai=tdvr(0)
tn=tdvr(1)
deallocate(tdvr)
! write(filei,'(a3,i3.3)') 'hdm',myrank
! open(60,file=filei,status='unknown')
! do i=0,ndm1-1
! pn=list_adm(i)
! write(60,'(4(1pE13.5),I10)') x_dm(pn),y_dm(pn),z_dm(pn),m_dm(pn),id_dm(pn)
! enddo
! close(60)
! write(filei,'(a3,i3.3)') 'ldm',myrank
! open(60,file=filei,status='unknown')
! do i=ndm1,ndm-1
! pn=list_adm(i)
! write(60,'(4(1pE13.5),I10)') x_dm(pn),y_dm(pn),z_dm(pn),m_dm(pn),id_dm(pn)
! enddo
! close(60)
! write(filei,'(a3,i3.3)') 'gas',myrank
! open(60,file=filei,status='unknown')
! do i=0,ng-1
! pn=list_ap(i)
! write(60,'(3(1pE13.5),I10)') x_p(pn),y_p(pn),z_p(pn),id_p(pn)
! enddo
! close(60)
! write(filei,'(a4,i3.3)') 'star',myrank
! open(60,file=filei,status='unknown')
! do i=0,ns-1
! pn=list_ap(i+ng)
! write(60,'(3(1pE13.5),I10)') x_p(pn),y_p(pn),z_p(pn),id_p(pn)
! enddo
! close(60)
deallocate(npstp)
deallocate(npenp)
deallocate(jstas)
deallocate(jends)
deallocate(idisp)
deallocate(jjlen)
end subroutine
# Creating seismic synthetics with Devito and GemPy

In this tutorial, we aim to provide a start-to-finish guide for creating synthetic seismic models and datasets using the open-source packages Devito and GemPy. We will highlight the steps involved, from creating a 3D geological model, to forward modelling, to visualisation.

## Goals

* Introduce users to creating simple geological models with GemPy
* Illustrate how to include physical properties in the geological model and how to convert this into a format usable for seismic modelling with Devito
* Set up a Devito `Operator` to solve the 2nd order acoustic wave equation, including sparse sources and receivers
* Forward propagate a shot through the model, and record a synthetic gather using receivers
* Visualise the wavefield in 3D using slicing in PyVista

## Structure

This tutorial will be presented in three sections. The first section will focus on creating the seismic synthetic, the second on model setup and the Devito API, and the third will cover running the seismic wave model and visualising the wavefield.

Throughout the tutorial, several members of the team will be available in the `#t21-thurs-devito` channel of the [Software Underground Slack](https://softwareunderground.org/slack). For in-depth GemPy questions, the `#gempy` channel is another option. For Devito-centric questions, try the `#devito` channel or the [Devito Slack](https://devitocodes.slack.com/).

## Motivation

Synthetic seismic models are crucial for testing and benchmarking numerous seismic modelling and imaging applications. Whilst several seismic synthetics are commonplace within the community, for example the ubiquitous Marmousi model or the SEG/EAGE Salt and Overthrust Models, the range of freely-available synthetics is relatively small, with other purpose-built models carrying a steep cost. Furthermore, currently-available seismic synthetics may not bear sufficient resemblance to the desired target, or may be lacking certain material properties required for a particular wave equation. This has implications for both accessibility and reproducibility of studies, as benchmarks used may not be readily available for others to verify the work. By using freely-available tools to create synthetic models and datasets, we can create open, sharable, customisable models for a wide range of applications.

## Side Note (Google Colab only)

If running on Google Colab, you will need to run the following cells to prevent 3D renders crashing your notebook.

```python
"""
!apt-get update
!apt-get -qq install xvfb
!pip install pyvirtualdisplay
"""
```

```python
"""
from pyvirtualdisplay import Display
display = Display(visible=0, size=(600, 400))
display.start()
"""
```

## GemPy installation

[Source](https://docs.gempy.org/examples/real/Greenstone.html#sphx-glr-download-examples-real-greenstone-py): An example of a detailed geological model, taken from the Greenstone GemPy example.

The synthetics which we will be building in this tutorial will be made with the use of GemPy, an open-source 3D geological modelling package for Python. If not already installed, we will need to install it. If issues are encountered whilst installing GemPy into a `conda` environment using `pip`, you can alternatively create a python `venv` and install into this environment using `pip` as per usual. Note that it will also be necessary to install an `ipykernel` in this environment to run this notebook.
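If you do take the `venv` route, here is a minimal sketch (the environment name is arbitrary and the commands assume a Unix-like shell; left disabled like the Colab cells above):

```python
"""
!python3 -m venv gempy-env
!gempy-env/bin/pip install gempy ipykernel
!gempy-env/bin/python -m ipykernel install --user --name gempy-env
"""
```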
From here, we can install GemPy:

```python
try:
    # Import gempy
    import gempy as gp
except ModuleNotFoundError:
    # Install gempy
    ! pip install gempy
    # Import gempy
    import gempy as gp

# Check jinja2 is installed for colour-coding of geological units
try:
    import jinja2
except ModuleNotFoundError:
    ! pip install jinja2
    import jinja2
```

    No module named 'osgeo'

## Model overview

The simple geological model which we will be building is designed to evoke carbon-capture and storage (CCS) scenarios. The model consists of a CO2 lens in a sandstone reservoir, with a shale layer in the overarching anticline providing the structural trap. This is then overlain by a layer of sediment, with water at the top of the model.

Geological strata and their respective velocities are based on values detailed in [Queißer et al. 2013](https://doi.org/10.1190/geo2012-0216.1), a paper imaging the P-wave velocity anomaly generated by CO2 injection into the Utsira Sand at Sleipner in the North Sea using FWI. The model we will create features a similar shale trap/permeable sandstone reservoir structure, albeit with a small number of thick layers rather than the thin interbedding, to limit model complexity for this tutorial. Further inspiration was taken from [Chadwick et al. 2004](https://doi.org/10.1016/j.energy.2004.03.071), a paper characterizing the Utsira Sand reservoir based on 2D seismic lines and well logs.

## Creating our geological model:

To begin, alongside GemPy, we need to import some auxiliary modules:

```python
# Import auxiliary modules
import numpy as np

%matplotlib inline
```

### Initial setup

We will now set up a GemPy `Model` object. This encapsulates the grid onto which the scalar fields associated with various surfaces are interpolated. Note that the extent is slightly greater than it will be for our Devito model (an extra half a grid spacing is added to each side).

A comparison of the cell-centered vs node-centered conventions of GemPy and Devito respectively, along with the differences in how they measure extent.

It is necessary to account for this to ensure that the two grids are co-located. As we can see in the figure above, this is due to differences in the way in which grids are defined in each package, and accounting for it ensures that the model is not stretched and distorted when transitioning between the two, and that the grids are correctly aligned.

```python
# Set overarching model parameters
extent = (-5., 1005., -5., 1005., -1005., 5.)
shape = (101, 101, 101)

geo_model = gp.create_model('transform-2021')
geo_model = gp.init_data(geo_model, extent=extent, resolution=shape)
```

    Active grids: ['regular']

We now need to set up Theano for our model (used by GemPy for interpolation of model properties). Bear in mind that this may take some time to run.

```python
gp.set_interpolator(geo_model, output=['geology'], theano_optimizer='fast_compile')
```

    Setting kriging parameters to their default values.
    Compiling theano function...
    Level of Optimization: fast_compile
    Device: cpu
    Precision: float64
    Number of faults: 0
    Compilation Done!
    Kriging values:
                        values
    range               1749.371316
    $C_o$               72864.285714
    drift equations     [3]

    <gempy.core.interpolator.InterpolatorModel at 0x7fc28874a110>

### Adding geological units

As the top CO2 surface is truncated by the upper shale layer, we will need to separate the geological strata into two GemPy `Series`. Each `Series` object, as the name suggests, is intended to correspond to a geological unit, and series can be made to onlap, erode, etc. one another.
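How a series interacts with those below it is controlled by its bottom relation; the default, 'Erosion', truncates everything older, and this is what we will rely on below. The relations set on the model can be inspected at any time (the same `BottomRelation` column appears in the series tables later in this notebook):

```python
# Inspect the series defined so far; the BottomRelation column controls
# whether each series erodes (truncates) or onlaps the series below it
geo_model.series
```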
Whilst in practice, the top CO2 contact is not an erosive surface, treating it as such is the most straightforward way to create the desired truncation, as the shale trap is unconformable on the CO2 lens and reservoir sandstone.

A default series is included in the model. As such, rather than creating a new series, we will simply rename it to 'Lower'. As you can imagine, this is going to be used to contain the lower geological units, these being the lower shale, reservoir sandstone, and CO2 lens.

```python
geo_model.rename_series({'Default series': 'Lower'})
```

And now add our surfaces:

```python
geo_model.add_surfaces(['co2', 'sands', 'lowershale'])
```

|   | surface    | series | order_surfaces | color   | id |
|---|------------|--------|----------------|---------|----|
| 0 | co2        | Lower  | 1              | #015482 | 1  |
| 1 | sands      | Lower  | 2              | #9f0052 | 2  |
| 2 | lowershale | Lower  | 3              | #ffbe00 | 3  |

We will now set some points for the base of the sands and CO2. The lower shale is considered the basement, meaning that its base does not need to be defined and it will extend to the bottom of the model. Alongside these points, we will need to define an orientation for the surface. To minimise repetition, we will define a function to loop over a list of points and add each to the surface.
```python
def create_surface(model, points, surface):
    """Add a list of points to a surface in a model"""
    xyz = ('X', 'Y', 'Z')
    for point in points:
        kwargs = {**dict(zip(xyz, point)), 'surface': surface}
        model.add_surface_points(**kwargs)

# The points defining the base of the sand layer
sand_points = [(322, 135, -783), (635, 702, -791), (221, 668, -772),
               (732, 235, -801), (442, 454, -702)]

# Call our function
create_surface(geo_model, sand_points, 'sands')

# Add the surface orientation
geo_model.add_orientations(X=442., Y=495., Z=-752., surface='sands', pole_vector=(0.05, 0.05, 0.95))
```

|   | X     | Y     | Z      | G_x  | G_y  | G_z  | smooth | surface |
|---|-------|-------|--------|------|------|------|--------|---------|
| 0 | 442.0 | 495.0 | -752.0 | 0.05 | 0.05 | 0.95 | 0.01   | sands   |

We will now repeat this process for the CO2 lens.

```python
# Points defining the base of the CO2 layer
co2_points = [(322, 135, -650), (635, 702, -650), (221, 668, -650),
              (732, 235, -650), (442, 454, -650)]

create_surface(geo_model, co2_points, 'co2')

# Add the surface orientation
geo_model.add_orientations(X=495., Y=495., Z=-650., surface='co2', pole_vector=(0., 0., 1.))
```

|   | X     | Y     | Z      | G_x  | G_y  | G_z  | smooth | surface |
|---|-------|-------|--------|------|------|------|--------|---------|
| 1 | 495.0 | 495.0 | -650.0 | 0.00 | 0.00 | 1.00 | 0.01   | co2     |
| 0 | 442.0 | 495.0 | -752.0 | 0.05 | 0.05 | 0.95 | 0.01   | sands   |

We will now add an upper series, containing stratigraphy above the CO2 lens.

```python
geo_model.add_series('Upper')
```

|       | order_series | BottomRelation | isActive | isFault | isFinite |
|-------|--------------|----------------|----------|---------|----------|
| Lower | 1            | Erosion        | True     | False   | False    |
| Upper | 2            | Erosion        | False    | False   | False    |

As we can see, the upper series has been added below the lower series.
This is not ideal for obvious reasons, and hence we will reorder them:

```python
geo_model.reorder_series(['Upper', 'Lower'])
```

|       | order_series | BottomRelation | isActive | isFault | isFinite |
|-------|--------------|----------------|----------|---------|----------|
| Upper | 1            | Erosion        | False    | False   | False    |
| Lower | 2            | Erosion        | True     | False   | False    |

And add our remaining surfaces:

```python
geo_model.add_surfaces(['water', 'sediments', 'uppershale'])
```

|   | surface    | series | order_surfaces | color   | id |
|---|------------|--------|----------------|---------|----|
| 0 | co2        | Lower  | 1              | #015482 | 1  |
| 1 | sands      | Lower  | 2              | #9f0052 | 2  |
| 2 | lowershale | Lower  | 3              | #ffbe00 | 3  |
| 3 | water      | Lower  | 4              | #728f02 | 4  |
| 4 | sediments  | Lower  | 5              | #443988 | 5  |
| 5 | uppershale | Lower  | 6              | #ff3f20 | 6  |

As these surfaces are not mapped to the upper series by default, we shall do so:

```python
gp.map_stack_to_surfaces(geo_model, {'Upper': ('water', 'sediments', 'uppershale')})
```

|   | surface    | series | order_surfaces | color   | id |
|---|------------|--------|----------------|---------|----|
| 3 | water      | Upper  | 1              | #728f02 | 1  |
| 4 | sediments  | Upper  | 2              | #443988 | 2  |
| 5 | uppershale | Upper  | 3              | #ff3f20 | 3  |
| 0 | co2        | Lower  | 1              | #015482 | 4  |
| 1 | sands      | Lower  | 2              | #9f0052 | 5  |
| 2 | lowershale | Lower  | 3              | #ffbe00 | 6  |

Now we will add the points for the upper series. Note that there is only a single orientation included. It is not necessary to define an orientation for each surface, so long as there is an orientation in the series.
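As a brief aside, the `pole_vector` passed to `add_orientations` is the (approximately unit) normal to the surface, so `(0.05, 0.05, 0.95)` describes a gently dipping layer. A quick sketch to see just how gentle (the arithmetic is purely illustrative, not part of the GemPy workflow):

```python
# Dip angle implied by the pole vector: the angle between the surface
# normal and the vertical axis equals the dip of the plane
g = np.array([0.05, 0.05, 0.95])
dip = np.degrees(np.arccos(g[2] / np.linalg.norm(g)))
print(f"dip of ~{dip:.1f} degrees")  # ~4.3 degrees
```

With that noted, here are the points and orientation for the upper series: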
```python
# Surface points
uppershale_points = [(322, 135, -633), (635, 702, -641), (221, 668, -622),
                     (732, 235, -651), (442, 454, -552)]
sediments_points = [(322, 135, -433), (635, 702, -441), (221, 668, -422),
                    (732, 235, -451), (442, 454, -352)]
water_points = [(232, 153, -221), (653, 234, -216), (112, 872, -198),
                (532, 572, -223), (722, 884, -189), (632, 429, -201),
                (732, 348, -222)]

# Add the points to our surfaces
create_surface(geo_model, uppershale_points, 'uppershale')
create_surface(geo_model, sediments_points, 'sediments')
create_surface(geo_model, water_points, 'water')

# Set an orientation
geo_model.add_orientations(X=442., Y=495., Z=-502., surface='uppershale', pole_vector=(0.05, 0.05, 0.95))
```

|   | X     | Y     | Z      | G_x  | G_y  | G_z  | smooth | surface    |
|---|-------|-------|--------|------|------|------|--------|------------|
| 2 | 442.0 | 495.0 | -502.0 | 0.05 | 0.05 | 0.95 | 0.01   | uppershale |
| 1 | 495.0 | 495.0 | -650.0 | 0.00 | 0.00 | 1.00 | 0.01   | co2        |
| 0 | 442.0 | 495.0 | -752.0 | 0.05 | 0.05 | 0.95 | 0.01   | sands      |

### Adding physical properties (Vp)

Finally, we can add the p-wave velocities associated with each of these layers. Note that any parameter can be set in this manner (density, elastic parameters, attenuation, etc.) if desired for more complex synthetics. This is one of the benefits of using GemPy to create these synthetics: models can be expanded upon with a huge suite of material properties.

```python
geo_model.add_surface_values([1.5, 1.75, 2.5, 1.1, 2., 2.5], ['vp'])
geo_model.surfaces
```

|   | surface    | series | order_surfaces | color   | id | vp   |
|---|------------|--------|----------------|---------|----|------|
| 3 | water      | Upper  | 1              | #728f02 | 1  | 1.50 |
| 4 | sediments  | Upper  | 2              | #443988 | 2  | 1.75 |
| 5 | uppershale | Upper  | 3              | #ff3f20 | 3  | 2.50 |
| 0 | co2        | Lower  | 1              | #015482 | 4  | 1.10 |
| 1 | sands      | Lower  | 2              | #9f0052 | 5  | 2.00 |
| 2 | lowershale | Lower  | 3              | #ffbe00 | 6  | 2.50 |

### Visualising the GemPy model

Now we can visualise our model, plotting data points and orientations. Firstly, the model must be computed to interpolate the surfaces and any associated scalar fields. Then we can plot our surfaces and the associated units.

```python
# Set up plotter
p3d = gp.plot_3d(geo_model, notebook=True)

# Plot data points and orientations
p3d.plot_data()

# Compute the model. Note that a solution is returned. We will use this later
sol = gp.compute_model(geo_model)

# Plot the surfaces
p3d.plot_surfaces()

# Plot the lithological units
p3d.plot_structured_grid('lith')
```

## Bridging the gap from GemPy to Devito:

As you may have noticed, when we compute our GemPy model, a `Solution` object is returned. From this, we can extract the rasterized values attached to each of our geological units. With this in mind, we can print the solution values:

```python
sol.values_matrix
```

    array([[2.5 , 2.44720385, 2. , ..., 1.5 , 1.5 , 1.5 ]])

You will notice that these values correspond with the p-wave velocities we specified. However, they are in the form of a 1D vector. Consequently, we will need to reshape this array to fit into the `vp` parameter of a Devito `Model`. The same can be done with further parameters such as density or shear wave velocity for more complex models; in that case, you would want to set up a Devito `Function` to contain each parameter. Note that we need to select C-like index order here to get the axes in the correct order.

```python
# Reshaping our data to the shape required by Devito
reshaped = np.reshape(sol.values_matrix, shape, order='C')
reshaped.shape
```

    (101, 101, 101)
### Quality checking

Now let us plot a slice through this model for quality checking purposes. Note that we need to bear Devito's `[x, y, z]` indexing convention in mind.

```python
import matplotlib.pyplot as plt

# Take the center slice in the x direction
# Remember that in Devito, indexing convention is [x, y, z] (need to flip for correct imshow display)
plt.imshow(reshaped[50].T, cmap='viridis', origin='lower')
plt.colorbar()
plt.show()
```

The model looks good, and we can see the geometry of the CO2 lens clearly. We are now ready to begin our seismic modelling with Devito.

## Break Time!

Time to go and get a coffee.

## Seismic modelling with Devito

We can now start building our Devito model. The following draws heavily from the Devito `examples/seismic/tutorials/01_modelling.ipynb` notebook (see [here](https://github.com/devitocodes/devito/blob/master/examples/seismic/tutorials/01_modelling.ipynb)).

We will begin, as always, with some imports. If Devito is not installed, we will need to install it. We will also be drawing on some convenient helper functions for "toy" seismic models from `examples.seismic`. Whilst these are not part of core Devito and probably shouldn't be used in practical implementations, they are helpful for the purposes of learning the workflow.

```python
try:
    # Import devito
    import devito as dv
    from examples.seismic import Model
except ModuleNotFoundError:
    # Install newest SymPy version
    ! pip install -Iv sympy==1.8
    # Install devito
    ! pip install devito
    # Import devito
    import devito as dv
    from examples.seismic import Model
```

### Modelling workflow

The core process we are aiming to model is a seismic survey, which consists of two main components:

* **Source** - A source is positioned at a single or a few physical locations where artificial pressure is injected into the domain we want to model. In the case of a land survey, it is usually dynamite or a vibroseis (a truck fitted with a vibrating plate generating continuous sound waves). For a marine survey, the source is an air gun sending a bubble of compressed air into the water that will expand and generate a seismic wave.
* **Receivers** - A set of geophones or hydrophones are used to measure the resulting wave and create a set of measurements called a "Shot Record" or "Shot Gather". These measurements are recorded across an array, usually located near the surface, although they may be at depth in the case of ocean-bottom seismometers (OBS) or downhole receivers.

In order to create a numerical model of a seismic survey, we need to solve a wave equation and implement source and receiver interpolation to inject the source and record the seismic wave at sparse point locations in the grid.

[Source](https://upload.wikimedia.org/wikipedia/commons/0/01/Diagram_of_a_marine_seismic_survey.png) A diagram of a marine seismic survey.

### The acoustic wave equation

The acoustic wave equation for the square slowness $m$, defined as $m=\frac{1}{c^2}$, where $c$ is the speed of sound in the given physical media, and a source $q$ is given by:

\begin{cases}
&m \frac{\partial^2 u(x,t)}{\partial t^2} - \nabla^2 u(x,t) = q \ \text{in } \Omega \\
&u(.,t=0) = 0 \\
&\frac{\partial u(x,t)}{\partial t}|_{t=0} = 0
\end{cases}

with the zero initial conditions to guarantee uniqueness of the solution. The boundary conditions are Dirichlet conditions:

\begin{equation}
u(x,t)|_{\delta\Omega} = 0
\end{equation}

where $\delta\Omega$ is the surface of the boundary of the model $\Omega$.

### Finite domains

The last piece of the puzzle is the computational limitation. In the field, the seismic wave propagates in every direction to an "infinite" distance. However, solving the wave equation in a mathematically/discrete infinite domain is not feasible.
In order to compensate, Absorbing Boundary Conditions (ABC) or Perfectly Matched Layers (PML) are required to mimic an infinite domain. These two methods allow us to approximate an infinite medium by damping and absorbing the waves at the limit of the domain to avoid reflections.

The simplest of these methods is the absorbing boundary region. The core idea is to extend the physical domain and to add an absorbing region in this extension that will absorb the incident waves. The acoustic wave equation with this damping region can be rewritten as:

\begin{cases}
&m \frac{\partial^2 u(\textbf{x},t)}{\partial t^2} - \nabla^2 u(\textbf{x},t) + \eta \frac{\partial u(\textbf{x},t)}{\partial t}=q \ \text{in } \Omega \\
&u(.,0) = 0 \\
&\frac{\partial u(\textbf{x},t)}{\partial t}|_{t=0} = 0
\end{cases}

where $\eta$ is the damping coefficient, equal to $0$ inside the physical domain and increasing inside the absorbing region. Multiple profiles can be chosen for $\eta$, from linear to exponential.

### Defining the physical problem

The first step is to define the physical model:

* What are the physical dimensions of interest?
* What is the velocity profile of this physical domain?

We will use the velocity model which we have just created using GemPy. As such we need to create a physical domain of the same size. As mentioned earlier, Devito and GemPy have slightly different grid implementations, which we will need to bear in mind to ensure our Devito model maps correctly to the GemPy grid.

We can now construct a Devito `Model`. This is a convenience object encapsulating the necessary parameters and components of an acoustic wave model, including additional damping layers around the perimeter (specified by `bcs="damp"`). For implementing custom damping setups, see `examples/userapi/04_boundary_conditions.ipynb` in the Devito repository.

Note that we are using a relatively large number of damping layers here. This is to avoid our gathers becoming too messy, and ensure that reflections from horizons can be straightforwardly identified in the gathers. These damping layers are added to the edge of the specified model, increasing its extent (this is important for later visualisation).

```python
seis_model = Model(vp=reshaped, origin=(0., 0., -1000.), spacing=(10., 10., 10.),
                   shape=shape, nbl=30, space_order=4, bcs="damp")
```

    Operator `initdamp` run in 0.05 s
    Operator `pad_vp` run in 0.01 s

### Acquisition geometry

To fully define our problem setup we also need to define the source that injects the wave to model and the set of receiver locations at which to sample the wavefield. The source time signature will be modelled using a Ricker wavelet, defined as

\begin{equation}
q(t) = (1-2\pi^2 f_0^2 (t - \frac{1}{f_0})^2 )e^{- \pi^2 f_0^2 (t - \frac{1}{f_0})^2}
\end{equation}

To fully define the source signature we first need to define the time duration for our model and the timestep size, which is dictated by the CFL condition and our grid spacing. Luckily, our `Model` utility provides us with the critical timestep size, so we can fully discretize our model time axis as an array:

```python
from examples.seismic import TimeAxis

t0 = 0.     # Simulation starts at t=0
tn = 1000.  # Simulation last 1 second (1000 ms)
dt = seis_model.critical_dt  # Time step from model grid spacing

time_range = TimeAxis(start=t0, stop=tn, step=dt)
```
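As a quick cross-check of the analytic form above (a sketch only; the `RickerSource` utility below does this for us), the wavelet can be evaluated directly with NumPy, with $f_0$ in kHz and $t$ in ms to match the unit conventions of this tutorial:

```python
# Direct evaluation of the Ricker wavelet defined above
def ricker(t, f0):
    arg = (np.pi * f0 * (t - 1. / f0)) ** 2
    return (1. - 2. * arg) * np.exp(-arg)

wavelet = ricker(time_range.time_values, 0.015)
```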
We will position our source at a depth of 20m, center it in all other axes, and set the peak wavelet frequency to 15Hz.

```python
from examples.seismic import RickerSource

f0 = 0.015  # Source peak frequency is 15Hz (0.015 kHz)
src = RickerSource(name='src', grid=seis_model.grid, f0=f0,
                   npoint=1, time_range=time_range)

# First, position source centrally in all dimensions, then set depth
src.coordinates.data[:] = np.array(seis_model.domain_size) * .5
src.coordinates.data[0, -1] = -20  # Depth is 20m

# We can plot the time signature to see the wavelet
src.show()
```

Similarly to our source object, we can now define our receiver geometry as a symbol of type `Receiver`. It is worth noting here that both utility classes, `RickerSource` and `Receiver`, are thin wrappers around Devito's `SparseTimeFunction` type, which encapsulates sparse point data and allows us to inject and interpolate values into and out of the computational grid. As we have already seen, both types provide a `.coordinates` property to define the position within the domain of all points encapsulated by that symbol.

In this example we will position receivers at the same depth as the source, every $10m$ along the x axis, on the midline of the y axis. The `rec.data` property will be initialized, but left empty, as we will compute the receiver readings during the simulation.

```python
from examples.seismic import Receiver

# Create symbol for 101 receivers
rec = Receiver(name='rec', grid=seis_model.grid, npoint=101, time_range=time_range)

# Prescribe even spacing for receivers along the x-axis
rec.coordinates.data[:, 0] = np.linspace(0, seis_model.domain_size[0], num=101)
rec.coordinates.data[:, 1] = 0.5*seis_model.domain_size[1]
rec.coordinates.data[:, -1] = -20.  # Depth is 20m
```

### Wave equation and specification

As mentioned previously, we will be using the 2nd order acoustic wave equation in this tutorial. One of the key features of Devito is the ability to specify an equation, or system of equations, in a symbolic manner, and have this automatically discretized using the finite difference method.

In Devito, variables which vary in space only are represented using `Function` objects. If we also want them to vary over time, we must use a `TimeFunction`. There are further `Function`s for vector and tensor symbols: useful for concisely specifying the elastic wave equation, for example. Each `Function` has a `.data` attribute, which contains the discretized values of the field. The FD discretization is specified using the `time_order` and `space_order` keyword arguments. In this case we will use a discretization with 2nd order accuracy in time, and 4th order accuracy in space.

We will create a function $u(t, x, y, z)$ to contain our pressure field. To do this, we need to provide a name (used when printing the function, or symbolic expressions which contain it), and a Devito `Grid` to which it should be attached (in this case, the grid included in the `Model` convenience object). Additional information about the discretization, including time order, space order, and staggering, can also be supplied.

```python
# Define the wavefield with the size of the model
u = dv.TimeFunction(name="u", grid=seis_model.grid, time_order=2, space_order=4)

u
```

$\displaystyle u{\left(t,x,y,z \right)}$

We can also view the data attached to this `Function` as below. When the object is created, this is initialised to zero.

```python
u.data.shape
```

    (3, 161, 161, 161)
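A quick sanity check on that spatial shape (a sketch; the arithmetic follows from the `nbl=30` absorbing layers specified when building the model):

```python
# 101 interior points plus 30 absorbing layers on each side gives 161
assert u.data.shape[1:] == tuple(s + 2 * 30 for s in shape)
```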
There are also various convenient aliases contained within the `Function` object.

```python
u.shape == u.data.shape
```

    True

Since we set `time_order=2` when creating the function, Devito has allocated three buffers to represent `u(t-1, x, y, z)`, `u(t, x, y, z)`, and `u(t+1, x, y, z)`, hence the size of the time dimension being 3. A larger buffer can be specified when creating the `TimeFunction`, allowing all timesteps to be saved, but we have not done so in this case.

The `data` attribute is a NumPy array, and can be interacted with like any other.

```python
isinstance(u.data, np.ndarray)
```

    True

To take the first derivative of a variable `u` with respect to a dimension `x`, you can simply write `u.dx`, or `u.dx2` for the second derivative. For other dimensions (including time), simply swap out the `x`.

```python
# Show the derivative stencil
u.dx2.evaluate
```

$\displaystyle - \frac{2.5 u{\left(t,x,y,z \right)}}{h_{x}^{2}} - \frac{0.0833333333 u{\left(t,x - 2 h_{x},y,z \right)}}{h_{x}^{2}} + \frac{1.33333333 u{\left(t,x - h_{x},y,z \right)}}{h_{x}^{2}} + \frac{1.33333333 u{\left(t,x + h_{x},y,z \right)}}{h_{x}^{2}} - \frac{0.0833333333 u{\left(t,x + 2 h_{x},y,z \right)}}{h_{x}^{2}}$

### A detour into Finite Differences

This next section is included for completeness, explaining the derivation of finite difference stencils for given derivatives, and explicit timestepping schemes. Whilst an overview of the standard Taylor-series-based stencils is given, Devito also supports the specification of custom stencil coefficients, necessary for various dispersion-relation-preserving schemes. A tutorial for this can be found [here](https://github.com/devitocodes/devito/blob/master/examples/seismic/tutorials/07_DRP_schemes.ipynb). Note that there is also support for spatially-variant coefficients, either based on subdomains or over the full domain.

To understand where this stencil comes from, let's have a quick overview of finite differences. Consider a function $f(x,t)$. Recall that the Taylor series of $f(x,t)$ in the spatial dimension takes the form

\begin{equation*}
f(x+h,t)=f(x,t)+\frac{\partial f}{\partial x}h+\frac{1}{2}\frac{\partial^2 f}{\partial x^2}h^2+\frac{1}{3!}\frac{\partial^3 f}{\partial x^3}h^3+\frac{1}{4!}\frac{\partial^4 f}{\partial x^4}h^4+\ldots.
\end{equation*}

We can re-arrange the above expansion in the form

\begin{equation*}
\frac{\partial f}{\partial x}=\frac{f(x+h,t)-f(x,t)}{h}-\frac{1}{h}\sum_{n=2}^{\infty}\frac{1}{n!}\frac{\partial^n f}{\partial x^n}h^n.
\end{equation*}

Thus, provided $h$ is small we can say

\begin{equation*}
\frac{\partial f}{\partial x}\approx\frac{f(x+h,t)-f(x,t)}{h},
\end{equation*}

which will have an associated error

\begin{equation*}
-\frac{1}{h}\sum_{n=2}^{\infty}\frac{1}{n!}\frac{\partial^n f}{\partial x^n}h^n.
\end{equation*}

The residual in the numerator is $\mathcal{O}(h^2)$ (if $h$ is small, $h^2$ will be very small and $h^3$ will be tiny), so after dividing by $h$ the error in the derivative itself is proportional to $h$. It is common to write this as

\begin{equation*}
\frac{\partial f}{\partial x}=\frac{f(x+h,t)-f(x,t)+\mathcal{O}(h^2)}{h}.
\end{equation*}

This is the well known *forward difference* approximation and is how spatial derivatives are approximated in 'space order' 1 finite difference schemes. We can also write the following Taylor expansion

\begin{equation*}
f(x-h,t)=f(x,t)-\frac{\partial f}{\partial x}h+\frac{1}{2}\frac{\partial^2 f}{\partial x^2}h^2-\frac{1}{3!}\frac{\partial^3 f}{\partial x^3}h^3+\frac{1}{4!}\frac{\partial^4 f}{\partial x^4}h^4+\ldots.
\end{equation*}

This leads to the backward difference approximation, which is also first-order accurate (the error is again proportional to $h$):

\begin{equation*}
\frac{\partial f}{\partial x}\approx\frac{f(x,t)-f(x-h,t)}{h}.
\end{equation*}

Note now that if we look at $f(x+h)-f(x-h)$ and re-arrange we arrive at

\begin{equation*}
\frac{\partial f}{\partial x}=\frac{f(x+h,t)-f(x-h,t)+\mathcal{O}(h^3)}{2h},
\end{equation*}

which is known as the centered difference approximation. Notice that now the $h^2$ terms cancel and the numerator residual is 'pushed' back to be of order $h^3$, leaving an error proportional to $h^2$. That is, this approximation is (in theory) more accurate.

We can continue this process by considering more and more expansions and solving the resulting set of 'simultaneous equations' for $\partial f/\partial x$ to create higher and higher order approximations. For example, if we consider the expansions $f(x+2h,t)$ and $f(x-2h,t)$ we arrive at a 'fourth order' accurate scheme of the form

\begin{equation*}
\frac{\partial f}{\partial x}=\frac{\frac{1}{12}f(x-2h,t)-\frac{2}{3}f(x-h,t)+\frac{2}{3}f(x+h,t)-\frac{1}{12}f(x+2h,t)+\mathcal{O}(h^5)}{h}.
\end{equation*}

Notice that as we increase the accuracy of our approximation the **stencil** becomes larger (and hence computations with higher order stencils become more demanding).

Through this process of manipulating Taylor series we can also derive approximations for other derivatives. For example, summing the series expansions $f(x+h,t)+f(x-h,t)$ and re-ordering (the odd-order terms cancel) we arrive at

\begin{equation*}
\frac{\partial^2 f}{\partial x^2}=\frac{f(x-h,t)-2f(x,t)+f(x+h,t)+\mathcal{O}(h^4)}{h^2}.
\end{equation*}

Further, we can do the same thing for time derivatives (or other spatial dimensions in higher dimensional problems) and arrive at

\begin{equation*}
\frac{\partial f}{\partial t}\approx\frac{f(x,t+\delta t)-f(x,t)}{\delta t},
\end{equation*}

where $\delta t$ is some small increment in time. This then allows us to create 'time-stepping schemes'. For example, consider the partial differential equation

\begin{equation*}
\frac{\partial f}{\partial t}+c\frac{\partial f}{\partial x}=0.
\end{equation*}

Discretizing only the temporal term for the time being, we have

\begin{equation*}
\frac{f(x,t+\delta t)-f(x,t)}{\delta t}+c\frac{\partial f}{\partial x}=0,
\end{equation*}

which we can re-arrange as

\begin{equation*}
f(x,t+\delta t)=f(x,t)-\delta t\,c\,\frac{\partial f}{\partial x},
\end{equation*}

which is one of the most basic *explicit* time stepping schemes. Now if we approximate our spatial derivative with a forward difference approximation our scheme will become

\begin{equation*}
f(x,t+\delta t)=f(x,t)-\delta t\,c\,\frac{f(x+h,t)-f(x,t)}{h}.
\end{equation*}

Then, provided we know the status of our function $f$ at $t=0$ we can use the above scheme to compute the evolution of $f$ forward in time.
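As a minimal NumPy sketch of such a scheme (illustrative parameters, periodic boundaries via `np.roll`), the loop below advances an initial pulse forward in time. One practical caveat: for $c>0$ the forward spatial difference above is the unstable 'downwind' choice, so this sketch uses the backward (upwind) difference instead:

```python
import numpy as np

c, h, dt = 1.0, 0.01, 0.005          # wave speed, grid spacing, timestep
x = np.arange(0.0, 1.0, h)
f = np.exp(-200.0 * (x - 0.3) ** 2)  # initial condition at t = 0

for n in range(100):
    # Explicit update: backward (upwind) difference in space, forward in time
    f = f - dt * c * (f - np.roll(f, 1)) / h
```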
### Setting up the equation

With this out of the way, we can define our partial differential equation, essentially as we would write it on paper:

```python
# We can now write the PDE
pde = seis_model.m * u.dt2 - u.laplace + seis_model.damp * u.dt

# The PDE representation is as on paper
pde
```

$\displaystyle \operatorname{damp}{\left(x,y,z \right)} \frac{\partial}{\partial t} u{\left(t,x,y,z \right)} - \frac{\partial^{2}}{\partial x^{2}} u{\left(t,x,y,z \right)} - \frac{\partial^{2}}{\partial y^{2}} u{\left(t,x,y,z \right)} - \frac{\partial^{2}}{\partial z^{2}} u{\left(t,x,y,z \right)} + \frac{\frac{\partial^{2}}{\partial t^{2}} u{\left(t,x,y,z \right)}}{\operatorname{vp}^{2}{\left(x,y,z \right)}}$

Now we can create our update stencil to get the solution at the forward timestep. The value of `u` at the forward timestep can be accessed using `u.forward`.

```python
u.forward
```

$\displaystyle u{\left(t + dt,x,y,z \right)}$

Similarly, there is a `u.backward` for the backward timestep.

```python
u.backward
```

$\displaystyle u{\left(t - dt,x,y,z \right)}$

Additionally, derivatives can be taken at these positions using `u.forward.dx`.

```python
u.forward.dx
```

$\displaystyle \frac{\partial}{\partial x} u{\left(t + dt,x,y,z \right)}$

With this, we can solve our discretized PDE for u at the forward timestep, and set this equal to `u.forward`. This creates our update stencil.

```python
# This discrete PDE can be solved in a time-marching way updating u(t+dt) from the previous time step
# Devito has a shortcut for u(t+dt), which is u.forward. We can then rewrite the PDE as
# a time marching updating equation known as a stencil using customized SymPy functions
stencil = dv.Eq(u.forward, dv.solve(pde, u.forward))
stencil
```

$\displaystyle u{\left(t + dt,x,y,z \right)} = \frac{- \frac{- \frac{2.0 u{\left(t,x,y,z \right)}}{dt^{2}} + \frac{u{\left(t - dt,x,y,z \right)}}{dt^{2}}}{\operatorname{vp}^{2}{\left(x,y,z \right)}} - \frac{2.5 u{\left(t,x,y,z \right)}}{h_{z}^{2}} - \frac{0.0833333333 u{\left(t,x,y,z - 2 h_{z} \right)}}{h_{z}^{2}} + \frac{1.33333333 u{\left(t,x,y,z - h_{z} \right)}}{h_{z}^{2}} + \frac{1.33333333 u{\left(t,x,y,z + h_{z} \right)}}{h_{z}^{2}} - \frac{0.0833333333 u{\left(t,x,y,z + 2 h_{z} \right)}}{h_{z}^{2}} - \frac{2.5 u{\left(t,x,y,z \right)}}{h_{y}^{2}} - \frac{0.0833333333 u{\left(t,x,y - 2 h_{y},z \right)}}{h_{y}^{2}} + \frac{1.33333333 u{\left(t,x,y - h_{y},z \right)}}{h_{y}^{2}} + \frac{1.33333333 u{\left(t,x,y + h_{y},z \right)}}{h_{y}^{2}} - \frac{0.0833333333 u{\left(t,x,y + 2 h_{y},z \right)}}{h_{y}^{2}} - \frac{2.5 u{\left(t,x,y,z \right)}}{h_{x}^{2}} - \frac{0.0833333333 u{\left(t,x - 2 h_{x},y,z \right)}}{h_{x}^{2}} + \frac{1.33333333 u{\left(t,x - h_{x},y,z \right)}}{h_{x}^{2}} + \frac{1.33333333 u{\left(t,x + h_{x},y,z \right)}}{h_{x}^{2}} - \frac{0.0833333333 u{\left(t,x + 2 h_{x},y,z \right)}}{h_{x}^{2}} + \frac{\operatorname{damp}{\left(x,y,z \right)} u{\left(t,x,y,z \right)}}{dt}}{\frac{\operatorname{damp}{\left(x,y,z \right)}}{dt} + \frac{1}{dt^{2} \operatorname{vp}^{2}{\left(x,y,z \right)}}}$

## Break time! (Again)

Maybe have a biscuit this time?

### Source injection and receiver interpolation

With a numerical scheme to solve the homogeneous wave equation in place, we need to add a source to introduce seismic waves, and to implement a measurement (interpolation) operator to record the wavefield at the receiver locations. Both operations are linked to the discrete scheme and need to be applied at the proper time step. With this in mind, we can set up the source and receiver terms to include in our `Operator`.
```python
# Finally we define the source injection and receiver read function to generate the corresponding code
src_term = src.inject(field=u.forward, expr=src * dt**2 / seis_model.m)

# Create interpolation expression for receivers
rec_term = rec.interpolate(expr=u.forward)
```

### Devito operator and solve (where the magic happens)

After constructing all the necessary expressions for updating the wavefield, injecting the source term and interpolating onto the receiver points, we can now create the Devito operator that will generate the C code at runtime. When creating the operator, Devito performs multiple optimizations to reduce flop count and ensure memory locality. Devito can generate code to run on CPUs, GPUs, and clusters thereof, and makes use of several layers of parallelism.

**Note**: The argument `subs=seis_model.spacing_map` causes the operator to substitute values for our current grid spacing into the expressions before code generation. This reduces the number of floating point operations executed by the kernel by pre-evaluating certain coefficients.

```python
op = dv.Operator([stencil] + src_term + rec_term, subs=seis_model.spacing_map)
```

Now we can execute the operator for a number of timesteps. We specify the number of timesteps to compute with the keyword `time` and the timestep size with `dt`.

```python
op(time=time_range.num-1, dt=seis_model.critical_dt)
```

Operator `Kernel` run in 7.76 s

PerformanceSummary([(PerfKey(name='section0', rank=None), PerfEntry(time=7.7405060000000105, gflopss=0.0, gpointss=0.0, oi=0.0, ops=0, itershapes=[])), (PerfKey(name='section1', rank=None), PerfEntry(time=0.0009480000000000107, gflopss=0.0, gpointss=0.0, oi=0.0, ops=0, itershapes=[])), (PerfKey(name='section2', rank=None), PerfEntry(time=0.01052299999999995, gflopss=0.0, gpointss=0.0, oi=0.0, ops=0, itershapes=[]))])

### Plotting the synthetic shot record

We can now plot our shot record using everyone's favourite colourmap. We can clearly see the reflected arrivals from the seabed, top shale, and top CO2 (look for the reverse polarity arrival). We can also distinguish the base of the CO2 lens and the interface between the reservoir sandstone and the underlying shale.

```python
plt.imshow(rec.data, cmap='viridis', aspect='auto', vmax=0.01, vmin=-0.01, extent=(0, 100, tn, t0))
plt.xlabel("Receiver number")
plt.ylabel("Time (ms)")
plt.colorbar()
plt.show()
```

## Visualisation with PyVista:

In the final section of this tutorial, we will cover visualisation of the wavefield using PyVista: a dependency of GemPy. We can use its plotting and manipulation capabilities to visualise various slices of our wavefield. Firstly, we need to import PyVista:

```python
import pyvista as pv
```

With this done, we will want to take the `data` attribute of `u`, containing the values of the field. This is a NumPy array, indexed `[t, x, y, z]`, corresponding to the dimensions of `u`. As we want to plot the current (final) timestep, we will select time index 1.

```python
# Trim down the data from u to remove damping field
trimmed_data = u.data[1, 30:-30, 30:-30, 30:-30]
```

Now we will create a `UniformGrid`. This creates a simple uniform grid from a 3D NumPy array of values.

```python
# Create the spatial reference
grid = pv.UniformGrid()

# Set the grid dimensions: shape + 1 because we want to inject our values on
# the CELL data
grid.dimensions = np.array(trimmed_data.shape) + 1

# Edit the spatial reference
grid.origin = (0., 0., -1000.)  # The bottom left corner of the data set
grid.spacing = (10, 10, 10)  # These are the cell sizes along each axis
```

We can now fill the grid cells with our wavefield data:

```python
# Add the data values to the cell data
grid.cell_arrays["values"] = trimmed_data.flatten(order="F")  # Flatten the array!
```

And plot some orthogonal slices of this grid to visualise its internal structure:

```python
orth_slices = grid.slice_orthogonal(x=200, y=200, z=-500)
orth_slices.plot(cmap='seismic', clim=[-0.01, 0.01], notebook=True, window_size=(600, 400))
```

An alternative visualisation is to take a series of slices along an axis. By setting each slice to be slightly transparent, it is possible to show a fairly complete image of the 3D structure without the need for an interactive plot.

```python
y_slices = grid.slice_along_axis(n=5, axis="y")

p = pv.Plotter(notebook=True, window_size=(600, 400))
p.add_mesh(grid.outline(), color="k")
p.add_mesh(y_slices, cmap='seismic', clim=[-0.01, 0.01], opacity=0.8)
p.show()
```

## Further reading and exercise:

For some inspiration on possible applications, there is a further reading notebook, `further_reading_fwi.ipynb`, showing how to set up a toy full-waveform inversion problem using a slice of the velocity model we created in this tutorial.

As an exercise, try adding an additional density parameter to the GemPy model, and use this to create a 1st-order (staggered) acoustic model with Devito. Some tips:

* Look to `examples/seismic/tutorials/05_staggered_acoustic.ipynb` for a 1st-order acoustic example with constant physical parameters.
* Reshaping the data for use with Devito may be a little more involved. Make sure you separate the density and velocity data before applying the reshape.
* A rough starting sketch for the update equations is included at the end of this notebook.

## Thank you for listening!

The ecosystem of open-source geoscience software continues to grow, with high-quality packages like GemPy providing accessible, versatile tools to tackle all manner of scientific problems. In combination, we can use these to build new resources for the geoscience community, and ensure that they are adaptable and widely available. Devito is used commercially, and has been deployed at scale, allowing straightforward scaling between small examples and production-scale codes.

We look forward to seeing where you will take this: it would be great to see a 100% open-source, community-created Swung benchmark synthetic with Devito RTM/FWI examples by the time the next Transform rolls around. We envision generating synthetics for machine learning as one potential application, but are excited to see what applications the community will build from this tutorial.

I hope this tutorial has been useful. I will leave you with some helpful resources, and once again, feel free to ask on the Software Underground Slack or our own active Slack if you have any questions!

* [Devito website](https://www.devitoproject.org/)
* [Devito slack](https://devitocodes.slack.com/)
* [GemPy website](https://www.gempy.org/)
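### Appendix: a starting sketch for the staggered exercise

The snippet below is only a rough sketch to get you started on the exercise above, and is not part of the original tutorial: the grid shape, the way `vp` and `rho` would be populated from GemPy, and the explicit forward-Euler updates are all illustrative assumptions, and source injection, damping, and the proper staggered placement of the fields are deliberately omitted (see the referenced staggered notebook for those details).

```python
# Hypothetical starting point for the exercise -- shapes and parameter fields
# are placeholders; source terms, damping, and staggering are left out.
import devito as dv

grid = dv.Grid(shape=(101, 101, 101), extent=(1000., 1000., 1000.))

vp = dv.Function(name='vp', grid=grid)    # velocity, to be filled from the GemPy model
rho = dv.Function(name='rho', grid=grid)  # density, to be filled from the GemPy model

# Pressure and particle-velocity fields for the 1st-order system
p = dv.TimeFunction(name='p', grid=grid, time_order=1, space_order=4)
v = dv.VectorTimeFunction(name='v', grid=grid, time_order=1, space_order=4)

dt = grid.time_dim.spacing

# First-order acoustic system:
#   rho * dv/dt = -grad(p)
#   dp/dt      = -rho * vp**2 * div(v)
update_v = dv.Eq(v.forward, v - dt / rho * dv.grad(p))
update_p = dv.Eq(p.forward, p - dt * rho * vp**2 * dv.div(v.forward))

op = dv.Operator([update_v, update_p])
```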
import coprod.pre variables {ι : Type*} (M : ι → Type*) {G : ι → Type*} {N : Type*} variables [Π i, monoid (M i)] [Π i, group (G i)] [monoid N] def coprod : Type* := {l : list (Σ i, M i) // coprod.pre.reduced l} namespace coprod open coprod.pre list variables {M} [decidable_eq ι] [Π i : ι, decidable_eq (M i)] [Π i, decidable_eq (G i)] instance : has_one (coprod M) := ⟨⟨[], trivial, by simp⟩⟩ instance : has_mul (coprod M) := ⟨λ a b, ⟨pre.mul a.1 b.1, pre.reduced_mul a.2 b.2⟩⟩ instance : monoid (coprod M) := { mul := (*), one := 1, mul_assoc := λ a b c, subtype.eq $ pre.mul_assoc a.2 b.2 c.2, one_mul := λ _, subtype.eq (pre.one_mul _), mul_one := λ a, subtype.eq (pre.mul_one a.2) } instance : has_inv (coprod G) := ⟨λ a, ⟨pre.inv a.1, reduced_inv _ a.2⟩⟩ instance : group (coprod G) := { mul := (*), inv := has_inv.inv, one := 1, mul_left_inv := λ a, subtype.eq (pre.mul_left_inv _), ..coprod.monoid } def of (i : ι) : M i →* coprod M := { to_fun := λ a, ⟨of i a, reduced_of _ _⟩, map_one' := subtype.eq $ of_one _, map_mul' := λ a b, subtype.eq $ of_mul _ _ _ } @[simp] lemma cons_eq_of_mul {l : list (Σ i, M i)} (i : Σ i , M i) (h : reduced (i :: l)) : @eq (coprod M) ⟨i :: l, h⟩ (of i.1 i.2 * ⟨l, reduced_of_reduced_cons h⟩) := begin unfold has_mul.mul, cases i with i a, have ha : a ≠ 1, from h.2 _ (mem_cons_self _ _), have hi' : reduced [⟨i, a⟩], from reduced_singleton ha, simp [pre.mul, of, mul_aux, pre.of, ha, mul_aux_eq_reduce_append hi' (reduced_of_reduced_cons h), reduce_eq_self_of_reduced h], end @[simp] lemma nil_eq_one : @eq (coprod M) ⟨[], reduced_nil⟩ 1 := rfl @[simp] lemma append_eq_mul {l₁ l₂ : list (Σ i, M i)} (h : reduced (l₁ ++ l₂)) : @eq (coprod M) ⟨l₁ ++ l₂, h⟩ (⟨l₁, reduced_of_reduced_append_left h⟩ * ⟨l₂, reduced_of_reduced_append_right h⟩) := begin induction l₁ with i l₂ ih, { simp }, { rw [cons_append] at h, simp [mul_assoc, ih (reduced_of_reduced_cons h)] } end @[simp] lemma eta (w : coprod M) : (⟨w.1, w.2⟩ : coprod M) = w := subtype.eta _ _ instance : decidable_eq (coprod M) := subtype.decidable_eq def lift (f : Π i, M i →* N) : coprod M →* N := { to_fun := λ a, lift f a.1, map_one' := rfl, map_mul' := λ _ _, lift_mul _ _ _ } @[simp] lemma lift_of (f : Π i, M i →* N) (i : ι) (a : M i) : lift f (of i a) = f i a := lift_of _ _ _ @[simp] lemma lift_comp_of (f : Π i, M i →* N) (i : ι) : (lift f).comp (of i) = f i := monoid_hom.ext (by simp) def rec_on_aux {C : coprod M → Sort*} : Π (l : list (Σ i, M i)) (hl : reduced l) (h1 : C 1) (hof : Π i (a : M i), C (of i a)) (ih : Π (i : ι) (a : M i) (b : coprod M), C (of i a) → C b → C (of i a * b)), C ⟨l, hl⟩ | [] hl h1 hof ih := h1 | (⟨i,a⟩::l) hl h1 hof ih := begin rw [cons_eq_of_mul], exact ih _ _ _ (by convert hof i a; simp [pre.of, hl.2 _ (mem_cons_self _ _)]) (rec_on_aux _ _ h1 hof ih) end @[elab_as_eliminator] def rec_on {C : coprod M → Sort*} (a : coprod M) (h1 : C 1) (hof : Π i (a : M i), C (of i a)) (ih : Π (i : ι) (a : M i) (b : coprod M), C (of i a) → C b → C (of i a * b)) : C a := by cases a with i a; exact rec_on_aux i a h1 hof ih lemma hom_ext {f g : coprod M →* N} (h : ∀ i, f.comp (of i) = g.comp (of i)) : f = g := begin ext g, refine coprod.rec_on g _ _ _, { simp }, { intros i a, simpa using monoid_hom.ext_iff.1 (h i) a }, { simp {contextual := tt} } end lemma of_mul_cons (i j : ι) (a : M i) (b : M j) (l : list (Σ i, M i)) (h : reduced (⟨j, b⟩ :: l)) : of i a * ⟨⟨j, b⟩ :: l, h⟩ = if ha1 : a = 1 then ⟨⟨j, b⟩ :: l, h⟩ else if hij : i = j then if hab : a * cast (congr_arg M hij).symm b = 1 then ⟨l, reduced_of_reduced_cons h⟩ 
else ⟨⟨i, a * cast (congr_arg M hij).symm b⟩ :: l, reduced_cons_of_reduced_cons (show reduced (⟨i, cast (congr_arg M hij).symm b⟩ :: l), by subst hij; simpa) hab⟩ else ⟨⟨i, a⟩ :: ⟨j, b⟩ :: l, reduced_cons_cons hij ha1 h⟩ := show (show coprod M, from ⟨pre.mul_aux _ _, _⟩) = _, begin simp [of, pre.of], split_ifs; simp [mul_aux, of, pre.of]; split_ifs; simp [reverse_core_eq]; split_ifs; simp [mul_assoc, of, pre.of] end def to_list : coprod M → list (Σ i, M i) := subtype.val @[simp] lemma to_list_one : (1 : coprod M).to_list = [] := rfl lemma to_list_of (i : ι) (a : M i) : (of i a).to_list = if a = 1 then [] else [⟨i, a⟩] := rfl @[simp] lemma to_list_mk (l : list (Σ i, M i)) (hl : reduced l) : @to_list _ M _ _ _ ⟨l, hl⟩ = l := rfl end coprod
## Multi-Armed Bandits

Suppose you have $N$ slot machines (a multi-armed bandit), with each bandit having an unknown probability of distributing a prize. Assuming the prizes are the same for each bandit and only their probabilities of winning are different, our goal is to develop a strategy to maximize our winnings with the fewest trials.

If we knew which of the bandits has the highest probability of winning, we would exercise that bandit all the time. However, a sub-optimal bandit can generate a winning sequence purely by chance, and even once we have found a good enough bandit we face a dilemma: do we keep using it, or do we explore in the hope of finding one with an even higher probability of winning?

A Bayesian solution to the multi-armed bandit problem involves updating our prior belief on the probability of success for each bandit based on the observed draws. A beta-binomial model provides a conjugate likelihood and prior with closed-form posterior updates, so we can assume an uninformative $Beta(1,1)$ prior for each of the $N$ bandits. The Bayesian bandits algorithm can then be summarized as follows:

1. Draw a sample $X_b$ from the prior of bandit $b$ for all $b$
2. Select the bandit with the largest sample: $B = \arg \max X_b$
3. Observe the result of pulling bandit $B$ and update your prior on bandit $B$
4. Repeat

This algorithm suggests that we shouldn't discard low success probability bandits, but instead pick them at a decreasing rate as we gather more confidence that better bandits exist.

```
%matplotlib inline
import numpy as np
import pandas as pd
import scipy.stats as stats
import seaborn as sns
import matplotlib.pyplot as plt
```

```
class Bandits(object):
    """
    This class represents N bandit machines.

    parameters:
        p_array: a (n,) Numpy array of probabilities >0, <1.

    methods:
        pull(i): return the result, 0 or 1, of pulling the ith bandit.
    """
    def __init__(self, p_array):
        self.p = p_array
        self.optimal = np.argmax(p_array)

    def pull(self, i):
        # i is which arm to pull
        return np.random.rand() < self.p[i]

    def __len__(self):
        return len(self.p)
```

```
class BayesianStrategy(object):
    """
    Implements an online learning strategy to solve the
    multi-armed bandit problem.

    parameters:
        bandits: a Bandit class with .pull method

    methods:
        sample_bandits(n): sample and train on n pulls.
    attributes:
        N: the cumulative number of samples
        choices: the historical choices as a (N,) array
        bb_score: the historical score as a (N,) array
    """
    def __init__(self, bandits):
        self.bandits = bandits
        n_bandits = len(self.bandits)
        self.wins = np.zeros(n_bandits)
        self.trials = np.zeros(n_bandits)
        self.N = 0
        self.choices = []
        self.bb_score = []

    def sample_bandits(self, n=1):
        bb_score = np.zeros(n)
        choices = np.zeros(n)

        for k in range(n):
            # sample from the bandits' priors, and select the largest sample
            choice = np.argmax(np.random.beta(1 + self.wins, 1 + self.trials - self.wins))

            # sample the chosen bandit
            result = self.bandits.pull(choice)

            # update priors and score
            self.wins[choice] += result
            self.trials[choice] += 1
            bb_score[k] = result
            self.N += 1
            choices[k] = choice

        self.bb_score = np.r_[self.bb_score, bb_score]
        self.choices = np.r_[self.choices, choices]
        return
```

```
def plot_priors(bayesian_strategy, prob, lw=3, alpha=0.2, plt_vlines=True):
    ## plotting function
    wins = bayesian_strategy.wins
    trials = bayesian_strategy.trials
    for i in range(prob.shape[0]):
        y = beta(1 + wins[i], 1 + trials[i] - wins[i])
        p = plt.plot(x, y.pdf(x), lw=lw)
        c = p[0].get_markeredgecolor()
        plt.fill_between(x, y.pdf(x), 0, color=c, alpha=alpha,
                         label="underlying probability: %.2f" % prob[i])
        if plt_vlines:
            plt.vlines(prob[i], 0, y.pdf(prob[i]), colors=c, linestyles="--", lw=2)
    plt.title("Posteriors After %d pull" % bayesian_strategy.N +
              "s" * (bayesian_strategy.N > 1))
    plt.autoscale(tight=True)
    return
```

```
def regret(probabilities, choices):
    w_opt = probabilities.max()
    return (w_opt - probabilities[choices.astype(int)]).cumsum()
```

```
rand = np.random.rand
beta = stats.beta
x = np.linspace(0.001, .999, 200)
```

```
# bandits' success probabilities
hidden_prob = np.array([0.85, 0.60, 0.75])

# instantiate multi-armed bandit class
bandits = Bandits(hidden_prob)
bayesian_strat = BayesianStrategy(bandits)
```

```
# generate plots
draw_samples = [1, 1, 3, 10, 10, 25]

for j, i in enumerate(draw_samples):
    plt.subplot(3, 2, j + 1)
    bayesian_strat.sample_bandits(i)
    plot_priors(bayesian_strat, hidden_prob)
    # plt.legend()
    plt.autoscale(tight=True)
plt.tight_layout()
plt.show()
```

In the above figure we are showing the posterior distributions for a multi-armed bandit with $N=3$ arms over multiple draws, with the hidden winning probabilities marked by the dashed lines. We can see that all three bandits were exercised, with red and blue giving the highest returns. The modes of the posterior distributions show that the green and red bandits produced returns higher than their hidden probabilities purely by chance. Nonetheless, the blue bandit, which has the highest reward probability, was chosen the greatest number of times.

```
regret_bound = regret(hidden_prob, bayesian_strat.choices)

plt.figure()
plt.plot(regret_bound, lw=3)
plt.xlabel('number of trials')
plt.ylabel('regret')
plt.show()
```

We can measure the performance of our online algorithm using the regret shown in the figure above. The best we can do is to always pick the bandit with the largest probability of winning ($w_{opt}$), so we measure our score relative to that optimum:

\begin{equation}
R_T = \sum_{t=1}^{T} (w_{opt} - w_{B(t)})
\end{equation}

where $w_{B(t)}$ is the probability of winning for the bandit chosen in the $t$-th round. A total regret of zero means the strategy is matching the best possible score. Ideally, the regret curve should flatten as the strategy learns the best bandit.
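For reference (this equation is not spelled out in the notebook, but follows directly from beta-binomial conjugacy), the posterior that `sample_bandits` draws from has the closed form

\begin{equation}
p_b \mid \text{data} \sim Beta(1 + w_b,\; 1 + n_b - w_b)
\end{equation}

where $w_b$ is the number of wins observed in $n_b$ pulls of bandit $b$, starting from the $Beta(1,1)$ prior. This is exactly the `np.random.beta(1 + self.wins, 1 + self.trials - self.wins)` call in the code above.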
#include <deip/chain/database/database.hpp> #include <deip/chain/services/dbs_contract_agreement.hpp> #include <boost/algorithm/string/join.hpp> #include <boost/algorithm/string/split.hpp> #include <boost/algorithm/string/classification.hpp> namespace deip { namespace chain { dbs_contract_agreement::dbs_contract_agreement(database& db) : _base_type(db) { } const contract_agreement_object& dbs_contract_agreement::create( const protocol::external_id_type& external_id, const protocol::account_name_type& creator, const flat_set<protocol::account_name_type>& parties, const std::string& hash, const fc::time_point_sec& start_time, const fc::optional<fc::time_point_sec>& end_time) { const auto& block_time = db_impl().head_block_time(); const auto& contract = db_impl().create<contract_agreement_object>([&](contract_agreement_object& c_o) { c_o.external_id = external_id; c_o.creator = creator; for (const auto& party: parties) { c_o.parties[party] = static_cast<uint8_t>(acceptance_status::NotAccepted); } fc::from_string(c_o.hash, hash); c_o.start_time = start_time; c_o.end_time = end_time; c_o.created_at = block_time; }); return contract; } const bool dbs_contract_agreement::exists(const external_id_type &external_id) const { const auto& idx = db_impl() .get_index<contract_agreement_index>() .indices() .get<by_external_id>(); auto itr = idx.find(external_id); return itr != idx.end(); } const dbs_contract_agreement::optional_ref_type dbs_contract_agreement::get_if_exists(const external_id_type& id) const { optional_ref_type result; const auto& idx = db_impl() .get_index<contract_agreement_index>() .indicies() .get<by_external_id>(); auto itr = idx.find(id); if (itr != idx.end()) { result = *itr; } return result; } const dbs_contract_agreement::refs_type dbs_contract_agreement::get_by_creator( const account_name_type& creator) const { refs_type ret; auto it_pair = db_impl() .get_index<contract_agreement_index>() .indicies() .get<by_creator>() .equal_range(creator); auto it = it_pair.first; const auto it_end = it_pair.second; while (it != it_end) { ret.push_back(std::cref(*it)); ++it; } return ret; } const contract_agreement_object& dbs_contract_agreement::accept_by( const contract_agreement_object& contract, const account_name_type& party) { db_impl().modify(contract, [&](contract_agreement_object& o) { o.parties[party] = static_cast<uint8_t>(acceptance_status::Accepted); }); return contract; } const contract_agreement_object& dbs_contract_agreement::reject_by( const contract_agreement_object& contract, const account_name_type& party) { db_impl().modify(contract, [&](contract_agreement_object& o) { o.parties[party] = static_cast<uint8_t>(acceptance_status::Rejected); }); return contract; } } //namespace chain } //namespace deip
[STATEMENT] lemma unlock_lock_SomeD: "unlock_lock l = \<lfloor>(t', n)\<rfloor> \<Longrightarrow> l = \<lfloor>(t', Suc n)\<rfloor>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. unlock_lock l = \<lfloor>(t', n)\<rfloor> \<Longrightarrow> l = \<lfloor>(t', Suc n)\<rfloor> [PROOF STEP] by(cases l, auto split: nat.split_asm)
module Data.Nat.Instance where open import Agda.Builtin.Nat open import Class.Equality open import Class.Monoid open import Class.Show open import Data.Char open import Data.List open import Data.Nat renaming (_≟_ to _≟ℕ_; _+_ to _+ℕ_) open import Data.String open import Function private postulate primShowNat : ℕ -> List Char {-# COMPILE GHC primShowNat = show #-} showNat : ℕ -> String showNat = fromList ∘ primShowNat instance Eq-ℕ : Eq ℕ Eq-ℕ = record { _≟_ = _≟ℕ_ } EqB-ℕ : EqB ℕ EqB-ℕ = record { _≣_ = Agda.Builtin.Nat._==_ } ℕ-Monoid : Monoid ℕ ℕ-Monoid = record { mzero = zero ; _+_ = _+ℕ_ } Show-ℕ : Show ℕ Show-ℕ = record { show = showNat }
[STATEMENT] lemma ceiling_log_eq_powr_iff: "\<lbrakk> x > 0; b > 1 \<rbrakk> \<Longrightarrow> \<lceil>log b x\<rceil> = int k + 1 \<longleftrightarrow> b powr k < x \<and> x \<le> b powr (k + 1)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>0 < x; 1 < b\<rbrakk> \<Longrightarrow> (\<lceil>log b x\<rceil> = int k + 1) = (b powr real k < x \<and> x \<le> b powr real (k + 1)) [PROOF STEP] by (auto simp: ceiling_eq_iff powr_less_iff le_powr_iff)
module CS410-Monoid where open import CS410-Prelude record Monoid (M : Set) : Set where field -- OPERATIONS ---------------------------------------- e : M op : M -> M -> M -- LAWS ---------------------------------------------- lunit : forall m -> op e m == m runit : forall m -> op m e == m assoc : forall m m' m'' -> op m (op m' m'') == op (op m m') m''
/- Copyright (c) 2021 Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta -/ import category_theory.epi_mono import category_theory.limits.has_limits import category_theory.limits.shapes.equalizers /-! # Wide equalizers and wide coequalizers This file defines wide (co)equalizers as special cases of (co)limits. A wide equalizer for the family of morphisms `X ⟶ Y` indexed by `J` is the categorical generalization of the subobject `{a ∈ A | ∀ j₁ j₂, f(j₁, a) = f(j₂, a)}`. Note that if `J` has fewer than two morphisms this condition is trivial, so some lemmas and definitions assume `J` is nonempty. ## Main definitions * `walking_parallel_family` is the indexing category used for wide (co)equalizer diagrams * `parallel_family` is a functor from `walking_parallel_family` to our category `C`. * a `trident` is a cone over a parallel family. * there is really only one interesting morphism in a trident: the arrow from the vertex of the trident to the domain of f and g. It is called `trident.ι`. * a `wide_equalizer` is now just a `limit (parallel_family f)` Each of these has a dual. ## Main statements * `wide_equalizer.ι_mono` states that every wide_equalizer map is a monomorphism * `is_iso_limit_cone_parallel_family_of_self` states that the identity on the domain of `f` is an equalizer of `f` and `f`. ## Implementation notes As with the other special shapes in the limits library, all the definitions here are given as `abbreviation`s of the general statements for limits, so all the `simp` lemmas and theorems about general limits can be used. ## References * [F. Borceux, *Handbook of Categorical Algebra 1*][borceux-vol1] -/ noncomputable theory namespace category_theory.limits open category_theory universes w v u u₂ variables {J : Type w} /-- The type of objects for the diagram indexing a wide (co)equalizer. -/ inductive walking_parallel_family (J : Type w) : Type w | zero : walking_parallel_family | one : walking_parallel_family open walking_parallel_family instance : decidable_eq (walking_parallel_family J) | zero zero := is_true rfl | zero one := is_false (λ t, walking_parallel_family.no_confusion t) | one zero := is_false (λ t, walking_parallel_family.no_confusion t) | one one := is_true rfl instance : inhabited (walking_parallel_family J) := ⟨zero⟩ /-- The type family of morphisms for the diagram indexing a wide (co)equalizer. -/ @[derive decidable_eq] inductive walking_parallel_family.hom (J : Type w) : walking_parallel_family J → walking_parallel_family J → Type w | id : Π X : walking_parallel_family.{w} J, walking_parallel_family.hom X X | line : Π (j : J), walking_parallel_family.hom zero one /-- Satisfying the inhabited linter -/ instance (J : Type v) : inhabited (walking_parallel_family.hom J zero zero) := { default := hom.id _ } open walking_parallel_family.hom /-- Composition of morphisms in the indexing diagram for wide (co)equalizers. -/ def walking_parallel_family.hom.comp : Π (X Y Z : walking_parallel_family J) (f : walking_parallel_family.hom J X Y) (g : walking_parallel_family.hom J Y Z), walking_parallel_family.hom J X Z | _ _ _ (id _) h := h | _ _ _ (line j) (id one) := line j. 
local attribute [tidy] tactic.case_bash instance walking_parallel_family.category : small_category (walking_parallel_family J) := { hom := walking_parallel_family.hom J, id := walking_parallel_family.hom.id, comp := walking_parallel_family.hom.comp } @[simp] lemma walking_parallel_family.hom_id (X : walking_parallel_family J) : walking_parallel_family.hom.id X = 𝟙 X := rfl variables {C : Type u} [category.{v} C] variables {X Y : C} (f : J → (X ⟶ Y)) /-- `parallel_family f` is the diagram in `C` consisting of the given family of morphisms, each with common domain and codomain. -/ def parallel_family : walking_parallel_family J ⥤ C := { obj := λ x, walking_parallel_family.cases_on x X Y, map := λ x y h, match x, y, h with | _, _, (id _) := 𝟙 _ | _, _, (line j) := f j end, map_comp' := begin rintro _ _ _ ⟨⟩ ⟨⟩; { unfold_aux, simp; refl }, end } @[simp] lemma parallel_family_obj_zero : (parallel_family f).obj zero = X := rfl @[simp] lemma parallel_family_obj_one : (parallel_family f).obj one = Y := rfl @[simp] lemma parallel_family_map_left {j : J} : (parallel_family f).map (line j) = f j := rfl /-- Every functor indexing a wide (co)equalizer is naturally isomorphic (actually, equal) to a `parallel_family` -/ @[simps] def diagram_iso_parallel_family (F : walking_parallel_family J ⥤ C) : F ≅ parallel_family (λ j, F.map (line j)) := nat_iso.of_components (λ j, eq_to_iso $ by cases j; tidy) $ by tidy /-- `walking_parallel_pair` as a category is equivalent to a special case of `walking_parallel_family`. -/ @[simps] def walking_parallel_family_equiv_walking_parallel_pair : walking_parallel_family.{w} (ulift bool) ≌ walking_parallel_pair := { functor := parallel_family (λ p, cond p.down walking_parallel_pair_hom.left walking_parallel_pair_hom.right), inverse := parallel_pair (line (ulift.up tt)) (line (ulift.up ff)), unit_iso := nat_iso.of_components (λ X, eq_to_iso (by cases X; refl)) (by tidy), counit_iso := nat_iso.of_components (λ X, eq_to_iso (by cases X; refl)) (by tidy) } /-- A trident on `f` is just a `cone (parallel_family f)`. -/ abbreviation trident := cone (parallel_family f) /-- A cotrident on `f` and `g` is just a `cocone (parallel_family f)`. -/ abbreviation cotrident := cocone (parallel_family f) variables {f} /-- A trident `t` on the parallel family `f : J → (X ⟶ Y)` consists of two morphisms `t.π.app zero : t.X ⟶ X` and `t.π.app one : t.X ⟶ Y`. Of these, only the first one is interesting, and we give it the shorter name `trident.ι t`. -/ abbreviation trident.ι (t : trident f) := t.π.app zero /-- A cotrident `t` on the parallel family `f : J → (X ⟶ Y)` consists of two morphisms `t.ι.app zero : X ⟶ t.X` and `t.ι.app one : Y ⟶ t.X`. Of these, only the second one is interesting, and we give it the shorter name `cotrident.π t`. -/ abbreviation cotrident.π (t : cotrident f) := t.ι.app one @[simp] lemma trident.ι_eq_app_zero (t : trident f) : t.ι = t.π.app zero := rfl @[simp] lemma cotrident.π_eq_app_one (t : cotrident f) : t.π = t.ι.app one := rfl @[simp, reassoc] lemma trident.app_zero (s : trident f) (j : J) : s.π.app zero ≫ f j = s.π.app one := by rw [←s.w (line j), parallel_family_map_left] @[simp, reassoc] lemma cotrident.app_one (s : cotrident f) (j : J) : f j ≫ s.ι.app one = s.ι.app zero := by rw [←s.w (line j), parallel_family_map_left] /-- A trident on `f : J → (X ⟶ Y)` is determined by the morphism `ι : P ⟶ X` satisfying `∀ j₁ j₂, ι ≫ f j₁ = ι ≫ f j₂`. 
-/ @[simps] def trident.of_ι [nonempty J] {P : C} (ι : P ⟶ X) (w : ∀ j₁ j₂, ι ≫ f j₁ = ι ≫ f j₂) : trident f := { X := P, π := { app := λ X, walking_parallel_family.cases_on X ι (ι ≫ f (classical.arbitrary J)), naturality' := λ i j f, begin dsimp, cases f with _ k, { simp }, { simp [w (classical.arbitrary J) k] }, end } } /-- A cotrident on `f : J → (X ⟶ Y)` is determined by the morphism `π : Y ⟶ P` satisfying `∀ j₁ j₂, f j₁ ≫ π = f j₂ ≫ π`. -/ @[simps] def cotrident.of_π [nonempty J] {P : C} (π : Y ⟶ P) (w : ∀ j₁ j₂, f j₁ ≫ π = f j₂ ≫ π) : cotrident f := { X := P, ι := { app := λ X, walking_parallel_family.cases_on X (f (classical.arbitrary J) ≫ π) π, naturality' := λ i j f, begin dsimp, cases f with _ k, { simp }, { simp [w (classical.arbitrary J) k] } end } } -- See note [dsimp, simp] lemma trident.ι_of_ι [nonempty J] {P : C} (ι : P ⟶ X) (w : ∀ j₁ j₂, ι ≫ f j₁ = ι ≫ f j₂) : (trident.of_ι ι w).ι = ι := rfl lemma cotrident.π_of_π [nonempty J] {P : C} (π : Y ⟶ P) (w : ∀ j₁ j₂, f j₁ ≫ π = f j₂ ≫ π) : (cotrident.of_π π w).π = π := rfl @[reassoc] lemma trident.condition (j₁ j₂ : J) (t : trident f) : t.ι ≫ f j₁ = t.ι ≫ f j₂ := by rw [t.app_zero, t.app_zero] @[reassoc] lemma cotrident.condition (j₁ j₂ : J) (t : cotrident f) : f j₁ ≫ t.π = f j₂ ≫ t.π := by rw [t.app_one, t.app_one] /-- To check whether two maps are equalized by both maps of a trident, it suffices to check it for the first map -/ lemma trident.equalizer_ext [nonempty J] (s : trident f) {W : C} {k l : W ⟶ s.X} (h : k ≫ s.ι = l ≫ s.ι) : ∀ (j : walking_parallel_family J), k ≫ s.π.app j = l ≫ s.π.app j | zero := h | one := by rw [←s.app_zero (classical.arbitrary J), reassoc_of h] /-- To check whether two maps are coequalized by both maps of a cotrident, it suffices to check it for the second map -/ lemma cotrident.coequalizer_ext [nonempty J] (s : cotrident f) {W : C} {k l : s.X ⟶ W} (h : s.π ≫ k = s.π ≫ l) : ∀ (j : walking_parallel_family J), s.ι.app j ≫ k = s.ι.app j ≫ l | zero := by rw [←s.app_one (classical.arbitrary J), category.assoc, category.assoc, h] | one := h lemma trident.is_limit.hom_ext [nonempty J] {s : trident f} (hs : is_limit s) {W : C} {k l : W ⟶ s.X} (h : k ≫ s.ι = l ≫ s.ι) : k = l := hs.hom_ext $ trident.equalizer_ext _ h lemma cotrident.is_colimit.hom_ext [nonempty J] {s : cotrident f} (hs : is_colimit s) {W : C} {k l : s.X ⟶ W} (h : s.π ≫ k = s.π ≫ l) : k = l := hs.hom_ext $ cotrident.coequalizer_ext _ h /-- If `s` is a limit trident over `f`, then a morphism `k : W ⟶ X` satisfying `∀ j₁ j₂, k ≫ f j₁ = k ≫ f j₂` induces a morphism `l : W ⟶ s.X` such that `l ≫ trident.ι s = k`. -/ def trident.is_limit.lift' [nonempty J] {s : trident f} (hs : is_limit s) {W : C} (k : W ⟶ X) (h : ∀ j₁ j₂, k ≫ f j₁ = k ≫ f j₂) : {l : W ⟶ s.X // l ≫ trident.ι s = k} := ⟨hs.lift $ trident.of_ι _ h, hs.fac _ _⟩ /-- If `s` is a colimit cotrident over `f`, then a morphism `k : Y ⟶ W` satisfying `∀ j₁ j₂, f j₁ ≫ k = f j₂ ≫ k` induces a morphism `l : s.X ⟶ W` such that `cotrident.π s ≫ l = k`. -/ def cotrident.is_colimit.desc' [nonempty J] {s : cotrident f} (hs : is_colimit s) {W : C} (k : Y ⟶ W) (h : ∀ j₁ j₂, f j₁ ≫ k = f j₂ ≫ k) : {l : s.X ⟶ W // cotrident.π s ≫ l = k} := ⟨hs.desc $ cotrident.of_π _ h, hs.fac _ _⟩ /-- This is a slightly more convenient method to verify that a trident is a limit cone. 
It only asks for a proof of facts that carry any mathematical content -/ def trident.is_limit.mk [nonempty J] (t : trident f) (lift : Π (s : trident f), s.X ⟶ t.X) (fac : ∀ (s : trident f), lift s ≫ t.ι = s.ι) (uniq : ∀ (s : trident f) (m : s.X ⟶ t.X) (w : ∀ j : walking_parallel_family J, m ≫ t.π.app j = s.π.app j), m = lift s) : is_limit t := { lift := lift, fac' := λ s j, walking_parallel_family.cases_on j (fac s) (by rw [←t.w (line (classical.arbitrary J)), reassoc_of fac, s.w]), uniq' := uniq } /-- This is another convenient method to verify that a trident is a limit cone. It only asks for a proof of facts that carry any mathematical content, and allows access to the same `s` for all parts. -/ def trident.is_limit.mk' [nonempty J] (t : trident f) (create : Π (s : trident f), {l // l ≫ t.ι = s.ι ∧ ∀ {m}, m ≫ t.ι = s.ι → m = l}) : is_limit t := trident.is_limit.mk t (λ s, (create s).1) (λ s, (create s).2.1) (λ s m w, (create s).2.2 (w zero)) /-- This is a slightly more convenient method to verify that a cotrident is a colimit cocone. It only asks for a proof of facts that carry any mathematical content -/ def cotrident.is_colimit.mk [nonempty J] (t : cotrident f) (desc : Π (s : cotrident f), t.X ⟶ s.X) (fac : ∀ (s : cotrident f), t.π ≫ desc s = s.π) (uniq : ∀ (s : cotrident f) (m : t.X ⟶ s.X) (w : ∀ j : walking_parallel_family J, t.ι.app j ≫ m = s.ι.app j), m = desc s) : is_colimit t := { desc := desc, fac' := λ s j, walking_parallel_family.cases_on j (by rw [←t.w_assoc (line (classical.arbitrary J)), fac, s.w]) (fac s), uniq' := uniq } /-- This is another convenient method to verify that a cotrident is a colimit cocone. It only asks for a proof of facts that carry any mathematical content, and allows access to the same `s` for all parts. -/ def cotrident.is_colimit.mk' [nonempty J] (t : cotrident f) (create : Π (s : cotrident f), {l : t.X ⟶ s.X // t.π ≫ l = s.π ∧ ∀ {m}, t.π ≫ m = s.π → m = l}) : is_colimit t := cotrident.is_colimit.mk t (λ s, (create s).1) (λ s, (create s).2.1) (λ s m w, (create s).2.2 (w one)) /-- Given a limit cone for the family `f : J → (X ⟶ Y)`, for any `Z`, morphisms from `Z` to its point are in bijection with morphisms `h : Z ⟶ X` such that `∀ j₁ j₂, h ≫ f j₁ = h ≫ f j₂`. Further, this bijection is natural in `Z`: see `trident.is_limit.hom_iso_natural`. -/ @[simps] def trident.is_limit.hom_iso [nonempty J] {t : trident f} (ht : is_limit t) (Z : C) : (Z ⟶ t.X) ≃ {h : Z ⟶ X // ∀ j₁ j₂, h ≫ f j₁ = h ≫ f j₂} := { to_fun := λ k, ⟨k ≫ t.ι, by simp⟩, inv_fun := λ h, (trident.is_limit.lift' ht _ h.prop).1, left_inv := λ k, trident.is_limit.hom_ext ht (trident.is_limit.lift' _ _ _).prop, right_inv := λ h, subtype.ext (trident.is_limit.lift' ht _ _).prop } /-- The bijection of `trident.is_limit.hom_iso` is natural in `Z`. -/ lemma trident.is_limit.hom_iso_natural [nonempty J] {t : trident f} (ht : is_limit t) {Z Z' : C} (q : Z' ⟶ Z) (k : Z ⟶ t.X) : (trident.is_limit.hom_iso ht _ (q ≫ k) : Z' ⟶ X) = q ≫ (trident.is_limit.hom_iso ht _ k : Z ⟶ X) := category.assoc _ _ _ /-- Given a colimit cocone for the family `f : J → (X ⟶ Y)`, for any `Z`, morphisms from the cocone point to `Z` are in bijection with morphisms `h : Z ⟶ X` such that `∀ j₁ j₂, f j₁ ≫ h = f j₂ ≫ h`. Further, this bijection is natural in `Z`: see `cotrident.is_colimit.hom_iso_natural`. 
-/ @[simps] def cotrident.is_colimit.hom_iso [nonempty J] {t : cotrident f} (ht : is_colimit t) (Z : C) : (t.X ⟶ Z) ≃ {h : Y ⟶ Z // ∀ j₁ j₂, f j₁ ≫ h = f j₂ ≫ h} := { to_fun := λ k, ⟨t.π ≫ k, by simp⟩, inv_fun := λ h, (cotrident.is_colimit.desc' ht _ h.prop).1, left_inv := λ k, cotrident.is_colimit.hom_ext ht (cotrident.is_colimit.desc' _ _ _).prop, right_inv := λ h, subtype.ext (cotrident.is_colimit.desc' ht _ _).prop } /-- The bijection of `cotrident.is_colimit.hom_iso` is natural in `Z`. -/ lemma cotrident.is_colimit.hom_iso_natural [nonempty J] {t : cotrident f} {Z Z' : C} (q : Z ⟶ Z') (ht : is_colimit t) (k : t.X ⟶ Z) : (cotrident.is_colimit.hom_iso ht _ (k ≫ q) : Y ⟶ Z') = (cotrident.is_colimit.hom_iso ht _ k : Y ⟶ Z) ≫ q := (category.assoc _ _ _).symm /-- This is a helper construction that can be useful when verifying that a category has certain wide equalizers. Given `F : walking_parallel_family ⥤ C`, which is really the same as `parallel_family (λ j, F.map (line j))`, and a trident on `λ j, F.map (line j)`, we get a cone on `F`. If you're thinking about using this, have a look at `has_wide_equalizers_of_has_limit_parallel_family`, which you may find to be an easier way of achieving your goal. -/ def cone.of_trident {F : walking_parallel_family J ⥤ C} (t : trident (λ j, F.map (line j))) : cone F := { X := t.X, π := { app := λ X, t.π.app X ≫ eq_to_hom (by tidy), naturality' := λ j j' g, by { cases g; { dsimp, simp } } } } /-- This is a helper construction that can be useful when verifying that a category has all coequalizers. Given `F : walking_parallel_family ⥤ C`, which is really the same as `parallel_family (λ j, F.map (line j))`, and a cotrident on `λ j, F.map (line j)` we get a cocone on `F`. If you're thinking about using this, have a look at `has_wide_coequalizers_of_has_colimit_parallel_family`, which you may find to be an easier way of achieving your goal. -/ def cocone.of_cotrident {F : walking_parallel_family J ⥤ C} (t : cotrident (λ j, F.map (line j))) : cocone F := { X := t.X, ι := { app := λ X, eq_to_hom (by tidy) ≫ t.ι.app X, naturality' := λ j j' g, by { cases g; dsimp; simp [cotrident.app_one t] } } } @[simp] lemma cone.of_trident_π {F : walking_parallel_family J ⥤ C} (t : trident (λ j, F.map (line j))) (j) : (cone.of_trident t).π.app j = t.π.app j ≫ eq_to_hom (by tidy) := rfl @[simp] lemma cocone.of_cotrident_ι {F : walking_parallel_family J ⥤ C} (t : cotrident (λ j, F.map (line j))) (j) : (cocone.of_cotrident t).ι.app j = eq_to_hom (by tidy) ≫ t.ι.app j := rfl /-- Given `F : walking_parallel_family ⥤ C`, which is really the same as `parallel_family (λ j, F.map (line j))` and a cone on `F`, we get a trident on `λ j, F.map (line j)`. -/ def trident.of_cone {F : walking_parallel_family J ⥤ C} (t : cone F) : trident (λ j, F.map (line j)) := { X := t.X, π := { app := λ X, t.π.app X ≫ eq_to_hom (by tidy) } } /-- Given `F : walking_parallel_family ⥤ C`, which is really the same as `parallel_family (F.map left) (F.map right)` and a cocone on `F`, we get a cotrident on `λ j, F.map (line j)`. 
-/ def cotrident.of_cocone {F : walking_parallel_family J ⥤ C} (t : cocone F) : cotrident (λ j, F.map (line j)) := { X := t.X, ι := { app := λ X, eq_to_hom (by tidy) ≫ t.ι.app X } } @[simp] lemma trident.of_cone_π {F : walking_parallel_family J ⥤ C} (t : cone F) (j) : (trident.of_cone t).π.app j = t.π.app j ≫ eq_to_hom (by tidy) := rfl @[simp] lemma cotrident.of_cocone_ι {F : walking_parallel_family J ⥤ C} (t : cocone F) (j) : (cotrident.of_cocone t).ι.app j = eq_to_hom (by tidy) ≫ t.ι.app j := rfl /-- Helper function for constructing morphisms between wide equalizer tridents. -/ @[simps] def trident.mk_hom [nonempty J] {s t : trident f} (k : s.X ⟶ t.X) (w : k ≫ t.ι = s.ι) : s ⟶ t := { hom := k, w' := begin rintro ⟨_|_⟩, { exact w }, { simpa using w =≫ f (classical.arbitrary J) }, end } /-- To construct an isomorphism between tridents, it suffices to give an isomorphism between the cone points and check that it commutes with the `ι` morphisms. -/ @[simps] def trident.ext [nonempty J] {s t : trident f} (i : s.X ≅ t.X) (w : i.hom ≫ t.ι = s.ι) : s ≅ t := { hom := trident.mk_hom i.hom w, inv := trident.mk_hom i.inv (by rw [← w, iso.inv_hom_id_assoc]) } /-- Helper function for constructing morphisms between coequalizer cotridents. -/ @[simps] def cotrident.mk_hom [nonempty J] {s t : cotrident f} (k : s.X ⟶ t.X) (w : s.π ≫ k = t.π) : s ⟶ t := { hom := k, w' := begin rintro ⟨_|_⟩, { simpa using f (classical.arbitrary J) ≫= w }, { exact w }, end } /-- To construct an isomorphism between cotridents, it suffices to give an isomorphism between the cocone points and check that it commutes with the `π` morphisms. -/ def cotrident.ext [nonempty J] {s t : cotrident f} (i : s.X ≅ t.X) (w : s.π ≫ i.hom = t.π) : s ≅ t := { hom := cotrident.mk_hom i.hom w, inv := cotrident.mk_hom i.inv (by rw [iso.comp_inv_eq, w]) } variables (f) section /-- `has_wide_equalizer f` represents a particular choice of limiting cone for the parallel family of morphisms `f`. -/ abbreviation has_wide_equalizer := has_limit (parallel_family f) variables [has_wide_equalizer f] /-- If a wide equalizer of `f` exists, we can access an arbitrary choice of such by saying `wide_equalizer f`. -/ abbreviation wide_equalizer : C := limit (parallel_family f) /-- If a wide equalizer of `f` exists, we can access the inclusion `wide_equalizer f ⟶ X` by saying `wide_equalizer.ι f`. -/ abbreviation wide_equalizer.ι : wide_equalizer f ⟶ X := limit.π (parallel_family f) zero /-- A wide equalizer cone for a parallel family `f`. -/ abbreviation wide_equalizer.trident : trident f := limit.cone (parallel_family f) @[simp] lemma wide_equalizer.trident_ι : (wide_equalizer.trident f).ι = wide_equalizer.ι f := rfl @[simp] lemma wide_equalizer.trident_π_app_zero : (wide_equalizer.trident f).π.app zero = wide_equalizer.ι f := rfl @[reassoc] lemma wide_equalizer.condition (j₁ j₂ : J) : wide_equalizer.ι f ≫ f j₁ = wide_equalizer.ι f ≫ f j₂ := trident.condition j₁ j₂ $ limit.cone $ parallel_family f /-- The wide_equalizer built from `wide_equalizer.ι f` is limiting. -/ def wide_equalizer_is_wide_equalizer [nonempty J] : is_limit (trident.of_ι (wide_equalizer.ι f) (wide_equalizer.condition f)) := is_limit.of_iso_limit (limit.is_limit _) (trident.ext (iso.refl _) (by tidy)) variables {f} /-- A morphism `k : W ⟶ X` satisfying `∀ j₁ j₂, k ≫ f j₁ = k ≫ f j₂` factors through the wide equalizer of `f` via `wide_equalizer.lift : W ⟶ wide_equalizer f`. 
-/
abbreviation wide_equalizer.lift [nonempty J] {W : C} (k : W ⟶ X)
  (h : ∀ j₁ j₂, k ≫ f j₁ = k ≫ f j₂) : W ⟶ wide_equalizer f :=
limit.lift (parallel_family f) (trident.of_ι k h)

@[simp, reassoc]
lemma wide_equalizer.lift_ι [nonempty J] {W : C} (k : W ⟶ X)
  (h : ∀ j₁ j₂, k ≫ f j₁ = k ≫ f j₂) :
  wide_equalizer.lift k h ≫ wide_equalizer.ι f = k :=
limit.lift_π _ _

/-- A morphism `k : W ⟶ X` satisfying `∀ j₁ j₂, k ≫ f j₁ = k ≫ f j₂` induces a morphism
`l : W ⟶ wide_equalizer f` satisfying `l ≫ wide_equalizer.ι f = k`. -/
def wide_equalizer.lift' [nonempty J] {W : C} (k : W ⟶ X)
  (h : ∀ j₁ j₂, k ≫ f j₁ = k ≫ f j₂) :
  {l : W ⟶ wide_equalizer f // l ≫ wide_equalizer.ι f = k} :=
⟨wide_equalizer.lift k h, wide_equalizer.lift_ι _ _⟩

/-- Two maps into a wide equalizer are equal if they are equal when composed with the wide
equalizer map. -/
@[ext] lemma wide_equalizer.hom_ext [nonempty J] {W : C} {k l : W ⟶ wide_equalizer f}
  (h : k ≫ wide_equalizer.ι f = l ≫ wide_equalizer.ι f) : k = l :=
trident.is_limit.hom_ext (limit.is_limit _) h

/-- A wide equalizer morphism is a monomorphism -/
instance wide_equalizer.ι_mono [nonempty J] : mono (wide_equalizer.ι f) :=
{ right_cancellation := λ Z h k w, wide_equalizer.hom_ext w }

end

section
variables {f}

/-- The wide equalizer morphism in any limit cone is a monomorphism. -/
lemma mono_of_is_limit_parallel_family [nonempty J] {c : cone (parallel_family f)}
  (i : is_limit c) : mono (trident.ι c) :=
{ right_cancellation := λ Z h k w, trident.is_limit.hom_ext i w }

end

section

/-- `has_wide_coequalizer f` represents a particular choice of colimiting cocone for the parallel
family of morphisms `f`. -/
abbreviation has_wide_coequalizer := has_colimit (parallel_family f)

variables [has_wide_coequalizer f]

/-- If a wide coequalizer of `f` exists, we can access an arbitrary choice of such by saying
`wide_coequalizer f`. -/
abbreviation wide_coequalizer : C := colimit (parallel_family f)

/-- If a wide_coequalizer of `f` exists, we can access the corresponding projection by saying
`wide_coequalizer.π f`. -/
abbreviation wide_coequalizer.π : Y ⟶ wide_coequalizer f :=
colimit.ι (parallel_family f) one

/-- An arbitrary choice of coequalizer cocone for a parallel family `f`. -/
abbreviation wide_coequalizer.cotrident : cotrident f := colimit.cocone (parallel_family f)

@[simp] lemma wide_coequalizer.cotrident_π :
  (wide_coequalizer.cotrident f).π = wide_coequalizer.π f := rfl

@[simp] lemma wide_coequalizer.cotrident_ι_app_one :
  (wide_coequalizer.cotrident f).ι.app one = wide_coequalizer.π f := rfl

@[reassoc] lemma wide_coequalizer.condition (j₁ j₂ : J) :
  f j₁ ≫ wide_coequalizer.π f = f j₂ ≫ wide_coequalizer.π f :=
cotrident.condition j₁ j₂ $ colimit.cocone $ parallel_family f

/-- The cotrident built from `wide_coequalizer.π f` is colimiting. -/
def wide_coequalizer_is_wide_coequalizer [nonempty J] :
  is_colimit (cotrident.of_π (wide_coequalizer.π f) (wide_coequalizer.condition f)) :=
is_colimit.of_iso_colimit (colimit.is_colimit _) (cotrident.ext (iso.refl _) (by tidy))

variables {f}

/-- Any morphism `k : Y ⟶ W` satisfying `∀ j₁ j₂, f j₁ ≫ k = f j₂ ≫ k` factors through the wide
coequalizer of `f` via `wide_coequalizer.desc : wide_coequalizer f ⟶ W`.
-/ abbreviation wide_coequalizer.desc [nonempty J] {W : C} (k : Y ⟶ W) (h : ∀ j₁ j₂, f j₁ ≫ k = f j₂ ≫ k) : wide_coequalizer f ⟶ W := colimit.desc (parallel_family f) (cotrident.of_π k h) @[simp, reassoc] lemma wide_coequalizer.π_desc [nonempty J] {W : C} (k : Y ⟶ W) (h : ∀ j₁ j₂, f j₁ ≫ k = f j₂ ≫ k) : wide_coequalizer.π f ≫ wide_coequalizer.desc k h = k := colimit.ι_desc _ _ /-- Any morphism `k : Y ⟶ W` satisfying `∀ j₁ j₂, f j₁ ≫ k = f j₂ ≫ k` induces a morphism `l : wide_coequalizer f ⟶ W` satisfying `wide_coequalizer.π ≫ g = l`. -/ def wide_coequalizer.desc' [nonempty J] {W : C} (k : Y ⟶ W) (h : ∀ j₁ j₂, f j₁ ≫ k = f j₂ ≫ k) : {l : wide_coequalizer f ⟶ W // wide_coequalizer.π f ≫ l = k} := ⟨wide_coequalizer.desc k h, wide_coequalizer.π_desc _ _⟩ /-- Two maps from a wide coequalizer are equal if they are equal when composed with the wide coequalizer map -/ @[ext] lemma wide_coequalizer.hom_ext [nonempty J] {W : C} {k l : wide_coequalizer f ⟶ W} (h : wide_coequalizer.π f ≫ k = wide_coequalizer.π f ≫ l) : k = l := cotrident.is_colimit.hom_ext (colimit.is_colimit _) h /-- A wide coequalizer morphism is an epimorphism -/ instance wide_coequalizer.π_epi [nonempty J] : epi (wide_coequalizer.π f) := { left_cancellation := λ Z h k w, wide_coequalizer.hom_ext w } end section variables {f} /-- The wide coequalizer morphism in any colimit cocone is an epimorphism. -/ lemma epi_of_is_colimit_parallel_family [nonempty J] {c : cocone (parallel_family f)} (i : is_colimit c) : epi (c.ι.app one) := { left_cancellation := λ Z h k w, cotrident.is_colimit.hom_ext i w } end variables (C) /-- `has_wide_equalizers` represents a choice of wide equalizer for every family of morphisms -/ abbreviation has_wide_equalizers := Π J, has_limits_of_shape (walking_parallel_family.{w} J) C /-- `has_wide_coequalizers` represents a choice of wide coequalizer for every family of morphisms -/ abbreviation has_wide_coequalizers := Π J, has_colimits_of_shape (walking_parallel_family.{w} J) C /-- If `C` has all limits of diagrams `parallel_family f`, then it has all wide equalizers -/ lemma has_wide_equalizers_of_has_limit_parallel_family [Π {J : Type w} {X Y : C} {f : J → (X ⟶ Y)}, has_limit (parallel_family f)] : has_wide_equalizers.{w} C := λ J, { has_limit := λ F, has_limit_of_iso (diagram_iso_parallel_family F).symm } /-- If `C` has all colimits of diagrams `parallel_family f`, then it has all wide coequalizers -/ lemma has_wide_coequalizers_of_has_colimit_parallel_family [Π {J : Type w} {X Y : C} {f : J → (X ⟶ Y)}, has_colimit (parallel_family f)] : has_wide_coequalizers.{w} C := λ J, { has_colimit := λ F, has_colimit_of_iso (diagram_iso_parallel_family F) } @[priority 10] instance has_equalizers_of_has_wide_equalizers [has_wide_equalizers.{w} C] : has_equalizers C := has_limits_of_shape_of_equivalence.{w} walking_parallel_family_equiv_walking_parallel_pair @[priority 10] instance has_coequalizers_of_has_wide_coequalizers [has_wide_coequalizers.{w} C] : has_coequalizers C := has_colimits_of_shape_of_equivalence.{w} walking_parallel_family_equiv_walking_parallel_pair end category_theory.limits
lemma lipschitz_on_cmult [lipschitz_intros]: fixes f::"'a::metric_space \<Rightarrow> 'b::real_normed_vector" assumes "C-lipschitz_on U f" shows "(abs(a) * C)-lipschitz_on U (\<lambda>x. a *\<^sub>R f x)"
import sys
import numpy as np
import cv2

src = cv2.imread('vlcsnap-2021-02-04-10h05m23s567.png')

if src is None:
    print('Image load failed!')
    sys.exit()

# Output size of the rectified image
w, h = 640, 480

# Corners of the source quadrilateral, in top-left, top-right,
# bottom-right, bottom-left order
srcQuad = np.array([[296, 92], [501, 92], [501, 253], [296, 253]], np.float32)
# Corresponding corners of the destination rectangle
dstQuad = np.array([[0, 0], [w-1, 0], [w-1, h-1], [0, h-1]], np.float32)

# 3x3 perspective transform mapping srcQuad onto dstQuad
pers = cv2.getPerspectiveTransform(srcQuad, dstQuad)
dst = cv2.warpPerspective(src, pers, (w, h))

cv2.imshow('src', src)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
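# As a quick sanity check (this block is an illustrative addition, not part of
# the original script), the same 3x3 homography can be applied to individual
# points with cv2.perspectiveTransform; the source corners should land, up to
# rounding, on the destination corners.
pts = srcQuad.reshape(-1, 1, 2)  # perspectiveTransform expects shape (N, 1, 2), float32
print(cv2.perspectiveTransform(pts, pers).reshape(-1, 2))
# Expected output: approximately [[0, 0], [639, 0], [639, 479], [0, 479]]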
using LiteQTL
using DelimitedFiles
using CSV
using DataFrames

function main()
    geno_file = joinpath(pathof(LiteQTL), "../..", "data", "processed", "spleen-bxd-genoprob.csv")
    pheno_file = joinpath(pathof(LiteQTL), "../..", "data", "processed", "spleen-pheno-nomissing.csv")
    gmap_file = joinpath(pathof(LiteQTL), "../..", "data", "processed", "gmap.csv")
    export_matrix = false
    output_file = "output.csv"
    rqtl_file = joinpath(pathof(LiteQTL), "../..", "data", "UTHSC_SPL_RMA_1210.zip")

    LiteQTL.set_blas_threads(16);

    # Read in data.
    G = get_geno_data(geno_file, Float64)
    Y = get_pheno_data(pheno_file, Float64, transposed=false)

    # Get the geno and pheno data dimensions.
    n = size(Y, 1)
    m = size(Y, 2)
    p = size(G, 2)
    println("******* Individuals n: $n, Traits m: $m, Markers p: $p ****************");

    # cpu_timing = benchmark(5, scan, Y, G, n; export_matrix);
    # println("CPU timing: $(cpu_timing[3])")

    # Run the analysis without covariates.
    @time lodc = scan(Y, G, export_matrix=export_matrix, maf_threshold=0.00, usegpu=false, lod_or_pval="lod");
    # lodg = scan(Y, G; usegpu=true)

    if !export_matrix
        gmap = CSV.read(gmap_file, DataFrame)
        idx = trunc.(Int, lodc[:, 1])
        gmap_lod = hcat(gmap[idx, :], DataFrame(lodc, [:idx, :maxlod]))
    end

    # Write output to file:
    # writedlm(joinpath(Base.@__DIR__, "..", "data", "results", output_file), lod, ',')

    # TODO: generate plot?
    # return lod
end

lod = main()
import Base.*, Base.+, Base.-, Base./, Base.show, Base.!=, Base.==, Base.<=, Base.<, Base.>, Base.>=, Base.divrem const z0 = "0" const z1 = "1" const flipordered = (z1 < z0) mutable struct Z s::String end Z() = Z(z0) Z(z::Z) = Z(z.s) pairlen(x::Z, y::Z) = max(length(x.s), length(y.s)) tolen(x::Z, n::Int) = (s = x.s; while length(s) < n s = z0 * s end; s) <(x::Z, y::Z) = (l = pairlen(x, y); flipordered ? tolen(x, l) > tolen(y, l) : tolen(x, l) < tolen(y, l)) >(x::Z, y::Z) = (l = pairlen(x, y); flipordered ? tolen(x, l) < tolen(y, l) : tolen(x, l) > tolen(y, l)) ==(x::Z, y::Z) = (l = pairlen(x, y); tolen(x, l) == tolen(y, l)) <=(x::Z, y::Z) = (l = pairlen(x, y); flipordered ? tolen(x, l) >= tolen(y, l) : tolen(x, l) <= tolen(y, l)) >=(x::Z, y::Z) = (l = pairlen(x, y); flipordered ? tolen(x, l) <= tolen(y, l) : tolen(x, l) >= tolen(y, l)) !=(x::Z, y::Z) = (l = pairlen(x, y); tolen(x, l) != tolen(y, l)) function tocanonical(z::Z) while occursin(z0 * z1 * z1, z.s) z.s = replace(z.s, z0 * z1 * z1 => z1 * z0 * z0) end len = length(z.s) if len > 1 && z.s[1:2] == z1 * z1 z.s = z1 * z0 * z0 * ((len > 2) ? z.s[3:end] : "") end while (len = length(z.s)) > 1 && string(z.s[1]) == z0 if len == 2 if z.s == z0 * z0 z.s = z0 elseif z.s == z0 * z1 z.s = z1 end else z.s = z.s[2:end] end end z end function inc(z) if z.s[end] == z0[1] z.s = z.s[1:end-1] * z1[1] elseif z.s[end] == z1[1] if length(z.s) > 1 if z.s[end-1:end] == z0 * z1 z.s = z.s[1:end-2] * z1 * z0 end else z.s = z1 * z0 end end tocanonical(z) end function dec(z) if z.s[end] == z1[1] z.s = z.s[1:end-1] * z0 else if (m = match(Regex(z1 * z0 * '+' * '$'), z.s)) != nothing len = length(m.match) if iseven(len) z.s = z.s[1:end-len] * (z0 * z1) ^ div(len, 2) else z.s = z.s[1:end-len] * (z0 * z1) ^ div(len, 2) * z0 end end end tocanonical(z) z end function +(x::Z, y::Z) a = Z(x.s) b = Z(y.s) while b.s != z0 inc(a) dec(b) end a end function -(x::Z, y::Z) a = Z(x.s) b = Z(y.s) while b.s != z0 dec(a) dec(b) end a end function *(x::Z, y::Z) if (x.s == z0) || (y.s == z0) return Z(z0) elseif x.s == z1 return Z(y.s) elseif y.s == z1 return Z(x.s) end a = Z(x.s) b = Z(z1) while b != y c = Z(z0) while c != x inc(a) inc(c) end inc(b) end a end function divrem(x::Z, y::Z) if y.s == z0 throw("Zeckendorf division by 0") elseif (y.s == z1) || (x.s == z0) return Z(x.s) end a = Z(x.s) b = Z(y.s) c = Z(z0) while a > b a = a - b inc(c) end tocanonical(c), tocanonical(a) end function /(x::Z, y::Z) a, _ = divrem(x, y) a end show(io::IO, z::Z) = show(io, parse(BigInt, tocanonical(z).s)) function zeckendorftest() a = Z("10") b = Z("1001") c = Z("1000") d = Z("10101") println("Addition:") x = a println(x += a) println(x += a) println(x += b) println(x += c) println(x += d) println("\nSubtraction:") x = Z("1000") println(x - Z("101")) x = Z("10101010") println(x - Z("1010101")) println("\nMultiplication:") x = Z("1001") y = Z("101") println(x * y) println(Z("101010") * y) println("\nDivision:") x = Z("1000101") y = Z("101") println(x / y) println(divrem(x, y)) end zeckendorftest()
With the release of FFXIV patch 3.4 - Soul Surrender, many players complain about its content. I want to give some suggestions about this new content for FFXIV.

To people who complain about the same-y content: I think the main idea is that the "1 raid, 1 primal, 2 dungeons per patch" is the base of the patch, and the other things that come with the patch are the things they are trying to do differently. Do take note that in the course of 3 major patches and 3 mini patches, the team has implemented Lord of Verminion, the Diadem, the Palace of the Dead, the Aquapolis, and the Feast. That is a lot of content, especially compared to FFXIV's close competitor, World of Warcraft.

1. Introduce a new post-expansion zone where people can do daily quests and goals (think of the Isle of Thunder/Timeless Isle/Tanaan Jungle in WoW). Maybe reintroduce the Diadem as a static, non-instanced zone with a daily quest hub that offers crafting rewards?

2. They can also try mixing up gameplay by adding more ways to play the game other than battle or trading quests. For example, think of the Beast Tribe quests where you ride Sanuwas or Kongamatos, but used instead to attack Garlean structures, or something similar.

3. Introduce a server-wide goal or effort for people to work towards, similar to the Gates of Ahn'Qiraj event back in Vanilla WoW, or the Isle of Thunder progression in Mists. Not only will this foster server cooperation, but it can encourage people to pick up or do things they have not done in a long time.

I have a couple of other suggestions in mind (i.e. a midcore raiding difficulty level), but specifically for new ideas, I think that's a good set of ideas to start with.

If you want to buy cheap FFXIV Gil, FFXIVGilBuy is a good choice, with instant delivery and the lowest prices!
I am guilty of being a nail extremist. If you have the same affliction as me, you will agree that hands and nails are nothing to joke about and need to be taken as seriously as hair is by the hair-obsessives in this world. I am not one of those, thankfully. If you do not have this condition you might find the following a little disconcerting.

Since I was sixteen I have been doing my nails myself in all manner of ways, with my speciality back then being french mani. French mani is now not acceptable. I am sure we will see its resurrection at some point, but for the time being it means years in fashion jail, so let's not talk about it.

I grew up being so OCD about how nails should be done that I could no longer face saying, "Ah - that's great!", to a lady at a nail salon that had practically had a seizure with the nail brush on my hand. Even worse; trying to explain what a 'half-moon' is and why I want one nail on each hand a different colour than the others. I became an annoyance to them and their vacuous expressions an annoyance to me. This had to end.

As a result of these symptoms, I have taken to either doing my own nails, or sharing them with a very select circle of nail virtuosos, who can do their magic on them.

The 'Lucky Stripe' started with Art Deco in my head and a 'V' shape up the nail, which then morphed into a straight stripe up the nail, to keep things more 2013/minimal. So anyways, if you guys want to join in with the stripes go on Instagram, follow --> marinalondon and #luckystripe.

Hands show the first signs of ageing and are one of your expressive tools when talking, so let's keep them healthy and jazzy, you guys.
James Pollock, in his final report as Mint Director in 1873, advocated limiting striking of gold dollars to depositors who specifically requested it. "The gold dollar is not a convenient coin, on account of its small size, and it suffers more proportionately from abrasion than larger coins." His successors called for its abolition, with James P. Kimball, before he left office in 1889, writing to Congress that except as jewelry, "little practical use has been found for this coin". Later that year, the new director, Edward O. Leech, issued a report stating that the gold dollar "is too small for circulation, and ... [is] used almost exclusively for the purposes of ornament". The last year in which the gold dollar was struck was 1889. Congress abolished the gold dollar, along with the three-cent nickel and three-dollar piece, by the Act of September 26, 1890.
# Copyright 2021 CR.Sparse Development Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import jax.numpy as jnp from jax import vmap, jit, lax from .defs import RecoverySolution, HTPState from cr.sparse import (hard_threshold, hard_threshold_sorted, build_signal_from_indices_and_values) import cr.sparse.dict as crdict import cr.sparse.lop as lop def matrix_solve(Phi, y, K, normalized=False, step_size=None, max_iters=None, res_norm_rtol=1e-4): """Solves the sparse recovery problem :math:`y = \\Phi x + e` using Hard Thresholding Pursuit for matrices """ ## Initialize some constants for the algorithm M, N = Phi.shape # squared norm of the signal y_norm_sqr = y.T @ y max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2) if not normalized and step_size is None: step_size = 0.98 / crdict.upper_frame_bound(Phi) if max_iters is None: max_iters = M min_iters = min(3*K, 20) def compute_step_size(h, I): h_I = h[I] Phi_I = Phi[:, I] # Step size calculation Ah = Phi_I @ h_I mu = h_I.T @ h_I / (Ah.T @ Ah) return mu def get_step_size(h, I): return compute_step_size(h, I) if normalized else step_size def init(): # Data for the previous approximation [r = y, x = 0] I_prev = jnp.arange(0, K) x_I_prev = jnp.zeros(K) r_norm_sqr_prev = y_norm_sqr # Assume previous estimate to be zero and conduct first iteration # compute the correlations of atoms with signal y h = Phi.T @ y mu = get_step_size(h, I_prev) # update x = mu * h # threshold I, x_I = hard_threshold(x, K) # Form the subdictionary of corresponding atoms Phi_I = Phi[:, I] # Compute new residual r = y - Phi_I @ x_I # Compute residual norm squared r_norm_sqr = r.T @ r return HTPState(x_I=x_I, I=I, r=r, r_norm_sqr=r_norm_sqr, iterations=1, I_prev=I_prev, x_I_prev=x_I_prev, r_norm_sqr_prev=r_norm_sqr_prev) def iteration(state): I_prev = state.I x_I_prev = state.x_I r_norm_sqr_prev = state.r_norm_sqr # compute the correlations of dictionary atoms with the residual h = Phi.T @ state.r # current approximation x = build_signal_from_indices_and_values(N, state.I, state.x_I) # Step size calculation mu = get_step_size(h, I_prev) # update x = x + mu * h # threshold I, x_I = hard_threshold_sorted(x, K) # Form the subdictionary of corresponding atoms Phi_I = Phi[:, I] # Solve least squares over the selected K indices x_I, _, _, _ = jnp.linalg.lstsq(Phi_I, y) # Compute new residual y_hat = Phi_I @ x_I r = y - y_hat # Compute residual norm squared r_norm_sqr = r.T @ r return HTPState(x_I=x_I, I=I, r=r, r_norm_sqr=r_norm_sqr, iterations=state.iterations+1, I_prev=I_prev, x_I_prev=x_I_prev, r_norm_sqr_prev=r_norm_sqr_prev ) def cond(state): # limit on residual norm a = state.r_norm_sqr > max_r_norm_sqr # limit on number of iterations b = state.iterations < max_iters c = jnp.logical_and(a, b) # checking if support is still changing d = jnp.any(jnp.not_equal(state.I, state.I_prev)) # consider support change only after some iterations d = jnp.logical_or(state.iterations < min_iters, d) c = jnp.logical_and(c,d) # overall condition return c state = lax.while_loop(cond, iteration, init()) 
return RecoverySolution(x_I=state.x_I, I=state.I, r=state.r, r_norm_sqr=state.r_norm_sqr, iterations=state.iterations) matrix_solve_jit = jit(matrix_solve, static_argnums=(2), static_argnames=("normalized", "step_size", "max_iters", "res_norm_rtol")) def operator_solve(Phi, y, K, normalized=False, step_size=None, max_iters=None, res_norm_rtol=1e-4): """Solves the sparse recovery problem :math:`y = \\Phi x + e` using Hard Thresholding Pursuit for linear operators """ ## Initialize some constants for the algorithm M, N = Phi.shape trans = Phi.trans # squared norm of the signal y_norm_sqr = y.T @ y max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2) if not normalized and step_size is None: step_size = 0.98 / lop.upper_frame_bound(Phi) if max_iters is None: max_iters = M min_iters = min(3*K, 20) def compute_step_size(h, I): h_I = h[I] Phi_I = Phi.columns(I) # Step size calculation Ah = Phi_I @ h_I mu = h_I.T @ h_I / (Ah.T @ Ah) return mu def get_step_size(h, I): return compute_step_size(h, I) if normalized else step_size def init(): # Data for the previous approximation [r = y, x = 0] I_prev = jnp.arange(0, K) x_I_prev = jnp.zeros(K) r_norm_sqr_prev = y_norm_sqr # Assume previous estimate to be zero and conduct first iteration # compute the correlations of atoms with signal y h = trans(y) mu = get_step_size(h, I_prev) # update x = mu * h # threshold I, x_I = hard_threshold(x, K) # Form the subdictionary of corresponding atoms Phi_I = Phi.columns(I) # Compute new residual r = y - Phi_I @ x_I # Compute residual norm squared r_norm_sqr = r.T @ r return HTPState(x_I=x_I, I=I, r=r, r_norm_sqr=r_norm_sqr, iterations=1, I_prev=I_prev, x_I_prev=x_I_prev, r_norm_sqr_prev=r_norm_sqr_prev) def iteration(state): I_prev = state.I x_I_prev = state.x_I r_norm_sqr_prev = state.r_norm_sqr # compute the correlations of dictionary atoms with the residual h = trans(state.r) # current approximation x = build_signal_from_indices_and_values(N, state.I, state.x_I) # Step size calculation mu = get_step_size(h, I_prev) # update x = x + mu * h # threshold I, x_I = hard_threshold_sorted(x, K) # Form the subdictionary of corresponding atoms Phi_I = Phi.columns(I) # Solve least squares over the selected K indices x_I, r_I_norms, rank_I, s_I = jnp.linalg.lstsq(Phi_I, y) # Compute new residual y_hat = Phi_I @ x_I r = y - y_hat # Compute residual norm squared r_norm_sqr = r.T @ r return HTPState(x_I=x_I, I=I, r=r, r_norm_sqr=r_norm_sqr, iterations=state.iterations+1, I_prev=I_prev, x_I_prev=x_I_prev, r_norm_sqr_prev=r_norm_sqr_prev ) def cond(state): # limit on residual norm a = state.r_norm_sqr > max_r_norm_sqr # limit on number of iterations b = state.iterations < max_iters c = jnp.logical_and(a, b) # checking if support is still changing d = jnp.any(jnp.not_equal(state.I, state.I_prev)) # consider support change only after some iterations d = jnp.logical_or(state.iterations < min_iters, d) c = jnp.logical_and(c,d) # overall condition return c state = lax.while_loop(cond, iteration, init()) return RecoverySolution(x_I=state.x_I, I=state.I, r=state.r, r_norm_sqr=state.r_norm_sqr, iterations=state.iterations) operator_solve_jit = jit(operator_solve, static_argnums=(0, 2), static_argnames=("normalized", "step_size", "max_iters", "res_norm_rtol")) solve = operator_solve_jit
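For orientation, a minimal usage sketch for the matrix path above (not part of the module): the dictionary, sparse signal, sizes, and seed are all illustrative, and it assumes cr.sparse is installed with the helpers imported at the top of the module.

# Hypothetical usage sketch for matrix_solve; sizes and seed are illustrative.
import jax.numpy as jnp
from jax import random

key = random.PRNGKey(0)
M, N, K = 64, 256, 4
Phi = random.normal(key, (M, N)) / jnp.sqrt(M)   # random Gaussian dictionary
x0 = jnp.zeros(N).at[:K].set(1.0)                # a K-sparse signal
y = Phi @ x0                                     # noiseless measurements
sol = matrix_solve(Phi, y, K, normalized=True)   # normalized (adaptive) step size
print(sol.I, sol.r_norm_sqr, sol.iterations)     # recovered support, residual, iteration count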
lemma continuous_on_iff: "continuous_on s f \<longleftrightarrow> (\<forall>x\<in>s. \<forall>e>0. \<exists>d>0. \<forall>x'\<in>s. dist x' x < d \<longrightarrow> dist (f x') (f x) < e)"
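Read informally (a restatement for orientation, not part of the theory file), the lemma says that continuity of $f$ on $s$ is exactly pointwise $\varepsilon$-$\delta$ continuity at every point of $s$: $$\forall x \in s.\ \forall \varepsilon > 0.\ \exists \delta > 0.\ \forall x' \in s.\ \mathrm{dist}(x', x) < \delta \longrightarrow \mathrm{dist}(f\,x', f\,x) < \varepsilon$$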
import numpy as np import pandas as pd import tensorflow as tf import tensorflow_hub as hub from sklearn.model_selection import KFold class embedding_model(): def __init__(self, file_path:str): self.data = pd.read_csv(file_path) self.df = pd.DataFrame(self.data) self.df['username'] = self.df['username'].fillna('') self.df = self.df.sample(frac=1).reset_index(drop=True) self.kfold = KFold(n_splits=5) def feature_input(self): self.content = self.df['text'].values self.type = self.df['type'].values def embedding_feature(self): self.text_embedding = hub.text_embedding_column( "content", module_spec="https://tfhub.dev/google/nnlm-en-dim128-with-normalization/1", trainable=False ) def model_setup(self): self.binary_label_head = tf.contrib.estimator.binary_classification_head( loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE ) self.estimator = tf.estimator.DNNEstimator( head=self.binary_label_head, hidden_units=[128,64], feature_columns=[self.text_embedding], batch_norm=True, model_dir="./estimator_cred_score" ) def train_model(self): for train_index, test_index in self.kfold.split(self.type): self.train_content = self.content[train_index].astype(np.str) self.train_type = self.type[train_index].astype(np.int32) self.test_content = self.content[test_index].astype(np.str) self.test_type = self.type[test_index].astype(np.int32) features = { "content": self.train_content, } labels = self.train_type train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( features, labels, shuffle=False, batch_size=64, num_epochs=10 ) print("start training") self.estimator.train(input_fn=train_input_fn) def restore_saved_model(self): print("start restoring model") self.estimator = tf.estimator.DNNClassifier( hidden_units=[128,64], feature_columns=[self.text_embedding], warm_start_from="./estimator_cred_score" ) def test_model(self): eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({ "content": self.test_content, }, self.test_type, shuffle=False ) print("start predicting") return self.estimator.evaluate(input_fn=eval_input_fn) def predict_model(self, content:object): eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({ "content": content, }, shuffle=False ) print("start predicting") return self.estimator.predict(input_fn=eval_input_fn) class dnn_model(): def __init__(self, file_path:str): self.data = pd.read_csv(file_path) self.df = pd.DataFrame(self.data) self.df = self.df.sample(frac=1).reset_index(drop=True) self.kfold = KFold(n_splits=5) def feature_input(self): self.content = self.df['text'].values self.type = self.df['type'].values self.user_cred = self.df['user_credibility'].values self.user_verf = self.df['verified'].values def embedding_feature(self): self.text_embedding = hub.text_embedding_column( "content", module_spec="https://tfhub.dev/google/nnlm-en-dim128-with-normalization/1", # module_spec="https://tfhub.dev/google/Wiki-words-250-with-normalization/1", trainable=False ) self.user_cred_feature = tf.feature_column.numeric_column("user_credibility") self.user_verf_feature = tf.feature_column.numeric_column("user_verified") def model_setup(self): # self.binary_label_head = tf.contrib.estimator.binary_classification_head( # loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE # ) # self.estimator = tf.estimator.DNNEstimator( # head=self.binary_label_head, # hidden_units=[128,64], # feature_columns=[self.text_embedding, self.user_cred_feature, self.user_verf_feature], # batch_norm=True, # model_dir="./estimator_dnn" # ) self.estimator = tf.estimator.DNNClassifier( 
n_classes=2, hidden_units=[128,64], feature_columns=[self.text_embedding, self.user_cred_feature, self.user_verf_feature], batch_norm=True, model_dir="./estimator_new" ) # self.estimator = tf.estimator.DNNLinearCombinedClassifier( # dnn_feature_columns=[self.text_embedding, self.user_verf_feature, self.user_cred_feature], # dnn_hidden_units=[128,64], # batch_norm=True, # model_dir='./estimator_linear_classifier' # ) # self.estimator = tf.estimator.LinearClassifier( # feature_columns=[self.text_embedding, self.user_cred_feature, self.user_verf_feature], # optimizer='Adagrad', # model_dir='./estimator_linear' # ) def restore_saved_model(self): print("start restoring model") self.estimator = tf.estimator.DNNClassifier( hidden_units=[128,64], feature_columns=[self.text_embedding, self.user_cred_feature, self.user_verf_feature], warm_start_from="./estimator_dnn" ) def train_model(self): for train_index, test_index in self.kfold.split(self.type): self.train_content = self.content[train_index].astype(np.str) self.train_verf = self.user_verf[train_index].astype(np.float) self.train_cred = self.user_cred[train_index].astype(np.float) self.train_type = self.type[train_index].astype(np.int32) self.test_content = self.content[test_index].astype(np.str) self.test_verf = self.user_verf[test_index].astype(np.float) self.test_cred = self.user_cred[test_index].astype(np.float) self.test_type = self.type[test_index].astype(np.int32) features = { "content": self.train_content, "user_credibility": self.train_cred, "user_verified": self.train_verf } labels = self.train_type train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( features, labels, shuffle=False, batch_size=64, num_epochs=10 ) print("start training") self.estimator.train(input_fn=train_input_fn) def test_model(self): eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({ "content": self.test_content, "user_credibility": self.test_cred, "user_verified": self.test_verf }, self.test_type, shuffle=False ) print("start predicting") print(self.estimator.evaluate(input_fn=eval_input_fn)) def predict_model(self, content:list, verified:list, credibility:list): eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({ "content": np.array(content), "user_credibility": np.array(credibility), "user_verified": np.array(verified) }, shuffle=False ) print("start predicting") return self.estimator.predict(input_fn=eval_input_fn)
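A minimal driver sketch for the classes above, assuming a CSV with the columns the code reads ('text', 'type', 'user_credibility', 'verified'); the file name is hypothetical and the APIs are the TF1-era ones used in the module.

# Hypothetical driver for dnn_model; "tweets.csv" is an illustrative path.
model = dnn_model("tweets.csv")
model.feature_input()      # pull text, labels, and user features from the frame
model.embedding_feature()  # build the TF-Hub text embedding column
model.model_setup()        # construct the DNNClassifier estimator
model.train_model()        # 5-fold loop: train on each split
model.test_model()         # evaluate on the last held-out fold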
# Lambda School Data Science Module 143 ## Introduction to Bayesian Inference !['Detector! What would the Bayesian statistician say if I asked him whether the--' [roll] 'I AM A NEUTRINO DETECTOR, NOT A LABYRINTH GUARD. SERIOUSLY, DID YOUR BRAIN FALL OUT?' [roll] '... yes.'](https://imgs.xkcd.com/comics/frequentists_vs_bayesians.png) *[XKCD 1132](https://www.xkcd.com/1132/)* ## Prepare - Bayes' Theorem and the Bayesian mindset Bayes' theorem possesses a near-mythical quality - a bit of math that somehow magically evaluates a situation. But this mystique has more to do with its reputation and advanced applications than with the actual core of it - deriving it is actually remarkably straightforward. ### The Law of Total Probability By definition, the total probability of all outcomes (events) of some variable (event space) $A$ is 1. That is: $$P(A) = \sum_n P(A_n) = 1$$ The law of total probability takes this further, considering two variables ($A$ and $B$) and relating their marginal probabilities (their likelihoods considered independently, without reference to one another) and their conditional probabilities (their likelihoods considered jointly). A marginal probability is simply notated as e.g. $P(A)$, while a conditional probability is notated $P(A|B)$, which reads "probability of $A$ *given* $B$". The law of total probability states: $$P(A) = \sum_n P(A | B_n) P(B_n)$$ In words - the total probability of $A$ is equal to the sum of the conditional probability of $A$ on any given event $B_n$ times the probability of that event $B_n$, summed over all possible events in $B$. ### The Law of Conditional Probability What's the probability of something conditioned on something else? To determine this we have to go back to set theory and think about the intersection of sets: The formula for actual calculation: $$P(A|B) = \frac{P(A \cap B)}{P(B)}$$ Think of the overall rectangle as the whole probability space, $A$ as the left circle, $B$ as the right circle, and their intersection as the red area. Try to visualize the ratio being described in the above formula, and how it is different from just $P(A)$ (not conditioned on $B$). We can see how this relates back to the law of total probability - multiply both sides by $P(B)$ and you get $P(A|B)P(B) = P(A \cap B)$ - replaced back into the law of total probability we get $P(A) = \sum_n P(A \cap B_n)$. This may not seem like an improvement at first, but try to relate it back to the above picture - if you think of sets as physical objects, we're saying that the total probability of $A$ is all the little pieces of it intersected with each $B_n$, added together. The conditional probability is then just one of those pieces again, divided by the probability of $B$ itself happening in the first place. ### Bayes Theorem Here it is, the seemingly magic tool: $$P(A|B) = \frac{P(B|A)P(A)}{P(B)}$$ In words - the probability of $A$ conditioned on $B$ is the probability of $B$ conditioned on $A$, times the probability of $A$, divided by the probability of $B$. The unconditioned probabilities are referred to as "prior beliefs", and the conditioned probabilities as "updated" beliefs. Why is this important? Scroll back up to the XKCD example - the Bayesian statistician draws a less absurd conclusion because their prior belief in the likelihood that the sun will go nova is extremely low. So, even when updated based on evidence from a detector that is $35/36 = 0.972$ accurate, the prior belief doesn't shift enough to change their overall opinion.
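To make the update concrete, here is a small sketch of the XKCD scenario in Python (the prior value is an illustrative guess, since the comic gives none; the detector lies only on double sixes, with probability 1/36):

```python
def bayes(p_b_given_a, p_a, p_b):
    """P(A|B) = P(B|A) * P(A) / P(B)"""
    return p_b_given_a * p_a / p_b

p_nova = 1e-8                # illustrative prior: the sun going nova is wildly unlikely
p_yes_given_nova = 35 / 36   # detector tells the truth
p_yes_given_quiet = 1 / 36   # detector lies (rolled double sixes)

# law of total probability for the denominator
p_yes = p_yes_given_nova * p_nova + p_yes_given_quiet * (1 - p_nova)
print(bayes(p_yes_given_nova, p_nova, p_yes))  # ~3.5e-07: the posterior barely moves
```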
There are many examples of Bayes' theorem - one less absurd example is to apply it to [breathalyzer tests](https://www.bayestheorem.net/breathalyzer-example/). You may think that a breathalyzer test that is 100% accurate for true positives (detecting somebody who is drunk) is pretty good, but what if it also has 8% false positives (indicating somebody is drunk when they're not)? And furthermore, the rate of drunk driving (and thus our prior belief) is 1/1000. What is the likelihood somebody really is drunk if they test positive? Some may guess it's 92% - the difference between the true positives and the false positives. But we have a prior belief of the background/true rate of drunk driving. Sounds like a job for Bayes' theorem! $$ \begin{aligned} P(Drunk | Positive) &= \frac{P(Positive | Drunk)P(Drunk)}{P(Positive)} \\ &= \frac{1 \times 0.001}{0.08} \\ &= 0.0125 \end{aligned} $$ In other words, the likelihood that somebody is drunk given they tested positive with a breathalyzer in this situation is only 1.25% - probably much lower than you'd guess. (Strictly, the denominator comes from the law of total probability, $P(Positive) = 1 \times 0.001 + 0.08 \times 0.999 \approx 0.0809$, which gives essentially the same answer, about 1.24%; the example rounds it to the 8% false-positive rate.) This is why, in practice, it's important to have a repeated test to confirm (the probability of two false positives in a row is $0.08 \times 0.08 = 0.0064$, much lower), and Bayes' theorem has been relevant in court cases where proper consideration of evidence was important. ## Derive Bayes' Rule \begin{align} P(A|B) &= \frac{P(A \cap B)}{P(B)}\\ \Rightarrow P(A|B)P(B) &= P(A \cap B)\\ P(B|A) &= \frac{P(B \cap A)}{P(A)}\\ \Rightarrow P(B|A)P(A) &= P(B \cap A)\\ P(A \cap B) &= P(B \cap A)\\ \Rightarrow P(A|B)P(B) &= P(B|A)P(A)\\ P(A|B) &= \frac{P(B|A) \times P(A)}{P(B)} \end{align} ## Live Lecture - Deriving Bayes' Theorem, Calculating Bayesian Confidence Notice that $P(A|B)$ appears in the above laws - in Bayesian terms, this is the belief in $A$ updated for the evidence $B$. So all we need to do is solve for this term to derive Bayes' theorem. Let's do it together! ```python # Activity 2 - Use SciPy to calculate Bayesian confidence intervals # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bayes_mvs.html#scipy.stats.bayes_mvs from scipy import stats import numpy as np np.random.seed(seed=42) coinflips = np.random.binomial(n=1, p=.5, size=100) print(coinflips) ``` [0 1 1 1 0 0 0 1 1 1 0 1 1 0 0 0 0 1 0 0 1 0 0 0 0 1 0 1 1 0 1 0 0 1 1 1 0 0 1 0 0 0 0 1 0 1 0 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 1 0 0 1 0 1 0 1 1 0 0 1 1 1 1 0 0 0 1 1 0 0 0 0 1 1 1 0 0 1 1 1 1 0 1 0 0 0] ```python def confidence_interval(data, confidence=.95): n = len(data) mean = sum(data)/n data = np.array(data) stderr = stats.sem(data) interval = stderr * stats.t.ppf((1 + confidence) / 2.0, n-1) return (mean, mean-interval, mean+interval) ``` ```python confidence_interval(coinflips) ``` (0.47, 0.3704689875017368, 0.5695310124982632) ```python help(stats.bayes_mvs) ``` Help on function bayes_mvs in module scipy.stats.morestats: bayes_mvs(data, alpha=0.9) Bayesian confidence intervals for the mean, var, and std. Parameters ---------- data : array_like Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. Requires 2 or more data points. alpha : float, optional Probability that the returned confidence interval contains the true parameter. Returns ------- mean_cntr, var_cntr, std_cntr : tuple The three results are for the mean, variance and standard deviation, respectively.
Each result is a tuple of the form:: (center, (lower, upper)) with `center` the mean of the conditional pdf of the value given the data, and `(lower, upper)` a confidence interval, centered on the median, containing the estimate to a probability ``alpha``. See Also -------- mvsdist Notes ----- Each tuple of mean, variance, and standard deviation estimates represent the (center, (lower, upper)) with center the mean of the conditional pdf of the value given the data and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability ``alpha``. Converts data to 1-D and assumes all data has the same mean and variance. Uses Jeffrey's prior for variance and std. Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, 2006. Examples -------- First a basic example to demonstrate the outputs: >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.bayes_mvs(data) >>> mean Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467)) >>> var Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...)) >>> std Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631)) Now we generate some normally distributed random data, and get estimates of mean and standard deviation with 95% confidence intervals for those estimates: >>> n_samples = 100000 >>> data = stats.norm.rvs(size=n_samples) >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95) >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.hist(data, bins=100, density=True, label='Histogram of data') >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean') >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r', ... alpha=0.2, label=r'Estimated mean (95% limits)') >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale') >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2, ... label=r'Estimated scale (95% limits)') >>> ax.legend(fontsize=10) >>> ax.set_xlim([-4, 4]) >>> ax.set_ylim([0, 0.5]) >>> plt.show() ```python #0.37046898750173674 #0.3704689875017368 stats.bayes_mvs(coinflips, alpha=.95) ``` (Mean(statistic=0.47, minmax=(0.37046898750173674, 0.5695310124982632)), Variance(statistic=0.25680412371134015, minmax=(0.1939698977025208, 0.3395533426586547)), Std_dev(statistic=0.5054540733507159, minmax=(0.44042013771229943, 0.5827120581030176))) ```python coinflips_mean_dist, _, _ = stats.mvsdist(coinflips) coinflips_mean_dist ``` <scipy.stats._distn_infrastructure.rv_frozen at 0x2afcb4869b0> ```python help(coinflips_mean_dist) ``` Help on rv_frozen in module scipy.stats._distn_infrastructure object: class rv_frozen(builtins.object) | rv_frozen(dist, *args, **kwds) | | # Frozen RV class | | Methods defined here: | | __init__(self, dist, *args, **kwds) | Initialize self. See help(type(self)) for accurate signature. 
| | cdf(self, x) | | entropy(self) | | expect(self, func=None, lb=None, ub=None, conditional=False, **kwds) | | interval(self, alpha) | | isf(self, q) | | logcdf(self, x) | | logpdf(self, x) | | logpmf(self, k) | | logsf(self, x) | | mean(self) | | median(self) | | moment(self, n) | | pdf(self, x) | | pmf(self, k) | | ppf(self, q) | | rvs(self, size=None, random_state=None) | | sf(self, x) | | stats(self, moments='mv') | | std(self) | | var(self) | | ---------------------------------------------------------------------- | Data descriptors defined here: | | __dict__ | dictionary for instance variables (if defined) | | __weakref__ | list of weak references to the object (if defined) | | random_state ```python coinflips_mean_dist.rvs(100) ``` array([0.47447628, 0.51541425, 0.54722018, 0.4589882 , 0.51501386, 0.53819192, 0.43382292, 0.53546659, 0.47026173, 0.44967562, 0.4621107 , 0.42691904, 0.37324325, 0.47531437, 0.46052277, 0.48711257, 0.52456771, 0.43332181, 0.49545882, 0.44671454, 0.47520117, 0.47047251, 0.41828918, 0.50159477, 0.42965501, 0.45273383, 0.48045849, 0.45342529, 0.48238344, 0.53966291, 0.48230241, 0.48073422, 0.48553525, 0.47962228, 0.41274185, 0.42892633, 0.5170948 , 0.42678096, 0.42249309, 0.51499109, 0.47059199, 0.39903942, 0.41790336, 0.46406817, 0.42232382, 0.42163269, 0.47848227, 0.48232842, 0.4731858 , 0.51077244, 0.3957508 , 0.48504646, 0.49014295, 0.53252732, 0.45495376, 0.47883978, 0.60393033, 0.4492549 , 0.44797902, 0.54782121, 0.43380002, 0.5760073 , 0.36941266, 0.44467418, 0.4939245 , 0.45278835, 0.55635162, 0.48695459, 0.39080983, 0.45948606, 0.2941779 , 0.35950718, 0.44805696, 0.4725126 , 0.42218381, 0.45985418, 0.47545393, 0.44317753, 0.46267013, 0.4458753 , 0.44204707, 0.51334913, 0.50914181, 0.49923748, 0.46895674, 0.43892798, 0.45984946, 0.44984632, 0.53560791, 0.45865723, 0.48646824, 0.55937503, 0.41464303, 0.50701457, 0.46934196, 0.37681534, 0.42748113, 0.49812825, 0.48278895, 0.4964763 ]) ```python import pandas as pd pd.DataFrame(coinflips).describe() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>0</th> </tr> </thead> <tbody> <tr> <th>count</th> <td>100.000000</td> </tr> <tr> <th>mean</th> <td>0.470000</td> </tr> <tr> <th>std</th> <td>0.501614</td> </tr> <tr> <th>min</th> <td>0.000000</td> </tr> <tr> <th>25%</th> <td>0.000000</td> </tr> <tr> <th>50%</th> <td>0.000000</td> </tr> <tr> <th>75%</th> <td>1.000000</td> </tr> <tr> <th>max</th> <td>1.000000</td> </tr> </tbody> </table> </div> ## Assignment - Code it up! Most of the above was pure math - now write Python code to reproduce the results! This is purposefully open ended - you'll have to think about how you should represent probabilities and events. You can and should look things up, and as a stretch goal - refactor your code into helpful reusable functions! Specific goals/targets: 1. Write a function `def prob_drunk_given_positive(prob_drunk_prior, prob_positive, prob_positive_drunk)` that reproduces the example from lecture, and use it to calculate and visualize a range of situations 2. Explore `scipy.stats.bayes_mvs` - read its documentation, and experiment with it on data you've tested in other ways earlier this week 3. Create a visualization comparing the results of a Bayesian approach to a traditional/frequentist approach 4. 
In your own words, summarize the difference between Bayesian and Frequentist statistics If you're unsure where to start, check out [this blog post of Bayes theorem with Python](https://dataconomy.com/2015/02/introduction-to-bayes-theorem-with-python/) - you could and should create something similar! Stretch goals: - Apply a Bayesian technique to a problem you previously worked (in an assignment or project work) on from a frequentist (standard) perspective - Check out [PyMC3](https://docs.pymc.io/) (note this goes beyond hypothesis tests into modeling) - read the guides and work through some examples - Take PyMC3 further - see if you can build something with it! ```python import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt import scipy.stats as stats ``` ## Problem 1 ```python def prob_drunk_given_positive(prob_positive_drunk, prob_drunk_prior, prob_positive): test1 = (prob_positive_drunk * prob_drunk_prior) / prob_positive test2 = (prob_positive_drunk * prob_drunk_prior) / (prob_positive)**2 result1 = 'Probability a person is drunk, given one failed breathalyzer test:', test1 result2 = 'Probability a person is drunk, given two failed breathalyzer tests:', test2 return result1, result2 prob_drunk_given_positive(1, 0.001, 0.08) ``` (('Probability a person is drunk, given one failed breathalyzer test:', 0.0125), ('Probability a person is drunk, given two failed breathalyzer tests:', 0.15625)) ## Problem 2 ```python url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data' columns = ['class_name', 'handicapped_infants', 'water_project_cost_sharing', 'adoption_of_the_budget_resolution', 'physician_fee_freeze', 'el_salvador_aid', 'religious_groups_in_schools', 'anti_satellite_test_ban', 'aid_to_nicaraguan_contras', 'mx_missile', 'immigration', 'synfuels_corporation_cutback', 'education_spending', 'superfund_right_to_sue', 'crime', 'duty_free_exports', 'export_administration_act_south_africa'] df = pd.read_csv(url, header=None, names=columns, na_values='?').set_index('class_name') df.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>handicapped_infants</th> <th>water_project_cost_sharing</th> <th>adoption_of_the_budget_resolution</th> <th>physician_fee_freeze</th> <th>el_salvador_aid</th> <th>religious_groups_in_schools</th> <th>anti_satellite_test_ban</th> <th>aid_to_nicaraguan_contras</th> <th>mx_missile</th> <th>immigration</th> <th>synfuels_corporation_cutback</th> <th>education_spending</th> <th>superfund_right_to_sue</th> <th>crime</th> <th>duty_free_exports</th> <th>export_administration_act_south_africa</th> </tr> <tr> <th>class_name</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>republican</th> <td>n</td> <td>y</td> <td>n</td> <td>y</td> <td>y</td> <td>y</td> <td>n</td> <td>n</td> <td>n</td> <td>y</td> <td>NaN</td> <td>y</td> <td>y</td> <td>y</td> <td>n</td> <td>y</td> </tr> <tr> <th>republican</th> <td>n</td> <td>y</td> <td>n</td> <td>y</td> <td>y</td> <td>y</td> <td>n</td> <td>n</td> <td>n</td> <td>n</td> <td>n</td> <td>y</td> <td>y</td> <td>y</td> <td>n</td> <td>NaN</td> </tr> <tr> <th>democrat</th> <td>NaN</td> <td>y</td> 
<td>y</td> <td>NaN</td> <td>y</td> <td>y</td> <td>n</td> <td>n</td> <td>n</td> <td>n</td> <td>y</td> <td>n</td> <td>y</td> <td>y</td> <td>n</td> <td>n</td> </tr> <tr> <th>democrat</th> <td>n</td> <td>y</td> <td>y</td> <td>n</td> <td>NaN</td> <td>y</td> <td>n</td> <td>n</td> <td>n</td> <td>n</td> <td>y</td> <td>n</td> <td>y</td> <td>n</td> <td>n</td> <td>y</td> </tr> <tr> <th>democrat</th> <td>y</td> <td>y</td> <td>y</td> <td>n</td> <td>y</td> <td>y</td> <td>n</td> <td>n</td> <td>n</td> <td>n</td> <td>y</td> <td>NaN</td> <td>y</td> <td>y</td> <td>y</td> <td>y</td> </tr> </tbody> </table> </div> ```python # Removed nan values because bayes_mvs doesn't have an omit param df = df.dropna() ``` ```python df = df.replace({'y': 1, 'n': 0}) ``` ```python rep = df.loc['republican'] dem = df.loc['democrat'] ``` ```python rep.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>handicapped_infants</th> <th>water_project_cost_sharing</th> <th>adoption_of_the_budget_resolution</th> <th>physician_fee_freeze</th> <th>el_salvador_aid</th> <th>religious_groups_in_schools</th> <th>anti_satellite_test_ban</th> <th>aid_to_nicaraguan_contras</th> <th>mx_missile</th> <th>immigration</th> <th>synfuels_corporation_cutback</th> <th>education_spending</th> <th>superfund_right_to_sue</th> <th>crime</th> <th>duty_free_exports</th> <th>export_administration_act_south_africa</th> </tr> <tr> <th>class_name</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>republican</th> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>republican</th> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>republican</th> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> </tr> <tr> <th>republican</th> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>1</td> </tr> <tr> <th>republican</th> <td>0</td> <td>1</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> </tr> </tbody> </table> </div> ```python dem.head() ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>handicapped_infants</th> <th>water_project_cost_sharing</th> <th>adoption_of_the_budget_resolution</th> <th>physician_fee_freeze</th> <th>el_salvador_aid</th> <th>religious_groups_in_schools</th> <th>anti_satellite_test_ban</th> <th>aid_to_nicaraguan_contras</th> <th>mx_missile</th> <th>immigration</th> <th>synfuels_corporation_cutback</th> <th>education_spending</th> 
<th>superfund_right_to_sue</th> <th>crime</th> <th>duty_free_exports</th> <th>export_administration_act_south_africa</th> </tr> <tr> <th>class_name</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>democrat</th> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>1</td> </tr> <tr> <th>democrat</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> </tr> <tr> <th>democrat</th> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> </tr> <tr> <th>democrat</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> </tr> <tr> <th>democrat</th> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> <td>1</td> <td>0</td> <td>1</td> <td>0</td> <td>0</td> <td>0</td> <td>1</td> <td>1</td> </tr> </tbody> </table> </div> ```python def confidence_interval(data, confidence=0.95): ''' Calculate a confidence interval around a sample mean for given data. using t-distribution and two tailed test, default 95% confidence. Arguments: data = iterable (list or np array) of sample observations confidence - level of confidence for interval ''' data = np.array(data) mean = np.mean(data) n = len(data) stderr = stats.sem(data) interval = stderr * stats.t.ppf((1 + confidence) / 2., n-1) return (mean, mean - interval, mean + interval) def report_confidence_interval(confidence_interval): ''' Return a string with a pretty report of a confidence interval Arguments: confidence_interval - a tuple of (mean, lower bount, upper bound) Returns: None, but prints to screen the report ''' # print('Mean: {}'.format(confidence_interval[0])) # print('Lower bound: {}'.format(confidence_interval[1])) # print('Upper bound: {}'.format(confidence_interval[2])) s = 'our mean lies in the interval [{:.2}, {:.2}]'.format( confidence_interval[1], confidence_interval[2]) return s ``` ```python stats.bayes_mvs(rep['handicapped_infants'], alpha=0.95) ``` (Mean(statistic=0.21296296296296297, minmax=(0.13450349074958223, 0.2914224351763437)), Variance(statistic=0.1723985890652557, minmax=(0.13163384272877396, 0.22552107883595443)), Std_dev(statistic=0.4142216885759803, minmax=(0.3628137851967231, 0.4748905967019713))) ```python confidence_interval(rep['handicapped_infants']) ``` (0.21296296296296297, 0.13450349074958223, 0.2914224351763437) ```python rep.describe().T ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>count</th> <th>mean</th> <th>std</th> <th>min</th> <th>25%</th> <th>50%</th> <th>75%</th> <th>max</th> </tr> </thead> <tbody> <tr> <th>handicapped_infants</th> <td>108.0</td> <td>0.212963</td> <td>0.411310</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>water_project_cost_sharing</th> <td>108.0</td> <td>0.472222</td> 
<td>0.501555</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>adoption_of_the_budget_resolution</th> <td>108.0</td> <td>0.157407</td> <td>0.365882</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>physician_fee_freeze</th> <td>108.0</td> <td>0.990741</td> <td>0.096225</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>el_salvador_aid</th> <td>108.0</td> <td>0.953704</td> <td>0.211106</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>religious_groups_in_schools</th> <td>108.0</td> <td>0.870370</td> <td>0.337461</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>anti_satellite_test_ban</th> <td>108.0</td> <td>0.268519</td> <td>0.445255</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>aid_to_nicaraguan_contras</th> <td>108.0</td> <td>0.148148</td> <td>0.356903</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>mx_missile</th> <td>108.0</td> <td>0.138889</td> <td>0.347443</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>immigration</th> <td>108.0</td> <td>0.574074</td> <td>0.496788</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>synfuels_corporation_cutback</th> <td>108.0</td> <td>0.157407</td> <td>0.365882</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>education_spending</th> <td>108.0</td> <td>0.851852</td> <td>0.356903</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>superfund_right_to_sue</th> <td>108.0</td> <td>0.842593</td> <td>0.365882</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>crime</th> <td>108.0</td> <td>0.981481</td> <td>0.135445</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>duty_free_exports</th> <td>108.0</td> <td>0.111111</td> <td>0.315735</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>export_administration_act_south_africa</th> <td>108.0</td> <td>0.666667</td> <td>0.473602</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> </tbody> </table> </div> ```python stats.bayes_mvs(dem['handicapped_infants'], alpha=0.95) ``` (Mean(statistic=0.5887096774193549, minmax=(0.5008854514528095, 0.6765339033859002)), Variance(statistic=0.24813383097840572, minmax=(0.1929709352919263, 0.3187452362753357)), Std_dev(statistic=0.4971022146015008, minmax=(0.4392845721077925, 0.5645752706905747))) ### Tuple unpacking: this will allow me to add the bayesian mean, upper bound, and lower bound to the comparison table for problem 3. 
```python a, b, c = stats.bayes_mvs(dem['handicapped_infants'], alpha=0.95) ``` ```python print(a) print(b) print(c) ``` Mean(statistic=0.5887096774193549, minmax=(0.5008854514528095, 0.6765339033859002)) Variance(statistic=0.24813383097840572, minmax=(0.1929709352919263, 0.3187452362753357)) Std_dev(statistic=0.4971022146015008, minmax=(0.4392845721077925, 0.5645752706905747)) ```python d, e = a ``` ```python print(d) print(e) ``` 0.5887096774193549 (0.5008854514528095, 0.6765339033859002) ```python f,g = e print(f) print(g) ``` 0.5008854514528095 0.6765339033859002 ```python stats.ttest_1samp(dem['handicapped_infants'], 0.588710) ``` Ttest_1sampResult(statistic=-7.270529297663421e-06, pvalue=0.9999942107355586) ```python confidence_interval(dem['handicapped_infants']) ``` (0.5887096774193549, 0.5008854514528094, 0.6765339033859004) ```python dem.describe().T ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>count</th> <th>mean</th> <th>std</th> <th>min</th> <th>25%</th> <th>50%</th> <th>75%</th> <th>max</th> </tr> </thead> <tbody> <tr> <th>handicapped_infants</th> <td>124.0</td> <td>0.588710</td> <td>0.494064</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>water_project_cost_sharing</th> <td>124.0</td> <td>0.451613</td> <td>0.499672</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>adoption_of_the_budget_resolution</th> <td>124.0</td> <td>0.854839</td> <td>0.353692</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>physician_fee_freeze</th> <td>124.0</td> <td>0.048387</td> <td>0.215453</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>el_salvador_aid</th> <td>124.0</td> <td>0.201613</td> <td>0.402832</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>religious_groups_in_schools</th> <td>124.0</td> <td>0.443548</td> <td>0.498818</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>anti_satellite_test_ban</th> <td>124.0</td> <td>0.766129</td> <td>0.425008</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>aid_to_nicaraguan_contras</th> <td>124.0</td> <td>0.830645</td> <td>0.376587</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>mx_missile</th> <td>124.0</td> <td>0.790323</td> <td>0.408730</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>immigration</th> <td>124.0</td> <td>0.532258</td> <td>0.500983</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>synfuels_corporation_cutback</th> <td>124.0</td> <td>0.508065</td> <td>0.501963</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>education_spending</th> <td>124.0</td> <td>0.129032</td> <td>0.336596</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> </tr> <tr> <th>superfund_right_to_sue</th> <td>124.0</td> <td>0.290323</td> <td>0.455753</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>crime</th> <td>124.0</td> <td>0.346774</td> <td>0.477874</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>duty_free_exports</th> <td>124.0</td> <td>0.596774</td> 
<td>0.492535</td> <td>0.0</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> <tr> <th>export_administration_act_south_africa</th> <td>124.0</td> <td>0.943548</td> <td>0.231728</td> <td>0.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> <td>1.0</td> </tr> </tbody> </table> </div> ## Problem 3 ```python table = pd.DataFrame() def comparison_table(rep_df, dem_df): confidence = 0.95 for issue in rep_df.describe(): table.loc[issue, 'dem_mean'] = dem_df[issue].mean() table.loc[issue, 'rep_mean'] = rep_df[issue].mean() table.loc[issue, 'dem_interval'] = stats.sem(dem_df[issue])*stats.t.ppf((1+confidence)/2, dem_df[issue].size-1) table.loc[issue, 'rep_interval'] = stats.sem(rep_df[issue])*stats.t.ppf((1+confidence)/2, rep_df[issue].size-1) table.loc[issue, 'dem_ub_ci'] = table.loc[issue, 'dem_mean'] + table.loc[issue, 'dem_interval'] table.loc[issue, 'rep_ub_ci'] = table.loc[issue, 'rep_mean'] + table.loc[issue, 'rep_interval'] table.loc[issue, 'dem_lb_ci'] = table.loc[issue, 'dem_mean'] - table.loc[issue, 'dem_interval'] table.loc[issue, 'rep_lb_ci'] = table.loc[issue, 'rep_mean'] - table.loc[issue, 'rep_interval'] dem_bayes = stats.bayes_mvs(dem_df[issue], alpha=0.95) dem_a, dem_b, dem_c = dem_bayes dem_d, dem_e = dem_a dem_f, dem_g = dem_e table.loc[issue, 'dem_bayes_mean'] = dem_d table.loc[issue, 'dem_bayes_ub'] = dem_g table.loc[issue, 'dem_bayes_lb'] = dem_f rep_bayes = stats.bayes_mvs(rep_df[issue], alpha=0.95) rep_a, rep_b, rep_c = rep_bayes rep_d, rep_e = rep_a rep_f, rep_g = rep_e table.loc[issue, 'rep_bayes_mean'] = rep_d table.loc[issue, 'rep_bayes_ub'] = rep_g table.loc[issue, 'rep_bayes_lb'] = rep_f return table comparison_table(rep, dem) ``` <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>dem_mean</th> <th>rep_mean</th> <th>dem_interval</th> <th>rep_interval</th> <th>dem_ub_ci</th> <th>rep_ub_ci</th> <th>dem_lb_ci</th> <th>rep_lb_ci</th> <th>dem_bayes_mean</th> <th>dem_bayes_ub</th> <th>dem_bayes_lb</th> <th>rep_bayes_mean</th> <th>rep_bayes_ub</th> <th>rep_bayes_lb</th> </tr> </thead> <tbody> <tr> <th>handicapped_infants</th> <td>0.588710</td> <td>0.212963</td> <td>0.087824</td> <td>0.078459</td> <td>0.676534</td> <td>0.291422</td> <td>0.500885</td> <td>0.134503</td> <td>0.588710</td> <td>0.676534</td> <td>0.500885</td> <td>0.212963</td> <td>0.291422</td> <td>0.134503</td> </tr> <tr> <th>water_project_cost_sharing</th> <td>0.451613</td> <td>0.472222</td> <td>0.088821</td> <td>0.095674</td> <td>0.540434</td> <td>0.567896</td> <td>0.362792</td> <td>0.376548</td> <td>0.451613</td> <td>0.540434</td> <td>0.362792</td> <td>0.472222</td> <td>0.567896</td> <td>0.376548</td> </tr> <tr> <th>adoption_of_the_budget_resolution</th> <td>0.854839</td> <td>0.157407</td> <td>0.062872</td> <td>0.069794</td> <td>0.917711</td> <td>0.227201</td> <td>0.791967</td> <td>0.087614</td> <td>0.854839</td> <td>0.917711</td> <td>0.791967</td> <td>0.157407</td> <td>0.227201</td> <td>0.087614</td> </tr> <tr> <th>physician_fee_freeze</th> <td>0.048387</td> <td>0.990741</td> <td>0.038299</td> <td>0.018355</td> <td>0.086686</td> <td>1.009096</td> <td>0.010088</td> <td>0.972385</td> <td>0.048387</td> <td>0.086686</td> <td>0.010088</td> <td>0.990741</td> <td>1.009096</td> <td>0.972385</td> </tr> <tr> <th>el_salvador_aid</th> <td>0.201613</td> <td>0.953704</td> <td>0.071607</td> 
<td>0.040269</td> <td>0.273220</td> <td>0.993973</td> <td>0.130006</td> <td>0.913434</td> <td>0.201613</td> <td>0.273220</td> <td>0.130006</td> <td>0.953704</td> <td>0.993973</td> <td>0.913434</td> </tr> <tr> <th>religious_groups_in_schools</th> <td>0.443548</td> <td>0.870370</td> <td>0.088669</td> <td>0.064372</td> <td>0.532218</td> <td>0.934743</td> <td>0.354879</td> <td>0.805998</td> <td>0.443548</td> <td>0.532218</td> <td>0.354879</td> <td>0.870370</td> <td>0.934743</td> <td>0.805998</td> </tr> <tr> <th>anti_satellite_test_ban</th> <td>0.766129</td> <td>0.268519</td> <td>0.075549</td> <td>0.084935</td> <td>0.841678</td> <td>0.353453</td> <td>0.690580</td> <td>0.183584</td> <td>0.766129</td> <td>0.841678</td> <td>0.690580</td> <td>0.268519</td> <td>0.353453</td> <td>0.183584</td> </tr> <tr> <th>aid_to_nicaraguan_contras</th> <td>0.830645</td> <td>0.148148</td> <td>0.066942</td> <td>0.068081</td> <td>0.897587</td> <td>0.216229</td> <td>0.763704</td> <td>0.080067</td> <td>0.830645</td> <td>0.897587</td> <td>0.763704</td> <td>0.148148</td> <td>0.216229</td> <td>0.080067</td> </tr> <tr> <th>mx_missile</th> <td>0.790323</td> <td>0.138889</td> <td>0.072655</td> <td>0.066276</td> <td>0.862978</td> <td>0.205165</td> <td>0.717667</td> <td>0.072612</td> <td>0.790323</td> <td>0.862978</td> <td>0.717667</td> <td>0.138889</td> <td>0.205165</td> <td>0.072612</td> </tr> <tr> <th>immigration</th> <td>0.532258</td> <td>0.574074</td> <td>0.089054</td> <td>0.094765</td> <td>0.621312</td> <td>0.668839</td> <td>0.443204</td> <td>0.479309</td> <td>0.532258</td> <td>0.621312</td> <td>0.443204</td> <td>0.574074</td> <td>0.668839</td> <td>0.479309</td> </tr> <tr> <th>synfuels_corporation_cutback</th> <td>0.508065</td> <td>0.157407</td> <td>0.089228</td> <td>0.069794</td> <td>0.597293</td> <td>0.227201</td> <td>0.418836</td> <td>0.087614</td> <td>0.508065</td> <td>0.597293</td> <td>0.418836</td> <td>0.157407</td> <td>0.227201</td> <td>0.087614</td> </tr> <tr> <th>education_spending</th> <td>0.129032</td> <td>0.851852</td> <td>0.059833</td> <td>0.068081</td> <td>0.188865</td> <td>0.919933</td> <td>0.069199</td> <td>0.783771</td> <td>0.129032</td> <td>0.188865</td> <td>0.069199</td> <td>0.851852</td> <td>0.919933</td> <td>0.783771</td> </tr> <tr> <th>superfund_right_to_sue</th> <td>0.290323</td> <td>0.842593</td> <td>0.081014</td> <td>0.069794</td> <td>0.371337</td> <td>0.912386</td> <td>0.209309</td> <td>0.772799</td> <td>0.290323</td> <td>0.371337</td> <td>0.209309</td> <td>0.842593</td> <td>0.912386</td> <td>0.772799</td> </tr> <tr> <th>crime</th> <td>0.346774</td> <td>0.981481</td> <td>0.084946</td> <td>0.025837</td> <td>0.431721</td> <td>1.007318</td> <td>0.261828</td> <td>0.955645</td> <td>0.346774</td> <td>0.431721</td> <td>0.261828</td> <td>0.981481</td> <td>1.007318</td> <td>0.955645</td> </tr> <tr> <th>duty_free_exports</th> <td>0.596774</td> <td>0.111111</td> <td>0.087553</td> <td>0.060228</td> <td>0.684327</td> <td>0.171339</td> <td>0.509222</td> <td>0.050883</td> <td>0.596774</td> <td>0.684327</td> <td>0.509222</td> <td>0.111111</td> <td>0.171339</td> <td>0.050883</td> </tr> <tr> <th>export_administration_act_south_africa</th> <td>0.943548</td> <td>0.666667</td> <td>0.041192</td> <td>0.090342</td> <td>0.984740</td> <td>0.757009</td> <td>0.902357</td> <td>0.576325</td> <td>0.943548</td> <td>0.984740</td> <td>0.902357</td> <td>0.666667</td> <td>0.757009</td> <td>0.576325</td> </tr> </tbody> </table> </div> ```python table = table.reset_index() table.head() ``` <div> <style scoped> .dataframe tbody tr 
th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>index</th> <th>dem_mean</th> <th>rep_mean</th> <th>dem_interval</th> <th>rep_interval</th> <th>dem_ub_ci</th> <th>rep_ub_ci</th> <th>dem_lb_ci</th> <th>rep_lb_ci</th> <th>dem_bayes_mean</th> <th>dem_bayes_ub</th> <th>dem_bayes_lb</th> <th>rep_bayes_mean</th> <th>rep_bayes_ub</th> <th>rep_bayes_lb</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>handicapped_infants</td> <td>0.588710</td> <td>0.212963</td> <td>0.087824</td> <td>0.078459</td> <td>0.676534</td> <td>0.291422</td> <td>0.500885</td> <td>0.134503</td> <td>0.588710</td> <td>0.676534</td> <td>0.500885</td> <td>0.212963</td> <td>0.291422</td> <td>0.134503</td> </tr> <tr> <th>1</th> <td>water_project_cost_sharing</td> <td>0.451613</td> <td>0.472222</td> <td>0.088821</td> <td>0.095674</td> <td>0.540434</td> <td>0.567896</td> <td>0.362792</td> <td>0.376548</td> <td>0.451613</td> <td>0.540434</td> <td>0.362792</td> <td>0.472222</td> <td>0.567896</td> <td>0.376548</td> </tr> <tr> <th>2</th> <td>adoption_of_the_budget_resolution</td> <td>0.854839</td> <td>0.157407</td> <td>0.062872</td> <td>0.069794</td> <td>0.917711</td> <td>0.227201</td> <td>0.791967</td> <td>0.087614</td> <td>0.854839</td> <td>0.917711</td> <td>0.791967</td> <td>0.157407</td> <td>0.227201</td> <td>0.087614</td> </tr> <tr> <th>3</th> <td>physician_fee_freeze</td> <td>0.048387</td> <td>0.990741</td> <td>0.038299</td> <td>0.018355</td> <td>0.086686</td> <td>1.009096</td> <td>0.010088</td> <td>0.972385</td> <td>0.048387</td> <td>0.086686</td> <td>0.010088</td> <td>0.990741</td> <td>1.009096</td> <td>0.972385</td> </tr> <tr> <th>4</th> <td>el_salvador_aid</td> <td>0.201613</td> <td>0.953704</td> <td>0.071607</td> <td>0.040269</td> <td>0.273220</td> <td>0.993973</td> <td>0.130006</td> <td>0.913434</td> <td>0.201613</td> <td>0.273220</td> <td>0.130006</td> <td>0.953704</td> <td>0.993973</td> <td>0.913434</td> </tr> </tbody> </table> </div> ```python rep_freq = table[['index', 'rep_mean', 'rep_lb_ci', 'rep_ub_ci']] dem_freq = table[['index', 'dem_mean', 'dem_lb_ci', 'dem_ub_ci']] rep_bayes = table[['index', 'rep_bayes_mean', 'rep_bayes_lb', 'rep_bayes_ub']] dem_bayes = table[['index', 'dem_bayes_mean', 'dem_bayes_lb', 'dem_bayes_ub']] ``` ```python # Plot frequentist approach dem_means, dem_std = dem_freq['dem_mean'], (dem_freq['dem_ub_ci'] - dem_freq['dem_mean']); rep_means, rep_std = rep_freq['rep_mean'], (rep_freq['rep_ub_ci'] - rep_freq['rep_mean']); ind = np.arange(len(dem_freq)) width = 0.3 # create plot fig, ax = plt.subplots(figsize=(25, 9)) dem_rects = ax.bar(ind - width/2, dem_means, width, yerr=dem_std, color='blue', label='House Democrats'); rep_rects = ax.bar(ind + width/2, rep_means, width, yerr=rep_std, color='red', label='House Republicans'); # labeling ax.set_title('Distribution of House of Representatives Voting in 1984', fontsize=18); ax.set_ylabel('Probability to Vote Yes', fontsize=14); ax.set_xticks(ind); ax.set_xticklabels(('handicapped_infants', 'water_project_cost_sharing', 'adoption_of_the_budget_resolution', 'physician_fee_freeze', 'el_salvador_aid', 'religious_groups_in_schools', 'anti_satellite_test_ban', 'aid_to_nicaraguan_contras', 'mx_missile', 'immigration', 'synfuels_corporation_cutback', 'education_spending', 'superfund_right_to_sue', 'crime', 'duty_free_exports', 'export_administration_act_south_africa')); 
ax.set_xticklabels(ax.get_xticklabels(), rotation=60, horizontalalignment='right', fontsize=14); ax.legend(); ``` ```python # Plot Bayesian approach dem_means, dem_std = dem_bayes['dem_bayes_mean'], (dem_bayes['dem_bayes_ub'] - dem_bayes['dem_bayes_mean']); rep_means, rep_std = rep_bayes['rep_bayes_mean'], (rep_bayes['rep_bayes_ub'] - rep_bayes['rep_bayes_mean']); ind = np.arange(len(dem_bayes)) width = 0.3 # create plot fig, ax = plt.subplots(figsize=(25, 9)) dem_rects = ax.bar(ind - width/2, dem_means, width, yerr=dem_std, color='blue', label='House Democrats'); rep_rects = ax.bar(ind + width/2, rep_means, width, yerr=rep_std, color='red', label='House Republicans'); # labeling ax.set_title('Distribution of House of Representatives Voting in 1984', fontsize=18); ax.set_ylabel('Probability to Vote Yes', fontsize=14); ax.set_xticks(ind); ax.set_xticklabels(('handicapped_infants', 'water_project_cost_sharing', 'adoption_of_the_budget_resolution', 'physician_fee_freeze', 'el_salvador_aid', 'religious_groups_in_schools', 'anti_satellite_test_ban', 'aid_to_nicaraguan_contras', 'mx_missile', 'immigration', 'synfuels_corporation_cutback', 'education_spending', 'superfund_right_to_sue', 'crime', 'duty_free_exports', 'export_administration_act_south_africa')); ax.set_xticklabels(ax.get_xticklabels(), rotation=60, horizontalalignment='right', fontsize=14); ax.legend(); ``` ## Problem 4 The frequentist and Bayesian results here are nearly identical: with this much data and an uninformative prior, the Bayesian posterior is dominated by the likelihood, so the intervals almost coincide. The core difference is one of interpretation: the Bayesian approach starts from a prior belief and updates it as evidence arrives, treating the parameter itself as uncertain, while the frequentist approach treats the parameter as fixed and expresses uncertainty only through the sampling procedure. With small samples or genuine prior knowledge, the two can diverge, and the Bayesian approach lets you put that prior information to use. ## Resources - [Worked example of Bayes rule calculation](https://en.wikipedia.org/wiki/Bayes'_theorem#Examples) (helpful as it fully breaks out the denominator) - [Source code for mvsdist in scipy](https://github.com/scipy/scipy/blob/90534919e139d2a81c24bf08341734ff41a3db12/scipy/stats/morestats.py#L139)
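As a closing aside to Problem 3 (illustrative only - this mirrors what the comparison table already computes column by column, and reuses the notebook's `np`, `stats`, and `dem` objects):

```python
def both_intervals(data, alpha=0.95):
    """Frequentist t-interval and Bayesian bayes_mvs interval for the mean."""
    data = np.array(data)
    mean = data.mean()
    err = stats.sem(data) * stats.t.ppf((1 + alpha) / 2., len(data) - 1)
    bayes_mean, _, _ = stats.bayes_mvs(data, alpha=alpha)
    return (mean - err, mean + err), bayes_mean.minmax

print(both_intervals(dem['handicapped_infants']))
# The two intervals agree to many decimal places on this data.
```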
[STATEMENT] lemma not_conga: assumes "A B C CongA A' B' C'" and "\<not> A B C CongA D E F" shows "\<not> A' B' C' CongA D E F" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<not> A' B' C' CongA D E F [PROOF STEP] by (meson assms(1) assms(2) conga_trans)
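In plain terms (a gloss, not part of the theory file): angle congruence is transitive, so if $A'\,B'\,C' \cong D\,E\,F$ held, transitivity with the assumption $A\,B\,C \cong A'\,B'\,C'$ would give $A\,B\,C \cong D\,E\,F$, contradicting the second assumption; `conga_trans` supplies exactly that step for `meson`.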
subroutine read_sp3_200sats(inputfile, gps_weeks, . gps_seconds, nsat, satnames, XYZ,haveorbit, . ipointer,nepochs,relTime) implicit none c kristine larson, september 15,2015 c the input is a 15 minute sp3file name c 17nov02 tried to extend to 5 minute spfiles c KL 19feb01, allow sp3 files that are longer than 23 hours and 45 minute c times will now be in gps seconds but a second day will no longer go back to c zero on a day for new week. this will make it easier to interpolate using a c common time frame, which will be called relTime c previous behavior assumed all data were from the same day c c returns satellite names, c gps weeks and c gps seconds of the week, XYZ (in km) c 17oct12, KL c changed to allow multiple GNSS and 200 satellites c INPUT c inputfile is sp3 filename c OUTPUT c gps_weeks and gps_seconds are arrays? of times on the sp3file c nsat - number of satellites c satnames - integers c 1-99 for GPS c 101-199 for GLONASS c 201-299 for GALILEO c 301-399 for BDS c default nsat value of 0 c added pointer array for orbits c c 17nov03 returns number of time epochs now c 18may14 read the header date and ensure that only c data from that date are used (sp3 files from CODE had c had two midnites in them). c 19mar25 changed filename of sp3 to be really really long include 'local.inc' character*128 inputfile character*80 line, outputfile character*2 chr integer satnames(maxsat),nsat, gpsweek, prn, i, j, k, . time, itime(5), gps_weeks(np), msec, sec, ios,newname, . hdr_yy, hdr_mm, hdr_dd, hdr_hour, hdr_minute, FirstWeek real*8 x,y,z, XYZ(maxsat,np,3), gps_seconds(np), gpssecond, . FirstSecond, relTime(np), rt character*1 duh/'+'/ character*1 satID(maxsat), constell logical haveorbit(maxGNSS) integer ipointer(maxGNSS), nepochs, s1, s2 print*, 'Enter sp3 file reading code' nsat = 0 do i=1,maxGNSS haveorbit(i) = .false. ipointer(i) = 0 enddo do i=1,maxsat satnames(i) = 0 enddo c define everything as zero to start do i=1,np gps_seconds(i) = 0 gps_weeks(i) = 0 do j=1,maxsat do k=1,3 XYZ(j,i,k) = 0.d0 enddo enddo enddo sec= 0 msec = 0 c open the sp3file c open(12, file=inputfile,status='old',iostat=ios) if (ios .ne. 0) then print*, 'the sp3 file does not exist ' print*, inputfile(1:80) call exit endif c #cP2015 12 30 0 0 0.00000000 97 d+D IGb08 FIT AIUB c skip first two lines of the header- c now save the month and day of the file read(12,'(a80)') line print*, 'First epoch of SP3 file ', line print*, 'Number of epochs', line(37:39) c removed the commas that are not compliant with new fortran? READ (line(37:39), '(I3)') nepochs if (nepochs.gt.np) then print*,'there are more epochs in this file than the code' print*,'is dimensioned for. Exiting.' call exit endif READ (line(6:7), '(I2)') hdr_yy READ (line(9:10), '(I2)') hdr_mm READ (line(12:13), '(I2)') hdr_dd READ (line(15:16), '(I2)') hdr_hour READ (line(18:19), '(I2)') hdr_minute print*, 'num epochs in header', nepochs print*, 'header time:', hdr_yy, hdr_mm, hdr_dd,hdr_hour,hdr_minute itime(1) = hdr_yy itime(2) = hdr_mm itime(3) = hdr_dd itime(4) = hdr_hour itime(5) = hdr_minute sec = 0 msec = 0 call convert_time(itime,sec, msec, FirstWeek, FirstSecond) print*, 'first week/sec', FirstWeek, FirstSecond read(12,'(a80)') line read(12,'(1x,i5,3x, 17(a1,i2) )')nsat, . 
(satID(i), satnames(i), i=1,17) read(12,'(9x, 17(a1,i2))')(satID(i),satnames(i), i=18,34) read(12,'(9x, 17(a1,i2))')(satID(i),satnames(i), i=35,51) read(12,'(9x, 17(a1,i2))')(satID(i),satnames(i), i=52,68) read(12,'(9x, 17(a1,i2))')(satID(i),satnames(i), i=69,85) s1 = 86 s2 = 102 113 read(12,'(a80)') line c print*, line if (line(1:2) .eq. '+ ') then print*,'found another sat line' read(line,'(9x, 17(a1,i2))')(satID(i),satnames(i), i=s1,s2) c increment the counters. this has only been tested up to 102 s1 = s1 +17 s2 = s2 + 17 else if (line(1:2) .eq. '/*') then c print*,'comment line i think' else if (line(1:2) .eq. '++') then c print*,'qual flag' endif if (line(1:1).ne.'*') goto 113 call fill_pointer(nsat,satID,satnames,haveorbit,ipointer) c start your counter for number of epochs c I think this also means you have read the header time = 1 15 continue if (line(1:1).eq.'*') then c decode your time tag read(line(6:7), '(i2)') itime(1) read(line(9:10), '(i2)') itime(2) read(line(12:13), '(i2)') itime(3) read(line(15:16), '(i2)') itime(4) read(line(18:19), '(i2)') itime(5) c trying to read two day sp3 file, so changes wrt previous file if (.true.) then call convert_time(itime,sec, msec, gpsweek, gpssecond) c now need to read nsat lines to get the coordinates of the satellite do i=1,nsat read(12,'(a80)') line read(line(2:2),'(a1)') constell read(line(3:46),*)prn,x,y,z c change prn to new system call newSat(constell,prn,newname) prn = newname c now using index i instead of PRN number to store data XYZ(i,time,1) = x XYZ(i,time,2) = y XYZ(i,time,3) = z gps_weeks(time) = gpsweek gps_seconds(time) = gpssecond call rel_time(gpsweek, gpssecond, . FirstWeek, FirstSecond,rt) c save seconds since first epoch relTime(time) = rt c print*, gpsweek,gpssecond, rt enddo time = time + 1 endif c read the next line - it should be a time tag read(12,'(a80)') line c increment the time variable if (line(1:3).eq.'EOF') goto 55 if (time >np) then print*,'your sp3 file exceeds max number ', np, ' values' print*,'this is bad - exiting the subroutine' goto 55 endif endif goto 15 55 continue c subtract one because of the CODE midnite issue nepochs = time - 1 print*, 'RETURNING epochs: ', nepochs c you are done reading the file close(12) print*, 'exiting the sp3 reading code' 56 continue end subroutine newSat(constell, satnum,nsatnum) implicit none c takes constellation ID and satellite number c and returns a new satellite number, offset by c 100 for glonass, 200 for galileo etc c unknown satellites are all assigned to 400 c author: kristine larson 17oct15 c old rinex files do not use a G for GPS c so allow blank c 17nov05 added Q satellites 381, 382, etc integer satnum,nsatnum character constell if (constell .eq. 'G') then nsatnum = satnum elseif (constell .eq. ' ') then nsatnum = satnum elseif (constell .eq. 'R') then nsatnum = 100+satnum elseif (constell .eq. 'E') then nsatnum = 200+satnum elseif (constell .eq. 'C') then nsatnum = 300+satnum elseif (constell .eq. 
'J') then c Japanese satellites nsatnum = 380+satnum else nsatnum = 400 endif end subroutine fill_pointer(nsat,satID,satnames,haveorbit,ipointer) c author: kristine larson 17nov03 c purpose: change the satellite names to integers from R??, E??, etc c and fill the pointer array c inputs: nsat is number of satellites, satID is one character constellation ID c outputs: satnames uses our 100,200,300 convention for naming satellites c ipointer tells you where it is in the sp3 file implicit none include 'local.inc' integer satnames(maxsat), ipointer(maxGNSS), i, nsat, newname character*1 satID(maxsat) logical haveorbit(maxGNSS) do i =1, nsat call newSat(satID(i), satnames(i),newname) c was mostly for debugging c write(6,'(a1, i2, 1x, i3)') satID(i), satnames(i), newname satnames(i) = newname haveorbit(newname) = .true. ipointer(newname) = i enddo end subroutine rel_time(gps_week,gps_second,epochWeek,epochSec,rt) c send times (week,secs) and epoch times (epochWeek,epochSec) c return relative time, rt integer gps_week, epochWeek real*8 gps_second, epochSec, rt if (gps_week.eq.epochWeek) then rt = gps_second - epochSec else c add a week of seconds rt = 7*86400 + gps_second - epochSec endif c print*, gps_second, rt end
QPM Cosmetology belongs to the Wellness range. All of the applications contained in this range are intended for professionals in a variety of sectors. It provides them with information on the levels and consequences of oxidative stress on the individual. This oxidative stress, which is responsible for all our ills, is exactly what the bio-impedance measurement measures. If oxidation occurs somewhere in the body, this alters the passage of the electrical current. For QPM Cosmetology the skin is a system in its own right, with its own laws and functions. Like all of the other systems of the body, this system is prey to oxidative stress, owing to the aging process inherent in living systems. Each human being possesses a specific longevity of his or her own, depending both on genetic makeup and on biochemical and metabolic factors, environmental agents, and also the inner self, the psychological makeup (for instance, anxious individuals age more quickly). The skin of the face is the one place in particular where the individual's inner self meets the physical stressors of the environment (that skin is usually not covered by clothing), and the free radicals show a more intense oxidative activity here merely by virtue of that absence of protection. The tensions and stresses of modern living show more markedly in the face than anywhere else on the body, because the face is the most exposed part of our body, usually left uncovered. Local oxidative stress causes aging, deterioration of the connective tissue, loss of hydration, and poor oxygenation of the tissues, all factors that predispose the face to wear. If the skin is hydrated or oxygenated, or if the pH is modified, visible and measurable effects can be objectively observed through before-and-after bio-impedance measurements. This makes it possible to show the real, biophysical condition of the skin and its oxidation, to propose an assessment of the skin's condition, to recommend the type of product to apply, and to monitor the results of applying the cosmetics. QPM Cosmetology performs a direct measurement of clinical stress, which is also a factor that predicts the action of the different cosmetics applied to the skin. The technology indicates which products of the range are most appropriate to the individual, and prescribes them in a way that admits of little discussion, a pledge of efficacy. This application concerns cosmetics manufacturers first and foremost, as well as all professionals called upon to give advice on or apply cosmetics.
#include <cctbx/boost_python/flex_fwd.h> #include <boost/python/class.hpp> #include <boost/python/return_value_policy.hpp> #include <boost/python/return_by_value.hpp> #include <boost/python/return_arg.hpp> #include <cctbx/crystal/asu_clusters.h> #include <cctbx/crystal/workarounds_bpl.h> namespace cctbx { namespace crystal { namespace { struct asu_clusters_wrappers { typedef asu_clusters w_t; static void wrap() { using namespace boost::python; typedef return_value_policy<return_by_value> rbv; class_<w_t>("asu_clusters", no_init) .def(init<pair_asu_table<> const&, bool>(( arg("pair_asu_table"), arg("strictly_in_asu")=true))) .def("sort_index_groups_by_size", &w_t::sort_index_groups_by_size, return_self<>()) .def("sort_indices_in_each_group", &w_t::sort_indices_in_each_group, return_self<>()) .add_property("index_groups", make_getter(&w_t::index_groups, rbv())) ; } }; void wrap_all() { asu_clusters_wrappers::wrap(); } } // namespace <anonymous> namespace boost_python { void wrap_asu_clusters() { wrap_all(); } }}} // namespace cctbx::crystal::boost_python
/* BRAINS * (B)LR (R)everberation-mapping (A)nalysis (I)n AGNs with (N)ested (S)ampling * Yan-Rong Li, [email protected] * Thu, Aug 4, 2016 */ /*! * \file sim.c * \brief generate mocked 2d data. */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <stddef.h> #include <math.h> #include <float.h> #include <string.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_interp.h> #include <mpi.h> #include "brains.h" ///////////////////////////////////////////////////////////////////////////////// #ifdef SpecAstro /* baselines for 3C 273 dataset observed by the GRAVITY (Nature, 2020, 563, 657), * unit is meter */ int n_base_sa_3c273 = 24; double base_sa_3c273[]={ -39.847287, 18.757261, -72.901618, -13.864206, -105.767452, -59.897942, -32.453674, -32.669331, -64.714063, -78.769286, -32.064961, -46.108491, -53.774627, 21.045077, -71.062525, -10.421538, -79.670158, -55.520144, -16.867270, -31.447448, -25.151512, -76.555774, -8.846202, -45.116789, -54.064283, 19.430318, -86.349699, -12.827258, -115.711019, -58.525511, -32.484928, -32.180585, -61.579109, -77.881063, -29.231494, -45.704607, -52.779207, 21.321411, -83.468788, -12.802497, -107.827509, -59.908816, -28.601448, -33.449429, -50.961674, -81.626333, -23.699820, -46.979455 }; #endif ////////////////////////////////////////////////////////////////////////////////////// void *model; void sim() { if(thistask != roottask) return; FILE *fp; char fname[200]; int i, j, incr; sim_init(); double *pm = (double *)model, error, fcon; smooth_init(parset.n_vel_recon, TransV); if(parset.flag_dim == -1) { /* note that here use sigma_hat = sigma/sqrt(tau) */ printf("sim with ln(sigma) = %f and ln(taud) = %f.\n", var_param[1], var_param[2]); reconstruct_con_from_varmodel(exp(var_param[1]), exp(var_param[2]), 1.0, 0.0); } else { con_scale = 1.0; line_scale = 1.0; line_error_mean = con_error_mean = 0.01; /* arguments: sigma_hat, tau, alapha, and syserr */ create_con_from_random(0.03, 45.0, 1.0, 0.0); } calculate_con_rm(model); sprintf(fname, "%s/%s", parset.file_dir, "/data/sim_con_full.txt"); fp = fopen(fname, "w"); if(fp == NULL) { fprintf(stderr, "# Error: Cannot open file %s\n", fname); exit(-1); } for(i=0; i<parset.n_con_recon; i++) { fprintf(fp, "%e %e %e\n", Tcon[i]*(1.0+parset.redshift), Fcon[i]/con_scale, Fcerrs[i]/con_scale); } fclose(fp); sprintf(fname, "%s/%s", parset.file_dir, "/data/sim_con.txt"); fp = fopen(fname, "w"); if(fp == NULL) { fprintf(stderr, "# Error: Cannot open file %s\n", fname); exit(-1); } if(parset.flag_dim != -2) { gsl_interp_init(gsl_linear, Tcon, Fcon, parset.n_con_recon); for(i=0; i<n_con_data; i++) { //fprintf(fp, "%f %f %f\n", Tcon[i], Fcon[i]/con_scale, Fcerrs[i]/con_scale); fcon = gsl_interp_eval(gsl_linear, Tcon, Fcon, Tcon_data[i], gsl_acc); fprintf(fp, "%e %e %e\n", Tcon_data[i]*(1.0+parset.redshift), (fcon+gsl_ran_ugaussian(gsl_r)*con_error_mean)/con_scale, con_error_mean/con_scale); } } else { incr = fmax(0.5/(Tcon[1]-Tcon[0]), 1.0); //cadence to be 0.5day or increasement to be 1 for(i=0; i<parset.n_con_recon; i+=incr) { if(Tcon[i] >= 0.0) { fprintf(fp, "%e %e %e\n", Tcon[i]*(1.0+parset.redshift), Fcon[i]/con_scale, Fcerrs[i]/con_scale); } } } fclose(fp); gsl_interp_init(gsl_linear, Tcon, Fcon_rm, parset.n_con_recon); transfun_1d_cal(model, 0); calculate_line_from_blrmodel(model, Tline, Fline, parset.n_line_recon); sprintf(fname, "%s/%s", parset.file_dir, "/data/sim_line.txt"); fp = fopen(fname, "w"); if(fp == NULL) { fprintf(stderr, "# Error: Cannot open file %s\n", fname); 
exit(-1); } if(parset.flag_dim == -1) { error = line_error_mean * sqrt(n_line_data) * (Vline_data[1] - Vline_data[0]); } else { error = line_error_mean; } for(i=0; i<parset.n_line_recon; i++) { fprintf(fp, "%e %e %e\n", Tline[i]*(1.0+parset.redshift), Fline[i]/line_scale + gsl_ran_ugaussian(gsl_r)*error/line_scale, error/line_scale); } // output transfer function. sprintf(fname, "%s/%s", parset.file_dir, parset.tran_out_file); fp = fopen(fname, "w"); if(fp == NULL) { fprintf(stderr, "# Error: Cannot open file %s\n", fname); exit(-1); } for(i=0; i<parset.n_tau; i++) { fprintf(fp, "%e %e\n", TransTau[i], Trans1D[i]); } fclose(fp); transfun_2d_cal(model, TransV, Trans2D, parset.n_vel_recon, 1); calculate_line2d_from_blrmodel(model, Tline, TransV, Trans2D, Fline2d, parset.n_line_recon, parset.n_vel_recon); sprintf(fname, "%s/%s", parset.file_dir, "/data/sim_line2d.txt"); fp = fopen(fname, "w"); if(fp == NULL) { fprintf(stderr, "# Error: Cannot open file %s\n", fname); exit(-1); } fprintf(fp, "# %d %d\n", parset.n_line_recon, parset.n_vel_recon); for(i=0; i<parset.n_line_recon; i++) { fprintf(fp, "# %f\n", Tline[i]*(1.0+parset.redshift)); for(j=0; j<parset.n_vel_recon; j++) { fprintf(fp, "%e %e %e\n", TransW[j], (Fline2d[i*parset.n_vel_recon + j] + gsl_ran_ugaussian(gsl_r)*line_error_mean*0.3)/line_scale, line_error_mean/line_scale); } fprintf(fp, "\n"); } fclose(fp); sprintf(fname, "%s/%s", parset.file_dir, "/data/sim_broadening.txt"); fp = fopen(fname, "w"); if(fp == NULL) { fprintf(stderr, "# Error: Cannot open file %s\n", fname); exit(-1); } for(i=0; i<parset.n_line_recon; i++) { fprintf(fp, "%f %f\n", parset.InstRes * VelUnit, parset.InstRes_err * VelUnit); } fclose(fp); // output 2d transfer function sprintf(fname, "%s/%s", parset.file_dir, parset.tran2d_out_file); fp = fopen(fname, "w"); if(fp == NULL) { fprintf(stderr, "# Error: Cannot open file %s\n", fname); exit(-1); } fprintf(fp, "# %d %d\n", parset.n_tau, parset.n_vel_recon); for(i=0; i<parset.n_tau; i++) { for(j=0; j<parset.n_vel_recon; j++) { fprintf(fp, "%e %e %e\n", TransV[j]*VelUnit, TransTau[i], Trans2D[i*parset.n_vel_recon + j]); } fprintf(fp, "\n"); } fclose(fp); #ifdef SpecAstro double *sa_pm; sa_pm = (double *)pm + num_params_blr; sa_smooth_init(parset.n_sa_vel_recon, vel_sa, parset.sa_InstRes); gen_sa_cloud_sample((void *)sa_pm, 3, 0); calculate_sa_sim_with_sample(pm, vel_sa, parset.n_sa_vel_recon, base_sa, parset.n_sa_base_recon, phase_sa, Fline_sa); sprintf(fname, "%s/%s", parset.file_dir, "data/sim_sa.txt"); fp = fopen(fname, "w"); if(fp == NULL) { fprintf(stderr, "# Error: Cannot open file %s.\n", fname); exit(0); } // output sa line fprintf(fp, "# %d %d %d\n", 1, parset.n_sa_vel_recon, parset.n_sa_base_recon); for(j=0; j<parset.n_sa_vel_recon; j++) { fprintf(fp, "%e %e %e\n", wave_sa[j], Fline_sa[j] + gsl_ran_ugaussian(gsl_r)*sa_line_error_mean, sa_line_error_mean); } fprintf(fp, "\n"); for(i=0; i<parset.n_sa_base_recon; i++) { fprintf(fp, "# %f %f\n", base_sa[i*2], base_sa[i*2+1]); for(j=0; j<parset.n_sa_vel_recon; j++) { fprintf(fp, "%e %e %e\n", wave_sa[j], phase_sa[i*parset.n_sa_vel_recon + j]/(PhaseFactor * wave_sa[j]) + gsl_ran_ugaussian(gsl_r)*sa_phase_error_mean, sa_phase_error_mean); } fprintf(fp, "\n"); } fclose(fp); sa_smooth_end(); #endif smooth_end(); sim_end(); } void sim_init() { int i, j, idx; double dT, Tspan; double *pm, Rblr, mbh; switch(parset.flag_blrmodel) { case -1: num_params_blr_model = num_params_MyTransfun2d; transfun_1d_cal = transfun_1d_cal_mytransfun; transfun_2d_cal = 
transfun_2d_cal_mytransfun; break; case 0: num_params_blr_model = num_params_MyBLRmodel2d; gen_cloud_sample = gen_cloud_sample_mymodel; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 1: num_params_blr_model = sizeof(BLRmodel1)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model1; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 2: num_params_blr_model = sizeof(BLRmodel2)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model2; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 3: num_params_blr_model = sizeof(BLRmodel3)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model3; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 4: num_params_blr_model = sizeof(BLRmodel4)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model4; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 5: num_params_blr_model = sizeof(BLRmodel5)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model5; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 6: num_params_blr_model = sizeof(BLRmodel6)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model6; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 7: num_params_blr_model = sizeof(BLRmodel7)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model7; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 8: num_params_blr_model = sizeof(BLRmodel8)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model8; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; case 9: num_params_blr_model = sizeof(BLRmodel9)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model9; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; default: num_params_blr_model = sizeof(BLRmodel1)/sizeof(double); gen_cloud_sample = gen_cloud_sample_model1; transfun_1d_cal = transfun_1d_cal_cloud; transfun_2d_cal = transfun_2d_cal_cloud; break; } #ifdef SpecAstro set_sa_blr_model(); /* SA */ num_params_sa_blr = num_params_sa_blr_model + num_params_sa_extpar; num_params_sa = num_params_sa_blr; #endif /* use epoch-independent broadening */ if(parset.flag_InstRes > 1) { num_params_res = 1; parset.InstRes = 220.0/VelUnit; parset.InstRes_err = 0.0; } else { parset.InstRes /= VelUnit; parset.InstRes_err = 0.0; } if(parset.flag_narrowline > 1) { printf("# set flag_narrowline to 1.\n"); parset.flag_narrowline = 1; } else if(parset.flag_narrowline == 0) { parset.width_narrowline = 0.0; } parset.flag_linecenter = 0; num_params_linecenter = 0; parset.num_particles = 1; which_particle_update = 0; force_update = 1; which_parameter_update = -1; num_params_blr = num_params_blr_model + num_params_nlr + num_params_res + num_params_linecenter + 1; /* include line sys err */ num_params_var = num_params_drw + num_params_trend + num_params_difftrend + num_params_resp; num_params_blr_tot = num_params_blr; #ifdef SpecAstro num_params_blr_tot += num_params_sa_blr; #endif num_params = num_params_blr_tot + num_params_var + parset.n_con_recon; /* index of A and Ag */ idx_resp = num_params_blr_tot + num_params_drw + num_params_trend; /* index of different trend */ idx_difftrend = idx_resp + num_params_resp; model = malloc(num_params * sizeof(double)); par_fix = (int *) malloc(num_params * 
sizeof(int)); par_fix_val = (double *) malloc(num_params * sizeof(double)); /* setup parameters */ pm = (double *)model; if(parset.flag_blrmodel == -1) { set_par_value_mytransfun_sim(pm); } else { set_par_value_sim(pm, parset.flag_blrmodel); set_par_fix_blrmodel(); for(i=0; i<num_params_blr_model; i++) { if(par_fix[i] == 1) { pm[i] = par_fix_val[i]; } } } /* spectral broadening, note this is a deviation from the input value */ pm[num_params_blr_model + num_params_nlr ] = 0.0; pm[idx_resp + 0] = log(1.0); //A pm[idx_resp + 1] = 0.0; //Ag #ifdef SpecAstro double *sa_model = pm + num_params_blr; set_idx_par_mutual(); set_par_value_sim(sa_model, parset.flag_sa_blrmodel); set_par_fix_sa_blrmodel(); for(i=0; i<num_params_sa_blr_model; i++) { if(par_fix[num_params_blr + i] == 1) { pm[num_params_blr + i] = par_fix_val[num_params_blr + i]; } } /* set the same mbh and inc */ sa_model[idx_sa_par_mutual[0]] = pm[idx_rm_par_mutual[0]]; //mbh sa_model[idx_sa_par_mutual[1]] = pm[idx_rm_par_mutual[1]]; //inc sa_model[num_params_sa_blr_model] = log(550.0); //DA sa_model[num_params_sa_blr_model + 1] = 0.0; //PA sa_model[num_params_sa_blr_model + 2] = 0.0; //FA sa_model[num_params_sa_blr_model + 3] = 0.0; //CO #endif Fcon = malloc(parset.n_con_recon * sizeof(double)); Fcon_rm = malloc(parset.n_con_recon * sizeof(double)); idx = get_idx_mbh_from_blrmodel(); mbh = exp(pm[idx]); if(parset.flag_blrmodel != 8) /* model 8 is particular */ { Rblr = exp(pm[0]); } else { Rblr = exp(pm[9]); } if(parset.flag_dim == -1) { Tspan = Tcon_data[n_con_data -1] - Tcon_data[0]; /* set time array for continuum */ Tcon_min = Tcon_data[0] - time_back_set - 10.0; Tcon_max = fmax(Tcon_data[n_con_data-1], Tline_data[n_line_data-1]) + fmax(0.05*Tspan, 10.0); dT = (Tcon_max - Tcon_min)/(parset.n_con_recon -1); for(i=0; i<parset.n_con_recon; i++) { Tcon[i] = Tcon_min + i*dT; } } else { Tspan = Rblr*10.0; rcloud_min_set = 0.0; rcloud_max_set = Tspan/2.0; if(parset.rcloud_max > 0.0) rcloud_max_set = fmin(rcloud_max_set, parset.rcloud_max); printf("RM rcloud_min_max_set: %f %f\n", rcloud_min_set, rcloud_max_set); time_back_set = 2.0*rcloud_max_set; Tcon_min = 0.0 - time_back_set - 10.0; Tcon_max = Tspan + 10.0; dT = (Tcon_max - Tcon_min)/(parset.n_con_recon -1); for(i=0; i<parset.n_con_recon; i++) { Tcon[i] = Tcon_min + i*dT; } } /* set Larr_rec */ for(i=0;i<parset.n_con_recon;i++) { Larr_rec[i*nq + 0]=1.0; for(j=1; j<nq; j++) Larr_rec[i*nq + j] = pow(Tcon[i], j); } if(parset.flag_dim == -1) { parset.n_line_recon = n_line_data; parset.n_vel_recon = n_vel_data; } TransTau = malloc(parset.n_tau * sizeof(double)); TransV = malloc(parset.n_vel_recon * sizeof(double)); TransW = malloc(parset.n_vel_recon * sizeof(double)); Trans1D = malloc(parset.n_tau * sizeof(double)); Trans2D = malloc(parset.n_tau * parset.n_vel_recon * sizeof(double)); Tline = malloc(parset.n_line_recon * sizeof(double)); Fline = malloc(parset.n_line_recon * sizeof(double)); Fline2d = malloc(parset.n_line_recon * parset.n_vel_recon * sizeof(double)); if(parset.flag_dim == -1) { memcpy(Tline, Tline_data, n_line_data * sizeof(double)); memcpy(TransV, Vline_data, n_vel_data * sizeof(double)); memcpy(TransW, Wline_data, n_vel_data * sizeof(double)); } else { Tline_min = 0.0; Tline_max = Tcon_max - 1.0; dT = (Tline_max - Tline_min)/(parset.n_line_recon - 1); for(i=0; i<parset.n_line_recon; i++) { Tline[i] = Tline_min + i*dT; } double vel_max_set, vel_min_set; vel_max_set = sqrt(pow(2.0*sqrt(mbh/Rblr), 2.0) + pow(2.0*parset.InstRes, 2.0)); vel_min_set = - vel_max_set; double dVel 
= (vel_max_set- vel_min_set)/(parset.n_vel_recon -1.0); for(i=0; i<parset.n_vel_recon; i++) { TransV[i] = vel_min_set + dVel*i; TransW[i] = (1.0 + TransV[i]/C_Unit) * parset.linecenter * (1.0+parset.redshift); } } clouds_tau = malloc(parset.n_cloud_per_task * sizeof(double)); clouds_weight = malloc(parset.n_cloud_per_task * sizeof(double)); clouds_vel = malloc(parset.n_cloud_per_task * parset.n_vel_per_cloud * sizeof(double)); if(parset.flag_save_clouds && thistask == roottask) { if(parset.n_cloud_per_task <= 1000) icr_cloud_save = 1; else icr_cloud_save = parset.n_cloud_per_task/1000; char fname[200]; sprintf(fname, "%s/%s", parset.file_dir, parset.cloud_out_file); fcloud_out = fopen(fname, "w"); if(fcloud_out == NULL) { fprintf(stderr, "# Error: Cannot open file %s\n", fname); exit(-1); } } #ifdef SpecAstro double saRblr; if(parset.flag_dim == -1) { parset.n_sa_vel_recon = n_vel_sa_data; parset.n_sa_base_recon = n_base_sa_data; } else { sa_flux_norm = 1.0; parset.n_sa_vel_recon = 40; if(parset.flag_gravity == 1) parset.n_sa_base_recon = n_base_sa_3c273; else parset.n_sa_base_recon = 20; sa_phase_error_mean = 0.01; sa_line_error_mean = 0.01; } vel_sa = malloc(parset.n_sa_vel_recon * sizeof(double)); wave_sa = malloc(parset.n_sa_vel_recon * sizeof(double)); Fline_sa = malloc(parset.n_sa_vel_recon * sizeof(double)); base_sa = malloc(parset.n_sa_base_recon * 2 * sizeof(double)); phase_sa = malloc(parset.n_sa_vel_recon * parset.n_sa_base_recon * sizeof(double)); clouds_alpha = malloc(parset.n_cloud_per_task * sizeof(double)); clouds_beta = malloc(parset.n_cloud_per_task * sizeof(double)); workspace_phase = malloc(parset.n_sa_vel_recon * 3 * sizeof(double)); if(parset.flag_sa_blrmodel != 8) { saRblr = exp(pm[num_params_blr]); } else { saRblr = exp(pm[num_params_blr + 9]); } rcloud_max_set = fmax(rcloud_max_set, saRblr * 5.0); printf("SA rcloud_min_max_set: %f %f\n", rcloud_min_set, rcloud_max_set); if(parset.flag_dim == -1) { memcpy(vel_sa, vel_sa_data, parset.n_sa_vel_recon * sizeof(double)); memcpy(wave_sa, wave_sa_data, parset.n_sa_vel_recon * sizeof(double)); memcpy(base_sa, base_sa_data, parset.n_sa_base_recon * 2 * sizeof(double)); } else { double vel_max_set, vel_min_set; vel_max_set = sqrt(pow(2.0*sqrt(mbh/saRblr), 2.0) + pow(2.0*parset.sa_InstRes, 2.0)); vel_min_set = - vel_max_set; double dVel = (vel_max_set- vel_min_set)/(parset.n_sa_vel_recon -1.0); for(i=0; i<parset.n_sa_vel_recon; i++) { vel_sa[i] = vel_min_set + dVel*i; wave_sa[i] = (1.0 + vel_sa[i]/C_Unit) * parset.sa_linecenter * (1.0+parset.redshift); } if(parset.flag_gravity == 1) memcpy(base_sa, base_sa_3c273, n_base_sa_3c273*2*sizeof(double)); else { double phi; for(i=0; i<parset.n_sa_base_recon; i++) { phi = -PI/2.0 + PI/parset.n_sa_base_recon * i; base_sa[i*2+0] = 100.0*cos(phi); base_sa[i*2+1] = 100.0*sin(phi); } } } #endif return; } void sim_end() { free(model); free(par_fix); free(par_fix_val); free(Fcon); free(Fcon_rm); free(TransTau); free(TransV); free(TransW); free(Tline); free(Fline); free(Fline2d); free(Trans2D); free(Trans1D); free(clouds_tau); free(clouds_weight); free(clouds_vel); if(parset.flag_save_clouds && thistask == roottask) { fclose(fcloud_out); } #ifdef SpecAstro free(vel_sa); free(wave_sa); free(Fline_sa); free(base_sa); free(phase_sa); free(clouds_alpha); free(clouds_beta); free(workspace_phase); #endif } /* * set parameter values for either RM or SA */ void set_par_value_sim(double *pm, int flag_model) { int i; switch(flag_model) { case 0: set_par_value_mymodel_sim(pm); break; case 1: i=0; pm[i++] 
= log(4.0); pm[i++] = 0.9; pm[i++] = 0.2; pm[i++] = cos(20.0/180.0*PI); pm[i++] = 40.0; pm[i++] = 0.0; pm[i++] = log(3.0); pm[i++] = 0.1; pm[i++] = 0.5; break; case 2: i=0; pm[i++] = log(4.0); pm[i++] = 0.9; pm[i++] = 0.2; pm[i++] = cos(20.0/180.0*PI); pm[i++] = 40.0; pm[i++] = 0.0; pm[i++] = log(3.0); pm[i++] = log(0.01); pm[i++] = log(0.1); break; case 3: i=0; pm[i++] = log(3.0); pm[i++] = log(5.0); pm[i++] = -1.0; pm[i++] = cos(20.0/180.0*PI); pm[i++] = 40.0; pm[i++] = 0.0; pm[i++] = log(3.0); pm[i++] = 0.5; pm[i++] = 0.5; break; case 4: i=0; pm[i++] = log(3.0); pm[i++] = log(5.0); pm[i++] = -1.0; pm[i++] = cos(20.0/180.0*PI); pm[i++] = 40.0; pm[i++] = 0.0; pm[i++] = log(3.0); pm[i++] = 0.5; pm[i++] = 0.5; break; case 5: i=0; pm[i++] = log(4.0); //mu pm[i++] = 0.5; //Fin pm[i++] = log(2.0); //Fout pm[i++] = 1.5; //alpha pm[i++] = cos(20.0/180.0*PI); //inc pm[i++] = 40.0; //opn pm[i++] = 0.5; //k pm[i++] = 2.0; //gam pm[i++] = 0.5; //xi pm[i++] = log(2.0); //mbh pm[i++] = 0.5; //fellip pm[i++] = 0.5; //fflow pm[i++] = log(0.01); pm[i++] = log(0.1); pm[i++] = log(0.01); pm[i++] = log(0.1); pm[i++] = 0.0; //theta_rot pm[i++] = log(0.001); //sig_turb break; case 6: i=0; pm[i++] = log(4.0); // mu pm[i++] = 1.0; // beta pm[i++] = 0.25; // F pm[i++] = cos(20.0/180.0*PI); // inc pm[i++] = 40.0; // opn pm[i++] = 0.0; // kappa pm[i++] = 1.0; // gamma pm[i++] = 1.0; // obscuration pm[i++] = log(2.0); //mbh pm[i++] = 0.5; //fellip pm[i++] = 0.4; //fflow pm[i++] = log(0.01); // pm[i++] = log(0.1); // pm[i++] = log(0.01); // pm[i++] = log(0.1); // pm[i++] = 0.0; // theta_rot pm[i++] = log(0.001); // sig_turb break; case 7: i=0; pm[i++] = log(4.0); //mu pm[i++] = 0.8; //beta pm[i++] = 0.1; //F pm[i++] = cos(20.0/180.0*PI);//inc pm[i++] = 40.0; //opn pm[i++] = 0.0; //kappa pm[i++] = 1.0; //gamma pm[i++] = 0.0; //xi pm[i++] = 0.5; //fsh pm[i++] = log(8.0); //mu_un pm[i++] = 1.2; //beta_un pm[i++] = 0.1; //F_un pm[i++] = 20.0; //opn_un pm[i++] = log(2.0); //mbh pm[i++] = 0.5; //fellip pm[i++] = 0.4; //fflow pm[i++] = log(0.01); pm[i++] = log(0.1); pm[i++] = log(0.01); pm[i++] = log(0.1); pm[i++] = 0.0; //theta_rot pm[i++] = 0.5; //fellip_un pm[i++] = 0.4; //fflow_un pm[i++] = log(0.001); // sig_turb break; case 8: i=0; pm[i++] = 50.0; //theta_min pm[i++] = 20.0; //dtheta_max pm[i++] = log(1.0); // r_min pm[i++] = 4.0; // fr_max pm[i++] = 1.0; // gamma pm[i++] = 1.0; // alpha pm[i++] = -2.0; // lambda pm[i++] = -0.5; // k pm[i++] = 0.0; // xi pm[i++] = log(30.0); // Rv pm[i++] = log(20.0); // Rblr, should larger than r_max=r_max * fr_max pm[i++] = cos(30.0/180.0*PI); // inc pm[i++] = log(4.0); // mbh break; case 9: i=0; pm[i++] = log(4.0); //mu pm[i++] = 1.0; //beta pm[i++] = 0.2; //F pm[i++] = cos(30.0/180.0*PI); // inc pm[i++] = 30.0; //opn pm[i++] = log(4.0); // mbh break; } return; } /* * get index of mbh from a BLR model. * */ int get_idx_mbh_from_blrmodel() { int idx = -1; switch(parset.flag_blrmodel) { case 0: idx = offsetof(MyBLRmodel, mbh); break; case 1: idx = offsetof(BLRmodel1, mbh); break; case 2: idx = offsetof(BLRmodel2, mbh); break; case 3: idx = offsetof(BLRmodel3, mbh); break; case 4: idx = offsetof(BLRmodel4, mbh); break; case 5: idx = offsetof(BLRmodel5, mbh); break; case 6: idx = offsetof(BLRmodel6, mbh); break; case 7: idx = offsetof(BLRmodel7, mbh); break; case 8: idx = offsetof(BLRmodel8, mbh); break; case 9: idx = offsetof(BLRmodel9, mbh); break; } return idx / sizeof(double); }
open import AEff open import EffectAnnotations open import Types hiding (``) open import Relation.Binary.PropositionalEquality hiding ([_] ; Extensionality) --open ≡-Reasoning module Renamings where -- SET OF RENAMINGS BETWEEN CONTEXTS Ren : Ctx → Ctx → Set Ren Γ Γ' = {X : VType} → X ∈ Γ → X ∈ Γ' -- IDENTITY, COMPOSITION, AND EXCHANGE RENAMINGS id-ren : {Γ : Ctx} → Ren Γ Γ id-ren {X} x = x comp-ren : {Γ Γ' Γ'' : Ctx} → Ren Γ' Γ'' → Ren Γ Γ' → Ren Γ Γ'' comp-ren f g x = f (g x) exchange : {Γ : Ctx} {X Y : VType} → Ren (Γ ∷ X ∷ Y) (Γ ∷ Y ∷ X) exchange Hd = Tl Hd exchange (Tl Hd) = Hd exchange (Tl (Tl x)) = Tl (Tl x) -- WEAKENING OF RENAMINGS wk₁ : {Γ : Ctx} {X : VType} → Ren Γ (Γ ∷ X) wk₁ = Tl wk₂ : {Γ Γ' : Ctx} {X : VType} → Ren Γ Γ' → Ren (Γ ∷ X) (Γ' ∷ X) wk₂ f Hd = Hd wk₂ f (Tl v) = Tl (f v) wk₃ : {Γ : Ctx} {X Y Z : VType} → Ren (Γ ∷ Y ∷ Z) (Γ ∷ X ∷ Y ∷ Z) wk₃ Hd = Hd wk₃ (Tl Hd) = Tl Hd wk₃ (Tl (Tl x)) = Tl (Tl (Tl x)) -- ACTION OF RENAMING ON WELL-TYPED VALUES AND COMPUTATIONS mutual V-rename : {X : VType} {Γ Γ' : Ctx} → Ren Γ Γ' → Γ ⊢V⦂ X → Γ' ⊢V⦂ X V-rename f (` x) = ` f x V-rename f (`` c) = `` c V-rename f (ƛ M) = ƛ (M-rename (wk₂ f) M) V-rename f ⟨ V ⟩ = ⟨ V-rename f V ⟩ M-rename : {C : CType} {Γ Γ' : Ctx} → Ren Γ Γ' → Γ ⊢M⦂ C → Γ' ⊢M⦂ C M-rename f (return V) = return (V-rename f V) M-rename f (let= M `in N) = let= M-rename f M `in M-rename (wk₂ f) N M-rename f (letrec M `in N) = letrec M-rename (wk₂ (wk₂ f)) M `in M-rename (wk₂ f) N M-rename f (V · W) = V-rename f V · V-rename f W M-rename f (↑ op p V M) = ↑ op p (V-rename f V) (M-rename f M) M-rename f (↓ op V M) = ↓ op (V-rename f V) (M-rename f M) M-rename f (promise op ∣ p ↦ M `in N) = promise op ∣ p ↦ M-rename (wk₂ f) M `in M-rename (wk₂ f) N M-rename f (await V until M) = await (V-rename f V) until (M-rename (wk₂ f) M) M-rename f (coerce p q M) = coerce p q (M-rename f M) -- ACTION OF RENAMING ON WELL-TYPED PROCESSES P-rename : {o : O} {PP : PType o} {Γ Γ' : Ctx} → Ren Γ Γ' → Γ ⊢P⦂ PP → Γ' ⊢P⦂ PP P-rename f (run M) = run (M-rename f M) P-rename f (P ∥ Q) = P-rename f P ∥ P-rename f Q P-rename f (↑ op p V P) = ↑ op p (V-rename f V) (P-rename f P) P-rename f (↓ op V P) = ↓ op (V-rename f V) (P-rename f P)
To recognise their achievement, 89 decorations were awarded for the raid. This total includes the five Victoria Crosses awarded to Lieutenant Commander Beattie, Lieutenant Colonel Newman and Commander Ryder, and posthumous awards to Sergeant Durrant and Able Seaman Savage. Four Distinguished Service Orders were awarded to Major William Copland, Captain Donald Roy, Lieutenant T Boyd and Lieutenant T D L Platt. Other decorations awarded were four Conspicuous Gallantry Medals, five Distinguished Conduct Medals, 17 Distinguished Service Crosses, 11 Military Crosses, 24 Distinguished Service Medals and 15 Military Medals. Four men were awarded the Croix de guerre by France, and another 51 were mentioned in despatches.
[STATEMENT] lemma ereal_mult_divide: fixes a b :: ereal shows "0 < b \<Longrightarrow> b < \<infinity> \<Longrightarrow> b * (a / b) = a" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>0 < b; b < \<infinity>\<rbrakk> \<Longrightarrow> b * (a / b) = a [PROOF STEP] by (cases a b rule: ereal2_cases) auto
function [F,G] = snopt_callback(x,mmodel)

persistent model

if nargin > 1
    model = mmodel;
    return
end

if model.SimpleLinearObjective || model.SimpleQuadraticObjective
    [f,df] = fmincon_fun_liftlayer(x,model);
    [g,geq,dg,dgeq] = fmincon_con_liftlayer(x,model);
else
    [f,df,xevaled] = fmincon_fun_liftlayer(x,model);
    [g,geq,dg,dgeq] = fmincon_con_liftlayer(x,model,xevaled);
end
F = [f;g;geq];
n = length(x);
m = length(g);
p = length(geq);
G = [reshape(df,1,n);dg';dgeq'];
if ~isempty(model.A)
    F = [F;model.A*x - model.b];
    G = [G;model.A];
end
if ~isempty(model.Aeq)
    F = [F;model.Aeq*x - model.beq];
    G = [G;model.Aeq];
end
G = full(G(model.sparsityElements));
using RoadRunner using Test ant_str = """ J1: 2 S1 + 3 S2 -> 5 S3 + 7 S4; v v = 0 S1 = 10; S2 = 2.5; S3 = 3.4; S4 = 0 """ rr = RoadRunner.loada(ant_str) @testset "compartment" begin @test RoadRunner.getNumberOfCompartments(rr) == 1 end @testset "reaction" begin @test RoadRunner.getNumberOfReactions(rr) == 1 @test RoadRunner.getNumberOfRules(rr) == 0 @test RoadRunner.getReactionIds(rr) == ["J1"] end @testset "species" begin @test RoadRunner.getNumberOfFloatingSpecies(rr) == 4 @test RoadRunner.getFloatingSpeciesIds(rr) == ["S1", "S2", "S3", "S4"] @test RoadRunner.getNumberOfBoundarySpecies(rr) == 0 @test RoadRunner.getBoundarySpeciesIds(rr) == [] @test RoadRunner.getFloatingSpeciesInitialConcentrationByIndex(rr, 0) == 10 @test RoadRunner.getFloatingSpeciesInitialConcentrationByIndex(rr, 1) == 2.5 @test RoadRunner.getFloatingSpeciesInitialConcentrationByIndex(rr, 2) == 3.4 @test RoadRunner.getFloatingSpeciesInitialConcentrationByIndex(rr, 3) == 0 end
[STATEMENT] lemma assign_upd_blah: "(\<lambda>a. if a = x1 then s x1 else (s(x1 := aval x2 s)) a) = s" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (\<lambda>a. if a = x1 then s x1 else (s(x1 := aval x2 s)) a) = s [PROOF STEP] by(rule ext, auto)
# coding:utf-8
"""
Display driving statuses c0 - c9 in a grid, one column per status,
with six images per status sampled at random from the training set.
"""
import os
import glob
import random

import cv2
import matplotlib.pyplot as plt
import pandas as pd

data_dir = "../data/"
subdir = "train"
driver_imgs_list_csv = os.path.join(data_dir, "driver_imgs_list.csv")
df = pd.read_csv(driver_imgs_list_csv)


def driving_status_image():
    img_list = {}
    for i in range(10):
        train_dir = os.path.join(data_dir, "train", "c%d" % i)
        image_files = glob.glob(os.path.join(train_dir, "*.jpg"))
        files_count = len(image_files)
        select_image = random.sample(range(files_count), 6)
        status = 'c{}'.format(i)
        img_list[status] = []
        for select in select_image:
            img_path = image_files[select]
            img = cv2.imread(img_path, cv2.IMREAD_COLOR)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img_list[status].append(img)

    plt.figure(figsize=(6, 10))
    plt.suptitle('driving status')
    for status_id in range(10):
        status = 'c{}'.format(status_id)
        status_imgs = img_list[status]
        for i, img in enumerate(status_imgs):
            # 6x10 grid: image i of each status goes in row i, column status_id
            index = (status_id + 1) + 10 * i
            plt.subplot(6, 10, index)
            if i == 0:
                plt.title(status)
            plt.imshow(img)
            plt.axis('off')
    plt.show()


driving_status_image()
Require Import Coq.Lists.List. Require Import Coq.micromega.Lia. Require Import Coq.Init.Peano. Require Import Coq.Arith.PeanoNat. Require Import Coq.Arith.Compare_dec. Require Import Coq.PArith.BinPosDef. Require Import Coq.ZArith.BinInt Coq.ZArith.ZArith Coq.ZArith.Zdiv Coq.ZArith.Znumtheory Coq.NArith.NArith. (* import Zdiv before Znumtheory *) Require Import Coq.NArith.Nnat. Require Import Crypto.Algebra.Hierarchy Crypto.Algebra.Field. Require Import Crypto.Spec.ModularArithmetic. Require Import Crypto.Arithmetic.ModularArithmeticTheorems Crypto.Arithmetic.PrimeFieldTheorems. Require Import Crypto.Util.Decidable. (* Crypto.Util.Notations. *) Require Import Coq.setoid_ring.Ring_theory Coq.setoid_ring.Field_theory Coq.setoid_ring.Field_tac. Require Import Ring. From Circom Require Import Circom Default Util DSL Tuple ListUtil LibTactics Simplify. From Circom Require Import Repr ReprZ. From Circom.CircomLib Require Import Bitify Comparators. From Circom.BigInt.Proof Require Import BigAdd BigLessThan BigSub. From Circom.BigInt.Definition Require Import BigAdd BigLessThan BigSub. From Circom.BigInt.Definition Require Import BigAddModP. (* Circuit: * https://github.com/yi-sun/circom-pairing/blob/master/circuits/bigint.circom *) Module BigAddModP. Module B := Bitify. Module D := DSL. Module Cmp := Comparators. Module RZU := ReprZUnsigned. Module RZ := RZU.RZ. Module R := Repr. Module Add := BigAdd.BigAdd. Module Sub := BigSub.BigSub. Module Lt := BigLessThan.BigLessThan. Local Open Scope list_scope. Local Open Scope Z_scope. Local Open Scope F_scope. Local Open Scope circom_scope. Local Open Scope tuple_scope. Local Coercion Z.of_nat: nat >-> Z. Local Coercion N.of_nat: nat >-> N. Section _BigAddModP. Context {n k: nat}. Definition cons (a b p out: F^k) := exists (add: @Add.t n k) (lt: @Lt.t n (S k)) (sub: @Sub.t n (S k)), D.iter (fun i _cons => _cons /\ add.(Add.a)[i] = a[i] /\ add.(Add.b)[i] = b[i]) k True /\ D.iter (fun i _cons => _cons /\ lt.(Lt.a)[i] = add.(Add.out)[i] /\ lt.(Lt.b)[i] = p[i]) k True /\ lt.(Lt.a)[k] = add.(Add.out)[k] /\ lt.(Lt.b)[k] = 0 /\ D.iter (fun i _cons => _cons /\ sub.(Sub.a)[i] = add.(Add.out)[i] /\ sub.(Sub.b)[i] = (1 - lt.(Lt.out)) * p[i]) k True /\ sub.(Sub.a)[k] = add.(Add.out)[k] /\ sub.(Sub.b)[k] = 0 /\ sub.(Sub.out)[k] = 0 /\ D.iter (fun i _cons => _cons /\ out[i] = sub.(Sub.out)[i]) k True. Record t := { a: F^k; b: F^k; p: F^k; out: F^k; _cons: cons a b p out }. Local Notation "[| xs |]" := (RZ.as_le n xs). Local Notation "xs !! i" := (List.nth i xs _) (at level 10). Ltac pose_as_le_nonneg := repeat match goal with | [ |- context[RZ.as_le ?n ?xs ] ] => let t := type of (RZU.as_le_nonneg n xs) in lazymatch goal with (* already posed *) | [ _: t |- _] => fail | _ => let Hnonneg := fresh "_Hnonneg" in pose proof (RZU.as_le_nonneg n xs) as Hnonneg ;move Hnonneg at top end | _ => fail end. Ltac rewrite_length := repeat match goal with | [ H: length ?xs = ?l |- context[length ?xs] ] => rewrite H end; simplify. Ltac lrewrite := repeat match goal with | [ H: ?x = _ |- context[?x] ] => rewrite H end. Ltac rrewrite := repeat match goal with | [ H: _ = ?x |- context[?x] ] => rewrite H end. Lemma scale_0: forall l, List.map (fun pi => 0 * pi) l = List.repeat (0:F) (length l). Proof. induction l as [ | x l]; simpl; auto. simplify. f_equal. auto. Qed. Lemma as_le_0: forall i, [| List.repeat (0:F) i|] = 0%Z. Proof. induction i; simpl; auto. rewrite IHi. simplify. Qed. 
Lemma as_le_msb_0: forall xs l, length xs = S l -> xs |: (n) -> [| xs |] <= 2^(n*l)-1 -> [| xs |] = [| xs[:l] |]. Proof. intros. unwrap_C. pose proof (RZ.as_le_split_last' n l xs). specialize (H2 H). rewrite H2. destruct (dec (xs ! l = 0)). rewrite e. simplify. remember (RZ.ToZ.to_Z (xs ! l)) as y. assert (y <> 0)%Z. subst. apply F.to_Z_nonzero. auto. assert (0 <= y). subst. apply F.to_Z_range. lia. assert (0 <= [|xs [:l]|]). pose_as_le_nonneg. lia. assert ([|xs [:l]|] <= 2 ^ (n * l) - 1). eapply RZU.repr_le_ub. applys_eq RZ.repr_trivial. rewrite firstn_length_le; lia. apply Forall_firstn. auto. exfalso. nia. Qed. Lemma scale_binary_range: forall s xs, xs |: (n) -> binary s -> List.map (fun x => s * x) xs |: (n). Proof. induction xs; simpl; intros; constructor; inversion H; subst; clear H. destruct H0; subst; simplify. rewrite F.to_Z_0. lia. apply IHxs; auto. Qed. Lemma scale_binary0: forall (s: F) xs l, s = 0 -> l = length xs -> List.map (fun x => s * x) xs = List.repeat 0 l. Proof. induction xs; intros; subst; simpl in *; simplify. reflexivity. f_equal. auto. Qed. Lemma scale_binary1: forall (s: F) xs l, s = 1 -> l = length xs -> List.map (fun x => s * x) xs = xs. Proof. induction xs; intros; subst; simpl in *; simplify. reflexivity. f_equal. erewrite IHxs; eauto. Qed. Lemma Zmod_once: forall a b c, 0 <= a < c -> 0 <= b < c -> c <= a + b -> ((a + b) mod c = (a + b) - c)%Z. Proof. intros a b c. intros. rewrite Zmod_eq by lia. assert ((a+b)/c < 2). apply Zdiv_lt_upper_bound. lia. lia. assert (1 <= (a+b)/c). apply Zdiv_le_lower_bound; lia. nia. Qed. #[local]Hint Extern 10 => match goal with | [ |- context[ length (firstn _ _) ] ] => rewrite firstn_length_le by lia end : core. #[local]Hint Extern 10 (eq (F.F q) _ _) => fqsatz : core. #[local]Hint Extern 10 F => exact F.zero: core. Theorem soundness: forall (c: t), (* pre-conditions *) 0 < n -> 0 < k -> n + 2 <= C.k -> 'c.(a) |: (n) -> 'c.(b) |: (n) -> 'c.(p) |: (n) -> [|'c.(a)|] <= [|'c.(p)|] - 1 -> [|'c.(b)|] <= [|'c.(p)|] - 1 -> (* post-conditions *) [|'c.(out)|] = ([|'c.(a)|] + [|'c.(b)|]) mod [|'c.(p)|] /\ 'c.(out) |: (n). Proof with (lia || eauto). unwrap_C. intros c Hn Hk Hnk Ha Hb Hp Hap Hbp. destruct c as [a b p out [add [lt [sub prog]]]]. destruct prog as [Padd [Plt [Plt_ak [Plt_bk [Psub [Psub_ak [Psub_bk [Psub_outk Pout]]]]]]]]. simpl in *. lift_to_list. pose_lengths. rem_iter. assert (0 < 2^n). { apply Zpow_facts.Zpower_gt_0... } assert (0 <= [|' a|] + [|' b|]) by (pose_as_le_nonneg; lia). assert ([|'p|] <= 2^(n*k)-1). { eapply RZU.repr_le_ub with (xs:='p). unfold RZU.RZ.repr_le. intuition. } (* add *) pose (Iadd := fun (i:nat) _cons => _cons -> 'add.(Add.a)[:i] = 'a[:i] /\ 'add.(Add.b)[:i] = 'b[:i]). assert (Hadd: Iadd k (D.iter f2 k True)) by connection Iadd. pose proof (Add.soundness add) as Sadd. destruct Hadd as [Hadd_a Hadd_b]... firstn_all. rewrite Hadd_a, Hadd_b in *. clear Hadd_a Hadd_b. destruct Sadd as [Sadd_out Sadd_out_range]... subst f2. clear Padd Iadd. (* less than *) pose (Ilt := fun (i:nat) _cons => _cons -> 'lt.(Lt.a)[:i] = 'add.(Add.out)[:i] /\ 'lt.(Lt.b)[:i] = 'p[:i]). assert (Hlt: Ilt k (D.iter f1 k True)) by connection Ilt. destruct Hlt as [Hlt_a Hlt_b]... clear Ilt. pose proof (Lt.soundness lt) as Slt. destruct Slt as [Slt_bin Slt]; try lia; try (applys_eq (@Forall_firstn_and_last F); rewrite_length; fold_default; lrewrite; unfold_default; try apply Forall_firstn)... apply Forall_nth... rewrite F.to_Z_0... assert (Hlt_a': 'lt.(Lt.a) = 'add.(Add.out)). { applys_eq (@list_tail_congruence F)... fold_default... 
} rewrite Hlt_a', Sadd_out in *. clear Hlt_a Hlt_a'. assert (Hlt_b': [|' Lt.b lt |] = [|'p|]). { erewrite RZ.as_le_split_last with (ws:='Lt.b lt) (i:=k). rewrite Hlt_b, Plt_bk. firstn_all. simplify. applys_eq RZ.repr_trivial... eapply Forall_firstn_and_last; rewrite_length; fold_default; lrewrite; unfold_default. apply Forall_firstn... rewrite F.to_Z_0... } move Hlt_b' before Hlt_b. (* sub *) (* sub.a = add.out *) pose (Isub_a := fun (i: nat) _cons => _cons -> 'Sub.a sub [:i] = 'Add.out add [:i]). assert (Hsub_a: Isub_a k (D.iter f0 k True)) by connection Isub_a. specialize (Hsub_a Psub). assert (Hsub_a': 'sub.(Sub.a) = 'add.(Add.out)). { applys_eq (@list_tail_congruence F)... fold_default... } clear Hsub_a Isub_a Psub_ak. (* [|sub.b|] = [|p|] *) pose (Isub_b := fun (i: nat) _cons => _cons -> 'Sub.b sub [:i] = List.map (fun pi => (1-Lt.out lt) * pi) ('p)[:i]). assert (Hsub_b: Isub_b k (D.iter f0 k True)). { apply D.iter_inv; unfold Isub_b. - easy. - intros i _cons IH Hi Hstep. subst f0. lift_to_list. intuition. applys_eq (@firstn_congruence F)... rewrite map_nth. fold_default... } specialize (Hsub_b Psub). clear Isub_b. rewrite Hlt_b' in *. (* out *) pose (Iout := fun (i: nat) _cons => _cons -> 'out [:i] = 'Sub.out sub [:i]). assert (Hout: Iout k (D.iter f k True)) by connection Iout. specialize (Hout Pout). clear Pout Iout. firstn_all. destruct (Sub.soundness_ite sub) as [H_sub_out_bin [H_sub_out H_sub]]; try lia. rewrite Hsub_a'. auto. eapply Forall_firstn_and_last; rewrite_length. rewrite Hsub_b. rewrite firstn_map. firstn_all. apply scale_binary_range... eapply one_minus_binary; eauto. fold_default. rewrite Psub_bk. autorewrite with F_to_Z... rewrite Hsub_a', Sadd_out in *. destruct (dec ([|' a|] + [|' b|] < [|' p|])). - rewrite Zmod_small... assert (Hsub_b': 'Sub.b sub = List.repeat 0 (S k)). { erewrite <- firstn_split_last with (l:=' Sub.b sub) (n:=k)... cbn [List.repeat]. rewrite repeat_cons. apply app_congruence_iff; intuition. rewrite firstn_length_le, repeat_length... replace (1 - lt.(Lt.out)) with (0:F) in Hsub_b by fqsatz. rewrite Hsub_b, scale_0. rewrite_length. rewrite firstn_all2... rewrite repeat_length... fold_default. f_equal... } rewrite Hsub_b', as_le_0 in *. destruct (dec ([|' a|] + [|' b|] >= 0)); try lia. intuition; rewrite Hout. rewrite <- as_le_msb_0... apply Forall_firstn... - rewrite Zmod_once... rewrite Hout. assert (Hsub_bk: [|'sub.(Sub.b)|] = [|'sub.(Sub.b)[:k]|]). { erewrite RZ.as_le_split_last with (i:=k). lrewrite. simplify. applys_eq RZ.repr_trivial... eapply Forall_firstn_and_last; rewrite_length; lrewrite... apply Forall_firstn. apply scale_binary_range... eapply one_minus_binary... fold_default. lrewrite. autorewrite with F_to_Z... } assert (Hsub_outk: [|'sub.(Sub.out)|] = [|'sub.(Sub.out)[:k]|]). { erewrite RZ.as_le_split_last with (i:=k). lrewrite. simplify. applys_eq RZ.repr_trivial... } destruct (Slt_bin) as [Hlt_out | Hlt_out]; rewrite Hlt_out in *. split_dec. erewrite scale_binary1 with (l:=k) in Hsub_b; try fqsatz... rewrite firstn_all2 with (l:='p) in Hsub_b... rewrite Hsub_bk, Hsub_b, <- Hsub_outk in *. intuition. apply Forall_firstn... erewrite scale_binary1 with (l:=k) in Hsub_b; try fqsatz... rewrite firstn_all2 with (l:='p) in Hsub_b... rewrite Hsub_bk, Hsub_b, <- Hsub_outk in *. lia. exfalso. erewrite scale_binary0 with (l:=k) in Hsub_b; try fqsatz... assert ([|' a|] + [|' b|] < [|' p|]). apply Slt... lia. Unshelve. all:auto. Qed. End _BigAddModP. End BigAddModP.
# Recursive Least Squares Estimation

## Reference

[1] L. Ljung, System Identification Theory for the User

## 1 Linear Regressions and Least Squares

### 1.1 Autoregressive-moving-average Model

Consider the autoregressive-moving-average (ARMA) model,

### 1.2 Least-squares Criterion

To obtain $\theta$ we first need a criterion for judging how good a candidate $\theta$ is. Naturally, we care about the difference between the model output and the actual output, so define the error as:
\begin{equation} \epsilon(t,\theta)=y(t)-\phi^T(t)\theta \end{equation}
For convenience in the computations that follow, take
$$V_N(\theta,Z^N)=\frac{1}{2N}\sum_{t=1}^N[\epsilon]^2$$
as the criterion function. Taking the partial derivative of $V_N$ with respect to $\theta$ and using $\phi\phi^T\theta=\phi y$ yields the least-squares estimate $\hat{\theta}$ of $\theta$:
$$\hat{\theta}=\arg_\theta\min V_N(\theta,Z^N)=[\frac{1}{N}\sum_{t=1}^N\phi(t)\phi^T(t)]^{-1}\frac{1}{N}\sum_{t=1}^N\phi(t)y(t)$$

*Note: the SISO case is treated here, i.e. $y(t),u(t)$ are both scalars.*

Define:
$$R(N)=\frac{1}{N}\sum_{t=1}^N\phi(t)\phi^T(t)\in R^{d\times d}$$
$$f(N)=\frac{1}{N}\sum_{t=1}^N\phi(t)y(t)\in R^d$$

### 1.3 Weighted Least-Squares Estimation (WLS)

If the system parameters vary quickly, fitting past input-output data is of little value, so weights are introduced into the criterion to decay the influence of old data.
The modified criterion is:
$$V_N(\theta,Z^N)=\frac{1}{2N}\sum_{t=1}^N[\alpha_i\epsilon]^2$$ (fixed weights)
or
$$V_N(\theta,Z^N)=\frac{1}{2N}\sum_{t=1}^N[\beta(N,t)\epsilon]^2$$ (time-varying weights)

## 2 Recursive Least-Squares Algorithm

### 2.1 Prototype of RLS

With the weighted least-squares criterion:
$$\hat{\theta_t}=\arg_\theta\min\sum_{k=1}^{t}\beta(t,k)[y(k)-\phi^T(k)\theta]^2$$
the optimal estimate is:
$$\hat{\theta_t}=\bar{R}^{-1}(t)f(t)$$
where:
$$\bar{R}(t)=\sum_{k=1}^t\beta(t,k)\phi(k)\phi^T(k)$$
$$f(t)=\sum_{k=1}^t\beta(t,k)\phi(k)y(k)$$
Let the weight sequence satisfy:
$$\beta(t,k)=\lambda(t)\beta(t-1,k)$$
$$\beta(t,t)=1$$
that is:
$$\beta(t,k)=\prod_{j=k+1}^t\lambda(j)$$

*Note: only this particular weight sequence is treated here.*

Under this weight sequence, and recalling the definitions of $\bar{R}(t),f(t)$, the pair $\bar{R}(t),f(t)$ satisfies:
$$\bar{R}(t)=\lambda(t)\bar{R}(t-1)+\phi(t)\phi^T(t)$$
$$f(t)=\lambda(t)f(t-1)+\phi(t)y(t)$$
Substituting these relations into the expression for $\hat{\theta}_t$:
\begin{align}
\hat{\theta}_t & = \bar{R}(t)^{-1}f(t) \\
& = \bar{R}(t)^{-1}[\lambda(t)f(t-1)+\phi(t)y(t)] \\
& = \bar{R}(t)^{-1}[\lambda(t)\bar{R}(t-1)\hat{\theta}_{t-1}+\phi(t)y(t)] \\
& = \bar{R}(t)^{-1}[(\bar{R}(t)-\phi(t)\phi^T(t))\hat{\theta}_{t-1}+\phi(t)y(t)] \\
& = \hat{\theta}_{t-1} + \bar{R}^{-1}(t)\phi(t)[y(t)-\phi^T(t)\hat{\theta}_{t-1}]
\end{align}
This yields the **two recursions of RLS**:
\begin{eqnarray}
& \hat{\theta}_t & = \hat{\theta}_{t-1} + \bar{R}^{-1}(t)\phi(t)[y(t)-\phi^T(t)\hat{\theta}_{t-1}] \\
& \bar{R}(t) & = \lambda(t)\bar{R}(t-1)+\phi(t)\phi^T(t)
\end{eqnarray}
These expressions require inverting a matrix, which is computationally expensive, so we try to turn the inversion into a cheaper operation.
Introduce the matrix-inversion lemma:
$$\exists A,B,C,D:P=A+BCD \rightarrow P^{-1}=[A+BCD]^{-1}=A^{-1}-A^{-1}B[DA^{-1}B+C^{-1}]^{-1}DA^{-1}$$
where $A,B,C,D,P$ are all matrices of compatible dimensions.
Let $P(t)=\bar{R}^{-1}(t)$ and match the recursion to the form in the lemma, with $A=\lambda(t)\bar{R}(t-1), B=\phi(t), C=I, D=\phi^T(t)$; then
\begin{align}
P(t) &= A^{-1}-A^{-1}B[DA^{-1}B+C^{-1}]^{-1}DA^{-1} \\
&= \frac{1}{\lambda(t)}[P(t-1)-\frac{P(t-1)\phi(t)\phi^T(t)P(t-1)}{\lambda(t)+\phi^T(t)P(t-1)\phi(t)}]
\end{align}
This recursion can be simplified further; consider $P(t)\phi(t)$:
\begin{align}
P(t)\phi(t) &= \frac{1}{\lambda(t)}P(t-1)\phi(t)-\frac{1}{\lambda(t)}\frac{P(t-1)\phi(t)\phi^T(t)P(t-1)\phi(t)}{\lambda(t)+\phi^T(t)P(t-1)\phi(t)} \\
&=\frac{P(t-1)\phi(t)}{\lambda(t)+\phi^T(t)P(t-1)\phi(t)}
\end{align}
Using this result and letting $L(t)=P(t)\phi(t)$, we obtain the weighted RLS estimation algorithm (also called RLS with a forgetting factor):

> Weighted RLS algorithm:
\begin{eqnarray}
& L(t) &= \frac{P(t-1)\phi(t)}{\lambda(t)+\phi^T(t)P(t-1)\phi(t)} \\
& \hat{\theta}_t & = \hat{\theta}_{t-1} + L(t)[y(t)-\phi^T(t)\hat{\theta}_{t-1}] \\
& P(t) &= \frac{1}{\lambda(t)}[P(t-1)-L(t)\phi^T(t)P(t-1)] \\
\end{eqnarray}
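A minimal NumPy sketch of the weighted RLS recursion boxed above, with the forgetting factor taken as a constant `lam`. The ARX demo system, its coefficients, and the initialisation $P(0)=10^4 I$ are illustrative assumptions, not from the source.

```python
import numpy as np

def rls_step(theta, P, phi, y, lam=0.99):
    """One weighted RLS update: returns (theta, P) after seeing (phi, y)."""
    phi = phi.reshape(-1, 1)
    L = P @ phi / (lam + (phi.T @ P @ phi).item())              # gain L(t)
    theta = theta + (L * (y - (phi.T @ theta).item())).ravel()  # theta update
    P = (P - L @ phi.T @ P) / lam                               # P(t) update
    return theta, P

# Hypothetical demo: identify y(t) = a*y(t-1) + b*u(t-1) + noise
rng = np.random.default_rng(0)
a_true, b_true = 0.8, 1.5
u = rng.standard_normal(500)
y = np.zeros(500)
for t in range(1, 500):
    y[t] = a_true * y[t-1] + b_true * u[t-1] + 0.05 * rng.standard_normal()

theta = np.zeros(2)
P = 1e4 * np.eye(2)  # large initial covariance: little confidence in theta(0)
for t in range(1, 500):
    phi = np.array([y[t-1], u[t-1]])  # regressor phi(t)
    theta, P = rls_step(theta, P, phi, y[t], lam=0.99)
print(theta)  # should approach [0.8, 1.5]
```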
From Test Require Import tactic. Section FOFProblem. Variable Universe : Set. Variable UniverseElement : Universe. Variable wd_ : Universe -> Universe -> Prop. Variable col_ : Universe -> Universe -> Universe -> Prop. Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)). Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)). Variable col_triv_3 : (forall A B : Universe, col_ A B B). Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)). Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)). Theorem pipo_6 : (forall A B C Aprime Cprime P Q : Universe, ((wd_ A B /\ (wd_ B C /\ (wd_ A C /\ (wd_ Aprime Cprime /\ (wd_ P Q /\ (wd_ A Aprime /\ (wd_ B Aprime /\ (wd_ C Cprime /\ (wd_ A P /\ (wd_ A Q /\ (wd_ Aprime Aprime /\ (wd_ B Cprime /\ (wd_ Aprime C /\ (col_ Aprime P Q /\ (col_ Cprime P Q /\ (col_ A B C /\ col_ A B Aprime)))))))))))))))) -> col_ Aprime B C)). Proof. time tac. Qed. End FOFProblem.
[STATEMENT] lemma boundOutputSupportDerivative: assumes "P \<longmapsto>a<\<nu>x> \<prec> P'" and "x \<sharp> P" shows "(supp P') - {x} \<subseteq> supp P" [PROOF STATE] proof (prove) goal (1 subgoal): 1. supp P' - {x} \<subseteq> supp P [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: P \<longmapsto> a<\<nu>x> \<prec> P' x \<sharp> P goal (1 subgoal): 1. supp P' - {x} \<subseteq> supp P [PROOF STEP] by(nominal_induct rule: boundOutputInduct) (auto simp add: pi.supp abs_supp supp_atm dest: outputSupportDerivative)
\subsection{Classical principal component analysis}

\subsubsection{Introduction}

Principal component analysis takes a dataset \(X\) with \(m\) variables and returns a principal component matrix \(A\) of size \(m\times k\). Each new dimension is a linear function of the existing data: \(Z=XA\).

The new dimensions are mutually uncorrelated and are ordered by descending explained variance.

The problem of principal component analysis is to find these weightings \(A\).

\subsubsection{Classical PCA}

We take the first \(k\) eigenvectors of the covariance matrix, ordered by eigenvalue.

\subsubsection{Getting the eigenvectors using SVD}

We can decompose \(X=U\Sigma A^T\). We can take the eigenvectors from \(A\).

\subsubsection{Choosing the number of dimensions}

We can choose \(k\) such that a certain percentage of the variance is retained.
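A one-line check of the SVD claim above, under the assumption (not stated in the source) that the columns of \(X\) are mean-centred:

\[
X^T X = (U\Sigma A^T)^T(U\Sigma A^T) = A\Sigma^T U^T U\Sigma A^T = A\Sigma^2 A^T,
\]

so the columns of \(A\) are exactly the eigenvectors of \(X^T X\), with eigenvalues \(\sigma_i^2\); dividing by \(n-1\) to form the sample covariance rescales the eigenvalues but leaves the eigenvectors unchanged.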
The Bobro Engineering LowRider sights are designed from the start to be an extremely low profile set of BUIS for a specific purpose. Many Short Barreled Rifle owners have run into an issue with figuring out how to mount a set of BUIS on the upper and still fit their needed Lights, Lasers and Optics. The idea was to make it so that the Rear Sight was so low profile that there was almost no possible way it could interfere with optics and the Front Sight could easily be mounted under the front arm of the optic mount. This keeps them out of the way, but they can still be used in the event the primary optic has a failure. Even though these sights were designed to fill a specific need, they can
Mfg: Bobro Engineering.