module Issue759a where
import Common.Level
abstract
record Wrap (A : Set) : Set where
field wrapped : A
open Wrap public
wrap : {A : Set} → A → Wrap A
wrap a = record { wrapped = a }
-- WAS: Broken error message:
-- Not in scope:
-- Issue759a.recCon-NOT-PRINTED at
-- when checking the definition of wrap
-- NOW:
-- Expected non-abstract record type, found Wrap
-- when checking that the expression record { wrapped = a } has type
-- Wrap .A
|
(* Title: Containers/Lexicographic_Order.thy
Author: Andreas Lochbihler, KIT *)
theory Lexicographic_Order imports
List_Fusion
"HOL-Library.Char_ord"
begin
hide_const (open) List.lexordp
section \<open>List fusion for lexicographic order\<close>
context linorder begin
lemma lexordp_take_index_conv:
"lexordp xs ys \<longleftrightarrow>
(length xs < length ys \<and> take (length xs) ys = xs) \<or>
(\<exists>i < min (length xs) (length ys). take i xs = take i ys \<and> xs ! i < ys ! i)"
(is "?lhs = ?rhs")
proof
assume ?lhs thus ?rhs
by induct (auto 4 3 del: disjCI intro: disjI2 exI[where x="Suc i" for i])
next
assume ?rhs (is "?prefix \<or> ?less") thus ?lhs
proof
assume "?prefix"
hence "ys = xs @ hd (drop (length xs) ys) # tl (drop (length xs) ys)"
by (metis append_Nil2 append_take_drop_id less_not_refl list.collapse)
thus ?thesis unfolding lexordp_iff by blast
next
assume "?less"
then obtain i where "i < min (length xs) (length ys)"
and "take i xs = take i ys" and nth: "xs ! i < ys ! i" by blast
hence "xs = take i xs @ xs ! i # drop (Suc i) xs" "ys = take i xs @ ys ! i # drop (Suc i) ys"
by -(subst append_take_drop_id[symmetric, of _ i], simp_all add: Cons_nth_drop_Suc)
with nth show ?thesis unfolding lexordp_iff by blast
qed
qed
\<comment> \<open>lexord is an extension of the partial ordering List.lex\<close>
lemma lexordp_lex: "(xs, ys) \<in> lex {(xs, ys). xs < ys} \<longleftrightarrow> lexordp xs ys \<and> length xs = length ys"
proof(induct xs arbitrary: ys)
case Nil thus ?case by clarsimp
next
case Cons thus ?case by(cases ys)(simp_all, safe, simp)
qed
end
subsection \<open>Setup for list fusion\<close>
context ord begin
definition lexord_fusion :: "('a, 's1) generator \<Rightarrow> ('a, 's2) generator \<Rightarrow> 's1 \<Rightarrow> 's2 \<Rightarrow> bool"
where [code del]: "lexord_fusion g1 g2 s1 s2 = lexordp (list.unfoldr g1 s1) (list.unfoldr g2 s2)"
definition lexord_eq_fusion :: "('a, 's1) generator \<Rightarrow> ('a, 's2) generator \<Rightarrow> 's1 \<Rightarrow> 's2 \<Rightarrow> bool"
where [code del]: "lexord_eq_fusion g1 g2 s1 s2 = lexordp_eq (list.unfoldr g1 s1) (list.unfoldr g2 s2)"
lemma lexord_fusion_code:
"lexord_fusion g1 g2 s1 s2 \<longleftrightarrow>
(if list.has_next g1 s1 then
if list.has_next g2 s2 then
let (x, s1') = list.next g1 s1;
(y, s2') = list.next g2 s2
in x < y \<or> \<not> y < x \<and> lexord_fusion g1 g2 s1' s2'
else False
else list.has_next g2 s2)"
unfolding lexord_fusion_def
by(subst (1 2) list.unfoldr.simps)(auto split: prod.split_asm)
lemma lexord_eq_fusion_code:
"lexord_eq_fusion g1 g2 s1 s2 \<longleftrightarrow>
(list.has_next g1 s1 \<longrightarrow>
list.has_next g2 s2 \<and>
(let (x, s1') = list.next g1 s1;
(y, s2') = list.next g2 s2
in x < y \<or> \<not> y < x \<and> lexord_eq_fusion g1 g2 s1' s2'))"
unfolding lexord_eq_fusion_def
by(subst (1 2) list.unfoldr.simps)(auto split: prod.split_asm)
end
lemmas [code] =
lexord_fusion_code ord.lexord_fusion_code
lexord_eq_fusion_code ord.lexord_eq_fusion_code
lemmas [symmetric, code_unfold] =
lexord_fusion_def ord.lexord_fusion_def
lexord_eq_fusion_def ord.lexord_eq_fusion_def
end
|
C**************************************************************************
C FFTPACK: Available at http://www.scd.ucar.edu/softlib/mathlib.html
C
C ***NOTE (C.T.) *** this is a subset of FFTPACK, which only includes
C the routines for the complex FFT (forward & inverse).
C
C Modified: November 1999 by Arjan van Dijk to include IMPLICIT NONE and
C to convert all routines to DOUBLE precision.
C**************************************************************************
C * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C * *
C * FFTPACK *
C * *
C * A package of Fortran subprograms for calculating *
C * fast Fourier transforms for both complex and real *
C * periodic sequences and certain other symmetric *
C * sequences that are listed below *
C * (Version 4.1 November 1988) *
C * by *
C * Paul Swarztrauber *
C * of *
C * The National Center for Atmospheric Research *
C * Boulder, Colorado (80307) U.S.A. *
C * which is sponsored by *
C * the National Science Foundation *
C * *
C * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C
C Any source code available through this distribution interface at NCAR
C is free of charge, but there is no guarantee.
C
C FFTPACK breaks the FORTRAN 77 ANSI Standard
C by passing REAL arrays to subroutines and using the arrays within
C the subroutines as DOUBLE PRECISION or other types. This infraction
C may cause data alignment problems when the source code is compiled
C and loaded in an executable.
C****************************************************************************
C SUBROUTINE CFFTI(N,WSAVE)
C
C SUBROUTINE CFFTI INITIALIZES THE ARRAY WSAVE WHICH IS USED IN
C BOTH CFFTF AND CFFTB. THE PRIME FACTORIZATION OF N TOGETHER WITH
C A TABULATION OF THE TRIGONOMETRIC FUNCTIONS ARE COMPUTED AND
C STORED IN WSAVE.
C
C INPUT PARAMETER
C
C N THE LENGTH OF THE SEQUENCE TO BE TRANSFORMED
C
C OUTPUT PARAMETER
C
C WSAVE A WORK ARRAY WHICH MUST BE DIMENSIONED AT LEAST 4*N+15
C THE SAME WORK ARRAY CAN BE USED FOR BOTH CFFTF AND CFFTB
C AS LONG AS N REMAINS UNCHANGED. DIFFERENT WSAVE ARRAYS
C ARE REQUIRED FOR DIFFERENT VALUES OF N. THE CONTENTS OF
C WSAVE MUST NOT BE CHANGED BETWEEN CALLS OF CFFTF OR CFFTB.
C
SUBROUTINE CFFTI (N,WSAVE)
IMPLICIT NONE
INTEGER N,IW1,IW2
DOUBLE PRECISION WSAVE
DIMENSION WSAVE(*)
C
IF (N .EQ. 1) RETURN
IW1 = N+N+1
IW2 = IW1+N+N
CALL CFFTI1 (N,WSAVE(IW1),WSAVE(IW2))
RETURN
END
C****************************************************************************
C SUBROUTINE CFFTF(N,C,WSAVE)
C
C SUBROUTINE CFFTF COMPUTES THE FORWARD COMPLEX DISCRETE FOURIER
C TRANSFORM (THE FOURIER ANALYSIS). EQUIVALENTLY , CFFTF COMPUTES
C THE FOURIER COEFFICIENTS OF A COMPLEX PERIODIC SEQUENCE.
C THE TRANSFORM IS DEFINED BELOW AT OUTPUT PARAMETER C.
C
C THE TRANSFORM IS NOT NORMALIZED. TO OBTAIN A NORMALIZED TRANSFORM
C THE OUTPUT MUST BE DIVIDED BY N. OTHERWISE A CALL OF CFFTF
C FOLLOWED BY A CALL OF CFFTB WILL MULTIPLY THE SEQUENCE BY N.
C
C THE ARRAY WSAVE WHICH IS USED BY SUBROUTINE CFFTF MUST BE
C INITIALIZED BY CALLING SUBROUTINE CFFTI(N,WSAVE).
C
C INPUT PARAMETERS
C
C
C N THE LENGTH OF THE COMPLEX SEQUENCE C. THE METHOD IS
C MORE EFFICIENT WHEN N IS THE PRODUCT OF SMALL PRIMES.
C
C C A COMPLEX ARRAY OF LENGTH N WHICH CONTAINS THE SEQUENCE
C
C WSAVE A REAL WORK ARRAY WHICH MUST BE DIMENSIONED AT LEAST 4N+15
C IN THE PROGRAM THAT CALLS CFFTF. THE WSAVE ARRAY MUST BE
C INITIALIZED BY CALLING SUBROUTINE CFFTI(N,WSAVE) AND A
C DIFFERENT WSAVE ARRAY MUST BE USED FOR EACH DIFFERENT
C VALUE OF N. THIS INITIALIZATION DOES NOT HAVE TO BE
C REPEATED SO LONG AS N REMAINS UNCHANGED THUS SUBSEQUENT
C TRANSFORMS CAN BE OBTAINED FASTER THAN THE FIRST.
C THE SAME WSAVE ARRAY CAN BE USED BY CFFTF AND CFFTB.
C
C OUTPUT PARAMETERS
C
C C FOR J=1,...,N
C
C C(J)=THE SUM FROM K=1,...,N OF
C
C C(K)*EXP(-I*(J-1)*(K-1)*2*PI/N)
C
C WHERE I=SQRT(-1)
C
C WSAVE CONTAINS INITIALIZATION CALCULATIONS WHICH MUST NOT BE
C DESTROYED BETWEEN CALLS OF SUBROUTINE CFFTF OR CFFTB
C
SUBROUTINE CFFTF (N,C,WSAVE)
IMPLICIT NONE
INTEGER N,IW1,IW2
DOUBLE PRECISION C,WSAVE
DIMENSION C(*) ,WSAVE(*)
C
IF (N .EQ. 1) RETURN
IW1 = N+N+1
IW2 = IW1+N+N
CALL CFFTF1 (N,C,WSAVE,WSAVE(IW1),WSAVE(IW2))
RETURN
END
C****************************************************************************
C SUBROUTINE CFFTB(N,C,WSAVE)
C
C SUBROUTINE CFFTB COMPUTES THE BACKWARD COMPLEX DISCRETE FOURIER
C TRANSFORM (THE FOURIER SYNTHESIS). EQUIVALENTLY , CFFTB COMPUTES
C A COMPLEX PERIODIC SEQUENCE FROM ITS FOURIER COEFFICIENTS.
C THE TRANSFORM IS DEFINED BELOW AT OUTPUT PARAMETER C.
C
C A CALL OF CFFTF FOLLOWED BY A CALL OF CFFTB WILL MULTIPLY THE
C SEQUENCE BY N.
C
C THE ARRAY WSAVE WHICH IS USED BY SUBROUTINE CFFTB MUST BE
C INITIALIZED BY CALLING SUBROUTINE CFFTI(N,WSAVE).
C
C INPUT PARAMETERS
C
C
C N THE LENGTH OF THE COMPLEX SEQUENCE C. THE METHOD IS
C MORE EFFICIENT WHEN N IS THE PRODUCT OF SMALL PRIMES.
C
C C A COMPLEX ARRAY OF LENGTH N WHICH CONTAINS THE SEQUENCE
C
C WSAVE A REAL WORK ARRAY WHICH MUST BE DIMENSIONED AT LEAST 4N+15
C IN THE PROGRAM THAT CALLS CFFTB. THE WSAVE ARRAY MUST BE
C INITIALIZED BY CALLING SUBROUTINE CFFTI(N,WSAVE) AND A
C DIFFERENT WSAVE ARRAY MUST BE USED FOR EACH DIFFERENT
C VALUE OF N. THIS INITIALIZATION DOES NOT HAVE TO BE
C REPEATED SO LONG AS N REMAINS UNCHANGED THUS SUBSEQUENT
C TRANSFORMS CAN BE OBTAINED FASTER THAN THE FIRST.
C THE SAME WSAVE ARRAY CAN BE USED BY CFFTF AND CFFTB.
C
C OUTPUT PARAMETERS
C
C C FOR J=1,...,N
C
C C(J)=THE SUM FROM K=1,...,N OF
C
C C(K)*EXP(I*(J-1)*(K-1)*2*PI/N)
C
C WHERE I=SQRT(-1)
C
C WSAVE CONTAINS INITIALIZATION CALCULATIONS WHICH MUST NOT BE
C DESTROYED BETWEEN CALLS OF SUBROUTINE CFFTF OR CFFTB
C
SUBROUTINE CFFTB (N,C,WSAVE)
IMPLICIT NONE
INTEGER N,IW1,IW2
DOUBLE PRECISION C,WSAVE
DIMENSION C(*) ,WSAVE(*)
C
IF (N .EQ. 1) RETURN
IW1 = N+N+1
IW2 = IW1+N+N
CALL CFFTB1 (N,C,WSAVE,WSAVE(IW1),WSAVE(IW2))
RETURN
END
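C****************************************************************************
C     EXAMPLE DRIVER (AN EDITORIAL SKETCH, NOT PART OF THE ORIGINAL
C     FFTPACK DISTRIBUTION).  IT SHOWS THE CALLING SEQUENCE DOCUMENTED
C     ABOVE: CFFTI INITIALIZES WSAVE (AT LEAST 4*N+15 DOUBLES), AND A
C     CALL OF CFFTF FOLLOWED BY A CALL OF CFFTB MULTIPLIES THE SEQUENCE
C     BY N, SO THE RESULT IS DIVIDED BY N TO RECOVER THE INPUT.  THE
C     COMPLEX SEQUENCE IS ASSUMED TO BE STORED AS 2*N DOUBLES
C     (REAL/IMAGINARY PAIRS), AS IMPLIED BY THE N2 = N+N COPY LOOPS IN
C     CFFTF1 AND CFFTB1.
      PROGRAM CFFTEX
      IMPLICIT NONE
      INTEGER N,I
      PARAMETER (N=8)
      DOUBLE PRECISION C(2*N),WSAVE(4*N+15)
C     FILL THE SEQUENCE WITH A SIMPLE REAL-VALUED RAMP: C(K) = K
      DO 10 I=1,N
        C(2*I-1) = DBLE(I)
        C(2*I)   = 0.D0
   10 CONTINUE
      CALL CFFTI(N,WSAVE)
      CALL CFFTF(N,C,WSAVE)
      CALL CFFTB(N,C,WSAVE)
C     UNDO THE FACTOR OF N INTRODUCED BY CFFTF FOLLOWED BY CFFTB
      DO 20 I=1,2*N
        C(I) = C(I)/DBLE(N)
   20 CONTINUE
      WRITE(*,*) 'RECOVERED REAL PARTS:',(C(2*I-1),I=1,N)
      END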
C****************************************************************************
SUBROUTINE CFFTI1 (N,WA,WIFAC)
IMPLICIT NONE
INTEGER N,NTRYH,NL,NF,J,NTRY,NQ,NR,IB,I,L1,K1,IP,LD,L2,
& IDO,IDOT,IPM,I1,II
DOUBLE PRECISION WA,TPI,ARGH,FI,ARGLD,ARG,WIFAC
DIMENSION WA(*) ,WIFAC(*) ,NTRYH(4)
DATA NTRYH(1),NTRYH(2),NTRYH(3),NTRYH(4)/3,4,2,5/
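C     (EDITORIAL COMMENT, NOT IN THE ORIGINAL SOURCE.)  THE LOOP BELOW
C     (STATEMENTS 101-109) FACTORS N, TRYING 3, 4, 2 AND 5 FIRST AND
C     THEN SUCCESSIVE ODD INTEGERS.  THE FACTORS ARE STORED FROM
C     WIFAC(3) ONWARD, WITH ANY FACTOR OF 2 MOVED TO THE FRONT;
C     WIFAC(1) HOLDS N AND WIFAC(2) THE NUMBER OF FACTORS NF.  THE
C     SECOND LOOP (STATEMENTS 108-110) TABULATES THE TWIDDLE FACTORS
C     COS(ARG) AND SIN(ARG), WITH ARG = 2*PI*LD*FI/N, IN WA.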
NL = N
NF = 0
J = 0
101 J = J+1
IF (J-4) 102,102,103
102 NTRY = NTRYH(J)
GO TO 104
103 NTRY = NTRY+2
104 NQ = NL/NTRY
NR = NL-NTRY*NQ
IF (NR) 101,105,101
105 NF = NF+1
WIFAC(NF+2) = DBLE(NTRY)
NL = NQ
IF (NTRY .NE. 2) GO TO 107
IF (NF .EQ. 1) GO TO 107
DO 106 I=2,NF
IB = NF-I+2
WIFAC(IB+2) = WIFAC(IB+1)
106 CONTINUE
WIFAC(3) = 2.D0
107 IF (NL .NE. 1) GO TO 104
WIFAC(1) = DBLE(N)
WIFAC(2) = DBLE(NF)
TPI = 2.D0*(4.D0*ATAN(1.D0))
ARGH = TPI/DBLE(N)
I = 2
L1 = 1
DO 110 K1=1,NF
IP = NINT(WIFAC(K1+2))
LD = 0
L2 = L1*IP
IDO = N/L2
IDOT = IDO+IDO+2
IPM = IP-1
DO 109 J=1,IPM
I1 = I
WA(I-1) = 1.D0
WA(I) = 0.D0
LD = LD+L1
FI = 0.D0
ARGLD = DBLE(LD)*ARGH
DO 108 II=4,IDOT,2
I = I+2
FI = FI+1.D0
ARG = FI*ARGLD
WA(I-1) = COS(ARG)
WA(I) = SIN(ARG)
108 CONTINUE
IF (IP .LE. 5) GO TO 109
WA(I1-1) = WA(I-1)
WA(I1) = WA(I)
109 CONTINUE
L1 = L2
110 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE CFFTF1 (N,C,CH,WA,WIFAC)
IMPLICIT NONE
INTEGER N,NF,NA,L1,IW,K1,IP,L2,IDO,IDOT,IDL1,IX2,IX3,IX4,N2,
& I,NAC
DOUBLE PRECISION C,CH,WA,WIFAC
DIMENSION CH(*) ,C(*) ,WA(*) ,WIFAC(*)
NF = NINT(WIFAC(2))
NA = 0
L1 = 1
IW = 1
DO 116 K1=1,NF
IP = NINT(WIFAC(K1+2))
L2 = IP*L1
IDO = N/L2
IDOT = IDO+IDO
IDL1 = IDOT*L1
IF (IP .NE. 4) GO TO 103
IX2 = IW+IDOT
IX3 = IX2+IDOT
IF (NA .NE. 0) GO TO 101
CALL PASSF4 (IDOT,L1,C,CH,WA(IW),WA(IX2),WA(IX3))
GO TO 102
101 CALL PASSF4 (IDOT,L1,CH,C,WA(IW),WA(IX2),WA(IX3))
102 NA = 1-NA
GO TO 115
103 IF (IP .NE. 2) GO TO 106
IF (NA .NE. 0) GO TO 104
CALL PASSF2 (IDOT,L1,C,CH,WA(IW))
GO TO 105
104 CALL PASSF2 (IDOT,L1,CH,C,WA(IW))
105 NA = 1-NA
GO TO 115
106 IF (IP .NE. 3) GO TO 109
IX2 = IW+IDOT
IF (NA .NE. 0) GO TO 107
CALL PASSF3 (IDOT,L1,C,CH,WA(IW),WA(IX2))
GO TO 108
107 CALL PASSF3 (IDOT,L1,CH,C,WA(IW),WA(IX2))
108 NA = 1-NA
GO TO 115
109 IF (IP .NE. 5) GO TO 112
IX2 = IW+IDOT
IX3 = IX2+IDOT
IX4 = IX3+IDOT
IF (NA .NE. 0) GO TO 110
CALL PASSF5 (IDOT,L1,C,CH,WA(IW),WA(IX2),WA(IX3),WA(IX4))
GO TO 111
110 CALL PASSF5 (IDOT,L1,CH,C,WA(IW),WA(IX2),WA(IX3),WA(IX4))
111 NA = 1-NA
GO TO 115
112 IF (NA .NE. 0) GO TO 113
CALL PASSF (NAC,IDOT,IP,L1,IDL1,C,C,C,CH,CH,WA(IW))
GO TO 114
113 CALL PASSF (NAC,IDOT,IP,L1,IDL1,CH,CH,CH,C,C,WA(IW))
114 IF (NAC .NE. 0) NA = 1-NA
115 L1 = L2
IW = IW+(IP-1)*IDOT
116 CONTINUE
IF (NA .EQ. 0) RETURN
N2 = N+N
DO 117 I=1,N2
C(I) = CH(I)
117 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE CFFTB1 (N,C,CH,WA,WIFAC)
IMPLICIT NONE
INTEGER N,NF,NA,L1,IW,K1,IP,L2,IDO,IDOT,IDL1,IX2,IX3,IX4,N2,
& I,NAC
DOUBLE PRECISION C,CH,WA,WIFAC
DIMENSION CH(*) ,C(*) ,WA(*) ,WIFAC(*)
NF = NINT(WIFAC(2))
NA = 0
L1 = 1
IW = 1
DO 116 K1=1,NF
IP = NINT(WIFAC(K1+2))
L2 = IP*L1
IDO = N/L2
IDOT = IDO+IDO
IDL1 = IDOT*L1
IF (IP .NE. 4) GO TO 103
IX2 = IW+IDOT
IX3 = IX2+IDOT
IF (NA .NE. 0) GO TO 101
CALL PASSB4 (IDOT,L1,C,CH,WA(IW),WA(IX2),WA(IX3))
GO TO 102
101 CALL PASSB4 (IDOT,L1,CH,C,WA(IW),WA(IX2),WA(IX3))
102 NA = 1-NA
GO TO 115
103 IF (IP .NE. 2) GO TO 106
IF (NA .NE. 0) GO TO 104
CALL PASSB2 (IDOT,L1,C,CH,WA(IW))
GO TO 105
104 CALL PASSB2 (IDOT,L1,CH,C,WA(IW))
105 NA = 1-NA
GO TO 115
106 IF (IP .NE. 3) GO TO 109
IX2 = IW+IDOT
IF (NA .NE. 0) GO TO 107
CALL PASSB3 (IDOT,L1,C,CH,WA(IW),WA(IX2))
GO TO 108
107 CALL PASSB3 (IDOT,L1,CH,C,WA(IW),WA(IX2))
108 NA = 1-NA
GO TO 115
109 IF (IP .NE. 5) GO TO 112
IX2 = IW+IDOT
IX3 = IX2+IDOT
IX4 = IX3+IDOT
IF (NA .NE. 0) GO TO 110
CALL PASSB5 (IDOT,L1,C,CH,WA(IW),WA(IX2),WA(IX3),WA(IX4))
GO TO 111
110 CALL PASSB5 (IDOT,L1,CH,C,WA(IW),WA(IX2),WA(IX3),WA(IX4))
111 NA = 1-NA
GO TO 115
112 IF (NA .NE. 0) GO TO 113
CALL PASSB (NAC,IDOT,IP,L1,IDL1,C,C,C,CH,CH,WA(IW))
GO TO 114
113 CALL PASSB (NAC,IDOT,IP,L1,IDL1,CH,CH,CH,C,C,WA(IW))
114 IF (NAC .NE. 0) NA = 1-NA
115 L1 = L2
IW = IW+(IP-1)*IDOT
116 CONTINUE
IF (NA .EQ. 0) RETURN
N2 = N+N
DO 117 I=1,N2
C(I) = CH(I)
117 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSB (NAC,IDO,IP,L1,IDL1,CC,C1,C2,CH,CH2,WA)
IMPLICIT NONE
INTEGER NAC,IDO,IP,L1,IDL1,IDOT,IPP2,IPPH,IDP,J,K,I,IDL,LC,L,
& IK,IDLJ,JC,INC,IDIJ,IDJ
DOUBLE PRECISION CC,C1,C2,CH,CH2,WA,WAR,WAI
DIMENSION CH(IDO,L1,IP) ,CC(IDO,IP,L1) ,
1 C1(IDO,L1,IP) ,WA(*) ,C2(IDL1,IP),
2 CH2(IDL1,IP)
IDOT = IDO/2
IPP2 = IP+2
IPPH = (IP+1)/2
IDP = IP*IDO
C
IF (IDO .LT. L1) GO TO 106
DO 103 J=2,IPPH
JC = IPP2-J
DO 102 K=1,L1
DO 101 I=1,IDO
CH(I,K,J) = CC(I,J,K)+CC(I,JC,K)
CH(I,K,JC) = CC(I,J,K)-CC(I,JC,K)
101 CONTINUE
102 CONTINUE
103 CONTINUE
DO 105 K=1,L1
DO 104 I=1,IDO
CH(I,K,1) = CC(I,1,K)
104 CONTINUE
105 CONTINUE
GO TO 112
106 DO 109 J=2,IPPH
JC = IPP2-J
DO 108 I=1,IDO
DO 107 K=1,L1
CH(I,K,J) = CC(I,J,K)+CC(I,JC,K)
CH(I,K,JC) = CC(I,J,K)-CC(I,JC,K)
107 CONTINUE
108 CONTINUE
109 CONTINUE
DO 111 I=1,IDO
DO 110 K=1,L1
CH(I,K,1) = CC(I,1,K)
110 CONTINUE
111 CONTINUE
112 IDL = 2-IDO
INC = 0
DO 116 L=2,IPPH
LC = IPP2-L
IDL = IDL+IDO
DO 113 IK=1,IDL1
C2(IK,L) = CH2(IK,1)+WA(IDL-1)*CH2(IK,2)
C2(IK,LC) = WA(IDL)*CH2(IK,IP)
113 CONTINUE
IDLJ = IDL
INC = INC+IDO
DO 115 J=3,IPPH
JC = IPP2-J
IDLJ = IDLJ+INC
IF (IDLJ .GT. IDP) IDLJ = IDLJ-IDP
WAR = WA(IDLJ-1)
WAI = WA(IDLJ)
DO 114 IK=1,IDL1
C2(IK,L) = C2(IK,L)+WAR*CH2(IK,J)
C2(IK,LC) = C2(IK,LC)+WAI*CH2(IK,JC)
114 CONTINUE
115 CONTINUE
116 CONTINUE
DO 118 J=2,IPPH
DO 117 IK=1,IDL1
CH2(IK,1) = CH2(IK,1)+CH2(IK,J)
117 CONTINUE
118 CONTINUE
DO 120 J=2,IPPH
JC = IPP2-J
DO 119 IK=2,IDL1,2
CH2(IK-1,J) = C2(IK-1,J)-C2(IK,JC)
CH2(IK-1,JC) = C2(IK-1,J)+C2(IK,JC)
CH2(IK,J) = C2(IK,J)+C2(IK-1,JC)
CH2(IK,JC) = C2(IK,J)-C2(IK-1,JC)
119 CONTINUE
120 CONTINUE
NAC = 1
IF (IDO .EQ. 2) RETURN
NAC = 0
DO 121 IK=1,IDL1
C2(IK,1) = CH2(IK,1)
121 CONTINUE
DO 123 J=2,IP
DO 122 K=1,L1
C1(1,K,J) = CH(1,K,J)
C1(2,K,J) = CH(2,K,J)
122 CONTINUE
123 CONTINUE
IF (IDOT .GT. L1) GO TO 127
IDIJ = 0
DO 126 J=2,IP
IDIJ = IDIJ+2
DO 125 I=4,IDO,2
IDIJ = IDIJ+2
DO 124 K=1,L1
C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)-WA(IDIJ)*CH(I,K,J)
C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)+WA(IDIJ)*CH(I-1,K,J)
124 CONTINUE
125 CONTINUE
126 CONTINUE
RETURN
127 IDJ = 2-IDO
DO 130 J=2,IP
IDJ = IDJ+IDO
DO 129 K=1,L1
IDIJ = IDJ
DO 128 I=4,IDO,2
IDIJ = IDIJ+2
C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)-WA(IDIJ)*CH(I,K,J)
C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)+WA(IDIJ)*CH(I-1,K,J)
128 CONTINUE
129 CONTINUE
130 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSB2 (IDO,L1,CC,CH,WA1)
IMPLICIT NONE
INTEGER IDO,L1,K,I
DOUBLE PRECISION CC,CH,WA1,TR2,TI2
DIMENSION CC(IDO,2,L1) ,CH(IDO,L1,2) ,
1 WA1(1)
IF (IDO .GT. 2) GO TO 102
DO 101 K=1,L1
CH(1,K,1) = CC(1,1,K)+CC(1,2,K)
CH(1,K,2) = CC(1,1,K)-CC(1,2,K)
CH(2,K,1) = CC(2,1,K)+CC(2,2,K)
CH(2,K,2) = CC(2,1,K)-CC(2,2,K)
101 CONTINUE
RETURN
102 DO 104 K=1,L1
DO 103 I=2,IDO,2
CH(I-1,K,1) = CC(I-1,1,K)+CC(I-1,2,K)
TR2 = CC(I-1,1,K)-CC(I-1,2,K)
CH(I,K,1) = CC(I,1,K)+CC(I,2,K)
TI2 = CC(I,1,K)-CC(I,2,K)
CH(I,K,2) = WA1(I-1)*TI2+WA1(I)*TR2
CH(I-1,K,2) = WA1(I-1)*TR2-WA1(I)*TI2
103 CONTINUE
104 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSB3 (IDO,L1,CC,CH,WA1,WA2)
IMPLICIT NONE
INTEGER IDO,L1,K,I
DOUBLE PRECISION CC,CH,WA1,WA2,TAUR,TAUI,TR2,CR2,TI2,CI2,CR3,CI3,
& DR2,DR3,DI2,DI3
DIMENSION CC(IDO,3,L1) ,CH(IDO,L1,3) ,
1 WA1(*) ,WA2(*)
DATA TAUR,TAUI /-.5D0,.866025403784439D0/
IF (IDO .NE. 2) GO TO 102
DO 101 K=1,L1
TR2 = CC(1,2,K)+CC(1,3,K)
CR2 = CC(1,1,K)+TAUR*TR2
CH(1,K,1) = CC(1,1,K)+TR2
TI2 = CC(2,2,K)+CC(2,3,K)
CI2 = CC(2,1,K)+TAUR*TI2
CH(2,K,1) = CC(2,1,K)+TI2
CR3 = TAUI*(CC(1,2,K)-CC(1,3,K))
CI3 = TAUI*(CC(2,2,K)-CC(2,3,K))
CH(1,K,2) = CR2-CI3
CH(1,K,3) = CR2+CI3
CH(2,K,2) = CI2+CR3
CH(2,K,3) = CI2-CR3
101 CONTINUE
RETURN
102 DO 104 K=1,L1
DO 103 I=2,IDO,2
TR2 = CC(I-1,2,K)+CC(I-1,3,K)
CR2 = CC(I-1,1,K)+TAUR*TR2
CH(I-1,K,1) = CC(I-1,1,K)+TR2
TI2 = CC(I,2,K)+CC(I,3,K)
CI2 = CC(I,1,K)+TAUR*TI2
CH(I,K,1) = CC(I,1,K)+TI2
CR3 = TAUI*(CC(I-1,2,K)-CC(I-1,3,K))
CI3 = TAUI*(CC(I,2,K)-CC(I,3,K))
DR2 = CR2-CI3
DR3 = CR2+CI3
DI2 = CI2+CR3
DI3 = CI2-CR3
CH(I,K,2) = WA1(I-1)*DI2+WA1(I)*DR2
CH(I-1,K,2) = WA1(I-1)*DR2-WA1(I)*DI2
CH(I,K,3) = WA2(I-1)*DI3+WA2(I)*DR3
CH(I-1,K,3) = WA2(I-1)*DR3-WA2(I)*DI3
103 CONTINUE
104 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSB4 (IDO,L1,CC,CH,WA1,WA2,WA3)
IMPLICIT NONE
INTEGER IDO,L1,K,I
DOUBLE PRECISION CC,CH,WA1,WA2,WA3,TI1,TI2,TI3,TI4,TR1,TR2,TR3,
& TR4,CR3,CR2,CI3,CR4,CI4,CI2
DIMENSION CC(IDO,4,L1) ,CH(IDO,L1,4) ,
1 WA1(*) ,WA2(*) ,WA3(*)
IF (IDO .NE. 2) GO TO 102
DO 101 K=1,L1
TI1 = CC(2,1,K)-CC(2,3,K)
TI2 = CC(2,1,K)+CC(2,3,K)
TR4 = CC(2,4,K)-CC(2,2,K)
TI3 = CC(2,2,K)+CC(2,4,K)
TR1 = CC(1,1,K)-CC(1,3,K)
TR2 = CC(1,1,K)+CC(1,3,K)
TI4 = CC(1,2,K)-CC(1,4,K)
TR3 = CC(1,2,K)+CC(1,4,K)
CH(1,K,1) = TR2+TR3
CH(1,K,3) = TR2-TR3
CH(2,K,1) = TI2+TI3
CH(2,K,3) = TI2-TI3
CH(1,K,2) = TR1+TR4
CH(1,K,4) = TR1-TR4
CH(2,K,2) = TI1+TI4
CH(2,K,4) = TI1-TI4
101 CONTINUE
RETURN
102 DO 104 K=1,L1
DO 103 I=2,IDO,2
TI1 = CC(I,1,K)-CC(I,3,K)
TI2 = CC(I,1,K)+CC(I,3,K)
TI3 = CC(I,2,K)+CC(I,4,K)
TR4 = CC(I,4,K)-CC(I,2,K)
TR1 = CC(I-1,1,K)-CC(I-1,3,K)
TR2 = CC(I-1,1,K)+CC(I-1,3,K)
TI4 = CC(I-1,2,K)-CC(I-1,4,K)
TR3 = CC(I-1,2,K)+CC(I-1,4,K)
CH(I-1,K,1) = TR2+TR3
CR3 = TR2-TR3
CH(I,K,1) = TI2+TI3
CI3 = TI2-TI3
CR2 = TR1+TR4
CR4 = TR1-TR4
CI2 = TI1+TI4
CI4 = TI1-TI4
CH(I-1,K,2) = WA1(I-1)*CR2-WA1(I)*CI2
CH(I,K,2) = WA1(I-1)*CI2+WA1(I)*CR2
CH(I-1,K,3) = WA2(I-1)*CR3-WA2(I)*CI3
CH(I,K,3) = WA2(I-1)*CI3+WA2(I)*CR3
CH(I-1,K,4) = WA3(I-1)*CR4-WA3(I)*CI4
CH(I,K,4) = WA3(I-1)*CI4+WA3(I)*CR4
103 CONTINUE
104 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSB5 (IDO,L1,CC,CH,WA1,WA2,WA3,WA4)
IMPLICIT NONE
INTEGER IDO,L1,K,I
DOUBLE PRECISION CC,CH,WA1,WA2,WA3,WA4,TR11,TI11,TR12,TI12,
& TI2,TI3,TI4,TI5,TR2,TR3,TR4,TR5,CR2,CI2,CR3,CI3,CR4,CI4,CR5,CI5,
& DR3,DI3,DR4,DI4,DR5,DI5,DR2,DI2
DIMENSION CC(IDO,5,L1) ,CH(IDO,L1,5) ,
1 WA1(*) ,WA2(*) ,WA3(*) ,WA4(*)
DATA TR11,TI11,TR12,TI12 /.309016994374947D0,.951056516295154D0,
1-.809016994374947D0,.587785252292473D0/
IF (IDO .NE. 2) GO TO 102
DO 101 K=1,L1
TI5 = CC(2,2,K)-CC(2,5,K)
TI2 = CC(2,2,K)+CC(2,5,K)
TI4 = CC(2,3,K)-CC(2,4,K)
TI3 = CC(2,3,K)+CC(2,4,K)
TR5 = CC(1,2,K)-CC(1,5,K)
TR2 = CC(1,2,K)+CC(1,5,K)
TR4 = CC(1,3,K)-CC(1,4,K)
TR3 = CC(1,3,K)+CC(1,4,K)
CH(1,K,1) = CC(1,1,K)+TR2+TR3
CH(2,K,1) = CC(2,1,K)+TI2+TI3
CR2 = CC(1,1,K)+TR11*TR2+TR12*TR3
CI2 = CC(2,1,K)+TR11*TI2+TR12*TI3
CR3 = CC(1,1,K)+TR12*TR2+TR11*TR3
CI3 = CC(2,1,K)+TR12*TI2+TR11*TI3
CR5 = TI11*TR5+TI12*TR4
CI5 = TI11*TI5+TI12*TI4
CR4 = TI12*TR5-TI11*TR4
CI4 = TI12*TI5-TI11*TI4
CH(1,K,2) = CR2-CI5
CH(1,K,5) = CR2+CI5
CH(2,K,2) = CI2+CR5
CH(2,K,3) = CI3+CR4
CH(1,K,3) = CR3-CI4
CH(1,K,4) = CR3+CI4
CH(2,K,4) = CI3-CR4
CH(2,K,5) = CI2-CR5
101 CONTINUE
RETURN
102 DO 104 K=1,L1
DO 103 I=2,IDO,2
TI5 = CC(I,2,K)-CC(I,5,K)
TI2 = CC(I,2,K)+CC(I,5,K)
TI4 = CC(I,3,K)-CC(I,4,K)
TI3 = CC(I,3,K)+CC(I,4,K)
TR5 = CC(I-1,2,K)-CC(I-1,5,K)
TR2 = CC(I-1,2,K)+CC(I-1,5,K)
TR4 = CC(I-1,3,K)-CC(I-1,4,K)
TR3 = CC(I-1,3,K)+CC(I-1,4,K)
CH(I-1,K,1) = CC(I-1,1,K)+TR2+TR3
CH(I,K,1) = CC(I,1,K)+TI2+TI3
CR2 = CC(I-1,1,K)+TR11*TR2+TR12*TR3
CI2 = CC(I,1,K)+TR11*TI2+TR12*TI3
CR3 = CC(I-1,1,K)+TR12*TR2+TR11*TR3
CI3 = CC(I,1,K)+TR12*TI2+TR11*TI3
CR5 = TI11*TR5+TI12*TR4
CI5 = TI11*TI5+TI12*TI4
CR4 = TI12*TR5-TI11*TR4
CI4 = TI12*TI5-TI11*TI4
DR3 = CR3-CI4
DR4 = CR3+CI4
DI3 = CI3+CR4
DI4 = CI3-CR4
DR5 = CR2+CI5
DR2 = CR2-CI5
DI5 = CI2-CR5
DI2 = CI2+CR5
CH(I-1,K,2) = WA1(I-1)*DR2-WA1(I)*DI2
CH(I,K,2) = WA1(I-1)*DI2+WA1(I)*DR2
CH(I-1,K,3) = WA2(I-1)*DR3-WA2(I)*DI3
CH(I,K,3) = WA2(I-1)*DI3+WA2(I)*DR3
CH(I-1,K,4) = WA3(I-1)*DR4-WA3(I)*DI4
CH(I,K,4) = WA3(I-1)*DI4+WA3(I)*DR4
CH(I-1,K,5) = WA4(I-1)*DR5-WA4(I)*DI5
CH(I,K,5) = WA4(I-1)*DI5+WA4(I)*DR5
103 CONTINUE
104 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSF (NAC,IDO,IP,L1,IDL1,CC,C1,C2,CH,CH2,WA)
IMPLICIT NONE
INTEGER NAC,IDO,IP,L1,IDL1,IDOT,IPP2,IPPH,IDP,J,JC,K,I,IDL,
& INC,L,LC,IK,IDLJ,IDIJ,IDJ
DOUBLE PRECISION CC,C1,C2,CH,CH2,WA,WAR,WAI
DIMENSION CH(IDO,L1,IP) ,CC(IDO,IP,L1) ,
1 C1(IDO,L1,IP) ,WA(*) ,C2(IDL1,IP),
2 CH2(IDL1,IP)
IDOT = IDO/2
IPP2 = IP+2
IPPH = (IP+1)/2
IDP = IP*IDO
C
IF (IDO .LT. L1) GO TO 106
DO 103 J=2,IPPH
JC = IPP2-J
DO 102 K=1,L1
DO 101 I=1,IDO
CH(I,K,J) = CC(I,J,K)+CC(I,JC,K)
CH(I,K,JC) = CC(I,J,K)-CC(I,JC,K)
101 CONTINUE
102 CONTINUE
103 CONTINUE
DO 105 K=1,L1
DO 104 I=1,IDO
CH(I,K,1) = CC(I,1,K)
104 CONTINUE
105 CONTINUE
GO TO 112
106 DO 109 J=2,IPPH
JC = IPP2-J
DO 108 I=1,IDO
DO 107 K=1,L1
CH(I,K,J) = CC(I,J,K)+CC(I,JC,K)
CH(I,K,JC) = CC(I,J,K)-CC(I,JC,K)
107 CONTINUE
108 CONTINUE
109 CONTINUE
DO 111 I=1,IDO
DO 110 K=1,L1
CH(I,K,1) = CC(I,1,K)
110 CONTINUE
111 CONTINUE
112 IDL = 2-IDO
INC = 0
DO 116 L=2,IPPH
LC = IPP2-L
IDL = IDL+IDO
DO 113 IK=1,IDL1
C2(IK,L) = CH2(IK,1)+WA(IDL-1)*CH2(IK,2)
C2(IK,LC) = -WA(IDL)*CH2(IK,IP)
113 CONTINUE
IDLJ = IDL
INC = INC+IDO
DO 115 J=3,IPPH
JC = IPP2-J
IDLJ = IDLJ+INC
IF (IDLJ .GT. IDP) IDLJ = IDLJ-IDP
WAR = WA(IDLJ-1)
WAI = WA(IDLJ)
DO 114 IK=1,IDL1
C2(IK,L) = C2(IK,L)+WAR*CH2(IK,J)
C2(IK,LC) = C2(IK,LC)-WAI*CH2(IK,JC)
114 CONTINUE
115 CONTINUE
116 CONTINUE
DO 118 J=2,IPPH
DO 117 IK=1,IDL1
CH2(IK,1) = CH2(IK,1)+CH2(IK,J)
117 CONTINUE
118 CONTINUE
DO 120 J=2,IPPH
JC = IPP2-J
DO 119 IK=2,IDL1,2
CH2(IK-1,J) = C2(IK-1,J)-C2(IK,JC)
CH2(IK-1,JC) = C2(IK-1,J)+C2(IK,JC)
CH2(IK,J) = C2(IK,J)+C2(IK-1,JC)
CH2(IK,JC) = C2(IK,J)-C2(IK-1,JC)
119 CONTINUE
120 CONTINUE
NAC = 1
IF (IDO .EQ. 2) RETURN
NAC = 0
DO 121 IK=1,IDL1
C2(IK,1) = CH2(IK,1)
121 CONTINUE
DO 123 J=2,IP
DO 122 K=1,L1
C1(1,K,J) = CH(1,K,J)
C1(2,K,J) = CH(2,K,J)
122 CONTINUE
123 CONTINUE
IF (IDOT .GT. L1) GO TO 127
IDIJ = 0
DO 126 J=2,IP
IDIJ = IDIJ+2
DO 125 I=4,IDO,2
IDIJ = IDIJ+2
DO 124 K=1,L1
C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)+WA(IDIJ)*CH(I,K,J)
C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)-WA(IDIJ)*CH(I-1,K,J)
124 CONTINUE
125 CONTINUE
126 CONTINUE
RETURN
127 IDJ = 2-IDO
DO 130 J=2,IP
IDJ = IDJ+IDO
DO 129 K=1,L1
IDIJ = IDJ
DO 128 I=4,IDO,2
IDIJ = IDIJ+2
C1(I-1,K,J) = WA(IDIJ-1)*CH(I-1,K,J)+WA(IDIJ)*CH(I,K,J)
C1(I,K,J) = WA(IDIJ-1)*CH(I,K,J)-WA(IDIJ)*CH(I-1,K,J)
128 CONTINUE
129 CONTINUE
130 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSF2 (IDO,L1,CC,CH,WA1)
IMPLICIT NONE
INTEGER IDO,L1,K,I
DOUBLE PRECISION CC,CH,WA1,TR2,TI2
DIMENSION CC(IDO,2,L1) ,CH(IDO,L1,2) ,
1 WA1(*)
IF (IDO .GT. 2) GO TO 102
DO 101 K=1,L1
CH(1,K,1) = CC(1,1,K)+CC(1,2,K)
CH(1,K,2) = CC(1,1,K)-CC(1,2,K)
CH(2,K,1) = CC(2,1,K)+CC(2,2,K)
CH(2,K,2) = CC(2,1,K)-CC(2,2,K)
101 CONTINUE
RETURN
102 DO 104 K=1,L1
DO 103 I=2,IDO,2
CH(I-1,K,1) = CC(I-1,1,K)+CC(I-1,2,K)
TR2 = CC(I-1,1,K)-CC(I-1,2,K)
CH(I,K,1) = CC(I,1,K)+CC(I,2,K)
TI2 = CC(I,1,K)-CC(I,2,K)
CH(I,K,2) = WA1(I-1)*TI2-WA1(I)*TR2
CH(I-1,K,2) = WA1(I-1)*TR2+WA1(I)*TI2
103 CONTINUE
104 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSF3 (IDO,L1,CC,CH,WA1,WA2)
IMPLICIT NONE
INTEGER IDO,L1,K,I
DOUBLE PRECISION CC,CH,WA1,WA2,TAUR,TAUI,TR2,CR2,TI2,CI2,CR3,CI3,
& DR2,DR3,DI2,DI3
DIMENSION CC(IDO,3,L1) ,CH(IDO,L1,3) ,
1 WA1(*) ,WA2(*)
DATA TAUR,TAUI /-.5D0,-.866025403784439D0/
IF (IDO .NE. 2) GO TO 102
DO 101 K=1,L1
TR2 = CC(1,2,K)+CC(1,3,K)
CR2 = CC(1,1,K)+TAUR*TR2
CH(1,K,1) = CC(1,1,K)+TR2
TI2 = CC(2,2,K)+CC(2,3,K)
CI2 = CC(2,1,K)+TAUR*TI2
CH(2,K,1) = CC(2,1,K)+TI2
CR3 = TAUI*(CC(1,2,K)-CC(1,3,K))
CI3 = TAUI*(CC(2,2,K)-CC(2,3,K))
CH(1,K,2) = CR2-CI3
CH(1,K,3) = CR2+CI3
CH(2,K,2) = CI2+CR3
CH(2,K,3) = CI2-CR3
101 CONTINUE
RETURN
102 DO 104 K=1,L1
DO 103 I=2,IDO,2
TR2 = CC(I-1,2,K)+CC(I-1,3,K)
CR2 = CC(I-1,1,K)+TAUR*TR2
CH(I-1,K,1) = CC(I-1,1,K)+TR2
TI2 = CC(I,2,K)+CC(I,3,K)
CI2 = CC(I,1,K)+TAUR*TI2
CH(I,K,1) = CC(I,1,K)+TI2
CR3 = TAUI*(CC(I-1,2,K)-CC(I-1,3,K))
CI3 = TAUI*(CC(I,2,K)-CC(I,3,K))
DR2 = CR2-CI3
DR3 = CR2+CI3
DI2 = CI2+CR3
DI3 = CI2-CR3
CH(I,K,2) = WA1(I-1)*DI2-WA1(I)*DR2
CH(I-1,K,2) = WA1(I-1)*DR2+WA1(I)*DI2
CH(I,K,3) = WA2(I-1)*DI3-WA2(I)*DR3
CH(I-1,K,3) = WA2(I-1)*DR3+WA2(I)*DI3
103 CONTINUE
104 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSF4 (IDO,L1,CC,CH,WA1,WA2,WA3)
IMPLICIT NONE
INTEGER IDO,L1,K,I
DOUBLE PRECISION CC,CH,WA1,WA2,WA3,TI1,TI2,TI3,TI4,TR1,TR2,TR3,
& TR4,CR2,CR3,CR4,CI2,CI3,CI4
DIMENSION CC(IDO,4,L1) ,CH(IDO,L1,4) ,
1 WA1(*) ,WA2(*) ,WA3(*)
IF (IDO .NE. 2) GO TO 102
DO 101 K=1,L1
TI1 = CC(2,1,K)-CC(2,3,K)
TI2 = CC(2,1,K)+CC(2,3,K)
TR4 = CC(2,2,K)-CC(2,4,K)
TI3 = CC(2,2,K)+CC(2,4,K)
TR1 = CC(1,1,K)-CC(1,3,K)
TR2 = CC(1,1,K)+CC(1,3,K)
TI4 = CC(1,4,K)-CC(1,2,K)
TR3 = CC(1,2,K)+CC(1,4,K)
CH(1,K,1) = TR2+TR3
CH(1,K,3) = TR2-TR3
CH(2,K,1) = TI2+TI3
CH(2,K,3) = TI2-TI3
CH(1,K,2) = TR1+TR4
CH(1,K,4) = TR1-TR4
CH(2,K,2) = TI1+TI4
CH(2,K,4) = TI1-TI4
101 CONTINUE
RETURN
102 DO 104 K=1,L1
DO 103 I=2,IDO,2
TI1 = CC(I,1,K)-CC(I,3,K)
TI2 = CC(I,1,K)+CC(I,3,K)
TI3 = CC(I,2,K)+CC(I,4,K)
TR4 = CC(I,2,K)-CC(I,4,K)
TR1 = CC(I-1,1,K)-CC(I-1,3,K)
TR2 = CC(I-1,1,K)+CC(I-1,3,K)
TI4 = CC(I-1,4,K)-CC(I-1,2,K)
TR3 = CC(I-1,2,K)+CC(I-1,4,K)
CH(I-1,K,1) = TR2+TR3
CR3 = TR2-TR3
CH(I,K,1) = TI2+TI3
CI3 = TI2-TI3
CR2 = TR1+TR4
CR4 = TR1-TR4
CI2 = TI1+TI4
CI4 = TI1-TI4
CH(I-1,K,2) = WA1(I-1)*CR2+WA1(I)*CI2
CH(I,K,2) = WA1(I-1)*CI2-WA1(I)*CR2
CH(I-1,K,3) = WA2(I-1)*CR3+WA2(I)*CI3
CH(I,K,3) = WA2(I-1)*CI3-WA2(I)*CR3
CH(I-1,K,4) = WA3(I-1)*CR4+WA3(I)*CI4
CH(I,K,4) = WA3(I-1)*CI4-WA3(I)*CR4
103 CONTINUE
104 CONTINUE
RETURN
END
C****************************************************************************
SUBROUTINE PASSF5 (IDO,L1,CC,CH,WA1,WA2,WA3,WA4)
IMPLICIT NONE
INTEGER IDO,L1,K,I
DOUBLE PRECISION CC,CH,WA1,WA2,WA3,WA4,TR11,TI11,TR12,TI12,
& TI2,TI3,TI4,TI5,TR2,TR3,TR4,TR5,CI2,CI3,CI4,CI5,CR2,CR3,CR4,
& CR5,DI2,DI3,DI4,DI5,DR2,DR3,DR4,DR5
DIMENSION CC(IDO,5,L1) ,CH(IDO,L1,5) ,
1 WA1(*) ,WA2(*) ,WA3(*) ,WA4(*)
DATA TR11,TI11,TR12,TI12 /.309016994374947D0,-.951056516295154D0,
1-.809016994374947D0,-.587785252292473D0/
IF (IDO .NE. 2) GO TO 102
DO 101 K=1,L1
TI5 = CC(2,2,K)-CC(2,5,K)
TI2 = CC(2,2,K)+CC(2,5,K)
TI4 = CC(2,3,K)-CC(2,4,K)
TI3 = CC(2,3,K)+CC(2,4,K)
TR5 = CC(1,2,K)-CC(1,5,K)
TR2 = CC(1,2,K)+CC(1,5,K)
TR4 = CC(1,3,K)-CC(1,4,K)
TR3 = CC(1,3,K)+CC(1,4,K)
CH(1,K,1) = CC(1,1,K)+TR2+TR3
CH(2,K,1) = CC(2,1,K)+TI2+TI3
CR2 = CC(1,1,K)+TR11*TR2+TR12*TR3
CI2 = CC(2,1,K)+TR11*TI2+TR12*TI3
CR3 = CC(1,1,K)+TR12*TR2+TR11*TR3
CI3 = CC(2,1,K)+TR12*TI2+TR11*TI3
CR5 = TI11*TR5+TI12*TR4
CI5 = TI11*TI5+TI12*TI4
CR4 = TI12*TR5-TI11*TR4
CI4 = TI12*TI5-TI11*TI4
CH(1,K,2) = CR2-CI5
CH(1,K,5) = CR2+CI5
CH(2,K,2) = CI2+CR5
CH(2,K,3) = CI3+CR4
CH(1,K,3) = CR3-CI4
CH(1,K,4) = CR3+CI4
CH(2,K,4) = CI3-CR4
CH(2,K,5) = CI2-CR5
101 CONTINUE
RETURN
102 DO 104 K=1,L1
DO 103 I=2,IDO,2
TI5 = CC(I,2,K)-CC(I,5,K)
TI2 = CC(I,2,K)+CC(I,5,K)
TI4 = CC(I,3,K)-CC(I,4,K)
TI3 = CC(I,3,K)+CC(I,4,K)
TR5 = CC(I-1,2,K)-CC(I-1,5,K)
TR2 = CC(I-1,2,K)+CC(I-1,5,K)
TR4 = CC(I-1,3,K)-CC(I-1,4,K)
TR3 = CC(I-1,3,K)+CC(I-1,4,K)
CH(I-1,K,1) = CC(I-1,1,K)+TR2+TR3
CH(I,K,1) = CC(I,1,K)+TI2+TI3
CR2 = CC(I-1,1,K)+TR11*TR2+TR12*TR3
CI2 = CC(I,1,K)+TR11*TI2+TR12*TI3
CR3 = CC(I-1,1,K)+TR12*TR2+TR11*TR3
CI3 = CC(I,1,K)+TR12*TI2+TR11*TI3
CR5 = TI11*TR5+TI12*TR4
CI5 = TI11*TI5+TI12*TI4
CR4 = TI12*TR5-TI11*TR4
CI4 = TI12*TI5-TI11*TI4
DR3 = CR3-CI4
DR4 = CR3+CI4
DI3 = CI3+CR4
DI4 = CI3-CR4
DR5 = CR2+CI5
DR2 = CR2-CI5
DI5 = CI2-CR5
DI2 = CI2+CR5
CH(I-1,K,2) = WA1(I-1)*DR2+WA1(I)*DI2
CH(I,K,2) = WA1(I-1)*DI2-WA1(I)*DR2
CH(I-1,K,3) = WA2(I-1)*DR3+WA2(I)*DI3
CH(I,K,3) = WA2(I-1)*DI3-WA2(I)*DR3
CH(I-1,K,4) = WA3(I-1)*DR4+WA3(I)*DI4
CH(I,K,4) = WA3(I-1)*DI4-WA3(I)*DR4
CH(I-1,K,5) = WA4(I-1)*DR5+WA4(I)*DI5
CH(I,K,5) = WA4(I-1)*DI5-WA4(I)*DR5
103 CONTINUE
104 CONTINUE
RETURN
END
|
This shower curtain is perfect for the Mercedes Sprinter rear doors. Simply install four twist turn locks at your desired height and attach the curtain. Make sure the curtain loops reach your twist turn locks before installation. |
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
ImportAll(paradigms.vector);
Class(LSKernel, SumsBase, BaseContainer, rec(
doNotMarkBB:= true,
abbrevs := [ ch -> [ch,0], (ch,ops) -> [ch,ops] ],
new := (self, ch, ops) >> SPL(WithBases(self, rec(
info := Cond(
IsInt(ops) or IsRat(ops)
or IsValue(ops) or IsExp(ops), rec(
opcount := When(IsValue(ops),ops.v,ops),
free := Set([]),
loadFunc := fId(ch.dimensions[2]),
storeFunc := fId(ch.dimensions[1])
),
IsRec(ops), ops,
Error("unknown info")
),
_children := [ch],
dimensions := ch.dimensions
))),
rChildren := self >> [self._children[1], self.info],
rSetChild := meth(self, n, what)
if n=2 then self.info := what;
elif n=1 then self._children[1] := what;
else Error("<n> must be in [1..2]");
fi;
end,
mergeInfo := (self, r1, r2) >> rec(
opcount := r1.opcount + r2.opcount,
free := Concat(r1.free, r2.free),
loadFunc := r2.loadFunc,
storeFunc := r1.storeFunc
),
print := meth(self, indent, indentStep)
local s,ch,first,newline;
ch := [self.child(1),self.info];
if self._short_print or ForAll(ch, x->IsSPLSym(x) or IsSPLMat(x)) then
newline := Ignore;
else
newline := self._newline;
fi;
first:=true;
Print(self.__name__, "(");
for s in ch do
if(first) then first:=false;
else Print(", "); fi;
newline(indent + indentStep);
When(IsSPL(s) or (IsRec(s) and IsBound(s.print) and NumGenArgs(s.print)=2),
s.print(indent + indentStep, indentStep), Print(s));
od;
newline(indent);
Print(")");
self.printA();
end,
));
Class(DMAGath, Gath, rec(doNotMarkBB:= true));
Class(DMAScat, Scat, rec(doNotMarkBB:= true));
Class(DMAFence, Buf, rec(doNotMarkBB:= true));
Class(SWPSum, ISum, rec(doNotMarkBB := true));
Class(DMAGathV, VGath, rec(doNotMarkBB := true));
Class(DMAScatV, VScat, rec(doNotMarkBB := true));
|
Emmanuel DiazOrdaz is a current ASUCD Senator. He was elected in the Fall 2010 ASUCD Election as an independent.
Support Emmanuel on Facebook here: http://www.facebook.com/home.php?#!/event.php?eid=156028331099935
UCD Involvement :
P.E.A.C.E. Trainer, Cross Cultural Center
P.E.A.C.E. Co-Coordinator, Cross Cultural Center
Admin. Director for Yik’al Kuyum, Student Recruitment & Retention Center
ASUCD Gender And Sexualities Commissioner, Spring 2009-Present
REACE Winter 2010, Participant
Queer Leadership Retreat Spring 2010, Participant
Chican@/Latin@ Leadership Retreat Fall 2010, Workshop Facilitator
Native Leadership Retreat Fall 2010, Workshop Facilitator
Outreach to multiple Yolo/Sacramento County High Schools
Aggies of Color
Candidate Statement
Hello beautiful Aggies!! My name is Emmanuel DiazOrdaz and I am running for ASUCD Senate!!! (vote for me :)
In case you’ve never voted in an ASUCD election before (and that’s most of the student body), here’s a little bit of info: there are twelve seats in the ASUCD Senate, and six of them are up for election this quarter! Because ASUCD elections use choice voting, where you RANK the candidates instead of just picking one, just remember to vote Emmanuel DiazOrdaz #1 and Cameron Brown #2!!
Campaign Platform
AB540 AWARENESS WEEK
A weeklong event dedicated to the issues around AB540 and undocumented students, ending with a scholarship giveaway open to all students.
TACO TRUCK ON CAMPUS
In collaboration with the Retention Coordinator for Yik’al Kuyum at the Student Recruitment & Retention Center, I want to bring a Taco Truck to campus at least twice a month during La Raza Tuesdays.
GENERATION SEX WEEK PROGRAMMING
Gender Soliloqueers
The second annual Gender Soliloqueers invites students of all genders and sexualities to express themselves through performances, spoken word, and radical monologues.
Foreskin Awareness Day
A day dedicated to foreskin awareness, specifically its effects on men physically, mentally, emotionally, and creatively.
Rape Culture In Greek Life
A program advocating for victims of violence and rape, specifically within Greek communities.
INCREASE LEADERSHIP RETREAT/CONFERENCE ALLOCATION
As of now, some money is being allocated to certain leadership retreats on campus. However, it is not nearly enough. There needs to be more institutionalized money for these very crucial community building, ally developing retreats and conferences in order for us to learn from each other and other UC’s to make our own campus a safer space for all communities.
Short and Simple, mostly.
If you have any questions, ask me :
|
#' Function for the method of Independent Evolution
#'
#' This function allows computing rescaled branch lengths according to inferred phenotypic evolution and ancestral states using the method of Independent Evolution as described in Smaers & Vinicius (2009)
#' @param data numeric vector with elements in the same order as tip labels in the tree
#' @param tree phylogenetic tree of class "phylo" (as used by the ape package, whose dist.nodes is called below)
#' @return dataframe with rescaled branch lengths (rBL) for all branches in the tree
#' @details This function poses the following restrictions on the input data: no polytomies in the tree, no duplicated values, no zero values, and no negative values in the data. The algorithm was designed to deal with linear data (unlogged). This function is now deprecated; 'mvBM' should be used instead.
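#' @examples
#' \dontrun{
#' ## A hypothetical sketch (not from the original package): a random
#' ## binary tree from ape and positive, non-duplicated tip data, as the
#' ## restrictions above require.
#' library(ape)
#' tree <- rtree(8)
#' data <- runif(8, min = 1, max = 10)
#' ie(data, tree)
#' }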
#' @export
ie<-function(data,tree){
data_original<-data
N=length(data)
#Make phylo matrix and list nodes
matrix=tree$edge
phy.matrix=data.frame(tree$edge,tree$edge.length,data[matrix[,2]])
names(phy.matrix)=c("Anc","Desc","Length","Value")
phy.matrix.length=nrow(phy.matrix)
nodes_extant=1:N
nodes_extinct=(N+1):(N+(N-1))
nodes_all=1:(N+(N-1))
#CODE
#1. CALCULATE AP BRANCH LENGTHS
matrix_lengths=dist.nodes(tree)
#2. CALCULATE AP-values
values=phy.matrix$Value[which(phy.matrix$Desc<=N)]
extant_values=data.frame(values,nodes_extant)
AP_values=c()
for(j in nodes_extinct){
#calculate nominator
nominator=c()
for(i in nodes_extant){
nominator=rbind(nominator,(extant_values[i,1]/matrix_lengths[i,j]))
}
#calculate denominator
denominator=c()
for(i in nodes_extant){
denominator=rbind(denominator,(1/matrix_lengths[i,j]))
}
#save AP
AP=sum(nominator)/sum(denominator)
AP_values=rbind(AP_values,AP)
}
AP=c()
AP=cbind(AP,AP_values)
AP=cbind(AP,nodes_extinct)
AP=as.data.frame(AP)
colnames(AP)=c("value","nodes")
#3. CALCULATE ANCESTRAL STATES
nodes_extinct_reverse=sort(nodes_extinct,decreasing=TRUE)
ancestral_states=c()
ancestors=c()
results=c()
results_rBLs=c()
results_branchlength=c()
results_node_anc=c()
results_node_desc=c()
for(i in nodes_extinct_reverse) {
#calculating ancestral states
sister_species=which(phy.matrix$Anc==i)
X1=phy.matrix$Value[sister_species[1]]
X2=phy.matrix$Value[sister_species[2]]
AP_desc=AP$value[which(AP$nodes==i)]
BL1=phy.matrix$Length[sister_species[1]]
BL2=phy.matrix$Length[sister_species[2]]
S1=abs(abs(X1-X2)/mean(c(X1,X2)))
S2=abs(abs(X2-AP_desc)/mean(c(X2,AP_desc)))
S3=abs(abs(X1-AP_desc)/mean(c(X1,AP_desc)))
T1=((S1+S3)-S2)/2
T2=((S1+S2)-S3)/2
T3=((S2+S3)-S1)/2
rBL1=T1*((BL1/(BL1+BL2))*2)
rBL2=T2*((BL2/(BL1+BL2))*2)
A=((X1/rBL1)+(X2/rBL2))/((1/rBL1)+(1/rBL2))
#Saving results
desc1=phy.matrix$Desc[sister_species[1]]
desc2=phy.matrix$Desc[sister_species[2]]
value_desc1=phy.matrix$Value[which(phy.matrix$Desc==desc1)]
value_desc2=phy.matrix$Value[which(phy.matrix$Desc==desc2)]
phy.matrix$Value[which(phy.matrix$Desc==i)]=A
#building results dataframe
results_node_anc=rbind(results_node_anc,i)
results_node_anc=rbind(results_node_anc,i)
results_node_desc=rbind(results_node_desc,desc1)
results_node_desc=rbind(results_node_desc,desc2)
results_rBLs=rbind(results_rBLs,rBL1)
results_rBLs=rbind(results_rBLs,rBL2)
results_branchlength=rbind(results_branchlength,BL1)
results_branchlength=rbind(results_branchlength,BL2)
}
#'results' dataframe
results<-c()
results=cbind(results,results_node_anc)
results=cbind(results,results_node_desc)
results=cbind(results,results_branchlength)
results=cbind(results,results_rBLs)
colnames(results)=c("node_anc","node_desc","BL","rBLs")
#ancestors
results<-results[order(match(results[,2],phy.matrix[,2])),]
rownames(results)<-c(1:length(results[,1]))
results<-as.data.frame(results)
return(results)
}
|
function [MU,k]=vonMisesStat(T)
% function [MU,k]=vonMisesStat(T)
% ------------------------------------------------------------------------
%
% Computes the von Mises statistics of the angle set T: the circular
% mean angle MU and the concentration parameter k. The concentration
% parameter is estimated by Newton iteration on the ratio of modified
% Bessel functions (see the comments in the code below).
%
% ------------------------------------------------------------------------
%%
MU=angle(mean(exp(1i.*T))); %Circular mean angle
Rsq=mean(cos(T)).^2+mean(sin(T)).^2;
R=sqrt(Rsq); %Mean resultant length
p=2; %Dimension (angles on the unit circle)
ki=R.*(p-Rsq)./(1-Rsq); %Initial estimate of the concentration parameter
diffTol=ki/1000; %Convergence tolerance
qIter=1;
while 1
    kn=ki; %Previous estimate
    A=besseli(p/2,ki)./ besseli((p/2)-1,ki); %Bessel function ratio A_p(k)
    ki=ki-((A-R)./(1-A^2-((p-1)/ki)*A)); %Newton step towards A_p(k)=R
    if abs(kn-ki)<=diffTol %Converged
        break
    end
    qIter=qIter+1;
end
k=ki;
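%% Example (an editorial sketch, not part of the original function):
% T=pi/4+0.1*randn(500,1); %500 angles clustered around pi/4
% [MU,k]=vonMisesStat(T); %MU is close to pi/4; k is large for a tight cluster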
%%
% _*GIBBON footer text*_
%
% License: <https://github.com/gibbonCode/GIBBON/blob/master/LICENSE>
%
% GIBBON: The Geometry and Image-based Bioengineering add-On. A toolbox for
% image segmentation, image-based modeling, meshing, and finite element
% analysis.
%
% Copyright (C) 2006-2022 Kevin Mattheus Moerman and the GIBBON contributors
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
|
(*******************************************************************************
Project: Development of Security Protocols by Refinement
Module: Refinement/Keys.thy (Isabelle/HOL 2016-1)
ID: $Id: Keys.thy 132773 2016-12-09 15:30:22Z csprenge $
Author: Christoph Sprenger, ETH Zurich <[email protected]>
Symmetric (shared) and asymmetric (public/private) keys.
(based on Larry Paulson's theory Public.thy)
Copyright (c) 2012-2016 Christoph Sprenger
Licence: LGPL
*******************************************************************************)
section \<open>Symmetric and Asymmetric Keys\<close>
theory Keys imports Agents begin
text \<open>Divide keys into session and long-term keys. Define different kinds
of long-term keys in second step.\<close>
datatype ltkey = \<comment> \<open>long-term keys\<close>
sharK "agent" \<comment> \<open>key shared with server\<close>
| publK "agent" \<comment> \<open>agent's public key\<close>
| privK "agent" \<comment> \<open>agent's private key\<close>
datatype key =
sesK "fresh_t" \<comment> \<open>session key\<close>
| ltK "ltkey" \<comment> \<open>long-term key\<close>
abbreviation
shrK :: "agent \<Rightarrow> key" where
"shrK A \<equiv> ltK (sharK A)"
abbreviation
pubK :: "agent \<Rightarrow> key" where
"pubK A \<equiv> ltK (publK A)"
abbreviation
priK :: "agent \<Rightarrow> key" where
"priK A \<equiv> ltK (privK A)"
text\<open>The inverse of a symmetric key is itself; that of a public key
is the private key and vice versa\<close>
fun invKey :: "key \<Rightarrow> key" where
"invKey (ltK (publK A)) = priK A"
| "invKey (ltK (privK A)) = pubK A"
| "invKey K = K"
definition
symKeys :: "key set" where
"symKeys \<equiv> {K. invKey K = K}"
lemma invKey_K: "K \<in> symKeys \<Longrightarrow> invKey K = K"
by (simp add: symKeys_def)
text \<open>Most lemmas we need come for free with the inductive type definition:
injectiveness and distinctness.\<close>
lemma invKey_invKey_id [simp]: "invKey (invKey K) = K"
by (cases K, auto)
(rename_tac ltk, case_tac ltk, auto)
lemma invKey_eq [simp]: "(invKey K = invKey K') = (K=K')"
apply (safe)
apply (drule_tac f=invKey in arg_cong, simp)
done
text \<open>We get most lemmas below for free from the inductive definition
of type @{typ key}. Many of these are just proved as a reality check.\<close>
subsection\<open>Asymmetric Keys\<close>
(******************************************************************************)
text \<open>No private key equals any public key (essential to ensure that private
keys are private!). A similar statement was an axiom in Paulson's theory!\<close>
lemma privateKey_neq_publicKey: "priK A \<noteq> pubK A'"
by auto
lemma publicKey_neq_privateKey: "pubK A \<noteq> priK A'"
by auto
subsection\<open>Basic properties of @{term pubK} and @{term priK}\<close>
lemma publicKey_inject [iff]: "(pubK A = pubK A') = (A = A')"
by (auto)
lemma not_symKeys_pubK [iff]: "pubK A \<notin> symKeys"
by (simp add: symKeys_def)
lemma not_symKeys_priK [iff]: "priK A \<notin> symKeys"
by (simp add: symKeys_def)
lemma symKey_neq_priK: "K \<in> symKeys \<Longrightarrow> K \<noteq> priK A"
by (auto simp add: symKeys_def)
lemma symKeys_neq_imp_neq: "(K \<in> symKeys) \<noteq> (K' \<in> symKeys) \<Longrightarrow> K \<noteq> K'"
by blast
lemma symKeys_invKey_iff [iff]: "(invKey K \<in> symKeys) = (K \<in> symKeys)"
by (unfold symKeys_def, auto)
subsection\<open>"Image" equations that hold for injective functions\<close>
lemma invKey_image_eq [simp]: "(invKey x \<in> invKey`A) = (x \<in> A)"
by auto
(*holds because invKey is injective*)
lemma invKey_pubK_image_priK_image [simp]: "invKey ` pubK ` AS = priK ` AS"
by (auto simp add: image_def)
lemma publicKey_notin_image_privateKey: "pubK A \<notin> priK ` AS"
by auto
lemma privateKey_notin_image_publicKey: "priK x \<notin> pubK ` AA"
by auto
lemma publicKey_image_eq [simp]: "(pubK x \<in> pubK ` AA) = (x \<in> AA)"
by auto
lemma privateKey_image_eq [simp]: "(priK A \<in> priK ` AS) = (A \<in> AS)"
by auto
subsection\<open>Symmetric Keys\<close>
(******************************************************************************)
text \<open>The following was stated as an axiom in Paulson's theory.\<close>
lemma sym_sesK: "sesK f \<in> symKeys" \<comment> \<open>All session keys are symmetric\<close>
by (simp add: symKeys_def)
lemma sym_shrK: "shrK X \<in> symKeys" \<comment> \<open>All shared keys are symmetric\<close>
by (simp add: symKeys_def)
text \<open>Symmetric keys and inversion\<close>
lemma symK_eq_invKey: "\<lbrakk> SK = invKey K; SK \<in> symKeys \<rbrakk> \<Longrightarrow> K = SK"
by (auto simp add: symKeys_def)
text \<open>Image-related lemmas.\<close>
lemma publicKey_notin_image_shrK: "pubK x \<notin> shrK ` AA"
by auto
lemma privateKey_notin_image_shrK: "priK x \<notin> shrK ` AA"
by auto
lemma shrK_notin_image_publicKey: "shrK x \<notin> pubK ` AA"
by auto
lemma shrK_notin_image_privateKey: "shrK x \<notin> priK ` AA"
by auto
lemma sesK_notin_image_shrK [simp]: "sesK K \<notin> shrK`AA"
by (auto)
lemma shrK_notin_image_sesK [simp]: "shrK K \<notin> sesK`AA"
by (auto)
lemma sesK_image_eq [simp]: "(sesK x \<in> sesK ` AA) = (x \<in> AA)"
by auto
lemma shrK_image_eq [simp]: "(shrK x \<in> shrK ` AA) = (x \<in> AA)"
by auto
end
|
.__Rcpuid_flops <- list(
names = c("FLOPS", "KFLOPS", "MFLOPS", "GFLOPS", "TFLOPS", "PFLOPS", "EFLOPS", "ZFLOPS", "YFLOPS"),
ordmag = c(0, 3, 6, 9, 12, 15, 18, 21, 24)
)
flops_units <- function()
{
.__Rcpuid_flops$names
}
flops_ordmag <- function()
{
.__Rcpuid_flops$ordmag
}
find_unit <- function(unit)
{
unit <- match.arg(toupper(unit), .__Rcpuid_flops$names)
unit
}
best_unit <- function(x)
{
f <- 1e3
fun <- function(x) log10(abs(x))
dgts <- 3
size <- x
class(size) <- NULL
  if (size == 0)
  {
    # return a proper flops object rather than a bare unit string,
    # matching the return type of the non-zero branch below
    class(size) <- "flops"
    attr(size, "unit") <- flops_units()[1L]
    return( size )
  }
num.digits <- fun(size)
for (i in seq.int(9))
{
if (num.digits < dgts*i)
{
unit <- flops_units()[i]
break
}
}
size <- size/(f^(i-1))
class(size) <- "flops"
attr(size, "unit") <- flops_units()[i]
return( size )
}
swap_unit <- function(x, unit)
{
inunit <- attr(x, "unit")
index <- which(flops_units() == inunit)
ordmag <- flops_ordmag()[index]
flops <- as.numeric(x) * 10^ordmag
index <- which(flops_units() == unit)
ordmag <- flops_ordmag()[index]
ret <- flops / 10^ordmag
class(ret) <- "flops"
attr(ret, "unit") <- unit
return( ret )
}
#' Flops Constructor
#'
#' Function to build a flops object.
#'
#' This provides a simple way of representing flops, scaled by
#' some SI unit (e.g., mega, giga, ...).
#'
#' @param size
#' A number of flops, scaled by the input unit unit.
#' @param unit
#' An SI unit of flops (e.g., MFLOPS for MegaFLOPS, GFLOPS for
#' GigaFLOPS, etc.).
#'
#' @return
#' Returns a flops object.
#'
#' @examples
#' \dontrun{
#' library(okcpuid, quietly=TRUE)
#' flops(2000000) # 2 MFLOPS
#' }
#'
#' @export flops
flops <- function(size=0, unit)
{
if (missing(unit))
{
size <- best_unit(size)
}
else
{
unit <- match.arg(toupper(unit), flops_units())
attr(size, "unit") <- "FLOPS"
class(size) <- "flops"
size <- swap_unit(size, unit)
}
return( size )
}
#' unflop
#'
#' Convert a flops object into a numeric.
#'
#' This function differs from a simple \code{as.numeric()} call in
#' that flops objects store the unit flops. So if you have a flops
#' object \code{x} that prints "10 MFLOPS", \code{as.numeric(x)}
#' would produce 10, while \code{unflop()} would produce 10000000.
#'
#' @param x
#' A flops object.
#'
#' @return
#' The number of flops represented by x.
#'
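#' @examples
#' \dontrun{
#' # A short sketch: build a flops object, then recover the raw count.
#' x <- flops(2000000)  # stored as 2 MFLOPS
#' unflop(x)            # 2e+06
#' }
#'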
#' @rdname unflop
#' @export
unflop <- function(x)
{
if (!inherits(x, "flops"))
stop("Argument 'x' must be a flops object")
as.numeric(swap_unit(x, "FLOPS"))
}
#' print-flops
#'
#' @param x
#' A flops object.
#' @param ...
#' Ignored.
#' @param unit
#' The flops display unit to use.
#' @param digits
#' The number of decimal digits to display.
#'
#' @name print-flops
#' @rdname print-flops
#' @method print flops
#' @export
print.flops <- function(x, ..., unit="best", digits=3)
{
if (unit == "best")
x <- best_unit(unflop(x))
else
{
unit <- match.arg(toupper(unit), flops_units())
x <- swap_unit(x, unit)
}
size <- as.numeric(x)
unit <- attr(x, "unit")
if (x > 1e22)
format <- "e"
else
format <- "f"
cat(sprintf(paste("%.", digits, format, " ", unit, "\n", sep=""), size))
}
|
\chapter{Quality of Service}
\section{Introduction}
\subsection{Traffic characteristics}
\paragraph{Voice:}Voice traffic is predictable and smooth. However, voice is delay-sensitive and there is no reason to re-transmit voice if packets are lost. Therefore, voice packets must receive a higher priority than other types of traffic. Latency should be no more than \textbf{150 ms}. Jitter should be no more than \textbf{30 ms}, and voice packet loss should be no more than \textbf{1\%}. Voice traffic requires at least \textbf{30 Kb/s} of bandwidth.
\paragraph{Video:}Video traffic tends to be unpredictable, inconsistent, and bursty compared to voice traffic. Compared to voice, video is less resilient to loss and has a higher volume of data per packet. Latency should be no more than \textbf{400 ms}. Jitter should be no more than \textbf{50 ms}, and video packet loss should be no more than \textbf{1\%}. Video traffic requires at least \textbf{384 Kb/s} of bandwidth.
\paragraph{Data:}Data traffic is relatively insensitive to drops and delays compared to voice and video. The two main factors a network administrator needs to ask about the flow of data traffic are the following: Does the data come from an interactive application? Is the data mission critical?
Network congestion causes \textbf{delay}. Two types of delay are fixed and variable. A \emph{fixed delay} is a specific amount of time a specific process takes, such as how long it takes to place a bit on the transmission media. A \emph{variable delay} takes an unspecified amount of time and is affected by factors such as how much traffic is being processed. \emph{Jitter} is the variation in the delay of received packets.
\subsection{QoS tools}
When the volume of traffic is greater than what can be transported across the network, network devices (router, switch, etc.) hold the packets in memory until resources become available to transmit them. If the number of packets continues to increase, the memory within the device fills up and packets are dropped. This problem can be solved by either increasing link capacity or implementing QoS.\\
A device implements QoS only when it is experiencing congestion. There are three categories of QoS tools: Classification and marking, Congestion avoidance, Congestion management. Refer to Figure \ref{QoStools} to help understand the sequence of how these tools are used when QoS is applied to packet flows.\\
\begin{figure}[hbtp]
\caption{QoS sequence}\label{QoStools}
\centering
\includegraphics[ width=0.8\textwidth ]{pictures/QoStools.PNG}
\end{figure}
\section{Classification and marking}
Before a packet can have a QoS policy applied to it, the packet has to be classified. Classification and marking identifies types of packets. Traffic should be classified and marked as close to its source as technically and administratively feasible. This defines the trust boundary. \\
\textbf{Marking} means that we are adding a value to the packet header. Devices receiving the packet look at this field to see if it matches a defined policy. Trusted endpoints have the capabilities and intelligence to mark application traffic to the appropriate \textbf{Layer 2 CoS and/or Layer 3 DSCP} values. Examples of trusted endpoints include IP phones, wireless access points, videoconferencing gateways and systems, IP conferencing stations, and more.\\
Traffic flows can be \textbf{classified} at Layers 2 and 3 using interfaces, ACLs, and class maps.
\subsection{Marking at Layer 2 (CoS field)}
802.1Q is the IEEE standard that supports VLAN tagging at Layer 2 on Ethernet networks. The 802.1Q standard also includes the QoS prioritization scheme known as \textbf{802.1p}. The 802.1p standard uses the first three bits in the TCI (Tag Control Information) field to identify the CoS (Class of Service) markings (Figure \ref{CoS}).\\
\begin{figure}[hbtp]
\caption{Ethernet Class of Service values}\label{CoS}
\centering
\includegraphics[ width=0.8\textwidth ]{pictures/CoS.PNG}
\end{figure}
\note As the frame header is changed hop by hop, the Layer 2 marking and its QoS information also change.
\subsection{Marking at Layer 3 (DSCP field)}
Unlike Layer 2 marking, Layer 3 marking does not change QoS information hop by hop; instead, it maintains the information from end to end.\\
Both IPv4 and IPv6 support Layer 3 marking: the \textbf{ToS} field of the IPv4 packet and the \textbf{Traffic Class} field of the IPv6 packet. The contents of these two fields are identical, as shown in Figure \ref{ToS}. The most important portion of the field is the \textbf{DSCP} (\textbf{DiffServ} Code Point) field, which is designated for QoS. The DSCP values are organized into three categories: \\
\begin{figure}[hbtp]
\caption{Type of Service/Traffic Class Field}\label{ToS}
\centering
\includegraphics[ width=0.8\textwidth ]{pictures/ToS.PNG}
\end{figure}
\begin{itemize}
\item \textbf{Best-Effort (BE):} When a router experiences congestion, these packets will be dropped. No QoS plan is implemented.
\item \textbf{Expedited Forwarding (EF):} DSCP decimal value is 46 (binary 101110). At Layer 3, Cisco recommends that EF only be used to mark \textbf{voice} packets.
\item \textbf{Assured Forwarding (AF):} Uses five of the six DSCP bits to indicate queues and drop preference. As shown in Figure \ref{DSCP}, the first 3 bits are used to designate the class and the next two bits the drop preference; the 6th bit is set to zero. The AFxy formula shows how the AF values are calculated (a worked example follows this list). For example, AF32 belongs to class 3 (binary 011) and has a medium drop preference (binary 10).
\end{itemize}
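As a worked example, the decimal DSCP value of an AF marking follows directly from this bit layout: the class $x$ occupies the three high-order bits and the drop preference $y$ the next two bits, so
\[
\mathrm{DSCP}(\mathrm{AF}xy) = 8x + 2y, \qquad \text{e.g.}\quad \mathrm{DSCP}(\mathrm{AF}32) = 8 \cdot 3 + 2 \cdot 2 = 28.
\]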
The ECN (Explicit Congestion Notification) field can be used by routers to mark packets instead of dropping them. The ECN marking informs downstream routers that there is congestion in the packet flow. \\
\begin{figure}[hbtp]
\caption{Assured forwarding values}\label{DSCP}
\centering
\includegraphics[ scale=0.7 ]{pictures/DSCP.PNG}
\end{figure}
\section{Congestion Avoidance}
We avoid congestion by dropping lower-priority packets before congestion becomes severe. When the queue fills past the minimum threshold, a small percentage of packets is dropped. When the maximum threshold is passed, all packets are dropped. WRED, traffic shaping, and traffic policing are three mechanisms provided by Cisco IOS QoS software to prevent congestion.
\paragraph{WRED} is the primary congestion avoidance tool. It regulates TCP data traffic before tail drops (caused by queue overflows) occur.
\paragraph{Traffic shaping}is applied to \emph{outbound} traffic, meaning that excess packets going out an interface get queued and scheduled for later transmission. The result of traffic shaping is a \emph{smoothed} packet output rate, as shown in Figure \ref{Spacing}. Ensure that you have sufficient memory when enabling shaping.\\
\begin{figure}[hbtp]
\caption{Spacing traffic example}\label{Spacing}
\centering
\includegraphics[scale=0.7]{pictures/Spacing.PNG}
\end{figure}
\paragraph{Traffic policing}is applied to inbound traffic on an interface. When the traffic rate reaches the configured maximum rate, excess traffic is \emph{dropped} (or \emph{remarked}), as shown in figure \ref{policing}.\\
\begin{figure}[hbtp]
\caption{Policing traffic example}\label{policing}
\centering
\includegraphics[ scale=0.7 ]{pictures/policing.PNG}
\end{figure}
\section{Congestion management}
When traffic exceeds available network resources, Congestion management buffers and prioritizes packets before being transmitted to the destination. Common Cisco IOS-based congestion management tools include CBWFQ and LLQ algorithms.
\subsection{WFQ}
WFQ (Weighted Fair Queuing) is an automated scheduling method that provides fair bandwidth allocation to all network traffic. WFQ applies priority to identified traffic and classifies it into flows, as shown in Figure \ref{WFQ}. WFQ then determines how much bandwidth each flow is allowed. WFQ classifies traffic into different flows based on packet header addressing.\\
\begin{figure}[hbtp]
\caption{WFQ example}\label{WFQ}
\centering
\includegraphics[ width=0.8\textwidth ]{pictures/WFQ.PNG}
\end{figure}
WFQ is not supported with tunneling and encryption. It does not allow users to take control over bandwidth allocation.
\subsection{CBWFQ}
Class-Based Weighted Fair Queuing (CBWFQ) extends the standard WFQ functionality to provide support for user-defined traffic classes. For CBWFQ, you define traffic classes based on match criteria including protocols, access control lists (ACLs), and input interfaces.\\
To characterize a class, you assign it bandwidth, weight, and queue limit. After a queue has reached its configured queue limit, adding more packets to the class causes tail drop. Tail drop means a router simply discards any packet that arrives at the tail end of a full queue.
\subsection{LLQ}
The Low Latency Queuing (LLQ) feature brings strict priority queuing to CBWFQ. \emph{Strict PQ allows voice to be sent first}. Without LLQ, CBWFQ services packets fairly based on weight; no class of packets may be granted strict priority. This scheme poses problems for voice traffic, which is largely intolerant of delay.
\section{QoS models}
The three models for implementing QoS are: Best-effort model, Integrated services (IntServ), Differentiated services (DiffServ). Best-effort model means \emph{no QoS} is implemented. QoS is really implemented in a network using either IntServ or DiffServ.
\subsection{Best effort}
The best-effort model (meaning no QoS) treats all network packets in the same way. This model is used when QoS is not required. Table \ref{BestEffort} lists the benefits and drawbacks of the best-effort model.
\begin{table}[hbtp]
\centering
\caption{Pros and Cons of Best-effort}\label{BestEffort}
\begin{tabular}{ll}
\toprule
\head{Benefits} & \head{Drawbacks} \\
\midrule
Most scalable & No guarantees of delivery \\
Scalability is limited only by available bandwidth & Packets can arrive in any order \\
No special QoS mechanism required & No packets have preferential treatment \\
Easy to deploy & Critical data is treated the same as casual data \\
\bottomrule
\end{tabular}
\end{table}
\subsection{Integrated services}
Integrated Services (IntServ) is a multiple-service model that can accommodate multiple QoS requirements.\\
It uses \emph{resource reservation} and \emph{admission-control mechanisms} as building blocks to establish and maintain QoS. Each individual communication must explicitly specify its traffic descriptor and requested resources to the network (Figure \ref{IntServ}). The edge router performs admission control to ensure that available resources are sufficient in the network.\\
\begin{figure}[hbtp]
\caption{Simple IntServ example}\label{IntServ}
\centering
\includegraphics[scale=0.7]{pictures/IntServ.PNG}
\end{figure}
IntServ uses the \textbf{RSVP} (Resource Reservation Protocol) to reserve bandwidth for an application's traffic (e.g. VoIP) across the entire path. If this requested reservation fails along the path, the originating application does not send any data.\\
\begin{table}[hbtp]
\centering
\caption{Pros and Cons of IntServ}
\begin{tabular}{ll}
\toprule
\head{Benefits} & \head{Drawbacks} \\
\midrule
Explicit end-to-end resource admission control & Resource intensive \\
Per-request policy admission control & Not scalable \\
Signaling of dynamic port numbers & \\
\bottomrule
\end{tabular}
\end{table}
\subsection{Differentiated services}
The DiffServ design overcomes the limitations of both the best-effort and IntServ models. Unlike IntServ, DiffServ is not an end-to-end QoS strategy and does not use signaling. Instead, DiffServ uses a ``soft QoS'' approach (Figure \ref{DiffServ1}). For example, DiffServ can provide low-latency guaranteed service to voice or video while providing best-effort delivery to web traffic or file transfers.\\
\begin{figure}[hbtp]
\caption{Simple DiffServ example}\label{DiffServ1}
\centering
\includegraphics[scale=0.7]{pictures/DiffServ.PNG}
\end{figure}
Specifically, DiffServ divides network traffic into classes based on business requirements. Each of the classes can then be assigned a different level of service. You pay for a level of service, and throughout the network that level of service is recognized, so your packets are given either preferential or normal treatment, depending on what you requested.\\
\begin{table}[hbtp]
\centering
\caption{Pros and Cons of DiffServ}\label{DiffServ2}
\begin{tabular}{ll}
\toprule
\head{Benefits} & \head{Drawbacks} \\
\midrule
Highly scalable & No absolute guarantee of delivery \\
Many different levels of quality & Requires complex mechanisms \\
\bottomrule
\end{tabular}
\end{table}
|
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
s : Set α
a : α
di : DenseInducing i
hs : s ∈ 𝓝 a
⊢ closure (i '' s) ∈ 𝓝 (i a)
[PROOFSTEP]
rw [di.nhds_eq_comap a, ((nhds_basis_opens _).comap _).mem_iff] at hs
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
s : Set α
a : α
di : DenseInducing i
hs : ∃ i_1, (i a ∈ i_1 ∧ IsOpen i_1) ∧ i ⁻¹' i_1 ⊆ s
⊢ closure (i '' s) ∈ 𝓝 (i a)
[PROOFSTEP]
rcases hs with ⟨U, ⟨haU, hUo⟩, sub : i ⁻¹' U ⊆ s⟩
[GOAL]
case intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
s : Set α
a : α
di : DenseInducing i
U : Set β
sub : i ⁻¹' U ⊆ s
haU : i a ∈ U
hUo : IsOpen U
⊢ closure (i '' s) ∈ 𝓝 (i a)
[PROOFSTEP]
refine' mem_of_superset (hUo.mem_nhds haU) _
[GOAL]
case intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
s : Set α
a : α
di : DenseInducing i
U : Set β
sub : i ⁻¹' U ⊆ s
haU : i a ∈ U
hUo : IsOpen U
⊢ U ⊆ closure (i '' s)
[PROOFSTEP]
calc
U ⊆ closure (i '' (i ⁻¹' U)) := di.dense.subset_closure_image_preimage_of_isOpen hUo
_ ⊆ closure (i '' s) := closure_mono (image_subset i sub)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
i : α → β
di✝ di : DenseInducing i
s : Set α
⊢ Dense (i '' s) ↔ Dense s
[PROOFSTEP]
refine' ⟨fun H x => _, di.dense.dense_image di.continuous⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
i : α → β
di✝ di : DenseInducing i
s : Set α
H : Dense (i '' s)
x : α
⊢ x ∈ closure s
[PROOFSTEP]
rw [di.toInducing.closure_eq_preimage_closure_image, H.closure_eq, preimage_univ]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalSpace β
i : α → β
di✝ di : DenseInducing i
s : Set α
H : Dense (i '' s)
x : α
⊢ x ∈ univ
[PROOFSTEP]
trivial
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : T2Space β
di : DenseInducing i
hd : Dense (range i)ᶜ
s : Set α
hs : IsCompact s
⊢ interior s = ∅
[PROOFSTEP]
refine' eq_empty_iff_forall_not_mem.2 fun x hx => _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : T2Space β
di : DenseInducing i
hd : Dense (range i)ᶜ
s : Set α
hs : IsCompact s
x : α
hx : x ∈ interior s
⊢ False
[PROOFSTEP]
rw [mem_interior_iff_mem_nhds] at hx
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : T2Space β
di : DenseInducing i
hd : Dense (range i)ᶜ
s : Set α
hs : IsCompact s
x : α
hx : s ∈ 𝓝 x
⊢ False
[PROOFSTEP]
have := di.closure_image_mem_nhds hx
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : T2Space β
di : DenseInducing i
hd : Dense (range i)ᶜ
s : Set α
hs : IsCompact s
x : α
hx : s ∈ 𝓝 x
this : closure (i '' s) ∈ 𝓝 (i x)
⊢ False
[PROOFSTEP]
rw [(hs.image di.continuous).isClosed.closure_eq] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : T2Space β
di : DenseInducing i
hd : Dense (range i)ᶜ
s : Set α
hs : IsCompact s
x : α
hx : s ∈ 𝓝 x
this : i '' s ∈ 𝓝 (i x)
⊢ False
[PROOFSTEP]
rcases hd.inter_nhds_nonempty this with ⟨y, hyi, hys⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : T2Space β
di : DenseInducing i
hd : Dense (range i)ᶜ
s : Set α
hs : IsCompact s
x : α
hx : s ∈ 𝓝 x
this : i '' s ∈ 𝓝 (i x)
y : β
hyi : y ∈ (range i)ᶜ
hys : y ∈ i '' s
⊢ False
[PROOFSTEP]
exact hyi (image_subset_range _ _ hys)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
d : δ
a : α
di : DenseInducing i
H : Tendsto h (𝓝 d) (𝓝 (i a))
comm : h ∘ g = i ∘ f
⊢ Tendsto f (comap g (𝓝 d)) (𝓝 a)
[PROOFSTEP]
have lim1 : map g (comap g (𝓝 d)) ≤ 𝓝 d := map_comap_le
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
d : δ
a : α
di : DenseInducing i
H : Tendsto h (𝓝 d) (𝓝 (i a))
comm : h ∘ g = i ∘ f
lim1 : map g (comap g (𝓝 d)) ≤ 𝓝 d
⊢ Tendsto f (comap g (𝓝 d)) (𝓝 a)
[PROOFSTEP]
replace lim1 : map h (map g (comap g (𝓝 d))) ≤ map h (𝓝 d) := map_mono lim1
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
d : δ
a : α
di : DenseInducing i
H : Tendsto h (𝓝 d) (𝓝 (i a))
comm : h ∘ g = i ∘ f
lim1 : map h (map g (comap g (𝓝 d))) ≤ map h (𝓝 d)
⊢ Tendsto f (comap g (𝓝 d)) (𝓝 a)
[PROOFSTEP]
rw [Filter.map_map, comm, ← Filter.map_map, map_le_iff_le_comap] at lim1
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
d : δ
a : α
di : DenseInducing i
H : Tendsto h (𝓝 d) (𝓝 (i a))
comm : h ∘ g = i ∘ f
lim1 : map f (comap g (𝓝 d)) ≤ comap i (map h (𝓝 d))
⊢ Tendsto f (comap g (𝓝 d)) (𝓝 a)
[PROOFSTEP]
have lim2 : comap i (map h (𝓝 d)) ≤ comap i (𝓝 (i a)) := comap_mono H
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
d : δ
a : α
di : DenseInducing i
H : Tendsto h (𝓝 d) (𝓝 (i a))
comm : h ∘ g = i ∘ f
lim1 : map f (comap g (𝓝 d)) ≤ comap i (map h (𝓝 d))
lim2 : comap i (map h (𝓝 d)) ≤ comap i (𝓝 (i a))
⊢ Tendsto f (comap g (𝓝 d)) (𝓝 a)
[PROOFSTEP]
rw [← di.nhds_eq_comap] at lim2
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
d : δ
a : α
di : DenseInducing i
H : Tendsto h (𝓝 d) (𝓝 (i a))
comm : h ∘ g = i ∘ f
lim1 : map f (comap g (𝓝 d)) ≤ comap i (map h (𝓝 d))
lim2 : comap i (map h (𝓝 d)) ≤ 𝓝 a
⊢ Tendsto f (comap g (𝓝 d)) (𝓝 a)
[PROOFSTEP]
exact le_trans lim1 lim2
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
di : DenseInducing i
b : β
s : Set β
hs : s ∈ 𝓝 b
⊢ ∃ a, i a ∈ s
[PROOFSTEP]
rcases mem_closure_iff_nhds.1 (di.dense b) s hs with ⟨_, ⟨ha, a, rfl⟩⟩
[GOAL]
case intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
di : DenseInducing i
b : β
s : Set β
hs : s ∈ 𝓝 b
a : α
ha : i a ∈ s
⊢ ∃ a, i a ∈ s
[PROOFSTEP]
exact ⟨a, ha⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
f : α → γ
di : DenseInducing i
hf : ∀ (b : β), ∃ c, Tendsto f (comap i (𝓝 b)) (𝓝 c)
a : α
⊢ extend di f (i a) = f a
[PROOFSTEP]
rcases hf (i a) with ⟨b, hb⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
f : α → γ
di : DenseInducing i
hf : ∀ (b : β), ∃ c, Tendsto f (comap i (𝓝 b)) (𝓝 c)
a : α
b : γ
hb : Tendsto f (comap i (𝓝 (i a))) (𝓝 b)
⊢ extend di f (i a) = f a
[PROOFSTEP]
refine' di.extend_eq_at' b _
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
f : α → γ
di : DenseInducing i
hf : ∀ (b : β), ∃ c, Tendsto f (comap i (𝓝 b)) (𝓝 c)
a : α
b : γ
hb : Tendsto f (comap i (𝓝 (i a))) (𝓝 b)
⊢ Tendsto f (𝓝 a) (𝓝 b)
[PROOFSTEP]
rwa [← di.toInducing.nhds_eq_comap] at hb
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g✝ : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
b : β
f : α → γ
g : β → γ
di : DenseInducing i
hf : ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) = f x
hg : ContinuousAt g b
⊢ extend di f b = g b
[PROOFSTEP]
refine' di.extend_eq_of_tendsto fun s hs => mem_map.2 _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g✝ : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
b : β
f : α → γ
g : β → γ
di : DenseInducing i
hf : ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) = f x
hg : ContinuousAt g b
s : Set γ
hs : s ∈ 𝓝 (g b)
⊢ f ⁻¹' s ∈ comap i (𝓝 b)
[PROOFSTEP]
suffices : ∀ᶠ x : α in comap i (𝓝 b), g (i x) ∈ s
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g✝ : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
b : β
f : α → γ
g : β → γ
di : DenseInducing i
hf : ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) = f x
hg : ContinuousAt g b
s : Set γ
hs : s ∈ 𝓝 (g b)
this : ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) ∈ s
⊢ f ⁻¹' s ∈ comap i (𝓝 b)
case this
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g✝ : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
b : β
f : α → γ
g : β → γ
di : DenseInducing i
hf : ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) = f x
hg : ContinuousAt g b
s : Set γ
hs : s ∈ 𝓝 (g b)
⊢ ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) ∈ s
[PROOFSTEP]
exact hf.mp (this.mono fun x hgx hfx => hfx ▸ hgx)
[GOAL]
case this
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g✝ : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
b : β
f : α → γ
g : β → γ
di : DenseInducing i
hf : ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) = f x
hg : ContinuousAt g b
s : Set γ
hs : s ∈ 𝓝 (g b)
⊢ ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) ∈ s
[PROOFSTEP]
clear hf f
[GOAL]
case this
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f : γ → α
g✝ : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
b : β
g : β → γ
di : DenseInducing i
hg : ContinuousAt g b
s : Set γ
hs : s ∈ 𝓝 (g b)
⊢ ∀ᶠ (x : α) in comap i (𝓝 b), g (i x) ∈ s
[PROOFSTEP]
refine' eventually_comap.2 ((hg.eventually hs).mono _)
[GOAL]
case this
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f : γ → α
g✝ : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
b : β
g : β → γ
di : DenseInducing i
hg : ContinuousAt g b
s : Set γ
hs : s ∈ 𝓝 (g b)
⊢ ∀ (x : β), s (g x) → ∀ (a : α), i a = x → g (i a) ∈ s
[PROOFSTEP]
rintro _ hxs x rfl
[GOAL]
case this
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f : γ → α
g✝ : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T2Space γ
b : β
g : β → γ
di : DenseInducing i
hg : ContinuousAt g b
s : Set γ
hs : s ∈ 𝓝 (g b)
x : α
hxs : s (g (i x))
⊢ g (i x) ∈ s
[PROOFSTEP]
exact hxs
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
⊢ ContinuousAt (extend di f) b
[PROOFSTEP]
set φ := di.extend f
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
⊢ ContinuousAt φ b
[PROOFSTEP]
haveI := di.comap_nhds_neBot
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
⊢ ContinuousAt φ b
[PROOFSTEP]
suffices ∀ V' ∈ 𝓝 (φ b), IsClosed V' → φ ⁻¹' V' ∈ 𝓝 b by
simpa [ContinuousAt, (closed_nhds_basis (φ b)).tendsto_right_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this✝ : ∀ (b : β), NeBot (comap i (𝓝 b))
this : ∀ (V' : Set γ), V' ∈ 𝓝 (φ b) → IsClosed V' → φ ⁻¹' V' ∈ 𝓝 b
⊢ ContinuousAt φ b
[PROOFSTEP]
simpa [ContinuousAt, (closed_nhds_basis (φ b)).tendsto_right_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
⊢ ∀ (V' : Set γ), V' ∈ 𝓝 (φ b) → IsClosed V' → φ ⁻¹' V' ∈ 𝓝 b
[PROOFSTEP]
intro V' V'_in V'_closed
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
⊢ φ ⁻¹' V' ∈ 𝓝 b
[PROOFSTEP]
set V₁ := {x | Tendsto f (comap i <| 𝓝 x) (𝓝 <| φ x)}
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
⊢ φ ⁻¹' V' ∈ 𝓝 b
[PROOFSTEP]
have V₁_in : V₁ ∈ 𝓝 b := by
filter_upwards [hf]
rintro x ⟨c, hc⟩
rwa [di.extend_eq_of_tendsto hc]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
⊢ V₁ ∈ 𝓝 b
[PROOFSTEP]
filter_upwards [hf]
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
⊢ ∀ (a : β), (∃ c, Tendsto f (comap i (𝓝 a)) (𝓝 c)) → Tendsto f (comap i (𝓝 a)) (𝓝 (extend di f a))
[PROOFSTEP]
rintro x ⟨c, hc⟩
[GOAL]
case h.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
x : β
c : γ
hc : Tendsto f (comap i (𝓝 x)) (𝓝 c)
⊢ Tendsto f (comap i (𝓝 x)) (𝓝 (extend di f x))
[PROOFSTEP]
rwa [di.extend_eq_of_tendsto hc]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
⊢ φ ⁻¹' V' ∈ 𝓝 b
[PROOFSTEP]
obtain ⟨V₂, V₂_in, V₂_op, hV₂⟩ : ∃ V₂ ∈ 𝓝 b, IsOpen V₂ ∧ ∀ x ∈ i ⁻¹' V₂, f x ∈ V' := by
simpa [and_assoc] using ((nhds_basis_opens' b).comap i).tendsto_left_iff.mp (mem_of_mem_nhds V₁_in : b ∈ V₁) V' V'_in
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
⊢ ∃ V₂, V₂ ∈ 𝓝 b ∧ IsOpen V₂ ∧ ∀ (x : α), x ∈ i ⁻¹' V₂ → f x ∈ V'
[PROOFSTEP]
simpa [and_assoc] using ((nhds_basis_opens' b).comap i).tendsto_left_iff.mp (mem_of_mem_nhds V₁_in : b ∈ V₁) V' V'_in
[GOAL]
case intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
V₂ : Set β
V₂_in : V₂ ∈ 𝓝 b
V₂_op : IsOpen V₂
hV₂ : ∀ (x : α), x ∈ i ⁻¹' V₂ → f x ∈ V'
⊢ φ ⁻¹' V' ∈ 𝓝 b
[PROOFSTEP]
suffices ∀ x ∈ V₁ ∩ V₂, φ x ∈ V' by filter_upwards [inter_mem V₁_in V₂_in] using this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this✝ : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
V₂ : Set β
V₂_in : V₂ ∈ 𝓝 b
V₂_op : IsOpen V₂
hV₂ : ∀ (x : α), x ∈ i ⁻¹' V₂ → f x ∈ V'
this : ∀ (x : β), x ∈ V₁ ∩ V₂ → φ x ∈ V'
⊢ φ ⁻¹' V' ∈ 𝓝 b
[PROOFSTEP]
filter_upwards [inter_mem V₁_in V₂_in] using this
[GOAL]
case intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
V₂ : Set β
V₂_in : V₂ ∈ 𝓝 b
V₂_op : IsOpen V₂
hV₂ : ∀ (x : α), x ∈ i ⁻¹' V₂ → f x ∈ V'
⊢ ∀ (x : β), x ∈ V₁ ∩ V₂ → φ x ∈ V'
[PROOFSTEP]
rintro x ⟨x_in₁, x_in₂⟩
[GOAL]
case intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
V₂ : Set β
V₂_in : V₂ ∈ 𝓝 b
V₂_op : IsOpen V₂
hV₂ : ∀ (x : α), x ∈ i ⁻¹' V₂ → f x ∈ V'
x : β
x_in₁ : x ∈ V₁
x_in₂ : x ∈ V₂
⊢ φ x ∈ V'
[PROOFSTEP]
have hV₂x : V₂ ∈ 𝓝 x := IsOpen.mem_nhds V₂_op x_in₂
[GOAL]
case intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
V₂ : Set β
V₂_in : V₂ ∈ 𝓝 b
V₂_op : IsOpen V₂
hV₂ : ∀ (x : α), x ∈ i ⁻¹' V₂ → f x ∈ V'
x : β
x_in₁ : x ∈ V₁
x_in₂ : x ∈ V₂
hV₂x : V₂ ∈ 𝓝 x
⊢ φ x ∈ V'
[PROOFSTEP]
apply V'_closed.mem_of_tendsto x_in₁
[GOAL]
case intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
V₂ : Set β
V₂_in : V₂ ∈ 𝓝 b
V₂_op : IsOpen V₂
hV₂ : ∀ (x : α), x ∈ i ⁻¹' V₂ → f x ∈ V'
x : β
x_in₁ : x ∈ V₁
x_in₂ : x ∈ V₂
hV₂x : V₂ ∈ 𝓝 x
⊢ ∀ᶠ (x : α) in comap i (𝓝 x), f x ∈ V'
[PROOFSTEP]
use V₂
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalSpace β
i : α → β
di✝ : DenseInducing i
inst✝² : TopologicalSpace δ
f✝ : γ → α
g : γ → δ
h : δ → β
inst✝¹ : TopologicalSpace γ
inst✝ : T3Space γ
b : β
f : α → γ
di : DenseInducing i
hf : ∀ᶠ (x : β) in 𝓝 b, ∃ c, Tendsto f (comap i (𝓝 x)) (𝓝 c)
φ : β → γ := extend di f
this : ∀ (b : β), NeBot (comap i (𝓝 b))
V' : Set γ
V'_in : V' ∈ 𝓝 (φ b)
V'_closed : IsClosed V'
V₁ : Set β := {x | Tendsto f (comap i (𝓝 x)) (𝓝 (φ x))}
V₁_in : V₁ ∈ 𝓝 b
V₂ : Set β
V₂_in : V₂ ∈ 𝓝 b
V₂_op : IsOpen V₂
hV₂ : ∀ (x : α), x ∈ i ⁻¹' V₂ → f x ∈ V'
x : β
x_in₁ : x ∈ V₁
x_in₂ : x ∈ V₂
hV₂x : V₂ ∈ 𝓝 x
⊢ V₂ ∈ 𝓝 x ∧ i ⁻¹' V₂ ⊆ {x | (fun x => f x ∈ V') x}
[PROOFSTEP]
tauto
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
i✝ : α → β
di : DenseInducing i✝
inst✝¹ : TopologicalSpace δ
f : γ → α
g : γ → δ
h : δ → β
inst✝ : TopologicalSpace γ
i : α → β
c : Continuous i
dense : ∀ (x : β), x ∈ closure (range i)
H : ∀ (a : α) (s : Set α), s ∈ 𝓝 a → ∃ t, t ∈ 𝓝 (i a) ∧ ∀ (b : α), i b ∈ t → b ∈ s
a : α
⊢ comap i (𝓝 (i a)) ≤ 𝓝 a
[PROOFSTEP]
simpa [Filter.le_def] using H a
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
e : α → β
de : DenseEmbedding e
p : α → Prop
x✝ : { x // p x }
x : α
hx : p x
⊢ 𝓝 { val := x, property := hx } = comap (subtypeEmb p e) (𝓝 (subtypeEmb p e { val := x, property := hx }))
[PROOFSTEP]
simp [subtypeEmb, nhds_subtype_eq_comap, de.toInducing.nhds_eq_comap, comap_comap, (· ∘ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
e : α → β
de : DenseEmbedding e
p : α → Prop
⊢ closure (range (subtypeEmb p e)) = univ
[PROOFSTEP]
ext ⟨x, hx⟩
[GOAL]
case h.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
e : α → β
de : DenseEmbedding e
p : α → Prop
x : β
hx : x ∈ closure (e '' {x | p x})
⊢ { val := x, property := hx } ∈ closure (range (subtypeEmb p e)) ↔ { val := x, property := hx } ∈ univ
[PROOFSTEP]
rw [image_eq_range] at hx
[GOAL]
case h.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : TopologicalSpace α
inst✝² : TopologicalSpace β
inst✝¹ : TopologicalSpace γ
inst✝ : TopologicalSpace δ
e : α → β
de : DenseEmbedding e
p : α → Prop
x : β
hx✝ : x ∈ closure (e '' {x | p x})
hx : x ∈ closure (range fun x => e ↑x)
⊢ { val := x, property := hx✝ } ∈ closure (range (subtypeEmb p e)) ↔ { val := x, property := hx✝ } ∈ univ
[PROOFSTEP]
simpa [closure_subtype, ← range_comp, (· ∘ ·)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : HasBasis (𝓝 x) p s
f : α → β
hf : DenseInducing f
⊢ HasBasis (𝓝 (f x)) p fun i => closure (f '' s i)
[PROOFSTEP]
rw [Filter.hasBasis_iff] at h ⊢
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
⊢ ∀ (t : Set β), t ∈ 𝓝 (f x) ↔ ∃ i, p i ∧ closure (f '' s i) ⊆ t
[PROOFSTEP]
intro T
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
⊢ T ∈ 𝓝 (f x) ↔ ∃ i, p i ∧ closure (f '' s i) ⊆ T
[PROOFSTEP]
refine' ⟨fun hT => _, fun hT => _⟩
[GOAL]
case refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
hT : T ∈ 𝓝 (f x)
⊢ ∃ i, p i ∧ closure (f '' s i) ⊆ T
[PROOFSTEP]
obtain ⟨T', hT₁, hT₂, hT₃⟩ := exists_mem_nhds_isClosed_subset hT
[GOAL]
case refine'_1.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
hT : T ∈ 𝓝 (f x)
T' : Set β
hT₁ : T' ∈ 𝓝 (f x)
hT₂ : IsClosed T'
hT₃ : T' ⊆ T
⊢ ∃ i, p i ∧ closure (f '' s i) ⊆ T
[PROOFSTEP]
have hT₄ : f ⁻¹' T' ∈ 𝓝 x := by
rw [hf.toInducing.nhds_eq_comap x]
exact ⟨T', hT₁, Subset.rfl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
hT : T ∈ 𝓝 (f x)
T' : Set β
hT₁ : T' ∈ 𝓝 (f x)
hT₂ : IsClosed T'
hT₃ : T' ⊆ T
⊢ f ⁻¹' T' ∈ 𝓝 x
[PROOFSTEP]
rw [hf.toInducing.nhds_eq_comap x]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
hT : T ∈ 𝓝 (f x)
T' : Set β
hT₁ : T' ∈ 𝓝 (f x)
hT₂ : IsClosed T'
hT₃ : T' ⊆ T
⊢ f ⁻¹' T' ∈ Filter.comap f (𝓝 (f x))
[PROOFSTEP]
exact ⟨T', hT₁, Subset.rfl⟩
[GOAL]
case refine'_1.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
hT : T ∈ 𝓝 (f x)
T' : Set β
hT₁ : T' ∈ 𝓝 (f x)
hT₂ : IsClosed T'
hT₃ : T' ⊆ T
hT₄ : f ⁻¹' T' ∈ 𝓝 x
⊢ ∃ i, p i ∧ closure (f '' s i) ⊆ T
[PROOFSTEP]
obtain ⟨i, hi, hi'⟩ := (h _).mp hT₄
[GOAL]
case refine'_1.intro.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
hT : T ∈ 𝓝 (f x)
T' : Set β
hT₁ : T' ∈ 𝓝 (f x)
hT₂ : IsClosed T'
hT₃ : T' ⊆ T
hT₄ : f ⁻¹' T' ∈ 𝓝 x
i : ι
hi : p i
hi' : s i ⊆ f ⁻¹' T'
⊢ ∃ i, p i ∧ closure (f '' s i) ⊆ T
[PROOFSTEP]
exact
⟨i, hi,
(closure_mono (image_subset f hi')).trans (Subset.trans (closure_minimal (image_preimage_subset _ _) hT₂) hT₃)⟩
[GOAL]
case refine'_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
hT : ∃ i, p i ∧ closure (f '' s i) ⊆ T
⊢ T ∈ 𝓝 (f x)
[PROOFSTEP]
obtain ⟨i, hi, hi'⟩ := hT
[GOAL]
case refine'_2.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
i : ι
hi : p i
hi' : closure (f '' s i) ⊆ T
⊢ T ∈ 𝓝 (f x)
[PROOFSTEP]
suffices closure (f '' s i) ∈ 𝓝 (f x) by filter_upwards [this] using hi'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
i : ι
hi : p i
hi' : closure (f '' s i) ⊆ T
this : closure (f '' s i) ∈ 𝓝 (f x)
⊢ T ∈ 𝓝 (f x)
[PROOFSTEP]
filter_upwards [this] using hi'
[GOAL]
case refine'_2.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
h : ∀ (t : Set α), t ∈ 𝓝 x ↔ ∃ i, p i ∧ s i ⊆ t
f : α → β
hf : DenseInducing f
T : Set β
i : ι
hi : p i
hi' : closure (f '' s i) ⊆ T
⊢ closure (f '' s i) ∈ 𝓝 (f x)
[PROOFSTEP]
replace h := (h (s i)).mpr ⟨i, hi, Subset.rfl⟩
[GOAL]
case refine'_2.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalSpace β
inst✝ : T3Space β
ι : Type u_5
s : ι → Set α
p : ι → Prop
x : α
f : α → β
hf : DenseInducing f
T : Set β
i : ι
hi : p i
hi' : closure (f '' s i) ⊆ T
h : s i ∈ 𝓝 x
⊢ closure (f '' s i) ∈ 𝓝 (f x)
[PROOFSTEP]
exact hf.closure_image_mem_nhds h
|
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
Without loss of generality tactic.
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.list.perm
import Mathlib.PostPort
namespace Mathlib
namespace tactic
namespace interactive
/-- Without loss of generality: reduces to one goal under variable permutations.
Given a goal of the form `g xs`, a predicate `p` over a set of variables, and variable
permutations `xs_i`, `wlog` produces the following goals:
The case goal, i.e. the permutation `xs_i` covers all possible cases:
`⊢ p xs_0 ∨ ⋯ ∨ p xs_n`
The main goal, i.e. the goal reduced to `xs_0`:
`(h : p xs_0) ⊢ g xs_0`
The invariant goals, i.e. `g` is invariant under `xs_i`:
`(h : p xs_i) (this : g xs_0) ⊢ gs xs_i`
Either the permutation is provided, or a proof of the disjunction is provided to compute the
permutation. The disjunction needs to be in assoc normal form, e.g. `p₀ ∨ (p₁ ∨ p₂)`. In many cases
the invariant goals can be solved by AC rewriting using `cc` etc.
Example:
On a state `(n m : ℕ) ⊢ p n m` the tactic `wlog h : n ≤ m using [n m, m n]` produces the following
states:
`(n m : ℕ) ⊢ n ≤ m ∨ m ≤ n`
`(n m : ℕ) (h : n ≤ m) ⊢ p n m`
`(n m : ℕ) (h : m ≤ n) (this : p n m) ⊢ p m n`
`wlog` supports different calling conventions. The name `h` is used to give a name to the introduced
case hypothesis. If the name is omitted, the default will be `case`.
(1) `wlog : p xs0 using [xs0, …, xsn]`
Results in the case goal `p xs0 ∨ ⋯ ∨ p xsn`, the main goal `(case : p xs0) ⊢ g xs0` and the
invariance goals `(case : p xsi) (this : g xs0) ⊢ g xsi`.
(2) `wlog : p xs0 := r using xs0`
The expression `r` is a proof of the shape `p xs0 ∨ ⋯ ∨ p xsi`, it is also used to compute the
variable permutations.
(3) `wlog := r using xs0`
The expression `r` is a proof of the shape `p xs0 ∨ ⋯ ∨ p xsi`, it is also used to compute the
variable permutations. This is not as stable as (2), for example `p` cannot be a disjunction.
(4) `wlog : R x y using x y` and `wlog : R x y`
Produces the case `R x y ∨ R y x`. If `R` is ≤, then the disjunction is discharged using linearity.
If `using x y` is omitted, then `x` and `y` are the last two variables appearing in the
expression `R x y`. -/
end Mathlib |
#include <algorithm>
#include <cstring>  // std::memcpy
#include <limits>   // std::numeric_limits
#include <sstream>
#include <boost/lexical_cast.hpp>
#include "sink.h"
namespace flexicamore {
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FlexibleSink::FlexibleSink(cyclus::Context* ctx)
: cyclus::Facility(ctx),
current_throughput(1e299),
throughput_vals(std::vector<double>({})),
throughput_times(std::vector<int>({})),
latitude(0.0),
longitude(0.0),
coordinates(latitude, longitude) {
  SetMaxInventorySize(std::numeric_limits<double>::max());
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FlexibleSink::~FlexibleSink() {}
#pragma cyclus def schema flexicamore::FlexibleSink
#pragma cyclus def annotations flexicamore::FlexibleSink
#pragma cyclus def infiletodb flexicamore::FlexibleSink
#pragma cyclus def snapshot flexicamore::FlexibleSink
#pragma cyclus def snapshotinv flexicamore::FlexibleSink
#pragma cyclus def initinv flexicamore::FlexibleSink
#pragma cyclus def clone flexicamore::FlexibleSink
#pragma cyclus def initfromdb flexicamore::FlexibleSink
#pragma cyclus def initfromcopy flexicamore::FlexibleSink
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void FlexibleSink::EnterNotify() {
cyclus::Facility::EnterNotify();
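  // A first entry of -1 in throughput_times acts as a sentinel meaning that
  // no explicit change times were provided, so the FlexibleInput is
  // constructed from the throughput values alone.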
if (throughput_times[0]==-1) {
flexible_throughput = FlexibleInput<double>(this, throughput_vals);
} else {
flexible_throughput = FlexibleInput<double>(this, throughput_vals,
throughput_times);
}
current_throughput = throughput_vals[0];
if (in_commod_prefs.size() == 0) {
for (int i = 0; i < in_commods.size(); ++i) {
in_commod_prefs.push_back(cyclus::kDefaultPref);
}
} else if (in_commod_prefs.size() != in_commods.size()) {
std::stringstream ss;
ss << "in_commod_prefs has " << in_commod_prefs.size()
<< " values, expected " << in_commods.size();
throw cyclus::ValueError(ss.str());
}
RecordPosition();
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
std::string FlexibleSink::str() {
using std::string;
using std::vector;
std::stringstream ss;
ss << cyclus::Facility::str();
string msg = "";
msg += "accepts commodities ";
for (vector<string>::iterator commod = in_commods.begin();
commod != in_commods.end();
commod++) {
msg += (commod == in_commods.begin() ? "{" : ", ");
msg += (*commod);
}
msg += "} until its inventory is full at ";
ss << msg << inventory.capacity() << " kg.";
return "" + ss.str();
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
std::set<cyclus::RequestPortfolio<cyclus::Material>::Ptr>
FlexibleSink::GetMatlRequests() {
using cyclus::Material;
using cyclus::RequestPortfolio;
using cyclus::Request;
using cyclus::Composition;
std::set<RequestPortfolio<Material>::Ptr> ports;
RequestPortfolio<Material>::Ptr port(new RequestPortfolio<Material>());
double amt = RequestAmt();
Material::Ptr mat;
if (recipe_name.empty()) {
mat = cyclus::NewBlankMaterial(amt);
} else {
Composition::Ptr rec = this->context()->GetRecipe(recipe_name);
mat = cyclus::Material::CreateUntracked(amt, rec);
}
if (amt > cyclus::eps()) {
std::vector<Request<Material>*> mutuals;
for (int i = 0; i < in_commods.size(); i++) {
mutuals.push_back(port->AddRequest(mat, this, in_commods[i],
in_commod_prefs[i]));
}
port->AddMutualReqs(mutuals);
ports.insert(port);
}
return ports;
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
std::set<cyclus::RequestPortfolio<cyclus::Product>::Ptr>
FlexibleSink::GetGenRsrcRequests() {
using cyclus::CapacityConstraint;
using cyclus::Product;
using cyclus::RequestPortfolio;
using cyclus::Request;
std::set<RequestPortfolio<Product>::Ptr> ports;
RequestPortfolio<Product>::Ptr
port(new RequestPortfolio<Product>());
double amt = RequestAmt();
if (amt > cyclus::eps()) {
CapacityConstraint<Product> cc(amt);
port->AddConstraint(cc);
std::vector<std::string>::const_iterator it;
for (it = in_commods.begin(); it != in_commods.end(); ++it) {
std::string quality = ""; // not clear what this should be..
Product::Ptr rsrc = Product::CreateUntracked(amt, quality);
port->AddRequest(rsrc, this, *it);
}
ports.insert(port);
}
return ports;
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void FlexibleSink::AcceptMatlTrades(
const std::vector< std::pair<cyclus::Trade<cyclus::Material>,
cyclus::Material::Ptr> >& responses) {
std::vector< std::pair<cyclus::Trade<cyclus::Material>,
cyclus::Material::Ptr> >::const_iterator it;
for (it = responses.begin(); it != responses.end(); ++it) {
try {
inventory.Push(it->second);
} catch (cyclus::Error& e) {
e.msg(Agent::InformErrorMsg(e.msg()));
throw e;
}
}
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void FlexibleSink::AcceptGenRsrcTrades(
const std::vector< std::pair<cyclus::Trade<cyclus::Product>,
cyclus::Product::Ptr> >& responses) {
std::vector< std::pair<cyclus::Trade<cyclus::Product>,
cyclus::Product::Ptr> >::const_iterator it;
for (it = responses.begin(); it != responses.end(); ++it) {
try {
inventory.Push(it->second);
} catch (cyclus::Error& e) {
e.msg(Agent::InformErrorMsg(e.msg()));
throw e;
}
}
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void FlexibleSink::Tick() {
using std::string;
using std::vector;
// For an unknown reason, 'UpdateValue' has to be called with a copy of
// the 'this' pointer. When directly using 'this', the address passed to
// the function is increased by 8 bits resulting later on in a
// segmentation fault.
// TODO The problem described above has not been checked in FlexibleSink
// but it was the case in MIsoEnrichment. Maybe check if this has changed
// (for whatever reason) here in FlexibleSink?
cyclus::Agent* copy_ptr;
cyclus::Agent* source_ptr = this;
std::memcpy((void*) ©_ptr, (void*) &source_ptr, sizeof(cyclus::Agent*));
current_throughput = flexible_throughput.UpdateValue(copy_ptr);
// Crucial that current_throughput gets updated before!
double requestAmt = RequestAmt();
if (requestAmt > cyclus::eps()) {
for (vector<string>::iterator commod = in_commods.begin();
commod != in_commods.end();
commod++) {
LOG(cyclus::LEV_INFO4, "FlxSnk") << prototype() << " will request "
<< requestAmt << " kg of "
<< *commod << ".";
cyclus::toolkit::RecordTimeSeries<double>("demand"+*commod, this,
requestAmt);
}
}
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void FlexibleSink::Tock() {
double total_material = inventory.quantity();
LOG(cyclus::LEV_INFO4, "FlxSnk") << "FlexibleSink " << this->id()
<< " is holding " << total_material
<< " units of material at the close of step "
<< context()->time() << ".";
cyclus::toolkit::RecordTimeSeries<double>("SinkTotalMats", this,
total_material);
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void FlexibleSink::RecordPosition() {
std::string specification = this->spec();
context()->NewDatum("AgentPosition")
->AddVal("Spec", specification)
->AddVal("Prototype", this->prototype())
->AddVal("AgentId", id())
->AddVal("Latitude", latitude)
->AddVal("Longitude", longitude)
->Record();
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
extern "C" cyclus::Agent* ConstructFlexibleSink(cyclus::Context* ctx) {
return new FlexibleSink(ctx);
}
} // namespace flexicamore
|
NQcontent Government’s extensive out-of-the-box functionality is encompassed in a new approach for rapidly creating and deploying eGovernment websites, which has evolved through our partnership with public sector customers. At Forest of Dean District Council, for example, NQcontent enabled a new site to be developed and deployed in weeks rather than months, through simplifying the development and implementation processes.
Key elements of this approach include:
- out-of-the-box options for applying design elements, such as different types of layouts, page widths, headers, content area layouts etc.
- the inclusion of all the meta-data standard interfaces - IPSV, LGNL etc.
- all sites come pre-populated with Local Government Services standard content structured in accordance with the Local Government Navigation List.
- pre-configured approval workflows typically used in Local Government.
This unique approach ensures both time and cost savings. The site design procedure (typically taking between 3 and 5 weeks for each site) can be cut down to between 3 and 7 days (depending on requirements). This is because NQcontent now includes built-in interfaces for changing layout options and other design elements, simplifying the development of a site's look and feel.
NQcontent Government’s out-of-the-box features include LGSL content and pre-configured workflows, GCL categorisation (Local Government Category Listing) and eGMS v2 metadata tagging. LGSL and GCL are the recommended way to categorise information so that it is easily accessible, searchable and tagged for easy interoperability with other government systems.
\input{preamble}
\begin{document}
\section{Subjects}
\begin{itemize}
\item What is RNA $2^{nd}$ structure?
\item Computing a pseudo-knot free RNA $2^{nd}$ structure.
\end{itemize}
\section{Notes}
\subsection{RNA and secondary structure}
Messenger RNA is often described as a linear, unstructured sequence, only
interesting for the protein amino acid sequence that it encodes.
However, many non-coding RNAs exist which adopt sophisticated
three-dimensional structures, and some even catalyse biochemical reactions.
RNA is typically produced as a single stranded molecule, which then folds
to form a number of short base-paired stems; this is what we call the
secondary structure of the RNA.
RNA is a polymer of four different nucleotide subunits, which we abbreviate
$A,C,G$ and $U$. In DNA, thymine $T$ replaces uracil $U$. $G-C$ and $A-U$
form hydrogen-bonded base pairs. $G-C$ pairs form three hydrogen bonds and
tend to be more stable than $A-U$ pairs, which form only two. Some
non-canonical pairs also form, like the $G-U$ pair, and others which
distort regular A-form RNA helices.
Base pairs are approximately coplanar and are almost always
\textit{stacked} onto other base pairs. Such contiguous stacked base pairs
are called \textit{stems}. In three-dimensional space, the stems generally
form a regular (A-form) double helix. We typically represent the RNA
$2^{nd}$ structure in two-dimensional pictures.
\\
\\
Single stranded subsequences bounded by base pairs are called loops. A loop
at the end of a stem is called a \textit{hairpin loop}. Simple
substructures which consist of just a stem and a loop are called
\textit{stem loops} or \textit{hairpins}. Single stranded bases occurring
within a stem are called a \textit{bulge} or \textit{bulge loop} if the
single stranded bases are on only one side of the stem; if there are
unpaired bases on both sides, it is called an \textit{interior loop}. If a
loop connects three or more stems, then it is called a
\textit{multibranched loop}.
Base pairs almost always occur in a nested fashion in RNA secondary
structure. Base pairs are nested if we can draw arcs over them and none of
the arcs intersect. Formally, if $i,j$ is a base pair and $i',j'$ is a base
pair, then $i<i'<j'<j$. If it happens that these arcs would cross, then the
crossing pairs are called \textit{pseudo-knots}.
\begin{minipage}{\textwidth}
Just to spell it out, this is nested:
\begin{tikzpicture}
\node[smallcirclebox] (x1) {};
\node[smallcirclebox, right =of x1] (x2) {};
\node[smallcirclebox, right =of x2] (x3) {};
\node[smallcirclebox, right =of x3] (x4) {};
\path[edgepath]
(x1) edge [bend left,-] node {} (x4)
(x2) edge [bend left,-] node {} (x3);
\end{tikzpicture}
This is juxtaposed:
\begin{tikzpicture}
\node[smallcirclebox] (x1) {};
\node[smallcirclebox, right =of x1] (x2) {};
\node[smallcirclebox, right =of x2] (x3) {};
\node[smallcirclebox, right =of x3] (x4) {};
\path[edgepath]
(x1) edge [bend left,-] node {} (x2)
(x3) edge [bend left,-] node {} (x4);
\end{tikzpicture}
This is overlapping (pseudo-knot):
\begin{tikzpicture}
\node[smallcirclebox] (x1) {};
\node[smallcirclebox, right =of x1] (x2) {};
\node[smallcirclebox, right =of x2] (x3) {};
\node[smallcirclebox, right =of x3] (x4) {};
\path[edgepath]
(x1) edge [bend left,-] node {} (x3)
(x2) edge [bend left,-] node {} (x4);
\end{tikzpicture}
\end{minipage}
\\
\\
If there are no pseudo-knots, then we can represent the structure as a
planar graph, and in general it is easier to compute the secondary
structure with the least ``free energy'' without pseudo-knots. Fortunately,
there are very few pseudo-knots compared to the number of base pairs in
nested secondary structure, so it is usually acceptable to sacrifice the
information in pseudo-knots in return for efficient algorithms.
\subsection{Predicting $2^{nd}$ RNA structure}
Usually, when we want to predict the secondary structure, we will try to
minimize the amount of ``free energy''. The first example we will look at
bases the prediction on the primary structure (the plain sequence) only.
For this we have the Nussinov algorithm and Zuker's Mfold algorithm. Other
methods use comparative structure prediction, which is based on a prior
alignment, as well as probabilistic methods.
\subsubsection{Nussinov}
When we need to predict the secondary structure, there are many plausible
secondary structures. An RNA of length $200$ has over $10^{50}$ possible
base-paired structures. Therefore, we need both a scoring function that
assigns the correct structure the highest score, and an algorithm for
evaluating the scores.
Nussinov attempts to find the structure with the most base pairs. It is a
dynamic programming approach, which calculates the best structure for small
subsequences and works outwards. Let's first introduce some notation:
\begin{itemize}
\item $seq$ the RNA sequence of $\{A,C,G,U\}$
\item $seq[i,j]$ the RNA sequence from position $i$ to $j$
\item $str$ the best $2^{nd}$ structure for $seq$ of $\{(,),.\}$
\item $str[i,j]$ the best $2^{nd}$ structure for $seq[i,j]$
\item $score[i,j]$ the number of base pairs in $str[i,j]$
\end{itemize}
In the Nussinov algorithm we look at four cases:
$i$ being unpaired and $str[i+1,j]$, that is we just prepend $i$ to the
rest of the structure:
\begin{tikzpicture}
\node[circlebox] (x1) {$i$};
\node[circlebox, right =of x1] (x2) {$i+1$};
\node[right =of x2] (x3) {};
\node[right =of x3] (x4) {};
\node[circlebox, right =of x4] (x5) {$j$};
\path[edgepath,red,dashed]
(x2) edge [bend left,-] node {} (x5);
\path[edgepath,-]
(x1) edge node {} (x2)
(x2) edge node {} (x5);
\end{tikzpicture}
$j$ being unpaired and $str[i,j-1]$, that is we just append $j$ to the rest
of the structure:
\begin{tikzpicture}
\node[circlebox] (x1) {$i$};
\node[right =of x1] (x2) {};
\node[right =of x2] (x3) {};
\node[circlebox, right =of x3] (x4) {$j-1$};
\node[circlebox, right =of x4] (x5) {$j$};
\path[edgepath,red,dashed]
(x1) edge [bend left,-] node {} (x4);
\path[edgepath,-]
(x1) edge node {} (x4)
(x4) edge node {} (x5);
\end{tikzpicture}
$seq[i] \cdot seq[j]$ and $str[i+1, j-1]$, that is we add the base-pair
$i,j$ to the rest of the structure:
\begin{tikzpicture}
\node[circlebox] (x1) {$i$};
\node[circlebox, right =of x1] (x2) {$i+1$};
\node[right =of x2] (x3) {};
\node[circlebox, right =of x3] (x4) {$j-1$};
\node[circlebox, right =of x4] (x5) {$j$};
\path[edgepath,red,dashed]
(x2) edge [bend left,-] node {} (x4);
\path[edgepath,blue]
(x1) edge [bend left,-] node {} (x5);
\path[edgepath,-]
(x1) edge node {} (x2)
(x2) edge node {} (x4)
(x4) edge node {} (x5);
\end{tikzpicture}
$str[i,k]$ and $str[k+1,j]$ for some $i<k<j$, that is we just concatenate
the two structures:
\begin{tikzpicture}
\node[circlebox] (x1) {$i$};
\node[circlebox, right =of x1] (x2) {$k$};
\node[right =of x2] (x3) {};
\node[circlebox, right =of x3] (x4) {$k+1$};
\node[circlebox, right =of x4] (x5) {$j$};
\path[edgepath,red,dashed]
(x1) edge [bend left,-] node {} (x2)
(x4) edge [bend left,-] node {} (x5);
\path[edgepath,-]
(x1) edge node {} (x2)
(x2) edge node {} (x4)
(x4) edge node {} (x5);
\end{tikzpicture}
We then take the case which yields the highest score, which can be
described formally as:
\begin{equation*}
    score[i,j]=\begin{cases}
        0\text{ if }j-i<2\\
        \max\begin{cases}
            score[i+1,j]\\
            score[i, j-1]\\
            score[i+1, j-1]+1\text{ if }seq[i]\cdot seq[j]\\
            \max_{i<k<j-1}(score[i,k]+score[k+1,j])
        \end{cases}
    \end{cases}
\end{equation*}
We have to save all the results in a table of size \bigO{n^2}, and it will
take \bigO{n^3} time to compute. We then simply start at the top-right
corner of the table produced by the algorithm (the entry corresponding to
the first and last index) and trace back through the table. The path we
trace back through the table gives the optimal structure.
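As an illustration, here is a small Python sketch of the fill and the
traceback (all names are our own). It follows the base case $j-i<2$ used
above; real implementations usually also enforce a minimum hairpin-loop
length of three unpaired bases.
\begin{verbatim}
PAIRS = {("A","U"), ("U","A"), ("G","C"),
         ("C","G"), ("G","U"), ("U","G")}

def nussinov(seq):
    n = len(seq)
    score = [[0] * n for _ in range(n)]
    # fill by increasing subsequence length so subproblems are ready;
    # entries with j - i < 2 stay 0 (the base case)
    for length in range(3, n + 1):
        for i in range(n - length + 1):
            j = i + length - 1
            best = max(score[i + 1][j],    # i unpaired
                       score[i][j - 1])    # j unpaired
            if (seq[i], seq[j]) in PAIRS:  # i,j pair
                best = max(best, score[i + 1][j - 1] + 1)
            for k in range(i + 1, j):      # bifurcation
                best = max(best, score[i][k] + score[k + 1][j])
            score[i][j] = best
    structure = ["."] * n
    traceback(seq, score, 0, n - 1, structure)
    return score[0][n - 1], "".join(structure)

def traceback(seq, score, i, j, structure):
    if j - i < 2:
        return
    if score[i][j] == score[i + 1][j]:
        traceback(seq, score, i + 1, j, structure)
    elif score[i][j] == score[i][j - 1]:
        traceback(seq, score, i, j - 1, structure)
    elif ((seq[i], seq[j]) in PAIRS
          and score[i][j] == score[i + 1][j - 1] + 1):
        structure[i], structure[j] = "(", ")"
        traceback(seq, score, i + 1, j - 1, structure)
    else:
        for k in range(i + 1, j):
            if score[i][j] == score[i][k] + score[k + 1][j]:
                traceback(seq, score, i, k, structure)
                traceback(seq, score, k + 1, j, structure)
                return

print(nussinov("GGGAAAUCC"))  # -> (3, '.((.(.)))')
\end{verbatim}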
This can also be described as a stochastic CFG:
\begin{align*}
    S &\rightarrow aS|cS|gS|uS &\text{($i$ unpaired)}\\
    S &\rightarrow Sa|Sc|Sg|Su &\text{($j$ unpaired)}\\
    S &\rightarrow aSu|cSg|gSc|uSa &\text{($i,j$ pair)}\\
    S &\rightarrow SS &\text{(bifurcation)}
\end{align*}
\subsubsection{RNA evolution}
RNAs can have a common $2^{nd}$ structure without sharing significant
sequence similarity. For example, in the image below a mutation has
happened, and in order to maintain the base-pairing complementarity a
compensatory mutation has happened at the other end of the base-pair.
\begin{figure}[H]
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=.6\linewidth]{figures/rna-mut-1.png}
\end{subfigure}%
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=.6\linewidth]{figures/rna-mut-2.png}
\end{subfigure}
\end{figure}
In a structurally correct multiple alignment of RNAs, conserved base pairs
are often revealed by the presence of frequent correlated compensatory
mutations. Intuition: in order to conserve the base pairs, compensatory
mutations happen. Therefore if compensatory mutations happen, there must be
some base pairs we want to conserve.
Therefore we will measure the pairwise sequence co-variation between two
aligned columns $i$ and $j$ by:
\begin{equation*}
M_{ij}=\sum_{x_i,x_j}f_{x_i,x_j} \log_2\frac{f_{x_i,x_j}}{f_{x_i} \cdot
f_{x_j}}
\end{equation*}
Where:
\begin{itemize}
\item $f_{x_i}$ is the frequency of one of the five possible characters
observed in column $i$
\item $f_{x_i,x_j}$ is the joint frequency of the pairs observed in
columns $i$ and $j$
\end{itemize}
For example, say we have the three alignments:
\begin{align*}
seq1 = &GUCUGGAC\\
seq2 = &GACUGGUC\\
seq3 = &GGCUGGCC
\end{align*}
Recall that the frequency of an event $i$ is the number $n_i$ of times it
occurred, and the relative frequency is $n_i$ divided by the total
number of events $N$.
Then the mutual information between columns $2$ and $7$, which represent
compensatory mutations, can be computed as:
\begin{equation*}
M_{2,7}= \sum_{(x_2,x_7)\text{ observed}} \frac{1}{3} \log_2
\frac{1/3}{1/3 \cdot 1/3} = \log_2 3\approx 1.58
\end{equation*}
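As a minimal sketch (code and helper names are ours, assuming equal-length
aligned sequences), $M_{ij}$ can be computed directly from the column
frequencies; on the three sequences above it reproduces the value just
derived:
\begin{verbatim}
from collections import Counter
from math import log2

def mutual_information(alignment, i, j):
    # alignment: aligned sequences of equal length; i, j: 0-based columns
    n = len(alignment)
    f_i = Counter(s[i] for s in alignment)            # column frequencies
    f_j = Counter(s[j] for s in alignment)
    f_ij = Counter((s[i], s[j]) for s in alignment)   # joint frequencies
    return sum((c / n) * log2((c / n) / ((f_i[xi] / n) * (f_j[xj] / n)))
               for (xi, xj), c in f_ij.items())

seqs = ['GUCUGGAC', 'GACUGGUC', 'GGCUGGCC']
print(mutual_information(seqs, 1, 6))  # columns 2 and 7: log2(3) = 1.584...
\end{verbatim}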
Therefore, we get the following properties of the mutual information
$M_{ij}$:
\begin{itemize}
\item $M_{ij}$ is maximal if $i$ and $j$ individually appear completely
random, but are perfectly correlated
\item If $i$ and $j$ are uncorrelated, then the mutual information is
$0$
\item If either $i$ or $j$ are highly conserved positions, then we get
little or no mutual information
\end{itemize}
Think of mutual information as what we know about $j$ if we know $i$.
Using this comparative analysis, this is how we would find the secondary
structure:
\begin{itemize}
\item Start with a multiple alignment
\item Predict $2^{nd}$ structure based on the alignment
\item Refine alignment based on $2^{nd}$ structure
\item Repeat
\end{itemize}
In order to compare the sequences they must be:
\begin{itemize}
\item Sufficiently similar that they can be initially aligned by
primary sequence
\item Sufficiently dissimilar that a number of co-varying substitutions
can be detected
\end{itemize}
To build the $2^{nd}$ structure based on the alignment, we can use a
greedy method:
\begin{itemize}
\item Choose the pair of columns that have the highest $M_{ij}$
\item Make a base pair
\item Carry on with the second highest $M_{ij}$
\end{itemize}
The problem with this solution is that columns might end up in more than
one base pair.
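As a minimal sketch (our own code, reusing the \texttt{mutual\_information}
helper above), here is the greedy pairing with a used-column check, which
is the natural fix for the problem just mentioned; the cutoff
\texttt{min\_mi} is an assumed threshold:
\begin{verbatim}
def greedy_pairs(alignment, min_mi=1.0):
    n = len(alignment[0])
    # score every column pair, highest mutual information first
    scored = sorted(((mutual_information(alignment, i, j), i, j)
                     for i in range(n) for j in range(i + 1, n)),
                    reverse=True)
    used, pairs = set(), []
    for mi, i, j in scored:
        if mi < min_mi:
            break
        if i in used or j in used:   # skip columns already paired
            continue
        used.update((i, j))
        pairs.append((i, j))
    return pairs
\end{verbatim}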
Another solution is to modify the Nussinov algorithm to take alignments
into account. We introduce some new notation for the Nussinov algorithm:
\begin{itemize}
\item $aln$ the RNA alignment
\item $aln_k$ the $k^{th}$ sequence in the alignment
\item $aln[i,j]$ the RNA alignment from position $i$ to $j$
\item $str$ the best $2^{nd}$ structure for $aln$
\item $str[i,j]$ the best $2^{nd}$ structure for $aln[i,j]$
\item $score[i,j]$ the number of base pairs in $str[i,j]$
\item $aln[i] \cdot aln[j]$ if for all $k$, $aln_k[i]\cdot aln_k[j]$
\end{itemize}
We then increment the score by $1+M_{ij}$ instead of just by $1$ in order
to favour base pairs between columns with high mutual information.
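In the Nussinov sketch above this would be a one-line change: the pairing
case becomes \texttt{score[i+1][j-1] + 1 + M[i][j]} (with \texttt{M} a
precomputed mutual-information matrix, and pairing allowed only when
$aln_k[i]\cdot aln_k[j]$ holds for all $k$).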
\subsubsection{Zuker folding algorithm (MFold)}
Zuker's algorithm assumes that the correct structure is the one with the
lowest \textit{equilibrium free energy} $(\Delta G)$.
The $\Delta G$ of an RNA secondary structure is approximated as the sum of
individual contributions from loops, base pairs and other elements. It can
then be solved in much the same way as Nussinov, i.e.\ using a dynamic
programming algorithm.
\subsubsection{The grammatical approach}
As mentioned earlier, we can describe these dynamic programming algorithms
as stochastic CFGs (SCFGs). SCFGs work like CFGs; we simply assign
probabilities to each rule. For example, we could write:
\begin{equation*}
S \rightarrow \overset{0.25}{a}|\overset{0.75}{b}
\end{equation*}
for the grammar that produces an $a$ with $25\%$ probability and a $b$ with
$75\%$ probability.
If we have such a grammar, for example the one for Nussinov:
\begin{align*}
S &\rightarrow aS|cS|gS|uS &\text{($i$ unpaired)}\\
S &\rightarrow Sa|Sc|Sg|Su &\text{($j$ unpaired)}\\
S &\rightarrow aSu|cSg|gSc|uSa &\text{($i,j$ pair)}\\
S &\rightarrow SS &\text{(bifurcation)}
\end{align*}
Then we can convert it to Chomsky Normal Form and use the CYK algorithm to
find the most probable structure for an RNA sequence, but it is usually
better to use a specialized algorithm for your grammar in order to improve
efficiency.
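As a rough illustration (our own sketch; encoding the CNF grammar as
\texttt{unary} and \texttt{binary} rule dictionaries is an assumption, and
this is plain Viterbi CYK rather than the specialized algorithm alluded to
above):
\begin{verbatim}
from math import log

def cyk(seq, unary, binary, start='S'):
    # Viterbi CYK over a CNF SCFG.
    # unary:  {(A, a): p}     rules A -> a
    # binary: {(A, B, C): p}  rules A -> B C
    n = len(seq)
    best = [[dict() for _ in range(n)] for _ in range(n)]  # best[i][j][A]
    for i, a in enumerate(seq):
        for (A, b), p in unary.items():
            if b == a and log(p) > best[i][i].get(A, float('-inf')):
                best[i][i][A] = log(p)
    for length in range(1, n):
        for i in range(n - length):
            j = i + length
            for k in range(i, j):
                for (A, B, C), p in binary.items():
                    if B in best[i][k] and C in best[k + 1][j]:
                        cand = log(p) + best[i][k][B] + best[k + 1][j][C]
                        if cand > best[i][j].get(A, float('-inf')):
                            best[i][j][A] = cand
    return best[0][n - 1].get(start)  # log-prob of best parse, or None
\end{verbatim}
Running it returns the log-probability of the most probable parse rooted
at \texttt{start}, or \texttt{None} if the sequence has no parse.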
\subsubsection{Pseudo-knots are NP-Hard}
There are methods to handle pseudo-knots but the problem itself is NP-Hard.
\end{document} |
(* Title: HOL/Induct/Infinitely_Branching_Tree.thy
Author: Stefan Berghofer, TU Muenchen
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
*)
section \<open>Infinitely branching trees\<close>
theory Infinitely_Branching_Tree
imports Main
begin
datatype 'a tree =
Atom 'a
| Branch "nat \<Rightarrow> 'a tree"
primrec map_tree :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a tree \<Rightarrow> 'b tree"
where
"map_tree f (Atom a) = Atom (f a)"
| "map_tree f (Branch ts) = Branch (\<lambda>x. map_tree f (ts x))"
lemma tree_map_compose: "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
by (induct t) simp_all
primrec exists_tree :: "('a \<Rightarrow> bool) \<Rightarrow> 'a tree \<Rightarrow> bool"
where
"exists_tree P (Atom a) = P a"
| "exists_tree P (Branch ts) = (\<exists>x. exists_tree P (ts x))"
lemma exists_map:
"(\<And>x. P x \<Longrightarrow> Q (f x)) \<Longrightarrow>
exists_tree P ts \<Longrightarrow> exists_tree Q (map_tree f ts)"
by (induct ts) auto
subsection\<open>The Brouwer ordinals, as in ZF/Induct/Brouwer.thy.\<close>
datatype brouwer = Zero | Succ brouwer | Lim "nat \<Rightarrow> brouwer"
text \<open>Addition of ordinals\<close>
primrec add :: "brouwer \<Rightarrow> brouwer \<Rightarrow> brouwer"
where
"add i Zero = i"
| "add i (Succ j) = Succ (add i j)"
| "add i (Lim f) = Lim (\<lambda>n. add i (f n))"
text \<open>Multiplication of ordinals\<close>
primrec mult :: "brouwer \<Rightarrow> brouwer \<Rightarrow> brouwer"
where
"mult i Zero = Zero"
| "mult i (Succ j) = add (mult i j) i"
| "mult i (Lim f) = Lim (\<lambda>n. mult i (f n))"
lemma add_mult_distrib: "mult i (add j k) = add (mult i j) (mult i k)"
by (induct k) (auto simp add: add_assoc)
lemma mult_assoc: "mult (mult i j) k = mult i (mult j k)"
by (induct k) (auto simp add: add_mult_distrib)
text \<open>We could probably instantiate some axiomatic type classes and use
the standard infix operators.\<close>
subsection \<open>A WF Ordering for The Brouwer ordinals (Michael Compton)\<close>
text \<open>To use the function package we need an ordering on the Brouwer
ordinals. Start with a predecessor relation and form its transitive
closure.\<close>
definition brouwer_pred :: "(brouwer \<times> brouwer) set"
where "brouwer_pred = (\<Union>i. {(m, n). n = Succ m \<or> (\<exists>f. n = Lim f \<and> m = f i)})"
definition brouwer_order :: "(brouwer \<times> brouwer) set"
where "brouwer_order = brouwer_pred\<^sup>+"
lemma wf_brouwer_pred: "wf brouwer_pred"
unfolding wf_def brouwer_pred_def
apply clarify
apply (induct_tac x)
apply blast+
done
lemma wf_brouwer_order[simp]: "wf brouwer_order"
unfolding brouwer_order_def
by (rule wf_trancl[OF wf_brouwer_pred])
lemma [simp]: "(f n, Lim f) \<in> brouwer_order"
by (auto simp add: brouwer_order_def brouwer_pred_def)
text \<open>Example of a general function\<close>
function add2 :: "brouwer \<Rightarrow> brouwer \<Rightarrow> brouwer"
where
"add2 i Zero = i"
| "add2 i (Succ j) = Succ (add2 i j)"
| "add2 i (Lim f) = Lim (\<lambda>n. add2 i (f n))"
by pat_completeness auto
termination
by (relation "inv_image brouwer_order snd") auto
lemma add2_assoc: "add2 (add2 i j) k = add2 i (add2 j k)"
by (induct k) auto
end
|
import os
import logging
import traceback
import warnings
import pandas as pd
import numpy as np
from pandas.core.common import SettingWithCopyWarning
from common.base_parser import BaseParser
from common.constants import *
from common.database import get_database
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', handlers=[logging.StreamHandler()])
NODE_ASSOCIATION = 'Association'
ZENODO_CHEMICAL2DISEASE_FILE = 'Chemical2Disease_assoc_theme.tsv'
ZENODO_CHEMICAL2GENE_FILE = 'Chemical2Gene_assoc_theme.tsv'
ZENODO_GENE2DISEASE_FILE = 'Gene2Disease_assoc_theme.tsv'
ZENODO_GENE2GENE_FILE = 'Gene2Gene_assoc_theme.tsv'
headers = ['pmid', 'sentence_num', 'entry1_formatted', 'entry1Loc', 'entry2_formatted', 'entry2Loc',
           'entry1_name', 'entry2_name', 'entry1_id', 'entry2_id', 'entry1_type', 'entry2_type', 'path', 'sentence']
dependency_headers = ['snippet_id', 'entry1_id', 'entry2_id', 'entry1_name', 'entry2_name', 'path']
columns = ['pmid', 'sentence_num', 'entry1_name', 'entry2_name', 'entry1_id', 'entry2_id', 'path', 'sentence']
theme_map = {
'A+': 'agonism, activation',
'A-': 'antagonism, blocking',
'B': 'binding, ligand (esp. receptors)',
'E+': 'increases expression/production',
'E-': 'decreases expression/production',
'E': 'affects expression/production (neutral)',
'N': 'inhibits',
'O': 'transport, channels',
'K': 'metabolism, pharmacokinetics',
'Z': 'enzyme activity',
'T': 'treatment/therapy (including investigatory)',
'C': 'inhibits cell growth (esp. cancers)',
'Sa': 'side effect/adverse event',
'Pr': 'prevents, suppresses',
'Pa': 'alleviates, reduces',
'J': 'role in disease pathogenesis',
'Mp': 'biomarkers (of disease progression)',
'U': 'causal mutations',
'Ud': 'mutations affecting disease course',
'D': 'drug targets',
'Te': 'possible therapeutic effect',
'Y': 'polymorphisms alter risk',
'G': 'promotes progression',
'Md': 'biomarkers (diagnostic)',
'X': 'overexpression in disease',
'L': 'improper regulation linked to disease',
'W': 'enhances response',
'V+': 'activates, stimulates',
'I': 'signaling pathway',
'H': 'same protein or complex',
'Rg': 'regulation',
'Q': 'production by cell population',
}
class LiteratureDataParser(BaseParser):
def __init__(self, prefix: str):
BaseParser.__init__(self, prefix, 'literature')
self.parsed_dir = os.path.join(self.output_dir, 'parsed')
os.makedirs(self.output_dir, 0o777, True)
os.makedirs(self.parsed_dir, 0o777, True)
self.literature_chemicals = set()
self.literature_genes = set()
self.literature_diseases = set()
def get_datafile_name(self, entry1_type, entry2_type, with_theme=False):
if with_theme:
return os.path.join(
self.download_dir, f'part-ii-dependency-paths-{entry1_type.lower()}-{entry2_type.lower()}-sorted-with-themes.txt.gz')
return os.path.join(
self.download_dir, f'part-ii-dependency-paths-{entry1_type.lower()}-{entry2_type.lower()}-sorted.txt.gz')
def get_path2theme_datafile_name(self, entry1_type, entry2_type):
return os.path.join(
self.download_dir, f'part-i-{entry1_type.lower()}-{entry2_type.lower()}-path-theme-distributions.txt.gz')
def parse_dependency_file(self, entry1_type, entry2_type, snippet_file, with_theme=True):
"""
clean file, and write into a few cleaned file format into outfile folder 'parsed'. Update entities set,
and write data to the following files in the parsed folder
- snippet.tsv
- entity12entity2_assoc.tsv: with snippet_id, entry1, entry2, path columns
The files need to be further validated and cleaned by removing duplicates, un-matched genes, chemicals and diseaese
:param entry1_type:
:param entry2_type:
:return:
"""
file = self.get_datafile_name(entry1_type, entry2_type, with_theme)
if with_theme:
outfile = open(os.path.join(self.parsed_dir, f'{entry1_type}2{entry2_type}_assoc_theme.tsv'), 'w')
else:
outfile = open(os.path.join(self.parsed_dir, f'{entry1_type}2{entry2_type}_assoc.tsv'), 'w')
f = lambda x: str(x)
converters = {'pmid': f, 'sentence_num': f, 'entry1_id': f, 'entry2_id': f}
logging.info('processing ' + file)
count = 0
filerow_count = 0
data_chunk = pd.read_csv(
file, sep='\t',
names=headers,
usecols=columns,
converters=converters,
chunksize=10000,
index_col=False
)
        for i, chunk in enumerate(data_chunk):
            filerow_count += len(chunk)
            if i % 10 == 0:
                print(i)
            try:
                df = chunk.replace({'null': np.nan, '-': np.nan})
df.dropna(inplace=True)
df.drop_duplicates(inplace=True)
if entry1_type == entry2_type:
df = df[(df['entry1_name'] != df['entry2_name']) & (df['entry1_id'] != df['entry2_id'])]
if len(df) == 0:
continue
# clean gene ids
if entry1_type == NODE_GENE:
df = self.clean_gene_column(df, 'entry1_id')
df['entry1_id'] = df['entry1_id'].apply(f)
if entry2_type == NODE_GENE:
df = self.clean_gene_column(df, 'entry2_id')
df['entry2_id'] = df['entry2_id'].apply(f)
if entry1_type == NODE_GENE:
df = df[df['entry1_id'] != df['entry2_id']]
if entry2_type == NODE_DISEASE:
df = df[~df['entry2_id'].str.startswith('OMIM')]
df['entry2_id'] = df['entry2_id'].apply(
lambda x: x if str(x).startswith('MESH') else 'MESH:' + str(x))
if entry1_type == NODE_CHEMICAL:
df['entry1_id'] = df['entry1_id'].apply(
lambda x: x if str(x).startswith('CHEBI') or str(x).startswith('MESH') else 'MESH:' + str(x)
)
df['snippet_id'] = df.apply(lambda row: str(row['pmid']) + '-' + str(row['sentence_num']), axis=1)
df_assoc = df[dependency_headers]
df_assoc.to_csv(outfile, index=False, mode='a', sep='\t')
if snippet_file:
df_snippet = df[['snippet_id', 'pmid', 'sentence']].copy()
df_snippet.drop_duplicates(inplace=True)
df_snippet.to_csv(snippet_file, index=False, mode='a', sep='\t')
# update literature genes, diseases
if entry1_type == NODE_GENE:
self.literature_genes.update(df['entry1_id'].tolist())
if entry1_type == NODE_CHEMICAL:
self.literature_chemicals.update(df['entry1_id'].tolist())
if entry2_type == NODE_GENE:
self.literature_genes.update(df['entry2_id'].tolist())
if entry2_type == NODE_DISEASE:
self.literature_diseases.update(df['entry2_id'].tolist())
count = count + len(df)
except Exception as ex:
traceback.print_exc()
print(f'Errored out at index {i}')
break
logging.info('file rows processed: ' + str(filerow_count) + ', cleaned file row:' + str(count))
outfile.close()
def parse_dependency_files(self):
"""
Process all dependency file (with theme), write into parsed folder.
part-ii-dependency-paths-chemical-disease-sorted.txt.gz
file rows processed: 15645444, cleaned file row:12881577
part-ii-dependency-paths-chemical-gene-sorted.txt.gz
file rows processed: 9525647, cleaned file row:7958425
part-ii-dependency-paths-gene-disease-sorted.txt.gz
file rows processed: 12792758, cleaned file row:12808885
part-ii-dependency-paths-gene-gene-sorted.txt.gz
file rows processed: 34089578, cleaned file row:25333884
literature genes:150380
literature diseases:8586
literature chemicals:66178
:return:
"""
snippet_file = open(os.path.join(self.parsed_dir, self.file_prefix + 'snippet.tsv'), 'w')
self.parse_dependency_file(NODE_CHEMICAL, NODE_DISEASE, snippet_file, True)
self.parse_dependency_file(NODE_CHEMICAL, NODE_GENE, snippet_file, True)
self.parse_dependency_file(NODE_GENE, NODE_DISEASE, snippet_file, True)
self.parse_dependency_file(NODE_GENE, NODE_GENE, snippet_file, True)
snippet_file.close()
logging.info('literature genes:' + str(len(self.literature_genes)))
logging.info('literature diseases:' + str(len(self.literature_diseases)))
logging.info('literature chemicals:' + str(len(self.literature_chemicals)))
db = get_database()
print('Cleaning chemical...')
self.literature_chemicals = set(val for entry in self.literature_chemicals for val in entry.split('|'))
chemical_ids_to_exclude = db.get_data(
'MATCH (n:Chemical) WITH collect(n.eid) AS entity_ids RETURN [entry in $zenodo_ids WHERE NOT split(entry, ":")[1] IN entity_ids] AS exclude',
{'zenodo_ids': list(self.literature_chemicals)})['exclude'].tolist()[0]
print('Cleaning disease...')
disease_ids_to_exclude = db.get_data(
'MATCH (n:Disease) WITH collect(n.eid) AS entity_ids RETURN [entry in $zenodo_ids WHERE NOT split(entry, ":")[1] IN entity_ids] AS exclude',
{'zenodo_ids': list(self.literature_diseases)})['exclude'].tolist()[0]
print('Cleaning gene...')
gene_ids_to_exclude = db.get_data(
'MATCH (n:Gene:db_NCBI) WITH collect(n.eid) AS entity_ids RETURN [entry in $zenodo_ids WHERE NOT entry IN entity_ids] AS exclude',
{'zenodo_ids': list(self.literature_genes)})['exclude'].tolist()[0]
# print('Cleaning chemical...')
# with open(os.path.join(self.parsed_dir, self.file_prefix + 'chemical.tsv'), 'w') as f:
# f.writelines([s + '\n' for s in list(self.literature_chemicals - set(chemical_ids_to_exclude))])
# print('Cleaning disease...')
# with open(os.path.join(self.parsed_dir, self.file_prefix + 'disease.tsv'), 'w') as f:
# f.writelines([s + '\n' for s in list(self.literature_diseases - set(disease_ids_to_exclude))])
# print('Cleaning gene...')
# with open(os.path.join(self.parsed_dir, self.file_prefix + 'gene.tsv'), 'w') as f:
# f.writelines([s + '\n' for s in list(self.literature_genes - set(gene_ids_to_exclude))])
cleaned_chemical_ids = list(self.literature_chemicals - set(chemical_ids_to_exclude))
cleaned_disease_ids = list(self.literature_diseases - set(disease_ids_to_exclude))
cleaned_gene_ids = list(self.literature_genes - set(gene_ids_to_exclude))
self.clean_dependency_files(NODE_CHEMICAL, NODE_DISEASE, cleaned_chemical_ids, cleaned_disease_ids)
self.clean_dependency_files(NODE_CHEMICAL, NODE_GENE, cleaned_chemical_ids, cleaned_gene_ids)
self.clean_dependency_files(NODE_GENE, NODE_DISEASE, cleaned_gene_ids, cleaned_disease_ids)
self.clean_dependency_files(NODE_GENE, NODE_GENE, cleaned_gene_ids, cleaned_gene_ids)
def clean_dependency_files(self, entry1_type, entry2_type, entry1_ids, entry2_ids):
input_file = f'{entry1_type}2{entry2_type}_assoc_theme.tsv'
file_path = os.path.join(self.parsed_dir, input_file)
converters = None
if entry1_type == NODE_GENE or entry2_type == NODE_GENE:
f = lambda x: str(x)
converters = {'entry1_id': f, 'entry2_id': f}
df = pd.read_csv(file_path, header=0, sep='\t', converters=converters, names=dependency_headers)
df.drop_duplicates(inplace=True)
df.set_index('entry1_id', inplace=True)
df = df[df.index.isin(entry1_ids)]
df.reset_index(inplace=True)
df.set_index('entry2_id', inplace=True)
df = df[df.index.isin(entry2_ids)]
df.reset_index(inplace=True)
df.to_csv(file_path, index=False, sep='\t', chunksize=50000)
def parse_path2theme_file(self, entry1_type, entry2_type):
file = self.get_path2theme_datafile_name(entry1_type, entry2_type)
df = pd.read_csv(file, sep='\t', index_col='path')
cols = [col for col in df.columns if not col.endswith('.ind')]
df = df[cols]
df['max'] = df.max(axis=1)
df['sum'] = df[cols].sum(axis=1)
# keep only scores that is max or relative score > 0.3
for c in cols:
df.loc[(df[c] < df['max']) & (df[c] / df['sum'] < 0.3), c] = np.nan
df.reset_index(inplace=True)
# melt columns - change matrix format to database table format
df_theme = pd.melt(df, id_vars=['path', 'sum'], value_vars=cols, var_name='theme', value_name='score')
df_theme.dropna(inplace=True)
df_theme['relscore'] = df_theme['score'] / df_theme['sum']
df_theme.set_index('path', inplace=True)
df_theme.drop(['sum'], axis=1, inplace=True)
df_theme.sort_index(inplace=True)
df_theme.reset_index(inplace=True)
df_theme.set_index('theme', inplace=True)
# add theme description column
themes = pd.DataFrame.from_dict(theme_map, orient='index', columns=['description'])
themes.index.name = 'theme'
df_path2theme = pd.merge(df_theme, themes, how='inner', left_index=True, right_index=True)
df_path2theme.reset_index(inplace=True)
return df_path2theme
@staticmethod
def get_create_literature_query(entry1_type, entry2_type):
query = """
UNWIND $rows AS row
MERGE (n1:db_Literature:LiteratureEntity {eid:row.entry1_id})
ON CREATE SET n1.name = apoc.text.join([c in apoc.text.split(row.entry1_name, ' ') | apoc.text.capitalize(toLower(c))], '')
FOREACH (item IN CASE WHEN NOT '%s' IN labels(n1) THEN [1] ELSE [] END | SET n1:%s)
MERGE (n2:db_Literature:LiteratureEntity {eid:row.entry2_id})
ON CREATE SET n2.name = apoc.text.join([c in apoc.text.split(row.entry2_name,' ') | apoc.text.capitalize(toLower(c))], '')
FOREACH (item IN CASE WHEN NOT '%s' IN labels(n2) THEN [1] ELSE [] END | SET n2:%s)
WITH n1, n2, row
MERGE (a:Association {eid:row.entry1_id + '-' + row.entry2_id + '-' + row.theme})
ON CREATE
SET a:db_Literature,
a.entry1_type = '%s',
a.entry2_type = '%s',
a.description = row.description,
a.type = row.theme,
a.data_source = 'Literature'
MERGE (n1)-[:ASSOCIATED {description:row.description, type:row.theme}]->(n2)
MERGE (n1)-[:HAS_ASSOCIATION]->(a)
MERGE (a)-[:HAS_ASSOCIATION]->(n2)
""" % (
f'Literature{entry1_type}',
f'Literature{entry1_type}',
f'Literature{entry2_type}',
f'Literature{entry2_type}',
entry1_type,
entry2_type
)
return query
@staticmethod
def get_create_literature_snippet_pub_query():
query = """
UNWIND $rows AS row
MERGE (s:Snippet {eid:row.snippet_id})
ON CREATE
SET s:db_Literature,
s.pmid = row.pmid,
s.sentence = row.sentence,
s.data_source = 'Literature'
WITH s, row
MATCH (a:Association {eid:row.entry1_id + '-' + row.entry2_id + '-' + row.theme})
MERGE (s)-[i:INDICATES {normalized_score:toFloat(row.relscore), raw_score:toFloat(row.score), entry1_text:row.entry1_name, entry2_text:row.entry2_name, path:row.path}]->(a)
MERGE (pub:Publication {pmid:row.pmid}) SET pub:db_Literature
MERGE (s)-[:IN_PUB]->(pub)
"""
return query
def clean_gene_column(self, df, column_name):
"""
Remove tax_id from gene column, then split gene id's into multiple rows
:param df:
:return: cleaned df
"""
df[column_name].replace(r"\(Tax[:][0-9]*\)", '', regex=True, inplace=True)
        new_df = df.set_index(df.columns.drop(column_name).tolist())[column_name].str.split(';', expand=True).stack()
new_df = new_df.reset_index().rename(columns={0:column_name}).loc[:, df.columns]
return new_df
def clean_snippets(self):
"""Remove duplicates"""
logging.info('clean snippet.tsv')
df = pd.read_csv(os.path.join(self.parsed_dir, self.file_prefix + 'snippet.tsv'), sep='\t', header=0,
names=['id', 'pmid', 'sentence'])
logging.info('total rows:' + str(len(df)))
df.drop_duplicates(subset=['id'], inplace=True)
logging.info('unique rows: ' + str(len(df)))
translate = {'-LRB- ': '(', ' -RRB-': ')', '-LRB-': '(', '-RRB-': ')', '-LSB- ': '[', ' -RSB-': ']'}
for current, replace in translate.items():
            df.sentence = df.sentence.str.replace(current, replace, regex=False)
df.to_csv(os.path.join(self.parsed_dir, self.file_prefix + 'snippet.tsv'), index=False, sep='\t', chunksize=50000)
logging.info('done')
def parse_and_write_data_files(self):
df = pd.read_csv(os.path.join(self.parsed_dir, self.file_prefix + 'snippet.tsv'), sep='\t')
for filename, (entity1_type, entity2_type) in [
(ZENODO_CHEMICAL2DISEASE_FILE, (NODE_CHEMICAL, NODE_DISEASE)),
(ZENODO_CHEMICAL2GENE_FILE, (NODE_CHEMICAL, NODE_GENE)),
(ZENODO_GENE2DISEASE_FILE, (NODE_GENE, NODE_DISEASE)),
(ZENODO_GENE2GENE_FILE, (NODE_GENE, NODE_GENE))
]:
file_df = pd.read_csv(os.path.join(self.parsed_dir, filename), sep='\t')
file_df['sentence'] = file_df.snippet_id.map(df.set_index('id')['sentence'].to_dict())
file_df['pmid'] = file_df.snippet_id.map(df.set_index('id')['pmid'].to_dict())
cols = list(file_df.columns.values)
# put pmid column first
cols = cols[-1:] + cols[:-1]
reordered_df = file_df[cols]
path_df = self.parse_path2theme_file(entity1_type, entity2_type)
reordered_df2 = reordered_df.copy(deep=True)
"""
The path_df has lower case paths, need to lower in order to merge.
Quote from ref: https://zenodo.org/record/3459420
A few users have mentioned that the dependency paths in the "part-i" files are all lowercase text,
whereas those in the "part-ii" files maintain the case of the original sentence.
This complicates mapping between the two sets of files.
We kept the part-ii files in the same case as the original sentence to facilitate downstream
debugging - it's easier to tell which words in a particular sentence are contributing to the
dependency path if their original case is maintained. When working with the part-ii "with-themes" files,
if you simply convert the dependency path to lowercase, it is guaranteed to match to one of the
paths in the corresponding part-i file and you'll be able to get the theme scores.
"""
reordered_df2['PATH'] = reordered_df2.path
reordered_df2.path = reordered_df2.path.str.lower()
# do a left-join
reordered_df3 = reordered_df2.merge(path_df, how='left', left_on='path', right_on='path')
del reordered_df2
# revert the path column back to original casing
reordered_df3.drop(columns=['path'], inplace=True)
reordered_df3.rename(columns={'PATH': 'path'}, inplace=True)
reordered_df3.dropna(inplace=True)
reordered_df3.drop_duplicates(inplace=True)
reordered_df3.to_csv(os.path.join(self.output_dir, self.file_prefix + filename), index=False, sep='\t', chunksize=50000)
def main(args):
parser = LiteratureDataParser(args.prefix)
parser.parse_dependency_files()
parser.clean_snippets()
parser.parse_and_write_data_files()
for filename in [ZENODO_CHEMICAL2DISEASE_FILE, ZENODO_CHEMICAL2GENE_FILE, ZENODO_GENE2DISEASE_FILE, ZENODO_GENE2GENE_FILE]:
parser.upload_azure_file(filename, args.prefix)
if __name__ == '__main__':
    import argparse
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('prefix', help='file prefix for output files')
    main(arg_parser.parse_args())
|
[STATEMENT]
lemma compact_scaling:
fixes s :: "'a::real_normed_vector set"
assumes "compact s"
shows "compact ((\<lambda>x. c *\<^sub>R x) ` s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. compact ((*\<^sub>R) c ` s)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. compact ((*\<^sub>R) c ` s)
[PROOF STEP]
let ?f = "\<lambda>x. scaleR c x"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. compact ((*\<^sub>R) c ` s)
[PROOF STEP]
have *: "bounded_linear ?f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bounded_linear ((*\<^sub>R) c)
[PROOF STEP]
by (rule bounded_linear_scaleR_right)
[PROOF STATE]
proof (state)
this:
bounded_linear ((*\<^sub>R) c)
goal (1 subgoal):
1. compact ((*\<^sub>R) c ` s)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. compact ((*\<^sub>R) c ` s)
[PROOF STEP]
using compact_continuous_image[of s ?f] continuous_at_imp_continuous_on[of s ?f]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>continuous_on s ((*\<^sub>R) c); compact s\<rbrakk> \<Longrightarrow> compact ((*\<^sub>R) c ` s)
\<forall>x\<in>s. isCont ((*\<^sub>R) c) x \<Longrightarrow> continuous_on s ((*\<^sub>R) c)
goal (1 subgoal):
1. compact ((*\<^sub>R) c ` s)
[PROOF STEP]
using linear_continuous_at[OF *] assms
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>continuous_on s ((*\<^sub>R) c); compact s\<rbrakk> \<Longrightarrow> compact ((*\<^sub>R) c ` s)
\<forall>x\<in>s. isCont ((*\<^sub>R) c) x \<Longrightarrow> continuous_on s ((*\<^sub>R) c)
isCont ((*\<^sub>R) c) ?a
compact s
goal (1 subgoal):
1. compact ((*\<^sub>R) c ` s)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
compact ((*\<^sub>R) c ` s)
goal:
No subgoals!
[PROOF STEP]
qed |
(* Title: JinjaDCI/Compiler/J1WellForm.thy
Author: Tobias Nipkow, Susannah Mansky
Copyright 2003 Technische Universitaet Muenchen, 2019-20 UIUC
Based on the Jinja theory Compiler/J1WellForm.thy by Tobias Nipkow
*)
section \<open> Well-Formedness of Intermediate Language \<close>
theory J1WellForm
imports "../J/JWellForm" J1
begin
subsection "Well-Typedness"
type_synonym
env\<^sub>1 = "ty list" \<comment> \<open>type environment indexed by variable number\<close>
inductive
WT\<^sub>1 :: "[J\<^sub>1_prog,env\<^sub>1, expr\<^sub>1 , ty ] \<Rightarrow> bool"
("(_,_ \<turnstile>\<^sub>1/ _ :: _)" [51,51,51]50)
and WTs\<^sub>1 :: "[J\<^sub>1_prog,env\<^sub>1, expr\<^sub>1 list, ty list] \<Rightarrow> bool"
("(_,_ \<turnstile>\<^sub>1/ _ [::] _)" [51,51,51]50)
for P :: J\<^sub>1_prog
where
WTNew\<^sub>1:
"is_class P C \<Longrightarrow>
P,E \<turnstile>\<^sub>1 new C :: Class C"
| WTCast\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e :: Class D; is_class P C; P \<turnstile> C \<preceq>\<^sup>* D \<or> P \<turnstile> D \<preceq>\<^sup>* C \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 Cast C e :: Class C"
| WTVal\<^sub>1:
"typeof v = Some T \<Longrightarrow>
P,E \<turnstile>\<^sub>1 Val v :: T"
| WTVar\<^sub>1:
"\<lbrakk> E!i = T; i < size E \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 Var i :: T"
| WTBinOp\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e\<^sub>1 :: T\<^sub>1; P,E \<turnstile>\<^sub>1 e\<^sub>2 :: T\<^sub>2;
case bop of Eq \<Rightarrow> (P \<turnstile> T\<^sub>1 \<le> T\<^sub>2 \<or> P \<turnstile> T\<^sub>2 \<le> T\<^sub>1) \<and> T = Boolean
| Add \<Rightarrow> T\<^sub>1 = Integer \<and> T\<^sub>2 = Integer \<and> T = Integer \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 e\<^sub>1 \<guillemotleft>bop\<guillemotright> e\<^sub>2 :: T"
| WTLAss\<^sub>1:
"\<lbrakk> E!i = T; i < size E; P,E \<turnstile>\<^sub>1 e :: T'; P \<turnstile> T' \<le> T \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 i:=e :: Void"
| WTFAcc\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e :: Class C; P \<turnstile> C sees F,NonStatic:T in D \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 e\<bullet>F{D} :: T"
| WTSFAcc\<^sub>1:
"\<lbrakk> P \<turnstile> C sees F,Static:T in D \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 C\<bullet>\<^sub>sF{D} :: T"
| WTFAss\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e\<^sub>1 :: Class C; P \<turnstile> C sees F,NonStatic:T in D; P,E \<turnstile>\<^sub>1 e\<^sub>2 :: T'; P \<turnstile> T' \<le> T \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 e\<^sub>1\<bullet>F{D} := e\<^sub>2 :: Void"
| WTSFAss\<^sub>1:
"\<lbrakk> P \<turnstile> C sees F,Static:T in D; P,E \<turnstile>\<^sub>1 e\<^sub>2 :: T'; P \<turnstile> T' \<le> T \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 C\<bullet>\<^sub>sF{D}:=e\<^sub>2 :: Void"
| WTCall\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e :: Class C; P \<turnstile> C sees M,NonStatic:Ts' \<rightarrow> T = m in D;
P,E \<turnstile>\<^sub>1 es [::] Ts; P \<turnstile> Ts [\<le>] Ts' \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 e\<bullet>M(es) :: T"
| WTSCall\<^sub>1:
"\<lbrakk> P \<turnstile> C sees M,Static:Ts \<rightarrow> T = m in D;
P,E \<turnstile>\<^sub>1 es [::] Ts'; P \<turnstile> Ts' [\<le>] Ts; M \<noteq> clinit \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 C\<bullet>\<^sub>sM(es) :: T"
| WTBlock\<^sub>1:
"\<lbrakk> is_type P T; P,E@[T] \<turnstile>\<^sub>1 e::T' \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 {i:T; e} :: T'"
| WTSeq\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e\<^sub>1::T\<^sub>1; P,E \<turnstile>\<^sub>1 e\<^sub>2::T\<^sub>2 \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 e\<^sub>1;;e\<^sub>2 :: T\<^sub>2"
| WTCond\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e :: Boolean; P,E \<turnstile>\<^sub>1 e\<^sub>1::T\<^sub>1; P,E \<turnstile>\<^sub>1 e\<^sub>2::T\<^sub>2;
P \<turnstile> T\<^sub>1 \<le> T\<^sub>2 \<or> P \<turnstile> T\<^sub>2 \<le> T\<^sub>1; P \<turnstile> T\<^sub>1 \<le> T\<^sub>2 \<longrightarrow> T = T\<^sub>2; P \<turnstile> T\<^sub>2 \<le> T\<^sub>1 \<longrightarrow> T = T\<^sub>1 \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 if (e) e\<^sub>1 else e\<^sub>2 :: T"
| WTWhile\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e :: Boolean; P,E \<turnstile>\<^sub>1 c::T \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 while (e) c :: Void"
| WTThrow\<^sub>1:
"P,E \<turnstile>\<^sub>1 e :: Class C \<Longrightarrow>
P,E \<turnstile>\<^sub>1 throw e :: Void"
| WTTry\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e\<^sub>1 :: T; P,E@[Class C] \<turnstile>\<^sub>1 e\<^sub>2 :: T; is_class P C \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 try e\<^sub>1 catch(C i) e\<^sub>2 :: T"
| WTNil\<^sub>1:
"P,E \<turnstile>\<^sub>1 [] [::] []"
| WTCons\<^sub>1:
"\<lbrakk> P,E \<turnstile>\<^sub>1 e :: T; P,E \<turnstile>\<^sub>1 es [::] Ts \<rbrakk>
\<Longrightarrow> P,E \<turnstile>\<^sub>1 e#es [::] T#Ts"
(*<*)
declare WT\<^sub>1_WTs\<^sub>1.intros[intro!]
declare WTNil\<^sub>1[iff]
lemmas WT\<^sub>1_WTs\<^sub>1_induct = WT\<^sub>1_WTs\<^sub>1.induct [split_format (complete)]
and WT\<^sub>1_WTs\<^sub>1_inducts = WT\<^sub>1_WTs\<^sub>1.inducts [split_format (complete)]
inductive_cases eee[elim!]:
"P,E \<turnstile>\<^sub>1 Val v :: T"
"P,E \<turnstile>\<^sub>1 Var i :: T"
"P,E \<turnstile>\<^sub>1 Cast D e :: T"
"P,E \<turnstile>\<^sub>1 i:=e :: T"
"P,E \<turnstile>\<^sub>1 {i:U; e} :: T"
"P,E \<turnstile>\<^sub>1 e\<^sub>1;;e\<^sub>2 :: T"
"P,E \<turnstile>\<^sub>1 if (e) e\<^sub>1 else e\<^sub>2 :: T"
"P,E \<turnstile>\<^sub>1 while (e) c :: T"
"P,E \<turnstile>\<^sub>1 throw e :: T"
"P,E \<turnstile>\<^sub>1 try e\<^sub>1 catch(C i) e\<^sub>2 :: T"
"P,E \<turnstile>\<^sub>1 e\<bullet>F{D} :: T"
"P,E \<turnstile>\<^sub>1 C\<bullet>\<^sub>sF{D} :: T"
"P,E \<turnstile>\<^sub>1 e\<^sub>1\<bullet>F{D}:=e\<^sub>2 :: T"
"P,E \<turnstile>\<^sub>1 C\<bullet>\<^sub>sF{D}:=e\<^sub>2 :: T"
"P,E \<turnstile>\<^sub>1 e\<^sub>1 \<guillemotleft>bop\<guillemotright> e\<^sub>2 :: T"
"P,E \<turnstile>\<^sub>1 new C :: T"
"P,E \<turnstile>\<^sub>1 e\<bullet>M(es) :: T"
"P,E \<turnstile>\<^sub>1 C\<bullet>\<^sub>sM(es) :: T"
"P,E \<turnstile>\<^sub>1 [] [::] Ts"
"P,E \<turnstile>\<^sub>1 e#es [::] Ts"
(*>*)
lemma init_nWT\<^sub>1 [simp]:"\<not>P,E \<turnstile>\<^sub>1 INIT C (Cs,b) \<leftarrow> e :: T"
by(auto elim:WT\<^sub>1.cases)
lemma rinit_nWT\<^sub>1 [simp]:"\<not>P,E \<turnstile>\<^sub>1 RI(C,e);Cs \<leftarrow> e' :: T"
by(auto elim:WT\<^sub>1.cases)
lemma WTs\<^sub>1_same_size: "\<And>Ts. P,E \<turnstile>\<^sub>1 es [::] Ts \<Longrightarrow> size es = size Ts"
(*<*)by (induct es type:list) auto(*>*)
lemma WT\<^sub>1_unique:
"P,E \<turnstile>\<^sub>1 e :: T\<^sub>1 \<Longrightarrow> (\<And>T\<^sub>2. P,E \<turnstile>\<^sub>1 e :: T\<^sub>2 \<Longrightarrow> T\<^sub>1 = T\<^sub>2)" and
WTs\<^sub>1_unique: "P,E \<turnstile>\<^sub>1 es [::] Ts\<^sub>1 \<Longrightarrow> (\<And>Ts\<^sub>2. P,E \<turnstile>\<^sub>1 es [::] Ts\<^sub>2 \<Longrightarrow> Ts\<^sub>1 = Ts\<^sub>2)"
(*<*)
proof(induct rule:WT\<^sub>1_WTs\<^sub>1.inducts)
case WTVal\<^sub>1 then show ?case by clarsimp
next
case (WTBinOp\<^sub>1 E e\<^sub>1 T\<^sub>1 e\<^sub>2 T\<^sub>2 bop T)
then show ?case by(case_tac bop) force+
next
case WTFAcc\<^sub>1 then show ?case
by (blast dest:sees_field_idemp sees_field_fun)
next
case WTSFAcc\<^sub>1 then show ?case by (blast dest:sees_field_fun)
next
case WTSFAss\<^sub>1 then show ?case by (blast dest:sees_field_fun)
next
case WTCall\<^sub>1 then show ?case
by (blast dest:sees_method_idemp sees_method_fun)
next
case WTSCall\<^sub>1 then show ?case by (blast dest:sees_method_fun)
qed blast+
(*>*)
lemma assumes wf: "wf_prog p P"
shows WT\<^sub>1_is_type: "P,E \<turnstile>\<^sub>1 e :: T \<Longrightarrow> set E \<subseteq> types P \<Longrightarrow> is_type P T"
and "P,E \<turnstile>\<^sub>1 es [::] Ts \<Longrightarrow> True"
(*<*)
proof(induct rule:WT\<^sub>1_WTs\<^sub>1.inducts)
case WTVal\<^sub>1 then show ?case by (simp add:typeof_lit_is_type)
next
case WTVar\<^sub>1 then show ?case by (blast intro:nth_mem)
next
case WTBinOp\<^sub>1 then show ?case by (simp split:bop.splits)
next
case WTFAcc\<^sub>1 then show ?case
by (simp add:sees_field_is_type[OF _ wf])
next
case WTSFAcc\<^sub>1 then show ?case
by (simp add:sees_field_is_type[OF _ wf])
next
case WTCall\<^sub>1 then show ?case
by (fastforce dest!: sees_wf_mdecl[OF wf] simp:wf_mdecl_def)
next
case WTSCall\<^sub>1 then show ?case
by (fastforce dest!: sees_wf_mdecl[OF wf] simp:wf_mdecl_def)
next
case WTCond\<^sub>1 then show ?case by blast
qed simp+
(*>*)
lemma WT\<^sub>1_nsub_RI: "P,E \<turnstile>\<^sub>1 e :: T \<Longrightarrow> \<not>sub_RI e"
and WTs\<^sub>1_nsub_RIs: "P,E \<turnstile>\<^sub>1 es [::] Ts \<Longrightarrow> \<not>sub_RIs es"
proof(induct rule: WT\<^sub>1_WTs\<^sub>1.inducts) qed(simp_all)
subsection\<open> Runtime Well-Typedness \<close>
inductive
WTrt\<^sub>1 :: "J\<^sub>1_prog \<Rightarrow> heap \<Rightarrow> sheap \<Rightarrow> env\<^sub>1 \<Rightarrow> expr\<^sub>1 \<Rightarrow> ty \<Rightarrow> bool"
and WTrts\<^sub>1 :: "J\<^sub>1_prog \<Rightarrow> heap \<Rightarrow> sheap \<Rightarrow> env\<^sub>1 \<Rightarrow> expr\<^sub>1 list \<Rightarrow> ty list \<Rightarrow> bool"
and WTrt2\<^sub>1 :: "[J\<^sub>1_prog,env\<^sub>1,heap,sheap,expr\<^sub>1,ty] \<Rightarrow> bool"
("_,_,_,_ \<turnstile>\<^sub>1 _ : _" [51,51,51,51]50)
and WTrts2\<^sub>1 :: "[J\<^sub>1_prog,env\<^sub>1,heap,sheap,expr\<^sub>1 list, ty list] \<Rightarrow> bool"
("_,_,_,_ \<turnstile>\<^sub>1 _ [:] _" [51,51,51,51]50)
for P :: J\<^sub>1_prog and h :: heap and sh :: sheap
where
"P,E,h,sh \<turnstile>\<^sub>1 e : T \<equiv> WTrt\<^sub>1 P h sh E e T"
| "P,E,h,sh \<turnstile>\<^sub>1 es[:]Ts \<equiv> WTrts\<^sub>1 P h sh E es Ts"
| WTrtNew\<^sub>1:
"is_class P C \<Longrightarrow>
P,E,h,sh \<turnstile>\<^sub>1 new C : Class C"
| WTrtCast\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : T; is_refT T; is_class P C \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 Cast C e : Class C"
| WTrtVal\<^sub>1:
"typeof\<^bsub>h\<^esub> v = Some T \<Longrightarrow>
P,E,h,sh \<turnstile>\<^sub>1 Val v : T"
| WTrtVar\<^sub>1:
"\<lbrakk> E!i = T; i < size E \<rbrakk> \<Longrightarrow>
P,E,h,sh \<turnstile>\<^sub>1 Var i : T"
| WTrtBinOpEq\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1 : T\<^sub>1; P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>2 : T\<^sub>2 \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1 \<guillemotleft>Eq\<guillemotright> e\<^sub>2 : Boolean"
| WTrtBinOpAdd\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1 : Integer; P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>2 : Integer \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1 \<guillemotleft>Add\<guillemotright> e\<^sub>2 : Integer"
| WTrtLAss\<^sub>1:
"\<lbrakk> E!i = T; i < size E; P,E,h,sh \<turnstile>\<^sub>1 e : T'; P \<turnstile> T' \<le> T \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 i:=e : Void"
| WTrtFAcc\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : Class C; P \<turnstile> C has F,NonStatic:T in D \<rbrakk> \<Longrightarrow>
P,E,h,sh \<turnstile>\<^sub>1 e\<bullet>F{D} : T"
| WTrtFAccNT\<^sub>1:
"P,E,h,sh \<turnstile>\<^sub>1 e : NT \<Longrightarrow>
P,E,h,sh \<turnstile>\<^sub>1 e\<bullet>F{D} : T"
| WTrtSFAcc\<^sub>1:
"\<lbrakk> P \<turnstile> C has F,Static:T in D \<rbrakk> \<Longrightarrow>
P,E,h,sh \<turnstile>\<^sub>1 C\<bullet>\<^sub>sF{D} : T"
| WTrtFAss\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1 : Class C; P \<turnstile> C has F,NonStatic:T in D; P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>2 : T\<^sub>2; P \<turnstile> T\<^sub>2 \<le> T \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1\<bullet>F{D}:=e\<^sub>2 : Void"
| WTrtFAssNT\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1:NT; P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>2 : T\<^sub>2 \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1\<bullet>F{D}:=e\<^sub>2 : Void"
| WTrtSFAss\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>2 : T\<^sub>2; P \<turnstile> C has F,Static:T in D; P \<turnstile> T\<^sub>2 \<le> T \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 C\<bullet>\<^sub>sF{D}:=e\<^sub>2 : Void"
| WTrtCall\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : Class C; P \<turnstile> C sees M,NonStatic:Ts \<rightarrow> T = m in D;
P,E,h,sh \<turnstile>\<^sub>1 es [:] Ts'; P \<turnstile> Ts' [\<le>] Ts \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e\<bullet>M(es) : T"
| WTrtCallNT\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : NT; P,E,h,sh \<turnstile>\<^sub>1 es [:] Ts \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e\<bullet>M(es) : T"
| WTrtSCall\<^sub>1:
"\<lbrakk> P \<turnstile> C sees M,Static:Ts \<rightarrow> T = m in D;
P,E,h,sh \<turnstile>\<^sub>1 es [:] Ts'; P \<turnstile> Ts' [\<le>] Ts;
M = clinit \<longrightarrow> sh D = \<lfloor>(sfs,Processing)\<rfloor> \<and> es = map Val vs \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 C\<bullet>\<^sub>sM(es) : T"
| WTrtBlock\<^sub>1:
"P,E@[T],h,sh \<turnstile>\<^sub>1 e : T' \<Longrightarrow>
P,E,h,sh \<turnstile>\<^sub>1 {i:T; e} : T'"
| WTrtSeq\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1:T\<^sub>1; P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>2:T\<^sub>2 \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1;;e\<^sub>2 : T\<^sub>2"
| WTrtCond\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : Boolean; P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1:T\<^sub>1; P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>2:T\<^sub>2;
P \<turnstile> T\<^sub>1 \<le> T\<^sub>2 \<or> P \<turnstile> T\<^sub>2 \<le> T\<^sub>1; P \<turnstile> T\<^sub>1 \<le> T\<^sub>2 \<longrightarrow> T = T\<^sub>2; P \<turnstile> T\<^sub>2 \<le> T\<^sub>1 \<longrightarrow> T = T\<^sub>1 \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 if (e) e\<^sub>1 else e\<^sub>2 : T"
| WTrtWhile\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : Boolean; P,E,h,sh \<turnstile>\<^sub>1 c:T \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 while(e) c : Void"
| WTrtThrow\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : T\<^sub>r; is_refT T\<^sub>r \<rbrakk> \<Longrightarrow>
P,E,h,sh \<turnstile>\<^sub>1 throw e : T"
| WTrtTry\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1 : T\<^sub>1; P,E@[Class C],h,sh \<turnstile>\<^sub>1 e\<^sub>2 : T\<^sub>2; P \<turnstile> T\<^sub>1 \<le> T\<^sub>2 \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 try e\<^sub>1 catch(C i) e\<^sub>2 : T\<^sub>2"
| WTrtInit\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : T; \<forall>C' \<in> set (C#Cs). is_class P C'; \<not>sub_RI e;
\<forall>C' \<in> set (tl Cs). \<exists>sfs. sh C' = \<lfloor>(sfs,Processing)\<rfloor>;
b \<longrightarrow> (\<forall>C' \<in> set Cs. \<exists>sfs. sh C' = \<lfloor>(sfs,Processing)\<rfloor>);
distinct Cs; supercls_lst P Cs \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 INIT C (Cs, b) \<leftarrow> e : T"
| WTrtRI\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : T; P,E,h,sh \<turnstile>\<^sub>1 e' : T'; \<forall>C' \<in> set (C#Cs). is_class P C'; \<not>sub_RI e';
\<forall>C' \<in> set (C#Cs). not_init C' e;
\<forall>C' \<in> set Cs. \<exists>sfs. sh C' = \<lfloor>(sfs,Processing)\<rfloor>;
\<exists>sfs. sh C = \<lfloor>(sfs, Processing)\<rfloor> \<or> (sh C = \<lfloor>(sfs, Error)\<rfloor> \<and> e = THROW NoClassDefFoundError);
distinct (C#Cs); supercls_lst P (C#Cs) \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 RI(C, e);Cs \<leftarrow> e' : T'"
\<comment> \<open>well-typed expression lists\<close>
| WTrtNil\<^sub>1:
"P,E,h,sh \<turnstile>\<^sub>1 [] [:] []"
| WTrtCons\<^sub>1:
"\<lbrakk> P,E,h,sh \<turnstile>\<^sub>1 e : T; P,E,h,sh \<turnstile>\<^sub>1 es [:] Ts \<rbrakk>
\<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e#es [:] T#Ts"
(*<*)
declare WTrt\<^sub>1_WTrts\<^sub>1.intros[intro!] WTrtNil\<^sub>1[iff]
declare
WTrtFAcc\<^sub>1[rule del] WTrtFAccNT\<^sub>1[rule del] WTrtSFAcc\<^sub>1[rule del]
WTrtFAss\<^sub>1[rule del] WTrtFAssNT\<^sub>1[rule del] WTrtSFAss\<^sub>1[rule del]
WTrtCall\<^sub>1[rule del] WTrtCallNT\<^sub>1[rule del] WTrtSCall\<^sub>1[rule del]
lemmas WTrt\<^sub>1_induct = WTrt\<^sub>1_WTrts\<^sub>1.induct [split_format (complete)]
and WTrt\<^sub>1_inducts = WTrt\<^sub>1_WTrts\<^sub>1.inducts [split_format (complete)]
(*>*)
(*<*)
inductive_cases WTrt\<^sub>1_elim_cases[elim!]:
"P,E,h,sh \<turnstile>\<^sub>1 Val v : T"
"P,E,h,sh \<turnstile>\<^sub>1 Var i : T"
"P,E,h,sh \<turnstile>\<^sub>1 v :=e : T"
"P,E,h,sh \<turnstile>\<^sub>1 {i:U; e} : T"
"P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1;;e\<^sub>2 : T\<^sub>2"
"P,E,h,sh \<turnstile>\<^sub>1 if (e) e\<^sub>1 else e\<^sub>2 : T"
"P,E,h,sh \<turnstile>\<^sub>1 while(e) c : T"
"P,E,h,sh \<turnstile>\<^sub>1 throw e : T"
"P,E,h,sh \<turnstile>\<^sub>1 try e\<^sub>1 catch(C V) e\<^sub>2 : T"
"P,E,h,sh \<turnstile>\<^sub>1 Cast D e : T"
"P,E,h,sh \<turnstile>\<^sub>1 e\<bullet>F{D} : T"
"P,E,h,sh \<turnstile>\<^sub>1 C\<bullet>\<^sub>sF{D} : T"
"P,E,h,sh \<turnstile>\<^sub>1 e\<bullet>F{D} := v : T"
"P,E,h,sh \<turnstile>\<^sub>1 C\<bullet>\<^sub>sF{D} := v : T"
"P,E,h,sh \<turnstile>\<^sub>1 e\<^sub>1 \<guillemotleft>bop\<guillemotright> e\<^sub>2 : T"
"P,E,h,sh \<turnstile>\<^sub>1 new C : T"
"P,E,h,sh \<turnstile>\<^sub>1 e\<bullet>M{D}(es) : T"
"P,E,h,sh \<turnstile>\<^sub>1 C\<bullet>\<^sub>sM{D}(es) : T"
"P,E,h,sh \<turnstile>\<^sub>1 INIT C (Cs,b) \<leftarrow> e : T"
"P,E,h,sh \<turnstile>\<^sub>1 RI(C,e);Cs \<leftarrow> e' : T"
"P,E,h,sh \<turnstile>\<^sub>1 [] [:] Ts"
"P,E,h,sh \<turnstile>\<^sub>1 e#es [:] Ts"
(*>*)
lemma WT\<^sub>1_implies_WTrt\<^sub>1: "P,E \<turnstile>\<^sub>1 e :: T \<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 e : T"
and WTs\<^sub>1_implies_WTrts\<^sub>1: "P,E \<turnstile>\<^sub>1 es [::] Ts \<Longrightarrow> P,E,h,sh \<turnstile>\<^sub>1 es [:] Ts"
(*<*)
proof(induct rule: WT\<^sub>1_WTs\<^sub>1_inducts)
case WTVal\<^sub>1 then show ?case by (fastforce dest:typeof_lit_typeof)
next
case (WTBinOp\<^sub>1 E e\<^sub>1 T\<^sub>1 e\<^sub>2 T\<^sub>2 bop T)
then show ?case by (case_tac bop) fastforce+
next
case WTFAcc\<^sub>1 then show ?case
by (fastforce simp: WTrtFAcc\<^sub>1 has_visible_field)
next
case WTSFAcc\<^sub>1 then show ?case
by (fastforce simp: WTrtSFAcc\<^sub>1 has_visible_field)
next
case WTFAss\<^sub>1 then show ?case by (meson WTrtFAss\<^sub>1 has_visible_field)
next
case WTSFAss\<^sub>1 then show ?case by (meson WTrtSFAss\<^sub>1 has_visible_field)
next
case WTCall\<^sub>1 then show ?case by (fastforce simp: WTrtCall\<^sub>1)
next
case WTSCall\<^sub>1 then show ?case by (fastforce simp: WTrtSCall\<^sub>1)
qed fastforce+
(*>*)
subsection\<open> Well-formedness\<close>
\<comment> \<open>Indices in blocks increase by 1\<close>
primrec \<B> :: "expr\<^sub>1 \<Rightarrow> nat \<Rightarrow> bool"
and \<B>s :: "expr\<^sub>1 list \<Rightarrow> nat \<Rightarrow> bool" where
"\<B> (new C) i = True" |
"\<B> (Cast C e) i = \<B> e i" |
"\<B> (Val v) i = True" |
"\<B> (e\<^sub>1 \<guillemotleft>bop\<guillemotright> e\<^sub>2) i = (\<B> e\<^sub>1 i \<and> \<B> e\<^sub>2 i)" |
"\<B> (Var j) i = True" |
"\<B> (e\<bullet>F{D}) i = \<B> e i" |
"\<B> (C\<bullet>\<^sub>sF{D}) i = True" |
"\<B> (j:=e) i = \<B> e i" |
"\<B> (e\<^sub>1\<bullet>F{D} := e\<^sub>2) i = (\<B> e\<^sub>1 i \<and> \<B> e\<^sub>2 i)" |
"\<B> (C\<bullet>\<^sub>sF{D} := e\<^sub>2) i = \<B> e\<^sub>2 i" |
"\<B> (e\<bullet>M(es)) i = (\<B> e i \<and> \<B>s es i)" |
"\<B> (C\<bullet>\<^sub>sM(es)) i = \<B>s es i" |
"\<B> ({j:T ; e}) i = (i = j \<and> \<B> e (i+1))" |
"\<B> (e\<^sub>1;;e\<^sub>2) i = (\<B> e\<^sub>1 i \<and> \<B> e\<^sub>2 i)" |
"\<B> (if (e) e\<^sub>1 else e\<^sub>2) i = (\<B> e i \<and> \<B> e\<^sub>1 i \<and> \<B> e\<^sub>2 i)" |
"\<B> (throw e) i = \<B> e i" |
"\<B> (while (e) c) i = (\<B> e i \<and> \<B> c i)" |
"\<B> (try e\<^sub>1 catch(C j) e\<^sub>2) i = (\<B> e\<^sub>1 i \<and> i=j \<and> \<B> e\<^sub>2 (i+1))" |
"\<B> (INIT C (Cs,b) \<leftarrow> e) i = \<B> e i" |
"\<B> (RI(C,e);Cs \<leftarrow> e') i = (\<B> e i \<and> \<B> e' i)" |
"\<B>s [] i = True" |
"\<B>s (e#es) i = (\<B> e i \<and> \<B>s es i)"
definition wf_J\<^sub>1_mdecl :: "J\<^sub>1_prog \<Rightarrow> cname \<Rightarrow> expr\<^sub>1 mdecl \<Rightarrow> bool"
where
"wf_J\<^sub>1_mdecl P C \<equiv> \<lambda>(M,b,Ts,T,body).
\<not>sub_RI body \<and>
(case b of
NonStatic \<Rightarrow>
(\<exists>T'. P,Class C#Ts \<turnstile>\<^sub>1 body :: T' \<and> P \<turnstile> T' \<le> T) \<and>
\<D> body \<lfloor>{..size Ts}\<rfloor> \<and> \<B> body (size Ts + 1)
| Static \<Rightarrow> (\<exists>T'. P,Ts \<turnstile>\<^sub>1 body :: T' \<and> P \<turnstile> T' \<le> T) \<and>
\<D> body \<lfloor>{..<size Ts}\<rfloor> \<and> \<B> body (size Ts))"
lemma wf_J\<^sub>1_mdecl_NonStatic[simp]:
"wf_J\<^sub>1_mdecl P C (M,NonStatic,Ts,T,body) \<equiv>
(\<not>sub_RI body \<and>
(\<exists>T'. P,Class C#Ts \<turnstile>\<^sub>1 body :: T' \<and> P \<turnstile> T' \<le> T) \<and>
\<D> body \<lfloor>{..size Ts}\<rfloor> \<and> \<B> body (size Ts + 1))"
(*<*)by (simp add:wf_J\<^sub>1_mdecl_def)(*>*)
lemma wf_J\<^sub>1_mdecl_Static[simp]:
"wf_J\<^sub>1_mdecl P C (M,Static,Ts,T,body) \<equiv>
(\<not>sub_RI body \<and>
(\<exists>T'. P,Ts \<turnstile>\<^sub>1 body :: T' \<and> P \<turnstile> T' \<le> T) \<and>
\<D> body \<lfloor>{..<size Ts}\<rfloor> \<and> \<B> body (size Ts))"
(*<*)by (simp add:wf_J\<^sub>1_mdecl_def)(*>*)
abbreviation "wf_J\<^sub>1_prog == wf_prog wf_J\<^sub>1_mdecl"
lemma sees_wf\<^sub>1_nsub_RI:
assumes wf: "wf_J\<^sub>1_prog P" and cM: "P \<turnstile> C sees M,b : Ts\<rightarrow>T = body in D"
shows "\<not>sub_RI body"
using sees_wf_mdecl[OF wf cM] by(simp add: wf_J\<^sub>1_mdecl_def wf_mdecl_def)
lemma wf\<^sub>1_types_clinit:
assumes wf:"wf_prog wf_md P" and ex: "class P C = Some a" and proc: "sh C = \<lfloor>(sfs, Processing)\<rfloor>"
shows "P,E,h,sh \<turnstile>\<^sub>1 C\<bullet>\<^sub>sclinit([]) : Void"
proof -
from ex obtain D fs ms where "a = (D,fs,ms)" by(cases a)
then have sP: "(C, D, fs, ms) \<in> set P" using ex map_of_SomeD[of P C a] by(simp add: class_def)
then have "wf_clinit ms" using assms by(unfold wf_prog_def wf_cdecl_def, auto)
then obtain m where sm: "(clinit, Static, [], Void, m) \<in> set ms"
by(unfold wf_clinit_def) auto
then have "P \<turnstile> C sees clinit,Static:[] \<rightarrow> Void = m in C"
using mdecl_visible[OF wf sP sm] by simp
then show ?thesis using WTrtSCall\<^sub>1 proc by blast
qed
lemma assumes wf: "wf_J\<^sub>1_prog P"
shows eval\<^sub>1_proc_pres: "P \<turnstile>\<^sub>1 \<langle>e,(h,l,sh)\<rangle> \<Rightarrow> \<langle>e',(h',l',sh')\<rangle>
\<Longrightarrow> not_init C e \<Longrightarrow> \<exists>sfs. sh C = \<lfloor>(sfs, Processing)\<rfloor> \<Longrightarrow> \<exists>sfs'. sh' C = \<lfloor>(sfs', Processing)\<rfloor>"
and evals\<^sub>1_proc_pres: "P \<turnstile>\<^sub>1 \<langle>es,(h,l,sh)\<rangle> [\<Rightarrow>] \<langle>es',(h',l',sh')\<rangle>
\<Longrightarrow> not_inits C es \<Longrightarrow> \<exists>sfs. sh C = \<lfloor>(sfs, Processing)\<rfloor> \<Longrightarrow> \<exists>sfs'. sh' C = \<lfloor>(sfs', Processing)\<rfloor>"
(*<*)
proof(induct rule:eval\<^sub>1_evals\<^sub>1_inducts)
case Call\<^sub>1 then show ?case using sees_wf\<^sub>1_nsub_RI[OF wf Call\<^sub>1.hyps(6)] nsub_RI_not_init by auto
next
case (SCallInit\<^sub>1 ps h l sh vs h\<^sub>1 l\<^sub>1 sh\<^sub>1 C' M Ts T body D v' h\<^sub>2 l\<^sub>2 sh\<^sub>2 l\<^sub>2' e' h\<^sub>3 l\<^sub>3 sh\<^sub>3)
then show ?case
using SCallInit\<^sub>1 sees_wf\<^sub>1_nsub_RI[OF wf SCallInit\<^sub>1.hyps(3)] nsub_RI_not_init[of body] by auto
next
case SCall\<^sub>1 then show ?case using sees_wf\<^sub>1_nsub_RI[OF wf SCall\<^sub>1.hyps(3)] nsub_RI_not_init by auto
next
case (InitNone\<^sub>1 sh C1 C' Cs h l e' a a b) then show ?case by(cases "C = C1") auto
next
case (InitDone\<^sub>1 sh C sfs C' Cs h l e' a a b) then show ?case by(cases Cs, auto)
next
case (InitProcessing\<^sub>1 sh C sfs C' Cs h l e' a a b) then show ?case by(cases Cs, auto)
next
case (InitError\<^sub>1 sh C1 sfs Cs h l e' a a b C') then show ?case by(cases "C = C1") auto
next
case (InitObject\<^sub>1 sh C1 sfs sh' C' Cs h l e' a a b) then show ?case by(cases "C = C1") auto
next
case (InitNonObject\<^sub>1 sh C1 sfs D a b sh' C' Cs h l e' a a b)
then show ?case by(cases "C = C1") auto
next
case (RInit\<^sub>1 e a a b v h' l' sh' C sfs i sh'' C' Cs e\<^sub>1 a a b) then show ?case by(cases Cs, auto)
next
case (RInitInitFail\<^sub>1 e h l sh a h' l' sh' C1 sfs i sh'' D Cs e\<^sub>1 h1 l1 sh1)
then show ?case using eval\<^sub>1_final by fastforce
qed(auto)
(*>*)
lemma clinit\<^sub>1_proc_pres:
"\<lbrakk> wf_J\<^sub>1_prog P; P \<turnstile>\<^sub>1 \<langle>C\<^sub>0\<bullet>\<^sub>sclinit([]),(h,l,sh)\<rangle> \<Rightarrow> \<langle>e',(h',l',sh')\<rangle>;
sh C' = \<lfloor>(sfs,Processing)\<rfloor> \<rbrakk>
\<Longrightarrow> \<exists>sfs. sh' C' = \<lfloor>(sfs,Processing)\<rfloor>"
by(auto dest: eval\<^sub>1_proc_pres)
end
|
(* begin hide *)
From mathcomp Require Import all_ssreflect.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Require Import String.
Require Import QString.
Require Import Value.
Notation Name := string.
(* end hide *)
(**
#<div class="jumbotron">
<div class="container">
<h1 class="display-4">GraphQL Graph</h1>
<p class="lead">
This file contains the basic building blocks to define a GraphQL Graph.
</p>
</div>
</div>#
*)
Section GraphQLGraph.
Variables (Scalar : eqType).
(** *** Label
----
It corresponds to an edge's label or a property's key.
**)
Record label := Label {
lname : string;
args : seq (string * @Value Scalar)
}.
Coercion label_of_label (f : label) := let: Label l _ := f in l.
Coercion fun_of_label (f : label) := let: Label _ a := f in a.
(** *** Node
----
It corresponds to a node in a graph.
It contains its type and its properties (as a list of key-value pairs).
*)
Record node := Node {
ntype : Name;
nprops : seq (label * @Value Scalar)
}.
(** *** GraphQL Graph
----
The collection of edges and a root node.
*)
Record graphQLGraph := GraphQLGraph {
root : node;
edges : seq (node * label * node)
}.
Coercion edges_of_graph (g : graphQLGraph) := g.(edges).
End GraphQLGraph.
(** ---- *)
(* begin hide *)
Arguments label [Scalar].
Arguments node [Scalar].
Arguments graphQLGraph [Scalar].
(* end hide *)
Delimit Scope graph_scope with graph.
Open Scope graph_scope.
(* Note that → is the actual symbol here, not pretty printing of '->' *)
Notation "src '⟜' lab '→' tgt" := (src, lab, tgt) (at level 20) : graph_scope.
(**
We also establish that all these structures have a decidable procedure for equality but
we omit them here to unclutter the doc (they may still be seen in the source code).
*)
(* begin hide *)
Section Equality.
(** * Equality
In this section we establish that the different components of a graph
have a decidable procedure to establish equality (they belong to the eqType).
This is basically done by establishing isomorphisms between the different structures
to others that already have a decidable procedure.
*)
Variable (Scalar : eqType).
(** Labels can be transformed into pairs *)
Definition prod_of_label (f : @label Scalar) := let: Label l a := f in (l, a).
Definition label_of_prod (p : string * seq (string * @Value Scalar)) := let: (l, a) := p in Label l a.
(** Cancelation lemma for label *)
Lemma can_label_of_prod : cancel prod_of_label label_of_prod.
Proof. by case. Qed.
(** Declaring labels in eqType *)
Canonical label_eqType := EqType label (CanEqMixin can_label_of_prod).
(** Nodes can be transformed into pairs *)
Definition prod_of_node (n : @node Scalar) := let: Node t f := n in (t, f).
Definition node_of_prod (p : string * seq (label * @Value Scalar)) :=
let: (t, f) := p in Node t f.
(** Cancelation lemma for a node **)
Definition prod_of_nodeK : cancel prod_of_node node_of_prod.
Proof. by case. Qed.
(** Declaring nodes in eqType *)
Canonical node_eqType := EqType node (CanEqMixin prod_of_nodeK).
(** Graphs can be transformed into pairs *)
Definition prod_of_graph (g : @graphQLGraph Scalar) := let: GraphQLGraph r e := g in (r, e).
Definition graph_of_prod (p : node * seq (node * label * node)) :=
let: (r, e) := p in @GraphQLGraph Scalar r e.
(** Cancelation lemma for a graph **)
Definition prod_of_graphK : cancel prod_of_graph graph_of_prod.
Proof. by case. Qed.
(** Declaring graphs in eqType *)
Canonical graph_eqType := EqType graphQLGraph (CanEqMixin prod_of_graphK).
End Equality.
(* end hide *)
(** ---- *)
(**
#<div>
<a href='GraphCoQL.SchemaWellFormedness.html' class="btn btn-light" role='button'>Previous ← SchemaWellFormedness</a>
<a href='GraphCoQL.GraphConformance.html' class="btn btn-info" role='button'>Continue Reading → Graph Conformance</a>
</div>#
*) |
# Parameters
THETA <- c(10L, 90L) # Fundamental Biodiversity Number. May et al. 2015 PRSB fit thetas of 25 to 90 to BCI data, depending on which pattern was fit. Tittensor & Worm 2016 GEB use 10, citing Hubbell. These values span a range.
M <- c(0.01, 0.1, 0.9) # migration rate (probability of new individual coming from regional pool)
N <- c(20L, 200L) # number of individuals in local community
NSAMPS <- 600L # Number of sampled timesteps
STEPL <- 0.5 # Sampling frequency multiplier (multiplies by N). Scaled so that, on average, half the individuals die between every sample
SEED <- 42L # Seed
NBURN <- 100L # Burn-in length multiplier (multiplies by N)
parameter_table <- expand.grid(THETA = THETA, M = M, N = N, NSAMPS = NSAMPS, STEPL=STEPL, SEED=SEED, NBURN=NBURN)
parameter_table$parameter_id <- seq_len(nrow(parameter_table))
|
InputFolder = './Images/NucleiDAB/';
OutputFolder = './Results/Images/NucleiDAB3/';
Rescale = 1;
Dilate = 2;
Fill = 1;
Lbl = 1;
@iA = '*_C0000*.tif'; % Image filter
@iL = '*_C0001*.tif'; % Image annotations filter
@fxg_sLocMax [iA] > [iS];
params.GRad = 7;
params.LocalMaxBox = [5 5];
params.ThrLocalMax = 127;
params.Polarity = -1;
/endf
@fxgs_lPatchClassify [iA, iS, iL] > [C];
params.FeatType = 'RadFeat';
params.ScanRad = 21;
params.ScanStep = 1;
params.NAngles = 8;
params.ClassifierType = 'SVM';
params.ClassifierFile = './Classifiers/ClassifierDABNuc_RadSVM.mat';
params.ExportAnnotations = './Classifiers/Annotation_DABNuc_RadSVM.tif';
/endf
/show iA > C;
/keep C > tif; |
*DECK SVOUT
SUBROUTINE SVOUT (N, SX, IFMT, IDIGIT)
C***BEGIN PROLOGUE SVOUT
C***SUBSIDIARY
C***PURPOSE Subsidiary to SPLP
C***LIBRARY SLATEC
C***TYPE SINGLE PRECISION (SVOUT-S, DVOUT-D)
C***AUTHOR (UNKNOWN)
C***DESCRIPTION
C
C SINGLE PRECISION VECTOR OUTPUT ROUTINE.
C
C INPUT..
C
C N,SX(*) PRINT THE SINGLE PRECISION ARRAY SX(I),I=1,...,N, ON
C OUTPUT UNIT LOUT. THE HEADING IN THE FORTRAN FORMAT
C STATEMENT IFMT(*), DESCRIBED BELOW, IS PRINTED AS A FIRST
C STEP. THE COMPONENTS SX(I) ARE INDEXED, ON OUTPUT,
C IN A PLEASANT FORMAT.
C IFMT(*) A FORTRAN FORMAT STATEMENT. THIS IS PRINTED ON OUTPUT
C UNIT LOUT WITH THE VARIABLE FORMAT FORTRAN STATEMENT
C WRITE(LOUT,IFMT)
C IDIGIT PRINT AT LEAST ABS(IDIGIT) DECIMAL DIGITS PER NUMBER.
C THE SUBPROGRAM WILL CHOOSE THAT INTEGER 4,6,10 OR 14
C WHICH WILL PRINT AT LEAST ABS(IDIGIT) NUMBER OF
C PLACES. IF IDIGIT.LT.0, 72 PRINTING COLUMNS ARE UTILIZED
C TO WRITE EACH LINE OF OUTPUT OF THE ARRAY SX(*). (THIS
C CAN BE USED ON MOST TIME-SHARING TERMINALS). IF
C IDIGIT.GE.0, 133 PRINTING COLUMNS ARE UTILIZED. (THIS CAN
C BE USED ON MOST LINE PRINTERS).
C
C EXAMPLE..
C
C PRINT AN ARRAY CALLED (COSTS OF PURCHASES) OF LENGTH 100 SHOWING
C 6 DECIMAL DIGITS PER NUMBER. THE USER IS RUNNING ON A TIME-SHARING
C SYSTEM WITH A 72 COLUMN OUTPUT DEVICE.
C
C DIMENSION COSTS(100)
C N = 100
C IDIGIT = -6
C CALL SVOUT(N,COSTS,'(''1COSTS OF PURCHASES'')',IDIGIT)
C
C***SEE ALSO SPLP
C***ROUTINES CALLED I1MACH
C***REVISION HISTORY (YYMMDD)
C 811215 DATE WRITTEN
C 890531 Changed all specific intrinsics to generic. (WRB)
C 891107 Added comma after 1P edit descriptor in FORMAT
C statements. (WRB)
C 891214 Prologue converted to Version 4.0 format. (BAB)
C 900328 Added TYPE section. (WRB)
C***END PROLOGUE SVOUT
DIMENSION SX(*)
CHARACTER IFMT*(*)
C
C GET THE UNIT NUMBER WHERE OUTPUT WILL BE WRITTEN.
C***FIRST EXECUTABLE STATEMENT SVOUT
J=2
LOUT=I1MACH(J)
WRITE(LOUT,IFMT)
IF(N.LE.0) RETURN
NDIGIT = IDIGIT
IF(IDIGIT.EQ.0) NDIGIT = 4
IF(IDIGIT.GE.0) GO TO 80
C
NDIGIT = -IDIGIT
IF(NDIGIT.GT.4) GO TO 20
C
DO 10 K1=1,N,5
K2 = MIN(N,K1+4)
WRITE(LOUT,1000) K1,K2,(SX(I),I=K1,K2)
10 CONTINUE
RETURN
C
20 CONTINUE
IF(NDIGIT.GT.6) GO TO 40
C
DO 30 K1=1,N,4
K2 = MIN(N,K1+3)
WRITE(LOUT,1001) K1,K2,(SX(I),I=K1,K2)
30 CONTINUE
RETURN
C
40 CONTINUE
IF(NDIGIT.GT.10) GO TO 60
C
DO 50 K1=1,N,3
K2=MIN(N,K1+2)
WRITE(LOUT,1002) K1,K2,(SX(I),I=K1,K2)
50 CONTINUE
RETURN
C
60 CONTINUE
DO 70 K1=1,N,2
K2 = MIN(N,K1+1)
WRITE(LOUT,1003) K1,K2,(SX(I),I=K1,K2)
70 CONTINUE
RETURN
C
80 CONTINUE
IF(NDIGIT.GT.4) GO TO 100
C
DO 90 K1=1,N,10
K2 = MIN(N,K1+9)
WRITE(LOUT,1000) K1,K2,(SX(I),I=K1,K2)
90 CONTINUE
RETURN
C
100 CONTINUE
IF(NDIGIT.GT.6) GO TO 120
C
DO 110 K1=1,N,8
K2 = MIN(N,K1+7)
WRITE(LOUT,1001) K1,K2,(SX(I),I=K1,K2)
110 CONTINUE
RETURN
C
120 CONTINUE
IF(NDIGIT.GT.10) GO TO 140
C
DO 130 K1=1,N,6
K2 = MIN(N,K1+5)
WRITE(LOUT,1002) K1,K2,(SX(I),I=K1,K2)
130 CONTINUE
RETURN
C
140 CONTINUE
DO 150 K1=1,N,5
K2 = MIN(N,K1+4)
WRITE(LOUT,1003) K1,K2,(SX(I),I=K1,K2)
150 CONTINUE
RETURN
1000 FORMAT(1X,I4,' - ',I4,1P,10E12.3)
1001 FORMAT(1X,I4,' - ',I4,1X,1P,8E14.5)
1002 FORMAT(1X,I4,' - ',I4,1X,1P,6E18.9)
1003 FORMAT(1X,I4,' - ',I4,1X,1P,5E24.13)
END
|
If $f$ is a function defined on a set $S$ such that no point of $S$ is a limit point of $S$, then $f$ is continuous on $S$ (every point of $S$ is isolated, so the $\varepsilon$-$\delta$ condition holds trivially at each point). |
{-# OPTIONS --without-K --safe #-}
module Data.Bool where
open import Data.Bool.Base public
open import Data.Bool.Truth public
|
#ifndef HYBRID_ASTAR_TEST_SUBSCRIBER_MAP_SUBSCRIBER_H_PP
#define HYBRID_ASTAR_TEST_SUBSCRIBER_MAP_SUBSCRIBER_H_PP
#include <ros/ros.h>
#include <nav_msgs/OccupancyGrid.h>
#include <costmap_2d/costmap_2d_ros.h>
#include <costmap_2d/costmap_2d.h>
#include <tf2_ros/transform_listener.h>
#include <tf2/convert.h>
#include <tf2/utils.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>
#include <thread>
#include <boost/bind.hpp>
#include <std_msgs/Bool.h>
class MapSubscriber {
public:
MapSubscriber(ros::NodeHandle& nh, const std::string& map_type);
~MapSubscriber();
public:
void GetCostMap();
private:
void ClearCostMap(const std_msgs::Bool::ConstPtr &msg);
void CostMapThread();
private:
costmap_2d::Costmap2DROS *costmap_ros_;
ros::Subscriber clear_costmap_sub_;
ros::NodeHandle nh_;
tf2_ros::Buffer *buffer;
tf2_ros::TransformListener *tf;
std::string map_type_;
};
#endif |
c
c -----------------------------------------------------
c
subroutine valout (mgrid, lst, lend, time, nvar)
c
implicit double precision (a-h,o-z)
logical graf
parameter (maxgr = 192, maxlv=12)
common /nodal/ rnode(12,maxgr),node(17,maxgr),lstart(maxlv),
* newstl(maxlv),
* listsp(maxlv),intrat(maxlv),tol,bzonex,bzoney,mstart,ndfree,
* lfine,iorder,mxnest,kcheck,lwidth,
* maxcl, graf, lhead
include "calloc.i"
common /userdt/ cfl,gamma,gamma1,xprob,yprob,alpha,Re,iprob,
. ismp,gradThreshold
logical flag
c
c valout = graphics output of soln values for contour or surface plots.
c if mgrid <> 0, output only that grid, (should have
c lst = lend here)
c
call basic (time, lst, lend, 3 )
c
write(3,100) mgrid, nvar
100 format(10h*VALS ,2i10)
c
flag = .false.
if (lst .eq. lend .and. mgrid .ne. 0) flag = .true.
c
level = lst
10 if (level .gt. lend) go to 99
mptr = lstart(level)
if (flag) mptr = mgrid
20 if (mptr .eq. 0) go to 50
maxi = node(5,mptr)
maxj = node(6,mptr)
xlow = rnode(1,mptr)
ylow = rnode(2,mptr)
maxip1 = maxi + 1
maxjp1 = maxj + 1
loc = node(7,mptr)
locirr = node(14,mptr)
mitot = maxi-1+2*lwidth
mjtot = maxj-1+2*lwidth
locsm = igetsp(mitot*mjtot*nvar)
call outvar(alloc(loc),maxip1,maxjp1,nvar,mptr,
1 alloc(locirr),mitot,mjtot,lwidth,
2 node(17,mptr),rnode(9,mptr),
3 rnode(10,mptr),xlow,ylow,alloc(locsm))
call reclam(locsm,mitot*mjtot*nvar)
mptr = node(10,mptr)
if (flag) go to 50
go to 20
50 continue
level = level + 1
go to 10
c
99 return
end
|
Formal statement is: corollary Hurwitz_injective: assumes S: "open S" "connected S" and holf: "\<And>n::nat. \<F> n holomorphic_on S" and holg: "g holomorphic_on S" and ul_g: "\<And>K. \<lbrakk>compact K; K \<subseteq> S\<rbrakk> \<Longrightarrow> uniform_limit K \<F> g sequentially" and nonconst: "\<not> g constant_on S" and inj: "\<And>n. inj_on (\<F> n) S" shows "inj_on g S" Informal statement is: If a sequence of holomorphic functions $\{f_n\}$ converges uniformly to a holomorphic function $g$ on a connected open set $S$, and if each $f_n$ is injective on $S$, then $g$ is injective on $S$. |
### Example 2: Nonlinear convection in 2D
Following the initial convection tutorial with a single state variable $u$, we will now look at non-linear convection (step 6 in the original). This brings one new crucial challenge: computing a pair of coupled equations and thus updating two time-dependent variables $u$ and $v$.
The full set of coupled equations is now
\begin{aligned}
\frac{\partial u}{\partial t} + u \frac{\partial u}{\partial x} + v \frac{\partial u}{\partial y} = 0 \\
\\
\frac{\partial v}{\partial t} + u \frac{\partial v}{\partial x} + v \frac{\partial v}{\partial y} = 0\\
\end{aligned}
and rearranging the discretized version gives us an expression for the update of both variables
\begin{aligned}
u_{i,j}^{n+1} &= u_{i,j}^n - u_{i,j}^n \frac{\Delta t}{\Delta x} (u_{i,j}^n-u_{i-1,j}^n) - v_{i,j}^n \frac{\Delta t}{\Delta y} (u_{i,j}^n-u_{i,j-1}^n) \\
\\
v_{i,j}^{n+1} &= v_{i,j}^n - u_{i,j}^n \frac{\Delta t}{\Delta x} (v_{i,j}^n-v_{i-1,j}^n) - v_{i,j}^n \frac{\Delta t}{\Delta y} (v_{i,j}^n-v_{i,j-1}^n)
\end{aligned}
So, for starters we will re-create the original example run in pure NumPy array notation, before demonstrating
the Devito version. Let's start again with some utilities and parameters:
```python
from examples.cfd import plot_field, init_hat
import numpy as np
import sympy
%matplotlib inline
# Some variable declarations
nx = 101
ny = 101
nt = 80
c = 1.
dx = 2. / (nx - 1)
dy = 2. / (ny - 1)
sigma = .2
dt = sigma * dx
```
Let's re-create the initial setup with a 2D "hat function", but this time for two state variables.
```python
#NBVAL_IGNORE_OUTPUT
# Allocate fields and assign initial conditions
u = np.empty((nx, ny))
v = np.empty((nx, ny))
init_hat(field=u, dx=dx, dy=dy, value=2.)
init_hat(field=v, dx=dx, dy=dy, value=2.)
plot_field(u)
```
Now we can create the two stencil expressions for our two coupled equations according to the discretized equations above. We again use some simple Dirichlet boundary conditions to keep the values on all sides constant.
```python
#NBVAL_IGNORE_OUTPUT
for n in range(nt + 1): ##loop across number of time steps
un = u.copy()
vn = v.copy()
u[1:, 1:] = (un[1:, 1:] -
(un[1:, 1:] * c * dt / dy * (un[1:, 1:] - un[1:, :-1])) -
vn[1:, 1:] * c * dt / dx * (un[1:, 1:] - un[:-1, 1:]))
v[1:, 1:] = (vn[1:, 1:] -
(un[1:, 1:] * c * dt / dy * (vn[1:, 1:] - vn[1:, :-1])) -
vn[1:, 1:] * c * dt / dx * (vn[1:, 1:] - vn[:-1, 1:]))
u[0, :] = 1
u[-1, :] = 1
u[:, 0] = 1
u[:, -1] = 1
v[0, :] = 1
v[-1, :] = 1
v[:, 0] = 1
v[:, -1] = 1
plot_field(u)
```
Excellent, we again get a wave that resembles the one from the original examples.
Now we can set up our coupled problem in Devito. Let's start by creating two initial state variables $u$ and $v$, as before, and initialising them with our "hat function".
```python
#NBVAL_IGNORE_OUTPUT
from devito import Grid, TimeFunction
# First we need two time-dependent data fields, both initialized with the hat function
grid = Grid(shape=(nx, ny), extent=(2., 2.))
u = TimeFunction(name='u', grid=grid)
init_hat(field=u.data[0], dx=dx, dy=dy, value=2.)
v = TimeFunction(name='v', grid=grid)
init_hat(field=v.data[0], dx=dx, dy=dy, value=2.)
plot_field(u.data[0])
```
Using the two `TimeFunction` objects we can again derive our discretized equation, rearrange for the forward stencil point in time and define our variable update expression - only we have to do everything twice now! We again use forward differences for time via `u.dt` and backward differences in space via `u.dxl` and `u.dyl` to match the original tutorial.
```python
from devito import Eq, solve
eq_u = Eq(u.dt + u*u.dxl + v*u.dyl)
eq_v = Eq(v.dt + u*v.dxl + v*v.dyl)
# We can use the same SymPy trick to generate two
# stencil expressions, one for each field update.
stencil_u = solve(eq_u, u.forward)
stencil_v = solve(eq_v, v.forward)
update_u = Eq(u.forward, stencil_u, subdomain=grid.interior)
update_v = Eq(v.forward, stencil_v, subdomain=grid.interior)
print("U update:\n%s\n" % update_u)
print("V update:\n%s\n" % update_v)
```
U update:
Eq(u(t + dt, x, y), dt*(-u(t, x, y)*Derivative(u(t, x, y), x) - v(t, x, y)*Derivative(u(t, x, y), y) + u(t, x, y)/dt))
V update:
Eq(v(t + dt, x, y), dt*(-u(t, x, y)*Derivative(v(t, x, y), x) - v(t, x, y)*Derivative(v(t, x, y), y) + v(t, x, y)/dt))
We then set Dirichlet boundary conditions at all sides of the domain to $1$.
```python
x, y = grid.dimensions
t = grid.stepping_dim
bc_u = [Eq(u[t+1, 0, y], 1.)] # left
bc_u += [Eq(u[t+1, nx-1, y], 1.)] # right
bc_u += [Eq(u[t+1, x, ny-1], 1.)] # top
bc_u += [Eq(u[t+1, x, 0], 1.)] # bottom
bc_v = [Eq(v[t+1, 0, y], 1.)] # left
bc_v += [Eq(v[t+1, nx-1, y], 1.)] # right
bc_v += [Eq(v[t+1, x, ny-1], 1.)] # top
bc_v += [Eq(v[t+1, x, 0], 1.)] # bottom
```
And finally we can put it all together to build an operator and solve our coupled problem.
```python
#NBVAL_IGNORE_OUTPUT
from devito import Operator
# Reset our data field and ICs
init_hat(field=u.data[0], dx=dx, dy=dy, value=2.)
init_hat(field=v.data[0], dx=dx, dy=dy, value=2.)
op = Operator([update_u, update_v] + bc_u + bc_v)
op(time=nt, dt=dt)
plot_field(u.data[0])
```
Excellent, we now have a scalar implementation of the convection problem, but it can also be written as a single vectorial equation:
$\frac{d U}{dt} + \nabla(U)U = 0$
Let's now use Devito's vectorial utilities to implement the vectorial equation.
```python
from devito import VectorTimeFunction, grad
U = VectorTimeFunction(name='U', grid=grid)
init_hat(field=U[0].data[0], dx=dx, dy=dy, value=2.)
init_hat(field=U[1].data[0], dx=dx, dy=dy, value=2.)
plot_field(U[1].data[0])
eq_u = Eq(U.dt + grad(U)*U)
```
We now have a vectorial equation. Unlike in the previous case, we do not need to play with left/right derivatives
as the automated staggering of the vectorial function takes care of this.
```python
eq_u
```
$\displaystyle \left[\begin{matrix}\operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)} \frac{\partial}{\partial x} \operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)} + \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)} \frac{\partial}{\partial y} \operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)} + \frac{\partial}{\partial t} \operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)}\\\operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)} \frac{\partial}{\partial x} \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)} + \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)} \frac{\partial}{\partial y} \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)} + \frac{\partial}{\partial t} \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)}\end{matrix}\right] = 0$
Then we set the boundary conditions.
```python
x, y = grid.dimensions
t = grid.stepping_dim
bc_u = [Eq(U[0][t+1, 0, y], 1.)] # left
bc_u += [Eq(U[0][t+1, nx-1, y], 1.)] # right
bc_u += [Eq(U[0][t+1, x, ny-1], 1.)] # top
bc_u += [Eq(U[0][t+1, x, 0], 1.)] # bottom
bc_v = [Eq(U[1][t+1, 0, y], 1.)] # left
bc_v += [Eq(U[1][t+1, nx-1, y], 1.)] # right
bc_v += [Eq(U[1][t+1, x, ny-1], 1.)] # top
bc_v += [Eq(U[1][t+1, x, 0], 1.)] # bottom
```
```python
# We can use the same SymPy trick to generate two
# stencil expressions, one for each field update.
stencil_U = solve(eq_u, U.forward)
update_U = Eq(U.forward, stencil_U, subdomain=grid.interior)
```
And we have the update stencil as a vectorial equation once again.
```python
update_U
```
$\displaystyle \left[\begin{matrix}\operatorname{U_{x}}{\left(t + dt,x + \frac{h_{x}}{2},y \right)}\\\operatorname{U_{y}}{\left(t + dt,x,y + \frac{h_{y}}{2} \right)}\end{matrix}\right] = \left[\begin{matrix}dt \left(- \operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)} \frac{\partial}{\partial x} \operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)} - \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)} \frac{\partial}{\partial y} \operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)} + \frac{\operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)}}{dt}\right)\\dt \left(- \operatorname{U_{x}}{\left(t,x + \frac{h_{x}}{2},y \right)} \frac{\partial}{\partial x} \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)} - \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)} \frac{\partial}{\partial y} \operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)} + \frac{\operatorname{U_{y}}{\left(t,x,y + \frac{h_{y}}{2} \right)}}{dt}\right)\end{matrix}\right]$
We finally run the operator
```python
#NBVAL_IGNORE_OUTPUT
op = Operator([update_U] + bc_u + bc_v)
op(time=nt, dt=dt)
# The result is indeed the expected one.
plot_field(U[0].data[0])
```
```python
from devito import norm
assert np.isclose(norm(u), norm(U[0]), rtol=1e-5, atol=0)
```
|
/** ****************************************************************************
* @file FaceComposite.hpp
* @brief Composition of one-or-more FaceComponent objects
* @author Roberto Valle Fernandez
* @date 2015/06
* @copyright All rights reserved.
* Software developed by UPM PCR Group: http://www.dia.fi.upm.es/~pcr
******************************************************************************/
// ------------------ RECURSION PROTECTION -------------------------------------
#ifndef FACE_COMPOSITE_HPP
#define FACE_COMPOSITE_HPP
// ----------------------- INCLUDES --------------------------------------------
#include <Viewer.hpp>
#include <FaceComponent.hpp>
#include <FaceAnnotation.hpp>
#include <vector>
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
namespace upm {
/** ****************************************************************************
* @class FaceComposite
* @brief Composition of one-or-more similar objects.
******************************************************************************/
class FaceComposite : public FaceComponent
{
public:
FaceComposite() : FaceComponent(0) {};
~FaceComposite() {};
void
parseOptions
(
int argc,
char **argv
)
{
for (unsigned int i=0; i < m_components.size(); i++)
m_components[i]->parseOptions(argc, argv);
};
void
train
(
const std::vector<upm::FaceAnnotation> &anns_train,
const std::vector<upm::FaceAnnotation> &anns_valid
)
{
for (unsigned int i=0; i < m_components.size(); i++)
m_components[i]->train(anns_train, anns_valid);
};
void
load()
{
for (unsigned int i=0; i < m_components.size(); i++)
m_components[i]->load();
};
void
process
(
cv::Mat frame,
std::vector<upm::FaceAnnotation> &faces,
const upm::FaceAnnotation &ann
)
{
for (unsigned int i=0; i < m_components.size(); i++)
m_components[i]->process(frame, faces, ann);
};
void
show
(
const boost::shared_ptr<upm::Viewer> &viewer,
const std::vector<upm::FaceAnnotation> &faces,
const upm::FaceAnnotation &ann
)
{
for (unsigned int i=0; i < m_components.size(); i++)
m_components[i]->show(viewer, faces, ann);
};
void
evaluate
(
boost::shared_ptr<std::ostream> output,
const std::vector<upm::FaceAnnotation> &faces,
const upm::FaceAnnotation &ann
)
{
for (unsigned int i=0; i < m_components.size(); i++)
m_components[i]->evaluate(output, faces, ann);
};
void
save
(
const std::string dirpath,
const std::vector<upm::FaceAnnotation> &faces,
const upm::FaceAnnotation &ann
)
{
for (unsigned int i=0; i < m_components.size(); i++)
m_components[i]->save(dirpath, faces, ann);
};
void
addComponent
(
boost::shared_ptr<upm::FaceComponent> component
)
{
m_components.push_back(component);
};
bool
containsPart
(
unsigned int part
)
{
for (unsigned int i=0; i < m_components.size(); i++)
if (m_components[i]->getComponentClass() == part)
return true;
return false;
};
private:
std::vector< boost::shared_ptr<upm::FaceComponent> > m_components;
};
} // namespace upm
#endif /* FACE_COMPOSITE_HPP */
|
# keep this separate because it may change between IDL versions
# some constants from idl_export.h
const IDL_TRUE = convert(Int32, 1)
const IDL_FALSE = convert(Int32, 0)
const IDL_MAX_ARRAY_DIM = 8
# IDL types from idl_export.h
const IDL_TYP_UNDEF = 0
const IDL_TYP_BYTE = 1
const IDL_TYP_INT = 2
const IDL_TYP_LONG = 3
const IDL_TYP_FLOAT = 4
const IDL_TYP_DOUBLE = 5
const IDL_TYP_COMPLEX = 6
const IDL_TYP_STRING = 7
const IDL_TYP_STRUCT = 8
const IDL_TYP_DCOMPLEX = 9
const IDL_TYP_PTR = 10
const IDL_TYP_OBJREF = 11
const IDL_TYP_UINT = 12
const IDL_TYP_ULONG = 13
const IDL_TYP_LONG64 = 14
const IDL_TYP_ULONG64 = 15
# translating IDL/C types to julia
typealias IDL_MEMINT Int
typealias IDL_UMEMINT UInt
typealias UCHAR Cuchar
# NOTE: IDL_ARRAY_DIM is fixed length array IDL_MEMINT[IDL_MAX_ARRAY_DIM] (i.e, Int[8])
typealias IDL_ARRAY_DIM Ptr{IDL_MEMINT}
typealias IDL_ARRAY_FREE_CB Ptr{Void}
typealias IDL_FILEINT Int # possibly different on Windows
typealias IDL_STRING_SLEN_T Cint
const IDL_STRING_MAX_SLEN = 2147483647 # should you check this?
# /***** IDL_VARIABLE flag values ********/
const IDL_V_CONST = 1
const IDL_V_TEMP = 2
const IDL_V_ARR = 4
const IDL_V_FILE = 8
const IDL_V_DYNAMIC = 16
const IDL_V_STRUCT = 32
const IDL_V_NULL = 64
function idl_type(jl_t)
# IDL type index from julia type
t = typeof(jl_t)
if t <: AbstractArray
t = eltype(jl_t)
end
idl_t = -1
if t == UInt8
idl_t = IDL_TYP_BYTE
elseif t == Int16
idl_t = IDL_TYP_INT
elseif t == Int32
idl_t = IDL_TYP_LONG
elseif t == Float32
idl_t = IDL_TYP_FLOAT
elseif t == Float64
idl_t = IDL_TYP_DOUBLE
elseif t == Complex64
idl_t = IDL_TYP_COMPLEX
elseif t <: AbstractString
idl_t = IDL_TYP_STRING
elseif t == Complex128
idl_t = IDL_TYP_DCOMPLEX
elseif t == UInt16
idl_t = IDL_TYP_UINT
elseif t == UInt32
idl_t = IDL_TYP_ULONG
elseif t == Int64
idl_t = IDL_TYP_LONG64
elseif t == UInt64
idl_t = IDL_TYP_ULONG64
end
if idl_t < 0 error("IDL.idl_type: type not found: " * string(t)) end
return idl_t
end
function jl_type(idl_t)
# julia type from IDL type index
jl_t = Any
if idl_t == IDL_TYP_BYTE
jl_t = UInt8
elseif idl_t == IDL_TYP_INT
jl_t = Int16
elseif idl_t == IDL_TYP_LONG
jl_t = Int32
elseif idl_t == IDL_TYP_FLOAT
jl_t = Float32
elseif idl_t == IDL_TYP_DOUBLE
jl_t = Float64
elseif idl_t == IDL_TYP_COMPLEX
jl_t = Complex64
elseif idl_t == IDL_TYP_STRING
jl_t = Compat.String
elseif idl_t == IDL_TYP_DCOMPLEX
jl_t = Complex128
elseif idl_t == IDL_TYP_UINT
jl_t = UInt16
elseif idl_t == IDL_TYP_ULONG
jl_t = UInt32
elseif idl_t == IDL_TYP_LONG64
jl_t = Int64
elseif idl_t == IDL_TYP_ULONG64
jl_t = UInt64
end
if jl_t == Any
error("IDL.jl_type: type not found: " * string(idl_t))
end
return jl_t
end
#*************************************************************************************************#
# some IDL types from extern.jl
# sizeof(buf) is max size of IDL_ALLTYPES Union (64x2=128 bits or 16 bytes on all platforms)
typealias IDL_ALLTYPES UInt128
immutable IDL_Variable
vtype::UCHAR
flags::UCHAR
flags2::UCHAR
buf::IDL_ALLTYPES
end
# works as a fixed length array
immutable IDL_DIMS
d1::IDL_MEMINT
d2::IDL_MEMINT
d3::IDL_MEMINT
d4::IDL_MEMINT
d5::IDL_MEMINT
d6::IDL_MEMINT
d7::IDL_MEMINT
d8::IDL_MEMINT
end
dims(d::IDL_DIMS) = (d.d1,d.d2,d.d3,d.d4,d.d5,d.d6,d.d7,d.d8)
dims(d::IDL_DIMS, ndims::Integer) = (d.d1,d.d2,d.d3,d.d4,d.d5,d.d6,d.d7,d.d8)[1:ndims]
immutable IDL_Array
elt_len::IDL_MEMINT # Length of element in char units
arr_len::IDL_MEMINT # Length of entire array (char)
n_elts::IDL_MEMINT # total # of elements
data::Ptr{UCHAR} # ^ to beginning of array data
n_dim::UCHAR # # of dimensions used by array
flags::UCHAR # Array block flags
file_unit::Cshort # # of assoc file if file var
dim::IDL_DIMS # dimensions
free_cb::IDL_ARRAY_FREE_CB # Free callback
offset::IDL_FILEINT # Offset to base of data for file var
data_guard::IDL_MEMINT # Guard longword
end
immutable IDL_String
slen::IDL_STRING_SLEN_T # Length of string, 0 for null
stype::Cshort # type of string, static or dynamic
s::Ptr{Cchar} # Addr of string
IDL_String() = new(0, 0, Base.unsafe_convert(Ptr{Cchar}, Array(Cchar, IDL_RPC_MAX_STRLEN)))
end
# From idl_rpc.h
const IDL_RPC_MAX_STRLEN = 512 # max string length
immutable IDL_RPC_LINE_S
flags::Cint
buf::Ptr{Cchar}
IDL_RPC_LINE_S() = new(0, Base.unsafe_convert(Ptr{Cchar}, Array(Cchar, IDL_RPC_MAX_STRLEN)))
end
const IDL_TOUT_F_STDERR = 1 # Output to stderr instead of stdout
const IDL_TOUT_F_NLPOST = 4 # Output a newline at end of line
|
# A broad study on the `Chi^2` algorithm
<hr>
#### Before the broad discussion, let's first understand what the `Chi^2` algorithm is. What does it do? Why do we need to learn it?
* __`Chi^2` (pronounced as kai-square) is an algorithm that helps us to understand the relationship between two [categorical](https://youtu.be/o8gs-zgPfp4) variables.__
* __It helps us to compare what we actually observed with what we expected.__
* __We use it to accept or reject our [hypothesis](https://youtu.be/AYSbHbM7Wp0).__
* __It is also used for feature selection.__
### There are two types of hypothesis tests in the `Chi^2` algorithm
* test for fitting/ goodness of fit
* test for independence
#### `Chi^2` test for fitting or goodness of fit
The Chi-squared __goodness-of-fit__ test is an analog of the one-way ANOVA for categorical variables: it tests whether the distribution of sample categorical data matches an expected distribution.
#### `Chi^2` test of independence
The Chi-Square test of independence is a statistical test to determine if there is a __significant relationship__ between 2 categorical variables.
<hr>
## `Chi^2` test for goodness of fit
Let's understand the scenario first:
Mr. Rahim is thinking about buying a restaurant, so he goes and asks the owner for the distribution of the `number of customers` the restaurant gets each day. The owner gives him distribution data for 6 days. BUT Mr. Rahim gets a little suspicious, and he decides to see how good the owner's claimed distribution really is!
So he starts observing the number of customers coming to the restaurant over a week, and he finally collects the observed distribution data. Both the owner's distribution and the observed distribution are shown below:
> __The above example taken from [Khan Academy](https://youtu.be/2QeDRsxSF9M)__
```python
import pandas as pd
data = pd.read_csv(r"C:\Users\DIU\Desktop\goodness_of_fit.csv")
data
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>day</th>
<th>owners_distribution</th>
<th>observed_distribution</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>sat</td>
<td>10</td>
<td>30</td>
</tr>
<tr>
<th>1</th>
<td>sun</td>
<td>10</td>
<td>14</td>
</tr>
<tr>
<th>2</th>
<td>mon</td>
<td>15</td>
<td>34</td>
</tr>
<tr>
<th>3</th>
<td>tue</td>
<td>20</td>
<td>45</td>
</tr>
<tr>
<th>4</th>
<td>wed</td>
<td>30</td>
<td>57</td>
</tr>
<tr>
<th>5</th>
<td>thu</td>
<td>15</td>
<td>20</td>
</tr>
</tbody>
</table>
</div>
`Chi^2` has a standard distribution [table](https://en.wikipedia.org/wiki/Chi-squared_distribution#Table_of_%CF%872_values_vs_p-values). We need this table on both test
The Equation of `Chi^2`:
\begin{equation}
\chi^2=\sum\frac{(observed - expected)^2}{expected}
\end{equation}
In the `data` dataframe we have the __owner's distribution__ and the __observed distribution__, but we do not have the expected values! The formula for finding the __expected__ values:
> expected = total no. of observed customers * (% of owner's distribution for each day)
Let's find out the expected values first.
```python
data["expected"] = (sum(data["observed_distribution"]) * (data["owners_distribution"]/100)).astype(int)
data
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>day</th>
<th>owners_distribution</th>
<th>observed_distribution</th>
<th>expected</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>sat</td>
<td>10</td>
<td>30</td>
<td>20</td>
</tr>
<tr>
<th>1</th>
<td>sun</td>
<td>10</td>
<td>14</td>
<td>20</td>
</tr>
<tr>
<th>2</th>
<td>mon</td>
<td>15</td>
<td>34</td>
<td>30</td>
</tr>
<tr>
<th>3</th>
<td>tue</td>
<td>20</td>
<td>45</td>
<td>40</td>
</tr>
<tr>
<th>4</th>
<td>wed</td>
<td>30</td>
<td>57</td>
<td>60</td>
</tr>
<tr>
<th>5</th>
<td>thu</td>
<td>15</td>
<td>20</td>
<td>30</td>
</tr>
</tbody>
</table>
</div>
__There are mainly two hypotheses in `Chi^2`__
\begin{equation}
H_o = Null Hypothesis\\
H_a = Alternative Hypothesis
\end{equation}
* __Null Hypothesis => there is no significant relationship between the specified features__
* __Alternative Hypothesis => the opposite of the Null Hypothesis__
__Now let's calculate the `chi-square` value and find out whether the null hypothesis (the owner's distribution) is correct or not__
> `Correct => Accepted => when the Chi-square value is less than the Critical Value`
> `Incorrect => Rejected => when the Chi-square value is greater than the Critical Value`
|Calculating Chi-square value by Hand|| sat | sun | mon | tue | wed | thu |
| --- | --- | --- | --- | --- | --- | --- | --- |
| \begin{equation}Observed\end{equation} || 30 | 14 | 34 | 45 | 57 | 20 |
| \begin{equation}Expected\end{equation} || 20 | 20 | 30 | 40 | 60 | 30 |
| \begin{equation}(O-E)\end{equation} || 10 | -6 | 4 | 5 | -3 | -10 |
| \begin{equation}(O-E)^2\end{equation} || 100 | 36 | 16 | 25 | 9 | 100 |
| \begin{equation}\frac{(O-E)^2}{E}\end{equation} || 5 | 1.8 | 0.53 | 0.625 | 0.15 | 3.33 |
| \begin{equation}\sum\end{equation} ||||||| __11.44__ |
```python
# Same calculation using python manually
subtract = data["observed_distribution"] - data["expected"]
subtract_sqr = subtract**2
division = subtract_sqr / data["expected"]
chi_square = division.sum()
print(round(chi_square, 3))
```
11.442
Now we need to check whether the `owner's distribution` is accepted or not. To do this we need some extra information:
* What is the `Degree of freedom`?
* What is the significance level?
* What is the Critical Value?
__Answer:__
Degree of freedom = number of observations - 1 = __5__
Significance level: 0.05 (_the most commonly used significance level among statisticians_)
Critical value: we need to find the critical value from the chi^2 distribution [table](https://en.wikipedia.org/wiki/Chi-squared_distribution#Table_of_%CF%872_values_vs_p-values).
We need to look at where the degree-of-freedom row intersects the significance-level (P value) column:
we can see that degree of freedom 5 and significance level 0.05 intersect at 11.07.
Hence, Critical Value = 11.07
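Rather than reading the distribution table by hand, we can also ask `scipy` for this quantile directly (a small sketch; it assumes `scipy` is installed, the same library we use just below):
```python
import scipy.stats as stats

# Critical value = 95th percentile of the chi^2 distribution with 5 degrees of freedom
critical_value = stats.chi2.ppf(q=1 - 0.05, df=5)
print(round(critical_value, 2))
```
    11.07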
```python
critical_value = 11.07
if(chi_square<critical_value):
print("Owner's distribution is correct, Accepted")
else:
print("Owner's distribution is not correct, Rejected")
```
Owner's distribution is not correct, Rejected
### We can achieve exactly the same thing by using `scipy`:
```python
import scipy.stats as stats
(chi_square, p) = stats.chisquare(data["observed_distribution"], data["expected"], ddof=1)
print ('Chi-square Value = %f, P-value = %f' % (chi_square, p))
alpha = 0.05 # significance level
```
Chi-square Value = 11.441667, P-value = 0.022024
Note: `scipy.stats.chisquare` uses `k - 1 - ddof` degrees of freedom, so `ddof=1` evaluates this p-value with 4 degrees of freedom rather than the 5 we used above; leaving `ddof` at its default of 0 would give the p-value for 5 degrees of freedom.
```python
# another way to check the observation
# Correct means => Accepted => p (resulted level) > alpha (significant level)
# Incorrect means => Rejected => p < alpha
if p <= alpha:
# we reject null hypothesis and accept alternative hypothesis
print ("Owner's distribution is not correct, Rejected")
else:
# we accept null hypothesis and reject alternative hypothesis
print("Owner's distribution is correct, Accepted")
```
Owner's distribution is not correct, Rejected
## `Chi^2` test of independence
__Independence__ is a key concept in probability that describes a situation where knowing the value of one variable tells you nothing about the value of another.
For instance, the __month__ you were born probably doesn't tell you anything about which __web browser__ you use :p
So we'd expect birth month and browser preference to be __independent__.
On the other hand, your month of birth might be related to whether you __excelled__ at sports in school, so month of birth and sports performance might __not__ be __independent__.
* The chi-squared `test of independence` tests whether two categorical variables are independent.
* The test of independence is commonly used to determine whether variables like education, political views and other preferences vary based on demographic factors like gender, race and religion.
> __The above content collected from this [blog](http://hamelg.blogspot.com)__
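Formally, independence of two events $A$ and $B$ means that knowing one tells you nothing about the probability of the other:
\begin{equation}
P(A \cap B) = P(A)\,P(B)
\end{equation}
The test of independence checks whether the observed cell counts of a contingency table are consistent with this product rule applied to the row and column proportions.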
Let's say there are a couple of herbs that people believe help to prevent the __flu__. To test this, we randomly assign people to three different groups: the first two groups take herb1 and herb2 respectively, and the third group does not take anything:
> __The above example collected from [KhanAcademy](https://youtu.be/hpWdDmgsIRE)__
```python
flu_dataset = pd.read_csv(r"C:\Users\DIU\Desktop\flu_dataset.csv")
copy_df = flu_dataset.copy()
flu_dataset
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>status</th>
<th>herb1</th>
<th>herb2</th>
<th>noherb</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>sick</td>
<td>20</td>
<td>30</td>
<td>30</td>
</tr>
<tr>
<th>1</th>
<td>not_sick</td>
<td>100</td>
<td>110</td>
<td>90</td>
</tr>
</tbody>
</table>
</div>
__Now we need to find the totals, both column-wise and row-wise:__
```python
# row wise sum added into a new column called 'total'
flu_dataset["total"] = flu_dataset.iloc[:, 1:].sum(axis=1)
# column wise added into a new row with a index called 'Grand Total'
flu_dataset = pd.concat([flu_dataset, pd.DataFrame(flu_dataset.sum(axis=0), columns=['Grand Total']).T])
flu_dataset
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>status</th>
<th>herb1</th>
<th>herb2</th>
<th>noherb</th>
<th>total</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>sick</td>
<td>20</td>
<td>30</td>
<td>30</td>
<td>80</td>
</tr>
<tr>
<th>1</th>
<td>not_sick</td>
<td>100</td>
<td>110</td>
<td>90</td>
<td>300</td>
</tr>
<tr>
<th>Grand Total</th>
<td>sicknot_sick</td>
<td>120</td>
<td>140</td>
<td>120</td>
<td>380</td>
</tr>
</tbody>
</table>
</div>
> The main difference between the `goodness of fit` test and the `test of independence` is that in the `test of independence` we have to find an expected value for every cell in a two-dimensional table.
First, we need to find the expected frequency of getting sick or not sick:
> expected frequency of getting `sick = 80/380 = 0.2105 ~= 21%`
> expected frequency of getting `not sick = 300/380 = 0.7895 ~= 79%`
For each cell we need to find the expected value:
for a `sick` cell: `expected = expected frequency of getting sick * total number of people in that column (taking a herb or not)`
for a `not sick` cell: `expected = expected frequency of getting not sick * total number of people in that column (taking a herb or not)`
For example, the expected count of `sick patients who take herb1 = expected frequency of getting sick * total number of people taking herb1`:
> expected_sick_herb1 = 21% * 120 = 25.2
> expected_sick_herb2 = 21% * 140 = 29.4
> expected_sick_noherb = 21% * 120 = 25.2
> expected_notsick_herb1 = 79% * 120 = 94.8
> expected_notsick_herb2 = 79% * 140 = 110.6
> expected_notsick_noherb = 79% * 120 = 94.8
|status| herb1| herb2|noherb|total|
|------|------|------|------|-----|
|sick|20|30|30|80|
|__Exp. Freq.__|__25.2__|__29.4__|__25.2__|__21%__|
|not_sick|100|110|90|300|
|__Exp. Freq.__|__94.8__|__110.6__|__94.8__|__79%__|
|GrandTotal|120|140|120|380|
Now we need to calculate the chi-square. The table above uses the rounded 21%/79% frequencies; for the statistic itself we use the exact proportions 80/380 and 300/380, which give expected counts of 25.26, 29.47, 25.26, 94.74, 110.53 and 94.74:
\begin{equation}
\chi^2 = \sum\frac{(Observed - Expected)^2}{Expected}\\
=\frac{(20-25.26)^2}{25.26} + \frac{(30-29.47)^2}{29.47} + \frac{(30-25.26)^2}{25.26} + \frac{(100-94.74)^2}{94.74} + \frac{(110-110.53)^2}{110.53} + \frac{(90-94.74)^2}{94.74}\\
\approx 2.5258\\
\end{equation}
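For reference, here is a small sketch (assuming `numpy` is available) that builds the expected counts from the row and column totals via an outer product and sums the statistic by hand; it reproduces the `scipy` result shown below:
```python
import numpy as np

observed = np.array([[ 20,  30,  30],   # sick
                     [100, 110,  90]])  # not sick

row_totals = observed.sum(axis=1, keepdims=True)  # [[80], [300]]
col_totals = observed.sum(axis=0, keepdims=True)  # [[120, 140, 120]]
grand_total = observed.sum()                      # 380

# Expected count for each cell = (row total * column total) / grand total
expected = row_totals * col_totals / grand_total

chi_square = ((observed - expected) ** 2 / expected).sum()
print(round(chi_square, 6))
```
    2.525794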
### We can achieve exactly the same thing in Python using the `stats` library:
```python
del copy_df["status"]
copy_df
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>herb1</th>
<th>herb2</th>
<th>noherb</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>20</td>
<td>30</td>
<td>30</td>
</tr>
<tr>
<th>1</th>
<td>100</td>
<td>110</td>
<td>90</td>
</tr>
</tbody>
</table>
</div>
```python
chiStats = stats.chi2_contingency(observed = copy_df)
print ('Chi-square Value = %f, p-value=%f' % (chiStats[0], chiStats[1]))
```
Chi-square Value = 2.525794, p-value=0.282834
__Now to Accept or Reject the hypothesis we need to look at chi-square distribution [table](https://en.wikipedia.org/wiki/Chi-squared_distribution#Table_of_%CF%872_values_vs_p-values).__
First things first: we have a significance level/alpha for this problem = 10% = 0.10,
and the degrees of freedom for a contingency table = (number of rows - 1) * (number of columns - 1) = (2-1) * (3-1) = 2.
__Now, we need to find the critical value where the `degree of freedom => 2` row intersects the `significance level 0.10` column.__
According to the chi-square distribution table, the critical value is 4.61.
If the `chi-square` value is less than the `critical value` then the null hypothesis is accepted (which means the variables are independent).
```python
significant_level = 0.10
degree_of_freedom = 2
critical_value = stats.chi2.ppf(q = 1 - significant_level, df = degree_of_freedom)
print("Critical Value: ", critical_value)
observe_chi_square = chiStats[0]
print("Observed Chi Value: ", observe_chi_square)
if observe_chi_square <= critical_value:
# observed chi square value is not in critical area therefore we accept null hypothesis
    print ('Null hypothesis Accepted (variables are Independent)')
else:
# observed value is in critical area therefore we reject null hypothesis
print ('Null hypothesis Rejected (variables are related/dependent)')
```
Critical Value: 4.605170185988092
Observed Chi Value: 2.5257936507936507
Null hypothesis Accepted (variables are Independent)
|
Find the right tour for you through Melaka. We've got 50 tours going to Melaka, starting from just 4 days in length, and the longest tour is 105 days. The most popular month to go is July, which has the most number of tour departures. |
KEEP COOL® Curve is an elegant and user-friendly sliding glass cover with numerous configuration options, which allow for countless compositions.
Learn more about our "Design Your Own Cover" concept here.
KEEP COOL® Curve Combi II is a stylish, functional and flexible glass cover, customisable to all combi cabinets.
The modular design gives high potential for satisfying specific requests to personal design, and the discreet 1-glass solution creates a fine balance to the rest of the combi cabinet.
KEEP COOL® Combi II is available with both 625 mm and 1250 mm glass panels and three different RAL colours for the PVC components. |
module Vectors
import Data.Vect
%default total
fourInts : Vect 4 Int
fourInts = [1, 2, 3, 4]
sixInts : Vect 6 Int
sixInts = [5, 6, 7, 8, 9, 10]
tenInts : Vect 10 Int
tenInts = fourInts ++ sixInts
allLengths : Vect len String -> Vect len Nat
allLengths [] = []
allLengths (x :: xs) = length x :: allLengths xs
|
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <php.h>
#include <cblas.h>
#include <lapacke.h>
#include "php_ext.h"
#include "kernel/operators.h"
void tensor_matmul(zval * return_value, zval * a, zval * b)
{
unsigned int i, j;
Bucket * row;
zval rowC, c;
zend_zephir_globals_def * zephir_globals = ZEPHIR_VGLOBAL;
openblas_set_num_threads(zephir_globals->num_threads);
zend_array * aHat = Z_ARR_P(a);
zend_array * bHat = Z_ARR_P(b);
Bucket * ba = aHat->arData;
Bucket * bb = bHat->arData;
unsigned int m = zend_array_count(aHat);
unsigned int p = zend_array_count(bHat);
unsigned int n = zend_array_count(Z_ARR(bb[0].val));
double * va = emalloc(m * p * sizeof(double));
double * vb = emalloc(n * p * sizeof(double));
double * vc = emalloc(m * n * sizeof(double));
for (i = 0; i < m; ++i) {
row = Z_ARR(ba[i].val)->arData;
for (j = 0; j < p; ++j) {
va[i * p + j] = zephir_get_doubleval(&row[j].val);
}
}
for (i = 0; i < p; ++i) {
row = Z_ARR(bb[i].val)->arData;
for (j = 0; j < n; ++j) {
vb[i * n + j] = zephir_get_doubleval(&row[j].val);
}
}
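    /* C = A * B: row-major double-precision GEMM; A is m x p, B is p x n, C is m x n */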
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, p, 1.0, va, p, vb, n, 0.0, vc, n);
array_init_size(&c, m);
for (i = 0; i < m; ++i) {
array_init_size(&rowC, n);
for (j = 0; j < n; ++j) {
add_next_index_double(&rowC, vc[i * n + j]);
}
add_next_index_zval(&c, &rowC);
}
RETVAL_ARR(Z_ARR(c));
efree(va);
efree(vb);
efree(vc);
}
void tensor_dot(zval * return_value, zval * a, zval * b)
{
unsigned int i;
zend_array * aHat = Z_ARR_P(a);
zend_array * bHat = Z_ARR_P(b);
Bucket * ba = aHat->arData;
Bucket * bb = bHat->arData;
unsigned int n = zend_array_count(aHat);
double sigma = 0.0;
for (i = 0; i < n; ++i) {
sigma += zephir_get_doubleval(&ba[i].val) * zephir_get_doubleval(&bb[i].val);
}
RETVAL_DOUBLE(sigma);
}
void tensor_inverse(zval * return_value, zval * a)
{
unsigned int i, j;
Bucket * row;
zval rowB, b;
zend_zephir_globals_def * zephir_globals = ZEPHIR_VGLOBAL;
openblas_set_num_threads(zephir_globals->num_threads);
zend_array * aHat = Z_ARR_P(a);
Bucket * ba = aHat->arData;
unsigned int n = zend_array_count(aHat);
double * va = emalloc(n * n * sizeof(double));
for (i = 0; i < n; ++i) {
row = Z_ARR(ba[i].val)->arData;
for (j = 0; j < n; ++j) {
va[i * n + j] = zephir_get_doubleval(&row[j].val);
}
}
lapack_int status;
int pivots[n];
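    /* LU-factorize in place, then invert; a nonzero status signals a singular matrix, so we return NULL */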
status = LAPACKE_dgetrf(LAPACK_ROW_MAJOR, n, n, va, n, pivots);
if (status != 0) {
RETURN_NULL();
}
status = LAPACKE_dgetri(LAPACK_ROW_MAJOR, n, va, n, pivots);
if (status != 0) {
RETURN_NULL();
}
array_init_size(&b, n);
for (i = 0; i < n; ++i) {
array_init_size(&rowB, n);
for (j = 0; j < n; ++j) {
add_next_index_double(&rowB, va[i * n + j]);
}
add_next_index_zval(&b, &rowB);
}
RETVAL_ARR(Z_ARR(b));
efree(va);
}
|
[STATEMENT]
lemma eval_tm_rename:
assumes "atom k' \<sharp> t"
shows "\<lbrakk>t\<rbrakk>(finfun_update e k x) = \<lbrakk>(k' \<leftrightarrow> k) \<bullet> t\<rbrakk>(finfun_update e k' x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>t\<rbrakk>(finfun_update e k x) = \<lbrakk>(k' \<leftrightarrow> k) \<bullet> t\<rbrakk>(finfun_update e k' x)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
atom k' \<sharp> t
goal (1 subgoal):
1. \<lbrakk>t\<rbrakk>(finfun_update e k x) = \<lbrakk>(k' \<leftrightarrow> k) \<bullet> t\<rbrakk>(finfun_update e k' x)
[PROOF STEP]
by (induct t rule: tm.induct) (auto simp: permute_flip_at) |
#include <boost/python/class.hpp>
#include <boost/python/args.hpp>
#include <boost/python/return_internal_reference.hpp>
#include <cctbx/sgtbx/reciprocal_space_asu.h>
namespace cctbx { namespace sgtbx { namespace boost_python {
namespace {
struct reciprocal_space_asu_wrappers
{
typedef reciprocal_space::asu w_t;
static void
wrap()
{
using namespace boost::python;
typedef return_internal_reference<> rir;
class_<w_t>("reciprocal_space_asu", no_init)
.def(init<space_group_type const&>((arg("space_group_type"))))
.def("cb_op", &w_t::cb_op, rir())
.def("is_reference", &w_t::is_reference)
.def("reference_as_string", &w_t::reference_as_string)
.def("is_inside", &w_t::is_inside, (arg("miller_index")))
.def("which",
(int(w_t::*)(miller::index<> const&) const) &w_t::which, (
arg("miller_index")))
;
}
};
} // namespace <anonymous>
void wrap_reciprocal_space_asu()
{
reciprocal_space_asu_wrappers::wrap();
}
}}} // namespace cctbx::sgtbx::boost_python
|
State Before: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
⊢ (modify m k f).size = Bucket.size (modify m k f).buckets State After: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
⊢ m.size =
Bucket.size
(Bucket.update
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val AssocList.nil
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val))
(mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
(AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val])
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
AssocList.nil
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val)).val)) Tactic: dsimp [modify, cond] State Before: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
⊢ m.size =
Bucket.size
(Bucket.update
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val AssocList.nil
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val))
(mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
(AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val])
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
AssocList.nil
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val)).val)) State After: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
⊢ m.size =
Bucket.size
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
(AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val])
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val)) Tactic: rw [Bucket.update_update] State Before: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
⊢ m.size =
Bucket.size
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
(AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val])
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val)) State After: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
⊢ Nat.sum (List.map (fun x => List.length (AssocList.toList x)) m.buckets.val.data) =
Nat.sum
(List.map (fun x => List.length (AssocList.toList x))
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
(AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val])
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val)).val.data) Tactic: simp [h, Bucket.size] State Before: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
⊢ Nat.sum (List.map (fun x => List.length (AssocList.toList x)) m.buckets.val.data) =
Nat.sum
(List.map (fun x => List.length (AssocList.toList x))
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
(AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val])
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val)).val.data) State After: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
w✝¹ w✝ : List (AssocList α β)
h₁ :
m.buckets.val.data =
w✝¹ ++ m.buckets.val[(mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val] :: w✝
left✝ : List.length w✝¹ = USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
eq :
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
(AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val])
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val)).val.data =
w✝¹ ++
AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val] ::
w✝
⊢ Nat.sum (List.map (fun x => List.length (AssocList.toList x)) m.buckets.val.data) =
Nat.sum
(List.map (fun x => List.length (AssocList.toList x))
(w✝¹ ++
AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val] ::
w✝)) Tactic: refine have ⟨_, _, h₁, _, eq⟩ := Bucket.exists_of_update ..; eq ▸ ?_ State Before: α : Type u_1
β : Type u_2
f : α → β → β
inst✝¹ : BEq α
inst✝ : Hashable α
m : Imp α β
k : α
h : m.size = Bucket.size m.buckets
w✝¹ w✝ : List (AssocList α β)
h₁ :
m.buckets.val.data =
w✝¹ ++ m.buckets.val[(mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val] :: w✝
left✝ : List.length w✝¹ = USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
eq :
(Bucket.update m.buckets (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val
(AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val])
(_ :
USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val <
Array.size m.buckets.val)).val.data =
w✝¹ ++
AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val] ::
w✝
⊢ Nat.sum (List.map (fun x => List.length (AssocList.toList x)) m.buckets.val.data) =
Nat.sum
(List.map (fun x => List.length (AssocList.toList x))
(w✝¹ ++
AssocList.modify k f
m.buckets.val[USize.toNat (mkIdx (_ : 0 < Array.size m.buckets.val) (UInt64.toUSize (hash k))).val] ::
w✝)) State After: no goals Tactic: simp [h, h₁, Bucket.size_eq] |
[STATEMENT]
lemma prim_red_cong1: "e1 \<longrightarrow>* e1' \<Longrightarrow> EPrim f e1 e2 \<longrightarrow>* EPrim f e1' e2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. e1 \<longrightarrow>* e1' \<Longrightarrow> EPrim f e1 e2 \<longrightarrow>* EPrim f e1' e2
[PROOF STEP]
by (induction rule: multi_step.induct) blast+ |
OffCampus Books is the most prominent alternative to the UC Davis Bookstore, the campus bookstore. Sometimes their selection of texts for the current quarter leaves something to be desired. Customer service can be gruff at times. Their used books are sold asis, so check that they are in usable condition prior to purchase.
If you are seeking textbooks for pleasure or general knowledge you can get good deals on books that are no longer tied to classes.
Receipts from an online textbook reseller shipping from the same address are labeled Davis Textbooks, and some people refer to the store as such. The sign on the west side of the building also reads Davis Textbooks. OffCampus Books used to have another store in Pleasant Hill, California. It closed down in the first few years of the 2000s.
Compared to Campus Bookstore
Theyre more likely to be able to help than the campus book store if you are looking for either textbooks that arent tied to classes offered that quarter, or ones that are of different editions.
Their prices are almost always lower (usually $5$15) than at the UC Davis Bookstore, but sometimes higher. Understandably, then, OffCampus Books has a reputation for lowballing prices on your buybacks, frequently for drastically lower than available across the street at the campus bookstore! OffCampus Books may offer less than the Campus Bookstore, but they will sometimes buy back books that the campus wont.
It has been noted that they make buyback offers on the collection of what you bring in, not on each individual book. Some people haul their books between the two and find the higher offer. This can result in a difference between $40 at OffCampus Books and $175 at the MU book exchange.
Backpack Policy
To prevent stealing, they have a No Backpack Policy where you have to leave your bags by the door and sometimes even outside. Not all bookstores have this policy so if youd rather hold on to your bag or keep it secure, you may want to patronize another business.
Other
See that little yellow booth? Thats Downtown Phone 7!
See also Bookstores and the Cheap Textbook Guide.
20080402 20:50:58 nbsp I just bought a book from this place and it was pretty decent price. I noticed that the prices were about the same or a bit more than the ones I could find on Amazon.com. I have never seen so many used books before! Heh. Users/Aarolye
20080416 00:41:17 nbsp I got my philosophy of religion book here for about 30 bucks cheaper than the campus book store. I say, if you arent lazy, go on campus, spot the prices, check online, then go here (and find that it is cheapest here). Users/CodyDuncan
20080416 23:52:22 nbsp I dropped by once just to check out what textbooks they had. My backpack would not fit in their little cubbyholes, and I didnt want to leave it where someone else could grab it. So I left without even seeing what they had. Too bad for them, making it too inconvenient to shop there... Users/IDoNotExist
20080417 11:24:12 nbsp I hate that, leave your bag at the door policy. Look, were not all thieves. I hate being treated like a criminal. Just because a few people steal, doesnt mean we all steal. Anyway, they might be cheaper, but if its too inconvenient, Ill go ahead and pay a few more bucks elsewhere. The book may cost more, but at least youre treated with a modicum of respect as a customer. Users/CurlyGirl26
20080902 14:28:13 nbsp This place was very rude to me when I went to go sell back my book. When I asked them if they could purchase my book, the guy cut me off when he saw the book in my hand and told me that the edition that I had was too old and they werent purchasing it anymore. Unfortunately, even though it is one edition older, the UCD Bookstore buys the book back for $70.00. Its unfortunate that they are rude up front, I did not even say anything to provoke them. Users/IdealParadigm
20090315 13:39:42 nbsp When I went to return a book I bought approximately for $100, they wrote me store credit for $95. They didnt add tax paid on the slip until I mentioned it to them. Although their prices are cheaper Id rather pay the extra $5 just to get friendlier customer service at the other book stores. Users/eatsoupwithsticks
20090610 23:25:35 nbsp I really dont like this place. I bought a book here (sold out at UCD bookstore) and I didnt notice there was black tape on the front/back where it blended in with the cover. I took it off two weeks later and it say Free Copy for Instructors NOT FOR RESALE, SHADY! I paid full price and they werent even supposed to sell it, and there were some appendices/chapters missing from it.
The most annoying thing is that they do treat everyone like criminals. Once I left my backpack, got the book and went back to get my backpack (AND WALLET) and the girl got on my case about it...even though I was just walking over with my backpack/wallet to stand in line and pay. Apparently its possible to steal a book on my way to the cashier...3 feet of space.
I am never going there again, it truly is ridiculous how rude they are. Not everyone is a thief! Users/sgent
20090921 12:33:04 nbsp This place treats you like crap, and I got ripped off on my MAT 21A book here. Users/michellefong
20091114 23:42:36 nbsp When i first visited this store, i also disliked their policy of leaving the backpack in the front but once i witnessed a shoplifter who was trying to put the book in the backpack w/o paying for it. They called the police to handover the guy , i didnt like that rather they d ve just warned him, however crime is crime. The prices are always way cheaper than bookstore and also they bought all my books that main bookstore rejected paying more than i expected. Id absolutely recommend!!! Users/exoticdreamer
20101012 16:22:15 nbsp I bought two books from this place a while ago since I no longer have a use for them I was planning on selling them online. The only problem is that both textbooks have black tape in the front and the back. I decided to remove the tape in order to see why it had been added to the textbooks. These textbooks are REVIEW COPIES which really pisses me off since I paid almost $200 for both textbooks that I wont be able sell back. To be honest I dont know how these people are still in business. I wouldnt recommend anyone buying anything from this store!!! Users/XavierDavis
20110114 14:50:56 nbsp They are possibly the rudest storekeepers I have encountered in Davis. Workers are unapproachable! I thought it was just me, but Im not surprised anymore after reading these comments.
I wish that Davis Copy Shop didnt refer their readers here. Otherwise, I would never step foot in this place again. Users/SusanChang
20110608 18:08:36 nbsp Dont sell books back here, they are ultra sneaky. I went by with two books and a clicker and got offered $17 for everything. When I told them I wanted to check the offer at the MU, the guy at the counter said dont bother we use the same software as they do. I walked over to the MU and got paid $130 for the lot. Users/dijudy
20110921 11:41:39 nbsp Sellers Beware I wanted to sell my math16 series math book (hard cover) and solution manual. They wanted to buy it for 5 dollars and 1 dollar for the solution manual because it was an old edition. However, they were selling the old editions for $70 and $45(sol) dollars a piece. I rather burn my books than sell it to these people.
FYIsold it on uloop.com instead . Users/kkha91
20111030 14:50:02 nbsp I bought all of my textbooks here based on price for the current quarter, and have found that ALL of my books have writing in them. Some just a few notes here and there, others have full chapters underlined and highlighted. I didnt think to look closely before purchasing as Ive never experienced this issue with any other bookstore on or off campus. The prices are definitely better, but be sure to thoroughly inspect books before buying. Users/NicoleLombard
20120715 14:32:49 nbsp Bought books here for the first time at the beginning of Summer Session I. I read all of the comments above before I went. Luckily, all of the books I needed were there. As with any used condition book, I checked the books for highlighting/writing/damage. A lot of them had the aforementioned on the pages but not all of them. IF YOU PLAN TO TRY TO SELL BACK ANY OF THE BOOKS YOU BUY FROM THIS PLACE TO THE UCD BOOKSTORE: Be careful of the black tape! All but one of the copies of one of the books I needed had black tape over the covers. Upon peeling the tape back carefully at the top of the block of black tape on the back cover, I discovered that those books were instructors edition books and were not for resale. THE UCD BOOKSTORE WONT TAKE THESE. Take the effort to find one that doesnt have black tape; youll actually get money for these ones for resale then. They also put a big barcode sticker from their store over the books original barcode area. Dont know if this would be a problem when youre trying to sell them back, but I was able to remove them by first peeling off as much as possible, then laying a rubbingalcoholdampened/soaked washcloth over the label, letting it sit for a bit, and then rubbing off the sticker with the washcloth.
Overall, not a bad place to buy from, but read all the comments mentioned before and know what youre getting yourself into before coming to the store. Users/kimmisan
20120715 14:35:23 nbsp Edit to my above post: they were review copies and so were not resellable. Either that or insstructors copies. Either way theres a reason for the black tape (hiding something) so if you want to resell any books you buy here, just avoid the blacktapedbooks. Users/kimmisan
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.montgomery32_2e322m2e161m1_11limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition opp :
{ opp : feBW_small -> feBW_small
| forall a, phiM_small (opp a) = F.opp (phiM_small a) }.
Proof.
Set Ltac Profiling.
Time synthesize_opp ().
Show Ltac Profile.
Time Defined.
Print Assumptions opp.
|
State Before: α : Type u_1
E : Type ?u.920015
F : Type u_2
G : Type ?u.920021
m m0 : MeasurableSpace α
p : ℝ≥0∞
q : ℝ
μ ν : Measure α
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedAddCommGroup G
⊢ snormEssSup 0 μ = 0 State After: α : Type u_1
E : Type ?u.920015
F : Type u_2
G : Type ?u.920021
m m0 : MeasurableSpace α
p : ℝ≥0∞
q : ℝ
μ ν : Measure α
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedAddCommGroup G
⊢ essSup (fun x => ⊥) μ = ⊥ Tactic: simp_rw [snormEssSup, Pi.zero_apply, nnnorm_zero, ENNReal.coe_zero, ← ENNReal.bot_eq_zero] State Before: α : Type u_1
E : Type ?u.920015
F : Type u_2
G : Type ?u.920021
m m0 : MeasurableSpace α
p : ℝ≥0∞
q : ℝ
μ ν : Measure α
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedAddCommGroup G
⊢ essSup (fun x => ⊥) μ = ⊥ State After: no goals Tactic: exact essSup_const_bot |
| pc = 0xc002 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x01 | x = 0x01 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc006 | a = 0x01 | x = 0x01 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc008 | a = 0x01 | x = 0x01 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0000] = 0x01 |
| pc = 0xc00b | a = 0x01 | x = 0x01 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x07fe] = 0x01 |
| pc = 0xc00d | a = 0x01 | x = 0x01 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0001] = 0x01 |
| pc = 0xc010 | a = 0x01 | x = 0x01 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x07ff] = 0x01 |
| pc = 0xc011 | a = 0x01 | x = 0x00 | y = 0x01 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc012 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc013 | a = 0x01 | x = 0xff | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc014 | a = 0x01 | x = 0xff | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc016 | a = 0x01 | x = 0xff | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 00110110 | MEM[0x0000] = 0x00 |
| pc = 0xc019 | a = 0x01 | x = 0xff | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 00110110 | MEM[0x07fe] = 0x00 |
| pc = 0xc01b | a = 0x01 | x = 0xff | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x0000] = 0xff |
| pc = 0xc01e | a = 0x01 | x = 0xff | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x07fe] = 0xff |
| pc = 0xc020 | a = 0x01 | x = 0x01 | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc022 | a = 0x01 | x = 0x01 | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 00110110 | MEM[0x0001] = 0x00 |
| pc = 0xc025 | a = 0x01 | x = 0x01 | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 00110110 | MEM[0x07ff] = 0x00 |
| pc = 0xc027 | a = 0x01 | x = 0x01 | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x0001] = 0xff |
| pc = 0xc02a | a = 0x01 | x = 0x01 | y = 0xff | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x07ff] = 0xff |
|
/-
Copyright (c) 2021 Henry Swanson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Henry Swanson
-/
import data.equiv.basic
import data.equiv.option
import dynamics.fixed_points.basic
import group_theory.perm.option
/-!
# Derangements on types
In this file we define `derangements α`, the set of derangements on a type `α`.
We also define some equivalences involving various subtypes of `perm α` and `derangements α`:
* `derangements_option_equiv_sigma_at_most_one_fixed_point`: An equivalence between
`derangements (option α)` and the sigma-type `Σ a : α, {f : perm α // fixed_points f ⊆ a}`.
* `derangements_recursion_equiv`: An equivalence between `derangements (option α)` and the
sigma-type `Σ a : α, (derangements (({a}ᶜ : set α) : Type _) ⊕ derangements α)` which is later
used to inductively count the number of derangements.
In order to prove the above, we also prove some results about the effect of `equiv.remove_none`
on derangements: `remove_none.fiber_none` and `remove_none.fiber_some`.
-/
open equiv function
/-- A permutation is a derangement if it has no fixed points. -/
def derangements (α : Type*) : set (perm α) := {f : perm α | ∀ x : α, f x ≠ x}
variables {α β : Type*}
lemma mem_derangements_iff_fixed_points_eq_empty {f : perm α} :
f ∈ derangements α ↔ fixed_points f = ∅ :=
set.eq_empty_iff_forall_not_mem.symm
/-- If `α` is equivalent to `β`, then `derangements α` is equivalent to `derangements β`. -/
def equiv.derangements_congr (e : α ≃ β) : (derangements α ≃ derangements β) :=
e.perm_congr.subtype_equiv $ λ f, e.forall_congr $ by simp
namespace derangements
/-- Derangements on a subtype are equivalent to permutations on the original type where points are
fixed iff they are not in the subtype. -/
protected def subtype_equiv (p : α → Prop) [decidable_pred p] :
derangements (subtype p) ≃ {f : perm α // ∀ a, ¬p a ↔ a ∈ fixed_points f} :=
calc
derangements (subtype p)
≃ {f : {f : perm α // ∀ a, ¬p a → a ∈ fixed_points f} // ∀ a, a ∈ fixed_points f → ¬p a}
: begin
refine (perm.subtype_equiv_subtype_perm p).subtype_equiv (λ f, ⟨λ hf a hfa ha, _, _⟩),
{ refine hf ⟨a, ha⟩ (subtype.ext _),
rwa [mem_fixed_points, is_fixed_pt, perm.subtype_equiv_subtype_perm, @coe_fn_coe_base',
equiv.coe_fn_mk, subtype.coe_mk, equiv.perm.of_subtype_apply_of_mem]
at hfa },
rintro hf ⟨a, ha⟩ hfa,
refine hf _ _ ha,
change perm.subtype_equiv_subtype_perm p f a = a,
rw [perm.subtype_equiv_subtype_perm_apply_of_mem f ha, hfa, subtype.coe_mk],
end
... ≃ {f : perm α // ∃ (h : ∀ a, ¬p a → a ∈ fixed_points f), ∀ a, a ∈ fixed_points f → ¬p a}
: subtype_subtype_equiv_subtype_exists _ _
... ≃ {f : perm α // ∀ a, ¬p a ↔ a ∈ fixed_points f}
: subtype_equiv_right (λ f, by simp_rw [exists_prop, ←forall_and_distrib,
←iff_iff_implies_and_implies])
/-- The set of permutations that fix either `a` or nothing is equivalent to the sum of:
- derangements on `α`
- derangements on `α` minus `a`. -/
def at_most_one_fixed_point_equiv_sum_derangements [decidable_eq α] (a : α) :
{f : perm α // fixed_points f ⊆ {a}} ≃ (derangements ({a}ᶜ : set α)) ⊕ derangements α :=
calc
{f : perm α // fixed_points f ⊆ {a}}
≃ {f : {f : perm α // fixed_points f ⊆ {a}} // a ∈ fixed_points f}
⊕ {f : {f : perm α // fixed_points f ⊆ {a}} // a ∉ fixed_points f}
: (equiv.sum_compl _).symm
... ≃ {f : perm α // fixed_points f ⊆ {a} ∧ a ∈ fixed_points f}
⊕ {f : perm α // fixed_points f ⊆ {a} ∧ a ∉ fixed_points f}
: begin
refine equiv.sum_congr _ _;
{ convert subtype_subtype_equiv_subtype_inter _ _, ext f, refl }
end
... ≃ {f : perm α // fixed_points f = {a}} ⊕ {f : perm α // fixed_points f = ∅}
: begin
refine equiv.sum_congr (subtype_equiv_right $ λ f, _) (subtype_equiv_right $ λ f, _),
{ rw [set.eq_singleton_iff_unique_mem, and_comm],
refl },
{ rw set.eq_empty_iff_forall_not_mem,
refine ⟨λ h x hx, h.2 (h.1 hx ▸ hx), λ h, ⟨λ x hx, (h _ hx).elim, h _⟩⟩ }
end
... ≃ (derangements ({a}ᶜ : set α)) ⊕ derangements α
: begin
refine equiv.sum_congr ((derangements.subtype_equiv _).trans $ subtype_equiv_right $ λ x,
_).symm (subtype_equiv_right $ λ f, mem_derangements_iff_fixed_points_eq_empty.symm),
rw [eq_comm, set.ext_iff],
simp_rw [set.mem_compl_iff, not_not],
end
namespace equiv
variables [decidable_eq α]
/-- The set of permutations `f` such that the preimage of `(a, f)` under
`equiv.perm.decompose_option` is a derangement. -/
def remove_none.fiber (a : option α) : set (perm α) :=
{f : perm α | (a, f) ∈ equiv.perm.decompose_option '' derangements (option α)}
lemma remove_none.mem_fiber (a : option α) (f : perm α) :
f ∈ remove_none.fiber a ↔
∃ F : perm (option α), F ∈ derangements (option α) ∧ F none = a ∧ remove_none F = f :=
by simp [remove_none.fiber, derangements]
lemma remove_none.fiber_none : remove_none.fiber (@none α) = ∅ :=
begin
rw set.eq_empty_iff_forall_not_mem,
intros f hyp,
rw remove_none.mem_fiber at hyp,
rcases hyp with ⟨F, F_derangement, F_none, _⟩,
exact F_derangement none F_none
end
/-- For any `a : α`, the fiber over `some a` is the set of permutations
where `a` is the only possible fixed point. -/
lemma remove_none.fiber_some (a : α) :
(remove_none.fiber (some a)) = {f : perm α | fixed_points f ⊆ {a}} :=
begin
ext f,
split,
{ rw remove_none.mem_fiber,
rintro ⟨F, F_derangement, F_none, rfl⟩ x x_fixed,
rw mem_fixed_points_iff at x_fixed,
apply_fun some at x_fixed,
cases Fx : F (some x) with y,
{ rwa [remove_none_none F Fx, F_none, option.some_inj, eq_comm] at x_fixed },
{ exfalso, rw remove_none_some F ⟨y, Fx⟩ at x_fixed, exact F_derangement _ x_fixed } },
{ intro h_opfp,
use equiv.perm.decompose_option.symm (some a, f),
split,
{ intro x,
apply_fun (swap none (some a)),
simp only [perm.decompose_option_symm_apply, swap_apply_self, perm.coe_mul],
cases x,
{ simp },
simp only [equiv_functor.map_equiv_apply, equiv_functor.map,
option.map_eq_map, option.map_some'],
by_cases x_vs_a : x = a,
{ rw [x_vs_a, swap_apply_right], apply option.some_ne_none },
have ne_1 : some x ≠ none := option.some_ne_none _,
have ne_2 : some x ≠ some a := (option.some_injective α).ne_iff.mpr x_vs_a,
rw [swap_apply_of_ne_of_ne ne_1 ne_2, (option.some_injective α).ne_iff],
intro contra,
exact x_vs_a (h_opfp contra) },
{ rw apply_symm_apply } }
end
end equiv
section option
variables [decidable_eq α]
/-- The set of derangements on `option α` is equivalent to the union over `a : α`
of "permutations with `a` the only possible fixed point". -/
def derangements_option_equiv_sigma_at_most_one_fixed_point :
derangements (option α) ≃ Σ a : α, {f : perm α | fixed_points f ⊆ {a}} :=
begin
have fiber_none_is_false : (equiv.remove_none.fiber (@none α)) -> false,
{ rw equiv.remove_none.fiber_none, exact is_empty.false },
calc derangements (option α)
≃ equiv.perm.decompose_option '' derangements (option α) : equiv.image _ _
... ≃ Σ (a : option α), ↥(equiv.remove_none.fiber a) : set_prod_equiv_sigma _
... ≃ Σ (a : α), ↥(equiv.remove_none.fiber (some a))
: sigma_option_equiv_of_some _ fiber_none_is_false
... ≃ Σ (a : α), {f : perm α | fixed_points f ⊆ {a}}
: by simp_rw equiv.remove_none.fiber_some,
end
/-- The set of derangements on `option α` is equivalent to the union over all `a : α` of
"derangements on `α` ⊕ derangements on `{a}ᶜ`". -/
def derangements_recursion_equiv :
derangements (option α) ≃ Σ a : α, (derangements (({a}ᶜ : set α) : Type _) ⊕ derangements α) :=
derangements_option_equiv_sigma_at_most_one_fixed_point.trans (sigma_congr_right
at_most_one_fixed_point_equiv_sum_derangements)
end option
end derangements
|
/****************************************************************************
* hipipe library
* Copyright (c) 2017, Cognexa Solutions s.r.o.
* Copyright (c) 2018, Iterait a.s.
* Author(s) Filip Matzner
*
* This file is distributed under the MIT License.
* See the accompanying file LICENSE.txt for the complete license agreement.
****************************************************************************/
#include <hipipe/build_config.hpp>
#ifdef HIPIPE_BUILD_PYTHON
#include <hipipe/core/python/utility/pyboost_fs_path_converter.hpp>
#include <boost/python.hpp>
#include <experimental/filesystem>
namespace hipipe::python::utility {
PyObject* fs_path_to_python_str::convert(const std::experimental::filesystem::path& path)
{
return boost::python::incref(boost::python::object(path.string()).ptr());
}
fs_path_from_python_str::fs_path_from_python_str()
{
boost::python::converter::registry::push_back(
&convertible,
&construct,
boost::python::type_id<std::experimental::filesystem::path>());
}
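// Boost.Python converter protocol: return the candidate object itself when it
// can be converted (here: any Python unicode string), or a null pointer otherwise.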
void* fs_path_from_python_str::convertible(PyObject* obj_ptr)
{
if (!PyUnicode_Check(obj_ptr)) return 0;
return obj_ptr;
}
void fs_path_from_python_str::construct(
PyObject* obj_ptr,
boost::python::converter::rvalue_from_python_stage1_data* data)
{
const char* value = PyUnicode_AsUTF8(obj_ptr);
if (value == 0) boost::python::throw_error_already_set();
void* storage = ((boost::python::converter::rvalue_from_python_storage<
std::experimental::filesystem::path>*)data)
->storage.bytes;
new (storage) std::experimental::filesystem::path(value);
data->convertible = storage;
}
} // namespace hipipe::python::utility
#endif // HIPIPE_BUILD_PYTHON |
A Lock Haven log boom, smaller than but otherwise similar to the Susquehanna Boom at Williamsport, was constructed in 1849. Large cribs of timbers weighted with tons of stone were arranged in the pool behind the Dunnstown Dam, named for a settlement on the shore opposite Lock Haven. The piers, about 150 feet (46 m) from one another, stretched in a line from the dam to a point 3 miles (5 km) upriver. Connected by timbers shackled together with iron yokes and rings, the piers anchored an enclosure into which the river current forced floating logs. Workers called boom rats sorted the captured logs, branded like cattle, for delivery to sawmills and other owners. Lock Haven became the lumber center of Clinton County and the site of many businesses related to forest products.
|
[STATEMENT]
lemma [code]: "List.maps f l = flatmap l f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. List.maps f l = flatmap l f
[PROOF STEP]
by (simp add: flatmap_def) |
function [Gs, op, nodes] = mk_nbrs_of_dag(G0)
% MK_NBRS_OF_DAG Make all DAGs that differ from G0 by a single edge deletion, addition or reversal
% [Gs, op, nodes] = mk_nbrs_of_dag(G0)
%
% Gs{i} is the i'th neighbor.
% op{i} = 'add', 'del', or 'rev' is the operation used to create the i'th neighbor.
% nodes(i,1:2) are the head and tail of the operated-on arc.
Gs = {};
op = {};
nodes = [];
[I,J] = find(G0);
nnbrs = 1;
% all single edge deletions
for e=1:length(I)
i = I(e); j = J(e);
G = G0;
G(i,j) = 0;
Gs{nnbrs} = G;
op{nnbrs} = 'del';
nodes(nnbrs, :) = [i j];
nnbrs = nnbrs + 1;
end
% all single edge reversals
for e=1:length(I)
i = I(e); j = J(e);
G = G0;
G(i,j) = 0;
G(j,i) = 1;
if acyclic(G)
Gs{nnbrs} = G;
op{nnbrs} = 'rev';
nodes(nnbrs, :) = [i j];
nnbrs = nnbrs + 1;
end
end
[I,J] = find(~G0);
% all single edge additions
for e=1:length(I)
i = I(e); j = J(e);
if i ~= j % don't add self arcs
G = G0;
G(i,j) = 1;
if G(j,i)==0 % don't add i->j if j->i exists already
if acyclic(G)
Gs{nnbrs} = G;
op{nnbrs} = 'add';
nodes(nnbrs, :) = [i j];
nnbrs = nnbrs + 1;
end
end
end
end
|
[STATEMENT]
lemma pos_lit_in_atms_of: "Pos A \<in># C \<Longrightarrow> A \<in> atms_of C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Pos A \<in># C \<Longrightarrow> A \<in> atms_of C
[PROOF STEP]
unfolding atms_of_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Pos A \<in># C \<Longrightarrow> A \<in> atm_of ` set_mset C
[PROOF STEP]
by force |
\section{Procedure}
\label{procedure}
More than 250 computers were profiled for system resource allocation
and utilization. The profiling was carried out for both local and network
accounts. For the two types of accounts, I profiled the systems at login
after they had been idle or restarted. In order to reproduce the end-user
experience, the profiling script was set up to launch at login, taking
continuous snapshots for 35 minutes. The subprocess module and its Popen
constructor in Python (version 3.6)~\cite{python366} were used to start, time,
and exit the top command[citation]. The profiled computers ran either the
Mac OS X 10.8 Mountain Lion, the OS X 10.9 Mavericks, the OS X 10.11 El
Capitan, or the macOS 10.12 Sierra operating system. We profiled computers
from one classroom to the next, labelling the log files to record
the date of data collection, the classroom, and the computer inventory tag.
A trunk directory and subdirectories were then created, with each subdirectory
named after the classroom from which profile data was collected. A copy of the
trunk directory was saved for reference and future need before the log files
were cleaned up and computer resource metrics extracted.
|
{-# OPTIONS --no-positivity-check #-}
module Section7 where
open import Section6 public
-- 7. Correspondence between proof trees and terms
-- ===============================================
--
-- We define a function that translates the proof trees to the corresponding untyped terms and
-- likewise for the substitutions, we write `M ⁻` and `γ ⁻ˢ` for these operations. The definitions
-- are:
mutual
_⁻ : ∀ {Γ A} → Γ ⊢ A → 𝕋
(ν x i) ⁻ = ν x
(ƛ x M) ⁻ = ƛ x (M ⁻)
(M ∙ N) ⁻ = (M ⁻) ∙ (N ⁻)
(M ▶ γ) ⁻ = (M ⁻) ▶ (γ ⁻ˢ)
_⁻ˢ : ∀ {Δ Γ} → Δ ⋙ Γ → 𝕊
π⟨ c ⟩ ⁻ˢ = []
(γ ● γ′) ⁻ˢ = (γ ⁻ˢ) ● (γ′ ⁻ˢ)
[ γ , x ≔ M ] ⁻ˢ = [ γ ⁻ˢ , x ≔ M ⁻ ]
-- It is easy to prove that the translation of a proof tree is well-typed:
-- Lemma 12.
mutual
lem₁₂ : ∀ {Γ A} → (M : Γ ⊢ A) → Γ ⊢ M ⁻ ∷ A
lem₁₂ (ν x i) = ν x i
lem₁₂ (ƛ x M) = ƛ x (lem₁₂ M)
lem₁₂ (M ∙ N) = lem₁₂ M ∙ lem₁₂ N
lem₁₂ (M ▶ γ) = lem₁₂ M ▶ lem₁₂ₛ γ
lem₁₂ₛ : ∀ {Γ Γ′} → (γ : Γ′ ⋙ Γ) → Γ′ ⋙ γ ⁻ˢ ∷ Γ
lem₁₂ₛ π⟨ c ⟩ = ↑⟨ c ⟩ refl⋙∷
lem₁₂ₛ (γ ● γ′) = lem₁₂ₛ γ ● lem₁₂ₛ γ′
lem₁₂ₛ [ γ , x ≔ M ] = [ lem₁₂ₛ γ , x ≔ lem₁₂ M ]
-- In general, we may have `M ⁻ ≡ N ⁻` but `M` different from `N`. Take for example
-- `(λ(y : B ⊃ B).z) ∙ λ(x : B).x : [ z : A ] ⊢ A` and `(λ(y : C ⊃ C).z) ∙ λ(x : C).x : [ z : A ] ⊢ A`
-- which are both
-- translated into `(λ y.z) ∙ λ x.x`. This shows that a given term can be decorated into different
-- proof trees.
--
-- We define a relation between terms and their possible decorations (and likewise for the
-- substitutions) as an inductively defined set. (…)
--
-- The introduction rules are: (…)
mutual
infix 3 _𝒟_
data _𝒟_ : ∀ {Γ A} → 𝕋 → Γ ⊢ A → Set where
ν : ∀ {Γ A} →
(x : Name) (i : Γ ∋ x ∷ A) →
ν x 𝒟 ν x i
_∙_ : ∀ {Γ A B t₁ t₂} {M : Γ ⊢ A ⊃ B} {N : Γ ⊢ A} →
t₁ 𝒟 M → t₂ 𝒟 N →
t₁ ∙ t₂ 𝒟 M ∙ N
π⟨_⟩ : ∀ {Γ Δ A t} {M : Δ ⊢ A} →
(c : Γ ⊇ Δ) → t 𝒟 M →
t 𝒟 M ▶ π⟨ c ⟩
_▶_ : ∀ {Γ Δ A s t} {M : Δ ⊢ A} {γ : Γ ⋙ Δ} →
t 𝒟 M → s 𝒟ₛ γ →
t ▶ s 𝒟 M ▶ γ
ƛ : ∀ {Γ A B t} →
(x : Name) {{_ : T (fresh x Γ)}} {M : [ Γ , x ∷ A ] ⊢ B} → t 𝒟 M →
ƛ x t 𝒟 ƛ x M
infix 3 _𝒟ₛ_
data _𝒟ₛ_ : ∀ {Γ Δ} → 𝕊 → Γ ⋙ Δ → Set where
π⟨_⟩ : ∀ {Γ Δ} →
(c : Γ ⊇ Δ) →
[] 𝒟ₛ π⟨ c ⟩
[_,_≔_] : ∀ {Γ Δ A s t} {γ : Δ ⋙ Γ} {M : Δ ⊢ A} →
s 𝒟ₛ γ → (x : Name) {{_ : T (fresh x Γ)}} → t 𝒟 M →
[ s , x ≔ t ] 𝒟ₛ [ γ , x ≔ M ]
↓⟨_⟩𝒟ₛ : ∀ {Γ Δ Θ s} {γ : Θ ⋙ Γ} →
(c : Γ ⊇ Δ) → s 𝒟ₛ γ →
s 𝒟ₛ ↓⟨ c ⟩ γ
↑⟨_⟩𝒟ₛ : ∀ {Γ Δ Θ s} {γ : Γ ⋙ Δ} →
(c : Θ ⊇ Γ) → s 𝒟ₛ γ →
s 𝒟ₛ ↑⟨ c ⟩ γ
_●_ : ∀ {Γ Δ Θ s₁ s₂} {γ₂ : Γ ⋙ Δ} {γ₁ : Θ ⋙ Γ} →
s₂ 𝒟ₛ γ₂ → s₁ 𝒟ₛ γ₁ →
s₂ ● s₁ 𝒟ₛ γ₂ ● γ₁
-- It is straightforward to prove Lemma 13
-- mutually with a corresponding lemma for substitutions.
-- Lemma 13.
mutual
lem₁₃ : ∀ {Γ A} → (M : Γ ⊢ A) → M ⁻ 𝒟 M
lem₁₃ (ν x i) = ν x i
lem₁₃ (ƛ x M) = ƛ x (lem₁₃ M)
lem₁₃ (M ∙ N) = lem₁₃ M ∙ lem₁₃ N
lem₁₃ (M ▶ γ) = lem₁₃ M ▶ lem₁₃ₛ γ
lem₁₃ₛ : ∀ {Γ Γ′} → (γ : Γ′ ⋙ Γ) → γ ⁻ˢ 𝒟ₛ γ
lem₁₃ₛ π⟨ c ⟩ = π⟨ c ⟩
lem₁₃ₛ (γ ● γ′) = lem₁₃ₛ γ ● lem₁₃ₛ γ′
lem₁₃ₛ [ γ , x ≔ M ] = [ lem₁₃ₛ γ , x ≔ lem₁₃ M ]
-- Using the discussion in Section 3.3 on how to define the monotonicity and projection
-- rules with `π⟨_⟩` we can find a proof tree that corresponds to a well-typed term:
-- Lemma 14.
postulate
lem₁₄ : ∀ {Γ A t} → Γ ⊢ t ∷ A → Σ (Γ ⊢ A) (λ M → M ⁻ ≡ t)
-- As a direct consequence of this lemma and Lemma 13 we know that every well-typed term
-- has a decoration.
-- Lemma 15.
lem₁₅ : ∀ {Γ A t} → Γ ⊢ t ∷ A → Σ (Γ ⊢ A) (λ M → t 𝒟 M)
lem₁₅ D with lem₁₄ D
… | (M , refl) = M , lem₁₃ M
-- As a consequence of this lemma we can now define the semantics of a well-typed term in
-- a Kripke model as the semantics of the decorated term. In the remaining text, however, we
-- study only the correspondence between terms and proof trees since the translation to the
-- semantics is direct.
--
-- TODO: What to do about the above paragraph?
--
-- As we mentioned above a well-typed term may be decorated to several proof trees. We
-- can however prove that if two proof trees are in η-normal form and they are decorations of
-- the same term, then the two proof trees are convertible. We prove Lemma 16
-- together with two corresponding lemmas for proof trees in applicative normal form:
-- Lemma 16.
mutual
postulate
lem₁₆ : ∀ {Γ A t} {M M′ : Γ ⊢ A} {{_ : enf M}} {{_ : enf M′}} →
t 𝒟 M → t 𝒟 M′ →
M ≡ M′
postulate
lem₁₆′ : ∀ {Γ A A′ t} {M : Γ ⊢ A} {N : Γ ⊢ A′} {{_ : anf M}} {{_ : anf N}} →
t 𝒟 M → t 𝒟 N →
A ≡ A′
-- TODO: Uh oh. Heterogeneous equality?
-- postulate
-- lem₁₆″ : ∀ {Γ A A′ t} {M : Γ ⊢ A} {M′ : Γ ⊢ A′} {{_ : anf M}} {{_ : anf M′}} →
-- t 𝒟 M → t 𝒟 M′ →
-- M ≡ M′
postulate
lem₁₆″ : ∀ {Γ A t} {M M′ : Γ ⊢ A} {{_ : anf M}} {{_ : anf M′}} →
t 𝒟 M → t 𝒟 M′ →
M ≡ M′
-- As a consequence we get that if `nf M ⁻` and `nf N ⁻` are the same, then `M ≅ N`.
-- Corollary 2.
postulate
cor₂ : ∀ {Γ A} → (M M′ : Γ ⊢ A) → nf M ⁻ ≡ nf M′ ⁻ → M ≅ M′
-- Proof: By Lemma 16 and Theorem 7 we get `nf N ≡ nf M` and by Theorem 5 we get `M ≅ N`.
-- 7.1. Reduction
-- --------------
--
-- We mutually inductively define when a term is in weak head normal form (abbreviated
-- `whnf`) and in weak head applicative normal form (abbreviated `whanf`) by:
mutual
data whnf : 𝕋 → Set where
ƛ : ∀ {t} →
(x : Name) → whnf t →
whnf (ƛ x t)
α : ∀ {t} → whanf t →
whnf t
data whanf : 𝕋 → Set where
ν : (x : Name) →
whanf (ν x)
_∙_ : ∀ {t u} →
whanf t → whnf u →
whanf (t ∙ u)
-- We inductively define a deterministic untyped one-step reduction on terms and
-- substitutions: (…)
mutual
infix 3 _⟶_
data _⟶_ : 𝕋 → 𝕋 → Set where
red₁ : ∀ {a s t x} →
(ƛ x t ▶ s) ∙ a ⟶ t ▶ [ s , x ≔ a ]
red₂ : ∀ {t t₁ t₂} →
t₁ ⟶ t₂ →
t₁ ∙ t ⟶ t₂ ∙ t
red₃ : ∀ {s t x} →
ν x ▶ [ s , x ≔ t ] ⟶ t
red₄ : ∀ {s t x y} {{_ : x ≢ y}} →
ν x ▶ [ s , y ≔ t ] ⟶ ν x ▶ s
red₅ : ∀ {x} →
ν x ▶ [] ⟶ ν x
red₆ : ∀ {s₁ s₂ x} →
s₁ ⟶ₛ s₂ →
x ▶ s₁ ⟶ x ▶ s₂
red₇ : ∀ {s t₁ t₂} →
(t₁ ∙ t₂) ▶ s ⟶ (t₁ ▶ s) ∙ (t₂ ▶ s)
red₈ : ∀ {s₁ s₂ t} →
(t ▶ s₁) ▶ s₂ ⟶ t ▶ (s₁ ● s₂)
infix 3 _⟶ₛ_
data _⟶ₛ_ : 𝕊 → 𝕊 → Set where
red₁ₛ : ∀ {s₀ s₁ t x} →
[ s₀ , x ≔ t ] ● s₁ ⟶ₛ [ s₀ ● s₁ , x ≔ t ▶ s₁ ]
red₂ₛ : ∀ {s₁ s₂ s₃} →
(s₁ ● s₂) ● s₃ ⟶ₛ s₁ ● (s₂ ● s₃)
red₃ₛ : ∀ {s} →
[] ● s ⟶ₛ s
-- The untyped evaluation to `whnf`, `_⟹_`, is inductively defined by:
infix 3 _⟹_
data _⟹_ : 𝕋 → 𝕋 → Set where
eval₁ : ∀ {t} {{_ : whnf t}} →
t ⟹ t
eval₂ : ∀ {t₁ t₂ t₃} →
t₁ ⟶ t₂ → t₂ ⟹ t₃ →
t₁ ⟹ t₃
-- It is easy to see that this relation is deterministic.
--
-- TODO: What to do about the above paragraph?
--
-- In order to define a deterministic reduction that gives a term on long η-normal form
-- we need to use its type. We define this typed reduction, `_⊢_↓_∷_`, simultaneously with `_⊢_↓ₛ_∷_` which
-- η-expands the arguments in an application on `whnf`:
mutual
infix 3 _⊢_↓_∷_
data _⊢_↓_∷_ : 𝒞 → 𝕋 → 𝕋 → 𝒯 → Set where
red₁ : ∀ {Γ t₀ t₂} →
Σ 𝕋 (λ t₁ → t₀ ⟹ t₁ × Γ ⊢ t₁ ↓ₛ t₂ ∷ •) →
Γ ⊢ t₀ ↓ t₂ ∷ •
red₂ : ∀ {Γ A B t₁ t₂} →
let z , φ = gensym Γ in
let instance _ = φ in
[ Γ , z ∷ A ] ⊢ t₁ ∙ ν z ↓ t₂ ∷ B →
Γ ⊢ t₁ ↓ ƛ z t₂ ∷ A ⊃ B
infix 3 _⊢_↓ₛ_∷_
data _⊢_↓ₛ_∷_ : 𝒞 → 𝕋 → 𝕋 → 𝒯 → Set where
red₁ₛ : ∀ {Γ A x} →
Γ ∋ x ∷ A →
Γ ⊢ ν x ↓ₛ ν x ∷ A
red₂ₛ : ∀ {Γ B t₁ t₂ t₁′ t₂′} →
Σ 𝒯 (λ A → Γ ⊢ t₁ ↓ₛ t₁′ ∷ A ⊃ B × Γ ⊢ t₂ ↓ t₂′ ∷ A) →
Γ ⊢ t₁ ∙ t₂ ↓ₛ t₁′ ∙ t₂′ ∷ B
-- Finally we define `Γ ⊢ t ⇓ t′ ∷ A` to hold if `Γ ⊢ t [] ↓ t′ ∷ A`.
_⊢_⇓_∷_ : 𝒞 → 𝕋 → 𝕋 → 𝒯 → Set
Γ ⊢ t ⇓ t′ ∷ A = Γ ⊢ t ▶ [] ↓ t′ ∷ A
-- 7.2. Equivalence between proof trees and terms
-- ----------------------------------------------
--
-- We can prove that if `M : Γ ⊢ A`, then `Γ ⊢ M ⁻ ⇓ nf M ⁻ ∷ A`. This we do by defining a
-- Kripke logical relation, `_ℛ_`. (…)
--
-- When `f : Γ ⊩ •` we intuitively have that `t ℛ f` holds if `Γ ⊢ t ↓ f ⁻`.
--
-- When `f : Γ ⊩ A ⊃ B`, then `t ℛ f` holds if for all `t′` and `a : Γ ⊩ A` such that `t′ ℛ a`, we
-- have that `t ∙ t′ ℛ f ⟦∙⟧ a`.
infix 3 _ℛ_
data _ℛ_ : ∀ {Γ A} → 𝕋 → Γ ⊩ A → Set where
𝓇• : ∀ {Δ} →
(t : 𝕋) (f : Δ ⊩ •) →
(∀ {Γ} →
(c : Γ ⊇ Δ) (t′ : 𝕋) → t′ 𝒟 f ⟦g⟧⟨ c ⟩ →
Γ ⊢ t ↓ t′ ∷ •) →
t ℛ f
𝓇⊃ : ∀ {Δ A B} →
(t : 𝕋) (f : Δ ⊩ A ⊃ B) →
(∀ {Γ} →
(c : Γ ⊇ Δ) (a : Γ ⊩ A) (t′ : 𝕋) → Γ ⊢ t′ ∷ A → t′ ℛ a →
t ∙ t′ ℛ f ⟦∙⟧⟨ c ⟩ a) →
t ℛ f
-- For the substitutions we define correspondingly:
infix 3 _ℛₛ_
data _ℛₛ_ : ∀ {Γ Δ} → 𝕊 → Γ ⊩⋆ Δ → Set where
𝓇ₛ[] : ∀ {Δ s} →
Δ ⋙ s ∷ [] →
s ℛₛ ([] {w = Δ})
-- NOTE: Mistake in paper? Changed `v : Δ ⊩ A` to `a : Γ ⊩ A`.
rₛ≔ : ∀ {Γ Δ A s x} {{_ : T (fresh x Γ)}} {{_ : T (fresh x Δ)}} →
Δ ⋙ s ∷ [ Γ , x ∷ A ] → (ρ : Γ ⊩⋆ Δ) (a : Γ ⊩ A) → s ℛₛ ρ → ν x ▶ s ℛ a →
s ℛₛ [ ρ , x ≔ a ]
-- The following lemmas are straightforward to prove:
postulate
aux₇₂₁ : ∀ {Γ A t₁ t₂} →
(a : Γ ⊩ A) → t₁ ℛ a → t₂ ⟶ t₁ →
t₂ ℛ a
postulate
aux₇₂₂ : ∀ {Γ Δ s₁ s₂} →
(ρ : Γ ⊩⋆ Δ) → Δ ⋙ s₁ ∷ Γ → s₁ ⟶ₛ s₂ → s₂ ℛₛ ρ →
s₁ ℛₛ ρ
-- NOTE: Mistake in paper? Changed `Occur(x, A, Γ)` to `Δ ∋ x ∷ A`.
postulate
aux₇₂₃ : ∀ {Γ Δ A s x} →
(ρ : Γ ⊩⋆ Δ) (i : Δ ∋ x ∷ A) → Δ ⋙ s ∷ Γ →
ν x ▶ s ℛ lookup ρ i
postulate
aux₇₂₄⟨_⟩ : ∀ {Γ Δ A t} →
(c : Γ ⊇ Δ) (a : Δ ⊩ A) → t ℛ a →
t ℛ ↑⟨ c ⟩ a
-- NOTE: Mistake in paper? Changed `ρ ∈ Γ ⊩ Δ` to `ρ : Δ ⊩⋆ Γ`.
postulate
aux₇₂₅⟨_⟩ : ∀ {Γ Δ Θ s} →
(c : Θ ⊇ Δ) → Δ ⋙ s ∷ Γ → (ρ : Δ ⊩⋆ Γ) → s ℛₛ ρ →
s ℛₛ ↑⟨ c ⟩ ρ
-- NOTE: Mistake in paper? Changed `ρ ∈ Γ ⊩ Δ` to `ρ : Δ ⊩⋆ Γ`.
postulate
aux₇₂₆⟨_⟩ : ∀ {Γ Δ Θ s} →
(c : Γ ⊇ Θ) → Δ ⋙ s ∷ Γ → (ρ : Δ ⊩⋆ Γ) → s ℛₛ ρ →
s ℛₛ ↓⟨ c ⟩ ρ
postulate
aux₇₂₇ : ∀ {Γ Δ A s t x} →
Γ ⊢ t ∷ A → Γ ⋙ s ∷ Δ → (ρ : Γ ⊩⋆ Δ) → s ℛₛ ρ →
[ s , x ≔ t ] ℛₛ ρ
-- Using these lemmas we can prove by mutual induction on the proof tree of terms and
-- substitutions that:
-- NOTE: Mistake in paper? Changed `ρ ∈ Γ ⊩ Δ` to `ρ : Δ ⊩⋆ Γ`.
postulate
aux₇₂₈ : ∀ {Γ Δ A s t} →
(M : Γ ⊢ A) (ρ : Δ ⊩⋆ Γ) → Δ ⋙ s ∷ Γ → t 𝒟 M → s ℛₛ ρ →
t ▶ s ℛ ⟦ M ⟧ ρ
postulate
aux₇₂₉ : ∀ {Γ Δ Θ s₁ s₂} →
(γ : Γ ⋙ Θ) (ρ : Δ ⊩⋆ Γ) → Δ ⋙ s₂ ∷ Γ → s₁ 𝒟ₛ γ → s₂ ℛₛ ρ →
s₂ ● s₁ ℛₛ ⟦ γ ⟧ₛ ρ
-- We also show, intuitively, that if `t ℛ a`, `a : Γ ⊩ A`, then `Γ ⊢ t ↓ reify a ⁻ ∷ A`
-- together with a corresponding lemma for `val`:
-- Lemma 17.
mutual
postulate
lem₁₇ : ∀ {Γ A t₀ t₁} →
Γ ⊢ t₀ ∷ A → (a : Γ ⊩ A) → t₀ ℛ a → t₁ 𝒟 reify a →
Γ ⊢ t₀ ↓ t₁ ∷ A
-- NOTE: Mistake in paper? Changed `t ℛ val(f)` to `t₀ ℛ val f`.
postulate
aux₇₂₁₀ : ∀ {Γ A t₀} →
Γ ⊢ t₀ ∷ A → whanf t₀ →
(f : ∀ {Δ} → (c : Δ ⊇ Γ) → Δ ⊢ A) →
(∀ {Δ} → (c : Δ ⊇ Γ) → Δ ⊢ t₀ ↓ₛ f c ⁻ ∷ A) →
t₀ ℛ val f
-- The proof that the translation of proof trees reduces to the translation of its normal form
-- follows directly:
-- Theorem 8.
postulate
thm₈ : ∀ {Γ A t} →
(M : Γ ⊢ A) → t 𝒟 M →
Γ ⊢ t ⇓ nf M ⁻ ∷ A
-- As a consequence we get that if two proof trees are decorations of the same term, then they
-- are convertible with each other:
-- Corollary 3.
postulate
cor₃ : ∀ {Γ A t} →
(M N : Γ ⊢ A) → t 𝒟 M → t 𝒟 N →
M ≅ N
-- Proof: By Theorem 8 we get that `Γ ⊢ t ⇓ nf M ⁻ ∷ A` and `Γ ⊢ t ⇓ nf N ⁻ ∷ A`. Since
-- the reduction is deterministic we get `nf M ⁻ ≡ nf N ⁻` and by Corollary 2 we get that
-- `M ≅ N`.
|
# Problem Set 3, Spring 2021, Villas-Boas
Due <u>Tuesday March 9, end of day Pacific Time</u>
Submit materials (Jupyter notebook with all code cells run) as one pdf on [Gradescope](https://www.gradescope.com/courses/226571).
# Exercise 1: The Value of Environmental Services
## Background
This exercise is based on the paper [Does Hazardous Waste Matter? Evidence from the Housing Market and the Superfund Program by Greenstone and Gallagher 2008](https://academic.oup.com/qje/article-abstract/123/3/951/1928203?redirectedFrom=fulltext). This
paper explores individuals’ willingness to pay (WTP) for environmental quality by observing the
impact of increasing environmental quality on housing prices. This method is known as the hedonic
valuation method, whereby the price of a good is determined by each of its attributes. In the case of a house, its value is determined by all of its physical characteristics (e.g., number of bedrooms) as well as the characteristics of the neighborhood in which it is located (including environmental quality).
For this paper, the change in environmental quality results from the cleanup of hazardous
waste sites. The study focuses on Superfund sites, areas designated by the US government as
contaminated by hazardous waste and that pose a hazard to environmental/human health. The
EPA placed certain Superfund sites on the National Priorities List (NPL), which meant that these
sites were legally required to undergo remediation. If individuals value environmental quality, then
housing prices should increase after nearby NPL sites are cleaned up. To determine the extent to
which individuals value the cleanup, we can compare housing close to a hazardous waste site that
was cleaned up to *comparable* homes near a similar waste site that was not cleaned up.
## Data
The data include observations on 447 census tracts that are within 2 miles of a hazardous waste site.$^1$ This includes census tracts where the site in question was on the NPL, and thus was legally required to be cleaned up, as well as those that were not on the NPL.
The shared dataset contains the following variables for each census tract in the year 2000.
|Variable Name | Description |
| :----------- | :----------------------- |
| _fips_ | Federal Information Processing Standards (FIPS) census tract identifier |
| _npl_ | binary indicator for whether the site in a given census tract was placed on the NPL|
| _lnmdvalhs_ | log median housing value $^2$ |
| *owner\_occupied* | % of housing that is owner occupied |
| *pop\_den* | Population density|
| *ba\_or\_better* | % of the population that has a Bachelors degree or higher |
| *unemprt* | Unemployment rate |
| *povrat* | Poverty rate |
| *bedrms1-bedrms5* | % of housing with the indicated number of bedrooms |
| *bedrms\_3orless* | % of housing with 3 bedrooms or less |
|*blt0\_10yrs* |% of housing $< 10$ years old |
$^1$ : Census tracts are statistical subdivisions of a county that are defined by the Census Bureau to allow comparisons from census to census.
$^2$ : Median Housing Value, *before logging*, is recorded in USD.
## Tips
* Do **NOT** install packages on the server. All packages that you need to complete the assignment are already installed and can be loaded with the `library()` function. Trying to install packages will create conflicts and potentially require reloading a fresh copy of the notebook.
* When submitting the notebook, do **NOT** include output of the entire dataset. This is a large dataset, and printing out all rows will take multiple pages of output and make it much harder for the GSIs to find your answers.
* `xtable` and `stargazer` are two great packages for making tables. `xtable` is great for turning a dataframe into a table output, while `stargazer` is great for making a table of your regression results.$^3$ I recommend exploring these packages when you are making your summary statistics table and regression output table in this problem set. The first half of Coding Bootcamp Part 5 goes over these two packages and is accessible [here](https://r.datahub.berkeley.edu/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fds-modules%2FENVECON-118&urlpath=tree%2FENVECON-118%2FSpring2021-J%2FSections%2FCoding+Bootcamps).
* All packages that you need for this problem set are already installed and ready for use on Datahub. **Do not try to install packages on the Datahub server**; if you want to use new packages in Datahub to solve the problem set inside the Problem Set 3 notebook, let any of the GSIs know so they can ask for those packages to be installed.
$^3$: `stargazer` is great for professional-looking LaTeX or postscript tables, but if you want to produce tables for your regression output in the notebook you will need to use `type = "html"` and then copy/paste the html output into a Markdown cell.
## Preamble
#### Use the below code cell to load all your packages (we will use `haven()` and `tidyverse()`).
```R
# Add your preamble code here
```
**1.** Load the data. This problem set will focus on census tracts that have more than 20% of homes built within the last ten years; that is, for $blt0\_10yrs > 0.2$.
So first open the original data (`Greenstone_Gallagher_PS3_2021.dta`) and create a subset of the data to be used in this problem set. Call this data frame in R $my\_dataPset3$. How many observations do you lose when you focus on this subset of the data?
(Hint: use `filter(data, condition)`)
```R
# Add any code for part 1 here.
```
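A minimal sketch of the hinted approach (assuming the `.dta` file sits in the notebook's working directory and variable names match the table above; adjust as needed):
```R
# Sketch only: load the Stata file with haven, then keep census tracts with
# more than 20% of homes built in the last ten years.
library(haven)
library(tidyverse)

data_ps3 <- read_dta("Greenstone_Gallagher_PS3_2021.dta")
my_dataPset3 <- filter(data_ps3, blt0_10yrs > 0.2)

# Observations lost by focusing on the subset:
nrow(data_ps3) - nrow(my_dataPset3)
```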
➡️ Type your written answer for part 1 here.
**2.** Briefly describe your $my\_dataPset3$ data set. Since these data use the impact of the NPL to estimate WTP for environmental services, you
**(a)** Want to look at the number of census tracts in both the NPL group and the non-NPL group (and the number in the sample overall),
**(b)** Create a variable that is the median value of housing price and call it $housep$, which is the median housing price in dollars (in levels, not logs) and report the average in npl=1 and in npl=0 groups, and
**(c)** compare the means of the following characteristics (covariates) of these census tracts across the two groups. By compare, we mean report them and discuss whether the sample averages are similar across groups, and why you might expect such a result (no need to test formally if means of these covariates are similar). The covariates you are to check are percent owner\_occupied, unemployment rate, and poverty rate.
(Hint: use `group_by()` and `summarise()` to obtain separate summary statistics according to the value of $npl$)
```R
# Add any code for part 2 here.
```
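A sketch of the hinted `group_by()`/`summarise()` pattern; it assumes `housep` is recovered by exponentiating the log value `lnmdvalhs`:
```R
# Sketch: median housing value in dollars, then group counts and means by NPL status.
my_dataPset3 <- mutate(my_dataPset3, housep = exp(lnmdvalhs))

my_dataPset3 %>%
  group_by(npl) %>%
  summarise(n_tracts = n(),
            mean_housep = mean(housep),
            mean_owner_occupied = mean(owner_occupied),
            mean_unemprt = mean(unemprt),
            mean_povrat = mean(povrat))
```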
➡️ Type your written answer for part 2 here.
**3.** We will now compare housing prices (in levels, not logs) across the two groups (group *npl* = 1 and
*npl* = 0) using the $my\_dataPset3$ sub dataset. Draw a histogram of the median housing price in
each group.
(Hint: see [Coding Bootcamp Part 4](https://r.datahub.berkeley.edu/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fds-modules%2FENVECON-118&urlpath=tree%2FENVECON-118%2FSpring2021-J%2FSections%2FCoding+Bootcamps) for how to do this using **ggplot2**. For base R use `hist(data$variable, main = "Title", xlab = "MedianHousingPrice", ylab = "Frequency")`)
```R
# Add any code for part 3 here.
```
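One possible sketch with **ggplot2**, one panel per group:
```R
# Sketch: histogram of housep for each NPL group.
ggplot(my_dataPset3, aes(x = housep)) +
  geom_histogram(bins = 30) +
  facet_wrap(~ npl) +
  labs(title = "Median housing value by NPL status",
       x = "MedianHousingPrice", y = "Frequency")
```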
**4.** Overlap both histograms into the same graph and comment on differences (be precise - and explain
why the differences intuitively make sense).
(Hint: see the Histograms section of [Coding Bootcamp Part 4](https://r.datahub.berkeley.edu/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fds-modules%2FENVECON-118&urlpath=tree%2FENVECON-118%2FSpring2021-J%2FSections%2FCoding+Bootcamps))
```R
# Add any code for part 4 here.
```
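A sketch of one way to overlay the two histograms, using fill color and transparency:
```R
# Sketch: overlay both groups in a single plot.
ggplot(my_dataPset3, aes(x = housep, fill = factor(npl))) +
  geom_histogram(bins = 30, alpha = 0.5, position = "identity") +
  labs(x = "MedianHousingPrice", y = "Frequency", fill = "npl")
```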
➡️ Type your written answer for part 4 here.
**Show work for all steps for Questions 5-7. You can use R to calculate the sample mean,
variance, and # obs, and perform arithmetic operations (or do it by hand). No credit will
be given if you use a canned confidence interval/hypothesis test function in R. Credit will
be lost if you do not clearly show your steps.**
**5.** Compute an estimate for the mean of the variable housep for the NPL group (npl=1) in the
$my\_dataPset3$ data frame. Construct a 90% confidence interval for this mean. Give an interpretation of these results in a sentence.
(Hint: use `mean()` and `sd()` to get the necessary information to construct the CI)
```R
# Add any code for part 5 here.
```
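A sketch of the by-hand construction, using only `mean()`, `sd()`, and arithmetic (1.645 is the two-sided 90% normal critical value):
```R
# Sketch: 90% CI for the mean of housep in the NPL group, built by hand.
npl1_housep <- filter(my_dataPset3, npl == 1)$housep
xbar <- mean(npl1_housep)
se   <- sd(npl1_housep) / sqrt(length(npl1_housep))
c(lower = xbar - 1.645 * se, estimate = xbar, upper = xbar + 1.645 * se)
```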
➡️ Type your written work for and answer to part 5 here.
**6.** Let $D$ be the difference in $housep$ between the NPL (npl=1) and non-NPL (npl=0) groups. State
an estimator $\hat D$
for $D$ and use the estimator to compute an estimate of $D$. Compute a standard
error for $\hat D$. Derive a 95% confidence interval for $D$ and interpret in one sentence.
```R
# Add any code for part 6 here.
```
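For reference, the usual difference-in-means formulas (group 1 = NPL tracts, group 0 = non-NPL tracts):
\begin{align*}
\hat D = \overline{housep}_{1} - \overline{housep}_{0}, \qquad
\widehat{SE}(\hat D) = \sqrt{\frac{s_1^2}{n_1} + \frac{s_0^2}{n_0}}, \qquad
\text{95\% CI: } \hat D \pm 1.96\,\widehat{SE}(\hat D)
\end{align*}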
➡️ Type your written work for and answer to part 6 here.
**7.** Using the $my\_dataPset3$ data frame, test whether the average of the housing values ($housep$) for
the NPL group is statistically different at the 10% significance level ($\alpha = 0.1$) from average housing values in the non-NPL group (that is, in terms of the hypothesis, the null is equal, and the alternative is not equal). (Recall
the 5-step procedure for hypothesis testing).
```R
# Add any code for part 7 here.
```
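Reminder of the test statistic for the 5-step procedure (at $\alpha = 0.1$, the two-sided normal critical value is 1.645):
\begin{align*}
t = \frac{\hat D - 0}{\widehat{SE}(\hat D)}, \qquad \text{reject } H_0\!: D = 0 \text{ if } |t| > 1.645
\end{align*}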
➡️ Type your written work for and answer to part 7 here.
```R
# insert code here
```
**8.** Now we want to see how adding covariates on the right hand side of our equation affects the coefficient on the treatment indicator $npl$. We run the following regressions using the data $my\_dataPset3$
in R:
\begin{align}
housep&= \beta_0+\beta_1 npl+ u & (1)\\
housep&= \beta_0+\beta_1 npl+ \beta_2 unemprt +u & (2)\\
housep&= \beta_0+\beta_1 npl+ \beta_2 unemprt + \beta_3 owner\_occupied +u &(3)
\end{align}
```R
# Add any code for part 8 here.
```
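A sketch of the three specifications with `lm()`; the `summary()` output reports both the coefficients and $R^2$:
```R
# Sketch: estimate equations (1)-(3) and watch how beta_1 and R^2 evolve.
reg1 <- lm(housep ~ npl, data = my_dataPset3)
reg2 <- lm(housep ~ npl + unemprt, data = my_dataPset3)
reg3 <- lm(housep ~ npl + unemprt + owner_occupied, data = my_dataPset3)
summary(reg1); summary(reg2); summary(reg3)
```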
**(a)** Interpret (SSS) the estimated coefficient, $\hat \beta_1$, that you obtain from estimating equation (1).
➡️ Type your written answer to 8 (a) here.
**(b)** Looking both at $R^2$ and the evolution of $\hat \beta_1$ as we add variables from equation (1) to (2) to (3),
1. Comment on which variable matters in explaining the outcome, and which is likely correlated with the variable $npl$ (go through equation by equation).
2. What does this tell you about how sites were selected to end up on the National Priorities List (that is, about the correlation between $npl$ and those additional variables you added)? (Hint: go through the OVB formula, recalled below the list.)
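For reference, the omitted variable bias formula from the hint, written for a short regression of $housep$ on $npl$ that omits a single covariate $x_2$ (e.g., $unemprt$):
\begin{align*}
E[\hat \beta_1^{short}] = \beta_1 + \beta_2 \frac{Cov(npl, x_2)}{Var(npl)}
\end{align*}
The direction in which $\hat \beta_1$ moves when $x_2$ is added therefore reveals the sign of the correlation between $npl$ and $x_2$.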
➡️ Type your written answer to 8 (b) here.
**9.** If you estimate
\begin{align*}housep= \beta_0+\beta_1 npl+ u\end{align*}
using the complete data set (not the subset $my\_dataPset3$), what happens to the standard errors of the coefficient of $npl$? Explain briefly why that is.
➡️ Type your written answer to 9 here.
# Exercise 2: Attitudes toward the COVID Vaccines
*Note:* this question does not require R. If you do use R, you must show all steps used to calculate the relevant formulas from lecture. No credit will be given if a canned routine is used. Credit will be lost if values are given and work is not shown.
The Institute of Global Health Innovation (Imperial College London) released a report about Global Attitudes towards a COVID-19 vaccine. The survey covers 15 countries and was implemented at different periods in time, from November 2020 to mid January of 2021. The total number of responses was around 13,500.
In one of the questions, respondents were asked whether they strongly agreed (a 5 on the scale), agreed (a 4), neither agreed nor disagreed (a 3), disagreed (a 2), or strongly disagreed (a 1) with the statement:$^4$
"To what extent do you agree or disagree that if a COVID-19 vaccine were made available to you this week, you would defnitely get it?"
$^4$: *Aggregate view of latest week available for each country - see page 13 for exact survey dates:* [Link to Nature Article](https://www.nature.com/articles/d41586-021-00368-6)
In row 1 below are the percentage and number of respondents that agree or strongly agree with the statement, over the whole sample for all countries. Below that first row, we also report results broken down for the United Kingdom (UK) for two periods during which the survey was conducted (Nov 05-15,2020; and then Jan 11-17, 2021) in that country.
| | % Responding "agree/strongly agree"| Total Number of Respondents|
|--|--|--|
|All Countries and Periods of Survey | 54% | 13,500 |
| UK: Nov 5-15, 2020 | 55% | 1,005|
| UK: Jan 11-17, 2021 | 80% | 1,000|
Consider first the overall result (all countries/periods). Let $p$ be the fraction of individuals in these 15 countries who approve (agree or strongly agree).
**1.** Use the survey results to estimate $p$. Also estimate the standard error of your estimate.
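Reminder of the relevant formulas (with $n = 13{,}500$ and $\hat p$ the sample share agreeing):
\begin{align*}
\hat p = 0.54, \qquad \widehat{SE}(\hat p) = \sqrt{\frac{\hat p (1 - \hat p)}{n}}
\end{align*}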
➡️ Type your written work for and answer to Part 1 here.
**2.** Construct a 95% confidence interval for $p$. Interpret.
➡️ Type your written work for and answer to Part 2 here.
**3.** Construct a 90% confidence interval for $p$. Is it larger or narrower than the 95% confidence interval?
Why?
➡️ Type your written work for and answer to Part 3 here.
**4.** Is there statistical evidence that more than 50% of UK respondents in November 2020 agreed that they would
get the vaccine if given to them? Use the 5 steps for hypothesis testing with a 1% significance level.
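Setup reminder: with $H_0\!: p = 0.5$ against $H_1\!: p > 0.5$ and $n = 1005$, the test statistic is
\begin{align*}
z = \frac{\hat p - 0.5}{\sqrt{0.5(1 - 0.5)/n}},
\end{align*}
and $H_0$ is rejected at the 1% significance level (one-sided) if $z > 2.326$.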
➡️ Type your written work for and answer to Part 4 here.
**5.** Is there statistical evidence that agreement with taking the vaccine in the UK increased in 2021
relative to November 2020 at the 5% significance level? Explain (to answer this question use the 5
steps for hypothesis testing).
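Setup reminder: writing $\hat p_{Jan}$ and $\hat p_{Nov}$ for the two UK sample shares (with $n_{Jan} = 1000$ and $n_{Nov} = 1005$), the one-sided test of no increase uses
\begin{align*}
z = \frac{\hat p_{Jan} - \hat p_{Nov}}{\sqrt{\frac{\hat p_{Jan}(1 - \hat p_{Jan})}{n_{Jan}} + \frac{\hat p_{Nov}(1 - \hat p_{Nov})}{n_{Nov}}}},
\end{align*}
with rejection at the 5% level if $z > 1.645$.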
➡️ Type your written work for and answer to Part 5 here.
|
lemma dvd_iff_poly_eq_0: "[:c, 1:] dvd p \<longleftrightarrow> poly p (- c) = 0" for c :: "'a::comm_ring_1" |
% !TEX root = ../main.tex
\chapter{Preface}% (fold)
\label{chp:preface}
The goal of a \gls{library} is a noble one: it is a way to make the knowledge someone has available to everyone. This applies to libraries that hold books, which have been an important means of sharing knowledge through the ages, but it might be even more true for programming libraries.
By encapsulating knowledge in a \gls{library}, not everyone needs to acquire niche expertise or learn how to execute tedious operations. But it goes further than that, because in the other direction it is also very important that libraries don't constrict users in their intentions.
The challenge when building libraries that will be used in other libraries is finding a balance: abstracting away the difficult parts and exposing a simple way to tweak things, while also exposing an \acrfull{api} that allows \emph{anyone} who wants to dig further to get into the nitty-gritty of the \gls{library}, find out why something works the way it does, and change it if needed.
A \gls{library} is not written only for the developer who will consume it; it is also written for that developer's users. This creates a deep relationship between someone who depends on a \gls{library} and its maintainer.
Almost all recent innovations in front-end and web programming have been explored in open source solutions. That means a lot can be learned just by looking at how other people solve a certain problem. A mix of different approaches can be created without coming up with each of the ideas from scratch.
As Isaac Newton\cite{newton-giants}, borrowing from Bernard of Chartres\cite{quote-giants-source}, put it far better than I could: ``If I have seen further, it is by standing on the shoulders of giants''. Working together as a community of developers and engineers, the possibilities are nearly endless. I strongly believe that everything we do --- regardless of how small or big it is --- has value in being shared with everyone.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 10 14:33:19 2019
@author: ziskin
"""
from PW_paths import work_yuval
from pathlib import Path
cwd = Path().cwd()
# TODO: build curve fit tool with various function model: power, sum of sin ...
# TODO: no need to build it, use lmfit instead:
# TODO: check if lmfit accepts- datetimeindex, xarrays and NaNs.
# TODO: if not, build func to replace datetimeindex to numbers and vise versa
# def high_sample_and_smooth_dataframe_time_series(df):
# import pandas as pd
# dfs = df.copy()
# dfs.index = pd.to_timedelta(dfs.index, unit='d')
# dfs = dfs.resample('15S').interpolate(method='cubic').T.mean().resample('5T').mean()
# better = better.reset_index(drop=True)
# better.index = np.arange(-days_prior, days_after, 1/pts_per_day)
month_to_doy_dict = {1: 1, 2: 32, 3: 61, 4: 92, 5: 122,
6: 153, 7: 183, 8: 214, 9: 245, 10: 275, 11: 306, 12: 336}
def replace_char_at_string_position(string, char='s', pos=3):
if pos != -1:
string = string[:pos] + char + string[pos+1:]
else:
string = string[:pos] + char
return string
def read_converted_G0_stations(path=cwd):
import pandas as pd
df = pd.read_excel(path/'G0-converted.xlsx', skiprows=11)
return df
def fill_na_xarray_time_series_with_its_group(xarray, grp='month', time_dim='time',
smooth=True, window=11, order=3,
plot=False):
""" fill the NaNs of a Dataset or DataArray with mean grp cycle
(hourly, monthly, etc) and smooth using savgol filter"""
from scipy.signal import savgol_filter
import xarray as xr
def fill_na_dataarray(da, grp=grp, time_dim=time_dim, smooth=smooth,
window=window, order=order, plot=plot):
print('selected {}ly NaN filling for {}.'.format(grp, da.name))
da_old = da.copy()
mean_signal = da.groupby('{}.{}'.format(time_dim, grp)).mean()
da = da.groupby('{}.{}'.format(time_dim, grp)).fillna(mean_signal)
da = da.reset_coords(drop=True)
if smooth:
print('smoothing.')
da = da.copy(data=savgol_filter(da, window, order))
da.attrs['smoothing'] = 'savgol filter, window {}, order {}.'.format(window, order)
da.attrs['NaN filling'] = 'mean {}ly values'.format(grp)
if plot:
da.plot()
da_old.plot()
return da
if isinstance(xarray, xr.DataArray):
xarray = fill_na_dataarray(xarray)
elif isinstance(xarray, xr.Dataset):
dal = []
attrs = xarray.attrs
for da in xarray:
dal.append(fill_na_dataarray(xarray[da]))
xarray = xr.merge(dal)
xarray.attrs = attrs
return xarray
def replace_xarray_time_series_with_its_group(da, grp='month', time_dim='time'):
"""run the same func on each dim in da"""
import xarray as xr
dims = [x for x in da.dims if time_dim not in x]
if len(dims) == 0:
# no other dim except time:
da = replace_time_series_with_its_group(da, grp=grp)
return da
dims_attrs = [da[x].attrs for x in dims]
dims_attrs_dict = dict(zip(dims, dims_attrs))
if len(dims) == 1:
dim0_list = []
for dim0 in da[dims[0]]:
da0 = da.sel({dims[0]: dim0})
da0 = replace_time_series_with_its_group(da0, grp=grp)
dim0_list.append(da0)
da_transformed = xr.concat(dim0_list, dims[0])
da_transformed[dims[0]] = da[dims[0]]
da_transformed.attrs[dims[0]] = dims_attrs_dict.get(dims[0])
elif len(dims) == 2:
dim0_list = []
for dim0 in da[dims[0]]:
dim1_list = []
for dim1 in da[dims[1]]:
da0 = da.sel({dims[0]: dim0, dims[1]: dim1})
da0 = replace_time_series_with_its_group(da0, grp=grp)
dim1_list.append(da0)
dim0_list.append(xr.concat(dim1_list, dims[1]))
da_transformed = xr.concat(dim0_list, dims[0])
da_transformed[dims[0]] = da[dims[0]]
da_transformed[dims[1]] = da[dims[1]]
da_transformed.attrs[dims[0]] = dims_attrs_dict.get(dims[0])
da_transformed.attrs[dims[1]] = dims_attrs_dict.get(dims[1])
elif len(dims) == 3:
dim0_list = []
for dim0 in da[dims[0]]:
dim1_list = []
for dim1 in da[dims[1]]:
dim2_list = []
for dim2 in da[dims[2]]:
da0 = da.sel({dims[0]: dim0, dims[1]: dim1, dims[2]: dim2})
da0 = replace_time_series_with_its_group(da0, grp=grp)
dim2_list.append(da0)
dim1_list.append(xr.concat(dim2_list, dims[2]))
dim0_list.append(xr.concat(dim1_list, dims[1]))
da_transformed = xr.concat(dim0_list, dims[0])
da_transformed[dims[0]] = da[dims[0]]
da_transformed[dims[1]] = da[dims[1]]
da_transformed[dims[2]] = da[dims[2]]
da_transformed.attrs[dims[0]] = dims_attrs_dict.get(dims[0])
da_transformed.attrs[dims[1]] = dims_attrs_dict.get(dims[1])
da_transformed.attrs[dims[2]] = dims_attrs_dict.get(dims[2])
return da_transformed
def replace_time_series_with_its_group(da_ts, grp='month'):
""" replace an xarray time series with its mean grouping e.g., time.month,
    time.dayofyear, time.hour etc., basically implementing the .transform method
on 1D dataarray, index must be datetime"""
import xarray as xr
import pandas as pd
da_ts = da_ts.reset_coords(drop=True)
attrs = da_ts.attrs
df = da_ts.to_dataframe(da_ts.name)
if grp == 'month':
grp_ind = df.index.month
elif grp == 'hour':
grp_ind = df.index.hour
df = df.groupby(grp_ind).transform('mean')
ds = df.to_xarray()
da = ds[da_ts.name]
da.attrs = attrs
return da
def read_ims_api_token():
from PW_paths import home_path
with open(home_path / '.imsapi') as fp:
token = fp.readlines()[0].strip('\n')
return token
def calculate_gradient(f, lat_dim='latitude', lon_dim='longitude',
level_dim='level', time_dim='time', savepath=None):
from metpy.calc import lat_lon_grid_deltas
from metpy.calc import gradient
from aux_gps import save_ncfile
import xarray as xr
name = f.name
dx, dy = lat_lon_grid_deltas(f[lon_dim], f[lat_dim])
# f = f.transpose(..., lat_dim, lon_dim)
# fy, fx = gradient(f, deltas=(dy, dx))
if level_dim in f.dims and time_dim in f.dims:
min_year = f[time_dim].dt.year.min().item()
max_year = f[time_dim].dt.year.max().item()
level_cnt = f[level_dim].size
label = '{}_{}-{}.nc'.format(level_cnt, min_year, max_year)
times = []
for time in f[time_dim]:
print('{}-{}'.format(time[time_dim].dt.month.item(), time[time_dim].dt.year.item()))
levels = []
for level in f[level_dim]:
ftl = f.sel({time_dim: time, level_dim: level})
fy, fx = gradient(ftl, deltas=(dy, dx))
fx_da = xr.DataArray(fx.magnitude, dims=[lat_dim, lon_dim])
fx_da.name = '{}x'.format(name)
fy_da = xr.DataArray(fy.magnitude, dims=[lat_dim, lon_dim])
fy_da.name = '{}y'.format(name)
fx_da.attrs['units'] = fx.units.format_babel()
fy_da.attrs['units'] = fy.units.format_babel()
grad = xr.merge([fx_da, fy_da])
levels.append(grad)
times.append(xr.concat(levels, level_dim))
ds = xr.concat(times, time_dim)
ds[level_dim] = f[level_dim]
ds[time_dim] = f[time_dim]
ds[lat_dim] = f[lat_dim]
ds[lon_dim] = f[lon_dim]
else:
if level_dim in f.dims:
level_cnt = f[level_dim].size
label = '{}.nc'.format(level_cnt)
levels = []
for level in f[level_dim]:
fl = f.sel({level_dim: level})
fy, fx = gradient(fl, deltas=(dy, dx))
fx_da = xr.DataArray(fx.magnitude, dims=[lat_dim, lon_dim])
fx_da.name = '{}x'.format(name)
fy_da = xr.DataArray(fy.magnitude, dims=[lat_dim, lon_dim])
fy_da.name = '{}y'.format(name)
fx_da.attrs['units'] = fx.units.format_babel()
fy_da.attrs['units'] = fy.units.format_babel()
grad = xr.merge([fx_da, fy_da])
levels.append(grad)
            ds = xr.concat(levels, level_dim)  # use 'ds' so the shared save/return code below works
            ds[level_dim] = f[level_dim]
        elif time_dim in f.dims:
            min_year = f[time_dim].dt.year.min().item()
            max_year = f[time_dim].dt.year.max().item()
            # define the filename label here; without it, saving below raises NameError
            label = '{}-{}.nc'.format(min_year, max_year)
times = []
for time in f[time_dim]:
ft = f.sel({time_dim: time})
fy, fx = gradient(ft, deltas=(dy, dx))
fx_da = xr.DataArray(fx.magnitude, dims=[lat_dim, lon_dim])
fx_da.name = '{}x'.format(name)
fy_da = xr.DataArray(fy.magnitude, dims=[lat_dim, lon_dim])
fy_da.name = '{}y'.format(name)
fx_da.attrs['units'] = fx.units.format_babel()
fy_da.attrs['units'] = fy.units.format_babel()
grad = xr.merge([fx_da, fy_da])
times.append(grad)
ds = xr.concat(times, time_dim)
ds[time_dim] = f[time_dim]
ds[lat_dim] = f[lat_dim]
ds[lon_dim] = f[lon_dim]
if savepath is not None:
filename = '{}_grad_{}'.format(f.name, label)
save_ncfile(ds, savepath, filename)
return ds
def calculate_divergence(u, v, lat_dim='latitude', lon_dim='longitude',
level_dim='level', time_dim='time', savepath=None):
from metpy.calc import divergence
from metpy.calc import lat_lon_grid_deltas
from aux_gps import save_ncfile
import xarray as xr
dx, dy = lat_lon_grid_deltas(u[lon_dim], u[lat_dim])
u = u.transpose(..., lat_dim, lon_dim)
v = v.transpose(..., lat_dim, lon_dim)
if level_dim in u.dims and time_dim in u.dims:
min_year = u[time_dim].dt.year.min().item()
max_year = u[time_dim].dt.year.max().item()
level_cnt = u[level_dim].size
label = '{}_{}-{}.nc'.format(level_cnt, min_year, max_year)
times = []
for time in u[time_dim]:
print('{}-{}'.format(time[time_dim].dt.month.item(), time[time_dim].dt.year.item()))
levels = []
for level in u[level_dim]:
utl = u.sel({time_dim: time, level_dim: level})
vtl = v.sel({time_dim: time, level_dim: level})
div = divergence(utl, vtl, dx=dx, dy=dy)
div_da = xr.DataArray(div.magnitude, dims=[lat_dim, lon_dim])
div_da.attrs['units'] = div.units.format_babel()
levels.append(div_da)
times.append(xr.concat(levels, level_dim))
da = xr.concat(times, time_dim)
da[level_dim] = u[level_dim]
da[time_dim] = u[time_dim]
da[lat_dim] = u[lat_dim]
da[lon_dim] = u[lon_dim]
da.name = '{}{}_div'.format(u.name, v.name)
else:
if level_dim in u.dims:
level_cnt = u[level_dim].size
label = '{}.nc'.format(level_cnt)
levels = []
for level in u[level_dim]:
ul = u.sel({level_dim: level})
vl = v.sel({level_dim: level})
div = divergence(ul, vl, dx=dx, dy=dy)
div_da = xr.DataArray(div.magnitude, dims=[lat_dim, lon_dim])
div_da.attrs['units'] = div.units.format_babel()
levels.append(div_da)
da = xr.concat(levels, level_dim)
da[level_dim] = u[level_dim]
        elif time_dim in u.dims:
            min_year = u[time_dim].dt.year.min().item()
            max_year = u[time_dim].dt.year.max().item()
            # define the filename label here; without it, saving below raises NameError
            label = '{}-{}.nc'.format(min_year, max_year)
times = []
for time in u[time_dim]:
ut = u.sel({time_dim: time})
vt = v.sel({time_dim: time})
div = divergence(ut, vt, dx=dx, dy=dy)
div_da = xr.DataArray(div.magnitude, dims=[lat_dim, lon_dim])
div_da.attrs['units'] = div.units.format_babel()
times.append(div_da)
da = xr.concat(times, time_dim)
da[time_dim] = u[time_dim]
da[lat_dim] = u[lat_dim]
da[lon_dim] = u[lon_dim]
da.name = '{}{}_div'.format(u.name, v.name)
if savepath is not None:
filename = '{}{}_div_{}'.format(u.name, v.name, label)
save_ncfile(da, savepath, filename)
return da
def calculate_pressure_integral(da, pdim='level'):
import numpy as np
# first sort to decending levels:
da = da.sortby(pdim, ascending=False)
try:
units = da[pdim].attrs['units']
except KeyError:
print('no units attrs found, assuming units are hPa')
units = 'hPa'
# transform to Pa:
if units != 'Pa':
print('{} units detected, converting to Pa!'.format(units))
da[pdim] = da[pdim] * 100
# P_{i+1} - P_i:
plevel_diff = np.abs(da[pdim].diff(pdim, label='lower'))
# var_i + var_{i+1}:
    da_sum = da.shift({pdim: -1}) + da  # shift along pdim rather than a hard-coded 'level' dim
p_int = ((da_sum * plevel_diff) / 2.0).sum(pdim)
return p_int
def linear_fit_using_scipy_da_ts(da_ts, model='TSEN', slope_factor=3650.25,
plot=False, ax=None, units=None,
method='simple', weights=None, not_time=False):
"""linear fit using scipy for dataarray time series,
    support for theilslopes (TSEN) and linregress (LR), produces a 95% CI"""
import xarray as xr
from scipy.stats.mstats import theilslopes
from scipy.stats import linregress
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import numpy as np
time_dim = list(set(da_ts.dims))[0]
y = da_ts.dropna(time_dim).values
if not_time:
X = da_ts[time_dim].values.reshape(-1, 1)
jul_no_nans = da_ts.dropna(time_dim)[time_dim].values
# jul_no_nans -= np.median(jul_no_nans)
jul = da_ts[time_dim].values
# jul -= np.median(jul)
else:
jul, jul_no_nans = get_julian_dates_from_da(da_ts, subtract='median')
X = jul_no_nans.reshape(-1, 1)
if model == 'LR':
if method == 'simple':
coef, intercept, r_value, p_value, std_err = linregress(jul_no_nans, y)
confidence_interval = 1.96 * std_err
coef_lo = coef - confidence_interval
coef_hi = coef + confidence_interval
elif method == 'curve_fit':
func = lambda x, a, b: a * x + b
if weights is not None:
sigma = weights.dropna(time_dim).values
else:
sigma = None
best_fit_ab, covar = curve_fit(func, jul_no_nans, y,
sigma=sigma, p0=[0, 0],
absolute_sigma = False)
sigma_ab = np.sqrt(np.diagonal(covar))
coef = best_fit_ab[0]
intercept = best_fit_ab[1]
coef_lo = coef - sigma_ab[0]
coef_hi = coef + sigma_ab[0]
elif model == 'TSEN':
coef, intercept, coef_lo, coef_hi = theilslopes(y, X)
predict = jul * coef + intercept
predict_lo = jul * coef_lo + intercept
predict_hi = jul * coef_hi + intercept
trend_hi = xr.DataArray(predict_hi, dims=[time_dim])
trend_hi.name = 'trend_hi'
trend_lo = xr.DataArray(predict_lo, dims=[time_dim])
trend_lo.name = 'trend_lo'
trend_hi[time_dim] = da_ts[time_dim]
trend_lo[time_dim] = da_ts[time_dim]
slope_in_factor_scale_lo = coef_lo * slope_factor
slope_in_factor_scale_hi = coef_hi * slope_factor
trend = xr.DataArray(predict, dims=[time_dim])
trend.name = 'trend'
trend[time_dim] = da_ts[time_dim]
slope_in_factor_scale = coef * slope_factor
if plot:
labels = ['{}'.format(da_ts.name)]
if ax is None:
fig, ax = plt.subplots()
origln = da_ts.plot.line('k-', marker='o', ax=ax, linewidth=1.5, markersize=2.5)
trendln = trend.plot(ax=ax, color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax, linewidth=1.5)
trend_lo.plot.line('r--', ax=ax, linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) {}'.format(model, slope_in_factor_scale, slope_in_factor_scale_lo, slope_in_factor_scale_hi, units)
handles = origln
handles += trendln
labels.append(trend_label)
ax.legend(handles=handles, labels=labels, loc='upper left')
ax.grid()
trend_ds = xr.merge([trend, trend_hi, trend_lo])
results_dict = {'slope_hi': slope_in_factor_scale_hi, 'slope_lo': slope_in_factor_scale_lo, 'slope': slope_in_factor_scale}
results_dict['intercept'] = intercept
return trend_ds, results_dict
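# Hypothetical usage sketch for linear_fit_using_scipy_da_ts (synthetic
# monthly series; all names are illustrative): a slope of 0.01 units/day
# should be reported as ~0.01 * slope_factor units per decade.
def _example_linear_fit():
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range('2000-01-01', periods=120, freq='MS')
    jul = (time - time[0]).days.values
    da = xr.DataArray(0.01 * jul + 0.1 * np.random.randn(120),
                      dims=['time'], coords={'time': time})
    da.name = 'demo'
    trend_ds, results = linear_fit_using_scipy_da_ts(da, model='TSEN',
                                                     plot=False)
    print(results['slope'])  # ~36.5 (0.01 * 3650.25) units per decade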
def scatter_plot_and_fit(df, x, y, color='b', ax=None):
import matplotlib.pyplot as plt
import seaborn as sns
if ax is None:
fig, ax = plt.subplots()
sns.scatterplot(x=x, y=y, data=df, ax=ax, color=color, s=10)
# NOTE: despite the name, only the scatter is drawn here (no fit yet):
return ax
def linear_regression_scikit_learn(da1, da2, same_dim='time'):
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
import numpy as np
from aux_gps import dim_intersection
shared = dim_intersection([da1, da2])
da1 = da1.sel({same_dim: shared})
da2 = da2.sel({same_dim: shared})
X = da1.dropna(same_dim).values.reshape(-1, 1)
y = da2.dropna(same_dim).values
lr = LinearRegression()
lr.fit(X, y)
slope = lr.coef_[0]
inter = lr.intercept_
pred = lr.predict(X)
rmse = mean_squared_error(y, pred, squared=False)
resid = pred - y
mean = np.mean(resid)  # mean residual (bias), not the sum
return slope, inter, mean, rmse
def split_equal_da_ts_around_datetime(da_ts, dt='2014-05-01'):
time_dim = list(set(da_ts.dims))[0]
x1 = da_ts.dropna(time_dim).sel({time_dim: slice(None, dt)})
x2 = da_ts.dropna(time_dim).sel({time_dim: slice(dt, None)})
if x1.size == 0 or x2.size == 0:
raise ValueError('one or both of the sub-series have size 0.')
if x1.size > x2.size:
x1 = x1.isel({time_dim: slice(-x2.size, None)})
elif x1.size < x2.size:
x2 = x2.isel({time_dim: slice(0, x1.size)})
return x1, x2
def wilcoxon_rank_test_xr(
da_ts, alpha=0.05,
cp_dt='2014-05-01',
zero_method='wilcox',
correction=False,
alternative='two-sided',
mode='auto'):
import xarray as xr
from scipy.stats import wilcoxon
x, y = split_equal_da_ts_around_datetime(da_ts, dt=cp_dt)
stat, pvalue = wilcoxon(x, y, zero_method=zero_method,
correction=correction, alternative=alternative
)
if pvalue < alpha:
# the two parts of the time series come from different distributions
print('Two distributions!')
normal = False
else:
# same distribution
print('Same distribution')
normal = True
da = xr.DataArray([stat, pvalue, normal], dims=['result'])
da['result'] = ['stat', 'pvalue', 'h']
return da
def normality_test_xr(da_ts, sample=None, alpha=0.05, test='lili',
dropna=True, verbose=True):
"""normality tests on da_ts"""
from statsmodels.stats.diagnostic import lilliefors
from scipy.stats import shapiro
from scipy.stats import normaltest
import xarray as xr
time_dim = list(set(da_ts.dims))[0]
if sample is not None:
da_ts = da_ts.resample({time_dim: sample}).mean()
if dropna:
da_ts = da_ts.dropna(time_dim)
if test == 'shapiro':
stat, pvalue = shapiro(da_ts)
elif test == 'lili':
stat, pvalue = lilliefors(da_ts, dist='norm', pvalmethod='table')
elif test == 'normaltest':
stat, pvalue = normaltest(da_ts)
if pvalue < alpha:
Not = 'NOT'
normal = False
else:
Not = ''
normal = True
if verbose:
print('Statistic: {:.4f}, pvalue: {:.4f}'.format(stat, pvalue))
print('Thus, the data is {} Normally distributed with alpha {}'.format(Not, alpha))
da = xr.DataArray([stat, pvalue, normal], dims=['result'])
da['result'] = ['stat', 'pvalue', 'h']
return da
def homogeneity_test_xr(da_ts, hg_test_func, dropna=True, alpha=0.05,
sim=None, verbose=True):
"""False means data is homogenous, True means non-homogenous with significance alpha"""
import xarray as xr
import pandas as pd
time_dim = list(set(da_ts.dims))[0]
if dropna:
da_ts = da_ts.dropna(time_dim)
# run the test once; the named-tuple result carries all the fields:
result = hg_test_func(da_ts, alpha=alpha, sim=sim)
name = type(result).__name__
if verbose:
print('running homogeneity {} with alpha {} and sim {}'.format(name, alpha, sim))
cpl = pd.to_datetime(da_ts.isel({time_dim: result.cp})[time_dim].values)
if 'U' in result._fields:
stat = result.U
elif 'T' in result._fields:
stat = result.T
elif 'Q' in result._fields:
stat = result.Q
elif 'R' in result._fields:
stat = result.R
elif 'V' in result._fields:
stat = result.V
da = xr.DataArray([name, result.h, cpl, result.p, stat, result.avg], dims=['results'])
da['results'] = ['name', 'h', 'cp_dt', 'pvalue', 'stat', 'means']
return da
def VN_ratio_trend_test_xr(da_ts, dropna=True, alpha=0.05, loadpath=work_yuval,
verbose=True, return_just_trend=False):
"""calculate the Von Nuemann ratio test statistic and test for trend."""
import xarray as xr
time_dim = list(set(da_ts.dims))[0]
if dropna:
da_ts = da_ts.dropna(time_dim)
n = da_ts.dropna(time_dim).size
d2 = (da_ts.diff(time_dim)**2.0).sum() / (n - 1)
# s**2 is the variance:
s2 = da_ts.var()
eta = (d2 / s2).item()
cv_da = xr.load_dataarray(loadpath / 'VN_critical_values.nc')
cv = cv_da.sel(sample_size=n, pvalue=alpha, method='nearest').item()
if eta < cv:
if verbose:
print('the hypothesis of randomness is rejected at the {} level, i.e., a trend is likely'.format(alpha))
trend = True
else:
trend = False
if return_just_trend:
return trend
else:
da = xr.DataArray([eta, cv, trend, n], dims=['results'])
da['results'] = ['eta', 'cv', 'trend', 'n']
return da
def reduce_tail_xr(xarray, reduce='mean', time_dim='time', records=120,
return_df=False):
import xarray as xr
def reduce_tail_da(da, reduce=reduce, time_dim=time_dim, records=records):
if reduce == 'mean':
da = da.dropna(time_dim).tail(records).mean(time_dim)
return da
if isinstance(xarray, xr.DataArray):
xarray = reduce_tail_da(xarray, reduce, time_dim, records)
elif isinstance(xarray, xr.Dataset):
xarray = xarray.map(reduce_tail_da, args=(reduce, time_dim, records))
if return_df:
df = xarray.to_array('dum').to_dataframe(reduce)
df.index.name = ''
return df
return xarray
def decimal_year_to_datetime(decimalyear):
from datetime import datetime, timedelta
import pandas as pd
year = int(decimalyear)
rem = decimalyear - year
base = datetime(year, 1, 1)
result = base + timedelta(seconds=(base.replace(year=base.year + 1) - base).total_seconds() * rem)
return pd.to_datetime(result)
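# Quick sanity check for decimal_year_to_datetime (illustrative values):
def _example_decimal_year():
    print(decimal_year_to_datetime(2000.0))  # 2000-01-01 00:00:00
    print(decimal_year_to_datetime(2000.5))  # ~2000-07-02 (mid leap year)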
def select_months(da_ts, months, remove=False, reindex=True):
import xarray as xr
from aux_gps import xr_reindex_with_date_range
import pandas as pd
import numpy as np
time_dim = list(set(da_ts.dims))[0]
attrs = da_ts.attrs
try:
name = da_ts.name
except AttributeError:
name = ''
if remove:
all_months = np.arange(1, 13)
months = list(set(all_months).difference(set(months)))
print('selecting months #{} from {}'.format(', #'.join([str(x) for x in months]), name))
to_add = []
for month in months:
sliced = da_ts.sel({time_dim: da_ts['{}.month'.format(time_dim)] == int(month)})
to_add.append(sliced)
da = xr.concat(to_add, time_dim)
da.attrs = attrs
if reindex:
freq = pd.infer_freq(da_ts[time_dim].values)
da = xr_reindex_with_date_range(da, freq=freq)
return da
def run_MLR_harmonics(harmonic_dss, season=None, n_max=4,
plot=True, cunits='cpd',
ax=None, legend_loc=None, ncol=1,
legsize=8, lw=1, legend_S_only=False):
""" change cunits to 'cpy' to process annual harmonics"""
from sklearn.linear_model import LinearRegression
from sklearn.metrics import explained_variance_score
import matplotlib.pyplot as plt
import numpy as np
if n_max > harmonic_dss[cunits].max().values.item():
n_max = harmonic_dss[cunits].max().values.item()
try:
field = harmonic_dss.attrs['field']
if field == 'PW':
field = 'PWV'
except KeyError:
field = 'no name'
name = [x for x in harmonic_dss][0].split('_')[0]
if season is None and 'season' not in harmonic_dss.dims:
harmonic = harmonic_dss # .sel(season='ALL')
elif season is None and 'season' in harmonic_dss.dims:
harmonic = harmonic_dss.sel(season='ALL')
elif season is not None:
harmonic = harmonic_dss.sel(season=season)
# pre-process:
if 'month' in harmonic.dims:
harmonic = harmonic.transpose('month', cunits, ...)
elif 'hour' in harmonic.dims:
harmonic = harmonic.transpose('hour', cunits, ...)
harmonic = harmonic.sel({cunits: slice(1, n_max)})
# X = harmonic[name + '_mean'].values
y = harmonic[name].values.reshape(-1, 1)
exp_list = []
for cycle in harmonic[cunits].values:
X = harmonic[name + '_mean'].sel({cunits: cycle}).values.reshape(-1, 1)
lr = LinearRegression(fit_intercept=False)
lr.fit(X, y)
y_pred = lr.predict(X)
ex_var = explained_variance_score(y, y_pred)
exp_list.append(ex_var)
explained = np.array(exp_list) * 100.0
exp_dict = dict(zip([x for x in harmonic[cunits].values], explained))
exp_dict['total'] = np.cumsum(explained)
exp_dict['season'] = season
exp_dict['name'] = name
if plot:
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
markers = ['s', 'x', '^', '>', '<', 'X']
colors = ['tab:cyan', 'tab:brown', 'tab:pink', 'tab:orange',
'tab:purple', 'tab:olive']  # 'tab:olive' replaces the invalid 'tab:yellow'
styles = ['--', '-.', ':', ' ', 'None', ' ']
S = ['S{}'.format(x) for x in harmonic[cunits].values]
S_total = ['+'.join(S)]
S = ['S{} ({:.0f}%)'.format(x, exp_dict[int(x)]) for x in harmonic[cunits].values]
for i, cycle in enumerate(harmonic[cunits].values):
harmonic[name + '_mean'].sel({cunits: cycle}).plot(ax=ax,
linestyle=styles[i],
color=colors[i],
linewidth=lw,
label=S[i]) # marker=markers[i])
harmonic[name + '_mean'].sum(cunits).plot(ax=ax, marker=None, color='k',
alpha=0.7, linewidth=lw, label=S_total[0])
harmonic[name].plot(ax=ax, marker='o', linewidth=0., color='k', alpha=0.7, label=field)
handles, labels = ax.get_legend_handles_labels()
if legend_S_only:
handles1 = handles[:-2]
labels1 = labels[:-2]
ax.legend(
handles=handles1, labels=labels1,
prop={'size': legsize},
framealpha=0.5,
fancybox=True,
loc=legend_loc, ncol=ncol, columnspacing=0.75, handlelength=1.0)
else:
ax.legend(
S + S_total + [field],
prop={'size': legsize},
framealpha=0.5,
fancybox=True,
loc=legend_loc, ncol=ncol, columnspacing=0.75, handlelength=1.0)
# ax.grid()
ax.set_xlabel('Time of day [UTC]')
# ax.set_ylabel('{} anomalies [mm]'.format(field))
if season is None:
ax.set_title('Annual {} diurnal cycle for {} station'.format(field, name.upper()))
else:
ax.set_title('{} diurnal cycle for {} station in {}'.format(field, name.upper(), season))
if legend_S_only:
return ax, handles, labels
else:
return ax
else:
return exp_dict
def harmonic_analysis_xr(da, n=6, normalize=False, anomalize=False, freq='D',
user_field_name=None):
import xarray as xr
from aux_gps import fit_da_to_model
from aux_gps import normalize_xr
from aux_gps import anomalize_xr
try:
field = da.attrs['channel_name']
except KeyError:
field = user_field_name
if field is None:
field = ''
if normalize:
da = normalize_xr(da, norm=1)
time_dim = list(set(da.dims))[0]
if anomalize:
da = anomalize_xr(da, freq=freq)
seasons = ['JJA', 'SON', 'DJF', 'MAM', 'ALL']
print('station name: {}'.format(da.name))
print('performing harmonic analysis with 1 to {} cycles per day.'.format(n))
season_list = []
for season in seasons:
if season != 'ALL':
print('analysing season {}.'.format(season))
das = da.sel({time_dim: da['{}.season'.format(time_dim)] == season})
else:
print('analysing ALL seasons.')
das = da
ds = harmonic_da(das, n=n)
season_list.append(ds)
dss = xr.concat(season_list, 'season')
dss['season'] = seasons
dss.attrs['field'] = field
return dss
def harmonic_da(da_ts, n=3, field=None, init=None):
from aux_gps import fit_da_to_model
import xarray as xr
time_dim = list(set(da_ts.dims))[0]
harmonics = [x + 1 for x in range(n)]
if init is not None:
init_amp = da_ts.groupby('{}.hour'.format(time_dim)).mean().mean('hour').values
else:
init_amp = 1.0
init_values = [init_amp/float(x) for x in harmonics]
params_list = []
di_mean_list = []
di_std_list = []
for cpd, init_val in zip(harmonics, init_values):
print('fitting harmonic #{}'.format(cpd))
params = dict(
sin_freq={
'value': cpd}, sin_amp={
'value': init_val}, sin_phase={
'value': 0})
res = fit_da_to_model(
da_ts,
modelname='sin',
params=params,
plot=False,
verbose=False)
name = da_ts.name.split('_')[0]
params_da = xr.DataArray([x for x in res.attrs.values()],
dims=['params', 'val_err'])
params_da['params'] = [x for x in res.attrs.keys()]
params_da['val_err'] = ['value', 'stderr']
params_da.name = name + '_params'
name = res.name.split('_')[0]
diurnal_mean = res.groupby('{}.hour'.format(time_dim)).mean()
diurnal_std = res.groupby('{}.hour'.format(time_dim)).std()
# diurnal_mean.attrs.update(attrs)
# diurnal_std.attrs.update(attrs)
diurnal_mean.name = name + '_mean'
diurnal_std.name = name + '_std'
params_list.append(params_da)
di_mean_list.append(diurnal_mean)
di_std_list.append(diurnal_std)
da_mean = xr.concat(di_mean_list, 'cpd')
da_std = xr.concat(di_std_list, 'cpd')
da_params = xr.concat(params_list, 'cpd')
ds = da_mean.to_dataset(name=da_mean.name)
ds[da_std.name] = da_std
ds['cpd'] = harmonics
ds[da_params.name] = da_params
ds[da_ts.name] = da_ts.groupby('{}.hour'.format(time_dim)).mean()
if field is not None:
ds.attrs['field'] = field
return ds
def harmonic_da_ts(da_ts, n=3, grp='month', return_ts_fit=False,
verbose=True):
from aux_gps import fit_da_ts_to_sine_model
import xarray as xr
time_dim = list(set(da_ts.dims))[0]
harmonics = [x + 1 for x in range(n)]
if grp == 'month':
init_freqs = [x / 366 for x in harmonics]
cunits = 'cpy'
cu_name = 'cycles per year'
elif grp == 'hour':
init_freqs = harmonics
cunits = 'cpd'
cu_name = 'cycles per day'
params_list = []
di_mean_list = []
di_std_list = []
tss = []
params_dicts = []
for cycle, init_freq in zip(harmonics, init_freqs):
if verbose:
print('fitting harmonic #{}'.format(cycle))
res = fit_da_ts_to_sine_model(
da_ts, init_freq=init_freq, verbose=False, plot=False)
name = da_ts.name.split('_')[0]
params_da = xr.DataArray([x for x in res.attrs.values()],
dims=['params', 'val_err'])
params_da['params'] = [x for x in res.attrs.keys()]
params_da['val_err'] = ['value', 'stderr']
params_da.name = name + '_params'
name = res.name.split('_')[0]
diurnal_mean = res.groupby('{}.{}'.format(time_dim, grp)).mean()
diurnal_std = res.groupby('{}.{}'.format(time_dim, grp)).std()
# diurnal_mean.attrs.update(attrs)
# diurnal_std.attrs.update(attrs)
diurnal_mean.name = name + '_mean'
diurnal_std.name = name + '_std'
params_list.append(params_da)
di_mean_list.append(diurnal_mean)
di_std_list.append(diurnal_std)
tss.append(res)
params_dicts.append(res.attrs)
da_mean = xr.concat(di_mean_list, cunits)
da_std = xr.concat(di_std_list, cunits)
da_params = xr.concat(params_list, cunits)
ds = da_mean.to_dataset(name=da_mean.name)
ds[da_std.name] = da_std
ds[cunits] = harmonics
ds[cunits].attrs['long_name'] = cu_name
ds[da_params.name] = da_params
ds[da_ts.name] = da_ts.groupby('{}.{}'.format(time_dim, grp)).mean(keep_attrs=True)
if return_ts_fit:
ds = xr.concat(tss, cunits)
ds[cunits] = harmonics
di = {}
for i, harm in enumerate(harmonics):
keys = [x + '_{}'.format(harm) for x in params_dicts[i].keys()]
di.update(dict(zip(keys, [x for x in params_dicts[i].values()])))
ds.attrs = di
return ds
def convert_da_to_long_form_df(da, var_name=None, value_name=None):
""" convert xarray dataarray to long form pandas df
to use with seaborn"""
import xarray as xr
if var_name is None:
var_name = 'var'
if value_name is None:
value_name = 'value'
dims = [x for x in da.dims]
if isinstance(da, xr.Dataset):
value_vars = [x for x in da]
elif isinstance(da, xr.DataArray):
value_vars = [da.name]
df = da.to_dataframe()
for i, dim in enumerate(da.dims):
df[dim] = df.index.get_level_values(i)
df = df.melt(value_vars=value_vars, value_name=value_name,
id_vars=dims, var_name=var_name)
return df
def get_season_for_pandas_dtindex(df):
import pandas as pd
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError('index needs to be datetimeindex!')
season = []
months = [x.month for x in df.index]
for month in months:
if month >= 6 and month <= 8:
season.append('JJA')
elif month >= 3 and month <= 5:
season.append('MAM')
elif month >= 9 and month <= 11:
season.append('SON')
elif month == 12 or month == 1 or month == 2:
season.append('DJF')
return pd.Series(season, index=df.index)
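# Illustrative call for get_season_for_pandas_dtindex: map a monthly
# DatetimeIndex to meteorological seasons (hypothetical frame):
def _example_season_index():
    import pandas as pd
    df = pd.DataFrame(index=pd.date_range('2000-01-01', periods=12,
                                          freq='MS'))
    print(get_season_for_pandas_dtindex(df).unique())
    # expected order of first appearance: DJF, MAM, JJA, SON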
def anomalize_xr(da_ts, freq='D', time_dim=None, units=None, verbose=True): # i.e., like deseason
import xarray as xr
if time_dim is None:
time_dim = list(set(da_ts.dims))[0]
attrs = da_ts.attrs
if isinstance(da_ts, xr.Dataset):
da_attrs = dict(zip([x for x in da_ts],[da_ts[x].attrs for x in da_ts]))
try:
name = da_ts.name
except AttributeError:
name = ''
if isinstance(da_ts, xr.Dataset):
name = [x for x in da_ts]
if freq == 'D':
if verbose:
print('removing daily means from {}'.format(name))
frq = 'daily'
date = groupby_date_xr(da_ts)
grp = date
elif freq == 'H':
if verbose:
print('removing hourly means from {}'.format(name))
frq = 'hourly'
grp = '{}.hour'.format(time_dim)
elif freq == 'MS':
if verbose:
print('removing monthly means from {}'.format(name))
frq = 'monthly'
grp = '{}.month'.format(time_dim)
elif freq == 'AS':
if verbose:
print('removing yearly means from {}'.format(name))
frq = 'yearly'
grp = '{}.year'.format(time_dim)
elif freq == 'DOY':
if verbose:
print('removing day of year means from {}'.format(name))
frq = 'dayofyear'
grp = '{}.dayofyear'.format(time_dim)
elif freq == 'WOY':
if verbose:
print('removing week of year means from {}'.format(name))
frq = 'weekofyear'
grp = '{}.weekofyear'.format(time_dim)
# calculate climatology:
climatology = da_ts.groupby(grp).mean()
climatology_std = da_ts.groupby(grp).std()
da_anoms = da_ts.groupby(grp) - climatology
if units == '%':
da_anoms = 100.0 * (da_anoms.groupby(grp) / climatology)
# da_anoms = 100.0 * (da_anoms / da_ts.mean())
# da_anoms = 100.0 * (da_ts.groupby(grp)/climatology - 1)
# da_anoms = 100.0 * (da_ts.groupby(grp)-climatology) / da_ts
if verbose:
print('Using % as units.')
elif units == 'std':
da_anoms = (da_anoms.groupby(grp) / climatology_std)
if verbose:
print('Using std as units.')
da_anoms = da_anoms.reset_coords(drop=True)
da_anoms.attrs.update(attrs)
da_anoms.attrs.update(action='removed {} means'.format(frq))
# if dataset, update attrs for each dataarray and add action='removed x means'
if isinstance(da_ts, xr.Dataset):
for x in da_ts:
da_anoms[x].attrs.update(da_attrs.get(x))
da_anoms[x].attrs.update(action='removed {} means'.format(frq))
if units == '%':
da_anoms[x].attrs.update(units='%')
return da_anoms
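# A minimal sketch of anomalize_xr with freq='MS' (synthetic data; the
# names are illustrative): removing the monthly climatology from a pure
# seasonal cycle should leave anomalies of ~0.
def _example_anomalize():
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range('2010-01-01', periods=48, freq='MS')
    da = xr.DataArray(np.sin(2 * np.pi * time.month / 12),
                      dims=['time'], coords={'time': time})
    anoms = anomalize_xr(da, freq='MS', verbose=False)
    print(float(abs(anoms).max()))  # ~0.0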
def line_and_num_for_phrase_in_file(phrase='the dog barked', filename='file.txt'):
with open(filename, 'r') as f:
for (i, line) in enumerate(f):
if phrase in line:
return i, line
return None, None
def grab_n_consecutive_epochs_from_ts(da_ts, sep='nan', n=10, time_dim=None,
return_largest=False):
"""grabs n consecutive epochs from time series (xarray dataarrays)
and return list of either dataarrays"""
if time_dim is None:
time_dim = list(set(da_ts.dims))[0]
df = da_ts.to_dataframe()
# use the separator (default 'nan') to define the runs:
A = consecutive_runs(df, num=sep)
A = A.sort_values('total_not-nan', ascending=False)
max_n = len(A)
if return_largest:
start = A.iloc[0, 0]
end = A.iloc[0, 1]
da = da_ts.isel({time_dim:slice(start, end)})
return da
if n > max_n:
print('{} epochs requested but only {} available'.format(n, max_n))
n = max_n
da_list = []
for i in range(n):
start = A.iloc[i, 0]
end = A.iloc[i, 1]
da = da_ts.isel({time_dim: slice(start, end)})
da_list.append(da)
return da_list
def keep_full_years_of_monthly_mean_data(da_ts, verbose=False):
name = da_ts.name
time_dim = list(set(da_ts.dims))[0]
df = da_ts.dropna(time_dim).to_dataframe()
# calculate yearly data to drop (if points less than threshold):
df['year'] = df.index.year
points_in_year = df.groupby(['year']).count()[name].to_frame()
# calculate total years with any data:
tot_years = points_in_year[points_in_year > 0].dropna().count().values.item()
# calculate yearly data percentage (from maximum available):
points_in_year['percent'] = (points_in_year[name] / 12) * 100.0
# get the number of years to drop and the years themselves:
number_of_years_to_drop = points_in_year[name][points_in_year['percent'] <= 99].count()
percent_of_years_to_drop = 100.0 * \
number_of_years_to_drop / len(points_in_year)
years_to_drop = points_in_year.index[points_in_year['percent'] <= 99]
if verbose:
print('for {}: found {} ({:.2f} %) bad years with {:.0f} % drop thresh.'.format(
name, number_of_years_to_drop, percent_of_years_to_drop, 99))
# now drop the incomplete years:
for year_to_drop in years_to_drop:
df = df[df['year'] != year_to_drop]
if verbose:
print('for {}: kept {} years.'.format(name, df['year'].unique().size))
da = df[name].to_xarray()
# add some more metadata:
da.attrs['years_kept'] = sorted(df['year'].unique().tolist())
da.attrs['years_total'] = tot_years
da.attrs['years_dropped'] = number_of_years_to_drop
da.attrs['years_dropped_percent'] = '{:.1f}'.format(percent_of_years_to_drop)
return da
#def assemble_semi_period(reduced_da_ts):
# import numpy as np
# import xarray as xr
# period = [x for x in reduced_da_ts.dims][0]
# if period == 'month':
# plength = reduced_da_ts[period].size
# mnth_arr = np.arange(1, 13)
# mnth_splt = np.array_split(mnth_arr, int(12/plength))
# vals = reduced_da_ts.values
# vals_list = []
# vals_list.append(vals)
# for i in range(len(mnth_splt)-1):
# vals_list.append(vals)
# modified_reduced = xr.DataArray(np.concatenate(vals_list), dims=['month'])
# modified_reduced['month'] = mnth_arr
# return modified_reduced
# elif period == 'hour':
# plength = reduced_da_ts[period].size
# hr_arr = np.arange(0, 24)
# hr_splt = np.array_split(hr_arr, int(24/plength))
# vals = reduced_da_ts.values
# vals_list = []
# vals_list.append(vals)
# for i in range(len(hr_splt)-1):
# vals_list.append(vals)
# modified_reduced = xr.DataArray(np.concatenate(vals_list), dims=['hour'])
# modified_reduced['hour'] = hr_arr
# return modified_reduced
#
#
#def groupby_semi_period(da_ts, period='6M'):
# """return an xarray DataArray with the semi period of 1 to 11 months or
# 1 to 23 hours.
# Input: period : string, first char is period length, second is frequency.
# for now support is M for month and H for hour."""
# import numpy as np
# df = da_ts.to_dataframe()
# plength = [x for x in period if x.isdigit()]
# if len(plength) == 1:
# plength = int(plength[0])
# elif len(plength) == 2:
# plength = int(''.join(plength))
# freq = [x for x in period if x.isalpha()][0]
# print(plength, freq)
# if freq == 'M':
# if np.mod(12, plength) != 0:
# raise('pls choose integer amounts, e.g., 3M, 4M, 6M...')
# mnth_arr = np.arange(1, 13)
# mnth_splt = np.array_split(mnth_arr, int(12 / plength))
# rpld = {}
# for i in range(len(mnth_splt) - 1):
# rpld.update(dict(zip(mnth_splt[i + 1], mnth_splt[0])))
# df['month'] = df.index.month
# df['month'] = df['month'].replace(rpld)
# month = df['month'].to_xarray()
# return month
# if freq == 'H':
# if np.mod(24, plength) != 0:
# raise('pls choose integer amounts, e.g., 6H, 8H, 12H...')
# hr_arr = np.arange(0, 24)
# hr_splt = np.array_split(hr_arr, int(24 / plength))
# rpld = {}
# for i in range(len(hr_splt) - 1):
# rpld.update(dict(zip(hr_splt[i + 1], hr_splt[0])))
# df['hour'] = df.index.hour
# df['hour'] = df['hour'].replace(rpld)
# hour = df['hour'].to_xarray()
# return hour
def groupby_half_hour_xr(da_ts, reduce='mean'):
import pandas as pd
import numpy as np
df = da_ts.to_dataframe()
native_freq = pd.infer_freq(df.index)
if not native_freq:
raise ValueError('Cannot infer frequency...')
if reduce == 'mean':
df = df.groupby([df.index.hour, df.index.minute]).mean()
elif reduce == 'std':
df = df.groupby([df.index.hour, df.index.minute]).std()
time = pd.date_range(start='1900-01-01', periods=df.index.size,
freq=native_freq)
df = df.set_index(time)
df = df.resample('30T').mean()
half_hours = np.arange(0, 24, 0.5)
df.index = half_hours
df.index.name = 'half_hour'
ds = df.to_xarray()
return ds
def groupby_date_xr(da_ts, time_dim='time'):
df = da_ts[time_dim].to_dataframe()
df['date'] = df.index.date
date = df['date'].to_xarray()
return date
def loess_curve(da_ts, time_dim='time', season=None, plot=True):
from skmisc.loess import loess
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
if season is not None:
da_ts = da_ts.sel({time_dim: da_ts[time_dim + '.season'] == season})
x = da_ts.dropna(time_dim)[time_dim].values
y = da_ts.dropna(time_dim).values
l_obj = loess(x, y)
l_obj.fit()
pred = l_obj.predict(x, stderror=True)
conf = pred.confidence()
lowess = np.copy(pred.values)
ll = np.copy(conf.lower)
ul = np.copy(conf.upper)
da_lowess = xr.Dataset()
da_lowess['mean'] = xr.DataArray(lowess, dims=[time_dim])
da_lowess['upper'] = xr.DataArray(ul, dims=[time_dim])
da_lowess['lower'] = xr.DataArray(ll, dims=[time_dim])
da_lowess[time_dim] = x
if plot:
plt.plot(x, y, '+')
plt.plot(x, lowess)
plt.fill_between(x, ll, ul, alpha=.33)
plt.show()
return da_lowess
def detrend_ts(da_ts, method='scipy', verbose=False,
time_dim='time'):
from scipy.signal import detrend
import xarray as xr
import pandas as pd
if verbose:
print('detrending using {}.'.format(method))
if method == 'loess':
trend = loess_curve(da_ts, plot=False)
detrended = da_ts - trend['mean']
detrended.name = da_ts.name
elif method == 'scipy':
freq = xr.infer_freq(da_ts[time_dim])
y = da_ts.dropna(time_dim)
y_detrended = y.copy(data=detrend(y))
detrended = y_detrended
start = pd.to_datetime(y_detrended[time_dim].isel({time_dim: 0}).item())
end = pd.to_datetime(y_detrended[time_dim].isel({time_dim: -1}).item())
new_time = pd.date_range(start, end, freq=freq)
detrended = detrended.reindex({time_dim:new_time})
return detrended
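# Minimal sketch for detrend_ts with the default scipy method (synthetic
# daily series with a known linear trend): the detrended series should
# have a mean of ~0.
def _example_detrend():
    import numpy as np
    import pandas as pd
    import xarray as xr
    time = pd.date_range('2015-01-01', periods=365, freq='D')
    da = xr.DataArray(0.05 * np.arange(365) + np.random.randn(365),
                      dims=['time'], coords={'time': time})
    out = detrend_ts(da, method='scipy')
    print(float(out.mean()))  # ~0.0 after linear detrending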
def autocorr_plot(da_ts, max_lag=40):
import pandas as pd
ser = pd.Series(da_ts)
corrs = [ser.autocorr(lag=x) for x in range(0, max_lag)]
lags = [x for x in range(0, max_lag)]
lags_ser = pd.Series(corrs, index=lags)
ax = lags_ser.plot(kind='bar', rot=0, figsize=(10, 5))
return ax
def error_mean_rmse(y, y_pred):
from sklearn.metrics import mean_squared_error
import numpy as np
mse = mean_squared_error(y.values, y_pred.values)
rmse = np.sqrt(mse)
mean = np.mean(y.values-y_pred.values)
print('mean : {:.2f}, rmse : {:.2f}'.format(mean, rmse))
return mean, rmse
def remove_suffix_from_ds(ds, sep='_'):
import xarray as xr
if not isinstance(ds, xr.Dataset):
raise ValueError('input must be an xarray dataset object!')
vnames = [x for x in ds.data_vars]
new_names = [x.split(sep)[0] for x in ds.data_vars]
name_dict = dict(zip(vnames, new_names))
ds = ds.rename_vars(name_dict)
return ds
def rename_data_vars(ds, suffix='_error', prefix=None,
verbose=False):
import xarray as xr
if not isinstance(ds, xr.Dataset):
raise ValueError('input must be an xarray dataset object!')
vnames = [x for x in ds.data_vars]
# if remove_suffix:
# new_names = [x.replace(suffix, '') for x in ds.data_vars]
if suffix is not None:
new_names = [str(x) + suffix for x in ds.data_vars]
if prefix is not None:
new_names = [prefix + str(x) for x in ds.data_vars]
name_dict = dict(zip(vnames, new_names))
ds = ds.rename_vars(name_dict)
if verbose:
print('var names were added the suffix {}.'.format(suffix))
return ds
def remove_duplicate_spaces_in_string(line):
import re
line_removed = " ".join(re.split("\s+", line, flags=re.UNICODE))
return line_removed
def save_ncfile(xarray, savepath, filename='temp.nc', engine=None, dtype=None,
fillvalue=None):
import xarray as xr
print('saving {} to {}'.format(filename, savepath))
if dtype is None:
comp = dict(zlib=True, complevel=9, _FillValue=fillvalue) # best compression
else:
comp = dict(zlib=True, complevel=9, dtype=dtype, _FillValue=fillvalue) # best compression
if isinstance(xarray, xr.Dataset):
encoding = {var: comp for var in xarray}
elif isinstance(xarray, xr.DataArray):
encoding = {var: comp for var in xarray.to_dataset()}
xarray.to_netcdf(savepath / filename, 'w', encoding=encoding, engine=engine)
print('File saved!')
return
def weighted_long_term_monthly_means_da(da_ts, plot=True):
"""create a long term monthly means(climatology) from a dataarray time
series with weights of items(mins,days etc..) per each month
apperently, DataArray.groupby('time.month').mean('time') does exactely
this... so this function is redundant"""
import pandas as pd
name = da_ts.name
# first save attrs:
attrs = da_ts.attrs
try:
df = da_ts.to_dataframe()
except ValueError:
name = 'name'
df = da_ts.to_dataframe(name=name)
df = df.dropna()
df['month'] = df.index.month
df['year'] = df.index.year
cnt = df.groupby(['month', 'year']).count()[name].to_frame()
cnt /= cnt.max()
weights = pd.pivot_table(cnt, index='year', columns='month')
dfmm = df.groupby(['month', 'year']).mean()[name].to_frame()
dfmm = pd.pivot_table(dfmm, index='year', columns='month')
# wrong:
# weighted_monthly_means = dfmm * weights
# normalize weights:
wtag = weights / weights.sum(axis=0)
weighted_clim = (dfmm*wtag).sum(axis=0).unstack().squeeze()
# convert back to time-series:
# df_ts = weighted_monthly_means.stack().reset_index()
# df_ts['dt'] = df_ts.year.astype(str) + '-' + df_ts.month.astype(str)
# df_ts['dt'] = pd.to_datetime(df_ts['dt'])
# df_ts = df_ts.set_index('dt')
# df_ts = df_ts.drop(['year', 'month'], axis=1)
# df_ts.index.name = 'time'
# da = df_ts[name].to_xarray()
da = weighted_clim.to_xarray()
da.attrs = attrs
# da = xr_reindex_with_date_range(da, drop=True, freq='MS')
if plot:
da.plot()
return da
def create_monthly_index(dt_da, period=6, unit='month'):
import numpy as np
pdict = {6: 'H', 4: 'T', 3: 'Q'}
dt = dt_da.to_dataframe()
if unit == 'month':
dt[unit] = getattr(dt.index, unit)
months = np.arange(1, 13)
month_groups = np.array_split(months, len(months) / period)
for i, month_grp in enumerate(month_groups):
dt.loc[(dt['month'] >= month_grp[0]) & (dt['month'] <= month_grp[-1]), 'grp_months'] = '{}{}'.format(pdict.get(period), i+1)
da = dt['grp_months'].to_xarray()
return da
def compute_consecutive_events_datetimes(da_ts, time_dim='time',
minimum_epochs=10):
"""WARNING : for large xarrays it takes alot of time and memory!"""
import pandas as pd
import xarray as xr
df = da_ts.notnull().to_dataframe()
A = consecutive_runs(df, num=False)
# filter out minimum consecutive epochs:
if minimum_epochs is not None:
A = A[A['total_True'] > minimum_epochs]
dt_min = df.iloc[A['{}_True_start'.format(da_ts.name)]].index
try:
dt_max = df.iloc[A['{}_True_end'.format(da_ts.name)]].index
except IndexError:
dt_max = df.iloc[A['{}_True_end'.format(da_ts.name)][:-1]]
end = pd.DataFrame(index=[df.index[-1]], data=[False],
columns=[da_ts.name])
dt_max = dt_max.append(end)
dt_max = dt_max.index
events = []
print('done part1')
for i_min, i_max in zip(dt_min, dt_max):
events.append(da_ts.sel({time_dim: slice(i_min, i_max)}))
events_da = xr.concat(events, 'event')
events_da['event'] = range(len(events))
return events_da
def multi_time_coord_slice(min_time, max_time, freq='5T', time_dim='time',
name='general_group'):
"""returns a datetimeindex array of the multi-time-coords slice defined by
min_time, max_time vectors and freq."""
import pandas as pd
import numpy as np
assert len(min_time) == len(max_time)
dates = [
pd.date_range(
start=min_time[i],
end=max_time[i],
freq=freq) for i in range(
len(min_time))]
dates = [pd.Series(np.ones(dates[i].shape, dtype=int) * i, index=dates[i]) for i in range(len(dates))]
dates = pd.concat(dates)
da = dates.to_xarray()
da = da.rename({'index': time_dim})
da.name = name
return da
def calculate_g(lat):
"""calculate the gravitational acceleration with lat in degrees"""
import numpy as np
g0 = 9.780325
nom = 1.0 + 0.00193185 * np.sin(np.deg2rad(lat)) ** 2.0
denom = 1.0 - 0.00669435 * np.sin(np.deg2rad(lat)) ** 2.0
g = g0 * (nom / denom)**0.5
return g
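# Sanity values for calculate_g as implemented above (note: these are the
# values this particular formula yields, quoted as approximations):
def _example_calculate_g():
    print(calculate_g(0.0))   # 9.780325 at the equator
    print(calculate_g(32.0))  # ~9.792 at mid-latitude
    print(calculate_g(90.0))  # ~9.823 at the pole, per this formula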
def find_consecutive_vals_df(df, col='class', val=7):
import numpy as np
bool_vals = np.where(df[col] == val, 1, 0)
con_df = consecutive_runs(bool_vals, num=0)
return con_df
def lat_mean(xarray, method='cos', dim='lat', copy_attrs=True):
import numpy as np
import xarray as xr
def mean_single_da(da, dim=dim, method=method):
if dim not in da.dims:
return da
if method == 'cos':
weights = np.cos(np.deg2rad(da[dim].values))
da_mean = (weights * da).sum(dim) / sum(weights)
if copy_attrs:
da_mean.attrs = da.attrs
return da_mean
xarray = xarray.transpose(..., dim)
if isinstance(xarray, xr.DataArray):
xarray = mean_single_da(xarray)
elif isinstance(xarray, xr.Dataset):
xarray = xarray.map(mean_single_da, keep_attrs=copy_attrs)
return xarray
def consecutive_runs(arr, num=False):
import numpy as np
import pandas as pd
"""get the index ranges (min, max) of the ~num condition.
num can be 1 or 0 or True or False"""
# Create an array that is 1 where a is num, and pad each end with an extra
# 1.
if isinstance(arr, pd.DataFrame):
a = arr.squeeze().values
name = arr.columns[0]
elif isinstance(arr, np.ndarray):
a = arr
elif isinstance(arr, list):
a = np.array(arr)
if num == 'nan':
isone = np.concatenate(([1], np.isnan(a).view(np.int8), [1]))
else:
isone = np.concatenate(([1], np.equal(a, num).view(np.int8), [1]))
absdiff = np.abs(np.diff(isone))
# Runs start and end where absdiff is 1.
ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
A = pd.DataFrame(ranges)
A['2'] = A.iloc[:, 1] - A.iloc[:, 0]
if isinstance(arr, pd.DataFrame):
if isinstance(num, bool):
notnum = not num
elif isinstance(num, int):
notnum = 'not-{}'.format(num)
elif num == 'nan':
notnum = 'not-nan'
A.columns = [
'{}_{}_start'.format(
name, notnum), '{}_{}_end'.format(
name, notnum), 'total_{}'.format(notnum)]
return A
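# Worked example for consecutive_runs (illustrative input): with num=0 the
# frame holds the [start, end) ranges of the runs that are NOT 0, plus
# their lengths.
def _example_consecutive_runs():
    import numpy as np
    a = np.array([0, 1, 1, 0, 1, 1, 1, 0])
    print(consecutive_runs(a, num=0))
    # expected ranges: (1, 3) and (4, 7), with lengths 2 and 3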
def get_all_possible_combinations_from_list(li, reduce_single_list=True, combine_by_sep='+'):
from itertools import combinations
output = sum([list(map(list, combinations(li, i)))
for i in range(len(li) + 1)], [])
output = output[1:]
if reduce_single_list:
output = [x[0] if len(x) == 1 else x for x in output]
if combine_by_sep is not None:
for out in output:
if isinstance(out, list):
ind = output.index(out)
output[ind] = combine_by_sep.join(out)
return output
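# Illustrative call for get_all_possible_combinations_from_list: all
# non-empty combinations, multi-element ones joined by the separator.
def _example_combinations():
    out = get_all_possible_combinations_from_list(['a', 'b', 'c'])
    print(out)  # ['a', 'b', 'c', 'a+b', 'a+c', 'b+c', 'a+b+c']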
def gantt_chart(ds, fw='bold', ax=None, pe_dict=None, fontsize=14, linewidth=10,
title='RINEX files availability for the Israeli GNSS stations',
time_dim='time', antialiased=False, colors=None, grid=False,
marker='x', marker_suffix='_tide'):
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import matplotlib.dates as mdates
from matplotlib.ticker import AutoMinorLocator
import matplotlib.patheffects as pe
# TODO: fix the ticks/ticks labels
# sns.set_palette(sns.color_palette("tab10", len(ds)))
sns.set_palette(sns.color_palette("Dark2", len(ds)))
if ax is None:
fig, ax = plt.subplots(figsize=(20, 6))
names = []
for x in ds:
if marker_suffix in x:
names.append('')
else:
names.append(x)
# names = [x for x in ds]
vals = range(1, len(ds) + 1)
xmin = pd.to_datetime(ds[time_dim].min().values) - pd.Timedelta(1, unit='W')
xmax = pd.to_datetime(ds[time_dim].max().values) + pd.Timedelta(1, unit='W')
if colors is None:
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
# dt_min_list = []
# dt_max_list = []
for i, da in enumerate(ds):
print(da)
df = ds[da].notnull().to_dataframe()
A = consecutive_runs(df, num=False)
dt_min = df.iloc[A['{}_True_start'.format(da)]].index
try:
dt_max = df.iloc[A['{}_True_end'.format(da)]].index
except IndexError:
dt_max = df.iloc[A['{}_True_end'.format(da)][:-1]]
end = pd.DataFrame(index=[df.index[-1]], data=[False], columns=[da])
dt_max = dt_max.append(end)
dt_max = dt_max.index
y = len(ds) + 1 - np.ones(dt_min.shape) * (i + 1)
y1 = len(ds) + 1 - np.ones(dt_min.shape) * (i + 0.5)
# y_list.append(y)
# dt_min_list.append(dt_min)
# dt_max_list.append(dt_max)
# v = int(calc(i, max = len(ds)))
if marker_suffix in da:
x = pd.to_datetime(ds[da].dropna('time')['time'].values)
# print(x)
ax.scatter(x, y1, color=colors[i], marker=marker, s=150)
# ax.vlines(y, dt_min, dt_max, linewidth=1000, color=colors[i])
else:
if pe_dict is not None:
ax.hlines(y, dt_min, dt_max, linewidth=linewidth, color=colors[i], path_effects=[pe.Stroke(linewidth=15, foreground='k'), pe.Normal()])
else:
ax.hlines(y, dt_min, dt_max, linewidth=linewidth, color=colors[i], antialiased=antialiased)
#plt.show()
# ds[da][~ds[da].isnull()] = i + 1
# ds[da] = ds[da].fillna(0)
if grid:
ax.grid(True, axis='x')
# yticks and their labels:
ax.set_yticks(vals)
ax.set_yticklabels(names[::-1], fontweight=fw, fontsize=fontsize)
[ax.get_yticklabels()[i].set_color(colors[::-1][i]) for i in range(len(colors))]
ax.set_xlim(xmin, xmax)
# handle x-axis (time):
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(which='major',
direction='out',
labeltop=False,
labelbottom=True,
top=False,
bottom=True, left=True, labelsize=fontsize)
ax.minorticks_on()
ax.tick_params(which='minor',
direction='out',
labeltop=False,
labelbottom=True,
top=False,
bottom=True, left=False)
# ax.xaxis.set_minor_locator(mdates.YearLocator())
# ax.xaxis.set_minor_formatter(mdates.DateFormatter("\n%Y"))
plt.setp(ax.xaxis.get_majorticklabels(), rotation=30, ha='center',
fontweight=fw, fontsize=fontsize)
plt.setp(ax.xaxis.get_minorticklabels(), rotation=30, ha='center',
fontweight=fw, fontsize=fontsize)
# grid lines:
# ax.grid(which='major', axis='x', linestyle='-', color='k')
# ax.grid(which='minor', axis='x', linestyle='-', color='k')
if title is not None:
ax.set_title(title, fontsize=14, fontweight=fw)
# fig.tight_layout()
return ax
def time_series_stack_with_window(ts_da, time_dim='time',
window='1D'):
"""make it faster, much faster using isel and then cocant to dataset
save also the datetimes"""
import pandas as pd
import xarray as xr
window_dt = pd.Timedelta(window)
freq = pd.infer_freq(ts_da[time_dim].values)
if not any(i.isdigit() for i in freq):
freq = '1' + freq
freq_td = pd.Timedelta(freq)
window_points = int(window_dt / freq_td)
inds = []
end_index = ts_da[time_dim].size - window_points
index_to_run_over = range(0, end_index)
for i in range(end_index):
inds.append([i, i + window_points])
arr_list = []
arr_time_list = []
ts_arr = ts_da.values
ts_time_arr = ts_da[time_dim].values
for ind in inds:
arr_list.append(ts_arr[ind[0]: ind[1]])
arr_time_list.append(ts_time_arr[ind[0]: ind[1]])
ds = xr.Dataset()
ds[ts_da.name] = xr.DataArray(arr_list, dims=['start_date', 'points'])
ds[ts_da.name].attrs = ts_da.attrs
ds[time_dim] = xr.DataArray(arr_time_list, dims=['start_date', 'points'])
ds['start_date'] = ts_da.isel({time_dim: index_to_run_over})[time_dim].values
ds['points'] = range(window_points)
ds.attrs['freq'] = freq
return ds
def get_RI_reg_combinations(dataset):
"""return n+1 sized dataset of full regressors and median value regressors"""
import xarray as xr
def replace_dta_with_median(dataset, dta):
ds = dataset.copy()
ds[dta] = dataset[dta] - dataset[dta] + dataset[dta].median('time')
ds.attrs['median'] = dta
return ds
if not isinstance(dataset, xr.Dataset):
return print('Input is xarray dataset only')
ds_list = []
ds_list.append(dataset)
dataset.attrs['median'] = 'full_set'
for da in dataset.data_vars:
ds_list.append(replace_dta_with_median(dataset, da))
return ds_list
def annual_standertize(data, time_dim='time', std_nan=1.0):
"""just divide by the time.month std()"""
attrs = data.attrs
std_longterm = data.groupby('{}.month'.format(time_dim)).std(keep_attrs=True)
if std_nan is not None:
std_longterm = std_longterm.fillna(std_nan)
data = data.groupby('{}.month'.format(time_dim)) / std_longterm
data = data.reset_coords(drop=True)
data.attrs.update(attrs)
return data
def normalize_xr(data, time_dim='time', norm=1, down_bound=-1.,
upper_bound=1., verbose=True):
attrs = data.attrs
avg = data.mean(time_dim, keep_attrs=True)
sd = data.std(time_dim, keep_attrs=True)
if norm == 0:
data = data
norm_str = 'No'
elif norm == 1:
data = (data-avg)/sd
norm_str = '(data-avg)/std'
elif norm == 2:
data = (data-avg)/avg
norm_str = '(data-avg)/avg'
elif norm == 3:
data = data/avg
norm_str = '(data/avg)'
elif norm == 4:
data = data/sd
norm_str = '(data)/std'
elif norm == 5:
dh = data.max()
dl = data.min()
# print dl
data = (((data-dl)*(upper_bound-down_bound))/(dh-dl))+down_bound
norm_str = 'mapped between ' + str(down_bound) + ' and ' + str(upper_bound)
# print data
if verbose:
print('Data is ' + norm_str)
elif norm == 6:
data = data-avg
norm_str = 'data-avg'
if verbose and norm != 5:
print('Performing ' + norm_str + ' Normalization')
data.attrs = attrs
data.attrs['Normalize'] = norm_str
return data
def slice_task_date_range(files, date_range, task='non-specific'):
from aux_gps import get_timedate_and_station_code_from_rinex
import pandas as pd
from pathlib import Path
import logging
""" return a slice files object (list of rfn Paths) with the correct
within the desired date range"""
logger = logging.getLogger('gipsyx')
date_range = pd.to_datetime(date_range)
logger.info(
'performing {} task within the dates: {} to {}'.format(task,
date_range[0].strftime(
'%Y-%m-%d'),
date_range[1].strftime('%Y-%m-%d')))
if not files:
return files
path = Path(files[0].as_posix().split('/')[0])
rfns = [x.as_posix().split('/')[-1][0:12] for x in files]
dts = get_timedate_and_station_code_from_rinex(rfns)
rfn_series = pd.Series(rfns, index=dts)
rfn_series = rfn_series.sort_index()
mask = (rfn_series.index >= date_range[0]) & (
rfn_series.index <= date_range[1])
files = [path / x for x in rfn_series.loc[mask].values]
return files
def geo_annotate(ax, lons, lats, labels, xytext=(3, 3), fmt=None, c='k',
fw='normal', fs=None, colorupdown=False):
for x, y, label in zip(lons, lats, labels):
if colorupdown:
if float(label) >= 0.0:
c = 'r'
elif float(label) < 0.0:
c = 'b'
if fmt is not None:
annot = ax.annotate(fmt.format(label), xy=(x, y), xytext=xytext,
textcoords="offset points", color=c,
fontweight=fw, fontsize=fs)
else:
annot = ax.annotate(label, xy=(x, y), xytext=xytext,
textcoords="offset points", color=c,
fontweight=fw, fontsize=fs)
return annot
def piecewise_linear_fit(da, k=1, plot=True):
"""return dataarray with coords k "piece" indexing to k parts of
datetime. k=None means get all datetime index"""
import numpy as np
import xarray as xr
time_dim = list(set(da.dims))[0]
time_no_nans = da.dropna(time_dim)[time_dim]
time_pieces = np.array_split(time_no_nans.values, k)
params = lmfit_params('line')
best_values = []
best_fits = []
for piece in time_pieces:
dap = da.sel({time_dim: piece})
# fit_da_to_model takes modelname, not a model_dict:
result = fit_da_to_model(dap, params=params, modelname='line',
method='leastsq', plot=False, verbose=False)
best_values.append(result.best_values)
best_fits.append(result.best_fit)
bfs = np.concatenate(best_fits)
tps = np.concatenate(time_pieces)
da_final = xr.DataArray(bfs, dims=[time_dim])
da_final[time_dim] = tps
if plot:
ax = plot_tmseries_xarray(da, points=True)
for piece in time_pieces:
da_final.sel({time_dim: piece}).plot(color='r', ax=ax)
return da_final
def convert_wind_direction(u=None, v=None, ws=None, wd=None, verbose=False):
"""
Parameters
----------
u : TYPE, optional
zonal direction. The default is None.
v : TYPE, optional
meridional direction. The default is None.
ws : TYPE, optional
magnitude. The default is None.
wd : TYPE, optional
meteorological direction. The default is None.
verbose : TYPE, optional
DESCRIPTION. The default is False.
Raises
------
ValueError
DESCRIPTION.
Returns
-------
None.
"""
import numpy as np
if (u is None and v is None) and (ws is not None and wd is not None):
if verbose:
print('converting from WS, WD to U, V')
u = -ws*np.sin(np.deg2rad(wd))
v = -ws*np.cos(np.deg2rad(wd))
return u, v
elif (u is not None and v is not None) and (ws is None and wd is None):
if verbose:
print('converting from U, V to WS, WD')
wd = 180 + np.rad2deg(np.arctan2(u, v))
ws = np.sqrt(u**2+v**2)
return ws, wd
else:
raise ValueError('choose either ws and wd or u and v!')
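# Round-trip sketch for convert_wind_direction (illustrative values): a
# pure westerly (wind blowing FROM the west, wd=270) has u > 0 and v ~ 0.
def _example_wind_conversion():
    import numpy as np
    u, v = convert_wind_direction(ws=10.0, wd=270.0)
    print(np.round(u, 3), np.round(v, 3))    # 10.0, ~0.0
    ws, wd = convert_wind_direction(u=u, v=v)
    print(np.round(ws, 3), np.round(wd, 3))  # 10.0, 270.0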
def lmfit_params(model_name, k=None):
from lmfit.parameter import Parameters
sin_params = Parameters()
# add with tuples: (NAME VALUE VARY MIN MAX EXPR BRUTE_STEP)
amp = ['sin_amp', 50, True, None, None, None, None]
phase = ['sin_phase', 0, True, None, None, None, None]
# freq = ['sin_freq', 1/365.0, True, None, None, None, None]
freq = ['sin_freq', 4, True, None, None, None, None]
sin_params.add(*amp)
sin_params.add(*phase)
sin_params.add(*freq)
line_params = Parameters()
slope = ['line_slope', 1e-6, True, None, None, None, None]
intercept = ['line_intercept', 58.6, True, None, None, None, None]
line_params.add(*slope)
line_params.add(*intercept)
constant = Parameters()
constant.add(*['constant', 40.0, True, None, None, None, None])
if k is not None:
sum_sin_params = Parameters()
for mode in range(k):
amp[0] = 'sin{}_amp'.format(mode)
phase[0] = 'sin{}_phase'.format(mode)
freq[0] = 'sin{}_freq'.format(mode)
sum_sin_params.add(*amp)
sum_sin_params.add(*phase)
sum_sin_params.add(*freq)
if model_name == 'sin_linear':
return line_params + sin_params
elif model_name == 'sin':
return sin_params
elif model_name == 'sin_constant':
return sin_params + constant
elif model_name == 'line':
return line_params
elif model_name == 'sum_sin' and k is not None:
return sum_sin_params
elif model_name == 'sum_sin_linear' and k is not None:
return sum_sin_params + line_params
def fit_da_to_model(da, params=None, modelname='sin', method='leastsq', times=None, plot=True, verbose=True):
"""options for modelname:'sin', 'sin_linear', 'line', 'sin_constant', and
'sum_sin'"""
# for sum_sin or sum_sin_linear use model_dict={'model_name': 'sum_sin', k:3}
# usage for params: you need to know the parameter names first:
# modelname='sin', params=dict(sin_freq={'value':3},sin_amp={'value':0.3},sin_phase={'value':0})
# fit_da_to_model(alon, modelname='sin', params=dict(sin_freq={'value':3},sin_amp={'value':0.3},sin_phase={'value':0}))
import matplotlib.pyplot as plt
import pandas as pd
import xarray as xr
time_dim = list(set(da.dims))[0]
if times is not None:
da = da.sel({time_dim: slice(*times)})
lm = lmfit_model_switcher()
lm.pick_model(modelname)
if params is not None:
lm.generate_params(**params)
params = lm.params
model = lm.model
if verbose:
print(model)
print(params)
jul, jul_no_nans = get_julian_dates_from_da(da)
y = da.dropna(time_dim).values
result = model.fit(**params, data=y, time=jul_no_nans, method=method)
if not result.success:
raise ValueError('model not fitted properly...')
fit_y = result.eval(**result.best_values, time=jul)
fit = xr.DataArray(fit_y, dims=time_dim)
fit[time_dim] = da[time_dim]
fit.name = da.name + '_fit'
p = {}
for name, param in result.params.items():
p[name] = [param.value, param.stderr]
fit.attrs.update(**p)
# return fit
if verbose:
print(result.best_values)
if plot:
fig, ax = plt.subplots(figsize=(8, 6))
da.plot.line(marker='.', linewidth=0., color='b', ax=ax)
dt = pd.to_datetime(da[time_dim].values)
ax.plot(dt, fit_y, c='r')
plt.legend(['data', 'fit'])
return fit
def fit_da_ts_to_sine_model(da_ts, init_freq=1/366, verbose=False, plot=True):
"""
Use lmfit MySineModel class to fit time series in DataArray
Parameters
----------
da_ts : TYPE
DESCRIPTION.
plot : TYPE, optional
DESCRIPTION. The default is True.
Returns
-------
fitted_ds : Xarray Dataset
DESCRIPTION.
"""
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
model = pick_lmfit_model(name='sine')
time_dim = list(set(da_ts.dims))[0]
jul, jul_no_nans = get_julian_dates_from_da(da_ts)
y = da_ts.dropna(time_dim).values
params = model.guess(data=y, freq=init_freq)
if verbose:
print(model)
print(params)
result = model.fit(data=y, params=params, x=jul_no_nans)
if not result.success:
raise ValueError('model not fitted properly...')
fit_y = result.eval(**result.best_values, x=jul)
fit = xr.DataArray(fit_y, dims=[time_dim])
fit[time_dim] = da_ts[time_dim]
fit.name = da_ts.name + '_fit'
p = {}
for name, param in result.params.items():
p[name] = [param.value, param.stderr]
fit.attrs.update(**p)
# return fit
if verbose:
print(result.best_values)
if plot:
fig, ax = plt.subplots(figsize=(8, 6))
da_ts.plot.line(marker='.', linewidth=0., color='b', ax=ax)
dt = pd.to_datetime(da_ts[time_dim].values)
ax.plot(dt, fit_y, c='r')
plt.legend(['data', 'fit'])
return fit
def get_julian_dates_from_da(da, subtract='first'):
"""transform the time dim of a dataarray to julian dates(days since)"""
import pandas as pd
import numpy as np
# get time dim:
time_dim = list(set(da.dims))[0]
# convert to days since 2000 (julian_date):
jul = pd.to_datetime(da[time_dim].values).to_julian_date()
# normalize all days to first entry:
if subtract == 'first':
first_day = jul[0]
jul -= first_day
elif subtract == 'median':
med = np.median(jul)
jul -= med
# do the same but without nans:
jul_no_nans = pd.to_datetime(
da.dropna(time_dim)[time_dim].values).to_julian_date()
if subtract == 'first':
jul_no_nans -= first_day
elif subtract == 'median':
jul_no_nans -= med
return jul.values, jul_no_nans.values
def lomb_scargle_xr(da_ts, units='cpy', user_freq='MS', plot=True, kwargs=None):
from astropy.timeseries import LombScargle
import pandas as pd
import xarray as xr
time_dim = list(set(da_ts.dims))[0]
sp_str = pd.infer_freq(da_ts[time_dim].values)
if not sp_str:
print('using user-defined freq: {}'.format(user_freq))
sp_str = user_freq
if units == 'cpy':
# cycles per year:
freq_dict = {'MS': 12, 'D': 365.25, 'H': 8766}
long_name = 'Cycles per Year'
elif units == 'cpd':
# cycles per day:
freq_dict = {'H': 24}
long_name = 'Cycles per Day'
t = [x for x in range(da_ts[time_dim].size)]
y = da_ts.values
lomb_kwargs = {'samples_per_peak': 10, 'nyquist_factor': 2}
if kwargs is not None:
lomb_kwargs.update(kwargs)
freq, power = LombScargle(t, y).autopower(**lomb_kwargs)
unit_freq = freq_dict.get(sp_str)
da = xr.DataArray(power, dims=['freq'])
da['freq'] = freq * unit_freq
da.attrs['long_name'] = 'Power from LombScargle'
da.name = '{}_power'.format(da_ts.name)
da['freq'].attrs['long_name'] = long_name
if plot:
da.plot()
return da
def fft_xr(xarray, method='fft', units='cpy', nan_fill='mean', user_freq='MS',
plot=True):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xarray as xr
from scipy import signal
# import matplotlib
# matplotlib.rcParams['text.usetex'] = True
def fft_da(da, units, nan_fill, periods):
time_dim = list(set(da.dims))[0]
try:
p_units = da.attrs['units']
except KeyError:
p_units = 'amp'
if nan_fill == 'mean':
x = da.fillna(da.mean(time_dim))
elif nan_fill == 'zero':
x = da.fillna(0)
# infer freq of time series:
sp_str = pd.infer_freq(x[time_dim].values)
if user_freq is None:
if not sp_str:
raise Exception("Didn't find a frequency for {}, check for nans!".format(da.name))
if len(sp_str) > 1:
mul = [char for char in sp_str if char.isdigit()]
sp_str = ''.join([char for char in sp_str if char.isalpha()])
if not mul:
mul = 1
else:
if len(mul) > 1:
mul = int(''.join(mul))
else:
mul = int(mul[0])
period = sp_str
elif len(sp_str) == 1:
mul = 1
period = sp_str[0]
p_name = periods[period][0]
p_val = mul * periods[period][1]
print('found {} {} frequency in {} time-series'.format(mul, p_name, da.name))
else:
p_name = periods[user_freq][0]
# number of seconds in freq units in time-series:
p_val = periods[user_freq][1]
print('using user freq of {}'.format(user_freq))
print('sample rate in seconds: {}'.format(p_val))
if method == 'fft':
# run fft:
p = 20 * np.log10(np.abs(np.fft.rfft(x, n=None)))
f = np.linspace(0, (1 / p_val) / 2, len(p))
elif method == 'welch':
f, p = signal.welch(x, 1e-6, 'hann', 1024, scaling='spectrum')
if units == 'cpy':
unit_freq = 1.0 / periods['Y'][1] # in Hz
print('unit_freq: cycles per year ({} seconds)'.format(periods['Y'][1]))
elif units == 'cpd':
unit_freq = 1.0 / periods['D'][1] # in Hz
print('unit_freq: cycles per day ({} seconds)'.format(periods['D'][1]))
# unit_freq_in_time_series = unit_freq * p_val # in Hz
# f = np.linspace(0, unit_freq_in_time_series / 2, len(p))
f_in_unit_freq = f / unit_freq
p_units = '{}^2/{}'.format(p_units, units)
power = xr.DataArray(p, dims=['freq'])
power.name = da.name
power['freq'] = f_in_unit_freq
power['freq'].attrs['long_name'] = 'Frequency'
power['freq'].attrs['units'] = units
power.attrs['long_name'] = 'Power'
power.attrs['units'] = p_units
return power
periods = {'N': ['nanoseconds', 1e-9],
'U': ['microseconds', 1e-6],
'us': ['microseconds', 1e-6],
'L': ['milliseconds', 1e-3],
'ms': ['milliseconds', 1e-3],
'T': ['minutes', 60.0],
'5T': ['minutes', 300.0],
'min': ['minutes', 60.0],
'H': ['hours', 3600.0],
'D': ['days', 86400.0],
'W': ['weeks', 604800.0],
'MS': ['months', 86400.0 * 30],
'Y': ['years', 86400.0 * 365.25]
}
if isinstance(xarray, xr.DataArray):
power = fft_da(xarray, units, nan_fill, periods)
if plot:
fig, ax = plt.subplots(figsize=(6, 8))
power.plot.line(ax=ax, xscale='log', yscale='log')
ax.grid()
return power
elif isinstance(xarray, xr.Dataset):
p_list = []
for da in xarray:
p_list.append(fft_da(xarray[da], units, nan_fill, periods))
ds = xr.merge(p_list)
da_from_ds = ds.to_array(dim='station')
try:
ds.attrs['full_name'] = 'Power spectra for {}'.format(xarray.attrs['full_name'])
except KeyError:
pass
elif isinstance(xarray, list):
p_list = []
for da in xarray:
p_list.append(fft_da(da, units, nan_fill, periods))
ds = xr.merge(p_list, compat='override')
da_from_ds = ds.to_array(dim='epochs')
try:
ds.attrs['full_name'] = 'Power spectra for {}'.format(da.attrs['full_name'])
except KeyError:
pass
if plot:
# the concat dim is 'station' for a Dataset input and 'epochs' for a list:
mean_dim = 'station' if 'station' in da_from_ds.dims else 'epochs'
da_mean = da_from_ds.mean(mean_dim)
da_mean.attrs = da_from_ds.attrs
# da_from_ds.plot.line(xscale='log', yscale='log', hue='station')
fig, ax = plt.subplots(figsize=(8, 6))
da_mean.plot.line(ax=ax, xscale='log', yscale='log')
ax.grid()
return ds
return
def standard_error_slope(X, y):
""" get the standard error of the slope of the linear regression,
works in the case that X is a vector only"""
import numpy as np
ssxm, ssxym, ssyxm, ssym = np.cov(X, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
n = len(X)
df = n - 2
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
return sterrest
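# Cross-check sketch for standard_error_slope (synthetic vectors): it
# should agree closely with the stderr reported by scipy.stats.linregress.
def _example_standard_error_slope():
    import numpy as np
    from scipy.stats import linregress
    rng = np.random.RandomState(0)
    X = np.arange(50, dtype=float)
    y = 2.0 * X + rng.randn(50)
    print(standard_error_slope(X, y))
    print(linregress(X, y).stderr)  # should match to floating precision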
def tar_dir(files_with_path_to_tar, filename, savepath, compresslevel=9,
with_dir_struct=False, verbose=False):
import tarfile as tr
""" compresses all glob_str_to_tar files (e.g., *.txt) in path_to_tar,
and save it all to savepath with filename as filename. by default adds .tar
suffix if not supplied by user. control compression level with
compresslevel (i.e., None means no compression)."""
def aname(file, arcname):
if arcname is None:
return None
else:
return file.as_posix().split('/')[-1]
path_to_tar = files_with_path_to_tar[0].as_posix().split('/')[0]
if len(filename.split('.')) < 2:
filename += '.tar'
if verbose:
print('added .tar suffix to {}'.format(filename.split('.')[0]))
else:
filename = filename.split('.')[0]
filename += '.tar'
if verbose:
print('changed suffix to tar')
tarfile = savepath / filename
if compresslevel is None:
tar = tr.open(tarfile, "w")
else:
tar = tr.open(tarfile, "w:gz", compresslevel=compresslevel)
if not with_dir_struct:
arcname = True
if verbose:
print('files were archived without directory structure')
else:
arcname = None
if verbose:
print('files were archived with {} dir structure'.format(path_to_tar))
total = len(files_with_path_to_tar)
print('Found {} files to tar in dir {}'.format(total, path_to_tar))
cnt = 0
for file in files_with_path_to_tar:
tar.add(file, arcname=aname(file, arcname=arcname))
cnt += 1
# if np.mod(cnt, 10) == 0:
# print('.', end=" ")
tar.close()
print('Compressed all files in {} to {}'.format(
path_to_tar, savepath / filename))
return
def query_yes_no(question, default="no"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
import sys
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def get_var(varname):
"""get a linux shell var (without the $)"""
import subprocess
CMD = 'echo $%s' % varname
p = subprocess.Popen(
CMD,
stdout=subprocess.PIPE,
shell=True,
executable='/bin/bash')
out = p.stdout.readlines()[0].strip().decode("utf-8")
if len(out) == 0:
return None
else:
return out
def plot_tmseries_xarray(ds, fields=None, points=False, error_suffix='_error',
errorbar_alpha=0.5, trend_suffix='_trend'):
"""plot time-series plot w/o errorbars of a xarray dataset"""
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
if points:
ma = '.' # marker
lw = 0. # linewidth
else:
ma = None # marker
lw = 1.0 # linewidth
if isinstance(ds, xr.DataArray):
ds = ds.to_dataset()
# if len(ds.dims) > 1:
# raise ValueError('Number of dimensions in Dataset exceeds 1!')
if isinstance(fields, str):
fields = [fields]
error_fields = [x for x in ds.data_vars if error_suffix in x]
trend_fields = [x for x in ds.data_vars if trend_suffix in x]
if fields is None and error_fields:
all_fields = [x for x in ds.data_vars if error_suffix not in x]
elif fields is None and trend_fields:
all_fields = [x for x in ds.data_vars if trend_suffix not in x]
elif fields is None and not error_fields:
all_fields = [x for x in ds.data_vars]
elif fields is None and not trend_fields:
all_fields = [x for x in ds.data_vars]
elif fields is not None and isinstance(fields, list):
all_fields = sorted(fields)
time_dim = list(set(ds[all_fields].dims))[0]
if len(all_fields) == 1:
da = ds[all_fields[0]]
ax = da.plot(figsize=(20, 4), color='b', marker=ma, linewidth=lw)[0].axes
ax.grid()
if error_fields:
print('adding errorbars fillbetween...')
error = da.name + error_suffix
ax.fill_between(da[time_dim].values, da.values - ds[error].values,
da.values + ds[error].values,
where=np.isfinite(da.values),
alpha=errorbar_alpha)
if trend_fields:
print('adding trends...')
            trend = da.name + trend_suffix
            ds[trend].plot(ax=ax, color='r')
            trend_attr = [x for x in ds[trend].attrs.keys()
                          if 'trend' in x][0]
            if trend_attr:
                trend_str = trend_attr.split('>')[-1]
                trend_val = ds[trend].attrs[trend_attr]
ax.text(0.1, 0.9, '{}: {:.2f}'.format(trend_str, trend_val),
horizontalalignment='center',
verticalalignment='top', color='green', fontsize=15,
transform=ax.transAxes)
ax.grid(True)
ax.set_title(da.name)
plt.tight_layout()
plt.subplots_adjust(top=0.93)
return ax
else:
da = ds[all_fields].to_array('var')
fg = da.plot(row='var', sharex=True, sharey=False, figsize=(20, 15),
hue='var', color='k', marker=ma, linewidth=lw)
for i, (ax, field) in enumerate(zip(fg.axes.flatten(), all_fields)):
ax.grid(True)
if error_fields:
print('adding errorbars fillbetween...')
ax.fill_between(da[time_dim].values,
da.sel(var=field).values - ds[field + error_suffix].values,
da.sel(var=field).values + ds[field + error_suffix].values,
where=np.isfinite(da.sel(var=field).values),
alpha=errorbar_alpha)
if trend_fields:
print('adding trends...')
ds[field + trend_suffix].plot(ax=ax, color='r')
trend_attr = [x for x in ds[field + trend_suffix].attrs.keys()
if 'trend' in x][0]
if trend_attr:
trend_str = trend_attr.split('>')[-1]
trend_val = ds[field + trend_suffix].attrs[trend_attr]
ax.text(0.1, 0.9, '{}: {:.2f}'.format(trend_str, trend_val),
horizontalalignment='center',
verticalalignment='top', color='green', fontsize=15,
transform=ax.transAxes)
try:
ax.set_ylabel('[' + ds[field].attrs['units'] + ']')
except KeyError:
pass
ax.lines[0].set_color('C{}'.format(i))
ax.grid(True)
# fg.fig.suptitle()
fg.fig.subplots_adjust(left=0.1, top=0.93)
return fg
def flip_xy_axes(ax, ylim=None):
if ylim is None:
new_y_lim = ax.get_xlim()
else:
new_y_lim = ylim
new_x_lim = ax.get_ylim()
ylabel = ax.get_xlabel()
xlabel = ax.get_ylabel()
newx = ax.lines[0].get_ydata()
newy = ax.lines[0].get_xdata()
# set new x- and y- data for the line
# ax.margins(y=0)
ax.lines[0].set_xdata(newx)
ax.lines[0].set_ydata(newy)
ax.set_xlim(new_x_lim)
ax.set_ylim(new_y_lim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.invert_xaxis()
ax.invert_yaxis()
ax.invert_yaxis()
return ax
def choose_time_groupby_arg(da_ts, time_dim='time', grp='hour'):
if grp != 'date':
grp_arg = '{}.{}'.format(time_dim, grp)
else:
grp_arg = groupby_date_xr(da_ts)
return grp_arg
def time_series_stack(time_da, time_dim='time', grp1='hour', grp2='month',
return_just_stacked_da=False):
"""Takes a time-series xr.DataArray objects and reshapes it using
grp1 and grp2. output is a xr.Dataset that includes the reshaped DataArray
, its datetime-series and the grps."""
import xarray as xr
import pandas as pd
# try to infer the freq and put it into attrs for later reconstruction:
freq = pd.infer_freq(time_da[time_dim].values)
name = time_da.name
time_da.attrs['freq'] = freq
attrs = time_da.attrs
# drop all NaNs:
time_da = time_da.dropna(time_dim)
# first grouping:
grp1_arg = choose_time_groupby_arg(time_da, time_dim=time_dim, grp=grp1)
grp_obj1 = time_da.groupby(grp1_arg)
da_list = []
t_list = []
for grp1_name, grp1_inds in grp_obj1.groups.items():
da = time_da.isel({time_dim: grp1_inds})
if grp2 is not None:
# second grouping:
grp2_arg = choose_time_groupby_arg(time_da, time_dim=time_dim, grp=grp2)
grp_obj2 = da.groupby(grp2_arg)
for grp2_name, grp2_inds in grp_obj2.groups.items():
da2 = da.isel({time_dim: grp2_inds})
# extract datetimes and rewrite time coord to 'rest':
times = da2[time_dim]
times = times.rename({time_dim: 'rest'})
times.coords['rest'] = range(len(times))
t_list.append(times)
da2 = da2.rename({time_dim: 'rest'})
da2.coords['rest'] = range(len(da2))
da_list.append(da2)
else:
times = da[time_dim]
times = times.rename({time_dim: 'rest'})
times.coords['rest'] = range(len(times))
t_list.append(times)
da = da.rename({time_dim: 'rest'})
da.coords['rest'] = range(len(da))
da_list.append(da)
# get group keys:
grps1 = [x for x in grp_obj1.groups.keys()]
if grp2 is not None:
grps2 = [x for x in grp_obj2.groups.keys()]
# concat and convert to dataset:
stacked_ds = xr.concat(da_list, dim='all').to_dataset(name=name)
stacked_ds[time_dim] = xr.concat(t_list, 'all')
if grp2 is not None:
# create a multiindex for the groups:
mindex = pd.MultiIndex.from_product([grps1, grps2], names=[grp1, grp2])
stacked_ds.coords['all'] = mindex
else:
# create a multiindex for first group only:
mindex = pd.MultiIndex.from_product([grps1], names=[grp1])
stacked_ds.coords['all'] = mindex
# unstack:
# ds = stacked_ds.unstack('all')[time_da.name]
ds = stacked_ds.unstack('all')
if return_just_stacked_da:
ds = ds[time_da.name]
ds.attrs = attrs
# if plot:
# plot_stacked_time_series(ds[name].mean('rest', keep_attrs=True))
return ds
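# A minimal sketch (hypothetical data) of time_series_stack: a year of
# hourly values reshaped into hour x month groups plus a 'rest' dimension:
def _example_time_series_stack():
    import xarray as xr
    import pandas as pd
    import numpy as np
    time = pd.date_range('2019-01-01', '2019-12-31 23:00', freq='1H')
    da = xr.DataArray(np.random.randn(time.size), dims=['time'],
                      coords={'time': time}, name='demo')
    ds = time_series_stack(da, grp1='hour', grp2='month')
    # ds['demo'] now carries 'hour', 'month' and 'rest' dimensions
    return ds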
def plot_stacked_time_series(stacked_da):
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import numpy as np
try:
units = stacked_da.attrs['units']
except KeyError:
units = ''
try:
station = stacked_da.attrs['station']
except KeyError:
station = ''
try:
name = stacked_da.name
except KeyError:
name = ''
SMALL_SIZE = 12
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# plt.rc('text', usetex=True)
grp1_mean = stacked_da.mean(stacked_da.dims[0])
grp2_mean = stacked_da.mean(stacked_da.dims[1])
fig = plt.figure(figsize=(16, 10), dpi=80)
grid = plt.GridSpec(
2, 2, width_ratios=[
1, 4], height_ratios=[
5, 1], wspace=0, hspace=0)
# grid = plt.GridSpec(2, 2, hspace=0.5, wspace=0.2)
# ax_main = fig.add_subplot(grid[:-1, :-1])
# ax_left = fig.add_subplot(grid[:-1, 0], xticklabels=[], yticklabels=[])
# ax_bottom = fig.add_subplot(grid[-1, 0:-1], xticklabels=[], yticklabels=[])
ax_main = fig.add_subplot(grid[0, 1])
ax_left = fig.add_subplot(grid[0, 0])
ax_left.grid()
ax_bottom = fig.add_subplot(grid[1, 1])
ax_bottom.grid()
pcl = stacked_da.T.plot.contourf(
ax=ax_main, add_colorbar=False, cmap=plt.cm.get_cmap(
'viridis', 41), levels=41)
ax_main.xaxis.set_minor_locator(tck.AutoMinorLocator())
ax_main.tick_params(
direction='out',
top='on',
bottom='off',
left='off',
right='on',
labelleft='off',
labelbottom='off',
labeltop='on',
labelright='on',
which='major')
ax_main.tick_params(
direction='out',
top='on',
bottom='off',
left='off',
right='on',
which='minor')
ax_main.grid(
True,
which='major',
axis='both',
linestyle='-',
color='k',
alpha=0.2)
ax_main.grid(
True,
which='minor',
axis='both',
linestyle='--',
color='k',
alpha=0.2)
ax_main.tick_params(
top='on',
bottom='off',
left='off',
right='on',
labelleft='off',
labelbottom='off',
labeltop='on',
labelright='on')
bottom_limit = ax_main.get_xlim()
left_limit = ax_main.get_ylim()
grp1_mean.plot(ax=ax_left)
grp2_mean.plot(ax=ax_bottom)
ax_bottom.set_xlim(bottom_limit)
ax_left = flip_xy_axes(ax_left, left_limit)
ax_bottom.set_ylabel(r'${}$'.format(units), fontsize=12)
ax_left.set_xlabel(r'${}$'.format(units), fontsize=12)
fig.subplots_adjust(right=0.8)
# divider = make_axes_locatable(ax_main)
# cax1 = divider.append_axes("right", size="5%", pad=0.2)
# [left, bottom, width, height] of figure:
cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.75])
# fig.colorbar(pcl, orientation="vertical", pad=0.2, label=units)
pcl_ticks = np.linspace(
stacked_da.min().item(),
stacked_da.max().item(),
11)
cbar = fig.colorbar(
pcl,
cax=cbar_ax,
label=r'${}$'.format(units),
ticks=pcl_ticks)
cbar.set_ticklabels(['{:.1f}'.format(x) for x in pcl_ticks])
title = ' '.join([name, station])
fig.suptitle(title, fontweight='bold', fontsize=15)
return fig
def time_series_stack_decrapeted(time_da, time_dim='time', grp1='hour', grp2='month'):
"""Takes a time-series xr.DataArray objects and reshapes it using
grp1 and grp2. outout is a xr.Dataset that includes the reshaped DataArray
, its datetime-series and the grps."""
import xarray as xr
import numpy as np
import pandas as pd
# try to infer the freq and put it into attrs for later reconstruction:
freq = pd.infer_freq(time_da[time_dim].values)
name = time_da.name
time_da.attrs['freq'] = freq
attrs = time_da.attrs
# drop all NaNs:
time_da = time_da.dropna(time_dim)
# group grp1 and concat:
grp_obj1 = time_da.groupby(time_dim + '.' + grp1)
s_list = []
for grp_name, grp_inds in grp_obj1.groups.items():
da = time_da.isel({time_dim: grp_inds})
s_list.append(da)
grps1 = [x for x in grp_obj1.groups.keys()]
stacked_da = xr.concat(s_list, dim=grp1)
stacked_da[grp1] = grps1
# group over the concatenated da and concat again:
grp_obj2 = stacked_da.groupby(time_dim + '.' + grp2)
s_list = []
for grp_name, grp_inds in grp_obj2.groups.items():
da = stacked_da.isel({time_dim: grp_inds})
s_list.append(da)
grps2 = [x for x in grp_obj2.groups.keys()]
stacked_da = xr.concat(s_list, dim=grp2)
stacked_da[grp2] = grps2
# numpy part:
# first, loop over both dims and drop NaNs, append values and datetimes:
vals = []
dts = []
for grp1_val in stacked_da[grp1]:
da = stacked_da.sel({grp1: grp1_val})
for grp2_val in da[grp2]:
val = da.sel({grp2: grp2_val}).dropna(time_dim)
vals.append(val.values)
dts.append(val[time_dim].values)
# second, we get the max of the vals after the second groupby:
max_size = max([len(x) for x in vals])
# we fill NaNs and NaT for the remainder of them:
concat_sizes = [max_size - len(x) for x in vals]
concat_arrys = [np.empty((x)) * np.nan for x in concat_sizes]
concat_vals = [np.concatenate(x) for x in list(zip(vals, concat_arrys))]
# 1970-01-01 is the NaT for this time-series:
concat_arrys = [np.zeros((x), dtype='datetime64[ns]')
for x in concat_sizes]
concat_dts = [np.concatenate(x) for x in list(zip(dts, concat_arrys))]
concat_vals = np.array(concat_vals)
concat_dts = np.array(concat_dts)
# finally , we reshape them:
concat_vals = concat_vals.reshape((stacked_da[grp1].shape[0],
stacked_da[grp2].shape[0],
max_size))
concat_dts = concat_dts.reshape((stacked_da[grp1].shape[0],
stacked_da[grp2].shape[0],
max_size))
# create a Dataset and DataArrays for them:
sda = xr.Dataset()
sda.attrs = attrs
sda[name] = xr.DataArray(concat_vals, dims=[grp1, grp2, 'rest'])
sda[time_dim] = xr.DataArray(concat_dts, dims=[grp1, grp2, 'rest'])
sda[grp1] = grps1
sda[grp2] = grps2
sda['rest'] = range(max_size)
return sda
#def time_series_stack2(time_da, time_dim='time', grp1='hour', grp2='month',
# plot=True):
# """produces a stacked plot with two groupings for a time-series"""
# import xarray as xr
# import matplotlib.pyplot as plt
# import numpy as np
# import matplotlib.ticker as tck
# grp_obj1 = time_da.groupby(time_dim + '.' + grp1)
# s_list = []
# for grp_name, grp_inds in grp_obj1.groups.items():
# da = time_da.isel({time_dim: grp_inds})
# # da = da.rename({time_dim: grp + '_' + str(grp_name)})
# # da.name += '_' + grp + '_' + str(grp_name)
# s_list.append(da)
# grps1 = [x for x in grp_obj1.groups.keys()]
# stacked_da = xr.concat(s_list, dim=grp1)
# stacked_da[grp1] = grps1
# s_list = []
# for grp_val in grps1:
# da = stacked_da.sel({grp1: grp_val}).groupby(time_dim + '.' + grp2).mean()
# s_list.append(da)
# stacked_da2 = xr.concat(s_list, dim=grp1)
# if plot:
# try:
# units = time_da.attrs['units']
# except KeyError:
# units = ''
# try:
# station = time_da.attrs['station']
# except KeyError:
# station = ''
# try:
# name = time_da.name
# except KeyError:
# name = ''
# SMALL_SIZE = 12
# MEDIUM_SIZE = 16
# BIGGER_SIZE = 18
# plt.rc('font', size=SMALL_SIZE) # controls default text sizes
# plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
# plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
# plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
# plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# grp1_mean = stacked_da2.mean(grp1)
# grp2_mean = stacked_da2.mean(grp2)
# fig = plt.figure(figsize=(16, 10), dpi=80)
# grid = plt.GridSpec(2, 2, width_ratios=[1, 4], height_ratios=[5, 1], wspace=0, hspace=0)
# # grid = plt.GridSpec(2, 2, hspace=0.5, wspace=0.2)
## ax_main = fig.add_subplot(grid[:-1, :-1])
## ax_left = fig.add_subplot(grid[:-1, 0], xticklabels=[], yticklabels=[])
## ax_bottom = fig.add_subplot(grid[-1, 0:-1], xticklabels=[], yticklabels=[])
# ax_main = fig.add_subplot(grid[0, 1])
# ax_left = fig.add_subplot(grid[0, 0])
# ax_left.grid()
# ax_bottom = fig.add_subplot(grid[1, 1])
# ax_bottom.grid()
# pcl = stacked_da2.T.plot.pcolormesh(ax = ax_main, add_colorbar=False, cmap=plt.cm.get_cmap('viridis', 19), snap=True)
# ax_main.xaxis.set_minor_locator(tck.AutoMinorLocator())
# ax_main.tick_params(direction='out', top='on', bottom='off', left='off', right='on', labelleft='off', labelbottom='off', labeltop='on', labelright='on', which='major')
# ax_main.tick_params(direction='out', top='on', bottom='off', left='off', right='on', which='minor')
# ax_main.grid(True, which='major', axis='both', linestyle='-', color='k', alpha=0.2)
# ax_main.grid(True, which='minor', axis='both', linestyle='--', color='k', alpha=0.2)
# ax_main.tick_params(top='on', bottom='off', left='off', right='on', labelleft='off', labelbottom='off', labeltop='on', labelright='on')
# bottom_limit = ax_main.get_xlim()
# left_limit = ax_main.get_ylim()
# grp1_mean.plot(ax=ax_left)
# grp2_mean.plot(ax=ax_bottom)
# ax_bottom.set_xlim(bottom_limit)
# ax_left = flip_xy_axes(ax_left, left_limit)
# ax_bottom.set_ylabel(units)
# ax_left.set_xlabel(units)
# fig.subplots_adjust(right=0.8)
# # divider = make_axes_locatable(ax_main)
# # cax1 = divider.append_axes("right", size="5%", pad=0.2)
# # [left, bottom, width, height] of figure:
# cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.75])
# # fig.colorbar(pcl, orientation="vertical", pad=0.2, label=units)
# pcl_ticks = np.linspace(stacked_da2.min().item(), stacked_da2.max().item(), 11)
# cbar = fig.colorbar(pcl, cax=cbar_ax, label=units, ticks=pcl_ticks)
# cbar.set_ticklabels(['{:.1f}'.format(x) for x in pcl_ticks])
# title = ' '.join([name, station])
# fig.suptitle(title, fontweight='bold', fontsize=15)
# # fig.colorbar(pcl, ax=ax_main)
# # plt.colorbar(pcl, cax=ax_main)
# return stacked_da2
#def time_series_stack_decraped(time_da, time_dim='time', grp='hour', plot=True):
# import xarray as xr
# grp_obj = time_da.groupby(time_dim + '.' + grp)
# s_list = []
# for grp_name, grp_inds in grp_obj.groups.items():
# da = time_da.isel({time_dim: grp_inds})
# # da = da.rename({time_dim: grp + '_' + str(grp_name)})
# # da.name += '_' + grp + '_' + str(grp_name)
# s_list.append(da)
# grps = [x for x in grp_obj.groups.keys()]
# stacked_da = xr.concat(s_list, dim=grp)
# stacked_da[grp] = grps
# if 'year' in grp:
# resample_span = '1Y'
# elif grp == 'month':
# resample_span = '1Y'
# elif grp == 'day':
# resample_span = '1MS'
# elif grp == 'hour':
# resample_span = '1D'
# elif grp == 'minute':
# resample_span = '1H'
# stacked_da = stacked_da.resample({time_dim: resample_span}).mean(time_dim)
# if plot:
# stacked_da.T.plot.pcolormesh(figsize=(6, 8))
# return stacked_da
def dt_to_np64(time_coord, unit='m', convert_back=False):
"""accepts time_coord and a required time unit and returns a dataarray
of time_coord and unix POSIX continous float index"""
import numpy as np
import xarray as xr
unix_epoch = np.datetime64(0, unit)
one_time_unit = np.timedelta64(1, unit)
time_unit_since_epoch = (time_coord.values - unix_epoch) / one_time_unit
units = {'Y': 'years', 'M': 'months', 'W': 'weeks', 'D': 'days',
'h': 'hours', 'm': 'minutes', 's': 'seconds'}
new_time = xr.DataArray(time_unit_since_epoch, coords=[time_coord],
dims=[time_coord.name])
new_time.attrs['units'] = units[unit] + ' since 1970-01-01 00:00:00'
return new_time
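# A small sketch of dt_to_np64 (hypothetical coord): minutes since the epoch
# come back as floats alongside the original datetime index:
def _example_dt_to_np64():
    import xarray as xr
    import pandas as pd
    time = xr.DataArray(pd.date_range('1970-01-01', periods=3, freq='1min'),
                        dims=['time'], name='time')
    new = dt_to_np64(time, unit='m')
    # new.values is [0., 1., 2.] with units
    # 'minutes since 1970-01-01 00:00:00'
    return new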
def xr_reindex_with_date_range(ds, drop=True, time_dim=None, freq='5min',
dt_min=None, dt_max=None):
"""be careful when drop=True in datasets that have various nans in dataarrays"""
import pandas as pd
if time_dim is None:
time_dim = list(set(ds.dims))[0]
if drop:
ds = ds.dropna(time_dim)
if dt_min is not None:
dt_min = pd.to_datetime(dt_min)
start = pd.to_datetime(dt_min)
else:
start = pd.to_datetime(ds[time_dim].min().item())
if dt_max is not None:
dt_max = pd.to_datetime(dt_max)
end = pd.to_datetime(dt_max)
else:
end = pd.to_datetime(ds[time_dim].max().item())
new_time = pd.date_range(start, end, freq=freq)
ds = ds.reindex({time_dim: new_time})
return ds
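# A brief sketch of xr_reindex_with_date_range (hypothetical data): the gap
# between the two samples is filled with NaN on a regular 5-minute grid:
def _example_xr_reindex_with_date_range():
    import xarray as xr
    import pandas as pd
    time = pd.to_datetime(['2020-01-01 00:00', '2020-01-01 00:10'])
    da = xr.DataArray([1.0, 2.0], dims=['time'], coords={'time': time})
    return xr_reindex_with_date_range(da, freq='5min')  # 3 points, middle NaN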
def add_attr_to_xr(da, key, value, append=False):
"""add attr to da, if append=True, appends it, if key exists"""
import xarray as xr
if isinstance(da, xr.Dataset):
        raise TypeError('only xr.DataArray allowed!')
if key in da.attrs and not append:
raise ValueError('{} already exists in {}, use append=True'.format(key, da.name))
elif key in da.attrs and append:
da.attrs[key] += value
else:
da.attrs[key] = value
return da
def filter_nan_errors(ds, error_str='_error', dim='time', meta='action'):
"""return the data in a dataarray only if its error is not NaN,
assumes that ds is a xr.dataset and includes fields and their error
like this: field, field+error_str"""
import xarray as xr
import numpy as np
from aux_gps import add_attr_to_xr
if isinstance(ds, xr.DataArray):
        raise TypeError('only xr.Dataset allowed!')
fields = [x for x in ds.data_vars if error_str not in x]
for field in fields:
ds[field] = ds[field].where(np.isfinite(
ds[field + error_str])).dropna(dim)
if meta in ds[field].attrs:
append = True
add_attr_to_xr(
ds[field],
meta,
', filtered values with NaN errors',
append)
return ds
def smooth_xr(da, dim='time', weights=[0.25, 0.5, 0.25]):
    # accepts either an xr.DataArray or an xr.Dataset:
import xarray as xr
weight = xr.DataArray(weights, dims=['window'])
if isinstance(da, xr.Dataset):
attrs = dict(zip(da.data_vars, [da[x].attrs for x in da]))
da_roll = da.to_array('dummy').rolling(
{dim: len(weights)}, center=True).construct('window').dot(weight)
da_roll = da_roll.to_dataset('dummy')
for das, attr in attrs.items():
da_roll[das].attrs = attr
da_roll[das].attrs['action'] = 'weighted rolling mean with {} on {}'.format(
weights, dim)
else:
da_roll = da.rolling({dim: len(weights)},
center=True).construct('window').dot(weight)
da_roll.attrs['action'] = 'weighted rolling mean with {} on {}'.format(
weights, dim)
return da_roll
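# A short sketch of smooth_xr (hypothetical data): a centered [0.25, 0.5,
# 0.25] weighted rolling mean; the two endpoints come back as NaN:
def _example_smooth_xr():
    import xarray as xr
    import numpy as np
    da = xr.DataArray(np.arange(10.0), dims=['time'])
    return smooth_xr(da)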
def keep_iqr(da, dim='time', qlow=0.25, qhigh=0.75, k=1.5, drop_with_freq=None,
verbose=False):
"""return the data in a dataarray only in the k times the
Interquartile Range (low, high), drop"""
from aux_gps import add_attr_to_xr
from aux_gps import xr_reindex_with_date_range
try:
quan = da.quantile([qlow, qhigh], dim).values
except TypeError:
# support for datetime64 dtypes:
if da.dtype == '<M8[ns]':
quan = da.astype(int).quantile(
[qlow, qhigh], dim).astype('datetime64[ns]').values
# support for timedelta64 dtypes:
elif da.dtype == '<m8[ns]':
quan = da.astype(int).quantile(
[qlow, qhigh], dim).astype('timedelta64[ns]').values
low = quan[0]
high = quan[1]
iqr = high - low
lower = low - (iqr * k)
higher = high + (iqr * k)
before = da.size
da = da.where((da < higher) & (da > lower)).dropna(dim)
after = da.size
if verbose:
print('dropped {} outliers from {}.'.format(before-after, da.name))
if 'action' in da.attrs:
append = True
else:
append = False
add_attr_to_xr(
da, 'action', ', kept IQR ({}, {}, {})'.format(
qlow, qhigh, k), append)
if drop_with_freq is not None:
da = xr_reindex_with_date_range(da, freq=drop_with_freq)
return da
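# A minimal sketch of keep_iqr (hypothetical data): a single gross outlier
# is dropped while the bulk of the series survives:
def _example_keep_iqr():
    import xarray as xr
    import pandas as pd
    import numpy as np
    time = pd.date_range('2020-01-01', periods=100, freq='D')
    vals = np.random.randn(100)
    vals[50] = 100.0  # inject an outlier
    da = xr.DataArray(vals, dims=['time'], coords={'time': time}, name='demo')
    return keep_iqr(da, verbose=True)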
def transform_ds_to_lat_lon_alt(ds, coords_name=['X', 'Y', 'Z'],
error_str='_error', time_dim='time'):
"""appends to the data vars of ds(xr.dataset) the lat, lon, alt fields
and their error where the geocent fields are X, Y, Z"""
import xarray as xr
from aux_gps import get_latlonalt_error_from_geocent_error
geo_fields = [ds[x].values for x in coords_name]
geo_errors = [ds[x + error_str].values for x in coords_name]
latlong = get_latlonalt_error_from_geocent_error(*geo_fields, *geo_errors)
new_fields = ['lon', 'lat', 'alt', 'lon_error', 'lat_error', 'alt_error']
new_names = ['Longitude', 'Latitude', 'Altitude']
new_units = ['Degrees', 'Degrees', 'm']
for name, data in zip(new_fields, latlong):
ds[name] = xr.DataArray(data, dims=[time_dim])
for name, unit, full_name in zip(new_fields[0:3], new_units[0:3],
new_names[0:3]):
ds[name].attrs['full_name'] = full_name
ds[name].attrs['units'] = unit
return ds
def get_latlonalt_error_from_geocent_error(X, Y, Z, xe=None, ye=None, ze=None):
"""returns the value and error in lat(decimal degree), lon(decimal degree)
and alt(meters) for X, Y, Z in geocent coords (in meters), all input is
lists or np.arrays"""
import pyproj
ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
lon, lat, alt = pyproj.transform(ecef, lla, X, Y, Z, radians=False)
if (xe is not None) and (ye is not None) and (ze is not None):
lon_pe, lat_pe, alt_pe = pyproj.transform(ecef, lla, X + xe, Y + ye,
Z + ze, radians=False)
lon_me, lat_me, alt_me = pyproj.transform(ecef, lla, X - xe, Y - ye,
Z - ze, radians=False)
lon_e = (lon_pe - lon_me) / 2.0
lat_e = (lat_pe - lat_me) / 2.0
alt_e = (alt_pe - alt_me) / 2.0
return lon, lat, alt, lon_e, lat_e, alt_e
else:
return lon, lat, alt
def path_glob(path, glob_str='*.Z', return_empty_list=False):
"""returns all the files with full path(pathlib3 objs) if files exist in
path, if not, returns FilenotFoundErro"""
from pathlib import Path
# if not isinstance(path, Path):
# raise Exception('{} must be a pathlib object'.format(path))
path = Path(path)
    # note: no is_file() filter here on purpose; the original truthy
    # `file.is_file` (method object) never filtered anything, and some
    # callers glob for directories (e.g., '*/') and filter the result
    files_with_path = [file for file in path.glob(glob_str)]
if not files_with_path and not return_empty_list:
raise FileNotFoundError('{} search in {} found no files.'.format(glob_str,
path))
elif not files_with_path and return_empty_list:
return files_with_path
else:
return files_with_path
def find_cross_points(df, cols=None):
"""find if col A is crossing col B in df and is higher (Up) or lower (Down)
than col B (after crossing). cols=None means that the first two cols of
df are used."""
import numpy as np
if cols is None:
cols = df.columns.values[0:2]
df['Diff'] = df[cols[0]] - df[cols[1]]
df['Cross'] = np.select([((df.Diff < 0) & (df.Diff.shift() > 0)), ((
df.Diff > 0) & (df.Diff.shift() < 0))], ['Up', 'Down'], None)
return df
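# A small sketch of find_cross_points (hypothetical data): two oscillating
# columns are tagged at the rows where they cross each other:
def _example_find_cross_points():
    import numpy as np
    import pandas as pd
    x = np.linspace(0, 10, 200)
    df = pd.DataFrame({'A': np.sin(x), 'B': np.cos(x)})
    df = find_cross_points(df)
    return df[df['Cross'].notnull()]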
def get_rinex_filename_from_datetime(station, dt='2012-05-07', st_lower=True):
"""return rinex filename from datetime string"""
import pandas as pd
def filename_from_single_date(station, date):
        day = pd.to_datetime(date).dayofyear
        year = pd.to_datetime(date).year
        if 'T' in date:
            hour = pd.to_datetime(date).hour
hour = letters_to_hours_and_vice_verse(hour)
else:
hour = '0'
if len(str(day)) == 1:
str_day = '00' + str(day) + hour
elif len(str(day)) == 2:
str_day = '0' + str(day) + hour
elif len(str(day)) == 3:
str_day = str(day) + hour
if st_lower:
st = station.lower()
else:
st = station
filename = st + str_day + '.' + str(year)[2:4] + 'd'
return filename
if isinstance(dt, list):
filenames = []
for date in dt:
filename = filename_from_single_date(station, date)
filenames.append(filename)
return filenames
else:
filename = filename_from_single_date(station, dt)
return filename
def letters_to_hours_and_vice_verse(symbol):
"""A - 0 hours, B- 1 hours, until X = 23 hours"""
import string
import numpy as np
import pandas as pd
hour_letters = [x.upper() for x in string.ascii_letters][:24]
hour_numbers = np.arange(0, 24)
hour_string_dict = dict(zip(hour_letters, hour_numbers))
reverse_dict = dict(zip(hour_numbers, hour_letters))
if isinstance(symbol, int):
return reverse_dict.get(symbol, 'NaN')
elif isinstance(symbol, str):
        return pd.Timedelta('{} hour'.format(hour_string_dict.get(symbol, 'NaN')))
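# A tiny sketch of letters_to_hours_and_vice_verse: 0 maps to 'A' and 'F'
# maps back to a 5-hour Timedelta:
def _example_letters_to_hours():
    import pandas as pd
    assert letters_to_hours_and_vice_verse(0) == 'A'
    assert letters_to_hours_and_vice_verse('F') == pd.Timedelta('5 hour')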
def get_timedate_and_station_code_from_rinex(rinex_str='tela0010.05d',
just_dt=False, st_upper=True):
"""return datetime from rinex2 format"""
import pandas as pd
import datetime
def get_dt_from_single_rinex(rinex_str):
station = rinex_str[0:4]
days = int(rinex_str[4:7])
hour = rinex_str[7]
year = rinex_str[-3:-1]
Year = datetime.datetime.strptime(year, '%y').strftime('%Y')
dt = datetime.datetime(int(Year), 1, 1) + datetime.timedelta(days - 1)
dt = pd.to_datetime(dt)
if hour != '0':
hours_to_add = letters_to_hours_and_vice_verse(hour)
# print(hours_to_add)
dt += hours_to_add
if st_upper:
st = station.upper()
else:
st = station
return dt, st
if isinstance(rinex_str, list):
dt_list = []
for rstr in rinex_str:
dt, station = get_dt_from_single_rinex(rstr)
dt_list.append(dt)
return dt_list
else:
dt, station = get_dt_from_single_rinex(rinex_str)
if just_dt:
return dt
else:
return dt, station
def configure_logger(name='general', filename=None):
import logging
import sys
stdout_handler = logging.StreamHandler(sys.stdout)
if filename is not None:
file_handler = logging.FileHandler(filename=filename, mode='a')
handlers = [file_handler, stdout_handler]
else:
handlers = [stdout_handler]
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
handlers=handlers
)
logger = logging.getLogger(name=name)
return logger
def process_gridsearch_results(GridSearchCV):
    """take a fitted GridSearchCV object with cv_results_ and convert it
    into an xarray dataset"""
    import xarray as xr
    import pandas as pd
    import numpy as np
params = GridSearchCV.param_grid
scoring = GridSearchCV.scoring
names = [x for x in params.keys()]
if len(params) > 1:
# unpack param_grid vals to list of lists:
pro = [[y for y in x] for x in params.values()]
ind = pd.MultiIndex.from_product((pro), names=names)
result_names = [x for x in GridSearchCV.cv_results_.keys() if
'time' not in x and 'param' not in x and
'rank' not in x]
ds = xr.Dataset()
for da_name in result_names:
da = xr.DataArray(GridSearchCV.cv_results_[da_name])
ds[da_name] = da
ds = ds.assign(dim_0=ind).unstack('dim_0')
elif len(params) == 1:
result_names = [x for x in GridSearchCV.cv_results_.keys() if
'time' not in x and 'param' not in x and
'rank' not in x]
ds = xr.Dataset()
for da_name in result_names:
da = xr.DataArray(GridSearchCV.cv_results_[da_name], dims={**params})
ds[da_name] = da
for k, v in params.items():
ds[k] = v
name = [x for x in ds.data_vars.keys() if 'split' in x and 'test' in x]
split_test = xr.concat(ds[name].data_vars.values(), dim='kfolds')
split_test.name = 'split_test'
kfolds_num = len(name)
name = [x for x in ds.data_vars.keys() if 'split' in x and 'train' in x]
split_train = xr.concat(ds[name].data_vars.values(), dim='kfolds')
split_train.name = 'split_train'
name = [x for x in ds.data_vars.keys() if 'mean_test' in x]
mean_test = xr.concat(ds[name].data_vars.values(), dim='scoring')
mean_test.name = 'mean_test'
name = [x for x in ds.data_vars.keys() if 'mean_train' in x]
mean_train = xr.concat(ds[name].data_vars.values(), dim='scoring')
mean_train.name = 'mean_train'
name = [x for x in ds.data_vars.keys() if 'std_test' in x]
std_test = xr.concat(ds[name].data_vars.values(), dim='scoring')
std_test.name = 'std_test'
name = [x for x in ds.data_vars.keys() if 'std_train' in x]
std_train = xr.concat(ds[name].data_vars.values(), dim='scoring')
std_train.name = 'std_train'
ds = ds.drop(ds.data_vars.keys())
ds['mean_test'] = mean_test
ds['mean_train'] = mean_train
ds['std_test'] = std_test
ds['std_train'] = std_train
ds['split_test'] = split_test
ds['split_train'] = split_train
mean_test_train = xr.concat(ds[['mean_train', 'mean_test']].data_vars.
values(), dim='train_test')
std_test_train = xr.concat(ds[['std_train', 'std_test']].data_vars.
values(), dim='train_test')
split_test_train = xr.concat(ds[['split_train', 'split_test']].data_vars.
values(), dim='train_test')
ds['train_test'] = ['train', 'test']
ds = ds.drop(ds.data_vars.keys())
ds['MEAN'] = mean_test_train
ds['STD'] = std_test_train
# CV = xr.Dataset(coords=GridSearchCV.param_grid)
ds = xr.concat(ds[['MEAN', 'STD']].data_vars.values(), dim='MEAN_STD')
ds['MEAN_STD'] = ['MEAN', 'STD']
ds.name = 'CV_mean_results'
ds.attrs['param_names'] = names
if isinstance(scoring, str):
ds.attrs['scoring'] = scoring
ds = ds.squeeze(drop=True)
else:
ds['scoring'] = scoring
ds = ds.to_dataset()
ds['CV_full_results'] = split_test_train
ds['kfolds'] = np.arange(kfolds_num)
return ds
def calculate_std_error(arr, statistic='std'):
from scipy.stats import moment
import numpy as np
# remove nans:
arr = arr[np.logical_not(np.isnan(arr))]
n = len(arr)
if statistic == 'std':
mu4 = moment(arr, moment=4)
sig4 = np.var(arr)**2.0
se = mu4 - sig4 * (n - 3) / (n - 1)
se = (se / n)**0.25
elif statistic == 'mean':
std = np.std(arr)
se = std / np.sqrt(n)
return se
def calculate_distance_between_two_lat_lon_points(
lat1,
lon1,
lat2,
lon2,
orig_epsg='4326',
meter_epsg='2039',
verbose=False):
"""calculate the distance between two points (lat,lon) with epsg of
WGS84 and convert to meters with a local epsg. if lat1 is array then
calculates the distance of many points."""
import geopandas as gpd
import pandas as pd
try:
df1 = pd.DataFrame(index=lat1.index)
except AttributeError:
try:
len(lat1)
except TypeError:
lat1 = [lat1]
df1 = pd.DataFrame(index=[x for x in range(len(lat1))])
df1['lat'] = lat1
df1['lon'] = lon1
first_gdf = gpd.GeoDataFrame(
df1, geometry=gpd.points_from_xy(
df1['lon'], df1['lat']))
first_gdf.crs = {'init': 'epsg:{}'.format(orig_epsg)}
first_gdf.to_crs(epsg=int(meter_epsg), inplace=True)
try:
df2 = pd.DataFrame(index=lat2.index)
except AttributeError:
try:
len(lat2)
except TypeError:
lat2 = [lat2]
df2 = pd.DataFrame(index=[x for x in range(len(lat2))])
df2['lat'] = lat2
df2['lon'] = lon2
second_gdf = gpd.GeoDataFrame(
df2, geometry=gpd.points_from_xy(
df2['lon'], df2['lat']))
second_gdf.crs = {'init': 'epsg:{}'.format(orig_epsg)}
second_gdf.to_crs(epsg=int(meter_epsg), inplace=True)
ddf = first_gdf.geometry.distance(second_gdf.geometry)
return ddf
def get_nearest_lat_lon_for_xy(lat_da, lon_da, points):
"""used to access UERRA reanalysis, where the variable has x,y as coords"""
import numpy as np
from scipy.spatial import cKDTree
if isinstance(points, np.ndarray):
points = list(points)
combined_x_y_arrays = np.dstack(
[lat_da.values.ravel(), lon_da.values.ravel()])[0]
mytree = cKDTree(combined_x_y_arrays)
points = np.atleast_2d(points)
dist, inds = mytree.query(points)
yx = []
for ind in inds:
y, x = np.unravel_index(ind, lat_da.shape)
yx.append([y, x])
return yx
def get_altitude_of_point_using_dem(lat, lon, dem_path=work_yuval / 'AW3D30'):
import xarray as xr
file = sorted(path_glob(dem_path, 'israel_dem*.nc'))[0]
awd = xr.load_dataarray(file)
alt = awd.sel(lon=float(lon), lat=float(lat),
method='nearest').values.item()
return alt
def coarse_dem(data, dem_path=work_yuval / 'AW3D30'):
"""coarsen to data coords"""
# data is lower resolution than awd
import salem
import xarray as xr
# determine resulotion:
try:
lat_size = data.lat.size
lon_size = data.lon.size
except AttributeError:
print('data needs to have lat and lon coords..')
return
# check for file exist:
filename = 'israel_dem_' + str(lon_size) + '_' + str(lat_size) + '.nc'
my_file = dem_path / filename
if my_file.is_file():
awds = xr.open_dataarray(my_file)
print('{} is found and loaded...'.format(filename))
else:
awd = salem.open_xr_dataset(dem_path / 'israel_dem.tif')
awds = data.salem.lookup_transform(awd)
awds = awds['data']
awds.to_netcdf(dem_path / filename)
print('{} is saved to {}'.format(filename, dem_path))
return awds
def invert_dict(d):
"""unvert dict"""
inverse = dict()
for key in d:
# Go through the list that is saved in the dict:
for item in d[key]:
            # Check if the item already exists in the inverted dict
            if item not in inverse:
                # If not, start a new list with this key
                inverse[item] = [key]
else:
inverse[item].append(key)
return inverse
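# A small sketch of invert_dict on a dict of lists; with the list fix above,
# every item maps to all the keys that mention it:
def _example_invert_dict():
    d = {'gps': ['tela', 'jslm'], 'met': ['tela']}
    return invert_dict(d)  # {'tela': ['gps', 'met'], 'jslm': ['gps']}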
def concat_shp(path, shp_file_list, saved_filename):
import geopandas as gpd
import pandas as pd
shapefiles = [path / x for x in shp_file_list]
gdf = pd.concat([gpd.read_file(shp)
for shp in shapefiles]).pipe(gpd.GeoDataFrame)
gdf.to_file(path / saved_filename)
print('saved {} to {}'.format(saved_filename, path))
return
def scale_xr(da, upper=1.0, lower=0.0, unscale=False):
if not unscale:
dh = da.max()
dl = da.min()
da_scaled = (((da-dl)*(upper-lower))/(dh-dl)) + lower
da_scaled.attrs = da.attrs
da_scaled.attrs['scaled'] = True
da_scaled.attrs['lower'] = dl.item()
da_scaled.attrs['upper'] = dh.item()
if unscale and da.attrs['scaled']:
dh = da.max()
dl = da.min()
upper = da.attrs['upper']
lower = da.attrs['lower']
da_scaled = (((da-dl)*(upper-lower))/(dh-dl)) + lower
return da_scaled
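# A round-trip sketch of scale_xr (hypothetical data): scaling to [0, 1]
# and unscaling should recover the original values:
def _example_scale_xr():
    import xarray as xr
    import numpy as np
    da = xr.DataArray(np.random.randn(20) * 5 + 2, dims=['time'])
    scaled = scale_xr(da)
    restored = scale_xr(scaled, unscale=True)
    return bool(np.allclose(da.values, restored.values))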
def print_saved_file(name, path):
print(name + ' was saved to ' + str(path))
return
def dim_union(da_list, dim='time'):
import pandas as pd
setlist = [set(x[dim].values) for x in da_list]
empty_list = [x for x in setlist if not x]
if empty_list:
print('NaN dim drop detected, check da...')
return
u = list(set.union(*setlist))
# new_dim = list(set(a.dropna(dim)[dim].values).intersection(
# set(b.dropna(dim)[dim].values)))
if dim == 'time':
new_dim = sorted(pd.to_datetime(u))
else:
new_dim = sorted(u)
return new_dim
def dim_intersection(da_list, dim='time', dropna=True, verbose=None):
import pandas as pd
if dropna:
setlist = [set(x.dropna(dim)[dim].values) for x in da_list]
else:
setlist = [set(x[dim].values) for x in da_list]
empty_list = [x for x in setlist if not x]
if empty_list:
if verbose == 0:
print('NaN dim drop detected, check da...')
return None
u = list(set.intersection(*setlist))
# new_dim = list(set(a.dropna(dim)[dim].values).intersection(
# set(b.dropna(dim)[dim].values)))
if dim == 'time':
new_dim = sorted(pd.to_datetime(u))
else:
new_dim = sorted(u)
return new_dim
def get_unique_index(da, dim='time', verbose=False):
import numpy as np
before = da[dim].size
_, index = np.unique(da[dim], return_index=True)
da = da.isel({dim: index})
after = da[dim].size
if verbose:
print('dropped {} duplicate coord entries.'.format(before-after))
return da
def Zscore_xr(da, dim='time'):
"""input is a dattarray of data and output is a dattarray of Zscore
for the dim"""
attrs = da.attrs
z = (da - da.mean(dim=dim)) / da.std(dim=dim)
z.attrs = attrs
if 'units' in attrs.keys():
z.attrs['old_units'] = attrs['units']
z.attrs['action'] = 'converted to Z-score'
z.attrs['units'] = 'std'
return z
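# A quick sketch of Zscore_xr (hypothetical data): the result has (near)
# zero mean and unit standard deviation along 'time':
def _example_zscore_xr():
    import xarray as xr
    import numpy as np
    da = xr.DataArray(np.random.randn(50) * 3 + 10, dims=['time'])
    z = Zscore_xr(da)
    return float(z.mean()), float(z.std())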
def desc_nan(data, verbose=True):
"""count only NaNs in data and returns the thier amount and the non-NaNs"""
import numpy as np
import xarray as xr
def nan_da(data):
nans = np.count_nonzero(np.isnan(data.values))
non_nans = np.count_nonzero(~np.isnan(data.values))
if verbose:
print(str(type(data)))
print(data.name + ': non-NaN entries: ' + str(non_nans) + ' of total ' +
str(data.size) + ', shape:' + str(data.shape) + ', type:' +
str(data.dtype))
print('Dimensions:')
dim_nn_list = []
for dim in data.dims:
dim_len = data[dim].size
            dim_non_nans = int(data.dropna(dim)[dim].count())
dim_nn_list.append(dim_non_nans)
if verbose:
print(dim + ': non-NaN labels: ' +
str(dim_non_nans) + ' of total ' + str(dim_len))
return non_nans
if isinstance(data, xr.DataArray):
nn_dict = nan_da(data)
return nn_dict
elif isinstance(data, np.ndarray):
nans = np.count_nonzero(np.isnan(data))
non_nans = np.count_nonzero(~np.isnan(data))
if verbose:
print(str(type(data)))
print('non-NaN entries: ' + str(non_nans) + ' of total ' +
str(data.size) + ', shape:' + str(data.shape) + ', type:' +
str(data.dtype))
elif isinstance(data, xr.Dataset):
for varname in data.data_vars.keys():
non_nans = nan_da(data[varname])
return non_nans
class lmfit_model_switcher(object):
def pick_model(self, model_name, *args, **kwargs):
"""Dispatch method"""
method_name = str(model_name)
# Get the method from 'self'. Default to a lambda.
method = getattr(self, method_name, lambda: "Invalid ML Model")
# Call the method as we return it
self.model = method(*args, **kwargs)
return self
def pick_param(self, name, **kwargs):
# **kwargs.keys() = value, vary, min, max, expr
if not hasattr(self, 'model'):
            raise AttributeError('please pick a model first!')
else:
self.model.set_param_hint(name, **kwargs)
return
def generate_params(self, **kwargs):
if not hasattr(self, 'model'):
            raise AttributeError('please pick a model first!')
else:
            if kwargs:
for key, val in kwargs.items():
self.model.set_param_hint(key, **val)
self.params = self.model.make_params()
else:
self.params = self.model.make_params()
return
def line(self, line_pre='line_'):
from lmfit import Model
def func(time, slope, intercept):
f = slope * time + intercept
return f
return Model(func, independent_vars=['time'], prefix=line_pre)
def sin(self, sin_pre='sin_'):
from lmfit import Model
def func(time, amp, freq, phase):
import numpy as np
f = amp * np.sin(2 * np.pi * freq * (time - phase))
return f
return Model(func, independent_vars=['time'], prefix=sin_pre)
def sin_constant(self, sin_pre='sin_', con_pre='constant_'):
from lmfit.models import ConstantModel
constant = ConstantModel(prefix=con_pre)
lmfit = lmfit_model_switcher()
lmfit.pick_model('sin', sin_pre)
return lmfit.model + constant
def sin_linear(self, sin_pre='sin_', line_pre='line_'):
        lmfit = lmfit_model_switcher()
        sin = lmfit.pick_model('sin', sin_pre).model
        line = lmfit.pick_model('line', line_pre).model
        return sin + line
def sum_sin(self, k):
        lmfit = lmfit_model_switcher()
        sin = lmfit.pick_model('sin', 'sin0_').model
        for i in range(k - 1):
            sin = sin + lmfit.pick_model('sin', 'sin{}_'.format(i + 1)).model
        return sin
def sum_sin_constant(self, k, con_pre='constant_'):
from lmfit.models import ConstantModel
constant = ConstantModel(prefix=con_pre)
lmfit = lmfit_model_switcher()
        sum_sin = lmfit.pick_model('sum_sin', k).model
return sum_sin + constant
def sum_sin_linear(self, k, line_pre='line_'):
lmfit = lmfit_model_switcher()
        sum_sin = lmfit.pick_model('sum_sin', k).model
        line = lmfit.pick_model('line', line_pre).model
        return sum_sin + line
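# A minimal usage sketch of lmfit_model_switcher (hypothetical data): build
# a single-sine model and fit it to a synthetic signal:
def _example_lmfit_model_switcher():
    import numpy as np
    switcher = lmfit_model_switcher()
    switcher.pick_model('sin')
    params = switcher.model.make_params(sin_amp=1.5, sin_freq=0.1,
                                        sin_phase=0.0)
    t = np.linspace(0, 50, 500)
    y = 2.0 * np.sin(2 * np.pi * 0.1 * (t - 1.0))
    return switcher.model.fit(y, params, time=t)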
def pick_lmfit_model(name='sine'):
import numpy as np
import lmfit
class MySineModel(lmfit.Model):
def __init__(self, *args, **kwargs):
def sine(x, ampl, offset, freq, x0):
return ampl * np.sin((x - x0)*2*np.pi*freq) + offset
super(MySineModel, self).__init__(sine, *args, **kwargs)
def guess(self, data, freq=None, **kwargs):
params = self.make_params()
def pset(param, value):
params['{}{}'.format(self.prefix, param)].set(value=value)
pset("ampl", np.max(data) - np.min(data))
pset("offset", np.mean(data) + 0.01)
if freq is None:
pset("freq", 1)
else:
pset("freq", freq)
pset("x0", 0)
return lmfit.models.update_param_vals(params, self.prefix, **kwargs)
name_dict = {'sine': MySineModel()}
return name_dict.get(name)
def move_or_copy_files_from_doy_dir_structure_to_single_path(yearly_path=work_yuval/'SST',
movepath=work_yuval/'SST',
filetype='*.nc',
opr='copy'):
"""move files from year-doy directory structure to another path."""
from aux_gps import path_glob
import shutil
year_dirs = sorted([x for x in path_glob(yearly_path, '*/') if x.is_dir()])
years = []
for year_dir in year_dirs:
print('year {} is being processed.'.format(year_dir))
years.append(year_dir.as_posix().split('/')[-1])
doy_dirs = sorted([x for x in path_glob(year_dir, '*/') if x.is_dir()])
for doy_dir in doy_dirs:
file = path_glob(doy_dir, filetype)[0]
same_file = file.as_posix().split('/')[-1]
orig = file
dest = movepath / same_file
if opr == 'copy':
shutil.copy(orig.resolve(), dest.resolve())
elif opr == 'move':
shutil.move(orig.resolve(), dest.resolve())
            print('{} has been {} to {}'.format(
                same_file, 'copied' if opr == 'copy' else 'moved', movepath))
return years
|
function [MOIndex] = lipschitz(u, y, maxlag, model, fig)
% A method to determine the lag space, based on Lipschitz quotients
%
%% Syntax
% [MOIndex] = lipschitz(u, y, maxlag)
%
%% Description
% Given a set of corresponding inputs and outputs the function calculates
% so called Lipschitz number for each combination of m and l where m
% represents the number of delayed outputs, and l the number of delayed
% inputs for the case of dynamic system: y(t) = f(y(t-1),...,y(t-l),
% u(t-1),..., u(t-m)).
% Too small m and l result in a large Lipschitz number, while too large lag
% spaces have little further effect on the Lipschitz number. In order to
% determine the proper lag space we have to look for the knee point, where
% the Lipschitz number stops decreasing.
%
% Input:
% * u ... the system input(column vector)
% * y ... the system output(column vector)
% * maxlag ... the max lag space to investigate
% * model ... optional - 'arx' if we only want to investigate m = l case
%
% Output:
% * MOIndex ... the maxlag by maxlag matrix containing calculated Lipschitz
% numbers (Model order index) for each combination of m and l
%% Signature
% Written by Tomaz Sustar
% Based on the algorithm by Xiangdong He and Haruhiko Asada
if(nargin<4), model='unknown'; end;
NN = length(y); % number of samples
MOIndex = zeros(maxlag); % matrix of lipschitz's indexes
for m=1:maxlag, % number of delayed outputs
% m,
for l=0:maxlag, % number of delayed inputs
        % if we are investigating the arx model structure only, we calculate MOIndex only when m = l
if(strcmp(model, 'arx') && l ~= m), continue; end;
lag = max(l,m); % the greater from m, l
% Because of the lag we can construct only NN - lag input output pairs
N = NN-lag; % number of input - output pairs.
p = floor(0.02*N); % number of Lipschitz quotients used to determine model order index
[input target] = construct([m l], u, y); % construct regressors and target
% calculation of Lipschitz quotients
Q = zeros(N); % initialize Q matrix for storing Lipschitz qotients
for i=1:N-1,
            % for each input/output pair calculate the Lipschitz quotients with
            % all further input/output pairs. In this way all possible Lipschitz
            % quotients q(i,j) are calculated.
Q(i,i+1:N)=(target(i)-target(i+1:N)).^2 ./ ...
sum((repmat(input(i,:), N-i, 1)-input(i+1:N,:)).^2, 2);
end
Q_max = Q(Q~=0); % remove zeros
        Q_max = (-sort(-Q_max(:))); % sort quotients in descending order
Q_max = sqrt(Q_max(1:p)); % take p - largest quotients
n = m+l;
MOIndex(m,l+1)=prod(sqrt(n)*Q_max)^(1/p); % calculates order index and stores it to the matrix
end % end for l
end % end for m
% draw some figures
if(~strcmp(model, 'arx'))
figure('Name', 'Model order index vs. lag space')
surf(1:maxlag, 0:maxlag,MOIndex');
view([-600 40]);
set(gca, 'Zscale','log');
    set(gca, 'XTick', 1:maxlag)
    set(gca, 'YTick', 0:maxlag)
xlabel('l - number of past outputs')
ylabel('m - number of past inputs')
zlabel('Model Order Index')
end
if(nargin > 4)
figure(fig);
else
figure('Name', 'Model order index vs. lag space - arx case')
end
semilogy(diag(MOIndex));
xlabel('m = l - number of past inputs and outputs');
ylabel('Model order index');
set(gca, 'XTick', 1:maxlag);
grid on;
|
///////////////////////////////////////////////////////////////////////////////
/// \file characteristic_series.hpp
/// A time series that uses characteristic storage
//
// Copyright 2006 Eric Niebler. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_TIME_SERIES_CHARACTERISTIC_SERIES_HPP_EAN_05_29_2006
#define BOOST_TIME_SERIES_CHARACTERISTIC_SERIES_HPP_EAN_05_29_2006
#include <boost/time_series/time_series_facade.hpp>
#include <boost/time_series/storage/characteristic_array.hpp>
namespace boost { namespace time_series
{
/// \brief A \c Mutable_TimeSeries that has the unit value within some <tt>[start,stop)</tt> range,
/// and zero elsewhere.
///
/// A \c Mutable_TimeSeries that has the unit value within some <tt>[start,stop)</tt> range,
/// and zero elsewhere.
///
/// The named parameters for the constructor are, in order:
/// -# \c start, with a default of \c Offset(0)
/// -# \c stop, with a default of \c Offset(0)
/// -# \c value, with a default of \c Value(1)
/// -# \c discretization, with a default of \c Discretization(1)
/// -# \c zero, with a default of \c Value(0)
template<typename Value, typename Discretization, typename Offset>
struct characteristic_unit_series
: time_series_facade<
characteristic_unit_series<Value, Discretization, Offset>
, storage::characteristic_array<Value, Offset>
, Discretization
>
{
typedef time_series_facade<
characteristic_unit_series<Value, Discretization, Offset>
, storage::characteristic_array<Value, Offset>
, Discretization
> base_type;
using base_type::operator=;
BOOST_TIME_SERIES_DEFINE_CTORS(characteristic_unit_series)
};
namespace traits
{
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct storage_category<characteristic_unit_series<Value, Discretization, Offset> >
: storage_category<storage::characteristic_array<Value, Offset> >
{};
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct discretization_type<characteristic_unit_series<Value, Discretization, Offset> >
{
typedef Discretization type;
};
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct offset_type<characteristic_unit_series<Value, Discretization, Offset> >
{
typedef Offset type;
};
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct generate_series<characteristic_storage_tag, Value, Discretization, Offset>
{
typedef characteristic_unit_series<Value, Discretization, Offset> type;
};
}
}}
namespace boost { namespace sequence { namespace impl
{
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct tag<time_series::characteristic_unit_series<Value, Discretization, Offset> >
{
typedef time_series_tag type;
};
}}}
namespace boost { namespace time_series
{
/// \brief A \c Mutable_TimeSeries that has a distinct value within some <tt>[start,stop)</tt> range,
/// and zero elsewhere.
///
/// A \c Mutable_TimeSeries that has a distinct value within some <tt>[start,stop)</tt> range,
/// and zero elsewhere.
///
/// The named parameters for the constructor are, in order:
/// -# \c start, with a default of \c Offset(0)
/// -# \c stop, with a default of \c Offset(0)
/// -# \c value, with a default of \c Value(1)
/// -# \c discretization, with a default of \c Discretization(1)
/// -# \c zero, with a default of \c Value(0)
template<typename Value, typename Discretization, typename Offset>
struct characteristic_series
: time_series_facade<
characteristic_series<Value, Discretization, Offset>
, storage::characteristic_array<Value, Offset, storage::constant_elements<Value> >
, Discretization
>
{
typedef time_series_facade<
characteristic_series<Value, Discretization, Offset>
, storage::characteristic_array<Value, Offset, storage::constant_elements<Value> >
, Discretization
> base_type;
using base_type::operator=;
BOOST_TIME_SERIES_DEFINE_CTORS(characteristic_series)
};
namespace traits
{
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct storage_category<characteristic_series<Value, Discretization, Offset> >
: storage_category<storage::characteristic_array<Value, Offset, storage::constant_elements<Value> > >
{};
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct discretization_type<characteristic_series<Value, Discretization, Offset> >
{
typedef Discretization type;
};
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct offset_type<characteristic_series<Value, Discretization, Offset> >
{
typedef Offset type;
};
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct generate_series<scaled_storage_tag<characteristic_storage_tag>, Value, Discretization, Offset>
{
typedef characteristic_series<Value, Discretization, Offset> type;
};
}
}}
namespace boost { namespace sequence { namespace impl
{
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct tag<time_series::characteristic_series<Value, Discretization, Offset> >
{
typedef time_series_tag type;
};
}}}
namespace boost { namespace constructors { namespace impl
{
/// INTERNAL ONLY
struct characteristic_series_tag;
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct tag<time_series::characteristic_series<Value, Discretization, Offset> >
{
typedef characteristic_series_tag type;
};
/// INTERNAL ONLY
template<typename Value, typename Discretization, typename Offset>
struct tag<time_series::characteristic_unit_series<Value, Discretization, Offset> >
{
typedef characteristic_series_tag type;
};
/// INTERNAL ONLY
template<typename T>
struct construct<T, characteristic_series_tag>
: arg_pack_construct
{
typedef parameter::parameters<
parameter::optional<time_series::tag::start>
, parameter::optional<time_series::tag::stop>
, parameter::optional<time_series::tag::value>
, parameter::optional<time_series::tag::discretization>
, parameter::optional<time_series::tag::zero>
> args_type;
};
}}}
#endif
|
{-# LANGUAGE DeriveDataTypeable, DeriveGeneric #-}
-- |
-- Module : Statistics.Distribution.Exponential
-- Copyright : (c) 2009 Bryan O'Sullivan
-- License : BSD3
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
-- The exponential distribution. This is the continuous probability
-- distribution of the times between events in a Poisson process, in
-- which events occur continuously and independently at a constant
-- average rate.
module Statistics.Distribution.Exponential
(
ExponentialDistribution
-- * Constructors
, exponential
, exponentialFromSample
-- * Accessors
, edLambda
) where
import Data.Aeson (FromJSON, ToJSON)
import Data.Binary (Binary)
import Data.Data (Data, Typeable)
import GHC.Generics (Generic)
import Numeric.MathFunctions.Constants (m_neg_inf)
import qualified Statistics.Distribution as D
import qualified Statistics.Sample as S
import qualified System.Random.MWC.Distributions as MWC
import Statistics.Types (Sample)
import Data.Binary (put, get)
newtype ExponentialDistribution = ED {
edLambda :: Double
} deriving (Eq, Read, Show, Typeable, Data, Generic)
instance FromJSON ExponentialDistribution
instance ToJSON ExponentialDistribution
instance Binary ExponentialDistribution where
put = put . edLambda
get = fmap ED get
instance D.Distribution ExponentialDistribution where
cumulative = cumulative
complCumulative = complCumulative
instance D.ContDistr ExponentialDistribution where
density (ED l) x
| x < 0 = 0
| otherwise = l * exp (-l * x)
logDensity (ED l) x
| x < 0 = m_neg_inf
| otherwise = log l + (-l * x)
quantile = quantile
instance D.Mean ExponentialDistribution where
mean (ED l) = 1 / l
instance D.Variance ExponentialDistribution where
variance (ED l) = 1 / (l * l)
instance D.MaybeMean ExponentialDistribution where
maybeMean = Just . D.mean
instance D.MaybeVariance ExponentialDistribution where
maybeStdDev = Just . D.stdDev
maybeVariance = Just . D.variance
instance D.Entropy ExponentialDistribution where
entropy (ED l) = 1 - log l
instance D.MaybeEntropy ExponentialDistribution where
maybeEntropy = Just . D.entropy
instance D.ContGen ExponentialDistribution where
genContVar = MWC.exponential . edLambda
cumulative :: ExponentialDistribution -> Double -> Double
cumulative (ED l) x | x <= 0 = 0
| otherwise = 1 - exp (-l * x)
complCumulative :: ExponentialDistribution -> Double -> Double
complCumulative (ED l) x | x <= 0 = 1
| otherwise = exp (-l * x)
quantile :: ExponentialDistribution -> Double -> Double
quantile (ED l) p
| p == 1 = 1 / 0
| p >= 0 && p < 1 = -log (1 - p) / l
| otherwise =
error $ "Statistics.Distribution.Exponential.quantile: p must be in [0,1] range. Got: "++show p
-- | Create an exponential distribution.
exponential :: Double -- ^ λ (rate) parameter.
-> ExponentialDistribution
exponential l
| l <= 0 =
error $ "Statistics.Distribution.Exponential.exponential: scale parameter must be positive. Got " ++ show l
| otherwise = ED l
-- | Create exponential distribution from sample. No tests are made to
-- check whether it truly is exponential.
exponentialFromSample :: Sample -> ExponentialDistribution
exponentialFromSample = ED . S.mean
|
UNIVERSIDADE FEDERAL DO PIAUÍ
UNDERGRADUATE PROGRAM IN ELECTRICAL ENGINEERING
COURSE: OPTIMIZATION TECHNIQUES (TÉCNICAS DE OTIMIZAÇÃO)
INSTRUCTOR: ALDIR SILVA SOUSA
STUDENT: MARIANA DE SOUSA MOURA
---
Assignment 2: Unconstrained Optimization via the Bisection Method - Single Variable
**The Parametros class**
The purpose of this class is to pass, in a single variable, all the parameters needed to run the method.
For the bisection method we need the function to be minimized, the initial interval of uncertainty and the required tolerance.
```python
import numpy as np
import sympy as sym  # for creating symbolic variables
class Parametros:
def __init__(self,f,vars,eps,a,b):
self.f = f
self.a = a
self.b = b
    self.vars = vars  # symbolic variables
self.eps = eps
```
```python
def eval(sym_f,vars,x):
map = dict()
map[vars[0]] = x
return float(sym_f.subs(map))
import pandas as pd
import math
def bissecao(params):
n = math.ceil( -math.log(params.eps/(params.b-params.a),2) )
f = params.f
    diff = sym.diff(f)  # returns the symbolic derivative of f
cols = ['a','b','x','f(x)','df(x)']
a = params.a
b = params.b
df = pd.DataFrame([], columns=cols)
for k in range(n):
x = float((b + a)/2)
        fx = eval(f,params.vars,x)  # not strictly needed; kept for debugging
dfx = eval(diff,params.vars,x)
#fx = float(fx)
#dfx = float(dfx)
row = pd.DataFrame([[a,b,x,fx,dfx]],columns=cols)
df = df.append(row, ignore_index=True)
        if (dfx == 0): break  # minimum found; stop
if (dfx > 0 ):
            # step 2: derivative positive, the minimum lies to the left
b = x
else:
            # step 3: derivative negative, the minimum lies to the right
a = x
x = float((b + a)/2)
return x,df
```
**Exercises**
**1)** Solve
min $x^2 - \cos(x) + e^{-2x}$
subject to: $-1 \leq x \leq 1$
```python
import numpy as np
import sympy as sym
x = sym.Symbol('x');
vars = [x]
a = -1
b = 1
l = 1e-3
f1 = x*x - sym.cos(x) + sym.exp(-2*x)
params = Parametros(f1,vars,l,a,b)
x,df=bissecao(params)
```
```python
df
```
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>a</th>
<th>b</th>
<th>x</th>
<th>f(x)</th>
<th>df(x)</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>-1</td>
<td>1</td>
<td>0.000000</td>
<td>0.000000</td>
<td>-2.000000</td>
</tr>
<tr>
<th>1</th>
<td>0</td>
<td>1</td>
<td>0.500000</td>
<td>-0.259703</td>
<td>0.743667</td>
</tr>
<tr>
<th>2</th>
<td>0</td>
<td>0.5</td>
<td>0.250000</td>
<td>-0.299882</td>
<td>-0.465657</td>
</tr>
<tr>
<th>3</th>
<td>0.25</td>
<td>0.5</td>
<td>0.375000</td>
<td>-0.317516</td>
<td>0.171539</td>
</tr>
<tr>
<th>4</th>
<td>0.25</td>
<td>0.375</td>
<td>0.312500</td>
<td>-0.318650</td>
<td>-0.138084</td>
</tr>
<tr>
<th>5</th>
<td>0.3125</td>
<td>0.375</td>
<td>0.343750</td>
<td>-0.320502</td>
<td>0.018857</td>
</tr>
<tr>
<th>6</th>
<td>0.3125</td>
<td>0.34375</td>
<td>0.328125</td>
<td>-0.320189</td>
<td>-0.059068</td>
</tr>
<tr>
<th>7</th>
<td>0.328125</td>
<td>0.34375</td>
<td>0.335938</td>
<td>-0.320498</td>
<td>-0.019971</td>
</tr>
<tr>
<th>8</th>
<td>0.335938</td>
<td>0.34375</td>
<td>0.339844</td>
<td>-0.320538</td>
<td>-0.000523</td>
</tr>
<tr>
<th>9</th>
<td>0.339844</td>
<td>0.34375</td>
<td>0.341797</td>
<td>-0.320529</td>
<td>0.009175</td>
</tr>
<tr>
<th>10</th>
<td>0.339844</td>
<td>0.341797</td>
<td>0.340820</td>
<td>-0.320536</td>
<td>0.004328</td>
</tr>
</tbody>
</table>
</div>
```python
x
```
0.34033203125
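As a sanity check, the first-order optimality condition here is $f'(x) = 2x + \sin(x) - 2e^{-2x} = 0$; at $x \approx 0.3403$ this gives roughly $0.681 + 0.334 - 1.013 \approx 0$, consistent with the `df(x)` column shrinking toward zero in the table above.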
**2)** The location of the centroid of a circular sector
is given by:
$\overline{x} = \frac{2r \sin(\theta)}{3\theta}$
Determine the angle $\theta$ for which $\overline{x} = r/2$.
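Setting $\overline{x} = r/2$ and cancelling $r$ yields $3\theta - 4\sin(\theta) = 0$. Since `bissecao` locates a minimum rather than a root, the code below minimizes the squared residual $(3\theta - 4\sin\theta)^2$, which attains its minimum value of zero exactly at the root.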
```python
# x = 2*r*sin(teta)/(3*teta)
# x = r/2
# r/2 = 2*r*sin(teta)/(3*teta)
# 3*teta - 4*sin(teta) = 0
# z = 3*teta - 4*sin(teta)
import numpy as np
import sympy as sym
x = sym.Symbol('x');
vars = [x]
a = 70
b = 80
a = (a*sym.pi)/180
b = (b*sym.pi)/180
l = 1e-3
f2 = (3*x -4*sym.sin(x))**2
params = Parametros(f2,vars,l,a,b)
x,df=bissecao(params)
```
```python
df
```
|   | a       | b       | x        | f(x)         | df(x)     |
|---|---------|---------|----------|--------------|-----------|
| 0 | 7*pi/18 | 4*pi/9  | 1.308997 | 4.005309e-03 | 0.248685  |
| 1 | 7*pi/18 | 1.309   | 1.265364 | 3.525637e-04 | -0.067490 |
| 2 | 1.26536 | 1.309   | 1.287180 | 4.554619e-04 | 0.080273  |
| 3 | 1.26536 | 1.28718 | 1.276272 | 1.112400e-06 | 0.003879  |
| 4 | 1.26536 | 1.27627 | 1.270818 | 7.952763e-05 | -0.032425 |
| 5 | 1.27082 | 1.27627 | 1.273545 | 1.556920e-05 | -0.014429 |
| 6 | 1.27354 | 1.27627 | 1.274908 | 2.099881e-06 | -0.005314 |
| 7 | 1.27491 | 1.27627 | 1.275590 | 3.923802e-08 | -0.000727 |
```python
x
```
1.2759311309013235
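Indeed, $\theta \approx 1.27593$ rad (about $73.1^\circ$) satisfies $3\theta \approx 3.828 \approx 4\sin(\theta)$.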
**3)** Fixed-installment financing methodology, computed with compound interest and monthly capitalization:
$q_0 = \frac{1-(1+j)^{-n}}{j}p$
Where:
* n = number of months
* j = monthly interest rate
* p = installment amount
* q0 = amount financed.
Source: BC.
Maria intends to buy a car that costs R\$ 80,000. She has at most R\$ 20,000 available for a down payment, and the rest would be financed. Maria's bank proposes that she make the R\$ 20,000 down payment and split the remainder into 36 installments of R\$ 2,300. What is the monthly interest rate of the operation proposed by the bank?
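The amount financed is $q_0 = 80{,}000 - 20{,}000 = 60{,}000$, with $p = 2300$ and $n = 36$. As in the previous exercise, the code minimizes the squared residual of $q_0 - \frac{1-(1+j)^{-n}}{j}p$ over $j \in (0, 1)$.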
```python
# q0 = (1-(1+j)**(-n))*p/j
# q0 - (1-(1+j)**(-n))*p/j = 0
# z = q0 - (1-(1+j)**(-n))*p/j
import numpy as np
import sympy as sym
j = sym.Symbol('j');
vars = [j]
a = 0
b = 1
q0 = 60000
p = 2300
n = 36
l = 1e-5
f3 = (q0 - (1-(1+j)**(-n))*p/j)**2
params = Parametros(f3,vars,l,a,b)
juros,df=bissecao(params)
```
```python
df
```
```python
juros
```
0.018566131591796875
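That is, a monthly rate of about $1.86\%$. Substituting back: $\frac{1-(1.018566)^{-36}}{0.018566} \times 2300 \approx 26.09 \times 2300 \approx 60{,}000$, which confirms the root.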
**Conclusion**
Both methods can solve minimization or maximization problems for differentiable functions. The Newton's method implementation reaches much higher precision than the Bisection method, but for Newton's method to converge it needs a starting point close to the solution; otherwise the code falls into a loop and fails to converge to the correct answer. The Bisection method does not have this problem, since the value of the derivative at the midpoint guides the search, indicating the direction of the local minimum even when the initial search interval is not close to it. For applications where precision is the priority, the advantages of both methods can be combined: start the search with the Bisection method and then refine the result further by using the point it returns as the starting point for Newton's method.
|
// from ros-control meta packages
#include <controller_interface/controller.h>
#include <hardware_interface/joint_command_interface.h>
#include <pluginlib/class_list_macros.h>
#include <std_msgs/Float64MultiArray.h>
#include <urdf/model.h>
// from kdl packages
#include <kdl/tree.hpp>
#include <kdl/kdl.hpp>
#include <kdl/chain.hpp>
#include <kdl_parser/kdl_parser.hpp> // get kdl tree from urdf
#include <kdl/chaindynparam.hpp> // inverse dynamics
#include <kdl/chainjnttojacsolver.hpp> // jacobian
#include <kdl/chainfksolverpos_recursive.hpp> // forward kinematics
#include <boost/scoped_ptr.hpp>
#include <boost/lexical_cast.hpp>
#define PI 3.141592
#define D2R PI / 180.0
#define R2D 180.0 / PI
#define SaveDataMax 49
#define num_taskspace 6
namespace arm_controllers
{
class Kinematic_Controller : public controller_interface::Controller<hardware_interface::EffortJointInterface>
{
public:
bool init(hardware_interface::EffortJointInterface *hw, ros::NodeHandle &n)
{
// ********* 1. Get joint name / gain from the parameter server *********
// 1.1 Joint Name
if (!n.getParam("joints", joint_names_))
{
ROS_ERROR("Could not find joint name");
return false;
}
n_joints_ = joint_names_.size();
if (n_joints_ == 0)
{
ROS_ERROR("List of joint names is empty.");
return false;
}
else
{
ROS_INFO("Found %d joint names", n_joints_);
for (int i = 0; i < n_joints_; i++)
{
ROS_INFO("%s", joint_names_[i].c_str());
}
}
// 1.2 Gain
// 1.2.1 Joint Controller
Kp_.resize(n_joints_);
Kp_E_.resize(n_joints_);
Kd_.resize(n_joints_);
Ki_.resize(n_joints_);
std::vector<double> Kp(n_joints_), Kp_E(n_joints_), Ki(n_joints_), Kd(n_joints_);
for (size_t i = 0; i < n_joints_; i++)
{
std::string si = boost::lexical_cast<std::string>(i + 1);
if (n.getParam("/elfin/kinematic_controller/gains/elfin_joint" + si + "/pid/p", Kp[i]))
{
Kp_(i) = Kp[i];
}
else
{
std::cout << "/elfin/kinematic_controller/gains/elfin_joint" + si + "/pid/p" << std::endl;
ROS_ERROR("Cannot find pid/p gain");
return false;
}
if (n.getParam("/elfin/kinematic_controller/gains/elfin_joint" + si + "/p_gain/p", Kp_E[i]))
{
Kp_E_(i) = Kp_E[i];
}
else
{
std::cout << "/elfin/kinematic_controller/gains/elfin_joint" + si + "/pid/p" << std::endl;
ROS_ERROR("Cannot find pid/p gain");
return false;
}
if (n.getParam("/elfin/kinematic_controller/gains/elfin_joint" + si + "/pid/i", Ki[i]))
{
Ki_(i) = Ki[i];
}
else
{
ROS_ERROR("Cannot find pid/i gain");
return false;
}
if (n.getParam("/elfin/kinematic_controller/gains/elfin_joint" + si + "/pid/d", Kd[i]))
{
Kd_(i) = Kd[i];
}
else
{
ROS_ERROR("Cannot find pid/d gain");
return false;
}
}
// 2. ********* urdf *********
urdf::Model urdf;
if (!urdf.initParam("robot_description"))
{
ROS_ERROR("Failed to parse urdf file");
return false;
}
else
{
ROS_INFO("Found robot_description");
}
// 3. ********* Get the joint object to use in the realtime loop [Joint Handle, URDF] *********
for (int i = 0; i < n_joints_; i++)
{
try
{
joints_.push_back(hw->getHandle(joint_names_[i]));
}
catch (const hardware_interface::HardwareInterfaceException &e)
{
ROS_ERROR_STREAM("Exception thrown: " << e.what());
return false;
}
urdf::JointConstSharedPtr joint_urdf = urdf.getJoint(joint_names_[i]);
if (!joint_urdf)
{
ROS_ERROR("Could not find joint '%s' in urdf", joint_names_[i].c_str());
return false;
}
joint_urdfs_.push_back(joint_urdf);
}
// 4. ********* KDL *********
// 4.1 kdl parser
if (!kdl_parser::treeFromUrdfModel(urdf, kdl_tree_))
{
ROS_ERROR("Failed to construct kdl tree");
return false;
}
else
{
ROS_INFO("Constructed kdl tree");
}
// 4.2 kdl chain
std::string root_name, tip_name;
if (!n.getParam("root_link", root_name))
{
ROS_ERROR("Could not find root link name");
return false;
}
if (!n.getParam("tip_link", tip_name))
{
ROS_ERROR("Could not find tip link name");
return false;
}
if (!kdl_tree_.getChain(root_name, tip_name, kdl_chain_))
{
ROS_ERROR_STREAM("Failed to get KDL chain from tree: ");
ROS_ERROR_STREAM(" " << root_name << " --> " << tip_name);
ROS_ERROR_STREAM(" Tree has " << kdl_tree_.getNrOfJoints() << " joints");
ROS_ERROR_STREAM(" Tree has " << kdl_tree_.getNrOfSegments() << " segments");
ROS_ERROR_STREAM(" The segments are:");
KDL::SegmentMap segment_map = kdl_tree_.getSegments();
KDL::SegmentMap::iterator it;
for (it = segment_map.begin(); it != segment_map.end(); it++)
ROS_ERROR_STREAM(" " << (*it).first);
return false;
}
else
{
ROS_INFO("Got kdl chain");
}
        // 4.3 initialize the inverse dynamics solver
        gravity_ = KDL::Vector::Zero(); // gravity vector; z-component set below
gravity_(2) = -9.81; // 0: x-axis 1: y-axis 2: z-axis
id_solver_.reset(new KDL::ChainDynParam(kdl_chain_, gravity_));
        // 4.4 initialize the Jacobian solver
jnt_to_jac_solver_.reset(new KDL::ChainJntToJacSolver(kdl_chain_));
        // 4.5 initialize the forward kinematics solver
fk_pos_solver_.reset(new KDL::ChainFkSolverPos_recursive(kdl_chain_));
        // ********* 5. Initialize variables *********
        // 5.1 initialize vectors (define sizes, zero values)
tau_d_.data = Eigen::VectorXd::Zero(n_joints_);
x_cmd_.data = Eigen::VectorXd::Zero(num_taskspace);
x_cmd_(0) = 0.0;
x_cmd_(1) = -0.32;
x_cmd_(2) = 0.56;
q_.data = Eigen::VectorXd::Zero(n_joints_);
qdot_.data = Eigen::VectorXd::Zero(n_joints_);
qC_dot_.data = Eigen::VectorXd::Zero(n_joints_);
eC_dot_.data = Eigen::VectorXd::Zero(n_joints_);
        // 5.2 initialize matrices (define sizes, zero values)
J_.resize(kdl_chain_.getNrOfJoints());
M_.resize(kdl_chain_.getNrOfJoints());
C_.resize(kdl_chain_.getNrOfJoints());
G_.resize(kdl_chain_.getNrOfJoints());
        // ********* 6. ROS commands *********
// 6.1 publisher
pub_q_ = n.advertise<std_msgs::Float64MultiArray>("q", 1000);
pub_xd_ = n.advertise<std_msgs::Float64MultiArray>("xd", 1000);
pub_x_ = n.advertise<std_msgs::Float64MultiArray>("x", 1000);
pub_ex_ = n.advertise<std_msgs::Float64MultiArray>("ex", 1000);
        pub_SaveData_ = n.advertise<std_msgs::Float64MultiArray>("SaveData", 1000); // the trailing number is the publisher queue size
        // 6.2 subscriber
sub = n.subscribe("command", 1000, &Kinematic_Controller::commandCB, this);
return true;
}
void commandCB(const std_msgs::Float64MultiArrayConstPtr &msg)
{
        if (msg->data.size() != num_taskspace)
        {
            ROS_ERROR_STREAM("Dimension of command (" << msg->data.size() << ") does not match the task-space dimension (" << num_taskspace << ")! Not executing!");
return;
}
for (int i = 0; i < num_taskspace; i++)
{
x_cmd_(i) = msg->data[i];
}
}
void starting(const ros::Time &time)
{
t = 0.0;
ROS_INFO("Starting Kinematic Controller");
}
void update(const ros::Time &time, const ros::Duration &period)
{
// ********* 0. Get states from gazebo *********
// 0.1 sampling time
double dt = period.toSec();
        t = t + 0.001; // accumulated simulation time; assumes a fixed 1 kHz update rate (dt above is left unused)
// 0.2 joint state
for (int i = 0; i < n_joints_; i++){
q_(i) = joints_[i].getPosition();
qdot_(i) = joints_[i].getVelocity();
}
// ******** 2. Compute end-effector Position*************
fk_pos_solver_->JntToCart(q_,x_);
xd_.p(0) = x_cmd_(0);
xd_.p(1) = x_cmd_(1);
xd_.p(2) = x_cmd_(2);
xd_.M = KDL::Rotation(KDL::Rotation::RPY(x_cmd_(3), x_cmd_(4), x_cmd_(5)));
xd_dot_(0) = 0;
xd_dot_(1) = 0;
xd_dot_(2) = 0;
xd_dot_(3) = 0;
xd_dot_(4) = 0;
xd_dot_(5) = 0;
ex_temp_ = diff(x_, xd_);
ex_(0) = ex_temp_(0);
ex_(1) = ex_temp_(1);
ex_(2) = ex_temp_(2);
ex_(3) = ex_temp_(3);
ex_(4) = ex_temp_(4);
ex_(5) = ex_temp_(5);
//std::cout << "error X " << ex_ << std::endl;
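        // Resolved-rate (kinematic) control at the velocity level: the task-space
        // reference velocity is the feedforward term xd_dot_ (zero here, since the
        // commanded pose is constant) plus a proportional correction on the pose
        // error; it is mapped to joint velocities through the inverse Jacobian.
        // Note: J_.data.inverse() assumes a square, non-singular 6-DOF Jacobian;
        // near singularities a damped pseudo-inverse would be more robust.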
aux_2_d_.data = xd_dot_ + Kp_E_.data.cwiseProduct(ex_);
jnt_to_jac_solver_->JntToJac(q_, J_);
// *** 2.2 computing Jacobian transpose/inversion ***
J_inv_ = J_.data.inverse();
qC_dot_.data = J_inv_ * aux_2_d_.data;
// ********* 3. Motion Controller in Joint Space*********
// *** 3.2 Compute model(M,C,G) ***
id_solver_->JntToMass(q_, M_);
id_solver_->JntToCoriolis(q_, qdot_, C_);
id_solver_->JntToGravity(q_, G_);
// *** 3.3 Apply Torque Command to Actuator ***
// Kinematic control
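        // Joint-space torque law: tau = M * (Kd .* eC_dot) + C .* qdot + G,
        // i.e. an inertia-scaled velocity-tracking term plus Coriolis and gravity
        // compensation (cwiseProduct applies the gains and Coriolis terms
        // element-wise rather than as full matrix products).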
eC_dot_.data = qC_dot_.data - qdot_.data;
aux_d_.data = M_.data * (Kd_.data.cwiseProduct(eC_dot_.data)) ;
comp_d_.data = C_.data.cwiseProduct(qdot_.data) + G_.data;
tau_d_.data = aux_d_.data + comp_d_.data;
// ISHIRA: Manipulation
for (int i = 0; i < n_joints_; i++)
{
joints_[i].setCommand(tau_d_(i));
// joints_[i].setCommand(0.0);
}
        // ********* 4. Save data *********
// save_data();
        // ********* 5. Print state *********
print_state();
}
void stopping(const ros::Time &time)
{
}
void save_data()
{
// 1
// Simulation time (unit: sec)
SaveData_[0] = t;
// Actual position in joint space (unit: rad)
SaveData_[19] = q_(0);
SaveData_[20] = q_(1);
SaveData_[21] = q_(2);
SaveData_[22] = q_(3);
SaveData_[23] = q_(4);
SaveData_[24] = q_(5);
// Actual velocity in joint space (unit: rad/s)
SaveData_[25] = qdot_(0);
SaveData_[26] = qdot_(1);
SaveData_[27] = qdot_(2);
SaveData_[28] = qdot_(3);
SaveData_[29] = qdot_(4);
SaveData_[30] = qdot_(5);
// 2
msg_q_.data.clear();
msg_SaveData_.data.clear();
// 3
for (int i = 0; i < n_joints_; i++)
msg_q_.data.push_back(q_(i));
for (int i = 0; i < SaveDataMax; i++)
msg_SaveData_.data.push_back(SaveData_[i]);
// 4
pub_q_.publish(msg_q_);
pub_SaveData_.publish(msg_SaveData_);
}
void print_state()
{
static int count = 0;
if (count > 99)
{
printf("*********************************************************\n\n");
printf("*** Simulation Time (unit: sec) ***\n");
printf("t = %f\n", t);
printf("\n");
printf("*** Desired Rotation Matrix of end-effector ***\n");
printf("%f, ",xd_.M(0,0));
printf("%f, ",xd_.M(0,1));
printf("%f\n",xd_.M(0,2));
printf("%f, ",xd_.M(1,0));
printf("%f, ",xd_.M(1,1));
printf("%f\n",xd_.M(1,2));
printf("%f, ",xd_.M(2,0));
printf("%f, ",xd_.M(2,1));
printf("%f\n",xd_.M(2,2));
printf("Xd : %f, ",xd_.p(0));
printf("Yd : %f, ",xd_.p(1));
printf("Zd : %f, ",xd_.p(2));
printf("%f, ",xd_.M(2,1));
printf("%f\n",xd_.M(2,2));
printf("\n");
printf("*** Actual State in Joint Space (unit: deg) ***\n");
printf("q_(0): %f, ", q_(0) * R2D);
printf("q_(1): %f, ", q_(1) * R2D);
printf("q_(2): %f, ", q_(2) * R2D);
printf("q_(3): %f, ", q_(3) * R2D);
printf("q_(4): %f, ", q_(4) * R2D);
printf("q_(5): %f\n", q_(5) * R2D);
printf("\n");
printf("*** Actual Rotation Matrix of end-effector ***\n");
printf("%f, ",x_.M(0,0));
printf("%f, ",x_.M(0,1));
printf("%f\n",x_.M(0,2));
printf("%f, ",x_.M(1,0));
printf("%f, ",x_.M(1,1));
printf("%f\n",x_.M(1,2));
printf("%f, ",x_.M(2,0));
printf("%f, ",x_.M(2,1));
printf("%f\n",x_.M(2,2));
printf("Xd : %f, ",x_.p(0));
printf("Yd : %f, ",x_.p(1));
printf("Zd : %f, ",x_.p(2));
printf("\n");
count = 0;
}
count++;
}
private:
// others
double t;
//Joint handles
    unsigned int n_joints_;                               // number of joints
    std::vector<std::string> joint_names_;                // joint names from the parameter server
    std::vector<hardware_interface::JointHandle> joints_; // joint handles
    std::vector<urdf::JointConstSharedPtr> joint_urdfs_;  // joint descriptions from the URDF
// kdl
    KDL::Tree kdl_tree_;   // KDL tree parsed from the URDF
    KDL::Chain kdl_chain_; // KDL chain from root link to tip link
// kdl M,C,G
    KDL::JntSpaceInertiaMatrix M_; // inertia matrix
KDL::JntArray C_; // coriolis
KDL::JntArray G_; // gravity torque vector
KDL::Vector gravity_;
// kdl and Eigen Jacobian
KDL::Jacobian J_;
Eigen::MatrixXd J_inv_;
Eigen::Matrix<double, num_taskspace, num_taskspace> J_transpose_;
// kdl solver
boost::scoped_ptr<KDL::ChainFkSolverPos_recursive> fk_pos_solver_; //Solver to compute the forward kinematics (position)
boost::scoped_ptr<KDL::ChainJntToJacSolver> jnt_to_jac_solver_; //Solver to compute the jacobian
boost::scoped_ptr<KDL::ChainDynParam> id_solver_; // Solver To compute the inverse dynamics
// Joint Space State
KDL::JntArray q_, qdot_ ,qC_dot_, eC_dot_;
KDL::JntArray x_cmd_;
// Task Space State
// ver. 01
KDL::Frame xd_; // x.p: frame position(3x1), x.m: frame orientation (3x3)
KDL::Frame x_;
KDL::Twist ex_temp_;
// KDL::Twist xd_dot_, xd_ddot_;
Eigen::Matrix<double, num_taskspace, 1> ex_;
Eigen::Matrix<double, num_taskspace, 1> xd_dot_, xd_ddot_;
// Input
KDL::JntArray aux_d_;
KDL::JntArray aux_2_d_;
KDL::JntArray comp_d_;
KDL::JntArray tau_d_;
// gains
KDL::JntArray Kp_, Ki_, Kd_, Kp_E_;
// save the data
double SaveData_[SaveDataMax];
// ros publisher
ros::Publisher pub_q_;
ros::Publisher pub_xd_, pub_x_, pub_ex_;
ros::Publisher pub_SaveData_;
// ros subscriber
ros::Subscriber sub;
// ros message
std_msgs::Float64MultiArray msg_q_;
std_msgs::Float64MultiArray msg_xd_, msg_x_, msg_ex_;
std_msgs::Float64MultiArray msg_SaveData_;
};
}; // namespace arm_controllers
PLUGINLIB_EXPORT_CLASS(arm_controllers::Kinematic_Controller, controller_interface::ControllerBase)
|
The zero function is a Z-function. |
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import linear_algebra.matrix.determinant
import data.mv_polynomial.basic
import data.mv_polynomial.comm_ring
/-!
# Matrices of multivariate polynomials
In this file, we prove results about matrices over an mv_polynomial ring.
In particular, we provide `matrix.mv_polynomial_X` which associates every entry of a matrix with a
unique variable.
## Tags
matrix determinant, multivariate polynomial
-/
variables {m n R S : Type*}
namespace matrix
variables (m n R)
/-- The matrix with variable `X (i,j)` at location `(i,j)`. -/
@[simp] noncomputable def mv_polynomial_X [comm_semiring R] : matrix m n (mv_polynomial (m × n) R)
| i j := mv_polynomial.X (i, j)
variables {m n R S}
/-- Any matrix `A` can be expressed as the evaluation of `matrix.mv_polynomial_X`.
This is of particular use when `mv_polynomial (m × n) R` is an integral domain but `S` is
not, as if the `mv_polynomial.eval₂` can be pulled to the outside of a goal, it can be solved
under cancellative assumptions. -/
lemma mv_polynomial_X_map_eval₂ [comm_semiring R] [comm_semiring S]
(f : R →+* S) (A : matrix m n S) :
(mv_polynomial_X m n R).map (mv_polynomial.eval₂ f $ λ p : m × n, A p.1 p.2) = A :=
ext $ λ i j, mv_polynomial.eval₂_X _ (λ p : m × n, A p.1 p.2) (i, j)
/-- A variant of `matrix.mv_polynomial_X_map_eval₂` with a bundled `ring_hom` on the LHS. -/
lemma mv_polynomial_X_map_matrix_eval [fintype m] [decidable_eq m]
[comm_semiring R] (A : matrix m m R) :
(mv_polynomial.eval $ λ p : m × m, A p.1 p.2).map_matrix (mv_polynomial_X m m R) = A :=
mv_polynomial_X_map_eval₂ _ A
variables (R)
/-- A variant of `matrix.mv_polynomial_X_map_eval₂` with a bundled `alg_hom` on the LHS. -/
lemma mv_polynomial_X_map_matrix_aeval [fintype m] [decidable_eq m]
[comm_semiring R] [comm_semiring S] [algebra R S] (A : matrix m m S) :
(mv_polynomial.aeval $ λ p : m × m, A p.1 p.2).map_matrix (mv_polynomial_X m m R) = A :=
mv_polynomial_X_map_eval₂ _ A
variables (m R)
/-- In a nontrivial ring, `matrix.mv_polynomial_X m m R` has non-zero determinant. -/
lemma det_mv_polynomial_X_ne_zero [decidable_eq m] [fintype m] [comm_ring R] [nontrivial R] :
det (mv_polynomial_X m m R) ≠ 0 :=
begin
intro h_det,
have := congr_arg matrix.det (mv_polynomial_X_map_matrix_eval (1 : matrix m m R)),
rw [det_one, ←ring_hom.map_det, h_det, ring_hom.map_zero] at this,
exact zero_ne_one this,
end
end matrix
|
#ifndef NewtonRaphsonSolver_H
#define NewtonRaphsonSolver_H
#include <stdio.h>
#include <iostream>
#include <vector>
#include <gsl/gsl_linalg.h>
#include "Node.h"
#include "ShapeBase.h"
//#include <omp.h>
/**
* The Newton-Raphson solver class
* */
class NewtonRaphsonSolver{
private:
size_t nDim; ///< Dimension of the space (3D)
size_t nNodes; ///< Number of nodes of the system
double threshold; ///< Convergence threshold for iterations
public:
    NewtonRaphsonSolver(int nDim, int nNodes); ///< Constructor of the N-R solver
    ~NewtonRaphsonSolver(); ///< Destructor of the N-R solver
gsl_matrix* un; ///< The initial positions of the nodes, as calculated at the end of previous step "n"
gsl_matrix* ge; ///< The matrix containing elastic forces on each node, size (nDim*nNodes,1). Organisation is [Node0,x ; Node0,y ; Node0,z; ... ; Noden,x ; Noden,y ; Noden,z]
gsl_matrix* gvInternal; ///< The matrix containing internal viscous forces on each node, size (nDim*nNodes,1). Organisation is [Node0,x ; Node0,y ; Node0,z; ... ; Noden,x ; Noden,y ; Noden,z]
gsl_matrix* gvExternal; ///< The matrix containing external viscous forces on each node, size (nDim*nNodes,1). Organisation is [Node0,x ; Node0,y ; Node0,z; ... ; Noden,x ; Noden,y ; Noden,z]
gsl_matrix* gExt; ///< The matrix containing external forces on each node (currently includes packing forces), size (nDim*nNodes,1). Organisation is [Node0,x ; Node0,y ; Node0,z; ... ; Noden,x ; Noden,y ; Noden,z]
    gsl_vector* gSum; ///< The vector containing the sum of NewtonRaphsonSolver#ge, NewtonRaphsonSolver#gvInternal, NewtonRaphsonSolver#gvExternal and NewtonRaphsonSolver#gExt. Organisation is [Node0,x ; Node0,y ; Node0,z; ... ; Noden,x ; Noden,y ; Noden,z]
    gsl_matrix* uk; ///< The matrix storing the position of each node at iteration "k". Initiated in function NewtonRaphsonSolver#initialteUkMatrix at the beginning of each step, and updated by function NewtonRaphsonSolver#updateUkInIteration during the iterations.
gsl_matrix* displacementPerDt; ///< The displacement per time step of each node in current iteration "k", from its position at the end of the last time step "n"
gsl_vector* deltaU; ///< The incremental change in positions as calculated in current iteration, resulting from the imbalance of elastic, viscous and any other external forces acting on each nodes. The solver minimises this value, convergence occurs when all incremental movements for all nodes sufficiently close to zero.
gsl_matrix* K; ///< The Jacobian matrix, derivative of sum of forces acting on each node with respect to displacements.
bool boundNodesWithSlaveMasterDefinition; ///< The boolean stating if there are degrees of freedom slave to other nodes (masters).
std::vector< std::vector<int> > slaveMasterList; ///< The 2D integer vector storing the slave-master degrees of freedom couples, such that array [i][0] = slave to array[i][1], each i representing one dim of node position ( z of node 2 is DoF i=8);
void setMatricesToZeroAtTheBeginningOfIteration(); ///< The function setting the calculation matrices to zero at the beginning of each iteration.
void setMatricesToZeroInsideIteration(); ///< The function setting the relevant matrices to zero at each iteration.
void constructUnMatrix(const std::vector<std::unique_ptr<Node> > &Nodes); ///< This function constructs NewtonRaphsonSolver#un matrix at the beginning of the iterations.
void initialteUkMatrix(); ///< This function initiates NewtonRaphsonSolver#uk matrix at the beginning of the iterations, it is initiated to be equal to NewtonRaphsonSolver#un.
void calculateBoundKWithSlavesMasterDoF(); ///< This function updates the Jacobian of the system, NewtonRaphsonSolver#K, to reflect degrees of freedom binding.
void equateSlaveDisplacementsToMasters(); ///< This function moves the slaves of bound couples with a displacement equivalent to the masters'.
void calculateDisplacementMatrix(double dt); ///< This function calculates the displacement of each node in current iteration "k", from their positions at the end of the previous step "n" (NewtonRaphsonSolver#uk - NewtonRaphsonSolver#un)
void calcutateFixedK(const std::vector <std::unique_ptr<Node>>& Nodes); ///< This function updates the Jacobian to account for nodes that are fixed in certain dimensions in space, as part of boundary conditions.
void calculateForcesAndJacobianMatrixNR(const std::vector <std::unique_ptr<Node>>& Nodes, const std::vector <std::unique_ptr<ShapeBase>>& Elements, double dt ); ///< This function calculates elemental forces and Jacobians, later to be combined in NewtonRaphsonSolver#K and NewtonRaphsonSolver#gSum
void writeForcesTogeAndgvInternal(const std::vector <std::unique_ptr<Node>>& Nodes, const std::vector <std::unique_ptr<ShapeBase>>& Elements, std::vector<std::array<double,3>>& SystemForces); ///< This function writes the values of elemental elastic (ShapeBase#ge) and internal viscous forces (ShapeBase#gvInternal) into the system elastic and internal viscous forces, NewtonRaphsonSolver#ge, and NewtonRaphsonSolver#gvInternal, respectively.
void writeImplicitElementalKToJacobian(const std::vector <std::unique_ptr<ShapeBase>>& Elements); ///< This function writes the elemental values for elastic part of the Jacobian - stiffness matrix - (ShapeBase#TriPointKe) and for viscous part of Jacobian (ShapeBase#TriPointKv) into the system Jacobian NewtonRaphsonSolver#K.
    void calculateExternalViscousForcesForNR(const std::vector <std::unique_ptr<Node>>& Nodes); ///< This function calculates the external viscous forces acting on each node; the values are stored in NewtonRaphsonSolver#gvExternal
void addImplicitKViscousExternalToJacobian(const std::vector <std::unique_ptr<Node>>& Nodes, double dt); ///< This function adds the external related terms of the Jacobian to the system Jacobian NewtonRaphsonSolver#K.
void checkJacobianForAblatedNodes(std::vector <int> & AblatedNodes); ///< This functions checks the Jacobian to ensure the diagonal terms are non-zero for ablated nodes.
    void calculateSumOfInternalForces(); ///< This function adds the elasticity- and viscosity-related forces (NewtonRaphsonSolver#ge, NewtonRaphsonSolver#gvInternal, NewtonRaphsonSolver#gvExternal) to the sum of forces, NewtonRaphsonSolver#gSum.
    void addExernalForces(); ///< This function adds the external forces (NewtonRaphsonSolver#gExt) to the sum of forces, NewtonRaphsonSolver#gSum.
void solveForDeltaU(); ///< This function solves for the displacements within the N-R step.
//raw pointers necessary for Pardiso
    int solveWithPardiso(double* a, double* b, int* ia, int* ja, const int n_variables); ///< Solve the linear system with the Pardiso sparse solver.
    void constructiaForPardiso(int* ia, const int nmult, std::vector<int> &ja_vec, std::vector<double> &a_vec); ///< Build the row-index array of the sparse representation for Pardiso.
    void writeKinPardisoFormat(const int nNonzero, std::vector<int> &ja_vec, std::vector<double> &a_vec, int* ja, double* a); ///< Write the Jacobian NewtonRaphsonSolver#K in Pardiso's sparse format.
    void writeginPardisoFormat(double* b, const int n); ///< Write the force vector into the right-hand-side array expected by Pardiso.
    bool checkConvergenceViaDeltaU(); ///< Check for convergence with the norm of the displacement vector, against the threshold NewtonRaphsonSolver#threshold.
    bool checkConvergenceViaForce(); ///< Check for convergence with the norm of the force vector, against the threshold NewtonRaphsonSolver#threshold.
    void updateUkInIteration(); ///< Calculate the nodal displacements at the k-th iteration of the N-R solver.
void displayMatrix(gsl_matrix* mat, std::string matname);
void displayMatrix(gsl_vector* mat, std::string matname);
bool checkIfCombinationExists(int dofSlave, int dofMaster);
    void checkMasterUpdate(int& dofMaster, int& masterId); ///< This function takes a degree of freedom index that is supposed to become a master. If that DoF is already a slave to another DoF, the master is updated: anything that would be bound to the input DoF is bound instead to its existing master.
bool checkIfSlaveIsAlreadyMasterOfOthers(int dofSlave, int dofMaster); ///< This function checks if the slave DOF is already master of others, if so, updates the master of said slave to the new master the current slave will be bound to.
void updateElementPositions(const std::vector<std::unique_ptr<Node> > &Nodes, const std::vector <std::unique_ptr<ShapeBase>>& Elements);
};
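/* A typical per-time-step call sequence for this solver (a sketch inferred from
 * the interface above, not taken from an actual caller) would be:
 *
 *   solver.constructUnMatrix(Nodes);
 *   solver.initialteUkMatrix();
 *   do {
 *       solver.setMatricesToZeroInsideIteration();
 *       solver.calculateForcesAndJacobianMatrixNR(Nodes, Elements, dt);
 *       solver.calculateSumOfInternalForces();
 *       solver.addExernalForces();
 *       solver.calculateDisplacementMatrix(dt);
 *       solver.solveForDeltaU();
 *       solver.updateUkInIteration();
 *   } while (!solver.checkConvergenceViaDeltaU());
 */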
#endif
|
function [dets, boxes, t] = cascade_detect(pyra, model, thresh)
% AUTORIGHTS
% -------------------------------------------------------
% Copyright (C) 2009-2012 Ross Girshick
%
% This file is part of the voc-releaseX code
% (http://people.cs.uchicago.edu/~rbg/latent/)
% and is available under the terms of an MIT-like license
% provided in COPYING. Please retain this notice and
% COPYING if you use this file (or a portion of it) in
% your project.
% -------------------------------------------------------
th = tic();
% gather PCA root filters for convolution
numrootfilters = length(model.rootfilters);
rootfilters = cell(numrootfilters, 1);
for i = 1:numrootfilters
rootfilters{i} = model.rootfilters{i}.wpca;
end
% compute PCA projection of the feature pyramid
projpyra = project_pyramid(model, pyra);
% stage 0: convolution with PCA root filters is done densely
% before any pruning can be applied
% Precompute location/scale scores
loc_f = loc_feat(model, pyra.num_levels);
loc_scores = cell(model.numcomponents, 1);
for c = 1:model.numcomponents
loc_w = model.loc{c}.w;
loc_scores{c} = loc_w * loc_f;
end
pyra.loc_scores = loc_scores;
numrootlocs = 0;
nlevels = size(pyra.feat,1);
rootscores = cell(model.numcomponents, nlevels);
s = 0; % will hold the amount of temp storage needed by cascade()
for i = 1:pyra.num_levels
s = s + size(pyra.feat{i},1)*size(pyra.feat{i},2);
if i > model.interval
scores = fconv_var_dim(projpyra.feat{i}, rootfilters, 1, numrootfilters);
for c = 1:model.numcomponents
u = model.components{c}.rootindex;
v = model.components{c}.offsetindex;
rootscores{c,i} = scores{u} + model.offsets{v}.w + loc_scores{c}(i);
numrootlocs = numrootlocs + numel(scores{u});
end
end
end
s = s*length(model.partfilters);
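% s is now an upper bound on the temp storage cascade() may need: one score per
% part filter at every root-filter cell across all pyramid levels (in practice
% the cascade prunes most locations before part scores are computed).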
model.thresh = thresh;
% run remaining cascade stages and collect object hypotheses
coords = cascade(model, pyra, projpyra, rootscores, numrootlocs, s);
boxes = coords';
dets = boxes(:,[1:4 end-1 end]);
t = toc(th);
|
import data.real.basic
variables {x y : ℝ}
#check le_or_lt
#check @abs_of_neg
#check @abs_of_nonneg
-- BEGIN
example : x < abs y → x < y ∨ x < -y :=
begin
cases le_or_gt 0 y with h1 h2,
{ rw abs_of_nonneg h1,
    intro h, left, exact h, },
rw abs_of_neg h2,
  intro h, right, exact h,
end
-- END |
section \<open>Basic Concepts\<close>
theory Refine_Basic
imports Main
"HOL-Library.Monad_Syntax"
Refine_Misc
"Generic/RefineG_Recursion"
"Generic/RefineG_Assert"
begin
subsection \<open>Nondeterministic Result Lattice and Monad\<close>
text \<open>
In this section we introduce a complete lattice of result sets with an
additional top element that represents failure. On this lattice, we define
a monad: The return operator models a result that consists of a single value,
and the bind operator models applying a function to all results.
  Binding a failure always yields a failure.
In addition to the return operator, we also introduce the operator
\<open>RES\<close>, that embeds a set of results into our lattice. Its synonym for
a predicate is \<open>SPEC\<close>.
Program correctness is expressed by refinement, i.e., the expression
\<open>M \<le> SPEC \<Phi>\<close> means that \<open>M\<close> is correct w.r.t.\
specification \<open>\<Phi>\<close>. This suggests the following view on the program
lattice: The top-element is the result that is never correct. We call this
result \<open>FAIL\<close>. The bottom element is the program that is always correct.
It is called \<open>SUCCEED\<close>. An assertion can be encoded by failing if the
asserted predicate is not true. Symmetrically, an assumption is encoded by
succeeding if the predicate is not true.
\<close>
datatype 'a nres = FAILi | RES "'a set"
text \<open>
\<open>FAILi\<close> is only an internal notation, that should not be exposed to
the user.
Instead, \<open>FAIL\<close> should be used, that is defined later as abbreviation
for the top element of the lattice.
\<close>
instantiation nres :: (type) complete_lattice
begin
fun less_eq_nres where
"_ \<le> FAILi \<longleftrightarrow> True" |
"(RES a) \<le> (RES b) \<longleftrightarrow> a\<subseteq>b" |
"FAILi \<le> (RES _) \<longleftrightarrow> False"
fun less_nres where
"FAILi < _ \<longleftrightarrow> False" |
"(RES _) < FAILi \<longleftrightarrow> True" |
"(RES a) < (RES b) \<longleftrightarrow> a\<subset>b"
fun sup_nres where
"sup _ FAILi = FAILi" |
"sup FAILi _ = FAILi" |
"sup (RES a) (RES b) = RES (a\<union>b)"
fun inf_nres where
"inf x FAILi = x" |
"inf FAILi x = x" |
"inf (RES a) (RES b) = RES (a\<inter>b)"
definition "Sup X \<equiv> if FAILi\<in>X then FAILi else RES (\<Union>{x . RES x \<in> X})"
definition "Inf X \<equiv> if \<exists>x. RES x\<in>X then RES (\<Inter>{x . RES x \<in> X}) else FAILi"
definition "bot \<equiv> RES {}"
definition "top \<equiv> FAILi"
instance
apply (intro_classes)
unfolding Sup_nres_def Inf_nres_def bot_nres_def top_nres_def
apply (case_tac x, case_tac [!] y, auto) []
apply (case_tac x, auto) []
apply (case_tac x, case_tac [!] y, case_tac [!] z, auto) []
apply (case_tac x, (case_tac [!] y)?, auto) []
apply (case_tac x, (case_tac [!] y)?, simp_all) []
apply (case_tac x, (case_tac [!] y)?, auto) []
apply (case_tac x, case_tac [!] y, case_tac [!] z, auto) []
apply (case_tac x, (case_tac [!] y)?, auto) []
apply (case_tac x, (case_tac [!] y)?, auto) []
apply (case_tac x, case_tac [!] y, case_tac [!] z, auto) []
apply (case_tac x, auto) []
apply (case_tac z, fastforce+) []
apply (case_tac x, auto) []
apply (case_tac z, fastforce+) []
apply auto []
apply auto []
done
end
abbreviation "FAIL \<equiv> top::'a nres"
abbreviation "SUCCEED \<equiv> bot::'a nres"
abbreviation "SPEC \<Phi> \<equiv> RES (Collect \<Phi>)"
definition "RETURN x \<equiv> RES {x}"
text \<open>We try to hide the original \<open>FAILi\<close>-element as well as possible.
\<close>
lemma nres_cases[case_names FAIL RES, cases type]:
obtains "M=FAIL" | X where "M=RES X"
apply (cases M, fold top_nres_def) by auto
lemma nres_simp_internals:
"RES {} = SUCCEED"
"FAILi = FAIL"
unfolding top_nres_def bot_nres_def by simp_all
lemma nres_inequalities[simp]:
"FAIL \<noteq> RES X"
"FAIL \<noteq> SUCCEED"
"FAIL \<noteq> RETURN x"
"SUCCEED \<noteq> FAIL"
"SUCCEED \<noteq> RETURN x"
"RES X \<noteq> FAIL"
"RETURN x \<noteq> FAIL"
"RETURN x \<noteq> SUCCEED"
unfolding top_nres_def bot_nres_def RETURN_def
by auto
lemma nres_more_simps[simp]:
"SUCCEED = RES X \<longleftrightarrow> X={}"
"RES X = SUCCEED \<longleftrightarrow> X={}"
"RES X = RETURN x \<longleftrightarrow> X={x}"
"RES X = RES Y \<longleftrightarrow> X=Y"
"RETURN x = RES X \<longleftrightarrow> {x}=X"
"RETURN x = RETURN y \<longleftrightarrow> x=y"
unfolding top_nres_def bot_nres_def RETURN_def by auto
lemma nres_order_simps[simp]:
"\<And>M. SUCCEED \<le> M"
"\<And>M. M \<le> SUCCEED \<longleftrightarrow> M=SUCCEED"
"\<And>M. M \<le> FAIL"
"\<And>M. FAIL \<le> M \<longleftrightarrow> M=FAIL"
"\<And>X Y. RES X \<le> RES Y \<longleftrightarrow> X\<le>Y"
"\<And>X. Sup X = FAIL \<longleftrightarrow> FAIL\<in>X"
"\<And>X f. Sup (f ` X) = FAIL \<longleftrightarrow> FAIL \<in> f ` X"
"\<And>X. FAIL = Sup X \<longleftrightarrow> FAIL\<in>X"
"\<And>X f. FAIL = Sup (f ` X) \<longleftrightarrow> FAIL \<in> f ` X"
"\<And>X. FAIL\<in>X \<Longrightarrow> Sup X = FAIL"
"\<And>X. FAIL\<in>f ` X \<Longrightarrow> Sup (f ` X) = FAIL"
"\<And>A. Sup (RES ` A) = RES (Sup A)"
"\<And>A. Sup (RES ` A) = RES (Sup A)"
"\<And>A. A\<noteq>{} \<Longrightarrow> Inf (RES`A) = RES (Inf A)"
"\<And>A. A\<noteq>{} \<Longrightarrow> Inf (RES ` A) = RES (Inf A)"
"Inf {} = FAIL"
"Inf UNIV = SUCCEED"
"Sup {} = SUCCEED"
"Sup UNIV = FAIL"
"\<And>x y. RETURN x \<le> RETURN y \<longleftrightarrow> x=y"
"\<And>x Y. RETURN x \<le> RES Y \<longleftrightarrow> x\<in>Y"
"\<And>X y. RES X \<le> RETURN y \<longleftrightarrow> X \<subseteq> {y}"
unfolding Sup_nres_def Inf_nres_def RETURN_def
by (auto simp add: bot_unique top_unique nres_simp_internals)
lemma Sup_eq_RESE:
assumes "Sup A = RES B"
obtains C where "A=RES`C" and "B=Sup C"
proof -
show ?thesis
using assms unfolding Sup_nres_def
apply (simp split: if_split_asm)
apply (rule_tac C="{X. RES X \<in> A}" in that)
apply auto []
apply (case_tac x, auto simp: nres_simp_internals) []
apply (auto simp: nres_simp_internals) []
done
qed
declare nres_simp_internals[simp]
subsubsection \<open>Pointwise Reasoning\<close>
ML \<open>
structure refine_pw_simps = Named_Thms
( val name = @{binding refine_pw_simps}
val description = "Refinement Framework: " ^
"Simplifier rules for pointwise reasoning" )
\<close>
setup \<open>refine_pw_simps.setup\<close>
definition "nofail S \<equiv> S\<noteq>FAIL"
definition "inres S x \<equiv> RETURN x \<le> S"
lemma nofail_simps[simp, refine_pw_simps]:
"nofail FAIL \<longleftrightarrow> False"
"nofail (RES X) \<longleftrightarrow> True"
"nofail (RETURN x) \<longleftrightarrow> True"
"nofail SUCCEED \<longleftrightarrow> True"
unfolding nofail_def
by (simp_all add: RETURN_def)
lemma inres_simps[simp, refine_pw_simps]:
"inres FAIL = (\<lambda>_. True)"
"inres (RES X) = (\<lambda>x. x\<in>X)"
"inres (RETURN x) = (\<lambda>y. x=y)"
"inres SUCCEED = (\<lambda>_. False)"
unfolding inres_def [abs_def]
by (auto simp add: RETURN_def)
lemma not_nofail_iff:
"\<not>nofail S \<longleftrightarrow> S=FAIL" by (cases S) auto
lemma not_nofail_inres[simp, refine_pw_simps]:
"\<not>nofail S \<Longrightarrow> inres S x"
apply (cases S) by auto
lemma intro_nofail[refine_pw_simps]:
"S\<noteq>FAIL \<longleftrightarrow> nofail S"
"FAIL\<noteq>S \<longleftrightarrow> nofail S"
by (cases S, simp_all)+
text \<open>The following two lemmas will introduce pointwise reasoning for
orderings and equalities.\<close>
lemma pw_le_iff:
"S \<le> S' \<longleftrightarrow> (nofail S'\<longrightarrow> (nofail S \<and> (\<forall>x. inres S x \<longrightarrow> inres S' x)))"
apply (cases S, simp_all)
apply (case_tac [!] S', auto)
done
lemma pw_eq_iff:
"S=S' \<longleftrightarrow> (nofail S = nofail S' \<and> (\<forall>x. inres S x \<longleftrightarrow> inres S' x))"
apply (rule iffI)
apply simp
apply (rule antisym)
apply (simp_all add: pw_le_iff)
done
lemma pw_flat_le_iff: "flat_le S S' \<longleftrightarrow>
(\<exists>x. inres S x) \<longrightarrow> (nofail S \<longleftrightarrow> nofail S') \<and> (\<forall>x. inres S x \<longleftrightarrow> inres S' x)"
by (auto simp : flat_ord_def pw_eq_iff)
lemma pw_flat_ge_iff: "flat_ge S S' \<longleftrightarrow>
(nofail S) \<longrightarrow> nofail S' \<and> (\<forall>x. inres S x \<longleftrightarrow> inres S' x)"
apply (simp add: flat_ord_def pw_eq_iff) apply safe
apply simp
apply simp
apply simp
apply (rule ccontr)
apply simp
done
lemmas pw_ords_iff = pw_le_iff pw_flat_le_iff pw_flat_ge_iff
lemma pw_leI:
"(nofail S'\<longrightarrow> (nofail S \<and> (\<forall>x. inres S x \<longrightarrow> inres S' x))) \<Longrightarrow> S \<le> S'"
by (simp add: pw_le_iff)
lemma pw_leI':
assumes "nofail S' \<Longrightarrow> nofail S"
assumes "\<And>x. \<lbrakk>nofail S'; inres S x\<rbrakk> \<Longrightarrow> inres S' x"
shows "S \<le> S'"
using assms
by (simp add: pw_le_iff)
lemma pw_eqI:
assumes "nofail S = nofail S'"
assumes "\<And>x. inres S x \<longleftrightarrow> inres S' x"
shows "S=S'"
using assms by (simp add: pw_eq_iff)
lemma pwD1:
assumes "S\<le>S'" "nofail S'"
shows "nofail S"
using assms by (simp add: pw_le_iff)
lemma pwD2:
assumes "S\<le>S'" "inres S x"
shows "inres S' x"
using assms
by (auto simp add: pw_le_iff)
lemmas pwD = pwD1 pwD2
text \<open>
When proving refinement, we may assume that the refined program does not
fail.\<close>
lemma le_nofailI: "\<lbrakk> nofail M' \<Longrightarrow> M \<le> M' \<rbrakk> \<Longrightarrow> M \<le> M'"
by (cases M') auto
text \<open>The following lemmas push pointwise reasoning over operators,
thus converting an expression over lattice operators into a logical
formula.\<close>
lemma pw_sup_nofail[refine_pw_simps]:
"nofail (sup a b) \<longleftrightarrow> nofail a \<and> nofail b"
apply (cases a, simp)
apply (cases b, simp_all)
done
lemma pw_sup_inres[refine_pw_simps]:
"inres (sup a b) x \<longleftrightarrow> inres a x \<or> inres b x"
apply (cases a, simp)
apply (cases b, simp)
apply (simp)
done
lemma pw_Sup_inres[refine_pw_simps]: "inres (Sup X) r \<longleftrightarrow> (\<exists>M\<in>X. inres M r)"
apply (cases "Sup X")
apply (simp)
apply (erule bexI[rotated])
apply simp
apply (erule Sup_eq_RESE)
apply (simp)
done
lemma pw_SUP_inres [refine_pw_simps]: "inres (Sup (f ` X)) r \<longleftrightarrow> (\<exists>M\<in>X. inres (f M) r)"
using pw_Sup_inres [of "f ` X"] by simp
lemma pw_Sup_nofail[refine_pw_simps]: "nofail (Sup X) \<longleftrightarrow> (\<forall>x\<in>X. nofail x)"
apply (cases "Sup X")
apply force
apply simp
apply (erule Sup_eq_RESE)
apply auto
done
lemma pw_SUP_nofail [refine_pw_simps]: "nofail (Sup (f ` X)) \<longleftrightarrow> (\<forall>x\<in>X. nofail (f x))"
using pw_Sup_nofail [of "f ` X"] by simp
lemma pw_inf_nofail[refine_pw_simps]:
"nofail (inf a b) \<longleftrightarrow> nofail a \<or> nofail b"
apply (cases a, simp)
apply (cases b, simp_all)
done
lemma pw_inf_inres[refine_pw_simps]:
"inres (inf a b) x \<longleftrightarrow> inres a x \<and> inres b x"
apply (cases a, simp)
apply (cases b, simp)
apply (simp)
done
lemma pw_Inf_nofail[refine_pw_simps]: "nofail (Inf C) \<longleftrightarrow> (\<exists>x\<in>C. nofail x)"
apply (cases "C={}")
apply simp
apply (cases "Inf C")
apply (subgoal_tac "C={FAIL}")
apply simp
apply auto []
apply (subgoal_tac "C\<noteq>{FAIL}")
apply (auto simp: not_nofail_iff) []
apply auto []
done
lemma pw_INF_nofail [refine_pw_simps]: "nofail (Inf (f ` C)) \<longleftrightarrow> (\<exists>x\<in>C. nofail (f x))"
using pw_Inf_nofail [of "f ` C"] by simp
lemma pw_Inf_inres[refine_pw_simps]: "inres (Inf C) r \<longleftrightarrow> (\<forall>M\<in>C. inres M r)"
apply (unfold Inf_nres_def)
apply auto
apply (case_tac M)
apply force
apply force
apply (case_tac M)
apply force
apply force
done
lemma pw_INF_inres [refine_pw_simps]: "inres (Inf (f ` C)) r \<longleftrightarrow> (\<forall>M\<in>C. inres (f M) r)"
using pw_Inf_inres [of "f ` C"] by simp
lemma nofail_RES_conv: "nofail m \<longleftrightarrow> (\<exists>M. m=RES M)" by (cases m) auto
primrec the_RES where "the_RES (RES X) = X"
lemma the_RES_inv[simp]: "nofail m \<Longrightarrow> RES (the_RES m) = m"
by (cases m) auto
definition [refine_pw_simps]: "nf_inres m x \<equiv> nofail m \<and> inres m x"
lemma nf_inres_RES[simp]: "nf_inres (RES X) x \<longleftrightarrow> x\<in>X"
by (simp add: refine_pw_simps)
lemma nf_inres_SPEC[simp]: "nf_inres (SPEC \<Phi>) x \<longleftrightarrow> \<Phi> x"
by (simp add: refine_pw_simps)
lemma nofail_antimono_fun: "f \<le> g \<Longrightarrow> (nofail (g x) \<longrightarrow> nofail (f x))"
by (auto simp: pw_le_iff dest: le_funD)
subsubsection \<open>Monad Operators\<close>
definition bind where "bind M f \<equiv> case M of
FAILi \<Rightarrow> FAIL |
RES X \<Rightarrow> Sup (f`X)"
lemma bind_FAIL[simp]: "bind FAIL f = FAIL"
unfolding bind_def by (auto split: nres.split)
lemma bind_SUCCEED[simp]: "bind SUCCEED f = SUCCEED"
unfolding bind_def by (auto split: nres.split)
lemma bind_RES: "bind (RES X) f = Sup (f`X)" unfolding bind_def
by (auto)
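text \<open>For instance, \<open>bind (RES {1,2}) (\<lambda>x. RES {x, x+1}) = RES {1,2,3}\<close>; and since
  \<open>Sup\<close> of any set containing \<open>FAIL\<close> is \<open>FAIL\<close>, binding a result set to a function
  that fails on one of its elements yields \<open>FAIL\<close>.\<close>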
adhoc_overloading
Monad_Syntax.bind Refine_Basic.bind
lemma pw_bind_nofail[refine_pw_simps]:
"nofail (bind M f) \<longleftrightarrow> (nofail M \<and> (\<forall>x. inres M x \<longrightarrow> nofail (f x)))"
apply (cases M)
by (auto simp: bind_RES refine_pw_simps)
lemma pw_bind_inres[refine_pw_simps]:
"inres (bind M f) = (\<lambda>x. nofail M \<longrightarrow> (\<exists>y. (inres M y \<and> inres (f y) x)))"
apply (rule ext)
apply (cases M)
apply (auto simp add: bind_RES refine_pw_simps)
done
lemma pw_bind_le_iff:
"bind M f \<le> S \<longleftrightarrow> (nofail S \<longrightarrow> nofail M) \<and>
(\<forall>x. nofail M \<and> inres M x \<longrightarrow> f x \<le> S)"
by (auto simp: pw_le_iff refine_pw_simps)
lemma pw_bind_leI: "\<lbrakk>
nofail S \<Longrightarrow> nofail M; \<And>x. \<lbrakk>nofail M; inres M x\<rbrakk> \<Longrightarrow> f x \<le> S\<rbrakk>
\<Longrightarrow> bind M f \<le> S"
by (simp add: pw_bind_le_iff)
text \<open>\paragraph{Monad Laws}\<close>
text \<open>\paragraph{Congruence rule for bind}\<close>
lemma bind_cong:
assumes "m=m'"
assumes "\<And>x. RETURN x \<le> m' \<Longrightarrow> f x = f' x"
shows "bind m f = bind m' f'"
using assms
by (auto simp: refine_pw_simps pw_eq_iff pw_le_iff)
text \<open>\paragraph{Monotonicity and Related Properties}\<close>
lemma bind_mono[refine_mono]:
"\<lbrakk> M \<le> M'; \<And>x. RETURN x \<le> M \<Longrightarrow> f x \<le> f' x \<rbrakk> \<Longrightarrow> bind M f \<le> bind M' f'"
(*"\<lbrakk> flat_le M M'; \<And>x. flat_le (f x) (f' x) \<rbrakk> \<Longrightarrow> flat_le (bind M f) (bind M' f')"*)
"\<lbrakk> flat_ge M M'; \<And>x. flat_ge (f x) (f' x) \<rbrakk> \<Longrightarrow> flat_ge (bind M f) (bind M' f')"
apply (auto simp: refine_pw_simps pw_ords_iff) []
apply (auto simp: refine_pw_simps pw_ords_iff) []
done
lemma bind_mono1[simp, intro!]: "mono (\<lambda>M. bind M f)"
apply (rule monoI)
apply (rule bind_mono)
by auto
lemma bind_mono1'[simp, intro!]: "mono bind"
apply (rule monoI)
apply (rule le_funI)
apply (rule bind_mono)
by auto
lemma bind_mono2'[simp, intro!]: "mono (bind M)"
apply (rule monoI)
apply (rule bind_mono)
by (auto dest: le_funD)
lemma bind_distrib_sup1: "bind (sup M N) f = sup (bind M f) (bind N f)"
by (auto simp add: pw_eq_iff refine_pw_simps)
lemma bind_distrib_sup2: "bind m (\<lambda>x. sup (f x) (g x)) = sup (bind m f) (bind m g)"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma bind_distrib_Sup1: "bind (Sup M) f = (SUP m\<in>M. bind m f)"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma bind_distrib_Sup2: "F\<noteq>{} \<Longrightarrow> bind m (Sup F) = (SUP f\<in>F. bind m f)"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma RES_Sup_RETURN: "Sup (RETURN`X) = RES X"
by (rule pw_eqI) (auto simp add: refine_pw_simps)
subsection \<open>VCG Setup\<close>
lemma SPEC_cons_rule:
assumes "m \<le> SPEC \<Phi>"
assumes "\<And>x. \<Phi> x \<Longrightarrow> \<Psi> x"
shows "m \<le> SPEC \<Psi>"
using assms by (auto simp: pw_le_iff)
lemmas SPEC_trans = order_trans[where z="SPEC Postcond" for Postcond, zero_var_indexes]
ML \<open>
structure Refine = struct
structure vcg = Named_Thms
( val name = @{binding refine_vcg}
val description = "Refinement Framework: " ^
"Verification condition generation rules (intro)" )
structure vcg_cons = Named_Thms
( val name = @{binding refine_vcg_cons}
val description = "Refinement Framework: " ^
"Consequence rules tried by VCG" )
structure refine0 = Named_Thms
( val name = @{binding refine0}
val description = "Refinement Framework: " ^
"Refinement rules applied first (intro)" )
structure refine = Named_Thms
( val name = @{binding refine}
val description = "Refinement Framework: Refinement rules (intro)" )
structure refine2 = Named_Thms
( val name = @{binding refine2}
val description = "Refinement Framework: " ^
"Refinement rules 2nd stage (intro)" )
(* If set to true, the product splitter of refine_rcg is disabled. *)
val no_prod_split =
Attrib.setup_config_bool @{binding refine_no_prod_split} (K false);
fun rcg_tac add_thms ctxt =
let
val cons_thms = vcg_cons.get ctxt
val ref_thms = (refine0.get ctxt
@ add_thms @ refine.get ctxt @ refine2.get ctxt);
val prod_ss = (Splitter.add_split @{thm prod.split}
(put_simpset HOL_basic_ss ctxt));
val prod_simp_tac =
if Config.get ctxt no_prod_split then
K no_tac
else
(simp_tac prod_ss THEN'
REPEAT_ALL_NEW (resolve_tac ctxt @{thms impI allI}));
in
REPEAT_ALL_NEW_FWD (DETERM o FIRST' [
resolve_tac ctxt ref_thms,
resolve_tac ctxt cons_thms THEN' resolve_tac ctxt ref_thms,
prod_simp_tac
])
end;
fun post_tac ctxt = REPEAT_ALL_NEW_FWD (FIRST' [
eq_assume_tac,
(*match_tac ctxt thms,*)
SOLVED' (Tagged_Solver.solve_tac ctxt)])
end;
\<close>
setup \<open>Refine.vcg.setup\<close>
setup \<open>Refine.vcg_cons.setup\<close>
setup \<open>Refine.refine0.setup\<close>
setup \<open>Refine.refine.setup\<close>
setup \<open>Refine.refine2.setup\<close>
(*setup {* Refine.refine_post.setup *}*)
method_setup refine_rcg =
\<open>Attrib.thms >> (fn add_thms => fn ctxt => SIMPLE_METHOD' (
Refine.rcg_tac add_thms ctxt THEN_ALL_NEW_FWD (TRY o Refine.post_tac ctxt)
))\<close>
"Refinement framework: Generate refinement conditions"
method_setup refine_vcg =
\<open>Attrib.thms >> (fn add_thms => fn ctxt => SIMPLE_METHOD' (
Refine.rcg_tac (add_thms @ Refine.vcg.get ctxt) ctxt THEN_ALL_NEW_FWD (TRY o Refine.post_tac ctxt)
))\<close>
"Refinement framework: Generate refinement and verification conditions"
(* Use tagged-solver instead!
method_setup refine_post =
{* Scan.succeed (fn ctxt => SIMPLE_METHOD' (
Refine.post_tac ctxt
)) *}
"Refinement framework: Postprocessing of refinement goals"
*)
declare SPEC_cons_rule[refine_vcg_cons]
subsection \<open>Data Refinement\<close>
text \<open>
In this section we establish a notion of pointwise data refinement, by
lifting a relation \<open>R\<close> between concrete and abstract values to
our result lattice.
Given a relation \<open>R\<close>, we define a {\em concretization function}
\<open>\<Down>R\<close> that takes an abstract result, and returns a concrete result.
The concrete result contains all values that are mapped by \<open>R\<close> to
a value in the abstract result.
Note that our concretization function forms no Galois connection, i.e.,
in general there is no \<open>\<alpha>\<close> such that
\<open>m \<le>\<Down> R m'\<close> is equivalent to \<open>\<alpha> m \<le> m'\<close>.
However, we get a Galois connection for the special case of
single-valued relations.
Regarding data refinement as Galois connections is inspired by \cite{mmo97},
  that also uses the adjoints of
a Galois connection to express data refinement by program refinement.
\<close>
definition conc_fun ("\<Down>") where
"conc_fun R m \<equiv> case m of FAILi \<Rightarrow> FAIL | RES X \<Rightarrow> RES (R\<inverse>``X)"
definition abs_fun ("\<Up>") where
"abs_fun R m \<equiv> case m of FAILi \<Rightarrow> FAIL
| RES X \<Rightarrow> if X\<subseteq>Domain R then RES (R``X) else FAIL"
lemma
conc_fun_FAIL[simp]: "\<Down>R FAIL = FAIL" and
conc_fun_RES: "\<Down>R (RES X) = RES (R\<inverse>``X)"
unfolding conc_fun_def by (auto split: nres.split)
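text \<open>For example, for \<open>R = {(0::nat, True), (1, False)}\<close> we have
  \<open>\<Down>R (RES {True}) = RES {0}\<close>: the concrete results are exactly those values
  that \<open>R\<close> relates to some element of the abstract result set.\<close>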
lemma abs_fun_simps[simp]:
"\<Up>R FAIL = FAIL"
"X\<subseteq>Domain R \<Longrightarrow> \<Up>R (RES X) = RES (R``X)"
"\<not>(X\<subseteq>Domain R) \<Longrightarrow> \<Up>R (RES X) = FAIL"
unfolding abs_fun_def by (auto split: nres.split)
context fixes R assumes SV: "single_valued R" begin
lemma conc_abs_swap: "m' \<le> \<Down>R m \<longleftrightarrow> \<Up>R m' \<le> m"
unfolding conc_fun_def abs_fun_def using SV
by (auto split: nres.split)
(metis ImageE converseD single_valuedD subsetD)
lemma ac_galois: "galois_connection (\<Up>R) (\<Down>R)"
apply (unfold_locales)
by (rule conc_abs_swap)
end
lemma pw_abs_nofail[refine_pw_simps]:
"nofail (\<Up>R M) \<longleftrightarrow> (nofail M \<and> (\<forall>x. inres M x \<longrightarrow> x\<in>Domain R))"
apply (cases M)
apply simp
apply (auto simp: abs_fun_simps abs_fun_def)
done
lemma pw_abs_inres[refine_pw_simps]:
"inres (\<Up>R M) a \<longleftrightarrow> (nofail (\<Up>R M) \<longrightarrow> (\<exists>c. inres M c \<and> (c,a)\<in>R))"
apply (cases M)
apply simp
apply (auto simp: abs_fun_def)
done
lemma pw_conc_nofail[refine_pw_simps]:
"nofail (\<Down>R S) = nofail S"
by (cases S) (auto simp: conc_fun_RES)
lemma pw_conc_inres[refine_pw_simps]:
"inres (\<Down>R S') = (\<lambda>s. nofail S'
\<longrightarrow> (\<exists>s'. (s,s')\<in>R \<and> inres S' s'))"
apply (rule ext)
apply (cases S')
apply (auto simp: conc_fun_RES)
done
lemma abs_fun_strict[simp]:
"\<Up> R SUCCEED = SUCCEED"
unfolding abs_fun_def by (auto split: nres.split)
lemma conc_fun_strict[simp]:
"\<Down> R SUCCEED = SUCCEED"
unfolding conc_fun_def by (auto split: nres.split)
lemma conc_fun_mono[simp, intro!]: "mono (\<Down>R)"
by rule (auto simp: pw_le_iff refine_pw_simps)
lemma abs_fun_mono[simp, intro!]: "mono (\<Up>R)"
by rule (auto simp: pw_le_iff refine_pw_simps)
lemma conc_fun_R_mono:
assumes "R \<subseteq> R'"
shows "\<Down>R M \<le> \<Down>R' M"
using assms
by (auto simp: pw_le_iff refine_pw_simps)
lemma conc_fun_chain: "\<Down>R (\<Down>S M) = \<Down>(R O S) M"
unfolding conc_fun_def
by (auto split: nres.split)
lemma conc_Id[simp]: "\<Down>Id = id"
unfolding conc_fun_def [abs_def] by (auto split: nres.split)
lemma abs_Id[simp]: "\<Up>Id = id"
unfolding abs_fun_def [abs_def] by (auto split: nres.split)
lemma conc_fun_fail_iff[simp]:
"\<Down>R S = FAIL \<longleftrightarrow> S=FAIL"
"FAIL = \<Down>R S \<longleftrightarrow> S=FAIL"
by (auto simp add: pw_eq_iff refine_pw_simps)
lemma conc_trans[trans]:
assumes A: "C \<le> \<Down>R B" and B: "B \<le> \<Down>R' A"
shows "C \<le> \<Down>R (\<Down>R' A)"
using assms by (fastforce simp: pw_le_iff refine_pw_simps)
lemma abs_trans[trans]:
assumes A: "\<Up>R C \<le> B" and B: "\<Up>R' B \<le> A"
shows "\<Up>R' (\<Up>R C) \<le> A"
using assms by (fastforce simp: pw_le_iff refine_pw_simps)
subsubsection \<open>Transitivity Reasoner Setup\<close>
text \<open>WARNING: The order of the single statements is important here!\<close>
lemma conc_trans_additional[trans]:
"\<And>A B C. A\<le>\<Down>R B \<Longrightarrow> B\<le> C \<Longrightarrow> A\<le>\<Down>R C"
"\<And>A B C. A\<le>\<Down>Id B \<Longrightarrow> B\<le>\<Down>R C \<Longrightarrow> A\<le>\<Down>R C"
"\<And>A B C. A\<le>\<Down>R B \<Longrightarrow> B\<le>\<Down>Id C \<Longrightarrow> A\<le>\<Down>R C"
"\<And>A B C. A\<le>\<Down>Id B \<Longrightarrow> B\<le>\<Down>Id C \<Longrightarrow> A\<le> C"
"\<And>A B C. A\<le>\<Down>Id B \<Longrightarrow> B\<le> C \<Longrightarrow> A\<le> C"
"\<And>A B C. A\<le> B \<Longrightarrow> B\<le>\<Down>Id C \<Longrightarrow> A\<le> C"
using conc_trans[where R=R and R'=Id]
by (auto intro: order_trans)
text \<open>WARNING: The order of the single statements is important here!\<close>
lemma abs_trans_additional[trans]:
"\<And>A B C. \<lbrakk> A \<le> B; \<Up> R B \<le> C\<rbrakk> \<Longrightarrow> \<Up> R A \<le> C"
"\<And>A B C. \<lbrakk>\<Up> Id A \<le> B; \<Up> R B \<le> C\<rbrakk> \<Longrightarrow> \<Up> R A \<le> C"
"\<And>A B C. \<lbrakk>\<Up> R A \<le> B; \<Up> Id B \<le> C\<rbrakk> \<Longrightarrow> \<Up> R A \<le> C"
"\<And>A B C. \<lbrakk>\<Up> Id A \<le> B; \<Up> Id B \<le> C\<rbrakk> \<Longrightarrow> A \<le> C"
"\<And>A B C. \<lbrakk>\<Up> Id A \<le> B; B \<le> C\<rbrakk> \<Longrightarrow> A \<le> C"
"\<And>A B C. \<lbrakk>A \<le> B; \<Up> Id B \<le> C\<rbrakk> \<Longrightarrow> A \<le> C"
apply (auto simp: refine_pw_simps pw_le_iff)
apply fastforce+
done
subsection \<open>Derived Program Constructs\<close>
text \<open>
In this section, we introduce some programming constructs that are derived
from the basic monad and ordering operations of our nondeterminism monad.
\<close>
subsubsection \<open>ASSUME and ASSERT\<close>
definition ASSERT where "ASSERT \<equiv> iASSERT RETURN"
definition ASSUME where "ASSUME \<equiv> iASSUME RETURN"
interpretation assert?: generic_Assert bind RETURN ASSERT ASSUME
apply unfold_locales
by (simp_all add: ASSERT_def ASSUME_def)
text \<open>Order matters! \<close>
lemmas [refine_vcg] = ASSERT_leI
lemmas [refine_vcg] = le_ASSUMEI
lemmas [refine_vcg] = le_ASSERTI
lemmas [refine_vcg] = ASSUME_leI
lemma pw_ASSERT[refine_pw_simps]:
"nofail (ASSERT \<Phi>) \<longleftrightarrow> \<Phi>"
"inres (ASSERT \<Phi>) x"
by (cases \<Phi>, simp_all)+
lemma pw_ASSUME[refine_pw_simps]:
"nofail (ASSUME \<Phi>)"
"inres (ASSUME \<Phi>) x \<longleftrightarrow> \<Phi>"
by (cases \<Phi>, simp_all)+
subsubsection \<open>Recursion\<close>
lemma pw_REC_nofail:
shows "nofail (REC B x) \<longleftrightarrow> trimono B \<and>
(\<exists>F. (\<forall>x.
nofail (F x) \<longrightarrow> nofail (B F x)
\<and> (\<forall>x'. inres (B F x) x' \<longrightarrow> inres (F x) x')
) \<and> nofail (F x))"
proof -
have "nofail (REC B x) \<longleftrightarrow> trimono B \<and>
(\<exists>F. (\<forall>x. B F x \<le> F x) \<and> nofail (F x))"
unfolding REC_def lfp_def
apply (auto simp: refine_pw_simps intro: le_funI dest: le_funD)
done
thus ?thesis
unfolding pw_le_iff .
qed
lemma pw_REC_inres:
"inres (REC B x) x' = (trimono B \<longrightarrow>
(\<forall>F. (\<forall>x''.
nofail (F x'') \<longrightarrow> nofail (B F x'')
\<and> (\<forall>x. inres (B F x'') x \<longrightarrow> inres (F x'') x))
\<longrightarrow> inres (F x) x'))"
proof -
have "inres (REC B x) x'
\<longleftrightarrow> (trimono B \<longrightarrow> (\<forall>F. (\<forall>x''. B F x'' \<le> F x'') \<longrightarrow> inres (F x) x'))"
unfolding REC_def lfp_def
by (auto simp: refine_pw_simps intro: le_funI dest: le_funD)
thus ?thesis unfolding pw_le_iff .
qed
lemmas pw_REC = pw_REC_inres pw_REC_nofail
lemma pw_RECT_nofail:
shows "nofail (RECT B x) \<longleftrightarrow> trimono B \<and>
(\<forall>F. (\<forall>y. nofail (B F y) \<longrightarrow>
nofail (F y) \<and> (\<forall>x. inres (F y) x \<longrightarrow> inres (B F y) x)) \<longrightarrow>
nofail (F x))"
proof -
have "nofail (RECT B x) \<longleftrightarrow> (trimono B \<and> (\<forall>F. (\<forall>y. F y \<le> B F y) \<longrightarrow> nofail (F x)))"
unfolding RECT_gfp_def gfp_def
by (auto simp: refine_pw_simps intro: le_funI dest: le_funD)
thus ?thesis
unfolding pw_le_iff .
qed
lemma pw_RECT_inres:
shows "inres (RECT B x) x' = (trimono B \<longrightarrow>
(\<exists>M. (\<forall>y. nofail (B M y) \<longrightarrow>
nofail (M y) \<and> (\<forall>x. inres (M y) x \<longrightarrow> inres (B M y) x)) \<and>
inres (M x) x'))"
proof -
have "inres (RECT B x) x' \<longleftrightarrow> trimono B \<longrightarrow> (\<exists>M. (\<forall>y. M y \<le> B M y) \<and> inres (M x) x')"
unfolding RECT_gfp_def gfp_def
by (auto simp: refine_pw_simps intro: le_funI dest: le_funD)
thus ?thesis unfolding pw_le_iff .
qed
lemmas pw_RECT = pw_RECT_inres pw_RECT_nofail
subsection \<open>Proof Rules\<close>
subsubsection \<open>Proving Correctness\<close>
text \<open>
In this section, we establish Hoare-like rules to prove that a program
meets its specification.
\<close>
lemma le_SPEC_UNIV_rule [refine_vcg]:
"m \<le> SPEC (\<lambda>_. True) \<Longrightarrow> m \<le> RES UNIV" by auto
lemma RETURN_rule[refine_vcg]: "\<Phi> x \<Longrightarrow> RETURN x \<le> SPEC \<Phi>"
by (auto simp: RETURN_def)
lemma RES_rule[refine_vcg]: "\<lbrakk>\<And>x. x\<in>S \<Longrightarrow> \<Phi> x\<rbrakk> \<Longrightarrow> RES S \<le> SPEC \<Phi>"
by auto
lemma SUCCEED_rule[refine_vcg]: "SUCCEED \<le> SPEC \<Phi>" by auto
lemma FAIL_rule: "False \<Longrightarrow> FAIL \<le> SPEC \<Phi>" by auto
lemma SPEC_rule[refine_vcg]: "\<lbrakk>\<And>x. \<Phi> x \<Longrightarrow> \<Phi>' x\<rbrakk> \<Longrightarrow> SPEC \<Phi> \<le> SPEC \<Phi>'" by auto
lemma RETURN_to_SPEC_rule[refine_vcg]: "m\<le>SPEC ((=) v) \<Longrightarrow> m\<le>RETURN v"
by (simp add: pw_le_iff refine_pw_simps)
lemma Sup_img_rule_complete:
"(\<forall>x. x\<in>S \<longrightarrow> f x \<le> SPEC \<Phi>) \<longleftrightarrow> Sup (f`S) \<le> SPEC \<Phi>"
apply rule
apply (rule pw_leI)
apply (auto simp: pw_le_iff refine_pw_simps) []
apply (intro allI impI)
apply (rule pw_leI)
apply (auto simp: pw_le_iff refine_pw_simps) []
done
lemma SUP_img_rule_complete:
"(\<forall>x. x\<in>S \<longrightarrow> f x \<le> SPEC \<Phi>) \<longleftrightarrow> Sup (f ` S) \<le> SPEC \<Phi>"
using Sup_img_rule_complete [of S f] by simp
lemma Sup_img_rule[refine_vcg]:
"\<lbrakk> \<And>x. x\<in>S \<Longrightarrow> f x \<le> SPEC \<Phi> \<rbrakk> \<Longrightarrow> Sup(f`S) \<le> SPEC \<Phi>"
by (auto simp: SUP_img_rule_complete[symmetric])
text \<open>This lemma is just to demonstrate that our rule is complete.\<close>
lemma bind_rule_complete: "bind M f \<le> SPEC \<Phi> \<longleftrightarrow> M \<le> SPEC (\<lambda>x. f x \<le> SPEC \<Phi>)"
by (auto simp: pw_le_iff refine_pw_simps)
lemma bind_rule[refine_vcg]:
"\<lbrakk> M \<le> SPEC (\<lambda>x. f x \<le> SPEC \<Phi>) \<rbrakk> \<Longrightarrow> bind M (\<lambda>x. f x) \<le> SPEC \<Phi>"
\<comment> \<open>Note: @{text "\<eta>"}-expanded version helps Isabelle's unification to keep meaningful
variable names from the program\<close>
by (auto simp: bind_rule_complete)
lemma ASSUME_rule[refine_vcg]: "\<lbrakk>\<Phi> \<Longrightarrow> \<Psi> ()\<rbrakk> \<Longrightarrow> ASSUME \<Phi> \<le> SPEC \<Psi>"
by (cases \<Phi>) auto
lemma ASSERT_rule[refine_vcg]: "\<lbrakk>\<Phi>; \<Phi> \<Longrightarrow> \<Psi> ()\<rbrakk> \<Longrightarrow> ASSERT \<Phi> \<le> SPEC \<Psi>" by auto
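text \<open>A small worked example (hypothetical, for illustration only): the rules
  above discharge the verification of a program that asserts its precondition,
  makes a nondeterministic choice, and returns a result. The elementary goals
  left over by the VCG are closed by \<open>auto\<close>.\<close>
lemma "0 < n \<Longrightarrow> do { ASSERT (0 < n); x \<leftarrow> RES {n, n + 1}; RETURN (x + 1) } \<le> SPEC (\<lambda>r. n < r)"
  by refine_vcg auto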
lemma prod_rule[refine_vcg]:
"\<lbrakk>\<And>a b. p=(a,b) \<Longrightarrow> S a b \<le> SPEC \<Phi>\<rbrakk> \<Longrightarrow> case_prod S p \<le> SPEC \<Phi>"
by (auto split: prod.split)
(* TODO: Add a simplifier setup to the VCG that normalizes nested
  case-expressions! *)
lemma prod2_rule[refine_vcg]:
assumes "\<And>a b c d. \<lbrakk>ab=(a,b); cd=(c,d)\<rbrakk> \<Longrightarrow> f a b c d \<le> SPEC \<Phi>"
shows "(\<lambda>(a,b) (c,d). f a b c d) ab cd \<le> SPEC \<Phi>"
using assms
by (auto split: prod.split)
lemma if_rule[refine_vcg]:
"\<lbrakk> b \<Longrightarrow> S1 \<le> SPEC \<Phi>; \<not>b \<Longrightarrow> S2 \<le> SPEC \<Phi>\<rbrakk>
\<Longrightarrow> (if b then S1 else S2) \<le> SPEC \<Phi>"
by (auto)
lemma option_rule[refine_vcg]:
"\<lbrakk> v=None \<Longrightarrow> S1 \<le> SPEC \<Phi>; \<And>x. v=Some x \<Longrightarrow> f2 x \<le> SPEC \<Phi>\<rbrakk>
\<Longrightarrow> case_option S1 f2 v \<le> SPEC \<Phi>"
by (auto split: option.split)
lemma Let_rule[refine_vcg]:
"f x \<le> SPEC \<Phi> \<Longrightarrow> Let x f \<le> SPEC \<Phi>" by auto
lemma Let_rule':
assumes "\<And>x. x=v \<Longrightarrow> f x \<le> SPEC \<Phi>"
shows "Let v (\<lambda>x. f x) \<le> SPEC \<Phi>"
using assms by simp
(* Obsolete, use RECT_eq_REC_tproof instead
text {* The following lemma shows that greatest and least fixed point are equal,
if we can provide a variant. *}
thm RECT_eq_REC
lemma RECT_eq_REC_old:
assumes WF: "wf V"
assumes I0: "I x"
assumes IS: "\<And>f x. I x \<Longrightarrow>
body (\<lambda>x'. do { ASSERT (I x' \<and> (x',x)\<in>V); f x'}) x \<le> body f x"
shows "REC\<^sub>T body x = REC body x"
apply (rule RECT_eq_REC)
apply (rule WF)
apply (rule I0)
apply (rule order_trans[OF _ IS])
apply (subgoal_tac "(\<lambda>x'. if I x' \<and> (x', x) \<in> V then f x' else FAIL) =
(\<lambda>x'. ASSERT (I x' \<and> (x', x) \<in> V) \<bind> (\<lambda>_. f x'))")
apply simp
apply (rule ext)
apply (rule pw_eqI)
apply (auto simp add: refine_pw_simps)
done
*)
(* TODO: Also require RECT_le_rule. Derive RECT_invisible_refine from that. *)
lemma REC_le_rule:
assumes M: "trimono body"
assumes I0: "(x,x')\<in>R"
assumes IS: "\<And>f x x'. \<lbrakk> \<And>x x'. (x,x')\<in>R \<Longrightarrow> f x \<le> M x'; (x,x')\<in>R \<rbrakk>
\<Longrightarrow> body f x \<le> M x'"
shows "REC body x \<le> M x'"
by (rule REC_rule_arb[OF M, where pre="\<lambda>x' x. (x,x')\<in>R", OF I0 IS])
(* TODO: Invariant annotations and vcg-rule
Possibility 1: Semantically alter the program, such that it fails if the
invariant does not hold
Possibility 2: Only syntactically annotate the invariant, as hint for the VCG.
*)
subsubsection \<open>Proving Monotonicity\<close>
lemma nr_mono_bind:
assumes MA: "mono A" and MB: "\<And>s. mono (B s)"
shows "mono (\<lambda>F s. bind (A F s) (\<lambda>s'. B s F s'))"
apply (rule monoI)
apply (rule le_funI)
apply (rule bind_mono)
apply (auto dest: monoD[OF MA, THEN le_funD]) []
apply (auto dest: monoD[OF MB, THEN le_funD]) []
done
lemma nr_mono_bind': "mono (\<lambda>F s. bind (f s) F)"
apply rule
apply (rule le_funI)
apply (rule bind_mono)
apply (auto dest: le_funD)
done
lemmas nr_mono = nr_mono_bind nr_mono_bind' mono_const mono_if mono_id
subsubsection \<open>Proving Refinement\<close>
text \<open>In this subsection, we establish rules to prove refinement between
structurally similar programs. All rules are formulated including a possible
data refinement via a refinement relation. If this is not required, the
refinement relation can be chosen to be the identity relation.
\<close>
text \<open>If we have two identical programs, this rule solves the refinement goal
immediately, using the identity refinement relation.\<close>
lemma Id_refine[refine0]: "S \<le> \<Down>Id S" by auto
lemma RES_refine:
"\<lbrakk> \<And>s. s\<in>S \<Longrightarrow> \<exists>s'\<in>S'. (s,s')\<in>R\<rbrakk> \<Longrightarrow> RES S \<le> \<Down>R (RES S')"
by (auto simp: conc_fun_RES)
lemma SPEC_refine:
assumes "S \<le> SPEC (\<lambda>x. \<exists>x'. (x,x')\<in>R \<and> \<Phi> x')"
shows "S \<le> \<Down>R (SPEC \<Phi>)"
using assms
by (force simp: pw_le_iff refine_pw_simps)
(* TODO/FIXME: This is already part of a type-based heuristics! *)
lemma Id_SPEC_refine[refine]:
"S \<le> SPEC \<Phi> \<Longrightarrow> S \<le> \<Down>Id (SPEC \<Phi>)" by simp
lemma RETURN_SPEC_refine:
assumes "\<exists>x'. (x,x')\<in>R \<and> \<Phi> x'"
shows "RETURN x \<le> \<Down>R (SPEC \<Phi>)"
using assms
by (auto simp: pw_le_iff refine_pw_simps)
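text \<open>A tiny usage sketch (the refinement relation is hypothetical):
  a concrete result refines an abstract specification.\<close>
lemma "RETURN (2::nat) \<le> \<Down>{(c, a). a = int c} (SPEC (\<lambda>a::int. 0 < a))"
  by (rule RETURN_SPEC_refine) auto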
lemma FAIL_refine[refine]: "X \<le> \<Down>R FAIL" by auto
lemma SUCCEED_refine[refine]: "SUCCEED \<le> \<Down>R X'" by auto
lemma sup_refine[refine]:
assumes "ai \<le>\<Down>R a"
assumes "bi \<le>\<Down>R b"
shows "sup ai bi \<le>\<Down>R (sup a b)"
using assms by (auto simp: pw_le_iff refine_pw_simps)
text \<open>The next two rules are incomplete, but a good approximation for refining
structurally similar programs.\<close>
lemma bind_refine':
fixes R' :: "('a\<times>'b) set" and R::"('c\<times>'d) set"
assumes R1: "M \<le> \<Down> R' M'"
assumes R2: "\<And>x x'. \<lbrakk> (x,x')\<in>R'; inres M x; inres M' x';
nofail M; nofail M'
\<rbrakk> \<Longrightarrow> f x \<le> \<Down> R (f' x')"
shows "bind M (\<lambda>x. f x) \<le> \<Down> R (bind M' (\<lambda>x'. f' x'))"
using assms
apply (simp add: pw_le_iff refine_pw_simps)
apply fast
done
lemma bind_refine[refine]:
fixes R' :: "('a\<times>'b) set" and R::"('c\<times>'d) set"
assumes R1: "M \<le> \<Down> R' M'"
assumes R2: "\<And>x x'. \<lbrakk> (x,x')\<in>R' \<rbrakk>
\<Longrightarrow> f x \<le> \<Down> R (f' x')"
shows "bind M (\<lambda>x. f x) \<le> \<Down> R (bind M' (\<lambda>x'. f' x'))"
apply (rule bind_refine') using assms by auto
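text \<open>A sketch of how \<open>bind_refine\<close> decomposes a refinement between two
  structurally similar programs; the refinement relation between naturals
  and integers is hypothetical.\<close>
lemma "do { x \<leftarrow> RETURN (1::nat); RETURN (x + 1) }
  \<le> \<Down>{(c, a). a = int c} (do { x \<leftarrow> RETURN (1::int); RETURN (x + 1) })"
  apply (rule bind_refine[where R'="{(c, a). a = int c}"])
  apply (auto simp: pw_le_iff refine_pw_simps)
  done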
lemma bind_refine_abs': (* Only keep nf_inres-information for abstract *)
fixes R' :: "('a\<times>'b) set" and R::"('c\<times>'d) set"
assumes R1: "M \<le> \<Down> R' M'"
assumes R2: "\<And>x x'. \<lbrakk> (x,x')\<in>R'; nf_inres M' x'
\<rbrakk> \<Longrightarrow> f x \<le> \<Down> R (f' x')"
shows "bind M (\<lambda>x. f x) \<le> \<Down> R (bind M' (\<lambda>x'. f' x'))"
using assms
apply (simp add: pw_le_iff refine_pw_simps)
apply blast
done
text \<open>Special cases for refinement of binding to \<open>RES\<close>
statements\<close>
lemma bind_refine_RES:
"\<lbrakk>RES X \<le> \<Down> R' M';
\<And>x x'. \<lbrakk>(x, x') \<in> R'; x \<in> X \<rbrakk> \<Longrightarrow> f x \<le> \<Down> R (f' x')\<rbrakk>
\<Longrightarrow> RES X \<bind> (\<lambda>x. f x) \<le> \<Down> R (M' \<bind> (\<lambda>x'. f' x'))"
"\<lbrakk>M \<le> \<Down> R' (RES X');
\<And>x x'. \<lbrakk>(x, x') \<in> R'; x' \<in> X' \<rbrakk> \<Longrightarrow> f x \<le> \<Down> R (f' x')\<rbrakk>
\<Longrightarrow> M \<bind> (\<lambda>x. f x) \<le> \<Down> R (RES X' \<bind> (\<lambda>x'. f' x'))"
"\<lbrakk>RES X \<le> \<Down> R' (RES X');
\<And>x x'. \<lbrakk>(x, x') \<in> R'; x \<in> X; x' \<in> X'\<rbrakk> \<Longrightarrow> f x \<le> \<Down> R (f' x')\<rbrakk>
\<Longrightarrow> RES X \<bind> (\<lambda>x. f x) \<le> \<Down> R (RES X' \<bind> (\<lambda>x'. f' x'))"
by (auto intro!: bind_refine')
declare bind_refine_RES(1,2)[refine]
declare bind_refine_RES(3)[refine]
lemma ASSERT_refine[refine]:
"\<lbrakk> \<Phi>'\<Longrightarrow>\<Phi> \<rbrakk> \<Longrightarrow> ASSERT \<Phi> \<le> \<Down>Id (ASSERT \<Phi>')"
by (cases \<Phi>') auto
lemma ASSUME_refine[refine]:
"\<lbrakk> \<Phi> \<Longrightarrow> \<Phi>' \<rbrakk> \<Longrightarrow> ASSUME \<Phi> \<le> \<Down>Id (ASSUME \<Phi>')"
by (cases \<Phi>) auto
text \<open>
Assertions and assumptions are treated specially in bindings
\<close>
lemma ASSERT_refine_right:
assumes "\<Phi> \<Longrightarrow> S \<le>\<Down>R S'"
shows "S \<le>\<Down>R (do {ASSERT \<Phi>; S'})"
using assms by (cases \<Phi>) auto
lemma ASSERT_refine_right_pres:
assumes "\<Phi> \<Longrightarrow> S \<le>\<Down>R (do {ASSERT \<Phi>; S'})"
shows "S \<le>\<Down>R (do {ASSERT \<Phi>; S'})"
using assms by (cases \<Phi>) auto
lemma ASSERT_refine_left:
assumes "\<Phi>"
assumes "\<Phi> \<Longrightarrow> S \<le> \<Down>R S'"
shows "do{ASSERT \<Phi>; S} \<le> \<Down>R S'"
using assms by (cases \<Phi>) auto
lemma ASSUME_refine_right:
assumes "\<Phi>"
assumes "\<Phi> \<Longrightarrow> S \<le>\<Down>R S'"
shows "S \<le>\<Down>R (do {ASSUME \<Phi>; S'})"
using assms by (cases \<Phi>) auto
lemma ASSUME_refine_left:
assumes "\<Phi> \<Longrightarrow> S \<le> \<Down>R S'"
shows "do {ASSUME \<Phi>; S} \<le> \<Down>R S'"
using assms by (cases \<Phi>) auto
lemma ASSUME_refine_left_pres:
assumes "\<Phi> \<Longrightarrow> do {ASSUME \<Phi>; S} \<le> \<Down>R S'"
shows "do {ASSUME \<Phi>; S} \<le> \<Down>R S'"
using assms by (cases \<Phi>) auto
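text \<open>A sketch: an assertion on the right-hand side becomes an assumption
  for the remaining refinement proof.\<close>
lemma "RETURN (a div b) \<le> \<Down>Id (do { ASSERT (b \<noteq> (0::nat)); RETURN (a div b) })"
  by (rule ASSERT_refine_right) simp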
text \<open>Warning: The order of \<open>[refine]\<close>-declarations is
important here, as preconditions should be generated before
additional proof obligations.\<close>
lemmas [refine0] = ASSUME_refine_right
lemmas [refine0] = ASSERT_refine_left
lemmas [refine0] = ASSUME_refine_left
lemmas [refine0] = ASSERT_refine_right
text \<open>For backward compatibility, as \<open>intro refine\<close> still
seems to be used instead of \<open>refine_rcg\<close>.\<close>
lemmas [refine] = ASSUME_refine_right
lemmas [refine] = ASSERT_refine_left
lemmas [refine] = ASSUME_refine_left
lemmas [refine] = ASSERT_refine_right
definition lift_assn :: "('a \<times> 'b) set \<Rightarrow> ('b \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> bool)"
\<comment> \<open>Lift assertion over refinement relation\<close>
where "lift_assn R \<Phi> s \<equiv> \<exists>s'. (s,s')\<in>R \<and> \<Phi> s'"
lemma lift_assnI: "\<lbrakk>(s,s')\<in>R; \<Phi> s'\<rbrakk> \<Longrightarrow> lift_assn R \<Phi> s"
unfolding lift_assn_def by auto
lemma REC_refine[refine]:
assumes M: "trimono body"
assumes R0: "(x,x')\<in>R"
assumes RS: "\<And>f f' x x'. \<lbrakk> \<And>x x'. (x,x')\<in>R \<Longrightarrow> f x \<le>\<Down>S (f' x'); (x,x')\<in>R;
REC body' = f' \<rbrakk>
\<Longrightarrow> body f x \<le>\<Down>S (body' f' x')"
shows "REC (\<lambda>f x. body f x) x \<le>\<Down>S (REC (\<lambda>f' x'. body' f' x') x')"
unfolding REC_def
apply (clarsimp simp add: M)
apply (rule lfp_induct_pointwise[where pre="\<lambda>x' x. (x,x')\<in>R" and B=body])
apply rule
apply clarsimp
apply (blast intro: SUP_least)
apply simp
apply (simp add: trimonoD[OF M])
apply (rule R0)
apply (subst lfp_unfold, simp add: trimonoD)
apply (rule RS)
apply blast
apply blast
apply (simp add: REC_def[abs_def])
done
lemma RECT_refine[refine]:
assumes M: "trimono body"
assumes R0: "(x,x')\<in>R"
assumes RS: "\<And>f f' x x'. \<lbrakk> \<And>x x'. (x,x')\<in>R \<Longrightarrow> f x \<le>\<Down>S (f' x'); (x,x')\<in>R \<rbrakk>
\<Longrightarrow> body f x \<le>\<Down>S (body' f' x')"
shows "RECT (\<lambda>f x. body f x) x \<le>\<Down>S (RECT (\<lambda>f' x'. body' f' x') x')"
unfolding RECT_def
apply (clarsimp simp add: M)
apply (rule flatf_fixp_transfer[where
fp'="flatf_gfp body"
and B'=body
and P="\<lambda>x x'. (x',x)\<in>R",
OF _ _ flatf_ord.fixp_unfold[OF M[THEN trimonoD_flatf_ge]] R0])
apply simp
apply (simp add: trimonoD)
by (rule RS)
lemma if_refine[refine]:
assumes "b \<longleftrightarrow> b'"
assumes "\<lbrakk>b;b'\<rbrakk> \<Longrightarrow> S1 \<le> \<Down>R S1'"
assumes "\<lbrakk>\<not>b;\<not>b'\<rbrakk> \<Longrightarrow> S2 \<le> \<Down>R S2'"
shows "(if b then S1 else S2) \<le> \<Down>R (if b' then S1' else S2')"
using assms by auto
lemma Let_unfold_refine[refine]:
assumes "f x \<le> \<Down>R (f' x')"
shows "Let x f \<le> \<Down>R (Let x' f')"
using assms by auto
text \<open>The next lemma is sometimes more convenient, as it prevents
  large let-expressions from being completely unfolded and thereby
  exploding in size.\<close>
lemma Let_refine:
assumes "(m,m')\<in>R'"
assumes "\<And>x x'. (x,x')\<in>R' \<Longrightarrow> f x \<le> \<Down>R (f' x')"
shows "Let m (\<lambda>x. f x) \<le>\<Down>R (Let m' (\<lambda>x'. f' x'))"
using assms by auto
lemma Let_refine':
assumes "(m,m')\<in>R"
assumes "(m,m')\<in>R \<Longrightarrow> f m \<le>\<Down>S (f' m')"
shows "Let m f \<le> \<Down>S (Let m' f')"
using assms by simp
lemma case_option_refine[refine]:
assumes "(v,v')\<in>\<langle>Ra\<rangle>option_rel"
assumes "\<lbrakk>v=None; v'=None\<rbrakk> \<Longrightarrow> n \<le> \<Down> Rb n'"
assumes "\<And>x x'. \<lbrakk> v=Some x; v'=Some x'; (x, x') \<in> Ra \<rbrakk>
\<Longrightarrow> f x \<le> \<Down> Rb (f' x')"
shows "case_option n f v \<le>\<Down>Rb (case_option n' f' v')"
using assms
by (auto split: option.split simp: option_rel_def)
lemma list_case_refine[refine]:
assumes "(li,l)\<in>\<langle>S\<rangle>list_rel"
assumes "fni \<le>\<Down>R fn"
assumes "\<And>xi x xsi xs. \<lbrakk> (xi,x)\<in>S; (xsi,xs)\<in>\<langle>S\<rangle>list_rel; li=xi#xsi; l=x#xs \<rbrakk> \<Longrightarrow> fci xi xsi \<le>\<Down>R (fc x xs)"
shows "(case li of [] \<Rightarrow> fni | xi#xsi \<Rightarrow> fci xi xsi) \<le> \<Down>R (case l of [] \<Rightarrow> fn | x#xs \<Rightarrow> fc x xs)"
using assms by (auto split: list.split)
text \<open>It is safe to split conjunctions in refinement goals.\<close>
declare conjI[refine]
text \<open>The following rules try to compensate for some structural changes,
like inlining lets or converting binds to lets.\<close>
lemma remove_Let_refine[refine2]:
assumes "M \<le> \<Down>R (f x)"
shows "M \<le> \<Down>R (Let x f)" using assms by auto
lemma intro_Let_refine[refine2]:
assumes "f x \<le> \<Down>R M'"
shows "Let x f \<le> \<Down>R M'" using assms by auto
lemma bind_Let_refine2[refine2]: "\<lbrakk>
m' \<le>\<Down>R' (RETURN x);
\<And>x'. \<lbrakk>inres m' x'; (x',x)\<in>R'\<rbrakk> \<Longrightarrow> f' x' \<le> \<Down>R (f x)
\<rbrakk> \<Longrightarrow> m'\<bind>(\<lambda>x'. f' x') \<le> \<Down>R (Let x (\<lambda>x. f x))"
apply (simp add: pw_le_iff refine_pw_simps)
apply blast
done
lemma bind2letRETURN_refine[refine2]:
assumes "RETURN x \<le> \<Down>R' M'"
assumes "\<And>x'. (x,x')\<in>R' \<Longrightarrow> RETURN (f x) \<le> \<Down>R (f' x')"
shows "RETURN (Let x f) \<le> \<Down>R (bind M' (\<lambda>x'. f' x'))"
using assms
apply (simp add: pw_le_iff refine_pw_simps)
apply fast
done
lemma RETURN_as_SPEC_refine[refine2]:
assumes "M \<le> SPEC (\<lambda>c. (c,a)\<in>R)"
shows "M \<le> \<Down>R (RETURN a)"
using assms
by (simp add: pw_le_iff refine_pw_simps)
lemma RETURN_as_SPEC_refine_old:
"\<And>M R. M \<le> \<Down>R (SPEC (\<lambda>x. x=v)) \<Longrightarrow> M \<le>\<Down>R (RETURN v)"
by (simp add: RETURN_def)
lemma if_RETURN_refine [refine2]:
assumes "b \<longleftrightarrow> b'"
assumes "\<lbrakk>b;b'\<rbrakk> \<Longrightarrow> RETURN S1 \<le> \<Down>R S1'"
assumes "\<lbrakk>\<not>b;\<not>b'\<rbrakk> \<Longrightarrow> RETURN S2 \<le> \<Down>R S2'"
shows "RETURN (if b then S1 else S2) \<le> \<Down>R (if b' then S1' else S2')"
(* this is nice to have for small functions, hence keep it in refine2 *)
using assms
by (simp add: pw_le_iff refine_pw_simps)
lemma RES_sng_as_SPEC_refine[refine2]:
assumes "M \<le> SPEC (\<lambda>c. (c,a)\<in>R)"
shows "M \<le> \<Down>R (RES {a})"
using assms
by (simp add: pw_le_iff refine_pw_simps)
lemma intro_spec_refine_iff:
"(bind (RES X) f \<le> \<Down>R M) \<longleftrightarrow> (\<forall>x\<in>X. f x \<le> \<Down>R M)"
apply (simp add: pw_le_iff refine_pw_simps)
apply blast
done
lemma intro_spec_refine[refine2]:
assumes "\<And>x. x\<in>X \<Longrightarrow> f x \<le> \<Down>R M"
shows "bind (RES X) (\<lambda>x. f x) \<le> \<Down>R M"
using assms
by (simp add: intro_spec_refine_iff)
text \<open>The following rules are intended for manual application, to reflect
  some common structural changes that, however, are not suited to
  automatic application.\<close>
text \<open>Replacing a let by a deterministic computation\<close>
lemma let2bind_refine:
assumes "m \<le> \<Down>R' (RETURN m')"
assumes "\<And>x x'. (x,x')\<in>R' \<Longrightarrow> f x \<le> \<Down>R (f' x')"
shows "bind m (\<lambda>x. f x) \<le> \<Down>R (Let m' (\<lambda>x'. f' x'))"
using assms
apply (simp add: pw_le_iff refine_pw_simps)
apply blast
done
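text \<open>A sketch: the concrete program computes the value that the abstract
  program merely binds with \<open>Let\<close>.\<close>
lemma "do { x \<leftarrow> SPEC ((=) (5::nat)); RETURN (x + 1) } \<le> \<Down>Id (Let 5 (\<lambda>x. RETURN (x + 1)))"
  apply (rule let2bind_refine[where R'=Id])
  apply (auto simp: pw_le_iff refine_pw_simps)
  done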
text \<open>Introduce a new binding, without a structural match in the abstract
program\<close>
lemma intro_bind_refine:
assumes "m \<le> \<Down>R' (RETURN m')"
assumes "\<And>x. (x,m')\<in>R' \<Longrightarrow> f x \<le> \<Down>R m''"
shows "bind m (\<lambda>x. f x) \<le> \<Down>R m''"
using assms
apply (simp add: pw_le_iff refine_pw_simps)
apply blast
done
lemma intro_bind_refine_id:
assumes "m \<le> (SPEC ((=) m'))"
assumes "f m' \<le> \<Down>R m''"
shows "bind m f \<le> \<Down>R m''"
using assms
apply (simp add: pw_le_iff refine_pw_simps)
apply blast
done
text \<open>The following set of rules executes a step on the LHS or RHS of
  a refinement proof obligation, without changing the other side.
  This kind of rule is useful for performing refinements with
  invisible steps.\<close>
lemma lhs_step_If:
"\<lbrakk> b \<Longrightarrow> t \<le> m; \<not>b \<Longrightarrow> e \<le> m \<rbrakk> \<Longrightarrow> If b t e \<le> m" by simp
lemma lhs_step_SPEC:
"\<lbrakk> \<And>x. \<Phi> x \<Longrightarrow> RETURN x \<le> m \<rbrakk> \<Longrightarrow> SPEC (\<lambda>x. \<Phi> x) \<le> m"
by (simp add: pw_le_iff)
lemma lhs_step_bind:
fixes m :: "'a nres" and f :: "'a \<Rightarrow> 'b nres"
assumes "nofail m' \<Longrightarrow> nofail m"
assumes "\<And>x. nf_inres m x \<Longrightarrow> f x \<le> m'"
shows "do {x\<leftarrow>m; f x} \<le> m'"
using assms
by (simp add: pw_le_iff refine_pw_simps) blast
lemma rhs_step_bind:
assumes "m \<le> \<Down>R m'" "inres m x" "\<And>x'. (x,x')\<in>R \<Longrightarrow> lhs \<le>\<Down>S (f' x')"
shows "lhs \<le> \<Down>S (m' \<bind> f')"
using assms
by (simp add: pw_le_iff refine_pw_simps) blast
lemma rhs_step_bind_SPEC:
assumes "\<Phi> x'"
assumes "m \<le> \<Down>R (f' x')"
shows "m \<le> \<Down>R (SPEC \<Phi> \<bind> f')"
using assms by (simp add: pw_le_iff refine_pw_simps) blast
lemma RES_bind_choose:
assumes "x\<in>X"
assumes "m \<le> f x"
shows "m \<le> RES X \<bind> f"
using assms by (auto simp: pw_le_iff refine_pw_simps)
lemma pw_RES_bind_choose:
"nofail (RES X \<bind> f) \<longleftrightarrow> (\<forall>x\<in>X. nofail (f x))"
"inres (RES X \<bind> f) y \<longleftrightarrow> (\<exists>x\<in>X. inres (f x) y)"
by (auto simp: refine_pw_simps)
lemma prod_case_refine:
assumes "(p',p)\<in>R1\<times>\<^sub>rR2"
assumes "\<And>x1' x2' x1 x2. \<lbrakk> p'=(x1',x2'); p=(x1,x2); (x1',x1)\<in>R1; (x2',x2)\<in>R2\<rbrakk> \<Longrightarrow> f' x1' x2' \<le> \<Down>R (f x1 x2)"
shows "(case p' of (x1',x2') \<Rightarrow> f' x1' x2') \<le>\<Down>R (case p of (x1,x2) \<Rightarrow> f x1 x2)"
using assms by (auto split: prod.split)
subsection \<open>Relators\<close>
declare fun_relI[refine]
definition nres_rel where
nres_rel_def_internal: "nres_rel R \<equiv> {(c,a). c \<le> \<Down>R a}"
lemma nres_rel_def: "\<langle>R\<rangle>nres_rel \<equiv> {(c,a). c \<le> \<Down>R a}"
by (simp add: nres_rel_def_internal relAPP_def)
lemma nres_relD: "(c,a)\<in>\<langle>R\<rangle>nres_rel \<Longrightarrow> c \<le>\<Down>R a" by (simp add: nres_rel_def)
lemma nres_relI[refine]: "c \<le>\<Down>R a \<Longrightarrow> (c,a)\<in>\<langle>R\<rangle>nres_rel" by (simp add: nres_rel_def)
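text \<open>A small sketch of the relator: equal deterministic results are
  related over the identity relation.\<close>
lemma "(RETURN x, RETURN x) \<in> \<langle>Id\<rangle>nres_rel"
  by (rule nres_relI) simp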
lemma nres_rel_comp: "\<langle>A\<rangle>nres_rel O \<langle>B\<rangle>nres_rel = \<langle>A O B\<rangle>nres_rel"
by (auto simp: nres_rel_def conc_fun_chain[symmetric] conc_trans)
lemma pw_nres_rel_iff: "(a,b)\<in>\<langle>A\<rangle>nres_rel \<longleftrightarrow> nofail (\<Down> A b) \<longrightarrow> nofail a \<and> (\<forall>x. inres a x \<longrightarrow> inres (\<Down> A b) x)"
by (simp add: pw_le_iff nres_rel_def)
lemma param_SUCCEED[param]: "(SUCCEED,SUCCEED) \<in> \<langle>R\<rangle>nres_rel"
by (auto simp: nres_rel_def)
lemma param_FAIL[param]: "(FAIL,FAIL) \<in> \<langle>R\<rangle>nres_rel"
by (auto simp: nres_rel_def)
lemma param_RES[param]:
"(RES,RES) \<in> \<langle>R\<rangle>set_rel \<rightarrow> \<langle>R\<rangle>nres_rel"
unfolding set_rel_def nres_rel_def
by (fastforce intro: RES_refine)
lemma param_RETURN[param]:
"(RETURN,RETURN) \<in> R \<rightarrow> \<langle>R\<rangle>nres_rel"
by (auto simp: nres_rel_def RETURN_refine)
lemma param_bind[param]:
"(bind,bind) \<in> \<langle>Ra\<rangle>nres_rel \<rightarrow> (Ra\<rightarrow>\<langle>Rb\<rangle>nres_rel) \<rightarrow> \<langle>Rb\<rangle>nres_rel"
by (auto simp: nres_rel_def intro: bind_refine dest: fun_relD)
lemma param_ASSERT_bind[param]: "\<lbrakk>
(\<Phi>,\<Psi>) \<in> bool_rel;
\<lbrakk> \<Phi>; \<Psi> \<rbrakk> \<Longrightarrow> (f,g)\<in>\<langle>R\<rangle>nres_rel
\<rbrakk> \<Longrightarrow> (ASSERT \<Phi> \<then> f, ASSERT \<Psi> \<then> g) \<in> \<langle>R\<rangle>nres_rel"
by (auto intro: nres_relI)
subsection \<open>Autoref Setup\<close>
consts i_nres :: "interface \<Rightarrow> interface"
lemmas [autoref_rel_intf] = REL_INTFI[of nres_rel i_nres]
(*lemma id_nres[autoref_id_self]: "ID_LIST
(l SUCCEED FAIL bind (REC::_ \<Rightarrow> _ \<Rightarrow> _ nres,1) (RECT::_ \<Rightarrow> _ \<Rightarrow> _ nres,1))"
by simp_all
*)
(*definition [simp]: "op_RETURN x \<equiv> RETURN x"
lemma [autoref_op_pat_def]: "RETURN x \<equiv> op_RETURN x" by simp
*)
definition [simp]: "op_nres_ASSERT_bnd \<Phi> m \<equiv> do {ASSERT \<Phi>; m}"
lemma param_op_nres_ASSERT_bnd[param]:
assumes "\<Phi>' \<Longrightarrow> \<Phi>"
assumes "\<lbrakk>\<Phi>'; \<Phi>\<rbrakk> \<Longrightarrow> (m,m')\<in>\<langle>R\<rangle>nres_rel"
shows "(op_nres_ASSERT_bnd \<Phi> m, op_nres_ASSERT_bnd \<Phi>' m') \<in> \<langle>R\<rangle>nres_rel"
using assms
by (auto simp: pw_le_iff refine_pw_simps nres_rel_def)
context begin interpretation autoref_syn .
lemma id_ASSERT[autoref_op_pat_def]:
"do {ASSERT \<Phi>; m} \<equiv> OP (op_nres_ASSERT_bnd \<Phi>)$m"
by simp
definition [simp]: "op_nres_ASSUME_bnd \<Phi> m \<equiv> do {ASSUME \<Phi>; m}"
lemma id_ASSUME[autoref_op_pat_def]:
"do {ASSUME \<Phi>; m} \<equiv> OP (op_nres_ASSUME_bnd \<Phi>)$m"
by simp
end
lemma autoref_SUCCEED[autoref_rules]: "(SUCCEED,SUCCEED) \<in> \<langle>R\<rangle>nres_rel"
by (auto simp: nres_rel_def)
lemma autoref_FAIL[autoref_rules]: "(FAIL,FAIL) \<in> \<langle>R\<rangle>nres_rel"
by (auto simp: nres_rel_def)
lemma autoref_RETURN[autoref_rules]:
"(RETURN,RETURN) \<in> R \<rightarrow> \<langle>R\<rangle>nres_rel"
by (auto simp: nres_rel_def RETURN_refine)
lemma autoref_bind[autoref_rules]:
"(bind,bind) \<in> \<langle>R1\<rangle>nres_rel \<rightarrow> (R1\<rightarrow>\<langle>R2\<rangle>nres_rel) \<rightarrow> \<langle>R2\<rangle>nres_rel"
apply (intro fun_relI)
apply (rule nres_relI)
apply (rule bind_refine)
apply (erule nres_relD)
apply (erule (1) fun_relD[THEN nres_relD])
done
context begin interpretation autoref_syn .
lemma autoref_ASSERT[autoref_rules]:
assumes "\<Phi> \<Longrightarrow> (m',m)\<in>\<langle>R\<rangle>nres_rel"
shows "(
m',
(OP (op_nres_ASSERT_bnd \<Phi>) ::: \<langle>R\<rangle>nres_rel \<rightarrow> \<langle>R\<rangle>nres_rel) $ m)\<in>\<langle>R\<rangle>nres_rel"
using assms unfolding nres_rel_def
by (simp add: ASSERT_refine_right)
lemma autoref_ASSUME[autoref_rules]:
assumes "SIDE_PRECOND \<Phi>"
assumes "\<Phi> \<Longrightarrow> (m',m)\<in>\<langle>R\<rangle>nres_rel"
shows "(
m',
(OP (op_nres_ASSUME_bnd \<Phi>) ::: \<langle>R\<rangle>nres_rel \<rightarrow> \<langle>R\<rangle>nres_rel) $ m)\<in>\<langle>R\<rangle>nres_rel"
using assms unfolding nres_rel_def
by (simp add: ASSUME_refine_right)
lemma autoref_REC[autoref_rules]:
assumes "(B,B')\<in>(Ra\<rightarrow>\<langle>Rr\<rangle>nres_rel) \<rightarrow> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel"
assumes "DEFER trimono B"
shows "(REC B,
(OP REC
::: ((Ra\<rightarrow>\<langle>Rr\<rangle>nres_rel) \<rightarrow> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel) \<rightarrow> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel)$B'
) \<in> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel"
apply (intro fun_relI)
using assms
apply (auto simp: nres_rel_def intro!: REC_refine)
apply (simp add: fun_rel_def)
apply blast
done
theorem param_RECT[param]:
assumes "(B, B') \<in> (Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel) \<rightarrow> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel"
and "trimono B"
shows "(REC\<^sub>T B, REC\<^sub>T B')\<in> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel"
apply (intro fun_relI)
using assms
apply (auto simp: nres_rel_def intro!: RECT_refine)
apply (simp add: fun_rel_def)
apply blast
done
lemma autoref_RECT[autoref_rules]:
assumes "(B,B') \<in> (Ra\<rightarrow>\<langle>Rr\<rangle>nres_rel) \<rightarrow> Ra\<rightarrow>\<langle>Rr\<rangle>nres_rel"
assumes "DEFER trimono B"
shows "(RECT B,
(OP RECT
::: ((Ra\<rightarrow>\<langle>Rr\<rangle>nres_rel) \<rightarrow> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel) \<rightarrow> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel)$B'
) \<in> Ra \<rightarrow> \<langle>Rr\<rangle>nres_rel"
using assms
unfolding autoref_tag_defs
by (rule param_RECT)
end
subsection \<open>Convenience Rules\<close>
text \<open>
In this section, we define some lemmas that simplify common prover tasks.
\<close>
lemma ref_two_step: "A\<le>\<Down>R B \<Longrightarrow> B\<le>C \<Longrightarrow> A\<le>\<Down>R C"
by (rule conc_trans_additional)
lemma pw_ref_iff:
shows "S \<le> \<Down>R S'
\<longleftrightarrow> (nofail S'
\<longrightarrow> nofail S \<and> (\<forall>x. inres S x \<longrightarrow> (\<exists>s'. (x, s') \<in> R \<and> inres S' s')))"
by (simp add: pw_le_iff refine_pw_simps)
lemma pw_ref_I:
assumes "nofail S'
\<longrightarrow> nofail S \<and> (\<forall>x. inres S x \<longrightarrow> (\<exists>s'. (x, s') \<in> R \<and> inres S' s'))"
shows "S \<le> \<Down>R S'"
using assms
by (simp add: pw_ref_iff)
text \<open>Introduce an abstraction relation. Usage:
\<open>rule introR[where R=absRel]\<close>
\<close>
lemma introR: "(a,a')\<in>R \<Longrightarrow> (a,a')\<in>R" .
lemma intro_prgR: "c \<le> \<Down>R a \<Longrightarrow> c \<le> \<Down>R a" by auto
lemma refine_IdI: "m \<le> m' \<Longrightarrow> m \<le> \<Down>Id m'" by simp
lemma le_ASSERTI_pres:
assumes "\<Phi> \<Longrightarrow> S \<le> do {ASSERT \<Phi>; S'}"
shows "S \<le> do {ASSERT \<Phi>; S'}"
using assms by (auto intro: le_ASSERTI)
lemma RETURN_ref_SPECD:
assumes "RETURN c \<le> \<Down>R (SPEC \<Phi>)"
obtains a where "(c,a)\<in>R" "\<Phi> a"
using assms
by (auto simp: pw_le_iff refine_pw_simps)
lemma RETURN_ref_RETURND:
assumes "RETURN c \<le> \<Down>R (RETURN a)"
shows "(c,a)\<in>R"
using assms
apply (auto simp: pw_le_iff refine_pw_simps)
done
lemma return_refine_prop_return:
assumes "nofail m"
assumes "RETURN x \<le> \<Down>R m"
obtains x' where "(x,x')\<in>R" "RETURN x' \<le> m"
using assms
by (auto simp: refine_pw_simps pw_le_iff)
lemma ignore_snd_refine_conv:
"(m \<le> \<Down>(R\<times>\<^sub>rUNIV) m') \<longleftrightarrow> m\<bind>(RETURN o fst) \<le>\<Down>R (m'\<bind>(RETURN o fst))"
by (auto simp: pw_le_iff refine_pw_simps)
lemma ret_le_down_conv:
"nofail m \<Longrightarrow> RETURN c \<le> \<Down>R m \<longleftrightarrow> (\<exists>a. (c,a)\<in>R \<and> RETURN a \<le> m)"
by (auto simp: pw_le_iff refine_pw_simps)
lemma SPEC_eq_is_RETURN:
"SPEC ((=) x) = RETURN x"
"SPEC (\<lambda>x. x=y) = RETURN y"
by (auto simp: RETURN_def)
lemma RETURN_SPEC_conv: "RETURN r = SPEC (\<lambda>x. x=r)"
by (simp add: RETURN_def)
lemma refine2spec_aux:
"a \<le> \<Down>R b \<longleftrightarrow> ( (nofail b \<longrightarrow> a \<le> SPEC ( \<lambda>r. (\<exists>x. inres b x \<and> (r,x)\<in>R) )) )"
by (auto simp: pw_le_iff refine_pw_simps)
lemma build_rel_SPEC_conv: "\<Down>(br \<alpha> I) (SPEC \<Phi>) = SPEC (\<lambda>x. I x \<and> \<Phi> (\<alpha> x))"
by (auto simp: br_def pw_eq_iff refine_pw_simps)
lemma refine_IdD: "c \<le> \<Down>Id a \<Longrightarrow> c \<le> a" by simp
lemma bind_sim_select_rule:
assumes "m\<bind>f' \<le> SPEC \<Psi>"
assumes "\<And>x. \<lbrakk>nofail m; inres m x; f' x\<le>SPEC \<Psi>\<rbrakk> \<Longrightarrow> f x\<le>SPEC \<Phi>"
shows "m\<bind>f \<le> SPEC \<Phi>"
\<comment> \<open>Simultaneously select a result from the assumption and the verification
    goal. Useful for working with assumptions that restrict the program
    currently being verified.\<close>
using assms
by (auto simp: pw_le_iff refine_pw_simps)
lemma assert_bind_spec_conv: "ASSERT \<Phi> \<then> m \<le> SPEC \<Psi> \<longleftrightarrow> (\<Phi> \<and> m \<le> SPEC \<Psi>)"
\<comment> \<open>Simplify a bind-assert verification condition.
Useful if this occurs in the assumptions, and considerably faster than
using pointwise reasoning, which may cause a blowup for many chained
assertions.\<close>
by (auto simp: pw_le_iff refine_pw_simps)
lemma summarize_ASSERT_conv: "do {ASSERT \<Phi>; ASSERT \<Psi>; m} = do {ASSERT (\<Phi> \<and> \<Psi>); m}"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma bind_ASSERT_eq_if: "do { ASSERT \<Phi>; m } = (if \<Phi> then m else FAIL)"
by auto
lemma le_RES_nofailI:
assumes "a\<le>RES x"
shows "nofail a"
using assms
by (metis nofail_simps(2) pwD1)
lemma add_invar_refineI:
assumes "f x \<le>\<Down>R (f' x')"
and "nofail (f x) \<Longrightarrow> f x \<le> SPEC I"
shows "f x \<le> \<Down> {(c, a). (c, a) \<in> R \<and> I c} (f' x')"
using assms
by (simp add: pw_le_iff refine_pw_simps sv_add_invar)
lemma bind_RES_RETURN_eq: "bind (RES X) (\<lambda>x. RETURN (f x)) =
RES { f x | x. x\<in>X }"
by (simp add: pw_eq_iff refine_pw_simps)
blast
lemma bind_RES_RETURN2_eq: "bind (RES X) (\<lambda>(x,y). RETURN (f x y)) =
RES { f x y | x y. (x,y)\<in>X }"
apply (simp add: pw_eq_iff refine_pw_simps)
apply blast
done
lemma le_SPEC_bindI:
assumes "\<Phi> x"
assumes "m \<le> f x"
shows "m \<le> SPEC \<Phi> \<bind> f"
using assms by (auto simp add: pw_le_iff refine_pw_simps)
lemma bind_assert_refine:
assumes "m1 \<le> SPEC \<Phi>"
assumes "\<And>x. \<Phi> x \<Longrightarrow> m2 x \<le> m'"
shows "do {x\<leftarrow>m1; ASSERT (\<Phi> x); m2 x} \<le> m'"
using assms
by (simp add: pw_le_iff refine_pw_simps) blast
lemma RETURN_refine_iff[simp]: "RETURN x \<le>\<Down>R (RETURN y) \<longleftrightarrow> (x,y)\<in>R"
by (auto simp: pw_le_iff refine_pw_simps)
lemma RETURN_RES_refine_iff:
"RETURN x \<le>\<Down>R (RES Y) \<longleftrightarrow> (\<exists>y\<in>Y. (x,y)\<in>R)"
by (auto simp: pw_le_iff refine_pw_simps)
lemma RETURN_RES_refine:
assumes "\<exists>x'. (x,x')\<in>R \<and> x'\<in>X"
shows "RETURN x \<le> \<Down>R (RES X)"
using assms
by (auto simp: pw_le_iff refine_pw_simps)
lemma in_nres_rel_iff: "(a,b)\<in>\<langle>R\<rangle>nres_rel \<longleftrightarrow> a \<le>\<Down>R b"
by (auto simp: nres_rel_def)
lemma inf_RETURN_RES:
"inf (RETURN x) (RES X) = (if x\<in>X then RETURN x else SUCCEED)"
"inf (RES X) (RETURN x) = (if x\<in>X then RETURN x else SUCCEED)"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma inf_RETURN_SPEC[simp]:
"inf (RETURN x) (SPEC (\<lambda>y. \<Phi> y)) = SPEC (\<lambda>y. y=x \<and> \<Phi> x)"
"inf (SPEC (\<lambda>y. \<Phi> y)) (RETURN x) = SPEC (\<lambda>y. y=x \<and> \<Phi> x)"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma RES_sng_eq_RETURN: "RES {x} = RETURN x"
by simp
lemma nofail_inf_serialize:
"\<lbrakk>nofail a; nofail b\<rbrakk> \<Longrightarrow> inf a b = do {x\<leftarrow>a; ASSUME (inres b x); RETURN x}"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma conc_fun_SPEC:
"\<Down>R (SPEC (\<lambda>x. \<Phi> x)) = SPEC (\<lambda>y. \<exists>x. (y,x)\<in>R \<and> \<Phi> x)"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma conc_fun_RETURN:
"\<Down>R (RETURN x) = SPEC (\<lambda>y. (y,x)\<in>R)"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma use_spec_rule:
assumes "m \<le> SPEC \<Psi>"
assumes "m \<le> SPEC (\<lambda>s. \<Psi> s \<longrightarrow> \<Phi> s)"
shows "m \<le> SPEC \<Phi>"
using assms
by (auto simp: pw_le_iff refine_pw_simps)
lemma strengthen_SPEC: "m \<le> SPEC \<Phi> \<Longrightarrow> m \<le> SPEC(\<lambda>s. inres m s \<and> nofail m \<and> \<Phi> s)"
\<comment> \<open>Strengthen SPEC by adding a trivial upper bound for the result\<close>
by (auto simp: pw_le_iff refine_pw_simps)
lemma weaken_SPEC:
"m \<le> SPEC \<Phi> \<Longrightarrow> (\<And>x. \<Phi> x \<Longrightarrow> \<Psi> x) \<Longrightarrow> m \<le> SPEC \<Psi>"
by (force elim!: order_trans)
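text \<open>A sketch: weakening a postcondition.\<close>
lemma "m \<le> SPEC (\<lambda>x::nat. x = 3) \<Longrightarrow> m \<le> SPEC (\<lambda>x. 0 < x)"
  by (erule weaken_SPEC) simp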
lemma bind_le_nofailI:
assumes "nofail m"
assumes "\<And>x. RETURN x \<le> m \<Longrightarrow> f x \<le> m'"
shows "m\<bind>f \<le> m'"
using assms
by (simp add: refine_pw_simps pw_le_iff) blast
lemma bind_le_shift:
"bind m f \<le> m'
\<longleftrightarrow> m \<le> (if nofail m' then SPEC (\<lambda>x. f x \<le> m') else FAIL)"
by (auto simp: pw_le_iff refine_pw_simps)
lemma If_bind_distrib[simp]:
fixes t e :: "'a nres"
shows "(If b t e \<bind> (\<lambda>x. f x)) = (If b (t\<bind>(\<lambda>x. f x)) (e\<bind>(\<lambda>x. f x)))"
by simp
(* TODO: Can we make this a simproc, using NO_MATCH? *)
lemma unused_bind_conv:
assumes "NO_MATCH (ASSERT \<Phi>) m"
assumes "NO_MATCH (ASSUME \<Phi>) m"
shows "(m\<bind>(\<lambda>x. c)) = (ASSERT (nofail m) \<bind> (\<lambda>_. ASSUME (\<exists>x. inres m x) \<bind> (\<lambda>x. c)))"
by (auto simp: pw_eq_iff refine_pw_simps)
text \<open>The following rules are useful for massaging programs before the
refinement takes place\<close>
lemma let_to_bind_conv:
"Let x f = RETURN x\<bind>f"
by simp
lemmas bind_to_let_conv = let_to_bind_conv[symmetric]
lemma pull_out_let_conv: "RETURN (Let x f) = Let x (\<lambda>x. RETURN (f x))"
by simp
lemma push_in_let_conv:
"Let x (\<lambda>x. RETURN (f x)) = RETURN (Let x f)"
"Let x (RETURN o f) = RETURN (Let x f)"
by simp_all
lemma pull_out_RETURN_case_option:
"case_option (RETURN a) (\<lambda>v. RETURN (f v)) x = RETURN (case_option a f x)"
by (auto split: option.splits)
lemma if_bind_cond_refine:
assumes "ci \<le> RETURN b"
assumes "b \<Longrightarrow> ti\<le>\<Down>R t"
assumes "\<not>b \<Longrightarrow> ei\<le>\<Down>R e"
shows "do {b\<leftarrow>ci; if b then ti else ei} \<le> \<Down>R (if b then t else e)"
using assms
by (auto simp add: refine_pw_simps pw_le_iff)
lemma intro_RETURN_Let_refine:
assumes "RETURN (f x) \<le> \<Down>R M'"
shows "RETURN (Let x f) \<le> \<Down>R M'"
(* this should be needed very rarely - so don't add it *)
using assms by auto
lemma ife_FAIL_to_ASSERT_cnv:
"(if \<Phi> then m else FAIL) = op_nres_ASSERT_bnd \<Phi> m"
by (cases \<Phi>, auto)
lemma nres_bind_let_law: "(do { x \<leftarrow> do { let y=v; f y }; g x } :: _ nres)
= do { let y=v; x\<leftarrow> f y; g x }" by auto
lemma unused_bind_RES_ne[simp]: "X\<noteq>{} \<Longrightarrow> do { _ \<leftarrow> RES X; m} = m"
by (auto simp: pw_eq_iff refine_pw_simps)
lemma le_ASSERT_defI1:
assumes "c \<equiv> do {ASSERT \<Phi>; m}"
assumes "\<Phi> \<Longrightarrow> m' \<le> c"
shows "m' \<le> c"
using assms
by (simp add: le_ASSERTI)
lemma refine_ASSERT_defI1:
assumes "c \<equiv> do {ASSERT \<Phi>; m}"
assumes "\<Phi> \<Longrightarrow> m' \<le> \<Down>R c"
shows "m' \<le> \<Down>R c"
using assms
by (simp, refine_vcg)
lemma le_ASSERT_defI2:
assumes "c \<equiv> do {ASSERT \<Phi>; ASSERT \<Psi>; m}"
assumes "\<lbrakk>\<Phi>; \<Psi>\<rbrakk> \<Longrightarrow> m' \<le> c"
shows "m' \<le> c"
using assms
by (simp add: le_ASSERTI)
lemma refine_ASSERT_defI2:
assumes "c \<equiv> do {ASSERT \<Phi>; ASSERT \<Psi>; m}"
assumes "\<lbrakk>\<Phi>; \<Psi>\<rbrakk> \<Longrightarrow> m' \<le> \<Down>R c"
shows "m' \<le> \<Down>R c"
using assms
by (simp, refine_vcg)
lemma ASSERT_le_defI:
assumes "c \<equiv> do { ASSERT \<Phi>; m'}"
assumes "\<Phi>"
assumes "\<Phi> \<Longrightarrow> m' \<le> m"
shows "c \<le> m"
using assms by (auto)
lemma ASSERT_same_eq_conv: "(ASSERT \<Phi> \<then> m) = (ASSERT \<Phi> \<then> n) \<longleftrightarrow> (\<Phi> \<longrightarrow> m=n)"
by auto
lemma case_prod_bind_simp[simp]: "
(\<lambda>x. (case x of (a, b) \<Rightarrow> f a b) \<le> SPEC \<Phi>) = (\<lambda>(a,b). f a b \<le> SPEC \<Phi>)"
by auto
lemma RECT_eq_REC': "nofail (RECT B x) \<Longrightarrow> RECT B x = REC B x"
by (subst RECT_eq_REC; simp_all add: nofail_def)
lemma rel2p_nres_RETURN[rel2p]: "rel2p (\<langle>A\<rangle>nres_rel) (RETURN x) (RETURN y) = rel2p A x y"
by (auto simp: rel2p_def dest: nres_relD intro: nres_relI)
subsubsection \<open>Boolean Operations on Specifications\<close>
lemma SPEC_iff:
assumes "P \<le> SPEC (\<lambda>s. Q s \<longrightarrow> R s)"
and "P \<le> SPEC (\<lambda>s. \<not> Q s \<longrightarrow> \<not> R s)"
shows "P \<le> SPEC (\<lambda>s. Q s \<longleftrightarrow> R s)"
using assms[THEN pw_le_iff[THEN iffD1]]
by (auto intro!: pw_leI)
lemma SPEC_rule_conjI:
assumes "A \<le> SPEC P" and "A \<le> SPEC Q"
shows "A \<le> SPEC (\<lambda>v. P v \<and> Q v)"
proof -
have "A \<le> inf (SPEC P) (SPEC Q)" using assms by (rule_tac inf_greatest) assumption
thus ?thesis by (auto simp add:Collect_conj_eq)
qed
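text \<open>A sketch: combining two specifications of the same program.\<close>
lemma
  assumes "A \<le> SPEC (\<lambda>x::nat. 0 < x)" and "A \<le> SPEC (\<lambda>x. x < 10)"
  shows "A \<le> SPEC (\<lambda>x. 0 < x \<and> x < 10)"
  using assms by (rule SPEC_rule_conjI)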
lemma SPEC_rule_conjunct1:
assumes "A \<le> SPEC (\<lambda>v. P v \<and> Q v)"
shows "A \<le> SPEC P"
proof -
note assms
also have "\<dots> \<le> SPEC P" by (rule SPEC_rule) auto
finally show ?thesis .
qed
lemma SPEC_rule_conjunct2:
assumes "A \<le> SPEC (\<lambda>v. P v \<and> Q v)"
shows "A \<le> SPEC Q"
proof -
note assms
also have "\<dots> \<le> SPEC Q" by (rule SPEC_rule) auto
finally show ?thesis .
qed
subsubsection \<open>Pointwise Reasoning\<close>
lemma inres_if:
"\<lbrakk> inres (if P then Q else R) x; \<lbrakk>P; inres Q x\<rbrakk> \<Longrightarrow> S; \<lbrakk>\<not> P; inres R x\<rbrakk> \<Longrightarrow> S \<rbrakk> \<Longrightarrow> S"
by (metis (full_types))
lemma inres_SPEC:
"inres M x \<Longrightarrow> M \<le> SPEC \<Phi> \<Longrightarrow> \<Phi> x"
by (auto dest: pwD2)
lemma SPEC_nofail:
"X \<le> SPEC \<Phi> \<Longrightarrow> nofail X"
by (auto dest: pwD1)
lemma nofail_SPEC: "nofail m \<Longrightarrow> m \<le> SPEC (\<lambda>_. True)"
by (simp add: pw_le_iff)
lemma nofail_SPEC_iff: "nofail m \<longleftrightarrow> m \<le> SPEC (\<lambda>_. True)"
by (simp add: pw_le_iff)
lemma nofail_SPEC_triv_refine: "\<lbrakk> nofail m; \<And>x. \<Phi> x \<rbrakk> \<Longrightarrow> m \<le> SPEC \<Phi>"
by (simp add: pw_le_iff)
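text \<open>A closing sketch: pointwise reasoning discharges simple
  specification goals directly.\<close>
lemma "RES {1::nat, 2} \<le> SPEC (\<lambda>x. 0 < x)"
  by (auto simp: pw_le_iff refine_pw_simps)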
end
[STATEMENT]
lemma remove1_mset: "t \<in> set q \<Longrightarrow>
queue_to_multiset (remove1 t q) =
queue_to_multiset q - tree_to_multiset t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \<in> set q \<Longrightarrow> queue_to_multiset (remove1 t q) = queue_to_multiset q - tree_to_multiset t
[PROOF STEP]
by (induct q) (auto simp: qtm_in_set_subset)
[STATEMENT]
lemma "(-x < y) = (0 < x + (y::real))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (- x < y) = (0 < x + y)
[PROOF STEP]
by arith
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
⊢ ↑(hasseDeriv k) f = sum f fun i r => ↑(monomial (i - k)) (↑(choose i k) * r)
[PROOFSTEP]
dsimp [hasseDeriv]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
⊢ (sum f fun x x_1 => ↑(monomial (x - k)) (choose x k • x_1)) = sum f fun i r => ↑(monomial (i - k)) (↑(choose i k) * r)
[PROOFSTEP]
congr
[GOAL]
case e_f
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
⊢ (fun x x_1 => ↑(monomial (x - k)) (choose x k • x_1)) = fun i r => ↑(monomial (i - k)) (↑(choose i k) * r)
[PROOFSTEP]
ext
[GOAL]
case e_f.h.h.a
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
x✝¹ : ℕ
x✝ : R
n✝ : ℕ
⊢ coeff (↑(monomial (x✝¹ - k)) (choose x✝¹ k • x✝)) n✝ = coeff (↑(monomial (x✝¹ - k)) (↑(choose x✝¹ k) * x✝)) n✝
[PROOFSTEP]
congr
[GOAL]
case e_f.h.h.a.e_a.h.e_6.h
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
x✝¹ : ℕ
x✝ : R
n✝ : ℕ
⊢ choose x✝¹ k • x✝ = ↑(choose x✝¹ k) * x✝
[PROOFSTEP]
apply nsmul_eq_mul
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
⊢ coeff (↑(hasseDeriv k) f) n = ↑(choose (n + k) k) * coeff f (n + k)
[PROOFSTEP]
rw [hasseDeriv_apply, coeff_sum, sum_def, Finset.sum_eq_single (n + k), coeff_monomial]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
⊢ (if n + k - k = n then ↑(choose (n + k) k) * coeff f (n + k) else 0) = ↑(choose (n + k) k) * coeff f (n + k)
[PROOFSTEP]
simp only [if_true, add_tsub_cancel_right, eq_self_iff_true]
[GOAL]
case h₀
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
⊢ ∀ (b : ℕ), b ∈ support f → b ≠ n + k → coeff (↑(monomial (b - k)) (↑(choose b k) * coeff f b)) n = 0
[PROOFSTEP]
intro i _hi hink
[GOAL]
case h₀
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n i : ℕ
_hi : i ∈ support f
hink : i ≠ n + k
⊢ coeff (↑(monomial (i - k)) (↑(choose i k) * coeff f i)) n = 0
[PROOFSTEP]
rw [coeff_monomial]
[GOAL]
case h₀
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n i : ℕ
_hi : i ∈ support f
hink : i ≠ n + k
⊢ (if i - k = n then ↑(choose i k) * coeff f i else 0) = 0
[PROOFSTEP]
by_cases hik : i < k
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n i : ℕ
_hi : i ∈ support f
hink : i ≠ n + k
hik : i < k
⊢ (if i - k = n then ↑(choose i k) * coeff f i else 0) = 0
[PROOFSTEP]
simp only [Nat.choose_eq_zero_of_lt hik, ite_self, Nat.cast_zero, zero_mul]
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n i : ℕ
_hi : i ∈ support f
hink : i ≠ n + k
hik : ¬i < k
⊢ (if i - k = n then ↑(choose i k) * coeff f i else 0) = 0
[PROOFSTEP]
push_neg at hik
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n i : ℕ
_hi : i ∈ support f
hink : i ≠ n + k
hik : k ≤ i
⊢ (if i - k = n then ↑(choose i k) * coeff f i else 0) = 0
[PROOFSTEP]
rw [if_neg]
[GOAL]
case neg.hnc
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n i : ℕ
_hi : i ∈ support f
hink : i ≠ n + k
hik : k ≤ i
⊢ ¬i - k = n
[PROOFSTEP]
contrapose! hink
[GOAL]
case neg.hnc
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n i : ℕ
_hi : i ∈ support f
hik : k ≤ i
hink : i - k = n
⊢ i = n + k
[PROOFSTEP]
exact (tsub_eq_iff_eq_add_of_le hik).mp hink
[GOAL]
case h₁
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
⊢ ¬n + k ∈ support f → coeff (↑(monomial (n + k - k)) (↑(choose (n + k) k) * coeff f (n + k))) n = 0
[PROOFSTEP]
intro h
[GOAL]
case h₁
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
h : ¬n + k ∈ support f
⊢ coeff (↑(monomial (n + k - k)) (↑(choose (n + k) k) * coeff f (n + k))) n = 0
[PROOFSTEP]
simp only [not_mem_support_iff.mp h, monomial_zero_right, mul_zero, coeff_zero]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
⊢ ↑(hasseDeriv 0) f = f
[PROOFSTEP]
simp only [hasseDeriv_apply, tsub_zero, Nat.choose_zero_right, Nat.cast_one, one_mul, sum_monomial_eq]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
h : natDegree p < n
⊢ ↑(hasseDeriv n) p = 0
[PROOFSTEP]
rw [hasseDeriv_apply, sum_def]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
h : natDegree p < n
⊢ ∑ n_1 in support p, ↑(monomial (n_1 - n)) (↑(choose n_1 n) * coeff p n_1) = 0
[PROOFSTEP]
refine' Finset.sum_eq_zero fun x hx => _
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
h : natDegree p < n
x : ℕ
hx : x ∈ support p
⊢ ↑(monomial (x - n)) (↑(choose x n) * coeff p x) = 0
[PROOFSTEP]
simp [Nat.choose_eq_zero_of_lt ((le_natDegree_of_mem_supp _ hx).trans_lt h)]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
⊢ ↑(hasseDeriv 1) f = ↑derivative f
[PROOFSTEP]
simp only [hasseDeriv_apply, derivative_apply, ← C_mul_X_pow_eq_monomial, Nat.choose_one_right,
(Nat.cast_commute _ _).eq]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
⊢ ↑(hasseDeriv k) (↑(monomial n) r) = ↑(monomial (n - k)) (↑(choose n k) * r)
[PROOFSTEP]
ext i
[GOAL]
case a
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
⊢ coeff (↑(hasseDeriv k) (↑(monomial n) r)) i = coeff (↑(monomial (n - k)) (↑(choose n k) * r)) i
[PROOFSTEP]
simp only [hasseDeriv_coeff, coeff_monomial]
[GOAL]
case a
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
⊢ (↑(choose (i + k) k) * if n = i + k then r else 0) = if n - k = i then ↑(choose n k) * r else 0
[PROOFSTEP]
by_cases hnik : n = i + k
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : n = i + k
⊢ (↑(choose (i + k) k) * if n = i + k then r else 0) = if n - k = i then ↑(choose n k) * r else 0
[PROOFSTEP]
rw [if_pos hnik, if_pos, ← hnik]
[GOAL]
case pos.hc
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : n = i + k
⊢ n - k = i
[PROOFSTEP]
apply tsub_eq_of_eq_add_rev
[GOAL]
case pos.hc.h
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : n = i + k
⊢ n = k + i
[PROOFSTEP]
rwa [add_comm]
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : ¬n = i + k
⊢ (↑(choose (i + k) k) * if n = i + k then r else 0) = if n - k = i then ↑(choose n k) * r else 0
[PROOFSTEP]
rw [if_neg hnik, mul_zero]
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : ¬n = i + k
⊢ 0 = if n - k = i then ↑(choose n k) * r else 0
[PROOFSTEP]
by_cases hkn : k ≤ n
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : ¬n = i + k
hkn : k ≤ n
⊢ 0 = if n - k = i then ↑(choose n k) * r else 0
[PROOFSTEP]
rw [← tsub_eq_iff_eq_add_of_le hkn] at hnik
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : ¬n - k = i
hkn : k ≤ n
⊢ 0 = if n - k = i then ↑(choose n k) * r else 0
[PROOFSTEP]
rw [if_neg hnik]
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : ¬n = i + k
hkn : ¬k ≤ n
⊢ 0 = if n - k = i then ↑(choose n k) * r else 0
[PROOFSTEP]
push_neg at hkn
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
n : ℕ
r : R
i : ℕ
hnik : ¬n = i + k
hkn : n < k
⊢ 0 = if n - k = i then ↑(choose n k) * r else 0
[PROOFSTEP]
rw [Nat.choose_eq_zero_of_lt hkn, Nat.cast_zero, zero_mul, ite_self]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
r : R
hk : 0 < k
⊢ ↑(hasseDeriv k) (↑C r) = 0
[PROOFSTEP]
rw [← monomial_zero_left, hasseDeriv_monomial, Nat.choose_eq_zero_of_lt hk, Nat.cast_zero, zero_mul,
monomial_zero_right]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
hk : 0 < k
⊢ ↑(hasseDeriv k) 1 = 0
[PROOFSTEP]
rw [← C_1, hasseDeriv_C k _ hk]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
hk : 1 < k
⊢ ↑(hasseDeriv k) X = 0
[PROOFSTEP]
rw [← monomial_one_one_eq_X, hasseDeriv_monomial, Nat.choose_eq_zero_of_lt hk, Nat.cast_zero, zero_mul,
monomial_zero_right]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
⊢ ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
[PROOFSTEP]
induction' k with k ih
[GOAL]
case zero
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
⊢ ↑(Nat.zero ! • hasseDeriv Nat.zero) = (↑derivative)^[Nat.zero]
[PROOFSTEP]
rw [hasseDeriv_zero, factorial_zero, iterate_zero, one_smul, LinearMap.id_coe]
[GOAL]
case succ
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
⊢ ↑((succ k)! • hasseDeriv (succ k)) = (↑derivative)^[succ k]
[PROOFSTEP]
ext f n : 2
[GOAL]
case succ.h.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ coeff (↑((succ k)! • hasseDeriv (succ k)) f) n = coeff ((↑derivative)^[succ k] f) n
[PROOFSTEP]
rw [iterate_succ_apply', ← ih]
[GOAL]
case succ.h.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ coeff (↑((succ k)! • hasseDeriv (succ k)) f) n = coeff (↑derivative (↑(k ! • hasseDeriv k) f)) n
[PROOFSTEP]
simp only [LinearMap.smul_apply, coeff_smul, LinearMap.map_smul_of_tower, coeff_derivative, hasseDeriv_coeff, ←
@choose_symm_add _ k]
[GOAL]
case succ.h.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ (succ k)! • (↑(choose (n + succ k) (succ k)) * coeff f (n + succ k)) =
k ! • (↑(choose (n + 1 + k) (n + 1)) * coeff f (n + 1 + k) * (↑n + 1))
[PROOFSTEP]
simp only [nsmul_eq_mul, factorial_succ, mul_assoc, succ_eq_add_one, ← add_assoc, add_right_comm n 1 k, ← cast_succ]
[GOAL]
case succ.h.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ ↑((k + 1) * k !) * (↑(choose (n + k + 1) (k + 1)) * coeff f (n + k + 1)) =
↑k ! * (↑(choose (n + k + 1) (n + 1)) * (coeff f (n + k + 1) * ↑(n + 1)))
[PROOFSTEP]
rw [← (cast_commute (n + 1) (f.coeff (n + k + 1))).eq]
[GOAL]
case succ.h.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ ↑((k + 1) * k !) * (↑(choose (n + k + 1) (k + 1)) * coeff f (n + k + 1)) =
↑k ! * (↑(choose (n + k + 1) (n + 1)) * (↑(n + 1) * coeff f (n + k + 1)))
[PROOFSTEP]
simp only [← mul_assoc]
[GOAL]
case succ.h.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ ↑((k + 1) * k !) * ↑(choose (n + k + 1) (k + 1)) * coeff f (n + k + 1) =
↑k ! * ↑(choose (n + k + 1) (n + 1)) * ↑(n + 1) * coeff f (n + k + 1)
[PROOFSTEP]
norm_cast
[GOAL]
case succ.h.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ ↑((k + 1) * k ! * choose (n + k + 1) (k + 1)) * coeff f (n + k + 1) =
↑(k ! * choose (n + k + 1) (n + 1) * (n + 1)) * coeff f (n + k + 1)
[PROOFSTEP]
congr 2
[GOAL]
case succ.h.a.e_a.e_a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ (k + 1) * k ! * choose (n + k + 1) (k + 1) = k ! * choose (n + k + 1) (n + 1) * (n + 1)
[PROOFSTEP]
rw [mul_comm (k + 1) _, mul_assoc, mul_assoc]
[GOAL]
case succ.h.a.e_a.e_a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ k ! * ((k + 1) * choose (n + k + 1) (k + 1)) = k ! * (choose (n + k + 1) (n + 1) * (n + 1))
[PROOFSTEP]
congr 1
[GOAL]
case succ.h.a.e_a.e_a.e_a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ (k + 1) * choose (n + k + 1) (k + 1) = choose (n + k + 1) (n + 1) * (n + 1)
[PROOFSTEP]
have : n + k + 1 = n + (k + 1) := by apply add_assoc
[GOAL]
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
⊢ n + k + 1 = n + (k + 1)
[PROOFSTEP]
apply add_assoc
[GOAL]
case succ.h.a.e_a.e_a.e_a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
this : n + k + 1 = n + (k + 1)
⊢ (k + 1) * choose (n + k + 1) (k + 1) = choose (n + k + 1) (n + 1) * (n + 1)
[PROOFSTEP]
rw [← choose_symm_of_eq_add this, choose_succ_right_eq, mul_comm]
[GOAL]
case succ.h.a.e_a.e_a.e_a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
this : n + k + 1 = n + (k + 1)
⊢ choose (n + k + 1) n * (k + 1) = choose (n + k + 1) n * (n + k + 1 - n)
[PROOFSTEP]
congr
[GOAL]
case succ.h.a.e_a.e_a.e_a.e_a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f✝ : R[X]
k : ℕ
ih : ↑(k ! • hasseDeriv k) = (↑derivative)^[k]
f : R[X]
n : ℕ
this : n + k + 1 = n + (k + 1)
⊢ k + 1 = n + k + 1 - n
[PROOFSTEP]
rw [add_assoc, add_tsub_cancel_left]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l : ℕ
⊢ LinearMap.comp (hasseDeriv k) (hasseDeriv l) = choose (k + l) k • hasseDeriv (k + l)
[PROOFSTEP]
ext i : 2
[GOAL]
case h.h
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
⊢ ↑(LinearMap.comp (LinearMap.comp (hasseDeriv k) (hasseDeriv l)) (monomial i)) 1 =
↑(LinearMap.comp (choose (k + l) k • hasseDeriv (k + l)) (monomial i)) 1
[PROOFSTEP]
simp only [LinearMap.smul_apply, comp_apply, LinearMap.coe_comp, smul_monomial, hasseDeriv_apply, mul_one,
monomial_eq_zero_iff, sum_monomial_index, mul_zero, ← tsub_add_eq_tsub_tsub, add_comm l k]
[GOAL]
case h.h
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
⊢ ↑(monomial (i - (k + l))) (↑(choose (i - l) k) * ↑(choose i l)) =
↑(monomial (i - (k + l))) (choose (k + l) k • ↑(choose i (k + l)))
[PROOFSTEP]
rw_mod_cast [nsmul_eq_mul]
[GOAL]
case h.h
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
⊢ ↑(monomial (i - (k + l))) ↑(choose (i - l) k * choose i l) =
↑(monomial (i - (k + l))) (↑(choose (k + l) k) * ↑(choose i (k + l)))
[PROOFSTEP]
rw [← Nat.cast_mul]
[GOAL]
case h.h
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
⊢ ↑(monomial (i - (k + l))) ↑(choose (i - l) k * choose i l) =
↑(monomial (i - (k + l))) ↑(choose (k + l) k * choose i (k + l))
[PROOFSTEP]
congr 2
[GOAL]
case h.h.h.e_6.h.e_a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
⊢ choose (i - l) k * choose i l = choose (k + l) k * choose i (k + l)
[PROOFSTEP]
by_cases hikl : i < k + l
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : i < k + l
⊢ choose (i - l) k * choose i l = choose (k + l) k * choose i (k + l)
[PROOFSTEP]
rw [choose_eq_zero_of_lt hikl, mul_zero]
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : i < k + l
⊢ choose (i - l) k * choose i l = 0
[PROOFSTEP]
by_cases hil : i < l
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : i < k + l
hil : i < l
⊢ choose (i - l) k * choose i l = 0
[PROOFSTEP]
rw [choose_eq_zero_of_lt hil, mul_zero]
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : i < k + l
hil : ¬i < l
⊢ choose (i - l) k * choose i l = 0
[PROOFSTEP]
push_neg at hil
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : i < k + l
hil : l ≤ i
⊢ choose (i - l) k * choose i l = 0
[PROOFSTEP]
rw [← tsub_lt_iff_right hil] at hikl
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : i - l < k
hil : l ≤ i
⊢ choose (i - l) k * choose i l = 0
[PROOFSTEP]
rw [choose_eq_zero_of_lt hikl, zero_mul]
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : ¬i < k + l
⊢ choose (i - l) k * choose i l = choose (k + l) k * choose i (k + l)
[PROOFSTEP]
push_neg at hikl
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
⊢ choose (i - l) k * choose i l = choose (k + l) k * choose i (k + l)
[PROOFSTEP]
apply @cast_injective ℚ
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
⊢ ↑(choose (i - l) k * choose i l) = ↑(choose (k + l) k * choose i (k + l))
[PROOFSTEP]
have h1 : l ≤ i := le_of_add_le_right hikl
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
⊢ ↑(choose (i - l) k * choose i l) = ↑(choose (k + l) k * choose i (k + l))
[PROOFSTEP]
have h2 : k ≤ i - l := le_tsub_of_add_le_right hikl
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
⊢ ↑(choose (i - l) k * choose i l) = ↑(choose (k + l) k * choose i (k + l))
[PROOFSTEP]
have h3 : k ≤ k + l := le_self_add
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
⊢ ↑(choose (i - l) k * choose i l) = ↑(choose (k + l) k * choose i (k + l))
[PROOFSTEP]
push_cast
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
⊢ ↑(choose (i - l) k) * ↑(choose i l) = ↑(choose (k + l) k) * ↑(choose i (k + l))
[PROOFSTEP]
rw [cast_choose ℚ h1, cast_choose ℚ h2, cast_choose ℚ h3, cast_choose ℚ hikl]
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
⊢ ↑(i - l)! / (↑k ! * ↑(i - l - k)!) * (↑i ! / (↑l ! * ↑(i - l)!)) =
↑(k + l)! / (↑k ! * ↑(k + l - k)!) * (↑i ! / (↑(k + l)! * ↑(i - (k + l))!))
[PROOFSTEP]
rw [show i - (k + l) = i - l - k by rw [add_comm]; apply tsub_add_eq_tsub_tsub]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
⊢ i - (k + l) = i - l - k
[PROOFSTEP]
rw [add_comm]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
⊢ i - (l + k) = i - l - k
[PROOFSTEP]
apply tsub_add_eq_tsub_tsub
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
⊢ ↑(i - l)! / (↑k ! * ↑(i - l - k)!) * (↑i ! / (↑l ! * ↑(i - l)!)) =
↑(k + l)! / (↑k ! * ↑(k + l - k)!) * (↑i ! / (↑(k + l)! * ↑(i - l - k)!))
[PROOFSTEP]
simp only [add_tsub_cancel_left]
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
⊢ ↑(i - l)! / (↑k ! * ↑(i - l - k)!) * (↑i ! / (↑l ! * ↑(i - l)!)) =
↑(k + l)! / (↑k ! * ↑l !) * (↑i ! / (↑(k + l)! * ↑(i - l - k)!))
[PROOFSTEP]
have H : ∀ n : ℕ, (n ! : ℚ) ≠ 0 := by exact_mod_cast factorial_ne_zero
[GOAL]
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
⊢ ∀ (n : ℕ), ↑n ! ≠ 0
[PROOFSTEP]
exact_mod_cast factorial_ne_zero
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
H : ∀ (n : ℕ), ↑n ! ≠ 0
⊢ ↑(i - l)! / (↑k ! * ↑(i - l - k)!) * (↑i ! / (↑l ! * ↑(i - l)!)) =
↑(k + l)! / (↑k ! * ↑l !) * (↑i ! / (↑(k + l)! * ↑(i - l - k)!))
[PROOFSTEP]
field_simp [H]
[GOAL]
case neg.a
R : Type u_1
inst✝ : Semiring R
k✝ : ℕ
f : R[X]
k l i : ℕ
hikl : k + l ≤ i
h1 : l ≤ i
h2 : k ≤ i - l
h3 : k ≤ k + l
H : ∀ (n : ℕ), ↑n ! ≠ 0
⊢ ↑(i - l)! * ↑i ! * (↑k ! * ↑l ! * (↑(k + l)! * ↑(i - l - k)!)) =
↑(k + l)! * ↑i ! * (↑k ! * ↑(i - l - k)! * (↑l ! * ↑(i - l)!))
[PROOFSTEP]
ring
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
⊢ natDegree (↑(hasseDeriv n) p) ≤ natDegree p - n
[PROOFSTEP]
classical
rw [hasseDeriv_apply, sum_def]
refine' (natDegree_sum_le _ _).trans _
simp_rw [Function.comp, natDegree_monomial]
rw [Finset.fold_ite, Finset.fold_const]
· simp only [ite_self, max_eq_right, zero_le', Finset.fold_max_le, true_and_iff, and_imp, tsub_le_iff_right,
mem_support_iff, Ne.def, Finset.mem_filter]
intro x hx hx'
have hxp : x ≤ p.natDegree := le_natDegree_of_ne_zero hx
have hxn : n ≤ x := by
contrapose! hx'
simp [Nat.choose_eq_zero_of_lt hx']
rwa [tsub_add_cancel_of_le (hxn.trans hxp)]
· simp
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
⊢ natDegree (↑(hasseDeriv n) p) ≤ natDegree p - n
[PROOFSTEP]
rw [hasseDeriv_apply, sum_def]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
⊢ natDegree (∑ n_1 in support p, ↑(monomial (n_1 - n)) (↑(choose n_1 n) * coeff p n_1)) ≤ natDegree p - n
[PROOFSTEP]
refine' (natDegree_sum_le _ _).trans _
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
⊢ Finset.fold max 0 (natDegree ∘ fun n_1 => ↑(monomial (n_1 - n)) (↑(choose n_1 n) * coeff p n_1)) (support p) ≤
natDegree p - n
[PROOFSTEP]
simp_rw [Function.comp, natDegree_monomial]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
⊢ Finset.fold max 0 (fun x => if ↑(choose x n) * coeff p x = 0 then 0 else x - n) (support p) ≤ natDegree p - n
[PROOFSTEP]
rw [Finset.fold_ite, Finset.fold_const]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
⊢ max (if Finset.filter (fun x => ↑(choose x n) * coeff p x = 0) (support p) = ∅ then 0 else max 0 0)
(Finset.fold max 0 (fun x => x - n) (Finset.filter (fun i => ¬↑(choose i n) * coeff p i = 0) (support p))) ≤
natDegree p - n
[PROOFSTEP]
simp only [ite_self, max_eq_right, zero_le', Finset.fold_max_le, true_and_iff, and_imp, tsub_le_iff_right,
mem_support_iff, Ne.def, Finset.mem_filter]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
⊢ ∀ (x : ℕ), ¬coeff p x = 0 → ¬↑(choose x n) * coeff p x = 0 → x ≤ natDegree p - n + n
[PROOFSTEP]
intro x hx hx'
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n x : ℕ
hx : ¬coeff p x = 0
hx' : ¬↑(choose x n) * coeff p x = 0
⊢ x ≤ natDegree p - n + n
[PROOFSTEP]
have hxp : x ≤ p.natDegree := le_natDegree_of_ne_zero hx
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n x : ℕ
hx : ¬coeff p x = 0
hx' : ¬↑(choose x n) * coeff p x = 0
hxp : x ≤ natDegree p
⊢ x ≤ natDegree p - n + n
[PROOFSTEP]
have hxn : n ≤ x := by
contrapose! hx'
simp [Nat.choose_eq_zero_of_lt hx']
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n x : ℕ
hx : ¬coeff p x = 0
hx' : ¬↑(choose x n) * coeff p x = 0
hxp : x ≤ natDegree p
⊢ n ≤ x
[PROOFSTEP]
contrapose! hx'
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n x : ℕ
hx : ¬coeff p x = 0
hxp : x ≤ natDegree p
hx' : x < n
⊢ ↑(choose x n) * coeff p x = 0
[PROOFSTEP]
simp [Nat.choose_eq_zero_of_lt hx']
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n x : ℕ
hx : ¬coeff p x = 0
hx' : ¬↑(choose x n) * coeff p x = 0
hxp : x ≤ natDegree p
hxn : n ≤ x
⊢ x ≤ natDegree p - n + n
[PROOFSTEP]
rwa [tsub_add_cancel_of_le (hxn.trans hxp)]
[GOAL]
case h
R : Type u_1
inst✝ : Semiring R
k : ℕ
f p : R[X]
n : ℕ
⊢ max 0 (max 0 0) = max 0 0
[PROOFSTEP]
simp
[GOAL]
R : Type u_1
inst✝¹ : Semiring R
k : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
⊢ natDegree (↑(hasseDeriv n) p) = natDegree p - n
[PROOFSTEP]
cases' lt_or_le p.natDegree n with hn hn
[GOAL]
case inl
R : Type u_1
inst✝¹ : Semiring R
k : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
hn : natDegree p < n
⊢ natDegree (↑(hasseDeriv n) p) = natDegree p - n
[PROOFSTEP]
simpa [hasseDeriv_eq_zero_of_lt_natDegree, hn] using (tsub_eq_zero_of_le hn.le).symm
[GOAL]
case inr
R : Type u_1
inst✝¹ : Semiring R
k : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
hn : n ≤ natDegree p
⊢ natDegree (↑(hasseDeriv n) p) = natDegree p - n
[PROOFSTEP]
refine' map_natDegree_eq_sub _ _
[GOAL]
case inr.refine'_1
R : Type u_1
inst✝¹ : Semiring R
k : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
hn : n ≤ natDegree p
⊢ ∀ (f : R[X]), natDegree f < n → ↑(hasseDeriv n) f = 0
[PROOFSTEP]
exact fun h => hasseDeriv_eq_zero_of_lt_natDegree _ _
[GOAL]
case inr.refine'_2
R : Type u_1
inst✝¹ : Semiring R
k : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
hn : n ≤ natDegree p
⊢ ∀ (n_1 : ℕ) (c : R), c ≠ 0 → natDegree (↑(hasseDeriv n) (↑(monomial n_1) c)) = n_1 - n
[PROOFSTEP]
classical
simp only [ite_eq_right_iff, Ne.def, natDegree_monomial, hasseDeriv_monomial]
intro k c c0 hh
rw [← nsmul_eq_mul, smul_eq_zero, Nat.choose_eq_zero_iff] at hh
exact (tsub_eq_zero_of_le (Or.resolve_right hh c0).le).symm
[GOAL]
case inr.refine'_2
R : Type u_1
inst✝¹ : Semiring R
k : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
hn : n ≤ natDegree p
⊢ ∀ (n_1 : ℕ) (c : R), c ≠ 0 → natDegree (↑(hasseDeriv n) (↑(monomial n_1) c)) = n_1 - n
[PROOFSTEP]
simp only [ite_eq_right_iff, Ne.def, natDegree_monomial, hasseDeriv_monomial]
[GOAL]
case inr.refine'_2
R : Type u_1
inst✝¹ : Semiring R
k : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
hn : n ≤ natDegree p
⊢ ∀ (n_1 : ℕ) (c : R), ¬c = 0 → ↑(choose n_1 n) * c = 0 → 0 = n_1 - n
[PROOFSTEP]
intro k c c0 hh
[GOAL]
case inr.refine'_2
R : Type u_1
inst✝¹ : Semiring R
k✝ : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
hn : n ≤ natDegree p
k : ℕ
c : R
c0 : ¬c = 0
hh : ↑(choose k n) * c = 0
⊢ 0 = k - n
[PROOFSTEP]
rw [← nsmul_eq_mul, smul_eq_zero, Nat.choose_eq_zero_iff] at hh
[GOAL]
case inr.refine'_2
R : Type u_1
inst✝¹ : Semiring R
k✝ : ℕ
f : R[X]
inst✝ : NoZeroSMulDivisors ℕ R
p : R[X]
n : ℕ
hn : n ≤ natDegree p
k : ℕ
c : R
c0 : ¬c = 0
hh : k < n ∨ c = 0
⊢ 0 = k - n
[PROOFSTEP]
exact (tsub_eq_zero_of_le (Or.resolve_right hh c0).le).symm
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f✝ f g : R[X]
⊢ ↑(hasseDeriv k) (f * g) = ∑ ij in antidiagonal k, ↑(hasseDeriv ij.fst) f * ↑(hasseDeriv ij.snd) g
[PROOFSTEP]
let D k := (@hasseDeriv R _ k).toAddMonoidHom
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f✝ f g : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
⊢ ↑(hasseDeriv k) (f * g) = ∑ ij in antidiagonal k, ↑(hasseDeriv ij.fst) f * ↑(hasseDeriv ij.snd) g
[PROOFSTEP]
let Φ := @AddMonoidHom.mul R[X] _
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f✝ f g : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
⊢ ↑(hasseDeriv k) (f * g) = ∑ ij in antidiagonal k, ↑(hasseDeriv ij.fst) f * ↑(hasseDeriv ij.snd) g
[PROOFSTEP]
show
(compHom (D k)).comp Φ f g = ∑ ij : ℕ × ℕ in antidiagonal k, ((compHom.comp ((compHom Φ) (D ij.1))).flip (D ij.2) f) g
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f✝ f g : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
⊢ ↑(↑(AddMonoidHom.comp (↑compHom (D k)) Φ) f) g =
∑ ij in antidiagonal k,
↑(↑(↑(AddMonoidHom.flip (AddMonoidHom.comp compHom (↑(↑compHom Φ) (D ij.fst)))) (D ij.snd)) f) g
[PROOFSTEP]
simp only [← finset_sum_apply]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f✝ f g : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
⊢ ↑(↑(AddMonoidHom.comp (↑compHom (LinearMap.toAddMonoidHom (hasseDeriv k))) mul) f) g =
↑(↑(∑ x in antidiagonal k,
↑(AddMonoidHom.flip
(AddMonoidHom.comp compHom (↑(↑compHom mul) (LinearMap.toAddMonoidHom (hasseDeriv x.fst)))))
(LinearMap.toAddMonoidHom (hasseDeriv x.snd)))
f)
g
[PROOFSTEP]
congr 2
[GOAL]
case e_a.e_a
R : Type u_1
inst✝ : Semiring R
k : ℕ
f✝ f g : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
⊢ AddMonoidHom.comp (↑compHom (LinearMap.toAddMonoidHom (hasseDeriv k))) mul =
∑ x in antidiagonal k,
↑(AddMonoidHom.flip (AddMonoidHom.comp compHom (↑(↑compHom mul) (LinearMap.toAddMonoidHom (hasseDeriv x.fst)))))
(LinearMap.toAddMonoidHom (hasseDeriv x.snd))
[PROOFSTEP]
clear f g
[GOAL]
case e_a.e_a
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
⊢ AddMonoidHom.comp (↑compHom (LinearMap.toAddMonoidHom (hasseDeriv k))) mul =
∑ x in antidiagonal k,
↑(AddMonoidHom.flip (AddMonoidHom.comp compHom (↑(↑compHom mul) (LinearMap.toAddMonoidHom (hasseDeriv x.fst)))))
(LinearMap.toAddMonoidHom (hasseDeriv x.snd))
[PROOFSTEP]
ext m r n s : 4
[GOAL]
case e_a.e_a.h.h.h.h
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
⊢ ↑(AddMonoidHom.comp
(↑(AddMonoidHom.comp (AddMonoidHom.comp (↑compHom (LinearMap.toAddMonoidHom (hasseDeriv k))) mul)
(LinearMap.toAddMonoidHom (monomial m)))
r)
(LinearMap.toAddMonoidHom (monomial n)))
s =
↑(AddMonoidHom.comp
(↑(AddMonoidHom.comp
(∑ x in antidiagonal k,
↑(AddMonoidHom.flip
(AddMonoidHom.comp compHom (↑(↑compHom mul) (LinearMap.toAddMonoidHom (hasseDeriv x.fst)))))
(LinearMap.toAddMonoidHom (hasseDeriv x.snd)))
(LinearMap.toAddMonoidHom (monomial m)))
r)
(LinearMap.toAddMonoidHom (monomial n)))
s
[PROOFSTEP]
simp only [finset_sum_apply, coe_mulLeft, coe_comp, flip_apply, Function.comp_apply, hasseDeriv_monomial,
LinearMap.toAddMonoidHom_coe, compHom_apply_apply, coe_mul, monomial_mul_monomial]
[GOAL]
case e_a.e_a.h.h.h.h
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
⊢ ↑(monomial (m + n - k)) (↑(choose (m + n) k) * (r * s)) =
∑ x in antidiagonal k, ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s))
[PROOFSTEP]
have aux :
∀ x : ℕ × ℕ,
x ∈ antidiagonal k →
monomial (m - x.1 + (n - x.2)) (↑(m.choose x.1) * r * (↑(n.choose x.2) * s)) =
monomial (m + n - k) (↑(m.choose x.1) * ↑(n.choose x.2) * (r * s)) :=
by
intro x hx
rw [Finset.Nat.mem_antidiagonal] at hx
subst hx
by_cases hm : m < x.1
· simp only [Nat.choose_eq_zero_of_lt hm, Nat.cast_zero, zero_mul, monomial_zero_right]
by_cases hn : n < x.2
· simp only [Nat.choose_eq_zero_of_lt hn, Nat.cast_zero, zero_mul, mul_zero, monomial_zero_right]
push_neg at hm hn
rw [tsub_add_eq_add_tsub hm, ← add_tsub_assoc_of_le hn, ← tsub_add_eq_tsub_tsub, add_comm x.2 x.1, mul_assoc, ←
mul_assoc r, ← (Nat.cast_commute _ r).eq, mul_assoc, mul_assoc]
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
⊢ ∀ (x : ℕ × ℕ),
x ∈ antidiagonal k →
↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - k)) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
intro x hx
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
x : ℕ × ℕ
hx : x ∈ antidiagonal k
⊢ ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - k)) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
rw [Finset.Nat.mem_antidiagonal] at hx
[GOAL]
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
x : ℕ × ℕ
hx : x.fst + x.snd = k
⊢ ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - k)) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
subst hx
[GOAL]
R : Type u_1
inst✝ : Semiring R
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
x : ℕ × ℕ
⊢ ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - (x.fst + x.snd))) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
by_cases hm : m < x.1
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
x : ℕ × ℕ
hm : m < x.fst
⊢ ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - (x.fst + x.snd))) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
simp only [Nat.choose_eq_zero_of_lt hm, Nat.cast_zero, zero_mul, monomial_zero_right]
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
x : ℕ × ℕ
hm : ¬m < x.fst
⊢ ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - (x.fst + x.snd))) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
by_cases hn : n < x.2
[GOAL]
case pos
R : Type u_1
inst✝ : Semiring R
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
x : ℕ × ℕ
hm : ¬m < x.fst
hn : n < x.snd
⊢ ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - (x.fst + x.snd))) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
simp only [Nat.choose_eq_zero_of_lt hn, Nat.cast_zero, zero_mul, mul_zero, monomial_zero_right]
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
x : ℕ × ℕ
hm : ¬m < x.fst
hn : ¬n < x.snd
⊢ ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - (x.fst + x.snd))) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
push_neg at hm hn
[GOAL]
case neg
R : Type u_1
inst✝ : Semiring R
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
x : ℕ × ℕ
hm : x.fst ≤ m
hn : x.snd ≤ n
⊢ ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - (x.fst + x.snd))) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
rw [tsub_add_eq_add_tsub hm, ← add_tsub_assoc_of_le hn, ← tsub_add_eq_tsub_tsub, add_comm x.2 x.1, mul_assoc, ←
mul_assoc r, ← (Nat.cast_commute _ r).eq, mul_assoc, mul_assoc]
[GOAL]
case e_a.e_a.h.h.h.h
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
aux :
∀ (x : ℕ × ℕ),
x ∈ antidiagonal k →
↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - k)) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
⊢ ↑(monomial (m + n - k)) (↑(choose (m + n) k) * (r * s)) =
∑ x in antidiagonal k, ↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s))
[PROOFSTEP]
rw [Finset.sum_congr rfl aux]
[GOAL]
case e_a.e_a.h.h.h.h
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
aux :
∀ (x : ℕ × ℕ),
x ∈ antidiagonal k →
↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - k)) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
⊢ ↑(monomial (m + n - k)) (↑(choose (m + n) k) * (r * s)) =
∑ x in antidiagonal k, ↑(monomial (m + n - k)) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
[PROOFSTEP]
rw [← LinearMap.map_sum, ← Finset.sum_mul]
[GOAL]
case e_a.e_a.h.h.h.h
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
aux :
∀ (x : ℕ × ℕ),
x ∈ antidiagonal k →
↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - k)) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
⊢ ↑(monomial (m + n - k)) (↑(choose (m + n) k) * (r * s)) =
↑(monomial (m + n - k)) ((∑ x in antidiagonal k, ↑(choose m x.fst) * ↑(choose n x.snd)) * (r * s))
[PROOFSTEP]
congr
[GOAL]
case e_a.e_a.h.h.h.h.h.e_6.h.e_a
R : Type u_1
inst✝ : Semiring R
k : ℕ
f : R[X]
D : ℕ → R[X] →+ R[X] := fun k => LinearMap.toAddMonoidHom (hasseDeriv k)
Φ : R[X] →+ R[X] →+ R[X] := mul
m : ℕ
r : R
n : ℕ
s : R
aux :
∀ (x : ℕ × ℕ),
x ∈ antidiagonal k →
↑(monomial (m - x.fst + (n - x.snd))) (↑(choose m x.fst) * r * (↑(choose n x.snd) * s)) =
↑(monomial (m + n - k)) (↑(choose m x.fst) * ↑(choose n x.snd) * (r * s))
⊢ ↑(choose (m + n) k) = ∑ x in antidiagonal k, ↑(choose m x.fst) * ↑(choose n x.snd)
[PROOFSTEP]
rw_mod_cast [← Nat.add_choose_eq]
|
2001, 2008
|
from winning.std_calibration import centered_std_density
from winning.lattice_calibration import dividend_implied_ability
import numpy as np
# Illustrates the basic calibration; identical to the standard example except that
# the discretization parameters are modified here.
if __name__ == '__main__':
# Choose the length of the lattice, which is 2*L+1
L = 700
# Choose the unit of discretization
unit = 0.005
    # Step 1. The unit is used to create a discrete approximation of a density, here N(0,1) for simplicity
density = centered_std_density(L=L, unit=unit)
# Step 2. We set winning probabilities, most commonly represented in racing as inverse probabilities ('dividends')
dividends = [2,6,np.nan, 3]
# Step 3. The algorithm implies relative ability (i.e. how much to translate the performance distributions)
# Missing values will be assigned odds of 1999:1 ... or you can leave them out.
abilities = dividend_implied_ability(dividends=dividends,density=density, nan_value=2000, unit=unit)
# That's all. Lower ability is better.
print(abilities)
# Note that if you don't supply the unit, the abilities take on greater magnitudes than before (i.e. they are offsets on the lattice)
# So you'll have to multiply them by the unit to get a scaled ability consistent with the density definition
scale_free_abilities = dividend_implied_ability(dividends=dividends, density=density, nan_value=2000)
scaled_ability = [ a*unit for a in scale_free_abilities ]
print(scaled_ability) |
function output = bicubic_interpolation_at(input,uu,vv,nx,ny,border_out,BOUNDARY_CONDITION)
output = 0.0;
sx = 0;
sy = 0;
if(uu < 0)
sx = -1;
else
sx = 1;
end
if(vv < 0)
sy = -1;
else
sy = 1;
end
out = 0;
switch(BOUNDARY_CONDITION)
case 0
[out,x] = neumann_bc(uu, nx);
[out,y] = neumann_bc(vv, ny);
[out,mx] = neumann_bc(uu - sx, nx);
        [out,my] = neumann_bc(vv - sy, ny); % y-direction offset uses sy
[out,dx] = neumann_bc( uu + sx, nx);
[out,dy] = neumann_bc( vv + sy, ny);
[out,ddx] = neumann_bc( uu + 2*sx, nx);
[out,ddy] = neumann_bc( vv + 2*sy, ny);
case 1
        [out,x] = periodic_bc(uu, nx);
        [out,y] = periodic_bc(vv, ny);
        [out,mx] = periodic_bc(uu - sx, nx);
        [out,my] = periodic_bc(vv - sy, ny); % y-direction offset uses sy
[out,dx] = periodic_bc( uu + sx, nx);
[out,dy] = periodic_bc( vv + sy, ny);
[out,ddx] = periodic_bc( uu + 2*sx, nx);
[out,ddy] = periodic_bc( vv + 2*sy, ny);
case 2
[out,x] = symmetric_bc(uu, nx);
[out,y] = symmetric_bc(vv, ny);
[out,mx] = symmetric_bc(uu - sx, nx);
        [out,my] = symmetric_bc(vv - sy, ny); % y-direction offset uses sy
[out,dx] = symmetric_bc( uu + sx, nx);
[out,dy] = symmetric_bc( vv + sy, ny);
[out,ddx] = symmetric_bc( uu + 2*sx, nx);
[out,ddy] = symmetric_bc( vv + 2*sy, ny);
otherwise
[out,x] = neumann_bc(uu, nx);
[out,y] = neumann_bc(vv, ny);
[out,mx] = neumann_bc(uu - sx, nx);
        [out,my] = neumann_bc(vv - sy, ny); % y-direction offset uses sy
        [out,dx] = neumann_bc( uu + sx, nx);
        [out,dy] = neumann_bc( vv + sy, ny);
        [out,ddx] = neumann_bc( uu + 2*sx, nx);
        [out,ddy] = neumann_bc( vv + 2*sy, ny);
end % close the switch on BOUNDARY_CONDITION
if((out == 1) && (border_out == 0))
output = 0.0;
else
p11 = input(mx + nx * my);
p12 = input(x + nx * my);
p13 = input(dx + nx * my);
p14 = input(ddx + nx * my);
p21 = input(mx + nx * y);
p22 = input(x + nx * y);
p23 = input(dx + nx * y);
p24 = input(ddx + nx * y);
p31 = input(mx + nx * dy);
p32 = input(x + nx * dy);
p33 = input(dx + nx * dy);
p34 = input(ddx + nx * dy);
p41 = input(mx + nx * ddy);
p42 = input(x + nx * ddy);
p43 = input(dx + nx * ddy);
p44 = input(ddx + nx * ddy);
pol = [p11, p21, p31, p41;
p12, p22, p32, p42;
p13, p23, p33, p43;
p14, p24, p34, p44];
output = bicubic_interpolation_cell(pol, uu-x, vv-y);
end
end |
```python
# 그래프, 수학 기능 추가
# Add graph and math features
import pylab as py
import numpy as np
```
# 1차 적분<br>First Order Numerical Integration
[Video: First Order Numerical Integration](https://www.youtube.com/watch?v=1p0NHR5w0Lc)
다시 면적이 1인 반원을 생각해 보자.<br>Again, let's think about the half circle with area of 1.
$$
\begin{align}
\pi r^2 &= 2 \\
r^2 &= \frac{2}{\pi} \\
r &= \sqrt{\frac{2}{\pi}}
\end{align}
$$
```python
r = py.sqrt(2.0 / py.pi)
```
```python
def half_circle(x):
return py.sqrt(r**2 - x**2)
```
$$
y = \sqrt{r^2 - x^2}
$$
```python
import plot_num_int as pi
```
```python
pi.plot_a_half_circle_of_area(1)
pi.axis_equal_grid_True()
```
이번에는 사다리꼴 규칙을 이용해서 구해 보기로 하자.<br>
This time, let's use the trapezoid rule to find its area.
## 사다리꼴 규칙<br>Trapezoid Rule
다음과 같은 사다리꼴을 생각해 보자.<br>Let's think about a trapezoid as follows.
```python
x_array = (0, 1)
y_array = (1, 2)
py.fill_between(x_array, y_array)
py.axis('equal')
py.axis('off')
py.text(-0.25, 0.5, '$y_i$')
py.text(1.15, 1, '$y_{i+1}$')
py.text(0.5, -0.3, '$\Delta x$');
```
사다리꼴의 면적은 다음과 같다.<br>
Area of a trapezoid is as follows.
$$
a_i=\frac{1}{2} \left( y_i + y_{i+1} \right) \Delta x
$$
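이 공식을 간단한 파이썬 함수로 확인해 보자.<br>
As a quick check, here is a minimal sketch of this formula (the helper name below is ours, not part of the notebook):

```python
# 위 공식 그대로: 사다리꼴 하나의 면적
# Directly from the formula above: area of one trapezoid
def trapezoid_area(y_i, y_i_plus_1, delta_x):
    return 0.5 * (y_i + y_i_plus_1) * delta_x

# 위 그림의 사다리꼴 The trapezoid drawn above: y_i = 1, y_{i+1} = 2, delta x = 1
assert abs(trapezoid_area(1, 2, 1) - 1.5) < 1e-12
```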
## 1차 적분<br>First order numerical integration
마찬가지로 일정 간격으로 $x$ 좌표를 나누어 보자.<br>
Same as before, let's divide $x$ coordinates in a constant interval.
```python
n = 10
pi.plot_half_circle_with_stems(n, 1)
# 사다리꼴의 좌표를 나눔 Find coordinates for the trapezoids
x_array_bar = py.linspace(-r, r, n+1)
y_array_bar = half_circle(x_array_bar)
# 각 사다리꼴의 폭 Width of each trapezoid
delta_x = x_array_bar[1] - x_array_bar[0]
# 일련의 사다리꼴을 그림 Plot a series of the trapezoids
xp, yp = x_array_bar[0], y_array_bar[0]
for x, y in zip(x_array_bar[1:], y_array_bar[1:]):
py.fill_between((xp, x), (yp, y), alpha=0.5, color=py.random((1, 3)))
xp, yp = x, y
py.axis('equal')
py.grid(True)
```
사다리꼴의 면적을 하나씩 구해서 더해보자.<br>Let's accumulate the area of trapezoids.
$$
Area = \sum_{k=0}^{n-1} F_k
$$
$$
F_k = \frac{\Delta x}{2}\left[f(x_k)+f(x_{k+1})\right]
$$
$$
Area = \sum_{k=0}^{n-1} \frac{1}{2}\left[f(x_k)+f(x_{k+1})\right] \Delta x
$$
```python
def get_delta_x(xi, xe, n):
return (xe - xi) / n
```
```python
def num_int_1(f, xi, xe, n, b_verbose=False):
x_array = py.linspace(xi, xe, n+1)
delta_x = x_array[1] - x_array[0]
integration_result = 0.0
x_k = x_array[0]
y_k = f(x_k)
for k, x_k_plus_1 in enumerate(x_array[1:]):
y_k_plus_1 = f(x_k_plus_1)
F_k = 0.5 * (y_k + y_k_plus_1) * (x_k_plus_1 - x_k)
if b_verbose: print('i = %2d, F_k = %g' % (k, F_k))
integration_result += F_k
x_k, y_k = x_k_plus_1, y_k_plus_1
return integration_result
```
```python
n = 10
result = num_int_1(half_circle, -r, r, n, b_verbose=True)
print('result =', result)
```
예상한 값 1에 더 비슷한 값을 얻기 위해 더 잘게 나누어 보자<br>
To obtain the result closer to the expected value of 1, let's divide with a narrower interval.
```python
n = 100
result = num_int_1(half_circle, -r, r, n)
print('result =', result)
```
```python
%timeit -n 100 result = num_int_1(half_circle, -r, r, n)
```
### $\cos \theta$의 반 주기<br>Half period of $\cos \theta$
```python
n = 10
result_cos = num_int_1(py.cos, 0, py.pi, n, b_verbose=True)
print('result =', result_cos)
```
```python
n = 100
result_cos = num_int_1(py.cos, 0, py.pi, n)
print('result =', result_cos)
```
### 1/4 원<br>A quarter circle
```python
n = 10
result_quarter = num_int_1(half_circle, -r, 0, n, b_verbose=True)
print('result =', result_quarter)
```
```python
n = 100
result_quarter = num_int_1(half_circle, -r, 0, n)
print('result =', result_quarter)
```
## 연습문제<br>Exercises
도전 과제 1 : 다른 조건이 같을 때 0차 적분과 사다리꼴 적분의 오차를 비교해 보시오. 필요하면 해당 파이썬 함수를 복사하시오.<br>Try this 1 : Compare the errors of the zeroth and first order integrations of the half circle example above using the same conditions. Duplicate the python function if necessary.
```python
```
도전 과제 2 : 길이 $L=3[m]$ 인 외팔보가 분포 하중 $\omega=50\sin\left(\frac{1}{2L}\pi x\right)[N/m]$을 받고 있을 때 전단력과 굽힘모멘트 선도를 구하시오.<br>
Try this 2 : Plot diagrams of shear force and bending moment of a cantilever with length $L=3[m]$ under distributed load $\omega=50\sin\left(\frac{1}{2L}\pi x\right)[N/m]$. <br>
(ref : C 4.4, Pytel, Kiusalaas & Sharma, Mechanics of Materials, 2nd Ed, SI, Cengage Learning, 2011.)
```python
```
## 함수형 프로그래밍<br>Functional programming
간격이 일정하다면 면적의 근사값을 다음과 같이 바꾸어 쓸 수 있다.<br>
If the interval $\Delta x$ is constant, we may rewrite the approximation of the area as follows.
$$
\begin{align}
Area &= \sum_{k=0}^{n-1} \frac{1}{2}\left[f(x_k)+f(x_{k+1})\right] \Delta x \\
&= \Delta x \sum_{k=0}^{n-1} \frac{1}{2}\left[f(x_k)+f(x_{k+1})\right]
\end{align}
$$
$$
\begin{align}
\sum_{k=0}^{n-1} \frac{1}{2}\left[f(x_k)+f(x_{k+1})\right] &= \frac{1}{2}\left[f(x_0)+f(x_1)\right] \\
&+ \frac{1}{2}\left[f(x_1)+f(x_2)\right] \\
&+ \frac{1}{2}\left[f(x_2)+f(x_3)\right] \\
& \ldots \\
&+ \frac{1}{2}\left[f(x_{n-2})+f(x_{n-1})\right] \\
&+ \frac{1}{2}\left[f(x_{n-1})+f(x_{n})\right] \\
&= \frac{1}{2}f(x_0) + \sum_{k=1}^{n-1} f(x_k) + \frac{1}{2}f(x_{n}) \\
&= \frac{1}{2}\left[f(x_0) + f(x_{n})\right] + \sum_{k=1}^{n-1} f(x_k)
\end{align}
$$
$$
\begin{align}
Area &= \Delta x \sum_{k=0}^{n-1} \frac{1}{2}\left[f(x_k)+f(x_{k+1})\right] \\
&= \Delta x \left[\frac{1}{2}\left[f(x_0) + f(x_{n})\right] + \sum_{k=1}^{n-1} f(x_k)\right]
\end{align}
$$
할당문 없이 `sum()` 과 `map()` 함수로 구현해 보자.<br>
Instead of assignments, let's implement using `sum()` and `map()` functions.
```python
def num_int_1_functional(f, xi, xe, n):
# get_delta_x() 함수 호출 횟수를 줄이기 위해 함수 안의 함수를 사용
# To reduce the number of calling function get_delta_x(), define inner functions
def with_delta_x(f, xi, n, delta_x=get_delta_x(xi, xe, n)):
return delta_x * (
0.5 * (f(xi) + f(xe))
+ sum(
map(
f,
py.arange(xi + delta_x, xe - delta_x*0.1, delta_x),
)
)
)
return with_delta_x(f, xi, n)
```
```python
n = 100
result_func = num_int_1_functional(half_circle, -r, r, n)
print('result_func =', result_func)
```
```python
assert 1e-7 > abs(result - result_func), f"result = {result}, result_func = {result_func}"
```
```python
%timeit -n 100 result_func = num_int_1_functional(half_circle, -r, r, n)
```
## NumPy 벡터화<br>Vectorization of NumPy
```python
import pylab as py
```
```python
def num_int_1_vector_with_delta_x(f, xi, xe, n, delta_x):
    # delta_x equals get_delta_x(xi, xe, n) here, so use the parameter directly
    return delta_x * (
        f(py.arange(xi + delta_x, xe - delta_x*0.5, delta_x)).sum()
        + 0.5 * f(py.array((xi, xe))).sum()
    )
def num_int_1_vector(f, xi, xe, n):
return num_int_1_vector_with_delta_x(f, xi, xe, n, get_delta_x(xi, xe, n))
```
```python
n = 100
result_vect = num_int_1_vector(half_circle, -r, r, n)
print('result_vect =', result_vect)
```
```python
assert 1e-7 > abs(result - result_vect), f"result = {result}, result_vect = {result_vect}"
```
```python
%timeit -n 100 result_vect = num_int_1_vector(half_circle, -r, r, n)
```
## 시험<br>Test
아래는 함수가 맞게 작동하는지 확인함<br>
Following cells verify whether the functions work correctly.
```python
import pylab as py
r = py.sqrt(1.0 / py.pi)
n = 10
delta_x = r/n
def half_circle(x):
return py.sqrt(r**2 - x ** 2)
assert 0.25 > num_int_1(half_circle, -r, 0, n)
assert 0.25 > num_int_1(half_circle, 0, r, n)
assert 0.25 > num_int_1_functional(half_circle, -r, 0, n)
assert 0.25 > num_int_1_functional(half_circle, 0, r, n)
assert 0.25 > num_int_1_vector(half_circle, -r, 0, n)
assert 0.25 > num_int_1_vector(half_circle, 0, r, n)
```
```python
assert 0.1 > (abs(num_int_1(half_circle, -r, 0, n) - 0.25) * 4)
assert 0.1 > (abs(num_int_1(half_circle, 0, r, n) - 0.25) * 4)
assert 0.1 > (abs(num_int_1_functional(half_circle, -r, 0, n) - 0.25) * 4)
assert 0.1 > (abs(num_int_1_functional(half_circle, 0, r, n) - 0.25) * 4)
assert 0.1 > (abs(num_int_1_vector(half_circle, -r, 0, n) - 0.25) * 4)
assert 0.1 > (abs(num_int_1_vector(half_circle, 0, r, n) - 0.25) * 4)
```
## Final Bell<br>마지막 종
```python
# stackoverfow.com/a/24634221
import os
os.system("printf '\a'");
```
```python
```
|
#EAFEFE (or 0xEAFEFE) is an unnamed color, approximately Azure. HEX triplet: EA, FE and FE. RGB value is (234,254,254). Sum of RGB (Red+Green+Blue) = 234+254+254 = 742 (97% of the max value 765). Red value is 234 (91.80% of 255, or 31.54% of 742); green value is 254 (99.61% of 255, or 34.23% of 742); blue value is 254 (99.61% of 255, or 34.23% of 742); the largest RGB component is 254, so the color is mainly green and blue. Hex color #EAFEFE is not a web-safe color; the closest web-safe analog is #FFFFFF. The inverse of #EAFEFE is #150101. Grayscale: #F8F8F8. Windows color (decimal): -1376514 or 15400702. OLE color: 16711402.
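These figures are easy to double-check; the short Python sketch below (ours, using only the standard-library colorsys module) reproduces the HSL, HSV and CMYK values quoted above and below.
import colorsys
r, g, b = 0xEA / 255, 0xFE / 255, 0xFE / 255    # RGB(234, 254, 254)
h, l, s = colorsys.rgb_to_hls(r, g, b)          # note: colorsys returns hue, lightness, saturation
print(round(h * 360), round(s, 2), round(l, 2)) # 180 0.91 0.96
h, s_hsv, v = colorsys.rgb_to_hsv(r, g, b)
print(round(s_hsv, 2), round(v, 2))             # 0.08 1.0
k = 1 - max(r, g, b)                            # CMYK black channel
c = (1 - r - k) / (1 - k)                       # cyan channel; magenta and yellow are 0 here
print(round(c, 2), round(k, 2))                 # 0.08 0.0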
HSL (cylindrical-coordinate) representation of color #EAFEFE: hue angle 180°, saturation 0.91 (91%), lightness 0.96 (96%). HSV value (or HSB brightness) is 1.00 (100%) and HSV saturation is 0.08 (8%). Process color model (four-color CMYK) of #EAFEFE is Cyan = 0.08, Magenta = 0, Yellow = 0 and Black (K) = 0.00. |
import torch
import fairseq
import soundfile as sf
from datasets import load_dataset
import numpy as np
from itertools import groupby
import os
class Decoder:
def __init__(self, json_dict):
self.dict = json_dict
self.look_up = np.asarray(list(self.dict.keys()))
def decode(self, ids):
converted_tokens = self.look_up[ids]
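        # groupby collapses runs of repeated tokens (CTC-style decoding); the joins
        # below then drop '<s>' blanks and turn '|' word boundaries into spaces.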
fused_tokens = [tok[0] for tok in groupby(converted_tokens)]
output = ' '.join(''.join(''.join(fused_tokens).split("<s>")).split("|"))
return output
json_dict = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10,
"H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22,
"P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}
libri_dummy = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
# check out the dataset
print(libri_dummy)
input_sample = torch.tensor(sf.read(libri_dummy[0]['file'])[0]).unsqueeze(0)
input_sample = input_sample.to(torch.float32)
# model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(['../../pretrain/wav2vec_small_960h.pt'], arg_overrides={"data": "../../pretrain/"})
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(['../checkpoints_m2v_finetune/checkpoint_last.pt'], arg_overrides={"data": "../../pretrain/"})
model = model[0]
model.eval()
"""
audio_source :[batch_size * num_of_sample_points] : tensor
visual_source : [batch_size * visual_samples(112*(112*frames))] :tensor_list
"""
logits = model(audio_source=input_sample, visual_source=input_sample, padding_mask=None)["encoder_out"]
predicted_ids = torch.argmax(logits[:, 0], dim=-1)  # take the single batch element, then argmax over the vocabulary dimension
decoder = Decoder(json_dict=json_dict)
print("Prediction: ", decoder.decode(predicted_ids))
print(libri_dummy[0]["text"]) |
She has completed her Masters in Business Administration. My daughter is a beautiful, intelligent and family-oriented person with a good value system. A qualified working professional with a growth-oriented career and a pleasing, amiable personality is what we look for in a groom.
We come from an upper-middle-class nuclear family with moderate values. Our family lives in Kolkata. Her father is retired while her mother is a homemaker. She has one sister. |
import os
import numpy as np
from PIL import Image
from data.base_dataset import BaseDataset, get_transform, get_sparse_transform, get_mask_transform
from data.image_folder import make_dataset
class LabeledDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
        self.dir_scribbles = os.path.join(opt.dataroot, 'scribbles')  # alternative inputs: 'pix2pix', 'masks'
        self.dir_images = os.path.join(opt.dataroot, 'images')
        self.classes = sorted(os.listdir(self.dir_images))  # sorted so the class order is deterministic; revisit if other models assume a different order
self.num_classes = len(self.classes)
self.scribble_paths = []
self.images_paths = []
for cl in self.classes:
self.scribble_paths.append(sorted( make_dataset( os.path.join( self.dir_scribbles , cl ) ) ) )
self.images_paths.append( sorted( make_dataset( os.path.join( self.dir_images , cl ) ) ) )
self.cum_sizes = []
self.sizes = []
size =0
for i in range(self.num_classes):
size += len(self.scribble_paths[i])
self.cum_sizes.append(size)
self.sizes.append(size)
self.transform = get_transform(opt)
self.sparse_transform = get_sparse_transform(opt)
self.mask_transform = get_mask_transform(opt)
def find_label(self,index):
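        # Maps a flat sample index to (class label, index within that class)
        # using the cumulative per-class sizes computed in initialize().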
sub=0
for i in range(self.num_classes):
if index < self.cum_sizes[i]:
return i,(index-sub)
sub= self.cum_sizes[i]
def __getitem__(self, index):
index = index % self.cum_sizes[ self.num_classes -1 ]
label , relative_index = self.find_label(index)
if self.opt.sketchy_dataset:
A_path = self.scribble_paths[label][ relative_index ]
B_path = A_path.replace('scribbles','images').split('-')[0]+'.jpg'
elif self.opt.autocomplete_dataset_outline:
A_path = self.scribble_paths[label][ relative_index ]
B_path = A_path.replace('scribbles','images').split('_')[0]+'.png'
elif self.opt.autocomplete_dataset_edges:
A_path = self.scribble_paths[label][ relative_index ]
B_path = A_path.replace('scribbles','images').split('_')[0]+'_AB.jpg'
elif self.opt.edges_outlines_dataset:
A_path = self.scribble_paths[label][ relative_index ]
B_path = A_path.replace('scribbles','images')
if np.random.multinomial(1, [1.0 / 3, 2.0 / 3])[0]==1:
A_path=A_path.replace('scribbles','edges_outlines')
else :
A_path = self.scribble_paths[label][ relative_index ]
B_path = self.images_paths[label][relative_index]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
A = self.transform(A_img)
B = self.transform(B_img)
A_mask = self.mask_transform(A_img)
A_sparse = self.sparse_transform(A_img)
if self.opt.which_direction == 'BtoA':
input_nc = self.opt.output_nc
output_nc = self.opt.input_nc
else:
input_nc = self.opt.input_nc
output_nc = self.opt.output_nc
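        # The 0.299/0.587/0.114 weights below are the ITU-R BT.601 luma
        # coefficients for converting RGB to a single grayscale channel.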
if input_nc == 1: # RGB to gray
tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
A = tmp.unsqueeze(0)
if input_nc == 1: # RGB to gray
tmp = A_sparse[0, ...] * 0.299 + A_sparse[1, ...] * 0.587 + A_sparse[2, ...] * 0.114
A_sparse = tmp.unsqueeze(0)
if output_nc == 1: # RGB to gray
tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114
B = tmp.unsqueeze(0)
return {'A': A,'A_sparse':A_sparse, 'A_mask':A_mask, 'B': B,
'A_paths': A_path, 'B_paths': B_path, 'label': label }
def __len__(self):
return self.cum_sizes[ self.num_classes - 1 ]
def get_transform(self):
return self.transform
def get_root(self):
return self.root
def get_classes(self):
return self.classes
def get_num_classes(self):
return len(self.classes)
def name(self):
return 'LabeledDataset'
|
/-
Copyright (c) 2021 Rémy Degenne. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Rémy Degenne
-/
import measure_theory.measure_space
import measure_theory.pi_system
import algebra.big_operators.intervals
import data.finset.intervals
/-!
# Independence of sets of sets and measure spaces (σ-algebras)
* A family of sets of sets `π : ι → set (set α)` is independent with respect to a measure `μ` if for
any finite set of indices `s = {i_1, ..., i_n}`, for any sets `f i_1 ∈ π i_1, ..., f i_n ∈ π i_n`,
`μ (⋂ i in s, f i) = ∏ i in s, μ (f i) `. It will be used for families of π-systems.
* A family of measurable space structures (i.e. of σ-algebras) is independent with respect to a
measure `μ` (typically defined on a finer σ-algebra) if the family of sets of measurable sets they
define is independent. I.e., `m : ι → measurable_space α` is independent with respect to a
measure `μ` if for any finite set of indices `s = {i_1, ..., i_n}`, for any sets
`f i_1 ∈ m i_1, ..., f i_n ∈ m i_n`, then `μ (⋂ i in s, f i) = ∏ i in s, μ (f i)`.
* Independence of sets (or events in probabilistic parlance) is defined as independence of the
measurable space structures they generate: a set `s` generates the measurable space structure with
measurable sets `∅, s, sᶜ, univ`.
* Independence of functions (or random variables) is also defined as independence of the measurable
space structures they generate: a function `f` for which we have a measurable space `m` on the
codomain generates `measurable_space.comap f m`.
## Main statements
* TODO: `Indep_of_Indep_sets`: if π-systems are independent as sets of sets, then the
measurable space structures they generate are independent.
* `indep_of_indep_sets`: variant with two π-systems.
## Implementation notes
We provide one main definition of independence:
* `Indep_sets`: independence of a family of sets of sets `pi : ι → set (set α)`.
Three other independence notions are defined using `Indep_sets`:
* `Indep`: independence of a family of measurable space structures `m : ι → measurable_space α`,
* `Indep_set`: independence of a family of sets `s : ι → set α`,
* `Indep_fun`: independence of a family of functions. For measurable spaces
`m : Π (i : ι), measurable_space (β i)`, we consider functions `f : Π (i : ι), α → β i`.
Additionally, we provide four corresponding statements for two measurable space structures (resp.
sets of sets, sets, functions) instead of a family. These properties are denoted by the same names
as for a family, but without a capital letter, for example `indep_fun` is the version of `Indep_fun`
for two functions.
The definition of independence for `Indep_sets` uses finite sets (`finset`). An alternative and
equivalent way of defining independence would have been to use countable sets.
TODO: prove that equivalence.
Most of the definitions and lemma in this file list all variables instead of using the `variables`
keyword at the beginning of a section, for example
`lemma indep.symm {α} {m₁ m₂ : measurable_space α} [measurable_space α] {μ : measure α} ...` .
This is intentional, to be able to control the order of the `measurable_space` variables. Indeed
when defining `μ` in the example above, the measurable space used is the last one defined, here
`[measurable_space α]`, and not `m₁` or `m₂`.
## References
* Williams, David. Probability with martingales. Cambridge university press, 1991.
Part A, Chapter 4.
-/
open measure_theory measurable_space
open_locale big_operators classical
namespace probability_theory
section definitions
/-- A family of sets of sets `π : ι → set (set α)` is independent with respect to a measure `μ` if
for any finite set of indices `s = {i_1, ..., i_n}`, for any sets
`f i_1 ∈ π i_1, ..., f i_n ∈ π i_n`, then `μ (⋂ i in s, f i) = ∏ i in s, μ (f i) `.
It will be used for families of pi_systems. -/
def Indep_sets {α ι} [measurable_space α] (π : ι → set (set α)) (μ : measure α . volume_tac) :
Prop :=
∀ (s : finset ι) {f : ι → set α} (H : ∀ i, i ∈ s → f i ∈ π i), μ (⋂ i ∈ s, f i) = ∏ i in s, μ (f i)
/-- Two sets of sets `s₁, s₂` are independent with respect to a measure `μ` if for any sets
`t₁ ∈ s₁, t₂ ∈ s₂`, then `μ (t₁ ∩ t₂) = μ (t₁) * μ (t₂)` -/
def indep_sets {α} [measurable_space α] (s1 s2 : set (set α)) (μ : measure α . volume_tac) : Prop :=
∀ t1 t2 : set α, t1 ∈ s1 → t2 ∈ s2 → μ (t1 ∩ t2) = μ t1 * μ t2
/-- A family of measurable space structures (i.e. of σ-algebras) is independent with respect to a
measure `μ` (typically defined on a finer σ-algebra) if the family of sets of measurable sets they
define is independent. `m : ι → measurable_space α` is independent with respect to measure `μ` if
for any finite set of indices `s = {i_1, ..., i_n}`, for any sets
`f i_1 ∈ m i_1, ..., f i_n ∈ m i_n`, then `μ (⋂ i in s, f i) = ∏ i in s, μ (f i) `. -/
def Indep {α ι} (m : ι → measurable_space α) [measurable_space α] (μ : measure α . volume_tac) :
Prop :=
Indep_sets (λ x, (m x).measurable_set') μ
/-- Two measurable space structures (or σ-algebras) `m₁, m₂` are independent with respect to a
measure `μ` (defined on a third σ-algebra) if for any sets `t₁ ∈ m₁, t₂ ∈ m₂`,
`μ (t₁ ∩ t₂) = μ (t₁) * μ (t₂)` -/
def indep {α} (m₁ m₂ : measurable_space α) [measurable_space α] (μ : measure α . volume_tac) :
Prop :=
indep_sets (m₁.measurable_set') (m₂.measurable_set') μ
/-- A family of sets is independent if the family of measurable space structures they generate is
independent. For a set `s`, the generated measurable space has measurable sets `∅, s, sᶜ, univ`. -/
def Indep_set {α ι} [measurable_space α] (s : ι → set α) (μ : measure α . volume_tac) : Prop :=
Indep (λ i, generate_from {s i}) μ
/-- Two sets are independent if the two measurable space structures they generate are independent.
For a set `s`, the generated measurable space structure has measurable sets `∅, s, sᶜ, univ`. -/
def indep_set {α} [measurable_space α] (s t : set α) (μ : measure α . volume_tac) : Prop :=
indep (generate_from {s}) (generate_from {t}) μ
/-- A family of functions defined on the same space `α` and taking values in possibly different
spaces, each with a measurable space structure, is independent if the family of measurable space
structures they generate on `α` is independent. For a function `g` with codomain having measurable
space structure `m`, the generated measurable space structure is `measurable_space.comap g m`. -/
def Indep_fun {α ι} [measurable_space α] {β : ι → Type*} (m : Π (x : ι), measurable_space (β x))
(f : Π (x : ι), α → β x) (μ : measure α . volume_tac) : Prop :=
Indep (λ x, measurable_space.comap (f x) (m x)) μ
/-- Two functions are independent if the two measurable space structures they generate are
independent. For a function `f` with codomain having measurable space structure `m`, the generated
measurable space structure is `measurable_space.comap f m`. -/
def indep_fun {α β γ} [measurable_space α] (mβ : measurable_space β) (mγ : measurable_space γ)
(f : α → β) (g : α → γ) (μ : measure α . volume_tac) : Prop :=
indep (measurable_space.comap f mβ) (measurable_space.comap g mγ) μ
end definitions
section indep
lemma indep_sets.symm {α} {s₁ s₂ : set (set α)} [measurable_space α] {μ : measure α}
(h : indep_sets s₁ s₂ μ) :
indep_sets s₂ s₁ μ :=
by { intros t1 t2 ht1 ht2, rw [set.inter_comm, mul_comm], exact h t2 t1 ht2 ht1, }
lemma indep.symm {α} {m₁ m₂ : measurable_space α} [measurable_space α] {μ : measure α}
(h : indep m₁ m₂ μ) :
indep m₂ m₁ μ :=
indep_sets.symm h
lemma indep_sets_of_indep_sets_of_le_left {α} {s₁ s₂ s₃: set (set α)} [measurable_space α]
{μ : measure α} (h_indep : indep_sets s₁ s₂ μ) (h31 : s₃ ⊆ s₁) :
indep_sets s₃ s₂ μ :=
λ t1 t2 ht1 ht2, h_indep t1 t2 (set.mem_of_subset_of_mem h31 ht1) ht2
lemma indep_sets_of_indep_sets_of_le_right {α} {s₁ s₂ s₃: set (set α)} [measurable_space α]
{μ : measure α} (h_indep : indep_sets s₁ s₂ μ) (h32 : s₃ ⊆ s₂) :
indep_sets s₁ s₃ μ :=
λ t1 t2 ht1 ht2, h_indep t1 t2 ht1 (set.mem_of_subset_of_mem h32 ht2)
lemma indep_of_indep_of_le_left {α} {m₁ m₂ m₃: measurable_space α} [measurable_space α]
{μ : measure α} (h_indep : indep m₁ m₂ μ) (h31 : m₃ ≤ m₁) :
indep m₃ m₂ μ :=
λ t1 t2 ht1 ht2, h_indep t1 t2 (h31 _ ht1) ht2
lemma indep_of_indep_of_le_right {α} {m₁ m₂ m₃: measurable_space α} [measurable_space α]
{μ : measure α} (h_indep : indep m₁ m₂ μ) (h32 : m₃ ≤ m₂) :
indep m₁ m₃ μ :=
λ t1 t2 ht1 ht2, h_indep t1 t2 ht1 (h32 _ ht2)
lemma indep_sets.union {α} [measurable_space α] {s₁ s₂ s' : set (set α)} {μ : measure α}
(h₁ : indep_sets s₁ s' μ) (h₂ : indep_sets s₂ s' μ) :
indep_sets (s₁ ∪ s₂) s' μ :=
begin
intros t1 t2 ht1 ht2,
cases (set.mem_union _ _ _).mp ht1 with ht1₁ ht1₂,
{ exact h₁ t1 t2 ht1₁ ht2, },
{ exact h₂ t1 t2 ht1₂ ht2, },
end
@[simp] lemma indep_sets.union_iff {α} [measurable_space α] {s₁ s₂ s' : set (set α)}
{μ : measure α} :
indep_sets (s₁ ∪ s₂) s' μ ↔ indep_sets s₁ s' μ ∧ indep_sets s₂ s' μ :=
⟨λ h, ⟨indep_sets_of_indep_sets_of_le_left h (set.subset_union_left s₁ s₂),
indep_sets_of_indep_sets_of_le_left h (set.subset_union_right s₁ s₂)⟩,
λ h, indep_sets.union h.left h.right⟩
lemma indep_sets.Union {α ι} [measurable_space α] {s : ι → set (set α)} {s' : set (set α)}
{μ : measure α} (hyp : ∀ n, indep_sets (s n) s' μ) :
indep_sets (⋃ n, s n) s' μ :=
begin
intros t1 t2 ht1 ht2,
rw set.mem_Union at ht1,
cases ht1 with n ht1,
exact hyp n t1 t2 ht1 ht2,
end
lemma indep_sets.inter {α} [measurable_space α] {s₁ s' : set (set α)} (s₂ : set (set α))
{μ : measure α} (h₁ : indep_sets s₁ s' μ) :
indep_sets (s₁ ∩ s₂) s' μ :=
λ t1 t2 ht1 ht2, h₁ t1 t2 ((set.mem_inter_iff _ _ _).mp ht1).left ht2
lemma indep_sets.Inter {α ι} [measurable_space α] {s : ι → set (set α)} {s' : set (set α)}
{μ : measure α} (h : ∃ n, indep_sets (s n) s' μ) :
indep_sets (⋂ n, s n) s' μ :=
by {intros t1 t2 ht1 ht2, cases h with n h, exact h t1 t2 (set.mem_Inter.mp ht1 n) ht2 }
lemma indep_sets_singleton_iff {α} [measurable_space α] {s t : set α} {μ : measure α} :
indep_sets {s} {t} μ ↔ μ (s ∩ t) = μ s * μ t :=
⟨λ h, h s t rfl rfl,
λ h s1 t1 hs1 ht1, by rwa [set.mem_singleton_iff.mp hs1, set.mem_singleton_iff.mp ht1]⟩
end indep
/-! ### Deducing `indep` from `Indep` -/
section from_Indep_to_indep
lemma Indep_sets.indep_sets {α ι} {s : ι → set (set α)} [measurable_space α] {μ : measure α}
(h_indep : Indep_sets s μ) {i j : ι} (hij : i ≠ j) :
indep_sets (s i) (s j) μ :=
begin
intros t₁ t₂ ht₁ ht₂,
have hf_m : ∀ (x : ι), x ∈ {i, j} → (ite (x=i) t₁ t₂) ∈ s x,
{ intros x hx,
cases finset.mem_insert.mp hx with hx hx,
{ simp [hx, ht₁], },
{ simp [finset.mem_singleton.mp hx, hij.symm, ht₂], }, },
have h1 : t₁ = ite (i = i) t₁ t₂, by simp only [if_true, eq_self_iff_true],
have h2 : t₂ = ite (j = i) t₁ t₂, by simp only [hij.symm, if_false],
have h_inter : (⋂ (t : ι) (H : t ∈ ({i, j} : finset ι)), ite (t = i) t₁ t₂)
= (ite (i = i) t₁ t₂) ∩ (ite (j = i) t₁ t₂),
by simp only [finset.set_bInter_singleton, finset.set_bInter_insert],
have h_prod : (∏ (t : ι) in ({i, j} : finset ι), μ (ite (t = i) t₁ t₂))
= μ (ite (i = i) t₁ t₂) * μ (ite (j = i) t₁ t₂),
by simp only [hij, finset.prod_singleton, finset.prod_insert, not_false_iff,
finset.mem_singleton],
rw h1,
nth_rewrite 1 h2,
nth_rewrite 3 h2,
rw [←h_inter, ←h_prod, h_indep {i, j} hf_m],
end
lemma Indep.indep {α ι} {m : ι → measurable_space α} [measurable_space α] {μ : measure α}
(h_indep : Indep m μ) {i j : ι} (hij : i ≠ j) :
indep (m i) (m j) μ :=
begin
change indep_sets ((λ x, (m x).measurable_set') i) ((λ x, (m x).measurable_set') j) μ,
exact Indep_sets.indep_sets h_indep hij,
end
end from_Indep_to_indep
/-!
## π-system lemma
Independence of measurable spaces is equivalent to independence of generating π-systems.
-/
section from_measurable_spaces_to_sets_of_sets
/-! ### Independence of measurable space structures implies independence of generating π-systems -/
lemma Indep.Indep_sets {α ι} [measurable_space α] {μ : measure α} {m : ι → measurable_space α}
{s : ι → set (set α)} (hms : ∀ n, m n = measurable_space.generate_from (s n))
(h_indep : Indep m μ) :
Indep_sets s μ :=
begin
refine (λ S f hfs, h_indep S (λ x hxS, _)),
simp_rw hms x,
exact measurable_set_generate_from (hfs x hxS),
end
lemma indep.indep_sets {α} [measurable_space α] {μ : measure α} {s1 s2 : set (set α)}
(h_indep : indep (generate_from s1) (generate_from s2) μ) :
indep_sets s1 s2 μ :=
λ t1 t2 ht1 ht2, h_indep t1 t2 (measurable_set_generate_from ht1) (measurable_set_generate_from ht2)
end from_measurable_spaces_to_sets_of_sets
section from_pi_systems_to_measurable_spaces
/-! ### Independence of generating π-systems implies independence of measurable space structures -/
private lemma indep_sets.indep_aux {α} {m2 : measurable_space α}
{m : measurable_space α} {μ : measure α} [probability_measure μ] {p1 p2 : set (set α)}
(h2 : m2 ≤ m) (hp2 : is_pi_system p2) (hpm2 : m2 = generate_from p2)
(hyp : indep_sets p1 p2 μ) {t1 t2 : set α} (ht1 : t1 ∈ p1) (ht2m : m2.measurable_set' t2) :
μ (t1 ∩ t2) = μ t1 * μ t2 :=
begin
let μ_inter := μ.restrict t1,
let ν := (μ t1) • μ,
have h_univ : μ_inter set.univ = ν set.univ,
by rw [measure.restrict_apply_univ, measure.smul_apply, measure_univ, mul_one],
haveI : finite_measure μ_inter := @restrict.finite_measure α _ t1 μ ⟨measure_lt_top μ t1⟩,
rw [set.inter_comm, ←@measure.restrict_apply α _ μ t1 t2 (h2 t2 ht2m)],
refine ext_on_measurable_space_of_generate_finite m p2 (λ t ht, _) h2 hpm2 hp2 h_univ ht2m,
have ht2 : m.measurable_set' t,
{ refine h2 _ _,
rw hpm2,
exact measurable_set_generate_from ht, },
rw [measure.restrict_apply ht2, measure.smul_apply, set.inter_comm],
exact hyp t1 t ht1 ht,
end
lemma indep_sets.indep {α} {m1 m2 : measurable_space α} {m : measurable_space α}
{μ : measure α} [probability_measure μ] {p1 p2 : set (set α)} (h1 : m1 ≤ m) (h2 : m2 ≤ m)
(hp1 : is_pi_system p1) (hp2 : is_pi_system p2) (hpm1 : m1 = generate_from p1)
(hpm2 : m2 = generate_from p2) (hyp : indep_sets p1 p2 μ) :
indep m1 m2 μ :=
begin
intros t1 t2 ht1 ht2,
let μ_inter := μ.restrict t2,
let ν := (μ t2) • μ,
have h_univ : μ_inter set.univ = ν set.univ,
by rw [measure.restrict_apply_univ, measure.smul_apply, measure_univ, mul_one],
haveI : finite_measure μ_inter := @restrict.finite_measure α _ t2 μ ⟨measure_lt_top μ t2⟩,
rw [mul_comm, ←@measure.restrict_apply α _ μ t2 t1 (h1 t1 ht1)],
refine ext_on_measurable_space_of_generate_finite m p1 (λ t ht, _) h1 hpm1 hp1 h_univ ht1,
have ht1 : m.measurable_set' t,
{ refine h1 _ _,
rw hpm1,
exact measurable_set_generate_from ht, },
rw [measure.restrict_apply ht1, measure.smul_apply, mul_comm],
exact indep_sets.indep_aux h2 hp2 hpm2 hyp ht ht2,
end
end from_pi_systems_to_measurable_spaces
section indep_set
/-! ### Independence of measurable sets
We prove the following equivalences on `indep_set`, for measurable sets `s, t`.
* `indep_set s t μ ↔ μ (s ∩ t) = μ s * μ t`,
* `indep_set s t μ ↔ indep_sets {s} {t} μ`.
-/
variables {α : Type*} [measurable_space α] {s t : set α} (S T : set (set α))
lemma indep_set_iff_indep_sets_singleton (hs_meas : measurable_set s) (ht_meas : measurable_set t)
(μ : measure α . volume_tac) [probability_measure μ] :
indep_set s t μ ↔ indep_sets {s} {t} μ :=
⟨indep.indep_sets, λ h, indep_sets.indep
(generate_from_le (λ u hu, by rwa set.mem_singleton_iff.mp hu))
(generate_from_le (λ u hu, by rwa set.mem_singleton_iff.mp hu)) (is_pi_system.singleton s)
(is_pi_system.singleton t) rfl rfl h⟩
lemma indep_set_iff_measure_inter_eq_mul (hs_meas : measurable_set s) (ht_meas : measurable_set t)
(μ : measure α . volume_tac) [probability_measure μ] :
indep_set s t μ ↔ μ (s ∩ t) = μ s * μ t :=
(indep_set_iff_indep_sets_singleton hs_meas ht_meas μ).trans indep_sets_singleton_iff
lemma indep_sets.indep_set_of_mem (hs : s ∈ S) (ht : t ∈ T) (hs_meas : measurable_set s)
(ht_meas : measurable_set t) (μ : measure α . volume_tac) [probability_measure μ]
(h_indep : indep_sets S T μ) :
indep_set s t μ :=
(indep_set_iff_measure_inter_eq_mul hs_meas ht_meas μ).mpr (h_indep s t hs ht)
end indep_set
end probability_theory
|
And mid-May's eldest child,
|
[STATEMENT]
lemma derived_set_of_finite:
"\<lbrakk>Hausdorff_space X; finite S\<rbrakk> \<Longrightarrow> X derived_set_of S = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Hausdorff_space X; finite S\<rbrakk> \<Longrightarrow> X derived_set_of S = {}
[PROOF STEP]
using Hausdorff_imp_t1_space t1_space_derived_set_of_finite
[PROOF STATE]
proof (prove)
using this:
Hausdorff_space ?X \<Longrightarrow> t1_space ?X
t1_space ?X = (\<forall>S. finite S \<longrightarrow> ?X derived_set_of S = {})
goal (1 subgoal):
1. \<lbrakk>Hausdorff_space X; finite S\<rbrakk> \<Longrightarrow> X derived_set_of S = {}
[PROOF STEP]
by auto |
== Media figure and business interests ==
|
utils::globalVariables("calculated_groups")
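# The call above registers 'calculated_groups' so R CMD check does not flag it
# as an undefined global; the environment created below serves (as the name
# suggests) as a hashed cache for computed group values.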
calculated_groups <- new.env(hash = TRUE) |
section "Amortized Complexity (Unary Operations)"
theory Amortized_Framework0
imports Complex_Main
begin
text\<open>
This theory provides a simple amortized analysis framework where all operations
act on a single data type, i.e. no union-like operations. This is the basis of
the ITP 2015 paper by Nipkow. Although it is superseded by the model in
\<open>Amortized_Framework\<close> that allows arbitrarily many parameters, it is still
of interest because of its simplicity.\<close>
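text\<open>In outline, the potential method works as follows: defining the amortized cost
of step \<open>i\<close> as \<open>a f i = t (f i) (state f i) + \<Phi>(state f (i+1)) - \<Phi>(state f i)\<close>,
the real costs telescope to \<open>(\<Sum>i<n. t (f i) (state f i)) = (\<Sum>i<n. a f i) - \<Phi>(state f n)\<close>
because \<open>\<Phi> init = 0\<close>; since \<open>\<Phi> \<ge> 0\<close> and each \<open>a f i \<le> U (f i) (state f i)\<close>,
the total real cost is bounded by \<open>\<Sum>i<n. U (f i) (state f i)\<close>.
This is exactly the chain of lemmas \<open>aeq\<close>, \<open>ta\<close>, \<open>aa1\<close> and \<open>ub\<close> below.\<close>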
locale Amortized =
fixes init :: "'s"
fixes nxt :: "'o \<Rightarrow> 's \<Rightarrow> 's"
fixes inv :: "'s \<Rightarrow> bool"
fixes t :: "'o \<Rightarrow> 's \<Rightarrow> real"
fixes \<Phi> :: "'s \<Rightarrow> real"
fixes U :: "'o \<Rightarrow> 's \<Rightarrow> real"
assumes inv_init: "inv init"
assumes inv_nxt: "inv s \<Longrightarrow> inv(nxt f s)"
assumes ppos: "inv s \<Longrightarrow> \<Phi> s \<ge> 0"
assumes p0: "\<Phi> init = 0"
assumes U: "inv s \<Longrightarrow> t f s + \<Phi>(nxt f s) - \<Phi> s \<le> U f s"
begin
fun state :: "(nat \<Rightarrow> 'o) \<Rightarrow> nat \<Rightarrow> 's" where
"state f 0 = init" |
"state f (Suc n) = nxt (f n) (state f n)"
lemma inv_state: "inv(state f n)"
by(induction n)(simp_all add: inv_init inv_nxt)
definition a :: "(nat \<Rightarrow> 'o) \<Rightarrow> nat \<Rightarrow> real" where
"a f i = t (f i) (state f i) + \<Phi>(state f (i+1)) - \<Phi>(state f i)"
lemma aeq: "(\<Sum>i<n. t (f i) (state f i)) = (\<Sum>i<n. a f i) - \<Phi>(state f n)"
apply(induction n)
apply (simp add: p0)
apply (simp add: a_def)
done
corollary ta: "(\<Sum>i<n. t (f i) (state f i)) \<le> (\<Sum>i<n. a f i)"
by (metis add.commute aeq diff_add_cancel le_add_same_cancel2 ppos[OF inv_state])
lemma aa1: "a f i \<le> U (f i) (state f i)"
by(simp add: a_def U inv_state)
lemma ub: "(\<Sum>i<n. t (f i) (state f i)) \<le> (\<Sum>i<n. U (f i) (state f i))"
by (metis (mono_tags) aa1 order.trans sum_mono ta)
end
subsection "Binary Counter"
fun incr where
"incr [] = [True]" |
"incr (False#bs) = True # bs" |
"incr (True#bs) = False # incr bs"
fun t\<^sub>i\<^sub>n\<^sub>c\<^sub>r :: "bool list \<Rightarrow> real" where
"t\<^sub>i\<^sub>n\<^sub>c\<^sub>r [] = 1" |
"t\<^sub>i\<^sub>n\<^sub>c\<^sub>r (False#bs) = 1" |
"t\<^sub>i\<^sub>n\<^sub>c\<^sub>r (True#bs) = t\<^sub>i\<^sub>n\<^sub>c\<^sub>r bs + 1"
definition p_incr :: "bool list \<Rightarrow> real" ("\<Phi>\<^sub>i\<^sub>n\<^sub>c\<^sub>r") where
"\<Phi>\<^sub>i\<^sub>n\<^sub>c\<^sub>r bs = length(filter id bs)"
lemma a_incr: "t\<^sub>i\<^sub>n\<^sub>c\<^sub>r bs + \<Phi>\<^sub>i\<^sub>n\<^sub>c\<^sub>r(incr bs) - \<Phi>\<^sub>i\<^sub>n\<^sub>c\<^sub>r bs = 2"
apply(induction bs rule: incr.induct)
apply (simp_all add: p_incr_def)
done
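text\<open>
If \<open>bs\<close> begins with \<open>k\<close> \<open>True\<close>s (the least significant bits), \<open>incr\<close> flips them to
\<open>False\<close> and sets the following bit, so the real cost is \<open>k+1\<close> while the potential
(the number of set bits) changes by \<open>1 - k\<close>; hence the constant amortized cost \<open>2\<close>
of \<open>a_incr\<close>.\<close>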
interpretation incr: Amortized
where init = "[]" and nxt = "%_. incr" and inv = "\<lambda>_. True"
and t = "\<lambda>_. t\<^sub>i\<^sub>n\<^sub>c\<^sub>r" and \<Phi> = \<Phi>\<^sub>i\<^sub>n\<^sub>c\<^sub>r and U = "\<lambda>_ _. 2"
proof (standard, goal_cases)
case 1 show ?case by simp
next
case 2 show ?case by simp
next
case 3 show ?case by(simp add: p_incr_def)
next
case 4 show ?case by(simp add: p_incr_def)
next
case 5 show ?case by(simp add: a_incr)
qed
thm incr.ub
subsection "Dynamic tables: insert only"
fun t\<^sub>i\<^sub>n\<^sub>s :: "nat \<times> nat \<Rightarrow> real" where
"t\<^sub>i\<^sub>n\<^sub>s (n,l) = (if n<l then 1 else n+1)"
interpretation ins: Amortized
where init = "(0::nat,0::nat)"
and nxt = "\<lambda>_ (n,l). (n+1, if n<l then l else if l=0 then 1 else 2*l)"
and inv = "\<lambda>(n,l). if l=0 then n=0 else n \<le> l \<and> l < 2*n"
and t = "\<lambda>_. t\<^sub>i\<^sub>n\<^sub>s" and \<Phi> = "\<lambda>(n,l). 2*n - l" and U = "\<lambda>_ _. 3"
proof (standard, goal_cases)
case 1 show ?case by auto
next
case (2 s) thus ?case by(cases s) auto
next
case (3 s) thus ?case by(cases s)(simp split: if_splits)
next
case 4 show ?case by(simp)
next
case (5 s) thus ?case by(cases s) auto
qed
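text\<open>
The interesting case is a full table (\<open>n = l > 0\<close>): the insert costs \<open>n+1\<close> and the
size doubles, so the potential \<open>2*n - l\<close> drops from \<open>l\<close> to \<open>2\<close>, and the amortized
cost is \<open>(l+1) + 2 - l = 3\<close>, exactly the bound \<open>U = 3\<close> used above.\<close>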
locale table_insert =
fixes a :: real
fixes c :: real
assumes c1[arith]: "c > 1"
assumes ac2: "a \<ge> c/(c - 1)"
begin
lemma ac: "a \<ge> 1/(c - 1)"
using ac2 by(simp add: field_simps)
lemma a0[arith]: "a>0"
proof-
have "1/(c - 1) > 0" using ac by simp
thus ?thesis by (metis ac dual_order.strict_trans1)
qed
definition "b = 1/(c - 1)"
lemma b0[arith]: "b > 0"
using ac by (simp add: b_def)
fun "ins" :: "nat * nat \<Rightarrow> nat * nat" where
"ins(n,l) = (n+1, if n<l then l else if l=0 then 1 else nat(ceiling(c*l)))"
fun pins :: "nat * nat => real" where
"pins(n,l) = a*n - b*l"
interpretation ins: Amortized
where init = "(0,0)" and nxt = "%_. ins"
and inv = "\<lambda>(n,l). if l=0 then n=0 else n \<le> l \<and> (b/a)*l \<le> n"
and t = "\<lambda>_. t\<^sub>i\<^sub>n\<^sub>s" and \<Phi> = pins and U = "\<lambda>_ _. a + 1"
proof (standard, goal_cases)
case 1 show ?case by auto
next
case (2 s)
show ?case
proof (cases s)
case [simp]: (Pair n l)
show ?thesis
proof cases
assume "l=0" thus ?thesis using 2 ac
by (simp add: b_def field_simps)
next
assume "l\<noteq>0"
show ?thesis
proof cases
assume "n<l"
thus ?thesis using 2 by(simp add: algebra_simps)
next
assume "\<not> n<l"
hence [simp]: "n=l" using 2 \<open>l\<noteq>0\<close> by simp
have 1: "(b/a) * ceiling(c * l) \<le> real l + 1"
proof-
have "(b/a) * ceiling(c * l) = ceiling(c * l)/(a*(c - 1))"
by(simp add: b_def)
also have "ceiling(c * l) \<le> c*l + 1" by simp
also have "\<dots> \<le> c*(real l+1)" by (simp add: algebra_simps)
also have "\<dots> / (a*(c - 1)) = (c/(a*(c - 1))) * (real l + 1)" by simp
also have "c/(a*(c - 1)) \<le> 1" using ac2 by (simp add: field_simps)
finally show ?thesis by (simp add: divide_right_mono)
qed
have 2: "real l + 1 \<le> ceiling(c * real l)"
proof-
have "real l + 1 = of_int(int(l)) + 1" by simp
also have "... \<le> ceiling(c * real l)" using \<open>l \<noteq> 0\<close>
by(simp only: int_less_real_le[symmetric] less_ceiling_iff)
(simp add: mult_less_cancel_right1)
finally show ?thesis .
qed
from \<open>l\<noteq>0\<close> 1 2 show ?thesis by simp (simp add: not_le zero_less_mult_iff)
qed
qed
qed
next
case (3 s) thus ?case by(cases s)(simp add: field_simps split: if_splits)
next
case 4 show ?case by(simp)
next
case (5 s)
show ?case
proof (cases s)
case [simp]: (Pair n l)
show ?thesis
proof cases
assume "l=0" thus ?thesis using 5 by (simp)
next
assume [arith]: "l\<noteq>0"
show ?thesis
proof cases
assume "n<l"
thus ?thesis using 5 ac by(simp add: algebra_simps b_def)
next
assume "\<not> n<l"
hence [simp]: "n=l" using 5 by simp
have "t\<^sub>i\<^sub>n\<^sub>s s + pins (ins s) - pins s = l + a + 1 + (- b*ceiling(c*l)) + b*l"
using \<open>l\<noteq>0\<close>
by(simp add: algebra_simps less_trans[of "-1::real" 0])
also have "- b * ceiling(c*l) \<le> - b * (c*l)" by (simp add: ceiling_correct)
also have "l + a + 1 + - b*(c*l) + b*l = a + 1 + l*(1 - b*(c - 1))"
by (simp add: algebra_simps)
also have "b*(c - 1) = 1" by(simp add: b_def)
also have "a + 1 + (real l)*(1 - 1) = a+1" by simp
finally show ?thesis by simp
qed
qed
qed
qed
thm ins.ub
end
subsection "Stack with multipop"
datatype 'a op\<^sub>s\<^sub>t\<^sub>k = Push 'a | Pop nat
fun nxt\<^sub>s\<^sub>t\<^sub>k :: "'a op\<^sub>s\<^sub>t\<^sub>k \<Rightarrow> 'a list \<Rightarrow> 'a list" where
"nxt\<^sub>s\<^sub>t\<^sub>k (Push x) xs = x # xs" |
"nxt\<^sub>s\<^sub>t\<^sub>k (Pop n) xs = drop n xs"
fun t\<^sub>s\<^sub>t\<^sub>k:: "'a op\<^sub>s\<^sub>t\<^sub>k \<Rightarrow> 'a list \<Rightarrow> real" where
"t\<^sub>s\<^sub>t\<^sub>k (Push x) xs = 1" |
"t\<^sub>s\<^sub>t\<^sub>k (Pop n) xs = min n (length xs)"
interpretation stack: Amortized
where init = "[]" and nxt = nxt\<^sub>s\<^sub>t\<^sub>k and inv = "\<lambda>_. True"
and t = t\<^sub>s\<^sub>t\<^sub>k and \<Phi> = "length" and U = "\<lambda>f _. case f of Push _ \<Rightarrow> 2 | Pop _ \<Rightarrow> 0"
proof (standard, goal_cases)
case 1 show ?case by auto
next
case (2 s) thus ?case by(cases s) auto
next
case 3 thus ?case by simp
next
case 4 show ?case by(simp)
next
case (5 _ f) thus ?case by (cases f) auto
qed
subsection "Queue"
text\<open>See, for example, the book by Okasaki~\cite{Okasaki}.\<close>
datatype 'a op\<^sub>q = Enq 'a | Deq
type_synonym 'a queue = "'a list * 'a list"
fun nxt\<^sub>q :: "'a op\<^sub>q \<Rightarrow> 'a queue \<Rightarrow> 'a queue" where
"nxt\<^sub>q (Enq x) (xs,ys) = (x#xs,ys)" |
"nxt\<^sub>q Deq (xs,ys) = (if ys = [] then ([], tl(rev xs)) else (xs,tl ys))"
fun t\<^sub>q :: "'a op\<^sub>q \<Rightarrow> 'a queue \<Rightarrow> real" where
"t\<^sub>q (Enq x) (xs,ys) = 1" |
"t\<^sub>q Deq (xs,ys) = (if ys = [] then length xs else 0)"
interpretation queue: Amortized
where init = "([],[])" and nxt = nxt\<^sub>q and inv = "\<lambda>_. True"
and t = t\<^sub>q and \<Phi> = "\<lambda>(xs,ys). length xs" and U = "\<lambda>f _. case f of Enq _ \<Rightarrow> 2 | Deq \<Rightarrow> 0"
proof (standard, goal_cases)
case 1 show ?case by auto
next
case (2 s) thus ?case by(cases s) auto
next
case (3 s) thus ?case by(cases s) auto
next
case 4 show ?case by(simp)
next
case (5 s f) thus ?case
apply(cases s)
apply(cases f)
by auto
qed
fun balance :: "'a queue \<Rightarrow> 'a queue" where
"balance(xs,ys) = (if size xs \<le> size ys then (xs,ys) else ([], ys @ rev xs))"
fun nxt_q2 :: "'a op\<^sub>q \<Rightarrow> 'a queue \<Rightarrow> 'a queue" where
"nxt_q2 (Enq a) (xs,ys) = balance (a#xs,ys)" |
"nxt_q2 Deq (xs,ys) = balance (xs, tl ys)"
fun t_q2 :: "'a op\<^sub>q \<Rightarrow> 'a queue \<Rightarrow> real" where
"t_q2 (Enq _) (xs,ys) = 1 + (if size xs + 1 \<le> size ys then 0 else size xs + 1 + size ys)" |
"t_q2 Deq (xs,ys) = (if size xs \<le> size ys - 1 then 0 else size xs + (size ys - 1))"
interpretation queue2: Amortized
where init = "([],[])" and nxt = nxt_q2
and inv = "\<lambda>(xs,ys). size xs \<le> size ys"
and t = t_q2 and \<Phi> = "\<lambda>(xs,ys). 2 * size xs"
and U = "\<lambda>f _. case f of Enq _ \<Rightarrow> 3 | Deq \<Rightarrow> 0"
proof (standard, goal_cases)
case 1 show ?case by auto
next
case (2 s f) thus ?case by(cases s) (cases f, auto)
next
case (3 s) thus ?case by(cases s) auto
next
case 4 show ?case by(simp)
next
case (5 s f) thus ?case
apply(cases s)
apply(cases f)
by (auto simp: split: prod.splits)
qed
subsection "Dynamic tables: insert and delete"
datatype op\<^sub>t\<^sub>b = Ins | Del
fun nxt\<^sub>t\<^sub>b :: "op\<^sub>t\<^sub>b \<Rightarrow> nat*nat \<Rightarrow> nat*nat" where
"nxt\<^sub>t\<^sub>b Ins (n,l) = (n+1, if n<l then l else if l=0 then 1 else 2*l)" |
"nxt\<^sub>t\<^sub>b Del (n,l) = (n - 1, if n=1 then 0 else if 4*(n - 1)<l then l div 2 else l)"
fun t\<^sub>t\<^sub>b :: "op\<^sub>t\<^sub>b \<Rightarrow> nat*nat \<Rightarrow> real" where
"t\<^sub>t\<^sub>b Ins (n,l) = (if n<l then 1 else n+1)" |
"t\<^sub>t\<^sub>b Del (n,l) = (if n=1 then 1 else if 4*(n - 1)<l then n else 1)"
interpretation tb: Amortized
where init = "(0,0)" and nxt = nxt\<^sub>t\<^sub>b
and inv = "\<lambda>(n,l). if l=0 then n=0 else n \<le> l \<and> l \<le> 4*n"
and t = t\<^sub>t\<^sub>b and \<Phi> = "(\<lambda>(n,l). if 2*n < l then l/2 - n else 2*n - l)"
and U = "\<lambda>f _. case f of Ins \<Rightarrow> 3 | Del \<Rightarrow> 2"
proof (standard, goal_cases)
case 1 show ?case by auto
next
case (2 s f) thus ?case by(cases s, cases f) (auto split: if_splits)
next
case (3 s) thus ?case by(cases s)(simp split: if_splits)
next
case 4 show ?case by(simp)
next
case (5 s f) thus ?case apply(cases s) apply(cases f)
by (auto simp: field_simps)
qed
end
|
<a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/bayes/numpyro_intro.ipynb" target="_parent"></a>
```
# This notebook illustrates how to use numpyro
# https://github.com/pyro-ppl/numpyro
# Speed comparison with TFP
# https://rlouf.github.io/post/jax-random-walk-metropolis/
# Speed comparison with pymc3
# https://www.kaggle.com/s903124/numpyro-speed-benchmark
```
# Installation
```
# Standard Python libraries
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import time
#import numpy as np
#np.set_printoptions(precision=3)
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
from IPython import display
%matplotlib inline
import sklearn
import seaborn as sns;
sns.set(style="ticks", color_codes=True)
import pandas as pd
pd.set_option('precision', 2) # 2 decimal places
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 30)
pd.set_option('display.width', 100) # wide windows
```
/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
```
# As of 5/25/20, colab has jax=0.1.67 and jaxlib=0.1.47 builtin
import jax
import jax.numpy as np
import numpy as onp # original numpy
from jax import grad, hessian, jit, vmap, random
print("jax version {}".format(jax.__version__))
```
jax version 0.1.67
```
# Check if GPU is available
!nvidia-smi
# Check if JAX is using GPU
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
```
Mon May 25 21:46:40 2020
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.82 Driver Version: 418.67 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |
| N/A 41C P0 27W / 250W | 0MiB / 16280MiB | 0% Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
jax backend gpu
```
#https://github.com/pyro-ppl/numpyro/issues/531
# https://github.com/pyro-ppl/numpyro
!pip install numpyro # requires jax=0.1.57, jaxlib=0.1.37
print("jax version {}".format(jax.__version__))
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
```
Collecting numpyro
  Downloading https://files.pythonhosted.org/packages/b8/58/54e914bb6d8ee9196f8dbf28b81057fea81871fc171dbee03b790336d0c5/numpyro-0.2.4-py3-none-any.whl (159kB)
     |████████████████████████████████| 163kB 6.4MB/s
Collecting jaxlib==0.1.37
  Downloading https://files.pythonhosted.org/packages/24/bf/e181454464b866f30f09b5d74d1dd08e8b15e032716d8bcc531c659776ab/jaxlib-0.1.37-cp36-none-manylinux2010_x86_64.whl (25.4MB)
     |████████████████████████████████| 25.4MB 4.8MB/s
Collecting jax==0.1.57
  Downloading https://files.pythonhosted.org/packages/ae/f2/ea981ed2659f70a1d8286ce41b5e74f1d9df844c1c6be6696144ed3f2932/jax-0.1.57.tar.gz (255kB)
     |████████████████████████████████| 256kB 42.9MB/s
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from numpyro) (4.41.1)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37->numpyro) (1.4.1)
Requirement already satisfied: numpy>=1.12 in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37->numpyro) (1.18.4)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37->numpyro) (1.12.0)
Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37->numpyro) (3.10.0)
Requirement already satisfied: absl-py in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37->numpyro) (0.9.0)
Requirement already satisfied: opt_einsum in /usr/local/lib/python3.6/dist-packages (from jax==0.1.57->numpyro) (3.2.1)
Collecting fastcache
Downloading https://files.pythonhosted.org/packages/5f/a3/b280cba4b4abfe5f5bdc643e6c9d81bf3b9dc2148a11e5df06b6ba85a560/fastcache-1.1.0.tar.gz
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.0->jaxlib==0.1.37->numpyro) (46.3.0)
Building wheels for collected packages: jax, fastcache
Building wheel for jax (setup.py) ... done
Created wheel for jax: filename=jax-0.1.57-cp36-none-any.whl size=297709 sha256=c2bea348ae9097f522298dfd173b13b80d87b6b3a8694218cc7f41561b5baef6
Stored in directory: /root/.cache/pip/wheels/8a/b4/75/859bcdaf181569124306615bd9b68c747725c60bfa68826378
Building wheel for fastcache (setup.py) ... done
Created wheel for fastcache: filename=fastcache-1.1.0-cp36-cp36m-linux_x86_64.whl size=39211 sha256=8ccffe72cb0f057afa1a0020eba9ee41bfde1c83b251e4a4f8c5051f751b9233
Stored in directory: /root/.cache/pip/wheels/6a/80/bf/30024738b03fa5aa521e2a2ac952a8d77d0c65e68d92bcd3b6
Successfully built jax fastcache
Installing collected packages: jaxlib, fastcache, jax, numpyro
Found existing installation: jaxlib 0.1.47
Uninstalling jaxlib-0.1.47:
Successfully uninstalled jaxlib-0.1.47
Found existing installation: jax 0.1.67
Uninstalling jax-0.1.67:
Successfully uninstalled jax-0.1.67
Successfully installed fastcache-1.1.0 jax-0.1.57 jaxlib-0.1.37 numpyro-0.2.4
jax version 0.1.67
jax backend gpu
```
'''
#https://github.com/pyro-ppl/numpyro/issues/531
#!pip install --upgrade jax==0.1.57
#!pip install --upgrade jaxlib==0.1.37
#!pip install --upgrade -q https://storage.googleapis.com/jax-releases/cuda$(echo $CUDA_VERSION | sed -e 's/\.//' -e 's/\..*//')/jaxlib-$(pip search jaxlib | grep -oP '[0-9\.]+' | head -n 1)-cp36-none-linux_x86_64.whl
#!pip install --upgrade -q jax
ver = !echo $CUDA_VERSION
print(ver)
# install jaxlib
PYTHON_VERSION='cp36' # alternatives: cp36, cp37, cp38
CUDA_VERSION='cuda101' # alternatives: cuda92, cuda100, cuda101, cuda102
PLATFORM='linux_x86_64' # alternatives: linux_x86_64
BASE_URL='https://storage.googleapis.com/jax-releases'
fname = f'{BASE_URL}/{CUDA_VERSION}/jaxlib-0.1.37-{PYTHON_VERSION}-none-{PLATFORM}.whl'
print(fname)
#!pip install --upgrade $BASE_URL/$CUDA_VERSION/jaxlib-0.1.37-$PYTHON_VERSION-none-$PLATFORM.whl
!pip install --upgrade $fname
!pip install numpyro
!pip install --upgrade jax==0.1.57
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
'''
```
['cuda101']
https://storage.googleapis.com/jax-releases/cuda101/jaxlib-0.1.37-cp36-none-linux_x86_64.whl
Collecting jaxlib==0.1.37
  Downloading https://storage.googleapis.com/jax-releases/cuda101/jaxlib-0.1.37-cp36-none-linux_x86_64.whl (48.3MB)
     |████████████████████████████████| 48.3MB 66kB/s
Requirement already satisfied, skipping upgrade: scipy in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37) (1.4.1)
Requirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37) (1.12.0)
Requirement already satisfied, skipping upgrade: numpy>=1.12 in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37) (1.18.4)
Requirement already satisfied, skipping upgrade: absl-py in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37) (0.9.0)
Requirement already satisfied, skipping upgrade: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from jaxlib==0.1.37) (3.10.0)
Requirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.0->jaxlib==0.1.37) (46.3.0)
ERROR: numpyro 0.2.4 has requirement jax>=0.1.65, but you'll have jax 0.1.57 which is incompatible.
ERROR: numpyro 0.2.4 has requirement jaxlib>=0.1.45, but you'll have jaxlib 0.1.37 which is incompatible.
Installing collected packages: jaxlib
Found existing installation: jaxlib 0.1.47
Uninstalling jaxlib-0.1.47:
Successfully uninstalled jaxlib-0.1.47
Successfully installed jaxlib-0.1.37
Requirement already satisfied: numpyro in ./numpyro (0.2.4)
Processing /root/.cache/pip/wheels/3d/8d/d8/b0463ab20eb85b4ae7c602f7fbc0bd890f2af483b61e6d6096/jax-0.1.68-cp36-none-any.whl
Collecting jaxlib>=0.1.45
Using cached https://files.pythonhosted.org/packages/ea/c0/64c0e5a2c6da1d3ffdec95da74abf14df2c7508776ff5f155461fec1ef1d/jaxlib-0.1.47-cp36-none-manylinux2010_x86_64.whl
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from numpyro) (4.41.1)
Requirement already satisfied: numpy>=1.12 in /usr/local/lib/python3.6/dist-packages (from jax>=0.1.65->numpyro) (1.18.4)
Requirement already satisfied: opt-einsum in /usr/local/lib/python3.6/dist-packages (from jax>=0.1.65->numpyro) (3.2.1)
Requirement already satisfied: absl-py in /usr/local/lib/python3.6/dist-packages (from jax>=0.1.65->numpyro) (0.9.0)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from jaxlib>=0.1.45->numpyro) (1.4.1)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from absl-py->jax>=0.1.65->numpyro) (1.12.0)
Installing collected packages: jax, jaxlib
Found existing installation: jax 0.1.57
Uninstalling jax-0.1.57:
Successfully uninstalled jax-0.1.57
Found existing installation: jaxlib 0.1.37
Uninstalling jaxlib-0.1.37:
Successfully uninstalled jaxlib-0.1.37
Successfully installed jax-0.1.68 jaxlib-0.1.47
Processing /root/.cache/pip/wheels/8a/b4/75/859bcdaf181569124306615bd9b68c747725c60bfa68826378/jax-0.1.57-cp36-none-any.whl
Requirement already satisfied, skipping upgrade: fastcache in /usr/local/lib/python3.6/dist-packages (from jax==0.1.57) (1.1.0)
Requirement already satisfied, skipping upgrade: numpy>=1.12 in /usr/local/lib/python3.6/dist-packages (from jax==0.1.57) (1.18.4)
Requirement already satisfied, skipping upgrade: absl-py in /usr/local/lib/python3.6/dist-packages (from jax==0.1.57) (0.9.0)
Requirement already satisfied, skipping upgrade: opt-einsum in /usr/local/lib/python3.6/dist-packages (from jax==0.1.57) (3.2.1)
Requirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from absl-py->jax==0.1.57) (1.12.0)
ERROR: numpyro 0.2.4 has requirement jax>=0.1.65, but you'll have jax 0.1.57 which is incompatible.
Installing collected packages: jax
Found existing installation: jax 0.1.68
Uninstalling jax-0.1.68:
Successfully uninstalled jax-0.1.68
Successfully installed jax-0.1.57
jax backend cpu
```
'''
# The latest version uses jax >= 0.1.65, jaxlib >= 0.1.45
# https://github.com/pyro-ppl/numpyro/blob/master/setup.py
#https://medium.com/@ashwindesilva/how-to-use-google-colaboratory-to-clone-a-github-repository-e07cf8d3d22b
!git clone https://github.com/pyro-ppl/numpyro.git
%cd numpyro
!pip install -e .[dev]
print("jax version {}".format(jax.__version__))
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
'''
```
fatal: destination path 'numpyro' already exists and is not an empty directory.
/content/numpyro
Obtaining file:///content/numpyro
Requirement already satisfied: jax>=0.1.65 in /usr/local/lib/python3.6/dist-packages (from numpyro==0.2.4) (0.1.68)
Requirement already satisfied: jaxlib>=0.1.45 in /usr/local/lib/python3.6/dist-packages (from numpyro==0.2.4) (0.1.47)
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from numpyro==0.2.4) (4.41.1)
Requirement already satisfied: ipython in /usr/local/lib/python3.6/dist-packages (from numpyro==0.2.4) (5.5.0)
Requirement already satisfied: isort in /usr/local/lib/python3.6/dist-packages (from numpyro==0.2.4) (4.3.21)
Requirement already satisfied: absl-py in /usr/local/lib/python3.6/dist-packages (from jax>=0.1.65->numpyro==0.2.4) (0.9.0)
Requirement already satisfied: opt-einsum in /usr/local/lib/python3.6/dist-packages (from jax>=0.1.65->numpyro==0.2.4) (3.2.1)
Requirement already satisfied: numpy>=1.12 in /usr/local/lib/python3.6/dist-packages (from jax>=0.1.65->numpyro==0.2.4) (1.18.4)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from jaxlib>=0.1.45->numpyro==0.2.4) (1.4.1)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython->numpyro==0.2.4) (0.7.5)
Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython->numpyro==0.2.4) (0.8.1)
Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.6/dist-packages (from ipython->numpyro==0.2.4) (4.3.3)
Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython->numpyro==0.2.4) (1.0.18)
Requirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from ipython->numpyro==0.2.4) (4.4.2)
Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython->numpyro==0.2.4) (2.1.3)
Requirement already satisfied: pexpect; sys_platform != "win32" in /usr/local/lib/python3.6/dist-packages (from ipython->numpyro==0.2.4) (4.8.0)
Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.6/dist-packages (from ipython->numpyro==0.2.4) (46.3.0)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from absl-py->jax>=0.1.65->numpyro==0.2.4) (1.12.0)
Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from traitlets>=4.2->ipython->numpyro==0.2.4) (0.2.0)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->numpyro==0.2.4) (0.1.9)
Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.6/dist-packages (from pexpect; sys_platform != "win32"->ipython->numpyro==0.2.4) (0.6.0)
Installing collected packages: numpyro
Found existing installation: numpyro 0.2.4
Can't uninstall 'numpyro'. No files were found to uninstall.
Running setup.py develop for numpyro
Successfully installed numpyro
# Distributions
```
import numpyro
import numpyro.distributions as dist
from numpyro.diagnostics import hpdi
from numpyro.distributions.transforms import AffineTransform
from numpyro.infer import MCMC, NUTS, Predictive
rng_key = random.PRNGKey(0)
rng_key, rng_key_ = random.split(rng_key)
```
## 1d Gaussian
```
# 2 independent 1d gaussians (ie 1 diagonal Gaussian)
mu = 1.5
sigma = 2
d = dist.Normal(mu, sigma)
dir(d)
```
['__call__',
'__class__',
'__delattr__',
'__dict__',
'__dir__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__gt__',
'__hash__',
'__init__',
'__init_subclass__',
'__le__',
'__lt__',
'__module__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__setattr__',
'__sizeof__',
'__str__',
'__subclasshook__',
'__weakref__',
'_batch_shape',
'_event_shape',
'_validate_args',
'_validate_sample',
'arg_constraints',
'batch_shape',
'event_shape',
'icdf',
'loc',
'log_prob',
'mean',
'reparametrized_params',
'sample',
'sample_with_intermediates',
'scale',
'set_default_validate_args',
'support',
'to_event',
'transform_with_intermediates',
'variance']
```
#rng_key, rng_key_ = random.split(rng_key)
nsamples = 1000
ys = d.sample(rng_key_, (nsamples,))
print(ys.shape)
mu_hat = np.mean(ys,0)
print(mu_hat)
sigma_hat = np.std(ys, 0)
print(sigma_hat)
```
(1000,)
1.4788736
2.0460527
## Multivariate Gaussian
```
mu = np.array([-1, 1])
sigma = np.array([1, 2])
Sigma = np.diag(sigma)
d2 = dist.MultivariateNormal(mu, Sigma)
```
```
#rng_key, rng_key_ = random.split(rng_key)
nsamples = 1000
ys = d2.sample(rng_key_, (nsamples,))
print(ys.shape)
mu_hat = np.mean(ys,0)
print(mu_hat)
Sigma_hat = onp.cov(ys, rowvar=False) #jax.np.cov not implemented
print(Sigma_hat)
```
(1000, 2)
[-0.9644672 0.99415004]
[[0.93275181 0.0756547 ]
[0.0756547 1.91598212]]
## Shape semantics
Numpyro, [Pyro](https://pyro.ai/examples/tensor_shapes.html) and [TFP](https://www.tensorflow.org/probability/examples/Understanding_TensorFlow_Distributions_Shapes) all distinguish between 'event shape' and 'batch shape'.
For a D-dimensional Gaussian, the event shape is (D,) and the batch shape
is (), meaning we have a single instance of this distribution.
If the covariance is diagonal, we can view this as D independent
1d Gaussians stored along the batch dimension; each component then has event shape () and the batch shape is (D,).
When we sample from a distribution, we also specify the sample_shape.
Suppose we draw N samples from a single D-dim diagonal Gaussian,
and N samples from D 1d Gaussians. These samples will have the same shape.
However, the semantics of log_prob differ.
We illustrate this below.
```
d2 = dist.MultivariateNormal(mu, Sigma)
print(d2.event_shape)
print(d2.batch_shape)
nsamples = 1000
ys2 = d2.sample(rng_key_, (nsamples,))
print(ys2.shape)
# 2 independent 1d gaussians (same as one 2d diagonal Gaussian)
d3 = dist.Normal(mu, np.diag(Sigma))
print(d3.event_shape)
print(d3.batch_shape)
ys3 = d3.sample(rng_key_, (nsamples,))
print(ys3.shape)
print(np.allclose(ys2, ys3))
```
(2,)
()
(1000, 2)
()
(2,)
(1000, 2)
True
```
y = ys2[0,:] # 2 numbers
print(d2.log_prob(y)) # log prob of a single 2d distribution on 2d input
print(d3.log_prob(y)) # log prob of two 1d distributions on 2d input
```
-2.1086864
[-1.1897303 -0.9189563]
We can turn a set of independent distributions into a single product
distribution using the [Independent class](http://num.pyro.ai/en/stable/distributions.html#independent)
```
d4 = dist.Independent(d3, 1) # treat the first batch dimension as an event dimension
print(d4.event_shape)
print(d4.batch_shape)
print(d4.log_prob(y))
```
(2,)
()
-2.1086864
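As a quick numerical check (a minimal sketch; it assumes, as the outputs above confirm, that `Independent` simply sums the per-dimension log-densities):
```
# The sum of the two 1d log-densities reproduces the joint log-density.
print(np.sum(d3.log_prob(y)))  # expect -2.1086864, matching d2 and d4
```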
# Posterior inference with MCMC
## Example: 1d Gaussian with unknown mean.
We use the simple example from the [Pyro intro](https://pyro.ai/examples/intro_part_ii.html#A-Simple-Example). The goal is to infer the weight $\theta$ of an object, given noisy measurements $y$. We assume the following model:
$$
\begin{align}
\theta &\sim N(\mu=8.5, \tau^2=1.0)\\
y &\sim N(\theta, \sigma^2=0.75^2)
\end{align}
$$
where $\mu=8.5$ is the initial guess.
By Bayes rule for Gaussians, we know that the exact posterior,
given a single observation $y=9.5$, is given by
$$
\begin{align}
\theta|y &\sim N(m, s^2) \\
m &=\frac{\sigma^2 \mu + \tau^2 y}{\sigma^2 + \tau^2}
= \frac{0.75^2 \times 8.5 + 1 \times 9.5}{0.75^2 + 1^2}
= 9.14 \\
s^2 &= \frac{\sigma^2 \tau^2}{\sigma^2 + \tau^2}
= \frac{0.75^2 \times 1^2}{0.75^2 + 1^2}= 0.6^2
\end{align}
$$
```
mu = 8.5; tau = 1.0; sigma = 0.75; y = 9.5
m = (sigma**2 * mu + tau**2 * y)/(sigma**2 + tau**2)
s2 = (sigma**2 * tau**2)/(sigma**2 + tau**2)
s = np.sqrt(s2)
print(m)
print(s)
```
9.14
0.6
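We now write the same model in numpyro: the sample site `weight` plays the role of $\theta$, and `measurement` is conditioned on the observed value via the `obs` argument.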
```
def model(guess, measurement=None):
weight = numpyro.sample("weight", dist.Normal(guess, tau))
return numpyro.sample("measurement", dist.Normal(weight, sigma), obs=measurement)
```
```
nuts_kernel = NUTS(model)
mcmc = MCMC(nuts_kernel, num_warmup=100, num_samples=1000)
guess = mu
measurement = y
mcmc.run(rng_key_, guess, measurement=measurement)
mcmc.print_summary()
samples = mcmc.get_samples()
```
sample: 100%|██████████| 1100/1100 [00:04<00:00, 229.85it/s, 1 steps of size 8.84e-01. acc. prob=0.96]
mean std median 5.0% 95.0% n_eff r_hat
weight 9.09 0.62 9.11 8.05 10.03 325.40 1.01
Number of divergences: 0
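As a sanity check (assuming the key `'weight'` in `samples` matches the sample site declared in `model`), the MCMC estimates should be close to the exact posterior computed above:
```
# Compare the MCMC estimates with the conjugate posterior m = 9.14, s = 0.6.
post = samples['weight']
print(np.mean(post), np.std(post))
```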
|
#' Minimum time-period site selection
#'
#' This function uses part of the method outlined in Roy et al (2012) and Isaac et al (2014) for selecting
#' well-sampled sites from a dataset using the number of time periods only. \code{\link{siteSelection}} is a wrapper
#' for this function that performs the complete site selection process as outlined in these papers.
#'
#' @param taxa A character vector of taxon names
#' @param site A character vector of site names
#' @param time_period A numeric vector of user defined time periods, or a date vector
#' @param minTP numeric, the minimum number of time periods (or, if time_period is a date, the minimum
#' number of years) a site must be sampled in for it to be considered well sampled.
#' @return A data.frame of data that fulfils the selection criteria. This data has two attributes:
#' \code{sites} gives the total number of sites in the dataset, and \code{success} gives the number
#' of sites that satisfy the selection criteria.
#' @export
#' @importFrom dplyr distinct
#' @references needed
siteSelectionMinTP <- function(taxa, site, time_period, minTP){
# Error checks
errorChecks(taxa = taxa, site = site, time_period = time_period)
if(!is.numeric(minTP)) stop('minTP must be numeric')
# Create a data.frame from the vectors
Data <- distinct(data.frame(taxa, site, time_period))
# If tp is a date get the year out
# This will be used in TP selection step
year <- NULL
if(any(class(time_period) %in% c("Date", "POSIXct", "POSIXt"))){
year <- as.numeric(format(Data$time_period, '%Y'))
}
if(!is.null(year)) Data$year <- year
# Get a list of sites with visits in >=minTP time_periods
# If we have year use that, else time_period
if(!is.null(year)){
minTP_site_counts <- tapply(Data$year, Data$site, FUN = function(x) length(unique(x)))
} else {
minTP_site_counts <- tapply(Data$time_period, Data$site, FUN = function(x) length(unique(x)))
}
minTP_sites <- names(minTP_site_counts[minTP_site_counts >= minTP])
# Subset the data to these sites
minTP_Data <- Data[Data$site %in% minTP_sites,]
# Add some helpful attributes
attr(minTP_Data, which = 'sites') <- length(minTP_site_counts)
attr(minTP_Data, which = 'success') <- length(minTP_sites)
if(nrow(minTP_Data) == 0) warning('Filtering in siteSelectionMinTP resulted in no data returned')
# Return
return(minTP_Data[,c('taxa', 'site', 'time_period')])
} |
using SimpleProbabilitySets
using Base.Test, Distributions
@testset "SimpleProbabilitySets" begin
d = Categorical([0.1, 0.2, 0.7])
d2 = Categorical([0.4, 0.4, 0.2])
pb = PBox(d, d2)
@test cdfs(pb)[2] == [0.4, 0.8, 1.0]
@test pints(pb)[1] ≈ [0.1, 0.0, 0.2] atol = 1e-6
pl = [0.1, 0.2, 0.5]
pu = [0.4, 0.4, 0.7]
uncsmall = 0.05
unclarge = 0.6
pi = PInterval(pl, pu)
pi2 = PInterval(pl, uncsmall)
pi3 = PInterval(pl, unclarge)
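# Note: the scalar-uncertainty constructor PInterval(pl, unc) widens pl by ±unc
# and truncates to [0, 1], which is why pi3.pupper is clipped at 1.0 below.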
@test pi.plower == [0.1, 0.2, 0.5]
@test pi2.plower ≈ [0.05, 0.15, 0.45] atol = 1e-6
@test pi3.pupper ≈ [0.7, 0.8, 1.0] atol = 1e-6
p = psample(pi)
@test sum(p) ≈ 1.0 atol = 1e-9
@test all(pi.plower .< p .< pi.pupper)
end
|
State Before: α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ ∃ y, closedBall y (δ / 4) ⊆ closedBall x δ ∧ closedBall y (δ / 4) ⊆ interior s State After: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + dist (x + const ι (3 / 4 * δ)) x ≤ δ
case refine'_2
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ closedBall (x + const ι (3 / 4 * δ)) (δ / 4) ⊆ interior s Tactic: refine' ⟨x + const _ (3 / 4 * δ), closedBall_subset_closedBall' _, _⟩ State Before: case refine'_2
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ closedBall (x + const ι (3 / 4 * δ)) (δ / 4) ⊆ interior s State After: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
hxy : dist x y < δ / 4
⊢ closedBall (x + const ι (3 / 4 * δ)) (δ / 4) ⊆ interior s Tactic: obtain ⟨y, hy, hxy⟩ := Metric.mem_closure_iff.1 hx _ (div_pos hδ zero_lt_four) State Before: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
hxy : dist x y < δ / 4
⊢ closedBall (x + const ι (3 / 4 * δ)) (δ / 4) ⊆ interior s State After: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
hxy : dist x y < δ / 4
z : ι → ℝ
hz : z ∈ closedBall (x + const ι (3 / 4 * δ)) (δ / 4)
i : ι
⊢ y i < z i Tactic: refine' fun z hz => hs.mem_interior_of_forall_lt (subset_closure hy) fun i => _ State Before: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
hxy : dist x y < δ / 4
z : ι → ℝ
hz : z ∈ closedBall (x + const ι (3 / 4 * δ)) (δ / 4)
i : ι
⊢ y i < z i State After: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
hxy : dist x y < δ / 4
z : ι → ℝ
hz : ‖x + const ι (3 / 4 * δ) - z‖ ≤ δ / 4
i : ι
⊢ y i < z i Tactic: rw [mem_closedBall, dist_eq_norm'] at hz State Before: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
hxy : dist x y < δ / 4
z : ι → ℝ
hz : ‖x + const ι (3 / 4 * δ) - z‖ ≤ δ / 4
i : ι
⊢ y i < z i State After: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
hxy : ‖x - y‖ < δ / 4
z : ι → ℝ
hz : ‖x + const ι (3 / 4 * δ) - z‖ ≤ δ / 4
i : ι
⊢ y i < z i Tactic: rw [dist_eq_norm] at hxy State Before: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
hxy : ‖x - y‖ < δ / 4
z : ι → ℝ
hz : ‖x + const ι (3 / 4 * δ) - z‖ ≤ δ / 4
i : ι
⊢ y i < z i State After: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
z : ι → ℝ
hz : ‖x + const ι (3 / 4 * δ) - z‖ ≤ δ / 4
i : ι
hxy : ‖(x - y) i‖ ≤ δ / 4
⊢ y i < z i Tactic: replace hxy := (norm_le_pi_norm _ i).trans hxy.le State Before: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
z : ι → ℝ
hz : ‖x + const ι (3 / 4 * δ) - z‖ ≤ δ / 4
i : ι
hxy : ‖(x - y) i‖ ≤ δ / 4
⊢ y i < z i State After: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
z : ι → ℝ
i : ι
hxy : ‖(x - y) i‖ ≤ δ / 4
hz : ‖(x + const ι (3 / 4 * δ) - z) i‖ ≤ δ / 4
⊢ y i < z i Tactic: replace hz := (norm_le_pi_norm _ i).trans hz State Before: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
z : ι → ℝ
i : ι
hxy : ‖(x - y) i‖ ≤ δ / 4
hz : ‖(x + const ι (3 / 4 * δ) - z) i‖ ≤ δ / 4
⊢ y i < z i State After: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
z : ι → ℝ
i : ι
hxy : abs (x i - y i) ≤ δ / 4
hz : abs (x i + 3 / 4 * δ - z i) ≤ δ / 4
⊢ y i < z i Tactic: dsimp at hxy hz State Before: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
z : ι → ℝ
i : ι
hxy : abs (x i - y i) ≤ δ / 4
hz : abs (x i + 3 / 4 * δ - z i) ≤ δ / 4
⊢ y i < z i State After: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
z : ι → ℝ
i : ι
hxy : x i - y i ≤ δ / 4 ∧ y i - x i ≤ δ / 4
hz : x i + 3 / 4 * δ - z i ≤ δ / 4 ∧ z i - (x i + 3 / 4 * δ) ≤ δ / 4
⊢ y i < z i Tactic: rw [abs_sub_le_iff] at hxy hz State Before: case refine'_2.intro.intro
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y✝ : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
y : ι → ℝ
hy : y ∈ s
z : ι → ℝ
i : ι
hxy : x i - y i ≤ δ / 4 ∧ y i - x i ≤ δ / 4
hz : x i + 3 / 4 * δ - z i ≤ δ / 4 ∧ z i - (x i + 3 / 4 * δ) ≤ δ / 4
⊢ y i < z i State After: no goals Tactic: linarith State Before: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + dist (x + const ι (3 / 4 * δ)) x ≤ δ State After: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + ‖const ι (3 / 4 * δ)‖ ≤ δ Tactic: rw [dist_self_add_left] State Before: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + ‖const ι (3 / 4 * δ)‖ ≤ δ State After: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + ‖3 / 4 * δ‖ = δ Tactic: refine' (add_le_add_left (pi_norm_const_le <| 3 / 4 * δ) _).trans_eq _ State Before: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + ‖3 / 4 * δ‖ = δ State After: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + abs 3 / abs 4 * abs δ = δ Tactic: simp [Real.norm_of_nonneg, hδ.le, zero_le_three] State Before: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + abs 3 / abs 4 * abs δ = δ State After: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + 3 / 4 * δ = δ Tactic: simp [abs_of_pos, abs_of_pos hδ] State Before: case refine'_1
α : Type ?u.9777
ι : Type u_1
inst✝ : Fintype ι
s : Set (ι → ℝ)
x y : ι → ℝ
δ : ℝ
hs : IsUpperSet s
hx : x ∈ closure s
hδ : 0 < δ
⊢ δ / 4 + 3 / 4 * δ = δ State After: no goals Tactic: ring |
// Copyright 2018-2019 Hans Dembinski and Henry Schreiner
//
// Distributed under the Boost Software License, version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Based on boost/histogram/accumulators/weighted_mean.hpp
//
// Changes:
// * Internal values are public for access from Python
// * A special constructor added for construction from Python
#pragma once
#include <boost/core/nvp.hpp>
#include <boost/histogram/weight.hpp>
namespace accumulators {
/**
Calculates mean and variance of weighted sample.
Uses West's incremental algorithm to improve numerical stability
of mean and variance computation.
*/
template <typename ValueType>
struct weighted_mean {
using value_type = ValueType;
using const_reference = const value_type&;
weighted_mean() = default;
weighted_mean(const value_type& wsum,
const value_type& wsum2,
const value_type& mean,
const value_type& variance)
: sum_of_weights(wsum)
, sum_of_weights_squared(wsum2)
, value(mean)
, sum_of_weighted_deltas_squared(
variance * (sum_of_weights - sum_of_weights_squared / sum_of_weights)) {}
weighted_mean(const value_type& wsum,
const value_type& wsum2,
const value_type& mean,
const value_type& sum_of_weighted_deltas_squared,
bool /* tag to trigger Python internal constructor */)
: sum_of_weights(wsum)
, sum_of_weights_squared(wsum2)
, value(mean)
, sum_of_weighted_deltas_squared(sum_of_weighted_deltas_squared) {}
void operator()(const value_type& x) { operator()(boost::histogram::weight(1), x); }
void operator()(const boost::histogram::weight_type<value_type>& w,
const value_type& x) {
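// West's update: move the running mean by w*delta/W, then accumulate
// w*(x - old_mean)*(x - new_mean) into the sum of weighted squared deltas.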
sum_of_weights += w.value;
sum_of_weights_squared += w.value * w.value;
const auto delta = x - value;
value += w.value * delta / sum_of_weights;
sum_of_weighted_deltas_squared += w.value * delta * (x - value);
}
weighted_mean& operator+=(const weighted_mean& rhs) {
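// Merge another accumulator (e.g. one filled in parallel): recombine the
// weighted means; the weight sums and weighted squared deltas simply add.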
if(sum_of_weights != 0 || rhs.sum_of_weights != 0) {
const auto tmp = value * sum_of_weights + rhs.value * rhs.sum_of_weights;
sum_of_weights += rhs.sum_of_weights;
sum_of_weights_squared += rhs.sum_of_weights_squared;
value = tmp / sum_of_weights;
}
sum_of_weighted_deltas_squared += rhs.sum_of_weighted_deltas_squared;
return *this;
}
weighted_mean& operator*=(const value_type& s) {
value *= s;
sum_of_weighted_deltas_squared *= s * s;
return *this;
}
bool operator==(const weighted_mean& rhs) const noexcept {
return sum_of_weights == rhs.sum_of_weights
&& sum_of_weights_squared == rhs.sum_of_weights_squared
&& value == rhs.value
&& sum_of_weighted_deltas_squared == rhs.sum_of_weighted_deltas_squared;
}
bool operator!=(const weighted_mean& rhs) const noexcept { return !operator==(rhs); }
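// Variance estimate for frequency weights: the denominator
// sum_of_weights - sum_of_weights_squared / sum_of_weights plays the role of n - 1.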
value_type variance() const {
return sum_of_weighted_deltas_squared
/ (sum_of_weights - sum_of_weights_squared / sum_of_weights);
}
template <class Archive>
void serialize(Archive& ar, unsigned /* version */) {
ar& boost::make_nvp("sum_of_weights", sum_of_weights);
ar& boost::make_nvp("sum_of_weights_squared", sum_of_weights_squared);
ar& boost::make_nvp("value", value);
ar& boost::make_nvp("sum_of_weighted_deltas_squared",
sum_of_weighted_deltas_squared);
}
value_type sum_of_weights{};
value_type sum_of_weights_squared{};
value_type value{};
value_type sum_of_weighted_deltas_squared{};
};
} // namespace accumulators
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
! This file was ported from Lean 3 source module category_theory.sums.basic
! leanprover-community/mathlib commit 23aa88e32dcc9d2a24cca7bc23268567ed4cd7d6
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.CategoryTheory.EqToHom
/-!
# Binary disjoint unions of categories
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
We define the category instance on `C ⊕ D` when `C` and `D` are categories.
We define:
* `inl_` : the functor `C ⥤ C ⊕ D`
* `inr_` : the functor `D ⥤ C ⊕ D`
* `swap` : the functor `C ⊕ D ⥤ D ⊕ C`
(and the fact this is an equivalence)
We further define sums of functors and natural transformations, written `F.sum G` and `α.sum β`.
-/
namespace CategoryTheory
universe v₁ u₁
-- morphism levels before object levels. See note [category_theory universes].
open Sum
section
variable (C : Type u₁) [Category.{v₁} C] (D : Type u₁) [Category.{v₁} D]
#print CategoryTheory.sum /-
/-- `sum C D` gives the direct sum of two categories.
-/
instance sum : Category.{v₁} (Sum C D)
where
Hom X Y :=
match X, Y with
| inl X, inl Y => X ⟶ Y
| inl X, inr Y => PEmpty
| inr X, inl Y => PEmpty
| inr X, inr Y => X ⟶ Y
id X :=
match X with
| inl X => 𝟙 X
| inr X => 𝟙 X
comp X Y Z f g :=
match X, Y, Z, f, g with
| inl X, inl Y, inl Z, f, g => f ≫ g
| inr X, inr Y, inr Z, f, g => f ≫ g
#align category_theory.sum CategoryTheory.sum
-/
#print CategoryTheory.sum_comp_inl /-
@[simp]
theorem sum_comp_inl {P Q R : C} (f : (inl P : Sum C D) ⟶ inl Q) (g : (inl Q : Sum C D) ⟶ inl R) :
@CategoryStruct.comp _ _ P Q R (f : P ⟶ Q) (g : Q ⟶ R) =
@CategoryStruct.comp _ _ (inl P) (inl Q) (inl R) (f : P ⟶ Q) (g : Q ⟶ R) :=
rfl
#align category_theory.sum_comp_inl CategoryTheory.sum_comp_inl
-/
#print CategoryTheory.sum_comp_inr /-
@[simp]
theorem sum_comp_inr {P Q R : D} (f : (inr P : Sum C D) ⟶ inr Q) (g : (inr Q : Sum C D) ⟶ inr R) :
@CategoryStruct.comp _ _ P Q R (f : P ⟶ Q) (g : Q ⟶ R) =
@CategoryStruct.comp _ _ (inr P) (inr Q) (inr R) (f : P ⟶ Q) (g : Q ⟶ R) :=
rfl
#align category_theory.sum_comp_inr CategoryTheory.sum_comp_inr
-/
end
namespace Sum
variable (C : Type u₁) [Category.{v₁} C] (D : Type u₁) [Category.{v₁} D]
#print CategoryTheory.Sum.inl_ /-
-- Unfortunate naming here, suggestions welcome.
/-- `inl_` is the functor `X ↦ inl X`. -/
@[simps]
def inl_ : C ⥤ Sum C D where
obj X := inl X
map X Y f := f
#align category_theory.sum.inl_ CategoryTheory.Sum.inl_
-/
#print CategoryTheory.Sum.inr_ /-
/-- `inr_` is the functor `X ↦ inr X`. -/
@[simps]
def inr_ : D ⥤ Sum C D where
obj X := inr X
map X Y f := f
#align category_theory.sum.inr_ CategoryTheory.Sum.inr_
-/
#print CategoryTheory.Sum.swap /-
/-- The functor exchanging two direct summand categories. -/
def swap : Sum C D ⥤ Sum D C
where
obj X :=
match X with
| inl X => inr X
| inr X => inl X
map X Y f :=
match X, Y, f with
| inl X, inl Y, f => f
| inr X, inr Y, f => f
#align category_theory.sum.swap CategoryTheory.Sum.swap
-/
/- warning: category_theory.sum.swap_obj_inl -> CategoryTheory.Sum.swap_obj_inl is a dubious translation:
lean 3 declaration is
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D] (X : C), Eq.{succ u2} (Sum.{u2, u2} D C) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2) (Sum.inl.{u2, u2} C D X)) (Sum.inr.{u2, u2} D C X)
but is expected to have type
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D] (X : C), Eq.{succ u2} (Sum.{u2, u2} D C) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2)) (Sum.inl.{u2, u2} C D X)) (Sum.inr.{u2, u2} D C X)
Case conversion may be inaccurate. Consider using '#align category_theory.sum.swap_obj_inl CategoryTheory.Sum.swap_obj_inlₓ'. -/
@[simp]
theorem swap_obj_inl (X : C) : (swap C D).obj (inl X) = inr X :=
rfl
#align category_theory.sum.swap_obj_inl CategoryTheory.Sum.swap_obj_inl
/- warning: category_theory.sum.swap_obj_inr -> CategoryTheory.Sum.swap_obj_inr is a dubious translation:
lean 3 declaration is
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D] (X : D), Eq.{succ u2} (Sum.{u2, u2} D C) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2) (Sum.inr.{u2, u2} C D X)) (Sum.inl.{u2, u2} D C X)
but is expected to have type
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D] (X : D), Eq.{succ u2} (Sum.{u2, u2} D C) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2)) (Sum.inr.{u2, u2} C D X)) (Sum.inl.{u2, u2} D C X)
Case conversion may be inaccurate. Consider using '#align category_theory.sum.swap_obj_inr CategoryTheory.Sum.swap_obj_inrₓ'. -/
@[simp]
theorem swap_obj_inr (X : D) : (swap C D).obj (inr X) = inl X :=
rfl
#align category_theory.sum.swap_obj_inr CategoryTheory.Sum.swap_obj_inr
/- warning: category_theory.sum.swap_map_inl -> CategoryTheory.Sum.swap_map_inl is a dubious translation:
lean 3 declaration is
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D] {X : C} {Y : C} {f : Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.inl.{u2, u2} C D X) (Sum.inl.{u2, u2} C D Y)}, Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2) (Sum.inl.{u2, u2} C D X)) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2) (Sum.inl.{u2, u2} C D Y))) (CategoryTheory.Functor.map.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2) (Sum.inl.{u2, u2} C D X) (Sum.inl.{u2, u2} C D Y) f) f
but is expected to have type
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D] {X : C} {Y : C} {f : Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.inl.{u2, u2} C D X) (Sum.inl.{u2, u2} C D Y)}, Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2)) (Sum.inl.{u2, u2} C D X)) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2)) (Sum.inl.{u2, u2} C D Y))) (Prefunctor.map.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2)) (Sum.inl.{u2, u2} C D X) (Sum.inl.{u2, u2} C D Y) f) f
Case conversion may be inaccurate. Consider using '#align category_theory.sum.swap_map_inl CategoryTheory.Sum.swap_map_inlₓ'. -/
@[simp]
theorem swap_map_inl {X Y : C} {f : inl X ⟶ inl Y} : (swap C D).map f = f :=
rfl
#align category_theory.sum.swap_map_inl CategoryTheory.Sum.swap_map_inl
/- warning: category_theory.sum.swap_map_inr -> CategoryTheory.Sum.swap_map_inr is a dubious translation:
lean 3 declaration is
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D] {X : D} {Y : D} {f : Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.inr.{u2, u2} C D X) (Sum.inr.{u2, u2} C D Y)}, Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2) (Sum.inr.{u2, u2} C D X)) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2) (Sum.inr.{u2, u2} C D Y))) (CategoryTheory.Functor.map.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2) (Sum.inr.{u2, u2} C D X) (Sum.inr.{u2, u2} C D Y) f) f
but is expected to have type
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D] {X : D} {Y : D} {f : Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.inr.{u2, u2} C D X) (Sum.inr.{u2, u2} C D Y)}, Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2)) (Sum.inr.{u2, u2} C D X)) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2)) (Sum.inr.{u2, u2} C D Y))) (Prefunctor.map.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2))) (Sum.{u2, u2} D C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1) (CategoryTheory.Sum.swap.{u1, u2} C _inst_1 D _inst_2)) (Sum.inr.{u2, u2} C D X) (Sum.inr.{u2, u2} C D Y) f) f
Case conversion may be inaccurate. Consider using '#align category_theory.sum.swap_map_inr CategoryTheory.Sum.swap_map_inrₓ'. -/
@[simp]
theorem swap_map_inr {X Y : D} {f : inr X ⟶ inr Y} : (swap C D).map f = f :=
rfl
#align category_theory.sum.swap_map_inr CategoryTheory.Sum.swap_map_inr
namespace Swap
/- warning: category_theory.sum.swap.equivalence -> CategoryTheory.Sum.Swap.equivalence is a dubious translation:
lean 3 declaration is
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D], CategoryTheory.Equivalence.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1)
but is expected to have type
forall (C : Type.{u2}) [_inst_1 : CategoryTheory.Category.{u1, u2} C] (D : Type.{u2}) [_inst_2 : CategoryTheory.Category.{u1, u2} D], CategoryTheory.Equivalence.{u1, u1, u2, u2} (Sum.{u2, u2} C D) (Sum.{u2, u2} D C) (CategoryTheory.sum.{u1, u2} C _inst_1 D _inst_2) (CategoryTheory.sum.{u1, u2} D _inst_2 C _inst_1)
Case conversion may be inaccurate. Consider using '#align category_theory.sum.swap.equivalence CategoryTheory.Sum.Swap.equivalenceₓ'. -/
/-- `swap` gives an equivalence between `C ⊕ D` and `D ⊕ C`. -/
def equivalence : Sum C D ≌ Sum D C :=
Equivalence.mk (swap C D) (swap D C)
(NatIso.ofComponents (fun X => eqToIso (by cases X <;> rfl)) (by tidy))
(NatIso.ofComponents (fun X => eqToIso (by cases X <;> rfl)) (by tidy))
#align category_theory.sum.swap.equivalence CategoryTheory.Sum.Swap.equivalence
#print CategoryTheory.Sum.Swap.isEquivalence /-
instance isEquivalence : IsEquivalence (swap C D) :=
(by infer_instance : IsEquivalence (equivalence C D).Functor)
#align category_theory.sum.swap.is_equivalence CategoryTheory.Sum.Swap.isEquivalence
-/
#print CategoryTheory.Sum.Swap.symmetry /-
/-- The double swap on `C ⊕ D` is naturally isomorphic to the identity functor. -/
def symmetry : swap C D ⋙ swap D C ≅ 𝟭 (Sum C D) :=
(equivalence C D).unitIso.symm
#align category_theory.sum.swap.symmetry CategoryTheory.Sum.Swap.symmetry
-/
end Swap
end Sum
variable {A : Type u₁} [Category.{v₁} A] {B : Type u₁} [Category.{v₁} B] {C : Type u₁}
[Category.{v₁} C] {D : Type u₁} [Category.{v₁} D]
namespace Functor
#print CategoryTheory.Functor.sum /-
/-- The sum of two functors. -/
def sum (F : A ⥤ B) (G : C ⥤ D) : Sum A C ⥤ Sum B D
where
obj X :=
match X with
| inl X => inl (F.obj X)
| inr X => inr (G.obj X)
map X Y f :=
match X, Y, f with
| inl X, inl Y, f => F.map f
| inr X, inr Y, f => G.map f
map_id' X := by cases X <;> unfold_aux; erw [F.map_id]; rfl; erw [G.map_id]; rfl
map_comp' X Y Z f g :=
match X, Y, Z, f, g with
| inl X, inl Y, inl Z, f, g => by
unfold_aux
erw [F.map_comp]
rfl
| inr X, inr Y, inr Z, f, g => by
unfold_aux
erw [G.map_comp]
rfl
#align category_theory.functor.sum CategoryTheory.Functor.sum
-/
/- warning: category_theory.functor.sum_obj_inl -> CategoryTheory.Functor.sum_obj_inl is a dubious translation:
lean 3 declaration is
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] (F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (G : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (a : A), Eq.{succ u2} (Sum.{u2, u2} B D) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G) (Sum.inl.{u2, u2} A C a)) (Sum.inl.{u2, u2} B D (CategoryTheory.Functor.obj.{u1, u1, u2, u2} A _inst_1 B _inst_2 F a))
but is expected to have type
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] (F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (G : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (a : A), Eq.{succ u2} (Sum.{u2, u2} B D) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G)) (Sum.inl.{u2, u2} A C a)) (Sum.inl.{u2, u2} B D (Prefunctor.obj.{succ u1, succ u1, u2, u2} A (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} A (CategoryTheory.Category.toCategoryStruct.{u1, u2} A _inst_1)) B (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} B (CategoryTheory.Category.toCategoryStruct.{u1, u2} B _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} A _inst_1 B _inst_2 F) a))
Case conversion may be inaccurate. Consider using '#align category_theory.functor.sum_obj_inl CategoryTheory.Functor.sum_obj_inlₓ'. -/
@[simp]
theorem sum_obj_inl (F : A ⥤ B) (G : C ⥤ D) (a : A) : (F.Sum G).obj (inl a) = inl (F.obj a) :=
rfl
#align category_theory.functor.sum_obj_inl CategoryTheory.Functor.sum_obj_inl
/- warning: category_theory.functor.sum_obj_inr -> CategoryTheory.Functor.sum_obj_inr is a dubious translation:
lean 3 declaration is
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] (F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (G : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (c : C), Eq.{succ u2} (Sum.{u2, u2} B D) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G) (Sum.inr.{u2, u2} A C c)) (Sum.inr.{u2, u2} B D (CategoryTheory.Functor.obj.{u1, u1, u2, u2} C _inst_3 D _inst_4 G c))
but is expected to have type
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] (F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (G : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (c : C), Eq.{succ u2} (Sum.{u2, u2} B D) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G)) (Sum.inr.{u2, u2} A C c)) (Sum.inr.{u2, u2} B D (Prefunctor.obj.{succ u1, succ u1, u2, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) D (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} D (CategoryTheory.Category.toCategoryStruct.{u1, u2} D _inst_4)) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} C _inst_3 D _inst_4 G) c))
Case conversion may be inaccurate. Consider using '#align category_theory.functor.sum_obj_inr CategoryTheory.Functor.sum_obj_inrₓ'. -/
@[simp]
theorem sum_obj_inr (F : A ⥤ B) (G : C ⥤ D) (c : C) : (F.Sum G).obj (inr c) = inr (G.obj c) :=
rfl
#align category_theory.functor.sum_obj_inr CategoryTheory.Functor.sum_obj_inr
/- warning: category_theory.functor.sum_map_inl -> CategoryTheory.Functor.sum_map_inl is a dubious translation:
lean 3 declaration is
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] (F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (G : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) {a : A} {a' : A} (f : Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.inl.{u2, u2} A C a) (Sum.inl.{u2, u2} A C a')), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G) (Sum.inl.{u2, u2} A C a)) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G) (Sum.inl.{u2, u2} A C a'))) (CategoryTheory.Functor.map.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G) (Sum.inl.{u2, u2} A C a) (Sum.inl.{u2, u2} A C a') f) (CategoryTheory.Functor.map.{u1, u1, u2, u2} A _inst_1 B _inst_2 F a a' f)
but is expected to have type
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] (F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (G : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) {a : A} {a' : A} (f : Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.inl.{u2, u2} A C a) (Sum.inl.{u2, u2} A C a')), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G)) (Sum.inl.{u2, u2} A C a)) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G)) (Sum.inl.{u2, u2} A C a'))) (Prefunctor.map.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G)) (Sum.inl.{u2, u2} A C a) (Sum.inl.{u2, u2} A C a') f) (Prefunctor.map.{succ u1, succ u1, u2, u2} A (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} A (CategoryTheory.Category.toCategoryStruct.{u1, u2} A _inst_1)) B (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} B (CategoryTheory.Category.toCategoryStruct.{u1, u2} B _inst_2)) 
(CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} A _inst_1 B _inst_2 F) a a' f)
Case conversion may be inaccurate. Consider using '#align category_theory.functor.sum_map_inl CategoryTheory.Functor.sum_map_inlₓ'. -/
@[simp]
theorem sum_map_inl (F : A ⥤ B) (G : C ⥤ D) {a a' : A} (f : inl a ⟶ inl a') :
(F.Sum G).map f = F.map f :=
rfl
#align category_theory.functor.sum_map_inl CategoryTheory.Functor.sum_map_inl
/- warning: category_theory.functor.sum_map_inr -> CategoryTheory.Functor.sum_map_inr is a dubious translation:
lean 3 declaration is
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] (F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (G : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) {c : C} {c' : C} (f : Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.inr.{u2, u2} A C c) (Sum.inr.{u2, u2} A C c')), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G) (Sum.inr.{u2, u2} A C c)) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G) (Sum.inr.{u2, u2} A C c'))) (CategoryTheory.Functor.map.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G) (Sum.inr.{u2, u2} A C c) (Sum.inr.{u2, u2} A C c') f) (CategoryTheory.Functor.map.{u1, u1, u2, u2} C _inst_3 D _inst_4 G c c' f)
but is expected to have type
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] (F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (G : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) {c : C} {c' : C} (f : Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.inr.{u2, u2} A C c) (Sum.inr.{u2, u2} A C c')), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G)) (Sum.inr.{u2, u2} A C c)) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G)) (Sum.inr.{u2, u2} A C c'))) (Prefunctor.map.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G)) (Sum.inr.{u2, u2} A C c) (Sum.inr.{u2, u2} A C c') f) (Prefunctor.map.{succ u1, succ u1, u2, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) D (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} D (CategoryTheory.Category.toCategoryStruct.{u1, u2} D _inst_4)) 
(CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} C _inst_3 D _inst_4 G) c c' f)
Case conversion may be inaccurate. Consider using '#align category_theory.functor.sum_map_inr CategoryTheory.Functor.sum_map_inrₓ'. -/
@[simp]
theorem sum_map_inr (F : A ⥤ B) (G : C ⥤ D) {c c' : C} (f : inr c ⟶ inr c') :
(F.Sum G).map f = G.map f :=
rfl
#align category_theory.functor.sum_map_inr CategoryTheory.Functor.sum_map_inr
end Functor
namespace NatTrans
#print CategoryTheory.NatTrans.sum /-
/-- The sum of two natural transformations. -/
def sum {F G : A ⥤ B} {H I : C ⥤ D} (α : F ⟶ G) (β : H ⟶ I) : F.Sum H ⟶ G.Sum I
where
app X :=
match X with
| inl X => α.app X
| inr X => β.app X
naturality' X Y f :=
match X, Y, f with
| inl X, inl Y, f => by unfold_aux; erw [α.naturality]; rfl
| inr X, inr Y, f => by unfold_aux; erw [β.naturality]; rfl
#align category_theory.nat_trans.sum CategoryTheory.NatTrans.sum
-/
/- warning: category_theory.nat_trans.sum_app_inl -> CategoryTheory.NatTrans.sum_app_inl is a dubious translation:
lean 3 declaration is
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] {F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2} {G : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2} {H : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4} {I : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4} (α : Quiver.Hom.{succ (max u2 u1), max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.CategoryStruct.toQuiver.{max u2 u1, max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.Category.toCategoryStruct.{max u2 u1, max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.Functor.category.{u1, u1, u2, u2} A _inst_1 B _inst_2))) F G) (β : Quiver.Hom.{succ (max u2 u1), max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.CategoryStruct.toQuiver.{max u2 u1, max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.Category.toCategoryStruct.{max u2 u1, max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.Functor.category.{u1, u1, u2, u2} C _inst_3 D _inst_4))) H I) (a : A), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F H) (Sum.inl.{u2, u2} A C a)) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 G I) (Sum.inl.{u2, u2} A C a))) (CategoryTheory.NatTrans.app.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F H) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 G I) (CategoryTheory.NatTrans.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G H I α β) (Sum.inl.{u2, u2} A C a)) (CategoryTheory.NatTrans.app.{u1, u1, u2, u2} A _inst_1 B _inst_2 F G α a)
but is expected to have type
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] {F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2} {G : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2} {H : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4} {I : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4} (α : Quiver.Hom.{max (succ u2) (succ u1), max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.CategoryStruct.toQuiver.{max u2 u1, max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.Category.toCategoryStruct.{max u2 u1, max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.Functor.category.{u1, u1, u2, u2} A _inst_1 B _inst_2))) F G) (β : Quiver.Hom.{max (succ u2) (succ u1), max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.CategoryStruct.toQuiver.{max u2 u1, max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.Category.toCategoryStruct.{max u2 u1, max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.Functor.category.{u1, u1, u2, u2} C _inst_3 D _inst_4))) H I) (a : A), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F H)) (Sum.inl.{u2, u2} A C a)) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 G I)) (Sum.inl.{u2, u2} A C a))) (CategoryTheory.NatTrans.app.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F H) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 G I) 
(CategoryTheory.NatTrans.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G H I α β) (Sum.inl.{u2, u2} A C a)) (CategoryTheory.NatTrans.app.{u1, u1, u2, u2} A _inst_1 B _inst_2 F G α a)
Case conversion may be inaccurate. Consider using '#align category_theory.nat_trans.sum_app_inl CategoryTheory.NatTrans.sum_app_inlₓ'. -/
@[simp]
theorem sum_app_inl {F G : A ⥤ B} {H I : C ⥤ D} (α : F ⟶ G) (β : H ⟶ I) (a : A) :
(sum α β).app (inl a) = α.app a :=
rfl
#align category_theory.nat_trans.sum_app_inl CategoryTheory.NatTrans.sum_app_inl
/- warning: category_theory.nat_trans.sum_app_inr -> CategoryTheory.NatTrans.sum_app_inr is a dubious translation:
lean 3 declaration is
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] {F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2} {G : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2} {H : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4} {I : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4} (α : Quiver.Hom.{succ (max u2 u1), max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.CategoryStruct.toQuiver.{max u2 u1, max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.Category.toCategoryStruct.{max u2 u1, max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.Functor.category.{u1, u1, u2, u2} A _inst_1 B _inst_2))) F G) (β : Quiver.Hom.{succ (max u2 u1), max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.CategoryStruct.toQuiver.{max u2 u1, max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.Category.toCategoryStruct.{max u2 u1, max u1 u2} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.Functor.category.{u1, u1, u2, u2} C _inst_3 D _inst_4))) H I) (c : C), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F H) (Sum.inr.{u2, u2} A C c)) (CategoryTheory.Functor.obj.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 G I) (Sum.inr.{u2, u2} A C c))) (CategoryTheory.NatTrans.app.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F H) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 G I) (CategoryTheory.NatTrans.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G H I α β) (Sum.inr.{u2, u2} A C c)) (CategoryTheory.NatTrans.app.{u1, u1, u2, u2} C _inst_3 D _inst_4 H I β c)
but is expected to have type
forall {A : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} A] {B : Type.{u2}} [_inst_2 : CategoryTheory.Category.{u1, u2} B] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] {D : Type.{u2}} [_inst_4 : CategoryTheory.Category.{u1, u2} D] {F : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2} {G : CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2} {H : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4} {I : CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4} (α : Quiver.Hom.{max (succ u2) (succ u1), max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.CategoryStruct.toQuiver.{max u2 u1, max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.Category.toCategoryStruct.{max u2 u1, max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} A _inst_1 B _inst_2) (CategoryTheory.Functor.category.{u1, u1, u2, u2} A _inst_1 B _inst_2))) F G) (β : Quiver.Hom.{max (succ u2) (succ u1), max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.CategoryStruct.toQuiver.{max u2 u1, max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.Category.toCategoryStruct.{max u2 u1, max u2 u1} (CategoryTheory.Functor.{u1, u1, u2, u2} C _inst_3 D _inst_4) (CategoryTheory.Functor.category.{u1, u1, u2, u2} C _inst_3 D _inst_4))) H I) (c : C), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F H)) (Sum.inr.{u2, u2} A C c)) (Prefunctor.obj.{succ u1, succ u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3))) (Sum.{u2, u2} B D) (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.Category.toCategoryStruct.{u1, u2} (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 G I)) (Sum.inr.{u2, u2} A C c))) (CategoryTheory.NatTrans.app.{u1, u1, u2, u2} (Sum.{u2, u2} A C) (CategoryTheory.sum.{u1, u2} A _inst_1 C _inst_3) (Sum.{u2, u2} B D) (CategoryTheory.sum.{u1, u2} B _inst_2 D _inst_4) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F H) (CategoryTheory.Functor.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 G I) 
(CategoryTheory.NatTrans.sum.{u1, u2} A _inst_1 B _inst_2 C _inst_3 D _inst_4 F G H I α β) (Sum.inr.{u2, u2} A C c)) (CategoryTheory.NatTrans.app.{u1, u1, u2, u2} C _inst_3 D _inst_4 H I β c)
Case conversion may be inaccurate. Consider using '#align category_theory.nat_trans.sum_app_inr CategoryTheory.NatTrans.sum_app_inrₓ'. -/
@[simp]
theorem sum_app_inr {F G : A ⥤ B} {H I : C ⥤ D} (α : F ⟶ G) (β : H ⟶ I) (c : C) :
(sum α β).app (inr c) = β.app c :=
rfl
#align category_theory.nat_trans.sum_app_inr CategoryTheory.NatTrans.sum_app_inr
end NatTrans
end CategoryTheory
|
using WaterLily
using LinearAlgebra: norm2
include("ThreeD_Plots.jl")
function TGV(p=6,Re=1e5)
# Define vortex size, velocity, viscosity
L = 2^p; U = 1; ν = U*L/Re
# Taylor-Green-Vortex initial velocity field
function uλ(i,vx)
x,y,z = @. (vx-1.5)*π/L # scaled coordinates
i==1 && return -U*sin(x)*cos(y)*cos(z) # u_x
i==2 && return U*cos(x)*sin(y)*cos(z) # u_y
return 0. # u_z
end
# Initialize simulation
return Simulation((L+2,L+2,L+2),zeros(3),L;U,uλ,ν)
end
function ω_mag_data(sim)
# plot the vorticity modulus
@inside sim.flow.σ[I] = WaterLily.ω_mag(I,sim.flow.u)*sim.L/sim.U
return @view sim.flow.σ[2:end-1,2:end-1,2:end-1]
end
sim,fig = volume_video!(TGV(),ω_mag_data,name="TGV.mp4",duration=10)
|
import numpy as np
import Amoebes
world_map = np.zeros((30, 30))
world = Amoebes.World(world_map)
# start one amoebe at the centre of the map and one in each corner
starting_point_coord = int(np.floor(world.world_size / 2))
starting_point = [starting_point_coord, starting_point_coord]
amoebe = Amoebes.Amoebe(world, starting_point, mark=1)
# world_size is an attribute of the World object, not a bare variable
amoebe_2 = Amoebes.Amoebe(world, [0, 0], mark=2)
amoebe_3 = Amoebes.Amoebe(world, [world.world_size - 1, 0], mark=3)
amoebe_4 = Amoebes.Amoebe(world, [0, world.world_size - 1], mark=4)
amoebe_5 = Amoebes.Amoebe(world, [world.world_size - 1, world.world_size - 1], mark=5)
amoebes = [amoebe_2, amoebe, amoebe_3, amoebe_4, amoebe_5]
world.init_amoebes(amoebes)
world.visualise_amoebes_evolution(interval_ms=50, frames=200, output_filename="anim.mp4") |
\documentclass[11pt,]{article}
\usepackage[osf,sc]{mathpazo}
\usepackage{setspace}
\setstretch{1.05}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={Modeling the Effect of U.S. Arms Transfers on FDI},
pdfauthor={Ben Horvath},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\BuiltInTok}[1]{#1}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}}
\newcommand{\ExtensionTok}[1]{#1}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ImportTok}[1]{#1}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\NormalTok}[1]{#1}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\RegionMarkerTok}[1]{#1}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{5}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\newcommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{Modeling the Effect of U.S. Arms Transfers on FDI}
\pretitle{\vspace{\droptitle}\centering\huge}
\posttitle{\par}
\author{Ben Horvath}
\preauthor{\centering\large\emph}
\postauthor{\par}
\predate{\centering\large\emph}
\postdate{\par}
\date{December 12, 2018}
\usepackage{eulervm}
\begin{document}
\maketitle
{
\setcounter{tocdepth}{2}
\tableofcontents
}
Load libraries:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(corrplot)}
\KeywordTok{library}\NormalTok{(dplyr)}
\KeywordTok{library}\NormalTok{(ggplot2)}
\KeywordTok{library}\NormalTok{(lme4)}
\KeywordTok{source}\NormalTok{(}\StringTok{'../R/multiplot.R'}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\hypertarget{introduction}{%
\section{Introduction}\label{introduction}}
Corporations and investors can directly invest in enterprises in foreign
countries, as opposed to, for instance, portfolio investments like
stocks and bonds. This type of investment is called \emph{foreign direct
investment}.
Some countries will have specific advantages over others that attract
FDI. Rugman (2001, 157--58) divides them into `harder' and `softer'
advantages. The former are primarily economic---access to natural
resources, a cheaper component of production, etc. Soft locational
advantages refer to intangible benefits, e.g., subjective firm director
preferences.
Political factors must be counted among locational advantages or
disadvantages. When a left-wing government comes to power and threatens
nationalization of large industries, investors are likely to become
wary. The advantages conferred by international politics have been well studied. Many
scholars have tested and confirmed the hypothesis that `alliances have a
direct, statistically significant, and large effect on bilateral trade'
(Gowa 1994, 54; see also Gowa and Mansfield 1993 and Long 2003). Other
scholars have examined FDI in this context. Biglaiser and DeRouen (2007)
and Little and Leblong (2004) found that the presence of U.S. troops in
a potential host country increases the level of incoming FDI, i.e., is a
locational advantage to investors.
This analysis provides another test of the relationship between FDI and
international security arrangements: Are U.S. arms transfers, like the
presence of the U.S. military itself, a locational advantage to
investors? There are a number of major reasons to believe this might be
so. First, military aid suggests a friendly atmosphere between the U.S.
and the potential host. Second, corporate interests often correspond to the
interests of the U.S. government: To quote Gilpin, ``although the
interests of American corporations and U.S. foreign policy objectives
have collided on many occasions, a complementarity of interests has
tended to exist between the corporations and the U.S. government''
(1987, 241). Third, like the presence of U.S. soldiers, military aid can
(but does not always) signal stability to investors and a decreased
chance of political or economic disruption.
To test this theory, I assemble a data set of FDI flows from the U.S.,
military sales from the U.S., and supplementary variables likely to also
affect FDI flows. I run numerous kinds of regressions, in search of the
best model of this phenomenon.
\hypertarget{data-collection}{%
\section{Data Collection}\label{data-collection}}
I assemble a dataset with the following variables. For full citations,
see the references at the end of the paper.
\begin{itemize}
\item
\textbf{FDI}. The OECD provides FDI data for U.S. outflows on its
website, from 2003 to 2013. These years will have to bound this study
temporally:
\url{https://stats.oecd.org/index.aspx?DataSetCode=FDI_FLOW_PARTNER}
\item
\textbf{Arms Transfers}. The Stockholm International Peace Research
Institute maintains a database of arms transfers:
\url{https://www.sipri.org/databases/armstransfers}. The value has
been `normalized' by the researchers themselves to account for
fluctuations in the market value of weapons as well as allowing
comparability between, e.g., 100 assault rifles and 2 large
artilleries.
\item
\textbf{Yearly Population}. This is available for most countries on a
yearly basis via the UN:
\url{https://population.un.org/wpp/Download/Standard/Population/}.
\item
\textbf{Presence of Conflict}. Political scientists testing hypotheses
on armed conflict frequently make use of the Armed Conflict dataset,
available at: \url{http://ucdp.uu.se/downloads/\#d3}. This dataset
describes the kind of conflict in great detail; rather than use that
detail, I will use a dichotomous variable: 0 for no conflict, 1 for conflict.
\item
\textbf{Regime Type}. This is available in one of the most popular
political science datasets, the Polity dataset. It encodes regime type
in a range from perfectly democratic to perfectly autocratic for most
countries from 1800 on. Specifically I will use the Polity2 variable:
\url{http://www.systemicpeace.org/inscrdata.html}. See also the user
manual: \url{http://www.systemicpeace.org/inscr/p4manualv2017.pdf}.
Although Polity2 is a scale from -10 to 10, it is often coded to an
ordinal variable with the following levels: autocracy, closed
anocracy, open anocracy, democracy, full democracy, and
conflict/occupied. A sketch of this recoding in R appears just after this list.
\item
\textbf{Alliances}. The Correlates of War project maintains another
popular data set encoding international alliances in `dyadic' form on a
year-to-year basis:
\url{http://www.correlatesofwar.org/data-sets/formal-alliances}.
\item
\textbf{Distance}. Kristian Gleditsch developed a data set containing
the distance between capital cities, which we'll use to proxy
distance: \url{http://ksgleditsch.com/data-5.html}, using this system
of country codes: \url{http://ksgleditsch.com/statelist.html}.
\item
\textbf{GDP}. Where else but the World Bank?:
\url{https://data.worldbank.org/indicator/NY.GDP.MKTP.CD}
\end{itemize}
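Here is that sketch of the Polity2 recoding in R. The
\texttt{polity\_to\_regime} helper and its cut points are my own rendering
of the convention just described, not code from the Polity project; the
interruption codes that mark conflict/occupied cases are assumed to be
handled separately:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# sketch: conventional Polity2 cut points (hypothetical helper);}
\CommentTok{# -10..-6 autocracy, -5..0 closed anocracy, 1..5 open anocracy,}
\CommentTok{# 6..9 democracy, 10 full democracy}
\NormalTok{polity_to_regime <- function(p)}
\NormalTok{  cut(p, breaks=c(-Inf, -6, 0, 5, 9, Inf),}
\NormalTok{      labels=c('autocracy', 'closed anocracy', 'open anocracy',}
\NormalTok{               'democracy', 'full democracy'))}
\end{Highlighting}
\end{Shaded}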
Assembling the complete dataset is a lengthy affair. I have confined it
to the \texttt{R/clean\_data.R} script.
I will simply load the finished dataset here:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{df <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\StringTok{'../data/clean/master_dataset.tsv'}\NormalTok{, }\DataTypeTok{sep=}\StringTok{'}\CharTok{\textbackslash{}t}\StringTok{'}\NormalTok{, }
\DataTypeTok{stringsAsFactors=}\OtherTok{FALSE}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
The primary barrier to assembling this data set was standardization of
country names. For instance, in some data sets, the Vatican is called
`Holy See (Vatican City State),' and others, simply `Holy See.' There
are also occasional typos, e.g., `NewZealand' or `SriLanka' rather than
`New Zealand' or `Sri Lanka.' Countries with accented characters posed
another issue.
Tediously and laboriously, I standardized each separate piece of the
dataset by hand.
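As a flavor of what that script does, here is a minimal sketch. The
\texttt{name\_map} below is a hypothetical stub holding only the examples
mentioned above; the real lookup in \texttt{R/clean\_data.R} is far longer:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# hypothetical stub of the raw-name to standard-name lookup}
\NormalTok{name_map <- c('Holy See (Vatican City State)' = 'Holy See',}
\NormalTok{              'NewZealand' = 'New Zealand',}
\NormalTok{              'SriLanka' = 'Sri Lanka')}
\NormalTok{fix_name <- function(x) unname(ifelse(x %in% names(name_map), name_map[x], x))}
\end{Highlighting}
\end{Shaded}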
There is one more wrinkle to deal with. Since we are testing a theory
about how agents react to information, our dataset has to reflect those
agents' knowledge at the time. To do so, each of the independent variables is lagged
by one year. Thus, for instance, the model will be trained on data from
2003 to predict a country's \texttt{fdi} in 2004:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{df <-}\StringTok{ }\NormalTok{df }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{group_by}\NormalTok{(country) }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{population=}\KeywordTok{lag}\NormalTok{(population, }\DataTypeTok{order_by=}\NormalTok{year),}
\DataTypeTok{arms_exports=}\KeywordTok{lag}\NormalTok{(arms_exports, }\DataTypeTok{order_by=}\NormalTok{year),}
\DataTypeTok{conflict=}\KeywordTok{lag}\NormalTok{(conflict, }\DataTypeTok{order_by=}\NormalTok{year),}
\DataTypeTok{alliance=}\KeywordTok{lag}\NormalTok{(alliance, }\DataTypeTok{order_by=}\NormalTok{year),}
\DataTypeTok{gdp=}\KeywordTok{lag}\NormalTok{(gdp, }\DataTypeTok{order_by=}\NormalTok{year),}
\DataTypeTok{gdp_perc_growth=}\KeywordTok{lag}\NormalTok{(gdp_perc_growth, }\DataTypeTok{order_by=}\NormalTok{year),}
\DataTypeTok{regime_type=}\KeywordTok{lag}\NormalTok{(regime_type, }\DataTypeTok{order_by=}\NormalTok{year)) }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{na.omit}\NormalTok{()}
\end{Highlighting}
\end{Shaded}
To accurately evaluate each of the models, I partition the data into
separate train and test sets, where the last three years of the dataset
for each country are loaded into the test set (approximately 30 percent
of the observations). This will provide a good measure of how well the
developed models can be expected to perform in reality.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{train <-}\StringTok{ }\NormalTok{df }\OperatorTok{%>%}\StringTok{ }\KeywordTok{filter}\NormalTok{(year }\OperatorTok{<=}\StringTok{ }\DecValTok{2010}\NormalTok{)}
\NormalTok{test <-}\StringTok{ }\NormalTok{df }\OperatorTok{%>%}\StringTok{ }\KeywordTok{filter}\NormalTok{(year }\OperatorTok{>}\StringTok{ }\DecValTok{2010}\NormalTok{)}
\KeywordTok{write.table}\NormalTok{(train, }\StringTok{'../data/clean/train.tsv'}\NormalTok{, }\DataTypeTok{sep=}\StringTok{'}\CharTok{\textbackslash{}t}\StringTok{'}\NormalTok{, }
\DataTypeTok{row.names=}\OtherTok{FALSE}\NormalTok{)}
\KeywordTok{write.table}\NormalTok{(test, }\StringTok{'../data/clean/test.tsv'}\NormalTok{, }\DataTypeTok{sep=}\StringTok{'}\CharTok{\textbackslash{}t}\StringTok{'}\NormalTok{, }
\DataTypeTok{row.names=}\OtherTok{FALSE}\NormalTok{)}
\CommentTok{# remove from workspace for now to keep models uncontaminated}
\KeywordTok{rm}\NormalTok{(test)}
\end{Highlighting}
\end{Shaded}
\hypertarget{exploratory-data-analysis}{%
\section{Exploratory Data Analysis}\label{exploratory-data-analysis}}
The training set:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(train)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 6 x 11
## # Groups: country [1]
## year country fdi population arms_exports conflict alliance km_dist
## <int> <chr> <dbl> <int> <int> <int> <int> <int>
## 1 2005 Afghani~ 0. 24118979 0 1 0 11132
## 2 2006 Afghani~ 0. 25070798 19 1 0 11132
## 3 2007 Afghani~ 0. 25893450 0 1 0 11132
## 4 2008 Afghani~ 0. 26616792 22 1 0 11132
## 5 2009 Afghani~ -1.00e6 27294031 78 1 0 11132
## 6 2010 Afghani~ -1.00e6 28004331 280 1 0 11132
## # ... with 3 more variables: gdp <dbl>, gdp_perc_growth <dbl>,
## # regime_type <chr>
\end{verbatim}
To get a sense of the range of our dependent variable and main
independent variable:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mean}\NormalTok{(train}\OperatorTok{$}\NormalTok{fdi)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 1488869736
\end{verbatim}
Mean \texttt{fdi} is about \$1.5 billion, while the median is only \$1
million. This suggests a \emph{highly} right-skewed distribution:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{quantile}\NormalTok{(train}\OperatorTok{$}\NormalTok{fdi, }\KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\FloatTok{.25}\NormalTok{, }\FloatTok{0.5}\NormalTok{, }\FloatTok{0.75}\NormalTok{, }\FloatTok{0.9}\NormalTok{, }\FloatTok{0.95}\NormalTok{, }\FloatTok{0.99}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## 0% 25% 50% 75% 90% 95%
## -1.9284e+10 0.0000e+00 1.0000e+06 4.4700e+08 3.0580e+09 8.5720e+09
## 99%
## 3.0351e+10
\end{verbatim}
The minimum of \texttt{fdi} is roughly -\$19 billion. Negative FDI seems
puzzling. According to this OECD explainer
\url{https://www.oecd.org/daf/inv/FDI-statistics-explanatory-notes.pdf},
\begin{quote}
FDI financial transactions may be negative for three reasons. First, if
there is disinvestment in assets--- that is, the direct investor sells
its interest in a direct investment enterprise to a third party or back
to the direct investment enterprise. Second, if the parent borrowed
money from its affiliate or if the affiliate paid off a loan from its
direct investor. Third, if reinvested earnings are negative. Reinvested
earnings are negative if the affiliate loses money or if the dividends
paid out to the direct investor are greater than the income recorded in
that period. Negative FDI positions largely result when the loans from
the affiliate to its parent exceed the loans and equity capital given by
the parent to the affiliate. This is most likely to occur when FDI
statistics are presented by partner country.
\end{quote}
We see that the middle 50 percent of the variable lies between \$0 and about
\$450 million, and the top one percent is greater than \$30 billion.
\texttt{arms\_exports} is less extreme, but still skewed,
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mean}\NormalTok{(train}\OperatorTok{$}\NormalTok{arms_exports)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 44.59256
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{quantile}\NormalTok{(train}\OperatorTok{$}\NormalTok{arms_exports, }\KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\FloatTok{.25}\NormalTok{, }\FloatTok{0.5}\NormalTok{, }\FloatTok{0.75}\NormalTok{, }\FloatTok{0.9}\NormalTok{, }\FloatTok{0.95}\NormalTok{, }\FloatTok{0.99}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## 0% 25% 50% 75% 90% 95% 99%
## 0.0 0.0 0.0 5.0 89.0 303.0 847.8
\end{verbatim}
with a mean of about 44.6 arms units and a median of 0. The interquartile
range runs from 0 to 5 units, while the 95th percentile is roughly 300
units: the largest transfers are confined to the top few percent of the data set.
\hypertarget{distributions}{%
\subsection{Distributions}\label{distributions}}
For the first pass through, let's examine the distributions of the
numeric variables:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{hist_fdi <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{fdi)) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\NormalTok{hist_arms <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{arms_exports)) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\NormalTok{hist_pop <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{population)) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\NormalTok{hist_conflict <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{conflict)) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\NormalTok{hist_alliance <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{alliance)) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\NormalTok{hist_dist <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{km_dist)) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\NormalTok{hist_gdp <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{gdp)) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\NormalTok{hist_growth <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{gdp_perc_growth)) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\KeywordTok{multiplot}\NormalTok{(hist_fdi, hist_arms, hist_pop, hist_conflict, }
\NormalTok{ hist_alliance, hist_dist, hist_gdp, hist_growth, }\DataTypeTok{cols=}\DecValTok{2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{report_files/figure-latex/unnamed-chunk-9-1.pdf}
From these graphs it's clear there are only two `nice' variables:
\texttt{km\_dist} and \texttt{gdp\_perc\_growth}, i.e., they are
approximately normally distributed.
Our two most important variables, \texttt{fdi} and
\texttt{arms\_exports}, are not normal. They are both
\emph{zero-inflated}, with a long right tail. This may prove challenging
in attempting to model them with standard linear regression.
The variables \texttt{gdp} and \texttt{population} also have a small
central tendency, with a long right tail. This reflects the fact that
most countries have a small population with a correspondingly small GDP,
and that there are a few large countries with large GDPs.
The remaining variables are dichotomous, \texttt{conflict} and
\texttt{alliance}. These plots show that most of the observations in the
dataset are in a time of peace, and that most of them did not occur when
the country was allied with the United States.
\hypertarget{associations-and-correlations}{%
\subsection{Associations and
Correlations}\label{associations-and-correlations}}
The next step is to take a look for associations in our data set:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{scat_arms <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{arms_exports, }\DataTypeTok{y=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{() }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{()}
\NormalTok{scat_pop <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{population, }\DataTypeTok{y=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{() }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{()}
\NormalTok{scat_conflict <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{conflict, }\DataTypeTok{y=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{() }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{()}
\NormalTok{scat_alliance <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{alliance, }\DataTypeTok{y=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{() }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{()}
\NormalTok{scat_dist <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{km_dist, }\DataTypeTok{y=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{() }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{()}
\NormalTok{scat_gdp <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{gdp, }\DataTypeTok{y=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{() }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{()}
\NormalTok{scat_growth <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{gdp_perc_growth, }\DataTypeTok{y=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_point}\NormalTok{() }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_smooth}\NormalTok{()}
\KeywordTok{multiplot}\NormalTok{(scat_arms, scat_pop, scat_conflict, scat_alliance,}
\NormalTok{ scat_dist, scat_gdp, scat_growth, }\DataTypeTok{cols=}\DecValTok{2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{report_files/figure-latex/unnamed-chunk-10-1.pdf}
Few of these variables are related to \texttt{fdi} in a
straightforward, linear way.
Examining our categorical variable, \texttt{regime\_type}:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{ggplot}\NormalTok{(train, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{regime_type, }\DataTypeTok{y=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_boxplot}\NormalTok{()}
\end{Highlighting}
\end{Shaded}
\includegraphics{report_files/figure-latex/unnamed-chunk-11-1.pdf}
Full democracies obviously receive the most FDI from the U.S., though
autocratic countries still receive some. Conflicted/occupied countries
receive very little, which makes sense.
The correlation matrix of the numerical variables:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{train_cor <-}\StringTok{ }\KeywordTok{cor}\NormalTok{(train[, }\KeywordTok{c}\NormalTok{(}\DecValTok{3}\OperatorTok{:}\DecValTok{10}\NormalTok{)])}
\KeywordTok{corrplot}\NormalTok{(train_cor, }\DataTypeTok{type=}\StringTok{'lower'}\NormalTok{, }\DataTypeTok{method=}\StringTok{'number'}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{report_files/figure-latex/unnamed-chunk-12-1.pdf}
Unfortunately for us, the correlation between the main variables of
interest, \texttt{fdi} and \texttt{arms\_exports}, is low: only 0.13.
But, more hopefully, few of our other independent variables are
correlated with one another, which protects our regression models from
multicollinearity.
Two relationships are worth watching. The first is between \texttt{gdp}
and \texttt{population}, which are naturally related, with a correlation
of 0.43. More interesting is the correlation between \texttt{km\_dist}
and \texttt{alliance} (-0.54). This suggests that the further a country
is from the U.S., the less likely it is to be allied with it. This is a
non-intuitive finding, but could possibly be explained by the difficulty
of projecting force across large distances and oceans.
\hypertarget{statistical-analysis}{%
\section{Statistical Analysis}\label{statistical-analysis}}
\hypertarget{inferential-statistics}{%
\subsection{Inferential Statistics}\label{inferential-statistics}}
We've seen above that there appears to be some kind of association
between \texttt{regime\_type} and \texttt{fdi}, where democracies
receive more FDI than autocracies. This section will formally test
this, with the hypotheses,
\begin{quote}
\(H_0\): \(\mu_{democracy} - \mu_{autocracy} = 0\)
\end{quote}
\begin{quote}
\(H_1\): \(\mu_{democracy} - \mu_{autocracy} > 0\)
\end{quote}
I create two vectors of \texttt{fdi}, one for country-years where the
country was either a `full democracy' or a `democracy,' and the other
for countries that are an `autocracy' or `closed anocracy'. Their
respective distributions are plotted:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{x_demo <-}\StringTok{ }\NormalTok{train }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{filter}\NormalTok{(regime_type }\OperatorTok{==}\StringTok{ 'full democracy'} \OperatorTok{|}\StringTok{ }\NormalTok{regime_type }\OperatorTok{==}\StringTok{ 'democracy'}\NormalTok{) }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{select}\NormalTok{(fdi)}
\NormalTok{x_auto <-}\StringTok{ }\NormalTok{train }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{filter}\NormalTok{(regime_type }\OperatorTok{==}\StringTok{ 'autocracy'} \OperatorTok{|}\StringTok{ }\NormalTok{regime_type }\OperatorTok{==}\StringTok{ 'closed anocracy'}\NormalTok{) }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{select}\NormalTok{(fdi)}
\NormalTok{hist_demo <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(x_demo, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\NormalTok{hist_auto <-}\StringTok{ }\KeywordTok{ggplot}\NormalTok{(x_auto, }\KeywordTok{aes}\NormalTok{(}\DataTypeTok{x=}\NormalTok{fdi)) }\OperatorTok{+}
\StringTok{ }\KeywordTok{geom_histogram}\NormalTok{(}\DataTypeTok{colour=}\StringTok{"black"}\NormalTok{, }\DataTypeTok{fill=}\StringTok{"white"}\NormalTok{)}
\KeywordTok{multiplot}\NormalTok{(hist_demo, hist_auto, }\DataTypeTok{cols=}\DecValTok{2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{report_files/figure-latex/unnamed-chunk-13-1.pdf}
The distributions suggest a couple of problems with this hypothesis
test: because of the skew and the excessive zeros, neither of these
appears close to a normal distribution. Additionally, the variances of
the two samples are quite different. I will carry on with the hypothesis
test, in the hope that the large sample size and R's
\texttt{var.equal=FALSE} setting (a Welch test) will carry me through:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{t.test}\NormalTok{(x_demo}\OperatorTok{$}\NormalTok{fdi, x_auto}\OperatorTok{$}\NormalTok{fdi, }\DataTypeTok{alternative=}\StringTok{'greater'}\NormalTok{, }
\DataTypeTok{var.equal=}\OtherTok{FALSE}\NormalTok{, }\DataTypeTok{conf.level=}\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Welch Two Sample t-test
##
## data: x_demo$fdi and x_auto$fdi
## t = 5.6175, df = 686.26, p-value = 1.409e-08
## alternative hypothesis: true difference in means is greater than 0
## 95 percent confidence interval:
## 1407487063 Inf
## sample estimates:
## mean of x mean of y
## 2365246998 373876221
\end{verbatim}
This \(t\)-test suggests we should reject the null hypothesis \(H_0\)
that the mean \texttt{fdi} is the same for both democracies and
autocracies. The result is highly significant, with a \(t\)-value of
5.62! The large sample size, the obvious difference between the two
means, and the high significance allay most of my concerns about the
violations noted above.
\hypertarget{models}{%
\subsection{Models}\label{models}}
Each model will be evaluated by its performance on the test set. The
metric to optimize is mean squared error (MSE):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{mse <-}\StringTok{ }\ControlFlowTok{function}\NormalTok{(m) }\KeywordTok{mean}\NormalTok{(}\KeywordTok{resid}\NormalTok{(m)}\OperatorTok{^}\DecValTok{2}\NormalTok{)}
\NormalTok{calc_r2 <-}\StringTok{ }\ControlFlowTok{function}\NormalTok{(y, y_hat) \{}
\NormalTok{ rss <-}\StringTok{ }\KeywordTok{sum}\NormalTok{((y_hat }\OperatorTok{-}\StringTok{ }\NormalTok{y)}\OperatorTok{^}\DecValTok{2}\NormalTok{)}
\NormalTok{ tss <-}\StringTok{ }\KeywordTok{sum}\NormalTok{((y }\OperatorTok{-}\StringTok{ }\KeywordTok{mean}\NormalTok{(y_hat))}\OperatorTok{^}\DecValTok{2}\NormalTok{)}
\KeywordTok{return}\NormalTok{(}\DecValTok{1} \OperatorTok{-}\StringTok{ }\NormalTok{(rss}\OperatorTok{/}\NormalTok{tss))}
\NormalTok{\}}
\end{Highlighting}
\end{Shaded}
Attention will be paid to \(R^2\) as well as performance on training
set. However, \(MSE\) on the test set is the ultimate metric to
minimize.
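Concretely (a sketch of the computation used later in the evaluation
section, assuming a fitted model \texttt{m} and a \texttt{test} data
frame with the same columns as \texttt{train}):
\begin{verbatim}
# sketch: out-of-sample MSE for a fitted model m
test_mse <- mean((test$fdi - predict(m, test))^2)
\end{verbatim}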
\hypertarget{m_0-predicting-the-mean}{%
\subsubsection{\texorpdfstring{\(M_0\): Predicting the
Mean}{M\_0: Predicting the Mean}}\label{m_0-predicting-the-mean}}
For the purposes of establishing a baseline performance, the first model
will be a dummy model, predicting only the average FDI.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{m0 <-}\StringTok{ }\KeywordTok{lm}\NormalTok{(fdi }\OperatorTok{~}\StringTok{ }\DecValTok{1}\NormalTok{, train)}
\KeywordTok{mse}\NormalTok{(m0)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 4.024362e+19
\end{verbatim}
With an MSE of over \(4 \times 10^{19}\) (squared dollars), this model
performs very poorly. Hopefully further iteration can improve on it.
\hypertarget{m_1-linear-model-all-variables}{%
\subsubsection{\texorpdfstring{\(M_1\): Linear Model, All
Variables}{M\_1: Linear Model, All Variables}}\label{m_1-linear-model-all-variables}}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{m1 <-}\StringTok{ }\KeywordTok{lm}\NormalTok{(fdi }\OperatorTok{~}\StringTok{ }\NormalTok{population }\OperatorTok{+}\StringTok{ }\NormalTok{arms_exports }\OperatorTok{+}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(conflict) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{as.factor}\NormalTok{(alliance) }\OperatorTok{+}\StringTok{ }\NormalTok{km_dist }\OperatorTok{+}\StringTok{ }\NormalTok{gdp }\OperatorTok{+}\StringTok{ }\NormalTok{gdp_perc_growth }\OperatorTok{+}
\StringTok{ }\KeywordTok{as.factor}\NormalTok{(regime_type), train)}
\KeywordTok{summary}\NormalTok{(m1)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Call:
## lm(formula = fdi ~ population + arms_exports + as.factor(conflict) +
## as.factor(alliance) + km_dist + gdp + gdp_perc_growth + as.factor(regime_type),
## data = train)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.502e+10 -1.125e+09 -1.353e+08 3.409e+08 1.029e+11
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) 9.639e+08 8.463e+08 1.139
## population -4.508e-01 1.483e+00 -0.304
## arms_exports 1.802e+06 1.260e+06 1.429
## as.factor(conflict)1 -2.019e+08 5.622e+08 -0.359
## as.factor(alliance)1 7.686e+08 5.530e+08 1.390
## km_dist -8.739e+04 6.836e+04 -1.278
## gdp 2.123e-03 3.586e-04 5.922
## gdp_perc_growth -1.321e+09 1.266e+09 -1.044
## as.factor(regime_type)closed anocracy 4.451e+08 6.798e+08 0.655
## as.factor(regime_type)conflict/occupied 1.217e+08 1.743e+09 0.070
## as.factor(regime_type)democracy -1.620e+08 6.054e+08 -0.268
## as.factor(regime_type)full democracy 3.182e+09 7.074e+08 4.498
## as.factor(regime_type)open anocracy 5.443e+07 7.407e+08 0.073
## Pr(>|t|)
## (Intercept) 0.255
## population 0.761
## arms_exports 0.153
## as.factor(conflict)1 0.720
## as.factor(alliance)1 0.165
## km_dist 0.201
## gdp 4.36e-09 ***
## gdp_perc_growth 0.297
## as.factor(regime_type)closed anocracy 0.513
## as.factor(regime_type)conflict/occupied 0.944
## as.factor(regime_type)democracy 0.789
## as.factor(regime_type)full democracy 7.66e-06 ***
## as.factor(regime_type)open anocracy 0.941
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 5.847e+09 on 1008 degrees of freedom
## Multiple R-squared: 0.1614, Adjusted R-squared: 0.1514
## F-statistic: 16.17 on 12 and 1008 DF, p-value: < 2.2e-16
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mse}\NormalTok{(m1)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 3.37478e+19
\end{verbatim}
Unfortunately, this straightforward model is not impressive. Its
\(MSE\) is only about 16 percent better than predicting the average,
though it does have a not-insignificant adjusted \(R^2\) of .15, and the
\(F\)-statistic says it is statistically different from the dummy model.
Only GDP and the full democracy regime type are significant.
Examine the residuals:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{par}\NormalTok{(}\DataTypeTok{mfrow=}\KeywordTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{,}\DecValTok{2}\NormalTok{))}
\KeywordTok{hist}\NormalTok{(}\KeywordTok{resid}\NormalTok{(m1))}
\KeywordTok{qqnorm}\NormalTok{(}\KeywordTok{rstandard}\NormalTok{(m1)); }\KeywordTok{qqline}\NormalTok{(}\KeywordTok{rstandard}\NormalTok{(m1), }\DataTypeTok{col =} \DecValTok{2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{report_files/figure-latex/unnamed-chunk-18-1.pdf}
It is clear that the residuals are not as normal as we would like. The
model performs well for `typical' observations (between the -2 and 2
theoretical quantiles), but fails for quite a few outlying observations.
Both positive and negative outliers have large residuals.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{train_m1 <-}\StringTok{ }\NormalTok{train}
\NormalTok{train_m1}\OperatorTok{$}\NormalTok{resid <-}\StringTok{ }\KeywordTok{resid}\NormalTok{(m1)}
\NormalTok{train_m1 <-}\StringTok{ }\NormalTok{train_m1 }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{resid_abs =} \KeywordTok{abs}\NormalTok{(resid)) }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{arrange}\NormalTok{(}\KeywordTok{desc}\NormalTok{(resid))}
\KeywordTok{head}\NormalTok{(train_m1[}\KeywordTok{c}\NormalTok{(}\StringTok{'year'}\NormalTok{, }\StringTok{'country'}\NormalTok{, }\StringTok{'resid'}\NormalTok{)], }\DecValTok{10}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 10 x 3
## # Groups: country [4]
## year country resid
## <int> <chr> <dbl>
## 1 2007 Netherlands 102855267532.
## 2 2009 Netherlands 45365348354.
## 3 2010 Luxembourg 43584466944.
## 4 2010 Netherlands 38522907898.
## 5 2006 Netherlands 35269523242.
## 6 2004 United Kingdom 33068662761.
## 7 2008 Netherlands 32645722874.
## 8 2010 United Kingdom 28689500402.
## 9 2008 Ireland 27763820382.
## 10 2004 Netherlands 26096392930.
\end{verbatim}
Interestingly, the cases with the largest errors are all developed
Western European allies of the U.S. Because these residuals are
positive, \(M_1\) under-estimated FDI to all of these cases by tens of
billions of dollars. Future work might try to account for this by
including a variable indicating whether a country is West European, or
perhaps an (original) member of NATO.
Looking at the cases with the most negative residuals, the model seems
to have especially over-estimated China and Japan, particularly in the
period around the 2007--2008 financial crisis. My intuition is that some
state of affairs---an overheated world market, perhaps?---inflated the
model's expectations for these countries relative to the FDI they
actually received over this period. This suggests adding a variable to
account for the state of the world market might help.
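As a hedged sketch of that idea (the variable name and construction
here are hypothetical), a crude proxy for the state of the world market
could be the yearly mean of \texttt{fdi} across all countries, built
with the same \texttt{dplyr} verbs used elsewhere in this analysis:
\begin{verbatim}
# sketch: hypothetical world-market proxy, mean FDI by year
world_market <- train %>%
  group_by(year) %>%
  summarise(world_fdi = mean(fdi))
train_wm <- left_join(train, world_market, by = 'year')
\end{verbatim}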
\hypertarget{m_2-linear-model-some-logged-variables}{%
\subsubsection{\texorpdfstring{\(M_2\): Linear Model, Some Logged
Variables}{M\_2: Linear Model, Some Logged Variables}}\label{m_2-linear-model-some-logged-variables}}
One way to make these residuals more normal is to log-transform some of
the poorly behaved independent variables:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{m2 <-}\StringTok{ }\KeywordTok{lm}\NormalTok{(fdi }\OperatorTok{~}\StringTok{ }\KeywordTok{log}\NormalTok{(population) }\OperatorTok{+}\StringTok{ }\NormalTok{arms_exports }\OperatorTok{+}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(conflict) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{as.factor}\NormalTok{(alliance) }\OperatorTok{+}\StringTok{ }\NormalTok{km_dist }\OperatorTok{+}\StringTok{ }\KeywordTok{log}\NormalTok{(gdp) }\OperatorTok{+}\StringTok{ }\NormalTok{gdp_perc_growth }\OperatorTok{+}
\StringTok{ }\KeywordTok{as.factor}\NormalTok{(regime_type), train)}
\KeywordTok{summary}\NormalTok{(m2)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Call:
## lm(formula = fdi ~ log(population) + arms_exports + as.factor(conflict) +
## as.factor(alliance) + km_dist + log(gdp) + gdp_perc_growth +
## as.factor(regime_type), data = train)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.592e+10 -1.482e+09 -3.372e+08 7.731e+08 1.021e+11
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) -1.554e+10 2.807e+09 -5.539
## log(population) -2.219e+08 1.993e+08 -1.113
## arms_exports 9.975e+05 1.291e+06 0.773
## as.factor(conflict)1 -7.189e+08 5.897e+08 -1.219
## as.factor(alliance)1 9.931e+08 5.511e+08 1.802
## km_dist -3.895e+04 6.910e+04 -0.564
## log(gdp) 8.270e+08 1.622e+08 5.099
## gdp_perc_growth -1.550e+09 1.267e+09 -1.224
## as.factor(regime_type)closed anocracy 1.115e+09 7.054e+08 1.580
## as.factor(regime_type)conflict/occupied 7.597e+08 1.750e+09 0.434
## as.factor(regime_type)democracy 6.077e+07 6.125e+08 0.099
## as.factor(regime_type)full democracy 2.740e+09 7.337e+08 3.734
## as.factor(regime_type)open anocracy 6.930e+08 7.635e+08 0.908
## Pr(>|t|)
## (Intercept) 3.89e-08 ***
## log(population) 0.265983
## arms_exports 0.439887
## as.factor(conflict)1 0.223072
## as.factor(alliance)1 0.071812 .
## km_dist 0.573041
## log(gdp) 4.08e-07 ***
## gdp_perc_growth 0.221193
## as.factor(regime_type)closed anocracy 0.114419
## as.factor(regime_type)conflict/occupied 0.664196
## as.factor(regime_type)democracy 0.920982
## as.factor(regime_type)full democracy 0.000199 ***
## as.factor(regime_type)open anocracy 0.364291
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 5.857e+09 on 1008 degrees of freedom
## Multiple R-squared: 0.1584, Adjusted R-squared: 0.1484
## F-statistic: 15.82 on 12 and 1008 DF, p-value: < 2.2e-16
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mse}\NormalTok{(m2)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 3.386704e+19
\end{verbatim}
Logging these variables is actually slightly worse than \(M_1\), both
in terms of \(MSE\) and \(R^2\). In terms of variable significance, the
only change is that alliance becomes significant at \(p < .10\). The
residuals are almost identical to those of the previous model.
\hypertarget{m_4-mixed-effects-panel-model}{%
\subsubsection{\texorpdfstring{\(M_4\): Mixed Effects Panel
Model}{M\_4: Mixed Effects Panel Model}}\label{m_4-mixed-effects-panel-model}}
This model attempts to deal with the fact that most subjects (states)
are sampled multiple times. This kind of \emph{mixed effects} model adds
a layer of \emph{random} effects to the usual regression model's
\emph{fixed} effects. This model includes a random intercept for each
country, in an attempt to capture country-specific differences that are
not attributable to the independent variables.
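Formally (a standard formulation, spelled out here for clarity rather
than taken from the original), the random-intercept specification is
\[
y_{it} = \mathbf{x}_{it}^{T}\beta + u_{i} + \varepsilon_{it},
\qquad u_{i} \sim N(0, \sigma_{u}^{2}),
\quad \varepsilon_{it} \sim N(0, \sigma_{\varepsilon}^{2}),
\]
where \(u_{i}\) is the random intercept for country \(i\), corresponding
to the \texttt{(1 | country)} term in the \texttt{lme4} formula below.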
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{m4 <-}\StringTok{ }\KeywordTok{lmer}\NormalTok{(fdi }\OperatorTok{~}\StringTok{ }\NormalTok{population }\OperatorTok{+}\StringTok{ }\NormalTok{arms_exports }\OperatorTok{+}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(conflict) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{as.factor}\NormalTok{(alliance) }\OperatorTok{+}\StringTok{ }\NormalTok{km_dist }\OperatorTok{+}\StringTok{ }\NormalTok{gdp }\OperatorTok{+}\StringTok{ }\NormalTok{gdp_perc_growth }\OperatorTok{+}
\StringTok{ }\KeywordTok{as.factor}\NormalTok{(regime_type) }\OperatorTok{+}\StringTok{ }\NormalTok{(}\DecValTok{1} \OperatorTok{|}\StringTok{ }\NormalTok{country), train)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning: Some predictor variables are on very different scales: consider
## rescaling
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{summary}\NormalTok{(m4)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Linear mixed model fit by REML ['lmerMod']
## Formula:
## fdi ~ population + arms_exports + as.factor(conflict) + as.factor(alliance) +
## km_dist + gdp + gdp_perc_growth + as.factor(regime_type) +
## (1 | country)
## Data: train
##
## REML criterion at convergence: 48034
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -13.1700 -0.0780 -0.0103 0.0418 16.6464
##
## Random effects:
## Groups Name Variance Std.Dev.
## country (Intercept) 1.595e+19 3.993e+09
## Residual 1.837e+19 4.285e+09
## Number of obs: 1021, groups: country, 154
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 8.819e+08 1.505e+09 0.586
## population 7.535e-01 2.735e+00 0.276
## arms_exports 2.863e+06 1.357e+06 2.109
## as.factor(conflict)1 -5.032e+08 6.691e+08 -0.752
## as.factor(alliance)1 9.608e+08 1.022e+09 0.940
## km_dist -7.679e+04 1.276e+05 -0.602
## gdp 1.515e-03 5.745e-04 2.636
## gdp_perc_growth -1.213e+09 9.812e+08 -1.236
## as.factor(regime_type)closed anocracy 2.189e+08 1.027e+09 0.213
## as.factor(regime_type)conflict/occupied 1.260e+08 2.480e+09 0.051
## as.factor(regime_type)democracy 2.548e+07 9.524e+08 0.027
## as.factor(regime_type)full democracy 3.106e+09 1.198e+09 2.592
## as.factor(regime_type)open anocracy 8.678e+06 1.041e+09 0.008
\end{verbatim}
\begin{verbatim}
##
## Correlation matrix not shown by default, as p = 13 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
\end{verbatim}
\begin{verbatim}
## fit warnings:
## Some predictor variables are on very different scales: consider rescaling
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mse}\NormalTok{(m4)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 1.590054e+19
\end{verbatim}
From the \(MSE\) output, we see this model surpasses all previous
models. While it has 60 percent less error than the dummy model, this is
still a disappointing result.
However, with this model, \texttt{arms\_exports} becomes significant at
\(p < .05\)! GDP and full democracy also retain strongly significant
effects.
The residual plots are also more encouraging, as many of the extreme
errors we saw in \(M_1\) disappear. The theoretical quantile plot shows
a much nicer distribution, with less deviation from normality:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{par}\NormalTok{(}\DataTypeTok{mfrow=}\KeywordTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{,}\DecValTok{2}\NormalTok{))}
\KeywordTok{hist}\NormalTok{(}\KeywordTok{resid}\NormalTok{(m4))}
\KeywordTok{qqnorm}\NormalTok{(}\KeywordTok{scale}\NormalTok{(}\KeywordTok{resid}\NormalTok{(m4))); }\KeywordTok{qqline}\NormalTok{(}\KeywordTok{scale}\NormalTok{(}\KeywordTok{resid}\NormalTok{(m4)), }\DataTypeTok{col=}\StringTok{'2'}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{report_files/figure-latex/unnamed-chunk-22-1.pdf}
Examining some of the largest residuals:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{train_m4 <-}\StringTok{ }\NormalTok{train}
\NormalTok{train_m4}\OperatorTok{$}\NormalTok{resid <-}\StringTok{ }\KeywordTok{resid}\NormalTok{(m4)}
\NormalTok{train_m4 <-}\StringTok{ }\NormalTok{train_m4 }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{resid_abs =} \KeywordTok{abs}\NormalTok{(resid)) }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{arrange}\NormalTok{(}\KeywordTok{desc}\NormalTok{(resid))}
\KeywordTok{head}\NormalTok{(train_m4[}\KeywordTok{c}\NormalTok{(}\StringTok{'year'}\NormalTok{, }\StringTok{'country'}\NormalTok{)], }\DecValTok{10}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 10 x 2
## # Groups: country [7]
## year country
## <int> <chr>
## 1 2007 Netherlands
## 2 2010 Luxembourg
## 3 2008 Ireland
## 4 2004 United Kingdom
## 5 2008 Switzerland
## 6 2009 Netherlands
## 7 2010 Ireland
## 8 2010 United Kingdom
## 9 2010 Australia
## 10 2004 Canada
\end{verbatim}
As with \(M_1\), the Netherlands and the U.K. are present, but they
appear less often. Other countries include Ireland, Australia, and
Canada. Again, this suggests we might want to add a variable for either
NATO membership or English-speaking countries.
\hypertarget{m_5-mixed-effects-nato}{%
\subsubsection{\texorpdfstring{\(M_5\): Mixed Effects +
NATO}{M\_5: Mixed Effects + NATO}}\label{m_5-mixed-effects-nato}}
Since it's relatively simple, let's create a dummy variable indicating
whether a country was a founding member of NATO:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{nato <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{'Belgium'}\NormalTok{, }\StringTok{'Canada'}\NormalTok{, }\StringTok{'Denmark'}\NormalTok{, }\StringTok{'France'}\NormalTok{, }\StringTok{'Iceland'}\NormalTok{, }\StringTok{'Italy'}\NormalTok{,}
\StringTok{'Luxembourg'}\NormalTok{, }\StringTok{'Netherlands'}\NormalTok{, }\StringTok{'Norway'}\NormalTok{, }\StringTok{'Portugal'}\NormalTok{,}
\StringTok{'United Kingdom'}\NormalTok{, }\StringTok{'United States'}\NormalTok{)}
\NormalTok{train_m5 <-}\StringTok{ }\NormalTok{train }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{nato=}\KeywordTok{ifelse}\NormalTok{(country }\OperatorTok{%in%}\StringTok{ }\NormalTok{nato, }\DecValTok{1}\NormalTok{, }\DecValTok{0}\NormalTok{))}
\NormalTok{m5 <-}\StringTok{ }\KeywordTok{lmer}\NormalTok{(fdi }\OperatorTok{~}\StringTok{ }\NormalTok{population }\OperatorTok{+}\StringTok{ }\NormalTok{arms_exports }\OperatorTok{+}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(conflict) }\OperatorTok{+}\StringTok{ }
\StringTok{ }\KeywordTok{as.factor}\NormalTok{(alliance) }\OperatorTok{+}\StringTok{ }\NormalTok{km_dist }\OperatorTok{+}\StringTok{ }\NormalTok{gdp }\OperatorTok{+}\StringTok{ }\NormalTok{gdp_perc_growth }\OperatorTok{+}
\StringTok{ }\KeywordTok{as.factor}\NormalTok{(regime_type) }\OperatorTok{+}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(nato) }\OperatorTok{+}\StringTok{ }\NormalTok{(}\DecValTok{1} \OperatorTok{|}\StringTok{ }\NormalTok{country),}
\DataTypeTok{data=}\NormalTok{train_m5)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning: Some predictor variables are on very different scales: consider
## rescaling
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{summary}\NormalTok{(m5)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Linear mixed model fit by REML ['lmerMod']
## Formula:
## fdi ~ population + arms_exports + as.factor(conflict) + as.factor(alliance) +
## km_dist + gdp + gdp_perc_growth + as.factor(regime_type) +
## as.factor(nato) + (1 | country)
## Data: train_m5
##
## REML criterion at convergence: 47956.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -13.1715 -0.0696 -0.0105 0.0396 16.6870
##
## Random effects:
## Groups Name Variance Std.Dev.
## country (Intercept) 1.246e+19 3.530e+09
## Residual 1.831e+19 4.279e+09
## Number of obs: 1021, groups: country, 154
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 6.414e+08 1.370e+09 0.468
## population 1.521e+00 2.487e+00 0.612
## arms_exports 3.092e+06 1.327e+06 2.329
## as.factor(conflict)1 -4.185e+08 6.473e+08 -0.647
## as.factor(alliance)1 -3.494e+08 9.502e+08 -0.368
## km_dist -4.897e+04 1.155e+05 -0.424
## gdp 1.077e-03 5.429e-04 1.983
## gdp_perc_growth -1.233e+09 9.773e+08 -1.262
## as.factor(regime_type)closed anocracy 2.901e+08 9.638e+08 0.301
## as.factor(regime_type)conflict/occupied 8.905e+07 2.332e+09 0.038
## as.factor(regime_type)democracy 1.818e+08 8.886e+08 0.205
## as.factor(regime_type)full democracy 1.891e+09 1.125e+09 1.682
## as.factor(regime_type)open anocracy 1.828e+08 9.831e+08 0.186
## as.factor(nato)1 9.033e+09 1.488e+09 6.071
\end{verbatim}
\begin{verbatim}
##
## Correlation matrix not shown by default, as p = 14 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
\end{verbatim}
\begin{verbatim}
## fit warnings:
## Some predictor variables are on very different scales: consider rescaling
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mse}\NormalTok{(m5)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 1.594544e+19
\end{verbatim}
Adding \texttt{nato} is very consequential for the model. Our main
independent variable, \texttt{arms\_exports}, becomes stronger and more
significant (\(p < .05\)). GDP becomes less significant, though it
remains significant at \(p < .05\). Population becomes more significant
but is `less important.' The full democracy indicator becomes
insignificant at the .05 level, and the magnitude of its coefficient
decreases. Interestingly, the in-sample \(MSE\) is a smidge higher than
\(M_4\)'s, by about 0.3 percent.
The residual graphs appear mostly the same as those of \(M_4\),
unfortunately.
\hypertarget{model-evaluations}{%
\section{Model Evaluations}\label{model-evaluations}}
We can now test our five models: \(M_0, M_1, M_2, M_4,\) and \(M_5\).
Reload the test data and get their predictions for the \texttt{test}
set:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{test <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\StringTok{'../data/clean/test.tsv'}\NormalTok{, }\DataTypeTok{sep=}\StringTok{'}\CharTok{\textbackslash{}t}\StringTok{'}\NormalTok{, }
\DataTypeTok{stringsAsFactors=}\OtherTok{FALSE}\NormalTok{)}
\NormalTok{test_m0 <-}\StringTok{ }\NormalTok{test }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{pred =} \KeywordTok{predict}\NormalTok{(m0, test),}
\DataTypeTok{resid =}\NormalTok{ fdi }\OperatorTok{-}\StringTok{ }\NormalTok{pred)}
\NormalTok{test_m1 <-}\StringTok{ }\NormalTok{test }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{pred =} \KeywordTok{predict}\NormalTok{(m1, test),}
\DataTypeTok{resid =}\NormalTok{ fdi }\OperatorTok{-}\StringTok{ }\NormalTok{pred)}
\NormalTok{test_m2 <-}\StringTok{ }\NormalTok{test }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{pred =} \KeywordTok{predict}\NormalTok{(m2, test),}
\DataTypeTok{resid =}\NormalTok{ fdi }\OperatorTok{-}\StringTok{ }\NormalTok{pred)}
\NormalTok{test_m4 <-}\StringTok{ }\NormalTok{test }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{pred =} \KeywordTok{predict}\NormalTok{(m4, test),}
\DataTypeTok{resid =}\NormalTok{ fdi }\OperatorTok{-}\StringTok{ }\NormalTok{pred)}
\CommentTok{# add NATO variable in}
\NormalTok{test_m5 <-}\StringTok{ }\NormalTok{test }\OperatorTok{%>%}
\StringTok{ }\KeywordTok{mutate}\NormalTok{(}\DataTypeTok{nato=}\KeywordTok{ifelse}\NormalTok{(country }\OperatorTok{%in%}\StringTok{ }\NormalTok{nato, }\DecValTok{1}\NormalTok{, }\DecValTok{0}\NormalTok{))}
\NormalTok{test_m5}\OperatorTok{$}\NormalTok{pred <-}\StringTok{ }\KeywordTok{predict}\NormalTok{(m5, test_m5)}
\NormalTok{test_m5}\OperatorTok{$}\NormalTok{resid <-}\StringTok{ }\NormalTok{test_m5}\OperatorTok{$}\NormalTok{fdi }\OperatorTok{-}\StringTok{ }\NormalTok{test_m5}\OperatorTok{$}\NormalTok{pred}
\end{Highlighting}
\end{Shaded}
Calculate \(MSE\) for each (divided by \(10^{18}\) for readability), in
order of best to worst:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_5:'}\NormalTok{, }\KeywordTok{mean}\NormalTok{(test_m5}\OperatorTok{$}\NormalTok{resid}\OperatorTok{^}\DecValTok{2}\NormalTok{) }\OperatorTok{/}\StringTok{ }\DecValTok{10}\OperatorTok{^}\DecValTok{18}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_5: 18.3317314308561"
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_4:'}\NormalTok{, }\KeywordTok{mean}\NormalTok{(test_m4}\OperatorTok{$}\NormalTok{resid}\OperatorTok{^}\DecValTok{2}\NormalTok{) }\OperatorTok{/}\StringTok{ }\DecValTok{10}\OperatorTok{^}\DecValTok{18}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_4: 18.7364181449195"
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_2:'}\NormalTok{, }\KeywordTok{mean}\NormalTok{(test_m2}\OperatorTok{$}\NormalTok{resid}\OperatorTok{^}\DecValTok{2}\NormalTok{) }\OperatorTok{/}\StringTok{ }\DecValTok{10}\OperatorTok{^}\DecValTok{18}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_2: 51.6602441882049"
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_1:'}\NormalTok{, }\KeywordTok{mean}\NormalTok{(test_m1}\OperatorTok{$}\NormalTok{resid}\OperatorTok{^}\DecValTok{2}\NormalTok{) }\OperatorTok{/}\StringTok{ }\DecValTok{10}\OperatorTok{^}\DecValTok{18}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_1: 53.8012662506875"
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_0:'}\NormalTok{, }\KeywordTok{mean}\NormalTok{(test_m0}\OperatorTok{$}\NormalTok{resid}\OperatorTok{^}\DecValTok{2}\NormalTok{) }\OperatorTok{/}\StringTok{ }\DecValTok{10}\OperatorTok{^}\DecValTok{18}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_0: 63.9952822690553"
\end{verbatim}
Immediately it is clear that the mixed models, \(M_4\) and \(M_5\),
perform far better than the `vanilla' \(M_1\) and \(M_2\) (with logged
variables). Interestingly, even though adding the \texttt{nato}
variable slightly increased in-sample \(MSE\), it improved the model on
the test set.
Calculate \(R^2\), from best to worst:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_5:'}\NormalTok{, }\KeywordTok{calc_r2}\NormalTok{(test_m5}\OperatorTok{$}\NormalTok{fdi, test_m5}\OperatorTok{$}\NormalTok{pred))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_5: 0.712975771040528"
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_4:'}\NormalTok{, }\KeywordTok{calc_r2}\NormalTok{(test_m4}\OperatorTok{$}\NormalTok{fdi, test_m4}\OperatorTok{$}\NormalTok{pred))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_4: 0.706469257997731"
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_2:'}\NormalTok{, }\KeywordTok{calc_r2}\NormalTok{(test_m2}\OperatorTok{$}\NormalTok{fdi, test_m2}\OperatorTok{$}\NormalTok{pred))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_2: 0.188953574659956"
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_1:'}\NormalTok{, }\KeywordTok{calc_r2}\NormalTok{(test_m1}\OperatorTok{$}\NormalTok{fdi, test_m1}\OperatorTok{$}\NormalTok{pred))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_1: 0.156506262254786"
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{paste}\NormalTok{(}\StringTok{'M_0:'}\NormalTok{, }\KeywordTok{calc_r2}\NormalTok{(test_m0}\OperatorTok{$}\NormalTok{fdi, test_m0}\OperatorTok{$}\NormalTok{pred))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] "M_0: 0"
\end{verbatim}
The ordering is the same as for \(MSE\). I am pleased to see that the
best model explains 71 percent of the variation in FDI! (Interestingly,
the in-sample \(R^2\) for \(M_5\) is only .60.)
One final look at residuals, \(M_5\) on the test sample:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{par}\NormalTok{(}\DataTypeTok{mfrow=}\KeywordTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{,}\DecValTok{2}\NormalTok{))}
\KeywordTok{hist}\NormalTok{(test_m5}\OperatorTok{$}\NormalTok{resid)}
\KeywordTok{qqnorm}\NormalTok{(}\KeywordTok{scale}\NormalTok{(test_m5}\OperatorTok{$}\NormalTok{resid)); }\KeywordTok{qqline}\NormalTok{(}\KeywordTok{scale}\NormalTok{(test_m5}\OperatorTok{$}\NormalTok{resid), }\DataTypeTok{col=}\StringTok{'2'}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{report_files/figure-latex/unnamed-chunk-28-1.pdf}
The residuals histogram has about the same shape as that of the
training set. However, the theoretical quantile plot is not as
smooth---there are many observations whose predicted values are very far
from their actual values:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{test_m5 <-}\StringTok{ }\KeywordTok{arrange}\NormalTok{(test_m5, }\KeywordTok{desc}\NormalTok{(resid))}
\KeywordTok{head}\NormalTok{(test_m5[}\KeywordTok{c}\NormalTok{(}\StringTok{'year'}\NormalTok{, }\StringTok{'country'}\NormalTok{, }\StringTok{'resid'}\NormalTok{)], }\DecValTok{10}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## year country resid
## 1 2011 Netherlands 37699414141
## 2 2011 Luxembourg 32406834743
## 3 2011 Canada 31936016110
## 4 2012 United Kingdom 20971748405
## 5 2012 Luxembourg 15131703741
## 6 2012 Australia 13388939978
## 7 2012 Netherlands 13053412619
## 8 2012 Canada 10909575647
## 9 2012 Ireland 8886915986
## 10 2012 Switzerland 8734211652
\end{verbatim}
Among the largest residuals are the same old culprits: the Netherlands,
the U.K., etc.
\hypertarget{conclusion}{%
\section{Conclusion}\label{conclusion}}
This paper confirmed the relationship between U.S. arms sales and U.S.
FDI. The more arms a country receives from the U.S. in year \(t\), the
more foreign direct investment it will receive from the U.S. in the
following year, \(t+1\). This relationship is statistically significant,
even when controlling for other factors well known to influence FDI.
I tested a number of models and found that mixed effects models best
capture the data set. The final model, \(M_5\), performed best,
explaining 71 percent of the variation in \texttt{fdi} on the test
dataset. Other significant predictors of FDI include GDP and founding
NATO membership, both of which have a positive effect (\(p < .05\)). A
regime type of full democracy also has a positive relationship with FDI
at a lower significance level (\(p < .10\)).
Future work should focus on finding an explanation for why every
substantive model tended to under-estimate FDI in a handful of highly
developed NATO allies, especially the Netherlands and the U.K.
Introducing NATO membership as a variable helped, but was insufficient
to fully account for it.
\hypertarget{references}{%
\section{References}\label{references}}
Biglaiser, Glen, and Karl DeRouen, Jr. 2007. ``Following the Flag: Troop
Deployment and U.S. Foreign Direct Investment.'' \emph{International
Studies Quarterly} 51 (4): 835-854.
Center for Systemic Peace. 2017. \emph{Polity IV Annual Time-Series,
1800-2017} (Excel file).
\textless{}\url{http://www.systemicpeace.org/inscrdata.html}\textgreater{}.
Gibler, Douglas M. 2013. ``Formal Alliances (v4.1).''
\emph{International Military Alliances, 1648-2008}. CQ Press.
\textless{}\url{http://www.correlatesofwar.org/data-sets/formal-alliances}\textgreater{}.
Gilpin, Robert. 1987. \emph{The Political Economy of International
Relations}. Princeton: Princeton University Press.
Gleditsch, Kristian Skrede. ``Distance Between Capital Cities.''
\textless{}\url{http://ksgleditsch.com/data-5.html}\textgreater{}.
Gleditsch, Nils Petter, Peter Wallensteen, Mikael Eriksson, Margareta
Sollenberg, and Håvard Strand. 2002. ``Armed Conflict 1946-2001: A New
Dataset.'' \emph{Journal of Peace Research} 39 (5).
Gowa, Joanne. 1994. \emph{Allies, Adversaries, and International Trade}.
Princeton: Princeton University Press.
Gowa, Joanne, and Edward D. Mansfield. 1993. ``Power Politics and
International Trade.'' \emph{American Political Science Review} 87 (2):
408-20.
Little, Andrea, and David Leblang. 2004. ``Military Securities:
Financial Flows and the Deployment of U.S. Troops.'' In \emph{Annual
Meeting of the American Political Science Association}. Chicago, IL.
Long, Andrew G. 2003. ``Defense Pacts and International Trade.''
\emph{Journal of Peace Research} 40 (5): 537--52.
OECD. 2018. ``Benchmark definition, 3rd edition (BMD3): Foreign direct
investment: flows by partner country.'' \emph{OECD International Direct
Investment Statistics} (database).
Rugman, Alan M., and Alain Verbeke. 2001. ``Location, Competitiveness,
and the Multinational Enterprise.'' In \emph{Oxford Handbook of
International Business}, ed. A. M. Rugman and T. L. Brewer. Oxford:
Oxford University Press.
Stockholm International Peace Research Institute. \emph{Arms Transfers
Database}.
\textless{}\url{https://www.sipri.org/databases/armstransfers}\textgreater{}.
World Bank. 2018. \emph{National Accounts Data}.
\textless{}\url{https://data.worldbank.org/indicator/NY.GDP.MKTP.CD}\textgreater{}.
United Nations, Population Division. ``Total Population - Both Sexes''
(Excel file).
\textless{}\url{https://population.un.org/wpp/Download/Standard/Population/}\textgreater{}.
\end{document}
|
```python
import numpy as np
import numpy.linalg as LA
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
```
# Logistic Regression
## Maximum Log-likelihood
Here we will use logistic regression to conduct a binary classification. The logistic regression process will be formulated using maximum likelihood estimation. To begin, consider a random variable $y \in\{0,1\}$. Let $p$ be the probability that $y=1$. Given a set of $N$ independent trials, the likelihood of the sequence $y_{1}, y_{2}, \dots, y_{N}$ is given by:
$\mathcal{L} = \prod_{i=1}^{N}p^{y_{i}}(1-p)^{1 - y_{i}}$
Given a set of labeled training data, the goal of maximum likelihood estimation is to determine a probability distribution that best recreates the empirical distribution of the training set.
The log-likelihood is the logarithmic transformation of the likelihood function. As logarithms are strictly increasing functions, maximizing the likelihood is equivalent to maximizing the log-likelihood. Given a dataset of cardinality $N$, the log-likelihood (normalized by $N$) is given by:
$l = \frac{1}{N}\sum_{i=1}^{N}\Big(y_{i}\log(p) + (1 - y_{i})\log(1 - p)\Big)$
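As a quick numerical illustration (a sketch; the value of $p$ and the simulated labels below are invented for demonstration and are not part of the derivation), for labels actually drawn from a Bernoulli($p$) distribution the normalized log-likelihood evaluated at the true $p$ approaches $p\log(p) + (1-p)\log(1-p)$:
```python
import numpy as np

# sketch: normalized log-likelihood of simulated Bernoulli(p) labels
rng = np.random.default_rng(0)
p = 0.7                               # assumed P(y = 1)
y = rng.binomial(1, p, size=100_000)  # simulated labels
ll = np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))
print(ll)  # close to p*log(p) + (1-p)*log(1-p) ~= -0.6109
```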
## Logistic function
Logistic regression performs binary classification based on a probabilistic interpretation of the data. Essentially, the process seeks to assign a probability to new observations. If the probability associated with the new instance of data is greater than 0.5, then the new observation is assigned to 1 (for example). If the probability associated with the new instance of the data is less than 0.5, then it is assigned to 0. To map the real numerical values into probabilities (which must lie between 0 and 1), logistic regression makes use of the logistic (sigmoid) function, given by:
$\sigma(t) = \frac{1}{1 + e^{-t}}$
Note that by setting $t=0$, $\sigma(0) = 0.5$, which is the decision boundary. We should also note that the derivative of the logistic function with respect to the parameter $t$ is:
$\frac{d}{dt}\sigma(t) = \sigma(t)(1 - \sigma(t))$
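This identity is easy to verify numerically (a small sketch, not part of the original notebook), comparing the analytic derivative against a central finite difference:
```python
import numpy as np

def sigmoid(t):
    return 1.0 / (1.0 + np.exp(-t))

t, h = 0.3, 1e-6
numeric = (sigmoid(t + h) - sigmoid(t - h)) / (2 * h)  # central difference
analytic = sigmoid(t) * (1 - sigmoid(t))
print(numeric, analytic)  # the two agree to roughly 1e-10
```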
## Logistic Regression and Derivation of the Gradient
Let's assume the training data consists of $N$ observations, where observation $i$ is denoted by the pair $(y_{i},\mathbf{x}_{i})$, where $y_{i} \in \{0,1\}$ is the label for the feature vector $\mathbf{x}_{i}$. We wish to compute a linear decision boundary that best separates the labeled observations. Let $\mathbf{\theta}$ denote the vector of coefficients to be estimated. In this problem, the log-likelihood can be expressed as:
$l = \frac{1}{N}\sum_{i=1}^{N}\Big(y_{i}\log\big(\sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\big) + (1 - y_{i}) \log\big( 1 - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\big)\Big)$
The gradient of the objective with respect to the $j^{th}$ element of $\mathbf{\theta}$ is:
$$
\begin{aligned}
\frac{d}{d\theta^{(j)}} l &= \frac{1}{N}\sum_{i=1}^{N}\Bigg( \frac{d}{d\theta^{(j)}} y_{i}\log\big(\sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\big) + \frac{d}{d\theta^{(j)}}(1 - y_{i}) \log\big( 1 - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\big)\Bigg) \\
&= \frac{1}{N}\sum_{i=1}^{N}\Bigg(\frac{y_{i}}{\sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})} - \frac{1 - y_{i}}{1 - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})} \Bigg)\frac{d}{d\theta^{(j)}}\sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\\
&= \frac{1}{N}\sum_{i=1}^{N}\Bigg(\frac{y_{i}}{\sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})} - \frac{1 - y_{i}}{1 - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})} \Bigg)\sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\Big(1 - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\Big)x_{i}^{(j)}\\
&= \frac{1}{N}\sum_{i=1}^{N}\Bigg(\frac{y_{i} - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})}{\sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\Big(1 - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\Big)}\Bigg)\sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\Big(1 - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\Big)x_{i}^{(j)}\\
&= \frac{1}{N}\sum_{i=1}^{N}\Bigg(y_{i} - \sigma(\mathbf{\theta}^{T}\mathbf{x}_{i})\Bigg)x_{i}^{(j)}
\end{aligned}
$$
where the last equation has the familiar form of the product of the prediction error and the $j^{th}$ feature. With the gradient of the log-likelihood, the parameter vector $\mathbf{\theta}$ can now be calculated via gradient ascent (as we're <em>maximizing</em> the log-likelihood):
$$
\begin{equation}
\mathbf{\theta}^{(j)}(k+1) = \mathbf{\theta}^{(j)}(k) + \alpha \frac{1}{N}\sum_{i=1}^{N}\Big( y_{i} - \sigma(\mathbf{\theta}^{T}(k)\mathbf{x}_{i})\Big)x_{i}^{(j)}
\end{equation}
$$
```python
# Supporting Methods

# logistic (sigmoid) function
def sigmoid(a):
    return 1 / (1 + np.exp(-a))

# log-likelihood of labels y given (augmented) features x and parameters theta,
# written in the equivalent form sum(y*logits - log(1 + exp(logits)))
def log_likelihood(x, y, theta):
    logits = np.dot(x, theta)
    log_like = np.sum(y * logits - np.log(1 + np.exp(logits)))
    return log_like
```
```python
#Load the data
iris = datasets.load_iris()
x = iris.data[:,2:] #features are petal length and petal width
y = (iris.target==2).astype(int).reshape(len(x),1) #1 if iris-virginica, 0 otherwise
#Prepare Data for Regression
#pad x with a vector of ones for computation of intercept
x_aug = np.concatenate( (x,np.ones((len(x),1))) , axis=1)
```
```python
#sklearn logistic regression
log_reg = LogisticRegression(penalty='none')  # no regularization (newer sklearn spells this penalty=None)
log_reg.fit(x,y.ravel())
log_reg.get_params()
coefs = log_reg.coef_.reshape(-1,1)
intercept = log_reg.intercept_
theta_sklearn = np.concatenate((coefs, intercept.reshape(-1,1)), axis=0)
print("sklearn coefficients:")
print(theta_sklearn)
print("sklearn log likelihood: ", log_likelihood(x_aug, y, theta_sklearn))
```
sklearn coefficients:
[[ 5.75452053]
[ 10.44681116]
[-45.27248307]]
sklearn log likelihood: -10.281754052558687
```python
#Perform Logistic Regression
num_iterations = 40000
alpha = 1e-2
theta0 = np.ones((3,1))
theta = []
theta.append(theta0)
k = 0
while k < num_iterations:
    # compute prediction error
    e = y - sigmoid(np.dot(x_aug, theta[k]))
    # compute the gradient of the log-likelihood
    # (the 1/N factor from the derivation is folded into alpha)
    grad_ll = np.dot(x_aug.T, e)
    # gradient ascent step
    theta.append(theta[k] + alpha * grad_ll)
    # update iteration step
    k += 1
    if k % 4000 == 0:
        print("iteration: ", k, " log_likelihood:", log_likelihood(x_aug, y, theta[k]))

theta_final = theta[k]
print("scratch coefficients:")
print(theta_final)
```
iteration: 4000 log_likelihood: -11.529302688612685
iteration: 8000 log_likelihood: -10.800986140073512
iteration: 12000 log_likelihood: -10.543197464480874
iteration: 16000 log_likelihood: -10.425775111214602
iteration: 20000 log_likelihood: -10.36535749825992
iteration: 24000 log_likelihood: -10.331961877189842
iteration: 28000 log_likelihood: -10.312622172168293
iteration: 32000 log_likelihood: -10.301055609225223
iteration: 36000 log_likelihood: -10.293975480174714
iteration: 40000 log_likelihood: -10.289566343598294
scratch coefficients:
[[ 5.5044475 ]
[ 10.17562424]
[-43.60242548]]
```python
#Plot the data and the decision boundary
#create feature data for plotting
x_dec_bnd = np.linspace(0,7,100).reshape(-1,1)
#classification boundary from sklearn
y_sklearn = (theta_sklearn[2] * np.ones((100,1)) + theta_sklearn[0] * x_dec_bnd) / -theta_sklearn[1]
#classification boundary from scratch
y_scratch = (theta_final[2] * np.ones((100,1)) + theta_final[0] * x_dec_bnd) / -theta_final[1]
y_1 = np.where(y==1)[0] #training data, iris-virginica
y_0 = np.where(y==0)[0] #training data, not iris-virginica
plt.plot(x[y_0,0],x[y_0,1],'bo',label="not iris-virginica")
plt.plot(x[y_1,0],x[y_1,1],'k+',label="iris-virginica")
plt.plot(x_dec_bnd,y_sklearn,'r',label="sklearn dec. boundary")
plt.plot(x_dec_bnd,y_scratch,'g',label="scratch dec. boundary")
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.title('Logistic Regression Classification')
plt.xlim((3,7))
plt.ylim((0,3.5))
plt.legend()
plt.show()
```
|
[STATEMENT]
lemma approx_subset_monad: "x \<approx> y \<Longrightarrow> {x, y} \<le> monad x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<approx> y \<Longrightarrow> {x, y} \<subseteq> monad x
[PROOF STEP]
by (simp (no_asm)) (simp add: approx_monad_iff) |
# Typesetting Math in your book
Jupyter Book uses [MathJax](http://docs.mathjax.org/) for typesetting math in your
book. This allows you to have LaTeX-style mathematics in your online content.
This page shows you a few ways to control this.
For more information about equation numbering, see the
[MathJax equation numbering documentation](http://docs.mathjax.org/en/v2.7-latest/tex.html#automatic-equation-numbering).
# In-line math
To insert in-line math use the `$` symbol within a Markdown cell.
For example, the text `$this_{is}^{inline}$` will produce: $this_{is}^{inline}$.
# Math blocks
You can also include math blocks for separate equations. This allows you to focus attention
on more complex or longer equations, as well as link to them in your pages. To use a block
equation, wrap the equation in either `$$` or `\begin` statements.
For example,
```latex
\begin{equation}
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\end{equation}
```
results in
\begin{equation}
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\end{equation}
and
```latex
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
$$
```
results in
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
$$
## Numbering equations
MathJax has built-in support for numbering equations. This makes it possible to
easily reference equations throughout your page. To do so, add this tag
to a block equation:
`\tag{<number>}`
The `\tag` provides a number for the equation that will be inserted when you refer
to it in the text.
For example, the following code:
```latex
equation 1
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\tag{1}
$$
equation 2
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\tag{2}
$$
equation 999
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\tag{999}
$$
```
Results in these math blocks:
equation 1
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\tag{1}
$$
equation 2
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\tag{2}
$$
equation 999
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\tag{999}
$$
### Automatic numbering
If you'd like **all** block equations to be numbered with MathJax, you can activate
this with the following configuration in your `_config.yml` file:
```yaml
number_equations: true
```
In this case, all equations will have numbers. If you'd like to deactivate
an equation's number, include a `\notag` with your equation, like so:
```latex
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\notag
$$
```
## Linking to equations
Adding `\label{mylabel}` to an equation allows you to refer to the equation elsewhere in the page. You
can define a human-friendly label and MathJax will insert an anchor with the following form:
```html
#mjx-eqn-mylabel
```
If you use `\label` in conjunction with `\tag`, then you can insert references directly to an equation
by using the `\ref` syntax. For example, here's an equation with a tag and label:
```latex
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\label{mylabel1}\tag{24}
$$
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\label{mylabel2}\tag{25}
$$
```
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\label{mylabel1}\tag{24}
$$
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
\label{mylabel2}\tag{25}
$$
Now, we can refer to these math blocks with `\ref` elements. For example,
we can mention Equation \ref{mylabel1} using `\ref{mylabel1}` and
Equation \ref{mylabel2} with `\ref{mylabel2}`.
Note that these equations also have anchors on them, which can be used to link
to an equation from elsewhere, for example with this link text:
```html
<a href="#mjx-eqn-mylabel2">My link</a>
```
|
# Tests for the entropy functions
# (assumption: get_entropy and discretize_values come from the package
# under test, e.g. InformationMeasures.jl)
using Test

arr = rand(1000)
sqrt_1000 = sqrt(1000)
# Test with a single array
@test get_entropy(arr) ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with 1 array passed.")
# Test with two arrays
@test get_entropy(arr, arr) ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with 2 arrays passed.")
# Test with three arrays
@test get_entropy(arr, arr, arr) ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with 3 arrays passed.")
# Test with uniform_width
@test get_entropy(arr, mode = "uniform_width") ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with uniform width passed.")
# Test with uniform_count
@test get_entropy(arr, mode = "uniform_count") ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with uniform count passed.")
# Test with bayesian_blocks
# @test get_entropy(arr, mode = "bayesian_blocks") ≈ log2(10) atol = 0.05
# println("Entropy with Bayesian blocks passed.")
# Test with number_of_bins
@test get_entropy(arr, number_of_bins = 5) ≈ log2(5) atol = 0.05
println("Entropy with number of bins passed.")
# Test with get_number_of_bins
@test get_entropy(arr, get_number_of_bins = function(values) return 2 end) ≈ log2(2) atol = 0.05
println("Entropy with get number of bins passed.")
# Test with maximum_likelihood
@test get_entropy(arr, estimator = "maximum_likelihood") ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with maximum likelihood passed.")
# Test with shrinkage
@test get_entropy(arr, estimator = "shrinkage") ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with shrinkage passed.")
# Test with shrinkage and lambda
@test get_entropy(arr, estimator = "shrinkage", lambda = 0) ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with shrinkage and lambda passed.")
# Test with dirichlet
@test get_entropy(arr, estimator = "dirichlet") ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with Dirichlet passed.")
# Test with dirichlet and prior
@test get_entropy(arr, estimator = "dirichlet", prior = 1) ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with Dirichlet and prior passed.")
# Test with miller_madow
@test get_entropy(arr, estimator = "miller_madow") ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with Miller-Madow passed.")
# Test with base e
@test get_entropy(arr, base = MathConstants.e) ≈ log(sqrt_1000) atol = 0.05 # plain `e` is undefined in Julia ≥ 1.0
println("Entropy with change of base passed.")
# Test with discretized
@test get_entropy(discretize_values(arr), discretized = true) ≈ log2(sqrt_1000) atol = 0.05
println("Entropy with discretized input passed.")
|
universe u
variables (α : Type u) (a b c d : α)
variables (hab : a = b) (hcb : c = b) (hcd : c = d)
example : a = d := eq.trans (eq.trans hab (eq.symm hcb)) hcd
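-- A calc-style sketch of the same proof, using the same hypotheses:
example : a = d :=
calc a = b : hab
   ... = c : hcb.symm
   ... = d : hcd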
|