text
stringlengths 0
3.34M
|
---|
-- List-membership proofs: `Elem x xs` is evidence that x occurs in xs.
module elem

using (xs : List a)

  -- A proof that a value is an element of a list.
  data Elem : a -> List a -> Type where
    Here  : Elem x (x :: xs)                 -- the value sits at the head
    There : Elem x xs -> Elem x (y :: xs)    -- the value occurs somewhere in the tail

-- Search for x in xs, producing a membership proof when found.
-- Returns Nothing when x is absent: the `!(isElem x xs)` bind in the
-- No-branch propagates Nothing through the Maybe monad.
-- NOTE(review): despite the DecEq constraint this returns Maybe, not Dec,
-- so a Nothing result carries no proof of absence.
isElem : DecEq a => (x : a) -> (xs : List a) -> Maybe (Elem x xs)
isElem x [] = Nothing
isElem x (y :: xs) with (decEq x y)
  isElem x (x :: xs) | (Yes refl) = return Here
  isElem x (y :: xs) | (No f) = return (There !(isElem x xs))
|
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}

-- Examples of recursion on rose trees where the recursive call is made on
-- a term (tree a ts) that is not a syntactic subterm of the argument, yet
-- is still accepted: the List inside the node shrinks structurally.
module FOT.FOTC.Program.Mirror.StructurallySmaller.StructurallySmallerSL where

open import Data.List

-- Rose trees: a label together with a list of subtrees.
data Tree (D : Set) : Set where
  tree : D → List (Tree D) → Tree D

-- Walks the subtree list; every call shrinks the list argument, so the
-- termination checker accepts it even though (tree a ts) is a new term.
foo : {D : Set} → Tree D → D
foo (tree a [])       = a
foo (tree a (t ∷ ts)) = foo (tree a ts)

-- Same shape, but also recursing into the head subtree t.
-- `helper` is postulated: only the recursion pattern matters here.
bar : {D : Set} → Tree D → D
bar (tree a [])           = a
bar {D} (tree a (t ∷ ts)) = helper (bar t) (bar (tree a ts))
  where
    postulate helper : D → D → D
|
Time-saving detangling conditioner to nourish and replenish any moisture that your curls may be lacking to reduce the appearance of dryness, frizz, and breakage.
After shampooing with our Natural Almond & Avocado Moisturizing & Detangling Sulfate Free Shampoo, apply Natural Almond & Avocado Moisturizing & Detangling Conditioner and comb through. Allow to sit for up to 3 minutes. Rinse thoroughly.
Water (Aqua), Cetyl Alcohol, Behentrimonium Chloride, Amodimethicone (and) Cetrimonium Chloride (and) Trideceth-12, Glycol Distearate, Glycol Stearate, Phenoxyethanol (and) Benzoic Acid (and) Dehydroacetic Acid, Prunus Amygdalus Dulcis (Sweet Almond) Oil, Propylene Glycol (and) Water (and) Persea Gratissima (Avocado) Leaf Extract, Persea Gratissima Oil (and) Hydrogenated Vegetable Oil (Avocado Butter), Prunus Amygdalus Dulcis Oil (and) Hydrogenated Vegetable Oil (Almond Butter), Hydrolyzed Wheat Protein, Fragrance (Parfum), Sodium PCA, Tocopheryl acetate, Citric Acid, Benzyl Benzoate, Citral, Geraniol, Hydroxyisohexyl 3-cyclohexene carboxaldehyde, Limonene, Linalool, Butylphenyl methylpropanal. |
lemma analytic_on_add [analytic_intros]: assumes f: "f analytic_on S" and g: "g analytic_on S" shows "(\<lambda>z. f z + g z) analytic_on S" |
! ##################################################################################################################################
! Begin MIT license text.
! _______________________________________________________________________________________________________
! Copyright 2019 Dr William R Case, Jr ([email protected])
! Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
! associated documentation files (the "Software"), to deal in the Software without restriction, including
! without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
! copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
! the following conditions:
! The above copyright notice and this permission notice shall be included in all copies or substantial
! portions of the Software and documentation.
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
! OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
! THE SOFTWARE.
! _______________________________________________________________________________________________________
! End MIT license text.
SUBROUTINE FORCE_MOM_PROC
! Force/moment processor
! Transforms the input B.D. force and moment data to system force data in the SYS_LOAD array for dof's in the G set.
! File LINK1I was written when FORCE or MOMENT B.D entries were read, with one record for each such card.
! There are NFORCE total no. records written to file LINK1I with each record containing:
! SETID = Load set ID on the FORCE or MOMENT card
! AGRID = GRID where load is located
! ACID_L = Local coord sys ID that load is given in
! FORMON(1-3) = 3 components of force or moment in coord system ACID_L
! NAME = 'FORCE' or 'MOMENT' to describe whether B.D card was a FORCE or MOMENT card
! The process in creating array SYS_LOAD from this information is as follows:
! (1) For each record (1 to NFORCE) in file LINK1I:
! (a) Read a record: (SETID, AGRID, ACID_L, FORMON(1-3) and NAME)
! (b) Transform coords from local to basic
! (c) Transform from basic to global (as defined on the GRID card for AGRID).
! (d) Write NFORCE records, similar to those in LINK1I, but now in global coords, to a scratch file (SCRATCH99).
! (2) For each subcase (1 to NSUB):
! (a) Generate LSID, RSID tables of load set ID's/scale factors for load sets for this subcase:
! ( i) LLOADC is the max number of pairs of scale factors/load set ID's over all LOAD Bulk Data cards
! including the pair defined by the set ID and overall scale factor on the LOAD Bulk Data card.
! LSID and RSID are dimensioned 1 to LLOADC and:
! LSID(1) is always the load set ID requested in Case Control for this subcase.
! RSID(1) is always 1.0
! ( ii) If the load set requested in Case Control is not a set ID from a LOAD Bulk data card then the
! load must be on a separate FORCE/MOMENT in which case LSID(1) and RSID(1) are all that is needed
! to define the load set contribution due to the FORCE/MOMENT cards for this subcase.
! (iii) If the load set requested in Case Control is a set ID from a LOAD Bulk data card then the ramainder
! (K = 2,LLOADC) of entries into LSID and RSID will be the pairs of load set ID's/scale factors from
! that LOAD Bulk Data card (with RSID also multiplied by the overall scale factor on the LOAD Bulk data
! card. The load set ID's are in array LOAD_SIDS(i,j) created when LOAD Bulk Data cards were read.
! The scale factors are in array LOAD_FACS(i,j) also created when LOAD Bulk Data cards were read.
! Note, there may not be as many as LLOADC pairs of set ID's/scale factors on a given LOAD Bulk Data
! card since LLOADC is the max, from all LOAD Bulk Data cards, of pairs.
! Thus, the entries in LSID from the last entry (for a given LOAD card) to LLOADC will be zero (LSID
! was initialized to zero). This fact is used in a DO loop to EXIT when LSID(K) = 0
! (b) For each record in SCRATCH-991 (1 to NFORCE)
! ( i) Read a record from file: (SETID, AGRID, ACID_G, FORMON(i) (in coord sys ACID_ G), NAME)
! ( ii) Scan LSID and RSID to get the scale factor for the FORMON components in SETID, if this
! FORCE/MOMENT's set ID is in LSID
! (iii) Load these force/moment values into the system load array, SYS_LOAD
USE PENTIUM_II_KIND, ONLY : BYTE, LONG, DOUBLE
USE IOUNT1, ONLY : FILE_NAM_MAXLEN, WRT_ERR, WRT_LOG, ERR, F04, F06, SCR, L1I, LINK1I, L1I_MSG
USE SCONTR, ONLY : BLNK_SUB_NAM, FATAL_ERR, LLOADC, NCORD, NFORCE, NGRID, NLOAD, NSUB, WARN_ERR
USE TIMDAT, ONLY : TSEC
USE SUBR_BEGEND_LEVELS, ONLY : FORCE_MOM_PROC_BEGEND
USE CONSTANTS_1, ONLY : ZERO, ONE
USE PARAMS, ONLY : EPSIL, SUPWARN
USE DOF_TABLES, ONLY : TDOF, TDOF_ROW_START
USE MODEL_STUF, ONLY : LOAD_SIDS, LOAD_FACS, SYS_LOAD, SUBLOD, GRID, GRID_ID, CORD
USE FORCE_MOM_PROC_USE_IFs
IMPLICIT NONE
CHARACTER(LEN=LEN(BLNK_SUB_NAM)):: SUBR_NAME = 'FORCE_MOM_PROC'
CHARACTER( 1*BYTE) :: FOUND ! Indicator on whether we found something we were looking for
CHARACTER( 1*BYTE) :: CORD_FND ! = 'Y' if coord sys ID on FORCE/MOMENT defined, 'N' otherwise
CHARACTER( 1*BYTE) :: GRID_FND ! = 'Y' if grid ID on FORCE/MOMENT defined, 'N' otherwise
CHARACTER(24*BYTE) :: MESSAG ! File description.
CHARACTER( 8*BYTE) :: NAME ! Name to indicate whether we are processing a FORCE or a MOMENT
CHARACTER(FILE_NAM_MAXLEN*BYTE) :: SCRFIL ! File name
INTEGER(LONG) :: ACID_L ! Actual local coord sys ID on FORCE or MOMENT card
INTEGER(LONG) :: ACID_G ! Actual global coord sys ID for AGRID
INTEGER(LONG) :: AGRID ! Actual grid number from FORCE or MOMENT card
INTEGER(LONG) :: COMP1, COMP2 ! DOF components (1-6)
INTEGER(LONG) :: GDOF ! G-set DOF no. for actual grid AGRID
INTEGER(LONG) :: GRID_ID_ROW_NUM ! Row number in array GRID_ID where AGRID is found
INTEGER(LONG) :: G_SET_COL_NUM ! Col no. in array TDOF where G-set DOF's are kept
INTEGER(LONG) :: I,J,K ! DO loop indices
INTEGER(LONG) :: ICID ! Internal coordinate system ID for ACID_L or ACID_G
INTEGER(LONG) :: IERROR = 0 ! Local error count
INTEGER(LONG) :: IGRID ! Internal grid ID
INTEGER(LONG) :: IOCHK ! IOSTAT error number when opening a file
INTEGER(LONG) :: K1 ! Counter
INTEGER(LONG) :: SETID ! Load set ID read from record in file LINK1I
INTEGER(LONG) :: LSID(LLOADC+1) ! Array of load SID's, from a LOAD Bulk Data card, for one S/C
INTEGER(LONG) :: NSID ! Count on no. of pairs of entries on a LOAD B.D. card (<= LLOADC)
INTEGER(LONG) :: OUNT(2) ! File units to write messages to.
INTEGER(LONG) :: READ_ERR = 0 ! Count of errors reading records from FORCE/MOMENT file
INTEGER(LONG) :: REC_NO ! Record number when reading a file
INTEGER(LONG) :: ROW_NUM ! Row no. in array TDOF corresponding to GDOF
INTEGER(LONG) :: ROW_NUM_START ! Row no. in array TDOF where data begins for AGRID
INTEGER(LONG), PARAMETER :: SUBR_BEGEND = FORCE_MOM_PROC_BEGEND
REAL(DOUBLE) :: EPS1 ! A small number to compare real zero
REAL(DOUBLE) :: F1(3), F2(3) ! 3 force or moment components in intermediate calcs
REAL(DOUBLE) :: FORCEI ! The force value that goes into SYS_LOAD for a grid/subcase
REAL(DOUBLE) :: PHID, THETAD ! Outputs from subr GEN_T0L
REAL(DOUBLE) :: FORMON(3) ! Array of 3 force/moment mag's read from file LINK1I
REAL(DOUBLE) :: RSID(LLOADC+1) ! Array of load magnitudes (for LSID set ID's) needed for one S/C
REAL(DOUBLE) :: SCALE ! Scale factor for a load
REAL(DOUBLE) :: T12(3,3) ! Coord transformation matrix
INTRINSIC :: DABS
! **********************************************************************************************************************************
IF (WRT_LOG >= SUBR_BEGEND) THEN
CALL OURTIM
WRITE(F04,9001) SUBR_NAME,TSEC
9001 FORMAT(1X,A,' BEGN ',F10.3)
ENDIF
! **********************************************************************************************************************************
EPS1 = EPSIL(1)
NAME = ' ' ! will be read as FORCE or MOMENT from file L1I
! Make units for writing errors the error file and output file
OUNT(1) = ERR
OUNT(2) = F06
! Open a scratch file that will be used to rewrite data from L1I after the coords have been transformed to
! global. This file is only needed in this subr and is closed and deleted herein.
SCRFIL(1:) = ' '
SCRFIL(1:9) = 'SCRATCH-991'
OPEN (SCR(1),STATUS='SCRATCH',POSITION='REWIND',FORM='UNFORMATTED',ACTION='READWRITE',IOSTAT=IOCHK)
IF (IOCHK /= 0) THEN
CALL OPNERR ( IOCHK, SCRFIL, OUNT, 'Y' )
CALL FILE_CLOSE ( SCR(1), SCRFIL, 'DELETE', 'Y' )
CALL OUTA_HERE ( 'Y' ) ! Error opening scratch file, so quit
ENDIF
REWIND (SCR(1))
! **********************************************************************************************************************************
! Successively read records from LINK1I and transform coords:
! (1) From local (as defined on the FORCE/MOMENT card) to basic
! (2) From basic to global (global for the grid point that the load is at)
! Then write out a record similar to what was read from LINK1I to a scratch file that has the same info as
! records from LINK1I but with data in global coords
i_do1:DO I=1,NFORCE
! (1-a) Read a record from file LINK1I
READ(L1I,IOSTAT=IOCHK) SETID,AGRID,ACID_L,(FORMON(J),J=1,3),NAME
IF (IOCHK /= 0) THEN
REC_NO = I
CALL READERR ( IOCHK, LINK1I, L1I_MSG, REC_NO, OUNT, 'Y' )
READ_ERR = READ_ERR + 1
CYCLE i_do1
ENDIF
DO J=1,3
F1(J) = FORMON(J)
ENDDO
! From actual grid pt number (AGRID) on the FORCE/MOMENT card, get row number in array GRID_ID where AGRID exists
GRID_FND = 'Y'
CALL GET_ARRAY_ROW_NUM ( 'GRID_ID', SUBR_NAME, NGRID, GRID_ID, AGRID, GRID_ID_ROW_NUM )
IF (GRID_ID_ROW_NUM == -1) THEN
GRID_FND = 'N' ! Grid ID on FORCE/MOMENT undefined
IERROR = IERROR + 1
FATAL_ERR = FATAL_ERR + 1
WRITE(ERR,1822) 'GRID ', AGRID, NAME, SETID
WRITE(F06,1822) 'GRID ', AGRID, NAME, SETID
ENDIF
CORD_FND = 'N'
ICID = -1
IF (ACID_L /= 0) THEN ! Get local coord sys for this FORCE/MOMENT card
j_do12: DO J=1,NCORD
IF (ACID_L == CORD(J,2)) THEN
CORD_FND = 'Y'
ICID = J
EXIT j_do12
ENDIF
ENDDO j_do12
IF (CORD_FND == 'N') THEN ! Coord sys ID on FORCE/MOMENT undefined
IERROR = IERROR + 1
FATAL_ERR = FATAL_ERR + 1
WRITE(ERR,1822) 'COORD SYSTEM ', ACID_L, NAME, SETID
WRITE(F06,1822) 'COORD SYSTEM ', ACID_L, NAME, SETID
ENDIF
IF (( CORD_FND == 'N') .OR. (GRID_FND == 'N')) THEN
CYCLE i_do1 ! Can't continue (GRID_ID_ROW_NUM or ACID_L not found),
! so CYCLE & read next record
ENDIF
! Get coord transformation matrix - local to basic
CALL GEN_T0L ( GRID_ID_ROW_NUM, ICID, THETAD, PHID, T12 )
CALL MATMULT_FFF ( T12, F1, 3, 3, 1, F2 ) ! Transform coords from local (forces F1) to basic (forces F2)
ELSE ! Local coord system is basic, so equate F2, F1
DO J=1,3
F2(J) = F1(J)
ENDDO
ENDIF
! (1-c) Transform from basic to global. Note that F2 are forces in basic
IF (IERROR == 0) THEN
ACID_G = GRID(GRID_ID_ROW_NUM,3) ! Get internal coord sys number for this grid's global coord sys
ICID = -1
IF (ACID_G /= 0) THEN ! We know ACID_G exists; that was verified in subr GRID_PROC
j_do14: DO J=1,NCORD
IF (ACID_G == CORD(J,2)) THEN
ICID = J
EXIT j_do14
ENDIF
ENDDO j_do14
! Get coord transformation matrix - basic to global
CALL GEN_T0L ( GRID_ID_ROW_NUM, ICID, THETAD, PHID, T12 )
CALL MATMULT_FFF_T (T12, F2, 3, 3, 1, F1) ! Transform coords from basic to global for F2
DO J=1,3
F2(J) = F1(J) ! F2 has the force/moment values in global coords
ENDDO
ENDIF
DO J=1,3 ! Reset FORMON to F2, which now has forces in global coords
FORMON(J) = F2(J)
ENDDO
! (1-d) Write results to scratch file. FORMON comps arein global coords
WRITE(SCR(1)) SETID,AGRID,ACID_G,(FORMON(J),J=1,3),NAME
ENDIF
ENDDO i_do1
IF (READ_ERR > 0) THEN
WRITE(ERR,9998) READ_ERR,LINK1I
WRITE(ERR,9998) READ_ERR,LINK1I
CALL OUTA_HERE ( 'Y' ) ! Quit due to errors reading FORCE/MOMENT file
ENDIF
IF (IERROR > 0) THEN
WRITE(ERR,9996) SUBR_NAME,IERROR
WRITE(ERR,9996) SUBR_NAME,IERROR
CALL OUTA_HERE ( 'Y' ) ! Quit due to undefined grid and coord sys ID's
ENDIF
! **********************************************************************************************************************************
! Now process forces in global coords into SYS_LOAD for each subcase.
DO I=1,LLOADC ! Initialize LSID, RSID arrays
LSID(I) = 0
RSID(I) = ZERO
ENDDO
REWIND (SCR(1))
MESSAG = 'SCRATCH: FORCE_MOM_PROC '
i_do2:DO I=1,NSUB ! Loop through the S/C's
IF (SUBLOD(I,1) == 0) THEN ! If no load for this S/C, CYCLE
CYCLE i_do2
ENDIF
! (2-a) Generate LSID/RSID tables for this S/C.
NSID = 1 ! There is always 1 pair (more if there are LOAD B.D cards).
LSID(1) = SUBLOD(I,1) ! Note: If there are no LOAD B.D. cards, LSID(1) and RSID(1) will be
RSID(1) = ONE ! for the FORCE or MOMENT card in file LINK1I that matches SUBLOD(I,1)
DO J = 1,NLOAD ! Then, the actual mag. will come from RSID(1) & the FORMON components
IF (SUBLOD(I,1) == LOAD_SIDS(J,1)) THEN
k_do21: DO K = 2,LLOADC
IF (LOAD_SIDS(J,K) == 0) THEN
EXIT k_do21
ELSE
NSID = K ! Note: NSID will not get larger than LLOADC
RSID(K) = LOAD_FACS(J,1)*LOAD_FACS(J,K)
LSID(K) = LOAD_SIDS(J,K)
ENDIF
ENDDO k_do21
ENDIF
ENDDO
j_do22: DO J=1,NFORCE ! Process FORCE / MOMENT card info
! (2-b- i) Read a record from scratch - forces are in global coords
READ(SCR(1),IOSTAT=IOCHK) SETID,AGRID,ACID_G,(FORMON(K),K=1,3),NAME
IF (IOCHK /= 0) THEN
REC_NO = J
CALL READERR ( IOCHK, SCRFIL, MESSAG, REC_NO, OUNT, 'Y' )
CALL FILE_CLOSE ( SCR(1), SCRFIL, 'DELETE', 'Y' )
CALL OUTA_HERE ( 'Y' ) ! Error reading scratch file, so quit
ENDIF
FOUND = 'N' ! (2-b- ii). Scan through LSID to find set that matches SETID read.
k_do221: DO K = 1,NSID ! There is a match; we made sure all requested loads were in B.D. deck
IF (SETID == LSID(K)) THEN ! We start with K = 1 to cover the case of no LOAD B.D cards
SCALE = RSID(K)
FOUND = 'Y'
EXIT k_do221
ENDIF
ENDDO k_do221
IF (FOUND == 'N') THEN ! Cycle back on J loop and read another force/moment card
CYCLE j_do22
ENDIF
IF (NAME == 'FORCE ') THEN ! Set component range (for loop below) based on card type
COMP1 = 1
COMP2 = 3
ELSE IF (NAME == 'MOMENT ') THEN
COMP1 = 4
COMP2 = 6
ELSE
WRITE(ERR,1516) SUBR_NAME,NAME
WRITE(F06,1516) SUBR_NAME,NAME
CALL FILE_CLOSE ( SCR(1), SCRFIL, 'DELETE', 'Y' )
FATAL_ERR = FATAL_ERR + 1
CALL OUTA_HERE ( 'Y' ) ! Coding error (not FORCE or MOMENT), so quit
ENDIF
! Get GRID_ID_ROW_NUM, we checked it's existence earlier
CALL GET_ARRAY_ROW_NUM ( 'GRID_ID', SUBR_NAME, NGRID, GRID_ID, AGRID, GRID_ID_ROW_NUM )
IF ((DABS(FORMON(1)) < EPS1) .AND. (DABS(FORMON(2)) < EPS1) .AND. (DABS(FORMON(3)) < EPS1)) THEN
WARN_ERR = WARN_ERR + 1
WRITE(ERR,1513) NAME,SETID
IF (SUPWARN == 'N') THEN ! Issue warning if all force components zero
WRITE(F06,1513) NAME,SETID
ENDIF
ENDIF
!xx CALL CALC_TDOF_ROW_NUM ( AGRID, ROW_NUM_START, 'N' )
CALL GET_ARRAY_ROW_NUM ( 'GRID_ID', SUBR_NAME, NGRID, GRID_ID, AGRID, IGRID )
ROW_NUM_START = TDOF_ROW_START(IGRID)
K1 = 0 ! (2-b-iii). Put forces and moments into SYS_LOAD array
k_do222: DO K = COMP1,COMP2
K1 = K1+1
IF ((K1 < 1) .OR. (K1 > 3)) THEN
WRITE(ERR,1514) SUBR_NAME
WRITE(F06,1514) SUBR_NAME
FATAL_ERR = FATAL_ERR + 1
CALL FILE_CLOSE ( SCR(1), SCRFIL, 'DELETE', 'Y' )
CALL OUTA_HERE ( 'Y' ) ! Coding error (dim on array FORMON out of bounds), so quit
ELSE
FORCEI = SCALE*FORMON(K1)
IF (DABS(FORCEI) < EPS1) THEN
CYCLE k_do222
ELSE
CALL TDOF_COL_NUM ( 'G ', G_SET_COL_NUM )
ROW_NUM = ROW_NUM_START + K -1
GDOF = TDOF(ROW_NUM,G_SET_COL_NUM)
SYS_LOAD(GDOF,I) = SYS_LOAD(GDOF,I) + FORCEI
ENDIF
ENDIF
ENDDO k_do222
ENDDO j_do22
REWIND (SCR(1)) ! Need to read all of the FORCE/MOMENT records again for the next S/C
ENDDO i_do2
CALL FILE_CLOSE ( SCR(1), SCRFIL, 'DELETE', 'Y' )
! **********************************************************************************************************************************
IF (WRT_LOG >= SUBR_BEGEND) THEN
CALL OURTIM
WRITE(F04,9002) SUBR_NAME,TSEC
9002 FORMAT(1X,A,' END ',F10.3)
ENDIF
RETURN
! **********************************************************************************************************************************
1822 FORMAT(' *ERROR 1822: ',A,I8,' ON ',A,I8,' IS UNDEFINED')
1513 FORMAT(' *WARNING : ',A8,1X,I8,' HAS ALL ZERO COMPONENTS')
1514 FORMAT(' *ERROR 1514: PROGRAMMING ERROR IN SUBROUTINE ',A &
,/,14X,' K1 = ',I8,' BUT MUST BE 1 <= K1 <= 3')
1516 FORMAT(' *ERROR 1516: PROGRAMMING ERROR IN SUBROUTINE ',A &
,/,14X,' VARIABLE "NAME" SHOULD BE "FORCE" OR "MOMENT" BUT IS: ',A8)
9998 FORMAT(/,' PROCESSING TERMINATED DUE TO ABOVE ',I8,' ERRORS READING FILE:',/,A)
9996 FORMAT(/,' PROCESSING ABORTED IN SUBROUTINE ',A,' DUE TO ABOVE ',I8,' ERRORS')
! **********************************************************************************************************************************
END SUBROUTINE FORCE_MOM_PROC
|
-- Standard propositional-logic identities proved constructively,
-- followed by a section of facts that require classical reasoning.
variable (p q r : Prop)

-- commutativity of ∧ and ∨
example : p ∧ q ↔ q ∧ p :=
  ⟨λ h => ⟨h.right, h.left⟩, λ h => ⟨h.right, h.left⟩⟩
example : p ∨ q ↔ q ∨ p :=
  ⟨λ h => h.elim Or.inr Or.inl, λ h => h.elim Or.inr Or.inl⟩

-- associativity of ∧ and ∨
example : (p ∧ q) ∧ r ↔ p ∧ (q ∧ r) :=
  ⟨λ h => ⟨h.left.left, ⟨h.left.right, h.right⟩⟩,
   λ h => ⟨⟨h.left, h.right.left⟩, h.right.right⟩⟩
example : (p ∨ q) ∨ r ↔ p ∨ (q ∨ r) :=
  ⟨λ h => h.elim (λ g => g.elim Or.inl (Or.inr ∘ Or.inl)) (Or.inr ∘ Or.inr),
   λ h => h.elim (Or.inl ∘ Or.inl) (λ g => g.elim (Or.inl ∘ Or.inr) Or.inr)⟩

-- distributivity
example : p ∧ (q ∨ r) ↔ (p ∧ q) ∨ (p ∧ r) :=
  ⟨λ h => h.right.elim (Or.inl ∘ (λ hq => ⟨h.left, hq⟩)) (Or.inr ∘ (λ hr => ⟨h.left, hr⟩)),
   λ h => h.elim (λ hpq => ⟨hpq.left, Or.inl hpq.right⟩) (λ hpr => ⟨hpr.left, Or.inr hpr.right⟩)⟩
example : p ∨ (q ∧ r) ↔ (p ∨ q) ∧ (p ∨ r) :=
  ⟨λ l => l.elim (λ hp => ⟨Or.inl hp, Or.inl hp⟩) (λ hqr => ⟨Or.inr hqr.left, Or.inr hqr.right⟩),
   λ r => r.left.elim Or.inl (λ hq => r.right.elim Or.inl (λ hr => Or.inr ⟨hq, hr⟩))⟩

-- other properties (currying and case analysis on implications)
example : (p → (q → r)) ↔ (p ∧ q → r) :=
  ⟨λ p_q_r => (λ hpq => p_q_r hpq.left hpq.right),
   λ paq_r => (λ hp => (λ hq => paq_r ⟨hp, hq⟩))⟩
example : ((p ∨ q) → r) ↔ (p → r) ∧ (q → r) :=
  ⟨λ poq_r => ⟨poq_r ∘ Or.inl, poq_r ∘ Or.inr⟩,
   λ pr_qr => (λ hpq => hpq.elim pr_qr.left pr_qr.right)⟩

-- note: ¬p is equivalent to p → False
example : ¬(p ∨ q) ↔ ¬p ∧ ¬q :=
  ⟨λ npq => ⟨λ hp => npq (Or.inl hp), λ hq => npq (Or.inr hq)⟩,
   λ npnq => (λ hpq => hpq.elim npnq.left npnq.right)⟩
example : ¬p ∨ ¬q → ¬(p ∧ q) :=
  λ npnq => npnq.elim (λ hnp => (λ hpq => hnp hpq.left)) (λ hnq => (λ hpq => hnq hpq.right))
example : ¬(p ∧ ¬p) :=
  λ pnp => pnp.right pnp.left
example : p ∧ ¬q → ¬(p → q) :=
  λ pnq => (λ hpq => pnq.right (hpq pnq.left))

-- ex falso
example : ¬p → (p → q) :=
  λ hnp => λ hp => False.elim (hnp hp)
example : (¬p ∨ q) → (p → q) :=
  λ npq => λ hp => npq.elim (λ np => False.elim (np hp)) id
example : p ∨ False ↔ p :=
  ⟨λ hpf => hpf.elim id False.elim, Or.inl⟩
example : p ∧ False ↔ False :=
  ⟨And.right, False.elim⟩
example : (p → q) → (¬q → ¬p) :=      -- contraposition (constructive direction)
  λ hpq => λ hnq => λ hp => hnq (hpq hp)

-- The following require classical logic (byCases = excluded middle).
section classical
open Classical
variable (p q r : Prop)

example : (p → q ∨ r) → ((p → q) ∨ (p → r)) :=
  λ p_qr => byCases
    (λ (hq : q) => Or.inl (λ _ => hq))
    (λ (hnq : ¬q) => byCases
      (λ (hr : r) => Or.inr (λ _ => hr))
      (λ (hnr : ¬r) => (Or.inl (λ p => (p_qr p).elim (False.elim ∘ hnq) (False.elim ∘ hnr)))))
example : ¬(p ∧ q) → ¬p ∨ ¬q :=       -- De Morgan, classical direction
  λ hnpq => byCases
    (λ (hp : p) => byCases
      (λ (hq : q) => False.elim (hnpq ⟨hp, hq⟩))
      (λ (hnq : ¬q) => Or.inr hnq))
    (λ (hnp : ¬p) => Or.inl hnp)
example : ¬(p → q) → p ∧ ¬q :=
  λ npq => byCases
    (λ (hq : q) => False.elim (npq (λ _ => hq)))
    (λ (hnq : ¬q) => byCases
      (λ (hp : p) => ⟨hp, hnq⟩)
      (λ (hnp : ¬p) => False.elim (npq (λ hp => absurd hp hnp))))
example : (p → q) → (¬p ∨ q) :=
  λ hpq => byCases
    (λ (hq : q) => Or.inr hq)
    (λ (hnq : ¬q) => byCases
      (λ (hp : p) => False.elim (hnq (hpq hp)))
      (λ (hnp : ¬p) => Or.inl hnp))
example : (¬q → ¬p) → (p → q) :=      -- converse contraposition (classical)
  λ hnqnp => byCases
    (λ (hq : q) => λ _ => hq)
    (λ (hnq : ¬q) => byCases
      (λ (hp : p) => False.elim ((hnqnp hnq) hp))
      (λ (hnp : ¬p) => λ hp => absurd hp hnp))
example : p ∨ ¬p := byCases           -- excluded middle
  (λ (hp : p) => Or.inl hp)
  (λ (hnp : ¬p) => Or.inr hnp)
example : (((p → q) → p) → p) := byCases   -- Peirce's law
  (λ (hp : p) => λ _ => hp)
  (λ (hnp : ¬p) => λ hpqp => hpqp (λ hp => absurd hp hnp))
end classical

-- A proposition cannot be equivalent to its own negation (constructive).
example: ¬(p ↔ ¬p) :=
  λ h =>
    have hnp : ¬p := λ hp => (h.mp hp) hp;
    have hnnp : ¬¬p := λ hnp => hnp (h.mpr hnp);
    hnnp hnp
|
/* randist/tdist.c
*
* Copyright (C) 1996, 1997, 1998, 1999, 2000 James Theiler, Brian Gough
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <config.h>
#include <math.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_sf_gamma.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
/* The t-distribution has the form

   p(x) dx = (Gamma((nu + 1)/2)/(sqrt(pi nu) Gamma(nu/2))
   * (1 + (x^2)/nu)^-((nu + 1)/2) dx

   The method used here is the one described in Knuth */

/* Draw one variate from Student's t-distribution with nu degrees of
   freedom, using the supplied generator r.
   - nu <= 2: direct construction as N(0,1) / sqrt(chi^2_nu / nu).
   - nu >  2: Marsaglia's rejection scheme (faster for larger nu). */
double
gsl_ran_tdist (const gsl_rng * r, const double nu)
{
  if (nu <= 2)
    {
      double Y1 = gsl_ran_ugaussian (r);
      double Y2 = gsl_ran_chisq (r, nu);

      /* Definitional form: standard normal over sqrt(chi-square/nu). */
      double t = Y1 / sqrt (Y2 / nu);

      return t;
    }
  else
    {
      double Y1, Y2, Z, t;
      do
        {
          Y1 = gsl_ran_ugaussian (r);
          Y2 = gsl_ran_exponential (r, 1 / (nu/2 - 1));

          Z = Y1 * Y1 / (nu - 2);
        }
      /* Reject until the candidate satisfies the acceptance test;
         1 - Z < 0 guards the log/exp comparison domain. */
      while (1 - Z < 0 || exp (-Y2 - Z) > (1 - Z));

      /* Note that there is a typo in Knuth's formula, the line below
         is taken from the original paper of Marsaglia, Mathematics of
         Computation, 34 (1980), p 234-256 */

      t = Y1 / sqrt ((1 - 2 / nu) * (1 - Z));
      return t;
    }
}
/* Evaluate the density of Student's t-distribution with nu degrees of
   freedom at x:

     p(x) = Gamma((nu+1)/2) / (sqrt(pi nu) Gamma(nu/2))
            * (1 + x^2/nu)^(-(nu+1)/2)

   The normalising constant is computed via log-gamma to avoid
   overflow in the gamma ratio for large nu. */
double
gsl_ran_tdist_pdf (const double x, const double nu)
{
  const double lg_half_nu = gsl_sf_lngamma (nu / 2);
  const double lg_half_nu1 = gsl_sf_lngamma ((nu + 1) / 2);

  /* exp(lg_half_nu1 - lg_half_nu) == Gamma((nu+1)/2) / Gamma(nu/2) */
  const double norm = exp (lg_half_nu1 - lg_half_nu) / sqrt (M_PI * nu);

  return norm * pow ((1 + x * x / nu), -(nu + 1) / 2);
}
|
import LMT

variable {I} [Nonempty I] {E} [Nonempty E] [Nonempty (A I E)]

-- Read-over-write fact: writing (a1.read i2) at i3 cannot change the value
-- read back at i2 (if i3 = i2 the written value equals the old one), so the
-- hypothesized disequality is absurd. Discharged by the `arr` decision
-- procedure for the theory of arrays (from LMT).
example {a1 a2 a3 : A I E} :
  (((a1).write i3 ((a1).read i2)).read i2) ≠ ((a1).read i2) → False := by
  arr
|
"""
    asap3(style = Rectangular(2,1))

Build and return the `TopLevel` model of the AsAP3 array: a 32×32-ish grid
of processor tiles, a row of memory-enabled processors, 2-port memories
along the bottom edge, and one input/output handler on each side, all
wired together by the `connect_*` routines.
"""
function asap3(style = Rectangular(2,1))
    arch = TopLevel{2}("asap3")
    arch.metadata["style"] = style

    # -- Standard processor tiles --------------------------------------
    # One tile is built and the same instance is added at every location.
    proc = build_processor_tile(style)
    for row in 0:29, col in 0:31
        add_child(arch, proc, CartesianIndex(row, col))
    end
    # Two extra partial rows in the middle of the bottom band.
    for row in 30:31, col in 12:19
        add_child(arch, proc, CartesianIndex(row, col))
    end

    # -- Memory-enabled processor tiles --------------------------------
    mem_proc = build_processor_tile(style, include_memory = true)
    for col in 0:11
        add_child(arch, mem_proc, CartesianIndex(30, col))
    end
    for col in 20:31
        add_child(arch, mem_proc, CartesianIndex(30, col))
    end

    # -- Two-port memories (each spans two columns, hence step 2) ------
    mem2 = build_memory(2)
    for col in 0:2:10
        add_child(arch, mem2, CartesianIndex(31, col))
    end
    for col in 20:2:30
        add_child(arch, mem2, CartesianIndex(31, col))
    end

    # -- I/O handlers just outside the grid edges ----------------------
    add_child(arch, build_input_handler(style), CartesianIndex(0, -1))
    add_child(arch, build_output_handler(style), CartesianIndex(0, 32))

    # -- Routing -------------------------------------------------------
    connect_processors(arch, style)
    connect_io(arch, style)
    connect_memories(arch, style)
    return arch
end
|
import game.max.level03 -- hide
open_locale classical -- hide
noncomputable theory -- hide
namespace xena -- hide
/-
# Chapter ? : Max and abs

## Level 4

`le_max_right` is the statement that `b ≤ max a b`. There's a short
proof using what we've already done.
-/
/- Hint :
Why not start with `rw max_comm`?
-/
/- Lemma
For any real numbers $a$ and $b$, we have $b\leq\max(a,b).$
-/
theorem le_max_right (a b : ℝ) : b ≤ max a b :=
begin
  -- Rewrite the goal to `b ≤ max b a`, then it is exactly `le_max_left`.
  rw max_comm,
  apply le_max_left
end

end xena --hide
|
\section*{Exercises}
\begin{ex} The following are the cylindrical coordinates of points, $(
r,\theta,z)$. Find the Cartesian and spherical coordinates of each point.
\begin{enumerate}
\item $\paren{5,\frac{5\pi }{6},-3} $
\item $\paren{3,\frac{\pi }{3},4} $
\item $\paren{4,\frac{2\pi }{3},1} $
\item $\paren{2,\frac{3\pi }{4},-2} $
\item $\paren{3,\frac{3\pi }{2},-1} $
\item $\paren{8,\frac{11\pi }{6},-11} $
\end{enumerate}
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex} The following are the Cartesian coordinates of points, $(
x,y,z)$. Find the cylindrical and spherical coordinates of these
points.
\begin{enumerate}
\item $\paren{\frac{5}{2}\sqrt{2},\frac{5}{2}\sqrt{2},-3} $
\item $\paren{\frac{3}{2},\frac{3}{2}\sqrt{3},2} $
\item $\paren{-\frac{5}{2}\sqrt{2},\frac{5}{2}\sqrt{2},11} $
\item $\paren{-\frac{5}{2},\frac{5}{2}\sqrt{3},23} $
\item $(-\sqrt{3},-1,-5) $
\item $\paren{\frac{3}{2},-\frac{3}{2}\sqrt{3},-7} $
\item $(\sqrt{2},\sqrt{6},2\sqrt{2}) $
\item $\paren{-\frac{1}{2}\sqrt{3},\frac{3}{2},1} $
\item $\paren{-\frac{3}{4}\sqrt{2},\frac{3}{4}\sqrt{2},-\frac{3}{2}\sqrt{3}} $
\item $(-\sqrt{3},1,2\sqrt{3}) $
\item $\paren{-\frac{1}{4}\sqrt{2},\frac{1}{4}\sqrt{6},-\frac{1}{2}\sqrt{2}} $
\end{enumerate}
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex}
The following are spherical coordinates of points in the form $(
\rho,\phi,\theta)$. Find the Cartesian and cylindrical
coordinates of each point.
\begin{enumerate}
\item $\paren{4,\frac{\pi }{4},\frac{5\pi }{6}} $
\item $\paren{2,\frac{\pi }{3},\frac{2\pi }{3}} $
\item $\paren{3,\frac{5\pi }{6},\frac{3\pi }{2}} $
\item $\paren{4,\frac{\pi }{2},\frac{7\pi }{4}} $
\item $\paren{4,\frac{2\pi }{3},\frac{\pi }{6}} $
\item $\paren{4,\frac{3\pi }{4},\frac{5\pi }{3}} $
\end{enumerate}
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex} Describe the surface $\phi =\pi
/4$ in Cartesian coordinates, where $\phi $ is the polar angle in spherical coordinates.
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex} Describe the surface $\theta =\pi /4$ in spherical coordinates, where $\theta $ is
the angle measured from the positive $x$ axis.
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex} Describe the surface $r=5$ in Cartesian coordinates, where
$r$ is one of the cylindrical coordinates.
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex} Describe the surface $\rho =4$ in Cartesian coordinates,
where $\rho $ is the distance to the origin.
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex} Give the cone described by $z=\sqrt{x^{2}+y^{2}}$ in cylindrical coordinates and
in spherical coordinates.
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex} The following are described in Cartesian coordinates. Rewrite them in terms of spherical coordinates.
\begin{enumerate}
\item $z=x^{2}+y^{2}$.
\item $x^{2}-y^{2}=1$.
\item $z^{2}+x^{2}+y^{2}=6$.
\item $z=\sqrt{x^{2}+y^{2}}$.
\item $y=x$.
\item $z=x$.
\end{enumerate}
%\begin{sol}
%\end{sol}
\end{ex}
\begin{ex} The following are described in Cartesian coordinates. Rewrite them in terms of cylindrical coordinates.
\begin{enumerate}
\item $z=x^{2}+y^{2}$.
\item $x^{2}-y^{2}=1$.
\item $z^{2}+x^{2}+y^{2}=6$.
\item $z=\sqrt{x^{2}+y^{2}}$.
\item $y=x$.
\item $z=x$.
\end{enumerate}
%\begin{sol}
%\end{sol}
\end{ex}
|
-- Inspect the unfolded form of the subset proposition.
#reduce { 2, 4 } ⊆ { n : ℕ | ∃ m, n = 2 * m }

-- {2, 4} is contained in the even numbers: unfold the membership
-- disjunction (note set-builder lists 4 first, then 2, then false)
-- and exhibit a witness for each case.
example : { 2, 4 } ⊆ { n : ℕ | ∃ m, n = 2 * m } :=
begin
  change ∀ ⦃a : ℕ⦄, a = 4 ∨ a = 2 ∨ false → (∃ (m : ℕ), a = nat.mul 2 m),
  intro,
  assume h,
  cases h with four rest,
  -- a = 4: witness m = 2
  apply exists.intro 2,
  assumption,
  cases rest with two f,
  -- a = 2: witness m = 1
  apply exists.intro 1,
  assumption,
  -- the residual `false` case is vacuous
  contradiction,
end
|
{-# OPTIONS --without-K #-}

{- The type of all types in some universe with a fixed truncation level
   behaves almost like a universe itself. In this utility module, we develop
   some notation for efficiently working with this pseudo-universe.
   It will lead to considerably briefer and more comprehensible proofs. -}
module Universe.Utility.TruncUniverse where

open import lib.Basics
open import lib.NType2
open import lib.types.Pi
open import lib.types.Sigma
open import lib.types.TLevel
open import lib.types.Unit
open import Universe.Utility.General

module _ {n : ℕ₋₂} where
  -- Project the carrier type out of an n-truncated type (drop the proof).
  ⟦_⟧ : ∀ {i} → n -Type i → Type i
  ⟦ (B , _) ⟧ = B

module _ {n : ℕ₋₂} where
  -- Universe-level lifting preserves the truncation level.
  Lift-≤ : ∀ {i j} → n -Type i → n -Type (i ⊔ j)
  Lift-≤ {i} {j} (A , h) = (Lift {j = j} A , equiv-preserves-level (lift-equiv ⁻¹) h)

  -- Weaken the truncation level by one step.
  raise : ∀ {i} → n -Type i → S n -Type i
  raise (A , h) = (A , raise-level n h)

  -- Weaken the truncation level along any proof m ≤ n.
  raise-≤T : ∀ {i} {m n : ℕ₋₂} → m ≤T n → m -Type i → n -Type i
  raise-≤T p (A , h) = (A , raise-level-≤T p h)

  -- The unit type at any truncation level (contractible, hence n-truncated).
  ⊤-≤ : n -Type lzero
  ⊤-≤ = (⊤ , raise-level-≤T (-2≤T n) Unit-is-contr)

  -- Closure of n-types under dependent products over an arbitrary index type.
  Π-≤ : ∀ {i j} (A : Type i) → (A → n -Type j) → n -Type (i ⊔ j)
  Π-≤ A B = (Π A (fst ∘ B) , Π-level (snd ∘ B))

  infixr 2 _→-≤_
  -- Non-dependent function space into an n-type.
  _→-≤_ : ∀ {i j} → Type i → n -Type j → n -Type (i ⊔ j)
  A →-≤ B = Π-≤ A (λ _ → B)

  -- Closure under dependent sums (both components truncated).
  Σ-≤ : ∀ {i j} (A : n -Type i) → (⟦ A ⟧ → n -Type j) → n -Type (i ⊔ j)
  Σ-≤ A B = (Σ ⟦ A ⟧ (λ a → ⟦ B a ⟧) , Σ-level (snd A) (snd ∘ B))

  infixr 4 _×-≤_
  -- Non-dependent pairing of n-types.
  _×-≤_ : ∀ {i j} → n -Type i → n -Type j → n -Type (i ⊔ j)
  A ×-≤ B = Σ-≤ A (λ _ → B)

  -- Path types in an (n+1)-type are n-types (one level lower).
  Path-< : ∀ {i} (A : S n -Type i) (x y : ⟦ A ⟧) → n -Type i
  Path-< A x y = (x == y , snd A _ _)

  -- Path types in an n-type are again n-types (via raise).
  Path-≤ : ∀ {i} (A : n -Type i) (x y : ⟦ A ⟧) → n -Type i
  Path-≤ A x y = Path-< (raise A) x y

  -- Equivalences between n-types form an n-type.
  _≃-≤_ : ∀ {i j} (A : n -Type i) (B : n -Type j) → n -Type (i ⊔ j)
  A ≃-≤ B = (⟦ A ⟧ ≃ ⟦ B ⟧ , ≃-level (snd A) (snd B))

-- The pseudo-universe of n-types at level i is itself (n+1)-truncated.
_-Type-≤_ : (n : ℕ₋₂) (i : ULevel) → S n -Type lsucc i
n -Type-≤ i = (n -Type i , n -Type-level i)
|
# Python Basics
<style>
@import url("./custom/gtag.js");
</style>
Please refer to [python 3 documentation](https://docs.python.org/3/)
<h4 style='color:black;text-align:center;'>Table of Contents</h4>
| |
| :--- |
| [Numpy and Linear Algebra](#Numpy-and-Linear-Algebra) |
| [Matrices with sympy](#Matrices-with-sympy) |
| [Scipy and Matplotlib Plotting](#Scipy-and-Matplotlib-Plotting) |
| [Control/Conditional Statements](#Control/Conditional-Statements) |
| [Creating Methods](#Creating-Methods) |
| [Strings](#Strings) |
| [Pandas - DataFrame](#Pandas-intro-using-DataFrame) |
## Numpy and Linear Algebra
```python
import numpy as np
# Creating two lists, a and b
a=[1,2,3]
b=[4,5,6]
c=np.concatenate((a,b))
print(c)
```
[1 2 3 4 5 6]
```python
## Logial operations are element-wise ##
# Create an array
f = np.array([1,2,3,4,5])
print('This is the frist output: ',f < 4)
print('This is the second output: ',(f<4) | (f>4)) # | is logical symbol for 'or'
```
This is the frist output: [ True True True False False]
This is the second output: [ True True True False True]
```python
## Sums and Averages of elements inside array##
g = np.array([[1,2,3],[4,5,6]])
print(g)
g.sum()
```
[[1 2 3]
[4 5 6]]
21
```python
g.sum(axis=1)
```
array([ 6, 15])
```python
g.mean()
```
3.5
```python
g.std()
```
1.707825127659933
```python
# If we are not sure what a function or object is, then
# we can look to the help page.
help(g.std)
```
Help on built-in function std:
std(...) method of numpy.ndarray instance
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
```python
g.std(axis=1)
```
array([0.81649658, 0.81649658])
```python
# Take two matrices, a and b, then concatenate them.
a=np.array([[1,2,3],[4,5,6]])
b=np.array([[1,2,3],[7,8,9]])
c=np.concatenate((a,b))
print(c)
```
[[1 2 3]
[4 5 6]
[1 2 3]
[7 8 9]]
```python
d = np.transpose(c) # transpose of c
f = d[:,0:3] # first 3 columns of d
g = np.eye(3) # identity 3x3
h = np.concatenate((f,g), axis=0) # linking f and g
I = np.concatenate((f,g), axis=1) # linking f and g
print(I) # axis=1
```
[[1. 4. 1. 1. 0. 0.]
[2. 5. 2. 0. 1. 0.]
[3. 6. 3. 0. 0. 1.]]
## Matrices with sympy
```python
# Lets use sympy to construct a matrix
## Matrices from docs.sympy.org ##
from sympy import *
# We can also make the output look pretty
init_printing(use_unicode=True)
M = Matrix([[1,2,3],[4,5,6],[7,8,9]])
print('Row Reduce Echelon Form of the Matrix M is:\n')
M.rref()
```
Row Reduce Echelon Form of the Matrix M is:
$$\left ( \left[\begin{matrix}1 & 0 & -1\\0 & 1 & 2\\0 & 0 & 0\end{matrix}\right], \quad \left ( 0, \quad 1\right )\right )$$
```python
print("See, we can still print, but it doesn't look as nice\n",M)
```
See, we can still print, but it doesn't look as nice
Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
```python
A = Matrix([[1,2,3],[4,5,6],[7,8,9]])
```
```python
# If you need an immutable version of Matrix, use ImmutableMatrix
A.shape
```
```python
# to get individual rows of columns, use:
A.row(0)
A.col(1)
```
$$\left[\begin{matrix}2\\5\\8\end{matrix}\right]$$
```python
# deleting and inserting rows and columns
A.row_del(1)
A
```
$$\left[\begin{matrix}1 & 2 & 3\\7 & 8 & 9\end{matrix}\right]$$
```python
A.T
```
$$\left[\begin{matrix}1 & 7\\2 & 8\\3 & 9\end{matrix}\right]$$
```python
A = Matrix([[2,3,4],[5,8,3],[3,3,3]])
A
```
$$\left[\begin{matrix}2 & 3 & 4\\5 & 8 & 3\\3 & 3 & 3\end{matrix}\right]$$
```python
A.rref() # (rref, pivot columns)
```
$$\left ( \left[\begin{matrix}1 & 0 & 0\\0 & 1 & 0\\0 & 0 & 1\end{matrix}\right], \quad \left ( 0, \quad 1, \quad 2\right )\right )$$
```python
A.nullspace()
```
```python
B = Matrix([[1,2,3],[4,5,6],[7,8,9]])
B
```
$$\left[\begin{matrix}1 & 2 & 3\\4 & 5 & 6\\7 & 8 & 9\end{matrix}\right]$$
```python
B.rref()
```
$$\left ( \left[\begin{matrix}1 & 0 & -1\\0 & 1 & 2\\0 & 0 & 0\end{matrix}\right], \quad \left ( 0, \quad 1\right )\right )$$
```python
B.nullspace()
```
$$\left [ \left[\begin{matrix}1\\-2\\1\end{matrix}\right]\right ]$$
```python
## Eigenvalues and Eigenvectors ##
A.eigenvals() # returns (eigenvalue:algebraic multiplicity)
```
```python
A.eigenvects()
# which returns (eigenvalue:algebraic multiplicity, [eigenvectors])
```
$$\left [ \left ( -1, \quad 1, \quad \left [ \left[\begin{matrix}- \frac{9}{4}\\\frac{11}{12}\\1\end{matrix}\right]\right ]\right ), \quad \left ( 2, \quad 1, \quad \left [ \left[\begin{matrix}1\\- \frac{4}{3}\\1\end{matrix}\right]\right ]\right ), \quad \left ( 12, \quad 1, \quad \left [ \left[\begin{matrix}1\\2\\1\end{matrix}\right]\right ]\right )\right ]$$
```python
## diagonalizable matrices ##
# A = PDP^-1
P, D = A.diagonalize()
print(P)
print(D)
```
Matrix([[-27, 3, 1], [11, -4, 2], [12, 3, 1]])
Matrix([[-1, 0, 0], [0, 2, 0], [0, 0, 12]])
```python
# remember that lambda is a reserved keyword in python, so
# to create the symbol use: lamda (without the 'b')
lamda = symbols('lamda')
B = Matrix([[1, 0 , 1],[1, 1, 2], [ 1, 0, 1]])
B
b = B.charpoly(lamda)
b
factor(b)
```
## Scipy and Matplotlib Plotting
```python
x = symbols('x')
y = x**2 + 5*x + 6
print(y)
```
x**2 + 5*x + 6
```python
y_x = integrate(y)
pprint(y_x)
```
3 2
x 5⋅x
── + ──── + 6⋅x
3 2
```python
## Constructing a large lil_matrix and add values to it ##
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
from numpy.linalg import solve, norm
from numpy.random import rand
## Optimization ##
# Minimization (steepest-descent, conjugate gradient), curve-fitting, least squares, root finding (Newton's method), annealing, etc.
import scipy.optimize as optimize
```
```python
## Curve Fitting Example ##
def func(x, a, b, c):
    """Exponential decay model used in the curve-fitting demo.

    Evaluates ``a * exp(-b * x) + c`` element-wise; ``x`` may be a
    scalar or a NumPy array.
    """
    decay = np.exp(-b * x)
    return a * decay + c
```
```python
x = np.linspace(0,4,50) # 50 values linearly in [0,4]
y = func(x,2.5,1.3,0.5)
yn = y + 0.2*np.random.normal(size=len(x))
popt,pcov = optimize.curve_fit(func,x,yn)
popt,pcov # optimized parameters and their covariance
```
(array([2.68646283, 1.51320963, 0.53692262]),
array([[ 0.01767912, 0.00851316, -0.00063102],
[ 0.00851316, 0.02293804, 0.00501921],
[-0.00063102, 0.00501921, 0.00242487]]))
```python
## Matplotlib ##
from matplotlib.pyplot import *
from pylab import *
x = np.arange(0,10,0.2)
y = np.cos(x)
figure()
```
<Figure size 640x480 with 0 Axes>
```python
plot(x,y,'g*')
show()
```
```python
# or we can show the figure inline
%matplotlib inline
plot(x,-np.exp(2*y),'g*')
```
## Control/Conditional Statements
```python
## Conditional and Flow Statements ##
## 'if' Statement
x = int(1) # Change the initial condition to alt output
if x < 0:
x = 0
print('Negative goes to zero')
elif x == 0:
print('Zero')
elif x == 1:
print('single')
else:
print('More')
```
single
```python
## Examples using 'for' statement
# create a list of names
Names = ['Rob', 'Ron', 'Felish', 'George', 'Shoe']
for x in Names:
# print the ith element of the list, followed by
# the number of characters the name has.
print(x, len(x))
# Here's another way to do the same thing...
# but let x be the ith iteration.
Names = ['Rob', 'Ron', 'Felish', 'George', 'Shoe']
for i in range(len(Names)):
print(i,Names[i])
```
Rob 3
Ron 3
Felish 6
George 6
Shoe 4
0 Rob
1 Ron
2 Felish
3 George
4 Shoe
```python
for x in range(5): # range(5) = {0,1,2,3,4}
if x == 3: # if x=3, print out the string
print('Three')
elif x == 0: # "elseif" needed after 'if' to continue with condiditions of the same statement
print('Zero')
else: # otherwise, print out the remaining
print(x)
```
Zero
1
2
Three
4
```python
for x in Names[:]: # Loops over a slice copy of the entire list.
if len(x) > 5:
Names.remove(x)
Names.append('Name')
Names
```
['Rob', 'Ron', 'Shoe', 'Name', 'Name']
```python
## 'break' and 'continue' Statements, and 'else' Clauses on loops
## Prime Numbers ##
for n in range(2,50):
for x in range(2, n):
if n % x == 0:
print(n, '=', x, '*', n/x)
break
else:
# loop fell through without finding a factor
print(n, 'is a prime number')
```
2 is a prime number
3 is a prime number
4 = 2 * 2.0
5 is a prime number
6 = 2 * 3.0
7 is a prime number
8 = 2 * 4.0
9 = 3 * 3.0
10 = 2 * 5.0
11 is a prime number
12 = 2 * 6.0
13 is a prime number
14 = 2 * 7.0
15 = 3 * 5.0
16 = 2 * 8.0
17 is a prime number
18 = 2 * 9.0
19 is a prime number
20 = 2 * 10.0
21 = 3 * 7.0
22 = 2 * 11.0
23 is a prime number
24 = 2 * 12.0
25 = 5 * 5.0
26 = 2 * 13.0
27 = 3 * 9.0
28 = 2 * 14.0
29 is a prime number
30 = 2 * 15.0
31 is a prime number
32 = 2 * 16.0
33 = 3 * 11.0
34 = 2 * 17.0
35 = 5 * 7.0
36 = 2 * 18.0
37 is a prime number
38 = 2 * 19.0
39 = 3 * 13.0
40 = 2 * 20.0
41 is a prime number
42 = 2 * 21.0
43 is a prime number
44 = 2 * 22.0
45 = 3 * 15.0
46 = 2 * 23.0
47 is a prime number
48 = 2 * 24.0
49 = 7 * 7.0
```python
## Even and Odd example
for n in range(2, 10):
if n % 2 == 0:
print('Found an even number', n)
continue
print('Found an odd number', n)
## 'pass' Statements
# The pass statement does nothing. It can be used when a statement is
# required syntactically but the program needs no action.
# It can be used for creating minimal classes.
# It can also be used as a place-holder for a function
def initlog(*arguments):
    # Deliberate placeholder: accepts any arguments and does nothing yet.
    pass # remember to implement this!
```
Found an even number 2
Found an odd number 3
Found an even number 4
Found an odd number 5
Found an even number 6
Found an odd number 7
Found an even number 8
Found an odd number 9
## Creating Methods
```python
## Defining Functions ##
def fib(n):
    """Print every Fibonacci number strictly less than ``n``.

    Each number is written on its own line; nothing is returned.
    """
    current, following = 0, 1
    while current < n:
        print(current)
        current, following = following, current + following
# Now, calling on the function just made
fib(2000)
```
0
1
1
2
3
5
8
13
21
34
55
89
144
233
377
610
987
1597
```python
## changing fib to fib2
def fib2(n):
    """Return the Fibonacci numbers strictly less than ``n`` as a list."""
    series = []
    previous, current = 0, 1
    while previous < n:
        series.append(previous)
        previous, current = current, previous + current
    return series
fib2(100)
```
```python
# The return statement returns with a value from a function.
# return without an expression argument returns None.
# The statement result.append(a) calls a method of the list object result.
# A method is a function that belongs to an object and is named obj.methodname
def f(a, L=[]):
    # NOTE: the default list is evaluated ONCE, at definition time, and is
    # therefore shared by every call that omits ``L`` -- this is exactly the
    # mutable-default-argument behaviour the surrounding text demonstrates.
    L.append(a)
    return L
print(f(1))
print(f(2))
print(f(3))
print(f(100))
```
[1]
[1, 2]
[1, 2, 3]
[1, 2, 3, 100]
```python
# If you don't want the default to be shared with subsequent calls...
def f(a, L=None):
    """Append ``a`` to ``L`` and return the list.

    When ``L`` is omitted (``None``), a brand-new list is created on each
    call, avoiding the shared-mutable-default pitfall shown above.
    """
    target = [] if L is None else L
    target.append(a)
    return target
print(f(0))
print(f(1))
print(f(2))
print(f(100))
```
[0]
[1]
[2]
[100]
```python
## Keyword Arguments ##
# which are of the form: kwarg=value
# In general:
# def func(required, state, action, type):
# lets make one up...
food=['apple', 'organges', 'bread', 'salad']
def store(food, state='need', action='buy', type='cash'):
    """Keyword-argument demo: print a message about shopping for ``food``.

    Only 'salad' and 'organges' (sic, as in the food list above) are
    recognised; any other value is silently ignored, matching the
    original work-in-progress behaviour.
    """
    if food == 'salad':
        print('This store has the groceries I', state, ',')
        print("but doesn't have", food)
        return
    if food == 'organges':
        print('They have', food, 'but I have insignificant', type)
```
```python
store('salad')
# This needs work (not finished)
```
This store has the groceries I need ,
but doesn't have salad
```python
## The template for writing a function ##
def my_function():
    '''Insert a short, concise summary of the object's purpose.'''
    # Placeholder body: ``pass`` keeps the template syntactically valid until
    # a real implementation replaces it. The docstring above is intentionally
    # the template text, since help() output is demonstrated right below.
    pass
print(help(my_function)) # just showing the docstring for the function
```
Help on function my_function in module __main__:
my_function()
Insert a short, concise summary of the object's purpose.
None
## Strings
```python
name = 'Enter Name Here' # Enter your name here
intro = 'Hello, my name is %s.'%(name)
## Let's do some string manipulation...
# First, lets separate this statement by each space
# we can put each word into a list
List = [word for word in intro.split(" ")]
print(List)
```
['Hello,', 'my', 'name', 'is', 'Enter', 'Name', 'Here.']
```python
# We can get rid of any punctuation marks with replace
List = [word.replace(".","").replace(",","") for word in intro.split(" ")]
print(List)
```
['Hello', 'my', 'name', 'is', 'Enter', 'Name', 'Here']
```python
# We can print the length of the list (number of words)
print(len(List))
```
7
```python
# We can print the length of each element in the list (number of characters)
nChar = 0 # start a count of the number of characters
for i in range(len(List)):
nChar += len(List[i])
print('Word %i/%i has %i characters.'%(i+1,len(List),len(List[i])))
print('The total number of characters in the list is %i'%nChar)
```
Word 1/7 has 5 characters.
Word 2/7 has 2 characters.
Word 3/7 has 4 characters.
Word 4/7 has 2 characters.
Word 5/7 has 5 characters.
Word 6/7 has 4 characters.
Word 7/7 has 4 characters.
The total number of characters in the list is 26
## Pandas intro using `DataFrame`
```python
import pandas as pd
```
```python
m = 5 # number of participants or "members"
n = 10 # number of observations of position
observation_times = pd.Series(["01-%0.2i-2020"%i for i in range(n)])
# You can do the same thing using: dates_index = pd.date_range("20200101", periods=m)
array = np.random.random((n,m)) # creating a bunch (n x m) of random samples
samples = pd.DataFrame(array, index=observation_times, columns=["m%i"%i for i in range(m)])
samples
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>m0</th>
<th>m1</th>
<th>m2</th>
<th>m3</th>
<th>m4</th>
</tr>
</thead>
<tbody>
<tr>
<th>01-00-2020</th>
<td>0.037611</td>
<td>0.656845</td>
<td>0.140001</td>
<td>0.886104</td>
<td>0.667229</td>
</tr>
<tr>
<th>01-01-2020</th>
<td>0.647753</td>
<td>0.977726</td>
<td>0.295408</td>
<td>0.732274</td>
<td>0.310976</td>
</tr>
<tr>
<th>01-02-2020</th>
<td>0.025785</td>
<td>0.764663</td>
<td>0.889219</td>
<td>0.635461</td>
<td>0.248914</td>
</tr>
<tr>
<th>01-03-2020</th>
<td>0.077782</td>
<td>0.977541</td>
<td>0.126935</td>
<td>0.937189</td>
<td>0.735266</td>
</tr>
<tr>
<th>01-04-2020</th>
<td>0.751781</td>
<td>0.658810</td>
<td>0.222319</td>
<td>0.410867</td>
<td>0.703010</td>
</tr>
<tr>
<th>01-05-2020</th>
<td>0.671324</td>
<td>0.114019</td>
<td>0.147262</td>
<td>0.568094</td>
<td>0.009472</td>
</tr>
<tr>
<th>01-06-2020</th>
<td>0.931435</td>
<td>0.436208</td>
<td>0.370047</td>
<td>0.752514</td>
<td>0.038555</td>
</tr>
<tr>
<th>01-07-2020</th>
<td>0.464833</td>
<td>0.739809</td>
<td>0.938925</td>
<td>0.300964</td>
<td>0.080938</td>
</tr>
<tr>
<th>01-08-2020</th>
<td>0.512085</td>
<td>0.142006</td>
<td>0.008359</td>
<td>0.195520</td>
<td>0.947673</td>
</tr>
<tr>
<th>01-09-2020</th>
<td>0.490614</td>
<td>0.539010</td>
<td>0.972949</td>
<td>0.687977</td>
<td>0.869855</td>
</tr>
</tbody>
</table>
</div>
```python
# gather specific members
sub_samples = samples[["m2","m4"]] # grabbing only the 2nd and 4th members
sub_samples
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>m2</th>
<th>m4</th>
</tr>
</thead>
<tbody>
<tr>
<th>01-00-2020</th>
<td>0.140001</td>
<td>0.667229</td>
</tr>
<tr>
<th>01-01-2020</th>
<td>0.295408</td>
<td>0.310976</td>
</tr>
<tr>
<th>01-02-2020</th>
<td>0.889219</td>
<td>0.248914</td>
</tr>
<tr>
<th>01-03-2020</th>
<td>0.126935</td>
<td>0.735266</td>
</tr>
<tr>
<th>01-04-2020</th>
<td>0.222319</td>
<td>0.703010</td>
</tr>
<tr>
<th>01-05-2020</th>
<td>0.147262</td>
<td>0.009472</td>
</tr>
<tr>
<th>01-06-2020</th>
<td>0.370047</td>
<td>0.038555</td>
</tr>
<tr>
<th>01-07-2020</th>
<td>0.938925</td>
<td>0.080938</td>
</tr>
<tr>
<th>01-08-2020</th>
<td>0.008359</td>
<td>0.947673</td>
</tr>
<tr>
<th>01-09-2020</th>
<td>0.972949</td>
<td>0.869855</td>
</tr>
</tbody>
</table>
</div>
```python
sub_samples.sum()
```
m2 4.111425
m4 4.611888
dtype: float64
```python
# Subtract the element one row up from the current row
sub_samples.diff(periods=1, axis=0)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>m2</th>
<th>m4</th>
</tr>
</thead>
<tbody>
<tr>
<th>01-00-2020</th>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>01-01-2020</th>
<td>0.155408</td>
<td>-0.356254</td>
</tr>
<tr>
<th>01-02-2020</th>
<td>0.593811</td>
<td>-0.062061</td>
</tr>
<tr>
<th>01-03-2020</th>
<td>-0.762284</td>
<td>0.486351</td>
</tr>
<tr>
<th>01-04-2020</th>
<td>0.095384</td>
<td>-0.032256</td>
</tr>
<tr>
<th>01-05-2020</th>
<td>-0.075058</td>
<td>-0.693538</td>
</tr>
<tr>
<th>01-06-2020</th>
<td>0.222786</td>
<td>0.029083</td>
</tr>
<tr>
<th>01-07-2020</th>
<td>0.568878</td>
<td>0.042384</td>
</tr>
<tr>
<th>01-08-2020</th>
<td>-0.930566</td>
<td>0.866734</td>
</tr>
<tr>
<th>01-09-2020</th>
<td>0.964590</td>
<td>-0.077817</td>
</tr>
</tbody>
</table>
</div>
```python
# Subtract one column with the other
sub_samples.diff(periods=1, axis=1)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>m2</th>
<th>m4</th>
</tr>
</thead>
<tbody>
<tr>
<th>01-00-2020</th>
<td>NaN</td>
<td>0.527229</td>
</tr>
<tr>
<th>01-01-2020</th>
<td>NaN</td>
<td>0.015567</td>
</tr>
<tr>
<th>01-02-2020</th>
<td>NaN</td>
<td>-0.640305</td>
</tr>
<tr>
<th>01-03-2020</th>
<td>NaN</td>
<td>0.608330</td>
</tr>
<tr>
<th>01-04-2020</th>
<td>NaN</td>
<td>0.480691</td>
</tr>
<tr>
<th>01-05-2020</th>
<td>NaN</td>
<td>-0.137790</td>
</tr>
<tr>
<th>01-06-2020</th>
<td>NaN</td>
<td>-0.331493</td>
</tr>
<tr>
<th>01-07-2020</th>
<td>NaN</td>
<td>-0.857987</td>
</tr>
<tr>
<th>01-08-2020</th>
<td>NaN</td>
<td>0.939313</td>
</tr>
<tr>
<th>01-09-2020</th>
<td>NaN</td>
<td>-0.103094</td>
</tr>
</tbody>
</table>
</div>
```python
# Another way of getting a subset...
samples[1:4]
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>m0</th>
<th>m1</th>
<th>m2</th>
<th>m3</th>
<th>m4</th>
</tr>
</thead>
<tbody>
<tr>
<th>01-01-2020</th>
<td>0.647753</td>
<td>0.977726</td>
<td>0.295408</td>
<td>0.732274</td>
<td>0.310976</td>
</tr>
<tr>
<th>01-02-2020</th>
<td>0.025785</td>
<td>0.764663</td>
<td>0.889219</td>
<td>0.635461</td>
<td>0.248914</td>
</tr>
<tr>
<th>01-03-2020</th>
<td>0.077782</td>
<td>0.977541</td>
<td>0.126935</td>
<td>0.937189</td>
<td>0.735266</td>
</tr>
</tbody>
</table>
</div>
```python
```
|
"""
Outlier Engineering
- An outlier is a data point which is significantly different from the remaining data.
- “An outlier is an observation which deviates so much from the other observations as to arouse suspicions that it
was generated by a different mechanism.” [D. Hawkins. Identification of Outliers, Chapman and Hall , 1980].
- Statistics such as the mean and variance are very susceptible to outliers.
- In addition, some Machine Learning models are sensitive to outliers which may decrease their performance.
- Thus, depending on which algorithm we wish to train, we often remove outliers from our variables.
- In section 0.2.5 of this series we have seen how to identify outliers.
- In this example, we discuss how we can process them to train our machine learning models.
How can we pre-process outliers?
- Trimming: remove the outliers from our dataset
- Treat outliers as missing data, and proceed with any missing data imputation technique
- Discretisation: outliers are placed in border bins together with higher or lower values of the distribution
- Censoring: capping the variable distribution at a max and / or minimum value
Censoring is also known as:
- top and bottom coding
- winsorization
- capping
Censoring or Capping.
- Censoring, or capping, means capping the maximum and /or minimum of a distribution at an arbitrary value.
- In other words, values bigger or smaller than the arbitrarily determined ones are censored.
- Capping can be done at both tails, or just one of the tails, depending on the variable and the user.
- Check my talk in pydata for an example of capping used in a finance company.
- The numbers at which to cap the distribution can be determined:
- arbitrarily
- using the inter-quartile range proximity rule
- using the gaussian approximation
- using quantiles
Advantages
- does not remove data
Limitations
- distorts the distributions of the variables
- distorts the relationships among variables
In this example
- We will see how to perform capping with arbitrary values using the Titanic dataset
Important
- Outliers should be detected AND removed ONLY from the training set, and NOT from the test set.
- So we should first divide our data set into train and tests, and remove outliers in the train set,
but keep those in the test set, and measure how well our model is doing.
I will not do that in this example, but please keep that in mind when setting up your pipelines """
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from feature_engine import outliers as outr
def load_titanic():
    """Load the Titanic dataset and apply the light cleaning this demo needs.

    Returns a DataFrame in which:
      * ``cabin`` is reduced to its first character (the deck letter; NaN
        becomes 'n' because the column is cast to ``str`` first);
      * ``pclass`` is cast to object dtype so it is treated as categorical;
      * missing ``embarked`` values are filled with 'C'.
    """
    data = pd.read_csv('dataset/titanic.csv')
    data['cabin'] = data['cabin'].astype(str).str[0]
    data['pclass'] = data['pclass'].astype('O')
    # Plain assignment instead of ``fillna(..., inplace=True)`` on a column
    # selection: chained assignment with ``inplace`` is deprecated and does
    # not update the parent frame under pandas copy-on-write.
    data['embarked'] = data['embarked'].fillna('C')
    return data
data = load_titanic()
data.head()
"""
pclass survived name sex age sibsp parch ticket fare cabin embarked boat body home.dest
0 1 1 Allen, Miss. Elisabeth Walton female 29.0000 0 0 24160 211.3375 B S 2 NaN St Louis, MO
1 1 1 Allison, Master. Hudson Trevor male 0.9167 1 2 113781 151.5500 C S 11 NaN Montreal, PQ / Chesterville, ON
2 1 0 Allison, Miss. Helen Loraine female 2.0000 1 2 113781 151.5500 C S NaN NaN Montreal, PQ / Chesterville, ON
3 1 0 Allison, Mr. Hudson Joshua Creighton male 30.0000 1 2 113781 151.5500 C S NaN 135.0 Montreal, PQ / Chesterville, ON
4 1 0 Allison, Mrs. Hudson J C (Bessie Waldo Daniels) female 25.0000 1 2 113781 151.5500 C S NaN NaN Montreal, PQ / Chesterville, ON
ArbitraryOutlierCapper
- The ArbitraryOutlierCapper caps the minimum and maximum values by a value determined by the user.
"""
data.shape
# (1309, 14)
# let's find out the maximum Age and maximum Fare in the titanic
data.age.max(), data.fare.max()
# (80.0, 512.3292)
# check for missing values before doing ArbitraryOutlierCapper
data.isnull().sum()
"""
pclass 0
survived 0
name 0
sex 0
age 263
sibsp 0
parch 0
ticket 0
fare 1
cabin 0
embarked 0
boat 823
body 1188
home.dest 564
dtype: int64 """
# missing values exists, to focus on ArbitraryOutlierCapper, lets drop NAN for 2 features
# Credits: https://stackoverflow.com/questions/13413590/how-to-drop-rows-of-pandas-dataframe-whose-value-in-a-certain-column-is-nan
data = data[data['age'].notnull()]
data = data[data['fare'].notnull()]
data.isnull().sum()
"""
pclass 0
survived 0
name 0
sex 0
age 0
sibsp 0
parch 0
ticket 0
fare 0
cabin 0
embarked 0
boat 628
body 926
home.dest 360
dtype: int64 """
data.shape
# (1045, 14)
capper = outr.ArbitraryOutlierCapper(max_capping_dict = {'age':50, 'fare':200},
min_capping_dict = None)
capper.fit(data)
# ArbitraryOutlierCapper(max_capping_dict={'age': 50, 'fare': 200})
capper.right_tail_caps_
# {'age': 50, 'fare': 200}
capper.left_tail_caps_
# {}
temp = capper.transform(data)
temp.age.max(), temp.fare.max()
# (50.0, 200.0)
# Minimum capping
capper = outr.ArbitraryOutlierCapper(max_capping_dict=None,
min_capping_dict={
'age': 10,
'fare': 100
})
capper.fit(data)
# ArbitraryOutlierCapper(min_capping_dict={'age': 10, 'fare': 100})
capper.variables
# ['age', 'fare']
capper.right_tail_caps_
# {}
capper.left_tail_caps_
# {'age': 10, 'fare': 100}
temp = capper.transform(data)
temp.age.min(), temp.fare.min()
# (10.0, 100.0)
# Both ends capping
capper = outr.ArbitraryOutlierCapper(max_capping_dict={
'age': 50, 'fare': 200},
min_capping_dict={
'age': 10, 'fare': 100})
capper.fit(data)
# ArbitraryOutlierCapper(max_capping_dict={'age': 50, 'fare': 200},
# min_capping_dict={'age': 10, 'fare': 100})
capper.right_tail_caps_
# {'age': 50, 'fare': 200}
capper.left_tail_caps_
# {'age': 10, 'fare': 100}
temp = capper.transform(data)
temp.age.min(), temp.fare.min()
# (10.0, 100.0)
temp.age.max(), temp.fare.max()
# (50.0, 200.0)
# That is all for this example. I hope you enjoyed the information, and see you in the next one. |
Mountain Song Wind Chimes are crafted by hand in our Kalorama workshop in the Dandenong Ranges, 35km east of Melbourne, Victoria.
We are a small, community-based business, manufacturing wind chimes not simply to 'turn a buck,' but because we have a one-eyed passion for them. We thoroughly enjoy the delight expressed by folk as they listen to their gentle resonant tones!
With a strong musical interest and a background in physics, professional careers in engineering and sales, and solid workshop skills, we're well placed to meet the demands of this market.
So... what is 'this market' and its demands? Wind chimes have been around for a very long time (Ancient Romans called them 'Tintinnabulum'!), and the price-driven end of the spectrum is out there and doing nicely, thank you - no point in our trying to compete with it head-on. If you find what you like at a price you're happy to pay, we say "buy it!" Assuming that you can find what you like (and that - usually - you get what you pay for).
If you can't, that's why we do what we do. Our wind chimes are made with care. For example, a tiny offset in the chime's suspension point (i.e. where the hole for the string is drilled) can dampen its resonant qualities. Did you want 'ringing like a bell'... or a 'dull thunk'? Intonation is important to us and we spend a fair amount of time making sure our chimes are tuned to their correct tones, and referenced to concert pitch, A=440Hz. It's fiddly, but really does make the difference.
There have been some insightful comments. A market-goer noted recently of our chime-sets: "They sound like you want wind chimes to sound when you buy them, but they hardly ever do," referring to resonance and sustain. Another said of a dozen or more chime-sets all sounding simultaneously in a gust of wind: "They blend so well, it's like a symphony."
Recycling & biodegradability - where possible, we try to use recycled materials in our packaging and - if their new owner doesn't wish to reuse them - to make sure they can be safely thrown out in the recycling bin. Cardboard packing boxes can often be turned inside-out to reveal a plain outer... 'though the new owner of a carton that previously contained recycled-fibre toilet paper, of the Who Gives A Crap® brand, might be in for a surprise when they unpack their wind chimes!
Mindful Ethos - if manufacturing for you is simply a matter of peg A into hole B and a quick whack with a hammer, then read no further! If, though, you sense that something of the craftsperson can be embedded in his or her work, that their philosophy finds its way into the end product, then be assured that we 'work mindfully' in the making of Mountain Song Wind Chimes. In particular, when completing a piece that has been especially commissioned for 'an occasion', we hold in heart/mind/spirit/prayer – whichever word works for you – the individual(s) whom the work celebrates. In a mass-produced world it is difficult to grasp and explain exactly why this is important… it just is. |
[GOAL]
r : ℝ
hr : r < 0
⊢ sign r = -1
[PROOFSTEP]
rw [sign, if_pos hr]
[GOAL]
r : ℝ
hr : 0 < r
⊢ sign r = 1
[PROOFSTEP]
rw [sign, if_pos hr, if_neg hr.not_lt]
[GOAL]
⊢ sign 0 = 0
[PROOFSTEP]
rw [sign, if_neg (lt_irrefl _), if_neg (lt_irrefl _)]
[GOAL]
⊢ 0 < 1
[PROOFSTEP]
norm_num
[GOAL]
r : ℝ
⊢ sign r = -1 ∨ sign r = 0 ∨ sign r = 1
[PROOFSTEP]
obtain hn | rfl | hp := lt_trichotomy r (0 : ℝ)
[GOAL]
case inl
r : ℝ
hn : r < 0
⊢ sign r = -1 ∨ sign r = 0 ∨ sign r = 1
[PROOFSTEP]
exact Or.inl <| sign_of_neg hn
[GOAL]
case inr.inl
⊢ sign 0 = -1 ∨ sign 0 = 0 ∨ sign 0 = 1
[PROOFSTEP]
exact Or.inr <| Or.inl <| sign_zero
[GOAL]
case inr.inr
r : ℝ
hp : 0 < r
⊢ sign r = -1 ∨ sign r = 0 ∨ sign r = 1
[PROOFSTEP]
exact Or.inr <| Or.inr <| sign_of_pos hp
[GOAL]
r : ℝ
⊢ sign r = 0 ↔ r = 0
[PROOFSTEP]
refine' ⟨fun h => _, fun h => h.symm ▸ sign_zero⟩
[GOAL]
r : ℝ
h : sign r = 0
⊢ r = 0
[PROOFSTEP]
obtain hn | rfl | hp := lt_trichotomy r (0 : ℝ)
[GOAL]
case inl
r : ℝ
h : sign r = 0
hn : r < 0
⊢ r = 0
[PROOFSTEP]
rw [sign_of_neg hn, neg_eq_zero] at h
[GOAL]
case inl
r : ℝ
h : 1 = 0
hn : r < 0
⊢ r = 0
[PROOFSTEP]
exact (one_ne_zero h).elim
[GOAL]
case inr.inl
h : sign 0 = 0
⊢ 0 = 0
[PROOFSTEP]
rfl
[GOAL]
case inr.inr
r : ℝ
h : sign r = 0
hp : 0 < r
⊢ r = 0
[PROOFSTEP]
rw [sign_of_pos hp] at h
[GOAL]
case inr.inr
r : ℝ
h : 1 = 0
hp : 0 < r
⊢ r = 0
[PROOFSTEP]
exact (one_ne_zero h).elim
[GOAL]
z : ℤ
⊢ sign ↑z = ↑(Int.sign z)
[PROOFSTEP]
obtain hn | rfl | hp := lt_trichotomy z (0 : ℤ)
[GOAL]
case inl
z : ℤ
hn : z < 0
⊢ sign ↑z = ↑(Int.sign z)
[PROOFSTEP]
rw [sign_of_neg (Int.cast_lt_zero.mpr hn), Int.sign_eq_neg_one_of_neg hn, Int.cast_neg, Int.cast_one]
[GOAL]
case inr.inl
⊢ sign ↑0 = ↑(Int.sign 0)
[PROOFSTEP]
rw [Int.cast_zero, sign_zero, Int.sign_zero, Int.cast_zero]
[GOAL]
case inr.inr
z : ℤ
hp : 0 < z
⊢ sign ↑z = ↑(Int.sign z)
[PROOFSTEP]
rw [sign_of_pos (Int.cast_pos.mpr hp), Int.sign_eq_one_of_pos hp, Int.cast_one]
[GOAL]
r : ℝ
⊢ sign (-r) = -sign r
[PROOFSTEP]
obtain hn | rfl | hp := lt_trichotomy r (0 : ℝ)
[GOAL]
case inl
r : ℝ
hn : r < 0
⊢ sign (-r) = -sign r
[PROOFSTEP]
rw [sign_of_neg hn, sign_of_pos (neg_pos.mpr hn), neg_neg]
[GOAL]
case inr.inl
⊢ sign (-0) = -sign 0
[PROOFSTEP]
rw [sign_zero, neg_zero, sign_zero]
[GOAL]
case inr.inr
r : ℝ
hp : 0 < r
⊢ sign (-r) = -sign r
[PROOFSTEP]
rw [sign_of_pos hp, sign_of_neg (neg_lt_zero.mpr hp)]
[GOAL]
r : ℝ
⊢ 0 ≤ sign r * r
[PROOFSTEP]
obtain hn | rfl | hp := lt_trichotomy r (0 : ℝ)
[GOAL]
case inl
r : ℝ
hn : r < 0
⊢ 0 ≤ sign r * r
[PROOFSTEP]
rw [sign_of_neg hn]
[GOAL]
case inl
r : ℝ
hn : r < 0
⊢ 0 ≤ -1 * r
[PROOFSTEP]
exact mul_nonneg_of_nonpos_of_nonpos (by norm_num) hn.le
[GOAL]
r : ℝ
hn : r < 0
⊢ -1 ≤ 0
[PROOFSTEP]
norm_num
[GOAL]
case inr.inl
⊢ 0 ≤ sign 0 * 0
[PROOFSTEP]
rw [mul_zero]
[GOAL]
case inr.inr
r : ℝ
hp : 0 < r
⊢ 0 ≤ sign r * r
[PROOFSTEP]
rw [sign_of_pos hp, one_mul]
[GOAL]
case inr.inr
r : ℝ
hp : 0 < r
⊢ 0 ≤ r
[PROOFSTEP]
exact hp.le
[GOAL]
r : ℝ
hr : r ≠ 0
⊢ 0 < sign r * r
[PROOFSTEP]
refine' lt_of_le_of_ne (sign_mul_nonneg r) fun h => hr _
[GOAL]
r : ℝ
hr : r ≠ 0
h : 0 = sign r * r
⊢ r = 0
[PROOFSTEP]
have hs0 := (zero_eq_mul.mp h).resolve_right hr
[GOAL]
r : ℝ
hr : r ≠ 0
h : 0 = sign r * r
hs0 : sign r = 0
⊢ r = 0
[PROOFSTEP]
exact sign_eq_zero_iff.mp hs0
[GOAL]
r : ℝ
⊢ (sign r)⁻¹ = sign r
[PROOFSTEP]
obtain hn | hz | hp := sign_apply_eq r
[GOAL]
case inl
r : ℝ
hn : sign r = -1
⊢ (sign r)⁻¹ = sign r
[PROOFSTEP]
rw [hn]
[GOAL]
case inl
r : ℝ
hn : sign r = -1
⊢ (-1)⁻¹ = -1
[PROOFSTEP]
norm_num
[GOAL]
case inr.inl
r : ℝ
hz : sign r = 0
⊢ (sign r)⁻¹ = sign r
[PROOFSTEP]
rw [hz]
[GOAL]
case inr.inl
r : ℝ
hz : sign r = 0
⊢ 0⁻¹ = 0
[PROOFSTEP]
exact inv_zero
[GOAL]
case inr.inr
r : ℝ
hp : sign r = 1
⊢ (sign r)⁻¹ = sign r
[PROOFSTEP]
rw [hp]
[GOAL]
case inr.inr
r : ℝ
hp : sign r = 1
⊢ 1⁻¹ = 1
[PROOFSTEP]
exact inv_one
[GOAL]
r : ℝ
⊢ sign r⁻¹ = sign r
[PROOFSTEP]
obtain hn | rfl | hp := lt_trichotomy r (0 : ℝ)
[GOAL]
case inl
r : ℝ
hn : r < 0
⊢ sign r⁻¹ = sign r
[PROOFSTEP]
rw [sign_of_neg hn, sign_of_neg (inv_lt_zero.mpr hn)]
[GOAL]
case inr.inl
⊢ sign 0⁻¹ = sign 0
[PROOFSTEP]
rw [sign_zero, inv_zero, sign_zero]
[GOAL]
case inr.inr
r : ℝ
hp : 0 < r
⊢ sign r⁻¹ = sign r
[PROOFSTEP]
rw [sign_of_pos hp, sign_of_pos (inv_pos.mpr hp)]
|
So many people are quick to point out the inadequacies of food pyramids. But only focusing on criticism is boring. Let’s ask a different question: what do we do about it? What is the one food change we can make that is easy, simple, and relevant to everyone?
I say eat a rainbow of whole foods every day.
Different nutrients impart different colors to fruits and vegetables.
I remember back in the day when I started doing Jane Fonda-style aerobic workouts. It had nothing to do with form or engaging muscle groups. It was all about how many reps you could do for maximum burn and pain. As athletes and coaches, we now know that a solid rep executed with perfect form is more important than number of reps.
This number-focused mindset can rear its ugly head with nutrition, too. From a psychological perspective, anything that leads to a feeling of restriction or excess typically does not offer a positive long-term outcome. More often the reverse happens, causing people to bounce from over-restricted tendencies to excessive eating patterns. In the dieting world, this has been coined as “yo-yo dieting.” One of its most common symptoms is calorie counting.
When I first started a paleo lifestyle a little over four years ago, there were a few things I really struggled with, and counting calories was at the top of my list. I was shocked to learn just how ingrained calorie counting had become in my daily thinking and decision making. I realized my mind was programmed to pay attention to the number of calories first and foremost.
I have to admit, I stored up a lot of random knowledge about calorie counts for the foods I loved. But I grew to see that most of those foods were not even real foods, but processed, packaged poison. I also realized the knowledge I gained by counting calories was often the deciding factor of what foods I ate, more than nutritional content. I was sad to realize this was how I was living.
Today I have a simple guiding principle: I try to eat at least two platefuls of an organic, whole-food rainbow every day. Instead of counting calories, I count colors. Five different colors a day is my goal. Many times I am lucky to have that with each plateful.
Food has become like art mixed with a seasonal scavenger hunt. My search for colorful foods to keep my plate interesting and fun has led me to try new seasonal and native foods. Here are a few examples of colorful meals that are also perfect for winter months and great for families or large groups.
Line cookie sheet with parchment paper and place diced squash coated in the melted oil on tray. Bake for 30 minutes until soft to touch and slightly brown.
Meanwhile, begin to heat bone broth on stove, adding sliced carrots.
Once done, add the cooked squash to the broth.
Cook soup for about 20 minutes or until carrots are fork soft. Remove soup from heat.
Using an immersion blender or hand mixer, mix soup into a nice creamy consistency, adding coconut milk and spices to taste right in the soup pot.
Add spices to taste. Cook for 10 hours on low. Once done, turn off and enjoy with a side salad.
Photos 1, 2 and teaser courtesy of Shutterstock.
Photo 3 courtesy of V. Capaldi. |
#include <ros/ros.h>
#include <kuka_manipulation_moveit/KukaMoveit.hpp>
#include <kuka_cv/Color.h>
#include <kuka_cv/RequestPalette.h>
#include <kuka_cv/RequestCanvas.h>
#include <std_srvs/Empty.h>
#include <visualization_msgs/Marker.h>
#include <boost/thread/thread.hpp>
// TF
#include <tf2_ros/transform_listener.h>
#include <tf2_ros/transform_broadcaster.h>
#include <tf2/LinearMath/Matrix3x3.h>
#include <tf2/LinearMath/Vector3.h>
#include <tf2/convert.h>
#include <tf2/impl/utils.h>
#include <tf2/utils.h>
#include <geometry_msgs/TransformStamped.h>
#include <std_msgs/String.h>
#define DEBUG false

// TODO try using TF as main linear math.

// Set by chatterCallback() when a "run" message arrives; polled in main().
bool start = false;
// Number of smears painted so far; written in main(), read by the
// marker-publishing thread.
size_t printedMarkers = 0;
// Canvas orientation (R) and translation (v) relative to the robot base,
// filled in from the canvas service response in main().
tf2::Matrix3x3 R;
tf2::Vector3 v;

// Paint-bottle geometry, meters.
const double COLOR_BOTLE_HEIGHT = 0.06;
const double COLOR_HEIGHT = 0.045;
const double HEIGHT_OFFSET = COLOR_BOTLE_HEIGHT - COLOR_HEIGHT + 0.02;
// Brush geometry, meters.
const double BRUSH_HEIGHT = 0.01;
const double BRUSH_WIDTH = 0.01;
/**
 * Build an RViz SPHERE_LIST marker with one colored point per picture pixel.
 *
 * @param colors  Point colors, 0-255 per channel.
 * @param poses   Matching point positions, expressed in the canvas frame.
 * @return        Marker ready for publishing; on empty input the node is
 *                shut down and a default-constructed marker is returned.
 */
visualization_msgs::Marker createMarkerMsg(std::vector<kuka_cv::Color> & colors, std::vector<kuka_cv::Pose> & poses) {

    if (colors.empty() || poses.empty()) {
        // Fixed typo in the operator-facing message ("respospone").
        ROS_FATAL_STREAM("Picture pre processing Error: Empty response!");
        ros::shutdown();
        return visualization_msgs::Marker();
    }

    uint32_t shape = visualization_msgs::Marker::SPHERE_LIST;
    visualization_msgs::Marker marker;

    // Set the frame ID and timestamp. See the TF tutorials for information on these.
    marker.header.frame_id = "/canvas_link";
    marker.header.stamp = ros::Time::now();

    // Namespace + id uniquely identify the marker; re-publishing with the
    // same pair overwrites the previous one.
    marker.ns = "basic_shapes";
    marker.id = 0;
    marker.type = shape;

    // Set the marker action. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
    marker.action = visualization_msgs::Marker::ADD;

    // Identity orientation; the actual point positions are per-point below.
    marker.pose.orientation.x = 0.0;
    marker.pose.orientation.y = 0.0;
    marker.pose.orientation.z = 0.0;
    marker.pose.orientation.w = 1.0;

    // Sphere diameter (meters) used for every point of the list.
    marker.scale.x = 0.01;
    marker.scale.y = 0.01;
    marker.scale.z = 0.01;

    marker.lifetime = ros::Duration(1);

    // One geometry point plus one RGBA color per pixel; color channels are
    // normalized from 0-255 to [0, 1].
    for (uint32_t i = 0; i < colors.size(); ++i)
    {
        geometry_msgs::Point p;
        p.x = poses[i].x;
        p.y = poses[i].y;
        p.z = poses[i].z;

        std_msgs::ColorRGBA color;
        color.r = 1.0*colors[i].r/255;
        color.g = 1.0*colors[i].g/255;
        color.b = 1.0*colors[i].b/255;
        color.a = 1.0;

        marker.points.push_back(p);
        marker.colors.push_back(color);
    }
    return marker;
}
/**
 * Thread body: republishes, at `rate` Hz, a copy of `marker` that contains
 * only the first `printedMarkers` points, so RViz shows painting progress.
 * Reads the global `printedMarkers`, which the main thread increments
 * after each smear.
 */
void publishMarkers(visualization_msgs::Marker & marker, size_t rate) {

    ros::NodeHandlePtr node = boost::make_shared<ros::NodeHandle>();
    ros::Publisher pub = node->advertise<visualization_msgs::Marker>("/visualization_marker", 1);

    // Copy of the marker metadata; points/colors are appended incrementally.
    visualization_msgs::Marker activeMarkers;
    activeMarkers.header.frame_id = marker.header.frame_id;
    activeMarkers.header.stamp = ros::Time::now();
    activeMarkers.ns = marker.ns;
    activeMarkers.id = marker.id;
    activeMarkers.type = marker.type;
    activeMarkers.action = marker.action;
    activeMarkers.scale.x = marker.scale.x;
    activeMarkers.scale.y = marker.scale.y;
    activeMarkers.scale.z = marker.scale.z;
    activeMarkers.lifetime = marker.lifetime;

    size_t prevValue = printedMarkers;
    // ROS_INFO_STREAM("printedMarkers:" << printedMarkers);

    // Catch up with any points painted before this thread started.
    if (printedMarkers != 0) {
        for (size_t i = 0; i < printedMarkers; ++i) {
            activeMarkers.points.push_back(marker.points[i]);
            activeMarkers.colors.push_back(marker.colors[i]);
        }
    }

    ROS_INFO_STREAM("[LTP] Start marker publishing");
    ros::Rate r(rate);
    while (ros::ok()) {
        // Block until something (e.g. RViz) subscribes to the topic.
        while (pub.getNumSubscribers() < 1)
        {
            if (!ros::ok())
            {
                return;
            }
            ROS_WARN_ONCE("Please create a subscriber to the marker");
            sleep(1);
        }
        activeMarkers.header.stamp = ros::Time::now();

        // Append the newly painted point when exactly one was added since
        // the previous cycle.
        // NOTE(review): `printedMarkers` is read here without any
        // synchronization with the writer in main(); if it advances by
        // more than one per cycle those points are never appended (only an
        // error is logged). Consider an atomic counter or a mutex.
        if (printedMarkers - prevValue == 1) {
            activeMarkers.points.push_back(marker.points[printedMarkers - 1]);
            activeMarkers.colors.push_back(marker.colors[printedMarkers - 1]);
            prevValue = printedMarkers;
        } else if (printedMarkers - prevValue > 1) {
            ROS_ERROR_STREAM("Markers ERROR.");
        }
        pub.publish(activeMarkers);
        r.sleep();
    }
}
/**
 * Dip the brush into a paint bottle: hover above it, descend into the
 * paint, then retreat back to the hover height.
 *
 * @param manipulator  Motion interface used to execute the three moves.
 * @param pose         Bottle position (robot base frame).
 */
void collectPaintOnBrush(KukaMoveit & manipulator, kuka_cv::Pose & pose)
{
    const double hoverZ = pose.z + COLOR_BOTLE_HEIGHT + HEIGHT_OFFSET;
    const double dipZ   = pose.z + COLOR_HEIGHT - BRUSH_HEIGHT;

    geometry_msgs::Pose target;
    target.position.x = pose.x;
    target.position.y = pose.y;
    target.orientation.w = 1;

    // Approach from above.
    target.position.z = hoverZ;
    manipulator.move(target, DEBUG);

    // Lower the brush into the paint.
    target.position.z = dipZ;
    manipulator.move(target, DEBUG);

    // Lift clear of the bottle again.
    target.position.z = hoverZ;
    manipulator.move(target, DEBUG);
}
/**
 * Paint one smear at the given point (robot base frame): approach from
 * above, touch the canvas, drag the brush one brush-width along -x, then
 * retreat upward.
 *
 * @param manipulator  Motion interface used to execute the moves.
 * @param pose         Target point, already transformed to the base frame.
 */
void doSmear(KukaMoveit & manipulator, kuka_cv::Pose & pose)
{
    geometry_msgs::Pose p;
    p.position.x = pose.x;
    p.position.y = pose.y;
    p.position.z = pose.z + HEIGHT_OFFSET;
    p.orientation.w = 1;

    ROS_INFO_STREAM("P: [" << p.position.x << ", " << p.position.y << ", " << p.position.z << "]");

    // Hover just above the target point.
    manipulator.move(p, DEBUG);

    // Touch down on the canvas.
    p.position.z = pose.z;
    manipulator.move(p, DEBUG);

    // Drag the brush to produce the smear.
    p.position.x -= BRUSH_WIDTH;
    manipulator.move(p, DEBUG);

    // Retreat with bottle-height clearance (same lift height as used when
    // collecting paint).
    p.position.z = pose.z + COLOR_BOTLE_HEIGHT + HEIGHT_OFFSET;
    manipulator.move(p, DEBUG);
}
/**
 * Convert a point from the canvas frame into the robot base frame using
 * the rigid transform p' = R*p + v, where R (rotation) and v (translation)
 * are the module-level globals filled in from the canvas service response.
 * Orientation is reset to zero.
 *
 * @param pose  Point to transform in place.
 */
void transformPointFromCanvasToBase(kuka_cv::Pose & pose) {
    tf2::Vector3 p(pose.x, pose.y, pose.z);
    tf2::Vector3 result = R * p + v;

    // Use the public accessors rather than reaching into tf2's internal
    // m_floats storage array.
    pose.x = result.x();
    pose.y = result.y();
    pose.z = result.z();

    pose.phi = 0; pose.theta = 0; pose.psi = 0;
}
// Callback for the AIRA "run" topic: any incoming message starts the
// drawing cycle in main() via the global `start` flag.
// NOTE(review): `start` is written here and read in main() without
// synchronization while an AsyncSpinner is running; consider
// std::atomic<bool>.
void chatterCallback(const std_msgs::String::ConstPtr& msg)
{
    ROS_INFO("I heard: [%s]", msg->data.c_str());
    start = true;
}
/**
 * Node entry point. Waits for a "run" message, then queries the palette,
 * canvas and image-processing services, and paints the image smear by
 * smear while a background thread publishes progress markers for RViz.
 */
int main(int argc, char ** argv)
{
    ros::init(argc, argv, "camera_test");
    ros::NodeHandle nh;
    ros::AsyncSpinner spinner(1);
    spinner.start();

    // Service client
    ros::ServiceClient paletteClient = nh.serviceClient<kuka_cv::RequestPalette>("/request_palette");
    ros::ServiceClient canvasClient = nh.serviceClient<kuka_cv::RequestCanvas>("/request_canvas");
    ros::ServiceClient startImgProcClient = nh.serviceClient<std_srvs::Empty>("/start_image_preprocessing");
    ros::ServiceClient imgPaletteClient = nh.serviceClient<kuka_cv::RequestPalette>("/request_image_palette");
    ros::Publisher markerPublisher = nh.advertise<visualization_msgs::Marker>("/visualization_marker", 1);

    /* AIRA Stack */
    ros::Subscriber runSubscriber = nh.subscribe("run", 10, chatterCallback);
    ros::ServiceClient liabilityFinishClient = nh.serviceClient<std_srvs::Empty>("liability/finish");

    // Initialize manipulator
    KukaMoveit manipulator("manipulator");

    /* Set joint constraints */
    // Joint a1
    moveit_msgs::Constraints constraints;
    constraints.joint_constraints.resize(2);
    constraints.joint_constraints[0].joint_name = "joint_a1";
    constraints.joint_constraints[0].position = 0.0;
    constraints.joint_constraints[0].tolerance_above = 1;
    constraints.joint_constraints[0].tolerance_below = 1;
    constraints.joint_constraints[0].weight = 1.0;

    // Joint a4
    constraints.joint_constraints[1].joint_name = "joint_a4";
    constraints.joint_constraints[1].position = 0.0;
    constraints.joint_constraints[1].tolerance_above = 1;
    constraints.joint_constraints[1].tolerance_below = 1;
    constraints.joint_constraints[1].weight = 1.0;
    manipulator.getMoveGroup()->setPathConstraints(constraints);

    kuka_cv::RequestPalette::Response palette;
    kuka_cv::RequestPalette paletteInfo;
    kuka_cv::RequestCanvas canvasInfo;

    // Drawing image Colors
    std::vector<kuka_cv::Color> pictureColors;
    std::vector<kuka_cv::Pose> pictureColorsPoses;

    while(ros::ok()) {
        // `start` is flipped by chatterCallback on a "run" message.
        if (start) {

            /* Palette info: retry until the service returns a non-empty
             * palette (colors + bottle poses). */
            paletteInfo.request.mode = 0;
            ROS_INFO_STREAM("[LTP] Receive palette message.");
            do {
                if (paletteClient.call(paletteInfo)) {
                    palette = paletteInfo.response;
                    break;
                }
                ROS_WARN_STREAM("[LTP] Receive Colors array size = 0");
            } while ((palette.colors.empty() || palette.poses.empty()) && ros::ok());

            /* Canvas info: retry until the canvas pose/size is available. */
            canvasInfo.request.mode = 0;
            ROS_INFO_STREAM("[LTP] Receive canvas message: ");
            do {
                if (canvasClient.call(canvasInfo)) {
                    break;
                }
                ROS_WARN_STREAM("[LTP] Receive wrong canvas info");
            } while (canvasInfo.response.width == 0 && ros::ok());

            // Cache the canvas pose as rotation R + translation v for
            // transformPointFromCanvasToBase().
            R.setRPY(canvasInfo.response.p.phi, canvasInfo.response.p.theta, canvasInfo.response.p.psi);
            v = tf2::Vector3(canvasInfo.response.p.x, canvasInfo.response.p.y, canvasInfo.response.p.z);

            /* Image palette info */
            std_srvs::Empty emptyMsg;
            ROS_INFO_STREAM("[LTP] START image processing.");
            if (!startImgProcClient.call(emptyMsg)) {
                ROS_ERROR_STREAM("\t ERROR");
                return 0;
            }
            ROS_INFO_STREAM("[LTP] Request information about pixels Color and position");
            if (!imgPaletteClient.call(paletteInfo)) {
                ROS_ERROR_STREAM("\t ERROR");
                return 0;
            }
            pictureColors = paletteInfo.response.colors;
            pictureColorsPoses = paletteInfo.response.poses;

            ROS_INFO_STREAM("[LTP] Start Drawing...");

            // Draw Params
            bool isDraw = true;
            bool updatePaint = true;  // NOTE(review): never read below
            visualization_msgs::Marker marker = createMarkerMsg(pictureColors, pictureColorsPoses);
            size_t pxNum = pictureColors.size();
            size_t paletteSize = palette.colors.size();

            ROS_INFO_STREAM("[LTP] Points number: " << pxNum);

            // Background thread republishing the already-painted points.
            size_t rate = 3;
            ros::Rate rt(0.5);
            boost::thread thr(publishMarkers, marker, rate);

            size_t swearsNumber = 0;
            size_t currColorIndex = 0, prevColorIndex = 0;

            // Color properties
            // We must use one "ideal" brush for measuring alive time. Brush coefficient is equal 1
            double aliveTime = 4; // Number of swears for reduce color of paint to zero
            double brushCoeff = 1; // Brush coeff. that contain reduce color of paint to zero if we using not ideal brush

            // Global drawing circle
            while (ros::ok() && isDraw) {

                if (printedMarkers == pxNum) {
                    // NOTE(review): this only clears isDraw — the rest of
                    // the loop body still runs and indexes
                    // pictureColors[printedMarkers], which is past the end
                    // here; a `break` would be safer.
                    ROS_ERROR("printedMarkers == pxNum");
                    isDraw = false;
                }

                // Find color in palette: linear scan for an exact RGB match
                // of the current pixel, starting from the previous index.
                prevColorIndex = currColorIndex;
                while (currColorIndex < paletteSize &&
                       (pictureColors[printedMarkers].r != palette.colors[currColorIndex].r ||
                        pictureColors[printedMarkers].g != palette.colors[currColorIndex].g ||
                        pictureColors[printedMarkers].b != palette.colors[currColorIndex].b))
                {
                    ++currColorIndex;
                    ROS_INFO("Select color! (%lu | %lu)", paletteSize, currColorIndex);
                }
                ROS_WARN("Select color! (%lu | %lu)", paletteSize, currColorIndex);
                if (currColorIndex == paletteSize) {
                    // Not found from this starting index: wrap around and
                    // retry the same pixel.
                    currColorIndex = 0;
                    continue;
                } else if (currColorIndex > paletteSize) {
                    ROS_ERROR_STREAM("Error of changing palette color.");
                }

                if (DEBUG) {
                    ROS_INFO_STREAM("Count: " << printedMarkers);
                    ROS_INFO_STREAM("[COLOR] palette: ["
                        << (uint)palette.colors[currColorIndex].b << ","
                        << (uint)palette.colors[currColorIndex].g << ","
                        << (uint)palette.colors[currColorIndex].r << "] vs ("
                        << (uint)pictureColors[printedMarkers].b << ","
                        << (uint)pictureColors[printedMarkers].g << ","
                        << (uint)pictureColors[printedMarkers].r << ")");
                }

                // Re-dip the brush on the very first smear and whenever the
                // paint has been used up (aliveTime smears).
                if (swearsNumber >= aliveTime || swearsNumber == 0) {
                    collectPaintOnBrush(manipulator, palette.poses[currColorIndex]);
                    swearsNumber = 0;
                }
                transformPointFromCanvasToBase(pictureColorsPoses[printedMarkers]);
                doSmear(manipulator, pictureColorsPoses[printedMarkers]);
                ROS_INFO_STREAM("[LTP] POINT (" << printedMarkers << ")");

                ++printedMarkers;
                ++swearsNumber;

                // NOTE(review): interrupting the publisher thread on every
                // iteration makes it throw boost::thread_interrupted at its
                // next sleep, which ends marker publishing after the first
                // smear — confirm whether this is intended.
                thr.interrupt();
            }
            thr.join();

            // Report liability completion to the AIRA stack.
            std_srvs::Empty empty;
            if (!liabilityFinishClient.call(empty)) {
                return 0;
            }
            start = false;
        }
    }
    return 0;
}
/-
The functor typeclass enables us to change the behavior
of (what amounts to) *function application* In particular,
the functor.map operation (<$>) lifts any pure function,
h : α → β, to a "structure-preserving" function on data
values containing α and β values: e.g., mapping options,
lists, or trees of α values into structurally identical
trees of β values. Preservation of structure is assured
by the functor laws.
-/
/-
The applicative functor typeclass enables us to change
the behavior of *multi-argument* function application.
The pure function takes a function and lifts it into
one that can then serve as a first argument to the seq
function, which then "applies" that function to a data
structure containing first arguments to that function.
The result is then a data structure holding partially
evaluated functions that is then "applied" to the next
data structure containing argument values. Overriding
the definitions of pure and seq in particular ways for
different data structures, such as option and list,
has given us nice ways to implement "non-deterministic"
function application as well as error propagation over
applications of functions to multiple arguments. Note:
an applicative functor *is a kind of functor,* so we
can assume that the map function is available when we
define pure and seq.
-/
/-
We can now explain the purpose of the monad typeclass.
It provides us with a way to override the behavior of
ordinary function *composition*. So, to begin, we will
review this essential concept.
-/
/-
Suppose we have three sets: dogs = {Benji, ChewChew};
breeds = {poodle, husky}; and messy = { true, false };
as well as two functions, breedOf : dogs → breeds;
and messy : breed → sheds. For each dog, breedOf returns
its breed; and for each breed, sheds returns true or
false reflecting whether that breed of dog sheds or not.
In this context, we can apply the composition operator
to sheds and breedOf, to derive a new function, isMessy :
from dog → bool.
-/
/-
axioms
(dogs breeds messy : Type)
(breed : dogs → breeds)
(sheds : breeds → messy)
noncomputable def isMessy : dogs → messy := (sheds ∘ breed)
-/
/-
Next we confirm that (1) isMessy is a function that takes
a dog and returns its messiness, and (2) that when applied
to an argument (dog), x, it works by first computing the
value of (breed x), yielding a breed, and then applying the
sheds function to that result of that first operation.
-/
namespace hidden

-- A tiny domain model: dogs, their breeds, coat kinds, and yes/no answers.
inductive dogs | fido | polly
inductive breeds | poodle | husky
inductive coats | fur | hair
inductive yesno | yes | no

open dogs breeds coats yesno

-- Each dog has a breed.
def breed : dogs → breeds
| polly := poodle
| fido := husky

-- Each breed has a kind of coat.
def coat : breeds → coats
| poodle := hair
| husky := fur

-- Fur sheds; hair does not.
def sheds : coats → yesno
| fur := yes
| hair := no

-- A dog is messy iff its breed's coat sheds: plain function composition.
def messy : dogs → yesno :=
sheds ∘ coat ∘ breed

#check messy -- isMessy : dogs → messy
#reduce messy -- λ (x : dogs), sheds (breed x)
#reduce messy polly
#reduce messy fido
/-
We pronounce (sheds ∘ coat ∘ breed) as "sheds after
coat after breed." It is defined to be the function
obtained by taking a dog as an argument and returning
the value obtained by first applying breed to the dog,
yielding its breed, then applying coat to that breed,
to obtain whether it's fur or hair, and then finally
applying sheds to that value to get a yes/no answer
to the question of whether that partiular dog is messy
(because it sheds).
Function composition is associative.
-/
/-
Let's unpack one of these examples. We know now that
(messy polly) reduces to no. We also know it reduces
to (λ d, (messy (coat (breed d)))). We see again that
to get a final answer we apply "messy *after* applying
coat after applying breed to the given dog, d.
This is a sort of "backwards" sequential notation, in
the sense that the last operation to be applied is the
leftmost one in the expression. The argument d "flows"
into the breed function from the right; the result comes
out and "flows" left into coat function; the result of
that emerges and flows to the left into sheds function;
yielding a final result.
English is read from left to right and top to bottom,
and so English speakers expect temporal sequences to
be organized in that style. By a simple notation change
we can express the same composition of functions.
-/
-- The same composition written "sequentially": each `let` binds the
-- result of one application and feeds it to the next step.
def isPollyMessy :=
(let
b := (breed polly) in let
c := (coat b) in let
m := (sheds c) in
m
)
/-
Of course, even though our new code looks sequential,
it's still purely functional. In the first line we
*bind* the result of the first application to b. We
use this value as an argument in the second line and
bind its result to c. Next in this context in which
b and c are thus bound, we evaluate (sheds c) and bind
its result to m; and in the very last line we "return"
(reduce to) the value of m.
If we generalize the choice of polly as the dog in
this case (replacing it with a parameter) then we
get our messy function back.
-/
-- Generalizing polly to a parameter recovers the messy function, still
-- written as a chain of let-bindings.
def messy' (d : dogs) :=
(
let b := (breed d) in -- bind b, call rest
let c := (coat b) in -- bind c, call rest
let m := (sheds c) in -- bind m, call rest
m -- "return" m
)

/-
Each `let ... in` above already plays the role of a bind: bind a name
to an intermediate result, then run the rest of the computation in that
context.
-/

-- It desugars to just what we expect
#reduce messy'
/-
As a conceptual step without being formal, we can
easily imagine a new notation that allows us to
write that program in a more sequential style.
-/
/-
Note that we can see a sequential chain of function
applications in this code: sheds ∘ coat ∘ breed means
- apply sheds after (to the result of)
- apply coat after (to the result of)
- apply breed to polly.
But in this style of presentation, the applications
appear in the reverse order in which they occur. We
can reasily develop a new notation in which functions
appear in our code in the same order in which they
are actually applied. The easy trick is to define a
notation for function application where the argument
is on the left and the function to be applied is on
the right. We'll use >> as an infix operator. So, for
example 0 >> nat.succ would evaluate to 1, because
it means nothing other than nat.succ 0.
-/
-- Reversed application: argument on the left, function on the right, so
-- a pipeline reads in the order its steps actually execute.
local notation v ` >> ` f :120 := f v

example : 0 >> nat.succ = 1 := rfl
example : 0 >> (λ n, n + 1) = 1 := rfl

/-
Study the second example. What it means if we desugar
the >> is ((λ n, n + 1) 0); and what this means is to
first *bind* n to the incoming argument, then in this
context evaluate the expression, n + 1. We'll return
to this perspective later in these notes.
-/

#reduce
polly >> -- a pure argument bound as argument of rest
breed >> -- apply breed to incoming, pass result to rest
coat >> -- apply coat to incoming, pass result to rest
sheds -- apply sheds to incoming, return final result
/-
So now we have an abstraction of a sequential pipeline
that works by taking a pure argument (like polly), or in
general the result of a preceding computation, and passes
that value as the argument to the function that implements
the rest of the pipeline.
-/
-- Example: the following program pairs are equivalent
#reduce polly >> breed -- feed polly as argument into breed
#reduce
(λ d, breed d) -- bind d and apply breed to it
polly -- argument to be bound

#reduce polly >> breed >> coat
#reduce
(λ b, coat b) -- function
((λ d, breed d) -- argument (rest)
polly
)

#reduce polly >> breed >> coat >> sheds
#reduce
(λ c, sheds c) -- function
((λ b, coat b) -- argument (rest)
((λ d, breed d)
polly
)
)
/-
========================
-/
/-
So what does a monad do? The answer: it gives us a
more powerful version of >>, in the form of a generic
function call bind, denoted >=>, that enables one to
add hidden processing of metadata to the evaluation
of pipelines like the ones we've just seen. Monads
give us *function composition with effects*.
Let's start to illustrate this idea by continuing
to develop our dog examle above. Evaluating messy
applied to a dog will result in three function
application operations. What if we wanted a way
to have application of compositions of functions
return both the expected pure answer along with a
natural number count of the number of pure function
applications were performed to produce the final
result.
For example, now we'd expect the result of evaluating
(messy fido) to return not just "yes" but the pair,
"(yes, 3)," meaning yes fido's messy and oh by the
way, computing this answer took three ordinary
function applications.
-/
/-
Clearly the types of the functions that we need to
"compose" changes. Each function in the "chain"
needs to return not just the correct "pure" result
but also a natural number accounting for the call
to this function and to any other functions it might
have called. Focus especially on the types of these
functions. Remember if S and T are types then S × T
is a type, the type of (S value, T value) pairs.
Note that each of these functions takes a pure
argument but returns a result embedded in a larger
context object, here in a pair that also contains
a natural number.
-/
universe u

-- A "profiled" result: a pure value paired with a count of the function
-- applications performed to produce it.
def prof_result (α : Type u) := α × ℕ

-- Effectful versions of breed/coat/sheds: each returns its pure answer
-- together with a count of 1 for its own application.
def m_breed : dogs → prof_result breeds :=
λ d, (breed d, 1)

def m_coat : breeds → prof_result coats :=
λ b, (coat b, 1)

def m_sheds : coats → prof_result yesno :=
λ c, (sheds c, 1)
/-
The operation of pure function composition will not
work here as it did before, because it requires that
the argument type of the next function to be applied
is the same as the return type of the function most
recently applied. For example, breed takes a dog and
returns a breed, coat takes a breed and returns what
kind of coat it has, and messy takes a kind of coat
and returns a yesno as the final answer.
-/
-- Ordinary composition still works for the *pure* functions ...
#check coat ∘ breed

/-
But our extended functions won't compose this way.
The m_breed function takes a dog and returns a pair
containing its breed and the value 1. But we can't
feed this pair to m_coat, because m_coat takes a pure
breed value. We need a new kind of composition
operator for our "effectful" functions.
-/

#check m_coat ∘ m_breed -- type error!
/-
Clearly we need to define a new form of function
composition: one that handles any pure applications
that are needed but that also handles the metadata
returned along with a pure result value.
Let's just write a concrete implementation of the
our desired pipeline. Then we can study it as a
basis from which to generalize.
-/
-- Hand-written "composition with counting": thread the pure values
-- through breed/coat/sheds while summing the application counts.
def messy'' : dogs → yesno × ℕ :=
λ d,
let
(b, n1) := m_breed d in let -- use d, get b, n1
(c, n2) := m_coat b in let -- use b, get c, n2
(s, n3) := m_sheds c in -- use c, get s, n3
(s, n1+n2+n3) -- produce final result

-- Let's compute whether polly is messy''
#reduce messy'' polly
-- No! And that took 3 operations to compute
/-
Let's rewrite messy'' applied to polly once again,
but put it in the style using lambda to bind a variable
to the result of running the rest of the computation and
returning the result of performing some operation on it.
We'll do it in steps, as we did before.
-/
/-
this is a function application: bind d to polly, and
then in this context, compute and return (m_breed d).
-/
#reduce
(λ d, m_breed d) -- bind d to polly, apply m_breed to d
polly

/-
So far so good, but that's the end of our good luck. In
retrospect it should be clear that we won't be able to chain
our effectful functions using plain old function composition.
-/

#reduce m_coat ∘ m_breed
/-
Here the problem is that m_breed returns a breed along with
a count, while m_coat just expects to receive a breed value
as an argument. Somehow what we need is a novel composition
operator: one that can (1) "reach into" the data structure
that m_breed returns, to pull out the "breed" value that it
contains, and pass it on to m_coat, while also combining the
counts from both function applications to return as part of
the final result. Let's denote this new composition operator
as >>=, an infix notation for a function we can call "bind'."
Now let's think about types:
- m_breed : dogs → (breeds × ℕ)
- m_coat : breeds → (coats × ℕ)
- m_coat >>= m_breed : dog → (coats × ℕ)
-/
-- Special-case effectful composition for m_breed-then-m_coat: compose
-- the pure results and add the two application counts.
def compose_coat_breed :
(dogs → (breeds × ℕ)) → (breeds → (coats × ℕ)) → (dogs → (coats × ℕ))
| db bc :=
λ (d : dogs),
let (b, c1) := db d in
let (c, c2) := bc b in
(c, c1+c2)

/-
We can now see that the composition of m_breed and m_coat using our
new, special case, compose-like function does two things: it computes
the desired composition of the pure functions, breed and coat, while
also combining the effects of m_breed and m_coat in an effect that is
returned along with the pure result.
-/

#reduce compose_coat_breed m_breed m_coat
#reduce (compose_coat_breed m_breed m_coat) polly
/-
Now the question is, can we somehow generalize the approach we've
taken here to the special case of composing m_coat and m_breed. A
first step is to recognize that the arguments to this function are
all similar in an important way: each is of a pair type, combining
a pure value and an effect in the form of a natural number. Let's
start by factoring out the common structure of these pairs.
-/
#check prof_result

-- Same function, with the (pure value, count) pair type abstracted
-- behind prof_result.
def compose_coat_breed' :
(dogs → (prof_result breeds)) →
(breeds → (prof_result coats)) →
(dogs → prof_result coats)
| db bc :=
λ (d : dogs),
let (b, c1) := db d in
let (c, c2) := bc b in
(c, c1+c2)
/-
Now let's simply generalize the pure argument and result types.
-/
-- Generalize the pure argument/result types. The effect (the count) is
-- still handled concretely, so this works only for prof_result.
def prof_compose {α β γ : Type} :
(α → (prof_result β)) → (β → (prof_result γ)) → (α → prof_result γ)
| f g :=
λ (a : α),
let (b, c1) := f a in
let (c, c2) := g b in
(c, c1+c2)
/-
Finally, let's think about generalizing prof_result to any kind
of object possibly containing values of type α. Note that in a
strong sense, prof_result is analogous to list, option, tree,
etc: a data structure that can contain values of type α. We've
seen how to do this kind of generalization before.
-/
-- Attempt to abstract prof_result to an arbitrary m : Type → Type.
-- NOTE(review): the body still pattern-matches on pairs, which only
-- makes sense when m is a pair type — exactly the problem discussed in
-- the comment that follows.
def m_compose'
{α β γ : Type} (m : Type → Type): (α → m β) → (β → m γ) → (α → m γ)
| f g :=
λ (a : α),
let (b, c1) := f a in -- we don't know that m is a pair!
let (c, c2) := g b in
(c, c1+c2)
/-
The problem is that we can no longer rely on a value of type
(m α) to be a pair, which is what we have assumed in the let
expressions up to now. So this is not going to work as is. We
can see that if m is, say, list or option, an implementation
of composition for pairs isn't going to work at all. We will
need a different implementation for option, a different one,
again, for list, and so forth. Indeed, for each type builder,
m, we'll need a different implementation of the binding code,
where we bind b to the "pure" result of applying f to a then
then treat b as an argument to the rest of the computation.
-/
/-
That is, we need an overloaded definition of m_compose for
each "context-imposing" type, m. And central to this goal
is an operation that takes an effectful value, m α, and a
function from α → m β. That is, we're going to need what
we call a overloaded bind operation, as follows.
-/
-- Overloadable bind: consume an m α and a continuation α → m β,
-- producing an m β. Each monadic type supplies its own instance.
class has_bind' (m : Type → Type) :=
(bind : ∀ {α β : Type}, m α → (α → m β) → m β)

/-
Here's the standard infix notation for bind.
Note that it takes an α-monadic argument value
and "feeds" it to a function that takes a pure
α value in and produces β-monadic value out. To
obtain this result it will usually "reach into"
the monadic value and behave subsequently based
on what it finds. In many cases it will "apply"
its α → m β function to α values, if any, that
it obtains from the monadic m α argument.
-/

local infixl ` >>= `:55 := has_bind'.bind
/-
We now have enough machinery to cobble together
a definition of bind for the prof_result monad.
-/
-- bind for prof_result: run ma, feed its pure value to the
-- continuation, and add the two application counts.
instance bind_prof_result : has_bind' (prof_result) :=
⟨
λ {α β : Type } (ma : prof_result α) a2mb,
let (a, c1) := ma in
let (b, c2) := a2mb a in
(b, c1+c2)
⟩

-- Kleisli composition, defined once and for all in terms of bind.
def m_comp'
{α β γ : Type}
{m : Type → Type} [has_bind' m]
(f : α → m β)
(g : β → m γ) :
(α → m γ) :=
λ a : α,
let mb := f a in mb >>= g

#reduce m_comp' m_breed m_coat

-- The "fish" operator: infix notation for Kleisli composition.
local infixl ` >=> `:60 := m_comp'

def is_messy_prof := (m_breed >=> m_coat >=> m_sheds)

#reduce is_messy_prof fido
#reduce is_messy_prof polly
/-
So the fish operator supports the feeding of
a monadic value into a composition of monadic
functions. What it doesn't support is feeding
functions. What is doesn't support is feeding
of a pure value into such a pipeline, as the
following example shows.
-/
/-
The problem is that polly is pure but >=> expects
a monadic value. The solution is to define a new
function, one that will have to be overloaded for
each monadic type) that takes a pure value as its
argument "lifts" it to a monadic value. Then we
can use >=> to feed it into a pipeline composed
of monadic functions.
-/
-- return/pure: lift a pure value into the monadic context.
class has_return' (m : Type → Type) :=
(return' : ∀ {α : Type}, α → m α)

open has_return'

-- For prof_result, lifting is free: zero applications counted.
instance : has_return' (prof_result) :=
⟨
λ α a, (a, 0)
⟩

/-
Now we can write a complete pipeline with
input fed in "from the left."
-/

#reduce (return' polly) >>= (m_breed >=> m_coat >=> m_sheds)
/-
-/
-- bind for option: none short-circuits, some feeds the continuation —
-- exactly the error-propagation behavior of the option monad.
instance bind_option : has_bind' (option) :=
⟨
λ {α β : Type } (ma : option α) a2mb,
match ma with
| none := none
| (some a) := a2mb a
end
⟩

-- return for option: wrap the pure value in some.
instance : has_return' (option) :=
⟨
λ α a, some a
⟩

end hidden

-- We can now recommend that you use Lean's monad definitions
#check @monad
-- Regression checks: case-splitting on a pair literal must type-check
-- both with explicit `the Nat` ascriptions and with bare integer
-- literals defaulting to Nat. The exact syntactic forms are the point
-- of this test; do not simplify them.
test3ok : Nat
test3ok = case (the Nat 1, the Nat 2) of
(x, y) => x + y

test3ok' : Nat
test3ok' = case (1, 2) of
(x, y) => x + y
|
From mathcomp Require Export ssreflect.
Require Export Classical.
(* Sets over a type T, encoded as predicates T -> Prop. *)
Definition set T := T -> Prop.

(* Membership: x ∈ X is just the predicate X applied to x. *)
Definition In {T : Type} (x : T)(X : set T) := X x.
Notation "x ∈ X" := (@In _ x X )(at level 60).

(* Union, intersection, inclusion, difference, complement, empty set. *)
Definition setU {T : Type} (A B : set T) : set T :=
fun x => x ∈ A \/ x ∈ B .

Definition setI {T : Type} (A B : set T) : set T :=
fun x => x ∈ A /\ x ∈ B.

Definition subset {T : Type} (A B : set T) : Prop :=
forall x, x ∈ A -> x ∈ B.

Definition setD {T : Type} (A B : set T) : set T
:= fun x => x ∈ A /\ ~ x ∈ B.

Definition setC {T : Type} (A : set T) : set T
:= fun x => ~ x ∈ A.

Definition set0 {T : Type} : set T :=
fun _ : T => False.

Notation "A ∩ B" := (@setI _ A B)(at level 40).
Notation "A ∪ B" := (@setU _ A B)(at level 40).
Notation "A ⊂ B" := (@subset _ A B)(at level 30).
Notation "A // B" := (@setD _ A B)(at level 40).
Notation "¬ A" := (@setC _ A)(at level 40).
Notation "∅" := set0.

(* Extensionality taken as an axiom: mutual inclusion implies equality. *)
Axiom extension : forall {T : Type} (A B : set T),
A ⊂ B /\ B ⊂ A -> A = B.
Section SetsFacts.
Context {T : Type}.
(* De Morgan: the complement of a union is the intersection of the
   complements. Both inclusions are proved via the extension axiom. *)
Theorem setCU (A B : set T):
¬ (A ∪ B) = ¬ A ∩ ¬ B.
Proof.
apply extension; split => x Hx.
+ split => F; apply Hx; [left|right] => //.
+ move : Hx => [Ha_ Hb_].
move => [Ha|Hb]; [apply Ha_ | apply Hb_] => //.
Qed.
(* De Morgan: the complement of an intersection is the union of the
   complements. The forward inclusion is classical: it uses NNPP and
   not_or_and from the Classical library imported above. *)
Theorem setCI (A B : set T):
¬ (A ∩ B) = ¬ A ∪ ¬ B.
Proof.
apply extension; split => x Hx.
+ apply NNPP => F.
rewrite /setC /setU /In /= in F.
move /not_or_and : F => [/NNPP HA /NNPP NB ].
apply Hx; split => //.
+ move => [HA HB].
move : Hx => [H|H]; apply H => //.
Qed.
{-# OPTIONS --without-K --rewriting #-}
open import HoTT
open import homotopy.PtdMapSequence
open import homotopy.CofiberSequence
open import groups.Exactness
open import groups.ExactSequence
open import groups.HomSequence
open import cohomology.Theory
module cohomology.Sigma {i} (CT : CohomologyTheory i)
(n : ℤ) (X : Ptd i) (Y : de⊙ X → Ptd i) where
open CohomologyTheory CT
open import cohomology.PtdMapSequence CT
{- Cⁿ(Σx:X.Y) = Cⁿ(⋁x:X.Y) × Cⁿ(X). The proof is by constructing a
- splitting exact sequence
0 → Cⁿ(⋁x:X.Y) → Cⁿ(Σx:X.Y) → Cⁿ(X)
- by observing that the map [select : x ↦ (x, pt Yₓ)] has a left inverse
- and satisfies [Cofiber select == ⋁x:X.Y. -}
-- The basepoint section x ↦ (x, pt Yₓ), packaged as a pointed map.
⊙select : X ⊙→ ⊙Σ X Y
⊙select = (bigwedge-f Y , idp)
-- Cofiber map of ⊙select; its codomain is the big wedge ⋁x:X.Y.
⊙Σbwin : ⊙Σ X Y ⊙→ ⊙BigWedge Y
⊙Σbwin = ⊙cfcod' ⊙select
private
  abstract
    -- Exactness of (cst-hom, C-fmap n ⊙Σbwin), obtained by transporting
    -- the cofiber exact sequence of ⊙Σbwin (C-exact n ⊙Σbwin) along the
    -- equivalence ⊙Susp-to-⊙Cof² and showing the transported first map
    -- is constant (extract-glue out of the big wedge is constant).
    cst-C-Σbwin-is-exact : is-exact (cst-hom {G = C n (⊙Susp X)}) (C-fmap n ⊙Σbwin)
    cst-C-Σbwin-is-exact = equiv-preserves-exact
      {φ₁ = cst-hom {G = C n (⊙Susp X)}}
      {ξG = C-fmap n (⊙Susp-to-⊙Cof² ⊙select)} {ξH = idhom _} {ξK = idhom _}
      (comm-sqrᴳ λ x →
        CEl-fmap n (⊙cfcod²' ⊙select) x
          =⟨ ! $ CEl-fmap-idf n $ CEl-fmap n (⊙cfcod²' ⊙select) x ⟩
        CEl-fmap n (⊙idf _) (CEl-fmap n (⊙cfcod²' ⊙select) x)
          =⟨ C-comm-square n (extract-glue-cod²-comm-sqr ⊙select) □$ᴳ x ⟩
        CEl-fmap n ⊙extract-glue (CEl-fmap n (⊙Susp-to-⊙Cof² ⊙select) x)
          =⟨ CEl-fmap-const n (extract-glue-from-BigWedge-is-const Y) _ ⟩
        Cident n _
          =∎)
      (comm-sqrᴳ λ _ → idp)
      (C-isemap n (⊙Susp-to-⊙Cof² ⊙select) (snd (Cof²-equiv-Susp ⊙select ⁻¹)))
      (idf-is-equiv _)
      (idf-is-equiv _)
      (C-exact n ⊙Σbwin)
-- Candidate right inverse for C-fmap n ⊙select, induced by the
-- dependent first projection ⊙Σ X Y → X.
χ : C n X →ᴳ C n (⊙Σ X Y)
χ = C-fmap n (⊙fstᵈ Y)
abstract
  -- χ is a right inverse: fst ∘ select is pointwise the identity, so
  -- the induced maps on cohomology compose to the identity on Cⁿ(X).
  select-χ-is-idf : ∀ s → CEl-fmap n ⊙select (GroupHom.f χ s) == s
  select-χ-is-idf = CEl-fmap-inverse n ⊙select (⊙fstᵈ Y) λ _ → idp
-- Main result: Cⁿ(Σx:X.Y) ≃ Cⁿ(⋁x:X.Y) × Cⁿ(X), by the splitting lemma
-- applied to the exact sequence for ⊙select, injectivity of the first
-- map (from cst-C-Σbwin-is-exact) and the right inverse χ.
C-Σ : C n (⊙Σ X Y) ≃ᴳ C n (⊙BigWedge Y) ×ᴳ C n X
C-Σ = Exact.φ-inj-and-ψ-has-rinv-split
  (C-exact n ⊙select) (C-is-abelian n _)
  (Exact.φ-const-implies-ψ-is-inj cst-C-Σbwin-is-exact (λ _ → idp))
  χ select-χ-is-idf
{-
⊙Σbwin-over : CF-hom n ⊙Σbwin == ×ᴳ-inl
[ (λ G → GroupHom (C n (⊙BigWedge Y)) G) ↓ path ]
⊙Σbwin-over = SER.φ-over-iso
⊙select-over : CF-hom n ⊙select == ×ᴳ-snd {G = C n (⊙BigWedge Y)}
[ (λ G → GroupHom G (C n X)) ↓ path ]
⊙select-over = SER.ψ-over-iso
open CofSelect public using (select; ⊙select; ⊙Σbwin)
-}
|
using CUDA.APIUtils
# @enum_without_prefix should re-export each enum value under a name with the
# given prefix stripped, aliasing the original value.
@testset "@enum_without_prefix" begin
    # Evaluate inside a gensym'd throwaway module so the generated bindings
    # do not leak into the test namespace.
    mod = @eval module $(gensym())
        using CUDA.APIUtils
        @enum MY_ENUM MY_ENUM_VALUE
        @enum_without_prefix MY_ENUM MY_
    end
    # The stripped alias must compare equal to the original enum value.
    @test mod.ENUM_VALUE == mod.MY_ENUM_VALUE
end
# @checked should generate two entry points per definition: `foo`, which runs
# the body through the enclosing module's `@check` macro, and `unsafe_foo`,
# which calls the body directly.
@testset "@checked" begin
    mod = @eval module $(gensym())
        using CUDA.APIUtils
        # Counter incremented by every invocation of the `@check` wrapper.
        const checks = Ref(0)
        macro check(ex)
            esc(quote
                $checks[] += 1
                $ex
            end)
        end
        @checked function foo()
            ccall(:jl_getpid, Cint, ())
        end
    end
    # Nothing has been called yet.
    @test mod.checks[] == 0
    # The checked variant runs the body under `@check` exactly once.
    @test mod.foo() == getpid()
    @test mod.checks[] == 1
    # The unsafe variant bypasses `@check`: counter stays at 1.
    @test mod.unsafe_foo() == getpid()
    @test mod.checks[] == 1
end
# CUDA.@argout should return `nothing` when no argument is tagged with
# `out(...)`, the single tagged value when one is, and a tuple when several
# are — regardless of the callee's own return value.
@testset "@argout" begin
    # Arity-overloaded helper; its return values (1/2/3) are deliberately
    # ignored by @argout.
    f() = 1
    f(a) = 2
    f(a,b) = 3
    @test CUDA.@argout(f()) == nothing
    @test CUDA.@argout(f(out(4))) == 4
    @test CUDA.@argout(f(out(5), out(6))) == (5,6)
end
|
function [ value, ifault ] = alogam ( x )
%% ALOGAM evaluates the natural logarithm of the Gamma function.
%
%  The argument is first shifted upward with Gamma(t+1) = t*Gamma(t) until
%  it reaches 7.0 (recording the product of the skipped factors), after
%  which Stirling's asymptotic series with four correction terms is applied.
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Reference:
%
%    Malcolm Pike, David Hill,
%    Algorithm 291: Logarithm of Gamma Function,
%    Communications of the ACM,
%    Volume 9, Number 9, September 1966, page 684.
%
%  Input:
%
%    X, the argument of the Gamma function; must be greater than 0.
%
%  Output:
%
%    VALUE, log(Gamma(X)), or 0.0 when X is invalid.
%
%    IFAULT, error flag: 0 for success, 1 when X <= 0.
%
  % Reject non-positive arguments.
  if ( x <= 0.0 )
    ifault = 1;
    value = 0.0;
    return
  end

  ifault = 0;

  % Shift the argument into [7, inf):
  % log(Gamma(x)) = log(Gamma(t)) - log( x*(x+1)*...*(t-1) ).
  t = x;
  shift = 0.0;
  if ( t < 7.0 )
    fac = 1.0;
    while ( t < 7.0 )
      fac = fac * t;
      t = t + 1.0;
    end
    shift = - log ( fac );
  end

  % Stirling's series at t >= 7; w = 1/t^2 drives the correction terms.
  w = 1.0 / t / t;
  series = ((( ...
    - 0.000595238095238 * w ...
    + 0.000793650793651 ) * w ...
    - 0.002777777777778 ) * w ...
    + 0.083333333333333 ) / t;

  value = shift + ( t - 0.5 ) * log ( t ) - t ...
    + 0.918938533204673 + series;

  return
end
|
If $r$ is a real number, then $\mathrm{Re}(r / z) = \frac{r \cdot \mathrm{Re}(z)}{|z|^2}$.
! Print a fixed greeting to standard output; used to demonstrate calling
! Fortran from another language.
!
! NOTE(review): declared as a FUNCTION but no result value is ever
! assigned, so the return value is undefined and callers must ignore it —
! confirm whether a SUBROUTINE was intended.
      FUNCTION print_fortran()
      PRINT *, 'Hello World from Fortran'
      RETURN
      END
|
On 14 October 2013, Woodhouse was announced as the assistant manager at Northern Premier League Division One South club Goole, with former teammate David Holdsworth being appointed manager. In January 2014 Woodhouse replaced Holdsworth as manager following Holdsworth's resignation. Woodhouse then left Goole after some issues with the board, and took over at Hull United in January 2015.
|
module FFT.FFTData (
fftbFlySParZip,
fftbFlySParZipMap,
fftMapReduce
) where
import Data.Complex
import FFT.Samples
import System.Environment
import Control.Parallel
import Strategies
import Criterion.Main
import FFT.Orig
-- | The k-th twiddle factor for an n-point FFT: the point exp(-2*pi*i*k/n)
-- on the unit circle.
tw :: Int -> Int -> Complex Float
tw n k = cis theta
  where
    theta = negate (2 * pi * fromIntegral k) / fromIntegral n
-- | Recursive FFT expressed through the map/reduce skeleton: 'bflyS' is the
-- map stage and the reduce stage recurses on both halves, interleaving the
-- sub-results (with the head element sparked via 'parinterleave').
fftMapReduce :: [Complex Float] -> [Complex Float]
fftMapReduce [a] = [a]
fftMapReduce as =
  parMapReduceSimple rdeepseq bflyS rdeepseq combine [as]
  where
    -- The skeleton hands back a singleton list of (sums, twiddled diffs).
    combine [(ls, rs)] = parinterleave (fftMapReduce ls) (fftMapReduce rs)
-- | FFT whose butterfly stage uses the 'parzipwith'-based 'bflySParZip';
-- the two recursive sub-transforms use the sequential 'fft'.
fftbFlySParZip :: [Complex Float] -> [Complex Float]
fftbFlySParZip [a] = [a]
fftbFlySParZip as =
  let (sums, tdiffs) = bflySParZip as
  in interleave (fft sums) (fft tdiffs)
-- | Like 'fftbFlySParZip', but the butterfly stage ('bflySParZipMap') also
-- computes the twiddle factors with a parallel map.
fftbFlySParZipMap :: [Complex Float] -> [Complex Float]
fftbFlySParZipMap [a] = [a]
fftbFlySParZipMap as =
  let (sums, tdiffs) = bflySParZipMap as
  in interleave (fft sums) (fft tdiffs)
-- Interleave two lists, sparking the head element for parallel evaluation
-- (via 'par') before consing it on.
-- NOTE(review): the recursive call delegates to the sequential 'interleave',
-- so only the very first element is ever sparked — confirm whether
-- 'parinterleave' was intended in the recursive position.
parinterleave [] bs = bs
parinterleave (a:as) bs = a `par` a : interleave bs as
-- | Round-robin merge of two lists, alternating elements starting with the
-- first list; once one list is exhausted the rest of the other is appended.
interleave :: [a] -> [a] -> [a]
interleave xs ys =
  case xs of
    []         -> ys
    (x : rest) -> x : interleave ys rest
-- | Sequential butterfly step: splits the input in half, returning the
-- pairwise sums and the twiddle-factor-scaled pairwise differences.
bflyS :: [Complex Float] -> ([Complex Float], [Complex Float])
bflyS as = (sums, twiddled)
  where
    (front, back) = halve as
    sums          = zipWith (+) front back
    diffs         = zipWith (-) front back
    -- Scale the k-th difference by the k-th twiddle factor of the full size.
    twiddled      = zipWith (*) diffs
                      [tw (length as) i | i <- [0 .. length diffs - 1]]
-- Butterfly step where the element-wise sums, differences and twiddle
-- multiplications each run through 'parzipwith'.
bflySParZip as = (sums, twiddled)
  where
    (front, back) = halve as
    sums          = parzipwith (+) front back
    diffs         = parzipwith (-) front back
    twiddled      = parzipwith (*) diffs
                      [tw (length as) i | i <- [0 .. length diffs - 1]]
-- Butterfly step using 'parzipwith' throughout, and additionally computing
-- the twiddle factors themselves with a parallel map.
bflySParZipMap as = (sums, twiddled)
  where
    (front, back) = halve as
    sums          = parzipwith (+) front back
    diffs         = parzipwith (-) front back
    twiddled      = parzipwith (*) diffs
                      (parmap (tw (length as)) [0 .. length diffs - 1])
-- | Split a list into two halves; when the length is odd, the extra
-- element goes into the first half.
halve :: [a] -> ([a], [a])
halve as = splitAt midpoint as
  where
    midpoint = (length as + 1) `div` 2
|
#### text_rep ===============================================================
# Tests for text_dup(): repeat each element of a character vector (or each
# element of every vector in a list) a given number of times.
context("text_dup test_rep")

test_that("text_dup text_rep", {
  # A count of 1 returns the input unchanged.
  expect_true({
    all(
      text_dup(letters[1:2],1)==c("a","b")
    )
  })
  # The repetition count has no default and must be supplied.
  expect_error({
    text_dup(letters[1:2])
  })
  # Each element is repeated in place, not the vector as a whole.
  expect_true({
    all(
      text_dup(letters[1:2],2)==c("aa","bb")
    )
  })
  # Multi-character and non-ASCII ("\ue4") elements are handled alike.
  expect_true({
    all(
      text_dup(c("bubbu","\ue4"),2)==c("bubbububbu","\ue4\ue4")
    )
  })
  # List input: every vector in the list is processed element-wise, with
  # numeric input coerced to character.
  expect_true({
    all(
      text_dup(list(1:4, 1:2),4)[[1]]==c("1111","2222","3333","4444")
    )
  })
  expect_true({
    all(
      text_dup(list(1:4, 1:2),4)[[2]]==c("1111","2222")
    )
  })
  # vectorize = TRUE pairs elements with counts and returns, per input
  # vector, an object with components i (element), p (count) and t (text) —
  # presumably a data frame; confirm against text_dup()'s documentation.
  expect_true({
    all(
      text_dup(list(1:4, 1:2),4:1, vectorize = TRUE)[[1]]$i==1:4
    )
  })
  # NOTE(review): this expectation is a verbatim duplicate of the previous
  # one — possibly [[2]] was intended; confirm.
  expect_true({
    all(
      text_dup(list(1:4, 1:2),4:1, vectorize = TRUE)[[1]]$i==1:4
    )
  })
  # The shorter of elements/counts is recycled against the longer.
  expect_true({
    all(
      text_dup(list(1:2, 1:4),4:1, vectorize = TRUE)[[1]]$i==c(1,2,1,2)
    )
  })
  expect_true({
    all(
      text_dup(list(1:4, 1:2),1:2, vectorize = TRUE)[[1]]$p==c(1,2,1,2)
    )
  })
  # The duplicated-text component is character, with counts applied pairwise.
  expect_true({
    class(text_dup(list(1:4, 1:2),4:1, vectorize = TRUE)[[1]]$t)=="character"
  })
  expect_true({
    all(
      text_dup(list(1:4, 1:2),4:1, vectorize = TRUE)[[1]]$t==c("1111","222","33","4")
    )
  })
  expect_true({
    all(
      text_dup(list(1:4, 1:2),4:1, vectorize = TRUE)[[2]]$t==c("1111","222","11","2")
    )
  })
})
|
(*
Copyright 2018
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
theory sys_getpid_mem
  imports syscall
begin

text \<open>One locale per function in the binary.\<close>

(* The locale fixes symbolic values for the initial stack/frame pointers,
   the address of the current-task pointer, and an arbitrary byte address
   [a] whose content [v\<^sub>0] must be preserved; [blocks] is the set of
   separated memory regions used by the symbolic-execution machinery. *)
locale sys_getpid_function = syscall_context +
  fixes rsp\<^sub>0 rbp\<^sub>0 current_task id' a sys_getpid_ret :: \<open>64 word\<close>
    and v\<^sub>0 :: \<open>8 word\<close>
    and blocks :: \<open>(nat \<times> 64 word \<times> nat) set\<close>
  assumes seps: \<open>seps blocks\<close>
      and masters:
        \<open>master blocks (a, 1) 0\<close>
        \<open>master blocks (rsp\<^sub>0, 8) 1\<close>
        \<open>master blocks (rsp\<^sub>0-8, 8) 2\<close>
        \<open>master blocks (rsp\<^sub>0-16, 8) 3\<close>
        \<open>master blocks (current_task, 8) 4\<close>
        \<open>master blocks (id', 4) 5\<close>
      and ret_address: \<open>outside sys_getpid_ret 0 23\<close> \<comment> \<open>Only works for non-recursive functions.\<close>
      and task: \<open>the (label_to_address assembly ''current_task'') = current_task\<close>
begin

text \<open>
  The Floyd invariant expresses for some locations properties that are invariably true.
  Simply expresses that a byte in the memory remains untouched.
\<close>

(* Maps the entry address (boffset) to the precondition and the return
   address to the postcondition; in both, the byte at [a] still holds v\<^sub>0. *)
definition pp_\<Theta> :: floyd_invar where
  \<open>pp_\<Theta> \<equiv> [
    \<comment> \<open>precondition\<close>
    boffset \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0
      \<and> regs \<sigma> rbp = rbp\<^sub>0
      \<and> \<sigma> \<turnstile> *[current_task,8] = id'
      \<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+sys_getpid_ret
      \<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
    \<comment> \<open>postcondition\<close>
    boffset+sys_getpid_ret \<mapsto> \<lambda>\<sigma>. \<sigma> \<turnstile> *[a,1] = v\<^sub>0
      \<and> regs \<sigma> rsp = rsp\<^sub>0+8
      \<and> regs \<sigma> rbp = rbp\<^sub>0
  ]\<close>

text \<open>Adding some rules to the simplifier to simplify proofs.\<close>

(* Unfold pp_\<Theta> at the three syntactic address shapes generated by the VCG. *)
schematic_goal pp_\<Theta>_zero[simp]:
  shows \<open>pp_\<Theta> boffset = ?x\<close>
  unfolding pp_\<Theta>_def
  by simp

schematic_goal pp_\<Theta>_numeral_l[simp]:
  shows \<open>pp_\<Theta> (n + boffset) = ?x\<close>
  unfolding pp_\<Theta>_def
  by simp

schematic_goal pp_\<Theta>_numeral_r[simp]:
  shows \<open>pp_\<Theta> (boffset + n) = ?x\<close>
  unfolding pp_\<Theta>_def
  by simp

(* Main verification lemma: symbolic execution from the entry point
   discharges the Floyd invariant, so the byte at [a] is preserved across
   the function. *)
lemma rewrite_sys_getpid_mem:
  \<open>is_std_invar sys_getpid_ret (floyd.invar sys_getpid_ret pp_\<Theta>)\<close>
  text \<open>Boilerplate code to start the VCG\<close>
  apply (rule floyd_invarI)
  apply (rewrite at \<open>floyd_vcs sys_getpid_ret \<hole> _\<close> pp_\<Theta>_def)
  apply (intro floyd_vcsI)
  text \<open>Subgoal for rip = boffset\<close>
  subgoal premises prems for \<sigma>
    text \<open>Insert relevant knowledge\<close>
    apply (insert prems seps ret_address task)
    text \<open>Apply VCG/symb.\ execution\<close>
    apply (symbolic_execution masters: masters)+
    apply (finish_symbolic_execution masters: masters)
    done
  text \<open>Trivial ending subgoal.\<close>
  subgoal
    by simp
  done

end
end
|
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
x : E
⊢ x ∈ parallelepiped v ↔ ∃ t _ht, x = ∑ i : ι, t i • v i
[PROOFSTEP]
simp [parallelepiped, eq_comm]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
f : E →ₗ[ℝ] F
v : ι → E
⊢ ↑f '' parallelepiped v = parallelepiped (↑f ∘ v)
[PROOFSTEP]
simp only [parallelepiped, ← image_comp]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
f : E →ₗ[ℝ] F
v : ι → E
⊢ ((fun a => ↑f a) ∘ fun t => ∑ i : ι, t i • v i) '' Icc 0 1 = (fun a => ∑ x : ι, a x • (↑f ∘ v) x) '' Icc 0 1
[PROOFSTEP]
congr 1 with t
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
f : E →ₗ[ℝ] F
v : ι → E
t : F
⊢ t ∈ ((fun a => ↑f a) ∘ fun t => ∑ i : ι, t i • v i) '' Icc 0 1 ↔ t ∈ (fun a => ∑ x : ι, a x • (↑f ∘ v) x) '' Icc 0 1
[PROOFSTEP]
simp only [Function.comp_apply, LinearMap.map_sum, LinearMap.map_smulₛₗ, RingHom.id_apply]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
⊢ parallelepiped (v ∘ ↑e) = parallelepiped v
[PROOFSTEP]
simp only [parallelepiped]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
⊢ (fun a => ∑ x : ι', a x • (v ∘ ↑e) x) '' Icc 0 1 = (fun t => ∑ i : ι, t i • v i) '' Icc 0 1
[PROOFSTEP]
let K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a : ι' => ℝ) e
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
⊢ (fun a => ∑ x : ι', a x • (v ∘ ↑e) x) '' Icc 0 1 = (fun t => ∑ i : ι, t i • v i) '' Icc 0 1
[PROOFSTEP]
have : Icc (0 : ι → ℝ) 1 = K '' Icc (0 : ι' → ℝ) 1 :=
by
rw [← Equiv.preimage_eq_iff_eq_image]
ext x
simp only [mem_preimage, mem_Icc, Pi.le_def, Pi.zero_apply, Equiv.piCongrLeft'_apply, Pi.one_apply]
refine' ⟨fun h => ⟨fun i => _, fun i => _⟩, fun h => ⟨fun i => h.1 (e.symm i), fun i => h.2 (e.symm i)⟩⟩
· simpa only [Equiv.symm_apply_apply] using h.1 (e i)
· simpa only [Equiv.symm_apply_apply] using h.2 (e i)
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
⊢ Icc 0 1 = ↑K '' Icc 0 1
[PROOFSTEP]
rw [← Equiv.preimage_eq_iff_eq_image]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
⊢ ↑K ⁻¹' Icc 0 1 = Icc 0 1
[PROOFSTEP]
ext x
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
x : ι' → ℝ
⊢ x ∈ ↑K ⁻¹' Icc 0 1 ↔ x ∈ Icc 0 1
[PROOFSTEP]
simp only [mem_preimage, mem_Icc, Pi.le_def, Pi.zero_apply, Equiv.piCongrLeft'_apply, Pi.one_apply]
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
x : ι' → ℝ
⊢ ((∀ (i : ι), 0 ≤ x (↑e.symm i)) ∧ ∀ (i : ι), x (↑e.symm i) ≤ 1) ↔ (∀ (i : ι'), 0 ≤ x i) ∧ ∀ (i : ι'), x i ≤ 1
[PROOFSTEP]
refine' ⟨fun h => ⟨fun i => _, fun i => _⟩, fun h => ⟨fun i => h.1 (e.symm i), fun i => h.2 (e.symm i)⟩⟩
[GOAL]
case h.refine'_1
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
x : ι' → ℝ
h : (∀ (i : ι), 0 ≤ x (↑e.symm i)) ∧ ∀ (i : ι), x (↑e.symm i) ≤ 1
i : ι'
⊢ 0 ≤ x i
[PROOFSTEP]
simpa only [Equiv.symm_apply_apply] using h.1 (e i)
[GOAL]
case h.refine'_2
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
x : ι' → ℝ
h : (∀ (i : ι), 0 ≤ x (↑e.symm i)) ∧ ∀ (i : ι), x (↑e.symm i) ≤ 1
i : ι'
⊢ x i ≤ 1
[PROOFSTEP]
simpa only [Equiv.symm_apply_apply] using h.2 (e i)
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
this : Icc 0 1 = ↑K '' Icc 0 1
⊢ (fun a => ∑ x : ι', a x • (v ∘ ↑e) x) '' Icc 0 1 = (fun t => ∑ i : ι, t i • v i) '' Icc 0 1
[PROOFSTEP]
rw [this, ← image_comp]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
this : Icc 0 1 = ↑K '' Icc 0 1
⊢ (fun a => ∑ x : ι', a x • (v ∘ ↑e) x) '' Icc 0 1 = (fun t => ∑ i : ι, t i • v i) ∘ ↑K '' Icc 0 1
[PROOFSTEP]
congr 1 with x
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
this : Icc 0 1 = ↑K '' Icc 0 1
x : E
⊢ x ∈ (fun a => ∑ x : ι', a x • (v ∘ ↑e) x) '' Icc 0 1 ↔ x ∈ (fun t => ∑ i : ι, t i • v i) ∘ ↑K '' Icc 0 1
[PROOFSTEP]
have := fun z : ι' → ℝ => e.symm.sum_comp fun i => z i • v (e i)
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
this✝ : Icc 0 1 = ↑K '' Icc 0 1
x : E
this : ∀ (z : ι' → ℝ), ∑ i : ι, z (↑e.symm i) • v (↑e (↑e.symm i)) = ∑ i : ι', z i • v (↑e i)
⊢ x ∈ (fun a => ∑ x : ι', a x • (v ∘ ↑e) x) '' Icc 0 1 ↔ x ∈ (fun t => ∑ i : ι, t i • v i) ∘ ↑K '' Icc 0 1
[PROOFSTEP]
simp_rw [Equiv.apply_symm_apply] at this
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
e : ι' ≃ ι
K : (ι' → ℝ) ≃ (ι → ℝ) := Equiv.piCongrLeft' (fun _a => ℝ) e
this✝ : Icc 0 1 = ↑K '' Icc 0 1
x : E
this : ∀ (z : ι' → ℝ), ∑ x : ι, z (↑e.symm x) • v x = ∑ x : ι', z x • v (↑e x)
⊢ x ∈ (fun a => ∑ x : ι', a x • (v ∘ ↑e) x) '' Icc 0 1 ↔ x ∈ (fun t => ∑ i : ι, t i • v i) ∘ ↑K '' Icc 0 1
[PROOFSTEP]
simp_rw [Function.comp_apply, mem_image, mem_Icc, Equiv.piCongrLeft'_apply, this]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
⊢ parallelepiped ↑b = Icc 0 1 ∨ parallelepiped ↑b = Icc (-1) 0
[PROOFSTEP]
have e : ι ≃ Fin 1 := by
apply Fintype.equivFinOfCardEq
simp only [← finrank_eq_card_basis b.toBasis, finrank_self]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
⊢ ι ≃ Fin 1
[PROOFSTEP]
apply Fintype.equivFinOfCardEq
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
⊢ Fintype.card ι = 1
[PROOFSTEP]
simp only [← finrank_eq_card_basis b.toBasis, finrank_self]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
⊢ parallelepiped ↑b = Icc 0 1 ∨ parallelepiped ↑b = Icc (-1) 0
[PROOFSTEP]
have B : parallelepiped (b.reindex e) = parallelepiped b :=
by
convert parallelepiped_comp_equiv b e.symm
ext i
simp only [OrthonormalBasis.coe_reindex]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
⊢ parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
[PROOFSTEP]
convert parallelepiped_comp_equiv b e.symm
[GOAL]
case h.e'_2.h.e'_6
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
⊢ ↑(OrthonormalBasis.reindex b e) = ↑b ∘ ↑e.symm
[PROOFSTEP]
ext i
[GOAL]
case h.e'_2.h.e'_6.h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
i : Fin 1
⊢ ↑(OrthonormalBasis.reindex b e) i = (↑b ∘ ↑e.symm) i
[PROOFSTEP]
simp only [OrthonormalBasis.coe_reindex]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
⊢ parallelepiped ↑b = Icc 0 1 ∨ parallelepiped ↑b = Icc (-1) 0
[PROOFSTEP]
rw [← B]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
⊢ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc 0 1 ∨ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc (-1) 0
[PROOFSTEP]
let F : ℝ → Fin 1 → ℝ := fun t => fun _i => t
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
⊢ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc 0 1 ∨ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc (-1) 0
[PROOFSTEP]
have A : Icc (0 : Fin 1 → ℝ) 1 = F '' Icc (0 : ℝ) 1 :=
by
apply Subset.antisymm
· intro x hx
refine' ⟨x 0, ⟨hx.1 0, hx.2 0⟩, _⟩
ext j
simp only [Subsingleton.elim j 0]
· rintro x ⟨y, hy, rfl⟩
exact ⟨fun _j => hy.1, fun _j => hy.2⟩
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
⊢ Icc 0 1 = F '' Icc 0 1
[PROOFSTEP]
apply Subset.antisymm
[GOAL]
case h₁
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
⊢ Icc 0 1 ⊆ F '' Icc 0 1
[PROOFSTEP]
intro x hx
[GOAL]
case h₁
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
x : Fin 1 → ℝ
hx : x ∈ Icc 0 1
⊢ x ∈ F '' Icc 0 1
[PROOFSTEP]
refine' ⟨x 0, ⟨hx.1 0, hx.2 0⟩, _⟩
[GOAL]
case h₁
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
x : Fin 1 → ℝ
hx : x ∈ Icc 0 1
⊢ F (x 0) = x
[PROOFSTEP]
ext j
[GOAL]
case h₁.h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
x : Fin 1 → ℝ
hx : x ∈ Icc 0 1
j : Fin 1
⊢ F (x 0) j = x j
[PROOFSTEP]
simp only [Subsingleton.elim j 0]
[GOAL]
case h₂
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
⊢ F '' Icc 0 1 ⊆ Icc 0 1
[PROOFSTEP]
rintro x ⟨y, hy, rfl⟩
[GOAL]
case h₂.intro.intro
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
y : ℝ
hy : y ∈ Icc 0 1
⊢ F y ∈ Icc 0 1
[PROOFSTEP]
exact ⟨fun _j => hy.1, fun _j => hy.2⟩
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
A : Icc 0 1 = F '' Icc 0 1
⊢ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc 0 1 ∨ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc (-1) 0
[PROOFSTEP]
rcases orthonormalBasis_one_dim (b.reindex e) with (H | H)
[GOAL]
case inl
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
A : Icc 0 1 = F '' Icc 0 1
H : ↑(OrthonormalBasis.reindex b e) = fun x => 1
⊢ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc 0 1 ∨ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc (-1) 0
[PROOFSTEP]
left
[GOAL]
case inl.h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
A : Icc 0 1 = F '' Icc 0 1
H : ↑(OrthonormalBasis.reindex b e) = fun x => 1
⊢ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc 0 1
[PROOFSTEP]
simp_rw [parallelepiped, H, A, Algebra.id.smul_eq_mul, mul_one]
[GOAL]
case inl.h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
A : Icc 0 1 = F '' Icc 0 1
H : ↑(OrthonormalBasis.reindex b e) = fun x => 1
⊢ (fun a => ∑ x : Fin 1, a x) '' ((fun a _i => a) '' Icc 0 1) = Icc 0 1
[PROOFSTEP]
simp only [Finset.univ_unique, Fin.default_eq_zero, smul_eq_mul, mul_one, Finset.sum_singleton, ← image_comp,
Function.comp_apply, image_id', ge_iff_le, zero_le_one, not_true, gt_iff_lt]
[GOAL]
case inr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
A : Icc 0 1 = F '' Icc 0 1
H : ↑(OrthonormalBasis.reindex b e) = fun x => -1
⊢ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc 0 1 ∨ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc (-1) 0
[PROOFSTEP]
right
[GOAL]
case inr.h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
A : Icc 0 1 = F '' Icc 0 1
H : ↑(OrthonormalBasis.reindex b e) = fun x => -1
⊢ parallelepiped ↑(OrthonormalBasis.reindex b e) = Icc (-1) 0
[PROOFSTEP]
simp_rw [H, parallelepiped, Algebra.id.smul_eq_mul, A]
[GOAL]
case inr.h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F✝ : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F✝
inst✝ : Module ℝ F✝
b : OrthonormalBasis ι ℝ ℝ
e : ι ≃ Fin 1
B : parallelepiped ↑(OrthonormalBasis.reindex b e) = parallelepiped ↑b
F : ℝ → Fin 1 → ℝ := fun t _i => t
A : Icc 0 1 = F '' Icc 0 1
H : ↑(OrthonormalBasis.reindex b e) = fun x => -1
⊢ (fun a => ∑ x : Fin 1, a x * -1) '' ((fun a _i => a) '' Icc 0 1) = Icc (-1) 0
[PROOFSTEP]
simp only [Finset.univ_unique, Fin.default_eq_zero, mul_neg, mul_one, Finset.sum_neg_distrib, Finset.sum_singleton, ←
image_comp, Function.comp, image_neg, preimage_neg_Icc, neg_zero]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
⊢ parallelepiped v = ∑ i : ι, segment ℝ 0 (v i)
[PROOFSTEP]
ext
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
x✝ : E
⊢ x✝ ∈ parallelepiped v ↔ x✝ ∈ ∑ i : ι, segment ℝ 0 (v i)
[PROOFSTEP]
simp only [mem_parallelepiped_iff, Set.mem_finset_sum, Finset.mem_univ, forall_true_left, segment_eq_image, smul_zero,
zero_add, ← Set.pi_univ_Icc, Set.mem_univ_pi]
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
x✝ : E
⊢ (∃ t h, x✝ = ∑ i : ι, t i • v i) ↔ ∃ g h, ∑ i : ι, g i = x✝
[PROOFSTEP]
constructor
[GOAL]
case h.mp
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
x✝ : E
⊢ (∃ t h, x✝ = ∑ i : ι, t i • v i) → ∃ g h, ∑ i : ι, g i = x✝
[PROOFSTEP]
rintro ⟨t, ht, rfl⟩
[GOAL]
case h.mp.intro.intro
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
t : ι → ℝ
ht : ∀ (i : ι), t i ∈ Icc (OfNat.ofNat 0 i) (OfNat.ofNat 1 i)
⊢ ∃ g h, ∑ i : ι, g i = ∑ i : ι, t i • v i
[PROOFSTEP]
exact ⟨t • v, fun {i} => ⟨t i, ht _, by simp⟩, rfl⟩
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
t : ι → ℝ
ht : ∀ (i : ι), t i ∈ Icc (OfNat.ofNat 0 i) (OfNat.ofNat 1 i)
i : ι
⊢ (fun a => a • v i) (t i) = (t • v) i
[PROOFSTEP]
simp
[GOAL]
case h.mpr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
x✝ : E
⊢ (∃ g h, ∑ i : ι, g i = x✝) → ∃ t h, x✝ = ∑ i : ι, t i • v i
[PROOFSTEP]
rintro ⟨g, hg, rfl⟩
[GOAL]
case h.mpr.intro.intro
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v g : ι → E
hg : ∀ {i : ι}, g i ∈ (fun a => a • v i) '' Icc 0 1
⊢ ∃ t h, ∑ i : ι, g i = ∑ i : ι, t i • v i
[PROOFSTEP]
choose t ht hg using @hg
[GOAL]
case h.mpr.intro.intro
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v g : ι → E
t : {i : ι} → ℝ
ht : ∀ {i : ι}, t ∈ Icc 0 1
hg : ∀ {i : ι}, (fun a => a • v i) t = g i
⊢ ∃ t h, ∑ i : ι, g i = ∑ i : ι, t i • v i
[PROOFSTEP]
refine ⟨@t, @ht, ?_⟩
[GOAL]
case h.mpr.intro.intro
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v g : ι → E
t : {i : ι} → ℝ
ht : ∀ {i : ι}, t ∈ Icc 0 1
hg : ∀ {i : ι}, (fun a => a • v i) t = g i
⊢ ∑ i : ι, g i = ∑ i : ι, t • v i
[PROOFSTEP]
simp_rw [hg]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
⊢ Convex ℝ (parallelepiped v)
[PROOFSTEP]
rw [parallelepiped_eq_sum_segment]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
⊢ Convex ℝ (∑ i : ι, segment ℝ 0 (v i))
[PROOFSTEP]
exact convex_sum _ fun _i _hi => convex_segment _ _
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : AddCommGroup E
inst✝² : Module ℝ E
inst✝¹ : AddCommGroup F
inst✝ : Module ℝ F
v : ι → E
⊢ parallelepiped v = ↑(convexHull ℝ) (∑ i : ι, {0, v i})
[PROOFSTEP]
simp_rw [convexHull_sum, convexHull_pair, parallelepiped_eq_sum_segment]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a : ι → ℝ
⊢ (parallelepiped fun i => Pi.single i (a i)) = uIcc 0 a
[PROOFSTEP]
ext x
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
⊢ (x ∈ parallelepiped fun i => Pi.single i (a i)) ↔ x ∈ uIcc 0 a
[PROOFSTEP]
simp_rw [Set.uIcc, mem_parallelepiped_iff, Set.mem_Icc, Pi.le_def, ← forall_and, Pi.inf_apply, Pi.sup_apply, ←
Pi.single_smul', Pi.one_apply, Pi.zero_apply, ← Pi.smul_apply', Finset.univ_sum_single (_ : ι → ℝ)]
[GOAL]
case h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
⊢ (∃ t h, x = fun i => (t • a) i) ↔ ∀ (x_1 : ι), 0 ⊓ a x_1 ≤ x x_1 ∧ x x_1 ≤ 0 ⊔ a x_1
[PROOFSTEP]
constructor
[GOAL]
case h.mp
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
⊢ (∃ t h, x = fun i => (t • a) i) → ∀ (x_1 : ι), 0 ⊓ a x_1 ≤ x x_1 ∧ x x_1 ≤ 0 ⊔ a x_1
[PROOFSTEP]
rintro ⟨t, ht, rfl⟩ i
[GOAL]
case h.mp.intro.intro
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a t : ι → ℝ
ht : ∀ (x : ι), 0 ≤ t x ∧ t x ≤ 1
i : ι
⊢ 0 ⊓ a i ≤ (fun i => (t • a) i) i ∧ (fun i => (t • a) i) i ≤ 0 ⊔ a i
[PROOFSTEP]
specialize ht i
[GOAL]
case h.mp.intro.intro
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a t : ι → ℝ
i : ι
ht : 0 ≤ t i ∧ t i ≤ 1
⊢ 0 ⊓ a i ≤ (fun i => (t • a) i) i ∧ (fun i => (t • a) i) i ≤ 0 ⊔ a i
[PROOFSTEP]
simp_rw [smul_eq_mul, Pi.mul_apply]
[GOAL]
case h.mp.intro.intro
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a t : ι → ℝ
i : ι
ht : 0 ≤ t i ∧ t i ≤ 1
⊢ 0 ⊓ a i ≤ t i * a i ∧ t i * a i ≤ 0 ⊔ a i
[PROOFSTEP]
cases' le_total (a i) 0 with hai hai
[GOAL]
case h.mp.intro.intro.inl
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a t : ι → ℝ
i : ι
ht : 0 ≤ t i ∧ t i ≤ 1
hai : a i ≤ 0
⊢ 0 ⊓ a i ≤ t i * a i ∧ t i * a i ≤ 0 ⊔ a i
[PROOFSTEP]
rw [sup_eq_left.mpr hai, inf_eq_right.mpr hai]
[GOAL]
case h.mp.intro.intro.inl
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a t : ι → ℝ
i : ι
ht : 0 ≤ t i ∧ t i ≤ 1
hai : a i ≤ 0
⊢ a i ≤ t i * a i ∧ t i * a i ≤ 0
[PROOFSTEP]
exact ⟨le_mul_of_le_one_left hai ht.2, mul_nonpos_of_nonneg_of_nonpos ht.1 hai⟩
[GOAL]
case h.mp.intro.intro.inr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a t : ι → ℝ
i : ι
ht : 0 ≤ t i ∧ t i ≤ 1
hai : 0 ≤ a i
⊢ 0 ⊓ a i ≤ t i * a i ∧ t i * a i ≤ 0 ⊔ a i
[PROOFSTEP]
rw [sup_eq_right.mpr hai, inf_eq_left.mpr hai]
[GOAL]
case h.mp.intro.intro.inr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a t : ι → ℝ
i : ι
ht : 0 ≤ t i ∧ t i ≤ 1
hai : 0 ≤ a i
⊢ 0 ≤ t i * a i ∧ t i * a i ≤ a i
[PROOFSTEP]
exact ⟨mul_nonneg ht.1 hai, mul_le_of_le_one_left hai ht.2⟩
[GOAL]
case h.mpr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
⊢ (∀ (x_1 : ι), 0 ⊓ a x_1 ≤ x x_1 ∧ x x_1 ≤ 0 ⊔ a x_1) → ∃ t h, x = fun i => (t • a) i
[PROOFSTEP]
intro h
[GOAL]
case h.mpr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
h : ∀ (x_1 : ι), 0 ⊓ a x_1 ≤ x x_1 ∧ x x_1 ≤ 0 ⊔ a x_1
⊢ ∃ t h, x = fun i => (t • a) i
[PROOFSTEP]
refine' ⟨fun i => x i / a i, fun i => _, funext fun i => _⟩
[GOAL]
case h.mpr.refine'_1
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
h : ∀ (x_1 : ι), 0 ⊓ a x_1 ≤ x x_1 ∧ x x_1 ≤ 0 ⊔ a x_1
i : ι
⊢ 0 ≤ (fun i => x i / a i) i ∧ (fun i => x i / a i) i ≤ 1
[PROOFSTEP]
specialize h i
[GOAL]
case h.mpr.refine'_1
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 ⊓ a i ≤ x i ∧ x i ≤ 0 ⊔ a i
⊢ 0 ≤ (fun i => x i / a i) i ∧ (fun i => x i / a i) i ≤ 1
[PROOFSTEP]
cases' le_total (a i) 0 with hai hai
[GOAL]
case h.mpr.refine'_1.inl
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 ⊓ a i ≤ x i ∧ x i ≤ 0 ⊔ a i
hai : a i ≤ 0
⊢ 0 ≤ (fun i => x i / a i) i ∧ (fun i => x i / a i) i ≤ 1
[PROOFSTEP]
rw [sup_eq_left.mpr hai, inf_eq_right.mpr hai] at h
[GOAL]
case h.mpr.refine'_1.inl
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : a i ≤ x i ∧ x i ≤ 0
hai : a i ≤ 0
⊢ 0 ≤ (fun i => x i / a i) i ∧ (fun i => x i / a i) i ≤ 1
[PROOFSTEP]
exact ⟨div_nonneg_of_nonpos h.2 hai, div_le_one_of_ge h.1 hai⟩
[GOAL]
case h.mpr.refine'_1.inr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 ⊓ a i ≤ x i ∧ x i ≤ 0 ⊔ a i
hai : 0 ≤ a i
⊢ 0 ≤ (fun i => x i / a i) i ∧ (fun i => x i / a i) i ≤ 1
[PROOFSTEP]
rw [sup_eq_right.mpr hai, inf_eq_left.mpr hai] at h
[GOAL]
case h.mpr.refine'_1.inr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 ≤ x i ∧ x i ≤ a i
hai : 0 ≤ a i
⊢ 0 ≤ (fun i => x i / a i) i ∧ (fun i => x i / a i) i ≤ 1
[PROOFSTEP]
exact ⟨div_nonneg h.1 hai, div_le_one_of_le h.2 hai⟩
[GOAL]
case h.mpr.refine'_2
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
h : ∀ (x_1 : ι), 0 ⊓ a x_1 ≤ x x_1 ∧ x x_1 ≤ 0 ⊔ a x_1
i : ι
⊢ x i = ((fun i => x i / a i) • a) i
[PROOFSTEP]
specialize h i
[GOAL]
case h.mpr.refine'_2
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 ⊓ a i ≤ x i ∧ x i ≤ 0 ⊔ a i
⊢ x i = ((fun i => x i / a i) • a) i
[PROOFSTEP]
simp only [smul_eq_mul, Pi.mul_apply]
[GOAL]
case h.mpr.refine'_2
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 ⊓ a i ≤ x i ∧ x i ≤ 0 ⊔ a i
⊢ x i = x i / a i * a i
[PROOFSTEP]
cases' eq_or_ne (a i) 0 with hai hai
[GOAL]
case h.mpr.refine'_2.inl
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 ⊓ a i ≤ x i ∧ x i ≤ 0 ⊔ a i
hai : a i = 0
⊢ x i = x i / a i * a i
[PROOFSTEP]
rw [hai, inf_idem, sup_idem, ← le_antisymm_iff] at h
[GOAL]
case h.mpr.refine'_2.inl
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 = x i
hai : a i = 0
⊢ x i = x i / a i * a i
[PROOFSTEP]
rw [hai, ← h, zero_div, zero_mul]
[GOAL]
case h.mpr.refine'_2.inr
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁶ : Fintype ι
inst✝⁵ : Fintype ι'
inst✝⁴ : AddCommGroup E
inst✝³ : Module ℝ E
inst✝² : AddCommGroup F
inst✝¹ : Module ℝ F
inst✝ : DecidableEq ι
a x : ι → ℝ
i : ι
h : 0 ⊓ a i ≤ x i ∧ x i ≤ 0 ⊔ a i
hai : a i ≠ 0
⊢ x i = x i / a i * a i
[PROOFSTEP]
rw [div_mul_cancel _ hai]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : NormedAddCommGroup E
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ E
inst✝ : NormedSpace ℝ F
b : Basis ι ℝ E
⊢ Set.Nonempty
(interior
{ carrier := _root_.parallelepiped ↑b,
isCompact' := (_ : IsCompact ((fun t => ∑ i : ι, t i • ↑b i) '' Icc 0 1)) }.carrier)
[PROOFSTEP]
suffices H : Set.Nonempty (interior (b.equivFunL.symm.toHomeomorph '' Icc 0 1))
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : NormedAddCommGroup E
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ E
inst✝ : NormedSpace ℝ F
b : Basis ι ℝ E
H :
Set.Nonempty (interior (↑(ContinuousLinearEquiv.toHomeomorph (ContinuousLinearEquiv.symm (equivFunL b))) '' Icc 0 1))
⊢ Set.Nonempty
(interior
{ carrier := _root_.parallelepiped ↑b,
isCompact' := (_ : IsCompact ((fun t => ∑ i : ι, t i • ↑b i) '' Icc 0 1)) }.carrier)
[PROOFSTEP]
dsimp only [_root_.parallelepiped]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : NormedAddCommGroup E
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ E
inst✝ : NormedSpace ℝ F
b : Basis ι ℝ E
H :
Set.Nonempty (interior (↑(ContinuousLinearEquiv.toHomeomorph (ContinuousLinearEquiv.symm (equivFunL b))) '' Icc 0 1))
⊢ Set.Nonempty (interior ((fun t => ∑ i : ι, t i • ↑b i) '' Icc 0 1))
[PROOFSTEP]
convert H
[GOAL]
case h.e'_2.h.e'_3.h
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : NormedAddCommGroup E
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ E
inst✝ : NormedSpace ℝ F
b : Basis ι ℝ E
H :
Set.Nonempty (interior (↑(ContinuousLinearEquiv.toHomeomorph (ContinuousLinearEquiv.symm (equivFunL b))) '' Icc 0 1))
a✝¹ : ι → ℝ
a✝ : a✝¹ ∈ Icc 0 1
⊢ ∑ i : ι, a✝¹ i • ↑b i = ↑(ContinuousLinearEquiv.toHomeomorph (ContinuousLinearEquiv.symm (equivFunL b))) a✝¹
[PROOFSTEP]
exact (b.equivFun_symm_apply _).symm
[GOAL]
case H
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : NormedAddCommGroup E
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ E
inst✝ : NormedSpace ℝ F
b : Basis ι ℝ E
⊢ Set.Nonempty (interior (↑(ContinuousLinearEquiv.toHomeomorph (ContinuousLinearEquiv.symm (equivFunL b))) '' Icc 0 1))
[PROOFSTEP]
have A : Set.Nonempty (interior (Icc (0 : ι → ℝ) 1)) :=
by
rw [← pi_univ_Icc, interior_pi_set (@finite_univ ι _)]
simp only [univ_pi_nonempty_iff, Pi.zero_apply, Pi.one_apply, interior_Icc, nonempty_Ioo, zero_lt_one, imp_true_iff]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : NormedAddCommGroup E
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ E
inst✝ : NormedSpace ℝ F
b : Basis ι ℝ E
⊢ Set.Nonempty (interior (Icc 0 1))
[PROOFSTEP]
rw [← pi_univ_Icc, interior_pi_set (@finite_univ ι _)]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : NormedAddCommGroup E
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ E
inst✝ : NormedSpace ℝ F
b : Basis ι ℝ E
⊢ Set.Nonempty (pi univ fun i => interior (Icc (OfNat.ofNat 0 i) (OfNat.ofNat 1 i)))
[PROOFSTEP]
simp only [univ_pi_nonempty_iff, Pi.zero_apply, Pi.one_apply, interior_Icc, nonempty_Ioo, zero_lt_one, imp_true_iff]
[GOAL]
case H
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁵ : Fintype ι
inst✝⁴ : Fintype ι'
inst✝³ : NormedAddCommGroup E
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace ℝ E
inst✝ : NormedSpace ℝ F
b : Basis ι ℝ E
A : Set.Nonempty (interior (Icc 0 1))
⊢ Set.Nonempty (interior (↑(ContinuousLinearEquiv.toHomeomorph (ContinuousLinearEquiv.symm (equivFunL b))) '' Icc 0 1))
[PROOFSTEP]
rwa [← Homeomorph.image_interior, nonempty_image_iff]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁷ : Fintype ι
inst✝⁶ : Fintype ι'
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedAddCommGroup F
inst✝³ : NormedSpace ℝ E
inst✝² : NormedSpace ℝ F
inst✝¹ : MeasurableSpace E
inst✝ : BorelSpace E
b : Basis ι ℝ E
⊢ IsAddHaarMeasure (Basis.addHaar b)
[PROOFSTEP]
rw [Basis.addHaar]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁷ : Fintype ι
inst✝⁶ : Fintype ι'
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedAddCommGroup F
inst✝³ : NormedSpace ℝ E
inst✝² : NormedSpace ℝ F
inst✝¹ : MeasurableSpace E
inst✝ : BorelSpace E
b : Basis ι ℝ E
⊢ IsAddHaarMeasure (addHaarMeasure (Basis.parallelepiped b))
[PROOFSTEP]
exact Measure.isAddHaarMeasure_addHaarMeasure _
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁷ : Fintype ι
inst✝⁶ : Fintype ι'
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedAddCommGroup F
inst✝³ : NormedSpace ℝ E
inst✝² : NormedSpace ℝ F
inst✝¹ : MeasurableSpace E
inst✝ : BorelSpace E
b : Basis ι ℝ E
⊢ ↑↑(addHaar b) (_root_.parallelepiped ↑b) = 1
[PROOFSTEP]
rw [Basis.addHaar]
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝⁷ : Fintype ι
inst✝⁶ : Fintype ι'
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : NormedAddCommGroup F
inst✝³ : NormedSpace ℝ E
inst✝² : NormedSpace ℝ F
inst✝¹ : MeasurableSpace E
inst✝ : BorelSpace E
b : Basis ι ℝ E
⊢ ↑↑(addHaarMeasure (parallelepiped b)) (_root_.parallelepiped ↑b) = 1
[PROOFSTEP]
exact addHaarMeasure_self
[GOAL]
ι : Type u_1
ι' : Type u_2
E : Type u_3
F : Type u_4
inst✝¹ : Fintype ι
inst✝ : Fintype ι'
⊢ MeasureSpace ℝ
[PROOFSTEP]
infer_instance
|
c-----------------------------------------------------------------------
c     resid_BB_hgd_adv: accumulate the pointwise residual of the
c     discretised BB evolution equation and return its rms norm in res.
c
c     The interior stencil appears to discretise (Crank-Nicolson in
c     time, centred in space):
c         dBB/dt - advc*beta*(dBB/dx)/ctfmp + eta*BB - DLamx = 0
c     averaged over time levels n and n+1 -- TODO confirm against the
c     generating worksheet; this is machine-generated code, do not
c     hand-edit the long residual expression below.
c
c     ctfmp    : metric/coordinate factor at each grid point
c     n_*      : fields at time level n
c     np1_*    : fields at time level n+1
c     x        : spatial coordinate array (Nx points)
c     advc     : advection-term coefficient/switch
c     eta      : damping coefficient
c     ht, hx   : time and space step sizes
c     myzero   : zero constant (keeps x formally "used" at boundaries)
c     phys_bdy : flags, 1 when i=1 / i=Nx is a physical boundary
c     res      : output, sqrt(sum(qb**2)/Nx)
c-----------------------------------------------------------------------
      subroutine resid_BB_hgd_adv(ctfmp,n_BB,n_DLamx,n_beta,np1_BB,np1_D
     &Lamx,np1_beta,x,Nx,advc,eta,ht,hx,myzero,phys_bdy,res)
      implicit none
      integer i
      integer Nx
      real*8 advc
      real*8 eta
      real*8 ht
      real*8 hx
      real*8 myzero
      real*8 ctfmp(Nx)
      real*8 n_BB(Nx)
      real*8 n_DLamx(Nx)
      real*8 n_beta(Nx)
      real*8 np1_BB(Nx)
      real*8 np1_DLamx(Nx)
      real*8 np1_beta(Nx)
      real*8 x(Nx)
      integer phys_bdy(2)
      real*8 res
      real*8 qb
c     Accumulate squared residuals, normalise at the end.
      res = 0.0D0
c     Left physical boundary (i=1): Dirichlet-type residual np1_BB = 0.
c     myzero*x(i) is identically zero by construction.
      if (phys_bdy(1) .eq. 1) then
      do i=1, 1, 1
      qb = np1_BB(i) + myzero * x(i)
      res = res + qb**2
      end do
      endif
c     Interior points: Crank-Nicolson residual; 0.25 = 0.5 (time
c     average) * 0.5 (centred difference over 2*hx).
      do i=2, Nx-1, 1
      qb = (-0.1D1 * n_BB(i) + np1_BB(i)) / ht - 0.2500000000000000D0 *
     #advc * np1_beta(i) * (-0.1D1 * np1_BB(i - 1) + np1_BB(i + 1)) / hx
     # / ctfmp(i) + 0.5000000000000000D0 * eta * np1_BB(i) - 0.500000000
     #0000000D0 * np1_DLamx(i) - 0.2500000000000000D0 * advc * n_beta(i)
     # * (-0.1D1 * n_BB(i - 1) + n_BB(i + 1)) / hx / ctfmp(i) + 0.500000
     #0000000000D0 * eta * n_BB(i) - 0.5000000000000000D0 * n_DLamx(i)
      res = res + qb**2
      end do
c     Right physical boundary (i=Nx): same trivial residual as i=1.
      if (phys_bdy(2) .eq. 1) then
      do i=Nx, Nx, 1
      qb = np1_BB(i) + myzero * x(i)
      res = res + qb**2
      end do
      endif
c     NOTE(review): when a boundary is not physical its point is skipped
c     yet Nx still divides the sum -- presumably intentional for this
c     multigrid/AMR driver; confirm upstream.
      res = sqrt(res/(1*Nx))
      END
|
If $k$ is a basis vector, then the dimension of the hyperplane $\{x \in \mathbb{R}^n \mid k \cdot x = 0\}$ is $n-1$. |
-- Was used for trying out graphics
||| A circle with integer centre coordinates (x, y) and radius r,
||| in pixel units of the 640x480 drawing area used below.
record Circle where
  constructor MkCircle
  -- x, y: centre position; r: radius
  x, y, r : Int
||| Per-update velocity of a circle: dx and dy are the signed amounts
||| added to the centre coordinates on each step.
record CircleMove where
  constructor MkCircleMove
  dx, dy : Int
||| Advance the bouncing-circle state one step.
|||
||| Reads the current `Circle` and `CircleMove` from their labelled
||| STATE effects, translates the circle by (dx, dy), flips the sign of
||| a velocity component when the moved circle reaches the edge of the
||| 640x480 area, and writes both updated values back.
updateCircle : Eff ()
  [ 'Circle ::: STATE Circle
  , 'CircleMove ::: STATE CircleMove
  ]
updateCircle = do
  circle <- 'Circle :- get
  circleMove <- 'CircleMove :- get
  let (newCircle, newCircleMove) = moveCircle circle circleMove
  'Circle :- put newCircle
  'CircleMove :- put newCircleMove
 where
  -- Pure step function: returns the moved circle plus the (possibly
  -- reflected) velocity.
  moveCircle : Circle -> CircleMove -> (Circle, CircleMove)
  moveCircle circle circleMove =
    let newCircle@(MkCircle cx cy cr) = record
          { x $= (+ dx circleMove)
          , y $= (+ dy circleMove)
          } circle
        -- Bounce: if the already-moved centre is within r of a window
        -- edge, negate that velocity component ((0 -)); otherwise keep
        -- it unchanged ((0 +)).
        -- NOTE(review): the test uses the *new* position, so the sign
        -- is negated on every step while out of bounds -- confirm this
        -- cannot oscillate for fast circles.
        newCircleMove = record
          { dx $= if cx < cr || cx > 640 - cr then (0 -) else (0 +)
          , dy $= if cy < cr || cy > 480 - cr then (0 -) else (0 +)
          } circleMove
    in (newCircle, newCircleMove)
|
/-
Copyright (c) 2021 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
! This file was ported from Lean 3 source module order.monotone.monovary
! leanprover-community/mathlib commit 6cb77a8eaff0ddd100e87b1591c6d3ad319514ff
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Data.Set.Image
/-!
# Monovariance of functions
Two functions *vary together* if a strict change in the first implies a change in the second.
This is in some sense a way to say that two functions `f : ι → α`, `g : ι → β` are "monotone
together", without actually having an order on `ι`.
This condition comes up in the rearrangement inequality. See `Algebra.Order.Rearrangement`.
## Main declarations
* `Monovary f g`: `f` monovaries with `g`. If `g i < g j`, then `f i ≤ f j`.
* `Antivary f g`: `f` antivaries with `g`. If `g i < g j`, then `f j ≤ f i`.
* `MonovaryOn f g s`: `f` monovaries with `g` on `s`.
* `AntivaryOn f g s`: `f` antivaries with `g` on `s`.
-/
open Function Set
variable {ι ι' α β γ : Type _}
section Preorder
variable [Preorder α] [Preorder β] [Preorder γ] {f : ι → α} {f' : α → γ} {g : ι → β} {g' : β → γ}
{s t : Set ι}
-- Note: all four predicates use strict-implicit binders `⦃i j⦄`, so a proof
-- `h : Monovary f g` is applied directly to an inequality proof: `h hij`.
/-- `f` monovaries with `g` if `g i < g j` implies `f i ≤ f j`. -/
def Monovary (f : ι → α) (g : ι → β) : Prop :=
  ∀ ⦃i j⦄, g i < g j → f i ≤ f j
#align monovary Monovary
/-- `f` antivaries with `g` if `g i < g j` implies `f j ≤ f i`. -/
def Antivary (f : ι → α) (g : ι → β) : Prop :=
  ∀ ⦃i j⦄, g i < g j → f j ≤ f i
#align antivary Antivary
/-- `f` monovaries with `g` on `s` if `g i < g j` implies `f i ≤ f j` for all `i, j ∈ s`. -/
def MonovaryOn (f : ι → α) (g : ι → β) (s : Set ι) : Prop :=
  ∀ ⦃i⦄ (_ : i ∈ s) ⦃j⦄ (_ : j ∈ s), g i < g j → f i ≤ f j
#align monovary_on MonovaryOn
/-- `f` antivaries with `g` on `s` if `g i < g j` implies `f j ≤ f i` for all `i, j ∈ s`. -/
def AntivaryOn (f : ι → α) (g : ι → β) (s : Set ι) : Prop :=
  ∀ ⦃i⦄ (_ : i ∈ s) ⦃j⦄ (_ : j ∈ s), g i < g j → f j ≤ f i
#align antivary_on AntivaryOn
-- A global (anti/mono)variance restricts to any set.
protected theorem Monovary.monovaryOn (h : Monovary f g) (s : Set ι) : MonovaryOn f g s :=
  fun _ _ _ _ hij => h hij
#align monovary.monovary_on Monovary.monovaryOn
protected theorem Antivary.antivaryOn (h : Antivary f g) (s : Set ι) : AntivaryOn f g s :=
  fun _ _ _ _ hij => h hij
#align antivary.antivary_on Antivary.antivaryOn
-- On the empty set both predicates hold vacuously.
@[simp]
theorem MonovaryOn.empty : MonovaryOn f g ∅ := fun _ => False.elim
#align monovary_on.empty MonovaryOn.empty
@[simp]
theorem AntivaryOn.empty : AntivaryOn f g ∅ := fun _ => False.elim
#align antivary_on.empty AntivaryOn.empty
-- On `univ` the relative predicates coincide with the global ones.
@[simp]
theorem monovaryOn_univ : MonovaryOn f g univ ↔ Monovary f g :=
  ⟨fun h _ _ => h trivial trivial, fun h _ _ _ _ hij => h hij⟩
#align monovary_on_univ monovaryOn_univ
@[simp]
theorem antivaryOn_univ : AntivaryOn f g univ ↔ Antivary f g :=
  ⟨fun h _ _ => h trivial trivial, fun h _ _ _ _ hij => h hij⟩
#align antivary_on_univ antivaryOn_univ
-- Both predicates are antitone in the set argument.
protected theorem MonovaryOn.subset (hst : s ⊆ t) (h : MonovaryOn f g t) : MonovaryOn f g s :=
  fun _ hi _ hj => h (hst hi) (hst hj)
#align monovary_on.subset MonovaryOn.subset
protected theorem AntivaryOn.subset (hst : s ⊆ t) (h : AntivaryOn f g t) : AntivaryOn f g s :=
  fun _ hi _ hj => h (hst hi) (hst hj)
#align antivary_on.subset AntivaryOn.subset
-- A constant left function varies (either way) with anything via `le_rfl`;
-- a constant right function admits no strict rise, so the premise is false.
theorem monovary_const_left (g : ι → β) (a : α) : Monovary (const ι a) g := fun _ _ _ => le_rfl
#align monovary_const_left monovary_const_left
theorem antivary_const_left (g : ι → β) (a : α) : Antivary (const ι a) g := fun _ _ _ => le_rfl
#align antivary_const_left antivary_const_left
theorem monovary_const_right (f : ι → α) (b : β) : Monovary f (const ι b) := fun _ _ h =>
  (h.ne rfl).elim
#align monovary_const_right monovary_const_right
theorem antivary_const_right (f : ι → α) (b : β) : Antivary f (const ι b) := fun _ _ h =>
  (h.ne rfl).elim
#align antivary_const_right antivary_const_right
-- Every function monovaries with itself.
theorem monovary_self (f : ι → α) : Monovary f f := fun _ _ => le_of_lt
#align monovary_self monovary_self
theorem monovaryOn_self (f : ι → α) (s : Set ι) : MonovaryOn f f s := fun _ _ _ _ => le_of_lt
#align monovary_on_self monovaryOn_self
-- Over a subsingleton index type `g i < g j` is impossible (`i = j`), so
-- everything varies with everything in both directions.
protected theorem Subsingleton.monovary [Subsingleton ι] (f : ι → α) (g : ι → β) : Monovary f g :=
  fun _ _ h => (ne_of_apply_ne _ h.ne <| Subsingleton.elim _ _).elim
#align subsingleton.monovary Subsingleton.monovary
protected theorem Subsingleton.antivary [Subsingleton ι] (f : ι → α) (g : ι → β) : Antivary f g :=
  fun _ _ h => (ne_of_apply_ne _ h.ne <| Subsingleton.elim _ _).elim
#align subsingleton.antivary Subsingleton.antivary
protected theorem Subsingleton.monovaryOn [Subsingleton ι] (f : ι → α) (g : ι → β) (s : Set ι) :
    MonovaryOn f g s := fun _ _ _ _ h => (ne_of_apply_ne _ h.ne <| Subsingleton.elim _ _).elim
#align subsingleton.monovary_on Subsingleton.monovaryOn
protected theorem Subsingleton.antivaryOn [Subsingleton ι] (f : ι → α) (g : ι → β) (s : Set ι) :
    AntivaryOn f g s := fun _ _ _ _ h => (ne_of_apply_ne _ h.ne <| Subsingleton.elim _ _).elim
#align subsingleton.antivary_on Subsingleton.antivaryOn
-- Set-relative versions of the constant lemmas above.
theorem monovaryOn_const_left (g : ι → β) (a : α) (s : Set ι) : MonovaryOn (const ι a) g s :=
  fun _ _ _ _ _ => le_rfl
#align monovary_on_const_left monovaryOn_const_left
theorem antivaryOn_const_left (g : ι → β) (a : α) (s : Set ι) : AntivaryOn (const ι a) g s :=
  fun _ _ _ _ _ => le_rfl
#align antivary_on_const_left antivaryOn_const_left
theorem monovaryOn_const_right (f : ι → α) (b : β) (s : Set ι) : MonovaryOn f (const ι b) s :=
  fun _ _ _ _ h => (h.ne rfl).elim
#align monovary_on_const_right monovaryOn_const_right
theorem antivaryOn_const_right (f : ι → α) (b : β) (s : Set ι) : AntivaryOn f (const ι b) s :=
  fun _ _ _ _ h => (h.ne rfl).elim
#align antivary_on_const_right antivaryOn_const_right
-- Precomposing with any reindexing map `k : ι' → ι` preserves both relations;
-- for the set versions the set is pulled back along `k`.
theorem Monovary.comp_right (h : Monovary f g) (k : ι' → ι) : Monovary (f ∘ k) (g ∘ k) :=
  fun _ _ hij => h hij
#align monovary.comp_right Monovary.comp_right
theorem Antivary.comp_right (h : Antivary f g) (k : ι' → ι) : Antivary (f ∘ k) (g ∘ k) :=
  fun _ _ hij => h hij
#align antivary.comp_right Antivary.comp_right
theorem MonovaryOn.comp_right (h : MonovaryOn f g s) (k : ι' → ι) :
    MonovaryOn (f ∘ k) (g ∘ k) (k ⁻¹' s) := fun _ hi _ hj => h hi hj
#align monovary_on.comp_right MonovaryOn.comp_right
theorem AntivaryOn.comp_right (h : AntivaryOn f g s) (k : ι' → ι) :
    AntivaryOn (f ∘ k) (g ∘ k) (k ⁻¹' s) := fun _ hi _ hj => h hi hj
#align antivary_on.comp_right AntivaryOn.comp_right
-- Postcomposing `f` with a monotone map preserves the direction of variance;
-- an antitone map flips it.
theorem Monovary.comp_monotone_left (h : Monovary f g) (hf : Monotone f') : Monovary (f' ∘ f) g :=
  fun _ _ hij => hf <| h hij
#align monovary.comp_monotone_left Monovary.comp_monotone_left
theorem Monovary.comp_antitone_left (h : Monovary f g) (hf : Antitone f') : Antivary (f' ∘ f) g :=
  fun _ _ hij => hf <| h hij
#align monovary.comp_antitone_left Monovary.comp_antitone_left
theorem Antivary.comp_monotone_left (h : Antivary f g) (hf : Monotone f') : Antivary (f' ∘ f) g :=
  fun _ _ hij => hf <| h hij
#align antivary.comp_monotone_left Antivary.comp_monotone_left
theorem Antivary.comp_antitone_left (h : Antivary f g) (hf : Antitone f') : Monovary (f' ∘ f) g :=
  fun _ _ hij => hf <| h hij
#align antivary.comp_antitone_left Antivary.comp_antitone_left
-- Set-relative analogues of the four lemmas above.
theorem MonovaryOn.comp_monotone_on_left (h : MonovaryOn f g s) (hf : Monotone f') :
    MonovaryOn (f' ∘ f) g s := fun _ hi _ hj hij => hf <| h hi hj hij
#align monovary_on.comp_monotone_on_left MonovaryOn.comp_monotone_on_left
theorem MonovaryOn.comp_antitone_on_left (h : MonovaryOn f g s) (hf : Antitone f') :
    AntivaryOn (f' ∘ f) g s := fun _ hi _ hj hij => hf <| h hi hj hij
#align monovary_on.comp_antitone_on_left MonovaryOn.comp_antitone_on_left
theorem AntivaryOn.comp_monotone_on_left (h : AntivaryOn f g s) (hf : Monotone f') :
    AntivaryOn (f' ∘ f) g s := fun _ hi _ hj hij => hf <| h hi hj hij
#align antivary_on.comp_monotone_on_left AntivaryOn.comp_monotone_on_left
theorem AntivaryOn.comp_antitone_on_left (h : AntivaryOn f g s) (hf : Antitone f') :
    MonovaryOn (f' ∘ f) g s := fun _ hi _ hj hij => hf <| h hi hj hij
#align antivary_on.comp_antitone_on_left AntivaryOn.comp_antitone_on_left
section OrderDual
open OrderDual
-- Flipping the order on the codomain of `f` and/or `g` exchanges `Monovary`
-- and `Antivary`; since the two definitions are literal transposes of each
-- other, every proof in this section is `id`, `swap` or `swap₂`.
theorem Monovary.dual : Monovary f g → Monovary (toDual ∘ f) (toDual ∘ g) :=
  swap
#align monovary.dual Monovary.dual
theorem Antivary.dual : Antivary f g → Antivary (toDual ∘ f) (toDual ∘ g) :=
  swap
#align antivary.dual Antivary.dual
theorem Monovary.dual_left : Monovary f g → Antivary (toDual ∘ f) g :=
  id
#align monovary.dual_left Monovary.dual_left
theorem Antivary.dual_left : Antivary f g → Monovary (toDual ∘ f) g :=
  id
#align antivary.dual_left Antivary.dual_left
theorem Monovary.dual_right : Monovary f g → Antivary f (toDual ∘ g) :=
  swap
#align monovary.dual_right Monovary.dual_right
theorem Antivary.dual_right : Antivary f g → Monovary f (toDual ∘ g) :=
  swap
#align antivary.dual_right Antivary.dual_right
theorem MonovaryOn.dual : MonovaryOn f g s → MonovaryOn (toDual ∘ f) (toDual ∘ g) s :=
  swap₂
#align monovary_on.dual MonovaryOn.dual
theorem AntivaryOn.dual : AntivaryOn f g s → AntivaryOn (toDual ∘ f) (toDual ∘ g) s :=
  swap₂
#align antivary_on.dual AntivaryOn.dual
theorem MonovaryOn.dual_left : MonovaryOn f g s → AntivaryOn (toDual ∘ f) g s :=
  id
#align monovary_on.dual_left MonovaryOn.dual_left
theorem AntivaryOn.dual_left : AntivaryOn f g s → MonovaryOn (toDual ∘ f) g s :=
  id
#align antivary_on.dual_left AntivaryOn.dual_left
theorem MonovaryOn.dual_right : MonovaryOn f g s → AntivaryOn f (toDual ∘ g) s :=
  swap₂
#align monovary_on.dual_right MonovaryOn.dual_right
theorem AntivaryOn.dual_right : AntivaryOn f g s → MonovaryOn f (toDual ∘ g) s :=
  swap₂
#align antivary_on.dual_right AntivaryOn.dual_right
-- `simp` lemmas rewriting dualised statements back to undualised ones.
@[simp]
theorem monovary_toDual_left : Monovary (toDual ∘ f) g ↔ Antivary f g :=
  Iff.rfl
#align monovary_to_dual_left monovary_toDual_left
@[simp]
theorem monovary_toDual_right : Monovary f (toDual ∘ g) ↔ Antivary f g :=
  forall_swap
#align monovary_to_dual_right monovary_toDual_right
@[simp]
theorem antivary_toDual_left : Antivary (toDual ∘ f) g ↔ Monovary f g :=
  Iff.rfl
#align antivary_to_dual_left antivary_toDual_left
@[simp]
theorem antivary_toDual_right : Antivary f (toDual ∘ g) ↔ Monovary f g :=
  forall_swap
#align antivary_to_dual_right antivary_toDual_right
@[simp]
theorem monovaryOn_toDual_left : MonovaryOn (toDual ∘ f) g s ↔ AntivaryOn f g s :=
  Iff.rfl
#align monovary_on_to_dual_left monovaryOn_toDual_left
@[simp]
theorem monovaryOn_toDual_right : MonovaryOn f (toDual ∘ g) s ↔ AntivaryOn f g s :=
  forall₂_swap
#align monovary_on_to_dual_right monovaryOn_toDual_right
@[simp]
theorem antivaryOn_toDual_left : AntivaryOn (toDual ∘ f) g s ↔ MonovaryOn f g s :=
  Iff.rfl
#align antivary_on_to_dual_left antivaryOn_toDual_left
@[simp]
theorem antivaryOn_toDual_right : AntivaryOn f (toDual ∘ g) s ↔ MonovaryOn f g s :=
  forall₂_swap
#align antivary_on_to_dual_right antivaryOn_toDual_right
end OrderDual
section PartialOrder
variable [PartialOrder ι]
-- When the index type is itself ordered, varying together with `id` is
-- exactly monotonicity/antitonicity (stated via the `forall_lt` forms).
@[simp]
theorem monovary_id_iff : Monovary f id ↔ Monotone f :=
  monotone_iff_forall_lt.symm
#align monovary_id_iff monovary_id_iff
@[simp]
theorem antivary_id_iff : Antivary f id ↔ Antitone f :=
  antitone_iff_forall_lt.symm
#align antivary_id_iff antivary_id_iff
@[simp]
theorem monovaryOn_id_iff : MonovaryOn f id s ↔ MonotoneOn f s :=
  monotoneOn_iff_forall_lt.symm
#align monovary_on_id_iff monovaryOn_id_iff
@[simp]
theorem antivaryOn_id_iff : AntivaryOn f id s ↔ AntitoneOn f s :=
  antitoneOn_iff_forall_lt.symm
#align antivary_on_id_iff antivaryOn_id_iff
end PartialOrder
variable [LinearOrder ι]
/-Porting note: Due to a bug in `alias`, many of the below lemmas have dot notation removed in the
proof-/
-- Monotone/antitone pairs over a linearly ordered index vary together in the
-- expected directions; each of the remaining cases reduces to the first one
-- by order duality (`dual_left`/`dual_right`).
protected theorem Monotone.monovary (hf : Monotone f) (hg : Monotone g) : Monovary f g :=
  fun _ _ hij => hf (hg.reflect_lt hij).le
#align monotone.monovary Monotone.monovary
protected theorem Monotone.antivary (hf : Monotone f) (hg : Antitone g) : Antivary f g :=
  (hf.monovary (Antitone.dual_right hg)).dual_right
#align monotone.antivary Monotone.antivary
protected theorem Antitone.monovary (hf : Antitone f) (hg : Antitone g) : Monovary f g :=
  (hf.dual_right.antivary hg).dual_left
#align antitone.monovary Antitone.monovary
protected theorem Antitone.antivary (hf : Antitone f) (hg : Monotone g) : Antivary f g :=
  (hf.monovary (Monotone.dual_right hg)).dual_right
#align antitone.antivary Antitone.antivary
-- Set-relative analogues.
protected theorem MonotoneOn.monovaryOn (hf : MonotoneOn f s) (hg : MonotoneOn g s) :
    MonovaryOn f g s := fun _ hi _ hj hij => hf hi hj (hg.reflect_lt hi hj hij).le
#align monotone_on.monovary_on MonotoneOn.monovaryOn
protected theorem MonotoneOn.antivaryOn (hf : MonotoneOn f s) (hg : AntitoneOn g s) :
    AntivaryOn f g s :=
  (hf.monovaryOn (AntitoneOn.dual_right hg)).dual_right
#align monotone_on.antivary_on MonotoneOn.antivaryOn
protected theorem AntitoneOn.monovaryOn (hf : AntitoneOn f s) (hg : AntitoneOn g s) :
    MonovaryOn f g s :=
  (hf.dual_right.antivaryOn hg).dual_left
#align antitone_on.monovary_on AntitoneOn.monovaryOn
protected theorem AntitoneOn.antivaryOn (hf : AntitoneOn f s) (hg : MonotoneOn g s) :
    AntivaryOn f g s :=
  (hf.monovaryOn (MonotoneOn.dual_right hg)).dual_right
#align antitone_on.antivary_on AntitoneOn.antivaryOn
end Preorder
section LinearOrder
variable [Preorder α] [LinearOrder β] [Preorder γ] {f : ι → α} {f' : α → γ} {g : ι → β} {g' : β → γ}
{s : Set ι}
theorem MonovaryOn.comp_monotoneOn_right (h : MonovaryOn f g s) (hg : MonotoneOn g' (g '' s)) :
MonovaryOn f (g' ∘ g) s := fun _ hi _ hj hij =>
h hi hj <| hg.reflect_lt (mem_image_of_mem _ hi) (mem_image_of_mem _ hj) hij
#align monovary_on.comp_monotone_on_right MonovaryOn.comp_monotoneOn_right
theorem MonovaryOn.comp_antitoneOn_right (h : MonovaryOn f g s) (hg : AntitoneOn g' (g '' s)) :
AntivaryOn f (g' ∘ g) s := fun _ hi _ hj hij =>
h hj hi <| hg.reflect_lt (mem_image_of_mem _ hi) (mem_image_of_mem _ hj) hij
#align monovary_on.comp_antitone_on_right MonovaryOn.comp_antitoneOn_right
theorem AntivaryOn.comp_monotoneOn_right (h : AntivaryOn f g s) (hg : MonotoneOn g' (g '' s)) :
AntivaryOn f (g' ∘ g) s := fun _ hi _ hj hij =>
h hi hj <| hg.reflect_lt (mem_image_of_mem _ hi) (mem_image_of_mem _ hj) hij
#align antivary_on.comp_monotone_on_right AntivaryOn.comp_monotoneOn_right
/-- If `f` antivaries with `g` on `s` and `g'` is antitone on `g '' s`, then `f` monovaries with
`g' ∘ g` on `s`. -/
theorem AntivaryOn.comp_antitoneOn_right (h : AntivaryOn f g s) (hg : AntitoneOn g' (g '' s)) :
MonovaryOn f (g' ∘ g) s := fun _ hi _ hj hij =>
h hj hi <| hg.reflect_lt (mem_image_of_mem _ hi) (mem_image_of_mem _ hj) hij
#align antivary_on.comp_antitone_on_right AntivaryOn.comp_antitoneOn_right
/-- If `f` monovaries with `g`, then `g` monovaries with `f`. -/
protected theorem Monovary.symm (h : Monovary f g) : Monovary g f := fun _ _ hf =>
le_of_not_lt fun hg => hf.not_le <| h hg
#align monovary.symm Monovary.symm
/-- If `f` antivaries with `g`, then `g` antivaries with `f`. -/
protected theorem Antivary.symm (h : Antivary f g) : Antivary g f := fun _ _ hf =>
le_of_not_lt fun hg => hf.not_le <| h hg
#align antivary.symm Antivary.symm
/-- If `f` monovaries with `g` on `s`, then `g` monovaries with `f` on `s`. -/
protected theorem MonovaryOn.symm (h : MonovaryOn f g s) : MonovaryOn g f s := fun _ hi _ hj hf =>
le_of_not_lt fun hg => hf.not_le <| h hj hi hg
#align monovary_on.symm MonovaryOn.symm
/-- If `f` antivaries with `g` on `s`, then `g` antivaries with `f` on `s`. -/
protected theorem AntivaryOn.symm (h : AntivaryOn f g s) : AntivaryOn g f s := fun _ hi _ hj hf =>
le_of_not_lt fun hg => hf.not_le <| h hi hj hg
#align antivary_on.symm AntivaryOn.symm
end LinearOrder
section LinearOrder
variable [LinearOrder α] [LinearOrder β] {f : ι → α} {g : ι → β} {s : Set ι}
/-- `f` monovaries with `g` if and only if `g` monovaries with `f`. -/
theorem monovary_comm : Monovary f g ↔ Monovary g f :=
⟨Monovary.symm, Monovary.symm⟩
#align monovary_comm monovary_comm
/-- `f` antivaries with `g` if and only if `g` antivaries with `f`. -/
theorem antivary_comm : Antivary f g ↔ Antivary g f :=
⟨Antivary.symm, Antivary.symm⟩
#align antivary_comm antivary_comm
/-- `f` monovaries with `g` on `s` if and only if `g` monovaries with `f` on `s`. -/
theorem monovaryOn_comm : MonovaryOn f g s ↔ MonovaryOn g f s :=
⟨MonovaryOn.symm, MonovaryOn.symm⟩
#align monovary_on_comm monovaryOn_comm
/-- `f` antivaries with `g` on `s` if and only if `g` antivaries with `f` on `s`. -/
theorem antivaryOn_comm : AntivaryOn f g s ↔ AntivaryOn g f s :=
⟨AntivaryOn.symm, AntivaryOn.symm⟩
#align antivary_on_comm antivaryOn_comm
end LinearOrder
|
/*
* File: BlshIndex.h
* Author: chteflio
*
* Created on March 4, 2016, 3:19 PM
*/
#ifndef BLSHINDEX_H
#define BLSHINDEX_H
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_sf_gamma.h>
#include <gsl/gsl_sf_result.h>
#include <gsl/gsl_errno.h>
namespace mips {
// Bayesian LSH index: extends LshIndex with a pruning table that, for each
// group of 32 hash bits, stores the minimum number of matching bits a
// candidate must exhibit to still plausibly meet the similarity threshold at
// the requested recall R.
// NOTE(review): the scheme resembles BayesLSH-style candidate pruning —
// confirm against the accompanying paper/implementation.
class BlshIndex : public LshIndex {
// Binary-search, over s in [0, nTrials], for the number of matching bits at
// which the posterior probability that the true collision probability falls
// below minMathesT drops to the allowed miss rate epsilon = 1 - R.
// The result is clamped to [0, nTrials].
int computeMinMatches(int nTrials) {
double p;
double epsilon = 1 - R; // tolerated probability of a false dismissal
int guess = nTrials / 2;
int start = 0;
int end = nTrials;
// std::cout<<"threshold: "<<threshold<<" epsilon: "<<epsilon<<std::endl;
int retValue = -1; // -1 marks "not decided yet"
do {
// p = P(collision probability < minMathesT | guess matches out of nTrials)
p = 1.0 - posteriorCdf(guess, nTrials, minMathesT);
if (p > epsilon) {
// Miss probability too high at this match count: need more matches,
// so continue searching in the upper half.
if (start == end) {
retValue = start - 1;
break;
}
end = guess - 1;
if (end < start) {
retValue = guess - 1;
break;
}
} else if (p < epsilon) {
// Safely below the tolerated miss rate: try fewer matches.
if (start == end) {
retValue = start;
break;
}
start = guess + 1;
if (start > end) {
retValue = guess;
break;
}
} else {
// Exact hit on epsilon.
retValue = guess;
break;
}
guess = (start + end) / 2;
// Defensive check: the bisection bounds should never leave [0, nTrials].
if (end < 0 || start > nTrials) {
std::cout << "Problem in computeMinMatches start " << start << " end " << end << std::endl;
exit(1);
}
} while (1);
// Clamp the answer into the valid range of observable matches.
if (retValue < 0)
retValue = 0;
else if (retValue > nTrials)
retValue = nTrials;
return retValue;
}
// CDF at x of the posterior over the bitwise collision probability given s
// matches in n trials: a Beta(s + 1, n - s + 1) distribution truncated below
// at 0.5 and renormalized onto (0.5, 1]. boost::math::ibeta is the
// regularized incomplete beta function, so ibeta(.,.,1) == 1.
inline double posteriorCdf(double s, double n, double x) {
if (x >= 1.0)
return 1.0;
if (x <= 0.5)
return 0;
double b1 = 1.0;
double bHalf = boost::math::ibeta(s + 1, n - s + 1, 0.5);
double bx = boost::math::ibeta(s + 1, n - s + 1, x);
double den = b1 - bHalf; // posterior mass on (0.5, 1]
// NOTE(review): the log-space branch is taken when den is SMALL, yet
// exp(log(a) - log(b)) equals a/b mathematically; the guard looks inverted
// relative to the usual underflow protection — confirm the intent.
if (den < 1.0e-15)
return exp(log(bx - bHalf) - log(den));
else
return (bx - bHalf) / den;
}
public:
double minMathesT; // threshold for matching, r = c2r(simT)
// ("minMathesT" appears to be a typo of "minMatchesT"; kept for API compatibility)
long long hashGroups = 0; // number of 32-bit hash groups = code bits / 32
int32_t *minMatches; // minimum number of hashes that should be observed to meet simT
double R; // target recall in (0, 1], set in initializeLists()
double worst; // normalized worst-case theta used to size the pruning table
BlshIndex() : LshIndex(), minMathesT(0), minMatches(nullptr) {
}
inline ~BlshIndex() {
// NOTE(review): minMatches is allocated via da_i32malloc but released with
// delete[]; if da_i32malloc wraps malloc, this pairing is undefined
// behavior — confirm and use the matching deallocator if so.
if (minMatches != nullptr)
delete[] minMatches;
}
// Lazily allocate the per-group minMatches table and pre-compute every
// entry for the given worst-case cosine bound. Idempotent: subsequent
// calls are no-ops once minMatches is allocated.
inline void allocateBayesLSHMemory(double worstCaseTheta) {
if (minMatches == nullptr) {
// set up cache space and pre-compute all minimum matches
minMatches = da_i32malloc(hashGroups, NULL); //"allocateBayesLSHMemory: minMatches"
// std::cout << "worstCaseTheta: " << worstCaseTheta << std::endl;
// Convert the cosine bound to a hash-collision probability via the
// SimHash identity p = 1 - acos(theta) / pi.
minMathesT = (1.0 - acos(worstCaseTheta) * INVPI); // min matches threshold
for (int i = 0; i < hashGroups; i++) {
// Entry i covers the first (i + 1) * 32 hash bits.
minMatches[i] = computeMinMatches((i + 1) * 32);
// std::cout<<minMatches[i]<<" ";
}
// std::cout << std::endl;
}
}
// One-time initialization (thread-safe via writelock) of the cosine
// sketches, optional LSH bins, and the BayesLSH pruning table for rows
// [start, end) of matrix. forProbeVectors selects whether hash bins are
// built for lookups; recall becomes the target recall R.
// worstCaseTheta == std::numeric_limits<double>::max() acts as a sentinel
// that disables the BayesLSH table entirely.
inline void initializeLists(const VectorMatrix& matrix, double worstCaseTheta, bool forProbeVectors, double recall,
ta_size_type start = 0, ta_size_type end = 0) {
omp_set_lock(&writelock);
if (!initialized) {
R = recall;
end = (end == 0 ? matrix.rowNum : end); // end == 0 means "all rows"
row_type nVectors = end - start;
cosSketches = new CosineSketches(nVectors, LSH_CODE_LENGTH, LSH_SIGNATURES);
hashGroups = LSH_CODE_LENGTH * LSH_SIGNATURES / 32;
initializedSketches.resize(nVectors, 0);
cosSketches->alloc();
if (forProbeVectors) {
// Pick the densest bin representation that fits one hash code.
switch (LSH_CODE_LENGTH) {
case 8:
lshBins = new LshBinsDense();
break;
case 16:
lshBins = new LshBinsSparse<uint16_t>();
break;
case 24:
case 32:
lshBins = new LshBinsSparse<uint32_t>();
break;
default:
lshBins = new LshBinsSparse<uint64_t>();
break;
}
lshBins->init(cosSketches->bytesPerCode, cosSketches->numHashTables, cosSketches->nVectors);
}
if (worstCaseTheta < std::numeric_limits<double>::max()) {
// Normalize the raw bound into a cosine in [-1, 1].
// NOTE(review): divides by the length of the first (if positive) or
// last (if non-positive) vector — presumably rows are sorted by
// length so this is the extreme case; confirm against the caller.
if (worstCaseTheta > 0) {
worstCaseTheta /= matrix.getVectorLength(start);
worstCaseTheta = (worstCaseTheta > 1 ? 1 : worstCaseTheta); // it will break in the loop afterwards
} else {
worstCaseTheta /= matrix.getVectorLength(end - 1);
worstCaseTheta = (worstCaseTheta < -1 ? -1 : worstCaseTheta); // it will have to check everything
}
worst = worstCaseTheta;
allocateBayesLSHMemory(worstCaseTheta);
}
initialized = true;
}
omp_unset_lock(&writelock);
}
};
}
#endif /* BLSHINDEX_H */
|
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_FUNCTION_FIX_HPP_INCLUDED
#define BOOST_SIMD_FUNCTION_FIX_HPP_INCLUDED
#if defined(DOXYGEN_ONLY)
namespace boost { namespace simd
{
/*!
@ingroup group-arithmetic
Function object implementing fix capabilities
Computes the truncation toward @ref Zero of its parameter.
This is a convenience alias of @ref trunc
@param v0 value to be truncated
@return the integral part of @c v0, rounded toward @ref Zero
**/
Value fix(Value const & v0);
} }
#endif
#include <boost/simd/function/scalar/trunc.hpp>
#include <boost/simd/function/simd/fix.hpp>
#endif
|
\documentclass{article}
\usepackage{geometry}
\geometry{letterpaper}
%%%% Uncomment below to begin paragraphs with an empty line %%%%
\usepackage[parfill]{parskip}
\usepackage{graphicx}
\usepackage{amssymb}
%\usepackage[section]{placeins}
%\usepackage[below]{placeins}
\usepackage{epstopdf}
%\DeclareGraphicsRule{.tif}{png}{.png}{`convert #1 `dirname #1`/`basename #1 .tif`.png}
\newcommand\bslash{\char`\\}
\newcommand\lt{\char`\<}
\newcommand\gt{\char`\>}
\newcommand{\supers}[1]{\ensuremath{^\textrm{{\scriptsize #1}}}}
\newcommand{\subs}[1]{\ensuremath{_\textrm{{\scriptsize #1}}}}
\title{LK Scripting Language Reference}
\author{Aron P. Dobos}
\begin{document}
\maketitle
\vspace{3in}
\begin{abstract}
The LK language is a simple but powerful scripting language that is designed to be small, fast, and easily embedded in other applications. It allows users to extend the built-in functionality of programs, and provides a cross-platform standard library of function calls. The core LK engine, including lexical analyzer, parser, compiler, and virtual machine comprises roughly 7000 lines of ISO-standard C++ code, and is only dependent on the Standard C++ Library (STL), making it extremely tight and portable to various platforms. LK also provides a C language API for writing extensions that can be dynamically loaded at runtime.
\textbf{Note:} LK can utilize standard 8-bit ASCII strings via the built-in \texttt{std::string} class, or can be configured to utilize an \texttt{std::string}-compliant string class from an external library. In this way, Unicode text can be supported natively. For example, direct integration and linkage with the wxWidgets C++ GUI library string class is provided as an option.
\end{abstract}
\newpage
\tableofcontents
%%\listoffigures
%%\listoftables
\newpage
\section{Introduction}
\subsection{Hello world!}
As with any new language, the traditional first program is to print the words "Hello, world!" on the screen, and the LK version is listed below.
\begin{verbatim}
out( "Hello, world!\n" );
\end{verbatim}
Notable features:
\begin{enumerate}
\item The \texttt{out} command generates text output from the script
\item The text to be printed is enclosed in double-quotes
\item To move to a new line in the output window, the symbol \texttt{\bslash n} is used
\item The program statement is terminated by a semicolon \texttt{;}
\end{enumerate}
To run Hello world, type it into a new text file with the .lk extension, and load the script into the appropriate LK script input form in your application.
\subsection{Why yet another scripting language?}
There are many scripting languages available that have all of LK's features, and many more. However, LK is unique in its code footprint (less than 4000 lines of C++), portability, and simplicity of integration into other programs. It is also very straightforward to write libraries of functions called extensions that can be dynamically loaded into the script engine at run-time.
Some notable aspects of LK include:
\begin{enumerate}
\item Functions are first-class values, so that they can be passed to procedures and defined implicitly
\item No distinction between functions and procedures, and they can be nested and mutually recursive
\item Built-in support for string indexed tables (hashes)
\item Initializer lists for arrays and tables
\item Fully automatic memory management and highly optimized internal data representation
\item Simple syntactic sugar to represent structures
\item '\texttt{elseif}' statement formatting is similar to PHP
\item '\texttt{for}' loop syntax follows the C / Perl convention
\end{enumerate}
\section{Data Variables}
\subsection{General Syntax}
In LK, a program statement is generally placed on a line by itself, and a semicolon \texttt{;} marks the end of the statement. A very long program statement can be split across multiple lines to improve readability, provided that the line breaks do not occur in the middle of a word or number.
Blank lines may be inserted between statements. While they have no meaning, they can help make a script easier to read. Spaces can also be added or removed nearly anywhere, except of course in the middle of a word. The following statements all have the same meaning.
\begin{verbatim}
out("Hello 1\n");
out (
"Hello 1\n");
out ( "Hello 1\n" );
\end{verbatim}
Comments are lines in the program code that are ignored by LK. They serve as a form of documentation, and can help other people (and you!) more easily understand what the script does. There a two types of comments. The first type begins with two forward slashes \texttt{//} , and continues to the end of the line.
\begin{verbatim}
// this program creates a greeting
out( "Hello, world!\n" ); // display the greeting to the user
\end{verbatim}
The second type of comment is usually used when a large section of code needs to be ignored, or a paragraph of documentation is included with the program text. In this case, the comment begins with \texttt{ /* } and ends with \texttt{ */ }, as in the example below.
\begin{verbatim}
/* this program creates a greeting
there are many interesting things to note:
- uses the 'out' command
- hello has five characters
- a new line is inserted with an escape sequence
*/
out( "Hello, world!\n" ); // display the greeting to the user
\end{verbatim}
\subsection{Variables}
Variables store information while your script is running. LK variables share many characteristics with other computer languages.
\begin{enumerate}
\item Variables do not need to be "declared" in advance of being used
\item There is no distinction between variables that store text and variables that store numbers
\item In LK, a variable can also be an array, table, or function
\end{enumerate}
Variable names may contain letters, digit, and the underscore symbol. A limitation is that variables cannot start with a digit. Like some languages like C and Perl, LK does distinguish between upper and lower case letters in a variable (or subroutine) name. As a result, the name \texttt{myData} is different from \texttt{MYdata}.
Values are assigned to variables using the equal sign \texttt{=}. Some examples are below.
\begin{verbatim}
Num_Modules = 10;
ArrayPowerWatts = 4k;
Tilt = 18.2;
system_name = "Super PV System";
Cost = "unknown";
COST = 1e6;
cost = 1M;
\end{verbatim}
Assigning to a variable overwrites its previous value. As shown above, decimal numbers can be written using scientific notation or engineering suffixes. The last two assignments, to \texttt{COST} and \texttt{cost}, assign the same numeric value. Recognized suffixes are listed in the table below. Suffixes are case-sensitive, so that LK can distinguish between \texttt{m} (milli) and \texttt{M} (Mega).
\begin{table}[ht]
\begin{center}
\begin{tabular}{lll}
Name & Suffix & Multiplier\\
\hline
Tera & T & 1e12\\
Giga & G & 1e9\\
Mega & M & 1e6\\
Kilo & k & 1e3\\
Milli & m & 1e-3\\
Micro & u & 1e-6\\
Nano & n & 1e-9\\
Pico & p & 1e-12\\
Femto & f & 1e-15\\
Atto & a & 1e-18\\
\end{tabular}
\caption{Recognized Numerical Suffixes}
\label{tab_engsuffixes}
\end{center}
\end{table}
\subsection{Arithmetic}
LK supports the five basic operations \texttt{+}, \texttt{-}, \texttt{*}, \texttt{/}, and \texttt{\^}. The usual algebraic precendence rules are followed, so that multiplications and divisions are performed before additions and subtractions. The exponentiation operator \texttt{\^} is performed before multiplications and divisions. Parentheses are also understood and can be used to change the default order of operations. Operators are left-associative, meaning that the expression \texttt{ 3-10-8 } is understood as \texttt{ (3-10)-8 }.
More complicated operations like modulus arithmetic are possible using built-in function calls in the standard LK library.
Examples of arithmetic operations:
\begin{verbatim}
battery_cost = cost_per_kwh * battery_capacity;
// multiplication takes precedence
degraded_output = degraded_output - degraded_output * 0.1;
// use parentheses to subtract before multiplication
cash_amount = total_cost * ( 1 - debt_fraction/100.0 );
\end{verbatim}
\subsection{Simple Input and Output}
You can use the built-in \texttt{out} and \texttt{outln} functions to write textual data to the console window. The difference is that \texttt{outln} automatically appends a newline character to the output. To output multiple text strings or variables, use the \texttt{+} operator, or separate them with a comma. Note that text strings may be enclosed by either single or double quotes - both have the exact same meaning.
\begin{verbatim}
array_power = 4.3k;
array_eff = 0.11;
outln("Array power is " + array_power + ' Watts.');
outln("It is " + (array_eff*100) + " percent efficient.");
outln('It is ', array_eff*100, ' percent efficient.'); // same as above
\end{verbatim}
The console output generated is:
\begin{verbatim}
Array power is 4300 Watts.
It is 11 percent efficient.
\end{verbatim}
Use the \texttt{in} function to read input from the user. You can optionally pass a message to \texttt{in} to display to the user when the input popup appears. The \texttt{in} function always returns a string, and you may need to convert to a different type to perform mathematical operations on the result.
\begin{verbatim}
cost_per_watt = to_real(in("Enter cost per watt:")); // Show a message. in() also is fine.
notice( "Total cost is: " + cost_per_watt * 4k + " dollars"); // 4kW system
\end{verbatim}
The \texttt{notice} function works like \texttt{out}, except that it displays a pop up message box on the computer screen.
\subsection{Data Types and Conversion}
LK supports two basic types of data: numbers and text. Numbers can be integers or real numbers. Text strings are stored as a sequence of individual characters, and there is no specific limit on the length of a string. LK does not try to automatically convert between data types in most cases, and will issue an error if you try to multiply a number by a string, even if the string contains the textual representation of a valid number. To convert between data types, LK has several functions in the standard library for this purpose.
Boolean (true/false) data is also stored as a number - there is no separate boolean data type. All non-zero numbers are interpreted as true, while zero is false. This convention follows the C programming language.
There is also a special data value used in LK to indicate the absence of any value, known as the \texttt{null} value. It useful when working with arrays and tables of variables, which will be discussed later in this document. When a variable's value is \texttt{null}, it cannot be multiplied, added, or otherwise used in a calculation.
\begin{table}[ht]
\begin{center}
\begin{tabular}{lll}
Data Type & Conversion Function & Valid Values \\
\hline
Integer Number & \texttt{ to\_int() } & approx. +/- 1e-308 to 1e308 \\
Real Number & \texttt{ to\_real() } & +/- 1e-308 to 1e308, not a number (NaN) \\
Boolean & \texttt{ to\_bool() } & \texttt{"true"} or \texttt{"false"} (\texttt{1} or \texttt{0}) \\
Text Strings & \texttt{ to\_string() } & Any length text string \\
\end{tabular}
\caption{Intrinsic Data Types}
\label{tab_datatypes}
\end{center}
\end{table}
Sometimes you have two numbers in text strings that you would like to multiply. This can happen if you read data in from a text file, for example. Since it does not make sense to try to multiply text strings, you need to first convert the strings to numbers. To convert a variable to a double-precision decimal number, use the \texttt{to\_real} function, as below.
\begin{verbatim}
a = "3.5";
b = "-2";
c1 = a*b; // this will cause an error when you click 'Run'
c2 = to_real(a) * to_real(b); // this will assign c2 the number value of -7
\end{verbatim}
The assignment to \texttt{c1} above will cause the error \emph{error: access violation to non-numeric data}, while the assignment to \texttt{c2} makes sense and executes correctly.
You can also use \texttt{to\_int} to convert a string to an integer or truncate a decimal number, or the \texttt{to\_string} function to explicitly convert a number to a string variable.
If you need to find out what type a variable currently has, use the \texttt{typeof} function to get a text description.
\begin{verbatim}
a = 3.5;
b = -2;
c1 = a+b; // this will set c1 to -1.5
c2 = to_string( to_int(a) ) + to_int( b ); // c2 set to text "3-2"
outln( typeof(a) ); // will display "number"
outln( typeof(c2) ); // will display "string"
\end{verbatim}
To change the value of an existing variable by a certain amount, you can use the combined arithmetic and assignment operators \texttt{+=}, \texttt{-=}, \texttt{*=}, \texttt{/=}. The \texttt{+=} is unique in that it can add a numeric value, concatenate strings, and append values to an array (discussed later). The other ones only work on numbers. Examples:
\begin{verbatim}
a = 1;
a += 2; // same as writing a=a+2
a *= 2.5; // same as writing a=a*2.5
a -= 1; // same as writing a=a-1
a /= 3; // after this line, a = 2.16667
s = 'abc';
s += 'def';
s += 'ghi';
outln(s); // prints abcdefghi
\end{verbatim}
\subsection{Special Characters}
Text data can contain special characters to denote tabs, line endings, and other useful elements that are not part of the normal alphabet. These are inserted into quoted text strings with \emph{escape sequences}, which begin with the \texttt{\bslash} character.
\begin{table}[ht]
\begin{center}
\begin{tabular}{ll}
Escape Sequence & Meaning\\
\hline
\texttt{\bslash n} & New line\\
\texttt{\bslash t} & Tab character\\
\texttt{\bslash r} & Carriage return\\
\texttt{\bslash "} & Double quote\\
\texttt{\bslash\bslash} & Backslash character\\
\end{tabular}
\caption{Text String Escape Sequences}
\label{tab_escseq}
\end{center}
\end{table}
So, to print the text \texttt{"Hi, tabbed world!"}, or assign \texttt{c:\bslash Windows\bslash notepad.exe}, you would have to write:
\begin{verbatim}
outln("\"Hi,\ttabbed world!\"")
program = "c:\\Windows\\notepad.exe"
\end{verbatim}
Note that for file names on a Windows computer, it is important to convert back slashes (\texttt{'\bslash'}) to forward slashes (\texttt{/}). Otherwise, the file name may be translated incorrectly and the file won't be found.
\subsection{Constants and Enumerations}
LK allows constants to be specified using the \texttt{const} keyword. Any type of variable can be declared constant, including numbers, strings, and functions (described later). Attempting to assign a new value to a \texttt{const} variable will result in a run-time error when the script is executed. Also, once a variable has been declared, it cannot be re-declared as \texttt{const} in a subsequent part of the program. Arrays and tables behave slightly differently from numbers: while the variable name referring to the array or table cannot be changed to another data type, the actual array or table contents are not protected by the \texttt{const} specifier. Examples:
\begin{verbatim}
const pi = 3.1415926;
pi = 3.11; // will cause an error
value = 43;
const value = 51; // will cause an error (value already exists)
const names = [ "Linus", "David", "Aron" ];
names[2] = "Ari"; // allowed because it changes the contents of array 'names'
// but not the data type or size of 'names'
names[3] = "Nora"; // not allowed because it changes the size of the 'names' array
names = "Patrick"; // will cause an error
const sqr = define(x) { return x*x; }; // function sqr can't be changed
\end{verbatim}
Sometimes it is desirable to specify many constants, perhaps to define various states in a program. For this purpose, an \texttt{enum}, or \emph{enumerate} statement exists. The state of a motor could be indicated with the constants below, instead of with numbers, which makes a program much more readable.
\begin{verbatim}
enum { STARTING, RUNNING, STOPPED, ERROR };
motor_state = STOPPED;
\end{verbatim}
Enumerations start assigning integer values to the names in the list, increasing by one. It is possible to assign a custom values to names in the following ways:
\begin{verbatim}
enum { STARTING, RUNNING, STOPPED, EFF=+20, EFF1, EFF2, ERROR1 = 100, ERROR2 };
\end{verbatim}
In this case, everything is the same as before until the EFF variable, for which the \texttt{+20} specifies that it should have a value 20 greater than the previous name in the enumeration: 22. EFF1 and EFF2 have values 23 and 24. It is also possible to jump to a known value, as with ERROR1.
The \texttt{enum} statement is simply a more convenient syntax for making multiple \texttt{const} assignments. The enumeration above is semantically equivalent to:
\begin{verbatim}
const local STARTING = 0;
const local RUNNING = STARTING + 1;
const local STOPPED = RUNNING + 1;
const local EFF = STOPPED + 20;
const local EFF1 = EFF + 1;
const local EFF2 = EFF1 + 1;
const local ERROR1 = 100;
const local ERROR2 = ERROR1 + 1;
\end{verbatim}
The meaning of the \texttt{local} specifier will be explained in later sections.
\section{Flow Control}
\subsection{Comparison Operators}
LK supports many ways of comparing data. These types of tests can control the program flow with branching and looping constructs that we will discuss later.
There are six standard comparison operators that can be used on most types of data. For text strings, ``less than'' and ``greater than'' are with respect to alphabetical order.
\begin{table}[ht]
\begin{center}
\begin{tabular}{lc}
Comparison & Operator\\
\hline
Equal & \texttt{ == } \\
Not Equal & \texttt{ != } \\
Less Than & \texttt{ \lt } \\
Less Than or Equal & \texttt{ \lt= } \\
Greater Than & \texttt{ \gt } \\
Greater Than or Equal & \texttt{ \gt= } \\
\end{tabular}
\caption{Comparison Operators}
\label{tab_compop}
\end{center}
\end{table}
Examples of comparisons:
\begin{verbatim}
divisor != 0
state == "oregon"
error <= -0.003
"pv" > "csp"
\end{verbatim}
Single comparisons can be combined by \emph{boolean} operators into more complicated tests.
\begin{enumerate}
\item The \texttt{!} operator yields true when the test is false. It is placed before the test whose result is to be notted.\\Example: \texttt{!(divisor == 0)}
\item The \texttt{\&\&} operator yields true only if both tests are true.\\Example: \texttt{divisor != 0 \&\& dividend > 1}
\item The \texttt{||} operator yields true if either test is true.\\Example: \texttt{state == "oregon" || state == "colorado"}
\end{enumerate}
The boolean operators can be combined to make even more complex tests. The operators are listed above in order of highest precedence to lowest. If you are unsure of which test will be evaluated first, use parentheses to group tests. Note that the following statements have very different meanings.
\begin{verbatim}
state_count > 0 && state_abbrev == "CA" || state_abbrev == "OR"
state_count > 0 && (state_abbrev == "CA" || state_abbrev == "OR")
\end{verbatim}
\subsection{Branching}
Using the comparison and boolean operators to define tests, you can control whether a section of code in your script will be executed or not. Therefore, the script can make decisions depending on different circumstances and user inputs.
\subsubsection{\texttt{if} Statements}
The simplest branching construct is the \texttt{if} statement. For example:
\begin{verbatim}
if ( tilt < 0.0 )
{
outln("Error: tilt angle must be 0 or greater")
}
\end{verbatim}
Note the following characteristics of the \texttt{if} statement:
\begin{enumerate}
\item The test is placed in parentheses after the \texttt{if} keyword.
\item A curly brace \texttt{\{} indicates a new block of code statements.
\item The following program lines include the statements to execute when the \texttt{if} test succeeds.
\item To help program readability, the statements inside the \texttt{if} are usually indented.
\item The construct concludes with the \texttt{\}} curly brace to indicate the end of the block.
\item When the \texttt{if} test fails, the program statements inside the block are skipped.
\end{enumerate}
\subsubsection{\texttt{else} Construct}
When you also have commands you wish to execute when the \texttt{if} test fails, use the \texttt{else} clause. For example:
\begin{verbatim}
if ( power > 0 )
{
energy = power * time;
operating_cost = energy * energy_cost;
}
else
{
outln("Error, no power was generated.");
energy = -1;
operating_cost = -1;
}
\end{verbatim}
\subsubsection{Multiple \texttt{if} Tests}
Sometimes you wish to test many conditions in a sequence, and take appropriate action depending on which test is successful. In this situation, use the \texttt{elseif} clause. Be careful to spell it as a single word, as both \texttt{else if} and \texttt{elseif} can be syntactically correct, but have different meanings.
\begin{verbatim}
if ( angle >= 0 && angle < 90)
{
text = "first quadrant";
}
elseif ( angle >= 90 && angle < 180 )
{
text = "second quadrant";
}
elseif ( angle >= 180 && angle < 270 )
{
text = "third quadrant";
}
else
{
text = "fourth quadrant";
}
\end{verbatim}
You do not need to end a sequence of \texttt{elseif} statements with the \texttt{else} clause, although in most cases it is appropriate so that every situation can be handled. You can also nest \texttt{if} constructs if needed. Again, we recommend indenting each "level" of nesting to improve your script's readability. For example:
\begin{verbatim}
if ( angle >= 0
&& angle < 90 ) {
if ( print_value == true ) {
outln( "first quadrant: " + angle );
} else {
outln( "first quadrant" );
}
}
\end{verbatim}
Also note that because LK does not care about spaces and tabs when reading the program text, it is possible to use multiple lines for a long if statement test to make it more readable. The curly braces denoting the code block can also follow on the same line as the \texttt{if} or on the next line.
\subsubsection{Single statement \texttt{if}s}
Sometimes you only want to take a single action when an \texttt{if} statement succeeds. To reduce the amount of code you must type, LK accepts single line \texttt{if} statements that do not include the \texttt{\{} and \texttt{\}} block delimiters, as shown below.
\begin{verbatim}
if ( azimuth < 0 ) outln( "Warning: azimuth < 0, continuing..." );
if ( tilt > 90 ) tilt = 90; // set maximum tilt value
\end{verbatim}
You can also use a \texttt{elseif} and/or \texttt{else} statement on single line \texttt{if}. Like the \texttt{if}, it only accepts one program statement, and must be typed on the same program line. Example:
\begin{verbatim}
if ( value > average ) outln("Above average");
else outln("Not above average");
\end{verbatim}
\subsubsection{Inline Switch Statement}
Sometimes you need to select between multiple values based on a number. A quick way to do this in LK is to use an inline switch statement. A value out of a list of expressions is returned based on the numeric value of the test expression. Examples:
\begin{verbatim}
choice = 2;
value = ? choice [ 'aron', 'peter', 'rodger' ]; // returns rodger
a = 2.4;
b = 5.6;
operator = 1;
value = ? operator [ a+b, a-b, a*b, a/b ]; // returns -3.2
\end{verbatim}
Note that the inline switch statement is simply shorthand for writing a full \texttt{if-then-else} statement. Using the inline switch can make your code more compact when used appropriately.
\subsection{Looping}
A loop is a way of repeating the same commands over and over. You may need to process each line of a file in the same way, or sort a list of names. To achieve such tasks, LK provides two types of loop constructs, the \texttt{while} and \texttt{for} loops.
Like \texttt{if} statements, loops contain a ``body'' of program statements followed by a closing curly brace \texttt{\}} to denote where the loop construct ends.
\subsubsection{\texttt{while} Loops}
The \texttt{while} loop is the simplest loop. It repeats one or more program statements as long as a logical test holds true. When the test fails, the loop ends, and the program continues execution of the statements following the loop construct. For example:
\begin{verbatim}
while ( done == false )
{
// process some data
// check if we are finished and update the 'done' variable
}
\end{verbatim}
The test in a \texttt{while} loop is checked before the body of the loop is entered for the first time. In the example above, we must set the variable \texttt{done} to \texttt{false} before the loop, because otherwise no data processing would occur. After each iteration ends, the test is checked again to determine whether to continue the loop or not.
\subsubsection{Counter-driven Loops}
Counter-driven loops are useful when you want to run a sequence of commands for a certain number of times. As an example, you may wish to display only the first 10 lines in a text file.
There are four basic parts of implementing a counter-driven loop:
\begin{enumerate}
\item Initialize a counter variable before the loop begins.
\item Test to see if the counter variable has reached a set maximum value.
\item Execute the program statements in the loop, if the counter has not reached the maximum value.
\item Increment the counter by some value.
\end{enumerate}
For example, we can implement a counter-driven loop using the \texttt{while} construct:
\begin{verbatim}
i = 0; // use i as counter variable
while (i < 10) {
outln( "value of i is " + i );
i = i + 1;
}
\end{verbatim}
\subsubsection{\texttt{for} Loops}
The \texttt{for} loop provides a streamlined way to write a counter-driven loop. It combines the counter initialization, test, and increment statements into a single line. The script below produces exactly the same effect as the \texttt{while} loop example above.
\begin{verbatim}
for ( i = 0; i < 10; i++ ) {
outln( "value of i is " + i );
}
\end{verbatim}
The three loop control statements are separated by semicolons in the \texttt{for} loop statement. The initialization statement (first) is run only once before the loop starts. The test statement (second) is run before entering an iteration of the loop body. Finally, the increment statement is run after each completed iteration, and before the test is rechecked. Note that you can use any assignment or calculation in the increment statement.
Note that the increment operator \texttt{++} is used to modify the counter variable \texttt{i}. It is equally valid to write \texttt{i=i+1} for the counter advancement expression. The \texttt{++} is simply shorthand for adding 1 to a number. The \texttt{--} operator similarly decrements a number by 1.
Just like the \texttt{if} statement, LK allows \texttt{for} loops that contain only one program statement in the body to be written on one line without curly braces. For example:
\begin{verbatim}
for ( val=57; val > 1; val = val / 2 ) outln("Value is " + val );
\end{verbatim}
\subsubsection{Loop Control Statements}
In some cases you may want to end a loop prematurely. Suppose under normal conditions, you would iterate 10 times, but because of some rare circumstance, you must break the loop's normal path of execution after the third iteration. To do this, use the \texttt{break} statement.
\begin{verbatim}
value = to_real( in("Enter a starting value") );
for ( i=0; i<10; i=i+1 )
{
if (value < 0.01)
{
break;
}
outln("Value is " + value );
value = value / 3.0;
}
\end{verbatim}
In another situation, you may not want to altogether break the loop, but skip the rest of program statements left in the current iteration. For example, you may be processing a list of files, but each one is only processed if it starts with a specific line. The \texttt{continue} keyword provides this functionality.
\begin{verbatim}
for ( i=0; i<file_count; i++ )
{
file_header_ok = false;
  // check whether the current file has the correct header
if (!file_header_ok) continue;
// process this file
}
\end{verbatim}
The \texttt{break} and \texttt{continue} statements can be used with both \texttt{for} and \texttt{while} loops. If you have nested loops, the statements will act in relation to the nearest loop structure. In other words, a \texttt{break} statement in the body of the inner-most loop will only break the execution of the inner-most loop.
\subsection{Quitting}
LK script execution normally ends when there are no more statements to run at the end of the script. However, sometimes you may need to halt early, if the user chooses not to continue an operation.
The \texttt{exit} statement will end the script immediately. For example:
\begin{verbatim}
if ( yesno("Do you want to quit?") ) {
outln("Aborted.");
exit;
}
\end{verbatim}
The \texttt{yesno} function call displays a message box on the user's screen with yes and no buttons, showing the given message. It returns \texttt{true} if the user clicked yes, or \texttt{false} otherwise.
\section{Arrays}
Often you need to store a list of related values. For example, you may need to refer to the price of energy in different years. Or you might have a list of state names and capital cities. In LK, you can use arrays to store these types of collections of data.
\subsection{Initializing and Indexing}
An \emph{array} is simply a variable that has many values, and each value is indexed by a number. Each variable in the array is called an \emph{element} of the array, and the position of the element within the array is called the element's \emph{index}. The index of the first element in an array is always 0.
To access array elements, enclose the index number in square brackets immediately following the variable name. You do not need to declare or allocate space for the array data in advance. However, if you refer to an element at a high index number first, all of the elements up to that index are reserved and given the \texttt{null} value.
\begin{verbatim}
names[0] = "Sean";
names[1] = "Walter";
names[2] = "Pam";
names[3] = "Claire";
names[4] = "Patrick";
outln( names[3] ); // output is "Claire"
my_index = 2;
outln( names[my_index] ); // output is "Pam"
\end{verbatim}
You can also define an array using the \texttt{[ ]} initializer syntax. Simply separate each element with a comma. There is no limit to the number of elements you can list in an array initializer list.
\begin{verbatim}
names = ["Sean", "Walter", "Pam", "Claire", "Patrick"];
outln( "First: " + names[0] );
outln( "All: " + names );
\end{verbatim}
Note that calling the \texttt{typeof} function on an array variable will return "array" as the type description, not the type of the elements. This is because LK is not strict about the types of variables stored in an array, and does not require all elements to be of the same type.
\subsection{Array Length}
Sometimes you do not know in advance how many elements are in an array. This can happen if you are reading a list of numbers from a text file, storing each as an element in an array. After all the data has been read, you can use the \texttt{\#} operator to determine how many elements the array contains.
\begin{verbatim}
count = #names;
\end{verbatim}
\subsection{Processing Arrays}
Arrays and loops naturally go together, since frequently you may want to perform the same operation on each element of an array. For example, you may want to find the total sum of an array of numbers.
\begin{verbatim}
numbers = [ 1, -3, 2.4, 9, 7, 22, -2.1, 5.8 ];
sum = 0;
for (i=0; i < #numbers; i++)
sum = sum + numbers[i];
\end{verbatim}
The important feature of this code is that it will work regardless of how many elements are in the array \texttt{numbers}.
To append a value to the end of an array, you can simply index a location past the end of the array. However, sometimes you don't have the index of the last element handy. In this case, you can just use the \texttt{+=} operator to append an item. Example:
\begin{verbatim}
alphabet = [ 'a' ];
alphabet += 'b';
alphabet += 'c';
for( i=0;i<3;i++ )
alphabet += 'd'; // append three d's
outln( alphabet ); // prints a,b,c,d,d,d
\end{verbatim}
To search for an item in an array, you can use the ``where at'' operator \texttt{?@}. This returns the index of the item if it is found. If the item doesn't exist in the array, a value of -1 is returned.
To remove an item from an array, use the \texttt{-@} operator along with the index of the item you want to remove. Examples:
\begin{verbatim}
names = [ 'aron', 'patrick', 'rodger', 'peter', 'hillary' ];
pos1 = names ?@ 'peter'; // returns 3
names -@ 1; // remove the item at index 1
pos2 = names ?@ 'patrick'; // returns -1
\end{verbatim}
\subsection{Multidimensional Arrays}
As previously noted, LK is not strict with the types of elements stored in an array. Therefore, a single array element can even be another array. This allows you to define matrices with both row and column indexes, and also three (or greater) dimensional arrays.
To create a multi-dimensional array, simply separate the indices with commas between the square brackets. For example:
\begin{verbatim}
data[0][0] = 3;
data[0][1] = -2;
data[1][0] = 5;
data[2][0] = 1;
nrows = #data; // result is 3
ncols = #data[0]; // result is 2
row1 = data[0]; // extract the first row
x = row1[0]; // value is 3
y = row1[1]; // value is -2
\end{verbatim}
The array initializer syntax \texttt{ [ ] } can also be used to declare arrays in multiple dimensions. This is often useful when declaring a matrix of numbers, or a list of states and associated capitals, for example.
\begin{verbatim}
matrix = [ [2,3,4],
[4,5,6],
[4,2,1] ];
vector = [ 2, 4, 5 ];
outln( matrix[0] ); // prints the first row [2,3,4]
list = [ ["oregon", "salem"],
["colorado", "denver"],
["new york", "albany"] ];
\end{verbatim}
\subsection{Managing Array Storage}
When you define an array, LK automatically allocates sufficient computer memory to store the elements. If you know in advance that your array will contain 100 elements, for example, it can be much faster to allocate the computer memory before filling the array with data. Use the \texttt{alloc} command to make space for 1 or 2 dimensional arrays. The array elements are filled in with the \texttt{null} value.
\begin{verbatim}
data = alloc(3,2); // a matrix with 3 rows and 2 columns
data[2][1] = 3;
prices = alloc( 5 ); // a simple 5 element array
\end{verbatim}
As before, you can extend the array simply by using higher indexes.
%\subsection{Multiple Advance Declarations}
%You can also declare many variables and arrays in advance using the \texttt{declare} statement. For example:
%\begin{verbatim}
%declare radiation[8760],temp[8760],matrix[3,3],i=0
%\end{verbatim}
%This statement will create the array variables \texttt{radiation} and \texttt{temp}, each with 8760 values. It will also set aside memory for the 3x3 \texttt{matrix} variable, and 'create' the variable \texttt{i} and assign it the value of zero. The \texttt{declare} statement can be a useful shortcut to creating arrays and initializing many variables in a single line. The only limitation is that you cannot define arrays of greater than two dimensions using the \texttt{declare} command.
\section{Tables}
In addition to arrays, LK provides another built-in data storage structure called a table. A table is useful when storing data associated with a specific key. Similar to how arrays are indexed by numbers, tables are indexed by text strings. The value stored with a key can be a number, text string, array, or even another table. A good example when it would be appropriate to use a table is to store a dictionary in memory, where each word is a key that has associated with it the definition of the word. In other languages, tables are sometimes called dictionaries, or hashes or hash tables.
\subsection{Initializing and Indexing}
To refer to data elements in a table, enclose the text key string in curly braces immediately following the variable name. You do not need to declare or allocate space for the table.
\begin{verbatim}
wordlen{"name"} = 4;
wordlen{"dog"} = 3;
wordlen{"simple"} = 5;
wordlen{"lk"} = 2;
outln(wordlen); // prints '{ name=4 dog=3 simple=5 lk=2 }'
outln( wordlen{"simple"} ); // prints 5
outln( typeof( wordlen{"can"} ) ); // prints 'null' (the key is not in the table)
key = "dog";
num_letters = wordlen{key}; // num_letters is 3
\end{verbatim}
Calling the \texttt{typeof} function on a table variable will return "table" as the type description. LK is not strict about the types of variables stored in a table, and does not require all of the elements to be of the same type.
You can also define a table with key=value pairs using the \texttt{ \{ \} } initializer syntax. The pairs are separated by commas, as shown below.
\begin{verbatim}
wordlen = { "antelope"=4, "dog"=3, "cat"=5, "frog"=2 };
outln( wordlen ); // prints { dog=3 frog=2 cat=5 antelope=4 }
\end{verbatim}
Note the order of the printout of the key-value pairs in the example above. Unlike an array whose elements are stored sequentially in memory, tables store data in an unordered set. As a result, once a key=value pair is added to a table, there is no specification of order of the pair relative to the other key=value pairs already in the table.
A ``lazy'' declaration syntax is also allowed, in which the keys are not put in quotes. This syntax is generally quicker to type into the computer. Example:
\begin{verbatim}
wordlen = { antelope=4, dog=3, cat=5, frog=2 };
\end{verbatim}
\subsection{Processing Tables}
It is often necessary to know how many key=value pairs are in a table, and to know what all the keys are. In many situations, you may have to perform an operation on every key=value pair in a table, but you do not know in advance what the keys are or how many there are. LK provides two operators \# and @ for this purpose.
To determine if a specific key has a value in a table, you can use the ``where at'' operator \texttt{?@}. This returns a true or false value depending on whether the table has that key in it.
To remove a key=value pair from a table, use the \texttt{-@} operator. Examples:
\begin{verbatim}
wordlen = { "antelope"=4, "dog"=3, "cat"=5, "frog"=2 };
outln("number of key,value pairs =" + #wordlen);
keys = @wordlen;
outln(keys);
wordlen -@ "frog"; // remove the frog entry
keys = @wordlen;
outln(keys);
for (i=0;i<#keys;i++)
outln("key '" + keys[i] + "'=" + wordlen{keys[i]});
has_frog = wordlen ?@ "frog"; // false
has_dog = wordlen ?@ "dog"; // true
\end{verbatim}
\subsection{Record Structures}
Many languages provide the ability to create user-defined data types that are built from the intrinsic types. For example, an address book application might have a record for each person, and each record would have fields for the person's name, email address, and phone number.
For programming simplicity, LK has a syntax shortcut for accessing fields of a structure, which in actuality are the values of keyed entries in a table. The dot operator '\texttt{.}' transforms the text following the dot into a key index, as shown below.
\begin{verbatim}
person.name = "Jane Ruth";
person.email = "[email protected]";
person.phone = "0009997777";
outln(person.name); // prints "Jane Ruth"
outln(person); // prints { name=Jane Ruth email=[email protected] phone=0009997777 }
outln(person{"email"}); // prints "[email protected]"
outln(@person); // prints "name,email,phone"
\end{verbatim}
You can build arrays and tables of structures. For example, a table of people indexed by their last names would make an efficient database for an address book.
\begin{verbatim}
db{"ruth"} = { "name"="Jane Ruth", "email"="[email protected]", "phone"="0009997777" };
db{"doe"} = { "name"="Joe Doe", "email"="[email protected]", "phone"="0008886666" };
// change joe's email address
db{"doe"}.email = "[email protected]";
outln( db{"ruth"}.phone );
// delete ruth from the address book
db{"ruth"} = null;
\end{verbatim}
\section{Functions}
It is usually good programming practice to split a larger program up into smaller sections, often called procedures, functions, or subroutines. A program may be easier to read and debug if it is not all thrown together, and you may have common blocks of functionality that are reused several times in the program.
A function is simply a named chunk of code that may be called from other parts of the script. It usually performs a well-defined operation on a set of variables, and it may return a computed value to the caller.
Functions can be written anywhere in a script, including inside other functions. If a function is never called by the program, it has no effect.
\subsection{Definition}
A function is defined by assigning a block of code to a variable. The variable is the name of the function, and the \texttt{define} keyword is used to create a new function.
Consider the very simple function listed below.
\begin{verbatim}
show_welcome = define()
{
outln("Thank you for choosing LK.");
outln("This text will be displayed at the start of the script.");
};
\end{verbatim}
Notable features:
\begin{enumerate}
\item A function is created using the \texttt{define} keyword, and the result is assigned to a variable.
\item The variable references the function and can be used to call it later in the code.
\item The empty parentheses after the \texttt{define} keyword indicate that this function takes no parameters.
\item A code block enclosed by \texttt{ \{ \} } follows and contains the statements that execute when the function is called.
\item A semicolon finishes the assignment statement of the function to the variable \texttt{show\_welcome}.
\end{enumerate}
To call the function from elsewhere in the code, simply write the function variable's name, followed by the parentheses.
\begin{verbatim}
// show a message to the user
show_welcome();
\end{verbatim}
\subsection{Returning a Value}
A function is generally more useful if it can return information back to the program that called it. In this example, the function will not return unless the user enters "yes" or "no" into the input dialog.
\begin{verbatim}
require_yes_or_no = define()
{
while( true )
{
answer = in("Destroy everything? Enter yes or no:");
if (answer == "yes")
return true;
if (answer == "no")
return false;
outln("That was not an acceptable response.");
}
};
// call the input function
result = require_yes_or_no(); // returns true or false
if ( !result ) {
outln("user said no, phew!");
exit;
} else {
outln("destroying everything...");
}
\end{verbatim}
The important lesson here is that the main script does not worry about the details of how the user is questioned, and only knows that it will receive a \texttt{true} or \texttt{false} response. Also, the function can be reused in different parts of the program, and each time the user will be treated in a familiar way.
\subsection{Parameters}
In most cases, a function will accept parameters when it is called. That way, the function can change its behavior, or take different inputs in calculating a result. Analogous to mathematical functions, LK functions can take arguments to compute a result that can be returned. Arguments to a function are given names and are listed between the parentheses following the \texttt{define} keyword.
For example, consider a function to determine the minimum of two numbers:
\begin{verbatim}
minimum = define(a, b) {
if (a < b) return a;
else return b;
};
// call the function
count = 129;
outln("Minimum: " + minimum( count, 77) );
\end{verbatim}
In LK, changing the value of a function's named arguments will modify the variable in the calling program. Instead of passing the actual value of a parameter \texttt{a}, a \emph{reference} to the variable in the original program is used. The reference is hidden from the user, so the variable acts just like any other variable inside the function.
Because arguments are passed by reference (as in Fortran, for example), a function can "return" more than one value. For example:
\begin{verbatim}
sumdiffmult = define(s, d, a, b) {
s = a+b;
d = a-b;
return a*b;
};
sum = -1;
diff = -1;
mult = sumdiffmult(sum, diff, 20, 7);
// will output 27, 13, and 140
outln("Sum: " + sum + " Diff: " + diff + " Mult: " + mult);
\end{verbatim}
Functions can accept an unspecified number of arguments. Every named argument must be provided by the caller, but additional arguments can also be sent to a function. When a function runs, a special variable called \texttt{\_\_args} is created; it is an array containing all of the provided arguments.
\begin{verbatim}
sum = define(init)
{
for( i=1;i<#__args;i++ )
init = init + __args[i];
return init;
};
outln( sum(1,2,3,4,5) ); // prints 15
outln( sum("a","b","c","d") ); // prints abcd
\end{verbatim}
\subsection{Alternate Declaration Syntax}
Functions can also be defined using the \texttt{function} keyword, as below. The syntax is simplified, and there is no semicolon required at the end because the definition does not comprise a statement in the usual sense. Functions declared in this way can be defined with parameters and can return values, as before.
\begin{verbatim}
function show_welcome(name)
{
outln("Thank you " + name + " for choosing LK.");
outln("This text will be displayed at the start of the script.");
}
\end{verbatim}
In fact, this alternate syntax has an exact translation to the \texttt{define} syntax used elsewhere in this manual. For example:
\begin{verbatim}
function show_welcome( <args> ) { <statements> }
\end{verbatim}
is exactly equivalent to
\begin{verbatim}
const show_welcome = define( <args> ) { <statements> };
\end{verbatim}
The alternate syntax described here can help clarify programs and is generally easier for people familiar with other languages, at least initially. It automatically enforces \emph{const-ness}, so that the function isn't inadvertently replaced later in the code by an assignment statement.
\subsection{First-class Functions}
Functions in LK are \emph{first-class} values, meaning that they are referenced just as any other variable, and can be passed to other functions, replaced with new bodies, or defined implicitly. Functions can also be declared within other functions, stored in arrays, or referenced by keys in a table. The \texttt{define} keyword simply returns an internal LK representation of a function, and the assignment statement assigns that 'value' to the variable name. This ties the variable name to the function, implying that when the variable name is typed as a function call, LK knows to evaluate the function to which it refers.
\begin{verbatim}
triple // variable name
= // assignment statement
define( x ) // definition of a function with argument x
{
return 3*x; // statements comprising function body
}
; // semicolon terminates the assignment statement
neg = define( y ) { return 0-y; }; // another function
\end{verbatim}
In the example below, the \texttt{meta} function is unique in that it returns another function. Which function it returns depends on the argument \texttt{mode}.
\begin{verbatim}
meta = define( mode ) { // a function that returns another function
if (mode == "double")
return define(x) { return 2*x; };
else
return define(x) { return 3*x; };
};
outln( meta("triple")(12) ); // prints 36
outln( meta("double")(-23) ); // prints -46
\end{verbatim}
It is important to note that the function returned by \texttt{meta} does not retain the context in which it was created. For example, if the body of the implicit function returned by \texttt{meta} referenced the \texttt{mode} argument in its calculations, running the returned function would result in an error because the \texttt{mode} argument would no longer be present in the current \emph{environment}.
\subsection{Built-in Functions}
Throughout this guide, we have made use of built-in functions like \texttt{in}, \texttt{outln}, and others. These functions are included from the LK standard library automatically, and called in exactly the same way as user functions. Like user functions, they can return values, and sometimes they modify the arguments sent to them. Refer to the ``Standard Library Reference'' at the end of this guide for documentation on each function's capabilities, parameters, and return values. When LK is embedded in other programs, additional functions may become available that are specific to the program, and are usually documented by the program separately.
\section{Environments}
This section describes how LK stores information about data and functions. The rules for variable access are simple but important to understand for writing script code that works as expected in all situations.
During the execution of a script, all variables (and thus functions) are stored in a special lookup table called the \emph{environment}. The environment stores a reference from the variable name to the actual data or function represented by the variable, and allows new variables to be created and queried. When a script starts executing, the environment is empty, and as variables are assigned values, they are added to the environment. When an assignment statement changes the value of an existing variable, the old value is discarded and replaced with the new value.
\subsection{Variable Lookup Mechanism}
When a function is called, a new environment is created to store variables \emph{local} to the function. The new environment retains a reference to its \emph{parent environment}, which provides the mechanism by which variables and functions assigned outside a function can be accessed within it. LK follows these lookup rules:
\begin{enumerate}
\item The current environment is checked for the existence of the requested variable.
\item If it exists, the query is over and the variable has been found.
\item If the variable is not found, and the variable is referenced in a read-only or non-mutable context (i.e. right-hand side of an assignment statement), LK checks all of the \emph{parent environments} of the current environment to see if the variable exists in the environment hierarchy. If so, the variable has been found and the query is over. Otherwise, an error is thrown indicating an invalid access to an undefined variable.
\item If the variable is referenced in a mutable context (i.e. left-hand side of an assignment statement), first the global (top level) environment is searched for the variable name. If it exists already, and has been marked as a global variable upon its creation, the query is over and the global variable is used. Otherwise, a new variable is created. If the \texttt{global} keyword is used, the variable entry is created in the global environment and marked as global, and if not, it is created in the current function's local environment.
\end{enumerate}
In the script below, the variable \texttt{y} is not defined in any environment when it is first referenced in the body of the \texttt{triple} function. As a result, a new environment entry is created in the \texttt{triple} environment to hold its value, according to the lookup rules listed above.
\begin{verbatim}
triple = define (x) { y=3*x; return y; };
triple( 4 );
outln( y ); // this will fail because y is local to the triple function
\end{verbatim}
In the script below, the behavior is the same. The query for the variable \texttt{y} in the call to \texttt{triple( 4 )} will look in the environment created for the function call to \texttt{triple}, and, not finding \texttt{y}, will create a new entry named \texttt{y} because it is accessed in a mutable context. Since \texttt{y} is now local to the function call, the value in the caller's context will not be overwritten with the expression \texttt{3*x}, which in this context has a value of 12.
\begin{verbatim}
y = 24;
// ... other code ...
triple = define (x) { y=3*x; return y; };
triple( 4 );
outln( y ); // this will print 24
\end{verbatim}
\subsection{Global Values}
As we have seen, we can write useful functions using arguments and return values to pass data into and out of functions. However, sometimes there are so many inputs to a function that it becomes very cumbersome to list them all as arguments. Alternatively, you might have some variables that are used throughout your program, or are considered reference values or constants. Because of the hierarchical environment query rules, variables declared in the \emph{main} body of the script can be treated as global values. To make a global variable that can be changed from inside a function, use the \texttt{global} keyword when first assigning it in the main script. Example:
\begin{verbatim}
global counter1 = 0;
counter2 = 0;
function increment( step ) {
counter1 += step;
counter2 += step;
}
increment( 1 );
increment( 2 );
// counter1 now has value 3, counter2 is still zero
\end{verbatim}
Common programming advice is to minimize the number of globally writable variables used in a program. Sometimes they are certainly necessary, but too many can lead to mistakes that are harder to debug and correct, and can reduce the readability and maintainability of your script.
\section{Organizing Programs}
When you write a long program, it can become difficult to organize it in a single file. This section describes some ways in which LK scripts can be organized into manageable chunks of code.
\subsection{Importing Scripts}
LK lets you split a large program into manageable chunks so that you may call functions contained in other scripts. Another script can be imported using the \texttt{import} command.
\begin{verbatim}
<--- file: c:\scripts\functions.lk --->
const boltzmann = define() { return 1.3806488e-23; };
<--- file: c:\scripts\main.lk --->
import "c:/scripts/functions.lk";
outln( boltzmann() + ' J/K' );
\end{verbatim}
In this example, the main program imports another script file that contains one or more functions that can be called directly.
\subsection{Object-oriented Programming}
Using functions and tables, it is possible to create objects in LK that encapsulate functionality in a reusable unit. LK does not explicitly support classes and inheritance like true object-oriented languages like C++ and Java, but clever use of the facilities described thus far in this manual can go a long way in providing the user with powerful capabilities.
An object can be defined in LK by writing a function that acts like a \emph{constructor} to create a table with various fields. Some of the fields may be data, but others may actually be functions that operate on the data fields of the table. This is possible because functions are \emph{first-class} values.
Note the use of a special variable called \texttt{this} in functions defined as fields in the table (\texttt{area}, \texttt{perim}, and \texttt{scale\_side}). \texttt{this} is created and assigned automatically if the function is invoked using the \texttt{->} operator, discussed below.
\begin{verbatim}
make_square = define(side)
{
local obj.label = "Square";
obj.side = side;
obj.area = define() { return this.side*this.side; };
obj.perim = define() { return 4*this.side; };
obj.scale_side = define(factor)
{
this.side = this.side * factor;
return this.side;
};
return obj;
};
sq[0] = make_square(2);
sq[1] = make_square(4);
sq[2] = make_square(5);
outln( sq[0].label ); // prints "Square"
outln( sq[1].perim ); // prints <function>
outln( sq[1].side ); // prints 4
for (i=0;i<#sq;i++)
outln("sq " + i + " area: " + sq[i]->area() + " perim: " + sq[i]->perim());
outln("new side: " + sq[0]->scale_side(3));
outln("area: " + sq[0]->area());
outln("new side: " + sq[0]->scale_side(1/3));
outln("area: " + sq[0]->area());
\end{verbatim}
In this example, the \texttt{make\_square} function works like a \emph{constructor} to create a new object of type square, implemented internally as a table. The \texttt{area} and \texttt{perim} functions defined within the constructor both use a special variable called \texttt{this}, which is a reference to an object created with \texttt{make\_square}. Once all of the fields and methods of the square object are defined, the object is returned. When we say object, in fact we are simply referring internally to a table, whose fields are the members.
We then create three instances of a square as elements in the array \emph{sq}. In the \texttt{for} loop that prints information about each square in the array, the member functions are called using the \texttt{->} operator. The \texttt{->} is the \emph{thiscall} operator. It implicitly passes the left-hand side of the operator to the function, in a special local variable called \texttt{this}. For example, in the \texttt{scale\_side} method, the \texttt{this} variable is used to access the data members of the object.
\section{Input, Output, and System Access}
LK provides a variety of standard library functions to work with files, directories, and interact with other programs. So far, we have used the \texttt{in} and \texttt{outln} functions to accept user input and display program output in the runtime console window. Now we will learn about accessing files and other programs. This section describes only a small part of the large standard library of functions included in LK. Refer to the ``Standard Library Reference'' at the end of this guide for documentation on each function's capabilities, parameters, and return values.
\subsection{Working with Text Files}
To write data to a text file, use the \texttt{write\_text\_file} function. \texttt{write\_text\_file} accepts any type of variable, but most frequently you will write text stored in a string variable. For example:
\begin{verbatim}
data = "";
for (i=0;i<10;i=i+1) data = data + "Text Data Line " + to_string(i) + "\n";
ok = write_text_file( "C:/test.txt", data );
if (!ok) outln("Error writing text file.");
\end{verbatim}
Reading a text file is just as simple with the \texttt{read\_text\_file} function. If the file is empty or cannot be read, an empty text string is returned.
\begin{verbatim}
mytext = read_text_file("C:/test.txt");
out(mytext);
\end{verbatim}
While these functions offer an easy way to read an entire text file, often it is useful to be able to access it line by line. The \texttt{open}, \texttt{close}, and \texttt{read\_line} functions are for this purpose.
\begin{verbatim}
file = open("c:/test.txt", "r");
if (!file) {
outln("could not open file");
exit;
}
line="";
while ( read_line( file, line ) ) {
outln( "My Text Line='" + line + "'" );
}
close(file);
\end{verbatim}
In the example above, \texttt{file} is a number that represents the file on the disk. The \texttt{open} function opens the specified file for reading when the \texttt{"r"} parameter is given. The \texttt{read\_line} function will return \texttt{true} as long as there are more lines to be read from the file, and the text of each line is placed in the \texttt{line} variable.
Another way to access individual lines of a text file uses the \texttt{split} function to return an array of text lines. For example:
\begin{verbatim}
lines = split( read_text_file( "C:/test.txt" ), "\n" );
outln("There are " + #lines + " lines of text in the file.");
if (#lines > 5) outln("Line 5: '", lines[5], "'");
\end{verbatim}
\subsection{File System Functions}
Suppose you need to process many different files, and consequently need a list of all the files in a folder that have a specific extension. LK provides the \texttt{dir\_list} function to help out in this situation. If you want to filter for multiple file extensions, separate them with commas.
\begin{verbatim}
list = dir_list( "C:/Windows", "dll" ); // could also use "txt,dll"
outln("Found " + #list + " files that match.");
outln(join(list, "\n"));
\end{verbatim}
To list all the files in the given folder, leave the extension string empty or pass \texttt{"*"}.
Sometimes you need to be able to quickly extract the file name from the full path, or vice versa. The functions \texttt{path\_only}, \texttt{file\_only}, and \texttt{ext\_only} extract the respective sections of a file name, returning the result.
To test whether a file or directory exists, use the \texttt{dir\_exists} or \texttt{file\_exists} functions. Examples:
\begin{verbatim}
path = "C:/SAM/2010.11.9/samsim.dll";
dir = path_only( path );
name = file_only( path );
outln( "Path: " + path );
outln( "Extension: " + ext_only(path));
outln( "Name: " + name + " Exists? " + file_exists(path) );
outln( "Dir: " + dir + " Exists? " + dir_exists(dir));
\end{verbatim}
%\subsection{Standard Dialogs}
%To facilitate writing more interactive scripts, LK includes various dialog functions. We have already used the \texttt{notice} and \texttt{yesno} functions in previous examples.
%The \texttt{choose\_file} function pops up a file selection dialog to the user, prompting them to select a file. \texttt{choose\_file} will accept three optional parameters: the path of the initial directory to show in the dialog, a wildcard filter like \texttt{"Text Files (*.txt)"} to limit the types of files shown in the list, and a dialog caption to display on the window. Example:
%\begin{verbatim}
%file = choose_file("c:/SAM", "Choose a DLL file", "Dynamic Link Libraries (*.dll)" );
%if (file == "") {
% notice("You did not choose a file, quitting.");
% exit;
%} else {
% if ( ! yesno("Do you want to load:\n\n" + file)) exit;
%
% // proceed to load .dll file
% outln("Loading " + file);
%}
%\end{verbatim}
\subsection{Calling Other Programs}
Suppose you have a program on your computer that reads an input file, makes some complicated calculations, and writes an output file. The \texttt{system} function executes an external program, and returns the integer result code, after waiting for the called program to finish. Examples:
\begin{verbatim}
system("notepad.exe"); // run notepad and wait
\end{verbatim}
Each program runs in a folder that the program refers to as the \emph{working directory}. Sometimes you may need to switch the working directory to conveniently access other files, or to allow an external program to run correctly. The \texttt{cwd} function either gets or sets the current working directory.
\begin{verbatim}
working_dir = cwd(); // get the current working directory
cwd( "/usr/local/bin" ); // change the working directory
outln("cwd=" + cwd() );
cwd(working_dir); // change it back to the original one
outln("cwd=" + cwd() );
\end{verbatim}
\section{Writing Extensions}
LK provides an application programming interface (API) for writing libraries of functions that can be loaded at runtime into the scripting engine. Dynamically loaded libraries can be used to provide very high performance computations, ability to create linkages to other software packages, and to integrate existing codes written in other languages. The C language extension API is fully defined in the header file \texttt{lk\_invoke.h}, and does not require the extension to be linked to any other LK library or otherwise. As a result, programming, compiling, and linking a standalone LK extension is very straightforward.
\emph{Note:} Currently, the LK extension API does not support passing Unicode text back and forth between an extension and the core LK engine, even if the core engine is compiled utilizing a Unicode string class.
\subsection{Basic Organization}
The examples in this manual show how to construct an LK extension in C or C++. First, the \texttt{lk\_invoke.h} header file must be included in the C source file. This defines several functions and macros to assist writing functions that can be dynamically loaded by LK. In the very simple example extension below, a single function is defined that returns a constant value, the speed of light.
\begin{verbatim}
#include <lk_invoke.h>
LK_FUNCTION( speed_of_light )
{
LK_DOCUMENT( "speed_of_light", "Returns the speed of light in (m/s).", "(none):number" );
lk_return_number( 299792458 );
}
LK_BEGIN_EXTENSION()
speed_of_light
LK_END_EXTENSION()
\end{verbatim}
To define a new function, use the \texttt{LK\_FUNCTION( name )} macro, called with the name of the function. This macro expands to the C code \texttt{void name( struct \_\_lk\_invoke\_t *lk )}.
The first line of any extension function must be the documentation specifier, defined by the macro \texttt{LK\_DOCUMENT( name, description, signature )}. This macro takes three arguments: the name of the function by which LK will refer to it, a text description of what it does, and a \emph{signature} that shows the proper parameters and return value. For functions that may take multiple forms (i.e. different behavior and signatures), the \texttt{LK\_DOCUMENT2(...)} and \texttt{LK\_DOCUMENT3(...)} macros exist. These macros take multiple descriptions and signatures, and are fully explained in the \texttt{lk\_invoke.h} header file.
To make the functions visible to the LK engine, they must be listed between the \texttt{LK\_BEGIN\_EXTENSION()} and \texttt{LK\_END\_EXTENSION()} macros. Otherwise, the functions will not be properly exported from the dynamic library. These macros may only appear once within the source code of an extension library.
\subsection{Working with Variables}
The extension API defines several macros for easily working with function arguments and return values. A variable is referenced by the opaque pointer type \texttt{lk\_var\_t}, and is passed as an argument to the various functions to retrieve and set values. The \texttt{sum} example below shows how to work with function arguments and return values explicitly, as well as how to signal an error if necessary.
\begin{verbatim}
LK_FUNCTION( sum )
{
LK_DOCUMENT( "sum", "Sums up all the parameters passed.", "(...):number" );
double val = 0;
int i;
int count = lk_arg_count();
if (count < 1)
{
lk_error("sum() must be provided more than zero arguments");
return;
}
for (i=0;i<count;i++)
{
lk_var_t x = lk_arg( i );
val = val + lk_as_number( x );
}
lk_var_t ret = lk_result();
lk_set_number( ret, val );
}
\end{verbatim}
The extension API also provides facilities for working with arrays and tables. In the example below, the array passed in is converted to a table with named indices.
\begin{verbatim}
LK_FUNCTION( tabulate )
{
LK_DOCUMENT( "tabulate", "Converts an array to a table.", "(array):table");
lk_var_t arr, ret;
int len, i;
char key[64];
if (lk_arg_count() != 1
|| lk_type( lk_arg(0) ) != LK_ARRAY)
{
lk_error("tabulate() requires one array parameter");
return;
}
ret = lk_result();
lk_make_table(ret);
arr = lk_arg(0);
len = lk_length( arr );
for (i=0;i<len;i++)
{
sprintf(key, "item%d", i);
lk_var_t item = lk_index(arr, i);
int ty = lk_type(item);
switch(ty)
{
case LK_NUMBER:
lk_table_set_number( ret, key, lk_as_number( item ) );
break;
case LK_STRING:
lk_table_set_string( ret, key, lk_as_string( item ) );
break;
case LK_ARRAY:
case LK_TABLE:
lk_error("arrays and tables not currently copied. exercise for the user!");
return;
}
}
}
\end{verbatim}
Running the following LK script code after loading a compiled extension with the \texttt{tabulate()} function above shows that the array has been converted to a table with items \texttt{item0}, \texttt{item1}, and so on.
\begin{verbatim}
arr = [ 1, 5, 'h3llo', '213', 451.4, -1 ];
x = tabulate(arr);
outln(x);
\end{verbatim}
Output:
\begin{verbatim}
{ item2=h3llo item3=213 item0=1 item1=5 item4=451.4 item5=-1 }
\end{verbatim}
For additional variable manipulation macros, refer to the \texttt{lk\_invoke.h} header file. Additional advanced facilities not documented in this manual are also available for implementing callback-type functions and object referencing.
\subsection{Compiling and Loading}
Using the MinGW compiler toolchain on Windows, it is very straightforward to create an LK extension library. Simply issue the command below at the Windows command prompt (\texttt{cmd.exe} from Start/Run). These instructions assume that you are using a 32-bit version of \texttt{gcc} and the LK engine that you are loading the module from is also 32-bit.
\begin{verbatim}
c:\> gcc -shared -o testextension.dll testextension.c
\end{verbatim}
To load this extension at runtime into an LK script, use the standard library function \texttt{load\_extension()}, per below. If the extension loads successfully, any of the functions defined and exported in the extension can be invoked just like any other built-in or user-defined LK function.
\begin{verbatim}
ok = load_extension( "c:/testextension.dll" );
if (!ok) {
// failed to load extension
exit;
}
c = speed_of_light();
\end{verbatim}
The dynamic extension library will be unloaded when the script finishes.
\section{Standard Library Reference}
The standard library documented here outlines built-in functions that are likely to be available in any implementation of the LK scripting language. The input/output functions referenced in this manual (\texttt{in(), out(), outln()}) are not part of the standard library because the LK host environment defines how these functions are manifested. It is assumed, however, that most host environments will include these ``standard'' I/O functions as well.
\input{lk_basic.tex}
\input{lk_sysio.tex}
\input{lk_str.tex}
\input{lk_math.tex}
\end{document} |
import numpy as np  # kept from the original script (not used below)
import cv2  # modern opencv-python exposes the API as plain `cv2`; the old
            # `import cv2.cv2 as cv2` workaround breaks on current releases

# Locate the score-overlay template inside an extracted video frame using
# ORB features: detect keypoints in both images, match the binary
# descriptors, and display the strongest matches side by side.

# Template (what to find) and target frame (where to look for it).
template = cv2.imread('score_overlay_2021_1280.png')
target = cv2.imread('2021/frame-00570.jpg')
# imread returns None (no exception) on failure -- fail loudly instead of
# crashing later inside detectAndCompute with a confusing error.
if template is None or target is None:
    raise FileNotFoundError("could not read template or target image")

# ORB detector/descriptor (produces binary descriptors).
algo = cv2.ORB_create()

# Detect the keypoints and compute the descriptors for both images.
template_kp, template_des = algo.detectAndCompute(template, None)
target_kp, target_des = algo.detectAndCompute(target, None)

# ORB descriptors are binary, so Hamming distance is the appropriate
# metric; the BFMatcher default (NORM_L2) is meant for float descriptors
# such as SIFT/SURF.  crossCheck=True keeps only mutually-best matches.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = matcher.match(template_des, target_des)

# match() returns matches in arbitrary order; sort by distance so the
# slice below really is the 20 *best* matches, not 20 random ones.
matches = sorted(matches, key=lambda m: m.distance)

# (Alternative approach, removed from the original experiments here:
# matcher.knnMatch(..., k=2) followed by Lowe's 0.75 ratio test and
# cv2.drawMatchesKnn.)

# Draw the best matches between the two images and show the result.
final_img = cv2.drawMatches(template, template_kp,
                            target, target_kp, matches[:20], None)
final_img = cv2.resize(final_img, (1000, 650))
cv2.imshow("Matches", final_img)
cv2.waitKey()
import data.fintype.basic
import data.real.basic
-- ⌊x + ar⌋ = ⌊x⌋ + az whenever `ar` is the real coercion of the integer
-- `az`.  The coercion is abstracted into the implicit hypothesis `h` so
-- the lemma can fire (via `simp`) on goals where the cast has already
-- been reduced to a concrete real.
@[simp]
theorem floor_add_one_real
(x : ℝ)
(az : ℤ)
{ar : ℝ}
{h : coe az = ar}
: floor (x + (ar : ℝ)) = (floor x) + az
:= begin
rw h.symm,
exact floor_add_int x az,
end
-- Any element of a floor ring lying in [0, 1) has floor 0; this is the
-- `n = 0` specialisation of the library lemma `floor_eq_iff`.
theorem floor_eq_iff'
{T : Type*} [linear_ordered_ring T] [floor_ring T]
(r : T)
(ru : r < 1)
(rl : 0 ≤ r)
: floor r = 0
:= begin
have h1 : ((0 : ℤ) : T) ≤ r ∧ r < (0 + 1), by {refine ⟨rl, _⟩, finish},
exact (floor_eq_iff.mpr h1),
end
-- Real remainder modulo 2: `mod_two x` is the representative of `x` in
-- the fundamental domain [0, 2).  Noncomputable because `floor` on ℝ is.
noncomputable def mod_two (x : ℝ) : ℝ := x - 2 * (floor (x / 2))
-- `mod_two` is invariant under shifting by any even integer 2k; the calc
-- chain pushes the shift through the floor via `floor_add_one_real`.
@[simp]
theorem mod_shift_even_eq_mod
(x : ℝ)
(k : ℤ)
: mod_two (x+2*k) = mod_two x
:= begin
unfold mod_two,
norm_cast,
calc x + ↑(2*k) - ↑(2 * ⌊(x + ↑(2*k)) / 2⌋)
= x + ↑(2*k) - ↑(2 * ⌊x / 2 + (↑(2*k) / 2)⌋) : by rw add_div x ↑(2*k) 2
... = x + ↑(2*k) - ↑(2 * ⌊x / 2 + ((2*↑k) / 2)⌋) : by norm_num
... = x + ↑(2*k) - ↑(2 * ⌊x / 2 + ((↑k*2) / 2)⌋) : by rw mul_comm 2 (↑k : ℝ)
... = x + ↑(2*k) - ↑(2 * ⌊x / 2 + (↑k : ℝ)⌋) : by {
rw div_eq_mul_inv ((↑k*2) : ℝ) 2,
rw mul_assoc (↑k : ℝ) 2 2⁻¹,
have inverse_cancel : 2 * (2 : ℝ)⁻¹ = 1 := half_add_self 1,
rw inverse_cancel,
rw mul_one (↑k : ℝ),
}
... = x + ↑(2*k) - ↑(2 * (⌊x / 2⌋ + k)) : by rw @floor_add_one_real (x/2) k _ rfl
... = x + ↑(2*k) - ↑((2 * ⌊x / 2⌋) + (2*k : ℤ)) : by rw mul_add 2 ⌊(x / 2)⌋ k
... = x - ↑(2 * ⌊x / 2⌋) : by { rw int.cast_add (2 * ⌊x / 2⌋) (2 * k), ring, }
end
-- Period-2 special case of the previous lemma (k = 1).
@[simp]
theorem mod_plus_two_eq_id
(x : ℝ)
: mod_two (x+2) = mod_two x
:= begin
have hhh : mod_two (x+2) = mod_two (x + 2*(1 : ℤ)) := by { refine congr rfl _, finish, },
rw hhh,
exact mod_shift_even_eq_mod x (1 : ℤ),
end
-- On the fundamental domain [0, 2), `mod_two` is the identity:
-- there a/2 lies in [0, 1), so its floor is 0.
@[simp]
theorem bounding_id
(a : ℝ)
(au : a < 2)
(al : 0 ≤ a)
: mod_two a = a
:= begin
calc a - 2 * ↑⌊a / 2⌋
= a - 2 * ↑(0) : by {
have hu : (a / 2) < 1, by {refine (div_lt_one _).mpr au, exact zero_lt_two},
have hl : 0 ≤ (a / 2), by {refine div_nonneg al _, exact zero_le_two},
rw floor_eq_iff' (a/2) hu hl,
exact rfl,
}
... = a - 2 * 0 : rfl
... = a : by linarith,
end
-- `mod_two` always lands strictly below 2.  (Despite the `le` in the
-- name, the bound proved is the strict inequality `< 2`.)
theorem mod_two_le_2
(x : ℝ)
: mod_two x < 2
:= begin
calc x - 2 * ↑⌊x / 2⌋
< x - 2 * ((x / 2) - 1 ) : by { norm_num, exact sub_one_lt_floor (x / 2), }
... = 2 : by linarith,
end
-- ...and never below 0, since ⌊x/2⌋ ≤ x/2.
theorem mod_two_geq_0
(x : ℝ)
: mod_two x ≥ 0
:= begin
calc x - 2 * ↑⌊x / 2⌋
≥ x - 2 * (x / 2) : by { norm_num, exact floor_le (x / 2), }
... = 0 : by linarith
end
-- Applying `mod_two` twice changes nothing: the first application lands
-- in [0, 2), where `mod_two` is the identity (`bounding_id`).
theorem mod_two_idempotent
(x : ℝ)
: mod_two (mod_two x) = mod_two x
:= bounding_id (mod_two x) (mod_two_le_2 x) (mod_two_geq_0 x)
-- Reducing the first summand modulo 2 before adding does not change the
-- result modulo 2.  The calc chain moves the `- 2⌊a/2⌋` shift outside the
-- floor using `floor_sub_int`.
theorem mod_two_addition
(a b : ℝ)
: mod_two ((mod_two a) + b) = mod_two (a+b)
:= begin
unfold mod_two,
calc a - 2 * ↑⌊a / 2⌋ + b - 2 * ↑⌊(a - 2 * ↑⌊a / 2⌋ + b) / 2⌋
= a - 2 * ↑⌊a / 2⌋ + b - 2 * ↑⌊(a + b - 2 * ↑⌊a / 2⌋) / 2⌋ : by rw sub_add_eq_add_sub a (2 * ↑⌊a / 2⌋) b
... = a - 2 * ↑⌊a / 2⌋ + b - 2 * ↑⌊(a + b)/2 - (2 * ↑⌊a / 2⌋) / 2⌋ : by rw sub_div (a + b) (2 * ↑⌊a / 2⌋) 2
... = a - 2 * ↑⌊a / 2⌋ + b - 2 * ↑⌊(a + b)/2 - ↑⌊a / 2⌋⌋ : by {
rw mul_comm (2:ℝ) (↑⌊a / 2⌋),
have hhh : (∀(x y : ℝ), y ≠ 0 → (x*y)/y = x) := mul_div_cancel,
rw hhh (↑⌊a / 2⌋) 2 two_ne_zero,
}
... = a - 2 * ↑⌊a / 2⌋ + b - 2 * ↑(⌊(a + b)/2⌋ - ⌊a / 2⌋) : by rw floor_sub_int ((a + b)/2) ⌊a / 2⌋
... = a - 2 * ↑⌊a / 2⌋ + b - 2 * (↑(⌊(a + b)/2⌋) - ↑⌊a / 2⌋) : by rw int.cast_sub ⌊(a + b)/2⌋ ⌊a / 2⌋
... = a + b - 2 * ↑⌊(a + b) / 2⌋ : by linarith
end
-- Subtraction analogue of `mod_two_addition`, obtained by rewriting
-- `a - b` as `a + (-b)`.
theorem mod_two_subtraction
(a b : ℝ)
: mod_two ((mod_two a) - b) = mod_two (a-b)
:= begin
rw sub_eq_add_neg a b,
rw ←mod_two_addition,
rw ←sub_eq_add_neg (mod_two a) b,
end
-- When the residue of x lies in [0, 1), subtracting 1 wraps around:
-- mod_two (x - 1) = mod_two x + 1.  Uses the period-2 shift to turn
-- `- 1` into `+ 1`, then `bounding_id` since mod_two x + 1 ∈ [0, 2).
theorem mod_two_xsub_one_eq_mod_two_x_plus_one
(x : ℝ)
(h : mod_two x < 1)
: mod_two (x - 1) = (mod_two x) + 1
:= begin
calc mod_two (x - 1)
= mod_two (mod_two x - 1) : by rw ←mod_two_subtraction x 1
... = mod_two (mod_two x - 1 + 2) : by {rw mod_plus_two_eq_id (mod_two x - 1), }
... = mod_two (mod_two x + 1) : by { ring, }
... = mod_two x + 1 : begin
refine bounding_id (mod_two x + 1) _ _,
linarith,
linarith [mod_two_geq_0 x],
end
end
-- The residues of x and x - 1 always differ by exactly 1 in absolute
-- value.  Case split on whether mod_two x ≥ 1: if so, mod_two (x-1) is
-- simply mod_two x - 1 (no wrap-around); otherwise it wraps to
-- mod_two x + 1 (previous theorem), and in both cases |difference| = 1.
theorem abs_of_mod_two_sub_mod_two_of_sub_one_eq_one
(x : ℝ)
: abs (mod_two x - mod_two (x - 1)) = 1
:= begin
cases decidable.em (mod_two x ≥ 1) with mod_two_x_ge_1 ne_mod_two_x_ge_1, by {
rw ←mod_two_subtraction x 1,
have in_bounded_u : (mod_two x) - 1 < 2 := begin
have := mod_two_le_2 x,
linarith,
end,
have in_bounded_l : 0 ≤ (mod_two x) - 1, by linarith,
rw bounding_id (mod_two x - 1) in_bounded_u in_bounded_l,
calc abs (mod_two x - (mod_two x - 1))
= abs 1 : by { refine congr rfl _, linarith, }
... = 1 : abs_one,
},
{
have mod_x_le_1: mod_two x < 1 := not_le.mp ne_mod_two_x_ge_1,
rw mod_two_xsub_one_eq_mod_two_x_plus_one x mod_x_le_1,
calc abs (mod_two x - (mod_two x + 1))
= abs (-1) : by { refine congr rfl _, linarith, }
... = abs 1 : abs_neg 1
... = 1 : abs_one,
}
end
-- For a already in [0, 2), a bound on its residue is a bound on a itself,
-- because there mod_two a = a (`bounding_id`).
theorem bounding_mod_two
(a : ℝ)
(au : a < 2)
(al : 0 ≤ a)
(ha : mod_two a < 1)
: a < 1
:= begin
rw (bounding_id a au al) at ha,
exact ha,
end
|
/*
 * Instantiation unit for the GSL block routines with `double` elements.
 * block_source.c is written against generic type macros: templates_on.h
 * expands those macros according to the BASE_DOUBLE selector defined just
 * before the inclusion, and templates_off.h clears them again afterwards,
 * leaving the macro namespace clean for further instantiations.
 */
#include <gsl/gsl_errno.h>
#include <gsl/block/gsl_block.h>
#define BASE_DOUBLE
#include <gsl/templates_on.h>
#include <gsl/block/block_source.c>
#include <gsl/templates_off.h>
#undef BASE_DOUBLE
|
-- `divisor n m` states that n is a multiple of m, i.e. m divides n.
-- NOTE: the infix `n | m` declared below therefore reads "m divides n" --
-- the *reverse* of Mathlib's standard `m ∣ n` notation.
def divisor (n m : Nat) : Prop := ∃ c, n = m*c
-- A number is even when it is twice some natural d.
def even (n : Nat) : Prop := ∃ d, n=2*d
infixl:40 " | " => divisor
-- If b*c divides a, then both b and c divide a individually
-- (witnesses c*x and b*x, obtained by reassociating the product).
example : (a | b*c) → a | b ∧ a | c := by
intro h
apply Exists.elim h
intro x hx
apply And.intro <;> rw [Nat.mul_assoc] at hx
exact Exists.intro (c*x) hx
rw [Nat.mul_left_comm] at hx
exact Exists.intro (b*x) hx
-- n and m are "relatively prime" here when neither equals 1 and their only
-- common divisor is 1.  (Recall `n | c` means c divides n; see `divisor`.)
def rel_prime (n m : Nat) : Prop := (¬ n = 1 ∧ ¬ m = 1) ∧ ∀ c, (n | c) ∧ (m | c) → c = 1
-- Two even numbers cannot be relatively prime: 2 would be a common
-- divisor, so rel_prime would force 2 = 1.
example (a b : Nat) : even a → rel_prime a b → ¬ even b := by
intro h1 h2 h3
apply Exists.elim h1
intro x hx
apply Exists.elim h3
intro y hy
have ab_even : a | 2 ∧ b | 2 := And.intro (Exists.intro x hx) (Exists.intro y hy)
have : 2 = 1 := (h2.right 2) ab_even
contradiction
-- Relatively prime numbers are distinct: if n = m, then m is a common
-- divisor of both, forcing m = 1 and contradicting rel_prime's ¬ m = 1.
theorem rel_prime_not_eq (n m : Nat) : rel_prime n m → ¬ n = m := by
intro h1 h2
have h4 : n | m ∧ m | m → m = 1 := h1.right m
apply h1.left.right
apply h4
apply And.intro
rw [h2]
apply Exists.intro 1
exact Eq.symm (Nat.mul_one m)
apply Exists.intro 1
exact Eq.symm (Nat.mul_one m)
------------------------------------------------------------------------
-- A parametrised coinductive definition that can be used to define
-- strong and weak bisimilarity as well as expansion
------------------------------------------------------------------------
{-# OPTIONS --sized-types #-}
open import Prelude
open import Labelled-transition-system
module Bisimilarity.General
{ℓ}
(lts : LTS ℓ)
(open LTS lts)
(_[_]↝₁_ _[_]↝₂_ : Proc → Label → Proc → Type ℓ)
(⟶→↝₁ : ∀ {p μ q} → p [ μ ]⟶ q → p [ μ ]↝₁ q)
(⟶→↝₂ : ∀ {p μ q} → p [ μ ]⟶ q → p [ μ ]↝₂ q)
where
open import Equality.Propositional as Eq hiding (Extensionality)
open import Logical-equivalence using (_⇔_)
open import Prelude.Size
open import Bijection equality-with-J as Bijection using (_↔_)
open import Function-universe equality-with-J hiding (id; _∘_)
open import H-level equality-with-J
open import H-level.Closure equality-with-J
open import Bisimilarity.Step lts _[_]↝₁_ _[_]↝₂_ as Step public
using (StepC)
open import Indexed-container hiding (⟨_⟩; Bisimilarity)
open import Indexed-container.Combinators hiding (id; _∘_)
open import Relation
import Similarity.Step lts _[_]↝₁_ as Step₁
import Similarity.Step lts _[_]↝₂_ as Step₂
open Indexed-container public using (force)
------------------------------------------------------------------------
-- Bisimilarity
-- Bisimilarity. Note that this definition is small.
-- Bisimilarity at size i is the greatest fixpoint (ν) of the StepC
-- container; the primed variant ν′ is the delayed form, eliminated with
-- `force` (see `reflexive-∼′` below).
infix 4 _∼_ _∼′_ [_]_∼_ [_]_∼′_
Bisimilarity : Size → Rel₂ ℓ Proc
Bisimilarity = ν StepC
Bisimilarity′ : Size → Rel₂ ℓ Proc
Bisimilarity′ = ν′ StepC
-- Curried infix notations for (delayed) bisimilarity at a given size.
[_]_∼_ : Size → Proc → Proc → Type ℓ
[_]_∼_ i = curry (Bisimilarity i)
[_]_∼′_ : Size → Proc → Proc → Type ℓ
[_]_∼′_ i = curry (Bisimilarity′ i)
-- Fully developed (size ∞) bisimilarity.
_∼_ : Proc → Proc → Type ℓ
_∼_ = [ ∞ ]_∼_
_∼′_ : Proc → Proc → Type ℓ
_∼′_ = [ ∞ ]_∼′_
-- Bisimilarity is reflexive.
-- Both challenge directions are answered by the transition itself
-- (injected via ⟶→↝₁/⟶→↝₂), coinductively reusing reflexivity.
mutual
reflexive-∼ : ∀ {p i} → [ i ] p ∼ p
reflexive-∼ =
StepC.⟨ (λ p⟶p′ → _ , ⟶→↝₁ p⟶p′ , reflexive-∼′)
, (λ q⟶q′ → _ , ⟶→↝₂ q⟶q′ , reflexive-∼′)
⟩
reflexive-∼′ : ∀ {p i} → [ i ] p ∼′ p
force reflexive-∼′ = reflexive-∼
-- Propositional equality implies bisimilarity.
≡⇒∼ : ∀ {p q} → p ≡ q → p ∼ q
≡⇒∼ refl = reflexive-∼
-- Functions that can be used to aid the instance resolution
-- mechanism.
infix -2 ∼:_ ∼′:_
∼:_ : ∀ {i p q} → [ i ] p ∼ q → [ i ] p ∼ q
∼:_ = id
∼′:_ : ∀ {i p q} → [ i ] p ∼′ q → [ i ] p ∼′ q
∼′:_ = id
------------------------------------------------------------------------
-- Bisimilarity for bisimilarity
-- Bisimilarity of bisimilarity proofs.
-- These are the curried forms of ν-bisimilar/ν′-bisimilar for StepC:
-- a notion of "equality" between two proofs of p ∼ q, indexed by size.
infix 4 [_]_≡_ [_]_≡′_
[_]_≡_ : ∀ {p q} → Size → (_ _ : ν StepC ∞ (p , q)) → Type ℓ
[_]_≡_ i = curry (ν-bisimilar i)
[_]_≡′_ : ∀ {p q} → Size → (_ _ : ν′ StepC ∞ (p , q)) → Type ℓ
[_]_≡′_ i = curry (ν′-bisimilar i)
-- An alternative characterisation of bisimilarity of bisimilarity
-- proofs.
-- Two proofs of p ∼ q are bisimilar exactly when, for every challenge
-- transition, they answer with the same target process, the same answer
-- transition (up to transport along the target equality), and coinductively
-- bisimilar continuation proofs -- stated for both challenge directions.
-- The proof is an equational chain of bijections; it assumes function
-- extensionality (`ext`).
[]≡↔ :
Eq.Extensionality ℓ ℓ →
∀ {p q} {i : Size} (p∼q₁ p∼q₂ : ν StepC ∞ (p , q)) →
[ i ] p∼q₁ ≡ p∼q₂
↔
(∀ {p′ μ} (p⟶p′ : p [ μ ]⟶ p′) →
let q′₁ , q⟶q′₁ , p′∼q′₁ = StepC.left-to-right p∼q₁ p⟶p′
q′₂ , q⟶q′₂ , p′∼q′₂ = StepC.left-to-right p∼q₂ p⟶p′
in ∃ λ (q′₁≡q′₂ : q′₁ ≡ q′₂) →
subst (q [ μ ]↝₁_) q′₁≡q′₂ q⟶q′₁ ≡ q⟶q′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (p′ ,_)) q′₁≡q′₂ p′∼q′₁ ≡′ p′∼q′₂)
×
(∀ {q′ μ} (q⟶q′ : q [ μ ]⟶ q′) →
let p′₁ , p⟶p′₁ , p′∼q′₁ = StepC.right-to-left p∼q₁ q⟶q′
p′₂ , p⟶p′₂ , p′∼q′₂ = StepC.right-to-left p∼q₂ q⟶q′
in ∃ λ (p′₁≡p′₂ : p′₁ ≡ p′₂) →
subst (p [ μ ]↝₂_) p′₁≡p′₂ p⟶p′₁ ≡ p⟶p′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (_, q′)) p′₁≡p′₂ p′∼q′₁ ≡′ p′∼q′₂)
-- Chain: unfold ν-bisimilarity, split the ⟷-composed container into its
-- two components, then apply the per-direction Step characterisations.
[]≡↔ ext {p} {q} {i} p∼q₁@(s₁ , f₁) p∼q₂@(s₂ , f₂) =
[ i ] p∼q₁ ≡ p∼q₂ ↝⟨ ν-bisimilar↔ ext p∼q₁ p∼q₂ ⟩
⟦ StepC₁ ⟷ StepC₂ ⟧₂ (ν′-bisimilar i) (p∼q₁ , p∼q₂) ↝⟨ ⟦⟷⟧₂↔ ext StepC₁ StepC₂ (ν′-bisimilar i) p∼q₁ p∼q₂ ⟩
⟦ StepC₁ ⟧₂ (ν′-bisimilar i)
((proj₁ s₁ , f₁ ∘ inj₁) , (proj₁ s₂ , f₂ ∘ inj₁))
×
⟦ StepC₂ ⟧₂ (ν′-bisimilar i)
( (proj₂ s₁ , λ p → f₁ (inj₂ (_ , refl , p)))
, (proj₂ s₂ , λ p → f₂ (inj₂ (_ , refl , p)))
) ↝⟨ Step₁.⟦StepC⟧₂↔ ext (ν′-bisimilar i) (proj₁ s₁ , f₁ ∘ inj₁)
(proj₁ s₂ , f₂ ∘ inj₁)
×-cong
Step₂.⟦StepC⟧₂↔ ext (ν′-bisimilar i)
(proj₂ s₁ , λ p → f₁ (inj₂ (_ , refl , p)))
(proj₂ s₂ , λ p → f₂ (inj₂ (_ , refl , p))) ⟩
(∀ {p′ μ} (p⟶p′ : p [ μ ]⟶ p′) →
let q′₁ , q⟶q′₁ , p′∼q′₁ =
Step₁.StepC.challenge (proj₁ s₁ , f₁ ∘ inj₁) p⟶p′
q′₂ , q⟶q′₂ , p′∼q′₂ =
Step₁.StepC.challenge (proj₁ s₂ , f₂ ∘ inj₁) p⟶p′
in ∃ λ (q′₁≡q′₂ : q′₁ ≡ q′₂) →
subst (q [ μ ]↝₁_) q′₁≡q′₂ q⟶q′₁ ≡ q⟶q′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (p′ ,_)) q′₁≡q′₂ p′∼q′₁ ≡′ p′∼q′₂)
×
((∀ {q′ μ} (q⟶q′ : q [ μ ]⟶ q′) →
let p′₁ , p⟶p′₁ , p′∼q′₁ =
Step₂.StepC.challenge
(proj₂ s₁ , λ p → f₁ (inj₂ (_ , refl , p))) q⟶q′
p′₂ , p⟶p′₂ , p′∼q′₂ =
Step₂.StepC.challenge
(proj₂ s₂ , λ p → f₂ (inj₂ (_ , refl , p))) q⟶q′
in ∃ λ (p′₁≡p′₂ : p′₁ ≡ p′₂) →
subst (p [ μ ]↝₂_) p′₁≡p′₂ p⟶p′₁ ≡ p⟶p′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (_, q′)) p′₁≡p′₂ p′∼q′₁ ≡′ p′∼q′₂)) ↔⟨⟩
(∀ {p′ μ} (p⟶p′ : p [ μ ]⟶ p′) →
let q′₁ , q⟶q′₁ , p′∼q′₁ = StepC.left-to-right p∼q₁ p⟶p′
q′₂ , q⟶q′₂ , p′∼q′₂ = StepC.left-to-right p∼q₂ p⟶p′
in ∃ λ (q′₁≡q′₂ : q′₁ ≡ q′₂) →
subst (q [ μ ]↝₁_) q′₁≡q′₂ q⟶q′₁ ≡ q⟶q′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (p′ ,_)) q′₁≡q′₂ p′∼q′₁ ≡′ p′∼q′₂)
×
(∀ {q′ μ} (q⟶q′ : q [ μ ]⟶ q′) →
let p′₁ , p⟶p′₁ , p′∼q′₁ = StepC.right-to-left p∼q₁ q⟶q′
p′₂ , p⟶p′₂ , p′∼q′₂ = StepC.right-to-left p∼q₂ q⟶q′
in ∃ λ (p′₁≡p′₂ : p′₁ ≡ p′₂) →
subst (p [ μ ]↝₂_) p′₁≡p′₂ p⟶p′₁ ≡ p⟶p′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (_, q′)) p′₁≡p′₂ p′∼q′₁ ≡′ p′∼q′₂) □
where
open Container
StepC₁ = Step₁.StepC
StepC₂ = Step₂.StepC
-- Convenience interface to []≡↔ for a fixed pair of bisimilarity proofs:
-- a "constructor" packaging the two challenge conditions into a proof of
-- [ i ] p∼q₁ ≡ p∼q₂, and the two matching "projections".
module Bisimilarity-of-∼
(ext : Eq.Extensionality ℓ ℓ)
{p q} {i : Size}
(p∼q₁ p∼q₂ : ν StepC ∞ (p , q))
where
-- A "constructor".
⟨_,_,_,_,_⟩ :
(∀ {p′ μ} (p⟶p′ : p [ μ ]⟶ p′) →
let q′₁ , q⟶q′₁ , p′∼q′₁ = StepC.left-to-right p∼q₁ p⟶p′
q′₂ , q⟶q′₂ , p′∼q′₂ = StepC.left-to-right p∼q₂ p⟶p′
in ∃ λ (q′₁≡q′₂ : q′₁ ≡ q′₂) →
subst (q [ μ ]↝₁_) q′₁≡q′₂ q⟶q′₁ ≡ q⟶q′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (p′ ,_)) q′₁≡q′₂ p′∼q′₁ ≡′ p′∼q′₂) →
(∀ {q′ μ} (q⟶q′ : q [ μ ]⟶ q′) →
let p′₁ , p⟶p′₁ , p′∼q′₁ = StepC.right-to-left p∼q₁ q⟶q′
p′₂ , p⟶p′₂ , p′∼q′₂ = StepC.right-to-left p∼q₂ q⟶q′
in ∃ λ (p′₁≡p′₂ : p′₁ ≡ p′₂) →
subst (p [ μ ]↝₂_) p′₁≡p′₂ p⟶p′₁ ≡ p⟶p′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (_, q′)) p′₁≡p′₂ p′∼q′₁ ≡′ p′∼q′₂) →
[ i ] p∼q₁ ≡ p∼q₂
⟨_,_,_,_,_⟩ = curry (_↔_.from ([]≡↔ ext p∼q₁ p∼q₂))
-- Some "projections".
left-to-right :
[ i ] p∼q₁ ≡ p∼q₂ →
∀ {p′ μ} (p⟶p′ : p [ μ ]⟶ p′) →
let q′₁ , q⟶q′₁ , p′∼q′₁ = StepC.left-to-right p∼q₁ p⟶p′
q′₂ , q⟶q′₂ , p′∼q′₂ = StepC.left-to-right p∼q₂ p⟶p′
in ∃ λ (q′₁≡q′₂ : q′₁ ≡ q′₂) →
subst (q [ μ ]↝₁_) q′₁≡q′₂ q⟶q′₁ ≡ q⟶q′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (p′ ,_)) q′₁≡q′₂ p′∼q′₁ ≡′ p′∼q′₂
left-to-right = proj₁ ∘ _↔_.to ([]≡↔ ext p∼q₁ p∼q₂)
right-to-left :
[ i ] p∼q₁ ≡ p∼q₂ →
∀ {q′ μ} (q⟶q′ : q [ μ ]⟶ q′) →
let p′₁ , p⟶p′₁ , p′∼q′₁ = StepC.right-to-left p∼q₁ q⟶q′
p′₂ , p⟶p′₂ , p′∼q′₂ = StepC.right-to-left p∼q₂ q⟶q′
in ∃ λ (p′₁≡p′₂ : p′₁ ≡ p′₂) →
subst (p [ μ ]↝₂_) p′₁≡p′₂ p⟶p′₁ ≡ p⟶p′₂
×
[ i ] subst (ν′ StepC ∞ ∘ (_, q′)) p′₁≡p′₂ p′∼q′₁ ≡′ p′∼q′₂
right-to-left = proj₂ ∘ _↔_.to ([]≡↔ ext p∼q₁ p∼q₂)
-- A statement of extensionality for bisimilarity.
Extensionality : Type ℓ
Extensionality = ν′-extensionality StepC
-- This form of extensionality can be used to derive another form
-- (in the presence of extensionality for functions):
-- bisimilar bisimilarity proofs are propositionally equal.
extensionality :
Eq.Extensionality ℓ ℓ →
Extensionality →
∀ {p q} {p∼q₁ p∼q₂ : ν StepC ∞ (p , q)} →
[ ∞ ] p∼q₁ ≡ p∼q₂ → p∼q₁ ≡ p∼q₂
extensionality ext ν-ext = ν-extensionality ext ν-ext
open StepC public using (⟨_,_⟩; left-to-right; right-to-left)
|
module Data.Hashable
%access public
%default total
-- Minimal hashing interface (Idris 1 `class`/`instance` syntax):
-- anything Hashable can be summarised as a Nat.
class Hashable a where
hash : a -> Nat
-- Hash into Fin (S n) by reducing modulo (S n).  The modulus is strictly
-- positive, so natToFin always succeeds; the Nothing branch is unreachable
-- and returns 0 only to satisfy totality.
finHash : Hashable a => a -> Fin (S n)
finHash {n} x with (natToFin ((hash x) `modNat` (S n)) (S n))
| Just m = m
| Nothing = 0 -- impossible
-- Primitive instances hash via absolute value / casts.
-- NOTE(review): hashing Float by casting through Int truncates the
-- fractional part, so e.g. 1.2 and 1.9 collide -- confirm this is intended.
instance Hashable Int where hash = cast . abs
instance Hashable Integer where hash = cast . abs
instance Hashable Char where hash = cast . abs . the Int . cast
instance Hashable Float where hash = cast .abs . the Int . cast
instance Hashable Nat where hash = id
-- Structure instances combine element hashes.
instance Hashable a => Hashable (Vect n a) where
hash = sum . map hash
-- NOTE(review): this generic Foldable instance overlaps the Vect instance
-- above (both give the sum of element hashes) -- confirm the overlap is
-- intended, since Idris instance resolution may reject or mis-pick it.
instance (Hashable a, Foldable f) => Hashable (f a) where
hash = foldr (\a => (+ hash a)) 0
instance Hashable () where hash _ = 0
instance (Hashable a,Hashable b) => Hashable (a,b) where hash (a,b) = hash a * hash b
lemma emeasure_Un_null_set: assumes "A \<in> sets M" "B \<in> null_sets M" shows "emeasure M (A \<union> B) = emeasure M A" |
#-------------------------------------------------------------------
#* EMSO Model Library (EML) Copyright (C) 2004 - 2007 ALSOC.
#*
#* This LIBRARY is free software; you can distribute it and/or modify
#* it under the therms of the ALSOC FREE LICENSE as available at
#* http://www.enq.ufrgs.br/alsoc.
#*
#* EMSO Copyright (C) 2004 - 2007 ALSOC, original code
#* from http://www.rps.eng.br Copyright (C) 2002-2004.
#* All rights reserved.
#*
#* EMSO is distributed under the therms of the ALSOC LICENSE as
#* available at http://www.enq.ufrgs.br/alsoc.
#*----------------------------------------------------------------------
#* Author: Paula B. Staudt
#* $Id$
#*--------------------------------------------------------------------
# EMSO-style model of a simplified tank: constant cross-section area A,
# inlet flow Fin, and a valve whose outflow follows Fout = k*sqrt(Level).
# The inner constructor packs, positionally: the parameter/variable
# descriptors, the equation expressions, their human-readable names, and
# the parameter/variable symbol lists used by the EMSO-to-Julia machinery.
type tank_simplified
tank_simplified()=begin
new(
DanaReal(Dict{Symbol,Any}(
:Brief=>"Valve Constant",
:Unit=>"m^2.5/h",
:Default=>4
)),
area(Dict{Symbol,Any}(
:Brief=>"Tank area",
:Default=>2
)),
length(Dict{Symbol,Any}(
:Brief=>"Tank level"
)),
flow_vol(Dict{Symbol,Any}(
:Brief=>"Input flow",
:PosX=>0.3037,
:PosY=>0
)),
flow_vol(Dict{Symbol,Any}(
:Brief=>"Output flow",
:PosX=>1,
:PosY=>1
)),
[
:(diff(A*Level) = Fin - Fout),
:(Fout = k*sqrt(Level)),
],
[
"Mass balance","Valve equation",
],
[:k,:A,],
[:Level,:Fin,:Fout,]
)
end
# Model quantities (descriptors constructed above, in the same order).
k::DanaReal
A::area
Level::length
Fin::flow_vol
Fout::flow_vol
# Bookkeeping used by the solver/GUI layer.
equations::Array{Expr,1}
equationNames::Array{String,1}
parameters::Array{Symbol,1}
variables::Array{Symbol,1}
attributes::Dict{Symbol,Any}
end
export tank_simplified
# Register both model equations (mass balance, then valve law) with the
# surrounding solver machinery, in declaration order.
function setEquationFlow(in::tank_simplified)
    for eq in 1:2
        addEquation(eq)
    end
end
# Palette/icon/help metadata for the simplified tank model.  Builds the
# attribute dictionary, lets `drive!` merge in the caller's overrides,
# and returns the result.
function atributes(in::tank_simplified,_::Dict{Symbol,Any})
    info_text = "== Specify ==
* the Inlet flow rate;
== Initial Conditions ==
* the tank initial level (Level);
"
    fields::Dict{Symbol,Any} = Dict{Symbol,Any}(
        :Pallete => true,
        :Icon => "icon/Tank",
        :Brief => "Model of a simplified tank.",
        :Info => info_text,
    )
    drive!(fields,_)
    return fields
end
# Outer constructor: build a default model, then attach the palette
# attributes derived from the supplied options dictionary.
tank_simplified(_::Dict{Symbol,Any})=begin
    model = tank_simplified()
    model.attributes = atributes(model,_)
    return model
end
|
lemma emeasure_restrict_space: assumes "\<Omega> \<inter> space M \<in> sets M" "A \<subseteq> \<Omega>" shows "emeasure (restrict_space M \<Omega>) A = emeasure M A" |
[GOAL]
⊢ ∀ {α β : Type ?u.90} (Iα : Mul α) (Iβ : Mul β), Function.Injective MulHom.toFun
[PROOFSTEP]
intros
[GOAL]
α✝ β✝ : Type ?u.90
Iα✝ : Mul α✝
Iβ✝ : Mul β✝
⊢ Function.Injective MulHom.toFun
[PROOFSTEP]
apply @FunLike.coe_injective
[GOAL]
⊢ ∀ {α : Type ?u.90} (I : Mul α), (MulHom.id α).toFun = id
[PROOFSTEP]
aesop_cat
[GOAL]
⊢ ∀ {α β γ : Type ?u.90} (Iα : Mul α) (Iβ : Mul β) (Iγ : Mul γ) (f : α →ₙ* β) (g : β →ₙ* γ),
(MulHom.comp g f).toFun = g.toFun ∘ f.toFun
[PROOFSTEP]
aesop_cat
[GOAL]
X Y : Type u
inst✝¹ : Mul X
inst✝ : Mul Y
e : X ≃* Y
⊢ toMulHom e ≫ toMulHom (symm e) = 𝟙 (MagmaCat.of X)
[PROOFSTEP]
ext
[GOAL]
case w
X Y : Type u
inst✝¹ : Mul X
inst✝ : Mul Y
e : X ≃* Y
x✝ : (forget MagmaCat).obj (MagmaCat.of X)
⊢ ↑(toMulHom e ≫ toMulHom (symm e)) x✝ = ↑(𝟙 (MagmaCat.of X)) x✝
[PROOFSTEP]
simp_rw [comp_apply, toMulHom_eq_coe, MagmaCat.MulEquiv_coe_eq, symm_apply_apply, id_apply]
[GOAL]
X✝ Y✝ : Type u
X Y : MagmaCat
f : X ⟶ Y
x✝ : IsIso ((forget MagmaCat).map f)
⊢ IsIso f
[PROOFSTEP]
skip
[GOAL]
X✝ Y✝ : Type u
X Y : MagmaCat
f : X ⟶ Y
x✝ : IsIso ((forget MagmaCat).map f)
⊢ IsIso f
[PROOFSTEP]
let i := asIso ((forget MagmaCat).map f)
[GOAL]
X✝ Y✝ : Type u
X Y : MagmaCat
f : X ⟶ Y
x✝ : IsIso ((forget MagmaCat).map f)
i : (forget MagmaCat).obj X ≅ (forget MagmaCat).obj Y := asIso ((forget MagmaCat).map f)
⊢ IsIso f
[PROOFSTEP]
let e : X ≃* Y := { f, i.toEquiv with }
[GOAL]
X✝ Y✝ : Type u
X Y : MagmaCat
f : X ⟶ Y
x✝ : IsIso ((forget MagmaCat).map f)
i : (forget MagmaCat).obj X ≅ (forget MagmaCat).obj Y := asIso ((forget MagmaCat).map f)
e : ↑X ≃* ↑Y :=
let src := i.toEquiv;
{
toEquiv :=
{ toFun := f.toFun, invFun := src.invFun, left_inv := (_ : Function.LeftInverse src.invFun src.toFun),
right_inv := (_ : Function.RightInverse src.invFun src.toFun) },
map_mul' := (_ : ∀ (x y : ↑X), MulHom.toFun f (x * y) = MulHom.toFun f x * MulHom.toFun f y) }
⊢ IsIso f
[PROOFSTEP]
exact ⟨(IsIso.of_iso e.toMagmaCatIso).1⟩
[GOAL]
X✝ Y✝ : Type u
X Y : SemigroupCat
f : X ⟶ Y
x✝ : IsIso ((forget SemigroupCat).map f)
⊢ IsIso f
[PROOFSTEP]
skip
[GOAL]
X✝ Y✝ : Type u
X Y : SemigroupCat
f : X ⟶ Y
x✝ : IsIso ((forget SemigroupCat).map f)
⊢ IsIso f
[PROOFSTEP]
let i := asIso ((forget SemigroupCat).map f)
[GOAL]
X✝ Y✝ : Type u
X Y : SemigroupCat
f : X ⟶ Y
x✝ : IsIso ((forget SemigroupCat).map f)
i : (forget SemigroupCat).obj X ≅ (forget SemigroupCat).obj Y := asIso ((forget SemigroupCat).map f)
⊢ IsIso f
[PROOFSTEP]
let e : X ≃* Y := { f, i.toEquiv with }
[GOAL]
X✝ Y✝ : Type u
X Y : SemigroupCat
f : X ⟶ Y
x✝ : IsIso ((forget SemigroupCat).map f)
i : (forget SemigroupCat).obj X ≅ (forget SemigroupCat).obj Y := asIso ((forget SemigroupCat).map f)
e : ↑X ≃* ↑Y :=
let src := i.toEquiv;
{
toEquiv :=
{ toFun := f.toFun, invFun := src.invFun, left_inv := (_ : Function.LeftInverse src.invFun src.toFun),
right_inv := (_ : Function.RightInverse src.invFun src.toFun) },
map_mul' := (_ : ∀ (x y : ↑X), MulHom.toFun f (x * y) = MulHom.toFun f x * MulHom.toFun f y) }
⊢ IsIso f
[PROOFSTEP]
exact ⟨(IsIso.of_iso e.toSemigroupCatIso).1⟩
|
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
⊢ IsColimit ((prod.functor.obj F).mapCocone c)
[PROOFSTEP]
apply evaluationJointlyReflectsColimits _ fun {k} => ?_
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
k : C
⊢ IsColimit (((evaluation C D).obj k).mapCocone ((prod.functor.obj F).mapCocone c))
[PROOFSTEP]
change IsColimit ((prod.functor.obj F ⋙ (evaluation _ _).obj k).mapCocone c)
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
k : C
⊢ IsColimit ((prod.functor.obj F ⋙ (evaluation C D).obj k).mapCocone c)
[PROOFSTEP]
let this := isColimitOfPreserves ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)) t
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
k : C
this : IsColimit (((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).mapCocone c) :=
isColimitOfPreserves ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)) t
⊢ IsColimit ((prod.functor.obj F ⋙ (evaluation C D).obj k).mapCocone c)
[PROOFSTEP]
apply IsColimit.mapCoconeEquiv _ this
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
k : C
this : IsColimit (((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).mapCocone c) :=
isColimitOfPreserves ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)) t
⊢ (evaluation C D).obj k ⋙ prod.functor.obj (F.obj k) ≅ prod.functor.obj F ⋙ (evaluation C D).obj k
[PROOFSTEP]
apply (NatIso.ofComponents _ _).symm
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
k : C
this : IsColimit (((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).mapCocone c) :=
isColimitOfPreserves ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)) t
⊢ (X : C ⥤ D) →
(prod.functor.obj F ⋙ (evaluation C D).obj k).obj X ≅ ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).obj X
[PROOFSTEP]
intro G
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
k : C
this : IsColimit (((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).mapCocone c) :=
isColimitOfPreserves ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)) t
G : C ⥤ D
⊢ (prod.functor.obj F ⋙ (evaluation C D).obj k).obj G ≅ ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).obj G
[PROOFSTEP]
apply asIso (prodComparison ((evaluation C D).obj k) F G)
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
k : C
this : IsColimit (((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).mapCocone c) :=
isColimitOfPreserves ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)) t
⊢ ∀ {X Y : C ⥤ D} (f : X ⟶ Y),
(prod.functor.obj F ⋙ (evaluation C D).obj k).map f ≫ (asIso (prodComparison ((evaluation C D).obj k) F Y)).hom =
(asIso (prodComparison ((evaluation C D).obj k) F X)).hom ≫
((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).map f
[PROOFSTEP]
intro G G'
[GOAL]
C : Type u
inst✝⁶ : Category.{v₁, u} C
D : Type u₂
inst✝⁵ : Category.{u, u₂} D
E : Type u
inst✝⁴ : Category.{v₂, u} E
inst✝³ : HasBinaryProducts D
inst✝² : HasColimits D
inst✝¹ : (X : D) → PreservesColimits (prod.functor.obj X)
F : C ⥤ D
J : Type u
inst✝ : Category.{u, u} J
K : J ⥤ C ⥤ D
c : Cocone K
t : IsColimit c
k : C
this : IsColimit (((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).mapCocone c) :=
isColimitOfPreserves ((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)) t
G G' : C ⥤ D
⊢ ∀ (f : G ⟶ G'),
(prod.functor.obj F ⋙ (evaluation C D).obj k).map f ≫ (asIso (prodComparison ((evaluation C D).obj k) F G')).hom =
(asIso (prodComparison ((evaluation C D).obj k) F G)).hom ≫
((evaluation C D).obj k ⋙ prod.functor.obj (F.obj k)).map f
[PROOFSTEP]
apply prodComparison_natural ((evaluation C D).obj k) (𝟙 F)
[GOAL]
C : Type u
inst✝³ : Category.{v₁, u} C
D : Type u₂
inst✝² : Category.{u, u₂} D
E : Type u
inst✝¹ : Category.{v₂, u} E
inst✝ : HasLimits D
F : C ⥤ E
J : Type u
hJ : Category.{u, u} J
K : J ⥤ E ⥤ D
c : Cone K
hc : IsLimit c
⊢ IsLimit (((whiskeringLeft C E D).obj F).mapCone c)
[PROOFSTEP]
apply evaluationJointlyReflectsLimits
[GOAL]
case t
C : Type u
inst✝³ : Category.{v₁, u} C
D : Type u₂
inst✝² : Category.{u, u₂} D
E : Type u
inst✝¹ : Category.{v₂, u} E
inst✝ : HasLimits D
F : C ⥤ E
J : Type u
hJ : Category.{u, u} J
K : J ⥤ E ⥤ D
c : Cone K
hc : IsLimit c
⊢ (k : C) → IsLimit (((evaluation C D).obj k).mapCone (((whiskeringLeft C E D).obj F).mapCone c))
[PROOFSTEP]
intro Y
[GOAL]
case t
C : Type u
inst✝³ : Category.{v₁, u} C
D : Type u₂
inst✝² : Category.{u, u₂} D
E : Type u
inst✝¹ : Category.{v₂, u} E
inst✝ : HasLimits D
F : C ⥤ E
J : Type u
hJ : Category.{u, u} J
K : J ⥤ E ⥤ D
c : Cone K
hc : IsLimit c
Y : C
⊢ IsLimit (((evaluation C D).obj Y).mapCone (((whiskeringLeft C E D).obj F).mapCone c))
[PROOFSTEP]
change IsLimit (((evaluation E D).obj (F.obj Y)).mapCone c)
[GOAL]
case t
C : Type u
inst✝³ : Category.{v₁, u} C
D : Type u₂
inst✝² : Category.{u, u₂} D
E : Type u
inst✝¹ : Category.{v₂, u} E
inst✝ : HasLimits D
F : C ⥤ E
J : Type u
hJ : Category.{u, u} J
K : J ⥤ E ⥤ D
c : Cone K
hc : IsLimit c
Y : C
⊢ IsLimit (((evaluation E D).obj (F.obj Y)).mapCone c)
[PROOFSTEP]
exact PreservesLimit.preserves hc
[GOAL]
C✝ : Type u
inst✝⁸ : Category.{v₁, u} C✝
D✝ : Type u₂
inst✝⁷ : Category.{u, u₂} D✝
E✝ : Type u
inst✝⁶ : Category.{v₂, u} E✝
C : Type u
inst✝⁵ : Category.{?u.25923, u} C
D : Type u_1
inst✝⁴ : Category.{u, u_1} D
E : Type u_2
inst✝³ : Category.{u, u_2} E
J : Type u
inst✝² : SmallCategory J
inst✝¹ : HasLimitsOfShape J D
F : D ⥤ E
inst✝ : PreservesLimitsOfShape J F
K : J ⥤ C ⥤ D
c : Cone K
hc : IsLimit c
⊢ IsLimit (((whiskeringRight C D E).obj F).mapCone c)
[PROOFSTEP]
apply evaluationJointlyReflectsLimits _ (fun k => ?_)
[GOAL]
C✝ : Type u
inst✝⁸ : Category.{v₁, u} C✝
D✝ : Type u₂
inst✝⁷ : Category.{u, u₂} D✝
E✝ : Type u
inst✝⁶ : Category.{v₂, u} E✝
C : Type u
inst✝⁵ : Category.{?u.25923, u} C
D : Type u_1
inst✝⁴ : Category.{u, u_1} D
E : Type u_2
inst✝³ : Category.{u, u_2} E
J : Type u
inst✝² : SmallCategory J
inst✝¹ : HasLimitsOfShape J D
F : D ⥤ E
inst✝ : PreservesLimitsOfShape J F
K : J ⥤ C ⥤ D
c : Cone K
hc : IsLimit c
k : C
⊢ IsLimit (((evaluation C E).obj k).mapCone (((whiskeringRight C D E).obj F).mapCone c))
[PROOFSTEP]
change IsLimit (((evaluation _ _).obj k ⋙ F).mapCone c)
[GOAL]
C✝ : Type u
inst✝⁸ : Category.{v₁, u} C✝
D✝ : Type u₂
inst✝⁷ : Category.{u, u₂} D✝
E✝ : Type u
inst✝⁶ : Category.{v₂, u} E✝
C : Type u
inst✝⁵ : Category.{?u.25923, u} C
D : Type u_1
inst✝⁴ : Category.{u, u_1} D
E : Type u_2
inst✝³ : Category.{u, u_2} E
J : Type u
inst✝² : SmallCategory J
inst✝¹ : HasLimitsOfShape J D
F : D ⥤ E
inst✝ : PreservesLimitsOfShape J F
K : J ⥤ C ⥤ D
c : Cone K
hc : IsLimit c
k : C
⊢ IsLimit (((evaluation C D).obj k ⋙ F).mapCone c)
[PROOFSTEP]
exact PreservesLimit.preserves hc
[GOAL]
C✝ : Type u
inst✝⁶ : Category.{v₁, u} C✝
D✝ : Type u₂
inst✝⁵ : Category.{u, u₂} D✝
E : Type u
inst✝⁴ : Category.{v₂, u} E
C D : Type u
inst✝³ : SmallCategory C
inst✝² : SmallCategory D
F : C ⥤ D
J : Type u
inst✝¹ : SmallCategory J
inst✝ : PreservesLimitsOfShape J (lan F.op)
⊢ PreservesLimitsOfShape J F
[PROOFSTEP]
apply @preservesLimitsOfShapeOfReflectsOfPreserves _ _ _ _ _ _ _ _ F yoneda ?_
[GOAL]
C✝ : Type u
inst✝⁶ : Category.{v₁, u} C✝
D✝ : Type u₂
inst✝⁵ : Category.{u, u₂} D✝
E : Type u
inst✝⁴ : Category.{v₂, u} E
C D : Type u
inst✝³ : SmallCategory C
inst✝² : SmallCategory D
F : C ⥤ D
J : Type u
inst✝¹ : SmallCategory J
inst✝ : PreservesLimitsOfShape J (lan F.op)
⊢ PreservesLimitsOfShape J (F ⋙ yoneda)
[PROOFSTEP]
exact preservesLimitsOfShapeOfNatIso (compYonedaIsoYonedaCompLan F).symm
|
#' get_vocab
#'
#' Pre-compute the vocabulary for use with \code{w2v()}.
#'
#' @details
#' Validates its arguments, expands paths, and delegates the actual vocabulary
#' construction to the native routine \code{R_get_vocab} (distributed over MPI
#' via pbdMPI). The vocabulary is written to \code{vocab_file} as a side effect.
#'
#' @param train_file
#' Input plaintext file.
#' @param vocab_file
#' File path (string) pointing to where you want the vocabulary to be saved.
#' @param comm
#' An MPI communicator number (from pbdMPI).
#' @param verbose
#' Want it to print what it's doing?
#'
#' @return
#' Invisibly returns \code{NULL}.
#'
#' @useDynLib w2v R_get_vocab
#' @export
get_vocab = function(train_file, vocab_file, comm=0, verbose=FALSE)
{
# Argument validation happens before any path work so errors name the culprit.
check.is.string(train_file)
check.is.string(vocab_file)
check.is.flag(verbose)
# Expand ~ etc. before the existence check and before handing paths to C.
train_file = path.expand(train_file)
if (!file.exists(train_file))
comm.stop("train_file does not exist")
# vocab_file is an output path, so it is expanded but not checked for existence.
vocab_file = path.expand(vocab_file)
# Translate the communicator number into the pointer the native code expects.
comm_ptr = pbdMPI::get.mpi.comm.ptr(comm)
.Call(R_get_vocab, train_file, vocab_file, comm_ptr, verbose)
invisible()
}
|
/* Starting from version 7.8, MATLAB BLAS expects ptrdiff_t arguments for integers */
#if MATLAB_VERSION >= 0x0708
#include <stddef.h>
#include <stdlib.h>
#endif
/* Starting from version 7.6, MATLAB BLAS is separated */
/* NOTE(review): the comment says 7.6 but the guard checks 0x0705 (7.5) --
   confirm which version is intended. */
#if MATLAB_VERSION >= 0x0705
#include <blas.h>
#endif
#include <lapack.h>
/* Fallback: min() is not provided by the standard C headers. Arguments are
   evaluated twice, so avoid side effects in the operands. */
#ifndef min
#define min(a,b) ((a) <= (b) ? (a) : (b))
#endif
|
module Day1

import Data.String

-- Advent of Code 2021, day 1.
--
-- Part 1 step: state is (previous depth, count of increases so far).
foldCount : (Int, Int) -> Int -> (Int, Int)
foldCount (a, b) c = (c, (if a < c then b + 1 else b))

-- Part 2 step: state is (three most recent depths, count). A three-measurement
-- window sum increases iff (a+b+c) < (b+c+e), i.e. iff the new reading e
-- exceeds the reading a that dropped out of the window.
foldCount2 : (Int, Int, Int, Int) -> Int -> (Int, Int, Int, Int)
foldCount2 (a, b, c, d) e = (b, c, e, (if (a+b+c) < (b+c+e) then d + 1 else d))

export
-- Parse each line as an integer (unparseable lines are dropped) and count
-- increasing three-measurement windows. The first three accumulator slots are
-- large sentinels so the initial (partial) windows never register an increase.
-- Bug fix: the third sentinel was 100000 while the others were 1000000; a
-- first reading above 100000 would have produced a spurious count.
solve : List String -> (Int, Int, Int, Int)
solve content = foldl foldCount2 (1000000, 1000000, 1000000, 0) (mapMaybe parseInteger content)
|
{-# OPTIONS --cubical #-}
-- Builtin cubical identity types. The BUILTIN pragmas bind these declarations
-- to the type checker's internal Id machinery; do not alter the signatures.
module Agda.Builtin.Cubical.Id where
open import Agda.Primitive.Cubical
open import Agda.Builtin.Cubical.Path
postulate
-- Cubical identity type: like a path, but additionally carries a "face"
-- recording where it is definitionally reflexivity.
Id : ∀ {ℓ} {A : Set ℓ} → A → A → Set ℓ
{-# BUILTIN ID Id #-}
{-# BUILTIN CONID conid #-}
primitive
primDepIMin : _
-- Extract the face (interval formula) on which the Id is reflexivity.
primIdFace : ∀ {ℓ} {A : Set ℓ} {x y : A} → Id x y → I
-- Extract the underlying path of an Id.
primIdPath : ∀ {ℓ} {A : Set ℓ} {x y : A} → Id x y → x ≡ y
primitive
-- J eliminator for Id: transport along an Id from the reflexivity case.
primIdJ : ∀ {ℓ ℓ'} {A : Set ℓ} {x : A} (P : ∀ y → Id x y → Set ℓ') →
P x (conid i1 (λ i → x)) → ∀ {y} (p : Id x y) → P y p
|
{-# LANGUAGE OverloadedStrings, NoImplicitPrelude #-}
{-# LANGUAGE FlexibleContexts, UndecidableInstances #-} -- required for HMatrix
-- | Albemarle, natural language processing for Haskell
module NLP.Albemarle (
SparseMatrix(..),
SparseVector(..),
DenseVector(..)
) where
import ClassyPrelude hiding (Vector)
import Numeric.LinearAlgebra
import qualified Data.Vector.Unboxed as Vec
-- | Vector of sorted (word ID, count)
-- The leading 'Int' is presumably the declared length/dimension -- TODO confirm.
data SparseVector = SparseVector Int [(Int, Double)]
deriving (Show, Eq)
-- | Sparse matrix as a list of 'SparseVector' rows; the 'Int' is presumably
-- the row count -- TODO confirm against the constructors' call sites.
data SparseMatrix = SparseMatrix Int [SparseVector]
deriving (Show, Eq)
-- | Dense vector of Doubles (hmatrix 'Vector').
type DenseVector = Vector Double
-- NOTE(review): orphan instance (neither 'Semigroup' nor 'Matrix' is defined
-- in this module). With base >= 4.11, 'mappend' defaults to '(<>)', so
-- '(<>) = mappend' loops unless the hmatrix 'Monoid (Matrix t)' instance
-- defines 'mappend' directly (matrix multiplication in older hmatrix).
-- Confirm against the hmatrix version pinned by the build.
instance (Container Vector t, Eq t, Num (Vector t), Product t) => Semigroup (Matrix t) where
(<>) = mappend
|
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedLists #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -Wall #-}
{-# OPTIONS_GHC -Wno-orphans #-}
{-# OPTIONS_GHC -fno-warn-type-defaults #-}
import NumHask.Array
import NumHask.Prelude
import Options.Generic
import Perf
import Perf.Analysis
import qualified Data.Matrix as Matrix
import qualified Numeric.LinearAlgebra as H
import qualified Data.Vector as V
-- | Command-line options; both default in 'main' (1000 runs, size 10).
data Opts = Opts
{ runs :: Maybe Int -- <?> "number of runs"
, size :: Maybe Int -- <?> "size of matrix"
} deriving (Generic, Show)
instance ParseRecord Opts
-- | Benchmark harness: times creation and matrix multiplication for hmatrix,
-- Data.Matrix and NumHask arrays, then a set of elementary array operations,
-- writing markdown reports under other/.
main :: IO ()
main = do
o :: Opts <- getRecord "benchmarking numhask array"
-- Strict bindings so option handling isn't deferred into the timed sections.
let !n = fromMaybe 1000 (runs o)
let !sz = fromMaybe 10 (size o)
_ <- warmup 100
-- NOTE(review): the NumHask arrays are fixed to '[10, 10] by their types,
-- while the hmatrix/Data.Matrix operands use the runtime `sz`; the compared
-- workloads diverge whenever size /= 10 -- confirm this is intended.
let aa = [1 ..] :: Array [] '[10, 10] Int
let ab = [0 ..] :: Array [] '[10, 10] Int
let ha = (sz H.>< sz) [1 :: H.Z ..]
let hb = (sz H.>< sz) [1 :: H.Z ..]
let ma = Matrix.matrix sz sz (\(i, j) -> i + sz * j)
let mb = Matrix.matrix sz sz (\(i, j) -> i + sz * j)
let va = [1 ..] :: Array V.Vector '[10, 10] Int
let vb = [0 ..] :: Array V.Vector '[10, 10] Int
-- Time construction of each representation (tickIO forces the thunk once).
(tcreatea, aa') <- tickIO $ pure aa
(tcreateha, ha') <- tickIO $ pure ha
(tcreatema, ma') <- tickIO $ pure ma
(tcreateva, va') <- tickIO $ pure va
-- Time n matrix multiplications per library.
(rmmult, _) <- ticks n (NumHask.Array.mmult aa') ab
(rmmulth, _) <- ticks n (ha' H.<>) hb
(rmmultm, _) <- ticks n (ma' `Matrix.multStd2`) mb
(rmmultv, _) <- ticks n (NumHask.Array.mmult va') vb
-- Emit the creation/mmult report.
writeFile "other/array.md" $ code
[ "square matrix size: " <> show sz
, ""
, "creation"
, formatInt "hmatrix:" 2 tcreateha
, formatInt "matrix:" 2 tcreatema
, formatInt "Array []:" 2 tcreatea
, formatInt "Array Vector(Boxed):" 2 tcreateva
, ""
, "mmult"
, formatRunHeader
, ""
, formatRun "hmatrix" 2 rmmulth
, formatRun "matrix" 2 rmmultm
, formatRun "[]" 2 rmmult
, formatRun "Boxed" 2 rmmultv
]
-- Time elementary operations on the list-backed NumHask array only.
(rrow, _) <- ticks n (NumHask.Array.row (Proxy :: Proxy 4)) ab
(rcol, _) <- ticks n (NumHask.Array.col (Proxy :: Proxy 4)) ab
(runsaferow, _) <- ticks n (NumHask.Array.unsafeRow 0) ab
(runsafecol, _) <- ticks n (NumHask.Array.unsafeCol 0) ab
(runsafeindex, _) <- ticks n (NumHask.Array.unsafeIndex ab) [2, 3]
(rconcat, _) <- ticks n (concatenate (Proxy :: Proxy 2) aa) aa
(rtranspose, _) <- ticks n NumHask.Array.transpose aa
-- Emit the elementary-operations report.
writeFile "other/ops.md" $ code
[ "square matrix size: " <> show sz
, formatRunHeader
, ""
, formatRun "row" 2 rrow
, formatRun "col" 2 rcol
, formatRun "unsafeRow" 2 runsaferow
, formatRun "unsafeCol" 2 runsafecol
, formatRun "unsafeIndex" 2 runsafeindex
, formatRun "concat" 2 rconcat
, formatRun "transpose" 2 rtranspose
]
pure ()
|
{-# OPTIONS --without-K #-}
open import Base
import Homotopy.TruncationHIT as T
{-
The definition of the truncation is in TruncationHIT, here I just make some
arguments implicit, define easier to use helper functions and prove the
universal property
-}
module Homotopy.Truncation {i} where
-- n-truncation of a type, re-exported from the HIT with a cleaner interface.
τ : (n : ℕ₋₂) → (Set i → Set i)
τ = T.τ
-- Canonical map of a type into its truncation.
proj : {n : ℕ₋₂} {A : Set i} → (A → τ n A)
proj {n} {A} = T.proj n A
τ-is-truncated : (n : ℕ₋₂) (A : Set i) → is-truncated n (τ n A)
τ-is-truncated = T.τ-is-truncated
-- Instance-argument variant of the previous proof.
τ-is-truncated#instance : {n : ℕ₋₂} {A : Set i} → is-truncated n (τ n A)
τ-is-truncated#instance = T.τ-is-truncated _ _
-- Dependent elimination: to map out of τ n A into an n-truncated family,
-- it suffices to give the map on points (proj-images).
τ-extend : ∀ {j} {n : ℕ₋₂} {A : Set i} {P : (τ n A) → Set j}
⦃ p : (x : τ n A) → is-truncated n (P x) ⦄ (f : (x : A) → P (proj x))
→ ((x : τ n A) → P x)
τ-extend {j} {n} {A} {P} ⦃ p ⦄ f = T.τ-rec _ _ _ f {transp} p where
abstract
-- Coherence datum required by τ-rec for the degenerate n ≡ ⟨-2⟩ case.
transp : (pa : n ≡ ⟨-2⟩) (x y : τ n A) (x* : P x) (y* : P y)
→ transport P (T.hack-prop n A pa x y) x* ≡ y*
transp pa x y x* y* = π₁ (contr-is-prop
(transport (λ m → is-truncated m _) pa (p y))
_ _)
-- Non-dependent elimination into an n-truncated type.
τ-extend-nondep : ∀ {j} {n : ℕ₋₂} {A : Set i} {B : Set j}
⦃ p : is-truncated n B ⦄ → ((f : A → B) → (τ n A → B))
τ-extend-nondep {j} {n} {A} {B} ⦃ p ⦄ f = T.τ-rec-nondep _ _ _ f {transp} p
where
abstract
transp : (pa : n ≡ ⟨-2⟩) (x y : τ n A) (x* y* : B) → x* ≡ y*
transp pa x y x* y* = π₁ (contr-is-prop
(transport (λ m → is-truncated m _) pa p)
_ _)
-- Special syntax for hProp-reflection
-- Propositional truncation ((-1)-truncation).
[_] : Set i → Set i
[_] = τ ⟨-1⟩
abstract
[]-is-prop : {A : Set i} → is-prop [ A ]
[]-is-prop = T.τ-is-truncated _ _
-- Dependent elimination into a family of propositions.
[]-extend : ∀ {j} {A : Set i} {P : [ A ] → Set j}
⦃ p : (x : [ A ]) → is-prop (P x) ⦄ (f : (x : A) → P (proj x))
→ ((x : [ A ]) → P x)
[]-extend f = τ-extend f
[]-extend-nondep : ∀ {j} {A : Set i} {B : Set j} ⦃ p : is-prop B ⦄
→ ((f : A → B) → ([ A ] → B))
[]-extend-nondep f = τ-extend-nondep f
-- Special syntax for hSet-reflection
-- Set truncation (0-truncation), i.e. the set of connected components.
π₀ : Set i → Set i
π₀ = τ ⟨0⟩
π₀-is-set : (A : Set i) → is-set (π₀ A)
π₀-is-set A = T.τ-is-truncated _ _
-- Dependent elimination into a family of sets.
π₀-extend : ∀ {j} {A : Set i} {P : π₀ A → Set j}
⦃ p : (x : π₀ A) → is-set (P x) ⦄ (f : (x : A) → P (proj x))
→ ((x : π₀ A) → P x)
π₀-extend f = τ-extend f
π₀-extend-nondep : ∀ {j} {A : Set i} {B : Set j} ⦃ p : is-set B ⦄
→ ((f : A → B) → (π₀ A → B))
π₀-extend-nondep f = τ-extend-nondep f
-- Universal property of the truncation
abstract
-- Precomposition with proj is an equivalence between maps out of the
-- truncation and maps out of A, whenever the codomain is n-truncated.
τ-up : ∀ {j} (n : ℕ₋₂) (A : Set i) (B : Set j)
⦃ p : is-truncated n B ⦄
→ is-equiv (λ (f : τ n A → B) → (λ x → f (proj x)))
τ-up n A B ⦃ p ⦄ = iso-is-eq _
(τ-extend-nondep)
(λ _ → refl)
(λ f → funext (τ-extend ⦃ p = λ x → ≡-is-truncated n p ⦄
(λ x → refl)))
-- The extension operation itself is an equivalence (the other direction
-- of the universal property).
τ-extend-nondep-is-equiv : ∀ {j} (n : ℕ₋₂) (A : Set i) (B : Set j)
⦃ p : is-truncated n B ⦄ → is-equiv (τ-extend-nondep {n = n} {A} {B})
τ-extend-nondep-is-equiv n A B ⦃ p ⦄ = iso-is-eq _
(λ f → f ◯ proj)
(λ f → funext (τ-extend ⦃ λ x → ≡-is-truncated n p ⦄
(λ x → refl)))
(λ _ → refl)
-- Equivalence associated to the universal property
τ-equiv : ∀ {j} (n : ℕ₋₂) (A : Set i) (B : Set j)
⦃ p : is-truncated n B ⦄ → (τ n A → B) ≃ (A → B)
τ-equiv n A B = (_ , τ-up n _ _)
-- Equivalence associated to the universal property
τ-extend-equiv : ∀ {j} (n : ℕ₋₂) (A : Set i) (B : Set j)
⦃ p : is-truncated n B ⦄ → (A → B) ≃ (τ n A → B)
τ-extend-equiv n A B = (τ-extend-nondep , τ-extend-nondep-is-equiv n A B)
-- Functorial action of τ on maps.
τ-fmap : {n : ℕ₋₂} {A B : Set i} → ((A → B) → (τ n A → τ n B))
τ-fmap f = τ-extend-nondep (proj ◯ f)
-- τ-fmap respects pointwise homotopies.
τ-fpmap : {n : ℕ₋₂} {A B : Set i} {f g : A → B} (h : (a : A) → f a ≡ g a)
→ ((a : τ n A) → τ-fmap f a ≡ τ-fmap g a)
τ-fpmap h = τ-extend ⦃ λ _ → ≡-is-truncated _ (τ-is-truncated _ _) ⦄
(ap proj ◯ h)
|
Ross began his career as a hockey coach in the midst of his playing days, when at age 24 he led the McGill University Redmen to a 4–2–1 record during the 1910–11 season. Following his playing career, Ross became an NHL referee. He was hired to coach the Hamilton Tigers for the 1922–23 season, and adopted new methods in training camp that emphasized physical fitness, including work off the ice. However, the Tigers finished with a record of six wins and eighteen losses, last in the NHL for the third successive year, and Ross did not return the next season. His next coaching appointment arose from meeting Boston grocery store magnate Charles Adams during the 1924 Stanley Cup Finals. Before the 1924 season, the NHL awarded Adams an expansion team. Adams' first move was to hire Ross as vice president, general manager, coach and scout. Adams instructed Ross to come up with a nickname portraying an untamed animal displaying speed, agility and cunning. With this in mind, Ross named the team the Boston Bruins, after the Old English word for a bear. The team's nickname went perfectly with the original colours of brown and yellow, which were the same colours of Adams' grocery chain, First National Stores.
|
#include <boost/interprocess/sync/spin/interprocess_barrier.hpp>
|
# MIT License
#
# Copyright (c) 2018 Martin Biel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Quadratic decision function #
# ========================== #
# Bridge that rewrites a `QuadraticDecisionFunction` objective into a plain
# `MOI.ScalarQuadraticFunction` objective, caching the original function so
# later modifications and queries can be answered against it.
struct QuadraticDecisionObjectiveBridge{T} <: MOIB.Objective.AbstractBridge
decision_function::QuadraticDecisionFunction{T}
end
# Install the bridged objective on `model`: only the variable-part terms are
# forwarded, with a zero constant (the true constant is accounted for when the
# objective value is queried). Returns the bridge holding the original function.
function MOIB.Objective.bridge_objective(::Type{QuadraticDecisionObjectiveBridge{T}}, model::MOI.ModelLike,
f::QuadraticDecisionFunction{T}) where T
# All decisions have been mapped to the variable part terms
# at this point.
F = MOI.ScalarQuadraticFunction{T}
# Set the bridged objective
MOI.set(model, MOI.ObjectiveFunction{F}(),
MOI.ScalarQuadraticFunction(
f.variable_part.affine_terms,
f.variable_part.quadratic_terms,
zero(T)))
# Save decision function to allow modifications
return QuadraticDecisionObjectiveBridge{T}(f)
end
# Any quadratic decision function can be handled by this objective bridge.
MOIB.Objective.supports_objective_function(
    ::Type{<:QuadraticDecisionObjectiveBridge},
    ::Type{<:QuadraticDecisionFunction}) = true
# The bridge adds no constrained variables ...
MOIB.added_constrained_variable_types(::Type{<:QuadraticDecisionObjectiveBridge}) = Tuple{DataType}[]
# ... and no constraints.
function MOIB.added_constraint_types(::Type{<:QuadraticDecisionObjectiveBridge})
return Tuple{DataType, DataType}[]
end
# The concrete bridge type is the bridge itself, parameterized by T.
function MOIB.Objective.concrete_bridge_type(::Type{<:QuadraticDecisionObjectiveBridge{T}},
::Type{QuadraticDecisionFunction{T}}) where T
return QuadraticDecisionObjectiveBridge{T}
end
# After bridging, the inner model sees a plain scalar quadratic objective.
function MOIB.set_objective_function_type(::Type{QuadraticDecisionObjectiveBridge{T}}) where T
return MOI.ScalarQuadraticFunction{T}
end
# The bridge introduces no auxiliary variables ...
MOI.get(::QuadraticDecisionObjectiveBridge, ::MOI.NumberOfVariables) = 0
# ... and therefore has no variable indices to report.
MOI.get(::QuadraticDecisionObjectiveBridge, ::MOI.ListOfVariableIndices) = MOI.VariableIndex[]
# The bridge owns no model objects, so deletion is a no-op.
function MOI.delete(::MOI.ModelLike, ::QuadraticDecisionObjectiveBridge)
# Nothing to delete
return nothing
end
# A sense change requires no adjustment of the bridged function.
function MOI.set(::MOI.ModelLike, ::MOI.ObjectiveSense,
::QuadraticDecisionObjectiveBridge, ::MOI.OptimizationSense)
# Nothing to handle if sense changes
return nothing
end
# Objective value: query the bridged (constant-free) inner objective and add
# back the constant that `bridge_objective` deliberately dropped.
function MOI.get(model::MOI.ModelLike,
                 attr::MOIB.ObjectiveFunctionValue{QuadraticDecisionFunction{T}},
                 bridge::QuadraticDecisionObjectiveBridge{T}) where T
    f = bridge.decision_function
    G = MOI.ScalarQuadraticFunction{T}
    obj_val = MOI.get(model, MOIB.ObjectiveFunctionValue{G}(attr.result_index))
    # Calculate and add constant
    constant = f.variable_part.constant +
        f.decision_part.constant
    # Bug fix: the full constant computed above was previously ignored and only
    # the variable-part constant was added, dropping the decision-part constant.
    return obj_val + constant
end
# Return the cached original decision function (kept up to date in place by
# the MOI.modify methods below).
function MOI.get(::MOI.ModelLike,
                 ::MOI.ObjectiveFunction{QuadraticDecisionFunction{T}},
                 bridge::QuadraticDecisionObjectiveBridge{T}) where T
    bridge.decision_function
end
# Constant change: only the cached function needs updating, since the inner
# bridged objective was installed with a zero constant.
function MOI.modify(model::MOI.ModelLike, bridge::QuadraticDecisionObjectiveBridge{T}, change::MOI.ScalarConstantChange) where T
    f = bridge.decision_function
    # Bug fix: the original rebound `f = f.linear_quadratic_terms` (a field not
    # used anywhere else in this file) and then mutated an undefined binding
    # `lq`, which would throw `UndefVarError` at runtime.
    # Modify constant of variable part
    f.variable_part.constant = change.new_constant
    return nothing
end
# Coefficient change: update both the cached function and the inner bridged
# objective so the two stay in sync.
function MOI.modify(model::MOI.ModelLike, bridge::QuadraticDecisionObjectiveBridge{T}, change::MOI.ScalarCoefficientChange) where T
f = bridge.decision_function
# Update variable part
modify_coefficient!(f.variable_part.affine_terms, change.variable, change.new_coefficient)
# Modify variable part of objective as usual
F = MOI.ScalarQuadraticFunction{T}
MOI.modify(model, MOI.ObjectiveFunction{F}(), change)
return nothing
end
|
[STATEMENT]
theorem comb_planar_compat:
assumes "comb_planar G"
shows "kuratowski_planar G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. kuratowski_planar G
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> kuratowski_planar G \<Longrightarrow> False
[PROOF STEP]
assume "\<not>?thesis"
[PROOF STATE]
proof (state)
this:
\<not> kuratowski_planar G
goal (1 subgoal):
1. \<not> kuratowski_planar G \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> kuratowski_planar G
[PROOF STEP]
obtain G0 rev_G0 K rev_K where sub: "subgraph G0 G" "subdivision (K, rev_K) (G0, rev_G0)"
and is_kur: "K\<^bsub>3,3\<^esub> K \<or> K\<^bsub>5\<^esub> K"
[PROOF STATE]
proof (prove)
using this:
\<not> kuratowski_planar G
goal (1 subgoal):
1. (\<And>G0 K rev_K rev_G0. \<lbrakk>subgraph G0 G; subdivision (K, rev_K) (G0, rev_G0); K\<^bsub>3,3\<^esub> K \<or> K\<^bsub>5\<^esub> K\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding kuratowski_planar_def
[PROOF STATE]
proof (prove)
using this:
\<not> (\<nexists>H. subgraph H G \<and> (\<exists>K rev_K rev_H. subdivision (K, rev_K) (H, rev_H) \<and> (K\<^bsub>3,3\<^esub> K \<or> K\<^bsub>5\<^esub> K)))
goal (1 subgoal):
1. (\<And>G0 K rev_K rev_G0. \<lbrakk>subgraph G0 G; subdivision (K, rev_K) (G0, rev_G0); K\<^bsub>3,3\<^esub> K \<or> K\<^bsub>5\<^esub> K\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
subgraph G0 G
subdivision (K, rev_K) (G0, rev_G0)
K\<^bsub>3,3\<^esub> K \<or> K\<^bsub>5\<^esub> K
goal (1 subgoal):
1. \<not> kuratowski_planar G \<Longrightarrow> False
[PROOF STEP]
have "comb_planar K"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. comb_planar K
[PROOF STEP]
using sub assms
[PROOF STATE]
proof (prove)
using this:
subgraph G0 G
subdivision (K, rev_K) (G0, rev_G0)
comb_planar G
goal (1 subgoal):
1. comb_planar K
[PROOF STEP]
by (metis subgraph_comb_planar subdivision_comb_planar subdivision_bidir)
[PROOF STATE]
proof (state)
this:
comb_planar K
goal (1 subgoal):
1. \<not> kuratowski_planar G \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
comb_planar K
goal (1 subgoal):
1. \<not> kuratowski_planar G \<Longrightarrow> False
[PROOF STEP]
have "\<not>comb_planar K"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> comb_planar K
[PROOF STEP]
using is_kur
[PROOF STATE]
proof (prove)
using this:
K\<^bsub>3,3\<^esub> K \<or> K\<^bsub>5\<^esub> K
goal (1 subgoal):
1. \<not> comb_planar K
[PROOF STEP]
by (metis K5_not_comb_planar K33_not_comb_planar)
[PROOF STATE]
proof (state)
this:
\<not> comb_planar K
goal (1 subgoal):
1. \<not> kuratowski_planar G \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
comb_planar K
\<not> comb_planar K
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
comb_planar K
\<not> comb_planar K
goal (1 subgoal):
1. False
[PROOF STEP]
by contradiction
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed |
/-
Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
! This file was ported from Lean 3 source module order.monotone.odd
! leanprover-community/mathlib commit 9116dd6709f303dcf781632e15fdef382b0fc579
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Order.Monotone.Union
import Mathlib.Algebra.Order.Group.Instances
/-!
# Monotonicity of odd functions
An odd function on a linear ordered additive commutative group `G` is monotone on the whole group
provided that it is monotone on `Set.Ici 0`, see `monotone_of_odd_of_monotoneOn_nonneg`. We also
prove versions of this lemma for `Antitone`, `StrictMono`, and `StrictAnti`.
-/
open Set
variable {G H : Type _} [LinearOrderedAddCommGroup G] [OrderedAddCommGroup H]
/-- An odd function on a linear ordered additive commutative group is strictly monotone on the whole
group provided that it is strictly monotone on `Set.Ici 0`. -/
theorem strictMono_of_odd_strictMonoOn_nonneg {f : G → H} (h₁ : ∀ x, f (-x) = -f x)
(h₂ : StrictMonoOn f (Ici 0)) : StrictMono f := by
-- Split the group as `Iic 0 ∪ Ici 0`; `h₂` handles the nonnegative part directly.
refine' StrictMonoOn.Iic_union_Ici (fun x hx y hy hxy => neg_lt_neg_iff.1 _) h₂
-- Rewrite via oddness so the nonpositive case reduces to `h₂` applied at `-y, -x`.
rw [← h₁, ← h₁]
exact h₂ (neg_nonneg.2 hy) (neg_nonneg.2 hx) (neg_lt_neg hxy)
#align strict_mono_of_odd_strict_mono_on_nonneg strictMono_of_odd_strictMonoOn_nonneg
/-- An odd function on a linear ordered additive commutative group is strictly antitone on the whole
group provided that it is strictly antitone on `Set.Ici 0`. -/
theorem strictAnti_of_odd_strictAntiOn_nonneg {f : G → H} (h₁ : ∀ x, f (-x) = -f x)
(h₂ : StrictAntiOn f (Ici 0)) : StrictAnti f :=
-- Dualize the codomain: an antitone map into `H` is a monotone map into `Hᵒᵈ`.
@strictMono_of_odd_strictMonoOn_nonneg G Hᵒᵈ _ _ _ h₁ h₂
#align strict_anti_of_odd_strict_anti_on_nonneg strictAnti_of_odd_strictAntiOn_nonneg
/-- An odd function on a linear ordered additive commutative group is monotone on the whole group
provided that it is monotone on `Set.Ici 0`. -/
theorem monotone_of_odd_of_monotoneOn_nonneg {f : G → H} (h₁ : ∀ x, f (-x) = -f x)
(h₂ : MonotoneOn f (Ici 0)) : Monotone f := by
-- Same strategy as the strict version, with non-strict inequalities.
refine' MonotoneOn.Iic_union_Ici (fun x hx y hy hxy => neg_le_neg_iff.1 _) h₂
rw [← h₁, ← h₁]
exact h₂ (neg_nonneg.2 hy) (neg_nonneg.2 hx) (neg_le_neg hxy)
#align monotone_of_odd_of_monotone_on_nonneg monotone_of_odd_of_monotoneOn_nonneg
/-- An odd function on a linear ordered additive commutative group is antitone on the whole group
provided that it is antitone on `Set.Ici 0`. -/
theorem antitone_of_odd_of_monotoneOn_nonneg {f : G → H} (h₁ : ∀ x, f (-x) = -f x)
(h₂ : AntitoneOn f (Ici 0)) : Antitone f :=
-- Dualize the codomain, reusing the monotone version on `Hᵒᵈ`.
@monotone_of_odd_of_monotoneOn_nonneg G Hᵒᵈ _ _ _ h₁ h₂
#align antitone_of_odd_of_monotone_on_nonneg antitone_of_odd_of_monotoneOn_nonneg
|
Require Export SystemFR.Judgments.
Require Export SystemFR.AnnotatedTactics.
Require Export SystemFR.ErasedSubtype.
Opaque reducible_values.
(* Covariance of product (Σ) types: from A1 <: B1 and, under a fresh x : A1,
   (open A2 x) <: (open B2 x), conclude T_prod A1 A2 <: T_prod B1 B2.
   The freshness hypotheses keep x out of the context, all four types and Θ. *)
Lemma annotated_subtype_prod:
forall Θ Γ A1 A2 B1 B2 x,
~(x ∈ fv_context Γ) ->
~(x ∈ fv A1) ->
~(x ∈ fv A2) ->
~(x ∈ fv B1) ->
~(x ∈ fv B2) ->
~(x ∈ Θ) ->
is_annotated_type A2 ->
is_annotated_type B2 ->
[[ Θ; Γ ⊨ A1 <: B1 ]] ->
[[ Θ; (x,A1) :: Γ ⊨ open 0 A2 (fvar x term_var) <: open 0 B2 (fvar x term_var) ]] ->
[[ Θ; Γ ⊨ T_prod A1 A2 <: T_prod B1 B2 ]].
Proof.
(* Reduce to the corresponding erased-subtyping lemma, then discharge the
   freshness/annotation side conditions. *)
unfold open_subtype;
repeat step.
apply reducible_prod_subtype_subst with (erase_type A1) (erase_type A2) x (erase_context Γ);
repeat step;
side_conditions.
unshelve epose proof (H8 ρ l0 _ _ _ v0 _);
repeat step || erase_open.
Qed.
(* Introduction form for product subtyping: if from a fresh x : T both
   projections type as pi1 x : A and pi2 x : open B (pi1 x), then
   T is a subtype of T_prod A B. *)
Lemma annotated_subtype_prod2:
forall Θ Γ T A B x,
~(x ∈ fv_context Γ) ->
~(x ∈ fv B) ->
~(x ∈ fv A) ->
~(x ∈ fv T) ->
~(x ∈ Θ) ->
is_annotated_type B ->
wf B 1 ->
subset (fv B) (support Γ) ->
[[ Θ; (x,T) :: Γ ⊨ pi1 (fvar x term_var) : A ]] ->
[[ Θ; (x,T) :: Γ ⊨ pi2 (fvar x term_var) : open 0 B (pi1 (fvar x term_var)) ]] ->
[[ Θ; Γ ⊨ T <: T_prod A B ]].
Proof.
(* Delegate to the erased-level lemma subtype_prod2. *)
unfold open_subtype; repeat step.
apply subtype_prod2 with (erase_context Γ) (erase_type T) x;
repeat step || erase_open;
side_conditions.
Qed.
|
function x = vec(X)
% x = vec(X)
%
% Y = VEC(x) Given an m x n matrix x, this produces the vector Y of length
% m*n that contains the columns of the matrix x, stacked below each other.
%
% See also mat.
% This file is part of SeDuMi 1.1 by Imre Polik and Oleksandr Romanko
% Copyright (C) 2005 McMaster University, Hamilton, CANADA (since 1.1)
%
% Copyright (C) 2001 Jos F. Sturm (up to 1.05R5)
% Dept. Econometrics & O.R., Tilburg University, the Netherlands.
% Supported by the Netherlands Organization for Scientific Research (NWO).
%
% Affiliation SeDuMi 1.03 and 1.04Beta (2000):
% Dept. Quantitative Economics, Maastricht University, the Netherlands.
%
% Affiliations up to SeDuMi 1.02 (AUG1998):
% CRL, McMaster University, Canada.
% Supported by the Netherlands Organization for Scientific Research (NWO).
%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program; if not, write to the Free Software
% Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
% 02110-1301, USA
% Column-major linearization: X(:) stacks the columns of X into a single
% column vector, equivalent to reshape(X, numel(X), 1).
x = X(:);
|
"""
Noeske, K. G., et al. 2007, ApJ, 660, L43
http://arxiv.org/abs/astro-ph/0703056v2
For ssfr, values are corrected as seen in Behroozi et al. 2013 (http://arxiv.org/abs/1207.6105), Table 5, for IMF (Initial Mass Function) corrections.
"""
import numpy as np
info = \
{
'reference':'Noeske, K. G., et al. 2007, ApJ, 660, L43',
'data': 'Behroozi, Table 5',
'imf': ('chabrier, 2003', (0.1, 100.)),
}
redshifts = [0.5, 1.0]
wavelength = 1600.
ULIM = -1e10
fits = {}
# Table 1
tmp_data = {}
tmp_data['ssfr'] = \
{
0.5: {'M': [6.3095734E+09, 1.0000000E+10, 1.5848932E+10, 2.5118864E+10, 3.9810717E+10, 6.3095734E+10, 1.0000000E+11, 1.5848932E+11, 2.5118864E+11],
'phi': [-9.32258931209275, -9.28912138231483, -9.51407920780546, -9.79372614683001, -9.8173840679684, -9.8764969330579, -10.2203313727929,
-10.3870866433571, -10.5174185464455],
'err': [(0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3)]
},
1.0: {'M': [1.2589254E+10, 1.9952623E+10, 3.1622777E+10, 5.0118723E+10, 7.9432823E+10, 1.2589254E+11, 1.9952623E+11, 3.1622777E+11],
'phi': [-8.92007496792566, -9.05708605318108, -9.22584215073632, -9.32957944145149, -9.59738771540091, -9.77518272454617, -9.74281773821207,
-10.0030841831815],
'err': [(0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3), (0.3, 0.3)]
},
}
units = {'ssfr': '1.'}
data = {}
data['ssfr'] = {}
for group in ['ssfr']:
for key in tmp_data[group]:
if key not in tmp_data[group]:
continue
subdata = tmp_data[group]
mask = []
for element in subdata[key]['err']:
if element == ULIM:
mask.append(1)
else:
mask.append(0)
mask = np.array(mask)
data[group][key] = {}
data[group][key]['M'] = np.ma.array(subdata[key]['M'], mask=mask)
data[group][key]['phi'] = np.ma.array(subdata[key]['phi'], mask=mask)
data[group][key]['err'] = tmp_data[group][key]['err']
|
/-
Copyright (c) 2022 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author : Kevin Buzzard
-/
import tactic -- imports all the Lean tactics
/-!
# Logic in Lean, example sheet 2 : `true` and `false`
We learn about the `true` and `false` propositions.
## Tactics you will need
To solve the levels on this sheet you will need to know all previous
tactics, plus the following two new ones. Check out their explanations
in the course book. Or just try them out and hover over them to see
if you can understand what's going on.
* `triv`
* `exfalso`
-/
-- Throughout this sheet, `P`, `Q` and `R` will denote propositions.
-- Throughout this sheet, `P`, `Q` and `R` will denote propositions.
variables (P Q R : Prop)

-- `true` is provable outright with `triv`.
example : true :=
begin
  triv,
end

-- An implication whose hypothesis is exactly the goal: intro then exact.
example : true → true :=
begin
  intro h,
  exact h,
end

-- The hypothesis `false` is not even needed: the goal `true` holds anyway.
example : false → true :=
begin
  intro h,
  triv,
end

-- The hypothesis of `false` is itself the required proof of `false`.
example : false → false :=
begin
  intro h,
  exact h,
end

-- Apply the hypothesis `true → false` to the trivial proof of `true`.
example : (true → false) → false :=
begin
  intro h,
  apply h,
  triv,
end

-- Ex falso quodlibet: from `false`, any proposition `P` follows.
example : false → P :=
begin
  intro h,
  exfalso,
  exact h,
end

-- Either `false` hypothesis (h2 or h4) closes the goal directly.
example : true → false → true → false → true → false :=
begin
  intros h1 h2 h3 h4 h5,
  exact h4,
end

-- Double-negation introduction: given `P`, refute `P → false`.
example : P → ((P → false) → false) :=
begin
  intros hP h,
  apply h,
  assumption,
end

-- From `P → false` and `P` we obtain `false`, hence any `Q` via exfalso.
example : (P → false) → P → Q :=
begin
  intros h1 h2,
  exfalso,
  apply h1,
  exact h2,
end

-- `true → false` is absurd: after exfalso, apply it to `triv`.
example : (true → false) → P :=
begin
  intro h,
  exfalso,
  apply h,
  triv,
end
import Lake
import Init
import Mathlib.Data.Real.Basic
import Mathlib.Data.Vector
#eval 1
/-!
# Indexed inductive type
We define types `Vec α n` for `α : Type u` and `n: ℕ` with terms of `Vec α n` n-tuples in `α`.
* `α` will be a parameter.
* `n` will be an index.
-/
/-- Length-indexed vectors: `Vec α n` is the type of `n`-tuples of `α`.
The element type `α` is a parameter; the length `n` is an index. -/
inductive Vec (α : Type u) :
    ℕ → Type (u + 1) where
  | nil : Vec α 0
  | cons : {n : ℕ} →
      (head : α) → (tail : Vec α n) → Vec α (n + 1)

-- Sanity checks: the empty vector and a singleton vector.
example : Vec ℕ 0 := Vec.nil
example : Vec ℕ 1 := Vec.cons 3 (Vec.nil)

#check List

/-- Forget the length index, producing an ordinary list. -/
def Vec.to_list {α : Type u} {n : ℕ} : Vec α n → List α
  | Vec.nil => []
  | Vec.cons head tail => head :: tail.to_list
/-!
Vectors, i.e., lists of a fixed length, can be defined in (at least) two ways. One way is as an indexed inductive type `Vec`, as we saw in lecture and is in the file `InductiveTypes.lean`.
A different definition is as a subtype `Vector` of lists consisting of those of a fixed length. This is the definition used in `mathlib` and is recalled below.
```lean
/-- `Vector α n` is the type of lists of length `n` with elements of type `α`. -/
def Vector (α : Type u) (n : ℕ) :=
{ l : List α // l.length = n }
```
In this lab, you will relate the two definitions by constructing functions that convert between the two definitions and prove that these functions are inverses of each other.
-/
universe u
/-- Convert a `Vector α n` (a list paired with a proof of its length)
to the inductively defined `Vec α n`, by recursion on `n`. -/
def Vec.ofVector {α : Type u}: (n : ℕ) → Vector α n → Vec α n
  | 0, _ => Vec.nil
  | k + 1, ⟨ head :: tail, h ⟩ => by
    -- The tail of a cons-list of length `k + 1` has length `k`.
    have h1 : tail.length = k := by
      simp [List.length_cons] at h
      assumption
    apply Vec.cons head (Vec.ofVector k ⟨ tail, h1⟩ )
#print Vec.ofVector
/-- Convert a `Vec` to a `Vector`
def Vec.toVector {α : Type u}: (n : ℕ) → Vec α n → Vector α n
| _, _ => sorry --/
def vector1 : Vector ℕ 1 := ⟨ [2] , rfl ⟩
def vector0 : Vector ℕ 0 := ⟨ [], rfl ⟩
#eval vector1
/-- Convert a `Vec α n` to a `Vector α n`: cons the head onto the
converted tail; the length proof follows from the tail's `property`. -/
def Vec.toVector {α : Type u}: (n : ℕ) → Vec α n → Vector α n
  | 0, _ => ⟨ [], rfl ⟩
  | k + 1, Vec.cons head tail => ⟨ head :: (Vec.toVector k tail).val, by
      simp [List.length]; apply (Vec.toVector k tail).property⟩
-- by
-- let vectorTail : Vector α k := Vec.toVector k tail
-- let ⟨Ltail, h⟩ := vectorTail
-- ------------------------
-- --have lem1: Ltail = vectorTail.1 := by
-- ------------------------
-- have lem5 : (head :: Ltail).length = k + 1 := by
-- simp [List.length, h]
-- let vec3 : Vector α (k+1) := ⟨head :: Ltail, lem5⟩
-- apply vec3
-- def Vec.toVector2 {α : Type u}: (n : ℕ) → Vec α n → Vector α n
-- | 0, _ => ⟨ [], rfl ⟩
-- | k + 1, Vec.cons head tail => by
-- let vectorTail : Vector α k := Vec.toVector2 k tail
-- let ⟨Ltail, h⟩ := vectorTail
-- ------------------------
-- have lem1: Ltail = vectorTail.1 := by
-- ------------------------
-- have lem5 : (head :: Ltail).length = k + 1 := by
-- simp [List.length, h]
-- let vec3 : Vector α (k+1) := ⟨head :: Ltail, lem5⟩
-- apply vec3
-- #print Vec.toVector2
def V : Vec ℕ 3 := Vec.cons 1 (Vec.cons 3 (Vec.cons 2 Vec.nil))
#check (Vec.toVector 3 V).2
def vec : Vector ℕ 3 := ⟨[1,5,3],rfl⟩
#check vec
#check vec.2
/-- Vec.to_list {α : Type u} {n : ℕ} : Vec α n → List α
| Vec.nil => []
| Vec.cons head tail => head :: tail.to_list
-/
def p : Prop := (1 = 1) ∧ (2 = 2)
#check p
theorem A (h : p): 1 = 1 := by
let ⟨ha, _⟩ := h
apply ha
/-- Round trip: mapping a `Vec` to a `Vector` and back gives the
original `Vec`.  Proof by induction on the length `n`. -/
theorem Vec.ofVector.toVector {α : Type u} (n : ℕ) (v : Vec α n) :
    Vec.ofVector n (Vec.toVector n v) = v :=
  match n with
  | 0 => by
    rw [Vec.ofVector]
    -- At length zero the only inhabitant is `nil`.
    match v with
    | Vec.nil => rfl
  | k + 1 => by
    match v with
    | Vec.cons head tail =>
      -- Unfold one conversion step, then recurse on the tail.
      simp [Vec.ofVector]
      apply Vec.ofVector.toVector k tail
#print Vec.ofVector.toVector
-- let Lhead := (Vec.toVector (k + 1) (Vec.cons head tail)).head
-- let Ltail := (Vec.toVector (k + 1) (Vec.cons head tail)).tail
-- let h := (Vec.toVector (k + 1) (Vec.cons head tail)).2
-- have lem1 : Lhead :: Ltail = head :: (Vec.toVector k tail).val := by
-- simp [Vec.toVector]
-- apply And.intro
-- case left =>
/-- Mapping a `Vector` to a `Vec` and back gives the original `Vector` -/
theorem Vec.toVector.ofVector {α : Type u} (n : ℕ) (v : Vector α n) :
Vec.toVector n (Vec.ofVector n v) = v :=
match n with
| 0 => by
rw [Vec.toVector]
match v with
| Vector.nil => rfl
| k + 1 => by
match v with
| ⟨ head :: tail, h⟩ =>
simp[toVector]
simp only [ofVector]
-- have lem1 : head ::
-- (toVector (k + 0 + 0)
-- (ofVector (k + 0)
-- { val := tail, property := (h1 : List.length tail = Nat.add k 0) })).val =
-- head :: tail :=
#check Vec.ofVector
#print Vec.toVector.ofVector
-- /-- Mapping a `Vector` to a `Vec` and back gives the original `Vector` -/
-- theorem Vec.toVector.ofVector2 {α : Type u} (n : ℕ) (v : Vector α n) :
-- Vec.toVector n (Vec.ofVector n v) = v :=
-- match n with
-- | 0 => by
-- rw [Vec.toVector]
-- match v with
-- | Vector.nil => rfl
-- | k + 1 => by
-- match v with
-- | ⟨head :: tail, h⟩ =>
-- simp [Vec.toVector]
-- have lem1 : tail.length = k := by
-- simp [List.length_cons] at h
-- assumption
-- --apply Vec.toVector.ofVector k ⟨tail, lem1⟩
-- have lemt : (toVector k (Vec.ofVector k ⟨tail, lem1⟩)) = ⟨tail, lem1⟩ := by
-- apply Vec.toVector.ofVector k ⟨tail, lem1⟩
-- simp only [lemt]
-- -- head :: ↑(toVector (k + 0 + 0) (Vec.ofVector (k + 0) { val := tail, property := (_ : List.length tail = Nat.add k 0) }))
-- let vecTail : Vector α k := ⟨tail, lem1⟩
-- have lem2_val :
-- head :: (Vec.toVector k (Vec.ofVector k vecTail)).val =
-- head :: tail := by
-- simp
-- simp [Vec.toVector.ofVector, lem1]
-- simp [lem2_val]
-- have lem3 : (_ :
-- List.length
-- (head ::
-- ↑(toVector (Nat.add (k + 0) 0)
-- (Vec.ofVector (k + 0) { val := tail, property := (_ : List.length tail = Nat.add k 0) }))) =
-- Nat.add (k + 0) 0 + 1
-- theorem Vec.toVector.ofVector2 {α : Type u} (n : ℕ) (v : Vector α n) :
-- Vec.toVector n (Vec.ofVector n v) = v := by
-- induction n
-- case zero =>
-- rw [Vec.toVector]
-- match v with
-- | Vector.nil => rfl
-- case succ =>
-- apply n_ih✝
-- simp [Vec.toVector]
--apply Vec.toVector.ofVector k --⟨tail, lem1⟩
/-{
val :=
head ::
↑(toVector k
(Vec.ofVector k { val := tail, property := (_ : List.length tail = k })),
property :=
(_ :
List.length
(head ::
↑(toVector k)
(Vec.ofVector k { val := tail, property := (_ : List.length tail = k) }))) =
k + 1) } =
{ val := head :: tail, property := h }-/
/-(match
toVector k (Vec.ofVector k { val := tail, property := (_ : List.length tail = k) }) with
| { val := Ltail, property := h } =>
{ val := head :: Ltail, property := (_ : List.length Ltail= k) }) =
{ val := head :: tail, property := h }-/
#eval 1
#check Subtype
#check Subtype.mk -- (val : α) → p val → Subtype p
#check Subtype.property
universe u
inductive InfiniteTree (α : Type u) where
| leaf (label: α) : InfiniteTree α
| node : (ℕ → InfiniteTree α) → InfiniteTree α
inductive FiniteTree (α : Type u) where
| leaf (label: α) : FiniteTree α
| node : (List <| FiniteTree α) → FiniteTree α
def And_implies_right (a b : Prop): a ∧ b → a := by
intro hab
apply And.left hab
/-def Distrubutive (a b c : Prop): a ∧ (b ∨ c) ↔ (a ∧ b) ∨ (a ∧ c) := by
apply Iff.intro
case mp =>
intro h
have ha : a := And.left h
have hborc : b ∨ c := And.right h
by_cases hc:c
case inl =>
have hac : a ∧ c := by apply And.intro ha hc
apply Or.inr
assumption
case inr =>
by_cases hb : b
case inl =>
have hab : a ∧ b := by apply And.intro ha hb
apply Or.inl
assumption
case inr =>-/
-------------------------------------------------------------------------------
variable (Point : Type)
variable [pts_nonEmpty : Inhabited Point]
variable (Line : Type)
variable (lies_on : Point → Line → Prop)
def samePoint(p₁ p₂: Point) := p₁ = p₂
def some_point : Point := default
#check some_point
/-- Bundled incidence geometry: types of lines and points together with
the incidence relation `lies_on`. -/
structure IncidenceGeometry where
  Line : Type
  Point : Type
  lies_on : Point → Line → Prop

/-- Two lines intersect when some point lies on both of them. -/
def intersect {geom : IncidenceGeometry}(l₁ l₂ : geom.Line) : Prop :=
  ∃p : geom.Point, geom.lies_on p l₁ ∧ geom.lies_on p l₂

/-- Two lines share (at least) two *distinct* common points. -/
def intersect_on_pair {geom : IncidenceGeometry}(l₁ l₂ : geom.Line) : Prop :=
  ∃p₁ p₂ : geom.Point, geom.lies_on p₁ l₁ ∧ geom.lies_on p₁ l₂ ∧
    geom.lies_on p₂ l₁ ∧ geom.lies_on p₂ l₂ ∧ p₁ ≠ p₂
variable (congruence : {A : Type} → A → A → Prop)
-- Congruence is equivalence relationship
axiom CongEquiv {A : Type} : IsEquiv A congruence
lemma CongRefl {A : Type} : ∀ a : A, congruence a a :=
CongEquiv.refl
lemma CongSymm {A : Type} :
∀ a b : A, congruence a b → congruence b a :=
CongEquiv.symm
lemma CongTrans {A : Type} :
∀ a b c : A,
congruence a b → congruence b c → congruence a c :=
CongEquiv.trans
axiom in_between : Point → Point → Point → Prop
structure Segment : Type :=
p1 : Point
p2 : Point
axiom distance (Apt Bpt : Point) : ℝ
-- Distance axioms
axiom dist_is_not_neg (A B : Point): distance A B ≥ 0
axiom dist_same_point (A : Point) : distance A A = 0
axiom dist_geq_0 (A B : Point) : A ≠ B ↔ distance A B > 0
axiom dist_is_symm (A B : Point) : distance A B = distance B A
axiom dist_tri_ineq (A B C : Point) :
distance A B + distance B C ≥ distance A C
-- Axioms when a point is in between other two
axiom dist_in_between (a b c : Point) (h : in_between a b c) :
distance a b + distance b c = distance a c
axiom between_refl_left (a b : Point) : in_between a a b
axiom between_refl_right (a b : Point) : in_between a b b
--Euclid postulates
--Postulate 1
--Between two points there is an unique line passing through them
axiom Line_of_two_points (A B : Point) (h : A ≠ B): Line --says you get a line from two points
axiom point_contain (A B : Point) (h : A ≠ B) : --says such a line contain both the points
have l : Line := Line_of_two_points A B h
lies_on A l ∧ lies_on B l
axiom line_unique (A B: Point) (h : A ≠ B) (l1 l2 : Line): --says such a line is unique
(lies_on A l1 ∧ lies_on B l2) ∧ (lies_on A l2 ∧ lies_on B l2) → l1 = l2
axiom IsCoincide (l1 l2 : Line) : l1 = l2
--Postulate 2
--A line segment can be extended to get a line
axiom line_from_seg (seg : Segment) : Line --says a you get a line from a segment
axiom contain_end_points (seg : Segment) : --says the end points of the segment lies on the resulting line
lies_on (seg.p1) (line_from_seg seg) ∧ lies_on (seg.p2) (line_from_seg seg)
axiom length_of_seg (seg : Segment) : ℝ
axiom Length_of_seg (seg : Segment) : length_of_seg seg = distance seg.p1 seg.p2
--Postulate 3
--A circle can be drawn from any centre and any radius
structure Circle : Type :=
centre : Point
radius : ℝ
axiom On_circle (A : Point) (C : Circle) :
distance C.centre A = C.radius
/-
def Equi_triangle (A B C : Point) : Prop :=
distance A B = distance A C ∧ distance A C = distance B C
theorem Elements_th1 (s : Segment) :
∃ C : Point , Equi_triangle (s.p1 s.p2 C) := by
have c1 : Circle := Circle.mk s.p1 (length_of_seg s)
have c2 : Circle := Circle.mk s.p2 (length_of_seg s)
-/
--- (c1 : Circle) : c1.centre = segAB.p1 ∧ c1.radius = (distance (segAB.p1 segAB.p2)) →
--Postulate 4
--All right angles are equal
structure Angle : Type :=
p1 : Point
Pivot : Point
p2 : Point
-- check whether p1 and pivot are different points
axiom reflexAngle : Angle → Angle
axiom mAngle : Angle → ℝ
-- properties of Angle
axiom AngleSymm (a b c : Point) : Angle.mk a b c = Angle.mk c b a
axiom MakingIntPointAngle (a : Point) (A : Angle) : Prop
-- property of "introducing an interior point 'a' within an angle A"
def IntPointAngle (a : Point) (A : Angle) (_ : MakingIntPointAngle a A): Prop :=
have A1 : Angle := Angle.mk a A.Pivot A.p1
have A2 : Angle := Angle.mk a A.Pivot A.p2
(mAngle (A1) < mAngle A)
∧ (mAngle (A2) < mAngle A) -- strict inequality because of 120
#check IntPointAngle
-- properties of mAngle
axiom mAngle_non_neg (a b c : Point) : mAngle (Angle.mk a b c) ≥ 0
axiom ZeroAngle (a b c : Point) (hc : in_between a c b): mAngle (Angle.mk a b c) = 0
axiom mAngle_postive (a b c : Point) (hc : ¬ in_between a c b) (ha : ¬ in_between c a b):
mAngle (Angle.mk a b c) > 0
axiom mReflexAngle (A : Angle) : mAngle (reflexAngle A) = 360 - mAngle A
-- Angle A as Sum of its constituents
axiom mAngleAdd (a : Point) (A : Angle) (hInt : MakingIntPointAngle a A)(h : IntPointAngle a A hInt) :
have A1 : Angle := Angle.mk a A.Pivot A.p1
have A2 : Angle := Angle.mk a A.Pivot A.p2
mAngle A = mAngle A1 + mAngle A2
-- Given Angle A, if we introduce segment (a A.Pivot), then
-- the constiuents sum to the whole Angle A. We can obtain the
-- this as a theorem to MakingIntPointAngle, IntPointAngle, mAngleAdd.
-- Postulate 4 says all right angles are equal.
-- We are assigning it a value of 90
axiom isRightAngle (A : Angle): mAngle A = 90
-- TODO: straight line is two right angles
axiom StraightAngle (a b c : Point) (h : in_between a b c) : mAngle (Angle.mk a b c) = 180
-- Postulate 5
-- If a straight line falling on two straight lines makes the interior
-- angles on the same side of it taken together less than two right angles, then the
-- two straight lines, if produced indefinitely, meet on that side on which the sum of
-- angles is less than two right angles.
axiom IsParallel (l1 l2 : Line): Prop -- defn of parallel lines
axiom IsParrallel_isequiv : sorry
-- need to take care of the case when l1 = l2 for IsIntersect.
-- we probably don't need this
-- property of IsIntersect
axiom PointFromIntersect (l1 l2 : Line) (h : ¬ IsParallel l1 l2) :
∃ c : Point, lies_on c l1 ∧ lies_on c l2
-- Unique intersection point of two lines
theorem UniqueIntersectPoint (l1 l2 : Line) (h1 : l1 ≠ l2) (h2 : ¬ IsParallel l1 l2 h1) :
lies_on (a : Point) l1 ∧ lies_on a l2
→
lies_on (b : Point) l1 ∧ lies_on b l2
→
a = b
:= sorry
-/
-- Towards same-sided-ness
-- entire segment is on the line
/-def SegmentOnLine (A : Segment) (l : Line) : Prop :=
lies_on A.p1 l ∧ lies_on A.p2 l-/
-- points on a segment
def PointOnSegment (a : Point) (A : Segment) : Prop :=
in_between A.p1 a A.p2
axiom SegLineIntersect (A : Segment) (l : Line) (h : ¬ SegmentOnLine A l)
: Prop
-- need to introduce property of SegLineIntersect
-- have not accounted for the case where segment "stands on" the line
-- Points on the same side of a line segment
axiom SameSidedPoints (a b : Point) (l : Line)
(h1 : ¬ SegmentOnLine (Segment.mk a b) l)
(h2 : ¬ SegLineIntersect (Segment.mk a b) l h1)
: Prop
axiom IntersectingLines (A B C : Segment)
(h1 : ¬ SegmentOnLine (Segment.mk A.p1 B.p1) (line_from_seg C))
(h2 : ¬ SegLineIntersect (Segment.mk A.p1 B.p1) (line_from_seg C) h1)
:
SameSidedPoints (A.p1) (B.p1) (line_from_seg C) h1 h2
→
/-
→
(in_between A.p1 C.p1 A.p2) ∧ (in_between B.p1 C.p2 B.p2)
-- what about C.p1 = A.p1? is this not taken care of by SegLineIntersect?
→ -/
-- (need to bring in "same-sided-ness")
|
!==============================================================================!
  subroutine Monitor_Mod_Initialize(monitor, grid, restart, domain)
!------------------------------------------------------------------------------!
!   This is to read the control file and set up monitoring points.             !
!   For each point it finds the closest grid cell (across all processors)      !
!   and opens one output file per point.                                       !
!------------------------------------------------------------------------------!
  implicit none
!---------------------------------[Arguments]----------------------------------!
  type(Monitor_Type)      :: monitor  ! monitor object being initialized
  type(Grid_Type), target :: grid     ! grid in which the points are located
  logical                 :: restart  ! .true. -> append to existing files
  integer, optional       :: domain   ! domain number (multi-domain runs)
!-----------------------------------[Locals]-----------------------------------!
  integer           :: c, m, n, l
  real              :: curr_dist, min_dist_all
  character(len=80) :: mon_file_name
  character(len=80) :: point_name
  real, allocatable :: min_dist(:)
  real              :: xyz(3), def(3)
!==============================================================================!

  monitor % pnt_grid => grid

  ! Read number of monitoring points from control file
  call Control_Mod_Read_Int_Item('NUMBER_OF_MONITORING_POINTS', 0, &
                                  monitor % n_points, .true.)

  ! Nothing else to do if no monitoring points were requested
  if(monitor % n_points .eq. 0) return

  ! Allocate memory for points
  allocate(monitor % x(monitor % n_points))
  allocate(monitor % y(monitor % n_points))
  allocate(monitor % z(monitor % n_points))

  ! Allocate memory accordingly
  allocate(monitor % cell     (monitor % n_points))
  allocate(monitor % file_unit(monitor % n_points))
  allocate(min_dist           (monitor % n_points))

  !----------------------------------------!
  !   Read monitoring points coordinates   !
  !----------------------------------------!
  do n = 1, monitor % n_points

    ! Points are keyed MONITORING_POINT_001, MONITORING_POINT_002, ...
    write(point_name, '(a,i3.3)') 'MONITORING_POINT_', n

    def = 0.  ! don't have a better idea what to set
    call Control_Mod_Read_Real_Array(point_name, 3, def, xyz, .true.)

    monitor % x(n) = xyz(1)
    monitor % y(n) = xyz(2)
    monitor % z(n) = xyz(3)
  end do

  !--------------------------------------------!
  !   Set the names for all monitoring files   !
  !--------------------------------------------!
  ! The trailing '000' is a placeholder; the point number is patched
  ! into the last three characters below.
  call File_Mod_Set_Name(mon_file_name, extension='-monit.000', domain=domain)
  l = len_trim(mon_file_name)

  !-------------------------------!
  !   Find the monitoring cells   !
  !-------------------------------!
  do m = 1, monitor % n_points

    ! Linear scan over local cells for the closest one
    min_dist(m) = HUGE

    do c = 1, grid % n_cells
      curr_dist = Math_Mod_Distance( monitor % x(m),  &
                                     monitor % y(m),  &
                                     monitor % z(m),  &
                                     grid % xc(c),    &
                                     grid % yc(c),    &
                                     grid % zc(c))
      ! Current distance is smaller than the stored one
      if(curr_dist < min_dist(m)) then
        monitor % cell(m) = c
        min_dist(m) = curr_dist
      end if
    end do

    ! Check if smaller distance is found on another processor
    if(n_proc > 1) then
      min_dist_all = min_dist(m)
      call Comm_Mod_Global_Min_Real(min_dist_all)

      ! If there is, erase monitoring point at this_proc
      ! (cell(m) = 0 marks "this point is owned by another processor")
      if(abs(min_dist_all - min_dist(m)) >= TINY) then
        monitor % cell(m) = 0
      end if
    end if

  end do

  !----------------------------------------------!
  !   Write first line in the monitoring files   !
  !----------------------------------------------!
  do m = 1, monitor % n_points

    ! Only the processor owning the point writes its file
    if(monitor % cell(m) > 0) then

      ! Patch point number into the file name (see placeholder above)
      write(mon_file_name(l-2:l),'(I3.3)') m

      if(.not. restart) then
        call File_Mod_Open_File_For_Writing(mon_file_name,  &
                                            monitor % file_unit(m))
      else
        call File_Mod_Append_File_For_Writing(mon_file_name,  &
                                              monitor % file_unit(m))
      endif

      ! Header records the coordinates of the cell actually monitored
      write(monitor % file_unit(m), '(a24, 3f16.6)')  &
            '# Monitoring point:',                    &
            grid % xc( monitor % cell(m) ),           &
            grid % yc( monitor % cell(m) ),           &
            grid % zc( monitor % cell(m) )

    end if
  end do

  end subroutine
If $(q_1, r_1)$ and $(q_2, r_2)$ are both pairs of polynomials that satisfy the Euclidean relation for polynomials, then $q_1 = q_2$ and $r_1 = r_2$. |
-- Helper module apparently used to exercise error reporting for
-- imported modules (judging by its name).
module Error-in-imported-module.M where

-- NOTE(review): `Foo = Set` at type `Set` is not well-typed in Agda
-- (Set : Set₁), so this definition presumably triggers the error the
-- module name refers to -- confirm intent before "fixing".
Foo : Set
Foo = Set
      subroutine tstrdx(cdrs,iodb,iopt,jflag,jsflag,narn1,narn2,
     $ ndrs,ndrsmx,ndrsr,nodbmx,noptmx,noutpt,nrdxsp,nstmax,
     $ qredox,uspec)
c
c     This subroutine determines if the chemical model to be
c     computed has a redox aspect. This will be determined to be
c     so if a species in the model has an associated reaction
c     that is a redox reaction and the species is not in the
c     active basis set.
c
c     A reaction is recognized as a redox reaction when the
c     redox species (index nrdxsp) appears in it with a non-zero
c     stoichiometric coefficient.
c
c     An auxiliary basis species (say Oxalate-) that is in the
c     model but is included in the active basis set is effectively.
c     treated as detached from a corresponding strict basis species
c     (e.g., the concentration/activity of Oxalate- is not determined
c     by the assumption of equilibrium for its associated reaction,
c     which would link it to HCO3-). In effect, Oxalate- is treated
c     as being composed of a pseudo-element, and its presence in the
c     model does not require a redox variable.
c
c     This subroutine is called by:
c
c       EQ6/eq6.f
c
c-----------------------------------------------------------------------
c
c     Principal input:
c
c
c     Principal output:
c
c       qredox = .true. if the model has a redox aspect
c
c-----------------------------------------------------------------------
c
      implicit none
c
c-----------------------------------------------------------------------
c
c     Calling sequence variable declarations.
c
      integer ndrsmx,nodbmx,noptmx,nstmax
c
      integer noutpt
c
      integer iodb(nodbmx),iopt(noptmx),jflag(nstmax),jsflag(nstmax),
     $ ndrs(ndrsmx),ndrsr(2,nstmax)
c
      integer narn1,narn2,nrdxsp
c
      logical qredox
c
      character(len=48) uspec(nstmax)
c
      real(8) cdrs(ndrsmx)
c
c-----------------------------------------------------------------------
c
c     Local variable declarations.
c
      integer jlen,ns
c
      character(len=56) uspn56
c
      real(8) cx
c
      real(8) coefdr
c
c-----------------------------------------------------------------------
c
      qredox = .false.
c
c     NOTE(review): the scan is made only when iopt(15) .le. 0;
c     presumably iopt(15) suppresses the redox aspect -- confirm
c     against the EQ6 option documentation.
c
      if (iopt(15) .le. 0) then
        do ns = narn1,narn2
c
          if (jflag(ns) .eq. 30) then
c
c           The species is not in the active basis set. It is a
c           "dependent" species whose concentration/activity is
c           computed assuming its associated reaction is in a state
c           of equilibrium.
c
            if (jsflag(ns) .lt. 2) then
c
c             The species not hard suppressed.
c
c             Get the coefficient of the redox species in the
c             reaction associated with species ns.
c
c             Calling sequence substitutions:
c               nrdxsp for nse
c
              cx = coefdr(cdrs,ndrs,ndrsmx,ndrsr,nrdxsp,ns,nstmax)
              if (cx .ne. 0.) then
                qredox = .true.
                if (iodb(1) .ge. 1) then
c
c                 NOTE(review): the early exit (go to 999) is taken
c                 only when debug printing is on; otherwise the loop
c                 keeps scanning with qredox already .true. (harmless,
c                 but slightly wasteful).
c
c                 Calling sequence substitutions:
c                   uspec(ns) for unam48
c
                  call fmspnm(jlen,uspec(ns),uspn56)
                  write (noutpt,1000) uspn56(1:jlen)
 1000             format(/' * Note - (EQ6/tstrdx) The reaction',
     $            ' associated with the species',/7x,a,' is a redox',
     $            ' reaction.')
                  go to 999
                endif
              endif
            endif
          endif
        enddo
      endif
c
c* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c
  999 continue
      end
|
Coq < Variable D : Set.
D is assumed
Warning: D is declared as a parameter because it is at a global level
Coq < Variable P : D -> Prop.
P is assumed
Warning: P is declared as a parameter because it is at a global level
Coq < Variable d : D.
d is assumed
Warning: d is declared as a parameter because it is at a global level
Coq < Lemma weird : (forall x:D, P x) -> exists a, P a.
1 subgoal
============================
(forall x : D, P x) -> exists a : D, P a
weird < intro UnivP.
1 subgoal
UnivP : forall x : D, P x
============================
exists a : D, P a
weird < exists d; trivial.
No more subgoals.
weird < Qed.
intro UnivP.
exists d; trivial.
weird is defined
|
(* Title: HOL/Library/Multiset.thy
Author: Tobias Nipkow, Markus Wenzel, Lawrence C Paulson, Norbert Voelker
Author: Andrei Popescu, TU Muenchen
Author: Jasmin Blanchette, Inria, LORIA, MPII
Author: Dmitriy Traytel, TU Muenchen
Author: Mathias Fleury, MPII
*)
section \<open>(Finite) Multisets\<close>
theory Multiset
imports Cancellation
begin
subsection \<open>The type of multisets\<close>
definition "multiset = {f :: 'a \<Rightarrow> nat. finite {x. f x > 0}}"
typedef 'a multiset = "multiset :: ('a \<Rightarrow> nat) set"
morphisms count Abs_multiset
unfolding multiset_def
proof
show "(\<lambda>x. 0::nat) \<in> {f. finite {x. f x > 0}}" by simp
qed
setup_lifting type_definition_multiset
lemma multiset_eq_iff: "M = N \<longleftrightarrow> (\<forall>a. count M a = count N a)"
by (simp only: count_inject [symmetric] fun_eq_iff)
lemma multiset_eqI: "(\<And>x. count A x = count B x) \<Longrightarrow> A = B"
using multiset_eq_iff by auto
text \<open>Preservation of the representing set \<^term>\<open>multiset\<close>.\<close>
lemma const0_in_multiset: "(\<lambda>a. 0) \<in> multiset"
by (simp add: multiset_def)
lemma only1_in_multiset: "(\<lambda>b. if b = a then n else 0) \<in> multiset"
by (simp add: multiset_def)
lemma union_preserves_multiset: "M \<in> multiset \<Longrightarrow> N \<in> multiset \<Longrightarrow> (\<lambda>a. M a + N a) \<in> multiset"
by (simp add: multiset_def)
(* Pointwise subtraction can only shrink the support: a point with a
   positive difference must already be positive in M, so finiteness of
   M's support carries over. *)
lemma diff_preserves_multiset:
  assumes "M \<in> multiset"
  shows "(\<lambda>a. M a - N a) \<in> multiset"
proof -
  have "{x. N x < M x} \<subseteq> {x. 0 < M x}"
    by auto
  with assms show ?thesis
    by (auto simp add: multiset_def intro: finite_subset)
qed

(* Filtering by a predicate zeroes some counts, so the support of the
   filtered function is a subset of M's (finite) support. *)
lemma filter_preserves_multiset:
  assumes "M \<in> multiset"
  shows "(\<lambda>x. if P x then M x else 0) \<in> multiset"
proof -
  have "{x. (P x \<longrightarrow> 0 < M x) \<and> P x} \<subseteq> {x. 0 < M x}"
    by auto
  with assms show ?thesis
    by (auto simp add: multiset_def intro: finite_subset)
qed
lemmas in_multiset = const0_in_multiset only1_in_multiset
union_preserves_multiset diff_preserves_multiset filter_preserves_multiset
subsection \<open>Representing multisets\<close>
text \<open>Multiset enumeration\<close>
instantiation multiset :: (type) cancel_comm_monoid_add
begin
lift_definition zero_multiset :: "'a multiset" is "\<lambda>a. 0"
by (rule const0_in_multiset)
abbreviation Mempty :: "'a multiset" ("{#}") where
"Mempty \<equiv> 0"
lift_definition plus_multiset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset" is "\<lambda>M N. (\<lambda>a. M a + N a)"
by (rule union_preserves_multiset)
lift_definition minus_multiset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset" is "\<lambda> M N. \<lambda>a. M a - N a"
by (rule diff_preserves_multiset)
instance
by (standard; transfer; simp add: fun_eq_iff)
end
context
begin
qualified definition is_empty :: "'a multiset \<Rightarrow> bool" where
[code_abbrev]: "is_empty A \<longleftrightarrow> A = {#}"
end
lemma add_mset_in_multiset:
assumes M: \<open>M \<in> multiset\<close>
shows \<open>(\<lambda>b. if b = a then Suc (M b) else M b) \<in> multiset\<close>
using assms by (simp add: multiset_def flip: insert_Collect)
lift_definition add_mset :: "'a \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset" is
"\<lambda>a M b. if b = a then Suc (M b) else M b"
by (rule add_mset_in_multiset)
syntax
"_multiset" :: "args \<Rightarrow> 'a multiset" ("{#(_)#}")
translations
"{#x, xs#}" == "CONST add_mset x {#xs#}"
"{#x#}" == "CONST add_mset x {#}"
lemma count_empty [simp]: "count {#} a = 0"
by (simp add: zero_multiset.rep_eq)
lemma count_add_mset [simp]:
"count (add_mset b A) a = (if b = a then Suc (count A a) else count A a)"
by (simp add: add_mset.rep_eq)
lemma count_single: "count {#b#} a = (if b = a then 1 else 0)"
by simp
lemma
add_mset_not_empty [simp]: \<open>add_mset a A \<noteq> {#}\<close> and
empty_not_add_mset [simp]: "{#} \<noteq> add_mset a A"
by (auto simp: multiset_eq_iff)
lemma add_mset_add_mset_same_iff [simp]:
"add_mset a A = add_mset a B \<longleftrightarrow> A = B"
by (auto simp: multiset_eq_iff)
lemma add_mset_commute:
"add_mset x (add_mset y M) = add_mset y (add_mset x M)"
by (auto simp: multiset_eq_iff)
subsection \<open>Basic operations\<close>
subsubsection \<open>Conversion to set and membership\<close>
definition set_mset :: "'a multiset \<Rightarrow> 'a set"
where "set_mset M = {x. count M x > 0}"
abbreviation Melem :: "'a \<Rightarrow> 'a multiset \<Rightarrow> bool"
where "Melem a M \<equiv> a \<in> set_mset M"
notation
Melem ("'(\<in>#')") and
Melem ("(_/ \<in># _)" [51, 51] 50)
notation (ASCII)
Melem ("'(:#')") and
Melem ("(_/ :# _)" [51, 51] 50)
abbreviation not_Melem :: "'a \<Rightarrow> 'a multiset \<Rightarrow> bool"
where "not_Melem a M \<equiv> a \<notin> set_mset M"
notation
not_Melem ("'(\<notin>#')") and
not_Melem ("(_/ \<notin># _)" [51, 51] 50)
notation (ASCII)
not_Melem ("'(~:#')") and
not_Melem ("(_/ ~:# _)" [51, 51] 50)
context
begin
qualified abbreviation Ball :: "'a multiset \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> bool"
where "Ball M \<equiv> Set.Ball (set_mset M)"
qualified abbreviation Bex :: "'a multiset \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> bool"
where "Bex M \<equiv> Set.Bex (set_mset M)"
end
syntax
"_MBall" :: "pttrn \<Rightarrow> 'a set \<Rightarrow> bool \<Rightarrow> bool" ("(3\<forall>_\<in>#_./ _)" [0, 0, 10] 10)
"_MBex" :: "pttrn \<Rightarrow> 'a set \<Rightarrow> bool \<Rightarrow> bool" ("(3\<exists>_\<in>#_./ _)" [0, 0, 10] 10)
syntax (ASCII)
"_MBall" :: "pttrn \<Rightarrow> 'a set \<Rightarrow> bool \<Rightarrow> bool" ("(3\<forall>_:#_./ _)" [0, 0, 10] 10)
"_MBex" :: "pttrn \<Rightarrow> 'a set \<Rightarrow> bool \<Rightarrow> bool" ("(3\<exists>_:#_./ _)" [0, 0, 10] 10)
translations
"\<forall>x\<in>#A. P" \<rightleftharpoons> "CONST Multiset.Ball A (\<lambda>x. P)"
"\<exists>x\<in>#A. P" \<rightleftharpoons> "CONST Multiset.Bex A (\<lambda>x. P)"
(* Basic correspondence between count, membership (\<in>#) and set_mset:
   x \<in># M iff count M x > 0, plus introduction/elimination rules
   phrased in the forms most useful for automation. *)
lemma count_eq_zero_iff:
"count M x = 0 \<longleftrightarrow> x \<notin># M"
by (auto simp add: set_mset_def)
lemma not_in_iff:
"x \<notin># M \<longleftrightarrow> count M x = 0"
by (auto simp add: count_eq_zero_iff)
lemma count_greater_zero_iff [simp]:
"count M x > 0 \<longleftrightarrow> x \<in># M"
by (auto simp add: set_mset_def)
(* Membership by contradiction: useful when only count M x = 0 leads to False. *)
lemma count_inI:
assumes "count M x = 0 \<Longrightarrow> False"
shows "x \<in># M"
proof (rule ccontr)
assume "x \<notin># M"
with assms show False by (simp add: not_in_iff)
qed
(* Elimination: a member has a strictly positive, hence successor-shaped, count. *)
lemma in_countE:
assumes "x \<in># M"
obtains n where "count M x = Suc n"
proof -
from assms have "count M x > 0" by simp
then obtain n where "count M x = Suc n"
using gr0_conv_Suc by blast
with that show thesis .
qed
lemma count_greater_eq_Suc_zero_iff [simp]:
"count M x \<ge> Suc 0 \<longleftrightarrow> x \<in># M"
by (simp add: Suc_le_eq)
lemma count_greater_eq_one_iff [simp]:
"count M x \<ge> 1 \<longleftrightarrow> x \<in># M"
by simp
(* set_mset on the basic constructors: empty, singleton, add_mset. *)
lemma set_mset_empty [simp]:
"set_mset {#} = {}"
by (simp add: set_mset_def)
lemma set_mset_single:
"set_mset {#b#} = {b}"
by (simp add: set_mset_def)
lemma set_mset_eq_empty_iff [simp]:
"set_mset M = {} \<longleftrightarrow> M = {#}"
by (auto simp add: multiset_eq_iff count_eq_zero_iff)
(* Finiteness of the support is built into the multiset type (multiset_def). *)
lemma finite_set_mset [iff]:
"finite (set_mset M)"
using count [of M] by (simp add: multiset_def)
lemma set_mset_add_mset_insert [simp]: \<open>set_mset (add_mset a A) = insert a (set_mset A)\<close>
by (auto simp flip: count_greater_eq_Suc_zero_iff split: if_splits)
lemma multiset_nonemptyE [elim]:
assumes "A \<noteq> {#}"
obtains x where "x \<in># A"
proof -
have "\<exists>x. x \<in># A" by (rule ccontr) (insert assms, auto)
with that show ?thesis by blast
qed
subsubsection \<open>Union\<close>
(* Multiset sum (+) adds counts pointwise; its support is the union of supports,
   and it commutes with add_mset on either side. *)
lemma count_union [simp]:
"count (M + N) a = count M a + count N a"
by (simp add: plus_multiset.rep_eq)
lemma set_mset_union [simp]:
"set_mset (M + N) = set_mset M \<union> set_mset N"
by (simp only: set_eq_iff count_greater_zero_iff [symmetric] count_union) simp
lemma union_mset_add_mset_left [simp]:
"add_mset a A + B = add_mset a (A + B)"
by (auto simp: multiset_eq_iff)
lemma union_mset_add_mset_right [simp]:
"A + add_mset a B = add_mset a (A + B)"
by (auto simp: multiset_eq_iff)
(* add_mset expressed via singleton union; bridges add_mset and (+) reasoning. *)
lemma add_mset_add_single: \<open>add_mset a A = A + {#a#}\<close>
by (subst union_mset_add_mset_right, subst add.comm_neutral) standard
subsubsection \<open>Difference\<close>
(* Multiset difference is truncated (natural-number) subtraction of counts:
   multisets form a commutative monoid with monus. *)
instance multiset :: (type) comm_monoid_diff
by standard (transfer; simp add: fun_eq_iff)
lemma count_diff [simp]:
"count (M - N) a = count M a - count N a"
by (simp add: minus_multiset.rep_eq)
lemma add_mset_diff_bothsides:
\<open>add_mset a M - add_mset a A = M - A\<close>
by (auto simp: multiset_eq_iff)
(* Membership in a difference means the count in M strictly exceeds that in N. *)
lemma in_diff_count:
"a \<in># M - N \<longleftrightarrow> count N a < count M a"
by (simp add: set_mset_def)
lemma count_in_diffI:
assumes "\<And>n. count N x = n + count M x \<Longrightarrow> False"
shows "x \<in># M - N"
proof (rule ccontr)
assume "x \<notin># M - N"
then have "count N x = (count N x - count M x) + count M x"
by (simp add: in_diff_count not_less)
with assms show False by auto
qed
lemma in_diff_countE:
assumes "x \<in># M - N"
obtains n where "count M x = Suc n + count N x"
proof -
from assms have "count M x - count N x > 0" by (simp add: in_diff_count)
then have "count M x > count N x" by simp
then obtain n where "count M x = Suc n + count N x"
using less_iff_Suc_add by auto
with that show thesis .
qed
lemma in_diffD:
assumes "a \<in># M - N"
shows "a \<in># M"
proof -
have "0 \<le> count N a" by simp
also from assms have "count N a < count M a"
by (simp add: in_diff_count)
finally show ?thesis by simp
qed
lemma set_mset_diff:
"set_mset (M - N) = {a. count N a < count M a}"
by (simp add: set_mset_def)
(* Cancellation laws inherited from the generic Groups/comm_monoid_diff setup. *)
lemma diff_empty [simp]: "M - {#} = M \<and> {#} - M = {#}"
by rule (fact Groups.diff_zero, fact Groups.zero_diff)
lemma diff_cancel: "A - A = {#}"
by (fact Groups.diff_cancel)
lemma diff_union_cancelR: "M + N - N = (M::'a multiset)"
by (fact add_diff_cancel_right')
lemma diff_union_cancelL: "N + M - N = (M::'a multiset)"
by (fact add_diff_cancel_left')
lemma diff_right_commute:
fixes M N Q :: "'a multiset"
shows "M - N - Q = M - Q - N"
by (fact diff_right_commute)
lemma diff_add:
fixes M N Q :: "'a multiset"
shows "M - (N + Q) = M - N - Q"
by (rule sym) (fact diff_diff_add)
(* Removing and re-inserting an occurrence of a member is the identity. *)
lemma insert_DiffM [simp]: "x \<in># M \<Longrightarrow> add_mset x (M - {#x#}) = M"
by (clarsimp simp: multiset_eq_iff)
lemma insert_DiffM2: "x \<in># M \<Longrightarrow> (M - {#x#}) + {#x#} = M"
by simp
lemma diff_union_swap: "a \<noteq> b \<Longrightarrow> add_mset b (M - {#a#}) = add_mset b M - {#a#}"
by (auto simp add: multiset_eq_iff)
lemma diff_add_mset_swap [simp]: "b \<notin># A \<Longrightarrow> add_mset b M - A = add_mset b (M - A)"
by (auto simp add: multiset_eq_iff simp: not_in_iff)
lemma diff_union_swap2 [simp]: "y \<in># M \<Longrightarrow> add_mset x M - {#y#} = add_mset x (M - {#y#})"
by (metis add_mset_diff_bothsides diff_union_swap diff_zero insert_DiffM)
lemma diff_diff_add_mset [simp]: "(M::'a multiset) - N - P = M - (N + P)"
by (rule diff_diff_add)
lemma diff_union_single_conv:
"a \<in># J \<Longrightarrow> I + J - {#a#} = I + (J - {#a#})"
by (simp add: multiset_eq_iff Suc_le_eq)
(* Elimination: split off one occurrence of a member as add_mset. *)
lemma mset_add [elim?]:
assumes "a \<in># A"
obtains B where "A = add_mset a B"
proof -
from assms have "A = add_mset a (A - {#a#})"
by simp
with that show thesis .
qed
lemma union_iff:
"a \<in># A + B \<longleftrightarrow> a \<in># A \<or> a \<in># B"
by auto
subsubsection \<open>Min and Max\<close>
(* Minimum/maximum element of a multiset over a linear order, defined as
   Min/Max of the (finite) support set; meaningful only for nonempty multisets. *)
abbreviation Min_mset :: "'a::linorder multiset \<Rightarrow> 'a" where
"Min_mset m \<equiv> Min (set_mset m)"
abbreviation Max_mset :: "'a::linorder multiset \<Rightarrow> 'a" where
"Max_mset m \<equiv> Max (set_mset m)"
subsubsection \<open>Equality of multisets\<close>
(* Characterisations of equality for singletons, unions and add_mset,
   including the classic case analyses add_eq_conv_diff / add_eq_conv_ex. *)
lemma single_eq_single [simp]: "{#a#} = {#b#} \<longleftrightarrow> a = b"
by (auto simp add: multiset_eq_iff)
lemma union_eq_empty [iff]: "M + N = {#} \<longleftrightarrow> M = {#} \<and> N = {#}"
by (auto simp add: multiset_eq_iff)
lemma empty_eq_union [iff]: "{#} = M + N \<longleftrightarrow> M = {#} \<and> N = {#}"
by (auto simp add: multiset_eq_iff)
lemma multi_self_add_other_not_self [simp]: "M = add_mset x M \<longleftrightarrow> False"
by (auto simp add: multiset_eq_iff)
lemma add_mset_remove_trivial [simp]: \<open>add_mset x M - {#x#} = M\<close>
by (auto simp: multiset_eq_iff)
(* Removing a non-member is a no-op. *)
lemma diff_single_trivial: "\<not> x \<in># M \<Longrightarrow> M - {#x#} = M"
by (auto simp add: multiset_eq_iff not_in_iff)
lemma diff_single_eq_union: "x \<in># M \<Longrightarrow> M - {#x#} = N \<longleftrightarrow> M = add_mset x N"
by auto
lemma union_single_eq_diff: "add_mset x M = N \<Longrightarrow> M = N - {#x#}"
unfolding add_mset_add_single[of _ M] by (fact add_implies_diff)
lemma union_single_eq_member: "add_mset x M = N \<Longrightarrow> x \<in># N"
by auto
lemma add_mset_remove_trivial_If:
"add_mset a (N - {#a#}) = (if a \<in># N then N else add_mset a N)"
by (simp add: diff_single_trivial)
lemma add_mset_remove_trivial_eq: \<open>N = add_mset a (N - {#a#}) \<longleftrightarrow> a \<in># N\<close>
by (auto simp: add_mset_remove_trivial_If)
(* A sum is a singleton iff exactly one summand is that singleton. *)
lemma union_is_single:
"M + N = {#a#} \<longleftrightarrow> M = {#a#} \<and> N = {#} \<or> M = {#} \<and> N = {#a#}"
(is "?lhs = ?rhs")
proof
show ?lhs if ?rhs using that by auto
show ?rhs if ?lhs
by (metis Multiset.diff_cancel add.commute add_diff_cancel_left' diff_add_zero diff_single_trivial insert_DiffM that)
qed
lemma single_is_union: "{#a#} = M + N \<longleftrightarrow> {#a#} = M \<and> N = {#} \<or> M = {#} \<and> {#a#} = N"
by (auto simp add: eq_commute [of "{#a#}" "M + N"] union_is_single)
(* Two add_msets are equal iff the heads agree, or each tail contains the
   other's head (the "swap" case). *)
lemma add_eq_conv_diff:
"add_mset a M = add_mset b N \<longleftrightarrow> M = N \<and> a = b \<or> M = add_mset b (N - {#a#}) \<and> N = add_mset a (M - {#b#})"
(is "?lhs \<longleftrightarrow> ?rhs")
(* shorter: by (simp add: multiset_eq_iff) fastforce *)
proof
show ?lhs if ?rhs
using that
by (auto simp add: add_mset_commute[of a b])
show ?rhs if ?lhs
proof (cases "a = b")
case True with \<open>?lhs\<close> show ?thesis by simp
next
case False
from \<open>?lhs\<close> have "a \<in># add_mset b N" by (rule union_single_eq_member)
with False have "a \<in># N" by auto
moreover from \<open>?lhs\<close> have "M = add_mset b N - {#a#}" by (rule union_single_eq_diff)
moreover note False
ultimately show ?thesis by (auto simp add: diff_right_commute [of _ "{#a#}"])
qed
qed
lemma add_mset_eq_single [iff]: "add_mset b M = {#a#} \<longleftrightarrow> b = a \<and> M = {#}"
by (auto simp: add_eq_conv_diff)
lemma single_eq_add_mset [iff]: "{#a#} = add_mset b M \<longleftrightarrow> b = a \<and> M = {#}"
by (auto simp: add_eq_conv_diff)
lemma insert_noteq_member:
assumes BC: "add_mset b B = add_mset c C"
and bnotc: "b \<noteq> c"
shows "c \<in># B"
proof -
have "c \<in># add_mset c C" by simp
have nc: "\<not> c \<in># {#b#}" using bnotc by simp
then have "c \<in># add_mset b B" using BC by simp
then show "c \<in># B" using nc by simp
qed
lemma add_eq_conv_ex:
"(add_mset a M = add_mset b N) =
(M = N \<and> a = b \<or> (\<exists>K. M = add_mset b K \<and> N = add_mset a K))"
by (auto simp add: add_eq_conv_diff)
lemma multi_member_split: "x \<in># M \<Longrightarrow> \<exists>A. M = add_mset x A"
by (rule exI [where x = "M - {#x#}"]) simp
(* Adding b and removing a distinct c commute. *)
lemma multiset_add_sub_el_shuffle:
assumes "c \<in># B"
and "b \<noteq> c"
shows "add_mset b (B - {#c#}) = add_mset b B - {#c#}"
proof -
from \<open>c \<in># B\<close> obtain A where B: "B = add_mset c A"
by (blast dest: multi_member_split)
have "add_mset b A = add_mset c (add_mset b A) - {#c#}" by simp
then have "add_mset b A = add_mset b (add_mset c A) - {#c#}"
by (simp add: \<open>b \<noteq> c\<close>)
then show ?thesis using B by simp
qed
lemma add_mset_eq_singleton_iff[iff]:
"add_mset x M = {#y#} \<longleftrightarrow> M = {#} \<and> x = y"
by auto
subsubsection \<open>Pointwise ordering induced by count\<close>
(* The (multi)subset ordering: A \<subseteq># B iff every count in A is bounded by
   the corresponding count in B; \<subset># is the strict version.  The
   interpretations below import the generic ordered-monoid lemma library
   under the subset_mset namespace. *)
definition subseteq_mset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> bool" (infix "\<subseteq>#" 50)
where "A \<subseteq># B \<longleftrightarrow> (\<forall>a. count A a \<le> count B a)"
definition subset_mset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> bool" (infix "\<subset>#" 50)
where "A \<subset># B \<longleftrightarrow> A \<subseteq># B \<and> A \<noteq> B"
abbreviation (input) supseteq_mset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> bool" (infix "\<supseteq>#" 50)
where "supseteq_mset A B \<equiv> B \<subseteq># A"
abbreviation (input) supset_mset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> bool" (infix "\<supset>#" 50)
where "supset_mset A B \<equiv> B \<subset># A"
notation (input)
subseteq_mset (infix "\<le>#" 50) and
supseteq_mset (infix "\<ge>#" 50)
notation (ASCII)
subseteq_mset (infix "<=#" 50) and
subset_mset (infix "<#" 50) and
supseteq_mset (infix ">=#" 50) and
supset_mset (infix ">#" 50)
interpretation subset_mset: ordered_ab_semigroup_add_imp_le "(+)" "(-)" "(\<subseteq>#)" "(\<subset>#)"
by standard (auto simp add: subset_mset_def subseteq_mset_def multiset_eq_iff intro: order_trans antisym)
\<comment> \<open>FIXME: avoid junk stemming from type class interpretation\<close>
interpretation subset_mset: ordered_ab_semigroup_monoid_add_imp_le "(+)" 0 "(-)" "(\<subseteq>#)" "(\<subset>#)"
by standard
\<comment> \<open>FIXME: avoid junk stemming from type class interpretation\<close>
lemma mset_subset_eqI:
"(\<And>a. count A a \<le> count B a) \<Longrightarrow> A \<subseteq># B"
by (simp add: subseteq_mset_def)
lemma mset_subset_eq_count:
"A \<subseteq># B \<Longrightarrow> count A a \<le> count B a"
by (simp add: subseteq_mset_def)
(* Key structural characterisation: A \<subseteq># B iff B extends A by some C. *)
lemma mset_subset_eq_exists_conv: "(A::'a multiset) \<subseteq># B \<longleftrightarrow> (\<exists>C. B = A + C)"
unfolding subseteq_mset_def
apply (rule iffI)
apply (rule exI [where x = "B - A"])
apply (auto intro: multiset_eq_iff [THEN iffD2])
done
interpretation subset_mset: ordered_cancel_comm_monoid_diff "(+)" 0 "(\<subseteq>#)" "(\<subset>#)" "(-)"
by standard (simp, fact mset_subset_eq_exists_conv)
\<comment> \<open>FIXME: avoid junk stemming from type class interpretation\<close>
declare subset_mset.add_diff_assoc[simp] subset_mset.add_diff_assoc2[simp]
(* Monotonicity and cancellation of (+) with respect to \<subseteq>#. *)
lemma mset_subset_eq_mono_add_right_cancel: "(A::'a multiset) + C \<subseteq># B + C \<longleftrightarrow> A \<subseteq># B"
by (fact subset_mset.add_le_cancel_right)
lemma mset_subset_eq_mono_add_left_cancel: "C + (A::'a multiset) \<subseteq># C + B \<longleftrightarrow> A \<subseteq># B"
by (fact subset_mset.add_le_cancel_left)
lemma mset_subset_eq_mono_add: "(A::'a multiset) \<subseteq># B \<Longrightarrow> C \<subseteq># D \<Longrightarrow> A + C \<subseteq># B + D"
by (fact subset_mset.add_mono)
lemma mset_subset_eq_add_left: "(A::'a multiset) \<subseteq># A + B"
by simp
lemma mset_subset_eq_add_right: "B \<subseteq># (A::'a multiset) + B"
by simp
lemma single_subset_iff [simp]:
"{#a#} \<subseteq># M \<longleftrightarrow> a \<in># M"
by (auto simp add: subseteq_mset_def Suc_le_eq)
lemma mset_subset_eq_single: "a \<in># B \<Longrightarrow> {#a#} \<subseteq># B"
by simp
lemma mset_subset_eq_add_mset_cancel: \<open>add_mset a A \<subseteq># add_mset a B \<longleftrightarrow> A \<subseteq># B\<close>
unfolding add_mset_add_single[of _ A] add_mset_add_single[of _ B]
by (rule mset_subset_eq_mono_add_right_cancel)
lemma multiset_diff_union_assoc:
fixes A B C D :: "'a multiset"
shows "C \<subseteq># B \<Longrightarrow> A + B - C = A + (B - C)"
by (fact subset_mset.diff_add_assoc)
lemma mset_subset_eq_multiset_union_diff_commute:
fixes A B C D :: "'a multiset"
shows "B \<subseteq># A \<Longrightarrow> A - B + C = A + C - B"
by (fact subset_mset.add_diff_assoc2)
lemma diff_subset_eq_self[simp]:
"(M::'a multiset) - N \<subseteq># M"
by (simp add: subseteq_mset_def)
(* \<subseteq># respects membership, hence set_mset is monotone. *)
lemma mset_subset_eqD:
assumes "A \<subseteq># B" and "x \<in># A"
shows "x \<in># B"
proof -
from \<open>x \<in># A\<close> have "count A x > 0" by simp
also from \<open>A \<subseteq># B\<close> have "count A x \<le> count B x"
by (simp add: subseteq_mset_def)
finally show ?thesis by simp
qed
lemma mset_subsetD:
"A \<subset># B \<Longrightarrow> x \<in># A \<Longrightarrow> x \<in># B"
by (auto intro: mset_subset_eqD [of A])
lemma set_mset_mono:
"A \<subseteq># B \<Longrightarrow> set_mset A \<subseteq> set_mset B"
by (metis mset_subset_eqD subsetI)
lemma mset_subset_eq_insertD:
"add_mset x A \<subseteq># B \<Longrightarrow> x \<in># B \<and> A \<subset># B"
apply (rule conjI)
apply (simp add: mset_subset_eqD)
apply (clarsimp simp: subset_mset_def subseteq_mset_def)
apply safe
apply (erule_tac x = a in allE)
apply (auto split: if_split_asm)
done
lemma mset_subset_insertD:
"add_mset x A \<subset># B \<Longrightarrow> x \<in># B \<and> A \<subset># B"
by (rule mset_subset_eq_insertD) simp
lemma mset_subset_of_empty[simp]: "A \<subset># {#} \<longleftrightarrow> False"
by (simp only: subset_mset.not_less_zero)
lemma empty_subset_add_mset[simp]: "{#} \<subset># add_mset x M"
by (auto intro: subset_mset.gr_zeroI)
lemma empty_le: "{#} \<subseteq># A"
by (fact subset_mset.zero_le)
lemma insert_subset_eq_iff:
"add_mset a A \<subseteq># B \<longleftrightarrow> a \<in># B \<and> A \<subseteq># B - {#a#}"
using le_diff_conv2 [of "Suc 0" "count B a" "count A a"]
apply (auto simp add: subseteq_mset_def not_in_iff Suc_le_eq)
apply (rule ccontr)
apply (auto simp add: not_in_iff)
done
lemma insert_union_subset_iff:
"add_mset a A \<subset># B \<longleftrightarrow> a \<in># B \<and> A \<subset># B - {#a#}"
by (auto simp add: insert_subset_eq_iff subset_mset_def)
lemma subset_eq_diff_conv:
"A - C \<subseteq># B \<longleftrightarrow> A \<subseteq># B + C"
by (simp add: subseteq_mset_def le_diff_conv)
lemma multi_psub_of_add_self [simp]: "A \<subset># add_mset x A"
by (auto simp: subset_mset_def subseteq_mset_def)
lemma multi_psub_self: "A \<subset># A = False"
by simp
lemma mset_subset_add_mset [simp]: "add_mset x N \<subset># add_mset x M \<longleftrightarrow> N \<subset># M"
unfolding add_mset_add_single[of _ N] add_mset_add_single[of _ M]
by (fact subset_mset.add_less_cancel_right)
lemma mset_subset_diff_self: "c \<in># B \<Longrightarrow> B - {#c#} \<subset># B"
by (auto simp: subset_mset_def elim: mset_add)
lemma Diff_eq_empty_iff_mset: "A - B = {#} \<longleftrightarrow> A \<subseteq># B"
by (auto simp: multiset_eq_iff subseteq_mset_def)
lemma add_mset_subseteq_single_iff[iff]: "add_mset a M \<subseteq># {#b#} \<longleftrightarrow> M = {#} \<and> a = b"
proof
assume A: "add_mset a M \<subseteq># {#b#}"
then have \<open>a = b\<close>
by (auto dest: mset_subset_eq_insertD)
then show "M={#} \<and> a=b"
using A by (simp add: mset_subset_eq_add_mset_cancel)
qed simp
subsubsection \<open>Intersection and bounded union\<close>
(* Intersection takes the pointwise minimum of counts (defined via double
   difference); bounded union takes the pointwise maximum.  The lattice
   interpretations make the generic inf/sup lemmas available under the
   subset_mset namespace, with {#} as bottom. *)
definition inf_subset_mset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset" (infixl "\<inter>#" 70) where
multiset_inter_def: "inf_subset_mset A B = A - (A - B)"
interpretation subset_mset: semilattice_inf inf_subset_mset "(\<subseteq>#)" "(\<subset>#)"
proof -
have [simp]: "m \<le> n \<Longrightarrow> m \<le> q \<Longrightarrow> m \<le> n - (n - q)" for m n q :: nat
by arith
show "class.semilattice_inf (\<inter>#) (\<subseteq>#) (\<subset>#)"
by standard (auto simp add: multiset_inter_def subseteq_mset_def)
qed \<comment> \<open>FIXME: avoid junk stemming from type class interpretation\<close>
definition sup_subset_mset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset"(infixl "\<union>#" 70)
where "sup_subset_mset A B = A + (B - A)" \<comment> \<open>FIXME irregular fact name\<close>
interpretation subset_mset: semilattice_sup sup_subset_mset "(\<subseteq>#)" "(\<subset>#)"
proof -
have [simp]: "m \<le> n \<Longrightarrow> q \<le> n \<Longrightarrow> m + (q - m) \<le> n" for m n q :: nat
by arith
show "class.semilattice_sup (\<union>#) (\<subseteq>#) (\<subset>#)"
by standard (auto simp add: sup_subset_mset_def subseteq_mset_def)
qed \<comment> \<open>FIXME: avoid junk stemming from type class interpretation\<close>
interpretation subset_mset: bounded_lattice_bot "(\<inter>#)" "(\<subseteq>#)" "(\<subset>#)"
"(\<union>#)" "{#}"
by standard auto
\<comment> \<open>FIXME: avoid junk stemming from type class interpretation\<close>
subsubsection \<open>Additional intersection facts\<close>
(* Count/support characterisations of \<inter>#, disjointness (A \<inter># B = {#}),
   and interaction of intersection with add_mset, (+) and (-). *)
lemma multiset_inter_count [simp]:
fixes A B :: "'a multiset"
shows "count (A \<inter># B) x = min (count A x) (count B x)"
by (simp add: multiset_inter_def)
lemma set_mset_inter [simp]:
"set_mset (A \<inter># B) = set_mset A \<inter> set_mset B"
by (simp only: set_eq_iff count_greater_zero_iff [symmetric] multiset_inter_count) simp
lemma diff_intersect_left_idem [simp]:
"M - M \<inter># N = M - N"
by (simp add: multiset_eq_iff min_def)
lemma diff_intersect_right_idem [simp]:
"M - N \<inter># M = M - N"
by (simp add: multiset_eq_iff min_def)
lemma multiset_inter_single[simp]: "a \<noteq> b \<Longrightarrow> {#a#} \<inter># {#b#} = {#}"
by (rule multiset_eqI) auto
(* When B and C share no element, subtracting C commutes past adding B. *)
lemma multiset_union_diff_commute:
assumes "B \<inter># C = {#}"
shows "A + B - C = A - C + B"
proof (rule multiset_eqI)
fix x
from assms have "min (count B x) (count C x) = 0"
by (auto simp add: multiset_eq_iff)
then have "count B x = 0 \<or> count C x = 0"
unfolding min_def by (auto split: if_splits)
then show "count (A + B - C) x = count (A - C + B) x"
by auto
qed
(* Disjointness in terms of membership. *)
lemma disjunct_not_in:
"A \<inter># B = {#} \<longleftrightarrow> (\<forall>a. a \<notin># A \<or> a \<notin># B)" (is "?P \<longleftrightarrow> ?Q")
proof
assume ?P
show ?Q
proof
fix a
from \<open>?P\<close> have "min (count A a) (count B a) = 0"
by (simp add: multiset_eq_iff)
then have "count A a = 0 \<or> count B a = 0"
by (cases "count A a \<le> count B a") (simp_all add: min_def)
then show "a \<notin># A \<or> a \<notin># B"
by (simp add: not_in_iff)
qed
next
assume ?Q
show ?P
proof (rule multiset_eqI)
fix a
from \<open>?Q\<close> have "count A a = 0 \<or> count B a = 0"
by (auto simp add: not_in_iff)
then show "count (A \<inter># B) a = count {#} a"
by auto
qed
qed
lemma inter_mset_empty_distrib_right: "A \<inter># (B + C) = {#} \<longleftrightarrow> A \<inter># B = {#} \<and> A \<inter># C = {#}"
by (meson disjunct_not_in union_iff)
lemma inter_mset_empty_distrib_left: "(A + B) \<inter># C = {#} \<longleftrightarrow> A \<inter># C = {#} \<and> B \<inter># C = {#}"
by (meson disjunct_not_in union_iff)
lemma add_mset_inter_add_mset[simp]:
"add_mset a A \<inter># add_mset a B = add_mset a (A \<inter># B)"
by (metis add_mset_add_single add_mset_diff_bothsides diff_subset_eq_self multiset_inter_def
subset_mset.diff_add_assoc2)
lemma add_mset_disjoint [simp]:
"add_mset a A \<inter># B = {#} \<longleftrightarrow> a \<notin># B \<and> A \<inter># B = {#}"
"{#} = add_mset a A \<inter># B \<longleftrightarrow> a \<notin># B \<and> {#} = A \<inter># B"
by (auto simp: disjunct_not_in)
lemma disjoint_add_mset [simp]:
"B \<inter># add_mset a A = {#} \<longleftrightarrow> a \<notin># B \<and> B \<inter># A = {#}"
"{#} = A \<inter># add_mset b B \<longleftrightarrow> b \<notin># A \<and> {#} = A \<inter># B"
by (auto simp: disjunct_not_in)
lemma inter_add_left1: "\<not> x \<in># N \<Longrightarrow> (add_mset x M) \<inter># N = M \<inter># N"
by (simp add: multiset_eq_iff not_in_iff)
lemma inter_add_left2: "x \<in># N \<Longrightarrow> (add_mset x M) \<inter># N = add_mset x (M \<inter># (N - {#x#}))"
by (auto simp add: multiset_eq_iff elim: mset_add)
lemma inter_add_right1: "\<not> x \<in># N \<Longrightarrow> N \<inter># (add_mset x M) = N \<inter># M"
by (simp add: multiset_eq_iff not_in_iff)
lemma inter_add_right2: "x \<in># N \<Longrightarrow> N \<inter># (add_mset x M) = add_mset x ((N - {#x#}) \<inter># M)"
by (auto simp add: multiset_eq_iff elim: mset_add)
lemma disjunct_set_mset_diff:
assumes "M \<inter># N = {#}"
shows "set_mset (M - N) = set_mset M"
proof (rule set_eqI)
fix a
from assms have "a \<notin># M \<or> a \<notin># N"
by (simp add: disjunct_not_in)
then show "a \<in># M - N \<longleftrightarrow> a \<in># M"
by (auto dest: in_diffD) (simp add: in_diff_count not_in_iff)
qed
(* Support of M - {#a#}: a drops out iff it occurred at most once in M. *)
lemma at_most_one_mset_mset_diff:
assumes "a \<notin># M - {#a#}"
shows "set_mset (M - {#a#}) = set_mset M - {a}"
using assms by (auto simp add: not_in_iff in_diff_count set_eq_iff)
lemma more_than_one_mset_mset_diff:
assumes "a \<in># M - {#a#}"
shows "set_mset (M - {#a#}) = set_mset M"
proof (rule set_eqI)
fix b
have "Suc 0 < count M b \<Longrightarrow> count M b > 0" by arith
then show "b \<in># M - {#a#} \<longleftrightarrow> b \<in># M"
using assms by (auto simp add: in_diff_count)
qed
lemma inter_iff:
"a \<in># A \<inter># B \<longleftrightarrow> a \<in># A \<and> a \<in># B"
by simp
lemma inter_union_distrib_left:
"A \<inter># B + C = (A + C) \<inter># (B + C)"
by (simp add: multiset_eq_iff min_add_distrib_left)
lemma inter_union_distrib_right:
"C + A \<inter># B = (C + A) \<inter># (C + B)"
using inter_union_distrib_left [of A B C] by (simp add: ac_simps)
lemma inter_subset_eq_union:
"A \<inter># B \<subseteq># A + B"
by (auto simp add: subseteq_mset_def)
subsubsection \<open>Additional bounded union facts\<close>
(* \<union># takes the pointwise maximum of counts; its support is the set union,
   and it distributes over (+) and absorbs add_mset case-by-case. *)
lemma sup_subset_mset_count [simp]: \<comment> \<open>FIXME irregular fact name\<close>
"count (A \<union># B) x = max (count A x) (count B x)"
by (simp add: sup_subset_mset_def)
lemma set_mset_sup [simp]:
"set_mset (A \<union># B) = set_mset A \<union> set_mset B"
by (simp only: set_eq_iff count_greater_zero_iff [symmetric] sup_subset_mset_count)
(auto simp add: not_in_iff elim: mset_add)
lemma sup_union_left1 [simp]: "\<not> x \<in># N \<Longrightarrow> (add_mset x M) \<union># N = add_mset x (M \<union># N)"
by (simp add: multiset_eq_iff not_in_iff)
lemma sup_union_left2: "x \<in># N \<Longrightarrow> (add_mset x M) \<union># N = add_mset x (M \<union># (N - {#x#}))"
by (simp add: multiset_eq_iff)
lemma sup_union_right1 [simp]: "\<not> x \<in># N \<Longrightarrow> N \<union># (add_mset x M) = add_mset x (N \<union># M)"
by (simp add: multiset_eq_iff not_in_iff)
lemma sup_union_right2: "x \<in># N \<Longrightarrow> N \<union># (add_mset x M) = add_mset x ((N - {#x#}) \<union># M)"
by (simp add: multiset_eq_iff)
lemma sup_union_distrib_left:
"A \<union># B + C = (A + C) \<union># (B + C)"
by (simp add: multiset_eq_iff max_add_distrib_left)
lemma union_sup_distrib_right:
"C + A \<union># B = (C + A) \<union># (C + B)"
using sup_union_distrib_left [of A B C] by (simp add: ac_simps)
(* min/max decomposition of the sum: A + B splits as inter plus sup. *)
lemma union_diff_inter_eq_sup:
"A + B - A \<inter># B = A \<union># B"
by (auto simp add: multiset_eq_iff)
lemma union_diff_sup_eq_inter:
"A + B - A \<union># B = A \<inter># B"
by (auto simp add: multiset_eq_iff)
lemma add_mset_union:
\<open>add_mset a A \<union># add_mset a B = add_mset a (A \<union># B)\<close>
by (auto simp: multiset_eq_iff max_def)
subsection \<open>Replicate and repeat operations\<close>
(* replicate_mset n x: n copies of a single element.
   repeat_mset n A: n-fold sum of the multiset A (scales every count by n). *)
definition replicate_mset :: "nat \<Rightarrow> 'a \<Rightarrow> 'a multiset" where
"replicate_mset n x = (add_mset x ^^ n) {#}"
lemma replicate_mset_0[simp]: "replicate_mset 0 x = {#}"
unfolding replicate_mset_def by simp
lemma replicate_mset_Suc [simp]: "replicate_mset (Suc n) x = add_mset x (replicate_mset n x)"
unfolding replicate_mset_def by (induct n) (auto intro: add.commute)
lemma count_replicate_mset[simp]: "count (replicate_mset n x) y = (if y = x then n else 0)"
unfolding replicate_mset_def by (induct n) auto
fun repeat_mset :: "nat \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset" where
"repeat_mset 0 _ = {#}" |
"repeat_mset (Suc n) A = A + repeat_mset n A"
lemma count_repeat_mset [simp]: "count (repeat_mset i A) a = i * count A a"
by (induction i) auto
(* repeat_mset behaves like scalar multiplication: it is multiplicative in
   the numeric argument and distributes over nat subtraction/addition and
   over multiset union. *)
lemma repeat_mset_right [simp]: "repeat_mset a (repeat_mset b A) = repeat_mset (a * b) A"
by (auto simp: multiset_eq_iff left_diff_distrib')
lemma left_diff_repeat_mset_distrib': \<open>repeat_mset (i - j) u = repeat_mset i u - repeat_mset j u\<close>
by (auto simp: multiset_eq_iff left_diff_distrib')
lemma left_add_mult_distrib_mset:
"repeat_mset i u + (repeat_mset j u + k) = repeat_mset (i+j) u + k"
by (auto simp: multiset_eq_iff add_mult_distrib)
lemma repeat_mset_distrib:
"repeat_mset (m + n) A = repeat_mset m A + repeat_mset n A"
by (auto simp: multiset_eq_iff Nat.add_mult_distrib)
lemma repeat_mset_distrib2[simp]:
"repeat_mset n (A + B) = repeat_mset n A + repeat_mset n B"
by (auto simp: multiset_eq_iff add_mult_distrib2)
lemma repeat_mset_replicate_mset[simp]:
"repeat_mset n {#a#} = replicate_mset n a"
by (auto simp: multiset_eq_iff)
lemma repeat_mset_distrib_add_mset[simp]:
"repeat_mset n (add_mset a A) = replicate_mset n a + repeat_mset n A"
by (auto simp: multiset_eq_iff)
lemma repeat_mset_empty[simp]: "repeat_mset n {#} = {#}"
by (induction n) simp_all
subsubsection \<open>Simprocs\<close>
(* Setup for the cancellation simprocs: repeat_mset is identified with the
   generic iterate_add so that the shared cancellation machinery
   (multiset_simprocs.ML / Cancel_Simprocs) applies to =, \<subset>#, \<subseteq># and (-)
   goals involving +, add_mset, replicate_mset and repeat_mset. *)
lemma repeat_mset_iterate_add: \<open>repeat_mset n M = iterate_add n M\<close>
unfolding iterate_add_def by (induction n) auto
lemma mset_subseteq_add_iff1:
"j \<le> (i::nat) \<Longrightarrow> (repeat_mset i u + m \<subseteq># repeat_mset j u + n) = (repeat_mset (i-j) u + m \<subseteq># n)"
by (auto simp add: subseteq_mset_def nat_le_add_iff1)
lemma mset_subseteq_add_iff2:
"i \<le> (j::nat) \<Longrightarrow> (repeat_mset i u + m \<subseteq># repeat_mset j u + n) = (m \<subseteq># repeat_mset (j-i) u + n)"
by (auto simp add: subseteq_mset_def nat_le_add_iff2)
lemma mset_subset_add_iff1:
"j \<le> (i::nat) \<Longrightarrow> (repeat_mset i u + m \<subset># repeat_mset j u + n) = (repeat_mset (i-j) u + m \<subset># n)"
unfolding subset_mset_def repeat_mset_iterate_add
by (simp add: iterate_add_eq_add_iff1 mset_subseteq_add_iff1[unfolded repeat_mset_iterate_add])
lemma mset_subset_add_iff2:
"i \<le> (j::nat) \<Longrightarrow> (repeat_mset i u + m \<subset># repeat_mset j u + n) = (m \<subset># repeat_mset (j-i) u + n)"
unfolding subset_mset_def repeat_mset_iterate_add
by (simp add: iterate_add_eq_add_iff2 mset_subseteq_add_iff2[unfolded repeat_mset_iterate_add])
ML_file \<open>multiset_simprocs.ML\<close>
(* Pre/post rewriting hooks for the cancellation simprocs. *)
lemma add_mset_replicate_mset_safe[cancelation_simproc_pre]: \<open>NO_MATCH {#} M \<Longrightarrow> add_mset a M = {#a#} + M\<close>
by simp
declare repeat_mset_iterate_add[cancelation_simproc_pre]
declare iterate_add_distrib[cancelation_simproc_pre]
declare repeat_mset_iterate_add[symmetric, cancelation_simproc_post]
declare add_mset_not_empty[cancelation_simproc_eq_elim]
empty_not_add_mset[cancelation_simproc_eq_elim]
subset_mset.le_zero_eq[cancelation_simproc_eq_elim]
empty_not_add_mset[cancelation_simproc_eq_elim]
add_mset_not_empty[cancelation_simproc_eq_elim]
subset_mset.le_zero_eq[cancelation_simproc_eq_elim]
le_zero_eq[cancelation_simproc_eq_elim]
simproc_setup mseteq_cancel
("(l::'a multiset) + m = n" | "(l::'a multiset) = m + n" |
"add_mset a m = n" | "m = add_mset a n" |
"replicate_mset p a = n" | "m = replicate_mset p a" |
"repeat_mset p m = n" | "m = repeat_mset p m") =
\<open>fn phi => Cancel_Simprocs.eq_cancel\<close>
simproc_setup msetsubset_cancel
("(l::'a multiset) + m \<subset># n" | "(l::'a multiset) \<subset># m + n" |
"add_mset a m \<subset># n" | "m \<subset># add_mset a n" |
"replicate_mset p r \<subset># n" | "m \<subset># replicate_mset p r" |
"repeat_mset p m \<subset># n" | "m \<subset># repeat_mset p m") =
\<open>fn phi => Multiset_Simprocs.subset_cancel_msets\<close>
simproc_setup msetsubset_eq_cancel
("(l::'a multiset) + m \<subseteq># n" | "(l::'a multiset) \<subseteq># m + n" |
"add_mset a m \<subseteq># n" | "m \<subseteq># add_mset a n" |
"replicate_mset p r \<subseteq># n" | "m \<subseteq># replicate_mset p r" |
"repeat_mset p m \<subseteq># n" | "m \<subseteq># repeat_mset p m") =
\<open>fn phi => Multiset_Simprocs.subseteq_cancel_msets\<close>
simproc_setup msetdiff_cancel
("((l::'a multiset) + m) - n" | "(l::'a multiset) - (m + n)" |
"add_mset a m - n" | "m - add_mset a n" |
"replicate_mset p r - n" | "m - replicate_mset p r" |
"repeat_mset p m - n" | "m - repeat_mset p m") =
\<open>fn phi => Cancel_Simprocs.diff_cancel\<close>
subsubsection \<open>Conditionally complete lattice\<close>
(* Inf of a set of multisets: pointwise Inf of counts for nonempty sets,
   {#} for the empty set.  The lift_definition proof shows the resulting
   count function has finite support, as required by the multiset type. *)
instantiation multiset :: (type) Inf
begin
lift_definition Inf_multiset :: "'a multiset set \<Rightarrow> 'a multiset" is
"\<lambda>A i. if A = {} then 0 else Inf ((\<lambda>f. f i) ` A)"
proof -
fix A :: "('a \<Rightarrow> nat) set" assume *: "\<And>x. x \<in> A \<Longrightarrow> x \<in> multiset"
have "finite {i. (if A = {} then 0 else Inf ((\<lambda>f. f i) ` A)) > 0}" unfolding multiset_def
proof (cases "A = {}")
case False
then obtain f where "f \<in> A" by blast
hence "{i. Inf ((\<lambda>f. f i) ` A) > 0} \<subseteq> {i. f i > 0}"
by (auto intro: less_le_trans[OF _ cInf_lower])
moreover from \<open>f \<in> A\<close> * have "finite \<dots>" by (simp add: multiset_def)
ultimately have "finite {i. Inf ((\<lambda>f. f i) ` A) > 0}" by (rule finite_subset)
with False show ?thesis by simp
qed simp_all
thus "(\<lambda>i. if A = {} then 0 else INF f\<in>A. f i) \<in> multiset" by (simp add: multiset_def)
qed
instance ..
end
lemma Inf_multiset_empty: "Inf {} = {#}"
by transfer simp_all
lemma count_Inf_multiset_nonempty: "A \<noteq> {} \<Longrightarrow> count (Inf A) x = Inf ((\<lambda>X. count X x) ` A)"
by transfer simp_all
(* Sup of a set of multisets: pointwise Sup of counts when the set is
   nonempty and bounded above w.r.t. \<subseteq>#; otherwise {#} by convention. *)
instantiation multiset :: (type) Sup
begin
definition Sup_multiset :: "'a multiset set \<Rightarrow> 'a multiset" where
"Sup_multiset A = (if A \<noteq> {} \<and> subset_mset.bdd_above A then
Abs_multiset (\<lambda>i. Sup ((\<lambda>X. count X i) ` A)) else {#})"
lemma Sup_multiset_empty: "Sup {} = {#}"
by (simp add: Sup_multiset_def)
lemma Sup_multiset_unbounded: "\<not>subset_mset.bdd_above A \<Longrightarrow> Sup A = {#}"
by (simp add: Sup_multiset_def)
instance ..
end
(* Support lemmas for Sup on multisets: a \<subseteq>#-bounded family has pointwise
   bounded counts and a finite joint support, so the pointwise Sup of
   counts is again a legal multiset (Sup_multiset_in_multiset), which
   justifies the Abs_multiset in Sup_multiset_def. *)
lemma bdd_above_multiset_imp_bdd_above_count:
assumes "subset_mset.bdd_above (A :: 'a multiset set)"
shows "bdd_above ((\<lambda>X. count X x) ` A)"
proof -
from assms obtain Y where Y: "\<forall>X\<in>A. X \<subseteq># Y"
by (auto simp: subset_mset.bdd_above_def)
hence "count X x \<le> count Y x" if "X \<in> A" for X
using that by (auto intro: mset_subset_eq_count)
thus ?thesis by (intro bdd_aboveI[of _ "count Y x"]) auto
qed
lemma bdd_above_multiset_imp_finite_support:
assumes "A \<noteq> {}" "subset_mset.bdd_above (A :: 'a multiset set)"
shows "finite (\<Union>X\<in>A. {x. count X x > 0})"
proof -
from assms obtain Y where Y: "\<forall>X\<in>A. X \<subseteq># Y"
by (auto simp: subset_mset.bdd_above_def)
hence "count X x \<le> count Y x" if "X \<in> A" for X x
using that by (auto intro: mset_subset_eq_count)
hence "(\<Union>X\<in>A. {x. count X x > 0}) \<subseteq> {x. count Y x > 0}"
by safe (erule less_le_trans)
moreover have "finite \<dots>" by simp
ultimately show ?thesis by (rule finite_subset)
qed
lemma Sup_multiset_in_multiset:
assumes "A \<noteq> {}" "subset_mset.bdd_above A"
shows "(\<lambda>i. SUP X\<in>A. count X i) \<in> multiset"
unfolding multiset_def
proof
have "{i. Sup ((\<lambda>X. count X i) ` A) > 0} \<subseteq> (\<Union>X\<in>A. {i. 0 < count X i})"
proof safe
fix i assume pos: "(SUP X\<in>A. count X i) > 0"
show "i \<in> (\<Union>X\<in>A. {i. 0 < count X i})"
proof (rule ccontr)
assume "i \<notin> (\<Union>X\<in>A. {i. 0 < count X i})"
hence "\<forall>X\<in>A. count X i \<le> 0" by (auto simp: count_eq_zero_iff)
with assms have "(SUP X\<in>A. count X i) \<le> 0"
by (intro cSup_least bdd_above_multiset_imp_bdd_above_count) auto
with pos show False by simp
qed
qed
moreover from assms have "finite \<dots>" by (rule bdd_above_multiset_imp_finite_support)
ultimately show "finite {i. Sup ((\<lambda>X. count X i) ` A) > 0}" by (rule finite_subset)
qed
lemma count_Sup_multiset_nonempty:
assumes "A \<noteq> {}" "subset_mset.bdd_above A"
shows "count (Sup A) x = (SUP X\<in>A. count X x)"
using assms by (simp add: Sup_multiset_def Abs_multiset_inverse Sup_multiset_in_multiset)
(* Multisets under \<subseteq># form a conditionally complete lattice: the four proof
   obligations are Inf-lower, Inf-greatest, Sup-upper, Sup-least, each reduced
   to the pointwise count characterisations proved above. *)
interpretation subset_mset: conditionally_complete_lattice Inf Sup "(\<inter>#)" "(\<subseteq>#)" "(\<subset>#)" "(\<union>#)"
proof
(* Inf is a lower bound of every member of a nonempty A. *)
fix X :: "'a multiset" and A
assume "X \<in> A"
show "Inf A \<subseteq># X"
proof (rule mset_subset_eqI)
fix x
from \<open>X \<in> A\<close> have "A \<noteq> {}" by auto
hence "count (Inf A) x = (INF X\<in>A. count X x)"
by (simp add: count_Inf_multiset_nonempty)
also from \<open>X \<in> A\<close> have "\<dots> \<le> count X x"
by (intro cInf_lower) simp_all
finally show "count (Inf A) x \<le> count X x" .
qed
next
(* Inf is the greatest lower bound. *)
fix X :: "'a multiset" and A
assume nonempty: "A \<noteq> {}" and le: "\<And>Y. Y \<in> A \<Longrightarrow> X \<subseteq># Y"
show "X \<subseteq># Inf A"
proof (rule mset_subset_eqI)
fix x
from nonempty have "count X x \<le> (INF X\<in>A. count X x)"
by (intro cInf_greatest) (auto intro: mset_subset_eq_count le)
also from nonempty have "\<dots> = count (Inf A) x" by (simp add: count_Inf_multiset_nonempty)
finally show "count X x \<le> count (Inf A) x" .
qed
next
(* Sup is an upper bound of every member, provided A is bounded above. *)
fix X :: "'a multiset" and A
assume X: "X \<in> A" and bdd: "subset_mset.bdd_above A"
show "X \<subseteq># Sup A"
proof (rule mset_subset_eqI)
fix x
from X have "A \<noteq> {}" by auto
have "count X x \<le> (SUP X\<in>A. count X x)"
by (intro cSUP_upper X bdd_above_multiset_imp_bdd_above_count bdd)
also from count_Sup_multiset_nonempty[OF \<open>A \<noteq> {}\<close> bdd]
have "(SUP X\<in>A. count X x) = count (Sup A) x" by simp
finally show "count X x \<le> count (Sup A) x" .
qed
next
(* Sup is the least upper bound; any upper bound X witnesses boundedness. *)
fix X :: "'a multiset" and A
assume nonempty: "A \<noteq> {}" and ge: "\<And>Y. Y \<in> A \<Longrightarrow> Y \<subseteq># X"
from ge have bdd: "subset_mset.bdd_above A" by (rule subset_mset.bdd_aboveI[of _ X])
show "Sup A \<subseteq># X"
proof (rule mset_subset_eqI)
fix x
from count_Sup_multiset_nonempty[OF \<open>A \<noteq> {}\<close> bdd]
have "count (Sup A) x = (SUP X\<in>A. count X x)" .
also from nonempty have "\<dots> \<le> count X x"
by (intro cSup_least) (auto intro: mset_subset_eq_count ge)
finally show "count (Sup A) x \<le> count X x" .
qed
qed \<comment> \<open>FIXME: avoid junk stemming from type class interpretation\<close>
(* For nonempty A, the support of Inf A is the intersection of the supports.
   Both inclusions go via the singleton characterisation x \<in># M \<longleftrightarrow> {#x#} \<subseteq># M. *)
lemma set_mset_Inf:
assumes "A \<noteq> {}"
shows "set_mset (Inf A) = (\<Inter>X\<in>A. set_mset X)"
proof safe
fix x X assume "x \<in># Inf A" "X \<in> A"
hence nonempty: "A \<noteq> {}" by (auto simp: Inf_multiset_empty)
from \<open>x \<in># Inf A\<close> have "{#x#} \<subseteq># Inf A" by auto
also from \<open>X \<in> A\<close> have "\<dots> \<subseteq># X" by (rule subset_mset.cInf_lower) simp_all
finally show "x \<in># X" by simp
next
fix x assume x: "x \<in> (\<Inter>X\<in>A. set_mset X)"
hence "{#x#} \<subseteq># X" if "X \<in> A" for X using that by auto
from assms and this have "{#x#} \<subseteq># Inf A" by (rule subset_mset.cInf_greatest)
thus "x \<in># Inf A" by simp
qed
(* Membership in Inf A iff membership in every member (nonempty A). *)
lemma in_Inf_multiset_iff:
assumes "A \<noteq> {}"
shows "x \<in># Inf A \<longleftrightarrow> (\<forall>X\<in>A. x \<in># X)"
proof -
from assms have "set_mset (Inf A) = (\<Inter>X\<in>A. set_mset X)" by (rule set_mset_Inf)
also have "x \<in> \<dots> \<longleftrightarrow> (\<forall>X\<in>A. x \<in># X)" by simp
finally show ?thesis .
qed
(* Destruction rule: the premise X \<in> A also discharges A \<noteq> {}. *)
lemma in_Inf_multisetD: "x \<in># Inf A \<Longrightarrow> X \<in> A \<Longrightarrow> x \<in># X"
by (subst (asm) in_Inf_multiset_iff) auto
(* For bounded A, the support of Sup A is the union of the supports.
   The forward inclusion argues by contradiction: if x occurred in Sup A but
   in no member, Sup A - {#x#} would also be an upper bound, forcing
   Sup A \<subseteq># Sup A - {#x#}, which is impossible when x \<in># Sup A. *)
lemma set_mset_Sup:
assumes "subset_mset.bdd_above A"
shows "set_mset (Sup A) = (\<Union>X\<in>A. set_mset X)"
proof safe
fix x assume "x \<in># Sup A"
hence nonempty: "A \<noteq> {}" by (auto simp: Sup_multiset_empty)
show "x \<in> (\<Union>X\<in>A. set_mset X)"
proof (rule ccontr)
assume x: "x \<notin> (\<Union>X\<in>A. set_mset X)"
have "count X x \<le> count (Sup A) x" if "X \<in> A" for X x
using that by (intro mset_subset_eq_count subset_mset.cSup_upper assms)
with x have "X \<subseteq># Sup A - {#x#}" if "X \<in> A" for X
using that by (auto simp: subseteq_mset_def algebra_simps not_in_iff)
hence "Sup A \<subseteq># Sup A - {#x#}" by (intro subset_mset.cSup_least nonempty)
with \<open>x \<in># Sup A\<close> show False
by (auto simp: subseteq_mset_def simp flip: count_greater_zero_iff
dest!: spec[of _ x])
qed
next
fix x X assume "x \<in> set_mset X" "X \<in> A"
hence "{#x#} \<subseteq># X" by auto
also have "X \<subseteq># Sup A" by (intro subset_mset.cSup_upper \<open>X \<in> A\<close> assms)
finally show "x \<in> set_mset (Sup A)" by simp
qed
(* Membership in Sup A iff membership in some member (bounded A). *)
lemma in_Sup_multiset_iff:
assumes "subset_mset.bdd_above A"
shows "x \<in># Sup A \<longleftrightarrow> (\<exists>X\<in>A. x \<in># X)"
proof -
from assms have "set_mset (Sup A) = (\<Union>X\<in>A. set_mset X)" by (rule set_mset_Sup)
also have "x \<in> \<dots> \<longleftrightarrow> (\<exists>X\<in>A. x \<in># X)" by simp
finally show ?thesis .
qed
(* Destruction rule: boundedness is derived, since Sup of an unbounded
   family is defined to be {#} and hence has no members. *)
lemma in_Sup_multisetD:
assumes "x \<in># Sup A"
shows "\<exists>X\<in>A. x \<in># X"
proof -
have "subset_mset.bdd_above A"
by (rule ccontr) (insert assms, simp_all add: Sup_multiset_unbounded)
with assms show ?thesis by (simp add: in_Sup_multiset_iff)
qed
(* \<inter># distributes over \<union>#: multisets form a distributive lattice. *)
interpretation subset_mset: distrib_lattice "(\<inter>#)" "(\<subseteq>#)" "(\<subset>#)" "(\<union>#)"
proof
fix A B C :: "'a multiset"
show "A \<union># (B \<inter># C) = A \<union># B \<inter># (A \<union># C)"
by (intro multiset_eqI) simp_all
qed \<comment> \<open>FIXME: avoid junk stemming from type class interpretation\<close>
subsubsection \<open>Filter (with comprehension syntax)\<close>
text \<open>Multiset comprehension\<close>
(* filter_mset P M keeps the occurrences of elements satisfying P; on the
   count-function representation this zeroes out counts of non-P elements. *)
lift_definition filter_mset :: "('a \<Rightarrow> bool) \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset"
is "\<lambda>P M. \<lambda>x. if P x then M x else 0"
by (rule filter_preserves_multiset)
(* Concrete comprehension syntax {#x \<in># M. P#} for filter_mset. *)
syntax (ASCII)
"_MCollect" :: "pttrn \<Rightarrow> 'a multiset \<Rightarrow> bool \<Rightarrow> 'a multiset" ("(1{#_ :# _./ _#})")
syntax
"_MCollect" :: "pttrn \<Rightarrow> 'a multiset \<Rightarrow> bool \<Rightarrow> 'a multiset" ("(1{#_ \<in># _./ _#})")
translations
"{#x \<in># M. P#}" == "CONST filter_mset (\<lambda>x. P) M"
(* Pointwise characterisation of filtering via counts. *)
lemma count_filter_mset [simp]:
"count (filter_mset P M) a = (if P a then count M a else 0)"
by (simp add: filter_mset.rep_eq)
(* The support of a filtered multiset is the filtered support. *)
lemma set_mset_filter [simp]:
"set_mset (filter_mset P M) = {a \<in> set_mset M. P a}"
by (simp only: set_eq_iff count_greater_zero_iff [symmetric] count_filter_mset) simp
lemma filter_empty_mset [simp]: "filter_mset P {#} = {#}"
by (rule multiset_eqI) simp
lemma filter_single_mset: "filter_mset P {#x#} = (if P x then {#x#} else {#})"
by (rule multiset_eqI) simp
(* filter_mset is a homomorphism for +, -, \<inter># and \<union>#. *)
lemma filter_union_mset [simp]: "filter_mset P (M + N) = filter_mset P M + filter_mset P N"
by (rule multiset_eqI) simp
lemma filter_diff_mset [simp]: "filter_mset P (M - N) = filter_mset P M - filter_mset P N"
by (rule multiset_eqI) simp
lemma filter_inter_mset [simp]: "filter_mset P (M \<inter># N) = filter_mset P M \<inter># filter_mset P N"
by (rule multiset_eqI) simp
lemma filter_sup_mset[simp]: "filter_mset P (A \<union># B) = filter_mset P A \<union># filter_mset P B"
by (rule multiset_eqI) simp
lemma filter_mset_add_mset [simp]:
"filter_mset P (add_mset x A) =
(if P x then add_mset x (filter_mset P A) else filter_mset P A)"
by (auto simp: multiset_eq_iff)
lemma multiset_filter_subset[simp]: "filter_mset f M \<subseteq># M"
by (simp add: mset_subset_eqI)
(* Monotonicity of filtering w.r.t. \<subseteq>#, via the decomposition B = A + C. *)
lemma multiset_filter_mono:
assumes "A \<subseteq># B"
shows "filter_mset f A \<subseteq># filter_mset f B"
proof -
from assms[unfolded mset_subset_eq_exists_conv]
obtain C where B: "B = A + C" by auto
show ?thesis unfolding B by auto
qed
(* filter_mset P M = N iff N is the sub-multiset of M consisting of exactly
   the P-elements: N \<subseteq># M, all of N satisfies P, and the rest does not. *)
lemma filter_mset_eq_conv:
"filter_mset P M = N \<longleftrightarrow> N \<subseteq># M \<and> (\<forall>b\<in>#N. P b) \<and> (\<forall>a\<in>#M - N. \<not> P a)" (is "?P \<longleftrightarrow> ?Q")
proof
assume ?P then show ?Q by auto (simp add: multiset_eq_iff in_diff_count)
next
assume ?Q
then obtain Q where M: "M = N + Q"
by (auto simp add: mset_subset_eq_exists_conv)
then have MN: "M - N = Q" by simp
show ?P
proof (rule multiset_eqI)
fix a
from \<open>?Q\<close> MN have *: "\<not> P a \<Longrightarrow> a \<notin># N" "P a \<Longrightarrow> a \<notin># Q"
by auto
show "count (filter_mset P M) a = count N a"
proof (cases "a \<in># M")
case True
with * show ?thesis
by (simp add: not_in_iff M)
next
case False then have "count M a = 0"
by (simp add: not_in_iff)
with M show ?thesis by simp
qed
qed
qed
(* Composing two filters conjoins the predicates. *)
lemma filter_filter_mset: "filter_mset P (filter_mset Q M) = {#x \<in># M. Q x \<and> P x#}"
by (auto simp: multiset_eq_iff)
lemma
filter_mset_True[simp]: "{#y \<in># M. True#} = M" and
filter_mset_False[simp]: "{#y \<in># M. False#} = {#}"
by (auto simp: multiset_eq_iff)
subsubsection \<open>Size\<close>
(* Weighted count: each occurrence of x contributes Suc (f x); with f = (\<lambda>_. 0)
   this reduces to the plain count and yields the overloaded "size" below. *)
definition wcount where "wcount f M = (\<lambda>x. count M x * Suc (f x))"
lemma wcount_union: "wcount f (M + N) a = wcount f M a + wcount f N a"
by (auto simp: wcount_def add_mult_distrib)
lemma wcount_add_mset:
"wcount f (add_mset x M) a = (if x = a then Suc (f a) else 0) + wcount f M a"
unfolding add_mset_add_single[of _ M] wcount_union by (auto simp: wcount_def)
(* Generic (weighted) size: sum of weighted counts over the support. *)
definition size_multiset :: "('a \<Rightarrow> nat) \<Rightarrow> 'a multiset \<Rightarrow> nat" where
"size_multiset f M = sum (wcount f M) (set_mset M)"
lemmas size_multiset_eq = size_multiset_def[unfolded wcount_def]
(* Overloaded "size" on multisets: the unweighted instance (f = \<lambda>_. 0). *)
instantiation multiset :: (type) size
begin
definition size_multiset where
size_multiset_overloaded_def: "size_multiset = Multiset.size_multiset (\<lambda>_. 0)"
instance ..
end
lemmas size_multiset_overloaded_eq =
size_multiset_overloaded_def[THEN fun_cong, unfolded size_multiset_eq, simplified]
lemma size_multiset_empty [simp]: "size_multiset f {#} = 0"
by (simp add: size_multiset_def)
lemma size_empty [simp]: "size {#} = 0"
by (simp add: size_multiset_overloaded_def)
lemma size_multiset_single : "size_multiset f {#b#} = Suc (f b)"
by (simp add: size_multiset_eq)
lemma size_single: "size {#b#} = 1"
by (simp add: size_multiset_overloaded_def size_multiset_single)
(* Restricting the summation domain to the support of N does not change the
   sum, since wcount f N vanishes outside set_mset N. *)
lemma sum_wcount_Int:
"finite A \<Longrightarrow> sum (wcount f N) (A \<inter> set_mset N) = sum (wcount f N) A"
by (induct rule: finite_induct)
(simp_all add: Int_insert_left wcount_def count_eq_zero_iff)
(* Size is additive over multiset union (+). *)
lemma size_multiset_union [simp]:
"size_multiset f (M + N::'a multiset) = size_multiset f M + size_multiset f N"
apply (simp add: size_multiset_def sum_Un_nat sum.distrib sum_wcount_Int wcount_union)
apply (subst Int_commute)
apply (simp add: sum_wcount_Int)
done
lemma size_multiset_add_mset [simp]:
"size_multiset f (add_mset a M) = Suc (f a) + size_multiset f M"
unfolding add_mset_add_single[of _ M] size_multiset_union by (auto simp: size_multiset_single)
lemma size_add_mset [simp]: "size (add_mset a A) = Suc (size A)"
by (simp add: size_multiset_overloaded_def wcount_add_mset)
lemma size_union [simp]: "size (M + N::'a multiset) = size M + size N"
by (auto simp add: size_multiset_overloaded_def)
lemma size_multiset_eq_0_iff_empty [iff]:
"size_multiset f M = 0 \<longleftrightarrow> M = {#}"
by (auto simp add: size_multiset_eq count_eq_zero_iff)
lemma size_eq_0_iff_empty [iff]: "(size M = 0) = (M = {#})"
by (auto simp add: size_multiset_overloaded_def)
lemma nonempty_has_size: "(S \<noteq> {#}) = (0 < size S)"
by (metis gr0I gr_implies_not0 size_empty size_eq_0_iff_empty)
(* A multiset of positive size contains some element. *)
lemma size_eq_Suc_imp_elem: "size M = Suc n \<Longrightarrow> \<exists>a. a \<in># M"
apply (unfold size_multiset_overloaded_eq)
apply (drule sum_SucD)
apply auto
done
(* A multiset of successor size decomposes as add_mset a N. *)
lemma size_eq_Suc_imp_eq_union:
assumes "size M = Suc n"
shows "\<exists>a N. M = add_mset a N"
proof -
from assms obtain a where "a \<in># M"
by (erule size_eq_Suc_imp_elem [THEN exE])
then have "M = add_mset a (M - {#a#})" by simp
then show ?thesis by blast
qed
(* Size is monotone w.r.t. \<subseteq>#, via the decomposition B = A + C. *)
lemma size_mset_mono:
fixes A B :: "'a multiset"
assumes "A \<subseteq># B"
shows "size A \<le> size B"
proof -
from assms[unfolded mset_subset_eq_exists_conv]
obtain C where B: "B = A + C" by auto
show ?thesis unfolding B by (induct C) auto
qed
lemma size_filter_mset_lesseq[simp]: "size (filter_mset f M) \<le> size M"
by (rule size_mset_mono[OF multiset_filter_subset])
lemma size_Diff_submset:
"M \<subseteq># M' \<Longrightarrow> size (M' - M) = size M' - size(M::'a multiset)"
by (metis add_diff_cancel_left' size_union mset_subset_eq_exists_conv)
subsection \<open>Induction and case splits\<close>
(* The canonical structural induction principle for multisets, derived from
   complete induction on "size M" together with the decomposition lemma
   size_eq_Suc_imp_eq_union.  Registered as the default induction rule. *)
theorem multiset_induct [case_names empty add, induct type: multiset]:
assumes empty: "P {#}"
assumes add: "\<And>x M. P M \<Longrightarrow> P (add_mset x M)"
shows "P M"
proof (induct "size M" arbitrary: M)
case 0 thus "P M" by (simp add: empty)
next
case (Suc k)
obtain N x where "M = add_mset x N"
using \<open>Suc k = size M\<close> [symmetric]
using size_eq_Suc_imp_eq_union by fast
with Suc add show "P M" by simp
qed
(* Induction that always adds a minimal element: in the step, x is \<le> every
   element of the remaining multiset M (linear order required). *)
lemma multiset_induct_min[case_names empty add]:
fixes M :: "'a::linorder multiset"
assumes
empty: "P {#}" and
add: "\<And>x M. P M \<Longrightarrow> (\<forall>y \<in># M. y \<ge> x) \<Longrightarrow> P (add_mset x M)"
shows "P M"
proof (induct "size M" arbitrary: M)
case (Suc k)
note ih = this(1) and Sk_eq_sz_M = this(2)
let ?y = "Min_mset M"
let ?N = "M - {#?y#}"
(* M is nonempty (its size is Suc k), so removing its minimum and re-adding
   it reconstructs M. *)
have M: "M = add_mset ?y ?N"
by (metis Min_in Sk_eq_sz_M finite_set_mset insert_DiffM lessI not_less_zero
set_mset_eq_empty_iff size_empty)
show ?case
by (subst M, rule add, rule ih, metis M Sk_eq_sz_M nat.inject size_add_mset,
meson Min_le finite_set_mset in_diffD)
qed (simp add: empty)
(* Dual principle: always add a maximal element. *)
lemma multiset_induct_max[case_names empty add]:
fixes M :: "'a::linorder multiset"
assumes
empty: "P {#}" and
add: "\<And>x M. P M \<Longrightarrow> (\<forall>y \<in># M. y \<le> x) \<Longrightarrow> P (add_mset x M)"
shows "P M"
proof (induct "size M" arbitrary: M)
case (Suc k)
note ih = this(1) and Sk_eq_sz_M = this(2)
let ?y = "Max_mset M"
let ?N = "M - {#?y#}"
have M: "M = add_mset ?y ?N"
by (metis Max_in Sk_eq_sz_M finite_set_mset insert_DiffM lessI not_less_zero
set_mset_eq_empty_iff size_empty)
show ?case
by (subst M, rule add, rule ih, metis M Sk_eq_sz_M nat.inject size_add_mset,
meson Max_ge finite_set_mset in_diffD)
qed (simp add: empty)
lemma multi_nonempty_split: "M \<noteq> {#} \<Longrightarrow> \<exists>A a. M = add_mset a A"
by (induct M) auto
(* Default case-split rule: a multiset is either empty or an add_mset. *)
lemma multiset_cases [cases type]:
obtains (empty) "M = {#}"
| (add) x N where "M = add_mset x N"
by (induct M) simp_all
lemma multi_drop_mem_not_eq: "c \<in># B \<Longrightarrow> B - {#c#} \<noteq> B"
by (cases "B = {#}") (auto dest: multi_member_split)
(* Splitting a multiset along a predicate and its complement restores it. *)
lemma union_filter_mset_complement[simp]:
"\<forall>x. P x = (\<not> Q x) \<Longrightarrow> filter_mset P M + filter_mset Q M = M"
by (subst multiset_eq_iff) auto
lemma multiset_partition: "M = {#x \<in># M. P x#} + {#x \<in># M. \<not> P x#}"
by simp
(* Strict inclusion strictly decreases size. *)
lemma mset_subset_size: "A \<subset># B \<Longrightarrow> size A < size B"
proof (induct A arbitrary: B)
case empty
then show ?case
using nonempty_has_size by auto
next
case (add x A)
have "add_mset x A \<subseteq># B"
by (meson add.prems subset_mset_def)
then show ?case
by (metis (no_types) add.prems add.right_neutral add_diff_cancel_left' leD nat_neq_iff
size_Diff_submset size_eq_0_iff_empty size_mset_mono subset_mset.le_iff_add subset_mset_def)
qed
lemma size_1_singleton_mset: "size M = 1 \<Longrightarrow> \<exists>a. M = {#a#}"
by (cases M) auto
subsubsection \<open>Strong induction and subset induction for multisets\<close>
text \<open>Well-foundedness of strict subset relation\<close>
(* \<subset># is well-founded because it embeds into < on sizes (mset_subset_size). *)
lemma wf_subset_mset_rel: "wf {(M, N :: 'a multiset). M \<subset># N}"
apply (rule wf_measure [THEN wf_subset, where f1=size])
apply (clarsimp simp: measure_def inv_image_def mset_subset_size)
done
(* Strong induction: to prove P B, one may assume P for all strict
   sub-multisets of B. *)
lemma full_multiset_induct [case_names less]:
assumes ih: "\<And>B. \<forall>(A::'a multiset). A \<subset># B \<longrightarrow> P A \<Longrightarrow> P B"
shows "P B"
apply (rule wf_subset_mset_rel [THEN wf_induct])
apply (rule ih, auto)
done
(* Induction over a sub-multiset F of A: in the step one may additionally
   assume that the added element belongs to the ambient multiset A. *)
lemma multi_subset_induct [consumes 2, case_names empty add]:
assumes "F \<subseteq># A"
and empty: "P {#}"
and insert: "\<And>a F. a \<in># A \<Longrightarrow> P F \<Longrightarrow> P (add_mset a F)"
shows "P F"
proof -
from \<open>F \<subseteq># A\<close>
show ?thesis
proof (induct F)
show "P {#}" by fact
next
fix x F
assume P: "F \<subseteq># A \<Longrightarrow> P F" and i: "add_mset x F \<subseteq># A"
show "P (add_mset x F)"
proof (rule insert)
from i show "x \<in># A" by (auto dest: mset_subset_eq_insertD)
from i have "F \<subseteq># A" by (auto dest: mset_subset_eq_insertD)
with P show "P F" .
qed
qed
qed
subsection \<open>The fold combinator\<close>
(* fold_mset f s M applies f to each element of M as many times as it occurs,
   implemented as a finite-set fold of the iterated function f x ^^ count M x
   over the support.  Well-defined only for commuting f (see the context). *)
definition fold_mset :: "('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> 'b \<Rightarrow> 'a multiset \<Rightarrow> 'b"
where
"fold_mset f s M = Finite_Set.fold (\<lambda>x. f x ^^ count M x) s (set_mset M)"
lemma fold_mset_empty [simp]: "fold_mset f s {#} = s"
by (simp add: fold_mset_def)
context comp_fun_commute
begin
(* The key recursion equation: folding over add_mset x M applies f x once
   more.  The two cases distinguish whether x already occurs in M. *)
lemma fold_mset_add_mset [simp]: "fold_mset f s (add_mset x M) = f x (fold_mset f s M)"
proof -
interpret mset: comp_fun_commute "\<lambda>y. f y ^^ count M y"
by (fact comp_fun_commute_funpow)
interpret mset_union: comp_fun_commute "\<lambda>y. f y ^^ count (add_mset x M) y"
by (fact comp_fun_commute_funpow)
show ?thesis
proof (cases "x \<in> set_mset M")
case False
then have *: "count (add_mset x M) x = 1"
by (simp add: not_in_iff)
(* x is fresh, so the iterated functions agree on set_mset M ... *)
from False have "Finite_Set.fold (\<lambda>y. f y ^^ count (add_mset x M) y) s (set_mset M) =
Finite_Set.fold (\<lambda>y. f y ^^ count M y) s (set_mset M)"
by (auto intro!: Finite_Set.fold_cong comp_fun_commute_funpow)
with False * show ?thesis
by (simp add: fold_mset_def del: count_add_mset)
next
case True
(* x already occurs: peel x off the support and compare the folds on N. *)
define N where "N = set_mset M - {x}"
from N_def True have *: "set_mset M = insert x N" "x \<notin> N" "finite N" by auto
then have "Finite_Set.fold (\<lambda>y. f y ^^ count (add_mset x M) y) s N =
Finite_Set.fold (\<lambda>y. f y ^^ count M y) s N"
by (auto intro!: Finite_Set.fold_cong comp_fun_commute_funpow)
with * show ?thesis by (simp add: fold_mset_def del: count_add_mset) simp
qed
qed
corollary fold_mset_single: "fold_mset f s {#x#} = f x s"
by simp
(* The outermost application of f can be pushed into the start value. *)
lemma fold_mset_fun_left_comm: "f x (fold_mset f s M) = fold_mset f (f x s) M"
by (induct M) (simp_all add: fun_left_comm)
lemma fold_mset_union [simp]: "fold_mset f s (M + N) = fold_mset f (fold_mset f s M) N"
by (induct M) (simp_all add: fold_mset_fun_left_comm)
(* Fusion law: a homomorphism h between the step functions commutes with the
   fold. *)
lemma fold_mset_fusion:
assumes "comp_fun_commute g"
and *: "\<And>x y. h (g x y) = f x (h y)"
shows "h (fold_mset g w A) = fold_mset f (h w) A"
proof -
interpret comp_fun_commute g by (fact assms)
from * show ?thesis by (induct A) auto
qed
end
(* Multiset union as a fold of add_mset. *)
lemma union_fold_mset_add_mset: "A + B = fold_mset add_mset A B"
proof -
interpret comp_fun_commute add_mset
by standard auto
show ?thesis
by (induction B) auto
qed
text \<open>
A note on code generation: When defining some function containing a
subterm \<^term>\<open>fold_mset F\<close>, code generation is not automatic. When
interpreting locale \<open>left_commutative\<close> with \<open>F\<close>, the
would be code thms for \<^const>\<open>fold_mset\<close> become thms like
\<^term>\<open>fold_mset F z {#} = z\<close> where \<open>F\<close> is not a pattern but
contains defined symbols, i.e.\ is not a code thm. Hence a separate
constant with its own code thms needs to be introduced for \<open>F\<close>. See the image operator below.
\<close>
subsection \<open>Image\<close>
(* image_mset f M maps f over every occurrence, defined as a fold with
   step "add_mset \<circ> f" (a separate constant so code equations exist, cf.
   the code-generation note above). *)
definition image_mset :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a multiset \<Rightarrow> 'b multiset" where
"image_mset f = fold_mset (add_mset \<circ> f) {#}"
lemma comp_fun_commute_mset_image: "comp_fun_commute (add_mset \<circ> f)"
by unfold_locales (simp add: fun_eq_iff)
lemma image_mset_empty [simp]: "image_mset f {#} = {#}"
by (simp add: image_mset_def)
lemma image_mset_single: "image_mset f {#x#} = {#f x#}"
by (simp add: comp_fun_commute.fold_mset_add_mset comp_fun_commute_mset_image image_mset_def)
(* image_mset distributes over +. *)
lemma image_mset_union [simp]: "image_mset f (M + N) = image_mset f M + image_mset f N"
proof -
interpret comp_fun_commute "add_mset \<circ> f"
by (fact comp_fun_commute_mset_image)
show ?thesis by (induct N) (simp_all add: image_mset_def)
qed
corollary image_mset_add_mset [simp]:
"image_mset f (add_mset a M) = add_mset (f a) (image_mset f M)"
unfolding image_mset_union add_mset_add_single[of a M] by (simp add: image_mset_single)
(* Support, size and emptiness commute with image_mset. *)
lemma set_image_mset [simp]: "set_mset (image_mset f M) = image f (set_mset M)"
by (induct M) simp_all
lemma size_image_mset [simp]: "size (image_mset f M) = size M"
by (induct M) simp_all
lemma image_mset_is_empty_iff [simp]: "image_mset f M = {#} \<longleftrightarrow> M = {#}"
by (cases M) auto
(* Image of a conditional splits into images of the two filtered parts. *)
lemma image_mset_If:
"image_mset (\<lambda>x. if P x then f x else g x) A =
image_mset f (filter_mset P A) + image_mset g (filter_mset (\<lambda>x. \<not>P x) A)"
by (induction A) auto
(* image_mset commutes with difference on sub-multisets. *)
lemma image_mset_Diff:
assumes "B \<subseteq># A"
shows "image_mset f (A - B) = image_mset f A - image_mset f B"
proof -
have "image_mset f (A - B + B) = image_mset f (A - B) + image_mset f B"
by simp
also from assms have "A - B + B = A"
by (simp add: subset_mset.diff_add)
finally show ?thesis by simp
qed
(* Count in an image: sum of counts over the preimage within the support. *)
lemma count_image_mset: "count (image_mset f A) x = (\<Sum>y\<in>f -` {x} \<inter> set_mset A. count A y)"
proof (induction A)
case empty
then show ?case by simp
next
case (add x A)
moreover have *: "(if x = y then Suc n else n) = n + (if x = y then 1 else 0)" for n y
by simp
ultimately show ?case
by (auto simp: sum.distrib intro!: sum.mono_neutral_left)
qed
(* Monotonicity of image_mset w.r.t. \<subseteq># and \<subset>#. *)
lemma image_mset_subseteq_mono: "A \<subseteq># B \<Longrightarrow> image_mset f A \<subseteq># image_mset f B"
by (metis image_mset_union subset_mset.le_iff_add)
lemma image_mset_subset_mono: "M \<subset># N \<Longrightarrow> image_mset f M \<subset># image_mset f N"
by (metis (no_types) Diff_eq_empty_iff_mset image_mset_Diff image_mset_is_empty_iff
image_mset_subseteq_mono subset_mset.less_le_not_le)
(* Image comprehension syntax {#e. x \<in># M#} for image_mset. *)
syntax (ASCII)
"_comprehension_mset" :: "'a \<Rightarrow> 'b \<Rightarrow> 'b multiset \<Rightarrow> 'a multiset" ("({#_/. _ :# _#})")
syntax
"_comprehension_mset" :: "'a \<Rightarrow> 'b \<Rightarrow> 'b multiset \<Rightarrow> 'a multiset" ("({#_/. _ \<in># _#})")
translations
"{#e. x \<in># M#}" \<rightleftharpoons> "CONST image_mset (\<lambda>x. e) M"
(* Combined image-and-filter comprehension {#e | x \<in># M. P#} (one-way
   translation only). *)
syntax (ASCII)
"_comprehension_mset'" :: "'a \<Rightarrow> 'b \<Rightarrow> 'b multiset \<Rightarrow> bool \<Rightarrow> 'a multiset" ("({#_/ | _ :# _./ _#})")
syntax
"_comprehension_mset'" :: "'a \<Rightarrow> 'b \<Rightarrow> 'b multiset \<Rightarrow> bool \<Rightarrow> 'a multiset" ("({#_/ | _ \<in># _./ _#})")
translations
"{#e | x\<in>#M. P#}" \<rightharpoonup> "{#e. x \<in># {# x\<in>#M. P#}#}"
text \<open>
This allows to write not just filters like \<^term>\<open>{#x\<in>#M. x<c#}\<close>
but also images like \<^term>\<open>{#x+x. x\<in>#M #}\<close> and @{term [source]
"{#x+x|x\<in>#M. x<c#}"}, where the latter is currently displayed as
\<^term>\<open>{#x+x|x\<in>#M. x<c#}\<close>.
\<close>
lemma in_image_mset: "y \<in># {#f x. x \<in># M#} \<longleftrightarrow> y \<in> f ` set_mset M"
by simp
(* image_mset is functorial: preserves composition and identity (registers
   the natural transformation setup for the BNF/functor infrastructure). *)
functor image_mset: image_mset
proof -
fix f g show "image_mset f \<circ> image_mset g = image_mset (f \<circ> g)"
proof
fix A
show "(image_mset f \<circ> image_mset g) A = image_mset (f \<circ> g) A"
by (induct A) simp_all
qed
show "image_mset id = id"
proof
fix A
show "image_mset id A = id A"
by (induct A) simp_all
qed
qed
declare
image_mset.id [simp]
image_mset.identity [simp]
lemma image_mset_id[simp]: "image_mset id x = x"
unfolding id_def by auto
(* Congruence rules: f may be replaced by g if they agree on the elements. *)
lemma image_mset_cong: "(\<And>x. x \<in># M \<Longrightarrow> f x = g x) \<Longrightarrow> {#f x. x \<in># M#} = {#g x. x \<in># M#}"
by (induct M) auto
lemma image_mset_cong_pair:
"(\<forall>x y. (x, y) \<in># M \<longrightarrow> f x y = g x y) \<Longrightarrow> {#f x y. (x, y) \<in># M#} = {#g x y. (x, y) \<in># M#}"
by (metis image_mset_cong split_cong)
(* A constant image is a replicated singleton. *)
lemma image_mset_const_eq:
"{#c. a \<in># M#} = replicate_mset (size M) c"
by (induct M) simp_all
subsection \<open>Further conversions\<close>
(* mset xs is the multiset of elements of the list xs (order forgotten,
   multiplicities kept). *)
primrec mset :: "'a list \<Rightarrow> 'a multiset" where
"mset [] = {#}" |
"mset (a # x) = add_mset a (mset x)"
lemma in_multiset_in_set:
"x \<in># mset xs \<longleftrightarrow> x \<in> set xs"
by (induct xs) simp_all
(* The count of x in mset xs is the number of occurrences of x in xs. *)
lemma count_mset:
"count (mset xs) x = length (filter (\<lambda>y. x = y) xs)"
by (induct xs) simp_all
lemma mset_zero_iff[simp]: "(mset x = {#}) = (x = [])"
by (induct x) auto
lemma mset_zero_iff_right[simp]: "({#} = mset x) = (x = [])"
by (induct x) auto
lemma count_mset_gt_0: "x \<in> set xs \<Longrightarrow> count (mset xs) x > 0"
by (induction xs) auto
lemma count_mset_0_iff [simp]: "count (mset xs) x = 0 \<longleftrightarrow> x \<notin> set xs"
by (induction xs) auto
lemma mset_single_iff[iff]: "mset xs = {#x#} \<longleftrightarrow> xs = [x]"
by (cases xs) auto
lemma mset_single_iff_right[iff]: "{#x#} = mset xs \<longleftrightarrow> xs = [x]"
by (cases xs) auto
(* mset commutes with the usual list operations: set, size/length, append,
   filter and reverse. *)
lemma set_mset_mset[simp]: "set_mset (mset xs) = set xs"
by (induct xs) auto
lemma set_mset_comp_mset [simp]: "set_mset \<circ> mset = set"
by (simp add: fun_eq_iff)
lemma size_mset [simp]: "size (mset xs) = length xs"
by (induct xs) simp_all
lemma mset_append [simp]: "mset (xs @ ys) = mset xs + mset ys"
by (induct xs arbitrary: ys) auto
lemma mset_filter[simp]: "mset (filter P xs) = {#x \<in># mset xs. P x #}"
by (induct xs) simp_all
lemma mset_rev [simp]:
"mset (rev xs) = mset xs"
by (induct xs) simp_all
(* Every multiset arises as mset of some list (finiteness of support). *)
lemma surj_mset: "surj mset"
apply (unfold surj_def)
apply (rule allI)
apply (rule_tac M = y in multiset_induct)
apply auto
apply (rule_tac x = "x # xa" in exI)
apply auto
done
(* A list is duplicate-free iff every count in its multiset is at most 1. *)
lemma distinct_count_atmost_1:
"distinct x = (\<forall>a. count (mset x) a = (if a \<in> set x then 1 else 0))"
proof (induct x)
case Nil then show ?case by simp
next
case (Cons x xs) show ?case (is "?lhs \<longleftrightarrow> ?rhs")
proof
assume ?lhs then show ?rhs using Cons by simp
next
assume ?rhs then have "x \<notin> set xs"
by (simp split: if_splits)
moreover from \<open>?rhs\<close> have "(\<forall>a. count (mset xs) a =
(if a \<in> set xs then 1 else 0))"
by (auto split: if_splits simp add: count_eq_zero_iff)
ultimately show ?lhs using Cons by simp
qed
qed
(* Lists with equal multisets have equal element sets. *)
lemma mset_eq_setD:
assumes "mset xs = mset ys"
shows "set xs = set ys"
proof -
from assms have "set_mset (mset xs) = set_mset (mset ys)"
by simp
then show ?thesis by simp
qed
(* For distinct lists, set equality coincides with multiset equality. *)
lemma set_eq_iff_mset_eq_distinct:
"distinct x \<Longrightarrow> distinct y \<Longrightarrow>
(set x = set y) = (mset x = mset y)"
by (auto simp: multiset_eq_iff distinct_count_atmost_1)
lemma set_eq_iff_mset_remdups_eq:
"(set x = set y) = (mset (remdups x) = mset (remdups y))"
apply (rule iffI)
apply (simp add: set_eq_iff_mset_eq_distinct[THEN iffD1])
apply (drule distinct_remdups [THEN distinct_remdups
[THEN set_eq_iff_mset_eq_distinct [THEN iffD2]]])
apply simp
done
lemma nth_mem_mset: "i < length ls \<Longrightarrow> (ls ! i) \<in># mset ls"
proof (induct ls arbitrary: i)
case Nil
then show ?case by simp
next
case Cons
then show ?case by (cases i) auto
qed
(* remove1 on lists corresponds to single-element difference on multisets. *)
lemma mset_remove1[simp]: "mset (remove1 a xs) = mset xs - {#a#}"
by (induct xs) (auto simp add: multiset_eq_iff)
lemma mset_eq_length:
assumes "mset xs = mset ys"
shows "length xs = length ys"
using assms by (metis size_mset)
lemma mset_eq_length_filter:
assumes "mset xs = mset ys"
shows "length (filter (\<lambda>x. z = x) xs) = length (filter (\<lambda>y. z = y) ys)"
using assms by (metis count_mset)
(* Folding a pairwise-commuting f is invariant under permutation of the list
   (permutation expressed as equality of multisets). *)
lemma fold_multiset_equiv:
assumes f: "\<And>x y. x \<in> set xs \<Longrightarrow> y \<in> set xs \<Longrightarrow> f x \<circ> f y = f y \<circ> f x"
and equiv: "mset xs = mset ys"
shows "List.fold f xs = List.fold f ys"
using f equiv [symmetric]
proof (induct xs arbitrary: ys)
case Nil
then show ?case by simp
next
case (Cons x xs)
then have *: "set ys = set (x # xs)"
by (blast dest: mset_eq_setD)
have "\<And>x y. x \<in> set ys \<Longrightarrow> y \<in> set ys \<Longrightarrow> f x \<circ> f y = f y \<circ> f x"
by (rule Cons.prems(1)) (simp_all add: *)
moreover from * have "x \<in> set ys"
by simp
ultimately have "List.fold f ys = List.fold f (remove1 x ys) \<circ> f x"
by (fact fold_remove1_split)
moreover from Cons.prems have "List.fold f xs = List.fold f (remove1 x ys)"
by (auto intro: Cons.hyps)
ultimately show ?case by simp
qed
(* Interleavings (shuffles) of two lists contain exactly the combined
   occurrences of both. *)
lemma mset_shuffles: "zs \<in> shuffles xs ys \<Longrightarrow> mset zs = mset xs + mset ys"
by (induction xs ys arbitrary: zs rule: shuffles.induct) auto
lemma mset_insort [simp]: "mset (insort x xs) = add_mset x (mset xs)"
by (induct xs) simp_all
lemma mset_map[simp]: "mset (map f xs) = image_mset f (mset xs)"
by (induct xs) simp_all
(* mset_set A is the multiset of elements of a finite set A (each with
   multiplicity 1); for infinite A the folding combinator yields {#}. *)
global_interpretation mset_set: folding add_mset "{#}"
defines mset_set = "folding.F add_mset {#}"
by standard (simp add: fun_eq_iff)
lemma sum_multiset_singleton [simp]: "sum (\<lambda>n. {#n#}) A = mset_set A"
by (induction A rule: infinite_finite_induct) auto
(* Counts in mset_set: 1 for members of a finite set, 0 otherwise. *)
lemma count_mset_set [simp]:
"finite A \<Longrightarrow> x \<in> A \<Longrightarrow> count (mset_set A) x = 1" (is "PROP ?P")
"\<not> finite A \<Longrightarrow> count (mset_set A) x = 0" (is "PROP ?Q")
"x \<notin> A \<Longrightarrow> count (mset_set A) x = 0" (is "PROP ?R")
proof -
have *: "count (mset_set A) x = 0" if "x \<notin> A" for A
proof (cases "finite A")
case False then show ?thesis by simp
next
case True from True \<open>x \<notin> A\<close> show ?thesis by (induct A) auto
qed
then show "PROP ?P" "PROP ?Q" "PROP ?R"
by (auto elim!: Set.set_insert)
qed \<comment> \<open>TODO: maybe define \<^const>\<open>mset_set\<close> also in terms of \<^const>\<open>Abs_multiset\<close>\<close>
lemma elem_mset_set[simp, intro]: "finite A \<Longrightarrow> x \<in># mset_set A \<longleftrightarrow> x \<in> A"
by (induct A rule: finite_induct) simp_all
(* mset_set turns disjoint unions into multiset sums. *)
lemma mset_set_Union:
"finite A \<Longrightarrow> finite B \<Longrightarrow> A \<inter> B = {} \<Longrightarrow> mset_set (A \<union> B) = mset_set A + mset_set B"
by (induction A rule: finite_induct) auto
(* Filtering mset_set corresponds to set comprehension. *)
lemma filter_mset_mset_set [simp]:
"finite A \<Longrightarrow> filter_mset P (mset_set A) = mset_set {x\<in>A. P x}"
proof (induction A rule: finite_induct)
case (insert x A)
from insert.hyps have "filter_mset P (mset_set (insert x A)) =
filter_mset P (mset_set A) + mset_set (if P x then {x} else {})"
by simp
also have "filter_mset P (mset_set A) = mset_set {x\<in>A. P x}"
by (rule insert.IH)
also from insert.hyps
have "\<dots> + mset_set (if P x then {x} else {}) =
mset_set ({x \<in> A. P x} \<union> (if P x then {x} else {}))" (is "_ = mset_set ?A")
by (intro mset_set_Union [symmetric]) simp_all
also from insert.hyps have "?A = {y\<in>insert x A. P y}" by auto
finally show ?case .
qed simp_all
(* mset_set commutes with set difference on subsets of a finite set. *)
lemma mset_set_Diff:
assumes "finite A" "B \<subseteq> A"
shows "mset_set (A - B) = mset_set A - mset_set B"
proof -
from assms have "mset_set ((A - B) \<union> B) = mset_set (A - B) + mset_set B"
by (intro mset_set_Union) (auto dest: finite_subset)
also from assms have "A - B \<union> B = A" by blast
finally show ?thesis by simp
qed
(* For distinct lists, mset_set of the element set recovers the multiset. *)
lemma mset_set_set: "distinct xs \<Longrightarrow> mset_set (set xs) = mset xs"
by (induction xs) simp_all
lemma count_mset_set': "count (mset_set A) x = (if finite A \<and> x \<in> A then 1 else 0)"
by auto
(* Set inclusion lifts to \<subseteq># of the corresponding mset_sets. *)
lemma subset_imp_msubset_mset_set:
assumes "A \<subseteq> B" "finite B"
shows "mset_set A \<subseteq># mset_set B"
proof (rule mset_subset_eqI)
fix x :: 'a
from assms have "finite A" by (rule finite_subset)
with assms show "count (mset_set A) x \<le> count (mset_set B) x"
by (cases "x \<in> A"; cases "x \<in> B") auto
qed
(* Deduplicating a multiset (via its support) yields a sub-multiset. *)
lemma mset_set_set_mset_msubset: "mset_set (set_mset A) \<subseteq># A"
proof (rule mset_subset_eqI)
fix x show "count (mset_set (set_mset A)) x \<le> count A x"
by (cases "x \<in># A") simp_all
qed
context linorder
begin
(* The sorted list of a multiset over a linear order, built by folding
   insertion sort's insort (which is comp_fun_commute on linorders). *)
definition sorted_list_of_multiset :: "'a multiset \<Rightarrow> 'a list"
where
"sorted_list_of_multiset M = fold_mset insort [] M"
lemma sorted_list_of_multiset_empty [simp]:
"sorted_list_of_multiset {#} = []"
by (simp add: sorted_list_of_multiset_def)
lemma sorted_list_of_multiset_singleton [simp]:
"sorted_list_of_multiset {#x#} = [x]"
proof -
interpret comp_fun_commute insort by (fact comp_fun_commute_insort)
show ?thesis by (simp add: sorted_list_of_multiset_def)
qed
(* Recursion equation: adding an element corresponds to insort-ing it. *)
lemma sorted_list_of_multiset_insert [simp]:
"sorted_list_of_multiset (add_mset x M) = List.insort x (sorted_list_of_multiset M)"
proof -
interpret comp_fun_commute insort by (fact comp_fun_commute_insort)
show ?thesis by (simp add: sorted_list_of_multiset_def)
qed
end
(* sorted_list_of_multiset is a right inverse of mset, and on msets of lists
   it coincides with sort. *)
lemma mset_sorted_list_of_multiset[simp]: "mset (sorted_list_of_multiset M) = M"
by (induct M) simp_all
lemma sorted_list_of_multiset_mset[simp]: "sorted_list_of_multiset (mset xs) = sort xs"
by (induct xs) simp_all
lemma finite_set_mset_mset_set[simp]: "finite A \<Longrightarrow> set_mset (mset_set A) = A"
by auto
(* mset_set A is empty exactly when A is empty or infinite (the fold yields
   {#} for infinite sets). *)
lemma mset_set_empty_iff: "mset_set A = {#} \<longleftrightarrow> A = {} \<or> infinite A"
using finite_set_mset_mset_set by fastforce
lemma infinite_set_mset_mset_set: "\<not> finite A \<Longrightarrow> set_mset (mset_set A) = {}"
by simp
lemma set_sorted_list_of_multiset [simp]:
"set (sorted_list_of_multiset M) = set_mset M"
by (induct M) (simp_all add: set_insort_key)
lemma sorted_list_of_mset_set [simp]:
"sorted_list_of_multiset (mset_set A) = sorted_list_of_set A"
by (cases "finite A") (induct A rule: finite_induct, simp_all)
lemma mset_upt [simp]: "mset [m..<n] = mset_set {m..<n}"
by (induction n) (simp_all add: atLeastLessThanSuc)
(* For an association list with distinct keys, looking up every key yields
   exactly the multiset of values. *)
lemma image_mset_map_of:
"distinct (map fst xs) \<Longrightarrow> {#the (map_of xs i). i \<in># mset (map fst xs)#} = mset (map snd xs)"
proof (induction xs)
case (Cons x xs)
have "{#the (map_of (x # xs) i). i \<in># mset (map fst (x # xs))#} =
add_mset (snd x) {#the (if i = fst x then Some (snd x) else map_of xs i).
i \<in># mset (map fst xs)#}" (is "_ = add_mset _ ?A") by simp
also from Cons.prems have "?A = {#the (map_of xs i). i :# mset (map fst xs)#}"
by (cases x, intro image_mset_cong) (auto simp: in_multiset_in_set)
also from Cons.prems have "\<dots> = mset (map snd xs)" by (intro Cons.IH) simp_all
finally show ?case by simp
qed simp_all
(* For finite sets, \<subseteq># and = on mset_sets reflect \<subseteq> and = on the sets. *)
lemma msubset_mset_set_iff[simp]:
assumes "finite A" "finite B"
shows "mset_set A \<subseteq># mset_set B \<longleftrightarrow> A \<subseteq> B"
using assms set_mset_mono subset_imp_msubset_mset_set by fastforce
lemma mset_set_eq_iff[simp]:
assumes "finite A" "finite B"
shows "mset_set A = mset_set B \<longleftrightarrow> A = B"
using assms by (fastforce dest: finite_set_mset_mset_set)
lemma image_mset_mset_set: \<^marker>\<open>contributor \<open>Lukas Bulwahn\<close>\<close>
assumes "inj_on f A"
shows "image_mset f (mset_set A) = mset_set (f ` A)"
proof cases
assume "finite A"
from this \<open>inj_on f A\<close> show ?thesis
by (induct A) auto
next
assume "infinite A"
from this \<open>inj_on f A\<close> have "infinite (f ` A)"
using finite_imageD by blast
from \<open>infinite A\<close> \<open>infinite (f ` A)\<close> show ?thesis by simp
qed
subsection \<open>More properties of the replicate and repeat operations\<close>
(* Properties of replicate_mset n a (n copies of a) and repeat_mset n A
   (n-fold multiset sum of A): membership, size, cancellation, and how
   replicate interacts with count, filter, image and \<subseteq>#. *)
lemma in_replicate_mset[simp]: "x \<in># replicate_mset n y \<longleftrightarrow> n > 0 \<and> x = y"
unfolding replicate_mset_def by (induct n) auto
lemma set_mset_replicate_mset_subset[simp]: "set_mset (replicate_mset n x) = (if n = 0 then {} else {x})"
by (auto split: if_splits)
lemma size_replicate_mset[simp]: "size (replicate_mset n M) = n"
by (induct n, simp_all)
lemma count_le_replicate_mset_subset_eq: "n \<le> count M x \<longleftrightarrow> replicate_mset n x \<subseteq># M"
by (auto simp add: mset_subset_eqI) (metis count_replicate_mset subseteq_mset_def)
lemma filter_eq_replicate_mset: "{#y \<in># D. y = x#} = replicate_mset (count D x) x"
by (induct D) simp_all
lemma replicate_count_mset_eq_filter_eq: "replicate (count (mset xs) k) k = filter (HOL.eq k) xs"
by (induct xs) auto
lemma replicate_mset_eq_empty_iff [simp]: "replicate_mset n a = {#} \<longleftrightarrow> n = 0"
by (induct n) simp_all
lemma replicate_mset_eq_iff:
"replicate_mset m a = replicate_mset n b \<longleftrightarrow> m = 0 \<and> n = 0 \<or> m = n \<and> a = b"
by (auto simp add: multiset_eq_iff)
lemma repeat_mset_cancel1: "repeat_mset a A = repeat_mset a B \<longleftrightarrow> A = B \<or> a = 0"
by (auto simp: multiset_eq_iff)
lemma repeat_mset_cancel2: "repeat_mset a A = repeat_mset b A \<longleftrightarrow> a = b \<or> A = {#}"
by (auto simp: multiset_eq_iff)
lemma repeat_mset_eq_empty_iff: "repeat_mset n A = {#} \<longleftrightarrow> n = 0 \<or> A = {#}"
by (cases n) auto
lemma image_replicate_mset [simp]:
"image_mset f (replicate_mset n a) = replicate_mset n (f a)"
by (induct n) simp_all
lemma replicate_mset_msubseteq_iff:
"replicate_mset m a \<subseteq># replicate_mset n b \<longleftrightarrow> m = 0 \<or> a = b \<and> m \<le> n"
by (cases m)
(auto simp: insert_subset_eq_iff simp flip: count_le_replicate_mset_subset_eq)
(* Any sub-multiset of a replicate is itself a (shorter) replicate. *)
lemma msubseteq_replicate_msetE:
assumes "A \<subseteq># replicate_mset n a"
obtains m where "m \<le> n" and "A = replicate_mset m a"
proof (cases "n = 0")
case True
with assms that show thesis
by simp
next
case False
from assms have "set_mset A \<subseteq> set_mset (replicate_mset n a)"
by (rule set_mset_mono)
with False have "set_mset A \<subseteq> {a}"
by simp
(* every element of A is a, so A is determined by its size alone *)
then have "\<exists>m. A = replicate_mset m a"
proof (induction A)
case empty
then show ?case
by simp
next
case (add b A)
then obtain m where "A = replicate_mset m a"
by auto
with add.prems show ?case
by (auto intro: exI [of _ "Suc m"])
qed
then obtain m where A: "A = replicate_mset m a" ..
with assms have "m \<le> n"
by (auto simp add: replicate_mset_msubseteq_iff)
then show thesis using A ..
qed
subsection \<open>Big operators\<close>
(* Big operator over multisets for an arbitrary commutative monoid (f, \<^bold>1):
   F M folds f over M starting from the neutral element.  Commutativity of f
   is what makes fold_mset well-defined (order of elements is immaterial). *)
locale comm_monoid_mset = comm_monoid
begin
interpretation comp_fun_commute f
by standard (simp add: fun_eq_iff left_commute)
interpretation comp?: comp_fun_commute "f \<circ> g"
by (fact comp_comp_fun_commute)
context
begin
definition F :: "'a multiset \<Rightarrow> 'a"
where eq_fold: "F M = fold_mset f \<^bold>1 M"
lemma empty [simp]: "F {#} = \<^bold>1"
by (simp add: eq_fold)
lemma singleton [simp]: "F {#x#} = x"
proof -
interpret comp_fun_commute
by standard (simp add: fun_eq_iff left_commute)
show ?thesis by (simp add: eq_fold)
qed
(* F is a monoid homomorphism from (multisets, +) to ('a, \<^bold>*). *)
lemma union [simp]: "F (M + N) = F M \<^bold>* F N"
proof -
interpret comp_fun_commute f
by standard (simp add: fun_eq_iff left_commute)
show ?thesis
by (induct N) (simp_all add: left_commute eq_fold)
qed
lemma add_mset [simp]: "F (add_mset x N) = x \<^bold>* F N"
unfolding add_mset_add_single[of x N] union by (simp add: ac_simps)
lemma insert [simp]:
shows "F (image_mset g (add_mset x A)) = g x \<^bold>* F (image_mset g A)"
by (simp add: eq_fold)
lemma remove:
assumes "x \<in># A"
shows "F A = x \<^bold>* F (A - {#x#})"
using multi_member_split[OF assms] by auto
lemma neutral:
"\<forall>x\<in>#A. x = \<^bold>1 \<Longrightarrow> F A = \<^bold>1"
by (induct A) simp_all
lemma neutral_const [simp]:
"F (image_mset (\<lambda>_. \<^bold>1) A) = \<^bold>1"
by (simp add: neutral)
(* auxiliary distribution step used in the proof of swap below *)
private lemma F_image_mset_product:
"F {#g x j \<^bold>* F {#g i j. i \<in># A#}. j \<in># B#} =
F (image_mset (g x) B) \<^bold>* F {#F {#g i j. i \<in># A#}. j \<in># B#}"
by (induction B) (simp_all add: left_commute semigroup.assoc semigroup_axioms)
(* interchange of two nested big operators (Fubini-style) *)
lemma swap:
"F (image_mset (\<lambda>i. F (image_mset (g i) B)) A) =
F (image_mset (\<lambda>j. F (image_mset (\<lambda>i. g i j) A)) B)"
apply (induction A, simp)
apply (induction B, auto simp add: F_image_mset_product ac_simps)
done
lemma distrib: "F (image_mset (\<lambda>x. g x \<^bold>* h x) A) = F (image_mset g A) \<^bold>* F (image_mset h A)"
by (induction A) (auto simp: ac_simps)
lemma union_disjoint:
"A \<inter># B = {#} \<Longrightarrow> F (image_mset g (A \<union># B)) = F (image_mset g A) \<^bold>* F (image_mset g B)"
by (induction A) (auto simp: ac_simps)
end
end
(* Multiset addition commutes as a fold function, enabling fold_mset (+). *)
lemma comp_fun_commute_plus_mset[simp]: "comp_fun_commute ((+) :: 'a multiset \<Rightarrow> _ \<Rightarrow> _)"
by standard (simp add: add_ac comp_def)
declare comp_fun_commute.fold_mset_add_mset[OF comp_fun_commute_plus_mset, simp]
(* Membership in a fold of multiset sums: x comes from M or from some N in NN. *)
lemma in_mset_fold_plus_iff[iff]: "x \<in># fold_mset (+) M NN \<longleftrightarrow> x \<in># M \<or> (\<exists>N. N \<in># NN \<and> x \<in># N)"
by (induct NN) auto
(* Instantiate the generic big operator to addition: sum_mset sums the
   elements of a multiset (with multiplicity). *)
context comm_monoid_add
begin
sublocale sum_mset: comm_monoid_mset plus 0
defines sum_mset = sum_mset.F ..
(* a finite-set sum is the multiset sum over the image of mset_set *)
lemma sum_unfold_sum_mset:
"sum f A = sum_mset (image_mset f (mset_set A))"
by (cases "finite A") (induct A rule: finite_induct, simp_all)
end
(* Binder notation \<Sum>i\<in>#A. b (ASCII: SUM i:#A. b) for sum_mset of an image. *)
syntax (ASCII)
"_sum_mset_image" :: "pttrn \<Rightarrow> 'b set \<Rightarrow> 'a \<Rightarrow> 'a::comm_monoid_add" ("(3SUM _:#_. _)" [0, 51, 10] 10)
syntax
"_sum_mset_image" :: "pttrn \<Rightarrow> 'b set \<Rightarrow> 'a \<Rightarrow> 'a::comm_monoid_add" ("(3\<Sum>_\<in>#_. _)" [0, 51, 10] 10)
translations
"\<Sum>i \<in># A. b" \<rightleftharpoons> "CONST sum_mset (CONST image_mset (\<lambda>i. b) A)"
(* Agreement of sum_mset with sum_list, and the zero-sum criterion in
   canonically ordered monoids (where a + b = 0 forces a = b = 0). *)
context comm_monoid_add
begin
lemma sum_mset_sum_list:
"sum_mset (mset xs) = sum_list xs"
by (induction xs) auto
end
context canonically_ordered_monoid_add
begin
lemma sum_mset_0_iff [simp]:
"sum_mset M = 0 \<longleftrightarrow> (\<forall>x \<in> set_mset M. x = 0)"
by (induction M) auto
end
(* Order and algebraic laws for sum_mset: monotonicity, difference, and
   distribution of multiplication over the sum (in a semiring). *)
context ordered_comm_monoid_add
begin
lemma sum_mset_mono:
"sum_mset (image_mset f K) \<le> sum_mset (image_mset g K)"
if "\<And>i. i \<in># K \<Longrightarrow> f i \<le> g i"
using that by (induction K) (simp_all add: add_mono)
end
context ordered_cancel_comm_monoid_diff
begin
lemma sum_mset_diff:
"sum_mset (M - N) = sum_mset M - sum_mset N" if "N \<subseteq># M" for M N :: "'a multiset"
using that by (auto simp add: subset_mset.le_iff_add)
end
context semiring_0
begin
lemma sum_mset_distrib_left:
"c * (\<Sum>x \<in># M. f x) = (\<Sum>x \<in># M. c * f(x))"
by (induction M) (simp_all add: algebra_simps)
lemma sum_mset_distrib_right:
"(\<Sum>x \<in># M. f x) * c = (\<Sum>x \<in># M. f x * c)"
by (induction M) (simp_all add: algebra_simps)
end
(* Product of two multiset sums as a double sum, delta sums, and the link
   between size and a constant sum. *)
lemma sum_mset_product:
fixes f :: "'a::{comm_monoid_add,times} \<Rightarrow> 'b::semiring_0"
shows "(\<Sum>i \<in># A. f i) * (\<Sum>i \<in># B. g i) = (\<Sum>i\<in>#A. \<Sum>j\<in>#B. f i * g j)"
by (subst sum_mset.swap) (simp add: sum_mset_distrib_left sum_mset_distrib_right)
context semiring_1
begin
lemma sum_mset_replicate_mset [simp]:
"sum_mset (replicate_mset n a) = of_nat n * a"
by (induction n) (simp_all add: algebra_simps)
(* Summing a function that is c at y and 0 elsewhere counts y's multiplicity. *)
lemma sum_mset_delta:
"sum_mset (image_mset (\<lambda>x. if x = y then c else 0) A) = c * of_nat (count A y)"
by (induction A) (simp_all add: algebra_simps)
lemma sum_mset_delta':
"sum_mset (image_mset (\<lambda>x. if y = x then c else 0) A) = c * of_nat (count A y)"
by (induction A) (simp_all add: algebra_simps)
end
lemma of_nat_sum_mset [simp]:
"of_nat (sum_mset A) = sum_mset (image_mset of_nat A)"
by (induction A) auto
(* size as a multiset sum of ones; hence size (mset_set A) = card A *)
lemma size_eq_sum_mset:
"size M = (\<Sum>a\<in>#M. 1)"
using image_mset_const_eq [of "1::nat" M] by simp
lemma size_mset_set [simp]:
"size (mset_set A) = card A"
by (simp only: size_eq_sum_mset card_eq_sum sum_unfold_sum_mset)
lemma sum_mset_constant [simp]:
fixes y :: "'b::semiring_1"
shows \<open>(\<Sum>x\<in>#A. y) = of_nat (size A) * y\<close>
by (induction A) (auto simp: algebra_simps)
(* \<Union># flattens a multiset of multisets by summing them (multiplicities add). *)
abbreviation Union_mset :: "'a multiset multiset \<Rightarrow> 'a multiset" ("\<Union>#")
where "\<Union># MM \<equiv> sum_mset MM" \<comment> \<open>FIXME ambiguous notation --
could likewise refer to \<open>\<Squnion>#\<close>\<close>
lemma set_mset_Union_mset[simp]: "set_mset (\<Union># MM) = (\<Union>M \<in> set_mset MM. set_mset M)"
by (induct MM) auto
lemma in_Union_mset_iff[iff]: "x \<in># \<Union># MM \<longleftrightarrow> (\<exists>M. M \<in># MM \<and> x \<in># M)"
by (induct MM) auto
(* count of a set-indexed sum of multisets is the sum of the counts *)
lemma count_sum:
"count (sum f A) x = sum (\<lambda>a. count (f a) x) A"
by (induct A rule: infinite_finite_induct) simp_all
lemma sum_eq_empty_iff:
assumes "finite A"
shows "sum f A = {#} \<longleftrightarrow> (\<forall>a\<in>A. f a = {#})"
using assms by induct simp_all
lemma Union_mset_empty_conv[simp]: "\<Union># M = {#} \<longleftrightarrow> (\<forall>i\<in>#M. i = {#})"
by (induction M) auto
lemma Union_image_single_mset[simp]: "\<Union># (image_mset (\<lambda>x. {#x#}) m) = m"
by(induction m) auto
(* Instantiate the generic big operator to multiplication: prod_mset
   multiplies the elements of a multiset (with multiplicity). *)
context comm_monoid_mult
begin
sublocale prod_mset: comm_monoid_mset times 1
defines prod_mset = prod_mset.F ..
lemma prod_mset_empty:
"prod_mset {#} = 1"
by (fact prod_mset.empty)
lemma prod_mset_singleton:
"prod_mset {#x#} = x"
by (fact prod_mset.singleton)
lemma prod_mset_Un:
"prod_mset (A + B) = prod_mset A * prod_mset B"
by (fact prod_mset.union)
lemma prod_mset_prod_list:
"prod_mset (mset xs) = prod_list xs"
by (induct xs) auto
lemma prod_mset_replicate_mset [simp]:
"prod_mset (replicate_mset n a) = a ^ n"
by (induct n) simp_all
lemma prod_unfold_prod_mset:
"prod f A = prod_mset (image_mset f (mset_set A))"
by (cases "finite A") (induct A rule: finite_induct, simp_all)
(* product over a multiset as a product of powers over its support *)
lemma prod_mset_multiplicity:
"prod_mset M = prod (\<lambda>x. x ^ count M x) (set_mset M)"
by (simp add: fold_mset_def prod.eq_fold prod_mset.eq_fold funpow_times_power comp_def)
lemma prod_mset_delta: "prod_mset (image_mset (\<lambda>x. if x = y then c else 1) A) = c ^ count A y"
by (induction A) simp_all
lemma prod_mset_delta': "prod_mset (image_mset (\<lambda>x. if y = x then c else 1) A) = c ^ count A y"
by (induction A) simp_all
(* divisibility: the product over a sub-multiset divides the full product *)
lemma prod_mset_subset_imp_dvd:
assumes "A \<subseteq># B"
shows "prod_mset A dvd prod_mset B"
proof -
from assms have "B = (B - A) + A" by (simp add: subset_mset.diff_add)
also have "prod_mset \<dots> = prod_mset (B - A) * prod_mset A" by simp
also have "prod_mset A dvd \<dots>" by simp
finally show ?thesis .
qed
lemma dvd_prod_mset:
assumes "x \<in># A"
shows "x dvd prod_mset A"
using assms prod_mset_subset_imp_dvd [of "{#x#}" A] by simp
end
(* Binder notation \<Prod>i\<in>#A. b (ASCII: PROD i:#A. b) for prod_mset of an image. *)
syntax (ASCII)
"_prod_mset_image" :: "pttrn \<Rightarrow> 'b set \<Rightarrow> 'a \<Rightarrow> 'a::comm_monoid_mult" ("(3PROD _:#_. _)" [0, 51, 10] 10)
syntax
"_prod_mset_image" :: "pttrn \<Rightarrow> 'b set \<Rightarrow> 'a \<Rightarrow> 'a::comm_monoid_mult" ("(3\<Prod>_\<in>#_. _)" [0, 51, 10] 10)
translations
"\<Prod>i \<in># A. b" \<rightleftharpoons> "CONST prod_mset (CONST image_mset (\<lambda>i. b) A)"
(* Class-specific facts about prod_mset: zero divisors, division by
   sub-products, units, and interaction with normalize. *)
lemma prod_mset_constant [simp]: "(\<Prod>_\<in>#A. c) = c ^ size A"
by (simp add: image_mset_const_eq)
lemma (in semidom) prod_mset_zero_iff [iff]:
"prod_mset A = 0 \<longleftrightarrow> 0 \<in># A"
by (induct A) auto
lemma (in semidom_divide) prod_mset_diff:
assumes "B \<subseteq># A" and "0 \<notin># B"
shows "prod_mset (A - B) = prod_mset A div prod_mset B"
proof -
from assms obtain C where "A = B + C"
by (metis subset_mset.add_diff_inverse)
with assms show ?thesis by simp
qed
lemma (in semidom_divide) prod_mset_minus:
assumes "a \<in># A" and "a \<noteq> 0"
shows "prod_mset (A - {#a#}) = prod_mset A div a"
using assms prod_mset_diff [of "{#a#}" A] by auto
(* normalizing each factor does not change the normalized product *)
lemma (in normalization_semidom) normalize_prod_mset_normalize:
"normalize (prod_mset (image_mset normalize A)) = normalize (prod_mset A)"
proof (induction A)
case (add x A)
have "normalize (prod_mset (image_mset normalize (add_mset x A))) =
normalize (x * normalize (prod_mset (image_mset normalize A)))"
by simp
also note add.IH
finally show ?case by simp
qed auto
lemma (in algebraic_semidom) is_unit_prod_mset_iff:
"is_unit (prod_mset A) \<longleftrightarrow> (\<forall>x \<in># A. is_unit x)"
by (induct A) (auto simp: is_unit_mult_iff)
lemma (in normalization_semidom_multiplicative) normalize_prod_mset:
"normalize (prod_mset A) = prod_mset (image_mset normalize A)"
by (induct A) (simp_all add: normalize_mult)
lemma (in normalization_semidom_multiplicative) normalized_prod_msetI:
assumes "\<And>a. a \<in># A \<Longrightarrow> normalize a = a"
shows "normalize (prod_mset A) = prod_mset A"
proof -
from assms have "image_mset normalize A = A"
by (induct A) simp_all
then show ?thesis by (simp add: normalize_prod_mset)
qed
subsection \<open>Alternative representations\<close>
subsubsection \<open>Lists\<close>
(* Sorting via multisets: characterizations of sort/sort_key through the
   multiset of elements, plus a stable, executable quicksort refinement of
   sort_key. *)
context linorder
begin
lemma mset_insort [simp]:
"mset (insort_key k x xs) = add_mset x (mset xs)"
by (induct xs) simp_all
lemma mset_sort [simp]:
"mset (sort_key k xs) = mset xs"
by (induct xs) simp_all
text \<open>
This lemma shows which properties suffice to show that a function
\<open>f\<close> with \<open>f xs = ys\<close> behaves like sort.
\<close>
(* sort_key is uniquely determined by: same multiset, stability within each
   key class, and sortedness of the keys. *)
lemma properties_for_sort_key:
assumes "mset ys = mset xs"
and "\<And>k. k \<in> set ys \<Longrightarrow> filter (\<lambda>x. f k = f x) ys = filter (\<lambda>x. f k = f x) xs"
and "sorted (map f ys)"
shows "sort_key f xs = ys"
using assms
proof (induct xs arbitrary: ys)
case Nil then show ?case by simp
next
case (Cons x xs)
from Cons.prems(2) have
"\<forall>k \<in> set ys. filter (\<lambda>x. f k = f x) (remove1 x ys) = filter (\<lambda>x. f k = f x) xs"
by (simp add: filter_remove1)
with Cons.prems have "sort_key f xs = remove1 x ys"
by (auto intro!: Cons.hyps simp add: sorted_map_remove1)
moreover from Cons.prems have "x \<in># mset ys"
by auto
then have "x \<in> set ys"
by simp
ultimately show ?case using Cons.prems by (simp add: insort_key_remove1)
qed
lemma properties_for_sort:
assumes multiset: "mset ys = mset xs"
and "sorted ys"
shows "sort xs = ys"
proof (rule properties_for_sort_key)
from multiset show "mset ys = mset xs" .
from \<open>sorted ys\<close> show "sorted (map (\<lambda>x. x) ys)" by simp
from multiset have "length (filter (\<lambda>y. k = y) ys) = length (filter (\<lambda>x. k = x) xs)" for k
by (rule mset_eq_length_filter)
then have "replicate (length (filter (\<lambda>y. k = y) ys)) k =
replicate (length (filter (\<lambda>x. k = x) xs)) k" for k
by simp
then show "k \<in> set ys \<Longrightarrow> filter (\<lambda>y. k = y) ys = filter (\<lambda>x. k = x) xs" for k
by (simp add: replicate_length_filter)
qed
(* with an injective key the stability condition is automatic *)
lemma sort_key_inj_key_eq:
assumes mset_equal: "mset xs = mset ys"
and "inj_on f (set xs)"
and "sorted (map f ys)"
shows "sort_key f xs = ys"
proof (rule properties_for_sort_key)
from mset_equal
show "mset ys = mset xs" by simp
from \<open>sorted (map f ys)\<close>
show "sorted (map f ys)" .
show "[x\<leftarrow>ys . f k = f x] = [x\<leftarrow>xs . f k = f x]" if "k \<in> set ys" for k
proof -
from mset_equal
have set_equal: "set xs = set ys" by (rule mset_eq_setD)
with that have "insert k (set ys) = set ys" by auto
with \<open>inj_on f (set xs)\<close> have inj: "inj_on f (insert k (set ys))"
by (simp add: set_equal)
from inj have "[x\<leftarrow>ys . f k = f x] = filter (HOL.eq k) ys"
by (auto intro!: inj_on_filter_key_eq)
also have "\<dots> = replicate (count (mset ys) k) k"
by (simp add: replicate_count_mset_eq_filter_eq)
also have "\<dots> = replicate (count (mset xs) k) k"
using mset_equal by simp
also have "\<dots> = filter (HOL.eq k) xs"
by (simp add: replicate_count_mset_eq_filter_eq)
also have "\<dots> = [x\<leftarrow>xs . f k = f x]"
using inj by (auto intro!: inj_on_filter_key_eq [symmetric] simp add: set_equal)
finally show ?thesis .
qed
qed
lemma sort_key_eq_sort_key:
assumes "mset xs = mset ys"
and "inj_on f (set xs)"
shows "sort_key f xs = sort_key f ys"
by (rule sort_key_inj_key_eq) (simp_all add: assms)
(* quicksort partition around the middle element's key, proved stable via
   properties_for_sort_key *)
lemma sort_key_by_quicksort:
"sort_key f xs = sort_key f [x\<leftarrow>xs. f x < f (xs ! (length xs div 2))]
@ [x\<leftarrow>xs. f x = f (xs ! (length xs div 2))]
@ sort_key f [x\<leftarrow>xs. f x > f (xs ! (length xs div 2))]" (is "sort_key f ?lhs = ?rhs")
proof (rule properties_for_sort_key)
show "mset ?rhs = mset ?lhs"
by (rule multiset_eqI) auto
show "sorted (map f ?rhs)"
by (auto simp add: sorted_append intro: sorted_map_same)
next
fix l
assume "l \<in> set ?rhs"
let ?pivot = "f (xs ! (length xs div 2))"
have *: "\<And>x. f l = f x \<longleftrightarrow> f x = f l" by auto
have "[x \<leftarrow> sort_key f xs . f x = f l] = [x \<leftarrow> xs. f x = f l]"
unfolding filter_sort by (rule properties_for_sort_key) (auto intro: sorted_map_same)
with * have **: "[x \<leftarrow> sort_key f xs . f l = f x] = [x \<leftarrow> xs. f l = f x]" by simp
have "\<And>x P. P (f x) ?pivot \<and> f l = f x \<longleftrightarrow> P (f l) ?pivot \<and> f l = f x" by auto
then have "\<And>P. [x \<leftarrow> sort_key f xs . P (f x) ?pivot \<and> f l = f x] =
[x \<leftarrow> sort_key f xs. P (f l) ?pivot \<and> f l = f x]" by simp
note *** = this [of "(<)"] this [of "(>)"] this [of "(=)"]
show "[x \<leftarrow> ?rhs. f l = f x] = [x \<leftarrow> ?lhs. f l = f x]"
proof (cases "f l" ?pivot rule: linorder_cases)
case less
then have "f l \<noteq> ?pivot" and "\<not> f l > ?pivot" by auto
with less show ?thesis
by (simp add: filter_sort [symmetric] ** ***)
next
case equal then show ?thesis
by (simp add: * less_le)
next
case greater
then have "f l \<noteq> ?pivot" and "\<not> f l < ?pivot" by auto
with greater show ?thesis
by (simp add: filter_sort [symmetric] ** ***)
qed
qed
lemma sort_by_quicksort:
"sort xs = sort [x\<leftarrow>xs. x < xs ! (length xs div 2)]
@ [x\<leftarrow>xs. x = xs ! (length xs div 2)]
@ sort [x\<leftarrow>xs. x > xs ! (length xs div 2)]" (is "sort ?lhs = ?rhs")
using sort_key_by_quicksort [of "\<lambda>x. x", symmetric] by simp
text \<open>A stable parameterized quicksort\<close>
(* three-way partition of xs by comparing f x with the pivot key *)
definition part :: "('b \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'b list \<Rightarrow> 'b list \<times> 'b list \<times> 'b list" where
"part f pivot xs = ([x \<leftarrow> xs. f x < pivot], [x \<leftarrow> xs. f x = pivot], [x \<leftarrow> xs. pivot < f x])"
lemma part_code [code]:
"part f pivot [] = ([], [], [])"
"part f pivot (x # xs) = (let (lts, eqs, gts) = part f pivot xs; x' = f x in
if x' < pivot then (x # lts, eqs, gts)
else if x' > pivot then (lts, eqs, x # gts)
else (lts, x # eqs, gts))"
by (auto simp add: part_def Let_def split_def)
(* executable sort_key: lists of length \<le> 2 handled directly, otherwise
   partition around the middle element and recurse on the strict parts *)
lemma sort_key_by_quicksort_code [code]:
"sort_key f xs =
(case xs of
[] \<Rightarrow> []
| [x] \<Rightarrow> xs
| [x, y] \<Rightarrow> (if f x \<le> f y then xs else [y, x])
| _ \<Rightarrow>
let (lts, eqs, gts) = part f (f (xs ! (length xs div 2))) xs
in sort_key f lts @ eqs @ sort_key f gts)"
proof (cases xs)
case Nil then show ?thesis by simp
next
case (Cons _ ys) note hyps = Cons show ?thesis
proof (cases ys)
case Nil with hyps show ?thesis by simp
next
case (Cons _ zs) note hyps = hyps Cons show ?thesis
proof (cases zs)
case Nil with hyps show ?thesis by auto
next
case Cons
from sort_key_by_quicksort [of f xs]
have "sort_key f xs = (let (lts, eqs, gts) = part f (f (xs ! (length xs div 2))) xs
in sort_key f lts @ eqs @ sort_key f gts)"
by (simp only: split_def Let_def part_def fst_conv snd_conv)
with hyps Cons show ?thesis by (simp only: list.cases)
qed
qed
qed
end
(* part is implementation detail of the quicksort code equations; hide it. *)
hide_const (open) part
lemma mset_remdups_subset_eq: "mset (remdups xs) \<subseteq># mset xs"
by (induct xs) (auto intro: subset_mset.order_trans)
(* list update swaps one occurrence: remove the old element, add the new *)
lemma mset_update:
"i < length ls \<Longrightarrow> mset (ls[i := v]) = add_mset v (mset ls - {#ls ! i#})"
proof (induct ls arbitrary: i)
case Nil then show ?case by simp
next
case (Cons x xs)
show ?case
proof (cases i)
case 0 then show ?thesis by simp
next
case (Suc i')
with Cons show ?thesis
by (cases \<open>x = xs ! i'\<close>) auto
qed
qed
(* swapping two positions leaves the multiset of elements unchanged *)
lemma mset_swap:
"i < length ls \<Longrightarrow> j < length ls \<Longrightarrow>
mset (ls[j := ls ! i, i := ls ! j]) = mset ls"
by (cases "i = j") (simp_all add: mset_update nth_mem_mset)
subsection \<open>The multiset order\<close>
subsubsection \<open>Well-foundedness\<close>
(* Dershowitz-Manna multiset extension of a relation r:
   mult1 r is the one-step relation (replace one element a by any finite
   multiset K of r-smaller elements); mult r is its transitive closure. *)
definition mult1 :: "('a \<times> 'a) set \<Rightarrow> ('a multiset \<times> 'a multiset) set" where
"mult1 r = {(N, M). \<exists>a M0 K. M = add_mset a M0 \<and> N = M0 + K \<and>
(\<forall>b. b \<in># K \<longrightarrow> (b, a) \<in> r)}"
definition mult :: "('a \<times> 'a) set \<Rightarrow> ('a multiset \<times> 'a multiset) set" where
"mult r = (mult1 r)\<^sup>+"
lemma mult1I:
assumes "M = add_mset a M0" and "N = M0 + K" and "\<And>b. b \<in># K \<Longrightarrow> (b, a) \<in> r"
shows "(N, M) \<in> mult1 r"
using assms unfolding mult1_def by blast
lemma mult1E:
assumes "(N, M) \<in> mult1 r"
obtains a M0 K where "M = add_mset a M0" "N = M0 + K" "\<And>b. b \<in># K \<Longrightarrow> (b, a) \<in> r"
using assms unfolding mult1_def by blast
lemma mono_mult1:
assumes "r \<subseteq> r'" shows "mult1 r \<subseteq> mult1 r'"
unfolding mult1_def using assms by blast
lemma mono_mult:
assumes "r \<subseteq> r'" shows "mult r \<subseteq> mult r'"
unfolding mult_def using mono_mult1[OF assms] trancl_mono by blast
lemma not_less_empty [iff]: "(M, {#}) \<notin> mult1 r"
by (simp add: mult1_def)
(* decompose a one-step descent from add_mset a M0: either the step happened
   inside M0, or the removed element was a itself *)
lemma less_add:
assumes mult1: "(N, add_mset a M0) \<in> mult1 r"
shows
"(\<exists>M. (M, M0) \<in> mult1 r \<and> N = add_mset a M) \<or>
(\<exists>K. (\<forall>b. b \<in># K \<longrightarrow> (b, a) \<in> r) \<and> N = M0 + K)"
proof -
let ?r = "\<lambda>K a. \<forall>b. b \<in># K \<longrightarrow> (b, a) \<in> r"
let ?R = "\<lambda>N M. \<exists>a M0 K. M = add_mset a M0 \<and> N = M0 + K \<and> ?r K a"
obtain a' M0' K where M0: "add_mset a M0 = add_mset a' M0'"
and N: "N = M0' + K"
and r: "?r K a'"
using mult1 unfolding mult1_def by auto
show ?thesis (is "?case1 \<or> ?case2")
proof -
from M0 consider "M0 = M0'" "a = a'"
| K' where "M0 = add_mset a' K'" "M0' = add_mset a K'"
by atomize_elim (simp only: add_eq_conv_ex)
then show ?thesis
proof cases
case 1
with N r have "?r K a \<and> N = M0 + K" by simp
then have ?case2 ..
then show ?thesis ..
next
case 2
from N 2(2) have n: "N = add_mset a (K' + K)" by simp
with r 2(1) have "?R (K' + K) M0" by blast
with n have ?case1 by (simp add: mult1_def)
then show ?thesis ..
qed
qed
qed
(* Well-foundedness of the multiset extension: if r is well-founded then every
   multiset is accessible under mult1 r.  The proof is a nested induction:
   outer induction on the multiset, wf-induction on the added element, and
   accessibility induction on the remainder. *)
lemma all_accessible:
assumes "wf r"
shows "\<forall>M. M \<in> Wellfounded.acc (mult1 r)"
proof
let ?R = "mult1 r"
let ?W = "Wellfounded.acc ?R"
{
fix M M0 a
assume M0: "M0 \<in> ?W"
and wf_hyp: "\<And>b. (b, a) \<in> r \<Longrightarrow> (\<forall>M \<in> ?W. add_mset b M \<in> ?W)"
and acc_hyp: "\<forall>M. (M, M0) \<in> ?R \<longrightarrow> add_mset a M \<in> ?W"
have "add_mset a M0 \<in> ?W"
proof (rule accI [of "add_mset a M0"])
fix N
assume "(N, add_mset a M0) \<in> ?R"
(* by less_add, the descent either stays inside M0 or replaces a itself *)
then consider M where "(M, M0) \<in> ?R" "N = add_mset a M"
| K where "\<forall>b. b \<in># K \<longrightarrow> (b, a) \<in> r" "N = M0 + K"
by atomize_elim (rule less_add)
then show "N \<in> ?W"
proof cases
case 1
from acc_hyp have "(M, M0) \<in> ?R \<longrightarrow> add_mset a M \<in> ?W" ..
from this and \<open>(M, M0) \<in> ?R\<close> have "add_mset a M \<in> ?W" ..
then show "N \<in> ?W" by (simp only: \<open>N = add_mset a M\<close>)
next
case 2
(* add the r-smaller replacements one by one, each preserved by wf_hyp *)
from this(1) have "M0 + K \<in> ?W"
proof (induct K)
case empty
from M0 show "M0 + {#} \<in> ?W" by simp
next
case (add x K)
from add.prems have "(x, a) \<in> r" by simp
with wf_hyp have "\<forall>M \<in> ?W. add_mset x M \<in> ?W" by blast
moreover from add have "M0 + K \<in> ?W" by simp
ultimately have "add_mset x (M0 + K) \<in> ?W" ..
then show "M0 + (add_mset x K) \<in> ?W" by simp
qed
then show "N \<in> ?W" by (simp only: 2(2))
qed
qed
} note tedious_reasoning = this
show "M \<in> ?W" for M
proof (induct M)
show "{#} \<in> ?W"
proof (rule accI)
fix b assume "(b, {#}) \<in> ?R"
with not_less_empty show "b \<in> ?W" by contradiction
qed
fix M a assume "M \<in> ?W"
from \<open>wf r\<close> have "\<forall>M \<in> ?W. add_mset a M \<in> ?W"
proof induct
fix a
assume r: "\<And>b. (b, a) \<in> r \<Longrightarrow> (\<forall>M \<in> ?W. add_mset b M \<in> ?W)"
show "\<forall>M \<in> ?W. add_mset a M \<in> ?W"
proof
fix M assume "M \<in> ?W"
then show "add_mset a M \<in> ?W"
by (rule acc_induct) (rule tedious_reasoning [OF _ r])
qed
qed
from this and \<open>M \<in> ?W\<close> show "add_mset a M \<in> ?W" ..
qed
qed
theorem wf_mult1: "wf r \<Longrightarrow> wf (mult1 r)"
by (rule acc_wfI) (rule all_accessible)
theorem wf_mult: "wf r \<Longrightarrow> wf (mult r)"
unfolding mult_def by (rule wf_trancl) (rule wf_mult1)
subsubsection \<open>Closure-free presentation\<close>
text \<open>One direction.\<close>
(* Transitive-closure-free characterization of mult: (M, N) \<in> mult r (for
   transitive r) iff N = I + J and M = I + K with J nonempty and every element
   of K dominated by some element of J.  Both directions follow. *)
lemma mult_implies_one_step:
assumes
trans: "trans r" and
MN: "(M, N) \<in> mult r"
shows "\<exists>I J K. N = I + J \<and> M = I + K \<and> J \<noteq> {#} \<and> (\<forall>k \<in> set_mset K. \<exists>j \<in> set_mset J. (k, j) \<in> r)"
using MN unfolding mult_def mult1_def
proof (induction rule: converse_trancl_induct)
case (base y)
then show ?case by force
next
case (step y z) note yz = this(1) and zN = this(2) and N_decomp = this(3)
obtain I J K where
N: "N = I + J" "z = I + K" "J \<noteq> {#}" "\<forall>k\<in>#K. \<exists>j\<in>#J. (k, j) \<in> r"
using N_decomp by blast
obtain a M0 K' where
z: "z = add_mset a M0" and y: "y = M0 + K'" and K: "\<forall>b. b \<in># K' \<longrightarrow> (b, a) \<in> r"
using yz by blast
show ?case
(* merge the decompositions, distinguishing whether the removed element a
   sits in the "smaller" part K or in the common part I *)
proof (cases "a \<in># K")
case True
moreover have "\<exists>j\<in>#J. (k, j) \<in> r" if "k \<in># K'" for k
using K N trans True by (meson that transE)
ultimately show ?thesis
by (rule_tac x = I in exI, rule_tac x = J in exI, rule_tac x = "(K - {#a#}) + K'" in exI)
(use z y N in \<open>auto simp del: subset_mset.add_diff_assoc2 dest: in_diffD\<close>)
next
case False
then have "a \<in># I" by (metis N(2) union_iff union_single_eq_member z)
moreover have "M0 = I + K - {#a#}"
using N(2) z by force
ultimately show ?thesis
by (rule_tac x = "I - {#a#}" in exI, rule_tac x = "add_mset a J" in exI,
rule_tac x = "K + K'" in exI)
(use z y N False K in \<open>auto simp: add.assoc\<close>)
qed
qed
(* converse direction: a dominated replacement yields a mult-descent,
   by induction on the size of the dominating part J *)
lemma one_step_implies_mult:
assumes
"J \<noteq> {#}" and
"\<forall>k \<in> set_mset K. \<exists>j \<in> set_mset J. (k, j) \<in> r"
shows "(I + K, I + J) \<in> mult r"
using assms
proof (induction "size J" arbitrary: I J K)
case 0
then show ?case by auto
next
case (Suc n) note IH = this(1) and size_J = this(2)[THEN sym]
obtain J' a where J: "J = add_mset a J'"
using size_J by (blast dest: size_eq_Suc_imp_eq_union)
show ?case
proof (cases "J' = {#}")
case True
then show ?thesis
using J Suc by (fastforce simp add: mult_def mult1_def)
next
case [simp]: False
(* split K by whether the element is below a; the below-a part is handled
   in a single mult1 step, the rest by the induction hypothesis *)
have K: "K = {#x \<in># K. (x, a) \<in> r#} + {#x \<in># K. (x, a) \<notin> r#}"
by simp
have "(I + K, (I + {# x \<in># K. (x, a) \<in> r #}) + J') \<in> mult r"
using IH[of J' "{# x \<in># K. (x, a) \<notin> r#}" "I + {# x \<in># K. (x, a) \<in> r#}"]
J Suc.prems K size_J by (auto simp: ac_simps)
moreover have "(I + {#x \<in># K. (x, a) \<in> r#} + J', I + J) \<in> mult r"
by (fastforce simp: J mult1_def mult_def)
ultimately show ?thesis
unfolding mult_def by simp
qed
qed
lemma subset_implies_mult:
assumes sub: "A \<subset># B"
shows "(A, B) \<in> mult r"
proof -
have ApBmA: "A + (B - A) = B"
using sub by simp
have BmA: "B - A \<noteq> {#}"
using sub by (simp add: Diff_eq_empty_iff_mset subset_mset.less_le_not_le)
thus ?thesis
by (rule one_step_implies_mult[of "B - A" "{#}" _ A, unfolded ApBmA, simplified])
qed
subsection \<open>The multiset extension is cancellative for multiset union\<close>
(* The multiset extension is cancellative w.r.t. union: a common summand Z
   can be removed on both sides (for transitive, irreflexive s). *)
lemma mult_cancel:
assumes "trans s" and "irrefl s"
shows "(X + Z, Y + Z) \<in> mult s \<longleftrightarrow> (X, Y) \<in> mult s" (is "?L \<longleftrightarrow> ?R")
proof
assume ?L thus ?R
(* cancel Z one element at a time *)
proof (induct Z)
case (add z Z)
obtain X' Y' Z' where *: "add_mset z X + Z = Z' + X'" "add_mset z Y + Z = Z' + Y'" "Y' \<noteq> {#}"
"\<forall>x \<in> set_mset X'. \<exists>y \<in> set_mset Y'. (x, y) \<in> s"
using mult_implies_one_step[OF \<open>trans s\<close> add(2)] by auto
(* z lies either in the common part Z' or in both difference parts *)
consider Z2 where "Z' = add_mset z Z2" | X2 Y2 where "X' = add_mset z X2" "Y' = add_mset z Y2"
using *(1,2) by (metis add_mset_remove_trivial_If insert_iff set_mset_add_mset_insert union_iff)
thus ?case
proof (cases)
case 1 thus ?thesis using * one_step_implies_mult[of Y' X' s Z2]
by (auto simp: add.commute[of _ "{#_#}"] add.assoc intro: add(1))
next
(* irreflexivity rules out z being dominated by itself, so some y \<noteq> z
   in Y2 dominates it, and transitivity re-routes X2's witnesses *)
case 2 then obtain y where "y \<in> set_mset Y2" "(z, y) \<in> s" using *(4) \<open>irrefl s\<close>
by (auto simp: irrefl_def)
moreover from this transD[OF \<open>trans s\<close> _ this(2)]
have "x' \<in> set_mset X2 \<Longrightarrow> \<exists>y \<in> set_mset Y2. (x', y) \<in> s" for x'
using 2 *(4)[rule_format, of x'] by auto
ultimately show ?thesis using * one_step_implies_mult[of Y2 X2 s Z'] 2
by (force simp: add.commute[of "{#_#}"] add.assoc[symmetric] intro: add(1))
qed
qed auto
next
assume ?R then obtain I J K
where "Y = I + J" "X = I + K" "J \<noteq> {#}" "\<forall>k \<in> set_mset K. \<exists>j \<in> set_mset J. (k, j) \<in> s"
using mult_implies_one_step[OF \<open>trans s\<close>] by blast
thus ?L using one_step_implies_mult[of J K s "I + Z"] by (auto simp: ac_simps)
qed
lemmas mult_cancel_add_mset =
mult_cancel[of _ _ "{#_#}", unfolded union_mset_add_mset_right add.comm_neutral]
(* maximal cancellation: remove the full common part X \<inter># Y *)
lemma mult_cancel_max:
assumes "trans s" and "irrefl s"
shows "(X, Y) \<in> mult s \<longleftrightarrow> (X - X \<inter># Y, Y - X \<inter># Y) \<in> mult s" (is "?L \<longleftrightarrow> ?R")
proof -
have "X - X \<inter># Y + X \<inter># Y = X" "Y - X \<inter># Y + X \<inter># Y = Y" by (auto simp flip: count_inject)
thus ?thesis using mult_cancel[OF assms, of "X - X \<inter># Y" "X \<inter># Y" "Y - X \<inter># Y"] by auto
qed
subsection \<open>Quasi-executable version of the multiset extension\<close>
text \<open>
Predicate variants of \<open>mult\<close> and the reflexive closure of \<open>mult\<close>, which are
executable whenever the given predicate \<open>P\<close> is. Together with the standard
code equations for \<open>(\<inter>#)\<close> and \<open>(-)\<close> this should yield quadratic
(with respect to calls to \<open>P\<close>) implementations of \<open>multp\<close> and \<open>multeqp\<close>.
\<close>
(* Quasi-executable predicate versions of the multiset order: first strip the
   common part Z = M \<inter># N, then check the one-step dominance condition on the
   remainders.  Executable as soon as P is. *)
definition multp :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset \<Rightarrow> bool" where
"multp P N M =
(let Z = M \<inter># N; X = M - Z in
X \<noteq> {#} \<and> (let Y = N - Z in (\<forall>y \<in> set_mset Y. \<exists>x \<in> set_mset X. P y x)))"
(* Reflexive variant: drops the X \<noteq> {#} requirement, so equal multisets
   (empty remainders) are accepted as well. *)
definition multeqp :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a multiset \<Rightarrow> 'a multiset \<Rightarrow> bool" where
"multeqp P N M =
(let Z = M \<inter># N; X = M - Z; Y = N - Z in
(\<forall>y \<in> set_mset Y. \<exists>x \<in> set_mset X. P y x))"
(* Soundness/completeness of multp w.r.t. mult, for irreflexive transitive R. *)
lemma multp_iff:
assumes "irrefl R" and "trans R" and [simp]: "\<And>x y. P x y \<longleftrightarrow> (x, y) \<in> R"
shows "multp P N M \<longleftrightarrow> (N, M) \<in> mult R" (is "?L \<longleftrightarrow> ?R")
proof -
have *: "M \<inter># N + (N - M \<inter># N) = N" "M \<inter># N + (M - M \<inter># N) = M"
"(M - M \<inter># N) \<inter># (N - M \<inter># N) = {#}" by (auto simp flip: count_inject)
show ?thesis
proof
assume ?L thus ?R
using one_step_implies_mult[of "M - M \<inter># N" "N - M \<inter># N" R "M \<inter># N"] *
by (auto simp: multp_def Let_def)
next
{ fix I J K :: "'a multiset" assume "(I + J) \<inter># (I + K) = {#}"
then have "I = {#}" by (metis inter_union_distrib_right union_eq_empty)
} note [dest!] = this
assume ?R thus ?L
using mult_implies_one_step[OF assms(2), of "N - M \<inter># N" "M - M \<inter># N"]
mult_cancel_max[OF assms(2,1), of "N" "M"] * by (auto simp: multp_def)
qed
qed
(* Same correspondence for the reflexive closure of mult. *)
lemma multeqp_iff:
assumes "irrefl R" and "trans R" and "\<And>x y. P x y \<longleftrightarrow> (x, y) \<in> R"
shows "multeqp P N M \<longleftrightarrow> (N, M) \<in> (mult R)\<^sup>="
proof -
{ assume "N \<noteq> M" "M - M \<inter># N = {#}"
then obtain y where "count N y \<noteq> count M y" by (auto simp flip: count_inject)
then have "\<exists>y. count M y < count N y" using \<open>M - M \<inter># N = {#}\<close>
by (auto simp flip: count_inject dest!: le_neq_implies_less fun_cong[of _ _ y])
}
then have "multeqp P N M \<longleftrightarrow> multp P N M \<or> N = M"
by (auto simp: multeqp_def multp_def Let_def in_diff_count)
thus ?thesis using multp_iff[OF assms] by simp
qed
subsubsection \<open>Partial-order properties\<close>
(* Elimination rule for one mult1 step over the strict order of a preorder;
   strengthens mult1E with the fact that a itself cannot occur in K. *)
lemma (in preorder) mult1_lessE:
assumes "(N, M) \<in> mult1 {(a, b). a < b}"
obtains a M0 K where "M = add_mset a M0" "N = M0 + K"
"a \<notin># K" "\<And>b. b \<in># K \<Longrightarrow> b < a"
proof -
from assms obtain a M0 K where "M = add_mset a M0" "N = M0 + K" and
*: "b \<in># K \<Longrightarrow> b < a" for b by (blast elim: mult1E)
moreover from * [of a] have "a \<notin># K" by auto
ultimately show thesis by (auto intro: that)
qed
(* Multisets over a preorder form a partial order under the multiset
   extension of the element order (the Dershowitz-Manna ordering). *)
instantiation multiset :: (preorder) order
begin
definition less_multiset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> bool"
where "M' < M \<longleftrightarrow> (M', M) \<in> mult {(x', x). x' < x}"
definition less_eq_multiset :: "'a multiset \<Rightarrow> 'a multiset \<Rightarrow> bool"
where "less_eq_multiset M' M \<longleftrightarrow> M' < M \<or> M' = M"
instance
proof -
(* Irreflexivity: M < M would yield a nonempty K each of whose elements is
   dominated by another element of K, contradicting finiteness. *)
have irrefl: "\<not> M < M" for M :: "'a multiset"
proof
assume "M < M"
then have MM: "(M, M) \<in> mult {(x, y). x < y}" by (simp add: less_multiset_def)
have "trans {(x'::'a, x). x' < x}"
by (metis (mono_tags, lifting) case_prodD case_prodI less_trans mem_Collect_eq transI)
moreover note MM
ultimately have "\<exists>I J K. M = I + J \<and> M = I + K
\<and> J \<noteq> {#} \<and> (\<forall>k\<in>set_mset K. \<exists>j\<in>set_mset J. (k, j) \<in> {(x, y). x < y})"
by (rule mult_implies_one_step)
then obtain I J K where "M = I + J" and "M = I + K"
and "J \<noteq> {#}" and "(\<forall>k\<in>set_mset K. \<exists>j\<in>set_mset J. (k, j) \<in> {(x, y). x < y})" by blast
then have *: "K \<noteq> {#}" and **: "\<forall>k\<in>set_mset K. \<exists>j\<in>set_mset K. k < j" by auto
have "finite (set_mset K)" by simp
moreover note **
ultimately have "set_mset K = {}"
by (induct rule: finite_induct) (auto intro: order_less_trans)
with * show False by simp
qed
(* Transitivity comes directly from transitivity of the transitive closure. *)
have trans: "K < M \<Longrightarrow> M < N \<Longrightarrow> K < N" for K M N :: "'a multiset"
unfolding less_multiset_def mult_def by (blast intro: trancl_trans)
show "OFCLASS('a multiset, order_class)"
by standard (auto simp add: less_eq_multiset_def irrefl dest: trans)
qed
end \<comment> \<open>FIXME avoid junk stemming from type class interpretation\<close>
(* Convenience elimination: strict self-comparison is absurd. *)
lemma mset_le_irrefl [elim!]:
fixes M :: "'a::preorder multiset"
shows "M < M \<Longrightarrow> R"
by simp
subsubsection \<open>Monotonicity of multiset union\<close>
(* Monotonicity of multiset union w.r.t. the multiset ordering. *)
lemma mult1_union: "(B, D) \<in> mult1 r \<Longrightarrow> (C + B, C + D) \<in> mult1 r"
by (force simp: mult1_def)
(* Strict monotonicity in the right argument, lifted through the transitive
   closure by induction over trancl. *)
lemma union_le_mono2: "B < D \<Longrightarrow> C + B < C + (D::'a::preorder multiset)"
apply (unfold less_multiset_def mult_def)
apply (erule trancl_induct)
apply (blast intro: mult1_union)
apply (blast intro: mult1_union trancl_trans)
done
(* Left-argument version, by commutativity of union. *)
lemma union_le_mono1: "B < D \<Longrightarrow> B + C < D + (C::'a::preorder multiset)"
apply (subst add.commute [of B C])
apply (subst add.commute [of D C])
apply (erule union_le_mono2)
done
(* Strict monotonicity in both arguments simultaneously. *)
lemma union_less_mono:
fixes A B C D :: "'a::preorder multiset"
shows "A < C \<Longrightarrow> B < D \<Longrightarrow> A + B < C + D"
by (blast intro!: union_le_mono1 union_le_mono2 less_trans)
(* Hence multiset union is compatible with the order. *)
instantiation multiset :: (preorder) ordered_ab_semigroup_add
begin
instance
by standard (auto simp add: less_eq_multiset_def intro: union_le_mono2)
end
subsubsection \<open>Termination proofs with multiset orders\<close>
(* Membership lemmas phrased for the termination-proof tactics below, which
   walk a {#x#} + ... representation of a multiset. *)
lemma multi_member_skip: "x \<in># XS \<Longrightarrow> x \<in># {# y #} + XS"
and multi_member_this: "x \<in># {# x #} + XS"
and multi_member_last: "x \<in># {# x #}"
by auto
(* Strict / weak multiset measures over the lexicographic pair order, used by
   the size-change termination prover (SCNP reconstruction). *)
definition "ms_strict = mult pair_less"
definition "ms_weak = ms_strict \<union> Id"
lemma ms_reduction_pair: "reduction_pair (ms_strict, ms_weak)"
unfolding reduction_pair_def ms_strict_def ms_weak_def pair_less_def
by (auto intro: wf_mult1 wf_trancl simp: mult_def)
(* Introduction rules: a max_strict decrease on the non-shared parts gives a
   strict (resp. weak) decrease of the whole multisets. *)
lemma smsI:
"(set_mset A, set_mset B) \<in> max_strict \<Longrightarrow> (Z + A, Z + B) \<in> ms_strict"
unfolding ms_strict_def
by (rule one_step_implies_mult) (auto simp add: max_strict_def pair_less_def elim!:max_ext.cases)
lemma wmsI:
"(set_mset A, set_mset B) \<in> max_strict \<or> A = {#} \<and> B = {#}
\<Longrightarrow> (Z + A, Z + B) \<in> ms_weak"
unfolding ms_weak_def ms_strict_def
by (auto simp add: pair_less_def max_strict_def elim!:max_ext.cases intro: one_step_implies_mult)
(* Pointwise weak comparison of two multisets, element by element. *)
inductive pw_leq
where
pw_leq_empty: "pw_leq {#} {#}"
| pw_leq_step: "\<lbrakk>(x,y) \<in> pair_leq; pw_leq X Y \<rbrakk> \<Longrightarrow> pw_leq ({#x#} + X) ({#y#} + Y)"
lemma pw_leq_lstep:
"(x, y) \<in> pair_leq \<Longrightarrow> pw_leq {#x#} {#y#}"
by (drule pw_leq_step) (rule pw_leq_empty, simp)
(* A pointwise comparison splits into a shared part Z and strictly related
   (or both empty) remainders A, B. *)
lemma pw_leq_split:
assumes "pw_leq X Y"
shows "\<exists>A B Z. X = A + Z \<and> Y = B + Z \<and> ((set_mset A, set_mset B) \<in> max_strict \<or> (B = {#} \<and> A = {#}))"
using assms
proof induct
case pw_leq_empty thus ?case by auto
next
case (pw_leq_step x y X Y)
then obtain A B Z where
[simp]: "X = A + Z" "Y = B + Z"
and 1[simp]: "(set_mset A, set_mset B) \<in> max_strict \<or> (B = {#} \<and> A = {#})"
by auto
from pw_leq_step consider "x = y" | "(x, y) \<in> pair_less"
unfolding pair_leq_def by auto
thus ?case
proof cases
(* equal heads join the shared part Z *)
case [simp]: 1
have "{#x#} + X = A + ({#y#}+Z) \<and> {#y#} + Y = B + ({#y#}+Z) \<and>
((set_mset A, set_mset B) \<in> max_strict \<or> (B = {#} \<and> A = {#}))"
by auto
thus ?thesis by blast
next
(* strictly related heads join the remainders *)
case 2
let ?A' = "{#x#} + A" and ?B' = "{#y#} + B"
have "{#x#} + X = ?A' + Z"
"{#y#} + Y = ?B' + Z"
by auto
moreover have
"(set_mset ?A', set_mset ?B') \<in> max_strict"
using 1 2 unfolding max_strict_def
by (auto elim!: max_ext.cases)
ultimately show ?thesis by blast
qed
qed
(* Main entry points used by the termination tactic: combine a pointwise
   comparison of the contexts Z, Z' with a max_strict comparison of A, B. *)
lemma
assumes pwleq: "pw_leq Z Z'"
shows ms_strictI: "(set_mset A, set_mset B) \<in> max_strict \<Longrightarrow> (Z + A, Z' + B) \<in> ms_strict"
and ms_weakI1: "(set_mset A, set_mset B) \<in> max_strict \<Longrightarrow> (Z + A, Z' + B) \<in> ms_weak"
and ms_weakI2: "(Z + {#}, Z' + {#}) \<in> ms_weak"
proof -
from pw_leq_split[OF pwleq]
obtain A' B' Z''
where [simp]: "Z = A' + Z''" "Z' = B' + Z''"
and mx_or_empty: "(set_mset A', set_mset B') \<in> max_strict \<or> (A' = {#} \<and> B' = {#})"
by blast
{
assume max: "(set_mset A, set_mset B) \<in> max_strict"
from mx_or_empty
have "(Z'' + (A + A'), Z'' + (B + B')) \<in> ms_strict"
proof
assume max': "(set_mset A', set_mset B') \<in> max_strict"
with max have "(set_mset (A + A'), set_mset (B + B')) \<in> max_strict"
by (auto simp: max_strict_def intro: max_ext_additive)
thus ?thesis by (rule smsI)
next
assume [simp]: "A' = {#} \<and> B' = {#}"
show ?thesis by (rule smsI) (auto intro: max)
qed
thus "(Z + A, Z' + B) \<in> ms_strict" by (simp add: ac_simps)
thus "(Z + A, Z' + B) \<in> ms_weak" by (simp add: ms_weak_def)
}
from mx_or_empty
have "(Z'' + A', Z'' + B') \<in> ms_weak" by (rule wmsI)
thus "(Z + {#}, Z' + {#}) \<in> ms_weak" by (simp add: ac_simps)
qed
(* Simple normalisation facts consumed by the regrouping conversion below. *)
lemma empty_neutral: "{#} + x = x" "x + {#} = x"
and nonempty_plus: "{# x #} + rs \<noteq> {#}"
and nonempty_single: "{# x #} \<noteq> {#}"
by auto
(* Register the multiset measure machinery (term constructors, membership and
   non-emptiness tactics, pw_leq unfolding) with the SCNP termination-proof
   reconstruction tool. *)
setup \<open>
let
(* build the type 'T multiset and multiset literals from element terms *)
fun msetT T = Type (\<^type_name>\<open>multiset\<close>, [T]);
fun mk_mset T [] = Const (\<^const_abbrev>\<open>Mempty\<close>, msetT T)
| mk_mset T [x] =
Const (\<^const_name>\<open>add_mset\<close>, T --> msetT T --> msetT T) $ x $
Const (\<^const_abbrev>\<open>Mempty\<close>, msetT T)
| mk_mset T (x :: xs) =
Const (\<^const_name>\<open>plus\<close>, msetT T --> msetT T --> msetT T) $
mk_mset T [x] $ mk_mset T xs
(* prove membership of the m-th listed element by skipping m predecessors *)
fun mset_member_tac ctxt m i =
if m <= 0 then
resolve_tac ctxt @{thms multi_member_this} i ORELSE
resolve_tac ctxt @{thms multi_member_last} i
else
resolve_tac ctxt @{thms multi_member_skip} i THEN mset_member_tac ctxt (m - 1) i
fun mset_nonempty_tac ctxt =
resolve_tac ctxt @{thms nonempty_plus} ORELSE'
resolve_tac ctxt @{thms nonempty_single}
fun regroup_munion_conv ctxt =
Function_Lib.regroup_conv ctxt \<^const_abbrev>\<open>Mempty\<close> \<^const_name>\<open>plus\<close>
(map (fn t => t RS eq_reflection) (@{thms ac_simps} @ @{thms empty_neutral}))
(* unfold a pw_leq goal into per-element pair_leq subgoals *)
fun unfold_pwleq_tac ctxt i =
(resolve_tac ctxt @{thms pw_leq_step} i THEN (fn st => unfold_pwleq_tac ctxt (i + 1) st))
ORELSE (resolve_tac ctxt @{thms pw_leq_lstep} i)
ORELSE (resolve_tac ctxt @{thms pw_leq_empty} i)
val set_mset_simps = [@{thm set_mset_empty}, @{thm set_mset_single}, @{thm set_mset_union},
@{thm Un_insert_left}, @{thm Un_empty_left}]
in
ScnpReconstruct.multiset_setup (ScnpReconstruct.Multiset
{
msetT=msetT, mk_mset=mk_mset, mset_regroup_conv=regroup_munion_conv,
mset_member_tac=mset_member_tac, mset_nonempty_tac=mset_nonempty_tac,
mset_pwleq_tac=unfold_pwleq_tac, set_of_simps=set_mset_simps,
smsI'= @{thm ms_strictI}, wmsI2''= @{thm ms_weakI2}, wmsI1= @{thm ms_weakI1},
reduction_pair = @{thm ms_reduction_pair}
})
end
\<close>
subsection \<open>Legacy theorem bindings\<close>
(* Legacy names kept for backward compatibility; each is an instance of a
   general algebraic fact about commutative monoids / lattices / orders. *)
lemmas multi_count_eq = multiset_eq_iff [symmetric]
lemma union_commute: "M + N = N + (M::'a multiset)"
by (fact add.commute)
lemma union_assoc: "(M + N) + K = M + (N + (K::'a multiset))"
by (fact add.assoc)
lemma union_lcomm: "M + (N + K) = N + (M + (K::'a multiset))"
by (fact add.left_commute)
lemmas union_ac = union_assoc union_commute union_lcomm add_mset_commute
lemma union_right_cancel: "M + K = N + K \<longleftrightarrow> M = (N::'a multiset)"
by (fact add_right_cancel)
lemma union_left_cancel: "K + M = K + N \<longleftrightarrow> M = (N::'a multiset)"
by (fact add_left_cancel)
lemma multi_union_self_other_eq: "(A::'a multiset) + X = A + Y \<Longrightarrow> X = Y"
by (fact add_left_imp_eq)
lemma mset_subset_trans: "(M::'a multiset) \<subset># K \<Longrightarrow> K \<subset># N \<Longrightarrow> M \<subset># N"
by (fact subset_mset.less_trans)
lemma multiset_inter_commute: "A \<inter># B = B \<inter># A"
by (fact subset_mset.inf.commute)
lemma multiset_inter_assoc: "A \<inter># (B \<inter># C) = A \<inter># B \<inter># C"
by (fact subset_mset.inf.assoc [symmetric])
lemma multiset_inter_left_commute: "A \<inter># (B \<inter># C) = B \<inter># (A \<inter># C)"
by (fact subset_mset.inf.left_commute)
lemmas multiset_inter_ac =
multiset_inter_commute
multiset_inter_assoc
multiset_inter_left_commute
lemma mset_le_not_refl: "\<not> M < (M::'a::preorder multiset)"
by (fact less_irrefl)
lemma mset_le_trans: "K < M \<Longrightarrow> M < N \<Longrightarrow> K < (N::'a::preorder multiset)"
by (fact less_trans)
lemma mset_le_not_sym: "M < N \<Longrightarrow> \<not> N < (M::'a::preorder multiset)"
by (fact less_not_sym)
lemma mset_le_asym: "M < N \<Longrightarrow> (\<not> P \<Longrightarrow> N < (M::'a::preorder multiset)) \<Longrightarrow> P"
by (fact less_asym)
(* Teach Nitpick how to render its internal function-based multiset values as
   readable add_mset terms (with element multiplicities expanded). *)
declaration \<open>
let
fun multiset_postproc _ maybe_name all_values (T as Type (_, [elem_T])) (Const _ $ t') =
let
val (maybe_opt, ps) =
Nitpick_Model.dest_plain_fun t'
||> (~~)
||> map (apsnd (snd o HOLogic.dest_number))
(* replicate each element according to its count; unknown counts are
   wrapped in the "maybe" constructor *)
fun elems_for t =
(case AList.lookup (=) ps t of
SOME n => replicate n t
| NONE => [Const (maybe_name, elem_T --> elem_T) $ t])
in
(case maps elems_for (all_values elem_T) @
(if maybe_opt then [Const (Nitpick_Model.unrep_mixfix (), elem_T)] else []) of
[] => Const (\<^const_name>\<open>zero_class.zero\<close>, T)
| ts =>
foldl1 (fn (s, t) => Const (\<^const_name>\<open>add_mset\<close>, elem_T --> T --> T) $ s $ t)
ts)
end
| multiset_postproc _ _ _ _ t = t
in Nitpick_Model.register_term_postprocessor \<^typ>\<open>'a multiset\<close> multiset_postproc end
\<close>
subsection \<open>Naive implementation using lists\<close>
(* Code generator setup: multisets are represented as (unsorted, possibly
   duplicated) lists via the abstraction function mset. *)
code_datatype mset
lemma [code]: "add_mset x (mset xs) = mset (x # xs)"
by simp
lemma [code]: "Multiset.is_empty (mset xs) \<longleftrightarrow> List.null xs"
by (simp add: Multiset.is_empty_def List.null_def)
lemma union_code [code]: "mset xs + mset ys = mset (xs @ ys)"
by simp
lemma [code]: "image_mset f (mset xs) = mset (map f xs)"
by simp
lemma [code]: "filter_mset f (mset xs) = mset (filter f xs)"
by simp
(* difference: remove each element of ys once from xs *)
lemma [code]: "mset xs - mset ys = mset (fold remove1 ys xs)"
by (rule sym, induct ys arbitrary: xs) (simp_all add: diff_add diff_right_commute diff_diff_add)
(* intersection: collect in zs those elements of xs that can still be
   matched against a remaining occurrence in ys *)
lemma [code]:
"mset xs \<inter># mset ys =
mset (snd (fold (\<lambda>x (ys, zs).
if x \<in> set ys then (remove1 x ys, x # zs) else (ys, zs)) xs (ys, [])))"
proof -
have "\<And>zs. mset (snd (fold (\<lambda>x (ys, zs).
if x \<in> set ys then (remove1 x ys, x # zs) else (ys, zs)) xs (ys, zs))) =
(mset xs \<inter># mset ys) + mset zs"
by (induct xs arbitrary: ys)
(auto simp add: inter_add_right1 inter_add_right2 ac_simps)
then show ?thesis by simp
qed
(* sup: the unmatched remainder of ys is appended to all of xs *)
lemma [code]:
"mset xs \<union># mset ys =
mset (case_prod append (fold (\<lambda>x (ys, zs). (remove1 x ys, x # zs)) xs (ys, [])))"
proof -
have "\<And>zs. mset (case_prod append (fold (\<lambda>x (ys, zs). (remove1 x ys, x # zs)) xs (ys, zs))) =
(mset xs \<union># mset ys) + mset zs"
by (induct xs arbitrary: ys) (simp_all add: multiset_eq_iff)
then show ?thesis by simp
qed
declare in_multiset_in_set [code_unfold]
(* count by a single fold over the representing list *)
lemma [code]: "count (mset xs) x = fold (\<lambda>y. if x = y then Suc else id) xs 0"
proof -
have "\<And>n. fold (\<lambda>y. if x = y then Suc else id) xs n = count (mset xs) x + n"
by (induct xs) simp_all
then show ?thesis by simp
qed
declare set_mset_mset [code]
declare sorted_list_of_multiset_mset [code]
lemma [code]: \<comment> \<open>not very efficient, but representation-ignorant!\<close>
"mset_set A = mset (sorted_list_of_set A)"
apply (cases "finite A")
apply simp_all
apply (induct A rule: finite_induct)
apply simp_all
done
declare size_mset [code]
(* Decide \<subseteq>#, \<subset># and = in one pass: None = not \<subseteq>#, Some True = \<subset>#,
   Some False = equal.  Each head of xs is extracted once from ys. *)
fun subset_eq_mset_impl :: "'a list \<Rightarrow> 'a list \<Rightarrow> bool option" where
"subset_eq_mset_impl [] ys = Some (ys \<noteq> [])"
| "subset_eq_mset_impl (Cons x xs) ys = (case List.extract ((=) x) ys of
None \<Rightarrow> None
| Some (ys1,_,ys2) \<Rightarrow> subset_eq_mset_impl xs (ys1 @ ys2))"
lemma subset_eq_mset_impl: "(subset_eq_mset_impl xs ys = None \<longleftrightarrow> \<not> mset xs \<subseteq># mset ys) \<and>
(subset_eq_mset_impl xs ys = Some True \<longleftrightarrow> mset xs \<subset># mset ys) \<and>
(subset_eq_mset_impl xs ys = Some False \<longrightarrow> mset xs = mset ys)"
proof (induct xs arbitrary: ys)
case (Nil ys)
show ?case by (auto simp: subset_mset.zero_less_iff_neq_zero)
next
case (Cons x xs ys)
show ?case
proof (cases "List.extract ((=) x) ys")
case None
hence x: "x \<notin> set ys" by (simp add: extract_None_iff)
{
assume "mset (x # xs) \<subseteq># mset ys"
from set_mset_mono[OF this] x have False by simp
} note nle = this
moreover
{
assume "mset (x # xs) \<subset># mset ys"
hence "mset (x # xs) \<subseteq># mset ys" by auto
from nle[OF this] have False .
}
ultimately show ?thesis using None by auto
next
case (Some res)
obtain ys1 y ys2 where res: "res = (ys1,y,ys2)" by (cases res, auto)
note Some = Some[unfolded res]
from extract_SomeE[OF Some] have "ys = ys1 @ x # ys2" by simp
hence id: "mset ys = add_mset x (mset (ys1 @ ys2))"
by auto
show ?thesis unfolding subset_eq_mset_impl.simps
unfolding Some option.simps split
unfolding id
using Cons[of "ys1 @ ys2"]
unfolding subset_mset_def subseteq_mset_def by auto
qed
qed
lemma [code]: "mset xs \<subseteq># mset ys \<longleftrightarrow> subset_eq_mset_impl xs ys \<noteq> None"
using subset_eq_mset_impl[of xs ys] by (cases "subset_eq_mset_impl xs ys", auto)
lemma [code]: "mset xs \<subset># mset ys \<longleftrightarrow> subset_eq_mset_impl xs ys = Some True"
using subset_eq_mset_impl[of xs ys] by (cases "subset_eq_mset_impl xs ys", auto)
(* Equality of multisets, implemented via subset_eq_mset_impl. *)
instantiation multiset :: (equal) equal
begin
definition
[code del]: "HOL.equal A (B :: 'a multiset) \<longleftrightarrow> A = B"
lemma [code]: "HOL.equal (mset xs) (mset ys) \<longleftrightarrow> subset_eq_mset_impl xs ys = Some False"
unfolding equal_multiset_def
using subset_eq_mset_impl[of xs ys] by (cases "subset_eq_mset_impl xs ys", auto)
instance
by standard (simp add: equal_multiset_def)
end
declare sum_mset_sum_list [code]
lemma [code]: "prod_mset (mset xs) = fold times xs 1"
proof -
have "\<And>x. fold times xs x = prod_mset (mset xs) * x"
by (induct xs) (simp_all add: ac_simps)
then show ?thesis by simp
qed
text \<open>
Exercise for the casual reader: add implementations for \<^term>\<open>(\<le>)\<close>
and \<^term>\<open>(<)\<close> (multiset order).
\<close>
text \<open>Quickcheck generators\<close>
(* Quickcheck setup: lift list generators to multiset generators by applying
   mset at the term level. *)
definition (in term_syntax)
msetify :: "'a::typerep list \<times> (unit \<Rightarrow> Code_Evaluation.term)
\<Rightarrow> 'a multiset \<times> (unit \<Rightarrow> Code_Evaluation.term)" where
[code_unfold]: "msetify xs = Code_Evaluation.valtermify mset {\<cdot>} xs"
notation fcomp (infixl "\<circ>>" 60)
notation scomp (infixl "\<circ>\<rightarrow>" 60)
(* random generator: generate a random list, then convert *)
instantiation multiset :: (random) random
begin
definition
"Quickcheck_Random.random i = Quickcheck_Random.random i \<circ>\<rightarrow> (\<lambda>xs. Pair (msetify xs))"
instance ..
end
no_notation fcomp (infixl "\<circ>>" 60)
no_notation scomp (infixl "\<circ>\<rightarrow>" 60)
(* exhaustive generator: enumerate lists, then convert *)
instantiation multiset :: (full_exhaustive) full_exhaustive
begin
definition full_exhaustive_multiset :: "('a multiset \<times> (unit \<Rightarrow> term) \<Rightarrow> (bool \<times> term list) option) \<Rightarrow> natural \<Rightarrow> (bool \<times> term list) option"
where
"full_exhaustive_multiset f i = Quickcheck_Exhaustive.full_exhaustive (\<lambda>xs. f (msetify xs)) i"
instance ..
end
hide_const (open) msetify
subsection \<open>BNF setup\<close>
(* The multiset relator: two multisets are related iff they have list
   representations that are related elementwise by list_all2. *)
definition rel_mset where
"rel_mset R X Y \<longleftrightarrow> (\<exists>xs ys. mset xs = X \<and> mset ys = Y \<and> list_all2 R xs ys)"
(* Inserting a pair (x, y) at the same position j of both lists inserts
   exactly (x, y) into the zipped multiset. *)
lemma mset_zip_take_Cons_drop_twice:
assumes "length xs = length ys" "j \<le> length xs"
shows "mset (zip (take j xs @ x # drop j xs) (take j ys @ y # drop j ys)) =
add_mset (x,y) (mset (zip xs ys))"
using assms
proof (induct xs ys arbitrary: x y j rule: list_induct2)
case Nil
thus ?case
by simp
next
case (Cons x xs y ys)
thus ?case
proof (cases "j = 0")
case True
thus ?thesis
by simp
next
case False
then obtain k where k: "j = Suc k"
by (cases j) simp
hence "k \<le> length xs"
using Cons.prems by auto
hence "mset (zip (take k xs @ x # drop k xs) (take k ys @ y # drop k ys)) =
add_mset (x,y) (mset (zip xs ys))"
by (rule Cons.hyps(2))
thus ?thesis
unfolding k by auto
qed
qed
(* Any permutation xs' of xs admits a matching permutation ys' of ys that
   preserves the multiset of zipped pairs. *)
lemma ex_mset_zip_left:
assumes "length xs = length ys" "mset xs' = mset xs"
shows "\<exists>ys'. length ys' = length xs' \<and> mset (zip xs' ys') = mset (zip xs ys)"
using assms
proof (induct xs ys arbitrary: xs' rule: list_induct2)
case Nil
thus ?case
by auto
next
case (Cons x xs y ys xs')
obtain j where j_len: "j < length xs'" and nth_j: "xs' ! j = x"
by (metis Cons.prems in_set_conv_nth list.set_intros(1) mset_eq_setD)
define xsa where "xsa = take j xs' @ drop (Suc j) xs'"
have "mset xs' = {#x#} + mset xsa"
unfolding xsa_def using j_len nth_j
by (metis Cons_nth_drop_Suc union_mset_add_mset_right add_mset_remove_trivial add_diff_cancel_left'
append_take_drop_id mset.simps(2) mset_append)
hence ms_x: "mset xsa = mset xs"
by (simp add: Cons.prems)
then obtain ysa where
len_a: "length ysa = length xsa" and ms_a: "mset (zip xsa ysa) = mset (zip xs ys)"
using Cons.hyps(2) by blast
define ys' where "ys' = take j ysa @ y # drop j ysa"
have xs': "xs' = take j xsa @ x # drop j xsa"
using ms_x j_len nth_j Cons.prems xsa_def
by (metis append_eq_append_conv append_take_drop_id diff_Suc_Suc Cons_nth_drop_Suc length_Cons
length_drop size_mset)
have j_len': "j \<le> length xsa"
using j_len xs' xsa_def
by (metis add_Suc_right append_take_drop_id length_Cons length_append less_eq_Suc_le not_less)
have "length ys' = length xs'"
unfolding ys'_def using Cons.prems len_a ms_x
by (metis add_Suc_right append_take_drop_id length_Cons length_append mset_eq_length)
moreover have "mset (zip xs' ys') = mset (zip (x # xs) (y # ys))"
unfolding xs' ys'_def
by (rule trans[OF mset_zip_take_Cons_drop_twice])
(auto simp: len_a ms_a j_len')
ultimately show ?case
by blast
qed
(* list_all2 is invariant under reordering the left list, provided the right
   list is reordered accordingly. *)
lemma list_all2_reorder_left_invariance:
assumes rel: "list_all2 R xs ys" and ms_x: "mset xs' = mset xs"
shows "\<exists>ys'. list_all2 R xs' ys' \<and> mset ys' = mset ys"
proof -
have len: "length xs = length ys"
using rel list_all2_conv_all_nth by auto
obtain ys' where
len': "length xs' = length ys'" and ms_xy: "mset (zip xs' ys') = mset (zip xs ys)"
using len ms_x by (metis ex_mset_zip_left)
have "list_all2 R xs' ys'"
using assms(1) len' ms_xy unfolding list_all2_iff by (blast dest: mset_eq_setD)
moreover have "mset ys' = mset ys"
using len len' ms_xy map_snd_zip mset_map by metis
ultimately show ?thesis
by blast
qed
(* Every multiset has a list representation. *)
lemma ex_mset: "\<exists>xs. mset xs = X"
by (induct X) (simp, metis mset.simps(2))
(* Inductive predicate lifting: P holds of every element of the multiset. *)
inductive pred_mset :: "('a \<Rightarrow> bool) \<Rightarrow> 'a multiset \<Rightarrow> bool"
where
"pred_mset P {#}"
| "\<lbrakk>P a; pred_mset P M\<rbrakk> \<Longrightarrow> pred_mset P (add_mset a M)"
(* Register 'a multiset as a bounded natural functor with map image_mset,
   set function set_mset, cardinality bound natLeq, relator rel_mset and
   predicator pred_mset; this enables (co)datatype nesting through multisets. *)
bnf "'a multiset"
map: image_mset
sets: set_mset
bd: natLeq
wits: "{#}"
rel: rel_mset
pred: pred_mset
proof -
show "image_mset id = id"
by (rule image_mset.id)
show "image_mset (g \<circ> f) = image_mset g \<circ> image_mset f" for f g
unfolding comp_def by (rule ext) (simp add: comp_def image_mset.compositionality)
show "(\<And>z. z \<in> set_mset X \<Longrightarrow> f z = g z) \<Longrightarrow> image_mset f X = image_mset g X" for f g X
by (induct X) simp_all
show "set_mset \<circ> image_mset f = (`) f \<circ> set_mset" for f
by auto
show "card_order natLeq"
by (rule natLeq_card_order)
show "BNF_Cardinal_Arithmetic.cinfinite natLeq"
by (rule natLeq_cinfinite)
show "ordLeq3 (card_of (set_mset X)) natLeq" for X
by transfer
(auto intro!: ordLess_imp_ordLeq simp: finite_iff_ordLess_natLeq[symmetric] multiset_def)
(* relator composition, via reordering the middle lists *)
show "rel_mset R OO rel_mset S \<le> rel_mset (R OO S)" for R S
unfolding rel_mset_def[abs_def] OO_def
apply clarify
subgoal for X Z Y xs ys' ys zs
apply (drule list_all2_reorder_left_invariance [where xs = ys' and ys = zs and xs' = ys])
apply (auto intro: list_all2_trans)
done
done
(* rel_mset coincides with the canonical Grp-based relator *)
show "rel_mset R =
(\<lambda>x y. \<exists>z. set_mset z \<subseteq> {(x, y). R x y} \<and>
image_mset fst z = x \<and> image_mset snd z = y)" for R
unfolding rel_mset_def[abs_def]
apply (rule ext)+
apply safe
apply (rule_tac x = "mset (zip xs ys)" in exI;
auto simp: in_set_zip list_all2_iff simp flip: mset_map)
apply (rename_tac XY)
apply (cut_tac X = XY in ex_mset)
apply (erule exE)
apply (rename_tac xys)
apply (rule_tac x = "map fst xys" in exI)
apply (auto simp: mset_map)
apply (rule_tac x = "map snd xys" in exI)
apply (auto simp: mset_map list_all2I subset_eq zip_map_fst_snd)
done
show "z \<in> set_mset {#} \<Longrightarrow> False" for z
by auto
show "pred_mset P = (\<lambda>x. Ball (set_mset x) P)" for P
proof (intro ext iffI)
fix x
assume "pred_mset P x"
then show "Ball (set_mset x) P" by (induct pred: pred_mset; simp)
next
fix x
assume "Ball (set_mset x) P"
then show "pred_mset P x" by (induct x; auto intro: pred_mset.intros)
qed
qed
(* Inductive characterisation of the multiset relator: built up pairwise from
   the empty multiset.  Shown equivalent to rel_mset below. *)
inductive rel_mset'
where
Zero[intro]: "rel_mset' R {#} {#}"
| Plus[intro]: "\<lbrakk>R a b; rel_mset' R M N\<rbrakk> \<Longrightarrow> rel_mset' R (add_mset a M) (add_mset b N)"
lemma rel_mset_Zero: "rel_mset R {#} {#}"
unfolding rel_mset_def Grp_def by auto
declare multiset.count[simp]
declare Abs_multiset_inverse[simp]
declare multiset.count_inverse[simp]
declare union_preserves_multiset[simp]
(* rel_mset is closed under adding a related pair of elements. *)
lemma rel_mset_Plus:
assumes ab: "R a b"
and MN: "rel_mset R M N"
shows "rel_mset R (add_mset a M) (add_mset b N)"
proof -
have "\<exists>ya. add_mset a (image_mset fst y) = image_mset fst ya \<and>
add_mset b (image_mset snd y) = image_mset snd ya \<and>
set_mset ya \<subseteq> {(x, y). R x y}"
if "R a b" and "set_mset y \<subseteq> {(x, y). R x y}" for y
using that by (intro exI[of _ "add_mset (a,b) y"]) auto
thus ?thesis
using assms
unfolding multiset.rel_compp_Grp Grp_def by blast
qed
lemma rel_mset'_imp_rel_mset: "rel_mset' R M N \<Longrightarrow> rel_mset R M N"
by (induct rule: rel_mset'.induct) (auto simp: rel_mset_Zero rel_mset_Plus)
lemma rel_mset_size: "rel_mset R M N \<Longrightarrow> size M = size N"
unfolding multiset.rel_compp_Grp Grp_def by auto
(* Simultaneous induction over two multisets, adding on either side. *)
lemma multiset_induct2[case_names empty addL addR]:
assumes empty: "P {#} {#}"
and addL: "\<And>a M N. P M N \<Longrightarrow> P (add_mset a M) N"
and addR: "\<And>a M N. P M N \<Longrightarrow> P M (add_mset a N)"
shows "P M N"
apply(induct N rule: multiset_induct)
apply(induct M rule: multiset_induct, rule empty, erule addL)
apply(induct M rule: multiset_induct, erule addR, erule addR)
done
(* Simultaneous induction for equally sized multisets, adding in lockstep. *)
lemma multiset_induct2_size[consumes 1, case_names empty add]:
assumes c: "size M = size N"
and empty: "P {#} {#}"
and add: "\<And>a b M N a b. P M N \<Longrightarrow> P (add_mset a M) (add_mset b N)"
shows "P M N"
using c
proof (induct M arbitrary: N rule: measure_induct_rule[of size])
case (less M)
show ?case
proof(cases "M = {#}")
case True hence "N = {#}" using less.prems by auto
thus ?thesis using True empty by auto
next
case False then obtain M1 a where M: "M = add_mset a M1" by (metis multi_nonempty_split)
have "N \<noteq> {#}" using False less.prems by auto
then obtain N1 b where N: "N = add_mset b N1" by (metis multi_nonempty_split)
have "size M1 = size N1" using less.prems unfolding M N by auto
thus ?thesis using M N less.hyps add by auto
qed
qed
(* Inversion of image_mset on the left: the image of add_mset a M contains
   f a, and removing it leaves the image of M. *)
lemma msed_map_invL:
assumes "image_mset f (add_mset a M) = N"
shows "\<exists>N1. N = add_mset (f a) N1 \<and> image_mset f M = N1"
proof -
have "f a \<in># N"
using assms multiset.set_map[of f "add_mset a M"] by auto
then obtain N1 where N: "N = add_mset (f a) N1" using multi_member_split by metis
have "image_mset f M = N1" using assms unfolding N by simp
thus ?thesis using N by blast
qed
(* Inversion of image_mset on the right: an element b of the image must have
   a preimage a in M. *)
lemma msed_map_invR:
assumes "image_mset f M = add_mset b N"
shows "\<exists>M1 a. M = add_mset a M1 \<and> f a = b \<and> image_mset f M1 = N"
proof -
obtain a where a: "a \<in># M" and fa: "f a = b"
using multiset.set_map[of f M] unfolding assms
by (metis image_iff union_single_eq_member)
then obtain M1 where M: "M = add_mset a M1" using multi_member_split by metis
have "image_mset f M1 = N" using assms unfolding M fa[symmetric] by simp
thus ?thesis using M fa by blast
qed
(* Inversion of rel_mset on the left: a distinguished element a of the left
   multiset has an R-partner b in the right multiset. *)
lemma msed_rel_invL:
assumes "rel_mset R (add_mset a M) N"
shows "\<exists>N1 b. N = add_mset b N1 \<and> R a b \<and> rel_mset R M N1"
proof -
obtain K where KM: "image_mset fst K = add_mset a M"
and KN: "image_mset snd K = N" and sK: "set_mset K \<subseteq> {(a, b). R a b}"
using assms
unfolding multiset.rel_compp_Grp Grp_def by auto
obtain K1 ab where K: "K = add_mset ab K1" and a: "fst ab = a"
and K1M: "image_mset fst K1 = M" using msed_map_invR[OF KM] by auto
obtain N1 where N: "N = add_mset (snd ab) N1" and K1N1: "image_mset snd K1 = N1"
using msed_map_invL[OF KN[unfolded K]] by auto
have Rab: "R a (snd ab)" using sK a unfolding K by auto
have "rel_mset R M N1" using sK K1M K1N1
unfolding K multiset.rel_compp_Grp Grp_def by auto
thus ?thesis using N Rab by auto
qed
(* Symmetric inversion on the right side. *)
lemma msed_rel_invR:
assumes "rel_mset R M (add_mset b N)"
shows "\<exists>M1 a. M = add_mset a M1 \<and> R a b \<and> rel_mset R M1 N"
proof -
obtain K where KN: "image_mset snd K = add_mset b N"
and KM: "image_mset fst K = M" and sK: "set_mset K \<subseteq> {(a, b). R a b}"
using assms
unfolding multiset.rel_compp_Grp Grp_def by auto
obtain K1 ab where K: "K = add_mset ab K1" and b: "snd ab = b"
and K1N: "image_mset snd K1 = N" using msed_map_invR[OF KN] by auto
obtain M1 where M: "M = add_mset (fst ab) M1" and K1M1: "image_mset fst K1 = M1"
using msed_map_invL[OF KM[unfolded K]] by auto
have Rab: "R (fst ab) b" using sK b unfolding K by auto
have "rel_mset R M1 N" using sK K1N K1M1
unfolding K multiset.rel_compp_Grp Grp_def by auto
thus ?thesis using M Rab by auto
qed
(* Converse direction: rel_mset implies the inductive rel_mset', by strong
   induction on the size of M, peeling off one related pair at a time. *)
lemma rel_mset_imp_rel_mset':
assumes "rel_mset R M N"
shows "rel_mset' R M N"
using assms proof(induct M arbitrary: N rule: measure_induct_rule[of size])
case (less M)
have c: "size M = size N" using rel_mset_size[OF less.prems] .
show ?case
proof(cases "M = {#}")
case True hence "N = {#}" using c by simp
thus ?thesis using True rel_mset'.Zero by auto
next
case False then obtain M1 a where M: "M = add_mset a M1" by (metis multi_nonempty_split)
obtain N1 b where N: "N = add_mset b N1" and R: "R a b" and ms: "rel_mset R M1 N1"
using msed_rel_invL[OF less.prems[unfolded M]] by auto
have "rel_mset' R M1 N1" using less.hyps[of M1 N1] ms unfolding M by simp
thus ?thesis using rel_mset'.Plus[of R a b, OF R] unfolding M N by simp
qed
qed
(* The two characterisations coincide. *)
lemma rel_mset_rel_mset': "rel_mset R M N = rel_mset' R M N"
using rel_mset_imp_rel_mset' rel_mset'_imp_rel_mset by auto
subsection \<open>Size setup\<close>
lemma size_multiset_o_map: "size_multiset g \<circ> image_mset f = size_multiset (g \<circ> f)"
apply (rule ext)
subgoal for x by (induct x) auto
done
setup \<open>
BNF_LFP_Size.register_size_global \<^type_name>\<open>multiset\<close> \<^const_name>\<open>size_multiset\<close>
@{thm size_multiset_overloaded_def}
@{thms size_multiset_empty size_multiset_single size_multiset_union size_empty size_single
size_union}
@{thms size_multiset_o_map}
\<close>
subsection \<open>Lemmas about Size\<close>
lemma size_mset_SucE: "size A = Suc n \<Longrightarrow> (\<And>a B. A = {#a#} + B \<Longrightarrow> size B = n \<Longrightarrow> P) \<Longrightarrow> P"
by (cases A) (auto simp add: ac_simps)
lemma size_Suc_Diff1: "x \<in># M \<Longrightarrow> Suc (size (M - {#x#})) = size M"
using arg_cong[OF insert_DiffM, of _ _ size] by simp
lemma size_Diff_singleton: "x \<in># M \<Longrightarrow> size (M - {#x#}) = size M - 1"
by (simp flip: size_Suc_Diff1)
lemma size_Diff_singleton_if: "size (A - {#x#}) = (if x \<in># A then size A - 1 else size A)"
by (simp add: diff_single_trivial size_Diff_singleton)
lemma size_Un_Int: "size A + size B = size (A \<union># B) + size (A \<inter># B)"
by (metis inter_subset_eq_union size_union subset_mset.diff_add union_diff_inter_eq_sup)
lemma size_Un_disjoint: "A \<inter># B = {#} \<Longrightarrow> size (A \<union># B) = size A + size B"
using size_Un_Int[of A B] by simp
lemma size_Diff_subset_Int: "size (M - M') = size M - size (M \<inter># M')"
by (metis diff_intersect_left_idem size_Diff_submset subset_mset.inf_le1)
lemma diff_size_le_size_Diff: "size (M :: _ multiset) - size M' \<le> size (M - M')"
by (simp add: diff_le_mono2 size_Diff_subset_Int size_mset_mono)
lemma size_Diff1_less: "x\<in># M \<Longrightarrow> size (M - {#x#}) < size M"
by (rule Suc_less_SucD) (simp add: size_Suc_Diff1)
lemma size_Diff2_less: "x\<in># M \<Longrightarrow> y\<in># M \<Longrightarrow> size (M - {#x#} - {#y#}) < size M"
by (metis less_imp_diff_less size_Diff1_less size_Diff_subset_Int)
lemma size_Diff1_le: "size (M - {#x#}) \<le> size M"
by (cases "x \<in># M") (simp_all add: size_Diff1_less less_imp_le diff_single_trivial)
lemma size_psubset: "M \<subseteq># M' \<Longrightarrow> size M < size M' \<Longrightarrow> M \<subset># M'"
using less_irrefl subset_mset_def by blast
hide_const (open) wcount
end
|
## 1. Visualizing Distributions to Investigate Movie Review Bias ##
# Load the movie ratings, one row per movie/site observation.
# NOTE(review): `read_csv` comes from readr; assumes the tidyverse is attached.
reviews <- read_csv("movie_reviews.csv")
## 2. Comparing Averages Among Rating Sites ##
# Mean rating per site; the summary column keeps the default name "mean(Rating)".
review_avgs <- reviews %>%
group_by(Rating_Site) %>%
summarize(mean(Rating))
## 3. Visualizing Differences Among Groups Using Bar Charts ##
# Recompute the averages with an explicit column name so ggplot can map it.
review_avgs <- reviews %>%
group_by(Rating_Site) %>%
summarize(avg = mean(Rating))
# One bar per site; stat = "identity" plots the precomputed averages
# rather than row counts.
ggplot(data = review_avgs,
aes(x = Rating_Site, y = avg)) +
geom_bar(stat = "identity")
## 4. Using Histograms to Understand Distributions ##
# Pooled distribution of all ratings across every site.
ggplot(data = reviews,
aes(x = Rating)) +
geom_histogram(bins = 30)
## 5. Comparing Distributions of Multiple Variables: Faceted Plots ##
# ggplot(data = reviews,
# aes(x = Rating)) +
# geom_histogram(bins = 30)
# Same histogram, split into one panel per rating site.
ggplot(data = reviews,
aes(x = Rating)) +
geom_histogram(bins = 30) +
facet_wrap(~Rating_Site, nrow= 2)
## 6. Comparing Distributions of Multiple Variables: Specifying Aesthetics ##
# ggplot(data = reviews,
# aes(x = Rating)) +
# geom_histogram(bins = 30)
# Overlaid histograms distinguished by fill color instead of facets.
ggplot(data = reviews,
aes(x = Rating, fill= Rating_Site)) +
geom_histogram(bins = 30)
## 7. Visualizing Averages and Variation ##
# Box plots show median, quartiles, and outliers per site at once.
ggplot(data = reviews,
aes(x = Rating_Site, y = Rating)) +
geom_boxplot()
## 8. Anatomy of a Box Plot ##
# ggplot(data = reviews,
# aes(x = Rating_Site, y = Rating)) +
# geom_boxplot()
# Final styled version: title plus a plain white panel background.
ggplot(data = reviews,
aes(x = Rating_Site, y = Rating)) +
geom_boxplot() +
labs(title = "Comparison of Movie Ratings") +
theme(panel.background = element_rect(fill = "white"))
/*
* Copyright (c) 2012, 2013 Aldebaran Robotics. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the COPYING file.
*/
#include <errno.h>
#include <fcntl.h>
#include <windows.h>
#include <process.h>
#include <signal.h>
#include <sstream>
#include <string>
#include <boost/filesystem.hpp>
#include <locale>
#include <qi/assert.hpp>
#include <qi/log.hpp>
#include <qi/os.hpp>
#include <qi/path.hpp>
qiLogCategory("qi.os");
namespace qi
{
namespace os
{
namespace
{
// Flattens a list of wide-string arguments into one mutable, space-separated,
// NUL-terminated buffer, as CreateProcessW's lpCommandLine requires.
std::vector<wchar_t> toCWStrings(const std::vector<std::wstring>& wstrlist)
{
std::vector<wchar_t> cstrlist;
// NOTE(review): reserves one slot per argument, not per character, so this
// only partially avoids reallocations (reserve is merely a hint).
cstrlist.reserve(wstrlist.size() + 1);
for(const auto& wstr : wstrlist)
{
for(const auto& wchr : wstr)
{
cstrlist.push_back(wchr);
}
// Argument separator. NOTE(review): arguments are NOT quoted, so an
// argument containing a space would be split by the child process --
// confirm callers never pass such arguments.
cstrlist.push_back(L' ');
}
// Terminating NUL (a trailing separator space remains before it).
cstrlist.push_back(0);
return cstrlist;
}
template <class WStringSequence>
int winSpawn(const WStringSequence& wArgs)
{
auto cwArgs = toCWStrings(wArgs);
STARTUPINFOW startupInfo { };
PROCESS_INFORMATION processInfo { };
const BOOL spawned = CreateProcessW(
NULL, &cwArgs[0], NULL, NULL, false, NULL, NULL, NULL,
&startupInfo, &processInfo);
if(!spawned)
return -1;
return static_cast<int>(processInfo.dwProcessId);
}
// Returns a human-readable description of a Win32 error code, or an empty
// string if no message is available.
std::string messageForError(DWORD errorCode)
{
  LPVOID lpMsgBuf = nullptr;
  // Ask the system to allocate and fill the message buffer.
  const DWORD len = FormatMessage(
      FORMAT_MESSAGE_ALLOCATE_BUFFER |
      FORMAT_MESSAGE_FROM_SYSTEM |
      FORMAT_MESSAGE_IGNORE_INSERTS,
      NULL,
      errorCode,
      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
      (LPTSTR) &lpMsgBuf,
      0, NULL );
  // Fix: guard against FormatMessage failure (the original dereferenced a
  // possibly-null buffer).
  if (len == 0 || lpMsgBuf == nullptr)
    return std::string();
  // Fix: copy the text out, then release the buffer FormatMessage allocated
  // with LocalAlloc -- the original code leaked it on every call.
  // NOTE(review): assumes an ANSI build (FormatMessage == FormatMessageA);
  // confirm the project is not built with UNICODE defined.
  std::string message(static_cast<const char*>(lpMsgBuf));
  LocalFree(lpMsgBuf);
  return message;
}
}
// POSIX-like spawnvp: launches the program named by argv[0] with the given
// NULL-terminated argument vector. Returns the child pid, or -1 on failure.
int spawnvp(char* const argv[])
{
std::vector<std::wstring> wargs;
for (const char* const* argIt = &argv[0]; *argIt != NULL; ++argIt)
{
// qi::Path handles the narrow-to-wide (UTF-8) conversion of each argument.
wargs.push_back(qi::Path(*argIt).bfsPath().wstring());
}
return winSpawn(wargs);
}
// POSIX-like spawnlp: variadic variant taking the program name followed by
// its arguments, terminated by a NULL sentinel. Returns the child pid, or
// -1 on failure.
int spawnlp(const char* argv, ...)
{
std::vector<std::wstring> wargs;
{
va_list ap;
const char* arg = nullptr;
int i = 0;
va_start(ap, argv);
// Walk the varargs until the NULL terminator, converting each argument
// to a wide string via qi::Path.
for (arg = argv; arg != NULL; arg = va_arg(ap, const char*), ++i)
{
wargs.push_back(qi::Path(arg).bfsPath().wstring());
}
va_end(ap);
}
return winSpawn(wargs);
}
// Runs `command` through the C runtime's shell, converting the command to a
// wide string first so non-ASCII paths survive. Returns the shell's status.
int system(const char* command)
{
boost::filesystem::path fname(command, qi::unicodeFacet());
return _wsystem(fname.wstring(qi::unicodeFacet()).c_str());
}
// Current process id (CRT wrapper).
int getpid()
{
return _getpid();
}
// Current thread id (Win32).
int gettid()
{
return GetCurrentThreadId();
}
int waitpid(int pid, int* status)
{
auto logLastError = [&pid](const std::string& doingWhat)
{
qiLogDebug() << "Error waiting for pid " << pid << " "
<< doingWhat << ": " << messageForError(GetLastError());
};
const HANDLE handle = OpenProcess(
PROCESS_QUERY_INFORMATION | PROCESS_VM_READ | SYNCHRONIZE, FALSE,
static_cast<DWORD>(pid));
if (!handle)
{
*status = 127;
logLastError("checking the process");
return 0;
}
const DWORD result = WaitForSingleObject(handle, INFINITE);
QI_ASSERT(result != WAIT_TIMEOUT);
if (result == WAIT_FAILED)
{
*status = 127;
logLastError("waiting for the process");
return 0;
}
DWORD exitCode = 0xFFFFFF;
if (!GetExitCodeProcess(handle, &exitCode))
{
*status = 127;
logLastError("retrieving the exit code");
return 0;
}
*status = exitCode;
return 0;
}
int kill(int pid, int sig)
{
if(sig == 0) // if signal is 0, just check that it is running
return isProcessRunning(pid) ? 0 : -1;
qiLogDebug() << "Killing " << pid << ": checking the process";
const HANDLE handle = OpenProcess(
PROCESS_QUERY_INFORMATION | PROCESS_VM_READ | PROCESS_TERMINATE, FALSE,
static_cast<DWORD>(pid));
if (!handle)
return -1;
qiLogDebug() << "Killing " << pid << ": terminating the process";
if (!TerminateProcess(handle, sig))
return -1;
qiLogDebug() << "Killing " << pid << ": waiting the end of the process";
WaitForSingleObject(handle, INFINITE);
qiLogDebug() << "Killed " << pid;
return 0;
}
} // os
} // qi
|
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule Word2Vec_jll
using Base
using Base: UUID
import JLLWrappers
# The two macros below expand into the standard JLL boilerplate that locates
# and loads the Word2Vec binary artifacts for this platform.
# NOTE(review): JLL packages are normally machine-generated; avoid hand edits.
JLLWrappers.@generate_main_file_header("Word2Vec")
JLLWrappers.@generate_main_file("Word2Vec", UUID("9fbe4022-c126-5389-b4b2-756cc9f654d0"))
end # module Word2Vec_jll
|
##################################################
const ∑ = sum
# Inner product ⟨f, g⟩ for a discrete orthogonal polynomial family:
# sums f(x)·g(x)·w(x) over the integer nodes of the (finite) domain of P.
function innerproduct(P::Type{<:AbstractDiscreteOrthogonalPolynomial}, f, g)
    dom = domain(P)
    fn = x -> f(x) * g(x) * weight_function(P)(x)
    a, b = first(dom), last(dom)
    if !isinf(a) && !isinf(b)
        # Fix: reuse the endpoints computed above; previously `a` and `b`
        # were dead variables and `first(dom):last(dom)` was recomputed.
        return ∑(fn(x) for x in a:b)
    else
        ## what to do if infinite
    end
end
##
## --------------------------------------------------
##
abstract type AbstractDiscreteWeightFunction{T,X,N} <: AbstractDiscreteOrthogonalPolynomial{T,X} end
abstract type DiscreteWeightFunction{T,X,N} <: AbstractDiscreteWeightFunction{T,X,N} end
export DiscreteWeightFunction
"""
DiscreteWeightFunction
For a discrete measure, `dλ = ∑ wᵢ δ(x - xᵢ)`, specified through two
vectors, `xs` and `ws`, a collection of monic orthogonal polynomials is
produced through Darboux's formula for `α_n` and `β_n` using the
3-term recurrence defined by `π_{n+1} = (x-α_n)⋅π_n - β_n⋅π_{n-1}` (`An=1`, `Bn=-α_n`, `Cn=β_n`)
and the discrete Stieltjes method [Guatschi §3.1](https://www.cs.purdue.edu/homes/wxg/Madrid.pdf).
# Example
Discrete Chebyshev by its weight function (uniform on 0,1,…,N-1)
```jldoctest
julia> using Polynomials, SpecialPolynomials
julia> const SP = SpecialPolynomials;
julia> N = 9
9
julia> xs, ws = collect(0:N-1), ones(N); # w(x) = ∑ wⱼ⋅δ(x-xⱼ)
julia> SP.@register0 DWF DiscreteWeightFunction
julia> SP.@register_discrete_weight_function(DWF, xs, ws)
julia> [SP.Bn.(DWF, 0:N-1) SP.Cn.(DWF, 0:N-1)]
9×2 Matrix{Float64}:
-4.0 9.0
-4.0 6.66667
-4.0 5.13333
-4.0 4.62857
-4.0 4.12698
-4.0 3.53535
-4.0 2.83217
-4.0 2.01026
-4.0 1.06667
julia> i,j = 3,4; ## check that ∫pᵢpⱼdw = 0 for i,j=3,4
julia> sum(basis(DWF,i)(x) * basis(DWF,j)(x) * w for (x,w) in zip(xs, ws))
5.684341886080802e-14
julia> ## Gogin, Hirvensalo (https://doi.org/10.1007/s10958-017-3410-8) characterization
D(k,N,x) = sum((-1)^l * binomial(k+l,k) * binomial(N-l,k-l) * SP.generalized_binomial(x,l) for l in 0:k)
D (generic function with 1 method)
julia> x = variable()
Polynomials.Polynomial(x)
julia> ps,qs = [D(k,N-1,x) for k in 0:N-1], [basis(DWF, k)(x) for k in 0:N-1];
julia> all(qs .* [p[end] for p in ps] .≈ ps)
true
```
"""
DiscreteWeightFunction
# Symbol used when printing basis elements of a discrete-weight family.
basis_symbol(::Type{<:AbstractDiscreteWeightFunction}) = "W"
# Hook: concrete subtypes must override this to return the node/weight
# vectors (xs, ws) defining the discrete measure.
xs_ws(::Type{<:AbstractDiscreteWeightFunction}) = throw(ArgumentError("No default method"))
# (Gautschi](https://www.cs.purdue.edu/homes/wxg/Madrid.pdf), section 3.1
# compute α_n = <tπ_n,π_n>/<π_n,π_n>, β_n = <π_n,π_n>/<π_{n-1},π_{n-1}>
# where <p,q> = ∑_1^N w_k p(k) q(k)
# Returns (-αs, βs) so the caller can use Bn = -α_k, Cn = β_k directly in
# the monic three-term recurrence π_{k+1} = (x - α_k)π_k - β_k π_{k-1}.
function discrete_stieltjes(W::Type{<:AbstractDiscreteWeightFunction})
xs,ws = xs_ws(W)
N = length(xs)
n = N
# k = 0 case
# π_{-1} ≡ 0, π_0 ≡ 1, evaluated at every node.
πk_1, πk = zeros(N), ones(N)
βk = β0 = norm_k = sum(ws)/1 # <π_0, π_0> = ∑ w_k 1 ⋅ 1
αk = α0 = ∑(ws[k] * xs[k] for k in eachindex(ws)) / norm_k
αs = [α0]
βs = [β0]
# Build the remaining N-1 coefficients, carrying π_{k-1}, π_k as node values.
for _ ∈ 1:(n-1)
πk1 = (xs .- αk) .* πk - βk * πk_1 # use just computed αk, βk to find π_{k+1} = (x-αk)⋅π_k - βk⋅π_{k-1}
norm_k1 = ∑(ws[k] * πk1[k]^2 for k in eachindex(ws)) # <π_{k+1}, π_{k+1}>
# Darboux
# α_{k+1} = <x ⋅ π_{k+1}, π_{k+1}> / <π_{k+1}, π_{k+1}>,
# β_{k+1} = <π_{k+1}, π_{k+1}> / <π_k, π_k>,
αk1 = ∑(ws[k] * xs[k] * πk1[k]^2 for k in eachindex(ws)) / norm_k1
βk1 = norm_k1/norm_k
push!(αs, αk1)
push!(βs, βk1)
αk, βk = αk1, βk1
πk_1, πk = πk, πk1
norm_k = norm_k1
end
(-αs, βs)
end
# Monic family: the leading recurrence coefficient is always 1.
An(::Type{W}, n) where {W <: AbstractDiscreteWeightFunction} = one(eltype(W))
# NOTE(review): each Bn/Cn call re-runs the full discrete Stieltjes
# recursion (recomputing all coefficients); consider caching per W if these
# are called repeatedly.
Bn(::Type{W}, k::Int) where {W <:AbstractDiscreteWeightFunction} = discrete_stieltjes(W)[1][k+1]
Cn(::Type{W}, k::Int) where {W <:AbstractDiscreteWeightFunction} = discrete_stieltjes(W)[2][k+1]
Polynomials.domain(::Type{<:DiscreteWeightFunction}) = Polynomials.Interval(-Inf, Inf)
# Discrete inner product: ⟨f,g⟩ = ∑ wₖ f(xₖ) g(xₖ) over the nodes of W.
innerproduct(W::Type{<:AbstractDiscreteWeightFunction}, f, g) = ∑(wk * f(xk) * g(xk) for (xk, wk) in zip(xs_ws(W)...))
|
Stefi has degrees in medical history (UC Berkeley) and international public health (UCLA). In 2010 she co-founded the Sustainable Healthcare Education network to integrate sustainability and health into medical and nursing education. She led public health teaching at Norwich Medical School, global public health at Azim Premji University in Bangalore, and violence prevention for the Medical Peace Work Partnership at the University of Bergen, Norway. |
## ----------- General functions
# Simple field accessors shared by all deviation types.
name(d :: AbstractDeviation) = d.name;
short_description(d :: AbstractDeviation) = d.shortStr;
long_description(d :: AbstractDeviation) = d.longStr;
# Exponent p of the weighted p-norm used when scalarizing the deviation.
norm_p(d :: AbstractDeviation) = d.normP;
"""
$(SIGNATURES)
Retrieve data values
"""
# Deep copy so callers cannot mutate the deviation's internal state.
get_data_values(d :: AbstractDeviation{F1}) where F1 = deepcopy(d.dataV);
"""
$(SIGNATURES)
Retrieve model values
"""
# Deep copy for the same reason as `get_data_values`.
get_model_values(d :: AbstractDeviation{F1}) where F1 = deepcopy(d.modelV);
"""
$(SIGNATURES)
Retrieve std errors of data values. Not valid for all types of deviations.
Returns `nothing` if std errors are not set (are all 0).
"""
function get_std_errors(d :: AbstractDeviation{F1}) where F1
if all(d.stdV .== zero(F1))
return nothing
else
return deepcopy(d.stdV);
end
end
"""
$(SIGNATURES)
Set model values in an existing deviation.
"""
function set_model_values(d :: AbstractDeviation{F1}, modelV) where F1
dataV = get_data_values(d);
if typeof(modelV) != typeof(dataV)
println(modelV);
println(dataV);
error("Type mismatch in $(d.name): $(typeof(modelV)) vs $(typeof(dataV))");
end
@assert size(modelV) == size(dataV) "Size mismatch: $(size(modelV)) vs $(size(dataV))"
d.modelV = deepcopy(modelV);
return nothing
end
"""
$(SIGNATURES)
Retrieve weights. Returns scalar 1 for scalar deviations.
"""
function get_weights(d :: AbstractDeviation{F1}) where F1
return d.wtV
end
"""
set_weights
Does nothing for Deviation types that do not have weights.
"""
function set_weights!(d :: AbstractDeviation{F1}, wtV) where F1
if isa(d, Deviation)
@assert typeof(wtV) == typeof(get_data_values(d))
@assert size(wtV) == size(get_data_values(d))
@assert all(wtV .> 0.0)
d.wtV = deepcopy(wtV);
end
return nothing
end
"""
$(SIGNATURES)
Validate a `Deviation`.
"""
validate_deviation(d :: AbstractDeviation) = true
## ------------- Computing the scalar deviation
"""
$(SIGNATURES)
Compute the scalar deviation between model and data values.
Using a weighted sum of deviations to a power. By default: simply mean abs deviation.
Note: Using a weighted norm would not increase the overall deviation for a moment that fits poorly.
"""
function scalar_deviation(modelV :: AbstractArray{F1}, dataV :: AbstractArray{F1},
wtV; p :: F1 = one(F1)) where F1 <: AbstractFloat
totalWt = sum(wtV);
@assert totalWt > 1e-8 "Total weight too small: $totalWt"
# Scaling `wtV` so it sums to 1 partially undoes the `^(1/p)` scaling below.
devV = (wtV ./ totalWt) .* (abs.(modelV .- dataV)) .^ p;
scalarDev = totalWt * sum(devV);
return scalarDev
end
scalar_deviation(model :: F1, data :: F1, wt :: F1;
p :: F1 = one(F1)) where F1 <: AbstractFloat =
wt * (abs(model - data) ^ p);
## --------------- Display
# This is never called for concrete types (why?)
# Default pretty-printer: "name: short description".
Base.show(io :: IO, d :: AbstractDeviation{F1}) where F1 =
Base.print(io, "$(name(d)): ", short_description(d));
## Formatted short deviation for display
# Returns "shortStr: <formatted scalar deviation>".
function short_display(d :: AbstractDeviation{F1}; inclScalarWt :: Bool = true) where F1
_, scalarStr = scalar_dev(d, inclScalarWt = inclScalarWt);
return d.shortStr * ": " * scalarStr;
end
"""
$(SIGNATURES)
Show a deviation using the show function contained in its definition.
Optionally, a file path can be provided. If none is provided, the path inside the deviation is used.
"""
function show_deviation(d :: AbstractDeviation{F1}; showModel :: Bool = true, fPath :: String = "") where F1
return d.showFct(d, showModel = showModel, fPath = fPath)
end
"""
Open the output stream for showing a deviation.

Uses `fPath` when given, otherwise the path stored in the deviation; falls
back to `stdout` when both are empty. `writeMode` is the mode string passed
to `open` (e.g. `"w"` or `"a"`).
"""
function open_show_path(d :: AbstractDeviation{F1};
    fPath :: String = "", writeMode :: String = "w") where F1

    # Fall back to the path stored in the deviation when none is given.
    if isempty(fPath)
        showPath = d.showPath;
    else
        showPath = fPath;
    end
    if isempty(showPath)
        io = stdout;
    else
        # Fix: the file was previously always opened with "w", silently
        # ignoring the `writeMode` keyword argument.
        io = open(showPath, writeMode);
    end
    return io
end
"""
Close an output stream obtained from `open_show_path`, leaving `stdout`
untouched.
"""
function close_show_path(d :: AbstractDeviation{F1}, io) where F1
    # Only close handles we opened ourselves; never close stdout.
    (io === stdout) || close(io);
    return nothing
end
# ------------- |
[STATEMENT]
lemma return_result_heap_code [code]:
"returns_result_heap h p r h' \<longleftrightarrow> (case h \<turnstile> p of Inr (r', h'') \<Rightarrow> r = r' \<and> h' = h'' | Inl _ \<Rightarrow> False)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. h \<turnstile> p \<rightarrow>\<^sub>r r \<rightarrow>\<^sub>h h' = (case h \<turnstile> p of Inl x \<Rightarrow> False | Inr (r', h'') \<Rightarrow> r = r' \<and> h' = h'')
[PROOF STEP]
by(auto simp add: returns_result_heap_def returns_result_def returns_heap_def split: sum.splits) |
{-# LANGUAGE ScopedTypeVariables #-}
{-|
Module : Numeric.LinearAlgebra.Static.Util
Description : Functions to manipulate vectors and matrices
Copyright : (c) Ryan Orendorff, 2020
License : BSD3
Stability : experimental
-}
module Numeric.LinearAlgebra.Static.Util
( atM
, atV
, foldR
, foldR1
)
where
import GHC.TypeLits
import Numeric.LinearAlgebra.Static
import qualified Numeric.LinearAlgebra as LA
import Data.Fin
import qualified Data.Vector.Storable as V
---------------------------------------------------------------
-- Get data from vector/matrix with compile time bound check --
---------------------------------------------------------------
-- | Get a value from a vector at a given coordinate/index with guarantee
-- that the element index is within the length of the vector at compile
-- time.
--
-- A 'Fin' @n@ index cannot exceed the vector length, so the unchecked
-- 'LA.!' lookup below cannot go out of bounds.
atV
:: forall n
. (KnownNat n)
=> R n
-> Fin n
-> Double
atV v p = extract v LA.! (fromFin p)
-- | Get element from matrix at a given coordinate/index with guarantee that
-- the element index is within the shape of the matrix at compile time.
--
-- The 'Fin' @m@ / 'Fin' @n@ indices cannot exceed the matrix dimensions,
-- so the unchecked 'LA.atIndex' lookup below cannot go out of bounds.
atM
:: forall m n
. (KnownNat m, KnownNat n)
=> L m n
-> Fin m
-> Fin n
-> Double
atM mat m n = extract mat `LA.atIndex` (fromFin m, fromFin n)
-- | Strict left fold over all entries of a statically-sized vector,
-- seeded with an initial accumulator.
foldR :: KnownNat n => (b -> Double -> b) -> b -> R n -> b
foldR f z = V.foldl' f z . extract
-- | Strict left fold over a statically-sized vector using its first entry
-- as the initial accumulator (no separate seed).
foldR1 :: KnownNat n => (Double -> Double -> Double) -> R n -> Double
foldR1 f = V.foldl1' f . extract
|
#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/block/gsl_block.h>
#include <gsl/vector/gsl_vector.h>
#define BASE_DOUBLE
#include <gsl/templates_on.h>
#include <gsl/vector/file_source.c>
#include <gsl/templates_off.h>
#undef BASE_DOUBLE
|
If $f_2$ is asymptotically equivalent to $g_2$ and $g_2$ is eventually nonzero, then $f_1 / f_2$ is asymptotically equivalent to $g_1 / g_2$ if and only if $f_1$ is asymptotically equivalent to $g_1$. |
Set Implicit Arguments.
Set Bullet Behavior "Strict Subproofs".
Require Import Lists.List.
Import ListNotations.
Open Scope list.
From Utils Require Import Utils.
Section WithVar.
Context (V : Type).
Notation named_list := (@named_list V).
Notation named_map := (@named_map V).
(* Operations for types that can be substituted into themselves *)
Class Substable0 (A : Type) : Type :=
{
(* Embed a variable name into the syntax type. *)
inj_var : V -> A;
(* Apply a (name -> term) substitution to a term. *)
apply_subst0 : named_list A -> A -> A;
(* [well_scoped0 args a]: scoping predicate for [a] relative to [args]. *)
well_scoped0 : list V -> A -> Prop;
}.
Section WithSubstable0.
Context {A} {Substable0 : Substable0 A}.
Definition subst := (named_list A).
Definition id_args {B} (c : named_list B) :=
map inj_var (map fst c).
(*Defined as a notation so that the definition
does not get in the way of automation *)
Notation id_subst c := (with_names_from c (id_args c)).
Definition subst_cmp (s1 s2 : subst) := named_map (apply_subst0 s1) s2.
Fixpoint ws_subst args (s : subst) : Prop :=
match s with
| [] => True
| (n,e)::s' => fresh n s' /\ well_scoped0 args e /\ ws_subst args s'
end.
Arguments ws_subst args !s/.
(* TODO: make sure that using apply_subst0 here isn't a problem for automation.
The alternative is to split Substable into defs and properties like Substable0
*)
Definition args_subst s (a : list A) := map (apply_subst0 s) a.
Arguments args_subst s !a/.
Definition ws_args args : list A -> Prop := all (well_scoped0 args).
Arguments ws_args args !s/.
(* Laws a Substable0 instance must satisfy. *)
Class Substable0_ok : Type :=
{
(* Stated like this so that the class declaration doesn't depend on Eqb *)
subst_var_internal : forall s x, named_list_lookup_prop (inj_var x) s x (apply_subst0 s (inj_var x));
(* Substitution composes via subst_cmp. *)
subst_assoc0 : forall s1 s2 a,
well_scoped0 (map fst s2) a ->
apply_subst0 s1 (apply_subst0 s2 a) = apply_subst0 (subst_cmp s1 s2) a;
(* The identity substitution is a no-op. *)
subst_id0 : forall {B} (c : named_list B) a,
(* Not necessary because of our choice of default
well_scoped (map fst c) a ->*)
apply_subst0 (id_subst c) a = a;
(* Entries for names not occurring in [a] can be dropped. *)
strengthen_subst0
: forall s a n e,
well_scoped0 (map fst s) a ->
fresh n s ->
apply_subst0 ((n,e)::s) a = apply_subst0 s a;
(* Substitution preserves well-scopedness. *)
well_scoped_subst0 args s a
: ws_subst args s ->
well_scoped0 (map fst s) a ->
well_scoped0 args (apply_subst0 s a)
}.
(* Look a name up in a substitution, defaulting to the variable itself when
   the name is absent. Requires decidable equality on names. *)
Definition subst_lookup `{Eqb V} (s : subst) (n : V) : A :=
named_list_lookup (inj_var n) s n.
Arguments subst_lookup {_} !s n/.
(* Applying a substitution to a variable is exactly a lookup; derived from
   the Eqb-free law subst_var_internal. *)
Lemma subst_var `{Eqb_ok V} `{Substable0_ok}
: forall s x, apply_subst0 s (inj_var x) = subst_lookup s x.
Proof.
intros.
unfold subst_lookup.
symmetry.
erewrite <- named_list_lookup_prop_correct.
unshelve eapply subst_var_internal.
Qed.
Class Substable (B : Type) : Type :=
{
apply_subst : subst -> B -> B;
well_scoped : list V -> B -> Prop;
}.
Class Substable_ok (B : Type) {Substable : Substable B} : Type :=
{
subst_assoc : forall s1 s2 a,
well_scoped (map fst s2) a ->
apply_subst s1 (apply_subst s2 a) = apply_subst (subst_cmp s1 s2) a;
subst_id : forall {B} (c : named_list B) a,
(* Not necessary because of our choice of default
well_scoped (map fst c) a ->*)
apply_subst (id_subst c) a = a;
strengthen_subst
: forall s a n e,
well_scoped (map fst s) a ->
fresh n s ->
apply_subst ((n,e)::s) a= apply_subst s a;
well_scoped_subst args s a
: ws_subst args s ->
well_scoped (map fst s) a ->
well_scoped args (apply_subst s a)
}.
Arguments Substable_ok B%type_scope {Substable}.
Notation "e [/ s /]" := (apply_subst s e) (at level 7, left associativity).
#[export] Instance substable0_is_substable
: Substable A :=
{
apply_subst := apply_subst0;
well_scoped := well_scoped0;
}.
Context {Substable0_ok : Substable0_ok}.
#[export] Instance substable0_is_substable_ok
: Substable_ok A :=
{
subst_assoc := subst_assoc0;
subst_id := @subst_id0 _;
strengthen_subst := strengthen_subst0;
well_scoped_subst := well_scoped_subst0;
}.
(*TODO: use separate DB*)
Local Hint Rewrite subst_assoc0 : utils.
Arguments subst_id0 {Substable0_ok} B%type_scope _ _.
Local Hint Rewrite subst_id0 : utils.
Local Hint Rewrite strengthen_subst0 : utils.
Local Hint Resolve well_scoped_subst0 : utils.
Lemma args_subst_assoc : forall s1 s2 a,
ws_args (map fst s2) a ->
args_subst s1 (args_subst s2 a)
= args_subst (subst_cmp s1 s2) a.
Proof.
induction a; basic_goal_prep;
basic_utils_crush.
Qed.
Lemma args_subst_id
: forall A (c : named_list A) a,
args_subst (id_subst c) a = a.
Proof.
induction a; basic_goal_prep;
basic_utils_crush.
Qed.
Lemma args_strengthen_subst s a n e
: ws_args (map fst s) a ->
fresh n s ->
args_subst ((n,e)::s) a = args_subst s a.
Proof.
induction a; basic_goal_prep; f_equal;
basic_utils_crush.
Qed.
Lemma args_well_scoped_subst args s a
: ws_subst args s ->
ws_args (map fst s) a ->
ws_args args (args_subst s a).
Proof.
induction a; basic_goal_prep; try case_match;
basic_utils_crush.
Qed.
#[export] Instance substable_args : Substable (list A) :=
{
apply_subst := args_subst;
well_scoped := ws_args;
}.
#[export] Instance substable_args_ok : Substable_ok (list A) :=
{
subst_assoc := args_subst_assoc;
subst_id := args_subst_id;
strengthen_subst := args_strengthen_subst;
well_scoped_subst := args_well_scoped_subst;
}.
Lemma subst_subst_assoc : forall s1 s2 a,
ws_subst (map fst s2) a ->
subst_cmp s1 (subst_cmp s2 a)
= subst_cmp (subst_cmp s1 s2) a.
Proof.
induction a; basic_goal_prep;
basic_utils_crush.
Qed.
Lemma subst_subst_id
: forall A (c : named_list A) a,
subst_cmp (id_subst c) a = a.
Proof.
induction a; basic_goal_prep;
basic_utils_crush.
Qed.
Lemma subst_strengthen_subst s a n e
: ws_subst (map fst s) a ->
fresh n s ->
subst_cmp ((n,e)::s) a = subst_cmp s a.
Proof.
induction a; basic_goal_prep; f_equal;
solve [basic_utils_crush].
Qed.
Lemma subst_well_scoped_subst args s a
: ws_subst args s ->
ws_subst (map fst s) a ->
ws_subst args (subst_cmp s a).
Proof.
unfold subst_cmp.
induction a; basic_goal_prep; try case_match;
basic_utils_crush.
Qed.
#[export] Instance substable_subst : Substable (named_list A) :=
{
apply_subst := subst_cmp;
well_scoped := ws_subst;
}.
#[export] Instance substable_subst_ok : Substable_ok (named_list A) :=
{
subst_assoc := subst_subst_assoc;
subst_id := subst_subst_id;
strengthen_subst := subst_strengthen_subst;
well_scoped_subst := subst_well_scoped_subst;
}.
Lemma with_names_from_args_subst {B} (c':named_list B) s' (s : list A)
: with_names_from c' s[/s'/] = (with_names_from c' s)[/s'/].
Proof.
revert s.
induction c';
destruct s;
basic_goal_prep;
basic_utils_crush.
Qed.
End WithSubstable0.
End WithVar.
Arguments id_args : simpl never.
(*Defined as a notation so that the definition
does not get in the way of automation *)
Notation id_subst c := (with_names_from c (id_args c)).
Arguments subst_lookup [V]%type_scope {A}%type_scope {Substable0 H} !s n/.
Arguments args_subst [V]%type_scope {A}%type_scope {Substable0} s !a%list_scope/.
Arguments ws_args [V]%type_scope {A}%type_scope {Substable0} (_ !_)%list_scope/.
Arguments ws_subst [V]%type_scope {A}%type_scope {Substable0} args !s/.
Arguments Substable0_ok [V]%type_scope _%type_scope {_}.
Arguments Substable [V]%type_scope A%type_scope _%type_scope.
Arguments Substable_ok [V]%type_scope A%type_scope {Substable0} B%type_scope {Substable}.
Arguments well_scoped [V]%type_scope {A B}%type_scope {_} _%list_scope !_.
Arguments apply_subst [V]%type_scope {A B}%type_scope {_} _%list_scope !_.
Arguments args_subst [V]%type_scope {A}%type_scope {Substable0} s !a%list_scope/.
Arguments ws_args [V]%type_scope {A}%type_scope {Substable0} (_ !_)%list_scope/.
Arguments subst_id0 [V]%type_scope {A}%type_scope {Substable0 Substable0_ok} B%type_scope _ _ : rename.
#[global] Hint Rewrite subst_assoc using solve[typeclasses eauto] : term.
#[global] Hint Rewrite subst_id using solve[typeclasses eauto] : term.
#[global] Hint Rewrite strengthen_subst using solve[typeclasses eauto] : term.
#[global] Hint Resolve well_scoped_subst : term.
Notation "e [/ s /]" := (apply_subst s e) (at level 7, left associativity).
|
Require Import Coq.Sets.Ensembles.
Require Import Coq.Sets.Finite_sets.
(* A successor is never equal to the number itself. *)
Lemma Sn_n : forall n : nat, ~ S n = n.
Proof.
intros. induction n as [| n' IHn].
- intro. discriminate H.
- intro H. apply IHn.
inversion H. assumption.
Qed.
(* Transitivity of <= by induction on the second derivation. *)
Lemma le_trans : forall n m k : nat, n <= m -> m <= k -> n <= k.
Proof.
intros * H1 H2. induction H2 as [| m'].
- assumption.
- constructor 2. assumption.
Qed.
(* <= is preserved by taking successors on both sides. *)
Lemma S_le : forall n m :nat, n <= m -> S n <= S m .
Proof.
intros * H. induction H.
- constructor.
- constructor 2. assumption.
Qed.
(* m <= S n splits into m <= n or m = S n, and conversely. *)
Lemma m_le_Sn : forall n m : nat, m <= S n <-> m <= n \/ m = S n.
Proof.
intros. split; intro H.
- destruct n as [| n']; inversion H as [| m' H1 H2].
+ right. reflexivity.
+ left. assumption.
+ right. reflexivity.
+ left. assumption.
- elim H; intro H1.
+ constructor 2. assumption.
+ rewrite H1. constructor.
Qed.
(* Strict order is irreflexive: reduce S n <= n to a contradiction via pred. *)
Lemma lt_irrefl : forall n : nat, ~ n < n.
Proof.
intros * H.
absurd (S n <= n).
- contradict H. exfalso.
induction n as [|n' IHn].
+ inversion H.
+ apply IHn.
assert (H1 : pred (S (S n')) <= pred (S n')).
apply le_pred. assumption.
simpl in H1. assumption.
- assumption.
Qed.
(* Transitivity of <, by induction on the second hypothesis. *)
Lemma lt_trans : forall n m k : nat, n < m -> m < k -> n < k.
Proof.
intros * H1 H2.
induction H2 as [| k]; [apply le_S in H1 | apply le_S]; assumption.
Qed.
(* The initial segment {m | m <= n} of nat, as an Ensemble. *)
Definition ininat (n : nat) : Ensemble nat := fun m => m <= n.
(* Membership in ininat 0 characterizes being 0. *)
Lemma ininat_0 : forall n : nat, In nat (ininat 0) n <-> n = 0.
Proof.
intro. split; intro H.
- destruct n as [| n'].
+ reflexivity.
+ compute in H. exfalso. inversion H.
- rewrite H. apply le_n.
Qed.
(* S n lies strictly outside the segment up to n. *)
Lemma ininat_n_Sn : forall n : nat, ~ In nat (ininat n) (S n).
Proof.
intro. apply lt_irrefl.
Qed.
(* ininat 0 is the empty set with 0 added. *)
Lemma ininat_0_add : Same_set nat (ininat 0) (Add nat (Empty_set nat) 0).
Proof.
constructor.
- constructor 2. apply ininat_0 in H. rewrite H. compute. constructor.
- compute. intros * H. destruct H as [|n H1].
+ compute in H. exfalso. contradiction.
+ compute in H1. destruct H1. constructor.
Qed.
(* Base case for finiteness: ininat 0 is finite. *)
Lemma fin_ininat_0 : Finite nat (ininat 0).
Proof.
pose (H := Extensionality_Ensembles nat).
pose (H1 := H (ininat 0) (Add nat (Empty_set nat) 0)).
pose (H2 := H1 ininat_0_add). rewrite H2.
constructor.
- constructor.
- intro H3. contradiction.
Qed.
(* Unfolding step: the segment up to S n is the segment up to n plus S n. *)
Lemma ininat_Sn_add :
forall n : nat, Same_set nat (ininat (S n)) (Add nat (ininat n) (S n)).
Proof.
intro. split.
- compute. intros * H.
assert (H1 : x <= n \/ x = S n). apply m_le_Sn. assumption.
elim H1; intro H2.
+ constructor. compute. assumption.
+ constructor 2. compute. rewrite H2. constructor.
- compute. intros * H. destruct H; compute in H.
+ apply le_S. assumption.
+ destruct H. apply le_n.
Qed.
(* Every initial segment of nat is finite: induction using the two lemmas
   above plus extensionality. *)
Lemma fin_ininat : forall n : nat, Finite nat (ininat n).
Proof.
intro. induction n as [| n' IHn].
- apply fin_ininat_0.
- pose (H := Extensionality_Ensembles nat).
pose (H1 := H (ininat (S n')) (Add nat (ininat n') (S n'))).
pose (H2 := H1 (ininat_Sn_add n')). rewrite H2.
constructor 2. assumption.
apply ininat_n_Sn.
Qed.
|
[GOAL]
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Ring R
inst✝¹ : Module R E
inst✝ : Module R F
f : E →ᵃ[R] F
⊢ Continuous ↑f ↔ Continuous ↑f.linear
[PROOFSTEP]
constructor
[GOAL]
case mp
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Ring R
inst✝¹ : Module R E
inst✝ : Module R F
f : E →ᵃ[R] F
⊢ Continuous ↑f → Continuous ↑f.linear
[PROOFSTEP]
intro hc
[GOAL]
case mp
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Ring R
inst✝¹ : Module R E
inst✝ : Module R F
f : E →ᵃ[R] F
hc : Continuous ↑f
⊢ Continuous ↑f.linear
[PROOFSTEP]
rw [decomp' f]
[GOAL]
case mp
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Ring R
inst✝¹ : Module R E
inst✝ : Module R F
f : E →ᵃ[R] F
hc : Continuous ↑f
⊢ Continuous (↑f - fun x => ↑f 0)
[PROOFSTEP]
exact hc.sub continuous_const
[GOAL]
case mpr
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Ring R
inst✝¹ : Module R E
inst✝ : Module R F
f : E →ᵃ[R] F
⊢ Continuous ↑f.linear → Continuous ↑f
[PROOFSTEP]
intro hc
[GOAL]
case mpr
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Ring R
inst✝¹ : Module R E
inst✝ : Module R F
f : E →ᵃ[R] F
hc : Continuous ↑f.linear
⊢ Continuous ↑f
[PROOFSTEP]
rw [decomp f]
[GOAL]
case mpr
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Ring R
inst✝¹ : Module R E
inst✝ : Module R F
f : E →ᵃ[R] F
hc : Continuous ↑f.linear
⊢ Continuous (↑f.linear + fun x => ↑f 0)
[PROOFSTEP]
exact hc.add continuous_const
[GOAL]
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : CommRing R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
⊢ Continuous ↑(homothety x t)
[PROOFSTEP]
suffices ⇑(homothety x t) = fun y => t • (y - x) + x by
rw [this]
exact ((continuous_id.sub continuous_const).const_smul _).add continuous_const
[GOAL]
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : CommRing R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
this : ↑(homothety x t) = fun y => t • (y - x) + x
⊢ Continuous ↑(homothety x t)
[PROOFSTEP]
rw [this]
[GOAL]
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : CommRing R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
this : ↑(homothety x t) = fun y => t • (y - x) + x
⊢ Continuous fun y => t • (y - x) + x
[PROOFSTEP]
exact ((continuous_id.sub continuous_const).const_smul _).add continuous_const
[GOAL]
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : CommRing R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
⊢ ↑(homothety x t) = fun y => t • (y - x) + x
[PROOFSTEP]
ext y
[GOAL]
case h
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : CommRing R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
y : F
⊢ ↑(homothety x t) y = t • (y - x) + x
[PROOFSTEP]
simp [homothety_apply]
[GOAL]
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Field R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
ht : t ≠ 0
⊢ IsOpenMap ↑(homothety x t)
[PROOFSTEP]
apply IsOpenMap.of_inverse (homothety_continuous x t⁻¹)
[GOAL]
case l_inv
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Field R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
ht : t ≠ 0
⊢ Function.LeftInverse ↑(homothety x t) ↑(homothety x t⁻¹)
[PROOFSTEP]
intro e
[GOAL]
case r_inv
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Field R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
ht : t ≠ 0
⊢ Function.RightInverse ↑(homothety x t) ↑(homothety x t⁻¹)
[PROOFSTEP]
intro e
[GOAL]
case l_inv
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Field R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
ht : t ≠ 0
e : F
⊢ ↑(homothety x t) (↑(homothety x t⁻¹) e) = e
[PROOFSTEP]
simp [← AffineMap.comp_apply, ← homothety_mul, ht]
[GOAL]
case r_inv
R : Type u_1
E : Type u_2
F : Type u_3
inst✝⁷ : AddCommGroup E
inst✝⁶ : TopologicalSpace E
inst✝⁵ : AddCommGroup F
inst✝⁴ : TopologicalSpace F
inst✝³ : TopologicalAddGroup F
inst✝² : Field R
inst✝¹ : Module R F
inst✝ : ContinuousConstSMul R F
x : F
t : R
ht : t ≠ 0
e : F
⊢ ↑(homothety x t⁻¹) (↑(homothety x t) e) = e
[PROOFSTEP]
simp [← AffineMap.comp_apply, ← homothety_mul, ht]
|
The network renewed the series for a second season, which began in October 2007. The show's third season premiered on October 30, 2008. The premiere episode drew 8.5 million viewers, the highest ratings of the series.
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import data.option.basic
import data.nat.basic
/-!
# Partial predecessor and partial subtraction on the natural numbers
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
The usual definition of natural number subtraction (`nat.sub`) returns 0 as a "garbage value" for
`a - b` when `a < b`. Similarly, `nat.pred 0` is defined to be `0`. The functions in this file
wrap the result in an `option` type instead:
## Main definitions
- `nat.ppred`: a partial predecessor operation
- `nat.psub`: a partial subtraction operation
-/
namespace nat
/-- Partial predecessor operation. Returns `ppred n = some m`
if `n = m + 1`, otherwise `none`. -/
-- e.g. `ppred 0 = none` and `ppred (n+1) = some n`; unlike `nat.pred`,
-- underflow at `0` is observable rather than truncated to `0`.
@[simp] def ppred : ℕ → option ℕ
| 0 := none
| (n+1) := some n
/-- Partial subtraction operation. Returns `psub m n = some k`
if `m = n + k`, otherwise `none`. -/
-- Defined by peeling `n` successors off with `ppred` inside the `option`
-- monad, so any intermediate underflow propagates as `none`.
@[simp] def psub (m : ℕ) : ℕ → option ℕ
| 0 := some m
| (n+1) := psub n >>= ppred
-- `nat.pred` agrees with `ppred`, with `0` as the default for `ppred 0 = none`.
theorem pred_eq_ppred (n : ℕ) : pred n = (ppred n).get_or_else 0 :=
by cases n; refl
-- Truncated subtraction agrees with `psub`, defaulting to `0` on underflow.
theorem sub_eq_psub (m : ℕ) : ∀ n, m - n = (psub m n).get_or_else 0
| 0 := rfl
| (n+1) := (pred_eq_ppred (m-n)).trans $
by rw [sub_eq_psub, psub]; cases psub m n; refl
-- `ppred` succeeds with value `m` exactly on the successor of `m`.
@[simp] theorem ppred_eq_some {m : ℕ} : ∀ {n}, ppred n = some m ↔ succ m = n
| 0 := by split; intro h; contradiction
| (n+1) := by dsimp; split; intro h; injection h; subst n
-- `ppred` fails only at `0`.
@[simp] theorem ppred_eq_none : ∀ {n : ℕ}, ppred n = none ↔ n = 0
| 0 := by simp
| (n+1) := by dsimp; split; contradiction
-- `psub m n` succeeds with value `k` iff `k + n = m`.
theorem psub_eq_some {m : ℕ} : ∀ {n k}, psub m n = some k ↔ k + n = m
| 0 k := by simp [eq_comm]
| (n+1) k :=
begin
dsimp,
apply option.bind_eq_some.trans,
simp [psub_eq_some, add_comm, add_left_comm, nat.succ_eq_add_one]
end
-- `psub m n` fails iff the subtraction would underflow, i.e. `m < n`.
theorem psub_eq_none {m n : ℕ} : psub m n = none ↔ m < n :=
begin
cases s : psub m n; simp [eq_comm],
{ show m < n, refine lt_of_not_ge (λ h, _),
cases le.dest h with k e,
injection s.symm.trans (psub_eq_some.2 $ (add_comm _ _).trans e) },
{ show n ≤ m, rw ← psub_eq_some.1 s, apply nat.le_add_left }
end
-- On positive input, `ppred` succeeds and returns the ordinary predecessor.
theorem ppred_eq_pred {n} (h : 0 < n) : ppred n = some (pred n) :=
ppred_eq_some.2 $ succ_pred_eq_of_pos h
-- When `n ≤ m`, `psub` succeeds and agrees with truncated subtraction.
theorem psub_eq_sub {m n} (h : n ≤ m) : psub m n = some (m - n) :=
psub_eq_some.2 $ nat.sub_add_cancel h
-- Subtracting a sum is sequential subtraction in the `option` monad.
theorem psub_add (m n k) : psub m (n + k) = do x ← psub m n, psub x k :=
by induction k; simp [*, add_succ, bind_assoc]
/-- Same as `psub`, but with a more efficient implementation. -/
-- Decides `n ≤ m` once instead of performing `n` monadic `ppred` steps.
@[inline] def psub' (m n : ℕ) : option ℕ := if n ≤ m then some (m - n) else none
-- The efficient implementation agrees with the recursive one, by
-- `psub_eq_sub` in the `n ≤ m` branch and `psub_eq_none` otherwise.
theorem psub'_eq_psub (m n) : psub' m n = psub m n :=
by rw [psub']; split_ifs;
[exact (psub_eq_sub h).symm, exact (psub_eq_none.2 (not_le.1 h)).symm]
end nat
|
------------------------------------------------------------------------------
-- The gcd is divisible by any common divisor
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOTC.Program.GCD.Total.DivisibleATP where
open import FOTC.Base
open import FOTC.Data.Nat
open import FOTC.Data.Nat.Divisibility.By0
open import FOTC.Data.Nat.Divisibility.By0.PropertiesATP
open import FOTC.Data.Nat.Induction.NonAcc.LexicographicATP
open import FOTC.Data.Nat.Inequalities
open import FOTC.Data.Nat.Inequalities.EliminationPropertiesATP
open import FOTC.Data.Nat.Inequalities.PropertiesATP
open import FOTC.Data.Nat.PropertiesATP
open import FOTC.Program.GCD.Total.Definitions
open import FOTC.Program.GCD.Total.GCD
------------------------------------------------------------------------------
-- The gcd 0 0 is Divisible.
postulate gcd-00-Divisible : Divisible zero zero (gcd zero zero)
{-# ATP prove gcd-00-Divisible #-}
-- The gcd 0 (succ n) is Divisible.
postulate
gcd-0S-Divisible : ∀ {n} → N n → Divisible zero (succ₁ n) (gcd zero (succ₁ n))
{-# ATP prove gcd-0S-Divisible #-}
-- The gcd (succ n) 0 is Divisible.
postulate
gcd-S0-Divisible : ∀ {n} → N n → Divisible (succ₁ n) zero (gcd (succ₁ n) zero)
{-# ATP prove gcd-S0-Divisible #-}
------------------------------------------------------------------------------
-- The gcd (succ₁ m) (succ₁ n) when succ₁ m > succ₁ n is Divisible.
-- For the proof using the ATP we added the helper hypothesis
-- c | succ₁ m → c | succ₁ n → c | succ₁ m ∸ succ₁ n.
postulate
gcd-S>S-Divisible-ah :
∀ {m n} → N m → N n →
(Divisible (succ₁ m ∸ succ₁ n) (succ₁ n) (gcd (succ₁ m ∸ succ₁ n) (succ₁ n))) →
succ₁ m > succ₁ n →
∀ c → N c → CD (succ₁ m) (succ₁ n) c →
(c ∣ succ₁ m ∸ succ₁ n) →
c ∣ gcd (succ₁ m) (succ₁ n)
{-# ATP prove gcd-S>S-Divisible-ah #-}
-- Discharges the extra hypothesis of the ATP lemma above via
-- x∣y→x∣z→x∣y∸z (a common divisor also divides the difference).
gcd-S>S-Divisible :
∀ {m n} → N m → N n →
(Divisible (succ₁ m ∸ succ₁ n) (succ₁ n) (gcd (succ₁ m ∸ succ₁ n) (succ₁ n))) →
succ₁ m > succ₁ n →
Divisible (succ₁ m) (succ₁ n) (gcd (succ₁ m) (succ₁ n))
gcd-S>S-Divisible {m} {n} Nm Nn acc Sm>Sn c Nc (c∣Sm , c∣Sn) =
gcd-S>S-Divisible-ah Nm Nn acc Sm>Sn c Nc (c∣Sm , c∣Sn)
(x∣y→x∣z→x∣y∸z Nc (nsucc Nm) (nsucc Nn) c∣Sm c∣Sn)
------------------------------------------------------------------------------
-- The gcd (succ₁ m) (succ₁ n) when succ₁ m ≯ succ₁ n is Divisible.
-- For the proof using the ATP we added the helper hypothesis
-- c | succ₁ n → c | succ₁ m → c | succ₁ n ∸ succ₁ m.
postulate
gcd-S≯S-Divisible-ah :
∀ {m n} → N m → N n →
(Divisible (succ₁ m) (succ₁ n ∸ succ₁ m) (gcd (succ₁ m) (succ₁ n ∸ succ₁ m))) →
succ₁ m ≯ succ₁ n →
∀ c → N c → CD (succ₁ m) (succ₁ n) c →
(c ∣ succ₁ n ∸ succ₁ m) →
c ∣ gcd (succ₁ m) (succ₁ n)
{-# ATP prove gcd-S≯S-Divisible-ah #-}
-- Mirror image of gcd-S>S-Divisible: discharges the extra hypothesis
-- with x∣y→x∣z→x∣y∸z, arguments swapped for the n ∸ m direction.
gcd-S≯S-Divisible :
∀ {m n} → N m → N n →
(Divisible (succ₁ m) (succ₁ n ∸ succ₁ m) (gcd (succ₁ m) (succ₁ n ∸ succ₁ m))) →
succ₁ m ≯ succ₁ n →
Divisible (succ₁ m) (succ₁ n) (gcd (succ₁ m) (succ₁ n))
gcd-S≯S-Divisible {m} {n} Nm Nn acc Sm≯Sn c Nc (c∣Sm , c∣Sn) =
gcd-S≯S-Divisible-ah Nm Nn acc Sm≯Sn c Nc (c∣Sm , c∣Sn)
(x∣y→x∣z→x∣y∸z Nc (nsucc Nn) (nsucc Nm) c∣Sn c∣Sm)
------------------------------------------------------------------------------
-- The gcd m n when m > n is Divisible.
-- Case analysis on m and n: 0 > n is refuted, (succ m, 0) is a base case,
-- and (succ m, succ n) appeals to the accumulated hypothesis `ah` at the
-- lexicographically smaller pair (succ₁ m ∸ succ₁ n , succ₁ n).
gcd-x>y-Divisible :
∀ {m n} → N m → N n →
(∀ {o p} → N o → N p → Lexi o p m n → Divisible o p (gcd o p)) →
m > n →
Divisible m n (gcd m n)
gcd-x>y-Divisible nzero Nn _ 0>n _ _ = ⊥-elim (0>x→⊥ Nn 0>n)
gcd-x>y-Divisible (nsucc Nm) nzero _ _ c Nc = gcd-S0-Divisible Nm c Nc
gcd-x>y-Divisible (nsucc {m} Nm) (nsucc {n} Nn) ah Sm>Sn c Nc =
gcd-S>S-Divisible Nm Nn ih Sm>Sn c Nc
where
-- Inductive hypothesis.
ih : Divisible (succ₁ m ∸ succ₁ n) (succ₁ n) (gcd (succ₁ m ∸ succ₁ n) (succ₁ n))
ih = ah {succ₁ m ∸ succ₁ n}
{succ₁ n}
(∸-N (nsucc Nm) (nsucc Nn))
(nsucc Nn)
([Sx∸Sy,Sy]<[Sx,Sy] Nm Nn)
------------------------------------------------------------------------------
-- The gcd m n when m ≯ n is Divisible.
-- Case analysis on m and n: two base cases for m = 0, refutation of
-- succ m ≯ 0, and the step case appeals to `ah` at the lexicographically
-- smaller pair (succ₁ m , succ₁ n ∸ succ₁ m).
gcd-x≯y-Divisible :
∀ {m n} → N m → N n →
(∀ {o p} → N o → N p → Lexi o p m n → Divisible o p (gcd o p)) →
m ≯ n →
Divisible m n (gcd m n)
gcd-x≯y-Divisible nzero nzero _ _ c Nc = gcd-00-Divisible c Nc
gcd-x≯y-Divisible nzero (nsucc Nn) _ _ c Nc = gcd-0S-Divisible Nn c Nc
gcd-x≯y-Divisible (nsucc _) nzero _ Sm≯0 _ _ = ⊥-elim (S≯0→⊥ Sm≯0)
gcd-x≯y-Divisible (nsucc {m} Nm) (nsucc {n} Nn) ah Sm≯Sn c Nc =
gcd-S≯S-Divisible Nm Nn ih Sm≯Sn c Nc
where
-- Inductive hypothesis.
ih : Divisible (succ₁ m) (succ₁ n ∸ succ₁ m) (gcd (succ₁ m) (succ₁ n ∸ succ₁ m))
ih = ah {succ₁ m}
{succ₁ n ∸ succ₁ m}
(nsucc Nm)
(∸-N (nsucc Nn) (nsucc Nm))
([Sx,Sy∸Sx]<[Sx,Sy] Nm Nn)
------------------------------------------------------------------------------
-- The gcd is Divisible.
-- Main result: well-founded induction on the lexicographic order (Lexi),
-- splitting on x > y ∨ x ≯ y and dispatching to the two lemmas above.
gcdDivisible : ∀ {m n} → N m → N n → Divisible m n (gcd m n)
gcdDivisible = Lexi-wfind A h
where
-- The induction predicate: the pair (i, j) satisfies Divisible.
A : D → D → Set
A i j = Divisible i j (gcd i j)
h : ∀ {i j} → N i → N j → (∀ {k l} → N k → N l → Lexi k l i j → A k l) →
A i j
h Ni Nj ah = case (gcd-x>y-Divisible Ni Nj ah)
(gcd-x≯y-Divisible Ni Nj ah)
(x>y∨x≯y Ni Nj)
|
(* Title: Proving the Correctness of Disk Paxos
Author: Mauro J. Jaskelioff, Stephan Merz, 2005
Maintainer: Mauro J. Jaskelioff <mauro at fceia.unr.edu.ar>
*)
theory DiskPaxos_Chosen imports DiskPaxos_Inv5 begin
subsection {* Lemma I2f *}
text {*
To prove the final conjunct we will use the predicate $valueChosen(v)$.
This predicate is true if $v$ is the only possible value that can be
chosen as output. It also asserts that, for every disk $d$ in some
majority set $D$, if a process $q$ has already read $disk s d p$, then it
has read a block with $bal$ field at least $b$.
*}
(* valueChosen s v: some ballot b exists such that every block with
   bal >= b carries input v (maxBalInp), and some process p has blocks with
   bal >= b on a majority D of disks; moreover any process q in phase 1
   with mbal(dblock s q) >= b that has read disk d in D from p has read a
   block with bal >= b. *)
definition valueChosen :: "state \<Rightarrow> InputsOrNi \<Rightarrow> bool"
where
"valueChosen s v =
(\<exists>b\<in> (UN p. Ballot p).
maxBalInp s b v
\<and> (\<exists>p. \<exists>D\<in>MajoritySet.(\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br))
))))"
(* If some block bk (of any process r) visible to q has bal >= b, then the
   dblock chosen for q by HEndPhase1 carries input v: bk is non-initial
   (its inp is not NotAnInput since 0 < b <= bal bk), hence maxBlk s q is
   defined with bal >= b, and maxBalInp forces its inp -- which EndPhase1
   copies into dblock s' q -- to be v. *)
lemma HEndPhase1_valueChosen_inp:
assumes act: "HEndPhase1 s s' q"
and inv2a: "Inv2a s"
and asm1: "b \<in> (UN p. Ballot p)"
and bk_blocksOf: "bk\<in>blocksOf s r"
and bk: "bk\<in> blocksSeen s q"
and b_bal: "b \<le> bal bk"
and asm3: "maxBalInp s b v"
and inv1: "Inv1 s"
shows "inp(dblock s' q) = v"
proof -
from bk_blocksOf inv2a
have inv2a_bk: "Inv2a_innermost s r bk"
by(auto simp add: Inv2a_def Inv2a_inner_def)
(* Ballots are nonzero, so bal bk is positive and bk cannot be initial. *)
from Ballot_nzero asm1
have "0 < b " by auto
with b_bal
have "0< bal bk" by auto
with inv2a_bk
have "inp bk \<noteq> NotAnInput"
by(auto simp add: Inv2a_innermost_def)
with bk InputsOrNi
have bk_noninit: "bk \<in> nonInitBlks s q"
by(auto simp add: nonInitBlks_def blocksSeen_def
allBlocksRead_def allRdBlks_def)
(* maxBlk dominates every non-initial block seen by q, so b <= bal(maxBlk). *)
with maxBlk_in_nonInitBlks[OF this inv1] b_bal
have maxBlk_b: "b \<le> bal (maxBlk s q)"
by auto
from maxBlk_in_nonInitBlks[OF bk_noninit inv1]
have "\<exists>p d. maxBlk s q \<in> blocksSeen s p"
by(auto simp add: nonInitBlks_def blocksSeen_def)
hence "\<exists>p. maxBlk s q \<in> blocksOf s p"
by(auto simp add: blocksOf_def blocksSeen_def
allBlocksRead_def allRdBlks_def rdBy_def, force)
(* maxBlk is in allBlocks, so maxBalInp applies to it. *)
with maxBlk_b asm3
have "inp(maxBlk s q) = v"
by(auto simp add: maxBalInp_def allBlocks_def)
with bk_noninit act
show ?thesis
by(auto simp add: EndPhase1_def)
qed
lemma HEndPhase1_maxBalInp:
assumes act: "HEndPhase1 s s' q"
and asm1: "b \<in> (UN p. Ballot p)"
and asm2: "D\<in>MajoritySet"
and asm3: "maxBalInp s b v"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
and inv1: "Inv1 s"
and inv2a: "Inv2a s"
and inv2b: "Inv2b s"
shows "maxBalInp s' b v"
proof(cases "b \<le> mbal(dblock s q)")
case True
show ?thesis
proof(cases "p\<noteq>q")
assume pnq: "p\<noteq>q"
have "\<exists>d\<in>D. hasRead s q d p"
proof -
from act
have "IsMajority({d. d\<in> disksWritten s q \<and> (\<forall>r\<in>UNIV-{q}. hasRead s q d r)})" (is "IsMajority(?M)")
by(auto simp add: EndPhase1_def)
with majorities_intersect asm2
have "D \<inter> ?M \<noteq> {}"
by(auto simp add: MajoritySet_def)
hence "\<exists>d\<in>D. (\<forall>r\<in>UNIV-{q}. hasRead s q d r)"
by auto
with pnq
show ?thesis
by auto
qed
then obtain d where p41: "d\<in>D \<and> hasRead s q d p" by auto
with asm4 asm3 act True
have p42: "\<exists>br\<in>blocksRead s q d. b\<le> bal(block br)"
by(auto simp add: EndPhase1_def)
from True act
have thesis_L: "b\<le>bal (dblock s' q)"
by(auto simp add: EndPhase1_def)
from p42
have "inp(dblock s' q) = v"
proof auto
fix br
assume br: "br \<in> blocksRead s q d"
and b_bal: " b \<le> bal (block br)"
hence br_rdBy: "br \<in> (UN q d. rdBy s (proc br) q d)"
by(auto simp add: rdBy_def)
hence br_blksof: "block br \<in> blocksOf s (proc br)"
by(auto simp add: blocksOf_def)
from br have br_bseen: "block br\<in> blocksSeen s q"
by(auto simp add: blocksSeen_def allBlocksRead_def allRdBlks_def)
from HEndPhase1_valueChosen_inp[OF act inv2a asm1 br_blksof br_bseen b_bal asm3 inv1]
show ?thesis .
qed
with asm3 HEndPhase1_allBlocks[OF act]
show ?thesis
by(auto simp add: maxBalInp_def)
next
case False
from asm4
have p41: "\<forall>d\<in>D. b \<le> bal(disk s d p)"
by auto
have p42: "\<exists>d\<in>D. disk s d p = dblock s p"
proof -
from act
have "IsMajority {d. d\<in>disksWritten s q \<and> (\<forall>p\<in>UNIV-{q}. hasRead s q d p)}" (is "IsMajority ?S")
by(auto simp add: EndPhase1_def)
with majorities_intersect asm2
have "D \<inter> ?S \<noteq> {}"
by(auto simp add: MajoritySet_def)
hence "\<exists>d\<in>D. d\<in>disksWritten s q"
by auto
with inv2b False
show ?thesis
by(auto simp add: Inv2b_def Inv2b_inner_def)
qed
have "inp(dblock s' q) = v"
proof -
from p42 p41 False
have b_bal: "b \<le> bal(dblock s q)" by auto
have db_blksof: "(dblock s q) \<in> blocksOf s q"
by(auto simp add: blocksOf_def)
have db_bseen: "(dblock s q) \<in> blocksSeen s q"
by(auto simp add: blocksSeen_def)
from HEndPhase1_valueChosen_inp[OF act inv2a asm1 db_blksof db_bseen b_bal asm3 inv1]
show ?thesis .
qed
with asm3 HEndPhase1_allBlocks[OF act]
show ?thesis
by(auto simp add: maxBalInp_def)
qed
next
case False
have "dblock s' q \<in> allBlocks s'"
by(auto simp add: allBlocks_def blocksOf_def)
show ?thesis
proof(auto simp add: maxBalInp_def)
fix bk
assume bk: "bk \<in> allBlocks s'"
and b_bal: "b \<le> bal bk"
from subsetD[OF HEndPhase1_allBlocks[OF act] bk]
show "inp bk = v"
proof
assume bk: "bk \<in> allBlocks s"
with asm3 b_bal
show ?thesis
by(auto simp add: maxBalInp_def)
next
assume bk: "bk \<in> {dblock s' q}"
from act False
have " \<not> b \<le> bal (dblock s' q)"
by(auto simp add: EndPhase1_def)
with bk b_bal
show ?thesis
by(auto)
qed
qed
qed
(* The majority-disk conjunct of valueChosen is preserved by HEndPhase1:
   disks are unchanged by the action, and any phase-1 reader in s' with an
   unchanged dblock was already a phase-1 reader of the same block in s, so
   the hypothesis on s applies directly. *)
lemma HEndPhase1_valueChosen2:
assumes act: "HEndPhase1 s s' q"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))" (is "?P s")
shows "?P s'"
proof(auto)
fix d
assume d: "d\<in>D"
with act asm4
show "b \<le> bal (disk s' d p)"
by(auto simp add: EndPhase1_def)
fix d q
assume d: "d\<in>D"
and phase': "phase s' q = Suc 0"
and dblk_mbal: "b \<le> mbal (dblock s' q)"
(* A process still in phase 1 after the action was in phase 1 before it,
   with the same dblock. *)
with act
have p31: "phase s q = 1"
and p32: "dblock s' q = dblock s q"
by(auto simp add: EndPhase1_def split: split_if_asm)
with dblk_mbal
have "b\<le>mbal(dblock s q)" by auto
moreover
assume hasRead: "hasRead s' q d p"
with act
have "hasRead s q d p"
by(auto simp add: EndPhase1_def InitializePhase_def
hasRead_def split: split_if_asm)
ultimately
have "\<exists>br\<in>blocksRead s q d. b\<le> bal(block br)"
using p31 asm4 d
by blast
with act hasRead
show "\<exists>br\<in>blocksRead s' q d. b\<le> bal(block br)"
by(auto simp add: EndPhase1_def InitializePhase_def hasRead_def)
qed
(* HEndPhase1 preserves valueChosen, given invariants Inv1, Inv2a, Inv2b:
   unpack the witnesses b, p, D from valueChosen s v, carry maxBalInp
   across the action with HEndPhase1_maxBalInp, and carry the majority-disk
   conjunct with HEndPhase1_valueChosen2. *)
theorem HEndPhase1_valueChosen:
assumes act: "HEndPhase1 s s' q"
and vc: "valueChosen s v"
and inv1: "Inv1 s"
and inv2a: "Inv2a s"
and inv2b: "Inv2b s"
and v_input: "v \<in> Inputs"
shows "valueChosen s' v"
proof -
from vc
obtain b p D where
asm1: "b \<in> (UN p. Ballot p)"
and asm2: "D\<in>MajoritySet"
and asm3: "maxBalInp s b v"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
by(auto simp add: valueChosen_def)
from HEndPhase1_maxBalInp[OF act asm1 asm2 asm3 asm4 inv1 inv2a inv2b]
have "maxBalInp s' b v" .
with HEndPhase1_valueChosen2[OF act asm4] asm1 asm2
show ?thesis
by(auto simp add: valueChosen_def)
qed
lemma HStartBallot_maxBalInp:
assumes act: "HStartBallot s s' q"
and asm3: "maxBalInp s b v"
shows "maxBalInp s' b v"
proof(auto simp add: maxBalInp_def)
fix bk
assume bk: "bk \<in> allBlocks s'"
and b_bal: "b\<le> bal bk"
from subsetD[OF HStartBallot_allBlocks[OF act] bk]
show "inp bk = v"
proof
assume bk: "bk\<in>allBlocks s"
with asm3 b_bal
show ?thesis
by(auto simp add: maxBalInp_def)
next
assume bk: "bk\<in>{dblock s' q}"
from asm3
have "b\<le> bal(dblock s q) \<Longrightarrow> inp(dblock s q) = v"
by(auto simp add: maxBalInp_def allBlocks_def blocksOf_def)
with act bk b_bal
show ?thesis
by(auto simp add: StartBallot_def)
qed
qed
lemma HStartBallot_valueChosen2:
assumes act: "HStartBallot s s' q"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))" (is "?P s")
shows "?P s'"
proof(auto)
fix d
assume d: "d\<in>D"
with act asm4
show "b \<le> bal (disk s' d p)"
by(auto simp add: StartBallot_def)
fix d q
assume d: "d\<in>D"
and phase': "phase s' q = Suc 0"
and dblk_mbal: "b \<le> mbal (dblock s' q)"
and hasRead: "hasRead s' q d p"
from phase' act hasRead
have p31: "phase s q = 1"
and p32: "dblock s' q = dblock s q"
by(auto simp add: StartBallot_def InitializePhase_def
hasRead_def split : split_if_asm)
with dblk_mbal
have "b\<le>mbal(dblock s q)" by auto
moreover
from act hasRead
have "hasRead s q d p"
by(auto simp add: StartBallot_def InitializePhase_def
hasRead_def split: split_if_asm)
ultimately
have "\<exists>br\<in>blocksRead s q d. b\<le> bal(block br)"
using p31 asm4 d
by blast
with act hasRead
show "\<exists>br\<in>blocksRead s' q d. b\<le> bal(block br)"
by(auto simp add: StartBallot_def InitializePhase_def
hasRead_def)
qed
theorem HStartBallot_valueChosen:
assumes act: "HStartBallot s s' q"
and vc: "valueChosen s v"
and v_input: "v \<in> Inputs"
shows "valueChosen s' v"
proof -
from vc
obtain b p D where
asm1: "b \<in> (UN p. Ballot p)"
and asm2: "D\<in>MajoritySet"
and asm3: "maxBalInp s b v"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
by(auto simp add: valueChosen_def)
from HStartBallot_maxBalInp[OF act asm3]
have "maxBalInp s' b v" .
with HStartBallot_valueChosen2[OF act asm4] asm1 asm2
show ?thesis
by(auto simp add: valueChosen_def)
qed
lemma HPhase1or2Write_maxBalInp:
assumes act: "HPhase1or2Write s s' q d"
and asm3: "maxBalInp s b v"
shows "maxBalInp s' b v"
proof(auto simp add: maxBalInp_def)
fix bk
assume bk: "bk \<in> allBlocks s'"
and b_bal: "b\<le> bal bk"
from subsetD[OF HPhase1or2Write_allBlocks[OF act] bk] asm3 b_bal
show "inp bk = v"
by(auto simp add: maxBalInp_def)
qed
lemma HPhase1or2Write_valueChosen2:
assumes act: "HPhase1or2Write s s' pp d"
and asm2: "D\<in>MajoritySet"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))" (is "?P s")
and inv4: "HInv4a s pp"
shows "?P s'"
proof(auto)
fix d1
assume d: "d1\<in>D"
show "b \<le> bal (disk s' d1 p)"
proof(cases "d1=d\<and>pp=p")
case True
with inv4 act
have "HInv4a2 s p"
by(auto simp add: Phase1or2Write_def HInv4a_def)
with asm2 majorities_intersect
have "\<exists>dd\<in>D. bal(disk s dd p)\<le>bal(dblock s p)"
by(auto simp add: HInv4a2_def MajoritySet_def)
then obtain dd where p41: "dd\<in>D \<and> bal(disk s dd p)\<le>bal(dblock s p)"
by auto
from asm4 p41
have "b\<le> bal(disk s dd p)"
by auto
with p41
have p42: "b \<le> bal(dblock s p)"
by auto
from act True
have "dblock s p = disk s' d p"
by(auto simp add: Phase1or2Write_def)
with p42 True
show ?thesis
by auto
next
case False
with act asm4 d
show ?thesis
by(auto simp add: Phase1or2Write_def)
qed
next
fix d q
assume d: "d\<in>D"
and phase': "phase s' q = Suc 0"
and dblk_mbal: "b \<le> mbal (dblock s' q)"
and hasRead: "hasRead s' q d p"
from phase' act hasRead
have p31: "phase s q = 1"
and p32: "dblock s' q = dblock s q"
by(auto simp add: Phase1or2Write_def InitializePhase_def
hasRead_def split : split_if_asm)
with dblk_mbal
have "b\<le>mbal(dblock s q)" by auto
moreover
from act hasRead
have "hasRead s q d p"
by(auto simp add: Phase1or2Write_def InitializePhase_def
hasRead_def split: split_if_asm)
ultimately
have "\<exists>br\<in>blocksRead s q d. b\<le> bal(block br)"
using p31 asm4 d
by blast
with act hasRead
show "\<exists>br\<in>blocksRead s' q d. b\<le> bal(block br)"
by(auto simp add: Phase1or2Write_def InitializePhase_def
hasRead_def)
qed
theorem HPhase1or2Write_valueChosen:
assumes act: "HPhase1or2Write s s' q d"
and vc: "valueChosen s v"
and v_input: "v \<in> Inputs"
and inv4: "HInv4a s q"
shows "valueChosen s' v"
proof -
from vc
obtain b p D where
asm1: "b \<in> (UN p. Ballot p)"
and asm2: "D\<in>MajoritySet"
and asm3: "maxBalInp s b v"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
by(auto simp add: valueChosen_def)
from HPhase1or2Write_maxBalInp[OF act asm3]
have "maxBalInp s' b v" .
with HPhase1or2Write_valueChosen2[OF act asm2 asm4 inv4] asm1 asm2
show ?thesis
by(auto simp add: valueChosen_def)
qed
lemma HPhase1or2ReadThen_maxBalInp:
assumes act: "HPhase1or2ReadThen s s' q d p"
and asm3: "maxBalInp s b v"
shows "maxBalInp s' b v"
proof(auto simp add: maxBalInp_def)
fix bk
assume bk: "bk \<in> allBlocks s'"
and b_bal: "b\<le> bal bk"
from subsetD[OF HPhase1or2ReadThen_allBlocks[OF act] bk] asm3 b_bal
show "inp bk = v"
by(auto simp add: maxBalInp_def)
qed
theorem HPhase1or2ReadThen_valueChosen:
assumes act: "HPhase1or2ReadThen s s' q d p"
and vc: "valueChosen s v"
and v_input: "v \<in> Inputs"
shows "valueChosen s' v"
proof -
from vc
obtain b p D where
asm1: "b \<in> (UN p. Ballot p)"
and asm2: "D\<in>MajoritySet"
and asm3: "maxBalInp s b v"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
by(auto simp add: valueChosen_def)
from HPhase1or2ReadThen_maxBalInp[OF act asm3]
have "maxBalInp s' b v" .
with HPhase1or2ReadThen_valueChosen2[OF act asm4] asm1 asm2
show ?thesis
by(auto simp add: valueChosen_def)
qed
(* Phase1or2ReadElse performs a StartBallot step, so preservation of
   valueChosen follows directly from HStartBallot_valueChosen. *)
theorem HPhase1or2ReadElse_valueChosen:
"\<lbrakk> HPhase1or2ReadElse s s' p d r; valueChosen s v; v\<in> Inputs \<rbrakk>
\<Longrightarrow> valueChosen s' v"
using HStartBallot_valueChosen
by(auto simp add: Phase1or2ReadElse_def)
lemma HEndPhase2_maxBalInp:
assumes act: "HEndPhase2 s s' q"
and asm3: "maxBalInp s b v"
shows "maxBalInp s' b v"
proof(auto simp add: maxBalInp_def)
fix bk
assume bk: "bk \<in> allBlocks s'"
and b_bal: "b\<le> bal bk"
from subsetD[OF HEndPhase2_allBlocks[OF act] bk] asm3 b_bal
show "inp bk = v"
by(auto simp add: maxBalInp_def)
qed
lemma HEndPhase2_valueChosen2:
assumes act: "HEndPhase2 s s' q"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))" (is "?P s")
shows "?P s'"
proof(auto)
fix d
assume d: "d\<in>D"
with act asm4
show "b \<le> bal (disk s' d p)"
by(auto simp add: EndPhase2_def)
fix d q
assume d: "d\<in>D"
and phase': "phase s' q = Suc 0"
and dblk_mbal: "b \<le> mbal (dblock s' q)"
and hasRead: "hasRead s' q d p"
from phase' act hasRead
have p31: "phase s q = 1"
and p32: "dblock s' q = dblock s q"
by(auto simp add: EndPhase2_def InitializePhase_def
hasRead_def split : split_if_asm)
with dblk_mbal
have "b\<le>mbal(dblock s q)" by auto
moreover
from act hasRead
have "hasRead s q d p"
by(auto simp add: EndPhase2_def InitializePhase_def
hasRead_def split: split_if_asm)
ultimately
have "\<exists>br\<in>blocksRead s q d. b\<le> bal(block br)"
using p31 asm4 d
by blast
with act hasRead
show "\<exists>br\<in>blocksRead s' q d. b\<le> bal(block br)"
by(auto simp add: EndPhase2_def InitializePhase_def
hasRead_def)
qed
theorem HEndPhase2_valueChosen:
assumes act: "HEndPhase2 s s' q"
and vc: "valueChosen s v"
and v_input: "v \<in> Inputs"
shows "valueChosen s' v"
proof -
from vc
obtain b p D where
asm1: "b \<in> (UN p. Ballot p)"
and asm2: "D\<in>MajoritySet"
and asm3: "maxBalInp s b v"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
by(auto simp add: valueChosen_def)
from HEndPhase2_maxBalInp[OF act asm3]
have "maxBalInp s' b v" .
with HEndPhase2_valueChosen2[OF act asm4] asm1 asm2
show ?thesis
by(auto simp add: valueChosen_def)
qed
lemma HFail_maxBalInp:
assumes act: "HFail s s' q"
and asm1: "b \<in> (UN p. Ballot p)"
and asm3: "maxBalInp s b v"
shows "maxBalInp s' b v"
proof(auto simp add: maxBalInp_def)
fix bk
assume bk: "bk \<in> allBlocks s'"
and b_bal: "b\<le> bal bk"
from subsetD[OF HFail_allBlocks[OF act] bk]
show "inp bk = v"
proof
assume bk: "bk\<in>allBlocks s"
with asm3 b_bal
show ?thesis
by(auto simp add: maxBalInp_def)
next
assume bk: "bk\<in>{dblock s' q}"
with act
have "bal bk = 0"
by(auto simp add: Fail_def InitDB_def)
moreover
from Ballot_nzero asm1
have "0 < b"
by auto
ultimately
show ?thesis
using b_bal
by auto
qed
qed
lemma HFail_valueChosen2:
assumes act: "HFail s s' q"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))" (is "?P s")
shows "?P s'"
proof(auto)
fix d
assume d: "d\<in>D"
with act asm4
show "b \<le> bal (disk s' d p)"
by(auto simp add: Fail_def)
fix d q
assume d: "d\<in>D"
and phase': "phase s' q = Suc 0"
and dblk_mbal: "b \<le> mbal (dblock s' q)"
and hasRead: "hasRead s' q d p"
from phase' act hasRead
have p31: "phase s q = 1"
and p32: "dblock s' q = dblock s q"
by(auto simp add: Fail_def InitializePhase_def
hasRead_def split : split_if_asm)
with dblk_mbal
have "b\<le>mbal(dblock s q)" by auto
moreover
from act hasRead
have "hasRead s q d p"
by(auto simp add: Fail_def InitializePhase_def
hasRead_def split: split_if_asm)
ultimately
have "\<exists>br\<in>blocksRead s q d. b\<le> bal(block br)"
using p31 asm4 d
by blast
with act hasRead
show "\<exists>br\<in>blocksRead s' q d. b\<le> bal(block br)"
by(auto simp add: Fail_def InitializePhase_def hasRead_def)
qed
theorem HFail_valueChosen:
assumes act: "HFail s s' q"
and vc: "valueChosen s v"
and v_input: "v \<in> Inputs"
shows "valueChosen s' v"
proof -
from vc
obtain b p D where
asm1: "b \<in> (UN p. Ballot p)"
and asm2: "D\<in>MajoritySet"
and asm3: "maxBalInp s b v"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
by(auto simp add: valueChosen_def)
from HFail_maxBalInp[OF act asm1 asm3]
have "maxBalInp s' b v" .
with HFail_valueChosen2[OF act asm4] asm1 asm2
show ?thesis
by(auto simp add: valueChosen_def)
qed
lemma HPhase0Read_maxBalInp:
assumes act: "HPhase0Read s s' q d"
and asm3: "maxBalInp s b v"
shows "maxBalInp s' b v"
proof(auto simp add: maxBalInp_def)
fix bk
assume bk: "bk \<in> allBlocks s'"
and b_bal: "b\<le> bal bk"
from subsetD[OF HPhase0Read_allBlocks[OF act] bk] asm3 b_bal
show "inp bk = v"
by(auto simp add: maxBalInp_def)
qed
lemma HPhase0Read_valueChosen2:
assumes act: "HPhase0Read s s' qq dd"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))" (is "?P s")
shows "?P s'"
proof(auto)
fix d
assume d: "d\<in>D"
with act asm4
show "b \<le> bal (disk s' d p)"
by(auto simp add: Phase0Read_def)
next
fix d q
assume d: "d\<in>D"
and phase': "phase s' q = Suc 0"
and dblk_mbal: "b \<le> mbal (dblock s' q)"
and hasRead: "hasRead s' q d p"
from phase' act
have qqnq: "qq\<noteq>q"
by(auto simp add: Phase0Read_def)
show "\<exists>br\<in>blocksRead s' q d. b \<le> bal (block br)"
proof -
from phase' act hasRead
have p31: "phase s q = 1"
and p32: "dblock s' q = dblock s q"
by(auto simp add: Phase0Read_def hasRead_def)
with dblk_mbal
have "b\<le>mbal(dblock s q)" by auto
moreover
from act hasRead qqnq
have "hasRead s q d p"
by(auto simp add: Phase0Read_def hasRead_def
split: split_if_asm)
ultimately
have "\<exists>br\<in>blocksRead s q d. b\<le> bal(block br)"
using p31 asm4 d
by blast
with act hasRead
show "\<exists>br\<in>blocksRead s' q d. b\<le> bal(block br)"
by(auto simp add: Phase0Read_def InitializePhase_def
hasRead_def)
qed
qed
theorem HPhase0Read_valueChosen:
assumes act: "HPhase0Read s s' q d"
and vc: "valueChosen s v"
and v_input: "v \<in> Inputs"
shows "valueChosen s' v"
proof -
from vc
obtain b p D where
asm1: "b \<in> (UN p. Ballot p)"
and asm2: "D\<in>MajoritySet"
and asm3: "maxBalInp s b v"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
by(auto simp add: valueChosen_def)
from HPhase0Read_maxBalInp[OF act asm3]
have "maxBalInp s' b v" .
with HPhase0Read_valueChosen2[OF act asm4] asm1 asm2
show ?thesis
by(auto simp add: valueChosen_def)
qed
lemma HEndPhase0_maxBalInp:
assumes act: "HEndPhase0 s s' q"
and asm3: "maxBalInp s b v"
and inv1: "Inv1 s"
shows "maxBalInp s' b v"
proof(auto simp add: maxBalInp_def)
fix bk
assume bk: "bk \<in> allBlocks s'"
and b_bal: "b\<le> bal bk"
from subsetD[OF HEndPhase0_allBlocks[OF act] bk]
show "inp bk = v"
proof
assume bk: "bk\<in>allBlocks s"
with asm3 b_bal
show ?thesis
by(auto simp add: maxBalInp_def)
next
assume bk: "bk\<in>{dblock s' q}"
with HEndPhase0_some[OF act inv1] act
have "\<exists>ba\<in>allBlocksRead s q. bal ba = bal (dblock s' q) \<and> inp ba = inp (dblock s' q)"
by(auto simp add: EndPhase0_def)
then obtain ba
where ba_blksread: "ba\<in>allBlocksRead s q"
and ba_balinp: "bal ba = bal (dblock s' q) \<and> inp ba = inp (dblock s' q)"
by auto
have "allBlocksRead s q \<subseteq> allBlocks s"
by(auto simp add: allBlocksRead_def allRdBlks_def
allBlocks_def blocksOf_def rdBy_def)
from subsetD[OF this ba_blksread] ba_balinp bk b_bal asm3
show ?thesis
by(auto simp add: maxBalInp_def)
qed
qed
lemma HEndPhase0_valueChosen2:
assumes act: "HEndPhase0 s s' q"
and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
\<and>(\<forall>q.( phase s q = 1
\<and> b \<le>mbal(dblock s q)
\<and> hasRead s q d p
) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))" (is "?P s")
shows "?P s'"
proof(auto)
fix d
assume d: "d\<in>D"
with act asm4
show "b \<le> bal (disk s' d p)"
by(auto simp add: EndPhase0_def)
fix d q
assume d: "d\<in>D"
and phase': "phase s' q = Suc 0"
and dblk_mbal: "b \<le> mbal (dblock s' q)"
and hasRead: "hasRead s' q d p"
from phase' act hasRead
have p31: "phase s q = 1"
and p32: "dblock s' q = dblock s q"
by(auto simp add: EndPhase0_def InitializePhase_def
hasRead_def split : split_if_asm)
with dblk_mbal
have "b\<le>mbal(dblock s q)" by auto
moreover
from act hasRead
have "hasRead s q d p"
by(auto simp add: EndPhase0_def InitializePhase_def
hasRead_def split: split_if_asm)
ultimately
have "\<exists>br\<in>blocksRead s q d. b\<le> bal(block br)"
using p31 asm4 d
by blast
with act hasRead
show "\<exists>br\<in>blocksRead s' q d. b\<le> bal(block br)"
by(auto simp add: EndPhase0_def InitializePhase_def
hasRead_def)
qed
(* Main preservation theorem: if value v has been chosen in state s,
   it remains chosen after processor q executes HEndPhase0.  The
   witnesses b, p and the majority set D extracted from valueChosen s v
   are reused; the two conjuncts are re-established by
   HEndPhase0_maxBalInp and HEndPhase0_valueChosen2 respectively. *)
theorem HEndPhase0_valueChosen:
  assumes act: "HEndPhase0 s s' q"
  and vc: "valueChosen s v"
  and v_input: "v \<in> Inputs"
  and inv1: "Inv1 s"
  shows "valueChosen s' v"
proof -
  (* Unfold valueChosen to obtain the ballot, processor and majority set. *)
  from vc
  obtain b p D where
    asm1: "b \<in> (UN p. Ballot p)"
  and asm2: "D\<in>MajoritySet"
  and asm3: "maxBalInp s b v"
  and asm4: "\<forall>d\<in>D. b \<le> bal(disk s d p)
            \<and>(\<forall>q.( phase s q = 1
            \<and> b \<le>mbal(dblock s q)
            \<and> hasRead s q d p
            ) \<longrightarrow> (\<exists>br\<in>blocksRead s q d. b \<le> bal(block br)))"
    by(auto simp add: valueChosen_def)
  from HEndPhase0_maxBalInp[OF act asm3 inv1]
  have "maxBalInp s' b v" .
  with HEndPhase0_valueChosen2[OF act asm4] asm1 asm2
  show ?thesis
    by(auto simp add: valueChosen_def)
qed
end
|
(* Sampling Semantics *)
(* For Importance Sampling, a sample consists of a value and a weight *)
Require Import Coq.Lists.List.
Require Import Coq.Logic.FunctionalExtensionality.
Require Import Coq.Lists.List.
Require Import Coq.Logic.FunctionalExtensionality.
Require Import LiftedTypes.
Require Import Weights.
Require Import MyReals. (* including times: Weight -> real -> real *)
Require Import Measures.
Require Import Types. (* including variables and environments *)
Require Import Syntax.
(* Semantic interpretation of object-language types for the sampling
   (importance-sampling) semantics:
   - a measurable base type denotes its set-theoretic meaning;
   - a function type denotes a map into a lifted (possibly bottom)
     codomain;
   - a measure type denotes a sampler: a map from a random seed (Unit)
     to a lifted pair of a sample and its importance weight. *)
Function den_typeS (t : Otype) : Type :=
  match t with
  | Stype m => (den_Mtype m)
  | Funtype t u => (den_typeS t) -> (lifted (den_typeS u))
  | Meastype m => Unit -> lifted ((den_Mtype m) * Weight)
  end.
(* Run two lifted computations in sequence and combine their results
   with f; bottom in either argument propagates via strict. *)
Function seq2 {A B C : Type}
         (d1 : lifted A)
         (d2 : lifted B)
         (f : (A -> B -> lifted C))
         : lifted C
  :=
  (strict d1 (fun a => (strict d2 (fun b => (f a b))))).
(* Monadic bind on samplers.  The incoming seed u is split in two:
   the left half drives the first sampler s, the right half drives the
   sampler produced by applying f to s's sample.  The two weights are
   multiplied, as required for importance sampling. *)
Function bindopS {a b : Mtype}
         (s : den_typeS (Meastype a))
         (f : den_typeS (Funtype (Stype a) (Meastype b)))
         : (den_typeS (Meastype b)) :=
  (fun (u:Unit) =>
     (strict2
        (s (left_half u))
        (fun a w1 =>
           (strict
              (f a)
              (fun s2 => (strict2
                            (s2 (right_half u))
                            (fun b w2 => (lift
                                            (b, (prod_weights w1 w2)))))))))).
Hint Unfold bindopS.
(* Monadic return: a sampler that ignores its seed and yields v with
   weight one. *)
Function returnopS {m} (v : den_typeS (Stype m)) : (den_typeS (Meastype m))
  := fun u => (lift (v, one_weight)).
Hint Unfold returnopS.
(* Uniform sampler on reals: the seed itself (via unit_real) is the
   sample, with weight one. *)
Definition uniformopS : (den_typeS (Meastype Real_type))
  := fun u => (lift ((unit_real u), one_weight)).
Hint Unfold uniformopS.
(* distvalS p: samples like uniformopS but weights the sample by
   density_1 p, i.e. importance sampling with a uniform proposal. *)
Definition distvalS : real -> (den_typeS (Meastype Real_type))
  := fun p u => (lift ((unit_real u), (density_1 p (unit_real u)))).
Hint Unfold distvalS.
(* obsvalS r x: an observation yields no value (tt at the empty
   measurable type) but scales the weight by the density of x under r. *)
Definition obsvalS : (den_typeS (Stype Real_type)) ->
                     (den_typeS (Stype Real_type)) ->
                     (den_typeS (Meastype Empty_type)) :=
  fun r x => fun u => lift (tt, (density_1 r x)).
(* Coercion, needed to make den_val typecheck *)
Function coerce_realS (w : real) : (den_typeS (Stype Real_type)) := w.
Hint Unfold coerce_realS.
(* Denotations of Values and Expressions in the Sampling Semantics*)
(* den_valS and den_expS are defined by mutual recursion over the
   intrinsically-typed syntax: a value of type o denotes an element of
   den_typeS o directly, while an expression denotes a lifted element
   (it may fail to terminate).  r is the semantic environment for the
   typing context G; effectful constructs are mapped to the sampler
   combinators defined above. *)
Function den_valS {G o} (val : Val G o) (r : (@den_env den_typeS G))
         : (den_typeS o)
  :=
  match val with
  | constexp w => (coerce_realS w)
  | varexp var => (apply_env r var)
  | absexp e => (fun val => den_expS e (cons_env val r))
  | uniformval => uniformopS
  | distval v => distvalS (den_valS v r)
  | bindval s f => bindopS (den_valS s r) (den_valS f r)
  | returnval v => returnopS (den_valS v r)
  | prodval v1 v2 => ((den_valS v1 r), (den_valS v2 r))
  | obsval v1 v2 => obsvalS (den_valS v1 r) (den_valS v2 r)
  | unitval => tt
  end
with
(* Expressions: sequencing of subexpression effects is handled by
   seq2/strict, so bottom propagates left to right. *)
den_expS {G o} (exp : (Exp G o)) (r : (den_env G)) : (lifted (den_typeS o))
  :=
  match exp with
  | valexp val => lift (den_valS val r)
  | prodexp e1 e2 => seq2 (den_expS e1 r) (den_expS e2 r)
                          (fun v1 v2 => (lift (v1, v2)))
  | proj1exp e => strict (den_expS e r) (fun p => (lift (fst p)))
  | proj2exp e => strict (den_expS e r) (fun p => (lift (snd p)))
  | appexp e1 e2 => seq2 (den_expS e1 r) (den_expS e2 r) (fun f a => (f a))
  | returnexp e1 => strict (den_expS e1 r)
                           (fun a => (lift (returnopS a)))
  | bindexp e1 e2 => seq2 (den_expS e1 r) (den_expS e2 r)
                          (fun s f => (lift (bindopS s f)))
  end.
|
(*
* @TAG(OTHER_LGPL)
*)
(* Author: Norbert Schirmer
Maintainer: Norbert Schirmer, norbert.schirmer at web de
License: LGPL
*)
(* Title: Heap.thy
Author: Norbert Schirmer, TU Muenchen
Copyright (C) 2004-2008 Norbert Schirmer
Some rights reserved, TU Muenchen
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
*)
theory Simpl_Heap
imports Main
begin

subsection "References"

(* The carrier set of references: a copy of the natural numbers. *)
definition "ref = (UNIV::nat set)"

typedef ref = ref by (simp add: ref_def)

code_type ref
  (SML "int")

(* Every finite set of naturals has a strict upper bound (by induction
   on the finite set, taking Suc (max k m) at each insertion). *)
lemma finite_nat_ex_max:
  assumes fin: "finite (N::nat set)"
  shows "\<exists>m. \<forall>n\<in>N. n < m"
using fin
proof (induct)
  case empty
  show ?case by auto
next
  case (insert k N)
  have "\<exists>m. \<forall>n\<in>N. n < m" by fact
  then obtain m where m_max: "\<forall>n\<in>N. n < m"..
  show "\<exists>m. \<forall>n\<in>insert k N. n < m"
  proof (rule exI [where x="Suc (max k m)"])
  qed (insert m_max, auto simp add: max_def)
qed

(* The naturals are infinite: a finite universe would have an upper
   bound, but the bound itself is a natural number — contradiction. *)
lemma infinite_nat: "\<not>finite (UNIV::nat set)"
proof
  assume fin: "finite (UNIV::nat set)"
  then obtain m::nat where "\<forall>n\<in>UNIV. n < m"
    by (rule finite_nat_ex_max [elim_format] ) auto
  moreover have "m\<in>UNIV"..
  ultimately show False by blast
qed

(* Hence the reference type is infinite: Rep_ref maps it onto the
   (infinite) set ref of all naturals. *)
lemma infinite_ref [simp,intro]: "\<not>finite (UNIV::ref set)"
proof
  assume "finite (UNIV::ref set)"
  hence "finite (range Rep_ref)"
    by simp
  moreover
  have "range Rep_ref = ref"
  proof
    show "range Rep_ref \<subseteq> ref"
      by (simp add: ref_def)
  next
    show "ref \<subseteq> range Rep_ref"
    proof
      fix x
      assume x: "x \<in> ref"
      show "x \<in> range Rep_ref"
        by (rule Rep_ref_induct) (auto simp add: ref_def)
    qed
  qed
  ultimately have "finite ref"
    by simp
  thus False
    by (simp add: ref_def infinite_nat)
qed

(* Null is deliberately left underspecified; new A picks, via Hilbert
   choice, some reference that is neither Null nor in A.  Such a
   reference exists whenever A is finite, since ref is infinite. *)
consts Null :: ref

definition new :: "ref set \<Rightarrow> ref" where
  "new A = (SOME a. a \<notin> {Null} \<union> A)"

text {*
Constant @{const "Null"} can be defined later on. Conceptually
@{const "Null"} and @{const "new"} are @{text "fixes"} of a locale
with @{prop "finite A \<Longrightarrow> new A \<notin> A \<union> {Null}"}. But since definitions
relative to a locale do not yet work in Isabelle2005 we use this
workaround to avoid lots of parameters in definitions.
*}

(* Freshness of new: for finite A the chosen reference avoids A ... *)
lemma new_notin [simp,intro]:
  "finite A \<Longrightarrow> new (A) \<notin> A"
  apply (unfold new_def)
  apply (rule someI2_ex)
  apply  (fastforce intro: ex_new_if_finite)
  apply simp
  done

(* ... and is distinct from Null. *)
lemma new_not_Null [simp,intro]:
  "finite A \<Longrightarrow> new (A) \<noteq> Null"
  apply (unfold new_def)
  apply (rule someI2_ex)
  apply  (fastforce intro: ex_new_if_finite)
  apply simp
  done

end
|
-- Right-associative, lowest-precedence arrow on type constructors.
-- NOTE(review): only the fixity declaration and type signature are
-- visible here; presumably (f --> g) denotes transformations from
-- functor f to functor g (something like {a : Type} -> f a -> g a) --
-- confirm against the implementation, which is defined elsewhere.
infixr 0 -->
(-->) : (Type -> Type) -> (Type -> Type) -> Type
|
\<^marker>\<open>creator "Kevin Kappelmann"\<close>
section \<open>Subset\<close>
theory Subset
  imports Basic
begin

(* Introduction/elimination/transitivity rules for the subset relation,
   attributed ([intro!], [dest], [trans], [iff]) for use by the
   automated tools. *)
lemma subsetI [intro!]: "(\<And>x. x \<in> A \<Longrightarrow> x \<in> B) \<Longrightarrow> A \<subseteq> B"
  unfolding subset_def by simp

lemma subsetD [dest, trans]: "\<lbrakk>A \<subseteq> B; a \<in> A\<rbrakk> \<Longrightarrow> a \<in> B"
  unfolding subset_def by blast

(* Reflexivity and interaction with the empty set. *)
lemma subset_self [iff]: "A \<subseteq> A" by blast

lemma empty_subset [iff]: "{} \<subseteq> A" by blast

lemma subset_empty_iff [iff]: "A \<subseteq> {} \<longleftrightarrow> A = {}" by blast

(* Contrapositive membership transfer along a subset, and transitivity. *)
lemma not_mem_if_subset_if_not_mem [trans]: "\<lbrakk>a \<notin> B; A \<subseteq> B\<rbrakk> \<Longrightarrow> a \<notin> A"
  by blast

lemma subset_if_subset_if_subset [trans]: "\<lbrakk>A \<subseteq> B; B \<subseteq> C\<rbrakk> \<Longrightarrow> A \<subseteq> C"
  by blast

lemma subsetCE [elim]:
  assumes "A \<subseteq> B"
  obtains "a \<notin> A" | "a \<in> B"
  using assms by auto

subsection \<open>Strict Subsets\<close>

(* A strict subset is a subset that is not equal to the superset. *)
definition "ssubset A B \<equiv> A \<subseteq> B \<and> A \<noteq> B"

(* Bundles allow callers to switch the \<subset> notation on and off.
   NOTE(review): the second bundle is named no_hotg_xsubset_syntax,
   not no_hotg_ssubset_syntax — possibly a typo, but renaming would
   break any existing uses, so it is left as is. *)
bundle hotg_ssubset_syntax begin notation ssubset (infixl "\<subset>" 50) end
bundle no_hotg_xsubset_syntax begin no_notation ssubset (infixl "\<subset>" 50) end
unbundle hotg_ssubset_syntax

lemma ssubsetI [intro]:
  assumes "A \<subseteq> B"
  and "A \<noteq> B"
  shows "A \<subset> B"
  unfolding ssubset_def using assms by blast

lemma ssubsetE [elim]:
  assumes "A \<subset> B"
  obtains "A \<subseteq> B" "A \<noteq> B"
  using assms unfolding ssubset_def by blast

end
|
module Core.InitPrimitives

import Compiler.CompileExpr
import Core.Context
import Core.Primitives

%default covering

-- Register a single primitive operation: install it as a builtin in the
-- context under its operator name, then compile its definition so the
-- code generator can use it.
addPrim : {auto c : Ref Ctxt Defs} ->
          Prim -> Core ()
addPrim p
    = do addBuiltin (opName (fn p)) (type p) (totality p) (fn p)
         compileDef (opName (fn p))

-- Register every primitive in allPrimitives with the current context.
export
addPrimitives : {auto c : Ref Ctxt Defs} -> Core ()
addPrimitives
    = traverse_ addPrim allPrimitives
|
import numpy as np
def list_to_mat(data, dims):
    """Scatter (row, col, value) observations into dense matrices.

    Parameters
    ----------
    data : ndarray
        Observation rows; the first two columns hold (possibly float)
        row/column indices and the last column holds the observed value.
    dims : tuple
        (m, n) shape of the output matrices.

    Returns
    -------
    tuple of ndarray
        ``(values, mask)`` where ``values[i, j]`` is the observation at
        (i, j) (later rows overwrite earlier ones at the same index) and
        ``mask[i, j]`` is 1 where an observation exists, 0 elsewhere.
    """
    m, n = dims
    values = np.zeros((m, n))
    mask = np.zeros((m, n))
    for row in data:
        i, j = int(row[0]), int(row[1])
        values[i, j] = row[-1]
        mask[i, j] = 1
    return values, mask
def predict(test_set, U, V):
    """Evaluate the factorization ``U @ V`` on held-out entries.

    Parameters
    ----------
    test_set : ndarray
        Rows of (i, j, value); the first two columns index into U and V
        and the last column is the observed value.
    U : ndarray of shape (m, d)
        Row-factor matrix.
    V : ndarray of shape (d, n)
        Column-factor matrix.

    Returns
    -------
    tuple
        ``(err, frac)`` where ``err`` is the root of the *sum* (not
        mean) of squared prediction errors and ``frac`` is the fraction
        of predictions whose absolute error is at least 1.
    """
    i_obs = test_set[:, 0].astype('int')
    j_obs = test_set[:, 1].astype('int')
    # Predicted entries: row-wise dot products U[i, :] . V[:, j].
    UV_obs = np.sum(U[i_obs, :] * V[:, j_obs].T, axis=1)
    diff = (test_set[:, -1] - UV_obs)
    # Number of predictions that miss by one rating point or more.
    count = np.sum(np.abs(diff) >= 1)
    return (np.sum(diff ** 2) ** 0.5, count / len(i_obs))
def log_joint(U, V, X_list, gamma_U_params, gamma_V_params):
    """Poisson log-likelihood part of the model's log joint density.

    Parameters
    ----------
    U : ndarray of shape (m, d)
        Row-factor matrix.
    V : ndarray of shape (d, n)
        Column-factor matrix.
    X_list : ndarray
        Observations as rows of (i, j, count); the first two columns
        index into U and V and the last column is the observed count.
    gamma_U_params, gamma_V_params : dict
        Gamma-prior hyperparameters with keys 'a' and 'b'.

    Returns
    -------
    float
        ``sum_obs x_ij * log((U V)_ij) - (U V)_ij``, i.e. the Poisson
        log-likelihood up to the constant -log(x!) terms.

    Notes
    -----
    The Gamma hyperparameters and factor shapes are read but not yet
    folded into the returned value; presumably the prior terms are
    added elsewhere or remain to be implemented -- TODO confirm.
    """
    m, d = np.shape(U)   # currently unused beyond shape validation
    _, n = np.shape(V)   # currently unused beyond shape validation
    A_u, B_u = gamma_U_params['a'], gamma_U_params['b']
    # BUG FIX: B_v previously read gamma_U_params['b'] instead of
    # gamma_V_params['b'].
    A_v, B_v = gamma_V_params['a'], gamma_V_params['b']
    i_obs = X_list[:, 0].astype('int')
    j_obs = X_list[:, 1].astype('int')
    # Predicted Poisson rates for the observed entries: (U V)_ij.
    rel_UV = np.sum(U[i_obs, :] * V[:, j_obs].T, axis=1)
    pt_poisson = np.sum(X_list[:, 2] * np.log(rel_UV) - rel_UV)
    return pt_poisson
|
[STATEMENT]
lemma map_filter_simps [code]:
"map_filter f (x # xs) = (case f x of None \<Rightarrow> map_filter f xs | Some y \<Rightarrow> y # map_filter f xs)"
"map_filter f [] = []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_filter f (x # xs) = (case f x of None \<Rightarrow> map_filter f xs | Some y \<Rightarrow> y # map_filter f xs) &&& map_filter f [] = []
[PROOF STEP]
by (simp_all add: map_filter_def split: option.split) |
[GOAL]
α : Type u_1
t : α
ts is : List α
⊢ (invImage (fun a => PSigma.casesOn a fun ts snd => (length ts + length snd, length ts))
Prod.instWellFoundedRelationProd).1
{ fst := ts, snd := t :: is } { fst := t :: ts, snd := is }
[PROOFSTEP]
simp_wf
[GOAL]
α : Type u_1
t : α
ts is : List α
⊢ Prod.Lex (fun a₁ a₂ => a₁ < a₂) (fun a₁ a₂ => a₁ < a₂) (length ts + succ (length is), length ts)
(succ (length ts) + length is, succ (length ts))
[PROOFSTEP]
simp [Nat.succ_add]
[GOAL]
α : Type u_1
t : α
ts is : List α
⊢ Prod.Lex (fun a₁ a₂ => a₁ < a₂) (fun a₁ a₂ => a₁ < a₂) (length ts + succ (length is), length ts)
(succ (length ts + length is), succ (length ts))
[PROOFSTEP]
decreasing_tactic
[GOAL]
α : Type u_1
t : α
ts is : List α
⊢ (invImage (fun a => PSigma.casesOn a fun ts snd => (length ts + length snd, length ts))
Prod.instWellFoundedRelationProd).1
{ fst := is, snd := [] } { fst := t :: ts, snd := is }
[PROOFSTEP]
simp_wf
[GOAL]
α : Type u_1
t : α
ts is : List α
⊢ Prod.Lex (fun a₁ a₂ => a₁ < a₂) (fun a₁ a₂ => a₁ < a₂) (length is, length is)
(succ (length ts) + length is, succ (length ts))
[PROOFSTEP]
simp [Nat.succ_add]
[GOAL]
α : Type u_1
t : α
ts is : List α
⊢ Prod.Lex (fun a₁ a₂ => a₁ < a₂) (fun a₁ a₂ => a₁ < a₂) (length is, length is)
(succ (length ts + length is), succ (length ts))
[PROOFSTEP]
decreasing_tactic
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a : α
l : List α
⊢ Decidable (Chain R a l)
[PROOFSTEP]
induction l generalizing a with
| nil => simp only [List.Chain.nil]; infer_instance
| cons a as ih => haveI := ih; simp only [List.chain_cons]; infer_instance
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a : α
l : List α
⊢ Decidable (Chain R a l)
[PROOFSTEP]
induction l generalizing a with
| nil => simp only [List.Chain.nil]; infer_instance
| cons a as ih => haveI := ih; simp only [List.chain_cons]; infer_instance
[GOAL]
case nil
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a : α
⊢ Decidable (Chain R a [])
[PROOFSTEP]
| nil => simp only [List.Chain.nil]; infer_instance
[GOAL]
case nil
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a : α
⊢ Decidable (Chain R a [])
[PROOFSTEP]
simp only [List.Chain.nil]
[GOAL]
case nil
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a : α
⊢ Decidable True
[PROOFSTEP]
infer_instance
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a✝ : α
as : List α
ih : (a : α) → Decidable (Chain R a as)
a : α
⊢ Decidable (Chain R a (a✝ :: as))
[PROOFSTEP]
| cons a as ih => haveI := ih; simp only [List.chain_cons]; infer_instance
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a✝ : α
as : List α
ih : (a : α) → Decidable (Chain R a as)
a : α
⊢ Decidable (Chain R a (a✝ :: as))
[PROOFSTEP]
haveI := ih
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a✝ : α
as : List α
ih : (a : α) → Decidable (Chain R a as)
a : α
this : (a : α) → Decidable (Chain R a as)
⊢ Decidable (Chain R a (a✝ :: as))
[PROOFSTEP]
simp only [List.chain_cons]
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
a✝ : α
as : List α
ih : (a : α) → Decidable (Chain R a as)
a : α
this : (a : α) → Decidable (Chain R a as)
⊢ Decidable (R a a✝ ∧ Chain R a✝ as)
[PROOFSTEP]
infer_instance
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
l : List α
⊢ Decidable (Chain' R l)
[PROOFSTEP]
cases l
[GOAL]
case nil
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
⊢ Decidable (Chain' R [])
[PROOFSTEP]
dsimp only [List.Chain']
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
head✝ : α
tail✝ : List α
⊢ Decidable (Chain' R (head✝ :: tail✝))
[PROOFSTEP]
dsimp only [List.Chain']
[GOAL]
case nil
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
⊢ Decidable True
[PROOFSTEP]
infer_instance
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ε : Type u_5
ζ : Type u_6
R : α → α → Prop
inst✝ : DecidableRel R
head✝ : α
tail✝ : List α
⊢ Decidable (Chain R head✝ tail✝)
[PROOFSTEP]
infer_instance
|
function value = mm_header_check ( id, type, rep, field, symm )

%*****************************************************************************80
%
%% MM_HEADER_CHECK checks the header strings for a Matrix Market file.
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    02 May 2004
%
%  Author:
%
%    John Burkardt
%
%  Parameters:
%
%    Input, character ( len = 14 ) ID, the Matrix Market identifier.
%    This value must be '%%MatrixMarket'.
%
%    Input, character ( len = 6 ) TYPE, the Matrix Market type.
%    This value must be 'matrix'.
%
%    Input, character ( len = 10 ) REP, the Matrix Market 'representation'
%    indicator.  Possible values include:
%      'coordinate'   (for sparse data)
%      'array'        (for dense data)
%      'elemental'    (to be added)
%
%    Input, character ( len = 7 ) FIELD, the Matrix Market 'field'.
%    Possible values include:
%      'real'
%      'double'
%      'complex'
%      'integer'
%      'pattern' (for REP = 'coordinate' only)
%
%    Input, character ( len = 19 ) SYMM, the Matrix Market symmetry.
%    Possible values include:
%      'symmetric'
%      'hermitian'
%      'skew-symmetric'
%      'general'
%
%    Output, logical VALUE, is TRUE if the header checks out.
%
%  Note (review): despite the "Fatal error!" wording in the diagnostics,
%  this function never calls error(); it prints the message and returns
%  VALUE = FALSE (0).  The comparisons s_eqi / s_neqi are external
%  helpers, presumably case-insensitive string equality / inequality --
%  confirm against their definitions.
%
  FALSE = 0;
  TRUE = 1;
%
%  Test the input qualifiers.
%
%  The banner must literally be '%%MatrixMarket'.
%
  if ( s_neqi ( id, '%%MatrixMarket' ) )
    fprintf ( 1, '\n' );
    fprintf ( 1, 'MM_HEADER_CHECK - Fatal error!\n' );
    fprintf ( 1, '  The value of ID was illegal:\n' );
    fprintf ( 1, '  "%s".\n', id );
    fprintf ( 1, '  Legal values are:\n' );
    fprintf ( 1, '  "%%MatrixMarket"\n' );
    value = FALSE;
    return;
  end
%
%  Only the 'matrix' object type is supported.
%
  if ( s_neqi ( type, 'matrix' ) )
    fprintf ( 1, '\n' );
    fprintf ( 1, 'MM_HEADER_CHECK - Fatal error!\n' );
    fprintf ( 1, '  The value of TYPE was illegal:\n' );
    fprintf ( 1, '  "%s".\n', type );
    fprintf ( 1, '  Legal values are:\n' );
    fprintf ( 1, '  "matrix"\n' );
    value = FALSE;
    return
  end
%
%  Note: 'elemental' is documented above but not yet accepted here.
%
  if ( ...
    s_neqi ( rep, 'coordinate' ) & ...
    s_neqi ( rep, 'array' ) )
    fprintf ( 1, '\n' );
    fprintf ( 1, 'MM_HEADER_CHECK - Fatal error!\n' );
    fprintf ( 1, '  The value of REP was illegal:\n' );
    fprintf ( 1, '  "%s".\n', rep );
    fprintf ( 1, '  Legal values are:\n' );
    fprintf ( 1, '  "array"\n' );
    fprintf ( 1, '  "coordinate"\n' );
    value = FALSE;
    return
  end
%
%  The legal FIELD values depend on REP: 'pattern' is only allowed
%  for the sparse 'coordinate' representation.
%
  if ( s_eqi ( rep, 'coordinate' ) )

    if ( ...
      s_neqi ( field, 'integer' ) & ...
      s_neqi ( field, 'real' ) & ...
      s_neqi ( field, 'double' ) & ...
      s_neqi ( field, 'complex' ) & ...
      s_neqi ( field, 'pattern' ) )
      fprintf ( 1, '\n' );
      fprintf ( 1, 'MM_HEADER_CHECK - Fatal error!\n' );
      fprintf ( 1, '  The value of FIELD was illegal:\n' );
      fprintf ( 1, '  "%s".\n', field );
      value = FALSE;
      return
    end

  elseif ( s_eqi ( rep, 'array' ) )

    if ( ...
      s_neqi ( field, 'integer' ) & ...
      s_neqi ( field, 'real' ) & ...
      s_neqi ( field, 'double' ) & ...
      s_neqi ( field, 'complex' ) )
      fprintf ( 1, '\n' );
      fprintf ( 1, 'MM_HEADER_CHECK - Fatal error!\n' );
      fprintf ( 1, '  The value of FIELD was illegal:\n' );
      fprintf ( 1, '  "%s".\n', field );
      value = FALSE;
      return
    end

  end
%
%  Finally, validate the symmetry indicator.
%
  if ( ...
    s_neqi ( symm, 'general' ) & ...
    s_neqi ( symm, 'symmetric' ) & ...
    s_neqi ( symm, 'hermitian' ) & ...
    s_neqi ( symm, 'skew-symmetric' ) )
    fprintf ( 1, '\n' );
    fprintf ( 1, 'MM_HEADER_CHECK - Fatal error!\n' );
    fprintf ( 1, '  The value of SYMM was illegal:\n' );
    fprintf ( 1, '  "%s".\n', symm );
    value = FALSE;
    return
  end

  value = TRUE;

  return
end
|
The Remix is a remix album by American recording artist Lady Gaga. Released in Japan on March 3, 2010, it contains remixes of the songs from her first studio album, The Fame (2008), and her third extended play, The Fame Monster (2009). A revised version of the track list was prepared for release in additional markets, beginning with Mexico on May 3, 2010. A number of recording artists have produced the songs, including Pet Shop Boys, Passion Pit and The Sound of Arrows. The remixed versions feature both uptempo and downtempo compositions, with altered vocals from Gaga.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.