(* Title: HOL/Auth/Guard/Guard_Public.thy
Author: Frederic Blanqui, University of Cambridge Computer Laboratory
Copyright 2002 University of Cambridge
Lemmas on guarded messages for public protocols.
*)
theory Guard_Public imports Guard "../Public" Extensions begin
subsection{*Extensions to Theory @{text Public}*}
declare initState.simps [simp del]
subsubsection{*signature*}
definition sign :: "agent => msg => msg" where
"sign A X == {|Agent A, X, Crypt (priK A) (Hash X)|}"
lemma sign_inj [iff]: "(sign A X = sign A' X') = (A=A' & X=X')"
by (auto simp: sign_def)
subsubsection{*agent associated to a key*}
definition agt :: "key => agent" where
"agt K == @A. K = priK A | K = pubK A"
lemma agt_priK [simp]: "agt (priK A) = A"
by (simp add: agt_def)
lemma agt_pubK [simp]: "agt (pubK A) = A"
by (simp add: agt_def)
subsubsection{*basic facts about @{term initState}*}
lemma no_Crypt_in_parts_init [simp]: "Crypt K X ~:parts (initState A)"
by (cases A, auto simp: initState.simps)
lemma no_Crypt_in_analz_init [simp]: "Crypt K X ~:analz (initState A)"
by auto
lemma no_priK_in_analz_init [simp]: "A ~:bad
==> Key (priK A) ~:analz (initState Spy)"
by (auto simp: initState.simps)
lemma priK_notin_initState_Friend [simp]: "A ~= Friend C
==> Key (priK A) ~: parts (initState (Friend C))"
by (auto simp: initState.simps)
lemma keyset_init [iff]: "keyset (initState A)"
by (cases A, auto simp: keyset_def initState.simps)
subsubsection{*sets of private keys*}
definition priK_set :: "key set => bool" where
"priK_set Ks == ALL K. K:Ks --> (EX A. K = priK A)"
lemma in_priK_set: "[| priK_set Ks; K:Ks |] ==> EX A. K = priK A"
by (simp add: priK_set_def)
lemma priK_set1 [iff]: "priK_set {priK A}"
by (simp add: priK_set_def)
lemma priK_set2 [iff]: "priK_set {priK A, priK B}"
by (simp add: priK_set_def)
subsubsection{*sets of good keys*}
definition good :: "key set => bool" where
"good Ks == ALL K. K:Ks --> agt K ~:bad"
lemma in_good: "[| good Ks; K:Ks |] ==> agt K ~:bad"
by (simp add: good_def)
lemma good1 [simp]: "A ~:bad ==> good {priK A}"
by (simp add: good_def)
lemma good2 [simp]: "[| A ~:bad; B ~:bad |] ==> good {priK A, priK B}"
by (simp add: good_def)
subsubsection{*greatest nonce used in a trace, 0 if there is no nonce*}
primrec greatest :: "event list => nat"
where
"greatest [] = 0"
| "greatest (ev # evs) = max (greatest_msg (msg ev)) (greatest evs)"
lemma greatest_is_greatest: "Nonce n:used evs ==> n <= greatest evs"
apply (induct evs, auto simp: initState.simps)
apply (drule used_sub_parts_used, safe)
apply (drule greatest_msg_is_greatest, arith)
by simp
subsubsection{*function giving a new nonce*}
definition new :: "event list => nat" where
"new evs == Suc (greatest evs)"
lemma new_isnt_used [iff]: "Nonce (new evs) ~:used evs"
by (clarify, drule greatest_is_greatest, auto simp: new_def)
subsection{*Proofs About Guarded Messages*}
subsubsection{*small hack necessary because priK is defined as the inverse of pubK*}
lemma pubK_is_invKey_priK: "pubK A = invKey (priK A)"
by simp
lemmas pubK_is_invKey_priK_substI = pubK_is_invKey_priK [THEN ssubst]
lemmas invKey_invKey_substI = invKey [THEN ssubst]
lemma "Nonce n:parts {X} ==> Crypt (pubK A) X:guard n {priK A}"
apply (rule pubK_is_invKey_priK_substI, rule invKey_invKey_substI)
by (rule Guard_Nonce, simp+)
subsubsection{*guardedness results*}
lemma sign_guard [intro]: "X:guard n Ks ==> sign A X:guard n Ks"
by (auto simp: sign_def)
lemma Guard_init [iff]: "Guard n Ks (initState B)"
by (induct B, auto simp: Guard_def initState.simps)
lemma Guard_knows_max': "Guard n Ks (knows_max' C evs)
==> Guard n Ks (knows_max C evs)"
by (simp add: knows_max_def)
lemma Nonce_not_used_Guard_spies [dest]: "Nonce n ~:used evs
==> Guard n Ks (spies evs)"
by (auto simp: Guard_def dest: not_used_not_known parts_sub)
lemma Nonce_not_used_Guard [dest]: "[| evs:p; Nonce n ~:used evs;
Gets_correct p; one_step p |] ==> Guard n Ks (knows (Friend C) evs)"
by (auto simp: Guard_def dest: known_used parts_trans)
lemma Nonce_not_used_Guard_max [dest]: "[| evs:p; Nonce n ~:used evs;
Gets_correct p; one_step p |] ==> Guard n Ks (knows_max (Friend C) evs)"
by (auto simp: Guard_def dest: known_max_used parts_trans)
lemma Nonce_not_used_Guard_max' [dest]: "[| evs:p; Nonce n ~:used evs;
Gets_correct p; one_step p |] ==> Guard n Ks (knows_max' (Friend C) evs)"
apply (rule_tac H="knows_max (Friend C) evs" in Guard_mono)
by (auto simp: knows_max_def)
subsubsection{*regular protocols*}
definition regular :: "event list set => bool" where
"regular p == ALL evs A. evs:p --> (Key (priK A):parts (spies evs)) = (A:bad)"
lemma priK_parts_iff_bad [simp]: "[| evs:p; regular p |] ==>
(Key (priK A):parts (spies evs)) = (A:bad)"
by (auto simp: regular_def)
lemma priK_analz_iff_bad [simp]: "[| evs:p; regular p |] ==>
(Key (priK A):analz (spies evs)) = (A:bad)"
by auto
lemma Guard_Nonce_analz: "[| Guard n Ks (spies evs); evs:p;
priK_set Ks; good Ks; regular p |] ==> Nonce n ~:analz (spies evs)"
apply (clarify, simp only: knows_decomp)
apply (drule Guard_invKey_keyset, simp+, safe)
apply (drule in_good, simp)
apply (drule in_priK_set, simp+, clarify)
apply (frule_tac A=A in priK_analz_iff_bad)
by (simp add: knows_decomp)+
end
|
```python
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
```
### Extracting information about sequence quality and enrichment
### Enrichment
+ Often want to compare two datasets (tissue 1 vs. tissue 2; -drug vs. +drug; etc.)
+ Done by taking ratio of counts for sequences between data sets
$$f_{seq} = \frac{C_{seq}}{C_{total}}$$
The normalized frequency of a sequence, $f_{seq}$, is the number of counts of that sequence, $C_{seq}$, divided by the total number of counts in the data set, $C_{total}$.
The enrichment of the sequence in dataset 2 vs. dataset 1 is given by:
$$E_{seq} = \frac{f_{seq,2}}{f_{seq,1}}$$
where $f_{seq,1}$ and $f_{seq,2}$ are the normalized frequencies of the sequence in dataset 1 and 2.
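As a minimal sketch of these two formulas (the count dictionaries below are made-up numbers, not data from this lesson):
```python
# Hypothetical raw counts for three sequences in two datasets
counts_1 = {"GATTACA": 120, "CCGGTT": 30, "ATATAT": 50}
counts_2 = {"GATTACA": 400, "CCGGTT": 25, "ATATAT": 75}

def frequencies(counts):
    """Normalize raw counts: f_seq = C_seq/C_total."""
    total = sum(counts.values())
    return {seq: c/total for seq, c in counts.items()}

f1 = frequencies(counts_1)
f2 = frequencies(counts_2)

# Enrichment of each sequence in dataset 2 relative to dataset 1
enrichment = {seq: f2[seq]/f1[seq] for seq in f1}
enrichment
```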
### How do we decide which sequences are high enough quality to include?
### For each cluster, you get a sequence of colors representing the sequence
+ Some bases are read well, others are ambiguous.
### The "Phred" score measures confidence in the base "call":
\begin{equation}
Q = -10\log_{10}(p) \qquad \qquad \text{$(1)$}
\end{equation}
where $p$ is the probability that the call is **wrong**.
By rearranging Eq. $(1)$ above, we get:
\begin{equation}
p = 10^{- \frac{Q}{10}} \qquad \qquad \text{$(2)$}
\end{equation}
+ Create a plot of Q vs. p. Is a high "Q" good or bad?
```python
```
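One possible sketch of that plot, using the `numpy` and `matplotlib` imports from the first cell:
```python
Q = np.arange(0, 43)   # Phred scores 0-42
p = 10**(-Q/10)        # probability that the base call is wrong

plt.plot(Q, p, "o-")
plt.xlabel("Q (Phred score)")
plt.ylabel("p (probability call is wrong)")
plt.yscale("log")
plt.show()
```
A high Q corresponds to a small probability that the call is wrong, so a high Q is good.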
#### Phred scores are encoded in last line:
```
@SRR001666.1 071112_SLXA-EAS1_s_7:5:1:817:345 length=60
GGGTGATGGCCGCTGCCGATGGCGTCAAATCCCACCAAGTTACCCTTAACAACTTAAGGG
+SRR001666.1 071112_SLXA-EAS1_s_7:5:1:817:345 length=60
IIIIIIIIIIIIIIIIIIIIIIIIIIIIII9IG9ICIIIIIIIIIIIIIIIIIIIIDIII
```
### Encoding goes like
|Letter | ASCII | $Q$ | $p$ |
|:-----:|:-----:|:---:| -------:|
| `!` | 33 | 0 | 1.00000 |
| `"` | 34 | 1 | 0.79433 |
| `#` | 35 | 2 | 0.63096 |
| ... | ... | ... | ... |
| `J` | 74 | 41 | 0.00008 |
| `K` | 75 | 42 | 0.00006 |
### python `chr` command converts integer ASCII to character
```python
print(chr(33))
print(chr(34))
print(chr(35))
print("...")
print(chr(74))
print(chr(75))
```
!
"
#
...
J
K
### Can create dictionary that converts letters to quality scores
Recall that
\begin{equation}
p = 10^{- \frac{Q}{10}} \qquad \qquad \text{$(2)$}
\end{equation}
```python
Q_dict = {}
p_dict = {}
for i in range(33,76):
    Q_dict[chr(i)] = i-33
    p_dict[chr(i)] = 10**(-(Q_dict[chr(i)])/10.)
p_dict["K"]
```
6.309573444801929e-05
### Example
$$p_{correct} = \prod_{i=1}^{L} (1-p_i)$$
where $i$ indexes positions along the sequence, $L$ is the sequence length, and $p_i$ is the probability that the base call at position $i$ is wrong.
```python
qual_string = "IIIIIIIIIIIIIIIIIIIIIIIIIIIIII9IG9ICIIIIIIIIIIIIIIIIIIIIDIII"
p_correct = 1.0
for q in qual_string:
    p_correct = p_correct*(1-p_dict[q])
print(p_correct)
```
0.9857512130454114
### Modify this code to pull out the $p_{correct}$ for each sequence
```python
import gzip
get_line = False
seqs = {}
with gzip.open("files/example.fastq.gz") as f:
    for l in f:
        l_ascii = l.decode("ascii")
        if l_ascii[0] == "@":
            get_line = True
            continue
        if get_line:
            try:
                seqs[l_ascii.strip()] += 1
            except KeyError:
                seqs[l_ascii.strip()] = 1
            get_line = False
```
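One possible modification (a sketch that assumes the same `files/example.fastq.gz` file and the `p_dict` built earlier, and treats each FASTQ record as exactly four lines with the quality string on the fourth line):
```python
import gzip

p_correct_by_seq = {}
with gzip.open("files/example.fastq.gz") as f:
    lines = [l.decode("ascii").strip() for l in f]

# FASTQ records are four lines each: header, sequence, "+", quality string
for i in range(0, len(lines) - 3, 4):
    seq = lines[i + 1]
    qual = lines[i + 3]
    p_correct = 1.0
    for q in qual:
        p_correct = p_correct*(1 - p_dict[q])
    p_correct_by_seq[seq] = p_correct
```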
### Does sequence quality vary along the length of your reads?
```python
```
```python
```
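One possible sketch for this exercise (again assuming `p_dict`, the imports from the first cell, and four-line FASTQ records in `files/example.fastq.gz`):
```python
import gzip

with gzip.open("files/example.fastq.gz") as f:
    lines = [l.decode("ascii").strip() for l in f]

quals = [lines[i + 3] for i in range(0, len(lines) - 3, 4)]

# Mean probability of an incorrect call at each position along the reads
read_len = min(len(q) for q in quals)
mean_p = [np.mean([p_dict[q[j]] for q in quals]) for j in range(read_len)]

plt.plot(range(read_len), mean_p)
plt.xlabel("position along read")
plt.ylabel("mean p(incorrect call)")
plt.show()
```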
|
C$Procedure CKR02 ( C-kernel, read pointing record, data type 2 )
SUBROUTINE CKR02 ( HANDLE, DESCR, SCLKDP, TOL, RECORD, FOUND )
IMPLICIT NONE
C$ Abstract
C
C Read a pointing record from a CK segment, data type 2.
C
C$ Disclaimer
C
C THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
C CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
C GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
C ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE
C PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
C TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
C WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
C PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
C SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
C SOFTWARE AND RELATED MATERIALS, HOWEVER USED.
C
C IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
C BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
C LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
C INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
C REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
C REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.
C
C RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
C THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
C CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
C ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.
C
C$ Required_Reading
C
C CK
C DAF
C
C$ Keywords
C
C POINTING
C
C$ Declarations
INTEGER HANDLE
DOUBLE PRECISION DESCR ( * )
DOUBLE PRECISION SCLKDP
DOUBLE PRECISION TOL
DOUBLE PRECISION RECORD ( * )
LOGICAL FOUND
C$ Brief_I/O
C
C Variable I/O Description
C -------- --- --------------------------------------------------
C HANDLE I File handle.
C DESCR I Segment descriptor.
C SCLKDP I Spacecraft clock time.
C TOL I Time tolerance
C RECORD O Pointing data record.
C FOUND O True when data is found.
C
C$ Detailed_Input
C
C HANDLE is the integer handle of the CK file containing the
C segment.
C
C DESCR is the descriptor of the segment.
C
C SCLKDP is the encoded spacecraft clock time for which
C pointing is being requested.
C
C TOL is a time tolerance, measured in the same units as
C encoded spacecraft clock.
C
C When SCLKDP falls within the bounds of one of the
C intervals then the tolerance has no effect. However,
C if the request time is not in one of the intervals
C then the tolerance is used to determine if pointing
C at one of the interval endpoints should be returned.
C
C$ Detailed_Output
C
C RECORD is the pointing record. Contents are as follows:
C
C RECORD( 1 ) = Start time of interval.
C RECORD( 2 ) = Time for which pointing was found.
C RECORD( 3 ) = Seconds per tick rate.
C
C RECORD( 4 ) = q0
C RECORD( 5 ) = q1
C RECORD( 6 ) = q2
C RECORD( 7 ) = q3
C
C RECORD( 8 ) = av1
C RECORD( 9 ) = av2
C RECORD( 10 ) = av3
C
C The quantities q0 - q3 are the components of the
C quaternion that represents the C-matrix associated with
C the start time of the interval. The quantities av1,
C av2, and av3 represent the angular velocity vector of
C the interval. The components of the angular velocity
C vector are specified relative to the inertial reference
C frame of the segment.
C
C FOUND is true if a record was found to satisfy the pointing
C request.
C
C$ Parameters
C
C None.
C
C$ Exceptions
C
C 1) If the specified handle does not belong to any file that is
C currently known to be open, an error is diagnosed by a
C routine that this routine calls.
C
C 2) If DESCR is not a valid, packed descriptor of a segment in
C the CK file specified by HANDLE, the results of this routine
C are unpredictable.
C
C 3) If the segment is not of data type 2, as specified in the
C third integer component of the segment descriptor, then
C the error SPICE(WRONGDATATYPE) is signalled.
C
C$ Files
C
C The file containing the segment is specified by its handle, and
C should be opened for read, either by CKLPF or DAFOPR.
C
C$ Particulars
C
C See the CK Required Reading file for a detailed description of
C the structure of a type 2 pointing segment.
C
C This routine searches a type 2 segment and determines if the
C request for pointing can be satisfied by the segment. If so,
C then it returns information in the array RECORD that CKE02 uses
C to evaluate the pointing at the time for which pointing was found.
C
C When the time for which pointing was requested falls within one
C of the intervals then the returned time is the same as the
C requested time. However, when the request time is not within any
C of the intervals then the returned time will be the interval
C endpoint closest to the request time, provided that endpoint is
C within the tolerance specified by the user.
C
C
C$ Examples
C
C The CKRnn routines are usually used in tandem with the CKEnn
C routines, which evaluate the record returned by CKRnn to give
C the pointing information and output time.
C
C The following code fragment searches through a file (attached to
C HANDLE) for all segments applicable to the Voyager 2 wide angle
C camera, for a particular spacecraft clock time, that are of data
C types 1 or 2. It then evaluates the pointing for that epoch and
C prints the result.
C
C
C SC = -32
C INST = -32002
C C
C C Load the Voyager 2 spacecraft clock kernel and the C-kernel.
C C
C CALL FURNSH ( 'VGR_SCLK.TSC' )
C CALL DAFOPR ( 'VGR2_CK.BC', HANDLE )
C C
C C Get the spacecraft clock time. Must encode it for use
C C in the C-kernel.
C C
C WRITE (*,*) 'Enter spacecraft clock time string:'
C READ (*,FMT='(A)') SCLKCH
C CALL SCENCD ( SC, SCLKCH, SCLKDP )
C
C C
C C Search from the beginning through all segments.
C C
C CALL DAFBFS ( HANDLE )
C CALL DAFFNA ( SFND )
C
C DO WHILE ( SFND )
C
C CALL DAFGN ( IDENT )
C CALL DAFGS ( DESCR )
C CALL DAFUS ( DESCR, 2, 6, DCD, ICD )
C
C IF ( INST .EQ. ICD( 1 ) .AND.
C . SCLKDP + TOL .GE. DCD( 1 ) .AND.
C . SCLKDP - TOL .LE. DCD( 2 ) ) THEN
C
C DTYPE = ICD ( 3 )
C
C IF ( DTYPE .EQ. 1 ) THEN
C
C CALL CKR01 ( HANDLE, DESCR, SCLKDP, TOL, NEEDAV,
C . RECORD, FOUND )
C
C IF ( FOUND ) THEN
C CALL CKE01 ( NEEDAV, RECORD, CMAT, AV, CLKOUT )
C END IF
C
C ELSE IF ( DTYPE .EQ. 2 ) THEN
C
C CALL CKR02 ( HANDLE, DESCR, SCLKDP, TOL,
C . RECORD, FOUND )
C
C IF ( FOUND ) THEN
C CALL CKE02 ( NEEDAV, RECORD, CMAT, AV, CLKOUT )
C END IF
C
C END IF
C
C IF ( FOUND ) THEN
C
C WRITE (*,*) 'Segment descriptor and identifier:'
C WRITE (*,*) DCD, ICD
C WRITE (*,*) IDENT
C
C WRITE (*,*) 'C-matrix:'
C WRITE (*,*) CMAT
C
C END IF
C
C END IF
C
C CALL DAFFNA ( SFND )
C
C END DO
C
C$ Restrictions
C
C 1) The file containing the segment should be opened for read,
C either by CKLPF or DAFOPR.
C
C$ Literature_References
C
C None.
C
C$ Author_and_Institution
C
C J.M. Lynch (JPL)
C
C$ Version
C
C- SPICELIB Version 1.1.1, 22-AUG-2006 (EDW)
C
C Replaced references to LDPOOL with references
C to FURNSH.
C
C- SPICELIB Version 1.1.0, 07-SEP-2001 (EDW)
C
C Replaced DAFRDA call with DAFGDA.
C Added IMPLICIT NONE.
C
C- SPICELIB Version 1.0.1, 10-MAR-1992 (WLT)
C
C Comment section for permuted index source lines was added
C following the header.
C
C- SPICELIB Version 1.0.0, 30-AUG-1991 (JML)
C
C-&
C$ Index_Entries
C
C read ck type_2 pointing data record
C
C-&
C
C SPICELIB functions
C
LOGICAL RETURN
INTEGER LSTLED
C
C Local parameters
C
C DIRSIZ is the directory size.
C
C NDC is the number of double precision components in an
C unpacked C-kernel segment descriptor.
C
C NIC is the number of integer components in an unpacked
C C-kernel segment descriptor.
C
C PSIZ is the number of double precision numbers making up
C the record containing the quaternion, angular
C velocity vector, and seconds per tick rate.
C
C DTYPE is the data type of the segment that this routine
C operates on.
C
C
INTEGER DIRSIZ
PARAMETER ( DIRSIZ = 100 )
INTEGER NDC
PARAMETER ( NDC = 2 )
INTEGER NIC
PARAMETER ( NIC = 6 )
INTEGER PSIZ
PARAMETER ( PSIZ = 8 )
INTEGER DTYPE
PARAMETER ( DTYPE = 2 )
C
C Local variables
C
INTEGER ICD ( NIC )
INTEGER BEG
INTEGER END
INTEGER NREC
INTEGER NDIR
INTEGER GROUP
INTEGER DIRLOC
INTEGER REMAIN
INTEGER SKIP
INTEGER GRPNDX
INTEGER INDEX
INTEGER STPLOC
INTEGER ARRSIZ
INTEGER I
INTEGER N
DOUBLE PRECISION DCD ( NDC )
DOUBLE PRECISION BUFFER ( DIRSIZ )
DOUBLE PRECISION PREC ( PSIZ )
DOUBLE PRECISION START
DOUBLE PRECISION CLKOUT
DOUBLE PRECISION STOPI
DOUBLE PRECISION DIFF1
DOUBLE PRECISION DIFF2
LOGICAL FND
C
C Standard SPICE error handling.
C
IF ( RETURN () ) THEN
RETURN
ELSE
CALL CHKIN ( 'CKR02' )
END IF
C
C To minimize the number of file reads performed during the search,
C a buffer of 100 double precision numbers is used to read the SCLK
C times from the C-kernel. If there are 10,001 or fewer pointing
C records, at most four reads will be needed to satisfy the request:
C one to read in 100 or fewer directory times, one to read 100 or
C fewer interval start times, one to read from the stop times, and
C then, after the appropriate record has been located, one to read
C the pointing record.
C
C One more read would be required for every other group of 10,000
C records in the segment.
C
C
C Start off with FOUND equal to false.
C
FOUND = .FALSE.
C
C We need to look at a few of the descriptor components.
C
C The unpacked descriptor contains the following information
C about the segment:
C
C DCD(1) Initial encoded SCLK
C DCD(2) Final encoded SCLK
C ICD(1) Instrument
C ICD(2) Inertial reference frame
C ICD(3) Data type
C ICD(4) Angular velocity flag
C ICD(5) Initial address of segment data
C ICD(6) Final address of segment data
C
CALL DAFUS ( DESCR, NDC, NIC, DCD, ICD )
C
C Check to make sure that the segment is type 2.
C
IF ( ICD( 3 ) .NE. DTYPE ) THEN
CALL SETMSG ( 'The segment is not a type 2 segment. ' //
. 'Type is #' )
CALL ERRINT ( '#', ICD(3) )
CALL SIGERR ( 'SPICE(WRONGDATATYPE)' )
CALL CHKOUT ( 'CKR02' )
RETURN
END IF
C
C The beginning and ending addresses of the segment are in the
C descriptor.
C
BEG = ICD( 5 )
END = ICD( 6 )
C
C Get the number of records in this segment, and from that determine
C the number of directory epochs.
C
C
C Based on the structure of a type 2 segment, the size of a
C segment with N pointing intervals is given as follows:
C
C ARRSIZ = PSIZ * N + 2 * N + ( N-1 ) / 100 (1)
C
C In the above equation PSIZ is eight and integer arithmetic is
C used. This equation is equivalent to:
C
C
C 100 * ARRSIZ = 1000 * N + ( N-1 ) * 100 (2)
C -------
C 100
C
C If we can eliminate the integer division then, since all of
C the other values represent whole numbers, we can solve the
C equation for N in terms of ARRSIZ by using double precision
C arithmetic and then rounding the result to the nearest integer.
C
C This next equation uses double precision arithmetic and is
C equivalent to (2):
C
C 100 * ARRSIZ = 1000 * N + ( N-1 ) - ( N-1 ) MOD 100 (3)
C
C Which means:
C
C 100 * ARRSIZ + 1 ( N-1 ) MOD 100
C ---------------- + --------------- = N (4)
C 1001 1001
C
C Since the second term on the left side of (4) is always less
C than 0.1, the first term will always round to the correct
C value of N.
C
ARRSIZ = END - BEG + 1
NREC = NINT ( ( 100.D0 * (DBLE(ARRSIZ)) + 1.D0 ) / 1001.D0 )
NDIR = ( NREC - 1 ) / DIRSIZ
C
C The directory epochs narrow down the search to a group of DIRSIZ
C or fewer records.
C
C There is only one group if there are no directory epochs.
C
IF ( NDIR .EQ. 0 ) THEN
GROUP = 1
ELSE
C
C Compute the location of the first directory epoch. From the
C beginning of the segment, we need to go through all of the
C pointing numbers (PSIZ*NREC of them), then through all of
C the SCLK start and stop times (2*NREC more) to get to the
C first SCLK directory.
C
DIRLOC = BEG + ( PSIZ + 2 ) * NREC
C
C Locate the last directory epoch less than or equal to SCLKDP.
C
C Read in as many as DIRSIZ directory epochs at a time for
C comparison.
C
FND = .FALSE.
REMAIN = NDIR
GROUP = 0
DO WHILE ( .NOT. FND )
C
C The number of records to read in the buffer.
C
N = MIN( REMAIN, DIRSIZ )
CALL DAFGDA ( HANDLE, DIRLOC, DIRLOC + N - 1, BUFFER )
REMAIN = REMAIN - N
C
C Determine the last directory element in BUFFER that's less
C than or equal to SCLKDP.
C
C If we reach the end of the directories, and still haven't
C found one bigger than the epoch, the group is the last group
C in the segment.
C
C Otherwise keep looking.
C
I = LSTLED( SCLKDP, N, BUFFER )
IF ( I .LT. N ) THEN
GROUP = GROUP + I + 1
FND = .TRUE.
ELSE IF ( REMAIN .EQ. 0 ) THEN
GROUP = NDIR + 1
FND = .TRUE.
ELSE
DIRLOC = DIRLOC + N
GROUP = GROUP + N
END IF
END DO
END IF
C
C Now we know which group of DIRSIZ (or less) times to look at.
C Out of the NREC START times, the number that we should skip over
C to get to the proper group is DIRSIZ*( GROUP - 1 ).
C
SKIP = DIRSIZ * ( GROUP - 1 )
C
C From this we can compute the index into the segment of the group
C of times we want. From the beginning, we need to pass through
C PSIZ*NREC pointing numbers to get to the first START time.
C Then we skip over the number just computed above.
C
GRPNDX = BEG + NREC * ( PSIZ ) + SKIP
C
C The number of times that we have to look at may be less than
C DIRSIZ. However many there are, go ahead and read them into the
C buffer.
C
N = MIN( DIRSIZ, NREC - SKIP )
CALL DAFGDA ( HANDLE, GRPNDX, GRPNDX + N - 1, BUFFER )
C
C Find the largest time in the group less than or equal to the input
C time.
C
I = LSTLED ( SCLKDP, N, BUFFER )
C
C If the request time does not fall into one of the intervals, then
C there are several cases in which this routine can return an
C endpoint of an interval.
C
C 1) If I = 0 then the request time falls before the first START
C time in the group. Because of the way that the directory
C is constructed we already know that the preceding STOP
C time is not the right one so all we have to check is if
C SCLKDP + TOL is greater than or equal to the first START
C time of the group.
C
C 2) If I = N and the request time is not in the Nth interval
C then we know that the request time is after the last STOP
C time in the group. Because of the way that the directory
C is constructed we already know that the following START
C time is not the right one so all we have to check is if
C SCLKDP - TOL is less than or equal to the last STOP time
C of the group.
C
C 3) Finally, if I is between 1 and N-1 and the request time
C does not fall in any of the intervals then we need to
C return the closer of STOP(I) or START(I+1) if it is
C within TOL of SCLKDP.
C
C
C If SCLKDP is less than the first time in BUFFER then check to see
C if we want the first START time in the group.
C
IF ( I .EQ. 0 ) THEN
IF ( ( SCLKDP + TOL ) .GE. BUFFER(1) ) THEN
FOUND = .TRUE.
START = BUFFER(1)
CLKOUT = BUFFER(1)
INDEX = 1
ELSE
CALL CHKOUT ( 'CKR02' )
RETURN
END IF
ELSE
C
C I is not equal to zero. Determine if the request time falls
C within the Ith interval.
C
STPLOC = BEG + NREC*( PSIZ + 1 ) + SKIP + I - 1
CALL DAFGDA ( HANDLE, STPLOC, STPLOC, STOPI )
IF ( SCLKDP .LE. STOPI ) THEN
FOUND = .TRUE.
START = BUFFER(I)
CLKOUT = SCLKDP
INDEX = I
ELSE
C
C The request time does not fall within the interval. Check
C to see if the Ith STOP time or the (I+1)th START time
C satisfy the request.
C
C If I = N then we need to consider only the STOP time
C because of the way that the directory is constructed.
C
IF ( I .EQ. N ) THEN
IF ( ( SCLKDP - TOL ) .LE. STOPI ) THEN
FOUND = .TRUE.
START = BUFFER(I)
CLKOUT = STOPI
INDEX = I
ELSE
CALL CHKOUT ( 'CKR02' )
RETURN
END IF
ELSE
C
C Find which time SCLKDP is closest to and then see if
C it is within the tolerance.
C
DIFF1 = SCLKDP - STOPI
DIFF2 = BUFFER(I+1) - SCLKDP
IF ( MIN ( DIFF1, DIFF2 ) .LE. TOL ) THEN
FOUND = .TRUE.
C
C Notice that if the request time is equidistant from
C the STOP and START time the START time will be chosen.
C
IF ( DIFF2 .LE. DIFF1 ) THEN
START = BUFFER(I+1)
CLKOUT = BUFFER(I+1)
INDEX = I+1
ELSE
START = BUFFER(I)
CLKOUT = STOPI
INDEX = I
END IF
ELSE
CALL CHKOUT ( 'CKR02' )
RETURN
END IF
END IF
END IF
END IF
C
C
C Now we know the exact record that we want and can begin
C constructing the output record.
C
C RECORD( 1 ) holds the interval start time.
C RECORD( 2 ) holds the time for which pointing was found (CLKOUT).
C
RECORD( 1 ) = START
RECORD( 2 ) = CLKOUT
C
C We need the pointing record out of GROUP indexed by INDEX.
C This group of size DIRSIZ is SKIP records into the beginning
C of the segment. And each record is PSIZ big.
C
N = BEG + PSIZ * ( SKIP + INDEX - 1 )
CALL DAFGDA ( HANDLE, N, N + PSIZ - 1, PREC )
RECORD ( 3 ) = PREC ( PSIZ )
CALL VEQUG ( PREC, 7, RECORD( 4 ) )
C
C That is all.
C
CALL CHKOUT ( 'CKR02' )
RETURN
END
|
{-# OPTIONS --safe #-}
module Cubical.Algebra.CommRingSolver.EvalHom where
open import Cubical.Foundations.Prelude
open import Cubical.Data.Nat using (ℕ)
open import Cubical.Data.Int.Base hiding (_+_ ; _·_ ; -_)
open import Cubical.Data.FinData
open import Cubical.Data.Vec
open import Cubical.Data.Bool
open import Cubical.Relation.Nullary.Base
open import Cubical.Algebra.CommRingSolver.Utility
open import Cubical.Algebra.CommRingSolver.RawAlgebra
open import Cubical.Algebra.CommRingSolver.IntAsRawRing
open import Cubical.Algebra.CommRingSolver.HornerForms
open import Cubical.Algebra.CommRingSolver.HornerEval
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.Ring
private
variable
ℓ : Level
module HomomorphismProperties (R : CommRing ℓ) where
private
νR = CommRing→RawℤAlgebra R
open CommRingStr (snd R)
open RingTheory (CommRing→Ring R)
open IteratedHornerOperations νR
EvalHom+0 : {n : ℕ} (P : IteratedHornerForms νR n) (xs : Vec ⟨ νR ⟩ n)
→ eval (0ₕ +ₕ P) xs ≡ eval P xs
EvalHom+0 {n = ℕ.zero} (const x) [] = cong (scalar R) (+Ridℤ x)
EvalHom+0 {n = ℕ.suc _} P xs = refl
Eval0H : {n : ℕ} (xs : Vec ⟨ νR ⟩ n)
→ eval {A = νR} 0ₕ xs ≡ 0r
Eval0H [] = refl
Eval0H (x ∷ xs) = refl
Eval1ₕ : {n : ℕ} (xs : Vec ⟨ νR ⟩ n)
→ eval {A = νR} 1ₕ xs ≡ 1r
Eval1ₕ [] = refl
Eval1ₕ (x ∷ xs) =
eval 1ₕ (x ∷ xs) ≡⟨ refl ⟩
eval (0H ·X+ 1ₕ) (x ∷ xs) ≡⟨ combineCasesEval R 0H 1ₕ x xs ⟩
eval {A = νR} 0H (x ∷ xs) · x + eval 1ₕ xs ≡⟨ cong (λ u → u · x + eval 1ₕ xs)
(Eval0H (x ∷ xs)) ⟩
0r · x + eval 1ₕ xs ≡⟨ cong (λ u → 0r · x + u)
(Eval1ₕ xs) ⟩
0r · x + 1r ≡⟨ cong (λ u → u + 1r)
(0LeftAnnihilates _) ⟩
0r + 1r ≡⟨ +IdL _ ⟩
1r ∎
-EvalDist :
{n : ℕ} (P : IteratedHornerForms νR n) (xs : Vec ⟨ νR ⟩ n)
→ eval (-ₕ P) xs ≡ - eval P xs
-EvalDist (const x) [] = -DistScalar R x
-EvalDist 0H xs =
eval (-ₕ 0H) xs ≡⟨ Eval0H xs ⟩
0r ≡⟨ sym 0Selfinverse ⟩
- 0r ≡⟨ cong -_ (sym (Eval0H xs)) ⟩
- eval 0H xs ∎
-EvalDist (P ·X+ Q) (x ∷ xs) =
eval (-ₕ (P ·X+ Q)) (x ∷ xs)
≡⟨ refl ⟩
eval ((-ₕ P) ·X+ (-ₕ Q)) (x ∷ xs)
≡⟨ combineCasesEval R (-ₕ P) (-ₕ Q) x xs ⟩
(eval (-ₕ P) (x ∷ xs)) · x + eval (-ₕ Q) xs
≡⟨ cong (λ u → u · x + eval (-ₕ Q) xs) (-EvalDist P _) ⟩
(- eval P (x ∷ xs)) · x + eval (-ₕ Q) xs
≡⟨ cong (λ u → (- eval P (x ∷ xs)) · x + u) (-EvalDist Q _) ⟩
(- eval P (x ∷ xs)) · x + - eval Q xs
≡[ i ]⟨ -DistL· (eval P (x ∷ xs)) x i + - eval Q xs ⟩
- ((eval P (x ∷ xs)) · x) + (- eval Q xs)
≡⟨ -Dist _ _ ⟩
- ((eval P (x ∷ xs)) · x + eval Q xs)
≡[ i ]⟨ - combineCasesEval R P Q x xs (~ i) ⟩
- eval (P ·X+ Q) (x ∷ xs) ∎
combineCases+ : {n : ℕ} (P Q : IteratedHornerForms νR (ℕ.suc n))
(r s : IteratedHornerForms νR n)
(x : fst R) (xs : Vec (fst R) n)
→ eval ((P ·X+ r) +ₕ (Q ·X+ s)) (x ∷ xs)
≡ eval ((P +ₕ Q) ·X+ (r +ₕ s)) (x ∷ xs)
combineCases+ {n = n} P Q r s x xs with (isZero νR (P +ₕ Q) and isZero νR (r +ₕ s)) ≟ true
... | yes p = compute+ₕEvalBothZero R n P Q r s x xs p
... | no p = compute+ₕEvalNotBothZero R n P Q r s x xs (¬true→false _ p)
+Homeval :
{n : ℕ} (P Q : IteratedHornerForms νR n) (xs : Vec ⟨ νR ⟩ n)
→ eval (P +ₕ Q) xs ≡ (eval P xs) + (eval Q xs)
+Homeval (const x) (const y) [] = +HomScalar R x y
+Homeval 0H Q xs =
eval (0H +ₕ Q) xs ≡⟨ refl ⟩
eval Q xs ≡⟨ sym (+IdL _) ⟩
0r + eval Q xs ≡⟨ cong (λ u → u + eval Q xs) (sym (Eval0H xs)) ⟩
eval 0H xs + eval Q xs ∎
+Homeval (P ·X+ Q) 0H xs =
eval ((P ·X+ Q) +ₕ 0H) xs ≡⟨ refl ⟩
eval (P ·X+ Q) xs ≡⟨ sym (+IdR _) ⟩
eval (P ·X+ Q) xs + 0r
≡⟨ cong (λ u → eval (P ·X+ Q) xs + u) (sym (Eval0H xs)) ⟩
eval (P ·X+ Q) xs + eval 0H xs ∎
+Homeval (P ·X+ Q) (S ·X+ T) (x ∷ xs) =
eval ((P ·X+ Q) +ₕ (S ·X+ T)) (x ∷ xs)
≡⟨ combineCases+ P S Q T x xs ⟩
eval ((P +ₕ S) ·X+ (Q +ₕ T)) (x ∷ xs)
≡⟨ combineCasesEval R (P +ₕ S) (Q +ₕ T) x xs ⟩
(eval (P +ₕ S) (x ∷ xs)) · x + eval (Q +ₕ T) xs
≡⟨ cong (λ u → (eval (P +ₕ S) (x ∷ xs)) · x + u) (+Homeval Q T xs) ⟩
(eval (P +ₕ S) (x ∷ xs)) · x + (eval Q xs + eval T xs)
≡⟨ cong (λ u → u · x + (eval Q xs + eval T xs)) (+Homeval P S (x ∷ xs)) ⟩
(eval P (x ∷ xs) + eval S (x ∷ xs)) · x
+ (eval Q xs + eval T xs)
≡⟨ cong (λ u → u + (eval Q xs + eval T xs)) (·DistL+ _ _ _) ⟩
(eval P (x ∷ xs)) · x + (eval S (x ∷ xs)) · x
+ (eval Q xs + eval T xs)
≡⟨ +ShufflePairs _ _ _ _ ⟩
((eval P (x ∷ xs)) · x + eval Q xs)
+ ((eval S (x ∷ xs)) · x + eval T xs)
≡[ i ]⟨ combineCasesEval R P Q x xs (~ i) + combineCasesEval R S T x xs (~ i) ⟩
eval (P ·X+ Q) (x ∷ xs)
+ eval (S ·X+ T) (x ∷ xs) ∎
⋆Homeval : {n : ℕ}
(r : IteratedHornerForms νR n)
(P : IteratedHornerForms νR (ℕ.suc n)) (x : ⟨ νR ⟩) (xs : Vec ⟨ νR ⟩ n)
→ eval (r ⋆ P) (x ∷ xs) ≡ eval r xs · eval P (x ∷ xs)
⋆0LeftAnnihilates :
{n : ℕ} (P : IteratedHornerForms νR (ℕ.suc n)) (xs : Vec ⟨ νR ⟩ (ℕ.suc n))
→ eval (0ₕ ⋆ P) xs ≡ 0r
⋆0LeftAnnihilates 0H xs = Eval0H xs
⋆0LeftAnnihilates {n = ℕ.zero} (P ·X+ Q) (x ∷ xs) = refl
⋆0LeftAnnihilates {n = ℕ.suc _} (P ·X+ Q) (x ∷ xs) = refl
⋆isZeroLeftAnnihilates :
{n : ℕ} (r : IteratedHornerForms νR n)
(P : IteratedHornerForms νR (ℕ.suc n))
(xs : Vec ⟨ νR ⟩ (ℕ.suc n))
→ isZero νR r ≡ true
→ eval (r ⋆ P) xs ≡ 0r
⋆isZeroLeftAnnihilates r P xs isZero-r = evalIsZero R (r ⋆ P) xs (isZeroPresLeft⋆ r P isZero-r)
·0LeftAnnihilates :
{n : ℕ} (P : IteratedHornerForms νR n) (xs : Vec ⟨ νR ⟩ n)
→ eval (0ₕ ·ₕ P) xs ≡ 0r
·0LeftAnnihilates (const x) xs =
eval (const _) xs ≡⟨ Eval0H xs ⟩ 0r ∎
·0LeftAnnihilates 0H xs = Eval0H xs
·0LeftAnnihilates (P ·X+ P₁) xs = Eval0H xs
·isZeroLeftAnnihilates :
{n : ℕ} (P Q : IteratedHornerForms νR n)
(xs : Vec (fst R) n)
→ isZero νR P ≡ true
→ eval (P ·ₕ Q) xs ≡ 0r
·isZeroLeftAnnihilates P Q xs isZeroP = evalIsZero R (P ·ₕ Q) xs (isZeroPresLeft·ₕ P Q isZeroP)
·Homeval : {n : ℕ} (P Q : IteratedHornerForms νR n) (xs : Vec ⟨ νR ⟩ n)
→ eval (P ·ₕ Q) xs ≡ (eval P xs) · (eval Q xs)
combineCases⋆ : {n : ℕ} (x : fst R) (xs : Vec (fst R) n)
→ (r : IteratedHornerForms νR n)
→ (P : IteratedHornerForms νR (ℕ.suc n))
→ (Q : IteratedHornerForms νR n)
→ eval (r ⋆ (P ·X+ Q)) (x ∷ xs) ≡ eval ((r ⋆ P) ·X+ (r ·ₕ Q)) (x ∷ xs)
combineCases⋆ x xs r P Q with isZero νR r ≟ true
... | yes p =
eval (r ⋆ (P ·X+ Q)) (x ∷ xs) ≡⟨ ⋆isZeroLeftAnnihilates r (P ·X+ Q) (x ∷ xs) p ⟩
0r ≡⟨ someCalculation R ⟩
0r · x + 0r ≡⟨ step1 ⟩
eval (r ⋆ P) (x ∷ xs) · x + eval (r ·ₕ Q) xs ≡⟨ sym (combineCasesEval R (r ⋆ P) (r ·ₕ Q) x xs) ⟩
eval ((r ⋆ P) ·X+ (r ·ₕ Q)) (x ∷ xs) ∎
where
step1 : 0r · x + 0r ≡ eval (r ⋆ P) (x ∷ xs) · x + eval (r ·ₕ Q) xs
step1 i = ⋆isZeroLeftAnnihilates r P (x ∷ xs) p (~ i) · x + ·isZeroLeftAnnihilates r Q xs p (~ i)
... | no p with isZero νR r
... | true = byAbsurdity (p refl)
... | false = refl
⋆Homeval r 0H x xs =
eval (r ⋆ 0H) (x ∷ xs) ≡⟨ refl ⟩
0r ≡⟨ sym (0RightAnnihilates _) ⟩
eval r xs · 0r ≡⟨ refl ⟩
eval r xs · eval {A = νR} 0H (x ∷ xs) ∎
⋆Homeval r (P ·X+ Q) x xs =
eval (r ⋆ (P ·X+ Q)) (x ∷ xs) ≡⟨ combineCases⋆ x xs r P Q ⟩
eval ((r ⋆ P) ·X+ (r ·ₕ Q)) (x ∷ xs)
≡⟨ combineCasesEval R (r ⋆ P) (r ·ₕ Q) x xs ⟩
(eval (r ⋆ P) (x ∷ xs)) · x + eval (r ·ₕ Q) xs
≡⟨ cong (λ u → u · x + eval (r ·ₕ Q) xs) (⋆Homeval r P x xs) ⟩
(eval r xs · eval P (x ∷ xs)) · x + eval (r ·ₕ Q) xs
≡⟨ cong (λ u → (eval r xs · eval P (x ∷ xs)) · x + u) (·Homeval r Q xs) ⟩
(eval r xs · eval P (x ∷ xs)) · x + eval r xs · eval Q xs
≡⟨ cong (λ u → u + eval r xs · eval Q xs) (sym (·Assoc _ _ _)) ⟩
eval r xs · (eval P (x ∷ xs) · x) + eval r xs · eval Q xs
≡⟨ sym (·DistR+ _ _ _) ⟩
eval r xs · ((eval P (x ∷ xs) · x) + eval Q xs)
≡[ i ]⟨ eval r xs · combineCasesEval R P Q x xs (~ i) ⟩
eval r xs · eval (P ·X+ Q) (x ∷ xs) ∎
lemmaForCombineCases· :
{n : ℕ} (Q : IteratedHornerForms νR n) (P S : IteratedHornerForms νR (ℕ.suc n))
(xs : Vec (fst R) (ℕ.suc n))
→ isZero νR (P ·ₕ S) ≡ true
→ eval ((P ·X+ Q) ·ₕ S) xs ≡ eval (Q ⋆ S) xs
lemmaForCombineCases· Q P S xs isZeroProd with isZero νR (P ·ₕ S)
... | true = refl
... | false = byBoolAbsurdity isZeroProd
combineCases· :
{n : ℕ} (Q : IteratedHornerForms νR n) (P S : IteratedHornerForms νR (ℕ.suc n))
(xs : Vec (fst R) (ℕ.suc n))
→ eval ((P ·X+ Q) ·ₕ S) xs ≡ eval (((P ·ₕ S) ·X+ 0ₕ) +ₕ (Q ⋆ S)) xs
combineCases· Q P S (x ∷ xs) with isZero νR (P ·ₕ S) ≟ true
... | yes p =
eval ((P ·X+ Q) ·ₕ S) (x ∷ xs) ≡⟨ lemmaForCombineCases· Q P S (x ∷ xs) p ⟩
eval (Q ⋆ S) (x ∷ xs) ≡⟨ sym (+IdL _) ⟩
0r + eval (Q ⋆ S) (x ∷ xs) ≡⟨ step1 ⟩
eval ((P ·ₕ S) ·X+ 0ₕ) (x ∷ xs) + eval (Q ⋆ S) (x ∷ xs) ≡⟨ step2 ⟩
eval (((P ·ₕ S) ·X+ 0ₕ) +ₕ (Q ⋆ S)) (x ∷ xs) ∎
where
lemma =
eval ((P ·ₕ S) ·X+ 0ₕ) (x ∷ xs) ≡⟨ combineCasesEval R (P ·ₕ S) 0ₕ x xs ⟩
eval (P ·ₕ S) (x ∷ xs) · x + eval 0ₕ xs ≡[ i ]⟨ evalIsZero R (P ·ₕ S) (x ∷ xs) p i · x + Eval0H xs i ⟩
0r · x + 0r ≡⟨ sym (someCalculation R) ⟩
0r ∎
step1 : _ ≡ _
step1 i = lemma (~ i) + eval (Q ⋆ S) (x ∷ xs)
step2 = sym (+Homeval ((P ·ₕ S) ·X+ 0ₕ) (Q ⋆ S) (x ∷ xs))
... | no p with isZero νR (P ·ₕ S)
... | true = byAbsurdity (p refl)
... | false = refl
·Homeval (const x) (const y) [] = ·HomScalar R x y
·Homeval 0H Q xs =
eval (0H ·ₕ Q) xs ≡⟨ Eval0H xs ⟩
0r ≡⟨ sym (0LeftAnnihilates _) ⟩
0r · eval Q xs ≡⟨ cong (λ u → u · eval Q xs) (sym (Eval0H xs)) ⟩
eval 0H xs · eval Q xs ∎
·Homeval (P ·X+ Q) S (x ∷ xs) =
eval ((P ·X+ Q) ·ₕ S) (x ∷ xs)
≡⟨ combineCases· Q P S (x ∷ xs) ⟩
eval (((P ·ₕ S) ·X+ 0ₕ) +ₕ (Q ⋆ S)) (x ∷ xs)
≡⟨ +Homeval ((P ·ₕ S) ·X+ 0ₕ) (Q ⋆ S) (x ∷ xs) ⟩
eval ((P ·ₕ S) ·X+ 0ₕ) (x ∷ xs) + eval (Q ⋆ S) (x ∷ xs)
≡⟨ cong (λ u → u + eval (Q ⋆ S) (x ∷ xs)) (combineCasesEval R (P ·ₕ S) 0ₕ x xs) ⟩
(eval (P ·ₕ S) (x ∷ xs) · x + eval 0ₕ xs) + eval (Q ⋆ S) (x ∷ xs)
≡⟨ cong (λ u → u + eval (Q ⋆ S) (x ∷ xs))
((eval (P ·ₕ S) (x ∷ xs) · x + eval 0ₕ xs)
≡⟨ cong (λ u → eval (P ·ₕ S) (x ∷ xs) · x + u) (Eval0H xs) ⟩
(eval (P ·ₕ S) (x ∷ xs) · x + 0r)
≡⟨ +IdR _ ⟩
(eval (P ·ₕ S) (x ∷ xs) · x)
≡⟨ cong (λ u → u · x) (·Homeval P S (x ∷ xs)) ⟩
((eval P (x ∷ xs) · eval S (x ∷ xs)) · x)
≡⟨ sym (·Assoc _ _ _) ⟩
(eval P (x ∷ xs) · (eval S (x ∷ xs) · x))
≡⟨ cong (λ u → eval P (x ∷ xs) · u) (·Comm _ _) ⟩
(eval P (x ∷ xs) · (x · eval S (x ∷ xs)))
≡⟨ ·Assoc _ _ _ ⟩
(eval P (x ∷ xs) · x) · eval S (x ∷ xs)
∎) ⟩
(eval P (x ∷ xs) · x) · eval S (x ∷ xs) + eval (Q ⋆ S) (x ∷ xs)
≡⟨ cong (λ u → (eval P (x ∷ xs) · x) · eval S (x ∷ xs) + u)
(⋆Homeval Q S x xs) ⟩
(eval P (x ∷ xs) · x) · eval S (x ∷ xs) + eval Q xs · eval S (x ∷ xs)
≡⟨ sym (·DistL+ _ _ _) ⟩
((eval P (x ∷ xs) · x) + eval Q xs) · eval S (x ∷ xs)
≡⟨ cong (λ u → u · eval S (x ∷ xs)) (sym (combineCasesEval R P Q x xs)) ⟩
eval (P ·X+ Q) (x ∷ xs) · eval S (x ∷ xs) ∎
|
/-
Copyright (c) 2021 Andrew Yang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Yang
-/
import group_theory.submonoid.pointwise
/-!
# Submonoid of inverses
Given a submonoid `N` of a monoid `M`, we define the submonoid `N.left_inv` as the submonoid of
left inverses of `N`. When `M` is commutative, we may define `from_comm_left_inv : N.left_inv →* N`
since the inverses are unique. When `N ≤ is_unit.submonoid M`, this is precisely
the pointwise inverse of `N`, and we may define `left_inv_equiv : S.left_inv ≃* S`.
For the pointwise inverse of submonoids of groups, please refer to
`group_theory.submonoid.pointwise`.
## TODO
Define the submonoid of right inverses and two-sided inverses.
See the comments of #10679 for a possible implementation.
-/
variables {M : Type*}
namespace submonoid
@[to_additive]
noncomputable instance [monoid M] : group (is_unit.submonoid M) :=
{ inv := λ x, ⟨_, (x.prop.unit⁻¹).is_unit⟩,
mul_left_inv := λ x, subtype.eq x.prop.unit.inv_val,
..(show monoid (is_unit.submonoid M), by apply_instance) }
@[to_additive]
noncomputable instance [comm_monoid M] : comm_group (is_unit.submonoid M) :=
{ mul_comm := λ a b, mul_comm a b,
..(show group (is_unit.submonoid M), by apply_instance) }
@[to_additive] lemma is_unit.submonoid.coe_inv [monoid M] (x : is_unit.submonoid M) :
↑(x⁻¹) = (↑x.prop.unit⁻¹ : M) := rfl
section monoid
variables [monoid M] (S : submonoid M)
/-- `S.left_inv` is the submonoid containing all the left inverses of `S`. -/
@[to_additive "`S.left_neg` is the additive submonoid containing all the left additive inverses
of `S`."]
def left_inv : submonoid M :=
{ carrier := { x : M | ∃ y : S, x * y = 1 },
one_mem' := ⟨1, mul_one 1⟩,
mul_mem' := λ a b ⟨a', ha⟩ ⟨b', hb⟩,
⟨b' * a', by rw [coe_mul, ← mul_assoc, mul_assoc a, hb, mul_one, ha]⟩ }
@[to_additive]
lemma unit_mem_left_inv (x : Mˣ) (hx : (x : M) ∈ S) : ((x⁻¹ : _) : M) ∈ S.left_inv :=
⟨⟨x, hx⟩, x.inv_val⟩
@[to_additive]
lemma left_inv_left_inv_eq (hS : S ≤ is_unit.submonoid M) : S.left_inv.left_inv = S :=
begin
refine le_antisymm S.left_inv_left_inv_le _,
intros x hx,
have : x = ((hS hx).unit⁻¹⁻¹ : Mˣ) := by { rw [inv_inv (hS hx).unit], refl },
rw this,
exact S.left_inv.unit_mem_left_inv _ (S.unit_mem_left_inv _ hx)
end
/-- The function from `S.left_inv` to `S` sending an element to its right inverse in `S`.
This is a `monoid_hom` when `M` is commutative. -/
@[to_additive "The function from `S.left_add` to `S` sending an element to its right additive
inverse in `S`. This is an `add_monoid_hom` when `M` is commutative."]
noncomputable
def from_left_inv : S.left_inv → S := λ x, x.prop.some
@[simp, to_additive]
lemma mul_from_left_inv (x : S.left_inv) : (x : M) * S.from_left_inv x = 1 :=
x.prop.some_spec
@[simp, to_additive] lemma from_left_inv_one : S.from_left_inv 1 = 1 :=
(one_mul _).symm.trans (subtype.eq $ S.mul_from_left_inv 1)
end monoid
section comm_monoid
variables [comm_monoid M] (S : submonoid M)
@[simp, to_additive]
lemma from_left_inv_mul (x : S.left_inv) : (S.from_left_inv x : M) * x = 1 :=
by rw [mul_comm, mul_from_left_inv]
@[to_additive]
lemma left_inv_le_is_unit : S.left_inv ≤ is_unit.submonoid M :=
λ x ⟨y, hx⟩, ⟨⟨x, y, hx, mul_comm x y ▸ hx⟩, rfl⟩
@[to_additive]
lemma from_left_inv_eq_iff (a : S.left_inv) (b : M) :
(S.from_left_inv a : M) = b ↔ (a : M) * b = 1 :=
by rw [← is_unit.mul_right_inj (left_inv_le_is_unit _ a.prop), S.mul_from_left_inv, eq_comm]
/-- The `monoid_hom` from `S.left_inv` to `S` sending an element to its right inverse in `S`. -/
@[to_additive "The `add_monoid_hom` from `S.left_neg` to `S` sending an element to its
right additive inverse in `S`.", simps]
noncomputable
def from_comm_left_inv : S.left_inv →* S :=
{ to_fun := S.from_left_inv,
map_one' := S.from_left_inv_one,
map_mul' := λ x y, subtype.ext $
by rw [from_left_inv_eq_iff, mul_comm x, submonoid.coe_mul, submonoid.coe_mul, mul_assoc,
← mul_assoc (x : M), mul_from_left_inv, one_mul, mul_from_left_inv] }
variable (hS : S ≤ is_unit.submonoid M)
include hS
/-- The submonoid of pointwise inverse of `S` is `mul_equiv` to `S`. -/
@[to_additive "The additive submonoid of pointwise additive inverse of `S` is
`add_equiv` to `S`.", simps apply]
noncomputable
def left_inv_equiv : S.left_inv ≃* S :=
{ inv_fun := λ x, by { choose x' hx using (hS x.prop), exact ⟨x'.inv, x, hx ▸ x'.inv_val⟩ },
left_inv := λ x, subtype.eq $ begin
dsimp, generalize_proofs h, rw ← h.some.mul_left_inj,
exact h.some.inv_val.trans ((S.mul_from_left_inv x).symm.trans (by rw h.some_spec)),
end,
right_inv := λ x, by { dsimp, ext, rw [from_left_inv_eq_iff], convert (hS x.prop).some.inv_val,
exact (hS x.prop).some_spec.symm },
..S.from_comm_left_inv }
@[simp, to_additive] lemma from_left_inv_left_inv_equiv_symm (x : S) :
S.from_left_inv ((S.left_inv_equiv hS).symm x) = x := (S.left_inv_equiv hS).right_inv x
@[simp, to_additive] lemma left_inv_equiv_symm_from_left_inv (x : S.left_inv) :
(S.left_inv_equiv hS).symm (S.from_left_inv x) = x := (S.left_inv_equiv hS).left_inv x
@[to_additive]
lemma left_inv_equiv_mul (x : S.left_inv) : (S.left_inv_equiv hS x : M) * x = 1 := by simp
@[to_additive]
lemma mul_left_inv_equiv (x : S.left_inv) : (x : M) * S.left_inv_equiv hS x = 1 := by simp
@[simp, to_additive] lemma left_inv_equiv_symm_mul (x : S) :
((S.left_inv_equiv hS).symm x : M) * x = 1 :=
by { convert S.mul_left_inv_equiv hS ((S.left_inv_equiv hS).symm x), simp }
@[simp, to_additive] lemma mul_left_inv_equiv_symm (x : S) :
(x : M) * (S.left_inv_equiv hS).symm x = 1 :=
by { convert S.left_inv_equiv_mul hS ((S.left_inv_equiv hS).symm x), simp }
end comm_monoid
section group
variables [group M] (S : submonoid M)
open_locale pointwise
@[to_additive] lemma left_inv_eq_inv : S.left_inv = S⁻¹ :=
submonoid.ext $ λ x,
⟨λ h, submonoid.mem_inv.mpr ((inv_eq_of_mul_eq_one h.some_spec).symm ▸ h.some.prop),
λ h, ⟨⟨_, h⟩, mul_right_inv _⟩⟩
@[simp, to_additive] lemma from_left_inv_eq_inv (x : S.left_inv) :
(S.from_left_inv x : M) = x⁻¹ :=
by rw [← mul_right_inj (x : M), mul_right_inv, mul_from_left_inv]
end group
section comm_group
variables [comm_group M] (S : submonoid M) (hS : S ≤ is_unit.submonoid M)
@[simp, to_additive] lemma left_inv_equiv_symm_eq_inv (x : S) :
((S.left_inv_equiv hS).symm x : M) = x⁻¹ :=
by rw [← mul_right_inj (x : M), mul_right_inv, mul_left_inv_equiv_symm]
end comm_group
end submonoid
|
args=commandArgs(T)
cutOff<-as.numeric(args[1])
setwd("/home/gaoy1/data/cuizhe/project/2020-06-30-10xPBMC_nextgem_labeled/bin")
library(Matrix)
library(parallel)
Data<-readRDS("../input/10xpbmc5k-snap-full.rds")
labels<-read.table("../input/pbmc_5k_atac_label_nextgem.txt",header = TRUE,sep = "\t",check.names = FALSE)
Data<-as(Data, "dgCMatrix")
labels<-labels[labels$barcode %in% rownames(Data),]
func<-function(label){
  # Cells (barcodes) assigned to this cluster label
  barcode<-labels[labels$label == label,]$barcode
  cell_data<-Data[rownames(Data) %in% barcode,]
  # Fraction of cells in the cluster with a non-zero value in each column
  nonZeroColumnList_cell<-diff(cell_data@p)/nrow(cell_data)
  candidateRange<-which(nonZeroColumnList_cell>=cutOff)
  # Set every column passing the cutoff to 1 for all cells in the cluster
  for(i in candidateRange){
    cell_data[,i] = 1
  }
  return(cell_data)
}
cl.cores<-16
cl <- makeCluster(cl.cores,type = "FORK")
results <- parLapply(cl, unique(labels$label), func)
enhanced_Data<-do.call('rbind',results)
stopCluster(cl)
saveRDS(enhanced_Data,file = paste0('../output/10xpbmc5k-snap-full_enh',cutOff,'.rds'))
print(paste0(cutOff,' is finished'))
|
# https://fenicsproject.org/docs/dolfin/latest/python/demos/biharmonic/demo_biharmonic.py.html
module demo_biharmonic
using FenicsPy
# Optimization options for the form compiler
parameters["form_compiler"]["cpp_optimize"] = true
parameters["form_compiler"]["optimize"] = true
# Make mesh ghosted for evaluation of DG terms
parameters["ghost_mode"] = "shared_facet"
# Create mesh and define function space
mesh = UnitSquareMesh(32, 32)
V = FunctionSpace(mesh, "CG", 2)
# Define Dirichlet boundary
@pydef mutable struct DirichletBoundary <: dolfin.SubDomain
    function inside(self, x, on_boundary)
        return on_boundary
    end
end
py"""
from math import sin, pi
from dolfin import UserExpression
class Source(UserExpression):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def eval(self, values, x):
        values[0] = 4.0*pi**4*sin(pi*x[0])*sin(pi*x[1])
    def value_shape(self):
        return ()
"""
# Define boundary condition
u0 = Constant(0.0)
bc = DirichletBC(V, u0, SubDomain(DirichletBoundary()))
# Define trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
# Define normal component, mesh size and right-hand side
h = CellDiameter(mesh)
h_avg = (h("+") + h("-"))/2.0
n = FacetNormal(mesh)
f = Expression(py"Source"(degree=2))
# Penalty parameter
alpha = Constant(8.0)
# Define bilinear form
a = inner(div(grad(u)), div(grad(v)))*dx -
inner(avg(div(grad(u))), jump(grad(v), n))*dS -
inner(jump(grad(u), n), avg(div(grad(v))))*dS +
alpha/h_avg*inner(jump(grad(u),n), jump(grad(v),n))*dS
# Define linear form
L = f*v*dx
# Solve variational problem
u = FeFunction(V)
solve(a == L, u, bc)
# Save solution to file
file = File("biharmonic/biharmonic.pvd")
file << u
# Plot solution
#plot(u)
end # module demo_biharmonic
|
mutable struct MiddlewareStore{T} <: AbstractStore{T}
    store::Store{T}
    middlewares::Vector
end
MiddlewareStore(store::Store) = MiddlewareStore(store, [])
function identity_middleware(dispatch)
    new_dispatch = (s::Store, a::AbstractAction) -> begin
        # prologue
        result = dispatch(s, a)
        # epilogue
        return result
    end
    return new_dispatch
end
function dispatch!(store::MiddlewareStore, action::AbstractAction)
    # With no middlewares installed, dispatch directly to the wrapped store.
    isempty(store.middlewares) && return dispatch!(store.store, action)
    dispatch = identity_middleware(dispatch!)
    for middleware in store.middlewares
        dispatch = middleware(dispatch)
    end
    return dispatch(store.store, action)
end
get_state(store::MiddlewareStore) = get_state(store.store)
subscribe!(store::MiddlewareStore, listener::Base.Callable) = subscribe!(store.store, listener)
function create_store(reducer::Base.Callable, preloaded_state, middlewares)
    store = create_store(reducer, preloaded_state)
    return MiddlewareStore(store, middlewares)
end
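# Usage sketch (not part of the original file). `make_logging_middleware` below is a
# hypothetical example middleware; `my_reducer`, `initial_state`, and `MyAction` are
# placeholders, and `get_state(::Store)` is assumed to exist for the wrapped Store.
function make_logging_middleware(io::IO)
    return function (dispatch)
        (s::Store, a::AbstractAction) -> begin
            println(io, "dispatching ", a)            # prologue: log the action
            result = dispatch(s, a)
            println(io, "new state: ", get_state(s))  # epilogue: log the resulting state
            return result
        end
    end
end
# store = create_store(my_reducer, initial_state, [make_logging_middleware(stdout)])
# dispatch!(store, MyAction())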
|
[STATEMENT]
lemma value_written_change_prefix:
assumes eq: "ltake n E = ltake n E'"
and an: "enat a < n"
shows "value_written P E a = value_written P E' a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. value_written P E a = value_written P E' a
[PROOF STEP]
using action_obs_change_prefix_eq[OF eq an]
[PROOF STATE]
proof (prove)
using this:
action_obs E a = action_obs E' a
goal (1 subgoal):
1. value_written P E a = value_written P E' a
[PROOF STEP]
by(simp add: value_written_def fun_eq_iff)
|
/-= Chapter 9: Structures and records =-/
/- Declaring structures -/
namespace declaring_structures
structure Point (α : Type u) where
mk :: (x : α) (y : α)
#check Point
#check @Point.rec
#check @Point.mk
#check @Point.x
#check @Point.y
namespace alternate_form
structure Point (α : Type u) where
rect ::
x : α
y : α
example : Point Nat := Point.rect 3 4
end alternate_form
namespace default_constructor
structure Point (α : Type u) where
x : α
y : α
example : Point Nat := Point.mk 3 4
end default_constructor
#eval Point.x (Point.mk 10 20)
#eval Point.y (Point.mk 10 20)
open Point
example (a b : α) : x (mk a b) = a := rfl
example (a b : α) : y (mk a b) = b := rfl
def p := Point.mk 10 20
#check p.x
#eval p.x
#eval p.y
namespace dot_notation
structure Point (α : Type u) where
rect ::
x : α
y : α
deriving Repr
def Point.add (p q : Point Nat) :=
Point.rect (p.x + q.x) (p.y + q.y)
def p : Point Nat := Point.rect 1 2
def q : Point Nat := Point.rect 3 4
#eval p.add q
def Point.smul (n : Nat) (p : Point Nat) :=
Point.rect (n * p.x) (n * p.y)
#eval p.smul 3
#check @List.map
def xs : List Nat := [1, 2, 3]
def f : Nat → Nat := λ x => x * x
#eval xs.map f
end dot_notation
end declaring_structures
/- Objects -/
namespace objects
namespace basic_point
structure Point (α : Type u) where
x : α
y : α
#check { x := 10, y := 20 : Point Nat }
#check { y := 20, x := 10 : Point _ }
#check ({ x := 10, y := 20 } : Point Nat )
example : Point Nat := { y := 20, x := 10 }
end basic_point
namespace field_inference
structure MyStruct where
{α : Type u}
{β : Type v}
a : α
b : β
#check { a := 10, b := true : MyStruct }
end field_inference
namespace record_update
structure Point (α : Type u) where
x : α
y : α
deriving Repr
def p : Point Nat := { x := 1, y := 2 }
#eval { p with y := 3 }
#eval { p with x := 4 }
structure Point3 (α : Type u) where
x : α
y : α
z : α
def q : Point3 Nat := { x := 5, y := 5, z := 5 }
def r : Point3 Nat := { p, q with x := 6 }
example : r.x = 6 := rfl
example : r.y = 2 := rfl
example : r.z = 5 := rfl
end record_update
end objects
/- Inheritance -/
namespace inheritance
namespace simple
structure Point (α : Type u) where
x : α
y : α
inductive Color where
| red | green | blue
structure ColorPoint (α : Type u) extends Point α where
c : Color
end simple
namespace multiple
structure Point (α : Type u) where
x : α
y : α
z : α
structure RgbValue where
red : Nat
green : Nat
blue : Nat
structure RedGreenPoint (α : Type u) extends Point α, RgbValue where
no_blue : blue = 0
def p : Point Nat := { x := 10, y := 10, z := 20 }
def rgp : RedGreenPoint Nat :=
{ p with red := 200, green := 40, blue := 0, no_blue := rfl }
example : rgp.x = 10 := rfl
example : rgp.red = 200 := rfl
end multiple
end inheritance
|
#' Build a Link-path Incidence Matrix
#'
#' This function builds a link-path incidence matrix, in which feasible routes are determined using stochastic network loading based on user-specified link costs. The routes are based on a probit model, implemented via simulation.
#' @param Links For a network with N nodes, Links is an NxN matrix. The i,j entry of Links is the link number (ID) for the node connecting node i to node j. Entries should be set to NA if there is no link between the node pair in question.
#' @param Costs A 2-column matrix, the first column of which is the link number (ID) and the second the link travel cost. Costs should be strictly positive.
#' @param theta Standard deviation of link costs when apply probit-based stochastic network loading to identify routes. Can be vector of length equal to the number of links, or a scalar (which is then replicated if necessary). Default value is theta=0, so that only shortest paths between OD pairs are generated.
#' @param nsim Number of simulation runs to find routes. In each run, link costs are modified by adding normal error, and the shortest paths computed with respect to these modified costs. nsim defaults to 100.
#' @param orig Set of nodes specified as possible origins of travel. Defaults to all nodes for which there is at least one feasible route to another node.
#' @param dest Set of nodes specified as possible destinations of travel. Defaults to all nodes that are reachable from elsewhere by a feasible path.
#' @return A list containing the link-path incidence matrix A, a vector O specifying the origin for each path, and a vector D specifying the destination for each path.
#' @keywords link-path incidence matrix
#' @export
#' @examples
#' Links <- matrix(0,nrow=9, ncol=9)
#' Links[1,7] <- 1
#' Links[2,7] <- 2
#' Links[1,5] <- 3
#' Links[7,8] <- 4
#' Links[2,6] <- 5
#' Links[5,8] <- 6
#' Links[8,5] <- 7
#' Links[8,6] <- 8
#' Links[6,8] <- 9
#' Links[5,3] <- 10
#' Links[8,9] <- 11
#' Links[6,4] <- 12
#' Links[9,3] <- 13
#' Links[9,4] <- 14
#' Costs <- cbind(1:14,rep(1,14))
#' buildA(Links,Costs)
#' buildA(Links,Costs,theta=0.4,orig=c(1,2),dest=c(3,4))
buildA <- function(Links,Costs,theta=0,nsim=100,orig=NA,dest=NA){
  require(e1071)
  r <- nrow(Links)
  n <- nrow(Costs)
  A <- D <- O <- numeric(0)
  X <- Links*NA
  theta <- rep(theta,length.out=n)
  if (is.na(orig[1])) orig <- 1:r
  if (is.na(dest[1])) dest <- 1:r
  if (theta[1] <= 0) nsim <- 1
  for (s in 1:nsim){
    # Perturb link costs with normal noise (sd = theta) and fill the cost matrix X
    for (i in Costs[,1]){
      X[Links==Costs[i,1]] <- Costs[i,2] + rnorm(1,sd=theta[i])
    }
    X[X<0] <- 1e-6
    SP <- allShortestPaths(X)
    for (o in orig){
      for (d in dest){
        if (o!=d){
          if(!is.na(SP$length[o,d])){
            p <- extractPath(SP,o,d)
            a <- rep(0,n)
            for (j in 1:(length(p)-1) ){
              link <- Links[p[j],p[j+1]]
              a[link] <- 1
            }
            A <- cbind(A,a)
            O <- c(O,o)
            D <- c(D,d)
          }
        }
      }
    }
  }
  # Drop duplicate paths and order them by origin, then destination
  indx <- duplicated(A,MARGIN=2)
  A <- A[,!indx]
  O <- O[!indx]
  D <- D[!indx]
  indx <- order(O,D)
  A <- A[,indx]
  O <- O[indx]
  D <- D[indx]
  list(A=unname(A),O=O,D=D)
}
|
function xList=RKAdaptiveAtTimes(xInit,theTimes,f,initStepSize,order,solutionChoice,RelTol,AbsTol,maxSteps)
%%RKADAPTIVEATTIMES Perform multiple steps of Runge-Kutta propagation
% using an adaptive step size. Runge-Kutta methods are
% derivative-free techniques for solving ordinary
% differential equations. That is, integrating
% dx/dt=f(x,t) given initial conditions
% (xStart,tSpan(1)). More information on available
% algorithms for the steps is given in the comments to
% the function RungeKStep.
%
%INPUTS: xInit The initial value of the state (scalar or vector) over which
% integration is being performed.
% theTimes The times at which state estimates are desired. theTimes(1)
% is the time of xInit.
% f f(xVal,curT) returns the derivative of xVal taken at time
% curT.
% initStepSize An optional initial step size (in t) to use for the
% integration. If omitted or an empty matrix is passed, an
% ad-hoc method is used to find an initial step size.
% order The order of the Runge-Kutta method. If this parameter is
% omitted, then the default order of 5 is used. Order can
% range from 1 to 7.
% solutionChoice When multiple formulae are implemented, this selects
% which one to use. Otherwise, this parameter is not used.
% RelTol The maximum relative error tolerance allowed, a positive
% scalar. If omitted or an empty matrix is passed, the default
% value of 1e-3 is used.
% AbsTol The absolute error tolerance allowed, a positive scalar, of
% the same for all components of x, or a positive NX1 vector.
% If omitted or an empty matrix is passed, the default value
% of 1e-6 is used.
% maxSteps The maximum allowable number of steps to perform the
% integration. If omitted, the default of 1024 is used.
%
%OUTPUTS: xList The state at the times given in times xList(:,1) is the
% same as xInit.
%
%A detailed description of the adaptive step size algorithm can be found in
%the comments of RKAdaptiveOverRange.
%
%May 2015 David Karnick, Naval Research Laboratory, Washington D.C.
%(UNCLASSIFIED) DISTRIBUTION STATEMENT A. Approved for public release.
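%As a usage sketch (not taken from the library documentation): propagate the
%scalar ODE dx/dt=-x from x(0)=1 and recover the state at a few chosen times,
%leaving all of the optional inputs at their default values:
%
% f=@(x,t)(-x);
% xList=RKAdaptiveAtTimes(1,[0;0.5;1;2],f);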
if(nargin<9||isempty(maxSteps))
    maxSteps=1024;
end
if(nargin<8||isempty(AbsTol))
    AbsTol=1e-6;
end
if(nargin<7||isempty(RelTol))
    RelTol=1e-3;
end
if(nargin<6||isempty(solutionChoice))
    solutionChoice=0;
end
if(nargin<5||isempty(order))
    order=5;
end
if(nargin<4||isempty(initStepSize))
    initStepSize=[];
end
xDim=length(xInit);
numTimes=length(theTimes);
xList=zeros(xDim,numTimes);
xList(:,1)=xInit;
for curTime=2:numTimes
    tSpan=theTimes(curTime-1:curTime);
    [xVals,~,~,~,~,initStepSize]=RKAdaptiveOverRange(xList(:,curTime-1),tSpan,f,initStepSize,0,order,solutionChoice,RelTol,AbsTol,maxSteps);
    xList(:,curTime)=xVals(:,end);
end
end
%LICENSE:
%
%The source code is in the public domain and not licensed or under
%copyright. The information and software may be used freely by the public.
%As required by 17 U.S.C. 403, third parties producing copyrighted works
%consisting predominantly of the material produced by U.S. government
%agencies must provide notice with such work(s) identifying the U.S.
%Government material incorporated and stating that such material is not
%subject to copyright protection.
%
%Derived works shall not identify themselves in a manner that implies an
%endorsement by or an affiliation with the Naval Research Laboratory.
%
%RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE
%SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY THE NAVAL
%RESEARCH LABORATORY FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE ACTIONS
%OF RECIPIENT IN THE USE OF THE SOFTWARE.
|
#pragma once
#include <Eigen/Dense>
#include <QUuid>
namespace pd { class TabbedWindow; }
namespace pd::ecs::cmp::tabbedWindow
{
// ---------------------------------------------------------------------------------------------------------
class Component
{
public:
Eigen::Vector2i position;
Eigen::Vector2i size;
QUuid layerUuid;
};
// ---------------------------------------------------------------------------------------------------------
class RequestWidgetUpdate
{
public:
};
// ---------------------------------------------------------------------------------------------------------
class Focused
{
public:
};
}
|
!*****************************************************************************
!
! Rokko: Integrated Interface for libraries of eigenvalue decomposition
!
! Copyright (C) 2012-2019 by Rokko Developers https://github.com/t-sakashita/rokko
!
! Distributed under the Boost Software License, Version 1.0. (See accompanying
! file LICENSE_1_0.txt or copy at http://www.boost.org/license_1_0.txt)
!
!*****************************************************************************
program generate_eigen_vector
use rokko
implicit none
integer :: dim
type(rokko_eigen_vector) :: vec
integer :: i
dim = 10
print *,"dimension = ", dim
call rokko_construct(vec, dim)
do i = 1, dim
call rokko_set_elem(vec, i, dble(i))
enddo
call rokko_print(vec)
call rokko_destruct(vec)
end program generate_eigen_vector
|
module Debug.Trace
import Prelude
import PrimIO
%default total
export
trace : (msg : String) -> (result : a) -> a
trace x val = unsafePerformIO (do putStrLn x; pure val)
%foreign "jvm:toString(java/lang/Object java/lang/String),java/util/Objects"
prim_objectToString : AnyPtr -> PrimIO String
export
toString : a -> String
toString value = unsafePerformIO $ primIO (prim_objectToString (believe_me value))
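-- Usage sketch (hypothetical helper, not part of this module): log a message
-- while returning a value unchanged.
--
-- greet : String -> String
-- greet name = trace ("greeting " ++ name) ("Hello, " ++ name)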
|
{-
Agda Implementors' Meeting VI
Göteborg
May 24 - 30, 2007
Hello Agda!
Ulf Norell
-}
-- This is where the fun begins.
-- Unleashing datatypes, pattern matching and recursion.
module Datatypes where
{-
Simple datatypes.
-}
-- Now which datatype should we start with...?
data Nat : Set where
zero : Nat
suc : Nat -> Nat
-- Let's start simple.
pred : Nat -> Nat
pred zero = zero
pred (suc n) = n
-- Now let's do recursion.
_+_ : Nat -> Nat -> Nat
zero + m = m
suc n + m = suc (n + m)
-- An aside on infix operators:
-- Any name containing _ can be used as a mixfix operator.
-- The arguments simply go in place of the _. For instance:
data Bool : Set where
true : Bool
false : Bool
if_then_else_ : {A : Set} -> Bool -> A -> A -> A
if true then x else y = x
if false then x else y = y
-- To declare the associativity and precedence of an operator
-- we write a fixity declaration like the one below. In this case we need
-- parentheses around the else branch if its precedence is lower than 10.
-- For the condition and the then branch we only need parentheses for
-- things like λs.
infix 10 if_then_else_
{-
Parameterised datatypes
-}
data List (A : Set) : Set where
[] : List A
_::_ : A -> List A -> List A
infixr 50 _::_
-- The parameters are implicit arguments to the constructors.
nil : (A : Set) -> List A
nil A = [] {A}
map : {A B : Set} -> (A -> B) -> List A -> List B
map f [] = []
map f (x :: xs) = f x :: map f xs
{-
Empty datatypes
-}
-- A very useful guy is the empty datatype.
data False : Set where
-- When pattern matching on an element of an empty type, something
-- interesting happens:
elim-False : {A : Set} -> False -> A
elim-False () -- Look Ma, no right hand side!
-- The pattern () is called an absurd pattern and matches elements
-- of an empty type.
{-
What's next?
-}
-- Fun as they are, eventually you'll get bored with
-- inductive datatypes.
-- Move on to: Families.agda |
function aa=lpcrf2aa(rf)
%LPCRF2AA Convert reflection coefficients to area function AA=(RF)
%The areas are normalised so that aa(p+2)=1: the effective area of the free air beyond the lips.
% aa(1) is the area of the glottis. This will be zero if rf(:,1)=1.
% Copyright (C) Mike Brookes 1997
% Version: $Id: lpcrf2aa.m,v 1.4 2007/05/04 07:01:39 dmb Exp $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Each reflection coefficient gives the area ratio across one tube boundary:
% aa(k) = aa(k+1)*(1-rf(k))/(1+rf(k)). Starting from the normalised area
% aa(p+2)=1 beyond the lips, the cumulative product below works backwards
% towards the glottis; each row of rf is processed as a separate frame.
aa = fliplr(cumprod([ones(1,size(rf,1)); fliplr((1-rf)./(1+rf)).']).');
|
x:=1:y:=2:z:=3:w:=4:
fun:=proc()
    global x,y,z;
    print(x,y,z,w);
end proc:
Grid:-Launch(fun,imports=[':-x'=x,'y',"z"]); |
module Flexidisc.RecordContent
import Control.Monad.Identity
import Flexidisc.OrdList
import Flexidisc.TaggedValue
import Flexidisc.THList
%default total
%access export
public export
OrdHeader : (k : Type) -> (o : Ord k) -> Type
OrdHeader k o = OrdList k o Type
public export
data RecordContentM : (m : Type -> Type) ->
(k : Type) -> (o : Ord k) -> (OrdHeader k o) -> Type where
Nil : RecordContentM m k o []
(::) : TaggedValue k' (m a) -> RecordContentM m k o xs ->
RecordContentM m k o ((k', a) :: xs)
cons : TaggedValue k' (m a) -> RecordContentM m k o xs ->
RecordContentM m k o ((k', a) :: xs)
cons = (::)
%name RecordContentM xs, ys, zs
public export
RecordContent : (k : Type) -> (o : Ord k) -> (OrdHeader k o) -> Type
RecordContent = RecordContentM id
empty : RecordContentM m k o []
empty = Nil
insert : TaggedValue k' (m a) -> RecordContentM m k o header ->
RecordContentM m k o (insert (k', a) header)
insert x [] = [x]
insert (k' := v) ((kx := vx) :: xs') with (k' < kx)
| False = (kx := vx) :: (insert (k' := v) xs')
| True = (k' := v) :: (kx := vx) :: xs'
atLabel : RecordContentM m k o header -> (loc : OrdLabel l header) ->
m (atLabel header loc)
atLabel ((l := x) :: _) Here = x
atLabel (_ :: xs) (There later) = atLabel xs later
atRow : RecordContentM m k o header -> (loc : OrdRow l ty header) -> m ty
atRow ((l := x) :: _) Here = x
atRow (_ :: xs) (There later) = atRow xs later
update : RecordContentM m k o header -> (loc : OrdRow l a header) ->
(f : m a -> m b) ->
RecordContentM m k o (changeValue header loc b)
update ((l := x) :: xs) Here f = (l := f x) :: xs
update (x :: xs) (There later) f = x :: update xs later f
set : RecordContentM m k o header ->
(loc : OrdLabel l header) -> (new : m ty) ->
RecordContentM m k o (changeValue header loc ty)
set ((l := x) :: xs) Here new = (l := new) :: xs
set (x :: xs) (There later) new = x :: set xs later new
setByRow : RecordContentM m k o header ->
(loc : OrdRow l tOld header) -> (new : m tNew) ->
RecordContentM m k o (changeValue header loc tNew)
setByRow xs loc new = update xs loc (const new)
merge : (xs : RecordContentM m k o header) ->
(ys : RecordContentM m k o header') ->
RecordContentM m k o (merge header header')
merge [] ys = ys
merge (x :: zs) [] = x :: zs
merge ((k := x) :: zs) ((k' := y) :: ys) with (k < k')
| True = (k := x) :: (merge zs ((k' := y) :: ys))
| False = (k' := y) :: (merge ((k := x) :: zs) ys)
diff : DecEq k =>
(xs : RecordContentM m k o header) ->
(ys : RecordContentM m k o header') ->
RecordContentM m k o (diffKeys header header')
diff [] ys = []
diff (kx := vx :: xs) ys {header'} with (decFresh kx header')
| (Yes prf) = kx := vx :: diff xs ys
| (No contra) = diff xs ys
infixl 7 |>
(|>) : DecEq k =>
(xs : RecordContentM m k o header) ->
(ys : RecordContentM m k o header') ->
RecordContentM m k o (patch header header')
(|>) xs ys = merge (diff ys xs) xs
drop : RecordContentM m k o header -> (loc : OrdLabel l header) ->
RecordContentM m k o (dropLabel header loc)
drop (_ :: xs) Here = xs
drop (x :: xs) (There later) = x :: drop xs later
project : RecordContentM m k o header -> Sub sub header ->
RecordContentM m k o sub
project [] Empty = []
project (_ :: ys) (Skip sub) = project ys sub
project (x :: ys) (Keep sub) = x :: project ys sub
keep : (xs : RecordContentM m k o pre) -> (sub : SubWithKeys keys post pre) ->
RecordContentM m k o post
keep xs = project xs . toSub
discard : (xs : RecordContentM m k o pre) ->
(sub : CompWithKeys keys post pre) ->
RecordContentM m k o post
discard xs = project xs . toSub
hoist : (f: {a : _} -> m a -> n a) -> RecordContentM m k o header ->
RecordContentM n k o header
hoist f [] = []
hoist f (l := v :: xs) = (l := f v) :: hoist f xs
lift : (f : {a : _} -> a -> m a) -> RecordContent k o header ->
RecordContentM m k o header
lift = hoist
unlift : RecordContentM m k o header -> RecordContent k o (map m header)
unlift [] = []
unlift (x :: ys) = x :: unlift ys
optional : (xs : RecordContent k o pre) -> (opt : HereOrNot post pre) ->
RecordContentM Maybe k o post
optional [] Empty = []
optional xs (Skip compat yes) = _ := Nothing :: optional xs compat
optional (x :: xs) (Extra compat yes) = optional xs compat
optional (l := v :: xs) (Keep compat) = l := Just v :: optional xs compat
toTHList : RecordContentM m k o header -> THList m k (toList header)
toTHList [] = []
toTHList ((_ := x) :: xs) = x :: toTHList xs
sequence : Applicative m =>
RecordContentM m k o header -> m (RecordContent k o header)
sequence [] = pure []
sequence ((k' := x) :: xs) = (map (cons . (k' :=)) x) <*> (sequence xs)
implementation Eq (RecordContentM m k o []) where
(==) x y = True
(/=) x y = False
implementation
(Eq (m t), Eq (RecordContentM m k o ts)) => Eq (RecordContentM m k o ((l,t) :: ts)) where
(==) ((_ := x) :: xs) ((_ := y) :: ys) = x == y && xs == ys
(/=) ((_ := x) :: xs) ((_ := y) :: ys) = x /= y || xs /= ys
|
[STATEMENT]
lemma rett_instr_low_equal:
assumes a1: "low_equal s1 s2 \<and>
\<not> snd (rett_instr instr s1) \<and>
\<not> snd (rett_instr instr s2) \<and>
(((get_S (cpu_reg_val PSR s1)))::word1) = 0 \<and>
(((get_S (cpu_reg_val PSR s2)))::word1) = 0 \<and>
t1 = snd (fst (rett_instr instr s1)) \<and> t2 = snd (fst (rett_instr instr s2))"
shows "low_equal t1 t2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. low_equal t1 t2
[PROOF STEP]
using a1
[PROOF STATE]
proof (prove)
using this:
low_equal s1 s2 \<and> \<not> snd (rett_instr instr s1) \<and> \<not> snd (rett_instr instr s2) \<and> get_S (cpu_reg_val PSR s1) = 0 \<and> get_S (cpu_reg_val PSR s2) = 0 \<and> t1 = snd (fst (rett_instr instr s1)) \<and> t2 = snd (fst (rett_instr instr s2))
goal (1 subgoal):
1. low_equal t1 t2
[PROOF STEP]
apply (simp add: rett_instr_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. low_equal s1 s2 \<and> \<not> snd ((do psr_val \<leftarrow> gets (cpu_reg_val PSR);
curr_win \<leftarrow> gets (\<lambda>s. get_CWP (cpu_reg_val PSR s));
addr \<leftarrow> gets (get_addr (snd instr));
wim_val \<leftarrow> gets (cpu_reg_val WIM);
npc_val \<leftarrow> gets (cpu_reg_val nPC);
if get_ET psr_val = 1 then if get_S psr_val = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S psr_val = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint curr_win + 1) mod NWINDOWS)) wim_val \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if addr AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu npc_val PC;
write_cpu addr nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint curr_win + 1) mod NWINDOWS)) 1 (get_PS psr_val) psr_val);
write_cpu new_psr_val PSR;
return ()
od
od) s1) \<and> \<not> snd ((do psr_val \<leftarrow> gets (cpu_reg_val PSR);
curr_win \<leftarrow> gets (\<lambda>s. get_CWP (cpu_reg_val PSR s));
addr \<leftarrow> gets (get_addr (snd instr));
wim_val \<leftarrow> gets (cpu_reg_val WIM);
npc_val \<leftarrow> gets (cpu_reg_val nPC);
if get_ET psr_val = 1 then if get_S psr_val = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S psr_val = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint curr_win + 1) mod NWINDOWS)) wim_val \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if addr AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu npc_val PC;
write_cpu addr nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint curr_win + 1) mod NWINDOWS)) 1 (get_PS psr_val) psr_val);
write_cpu new_psr_val PSR;
return ()
od
od) s2) \<and> get_S (cpu_reg_val PSR s1) = 0 \<and> get_S (cpu_reg_val PSR s2) = 0 \<and> t1 = snd (fst ((do psr_val \<leftarrow> gets (cpu_reg_val PSR);
curr_win \<leftarrow> gets (\<lambda>s. get_CWP (cpu_reg_val PSR s));
addr \<leftarrow> gets (get_addr (snd instr));
wim_val \<leftarrow> gets (cpu_reg_val WIM);
npc_val \<leftarrow> gets (cpu_reg_val nPC);
if get_ET psr_val = 1 then if get_S psr_val = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S psr_val = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint curr_win + 1) mod NWINDOWS)) wim_val \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if addr AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu npc_val PC;
write_cpu addr nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint curr_win + 1) mod NWINDOWS)) 1 (get_PS psr_val) psr_val);
write_cpu new_psr_val PSR;
return ()
od
od) s1)) \<and> t2 = snd (fst ((do psr_val \<leftarrow> gets (cpu_reg_val PSR);
curr_win \<leftarrow> gets (\<lambda>s. get_CWP (cpu_reg_val PSR s));
addr \<leftarrow> gets (get_addr (snd instr));
wim_val \<leftarrow> gets (cpu_reg_val WIM);
npc_val \<leftarrow> gets (cpu_reg_val nPC);
if get_ET psr_val = 1 then if get_S psr_val = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S psr_val = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint curr_win + 1) mod NWINDOWS)) wim_val \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if addr AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu npc_val PC;
write_cpu addr nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint curr_win + 1) mod NWINDOWS)) 1 (get_PS psr_val) psr_val);
write_cpu new_psr_val PSR;
return ()
od
od) s2)) \<Longrightarrow> low_equal (snd (fst ((do psr_val \<leftarrow> gets (cpu_reg_val PSR);
curr_win \<leftarrow> gets (\<lambda>s. get_CWP (cpu_reg_val PSR s));
addr \<leftarrow> gets (get_addr (snd instr));
wim_val \<leftarrow> gets (cpu_reg_val WIM);
npc_val \<leftarrow> gets (cpu_reg_val nPC);
if get_ET psr_val = 1 then if get_S psr_val = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S psr_val = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint curr_win + 1) mod NWINDOWS)) wim_val \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if addr AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu npc_val PC;
write_cpu addr nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint curr_win + 1) mod NWINDOWS)) 1 (get_PS psr_val) psr_val);
write_cpu new_psr_val PSR;
return ()
od
od) s1))) (snd (fst ((do psr_val \<leftarrow> gets (cpu_reg_val PSR);
curr_win \<leftarrow> gets (\<lambda>s. get_CWP (cpu_reg_val PSR s));
addr \<leftarrow> gets (get_addr (snd instr));
wim_val \<leftarrow> gets (cpu_reg_val WIM);
npc_val \<leftarrow> gets (cpu_reg_val nPC);
if get_ET psr_val = 1 then if get_S psr_val = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S psr_val = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint curr_win + 1) mod NWINDOWS)) wim_val \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if addr AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu npc_val PC;
write_cpu addr nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint curr_win + 1) mod NWINDOWS)) 1 (get_PS psr_val) psr_val);
write_cpu new_psr_val PSR;
return ()
od
od) s2)))
[PROOF STEP]
apply (simp add: simpler_gets_def bind_def h1_def h2_def Let_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. low_equal s1 s2 \<and> \<not> snd ((if get_ET (cpu_reg_val PSR s1) = 1 then if get_S (cpu_reg_val PSR s1) = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S (cpu_reg_val PSR s1) = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint (get_CWP (cpu_reg_val PSR s1)) + 1) mod NWINDOWS)) (cpu_reg_val WIM s1) \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if get_addr (snd instr) s1 AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu (cpu_reg_val nPC s1) PC;
write_cpu (get_addr (snd instr) s1) nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint (get_CWP (cpu_reg_val PSR s1)) + 1) mod NWINDOWS)) 1 (get_PS (cpu_reg_val PSR s1)) (cpu_reg_val PSR s1));
write_cpu new_psr_val PSR;
return ()
od) s1) \<and> \<not> snd ((if get_ET (cpu_reg_val PSR s2) = 1 then if get_S (cpu_reg_val PSR s2) = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S (cpu_reg_val PSR s2) = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint (get_CWP (cpu_reg_val PSR s2)) + 1) mod NWINDOWS)) (cpu_reg_val WIM s2) \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if get_addr (snd instr) s2 AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu (cpu_reg_val nPC s2) PC;
write_cpu (get_addr (snd instr) s2) nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint (get_CWP (cpu_reg_val PSR s2)) + 1) mod NWINDOWS)) 1 (get_PS (cpu_reg_val PSR s2)) (cpu_reg_val PSR s2));
write_cpu new_psr_val PSR;
return ()
od) s2) \<and> get_S (cpu_reg_val PSR s1) = 0 \<and> get_S (cpu_reg_val PSR s2) = 0 \<and> t1 = snd (fst ((if get_ET (cpu_reg_val PSR s1) = 1 then if get_S (cpu_reg_val PSR s1) = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S (cpu_reg_val PSR s1) = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint (get_CWP (cpu_reg_val PSR s1)) + 1) mod NWINDOWS)) (cpu_reg_val WIM s1) \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if get_addr (snd instr) s1 AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu (cpu_reg_val nPC s1) PC;
write_cpu (get_addr (snd instr) s1) nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint (get_CWP (cpu_reg_val PSR s1)) + 1) mod NWINDOWS)) 1 (get_PS (cpu_reg_val PSR s1)) (cpu_reg_val PSR s1));
write_cpu new_psr_val PSR;
return ()
od) s1)) \<and> t2 = snd (fst ((if get_ET (cpu_reg_val PSR s2) = 1 then if get_S (cpu_reg_val PSR s2) = 0 then do raise_trap privileged_instruction;
return ()
od else do raise_trap illegal_instruction;
return ()
od else if get_S (cpu_reg_val PSR s2) = 0 then do write_cpu_tt 3;
set_exe_mode False;
set_err_mode True;
raise_trap privileged_instruction;
fail ()
od else if get_WIM_bit (nat ((uint (get_CWP (cpu_reg_val PSR s2)) + 1) mod NWINDOWS)) (cpu_reg_val WIM s2) \<noteq> 0 then do write_cpu_tt 6;
set_exe_mode False;
set_err_mode True;
raise_trap window_underflow;
fail ()
od else if get_addr (snd instr) s2 AND 3 \<noteq> 0 then do write_cpu_tt 7;
set_exe_mode False;
set_err_mode True;
raise_trap mem_address_not_aligned;
fail ()
od else do write_cpu (cpu_reg_val nPC s2) PC;
write_cpu (get_addr (snd instr) s2) nPC;
new_psr_val \<leftarrow> gets (\<lambda>s. update_PSR_rett (word_of_int ((uint (get_CWP (cpu_reg_val PSR s2)) + 1) mod NWINDOWS)) 1 (get_PS (cpu_reg_val PSR s2)) (cpu_reg_val PSR s2));
write_cpu new_psr_val PSR;
return ()
od) s2)) \<Longrightarrow> (get_ET (cpu_reg_val PSR s1) = 1 \<longrightarrow> (get_ET (cpu_reg_val PSR s2) = 1 \<longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))) \<and> (get_ET (cpu_reg_val PSR s2) \<noteq> 1 \<longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))))) \<and> (get_ET (cpu_reg_val PSR s1) \<noteq> 1 \<longrightarrow> (get_ET (cpu_reg_val PSR s2) = 1 \<longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case 
fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))) \<and> (get_ET (cpu_reg_val PSR s2) \<noteq> 1 \<longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))))
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>low_equal s1 s2; \<not> snd (raise_trap privileged_instruction s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst (raise_trap privileged_instruction s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
2. \<lbrakk>low_equal s1 s2; \<not> snd (raise_trap privileged_instruction s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst (raise_trap privileged_instruction s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
3. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
4. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
apply (simp add: raise_trap_def add_trap_set_def simpler_modify_def)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>low_equal s1 s2; get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = s1\<lparr>traps := insert privileged_instruction (traps s1)\<rparr>; t2 = s2\<lparr>traps := insert privileged_instruction (traps s2)\<rparr>; get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (return () (s1\<lparr>traps := insert privileged_instruction (traps s1)\<rparr>)))) (snd (fst (return () (s2\<lparr>traps := insert privileged_instruction (traps s2)\<rparr>))))
2. \<lbrakk>low_equal s1 s2; \<not> snd (raise_trap privileged_instruction s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst (raise_trap privileged_instruction s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
3. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
4. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
apply (simp add: return_def)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>low_equal s1 s2; get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = s1\<lparr>traps := insert privileged_instruction (traps s1)\<rparr>; t2 = s2\<lparr>traps := insert privileged_instruction (traps s2)\<rparr>; get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (s1\<lparr>traps := insert privileged_instruction (traps s1)\<rparr>) (s2\<lparr>traps := insert privileged_instruction (traps s2)\<rparr>)
2. \<lbrakk>low_equal s1 s2; \<not> snd (raise_trap privileged_instruction s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst (raise_trap privileged_instruction s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
3. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
4. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
using mod_trap_low_equal traps_low_equal
[PROOF STATE]
proof (prove)
using this:
low_equal ?s1.0 ?s2.0 \<and> ?t1.0 = ?s1.0\<lparr>traps := ?new_traps\<rparr> \<and> ?t2.0 = ?s2.0\<lparr>traps := ?new_traps\<rparr> \<Longrightarrow> low_equal ?t1.0 ?t2.0
low_equal ?s1.0 ?s2.0 \<Longrightarrow> traps ?s1.0 = traps ?s2.0
goal (4 subgoals):
1. \<lbrakk>low_equal s1 s2; get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = s1\<lparr>traps := insert privileged_instruction (traps s1)\<rparr>; t2 = s2\<lparr>traps := insert privileged_instruction (traps s2)\<rparr>; get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (s1\<lparr>traps := insert privileged_instruction (traps s1)\<rparr>) (s2\<lparr>traps := insert privileged_instruction (traps s2)\<rparr>)
2. \<lbrakk>low_equal s1 s2; \<not> snd (raise_trap privileged_instruction s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst (raise_trap privileged_instruction s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
3. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
4. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>low_equal s1 s2; \<not> snd (raise_trap privileged_instruction s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst (raise_trap privileged_instruction s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
2. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
3. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
using cpu_reg_val_low_equal
[PROOF STATE]
proof (prove)
using this:
low_equal ?s1.0 ?s2.0 \<Longrightarrow> cpu_reg_val ?cr ?s1.0 = cpu_reg_val ?cr ?s2.0
goal (3 subgoals):
1. \<lbrakk>low_equal s1 s2; \<not> snd (raise_trap privileged_instruction s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst (raise_trap privileged_instruction s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) = 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (raise_trap privileged_instruction s1) of (x, xa) \<Rightarrow> return x xa))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
2. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
3. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
2. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
using cpu_reg_val_low_equal
[PROOF STATE]
proof (prove)
using this:
low_equal ?s1.0 ?s2.0 \<Longrightarrow> cpu_reg_val ?cr ?s1.0 = cpu_reg_val ?cr ?s2.0
goal (2 subgoals):
1. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd (raise_trap privileged_instruction s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst (raise_trap privileged_instruction s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) = 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (raise_trap privileged_instruction s2) of (x, xa) \<Rightarrow> return x xa)))
2. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
apply fastforce
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>low_equal s1 s2; \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1); \<not> snd ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s1)); t2 = snd (fst ((do y \<leftarrow> write_cpu_tt 3;
y \<leftarrow> set_exe_mode False;
y \<leftarrow> set_err_mode True;
raise_trap privileged_instruction >>= fail
od) s2)); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
apply (simp add: bind_def h1_def h2_def Let_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>low_equal s1 s2; \<not> snd (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))) \<and> \<not> snd (write_cpu_tt 3 s1); \<not> snd (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))) \<and> \<not> snd (write_cpu_tt 3 s2); get_S (cpu_reg_val PSR s1) = 0; get_S (cpu_reg_val PSR s2) = 0; t1 = snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst 
(case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))); t2 = snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))); get_ET (cpu_reg_val PSR s1) \<noteq> 1; get_ET (cpu_reg_val PSR s2) \<noteq> 1\<rbrakk> \<Longrightarrow> low_equal (snd (fst (case fst (write_cpu_tt 3 s1) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) 
\<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b))))) (snd (fst (case fst (write_cpu_tt 3 s2) of (a, b) \<Rightarrow> (fst (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))), snd (case fst (set_exe_mode False b) of (a, b) \<Rightarrow> (fst (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))), snd (case fst (set_err_mode True b) of (a, b) \<Rightarrow> (fst (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa), snd (case fst (raise_trap privileged_instruction b) of (x, xa) \<Rightarrow> fail x xa) \<or> snd (raise_trap privileged_instruction b))) \<or> snd (set_err_mode True b))) \<or> snd (set_exe_mode False b)))))
[PROOF STEP]
by (simp add: case_prod_unfold fail_def) |
using RDatasets, DataFrames, Gadfly
set_default_plot_size(6inch, 3inch)
# Load the "Cigar" panel dataset and keep only the rows for State 1
# (this uses the older DataFrames `df[:col]` column-indexing style).
df = dataset("plm", "Cigar")
df = df[df[:State] .== 1, :]
# Asymmetric error-bar bounds around Sales, built from fixed offsets.
ymin = df[:Sales] .- 20*0.23603334566204692
ymax = df[:Sales] .+ 20*0.34651701419196046
# Bar chart of Sales per Year with the error bars overlaid.
plot(df, x="Year", y="Sales", ymin=ymin, ymax=ymax,
     Geom.bar, Geom.errorbar)
|
Do you love small handbags and want to own something unique? If so, TVC Mart's round bag is the right choice for you.
Inside, the bag has a lining and a zippered pocket, ideal for the small essentials you carry when out, such as a wallet and phone. The strap is designed as both a handle and a shoulder strap, so it suits a range of preferences. The knit pattern creates delicate lines that, combined with the colour tones, give the bag an elegant look that is still youthful and attractive.
The bag is compact, measuring 20 x 20 x 7 (length x height x width). A feminine or polished gown is the perfect match for this round bag. |
# Copyright (c) 2018-2020, Carnegie Mellon University
# See LICENSE for details
#
# n-qubit S transform
#
#F qST( n, i ) - S Gate non-terminal
#F Definition: (2^n x 2^n)-matrix that applies an n-point S Transform to the target qubits
#F Note: qST(1) denotes the matrix [[1, 0], [0, e^(-i*pi/2)]], i.e. [[1, 0], [0, -i]]
#F qST(_) is symmetric.
#F
#F qST(n, i) -> an n-qubit S transform, or Sdag transform if i = -1
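#F Example (under the convention above): qST(2, 1) terminates to
#F   Tensor(qS(), qS()), i.e. the diagonal matrix diag(1, -i, -i, -1);
#F the i = -1 variant uses qSdag() in place of qS().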
Class(qST, TaggedNonTerminal, rec(
abbrevs := [ (n, i) -> Checked(IsPosInt(n), [n, When(i <= -1, -1, 1)]) ],
dims := self >> let(size := 2^self.params[1], [size, size]),
terminate := self >> When(self.params[2] = -1, Tensor(Replicate(self.params[1], qSdag())), Tensor(Replicate(self.params[1], qS()))),
isReal := self >> false,
rChildren := self >> self.params,
from_rChildren := (self, rch) >> self.__bases__[1](rch[1], rch[2]),
groups := self >> [Replicate(self.params[1], 1)],
recursive_def := (self, arch) >> self.__bases__[1](self.params[1], self.params[2]),
SmallRandom := () -> Random([2..5]),
LargeRandom := () -> Random([6..15]),
normalizedArithCost := self >> Error("ArithCost not implemented"),
TType := T_Complex(64),
));
NewRulesFor(qST, rec(
# qST_BinSplit rule
# qST_BinSplit qST_(k) -> (qST_(k1) tensor I) (I tensor qST_(k2))
# k1 + k2 = k
qST_BinSplit := rec (
forTransposition := false,
minSize := 2,
applicable := (self, nt) >> nt.params[1] > 1,
children := nt -> List( [1..nt.params[1] - 1], i -> [ Tensor(qST(i, nt.params[2]), qST(nt.params[1]-i, nt.params[2])).withTags(nt.getTags()) ] ),
apply := (nt, c, cnt) -> c[1],
switch := true,
),
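    # Example of the BinSplit rule: qST(3, d) yields the candidate children
    #   Tensor(qST(1, d), qST(2, d)) and Tensor(qST(2, d), qST(1, d)),
    # each inheriting the tags of the parent non-terminal.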
#F qST_Base: qST(1, _) = qS() or qS().inverse() SPL object
#F Directly represent as an implementable gate
qST_Base := rec(
info := "qST_(1, _) -> qS()",
forTransposition := false,
applicable := (self, nt) >> nt.params[1]=1,
apply := (nt, c, cnt) -> When(nt.params[2] = -1, qSdag(), qS()),
)
)); |
The #2 St. Cloud State Men's Hockey team (11-1-0) defeated visiting Bemidji State 7-4 Saturday night in St. Cloud -- earning their fifth weekend sweep of the season.
Scoring for St. Cloud State came from Jimmy Schuldt (3, 4), Kevin Fitzgerald (5, 6), Patrick Newell (8), Robby Jackson (3), and Jon Lizotte (1).
The Beavers outshot the Huskies 24-21, with Dave Hrenak in the net for SCSU recording 20 saves for his eighth straight win. |
As explained below, chess theorists in recent decades have continued to debate the size and nature of White's advantage, if any. Apart from Berliner, they have rejected the idea that White has a forced win from the opening position. Many also reject the traditional paradigm that Black's objective should be to neutralize White's initiative and obtain equality.
|
import order.lattice
set_option old_structure_cmd true
universe u
variable (α : Type u)
namespace zfc
class has_zmem extends has_mem α α
instance [has_zmem α] : has_subset α :=
⟨λ x y, ∀ z, z ∈ x → z ∈ y⟩
variable {α}
theorem subset.refl [has_zmem α] (x : α) : x ⊆ x := λ z, id
theorem subset.trans [has_zmem α] {x y z : α}
(hxy : x ⊆ y) (hyz : y ⊆ z) : x ⊆ z :=
λ p hp, hyz p $ hxy p hp
instance [has_zmem α] : preorder α :=
⟨(⊆), _, subset.refl, λ x y z, subset.trans⟩
variable α
class is_extensional extends has_zmem α :=
(ext : ∀ x y : α, (∀ z, z ∈ x ↔ z ∈ y) → x = y )
variable {α}
theorem subset.antisymm [is_extensional α] {x y : α}
(hxy : x ⊆ y) (hyx : y ⊆ x) : x = y :=
is_extensional.ext x y $ λ z, ⟨λ hx, hxy z hx, λ hy, hyx z hy⟩
instance partial_order [is_extensional α] : partial_order α :=
{ le_antisymm := λ x y, subset.antisymm,
.. zfc.preorder }
variable α
class has_zempty extends has_zmem α, has_emptyc α :=
(not_zmem_empty : ∀ x, x ∉ (∅:α))
class has_upair extends has_zmem α :=
(upair : α → α → α)
(zmem_upair_iff_eq_or_eq : ∀ x y z, z ∈ upair x y ↔ z = x ∨ z = y)
variable {α}
theorem not_zmem_empty [has_zempty α] : ∀ {x}, x ∉ (∅:α) :=
has_zempty.not_zmem_empty
def upair [has_upair α] : α → α → α :=
has_upair.upair
def zmem_upair_iff_eq_or_eq [has_upair α] {x y z : α} :
z ∈ upair x y ↔ z = x ∨ z = y :=
has_upair.zmem_upair_iff_eq_or_eq x y z
theorem zmem_upair_left [has_upair α] {x y : α} : x ∈ upair x y :=
zmem_upair_iff_eq_or_eq.2 $ or.inl rfl
theorem zmem_upair_right [has_upair α] {x y : α} : y ∈ upair x y :=
zmem_upair_iff_eq_or_eq.2 $ or.inr rfl
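/-- Kuratowski-style ordered pair, encoded as `upair (upair x x) (upair x y)`,
the analogue of `{{x}, {x, y}}`. -/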
def opair [has_upair α] (x y : α) : α :=
upair (upair x x) (upair x y)
theorem opair.ext [has_upair α] {p q r s : α}
(h : opair p q = opair r s) : p = r ∧ q = s :=
begin
have h1 : upair p p ∈ opair p q,
from zmem_upair_left,
have h2 : upair p q ∈ opair p q,
from zmem_upair_right,
have h3 : p ∈ upair p p,
from zmem_upair_left,
have h4 : p ∈ upair p q,
from zmem_upair_left,
have h5 : q ∈ upair p q,
from zmem_upair_right,
have h6 : upair r r ∈ opair r s,
from zmem_upair_left,
have h7 : upair r s ∈ opair r s,
from zmem_upair_right,
have h8 : r ∈ upair r r,
from zmem_upair_left,
have h9 : r ∈ upair r s,
from zmem_upair_left,
have h0 : s ∈ upair r s,
from zmem_upair_right,
rw h at h1 h2,
rw ← h at h6 h7,
unfold opair at h1 h2 h6 h7,
rw zmem_upair_iff_eq_or_eq at h1 h2 h6 h7,
cases h1,
{ rw [h1, zmem_upair_iff_eq_or_eq, or_self] at h3,
subst h3,
cases h2,
{ rw [h2, zmem_upair_iff_eq_or_eq, or_self] at h5,
subst h5,
rw or_self at h7,
rw [h7, zmem_upair_iff_eq_or_eq, or_self] at h0,
subst h0,
split, refl, refl },
{ rw [h2, zmem_upair_iff_eq_or_eq] at h5,
cases h5; subst h5,
{ rw [← h2, zmem_upair_iff_eq_or_eq, or_self] at h0,
subst h0,
split, refl, refl },
{ split, refl, refl } } },
{ rw [← h1, zmem_upair_iff_eq_or_eq, or_self] at h9,
subst h9,
rw [← h1, zmem_upair_iff_eq_or_eq, or_self] at h0,
subst h0,
rw or_self at h2,
rw [h2, zmem_upair_iff_eq_or_eq, or_self] at h5,
subst h5,
split, refl, refl }
end
theorem opair.iff [has_upair α] {p q r s : α} : opair p q = opair r s ↔ p = r ∧ q = s :=
⟨opair.ext, λ ⟨hpq, hrs⟩, hpq ▸ hrs ▸ rfl⟩
variable α
class has_sUnion extends has_zmem α :=
(sUnion : α → α)
(zmem_sUnion_iff_zmem_zmem : ∀ x z, z ∈ sUnion x ↔ ∃ t, z ∈ t ∧ t ∈ x)
variable {α}
def sUnion [has_sUnion α] : α → α :=
has_sUnion.sUnion
def zmem_sUnion_iff_zmem_zmem [has_sUnion α] {x z : α} : z ∈ sUnion x ↔ ∃ t, z ∈ t ∧ t ∈ x :=
has_sUnion.zmem_sUnion_iff_zmem_zmem x z
variable α
class has_sUnion_upair extends has_sUnion α, has_upair α
instance [has_sUnion_upair α] : has_union α :=
⟨λ x y, sUnion $ upair x y⟩
instance [has_sUnion_upair α] : has_insert α α :=
⟨λ x A, upair x x ∪ A⟩
lemma union_def [has_sUnion_upair α] (x y : α) : x ∪ y = (sUnion $ upair x y) := rfl
lemma insert_def [has_sUnion_upair α] (x A : α) : has_insert.insert x A = upair x x ∪ A := rfl
variable {α}
lemma zmem_union_iff_zmem_or_zmem [has_sUnion_upair α] {x y z : α} : z ∈ x ∪ y ↔ z ∈ x ∨ z ∈ y :=
begin
rw [union_def, zmem_sUnion_iff_zmem_zmem],
split,
{ intro H,
rcases H with ⟨t, H1, H2⟩,
rw [zmem_upair_iff_eq_or_eq] at H2,
cases H2; subst H2,
{ left, assumption },
{ right, assumption } },
{ intro H,
cases H,
{ existsi x,
split, exact H, exact zmem_upair_left },
{ existsi y,
split, exact H, exact zmem_upair_right } }
end
lemma zmem_insert_iff_eq_or_zmem [has_sUnion_upair α] {x A z : α} : z ∈ has_insert.insert x A ↔ z = x ∨ z ∈ A :=
by rw [insert_def, zmem_union_iff_zmem_or_zmem, zmem_upair_iff_eq_or_eq, or_self]
theorem zmem_insert [has_sUnion_upair α] {x y : α} : x ∈ has_insert.insert x y :=
zmem_insert_iff_eq_or_zmem.2 $ or.inl rfl
theorem zmem_insert_of_zmem [has_sUnion_upair α] {x y z : α} (H : x ∈ z) : x ∈ has_insert.insert y z :=
zmem_insert_iff_eq_or_zmem.2 $ or.inr H
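/-- von Neumann-style successor: `succ x = insert x x`, i.e. `x` extended with
`x` itself as an element. -/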
def succ [has_sUnion_upair α] : α → α :=
λ x, has_insert.insert x x
theorem zmem_succ_iff_eq_or_zmem [has_sUnion_upair α] {x y : α} : x ∈ succ y ↔ x = y ∨ x ∈ y :=
zmem_insert_iff_eq_or_zmem
theorem zmem_succ [has_sUnion_upair α] {x : α} : x ∈ succ x :=
zmem_succ_iff_eq_or_zmem.2 $ or.inl rfl
theorem zmem_succ_of_zmem [has_sUnion_upair α] {x y : α} (H : x ∈ y) : x ∈ succ y :=
zmem_succ_iff_eq_or_zmem.2 $ or.inr H
variable α
class has_powerset extends has_zmem α :=
(powerset : α → α)
(zmem_powerset : ∀ x z, z ∈ powerset x ↔ z ⊆ x)
class has_comprehension extends has_zmem α :=
(comprehension : α → (α → Prop) → α)
(zmem_comprehension_iff : ∀ A p x, x ∈ comprehension A p ↔ x ∈ A ∧ p x)
class has_infty :=
(infinity : α)
notation `∞` := has_infty.infinity _
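-- `replacement A f` collects the images of elements of `A` under a relation `f`
-- that the accompanying hypothesis `H` forces to be functional.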
class has_replacement extends has_zmem α :=
(replacement : α → (α → α → Prop) → α)
(zmem_replacement_iff : ∀ A (f : α → α → Prop) y (H : ∀ A B C, f A B → f A C → B = C), (y ∈ replacement A f ↔ ∃ x, x ∈ A ∧ f x y))
class has_infty_replacement_powerset extends has_infty α, has_replacement α, has_powerset α
class has_infty_replacement_powerset_sUnion extends has_infty_replacement_powerset α, has_sUnion α
variable {α}
instance has_infty.to_inhabited [has_infty α] : inhabited α :=
⟨∞⟩
def powerset [has_powerset α] : α → α :=
has_powerset.powerset
theorem zmem_powerset [has_powerset α] : ∀ {x z : α}, z ∈ powerset x ↔ z ⊆ x :=
has_powerset.zmem_powerset
def comprehension [has_comprehension α] : α → (α → Prop) → α :=
has_comprehension.comprehension
theorem zmem_comprehension_iff [has_comprehension α] : ∀ {A : α} {p x}, x ∈ comprehension A p ↔ x ∈ A ∧ p x :=
has_comprehension.zmem_comprehension_iff
def replacement [has_replacement α] : α → (α → α → Prop) → α :=
has_replacement.replacement
theorem zmem_replacement_iff [has_replacement α] {A} {f : α → α → Prop} {y} (H : ∀ A B C, f A B → f A C → B = C) :
(y ∈ replacement A f ↔ ∃ x, x ∈ A ∧ f x y) :=
has_replacement.zmem_replacement_iff A f y H
instance has_comprehension.to_has_zempty [s : has_comprehension α] [inhabited α] : has_zempty α :=
{ emptyc := comprehension (inhabited.default α) (λ x, false),
not_zmem_empty := λ x hx, begin
simp [∅] at hx,
rw [zmem_comprehension_iff] at hx,
exact hx.2
end,
.. s }
instance has_replacement.to_has_comprehension [s : has_replacement α] : has_comprehension α :=
{ comprehension := λ A p, replacement A (λ x y, x = y ∧ p x),
zmem_comprehension_iff := λ A p x, begin
have h1 : ∀ A B C, A = B ∧ p A → A = C ∧ p A → B = C,
{ intros A B C h1 h2,
rw [← h1.1, ← h2.1] },
rw [zmem_replacement_iff h1],
split,
{ intro H,
rcases H with ⟨w, H1, H2, H3⟩,
subst H2,
exact ⟨H1, H3⟩ },
{ intro H,
existsi x,
simpa }
end,
.. s }
instance has_infty_replacement_powerset.to_has_zempty [s : has_infty_replacement_powerset α] : has_zempty α :=
{ .. s, .. has_comprehension.to_has_zempty }
instance has_infty_replacement_powerset.to_has_upair [s : has_infty_replacement_powerset α] : has_upair α :=
{ upair := λ x y, replacement (powerset (powerset ∅)) (λ m n, m = ∅ ∧ n = x ∨ m = powerset ∅ ∧ n = y),
zmem_upair_iff_eq_or_eq := λ x y z, begin
have h1 : ∀ (A B C : α),
A = ∅ ∧ B = x ∨ A = powerset ∅ ∧ B = y →
A = ∅ ∧ C = x ∨ A = powerset ∅ ∧ C = y → B = C,
{ intros A B C h1 h2,
cases h1; cases h2;
cases h1 with h3 h4;
cases h2 with h5 h6,
{ subst h4, subst h6 },
{ exfalso,
rw h3 at h5,
have h7 : (∅:α) ∈ powerset ∅,
{ rw zmem_powerset,
exact subset.refl ∅ },
rw ← h5 at h7,
exact not_zmem_empty h7 },
{ exfalso,
rw h3 at h5,
have h7 : (∅:α) ∈ powerset ∅,
{ rw zmem_powerset,
exact subset.refl ∅ },
rw h5 at h7,
exact not_zmem_empty h7 },
{ subst h4, subst h6 } },
rw zmem_replacement_iff h1,
split,
{ intro H,
rcases H with ⟨w, H1, ⟨H2, H3⟩ | ⟨H2, H3⟩⟩,
{ left, assumption },
{ right, assumption } },
{ intro H,
cases H,
{ existsi ∅,
split,
{ rw zmem_powerset,
intros z hz,
exfalso,
exact not_zmem_empty hz },
{ left, split, refl, assumption } },
{ existsi powerset ∅,
split,
{ rw zmem_powerset,
exact subset.refl _ },
{ right, split, refl, assumption } } }
end,
.. s }
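-- Pairing from powerset and replacement: `powerset (powerset ∅)` is the two-element set
-- {∅, {∅}}, and the replacement above sends ∅ to `x` and {∅} to `y`, yielding `upair x y`.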
instance has_infty_replacement_powerset_sUnion.to_has_sUnion_upair [s : has_infty_replacement_powerset_sUnion α] : has_sUnion_upair α :=
{ .. s, .. has_infty_replacement_powerset.to_has_upair }
variable α
class has_zinfty extends has_infty α, has_infty_replacement_powerset_sUnion α :=
(empty_zmem_infinity : (∅:α) ∈ (∞:α))
(succ_zmem_infinity_of_zmem_infinity : ∀ x:α, x ∈ (∞:α) → succ x ∈ (∞:α))
class is_regular extends has_zmem α :=
(regular : ∀ x : α, (∃ y, y ∈ x) → (∃ y : α, y ∈ x ∧ ∀ z, z ∈ y → z ∈ x → false))
variable {α}
theorem empty_zmem_infinity [has_zinfty α] : (∅:α) ∈ (∞:α) :=
has_zinfty.empty_zmem_infinity α
theorem succ_zmem_infinity_of_zmem_infinity [has_zinfty α] : ∀ x:α, x ∈ (∞:α) → succ x ∈ (∞:α) :=
has_zinfty.succ_zmem_infinity_of_zmem_infinity
theorem regular [is_regular α] : ∀ x : α, (∃ y, y ∈ x) → (∃ y : α, y ∈ x ∧ ∀ z, z ∈ y → z ∈ x → false) :=
is_regular.regular
variable α
class zf extends has_zmem α, is_extensional α, has_sUnion α, has_powerset α, has_replacement α, has_zinfty α, is_regular α
section zf
variables {α} [zf α] {x y z : α}
theorem singleton_def : {x} = has_insert.insert x (∅:α) := rfl
theorem zmem_singleton_iff : x ∈ ({y}:α) ↔ x = y :=
begin
rw [singleton_def, zmem_insert_iff_eq_or_zmem],
apply or_iff_left_of_imp,
intro H,
exfalso,
exact not_zmem_empty H
end
theorem zmem_singleton : x ∈ ({x}:α) :=
begin
rw [singleton_def, zmem_insert_iff_eq_or_zmem],
left, refl
end
theorem not_zmem_self : x ∉ x :=
λ h, begin
rcases regular {x} ⟨x, zmem_singleton⟩ with ⟨y, h1, h2⟩,
rw zmem_singleton_iff at h1,
subst h1,
exact h2 y h zmem_singleton
end
theorem not_zmem_and_zmem : x ∈ y → y ∈ x → false :=
λ hxy hyx, begin
rcases regular {x, y} ⟨x, by simp [zmem_insert_iff_eq_or_zmem]; right; exact zmem_singleton⟩ with ⟨z, h1, h2⟩,
rw [zmem_insert_iff_eq_or_zmem, zmem_singleton_iff] at h1,
cases h1; subst h1,
{ apply h2 _ hxy,
simp [zmem_insert_iff_eq_or_zmem],
right, exact zmem_singleton },
{ apply h2 _ hyx,
simp [zmem_insert_iff_eq_or_zmem] }
end
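-- Both facts are instances of regularity (foundation): apply `regular` to `{x}` to rule
-- out `x ∈ x`, and to `{x, y}` to rule out `x ∈ y ∧ y ∈ x`.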
theorem succ.ext {x y : α} (H : succ x = succ y) : x = y :=
begin
simp [succ] at H,
have H1 : x ∈ has_insert.insert x x,
{ rw zmem_insert_iff_eq_or_zmem,
left, refl },
have H2 : y ∈ has_insert.insert y y,
{ rw zmem_insert_iff_eq_or_zmem,
left, refl },
rw [H, zmem_insert_iff_eq_or_zmem] at H1,
rw [← H, zmem_insert_iff_eq_or_zmem] at H2,
cases H1,
{ assumption },
{ cases H2,
{ subst H2 },
{ exfalso,
exact not_zmem_and_zmem H1 H2 } }
end
def prod : α → α → α :=
λ X Y, comprehension (powerset $ powerset $ X ∪ Y) (λ z, ∃ x y, x ∈ X ∧ y ∈ Y ∧ z = opair x y)
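-- The cartesian product is carved out of `powerset (powerset (X ∪ Y))`: whenever `x ∈ X`
-- and `y ∈ Y`, every element of the Kuratowski pair `opair x y` lies in
-- `powerset (X ∪ Y)`, so the pair itself lies in `powerset (powerset (X ∪ Y))`.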
theorem zmem_prod {A B : α} (hx : x ∈ A) (hy : y ∈ B) : opair x y ∈ prod A B :=
zmem_comprehension_iff.2
⟨zmem_powerset.2 $ λ z (hz : z ∈ opair x y),
or.cases_on (zmem_upair_iff_eq_or_eq.1 hz)
(λ hzx, zmem_powerset.2 $ λ w hw,
or.cases_on (zmem_upair_iff_eq_or_eq.1 $ (hzx ▸ hw : w ∈ upair x x))
(λ hwx, zmem_union_iff_zmem_or_zmem.2 $ or.inl $ hwx.symm ▸ hx)
(λ hwx, zmem_union_iff_zmem_or_zmem.2 $ or.inl $ hwx.symm ▸ hx))
(λ hzxy, zmem_powerset.2 $ λ w hw,
or.cases_on (zmem_upair_iff_eq_or_eq.1 $ (hzxy ▸ hw : w ∈ upair x y))
(λ hwx, zmem_union_iff_zmem_or_zmem.2 $ or.inl $ hwx.symm ▸ hx)
(λ hwy, zmem_union_iff_zmem_or_zmem.2 $ or.inr $ hwy.symm ▸ hy)),
x, y, hx, hy, rfl⟩
theorem zmem_left_of_zmem_prod {A B : α} (h : opair x y ∈ prod A B) : x ∈ A :=
let ⟨x1, y1, hx1, hy1, h⟩ := (zmem_comprehension_iff.1 h).2 in
(opair.ext h).1.symm ▸ hx1
theorem zmem_right_of_zmem_prod {A B : α} (h : opair x y ∈ prod A B) : y ∈ B :=
let ⟨x1, y1, hx1, hy1, h⟩ := (zmem_comprehension_iff.1 h).2 in
(opair.ext h).2.symm ▸ hy1
class is_relation (f : α) : Prop :=
(eq_opair_of_zmem : ∀ z, z ∈ f → ∃ x y, z = opair x y)
def dom (f : α) : α :=
replacement f (λ z x, ∃ y, z = opair x y)
def range (f : α) : α :=
replacement f (λ z y, ∃ x, z = opair x y)
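-- `dom f` and `range f` are obtained by replacement, sending each pair `opair x y ∈ f`
-- to its first (respectively second) component.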
class is_function (f : α) extends is_relation f : Prop :=
(exists_unique : ∀ x, x ∈ dom f → ∃! y, opair x y ∈ f)
class is_injective (f : α) extends is_function f : Prop :=
(injective : ∀ x y z, opair x z ∈ f → opair y z ∈ f → x = y)
theorem zmem_dom_iff {f x : α} : x ∈ dom f ↔ ∃ y, opair x y ∈ f :=
begin
have h1 : ∀ (A B C : α), (∃ (y : α), A = opair B y) → (∃ (y : α), A = opair C y) → B = C,
{ intros A B C h1 h2,
cases h1 with m h1,
cases h2 with n h2,
subst h1,
exact (opair.ext h2).1 },
rw [dom, zmem_replacement_iff h1],
split; intro H,
{ rcases H with ⟨z, h1, w, h2⟩,
subst h2, existsi w, assumption },
{ cases H with z h,
existsi opair x z,
split, assumption, existsi z, refl }
end
theorem zmem_range_iff {f y : α} : y ∈ range f ↔ ∃ x, opair x y ∈ f :=
begin
have h1 : ∀ (A B C : α), (∃ (x : α), A = opair x B) → (∃ (x : α), A = opair x C) → B = C,
{ intros A B C h1 h2,
cases h1 with m h1,
cases h2 with n h2,
subst h1,
exact (opair.ext h2).2 },
rw [range, zmem_replacement_iff h1],
split; intro H,
{ rcases H with ⟨z, h1, w, h2⟩,
subst h2, existsi w, assumption },
{ cases H with z h,
existsi opair z y,
split, assumption, existsi z, refl }
end
theorem zmem_dom_of_opair_zmem {f : α} [is_function f] {x y : α} (H : opair x y ∈ f) : x ∈ dom f :=
zmem_dom_iff.2 ⟨_, H⟩
theorem zmem_range_of_opair_zmem {f : α} [is_function f] {x y : α} (H : opair x y ∈ f) : y ∈ range f :=
zmem_range_iff.2 ⟨_, H⟩
def eval (f : α) [is_function f] (x : α) : α :=
sUnion $ comprehension (range f) (λ y, opair x y ∈ f)
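-- `eval f x` is `⋃ {y ∈ range f | opair x y ∈ f}`; when `x ∈ dom f` this comprehension
-- is the singleton of the unique value of `f` at `x`, so the union returns that value.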
theorem eval_unique {f : α} [is_function f] {x y : α} (H : x ∈ dom f) (Hxy : opair x y ∈ f) : y = eval f x :=
begin
rcases is_function.exists_unique _ H with ⟨y', H1, H2⟩,
have H3 := H2 _ Hxy, subst H3,
apply is_extensional.ext,
intro z, split; intro hz,
{ rw [eval, zmem_sUnion_iff_zmem_zmem],
existsi y,
rw [zmem_comprehension_iff, zmem_range_iff],
exact ⟨hz, ⟨x, Hxy⟩, Hxy⟩ },
{ rw [eval, zmem_sUnion_iff_zmem_zmem] at hz,
rcases hz with ⟨t, ht1, ht2⟩,
rw [zmem_comprehension_iff, zmem_range_iff] at ht2,
specialize H2 _ ht2.2,
subst H2,
exact ht1 }
end
theorem opair_eval_zmem {f : α} [is_function f] {x : α} (H : x ∈ dom f) : opair x (eval f x) ∈ f :=
begin
rcases is_function.exists_unique _ H with ⟨y, H1, H2⟩,
have H3 := eval_unique H H1,
subst H3,
exact H1
end
theorem eval_zmem_range {f : α} [is_function f] {x : α} (H : x ∈ dom f) : eval f x ∈ range f :=
zmem_range_iff.2 ⟨_, opair_eval_zmem H⟩
def inv (f : α) : α :=
replacement f (λ x y, ∃ m n, opair m n = x ∧ opair n m = y)
theorem inv.aux (A B C : α)
(H1 : ∃ (m n : α), opair m n = A ∧ opair n m = B)
(H2 : ∃ (m n : α), opair m n = A ∧ opair n m = C) :
B = C :=
begin
rcases H1 with ⟨m1, n1, H3, H4⟩,
rcases H2 with ⟨m2, n2, H5, H6⟩,
subst H3, subst H4, subst H6,
rw opair.iff at H5 ⊢,
cases H5 with H1 H2,
split; symmetry; assumption
end
theorem zmem_inv_iff {f x y : α} : opair x y ∈ inv f ↔ opair y x ∈ f :=
begin
rw [inv, zmem_replacement_iff inv.aux],
split; intro h,
{ rcases h with ⟨z, hz, m, n, h1, h2⟩,
rcases opair.ext h2 with ⟨h3, h4⟩,
subst h1, subst h3, subst h4,
exact hz },
{ exact ⟨_, h, _, _, rfl, rfl⟩ }
end
instance inv.is_relation (f : α) : is_relation (inv f) :=
{ eq_opair_of_zmem := begin
intros z hz,
rw [inv, zmem_replacement_iff inv.aux] at hz,
rcases hz with ⟨x, hxf, m, n, h1, h2⟩,
subst h2,
exact ⟨_, _, rfl⟩
end }
instance inv.is_injective (f : α) [is_injective f] : is_injective (inv f) :=
{ exists_unique := begin
intros x hx,
rw zmem_dom_iff at hx,
cases hx with y hy,
existsi y,
split,
{ exact hy },
{ intros z hz,
rw zmem_inv_iff at hy hz,
exact is_injective.injective _ _ _ hz hy }
end,
injective := begin
intros x y z hx hy,
rw zmem_inv_iff at hx hy,
have h1 : z ∈ dom f,
{ rw zmem_dom_iff,
exact ⟨_, hy⟩ },
have h2 := is_function.exists_unique z h1,
exact unique_of_exists_unique h2 hx hy
end,
.. inv.is_relation f }
theorem inv.dom {f : α} : dom (inv f) = range f :=
begin
apply is_extensional.ext,
intro z,
rw [zmem_dom_iff, zmem_range_iff],
split; intro hz,
{ cases hz with y h,
rw zmem_inv_iff at h,
exact ⟨_, h⟩ },
{ cases hz with y h,
existsi y,
rw zmem_inv_iff,
exact h }
end
theorem inv.range {f : α} : range (inv f) = dom f :=
begin
apply is_extensional.ext,
intro z,
rw [zmem_dom_iff, zmem_range_iff],
split; intro hz,
{ cases hz with y h,
rw zmem_inv_iff at h,
exact ⟨_, h⟩ },
{ cases hz with y h,
existsi y,
rw zmem_inv_iff,
exact h }
end
variable α
def omega : α :=
comprehension ∞ (λ n, ∀ A:α, ∅ ∈ A → (∀ k, k ∈ A → succ k ∈ A) → n ∈ A)
notation `ω` := omega _
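-- `ω` consists of the members of `∞` that belong to every set containing `∅` and closed
-- under `succ`; it is therefore the least inductive set.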
variable {α}
theorem empty_zmem_omega : (∅:α) ∈ (ω:α) :=
zmem_comprehension_iff.2 ⟨empty_zmem_infinity, λ A h1 h2, h1⟩
theorem succ_zmem_omega_of_zmem (H : x ∈ (ω:α)) : succ x ∈ (ω:α) :=
zmem_comprehension_iff.2
⟨succ_zmem_infinity_of_zmem_infinity x $ (zmem_comprehension_iff.1 H).1,
λ A h1 h2, h2 x $ (zmem_comprehension_iff.1 H).2 A h1 h2⟩
theorem induction (p : α → Prop) (H1 : p ∅) (H2 : ∀ k, k ∈ omega α → p k → p (succ k)) (n : α) (Hn : n ∈ omega α) : p n :=
(zmem_comprehension_iff.1 $
(zmem_comprehension_iff.1 Hn).2 (comprehension (omega α) p)
(zmem_comprehension_iff.2 ⟨empty_zmem_omega, H1⟩)
(λ k hk, zmem_comprehension_iff.2
⟨succ_zmem_omega_of_zmem $ (zmem_comprehension_iff.1 hk).1,
H2 k (zmem_comprehension_iff.1 hk).1 (zmem_comprehension_iff.1 hk).2⟩)).2
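-- The induction principle for `ω`: instantiate the defining property of `n ∈ ω` at the
-- inductive set `comprehension (omega α) p`.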
theorem omega.structure (H : x ∈ (ω:α)) : x = ∅ ∨ ∃ y ∈ (ω:α), x = succ y :=
@induction α _ (λ z:α, z = ∅ ∨ ∃ y ∈ (ω:α), z = succ y)
(or.inl rfl)
(λ k hk1 hk2, or.inr ⟨k, hk1, rfl⟩)
x H
class is_transitive (x : α) : Prop :=
(zmem_trans : ∀ m n, m ∈ n → n ∈ x → m ∈ x)
instance omega.transitive : is_transitive (ω:α) :=
{ zmem_trans := λ m n hmn hno, @induction α _ (λ z, ∀ x, x ∈ z → x ∈ (ω:α))
(λ x hx, false.elim $ not_zmem_empty hx)
(λ k hk1 hk2 x hx, or.cases_on (zmem_succ_iff_eq_or_zmem.1 hx)
(λ hxk, hxk.symm ▸ hk1)
(hk2 x))
n hno m hmn }
variable α
def nat.to_omega : nat → α
| nat.zero := ∅
| (nat.succ n) := succ (n.to_omega)
theorem nat.to_omega.zmem_omega : ∀ n, nat.to_omega α n ∈ omega α
| nat.zero := empty_zmem_omega
| (nat.succ n) := succ_zmem_omega_of_zmem $ nat.to_omega.zmem_omega n
def nat.to_omega' : nat → {x // x ∈ omega α} :=
λ n, ⟨nat.to_omega α n, nat.to_omega.zmem_omega α n⟩
theorem nat.to_omega.injective : function.injective (nat.to_omega α) :=
begin
intros m n H,
induction m with m ihm generalizing n H;
induction n with n ihn,
{ refl },
{ exfalso,
have h1 : nat.to_omega α n ∈ nat.to_omega α (nat.succ n),
{ unfold nat.to_omega,
unfold succ,
rw zmem_insert_iff_eq_or_zmem,
left, refl },
rw ← H at h1,
exact not_zmem_empty h1 },
{ exfalso,
have h1 : nat.to_omega α m ∈ nat.to_omega α (nat.succ m),
{ unfold nat.to_omega,
unfold succ,
rw zmem_insert_iff_eq_or_zmem,
left, refl },
rw H at h1,
exact not_zmem_empty h1 },
{ unfold nat.to_omega at H,
congr,
exact ihm (succ.ext H) }
end
-- nat.to_omega' isn't supposed to be surjective unless the model is transitive;
-- the proof below "cheats" by comprehending over the meta-level image of nat.to_omega

theorem nat.to_omega.surjective_cheating : function.surjective (nat.to_omega' α) :=
begin
intros x,
cases x with x hx,
dsimp [omega] at hx,
rw zmem_comprehension_iff at hx,
cases hx with h1 h2,
let cheating := comprehension (omega α) (nat.to_omega α '' set.univ),
specialize h2 cheating,
simp [cheating, zmem_comprehension_iff] at h2,
specialize h2 empty_zmem_omega,
specialize h2 ⟨0, rfl⟩,
specialize h2 (λ k hk1 ⟨n, hk2⟩, ⟨succ_zmem_omega_of_zmem hk1, nat.succ n, by rw ← hk2; refl⟩),
rcases h2 with ⟨h2, n, h3⟩,
existsi n,
apply subtype.eq,
exact h3
end
variable {α}
section erase
def erase (x y : α) : α :=
comprehension x (λ z, z ≠ y)
theorem zmem_erase_iff : z ∈ erase x y ↔ z ∈ x ∧ z ≠ y :=
zmem_comprehension_iff
theorem zmem_of_zmem_erase (H : z ∈ erase x y) : z ∈ x :=
(zmem_erase_iff.1 H).1
theorem ne_of_zmem_erase (H : z ∈ erase x y) : z ≠ y :=
(zmem_erase_iff.1 H).2
theorem zmem_erase_of_zmem_of_ne (H1 : z ∈ x) (H2 : z ≠ y) : z ∈ erase x y :=
zmem_erase_iff.2 ⟨H1, H2⟩
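-- `erase x y` is the relative complement `x \ {y}`. A small sanity check, not from the
-- original file:
example : y ∉ erase x y :=
λ h, ne_of_zmem_erase h rfl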
end erase
section recursion
variables (f A c : α) [is_function f] (H1 : dom f = prod ω A) (H2 : range f ⊆ A) (H3 : c ∈ A)
-- {(x,y) ∈ ω × A | ∃ h : ω → A, h(∅) = c ∧ (∀ m ∈ x, h(m⁺) = f(m, h(m))) ∧ h(x) = y}
def recursion : α :=
comprehension (prod ω A)
(λ z, ∃ (h x y : α) [is_function h] (H4 : z = opair x y) (H5 : dom h = ω) (H6 : range h ⊆ A)
(H7 : opair ∅ c ∈ h) (H8: ∀ m hm hsm, m ∈ x → opair m hm ∈ h → opair (opair m hm) hsm ∈ f → opair (succ m) hsm ∈ h), z ∈ h)
include H3
theorem recursion.empty : opair ∅ c ∈ recursion f A c :=
begin
let h : α := prod ω {c},
have hf : is_function h,
{ split,
{ intros z hz,
dsimp [h] at hz,
rw [prod, zmem_comprehension_iff] at hz,
replace hz := hz.2,
rcases hz with ⟨x', y', _, _, h'⟩,
exact ⟨x', y', h'⟩ },
{ intros x' hx',
existsi c,
rw [zmem_dom_iff] at hx',
rcases hx' with ⟨y, hy⟩,
dsimp [h] at hy ⊢,
split,
{ exact zmem_prod (zmem_left_of_zmem_prod hy) zmem_singleton },
{ intros y' hy',
replace hy' := zmem_right_of_zmem_prod hy',
rwa zmem_singleton_iff at hy' } } },
have H5 : dom h = ω,
{ apply is_extensional.ext,
intro z,
rw [zmem_dom_iff],
split; intro h5,
{ cases h5 with y hy,
exact zmem_left_of_zmem_prod hy },
{ existsi c,
exact zmem_prod h5 zmem_singleton } },
have H6 : range h ⊆ A,
{ intros z hz,
rw [zmem_range_iff] at hz,
cases hz with y hy,
replace hy := zmem_right_of_zmem_prod hy,
rw zmem_singleton_iff at hy,
subst hy,
exact H3 },
have H7 : opair ∅ c ∈ h,
{ exact zmem_prod empty_zmem_omega zmem_singleton },
rw [recursion, zmem_comprehension_iff],
split,
{ exact zmem_prod empty_zmem_omega H3 },
{ exact ⟨h, _, _, hf, rfl, H5, H6, H7, (λ m _ _ hm, false.elim $ not_zmem_empty hm), H7⟩ }
end
omit H3
include H1 H2 H3
theorem recursion.succ (h1 : opair x y ∈ recursion f A c) (h2 : opair (opair x y) z ∈ f) : opair (succ x) z ∈ recursion f A c :=
begin
have h3 : opair x y ∈ prod ω A,
{ rw [← H1, zmem_dom_iff], exact ⟨z, h2⟩ },
have h5 : z ∈ A,
{ apply H2, rw [zmem_range_iff], exact ⟨_, h2⟩ },
rw [recursion, zmem_comprehension_iff] at h1,
cases h1 with h1 h3,
rcases h3 with ⟨h, x', y', hf, H4, H5, H6, H7, H8, H9⟩,
have h4 : opair x y ∈ dom f,
{ rw H1, exact h1 },
let h' : α := comprehension (prod ω A) (λ w, ∀ m n, w = opair m n → ((m = succ x ∧ n = z) ∨ (m ≠ succ x ∧ w ∈ h))),
have hf' : is_function h' :=
{ eq_opair_of_zmem := λ z hz, begin
dsimp [h'] at hz,
rw [zmem_comprehension_iff] at hz,
cases hz with hz1 hz2,
rw [prod, zmem_comprehension_iff] at hz1,
rcases hz1.2 with ⟨x'', y'', _, _, h''⟩,
exact ⟨x'', y'', h''⟩
end,
exists_unique := begin
intros x hx,
rw [zmem_dom_iff] at hx,
cases hx with y hy,
dsimp [h'] at hy,
rw [zmem_comprehension_iff] at hy,
cases hy with hy1 hy2,
specialize hy2 x y rfl,
cases hy2 with hy2 hy2;
cases hy2 with hy2 hy3,
{ existsi y,
split,
{ dsimp [h'],
rw [zmem_comprehension_iff],
split, exact hy1,
intros m n hxy,
cases opair.ext hxy with hxy1 hxy2,
subst hxy1, subst hxy2,
left, split, subst hy2, subst hy3 },
{ intros z hz,
dsimp [h'] at hz,
rw [zmem_comprehension_iff] at hz,
cases hz with hz1 hz2,
specialize hz2 _ _ rfl,
cases hz2 with hz2 hz2;
cases hz2 with hz2 hz3,
{ subst hy3, subst hz3 },
{ exfalso, exact hz2 hy2 } } },
{ existsi y,
split,
{ dsimp [h'],
rw [zmem_comprehension_iff],
split, exact hy1,
intros m n hxy,
cases opair.ext hxy with hxy1 hxy2,
subst hxy1, subst hxy2,
right, split, exact hy2, exact hy3 },
{ intros z hz,
dsimp [h'] at hz,
rw [zmem_comprehension_iff] at hz,
cases hz with hz1 hz2,
specialize hz2 _ _ rfl,
cases hz2 with hz2 hz2;
cases hz2 with hz2 hz3,
{ exfalso, exact hy2 hz2 },
{ have hf1 := hf.exists_unique,
have hf2 : x ∈ dom h,
{ rw [zmem_dom_iff], existsi y, exact hy3 },
specialize hf1 _ hf2,
exact unique_of_exists_unique hf1 hz3 hy3 } } }
end },
have H5' : dom h' = ω,
{ apply is_extensional.ext,
intro w,
rw [zmem_dom_iff],
split; intro hw,
{ cases hw with w hw,
dsimp [h'] at hw,
rw [zmem_comprehension_iff] at hw,
cases hw with hw1 hw2,
exact zmem_left_of_zmem_prod hw1 },
{ cases classical.em (w = succ x) with hzk hzk,
{ existsi z,
dsimp [h'],
rw [zmem_comprehension_iff],
split,
{ exact zmem_prod hw h5 },
{ intros m n hmn,
cases opair.ext hmn with hmn1 hmn2,
subst hmn1, subst hmn2,
left, split, exact hzk, refl } },
{ have hf1 := hf.exists_unique,
specialize hf1 w (H5.symm ▸ hw),
rcases hf1 with ⟨w', hf1, hf2⟩,
existsi w',
dsimp [h'],
rw [zmem_comprehension_iff],
split,
have hf3 : w' ∈ range h,
{ rw [zmem_range_iff], existsi w, exact hf1 },
{ exact zmem_prod hw (H6 _ hf3) },
{ intros m n hmn,
cases opair.ext hmn with hmn1 hmn2,
subst hmn1, subst hmn2,
right, split, exact hzk, exact hf1 } } } },
have H6' : range h' ⊆ A,
{ intros z hz,
rw [zmem_range_iff] at hz,
cases hz with w hw,
dsimp [h'] at hw,
rw [zmem_comprehension_iff] at hw,
cases hw with hw1 hw2,
specialize hw2 _ _ rfl,
cases hw2 with hw2 hw2;
cases hw2 with hw2 hw3,
{ subst hw3,
apply H2,
rw [zmem_range_iff],
exact ⟨_, h2⟩ },
{ have hf1 : z ∈ range h,
{ rw [zmem_range_iff], existsi w, exact hw3 },
exact H6 _ hf1 } },
have H7' : opair ∅ c ∈ h',
{ dsimp [h'],
rw [zmem_comprehension_iff],
split,
{ exact zmem_prod empty_zmem_omega H3 },
{ intros m n hmn,
cases opair.ext hmn with hmn1 hmn2,
subst hmn1, subst hmn2,
right, split,
{ intro hmn1,
have hmn2 : x ∈ succ x := zmem_succ,
rw ← hmn1 at hmn2,
exact not_zmem_empty hmn2 },
{ exact H7 } } },
have H8' : ∀ (m hm hsm : α), m ∈ succ x → opair m hm ∈ h' → opair (opair m hm) hsm ∈ f → opair (succ m) hsm ∈ h',
{ intros m hm hsm hm1 hm2 hm3,
rw zmem_succ_iff_eq_or_zmem at hm1,
cases hm1 with hm1 hm1,
{ subst hm1,
dsimp [h'],
rw [zmem_comprehension_iff],
have hm4 : hsm ∈ range f,
{ rw [zmem_range_iff], existsi _, exact hm3 },
have hm5 : m ∈ ω,
{ rw ← H5', rw [zmem_dom_iff], existsi _, exact hm2 },
split,
{ exact zmem_prod (succ_zmem_omega_of_zmem hm5) (H2 _ hm4) },
{ intros m n hmn,
cases opair.ext hmn with hmn1 hmn2,
subst hmn1, subst hmn2,
left, split, refl,
dsimp [h'] at hm2,
rw [zmem_comprehension_iff] at hm2,
cases hm2 with hm8 hm6,
specialize hm6 _ _ rfl,
cases hm6 with hm6 hm6;
cases hm6 with hm6 hm7,
{ exfalso,
have hm9 : m ∈ succ m := zmem_succ,
rw ← hm6 at hm9,
exact not_zmem_self hm9 },
{ have hf1 := hf.exists_unique,
have hf2 : m ∈ dom h,
{ rw H5, exact hm5 },
specialize hf1 _ hf2,
have hf3 := unique_of_exists_unique hf1 hm7 H9,
subst hf3,
have hf4 := is_function.exists_unique _ h4,
exact unique_of_exists_unique hf4 hm3 h2 } } },
{ cases opair.ext H4 with H41 H42,
subst H41, subst H42,
dsimp [h'],
rw [zmem_comprehension_iff],
have hm4 : hsm ∈ range f,
{ rw [zmem_range_iff], existsi _, exact hm3 },
have hm5 : m ∈ ω,
{ rw ← H5', rw [zmem_dom_iff], existsi _, exact hm2 },
split,
{ exact zmem_prod (succ_zmem_omega_of_zmem hm5) (H2 _ hm4) },
{ intros m n hmn,
cases opair.ext hmn with hmn1 hmn2,
subst hmn1, subst hmn2,
right, split,
{ intro hmk,
replace hmk := succ.ext hmk,
subst hmk,
exact not_zmem_self hm1 },
{ have hm6 : opair m hm ∈ h,
{ dsimp [h'] at hm2,
rw [zmem_comprehension_iff] at hm2,
cases hm2 with hm8 hm6,
specialize hm6 _ _ rfl,
cases hm6 with hm6 hm6;
cases hm6 with hm6 hm7,
{ exfalso,
subst hm6,
apply not_zmem_and_zmem hm1 zmem_succ },
{ exact hm7 } },
exact H8 _ _ _ hm1 hm6 hm3 } } } },
have H9' : opair (succ x) z ∈ h',
{ dsimp [h'],
rw [zmem_comprehension_iff],
split,
{ exact zmem_prod (succ_zmem_omega_of_zmem $ zmem_left_of_zmem_prod h3) h5 },
{ intros m n hmn,
cases opair.ext hmn with hmn1 hmn2,
subst hmn1, subst hmn2,
left, split, refl, refl } },
rw [recursion, zmem_comprehension_iff],
split,
{ exact zmem_prod (succ_zmem_omega_of_zmem $ zmem_left_of_zmem_prod h3) h5 },
{ exact ⟨h', _, _, hf', rfl, H5', H6', H7', H8', H9'⟩ }
end
theorem recursion.dom_omega (n : α) (Hn : n ∈ (ω:α)) : ∃! y, opair n y ∈ recursion f A c :=
begin
apply @induction _ _ _ _ _ n Hn,
{ existsi c,
split,
{ exact recursion.empty f A c H3 },
{ intros y h1,
rw [recursion, zmem_comprehension_iff] at h1,
rcases h1 with ⟨h1, h, x', y', hf, H4, H5, H6, H7, H8, H9⟩,
have hf1 := hf.exists_unique,
specialize hf1 ∅ (H5.symm ▸ empty_zmem_omega),
exact unique_of_exists_unique hf1 H9 H7 } },
{ intros k hk h1,
rcases h1 with ⟨y, h1, h2⟩,
have h4 : opair k y ∈ dom f,
{ rw [recursion, zmem_comprehension_iff] at h1,
rw H1, exact h1.1 },
existsi (eval f (opair k y)),
split,
{ exact recursion.succ f A c H1 H2 H3 h1 (opair_eval_zmem h4) },
{ intros z hz,
apply eval_unique h4,
rw [recursion, zmem_comprehension_iff] at hz,
rcases hz with ⟨hz1, h'', x'', y'', hf'', H4'', H5'', H6'', H7'', H8'', H9''⟩,
cases opair.ext H4'' with H41'' H42'',
subst H41'', subst H42'',
rw [recursion, zmem_comprehension_iff] at h1,
cases h1 with h1 h3,
rcases h3 with ⟨h, x', y', hf, H4, H5, H6, H7, H8, H9⟩, cases opair.ext H4 with H41 H42,
subst H41, subst H42,
cases omega.structure hk with h3 h3,
{ subst h3,
have hf1 := hf.exists_unique,
specialize hf1 ∅ (H5.symm ▸ empty_zmem_omega),
have hf2 := unique_of_exists_unique hf1 H9 H7,
subst hf2,
specialize H8'' ∅ _ _ zmem_succ H7'' (opair_eval_zmem h4),
have hf3 := hf''.exists_unique,
specialize hf3 (succ ∅) (H5''.symm ▸ succ_zmem_omega_of_zmem empty_zmem_omega),
have hf2 := unique_of_exists_unique hf3 H8'' H9'',
subst hf2,
apply opair_eval_zmem h4 },
{ rcases h3 with ⟨k, H, hk⟩, subst hk,
have h5 : succ k ∈ dom h'',
{ rw H5'', exact hk },
have h6 : opair (succ k) (@@eval _ h'' hf'' (succ k)) ∈ recursion f A c,
{ rw [recursion, zmem_comprehension_iff],
split,
{ exact zmem_prod hk (H6'' _ $ @eval_zmem_range _ _ _ hf'' _ h5) },
{ exact ⟨h'', _, _, hf'', rfl, H5'', H6'', H7'',
(λ m hm hsm hm1 hm2 hm3, H8'' m hm hsm (zmem_succ_of_zmem hm1) hm2 hm3),
@opair_eval_zmem _ _ _ hf'' _ h5⟩ } },
specialize H8'' _ _ _ zmem_succ (@opair_eval_zmem _ _ _ hf'' _ h5)
(opair_eval_zmem (H1.symm ▸ zmem_prod hk (H6'' _ $ @eval_zmem_range _ _ _ hf'' _ h5))),
specialize h2 _ h6,
subst h2,
have hf3 := hf''.exists_unique,
specialize hf3 (succ (succ k)) (H5''.symm ▸ succ_zmem_omega_of_zmem hk),
have hf2 := unique_of_exists_unique hf3 H8'' H9'',
subst hf2,
apply opair_eval_zmem h4 } } }
end
instance recursion.is_function : is_function (recursion f A c) :=
{ eq_opair_of_zmem := λ z hz, let ⟨x, y, _, _, h⟩ := (zmem_comprehension_iff.1 (zmem_comprehension_iff.1 hz).1).2 in ⟨x, y, h⟩,
exists_unique := λ x hx, begin
rw [zmem_dom_iff] at hx,
cases hx with w hw,
rw [recursion, zmem_comprehension_iff] at hw,
have hx : x ∈ ω := zmem_left_of_zmem_prod hw.1,
exact recursion.dom_omega f A c H1 H2 H3 x hx
end }
theorem recursion.dom : dom (recursion f A c) = ω :=
begin
apply is_extensional.ext,
intro n,
rw [zmem_dom_iff],
split; intro hn,
{ cases hn with y hy,
rw [recursion, zmem_comprehension_iff] at hy,
exact zmem_left_of_zmem_prod hy.1 },
{ rcases recursion.dom_omega f A c H1 H2 H3 n hn with ⟨y, h1, h2⟩,
exact ⟨y, h1⟩ }
end
theorem recursion.range : range (recursion f A c) ⊆ A :=
begin
intros z hz,
rw [zmem_range_iff] at hz,
cases hz with n hn,
rw [recursion, zmem_comprehension_iff] at hn,
exact zmem_right_of_zmem_prod hn.1
end
-- recursion theorem:
-- for any function f : ω×A → A and c ∈ A, there is a unique function h : ω → A such that:
-- 1. h(0) = c
-- 2. h(m⁺) = f(m,h(m))
-- (I left uniqueness unproved)
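-- For instance, taking A := ω, c := n and f the function (m, k) ↦ succ k, the resulting
-- h is "add n": h(k) = n + k.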
end recursion
section transitive_closure
def transitive_closure (z : α) : α :=
sUnion $ replacement ω (λ x y, ∃ (A : α) (H1 : x ∈ (ω:α)) (H2 : ∀ p q r, opair p q ∈ A → opair p r ∈ A → q = r)
(H3 : ∀ p q, opair p q ∈ A → p ∈ succ x) (H4 : ∀ p, p ∈ x → ∃ q, opair p q ∈ A) (H5 : opair ∅ z ∈ A)
(H6 : ∀ p q, p ∈ x → opair p q ∈ A → opair (succ p) (sUnion q) ∈ A), opair x y ∈ A)
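-- The transitive closure of `z` is `z ∪ ⋃z ∪ ⋃⋃z ∪ ⋯`: each relation `A` above is a
-- finite approximation of the map `n ↦ ⋃ⁿ z` on `ω`, and the outer `sUnion` collects
-- all of its values.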
theorem transitive_closure.aux.empty : ∃ (A : α) (H1 : (∅:α) ∈ (ω:α)) (H2 : ∀ (p q r : α), opair p q ∈ A → opair p r ∈ A → q = r)
(H3 : ∀ (p q : α), opair p q ∈ A → p ∈ succ (∅:α))
(H4 : ∀ (p : α), p ∈ (∅:α) → (∃ (q : α), opair p q ∈ A)) (H5 : opair ∅ z ∈ A)
(H6 : ∀ (p q : α), p ∈ (∅:α) → opair p q ∈ A → opair (succ p) (sUnion q) ∈ A),
opair ∅ z ∈ A :=
begin
have H2 : ∀ (p q r : α), opair p q ∈ {opair ∅ z} → opair p r ∈ {opair ∅ z} → q = r,
{ intros p q r hpq hpr,
rw zmem_singleton_iff at hpq hpr,
have h1 := (opair.ext hpq).2,
have h2 := (opair.ext hpr).2,
subst h1, subst h2 },
have H3 : ∀ (p q : α), opair p q ∈ {opair ∅ z} → p ∈ succ (∅:α),
{ intros p q h,
rw zmem_singleton_iff at h,
rw (opair.ext h).1,
exact zmem_succ },
exact ⟨{opair ∅ z}, empty_zmem_omega, H2, H3, (λ p hp, false.elim $ not_zmem_empty hp),
zmem_singleton, (λ p q hp, false.elim $ not_zmem_empty hp), zmem_singleton⟩
end
theorem transitive_closure.aux.succ {k : α} (hk : k ∈ (ω:α)) :
(∃ (A : α) (H1 : k ∈ (ω:α)) (H2 : ∀ (p q r : α), opair p q ∈ A → opair p r ∈ A → q = r)
(H3 : ∀ (p q : α), opair p q ∈ A → p ∈ succ k)
(H4 : ∀ (p : α), p ∈ k → (∃ (q : α), opair p q ∈ A)) (H5 : opair ∅ z ∈ A)
(H6 : ∀ (p q : α), p ∈ k → opair p q ∈ A → opair (succ p) (sUnion q) ∈ A),
opair k y ∈ A)
→
(∃ (A : α) (H1 : succ k ∈ (ω:α)) (H2 : ∀ (p q r : α), opair p q ∈ A → opair p r ∈ A → q = r)
(H3 : ∀ (p q : α), opair p q ∈ A → p ∈ succ (succ k))
(H4 : ∀ (p : α), p ∈ succ k → (∃ (q : α), opair p q ∈ A)) (H5 : opair ∅ z ∈ A)
(H6 : ∀ (p q : α), p ∈ succ k → opair p q ∈ A → opair (succ p) (sUnion q) ∈ A),
opair (succ k) (sUnion y) ∈ A) :=
begin
intro h1,
rcases h1 with ⟨A, H1, H2, H3, H4, H5, H6, H7⟩,
have H2' : ∀ (p q r : α),
opair p q ∈ has_insert.insert (opair (succ k) (sUnion y)) A →
opair p r ∈ has_insert.insert (opair (succ k) (sUnion y)) A →
q = r,
{ intros p q r hpq hpr,
rw zmem_insert_iff_eq_or_zmem at hpq hpr,
cases hpq; cases hpr,
{ have h1 := (opair.ext hpq).2,
have h2 := (opair.ext hpr).2,
subst h1, subst h2 },
{ exfalso,
have h1 := (opair.ext hpq).1,
subst h1,
exact not_zmem_self (H3 _ _ hpr) },
{ exfalso,
have h1 := (opair.ext hpr).1,
subst h1,
exact not_zmem_self (H3 _ _ hpq) },
{ exact H2 _ _ _ hpq hpr } },
have H3' : ∀ (p q : α), opair p q ∈ has_insert.insert (opair (succ k) (sUnion y)) A → p ∈ succ (succ k),
{ intros p q h,
rw zmem_insert_iff_eq_or_zmem at h,
cases h with h h,
{ rw (opair.ext h).1,
exact zmem_succ },
{ exact zmem_succ_of_zmem (H3 _ _ h) } },
have H4' : ∀ (p : α), p ∈ succ k → (∃ (q : α), opair p q ∈ has_insert.insert (opair (succ k) (sUnion y)) A),
{ intros p hp,
rw zmem_succ_iff_eq_or_zmem at hp,
cases hp with hp hp,
{ subst hp,
existsi y,
exact zmem_insert_of_zmem H7 },
{ cases H4 p hp with q hq,
exact ⟨q, zmem_insert_of_zmem hq⟩ } },
have H6' : ∀ (p q : α), p ∈ succ k →
opair p q ∈ has_insert.insert (opair (succ k) (sUnion y)) A →
opair (succ p) (sUnion q) ∈ has_insert.insert (opair (succ k) (sUnion y)) A,
{ intros p q hp hpq,
rw zmem_succ_iff_eq_or_zmem at hp,
rw zmem_insert_iff_eq_or_zmem at hpq,
cases hp with hp hp;
cases hpq with hpq hpq,
{ subst hp,
exfalso,
have h1 : p ∈ succ p := zmem_succ,
rw ← (opair.ext hpq).1 at h1,
exact not_zmem_self h1 },
{ subst hp,
rw H2 _ _ _ hpq H7,
exact zmem_insert },
{ exfalso,
apply not_zmem_and_zmem hp,
rw (opair.ext hpq).1,
exact zmem_succ },
{ exact zmem_insert_of_zmem (H6 _ _ hp hpq) } },
exact ⟨has_insert.insert (opair (succ k) (sUnion y)) A,
succ_zmem_omega_of_zmem hk, H2', H3', H4',
zmem_insert_of_zmem H5, H6', zmem_insert⟩
end
theorem transitive_closure.aux : ∀ x, x ∈ (ω:α) → ∃! y,
∃ (A : α) (H1 : x ∈ (ω:α)) (H2 : ∀ p q r, opair p q ∈ A → opair p r ∈ A → q = r)
(H3 : ∀ p q, opair p q ∈ A → p ∈ succ x) (H4 : ∀ p, p ∈ x → ∃ q, opair p q ∈ A) (H5 : opair ∅ z ∈ A)
(H6 : ∀ p q, p ∈ x → opair p q ∈ A → opair (succ p) (sUnion q) ∈ A), opair x y ∈ A :=
begin
apply induction,
{ existsi z,
split,
{ exact transitive_closure.aux.empty },
{ intros y hy,
rcases hy with ⟨A, H1, H2, H3, H4, H5, H6, H7⟩,
exact H2 _ _ _ H7 H5 } },
{ intros k hk ih,
rcases ih with ⟨y, h1, h2⟩,
existsi sUnion y,
split,
{ exact transitive_closure.aux.succ hk h1 },
{ intros w hw,
rcases hw with ⟨A', H1', H2', H3', H4', H5', H6', H7'⟩,
cases H4' _ zmem_succ with q hq,
have h := H2' _ _ _ H7' (H6' _ _ zmem_succ hq), subst h,
congr,
have H3'' : ∀ (p q_1 : α), opair p q_1 ∈ erase A' (opair (succ k) (sUnion q)) → p ∈ succ k,
{ intros p q h,
have h1 := H3' _ _ (zmem_of_zmem_erase h),
cases zmem_succ_iff_eq_or_zmem.1 h1 with h3 h3,
{ exfalso, subst h3,
have h3 := H2' _ _ _ (H6' _ _ zmem_succ hq) (zmem_of_zmem_erase h), subst h3,
exact ne_of_zmem_erase h rfl },
{ exact h3 } },
have H4'' : ∀ (p : α), p ∈ k → (∃ (q_1 : α), opair p q_1 ∈ erase A' (opair (succ k) (sUnion q))),
{ intros p hp,
cases H4' p (zmem_succ_of_zmem hp) with q hq,
existsi q,
apply zmem_erase_of_zmem_of_ne hq,
intro h,
rw (opair.ext h).1 at hp,
exact not_zmem_and_zmem hp zmem_succ },
have H5'' : opair ∅ z ∈ erase A' (opair (succ k) (sUnion q)),
{ apply zmem_erase_of_zmem_of_ne H5',
intro h,
have h1 : k ∈ succ k := zmem_succ,
rw ← (opair.ext h).1 at h1,
exact not_zmem_empty h1 },
have H6'' : ∀ (p q_1 : α), p ∈ k →
opair p q_1 ∈ erase A' (opair (succ k) (sUnion q)) →
opair (succ p) (sUnion q_1) ∈ erase A' (opair (succ k) (sUnion q)),
{ intros p q hp hpq,
apply zmem_erase_of_zmem_of_ne,
{ exact H6' _ _ (zmem_succ_of_zmem hp) (zmem_of_zmem_erase hpq) },
{ intro h,
rw succ.ext (opair.ext h).1 at hp,
exact not_zmem_self hp } },
have H7'' : opair k q ∈ erase A' (opair (succ k) (sUnion q)),
{ apply zmem_erase_of_zmem_of_ne hq,
intro h,
have h1 : k ∈ succ k := zmem_succ,
rw ← (opair.ext h).1 at h1,
exact not_zmem_self h1 },
exact h2 q ⟨erase A' (opair (succ k) (sUnion q)), hk,
λ p q r hpq hpr, H2' p q r (zmem_of_zmem_erase hpq) (zmem_of_zmem_erase hpr),
H3'', H4'', H5'', H6'', H7''⟩ } }
end
theorem transitive_closure.aux.iff {w : α} : w ∈ transitive_closure z ↔ ∃ y, w ∈ y ∧ ∃ x, x ∈ (ω:α) ∧
∃ (A : α) (H1 : x ∈ (ω:α)) (H2 : ∀ p q r, opair p q ∈ A → opair p r ∈ A → q = r)
(H3 : ∀ p q, opair p q ∈ A → p ∈ succ x) (H4 : ∀ p, p ∈ x → ∃ q, opair p q ∈ A) (H5 : opair ∅ z ∈ A)
(H6 : ∀ p q, p ∈ x → opair p q ∈ A → opair (succ p) (sUnion q) ∈ A), opair x y ∈ A :=
begin
rw [transitive_closure],
rw [zmem_sUnion_iff_zmem_zmem],
split; intro h; rcases h with ⟨h1, h2, h3⟩; refine ⟨h1, h2, _⟩;
rwa [zmem_replacement_iff] <|> rwa [zmem_replacement_iff] at h3;
{ intros A B C hab hac,
have hab1 := hab,
rcases hab1 with ⟨_, h, _⟩,
exact unique_of_exists_unique (transitive_closure.aux _ h) hab hac }
end
variable z
theorem transitive_closure.subset : z ⊆ transitive_closure z :=
λ w hw, transitive_closure.aux.iff.2 ⟨z, hw, ∅, empty_zmem_omega, transitive_closure.aux.empty⟩
instance transitive_closure.is_transitive : is_transitive (transitive_closure z) :=
{ zmem_trans := begin
intros m n hmn hn,
rw transitive_closure.aux.iff at hn ⊢,
rcases hn with ⟨y, hny, k, hk, hn⟩,
refine ⟨_, zmem_sUnion_iff_zmem_zmem.2 ⟨n, hmn, hny⟩, _, succ_zmem_omega_of_zmem hk, transitive_closure.aux.succ hk hn⟩
end }
theorem transitive_closure.UMP (w : α) (H : z ⊆ w) [is_transitive w] : transitive_closure z ⊆ w :=
begin
intros x hx,
rw [transitive_closure.aux.iff] at hx,
rcases hx with ⟨y, hxy, k, hk, A, H1, H2, H3, H4, H5, H6, H7⟩,
revert x y A,
apply induction _ _ _ k hk,
{ intros x y A hxy H2 H3 H4 H5 H6 H7,
specialize H2 _ _ _ H7 H5,
subst H2,
exact H _ hxy },
{ clear hk H1 k,
intros k hk ih x y A hxy H2 H3 H4 H5 H6 H7,
cases H4 _ zmem_succ with q hq,
have H2subst := H2 _ _ _ H7 (H6 _ _ zmem_succ hq),
subst H2subst,
rw zmem_sUnion_iff_zmem_zmem at hxy,
rcases hxy with ⟨t, hxt, hty⟩,
have H3' : ∀ (p q_1 : α), opair p q_1 ∈ erase A (opair (succ k) (sUnion q)) → p ∈ succ k,
{ intros p q h,
have h1 := H3 _ _ (zmem_of_zmem_erase h),
cases zmem_succ_iff_eq_or_zmem.1 h1 with h3 h3,
{ exfalso, subst h3,
have h3 := H2 _ _ _ (H6 _ _ zmem_succ hq) (zmem_of_zmem_erase h), subst h3,
exact ne_of_zmem_erase h rfl },
{ exact h3 } },
have H4' : ∀ (p : α), p ∈ k → (∃ (q_1 : α), opair p q_1 ∈ erase A (opair (succ k) (sUnion q))),
{ intros p hp,
cases H4 p (zmem_succ_of_zmem hp) with q hq,
existsi q,
apply zmem_erase_of_zmem_of_ne hq,
intro h,
rw (opair.ext h).1 at hp,
exact not_zmem_and_zmem hp zmem_succ },
have H5' : opair ∅ z ∈ erase A (opair (succ k) (sUnion q)),
{ apply zmem_erase_of_zmem_of_ne H5,
intro h,
have h1 : k ∈ succ k := zmem_succ,
rw ← (opair.ext h).1 at h1,
exact not_zmem_empty h1 },
have H6' : ∀ (p q_1 : α), p ∈ k →
opair p q_1 ∈ erase A (opair (succ k) (sUnion q)) →
opair (succ p) (sUnion q_1) ∈ erase A (opair (succ k) (sUnion q)),
{ intros p q hp hpq,
apply zmem_erase_of_zmem_of_ne,
{ exact H6 _ _ (zmem_succ_of_zmem hp) (zmem_of_zmem_erase hpq) },
{ intro h,
rw succ.ext (opair.ext h).1 at hp,
exact not_zmem_self hp } },
have H7' : opair k q ∈ erase A (opair (succ k) (sUnion q)),
{ apply zmem_erase_of_zmem_of_ne hq,
intro h,
have h1 : k ∈ succ k := zmem_succ,
rw ← (opair.ext h).1 at h1,
exact not_zmem_self h1 },
specialize ih t q (erase A (opair (succ k) (sUnion q))) hty
(λ p q r hpq hpr, H2 p q r (zmem_of_zmem_erase hpq) (zmem_of_zmem_erase hpr))
H3' H4' H5' H6' H7',
exact is_transitive.zmem_trans _ _ hxt ih }
end
end transitive_closure
def prod_assoc (x y z : α) : α :=
comprehension (prod (prod (prod x y) z) (prod x (prod y z)))
(λ m, ∃ p q r, m = opair (opair (opair p q) r) (opair p (opair q r)))
theorem prod_assoc.dom {x y z : α} : dom (prod_assoc x y z) = prod (prod x y) z :=
begin
apply is_extensional.ext,
intro m,
rw zmem_dom_iff,
split; intro h,
{ cases h with n h,
rw [prod_assoc, zmem_comprehension_iff] at h,
rcases h with ⟨h1, p, q, r, h2⟩,
exact zmem_left_of_zmem_prod h1 },
{ rw [prod, zmem_comprehension_iff] at h,
rcases h with ⟨_, pq, r, hpq, hr, h⟩,
subst h,
rw zmem_comprehension_iff at hpq,
rcases hpq with ⟨_, p, q, hp, hq, hpq⟩,
subst hpq,
existsi opair p (opair q r),
rw [prod_assoc, zmem_comprehension_iff],
refine ⟨zmem_prod
(zmem_prod (zmem_prod hp hq) hr)
(zmem_prod hp (zmem_prod hq hr)),
_, _, _, rfl⟩ }
end
theorem prod_assoc.range {x y z : α} : range (prod_assoc x y z) = prod x (prod y z) :=
begin
apply is_extensional.ext,
intro m,
rw zmem_range_iff,
split; intro h,
{ cases h with n h,
rw [prod_assoc, zmem_comprehension_iff] at h,
rcases h with ⟨h1, p, q, r, h2⟩,
exact zmem_right_of_zmem_prod h1 },
{ rw [prod, zmem_comprehension_iff] at h,
rcases h with ⟨_, p, qr, hp, hqr, h⟩,
subst h,
rw zmem_comprehension_iff at hqr,
rcases hqr with ⟨_, q, r, hq, hr, hqr⟩,
subst hqr,
existsi opair (opair p q) r,
rw [prod_assoc, zmem_comprehension_iff],
refine ⟨zmem_prod
(zmem_prod (zmem_prod hp hq) hr)
(zmem_prod hp (zmem_prod hq hr)),
_, _, _, rfl⟩ }
end
instance prod_assoc.is_injective {x y z : α} : is_injective (prod_assoc x y z) :=
{ eq_opair_of_zmem := begin
intros m h,
rw [prod_assoc, zmem_comprehension_iff] at h,
rcases h with ⟨h1, p, q, r, h2⟩,
exact ⟨_, _, h2⟩
end,
exists_unique := begin
intros m h,
rw zmem_dom_iff at h,
cases h with n hn,
existsi n, split, exact hn,
intros n' hn',
rw [prod_assoc, zmem_comprehension_iff] at hn,
rcases hn with ⟨h1, p, q, r, h2⟩,
rw [prod_assoc, zmem_comprehension_iff] at hn',
rcases hn' with ⟨h1', p', q', r', h2'⟩,
rw opair.iff at h2 h2',
cases h2 with h2 h3,
cases h2' with h2' h3',
subst h2, subst h3, subst h3',
rw [opair.iff, opair.iff] at h2',
cases h2' with hpq hr,
cases hpq with hp hq,
subst hp, subst hq, subst hr
end,
injective := begin
intros m m' n hm hm',
rw [prod_assoc, zmem_comprehension_iff] at hm,
rcases hm with ⟨h1, p, q, r, h2⟩,
rw [prod_assoc, zmem_comprehension_iff] at hm',
rcases hm' with ⟨h1', p', q', r', h2'⟩,
rw opair.iff at h2 h2',
cases h2 with h2 h3,
cases h2' with h2' h3',
subst h2, subst h2', subst h3,
rw [opair.iff, opair.iff] at h3',
rcases h3' with ⟨hp, hq, hr⟩,
subst hp, subst hq, subst hr
end }
def hom (x y : α) : α :=
comprehension (powerset $ prod x y) (λ f, is_function f ∧ dom f = x)
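-- `hom x y` is the set of (graphs of) functions from `x` to `y`, carved out of
-- `powerset (prod x y)`; see `zmem_hom_iff` below for the membership characterization.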
theorem zmem_hom_iff {f : α} : f ∈ hom x y ↔ is_function f ∧ dom f = x ∧ range f ⊆ y :=
begin
rw [hom, zmem_comprehension_iff, zmem_powerset],
split; intro hf; rcases hf with ⟨hf1, hf2, hf3⟩,
{ refine ⟨hf2, hf3, _⟩,
intros z hz,
rw zmem_range_iff at hz,
cases hz with w hwz,
exact zmem_right_of_zmem_prod (hf1 _ hwz) },
{ refine ⟨_, hf1, hf2⟩,
intros z hz,
have hf4 := hf1.eq_opair_of_zmem,
specialize hf4 _ hz,
rcases hf4 with ⟨m, n, hf4⟩,
subst hf4,
have hf4 : m ∈ dom f,
{ rw zmem_dom_iff, exact ⟨_, hz⟩ },
have hf5 : n ∈ range f,
{ rw zmem_range_iff, exact ⟨_, hz⟩ },
rw hf2 at hf4,
replace hf5 := hf3 _ hf5,
exact zmem_prod hf4 hf5 }
end
def prod_adjoint_hom (x y z : α) : α :=
comprehension (prod (hom (prod x y) z) (hom x (hom y z)))
(λ m, ∃ (f g : α) (H : m = opair f g),
∀ p h, opair p h ∈ g → ∀ q r, opair (opair p q) r ∈ f → opair q r ∈ h)
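-- `prod_adjoint_hom x y z` is (the graph of) the currying correspondence
-- `hom (prod x y) z ≃ hom x (hom y z)`: it relates `f` to the `g` whose value at `p`
-- is the function `q ↦ f (opair p q)`.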
theorem prod_adjoint_hom.dom {x y z : α} : dom (prod_adjoint_hom x y z) = hom (prod x y) z :=
begin
apply is_extensional.ext,
intro m,
rw zmem_dom_iff,
split; intro h,
{ cases h with n h,
rw [prod_adjoint_hom, zmem_comprehension_iff] at h,
rcases h with ⟨h1, p, q, r, h2⟩,
exact zmem_left_of_zmem_prod h1 },
{ have h := h,
rw zmem_hom_iff at h,
rcases h with ⟨h1, h2, h3⟩,
let m' : α := comprehension (prod x (hom y z))
(λ n, ∀ p g, n = opair p g → ∀ q r, opair q r ∈ g → opair (opair p q) r ∈ m),
have h1' : is_function m',
{ split,
{ intros z hz,
rw [zmem_comprehension_iff, prod, zmem_comprehension_iff] at hz,
rcases hz.1.2 with ⟨_, _, _, _, hz1⟩,
exact ⟨_, _, hz1⟩ },
{ intros x' hx',
rw [zmem_dom_iff] at hx',
cases hx' with y' hy',
existsi y',
split,
{ exact hy' },
{ intros y'' hy'',
rw [zmem_comprehension_iff, prod, zmem_comprehension_iff] at hy',
rcases hy' with ⟨hy1', hy2'⟩,
rcases hy1' with ⟨_, p', q', hp', hq', h'⟩,
rcases opair.ext h' with ⟨h1', h2'⟩,
subst h1', subst h2', clear h',
rw zmem_hom_iff at hq',
rcases hq' with ⟨hy1', hy3', hy4'⟩,
cases hy1' with hy5' hy6',
specialize hy2' _ _ rfl,
rw [zmem_comprehension_iff, prod, zmem_comprehension_iff] at hy'',
rcases hy'' with ⟨hy1'', hy2''⟩,
rcases hy1'' with ⟨_, p'', q'', hp'', hq'', h''⟩,
rcases opair.ext h'' with ⟨h1'', h2''⟩,
subst h1'', subst h2'', clear h'',
rw zmem_hom_iff at hq'',
rcases hq'' with ⟨hy1'', hy3'', hy4''⟩,
cases hy1'' with hy5'' hy6'',
specialize hy2'' _ _ rfl,
apply is_extensional.ext,
intro z',
cases h1 with h4 h5,
split; intro hz',
{ specialize hy5'' _ hz',
rcases hy5'' with ⟨q'', r'', h⟩, subst h,
specialize hy2'' _ _ hz',
have hz1' : q'' ∈ dom y'',
{ rw zmem_dom_iff, exact ⟨_, hz'⟩ },
rw [hy3'', ← hy3'] at hz1',
specialize hy6' _ hz1',
rcases hy6' with ⟨r', hy6', _⟩,
specialize hy2' _ _ hy6',
have hz2' : opair x' q'' ∈ dom m,
{ rw zmem_dom_iff, exact ⟨_, hy2'⟩ },
specialize h5 _ hz2',
replace h5 := unique_of_exists_unique h5 hy2'' hy2',
subst h5, exact hy6' },
{ specialize hy5' _ hz',
rcases hy5' with ⟨q', r', h⟩, subst h,
specialize hy2' _ _ hz',
have hz1' : q' ∈ dom y',
{ rw zmem_dom_iff, exact ⟨_, hz'⟩ },
rw [hy3', ← hy3''] at hz1',
specialize hy6'' _ hz1',
rcases hy6'' with ⟨r'', hy6'', _⟩,
specialize hy2'' _ _ hy6'',
have hz2' : opair x' q' ∈ dom m,
{ rw zmem_dom_iff, exact ⟨_, hy2'⟩ },
specialize h5 _ hz2',
replace h5 := unique_of_exists_unique h5 hy2'' hy2',
subst h5, exact hy6'' } } } },
have h2' : dom m' = x,
{ apply is_extensional.ext,
intro p, rw [zmem_dom_iff],
split; intro hp,
{ cases hp with g hg,
rw [zmem_comprehension_iff] at hg,
exact zmem_left_of_zmem_prod hg.1 },
{ let g : α := comprehension (prod y z)
(λ n, ∀ q r, n = opair q r → opair (opair p q) r ∈ m),
cases h1 with h4 h5,
have hg1 : is_function g,
{ split,
{ intros z hz,
rw [zmem_comprehension_iff, prod, zmem_comprehension_iff] at hz,
rcases hz.1.2 with ⟨_, _, _, _, hz1⟩,
exact ⟨_, _, hz1⟩ },
{ intros q hq,
rw zmem_dom_iff at hq,
cases hq with r hr,
existsi r,
split,
{ exact hr },
{ intros r' hr',
rw [zmem_comprehension_iff] at hr hr',
rw h2 at h5,
specialize h5 _ (zmem_prod hp $ zmem_left_of_zmem_prod hr.1),
exact unique_of_exists_unique h5 (hr'.2 _ _ rfl) (hr.2 _ _ rfl) } } },
have hg2 : dom g = y,
{ apply is_extensional.ext,
intro q,
rw zmem_dom_iff,
split; intro hq,
{ cases hq with r hr,
rw [zmem_comprehension_iff] at hr,
exact zmem_left_of_zmem_prod hr.1 },
{ have h6 : opair p q ∈ dom m,
{ rw h2, exact zmem_prod hp hq },
specialize h5 _ h6,
rcases h5 with ⟨r, hr1, hr2⟩,
have hr : r ∈ z,
{ apply h3,
rw zmem_range_iff,
exact ⟨_, hr1⟩ },
existsi r,
rw zmem_comprehension_iff,
split,
{ exact zmem_prod hq hr },
{ intros _ _ hqr,
cases opair.ext hqr with hqr1 hqr2,
subst hqr1, subst hqr2, clear hqr,
exact hr1 } } },
have hg3 : range g ⊆ z,
{ intros r hr,
rw zmem_range_iff at hr,
cases hr with q hq,
rw zmem_comprehension_iff at hq,
exact zmem_right_of_zmem_prod hq.1 },
existsi g,
rw zmem_comprehension_iff,
split,
{ apply zmem_prod hp,
rw zmem_hom_iff,
exact ⟨hg1, hg2, hg3⟩ },
{ intros _ _ hpg,
cases opair.ext hpg with hpg1 hpg2,
subst hpg1, subst hpg2, clear hpg,
intros q r hqr,
rw zmem_comprehension_iff at hqr,
exact hqr.2 _ _ rfl } } },
have h3' : range m' ⊆ hom y z,
{ intros g hg,
rw zmem_range_iff at hg,
cases hg with p hg,
rw zmem_comprehension_iff at hg,
exact zmem_right_of_zmem_prod hg.1 },
existsi m',
rw [prod_adjoint_hom, zmem_comprehension_iff],
split,
{ apply zmem_prod h,
rw zmem_hom_iff,
exact ⟨h1', h2', h3'⟩ },
{ refine ⟨_, _, rfl, _⟩,
intros p g hp q r hpqr,
rw zmem_comprehension_iff at hp,
have hg := zmem_right_of_zmem_prod hp.1,
rw zmem_hom_iff at hg,
rcases hg with ⟨hg1, hg2, hg3⟩,
cases hg1 with hg4 hg5,
rw hg2 at hg5,
have hpq : opair p q ∈ prod x y,
{ rw [← h2, zmem_dom_iff],
exact ⟨_, hpqr⟩ },
specialize hg5 _ (zmem_right_of_zmem_prod hpq),
rcases hg5 with ⟨r', hr1', hr2'⟩,
have hpqr' := hp.2 _ _ rfl _ _ hr1',
cases h1 with h4 h5,
rw h2 at h5,
replace h5 := unique_of_exists_unique (h5 _ hpq) hpqr' hpqr,
subst h5,
exact hr1' } }
end
theorem prod_adjoint_hom.range {x y z : α} : range (prod_adjoint_hom x y z) = hom x (hom y z) := sorry
instance prod_adjoint_hom.is_injective {x y z : α} : is_injective (prod_adjoint_hom x y z) := sorry
-- the defining (currying) property of prod_adjoint_hom, stated for maximum usability
theorem prod_adjoint_hom.prop {x y z f g p q : α}
[is_function f] [is_function g]
(H1 : f ∈ dom (prod_adjoint_hom x y z))
(H2 : eval (prod_adjoint_hom x y z) f = g)
(H3 : opair p q ∈ dom f)
(H4 : p ∈ dom g) (H5 : q ∈ dom (eval g p))
[is_function (eval g p)] :
eval f (opair p q) = eval (eval g p) q := sorry
end zf
end zfc
import set_theory.game.pgame
import tactic.abel
open pgame
open_locale pgame
universes u v
namespace game
instance pgame.setoid : setoid pgame.{u} :=
⟨(≈), equiv_refl, @pgame.equiv.symm, @pgame.equiv.trans⟩
abbreviation game : Type (u+1) := quotient pgame.setoid.{u}
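-- `game` is the quotient of `pgame` by the equivalence `≈` (`x ≤ y ∧ y ≤ x`); the
-- additive and order structure below is inherited from `pgame` via this quotient.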
instance : add_comm_group game :=
{ zero := ⟦0⟧,
neg := quotient.lift (λ x, ⟦-x⟧) (λ x₁ x₂ h, quotient.sound ((@neg_equiv_neg_iff x₁ x₂).mpr h)),
add := quotient.lift₂ (λ x y : pgame, ⟦x + y⟧)
(λ x₁ y₁ x₂ y₂ hx hy, quotient.sound (add_congr hx hy)),
add_zero := by { rintro ⟨x⟩, exact quotient.sound (add_zero_equiv x) },
zero_add := by { rintro ⟨x⟩, exact quotient.sound (zero_add_equiv x) },
add_assoc := by { rintros ⟨x⟩ ⟨y⟩ ⟨z⟩, exact quotient.sound add_assoc_equiv },
add_left_neg := by { rintro ⟨x⟩, exact quotient.sound (add_left_neg_equiv x), },
add_comm := by { rintros ⟨x⟩ ⟨y⟩, exact quotient.sound add_comm_equiv } }
instance : has_one game := ⟨⟦1⟧⟩
instance : inhabited game := ⟨0⟩
instance : partial_order game :=
{ le := quotient.lift₂ (λ x y, x ≤ y) (λ x₁ y₁ x₂ y₂ hx hy, propext (le_congr hx hy)),
le_refl := by { rintro ⟨x⟩, exact le_refl x },
le_trans := by { rintros ⟨x⟩ ⟨y⟩ ⟨z⟩, exact @le_trans _ _ x y z },
le_antisymm := by { rintros ⟨x⟩ ⟨y⟩ h₁ h₂, exact quotient.sound ⟨h₁, h₂⟩ } }
def lf : game → game → Prop :=
quotient.lift₂ lf (λ x₁ y₁ x₂ y₂ hx hy, propext (lf_congr hx hy))
local infix ` ⧏ `:50 := lf
@[simp] theorem not_le : ∀ {x y : game}, ¬ x ≤ y ↔ y ⧏ x :=
by { rintro ⟨x⟩ ⟨y⟩, exact pgame.not_le }
@[simp] theorem not_lf : ∀ {x y : game}, ¬ x ⧏ y ↔ y ≤ x :=
by { rintro ⟨x⟩ ⟨y⟩, exact not_lf }
instance : is_trichotomous game (⧏) :=
⟨by { rintro ⟨x⟩ ⟨y⟩, change _ ∨ ⟦x⟧ = ⟦y⟧ ∨ _, rw quotient.eq, exact lf_or_equiv_or_gf x y }⟩
theorem _root_.pgame.le_iff_game_le {x y : pgame} : x ≤ y ↔ ⟦x⟧ ≤ ⟦y⟧ := iff.rfl
theorem _root_.pgame.lf_iff_game_lf {x y : pgame} : pgame.lf x y ↔ ⟦x⟧ ⧏ ⟦y⟧ := iff.rfl
theorem _root_.pgame.lt_iff_game_lt {x y : pgame} : x < y ↔ ⟦x⟧ < ⟦y⟧ := iff.rfl
theorem _root_.pgame.equiv_iff_game_eq {x y : pgame} : x ≈ y ↔ ⟦x⟧ = ⟦y⟧ :=
(@quotient.eq _ _ x y).symm
def fuzzy : game → game → Prop :=
quotient.lift₂ fuzzy (λ x₁ y₁ x₂ y₂ hx hy, propext (fuzzy_congr hx hy))
local infix ` ∥ `:50 := fuzzy
theorem _root_.pgame.fuzzy_iff_game_fuzzy {x y : pgame} : pgame.fuzzy x y ↔ ⟦x⟧ ∥ ⟦y⟧ := iff.rfl
instance covariant_class_add_le : covariant_class game game (+) (≤) :=
⟨by { rintro ⟨x⟩ ⟨y⟩ ⟨z⟩ h, exact @add_le_add_left _ _ _ _ y z h x }⟩
instance covariant_class_swap_add_le : covariant_class game game (function.swap (+)) (≤) :=
⟨by { rintro ⟨x⟩ ⟨y⟩ ⟨z⟩ h, exact @add_le_add_right _ _ _ _ y z h x }⟩
instance covariant_class_add_lt : covariant_class game game (+) (<) :=
⟨by { rintro ⟨x⟩ ⟨y⟩ ⟨z⟩ h, exact @add_lt_add_left _ _ _ _ y z h x }⟩
instance covariant_class_swap_add_lt : covariant_class game game (function.swap (+)) (<) :=
⟨by { rintro ⟨x⟩ ⟨y⟩ ⟨z⟩ h, exact @add_lt_add_right _ _ _ _ y z h x }⟩
theorem add_lf_add_right : ∀ {y z : game} (h : y ⧏ z) (x), y + x ⧏ z + x :=
by { rintro ⟨y⟩ ⟨z⟩ h ⟨x⟩, apply add_lf_add_right h }
theorem add_lf_add_left : ∀ {y z : game} (h : y ⧏ z) (x), x + y ⧏ x + z :=
by { rintro ⟨y⟩ ⟨z⟩ h ⟨x⟩, apply add_lf_add_left h }
instance ordered_add_comm_group : ordered_add_comm_group game :=
{ add_le_add_left := @add_le_add_left _ _ _ game.covariant_class_add_le,
..game.add_comm_group,
..game.partial_order }
end game
namespace pgame
@[simp] lemma quot_neg (x : pgame) : ⟦-x⟧ = -⟦x⟧ := rfl
@[simp] lemma quot_add (x y : pgame) : ⟦x + y⟧ = ⟦x⟧ + ⟦y⟧ := rfl
@[simp] lemma quot_sub (x y : pgame) : ⟦x - y⟧ = ⟦x⟧ - ⟦y⟧ := rfl
theorem quot_eq_of_mk_quot_eq {x y : pgame}
(L : x.left_moves ≃ y.left_moves) (R : x.right_moves ≃ y.right_moves)
(hl : ∀ (i : x.left_moves), ⟦x.move_left i⟧ = ⟦y.move_left (L i)⟧)
(hr : ∀ (j : y.right_moves), ⟦x.move_right (R.symm j)⟧ = ⟦y.move_right j⟧) :
⟦x⟧ = ⟦y⟧ :=
begin
simp only [quotient.eq] at hl hr,
exact quotient.sound (equiv_of_mk_equiv L R hl hr)
end
def mul : pgame → pgame → pgame
| x@⟨xl, xr, xL, xR⟩ y@⟨yl, yr, yL, yR⟩ :=
⟨xl × yl ⊕ xr × yr, xl × yr ⊕ xr × yl,
@sum.rec _ _ (λ _, pgame)
(@prod.rec _ _ (λ _, pgame) (λ i₁ i₂, mul (xL i₁) y + mul x (yL i₂) - mul (xL i₁) (yL i₂)))
(@prod.rec _ _ (λ _, pgame) (λ j₁ j₂, mul (xR j₁) y + mul x (yR j₂) - mul (xR j₁) (yR j₂))),
@sum.rec _ _ (λ _, pgame)
(@prod.rec _ _ (λ _, pgame) (λ i j, mul (xL i) y + mul x (yR j) - mul (xL i) (yR j)))
(@prod.rec _ _ (λ _, pgame) (λ j i, mul (xR j) y + mul x (yL i) - mul (xR j) (yL i)))⟩
using_well_founded { dec_tac := pgame_wf_tac }
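-- Conway multiplication: the left options of `x * y` have the form
-- `xL*y + x*yL - xL*yL` and `xR*y + x*yR - xR*yR`, and the right options have the form
-- `xL*y + x*yR - xL*yR` and `xR*y + x*yL - xR*yL`. The `has_mul` instance below unfolds
-- to the same recursion; the `example` after it checks that the two definitions agree.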
instance : has_mul pgame.{u} :=
⟨λ x y, begin
induction x with xl xr xL xR IHxl IHxr generalizing y,
induction y with yl yr yL yR IHyl IHyr,
have y := mk yl yr yL yR,
refine ⟨xl × yl ⊕ xr × yr, xl × yr ⊕ xr × yl, _, _⟩; rintro (⟨i, j⟩ | ⟨i, j⟩),
{ exact IHxl i y + IHyl j - IHxl i (yL j) },
{ exact IHxr i y + IHyr j - IHxr i (yR j) },
{ exact IHxl i y + IHyr j - IHxl i (yR j) },
{ exact IHxr i y + IHyl j - IHxr i (yL j) }
end⟩
example {x y : pgame} : x * y = mul x y :=
begin
induction x with xl xr xL xR IHxl IHxr generalizing y,
induction y with yl yr yL yR IHyl IHyr,
dsimp only [(*)],
unfold mul,
congr,
all_goals
{ ext,
change _ * _ + mk _ _ _ _ * _ - _ * _ = _,
congr; simp [IHxl, IHxr, IHyl, IHyr] }
end
theorem left_moves_mul : ∀ (x y : pgame.{u}), (x * y).left_moves
= (x.left_moves × y.left_moves ⊕ x.right_moves × y.right_moves)
| ⟨_, _, _, _⟩ ⟨_, _, _, _⟩ := rfl
theorem right_moves_mul : ∀ (x y : pgame.{u}), (x * y).right_moves
= (x.left_moves × y.right_moves ⊕ x.right_moves × y.left_moves)
| ⟨_, _, _, _⟩ ⟨_, _, _, _⟩ := rfl
def to_left_moves_mul {x y : pgame} :
x.left_moves × y.left_moves ⊕ x.right_moves × y.right_moves ≃ (x * y).left_moves :=
equiv.cast (left_moves_mul x y).symm
def to_right_moves_mul {x y : pgame} :
x.left_moves × y.right_moves ⊕ x.right_moves × y.left_moves ≃ (x * y).right_moves :=
equiv.cast (right_moves_mul x y).symm
@[simp] lemma mk_mul_move_left_inl {xl xr yl yr} {xL xR yL yR} {i j} :
(mk xl xr xL xR * mk yl yr yL yR).move_left (sum.inl (i, j))
= xL i * (mk yl yr yL yR) + (mk xl xr xL xR) * yL j - xL i * yL j :=
rfl
@[simp] lemma mul_move_left_inl {x y : pgame} {i j} :
(x * y).move_left (to_left_moves_mul (sum.inl (i, j)))
= x.move_left i * y + x * y.move_left j - x.move_left i * y.move_left j :=
by { cases x, cases y, refl }
@[simp] lemma mk_mul_move_left_inr {xl xr yl yr} {xL xR yL yR} {i j} :
(mk xl xr xL xR * mk yl yr yL yR).move_left (sum.inr (i, j))
= xR i * (mk yl yr yL yR) + (mk xl xr xL xR) * yR j - xR i * yR j :=
rfl
@[simp] lemma mul_move_left_inr {x y : pgame} {i j} :
(x * y).move_left (to_left_moves_mul (sum.inr (i, j)))
= x.move_right i * y + x * y.move_right j - x.move_right i * y.move_right j :=
by { cases x, cases y, refl }
@[simp] lemma mk_mul_move_right_inl {xl xr yl yr} {xL xR yL yR} {i j} :
(mk xl xr xL xR * mk yl yr yL yR).move_right (sum.inl (i, j))
= xL i * (mk yl yr yL yR) + (mk xl xr xL xR) * yR j - xL i * yR j :=
rfl
@[simp] lemma mul_move_right_inl {x y : pgame} {i j} :
(x * y).move_right (to_right_moves_mul (sum.inl (i, j)))
= x.move_left i * y + x * y.move_right j - x.move_left i * y.move_right j :=
by { cases x, cases y, refl }
@[simp] lemma mk_mul_move_right_inr {xl xr yl yr} {xL xR yL yR} {i j} :
(mk xl xr xL xR * mk yl yr yL yR).move_right (sum.inr (i, j))
= xR i * (mk yl yr yL yR) + (mk xl xr xL xR) * yL j - xR i * yL j :=
rfl
@[simp] lemma mul_move_right_inr {x y : pgame} {i j} :
(x * y).move_right (to_right_moves_mul (sum.inr (i, j)))
= x.move_right i * y + x * y.move_left j - x.move_right i * y.move_left j :=
by { cases x, cases y, refl }
lemma left_moves_mul_cases {x y : pgame} (k) {P : (x * y).left_moves → Prop}
(hl : ∀ ix iy, P $ to_left_moves_mul (sum.inl ⟨ix, iy⟩))
(hr : ∀ jx jy, P $ to_left_moves_mul (sum.inr ⟨jx, jy⟩)) : P k :=
begin
rw ←to_left_moves_mul.apply_symm_apply k,
rcases to_left_moves_mul.symm k with ⟨ix, iy⟩ | ⟨jx, jy⟩,
{ apply hl },
{ apply hr }
end
lemma right_moves_mul_cases {x y : pgame} (k) {P : (x * y).right_moves → Prop}
(hl : ∀ i j, P $ to_right_moves_mul (sum.inl ⟨i, j⟩))
(hr : ∀ j i, P $ to_right_moves_mul (sum.inr ⟨j, i⟩)) : P k :=
begin
rw ←to_right_moves_mul.apply_symm_apply k,
rcases to_right_moves_mul.symm k with ⟨i, j⟩ | ⟨j, i⟩,
{ apply hl },
{ apply hr }
end
theorem quot_mul_comm : Π (x y : pgame.{u}), ⟦x * y⟧ = ⟦y * x⟧
| (mk xl xr xL xR) (mk yl yr yL yR) :=
begin
refine quot_eq_of_mk_quot_eq
(equiv.sum_congr (equiv.prod_comm _ _) (equiv.prod_comm _ _))
((equiv.sum_comm _ _).trans (equiv.sum_congr (equiv.prod_comm _ _) (equiv.prod_comm _ _))) _ _;
all_goals { rintro (⟨i, j⟩ | ⟨i, j⟩); dsimp; rw [quot_mul_comm, quot_mul_comm (mk xl xr xL xR)] },
{ rw [quot_mul_comm (xL i), add_comm] },
{ rw [quot_mul_comm (xR i), add_comm] },
{ rw [quot_mul_comm (xR j), add_comm] },
{ rw [quot_mul_comm (xL j), add_comm] }
end
using_well_founded { dec_tac := pgame_wf_tac }
theorem mul_comm_equiv (x y : pgame) : x * y ≈ y * x :=
quotient.exact $ quot_mul_comm _ _
instance is_empty_mul_zero_left_moves (x : pgame.{u}) : is_empty (x * 0).left_moves :=
by { cases x, apply sum.is_empty }
instance is_empty_mul_zero_right_moves (x : pgame.{u}) : is_empty (x * 0).right_moves :=
by { cases x, apply sum.is_empty }
instance is_empty_zero_mul_left_moves (x : pgame.{u}) : is_empty (0 * x).left_moves :=
by { cases x, apply sum.is_empty }
instance is_empty_zero_mul_right_moves (x : pgame.{u}) : is_empty (0 * x).right_moves :=
by { cases x, apply sum.is_empty }
def mul_zero_relabelling (x : pgame) : x * 0 ≡r 0 := relabelling.is_empty _
theorem mul_zero_equiv (x : pgame) : x * 0 ≈ 0 := (mul_zero_relabelling x).equiv
@[simp] theorem quot_mul_zero (x : pgame) : ⟦x * 0⟧ = ⟦0⟧ :=
@quotient.sound _ _ (x * 0) _ x.mul_zero_equiv
def zero_mul_relabelling (x : pgame) : 0 * x ≡r 0 := relabelling.is_empty _
theorem zero_mul_equiv (x : pgame) : 0 * x ≈ 0 := (zero_mul_relabelling x).equiv
@[simp] theorem quot_zero_mul (x : pgame) : ⟦0 * x⟧ = ⟦0⟧ :=
@quotient.sound _ _ (0 * x) _ x.zero_mul_equiv
@[simp] theorem quot_neg_mul : ∀ (x y : pgame), ⟦-x * y⟧ = -⟦x * y⟧
| (mk xl xr xL xR) (mk yl yr yL yR) :=
begin
let x := mk xl xr xL xR,
let y := mk yl yr yL yR,
refine quot_eq_of_mk_quot_eq _ _ _ _,
any_goals
{ fsplit; rintro (⟨_, _⟩ | ⟨_, _⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 4 } },
all_goals
{ rintro (⟨_, _⟩ | ⟨_, _⟩),
all_goals
{ change ⟦-_ * y + (-x) * _ - (-_) * _⟧ = ⟦-(_ * y + x * _ - _ * _)⟧,
simp only [quot_add, quot_sub, quot_neg_mul],
simp, abel } }
end
using_well_founded { dec_tac := pgame_wf_tac }
@[simp] theorem quot_mul_neg (x y : pgame) : ⟦x * -y⟧ = -⟦x * y⟧ :=
by rw [quot_mul_comm, quot_neg_mul, quot_mul_comm]
@[simp] theorem quot_left_distrib : ∀ (x y z : pgame), ⟦x * (y + z)⟧ = ⟦x * y⟧ + ⟦x * z⟧
| (mk xl xr xL xR) (mk yl yr yL yR) (mk zl zr zL zR) :=
begin
let x := mk xl xr xL xR,
let y := mk yl yr yL yR,
let z := mk zl zr zL zR,
refine quot_eq_of_mk_quot_eq _ _ _ _,
{ fsplit,
{ rintro (⟨_, _ | _⟩ | ⟨_, _ | _⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 5 } },
{ rintro (⟨⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_, _⟩ | ⟨_, _⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 5 } },
{ rintro (⟨_, _ | _⟩ | ⟨_, _ | _⟩); refl },
{ rintro (⟨⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_, _⟩ | ⟨_, _⟩); refl } },
{ fsplit,
{ rintro (⟨_, _ | _⟩ | ⟨_, _ | _⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 5 } },
{ rintro (⟨⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_, _⟩ | ⟨_, _⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 5 } },
{ rintro (⟨_, _ | _⟩ | ⟨_, _ | _⟩); refl },
{ rintro (⟨⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_, _⟩ | ⟨_, _⟩); refl } },
{ rintro (⟨i, j | k⟩ | ⟨i, j | k⟩),
{ change ⟦xL i * (y + z) + x * (yL j + z) - xL i * (yL j + z)⟧
= ⟦xL i * y + x * yL j - xL i * yL j + x * z⟧,
simp [quot_left_distrib], abel },
{ change ⟦xL i * (y + z) + x * (y + zL k) - xL i * (y + zL k)⟧
= ⟦x * y + (xL i * z + x * zL k - xL i * zL k)⟧,
simp [quot_left_distrib], abel },
{ change ⟦xR i * (y + z) + x * (yR j + z) - xR i * (yR j + z)⟧
= ⟦xR i * y + x * yR j - xR i * yR j + x * z⟧,
simp [quot_left_distrib], abel },
{ change ⟦xR i * (y + z) + x * (y + zR k) - xR i * (y + zR k)⟧
= ⟦x * y + (xR i * z + x * zR k - xR i * zR k)⟧,
simp [quot_left_distrib], abel } },
{ rintro (⟨⟨i, j⟩ | ⟨i, j⟩⟩ | ⟨i, k⟩ | ⟨i, k⟩),
{ change ⟦xL i * (y + z) + x * (yR j + z) - xL i * (yR j + z)⟧
= ⟦xL i * y + x * yR j - xL i * yR j + x * z⟧,
simp [quot_left_distrib], abel },
{ change ⟦xR i * (y + z) + x * (yL j + z) - xR i * (yL j + z)⟧
= ⟦xR i * y + x * yL j - xR i * yL j + x * z⟧,
simp [quot_left_distrib], abel },
{ change ⟦xL i * (y + z) + x * (y + zR k) - xL i * (y + zR k)⟧
= ⟦x * y + (xL i * z + x * zR k - xL i * zR k)⟧,
simp [quot_left_distrib], abel },
{ change ⟦xR i * (y + z) + x * (y + zL k) - xR i * (y + zL k)⟧
= ⟦x * y + (xR i * z + x * zL k - xR i * zL k)⟧,
simp [quot_left_distrib], abel } }
end
using_well_founded { dec_tac := pgame_wf_tac }
theorem left_distrib_equiv (x y z : pgame) : x * (y + z) ≈ x * y + x * z :=
quotient.exact $ quot_left_distrib _ _ _
@[simp] theorem quot_left_distrib_sub (x y z : pgame) : ⟦x * (y - z)⟧ = ⟦x * y⟧ - ⟦x * z⟧ :=
by { change ⟦x * (y + -z)⟧ = ⟦x * y⟧ + -⟦x * z⟧, rw [quot_left_distrib, quot_mul_neg] }
@[simp] theorem quot_right_distrib (x y z : pgame) : ⟦(x + y) * z⟧ = ⟦x * z⟧ + ⟦y * z⟧ :=
by simp only [quot_mul_comm, quot_left_distrib]
theorem right_distrib_equiv (x y z : pgame) : (x + y) * z ≈ x * z + y * z :=
quotient.exact $ quot_right_distrib _ _ _
@[simp] theorem quot_right_distrib_sub (x y z : pgame) : ⟦(y - z) * x⟧ = ⟦y * x⟧ - ⟦z * x⟧ :=
by { change ⟦(y + -z) * x⟧ = ⟦y * x⟧ + -⟦z * x⟧, rw [quot_right_distrib, quot_neg_mul] }
@[simp] theorem quot_mul_one : ∀ (x : pgame), ⟦x * 1⟧ = ⟦x⟧
| (mk xl xr xL xR) :=
begin
let x := mk xl xr xL xR,
refine quot_eq_of_mk_quot_eq _ _ _ _,
{ fsplit,
{ rintro (⟨_, ⟨⟩⟩ | ⟨_, ⟨⟩⟩), assumption },
{ rintro i, exact sum.inl (i, punit.star) },
{ rintro (⟨_, ⟨⟩⟩ | ⟨_, ⟨⟩⟩), refl },
{ rintro i, refl } },
{ fsplit,
{ rintro (⟨_, ⟨⟩⟩ | ⟨_, ⟨⟩⟩), assumption },
{ rintro i, exact sum.inr (i, punit.star) },
{ rintro (⟨_, ⟨⟩⟩ | ⟨_, ⟨⟩⟩), refl },
{ rintro i, refl } },
{ rintro (⟨i, ⟨⟩⟩ | ⟨_, ⟨⟩⟩),
change ⟦xL i * 1 + x * 0 - xL i * 0⟧ = ⟦xL i⟧,
simp [quot_mul_one] },
{ rintro i,
change ⟦xR i * 1 + x * 0 - xR i * 0⟧ = ⟦xR i⟧,
simp [quot_mul_one] }
end
theorem mul_one_equiv (x : pgame) : x * 1 ≈ x := quotient.exact $ quot_mul_one _
@[simp] theorem quot_one_mul (x : pgame) : ⟦1 * x⟧ = ⟦x⟧ :=
by rw [quot_mul_comm, quot_mul_one x]
theorem one_mul_equiv (x : pgame) : 1 * x ≈ x := quotient.exact $ quot_one_mul _
theorem quot_mul_assoc : ∀ (x y z : pgame), ⟦x * y * z⟧ = ⟦x * (y * z)⟧
| (mk xl xr xL xR) (mk yl yr yL yR) (mk zl zr zL zR) :=
begin
let x := mk xl xr xL xR,
let y := mk yl yr yL yR,
let z := mk zl zr zL zR,
refine quot_eq_of_mk_quot_eq _ _ _ _,
{ fsplit,
{ rintro (⟨⟨_, _⟩ | ⟨_, _⟩, _⟩ | ⟨⟨_, _⟩ | ⟨_, _⟩, _⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 7 } },
{ rintro (⟨_, ⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_,⟨_, _⟩ | ⟨_, _⟩⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 7 } },
{ rintro (⟨⟨_, _⟩ | ⟨_, _⟩, _⟩ | ⟨⟨_,_⟩ | ⟨_, _⟩,_⟩); refl },
{ rintro (⟨_, ⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_,⟨_, _⟩ | ⟨_, _⟩⟩); refl } },
{ fsplit,
{ rintro (⟨⟨_, _⟩ | ⟨_, _⟩, _⟩ | ⟨⟨_, _⟩ | ⟨_, _⟩,_⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 7 } },
{ rintro (⟨_, ⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_, ⟨_, _⟩ | ⟨_, _⟩⟩);
solve_by_elim [sum.inl, sum.inr, prod.mk] { max_depth := 7 } },
{ rintro (⟨⟨_, _⟩ | ⟨_, _⟩, _⟩ | ⟨⟨_, _⟩ | ⟨_, _⟩,_⟩); refl },
{ rintro (⟨_, ⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_, ⟨_, _⟩ | ⟨_, _⟩⟩); refl } },
all_goals
{ try { rintro (⟨⟨_, _⟩ | ⟨_, _⟩, _⟩ | ⟨⟨_, _⟩ | ⟨_, _⟩, _⟩) },
try { rintro (⟨_, ⟨_, _⟩ | ⟨_, _⟩⟩ | ⟨_, ⟨_, _⟩ | ⟨_, _⟩⟩) },
all_goals
{ change ⟦(_ * y + x * _ - _ * _) * z + (x * y) * _
- (_ * y + x * _ - _ * _) * _⟧
= ⟦_ * (y * z) + x * (_ * z + y * _ - _ * _)
- _ * (_ * z + y * _ - _ * _)⟧,
simp [quot_mul_assoc], abel } }
end
using_well_founded { dec_tac := pgame_wf_tac }
theorem mul_assoc_equiv (x y z : pgame) : x * y * z ≈ x * (y * z) :=
quotient.exact $ quot_mul_assoc _ _ _
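-- Together with `quot_mul_comm`, the lemmas above show that, up to equivalence
-- of pre-games, multiplication is commutative, associative, distributive over
-- addition, and has `1` as a unit.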
section
parameters {l r : Type u}
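-- `inv_ty l r b` indexes the options of the inverse of a game whose left and
-- right moves are indexed by `l` and `r`: indices with `b = ff` become left
-- options and indices with `b = tt` become right options, each built
-- recursively from a previously constructed option together with a move of the
-- original game (see `inv_val` and `inv'` below).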
inductive inv_ty (l r : Type u) : bool → Type u
| zero : inv_ty ff
| left₁ : r → inv_ty ff → inv_ty ff
| left₂ : l → inv_ty tt → inv_ty ff
| right₁ : l → inv_ty ff → inv_ty tt
| right₂ : r → inv_ty tt → inv_ty tt
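-- What follows is an alternative encoding of the same index type as a pair of
-- mutually inductive types `inv_ty_ff` / `inv_ty_tt`, together with conversion
-- functions `f` and `f_inv` and an `example` exhibiting an equivalence between
-- the two encodings.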
mutual inductive inv_ty_ff, inv_ty_tt (l r : Type u)
with inv_ty_ff : Type u
| zero : inv_ty_ff
| left₁ : r → inv_ty_ff → inv_ty_ff
| left₂ : l → inv_ty_tt → inv_ty_ff
with inv_ty_tt : Type u
| right₁ : l → inv_ty_ff → inv_ty_tt
| right₂ : r → inv_ty_tt → inv_ty_tt
def inv_ty' : bool → Type u
| ff := inv_ty_ff
| tt := inv_ty_tt
def inv_ty'.sizeof : (Σ' b, inv_ty' b) → ℕ
| ⟨ff, x⟩ := @sizeof inv_ty_ff _ x
| ⟨tt, x⟩ := @sizeof inv_ty_tt _ x
meta def inv_ty'.rel_tac : expr → list expr → tactic unit :=
λ _ _, `[exact ⟨_, measure_wf inv_ty'.sizeof⟩]
@[simp] def inv_ty'.rec {C : Π b, inv_ty' b → Sort v}
(hz : C ff inv_ty_ff.zero)
(hl₁ : Π {j x}, C ff x → C ff (inv_ty_ff.left₁ j x))
(hl₂ : Π {i x}, C tt x → C ff (inv_ty_ff.left₂ i x))
(hr₁ : Π {i x}, C ff x → C tt (inv_ty_tt.right₁ i x))
(hr₂ : Π {j x}, C tt x → C tt (inv_ty_tt.right₂ j x))
: Π {b} x, C b x
| ff inv_ty_ff.zero := hz
| ff (inv_ty_ff.left₁ _ x) := hl₁ (@inv_ty'.rec ff x)
| ff (inv_ty_ff.left₂ _ x) := hl₂ (@inv_ty'.rec tt x)
| tt (inv_ty_tt.right₁ _ x) := hr₁ (@inv_ty'.rec ff x)
| tt (inv_ty_tt.right₂ _ x) := hr₂ (@inv_ty'.rec tt x)
using_well_founded { rel_tac := inv_ty'.rel_tac }
@[simp] def f_aux : Π {b}, inv_ty b → inv_ty' b
| _ inv_ty.zero := inv_ty_ff.zero
| _ (inv_ty.left₁ j x) := inv_ty_ff.left₁ j (f_aux x)
| _ (inv_ty.left₂ i x) := inv_ty_ff.left₂ i (f_aux x)
| _ (inv_ty.right₁ i x) := inv_ty_tt.right₁ i (f_aux x)
| _ (inv_ty.right₂ j x) := inv_ty_tt.right₂ j (f_aux x)
@[simp] def f : (Σ b, inv_ty b) → Σ b, inv_ty' b
| ⟨_, x⟩ := ⟨_, f_aux x⟩
@[simp] def f_inv_aux : Π b, inv_ty' b → inv_ty b
| ff inv_ty_ff.zero := inv_ty.zero
| ff (inv_ty_ff.left₁ j x) := inv_ty.left₁ j (f_inv_aux ff x)
| ff (inv_ty_ff.left₂ i x) := inv_ty.left₂ i (f_inv_aux tt x)
| tt (inv_ty_tt.right₁ i x) := inv_ty.right₁ i (f_inv_aux ff x)
| tt (inv_ty_tt.right₂ j x) := inv_ty.right₂ j (f_inv_aux tt x)
using_well_founded { rel_tac := inv_ty'.rel_tac }
@[simp] def f_inv : (Σ b, inv_ty' b) → Σ b, inv_ty b
| ⟨_, x⟩ := ⟨_, f_inv_aux _ x⟩
example : (Σ b, inv_ty b) ≃ Σ b, inv_ty' b :=
by refine ⟨f, f_inv, _, _⟩; rintro ⟨_, x⟩; induction x; simp * at *
end
instance (l r : Type u) [is_empty l] [is_empty r] : is_empty (inv_ty l r tt) :=
⟨by rintro (_|_|_|a|a); exact is_empty_elim a⟩
instance (l r : Type u) : inhabited (inv_ty l r ff) := ⟨inv_ty.zero⟩
instance unique_inv_ty (l r : Type u) [is_empty l] [is_empty r] : unique (inv_ty l r ff) :=
⟨_, by { rintro (_|a|a), refl, all_goals { exact is_empty_elim a } }⟩
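-- `inv_val L R IHl IHr` evaluates an index to an actual pre-game: `zero`
-- becomes `0`, and each recursive constructor produces an option of the form
-- `(1 + (o - x) * previous) * o'`, where `o` is a left or right option of
-- `x = mk l r L R` and `o'` is the corresponding value of `IHl` / `IHr`
-- (intended to be the recursively computed inverse of `o`, as in `inv'` below).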
def inv_val {l r} (L : l → pgame) (R : r → pgame)
(IHl : l → pgame) (IHr : r → pgame) : ∀ {b}, inv_ty l r b → pgame
| _ inv_ty.zero := 0
| _ (inv_ty.left₁ j x) := (1 + (R j - mk l r L R) * inv_val x) * IHr j
| _ (inv_ty.left₂ i x) := (1 + (L i - mk l r L R) * inv_val x) * IHl i
| _ (inv_ty.right₁ i x) := (1 + (L i - mk l r L R) * inv_val x) * IHl i
| _ (inv_ty.right₂ j x) := (1 + (R j - mk l r L R) * inv_val x) * IHr j
@[simp] theorem inv_val_is_empty {l r b} (L R IHl IHr) (x : inv_ty l r b)
[is_empty l] [is_empty r] : inv_val L R IHl IHr x = 0 :=
by { cases x with a _ a _ a _ a, refl, all_goals { exact is_empty_elim a } }
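-- `inv' x` is the candidate inverse of `x`: its left moves are indexed by
-- `inv_ty l' r ff` and its right moves by `inv_ty l' r tt`, where `l'`
-- restricts to the positive left options of `x`.  The definitions `inv''` and
-- `inv'''` below rephrase the same construction via a tactic block and via the
-- recursor, and the accompanying `example`s show they coincide with `inv'`.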
def inv' : pgame → pgame
| ⟨l, r, L, R⟩ :=
let l' := {i // 0 < L i},
L' : l' → pgame := λ i, L i.1,
IHl' : l' → pgame := λ i, inv' (L i.1),
IHr := λ i, inv' (R i) in
⟨inv_ty l' r ff, inv_ty l' r tt,
inv_val L' R IHl' IHr, inv_val L' R IHl' IHr⟩
def inv'' : pgame → pgame
| ⟨l, r, L, R⟩ := begin
let l' := {i // 0 < L i},
let L' := λ i : l', L i.1,
let IHl' := λ i : l', inv'' (L i.1),
let IHr := λ j, inv'' (R j),
exact ⟨inv_ty l' r ff, inv_ty l' r tt, inv_val L' R IHl' IHr, inv_val L' R IHl' IHr⟩
end
example : inv' = inv'' :=
begin
ext x,
induction x with _ _ _ _ IHl _,
dsimp only [inv'],
congr; ext; solve_by_elim [λ i, IHl (subtype.val i)]
end
def inv''' (x : pgame) : pgame :=
begin
induction x with l r L R IHl IHr,
let l' := {i // 0 < L i},
let L' := λ i : l', L i.1,
let IHl' := λ i : l', IHl i.1,
exact ⟨inv_ty l' r ff, inv_ty l' r tt, inv_val L' R IHl' IHr, inv_val L' R IHl' IHr⟩
end
example : inv' = inv''' :=
begin
ext x,
induction x with _ _ _ _ IHl _,
dsimp only [inv'],
congr; ext; solve_by_elim [λ i, IHl (subtype.val i)]
end
theorem zero_lf_inv' : ∀ (x : pgame), 0 ⧏ inv' x
| ⟨xl, xr, xL, xR⟩ := by { convert lf_mk _ _ inv_ty.zero, refl }
def inv'_zero : inv' 0 ≡r 1 :=
begin
change mk _ _ _ _ ≡r 1,
refine ⟨_, _, λ i, _, is_empty_elim⟩,
{ dsimp, apply equiv.equiv_punit },
{ dsimp, apply equiv.equiv_pempty },
{ simp }
end
theorem inv'_zero_equiv : inv' 0 ≈ 1 := inv'_zero.equiv
def inv'_one : inv' 1 ≡r (1 : pgame.{u}) :=
begin
change mk _ _ _ _ ≡r 1,
haveI : is_empty {i : punit.{u+1} // (0 : pgame.{u}) < 0},
{ rw lt_self_iff_false, apply_instance },
refine ⟨_, _, λ i, _, is_empty_elim⟩,
{ dsimp, apply equiv.equiv_punit },
{ dsimp, apply equiv.equiv_pempty },
{ simp }
end
theorem inv'_one_equiv : inv' 1 ≈ 1 := inv'_one.equiv
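-- The inverse of an arbitrary pre-game: `0` for games equivalent to `0`,
-- `inv' x` for positive games, and `-inv' (-x)` otherwise.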
noncomputable instance : has_inv pgame :=
⟨by { classical, exact λ x, if x ≈ 0 then 0 else if 0 < x then inv' x else -inv' (-x) }⟩
noncomputable instance : has_div pgame := ⟨λ x y, x * y⁻¹⟩
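-- Division unfolds definitionally to multiplication by the inverse (sketch):
example (x y : pgame) : x / y = x * y⁻¹ := rfl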
theorem inv_eq_of_equiv_zero {x : pgame} (h : x ≈ 0) : x⁻¹ = 0 := if_pos h
@[simp] theorem inv_zero : (0 : pgame)⁻¹ = 0 :=
inv_eq_of_equiv_zero (equiv_refl _)
theorem inv_eq_of_pos {x : pgame} (h : 0 < x) : x⁻¹ = inv' x :=
(if_neg h.lf.not_equiv').trans (if_pos h)
theorem inv_eq_of_lf_zero {x : pgame} (h : x ⧏ 0) : x⁻¹ = -inv' (-x) :=
(if_neg h.not_equiv).trans (if_neg h.not_gt)
def inv_one : 1⁻¹ ≡r 1 :=
by { rw inv_eq_of_pos zero_lt_one, exact inv'_one }
theorem inv_one_equiv : 1⁻¹ ≈ 1 := inv_one.equiv
end pgame |
section \<open>Chamber complexes\<close>
text \<open>
Now we develop the basic theory of chamber complexes, including both thin and thick complexes.
Some terminology: a maximal simplex is now called a chamber, and a chain (with respect to
adjacency) of chambers is now called a gallery. A gallery in which no chamber appears more than
once is called proper, and we use the prefix p to denote proper (as in pgallery).
Again, we remind the reader that some sources reserve the name gallery for (a slightly weaker
notion of) what we are calling a proper gallery, using pregallery to denote an improper gallery.
\<close>
theory Chamber
imports Algebra Simplicial
begin
subsection \<open>Locale definition and basic facts\<close>
locale ChamberComplex = SimplicialComplex X
for X :: "'a set set"
+ assumes simplex_in_max : "y\<in>X \<Longrightarrow> \<exists>x. maxsimp x \<and> y\<subseteq>x"
and maxsimp_connect: "\<lbrakk> x \<noteq> y; maxsimp x; maxsimp y \<rbrakk> \<Longrightarrow>
\<exists>xs. maxsimpchain (x#xs@[y])"
context ChamberComplex
begin
abbreviation "chamber \<equiv> maxsimp"
abbreviation "gallery \<equiv> maxsimpchain"
abbreviation "pgallery \<equiv> pmaxsimpchain"
abbreviation "min_gallery \<equiv> min_maxsimpchain"
abbreviation "supchamber v \<equiv> (SOME C. chamber C \<and> v\<in>C)"
lemmas faces = faces
lemmas singleton_simplex = singleton_simplex
lemmas chamberI = maxsimpI
lemmas chamberD_simplex = maxsimpD_simplex
lemmas chamberD_maximal = maxsimpD_maximal
lemmas finite_chamber = finite_maxsimp
lemmas chamber_nempty = maxsimp_nempty
lemmas chamber_vertices = maxsimp_vertices
lemmas gallery_def = maxsimpchain_def
lemmas gallery_snocI = maxsimpchain_snocI
lemmas galleryD_chamber = maxsimpchainD_maxsimp
lemmas galleryD_adj = maxsimpchainD_adj
lemmas gallery_CConsI = maxsimpchain_CConsI
lemmas gallery_Cons_reduce = maxsimpchain_Cons_reduce
lemmas gallery_append_reduce1 = maxsimpchain_append_reduce1
lemmas gallery_append_reduce2 = maxsimpchain_append_reduce2
lemmas gallery_remdup_adj = maxsimpchain_remdup_adj
lemmas gallery_obtain_pgallery = maxsimpchain_obtain_pmaxsimpchain
lemmas pgallery_def = pmaxsimpchain_def
lemmas pgalleryI_gallery = pmaxsimpchainI_maxsimpchain
lemmas pgalleryD_chamber = pmaxsimpchainD_maxsimp
lemmas pgalleryD_adj = pmaxsimpchainD_adj
lemmas pgalleryD_distinct = pmaxsimpchainD_distinct
lemmas pgallery_Cons_reduce = pmaxsimpchain_Cons_reduce
lemmas pgallery_append_reduce1 = pmaxsimpchain_append_reduce1
lemmas pgallery = pmaxsimpchain
lemmas min_gallery_simps = min_maxsimpchain.simps
lemmas min_galleryI_betw = min_maxsimpchainI_betw
lemmas min_galleryI_betw_compare = min_maxsimpchainI_betw_compare
lemmas min_galleryD_min_betw = min_maxsimpchainD_min_betw
lemmas min_galleryD_gallery = min_maxsimpchainD_maxsimpchain
lemmas min_gallery_pgallery = min_maxsimpchain_pmaxsimpchain
lemmas min_gallery_rev = min_maxsimpchain_rev
lemmas min_gallery_adj = min_maxsimpchain_adj
lemmas not_min_galleryI_betw = not_min_maxsimpchainI_betw
lemmas min_gallery_betw_CCons_reduce =
min_maxsimpchain_betw_CCons_reduce
lemmas min_gallery_betw_uniform_length =
min_maxsimpchain_betw_uniform_length
lemmas vertex_set_int = vertex_set_int[OF ChamberComplex.axioms(1)]
lemma chamber_pconnect:
"\<lbrakk> x \<noteq> y; chamber x; chamber y \<rbrakk> \<Longrightarrow> \<exists>xs. pgallery (x#xs@[y])"
using maxsimp_connect[of x y] gallery_obtain_pgallery[of x y] by fast
lemma supchamberD:
assumes "v\<in>\<Union>X"
defines "C \<equiv> supchamber v"
shows "chamber C" "v\<in>C"
using assms simplex_in_max someI[of "\<lambda>C. chamber C \<and> v\<in>C"]
by auto
definition
"ChamberSubcomplex Y \<equiv> Y \<subseteq> X \<and> ChamberComplex Y \<and>
(\<forall>C. ChamberComplex.chamber Y C \<longrightarrow> chamber C)"
lemma ChamberSubcomplexI:
assumes "Y\<subseteq>X" "ChamberComplex Y"
"\<And>y. ChamberComplex.chamber Y y \<Longrightarrow> chamber y"
shows "ChamberSubcomplex Y"
using assms ChamberSubcomplex_def
by fast
lemma ChamberSubcomplexD_sub: "ChamberSubcomplex Y \<Longrightarrow> Y \<subseteq> X"
using ChamberSubcomplex_def by fast
lemma ChamberSubcomplexD_complex:
"ChamberSubcomplex Y \<Longrightarrow> ChamberComplex Y"
unfolding ChamberSubcomplex_def by fast
lemma chambersub_imp_sub: "ChamberSubcomplex Y \<Longrightarrow> Subcomplex Y"
using ChamberSubcomplex_def ChamberComplex.axioms(1) by fast
lemma chamber_in_subcomplex:
"\<lbrakk> ChamberSubcomplex Y; C \<in> Y; chamber C \<rbrakk> \<Longrightarrow>
ChamberComplex.chamber Y C"
using chambersub_imp_sub max_in_subcomplex by simp
lemma subcomplex_chamber:
"ChamberSubcomplex Y \<Longrightarrow> ChamberComplex.chamber Y C \<Longrightarrow> chamber C"
unfolding ChamberSubcomplex_def by fast
lemma gallery_in_subcomplex:
"\<lbrakk> ChamberSubcomplex Y; set ys \<subseteq> Y; gallery ys \<rbrakk> \<Longrightarrow>
ChamberComplex.gallery Y ys"
using chambersub_imp_sub maxsimpchain_in_subcomplex by simp
lemma subcomplex_gallery:
"ChamberSubcomplex Y \<Longrightarrow> ChamberComplex.gallery Y Cs \<Longrightarrow> gallery Cs"
using ChamberSubcomplex_def gallery_def ChamberComplex.gallery_def
by fastforce
lemma subcomplex_pgallery:
"ChamberSubcomplex Y \<Longrightarrow> ChamberComplex.pgallery Y Cs \<Longrightarrow> pgallery Cs"
using ChamberSubcomplex_def pgallery_def ChamberComplex.pgallery_def
by fastforce
lemma min_gallery_in_subcomplex:
assumes "ChamberSubcomplex Y" "min_gallery Cs" "set Cs \<subseteq> Y"
shows "ChamberComplex.min_gallery Y Cs"
proof (cases Cs rule: list_cases_Cons_snoc)
case Nil with assms(1) show ?thesis
using ChamberSubcomplexD_complex ChamberComplex.min_gallery_simps(1)
by fast
next
case Single with assms show ?thesis
using min_galleryD_gallery galleryD_chamber chamber_in_subcomplex
ChamberComplex.min_gallery_simps(2) ChamberSubcomplexD_complex
by force
next
case (Cons_snoc C Ds D)
with assms show ?thesis
using ChamberSubcomplexD_complex min_gallery_pgallery
pgalleryD_distinct[of "C#Ds@[D]"] pgallery
gallery_in_subcomplex[of Y] subcomplex_gallery
min_galleryD_min_betw
ChamberComplex.min_galleryI_betw[of Y]
by force
qed
lemma chamber_card: "chamber C \<Longrightarrow> chamber D \<Longrightarrow> card C = card D"
using maxsimp_connect[of C D] galleryD_adj adjacentchain_card
by (cases "C=D") auto
lemma chamber_facet_is_chamber_facet:
"\<lbrakk> chamber C; chamber D; z\<lhd>C; z\<subseteq>D \<rbrakk> \<Longrightarrow> z\<lhd>D"
using finite_chamber finite_facetrel_card chamber_card[of C]
by (fastforce intro: facetrelI_cardSuc)
lemma chamber_adj:
assumes "chamber C" "D\<in>X" "C \<sim> D"
shows "chamber D"
proof-
from assms(2) obtain B where B: "chamber B" "D\<subseteq>B"
using simplex_in_max by fast
with assms(1,3) show ?thesis
using chamber_card[of B] adjacent_card finite_chamber card_subset_eq[of B D]
by force
qed
lemma chambers_share_facet:
assumes "chamber C" "chamber (insert v z)" "z\<lhd>C"
shows "z\<lhd>insert v z"
proof (rule facetrelI)
from assms show "v\<notin>z"
using finite_chamber[of C] finite_chamber[of "insert v z"] card_insert_if[of z v]
by (auto simp add: finite_facetrel_card chamber_card)
qed simp
lemma chamber_shared_facet: "\<lbrakk> chamber C; z\<lhd>C; D\<in>X; z\<lhd>D \<rbrakk> \<Longrightarrow> chamber D"
by (fast intro: chamber_adj adjacentI)
lemma adjacentset_conv_facetchambersets:
assumes "X \<noteq> {{}}" "chamber C"
shows "adjacentset C = (\<Union>v\<in>C. {D\<in>X. C-{v}\<lhd>D})"
proof (rule seteqI)
fix D assume D: "D \<in> adjacentset C"
show "D \<in> (\<Union>v\<in>C. {D\<in>X. C-{v}\<lhd>D})"
proof (cases "D=C")
case True with assms
have "C \<noteq> {}" and "C \<in> X"
using chamber_nempty chamberD_simplex by auto
with True assms show ?thesis
using facetrel_diff_vertex by fastforce
next
case False
from D have D': "C\<sim>D" using adjacentsetD_adj by fast
with False obtain v where v: "v\<notin>D" "C = insert v (C\<inter>D)"
using adjacent_int_decomp by fast
hence "C-{v} = C\<inter>D" by auto
with D' False have "C-{v} \<lhd> D" using adjacent_int_facet2 by auto
with assms(2) D v(2) show ?thesis using adjacentset_def by fast
qed
next
from assms(2)
show "\<And>D. D \<in> (\<Union>v\<in>C. {E\<in>X. C-{v}\<lhd>E}) \<Longrightarrow>
D \<in> adjacentset C"
using facetrel_diff_vertex adjacentI
unfolding adjacentset_def
by fastforce
qed
end (* context ChamberComplex *)
subsection \<open>The system of chambers and distance between chambers\<close>
text \<open>
We now examine the system of all chambers in more detail, and explore the distance function on
this system provided by lengths of minimal galleries.
\<close>
context ChamberComplex
begin
definition chamber_system :: "'a set set"
where "chamber_system \<equiv> {C. chamber C}"
abbreviation "\<C> \<equiv> chamber_system"
definition chamber_distance :: "'a set \<Rightarrow> 'a set \<Rightarrow> nat"
where "chamber_distance C D =
(if C=D then 0 else
Suc (length (ARG_MIN length Cs. gallery (C#Cs@[D]))))"
definition closest_supchamber :: "'a set \<Rightarrow> 'a set \<Rightarrow> 'a set"
where "closest_supchamber F D =
(ARG_MIN (\<lambda>C. chamber_distance C D) C.
chamber C \<and> F\<subseteq>C)"
definition "face_distance F D \<equiv> chamber_distance (closest_supchamber F D) D"
lemma chamber_system_simplices: "\<C> \<subseteq> X"
using chamberD_simplex unfolding chamber_system_def by fast
lemma gallery_chamber_system: "gallery Cs \<Longrightarrow> set Cs \<subseteq> \<C>"
using galleryD_chamber chamber_system_def by fast
lemmas pgallery_chamber_system = gallery_chamber_system[OF pgallery]
lemma chamber_distance_le:
"gallery (C#Cs@[D]) \<Longrightarrow> chamber_distance C D \<le> Suc (length Cs)"
using chamber_distance_def
arg_min_nat_le[of "\<lambda>Cs. gallery (C#Cs@[D])" _ length]
by auto
lemma min_gallery_betw_chamber_distance:
"min_gallery (C#Cs@[D]) \<Longrightarrow> chamber_distance C D = Suc (length Cs)"
using chamber_distance_def[of C D] is_arg_min_size[of length _ Cs] by auto
lemma min_galleryI_chamber_distance_betw:
"gallery (C#Cs@[D]) \<Longrightarrow> Suc (length Cs) = chamber_distance C D \<Longrightarrow>
min_gallery (C#Cs@[D])"
using chamber_distance_def chamber_distance_le min_galleryI_betw[of C D]
by fastforce
lemma gallery_least_length:
assumes "chamber C" "chamber D" "C\<noteq>D"
defines "Cs \<equiv> ARG_MIN length Cs. gallery (C#Cs@[D])"
shows "gallery (C#Cs@[D])"
using assms maxsimp_connect[of C D] arg_min_natI
by fast
lemma min_gallery_least_length:
assumes "chamber C" "chamber D" "C\<noteq>D"
defines "Cs \<equiv> ARG_MIN length Cs. gallery (C#Cs@[D])"
shows "min_gallery (C#Cs@[D])"
unfolding Cs_def
using assms gallery_least_length
by (blast intro: min_galleryI_betw arg_min_nat_le)
lemma pgallery_least_length:
assumes "chamber C" "chamber D" "C\<noteq>D"
defines "Cs \<equiv> ARG_MIN length Cs. gallery (C#Cs@[D])"
shows "pgallery (C#Cs@[D])"
using assms min_gallery_least_length min_gallery_pgallery
by fast
lemma closest_supchamberD:
assumes "F\<in>X" "chamber D"
shows "chamber (closest_supchamber F D)" "F \<subseteq> closest_supchamber F D"
using assms arg_min_natI[of "\<lambda>C. chamber C \<and> F\<subseteq>C" ] simplex_in_max[of F]
unfolding closest_supchamber_def
by auto
lemma closest_supchamber_closest:
"chamber C \<Longrightarrow> F\<subseteq>C \<Longrightarrow>
chamber_distance (closest_supchamber F D) D \<le> chamber_distance C D"
using arg_min_nat_le[of "\<lambda>C. chamber C \<and> F\<subseteq>C" C] closest_supchamber_def
by simp
lemma face_distance_le:
"chamber C \<Longrightarrow> F\<subseteq>C \<Longrightarrow> face_distance F D \<le> chamber_distance C D"
unfolding face_distance_def closest_supchamber_def
by (auto intro: arg_min_nat_le)
lemma face_distance_eq_0: "chamber C \<Longrightarrow> F\<subseteq>C \<Longrightarrow> face_distance F C = 0"
using chamber_distance_def closest_supchamber_def face_distance_def
arg_min_equality[
of "\<lambda>C. chamber C \<and> F\<subseteq>C" C "\<lambda>D. chamber_distance D C"
]
by simp
end (* context ChamberComplex *)
subsection \<open>Labelling a chamber complex\<close>
text \<open>
A labelling of a chamber complex is a function on the vertex set that restricts to a bijection
from each chamber onto a fixed set of labels (recall that all chambers have the same number of
vertices).
\<close>
context ChamberComplex
begin
definition label_wrt :: "'b set \<Rightarrow> ('a\<Rightarrow>'b) \<Rightarrow> bool"
where "label_wrt B f \<equiv> (\<forall>C\<in>\<C>. bij_betw f C B)"
lemma label_wrtD: "label_wrt B f \<Longrightarrow> C\<in>\<C> \<Longrightarrow> bij_betw f C B"
using label_wrt_def by fast
lemma label_wrtD': "label_wrt B f \<Longrightarrow> chamber C \<Longrightarrow> bij_betw f C B"
using label_wrt_def chamber_system_def by fast
lemma label_wrt_adjacent:
assumes "label_wrt B f" "chamber C" "chamber D" "C\<sim>D" "v\<in>C-D" "w\<in>D-C"
shows "f v = f w"
proof-
from assms(5) have "f`D = insert (f v) (f`(C\<inter>D))"
using adjacent_conv_insert[OF assms(4), of v] label_wrtD'[OF assms(1,2)]
label_wrtD'[OF assms(1,3)]
bij_betw_imp_surj_on[of f]
by force
with assms(6) show ?thesis
using adjacent_sym[OF assms(4)] adjacent_conv_insert[of D C]
inj_on_insert[of f w "C\<inter>D"]
bij_betw_imp_inj_on[OF label_wrtD', OF assms(1,3)]
by (force simp add: Int_commute)
qed
lemma label_wrt_adjacent_shared_facet:
"\<lbrakk> label_wrt B f; chamber (insert v z); chamber (insert w z); v\<notin>z; w\<notin>z \<rbrakk> \<Longrightarrow>
f v = f w"
by (auto intro: label_wrt_adjacent adjacentI facetrelI)
lemma label_wrt_elt_image: "label_wrt B f \<Longrightarrow> v\<in>\<Union>X \<Longrightarrow> f v \<in> B"
using simplex_in_max label_wrtD' bij_betw_imp_surj_on by fast
end (* context ChamberComplex *)
subsection \<open>Morphisms of chamber complexes\<close>
text \<open>
While any function on the vertex set of a simplicial complex can be considered a morphism of
simplicial complexes onto its image, for chamber complexes we require that the function send
chambers onto chambers of the same cardinality in some chamber complex of the codomain.
\<close>
subsubsection \<open>Morphism locale and basic facts\<close>
locale ChamberComplexMorphism = domain: ChamberComplex X + codomain: ChamberComplex Y
for X :: "'a set set"
and Y :: "'b set set"
+ fixes f :: "'a\<Rightarrow>'b"
assumes chamber_map: "domain.chamber C \<Longrightarrow> codomain.chamber (f`C)"
and dim_map : "domain.chamber C \<Longrightarrow> card (f`C) = card C"
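text \<open>
  The assumptions chamber_map and dim_map say that the vertex map f sends each chamber of the
  domain complex onto a chamber of the codomain complex of the same cardinality. The identity map
  provides the basic examples below: it is a morphism of a chamber complex to itself, and it also
  realises the inclusion of a chamber subcomplex into its ambient complex.
\<close>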
lemma (in ChamberComplex) trivial_morphism:
"ChamberComplexMorphism X X id"
by unfold_locales auto
lemma (in ChamberComplex) inclusion_morphism:
assumes "ChamberSubcomplex Y"
shows "ChamberComplexMorphism Y X id"
by (
rule ChamberComplexMorphism.intro,
rule ChamberSubcomplexD_complex,
rule assms, unfold_locales
)
(auto simp add: subcomplex_chamber[OF assms])
context ChamberComplexMorphism
begin
lemmas domain_complex = domain.ChamberComplex_axioms
lemmas codomain_complex = codomain.ChamberComplex_axioms
lemmas simplicialcomplex_image = domain.map_is_simplicial_morph[of f]
lemma cong: "fun_eq_on g f (\<Union>X) \<Longrightarrow> ChamberComplexMorphism X Y g"
using chamber_map domain.chamber_vertices fun_eq_on_im[of g f] dim_map
domain.chamber_vertices
by unfold_locales auto
lemma comp:
assumes "ChamberComplexMorphism Y Z g"
shows "ChamberComplexMorphism X Z (g\<circ>f)"
proof (
rule ChamberComplexMorphism.intro, rule domain_complex,
rule ChamberComplexMorphism.axioms(2), rule assms, unfold_locales
)
fix C assume C: "domain.chamber C"
from C show "SimplicialComplex.maxsimp Z ((g\<circ>f)`C)"
using chamber_map ChamberComplexMorphism.chamber_map[OF assms]
by (force simp add: image_comp[THEN sym])
from C show "card ((g \<circ> f)`C) = card C"
using chamber_map dim_map ChamberComplexMorphism.dim_map[OF assms]
by (force simp add: image_comp[THEN sym])
qed
lemma restrict_domain:
assumes "domain.ChamberSubcomplex W"
shows "ChamberComplexMorphism W Y f"
proof (
rule ChamberComplexMorphism.intro, rule domain.ChamberSubcomplexD_complex,
rule assms, rule codomain_complex, unfold_locales
)
fix C assume "ChamberComplex.chamber W C"
with assms show "codomain.chamber (f`C)" "card (f`C) = card C"
using domain.subcomplex_chamber chamber_map dim_map by auto
qed
lemma restrict_codomain:
assumes "codomain.ChamberSubcomplex Z" "f\<turnstile>X \<subseteq> Z"
shows "ChamberComplexMorphism X Z f"
proof (
rule ChamberComplexMorphism.intro, rule domain_complex,
rule codomain.ChamberSubcomplexD_complex,
rule assms, unfold_locales
)
fix C assume "domain.chamber C"
with assms show "SimplicialComplex.maxsimp Z (f`C)" "card (f ` C) = card C"
using domain.chamberD_simplex[of C] chamber_map
codomain.chamber_in_subcomplex dim_map
by auto
qed
lemma inj_on_chamber: "domain.chamber C \<Longrightarrow> inj_on f C"
using domain.finite_chamber dim_map by (fast intro: eq_card_imp_inj_on)
lemma bij_betw_chambers: "domain.chamber C \<Longrightarrow> bij_betw f C (f`C)"
using inj_on_chamber by (fast intro: bij_betw_imageI)
lemma card_map: "x\<in>X \<Longrightarrow> card (f`x) = card x"
using domain.simplex_in_max subset_inj_on[OF inj_on_chamber]
domain.finite_simplex inj_on_iff_eq_card
by blast
lemma codim_map:
assumes "domain.chamber C" "y \<subseteq> C"
shows "card (f`C - f`y) = card (C-y)"
using assms dim_map domain.chamberD_simplex domain.faces[of C y]
domain.finite_simplex card_Diff_subset[of "f`y" "f`C"]
card_map card_Diff_subset[of y C]
by auto
lemma simplex_map: "x\<in>X \<Longrightarrow> f`x\<in>Y"
using chamber_map domain.simplex_in_max codomain.chamberD_simplex
codomain.faces[of _ "f`x"]
by force
lemma simplices_map: "f\<turnstile>X \<subseteq> Y"
using simplex_map by fast
lemma vertex_map: "x \<in> \<Union>X \<Longrightarrow> f x \<in> \<Union>Y"
using simplex_map by fast
lemma facet_map: "domain.chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> f`z \<lhd> f`C"
using facetrel_subset facetrel_card codim_map[of C z]
by (fastforce intro: facetrelI_card)
lemma adj_int_im:
assumes "domain.chamber C" "domain.chamber D" "C \<sim> D" "f`C \<noteq> f`D"
shows "(f`C \<inter> f`D) \<lhd> f`C"
proof (rule facetrelI_card)
from assms(1,2) chamber_map have 1: "f`C \<subseteq> f`D \<Longrightarrow> f`C = f`D"
using codomain.chamberD_simplex codomain.chamberD_maximal[of "f`C" "f`D"]
by simp
thus "f ` C \<inter> f ` D \<subseteq> f ` C" by fast
from assms(1) have "card (f`C - f`C \<inter> f`D) \<le> card (f`C - f`(C\<inter>D))"
using domain.finite_chamber
card_mono[of "f`C - f`(C\<inter>D)" "f`C - f`C \<inter> f`D"]
by fast
moreover from assms(1,3,4) have "card (f`C - f`(C\<inter>D)) = 1"
using codim_map[of C "C\<inter>D"] adjacent_int_facet1 facetrel_card
by fastforce
ultimately have "card (f`C - f`C \<inter> f`D) \<le> 1" by simp
moreover from 1 assms(1,4) have "card (f`C - f`C \<inter> f`D) \<noteq> 0"
using domain.finite_chamber by auto
ultimately show "card (f`C - f`C \<inter> f`D) = 1" by simp
qed
lemma adj_map':
assumes "domain.chamber C" "domain.chamber D" "C \<sim> D" "f`C \<noteq> f`D"
shows "f`C \<sim> f`D"
using assms(3,4) adj_int_im[OF assms] adjacent_sym
adj_int_im[OF assms(2) assms(1)]
by (auto simp add: Int_commute intro: adjacentI)
lemma adj_map:
"\<lbrakk> domain.chamber C; domain.chamber D; C \<sim> D \<rbrakk> \<Longrightarrow> f`C \<sim> f`D"
using adjacent_refl[of "f`C"] adj_map' empty_not_adjacent[of D] by fastforce
lemma chamber_vertex_outside_facet_image:
assumes "v\<notin>z" "domain.chamber (insert v z)"
shows "f v \<notin> f`z"
proof-
from assms(1) have "insert v z - z = {v}" by force
with assms(2) show ?thesis using codim_map by fastforce
qed
lemma expand_codomain:
assumes "ChamberComplex Z" "ChamberComplex.ChamberSubcomplex Z Y"
shows "ChamberComplexMorphism X Z f"
proof (
rule ChamberComplexMorphism.intro, rule domain_complex, rule assms(1),
unfold_locales
)
from assms show
"\<And>x. domain.chamber x \<Longrightarrow> SimplicialComplex.maxsimp Z (f ` x)"
using chamber_map ChamberComplex.subcomplex_chamber by fast
qed (auto simp add: dim_map)
end (* context ChamberComplexMorphism *)
subsubsection \<open>Action on pregalleries and galleries\<close>
context ChamberComplexMorphism
begin
lemma gallery_map: "domain.gallery Cs \<Longrightarrow> codomain.gallery (f\<Turnstile>Cs)"
proof (induct Cs rule: list_induct_CCons)
case (Single C) thus ?case
using domain.galleryD_chamber chamber_map codomain.gallery_def by auto
next
case (CCons B C Cs)
have "codomain.gallery (f`B # f`C # f\<Turnstile>Cs)"
proof (rule codomain.gallery_CConsI)
from CCons(2) show "codomain.chamber (f ` B)"
using domain.galleryD_chamber chamber_map by simp
from CCons show "codomain.gallery (f`C # f\<Turnstile>Cs)"
using domain.gallery_Cons_reduce by auto
from CCons(2) show "f`B \<sim> f`C"
using domain.gallery_Cons_reduce[of B "C#Cs"] domain.galleryD_adj
domain.galleryD_chamber adj_map
by fastforce
qed
thus ?case by simp
qed (simp add: codomain.maxsimpchain_def)
lemma gallery_betw_map:
"domain.gallery (C#Cs@[D]) \<Longrightarrow> codomain.gallery (f`C # f\<Turnstile>Cs @ [f`D])"
using gallery_map by fastforce
end (* context ChamberComplexMorphism *)
subsubsection \<open>Properties of the image\<close>
context ChamberComplexMorphism
begin
lemma subcomplex_image: "codomain.Subcomplex (f\<turnstile>X)"
using simplicialcomplex_image simplex_map by fast
lemmas chamber_in_image = codomain.max_in_subcomplex[OF subcomplex_image]
lemma maxsimp_map_into_image:
assumes "domain.chamber x"
shows "SimplicialComplex.maxsimp (f\<turnstile>X) (f`x)"
proof (
rule SimplicialComplex.maxsimpI, rule simplicialcomplex_image, rule imageI,
rule domain.chamberD_simplex, rule assms
)
from assms show "\<And>z. z\<in>f\<turnstile>X \<Longrightarrow> f`x \<subseteq> z \<Longrightarrow> z = f`x"
using chamber_map[of x] simplex_map codomain.chamberD_maximal[of "f`x"]
by blast
qed
lemma maxsimp_preimage:
assumes "C\<in>X" "SimplicialComplex.maxsimp (f\<turnstile>X) (f`C)"
shows "domain.chamber C"
proof-
from assms(1) obtain D where D: "domain.chamber D" "C\<subseteq>D"
using domain.simplex_in_max by fast
have "C=D"
proof (rule card_subset_eq)
from D(1) show "finite D" using domain.finite_chamber by fast
with assms D show "card C = card D"
using domain.chamberD_simplex simplicialcomplex_image
SimplicialComplex.maxsimpD_maximal[of "f\<turnstile>X" "f`C" "f`D"]
card_mono[of D C] domain.finite_simplex card_image_le[of C f] dim_map
by force
qed (rule D(2))
with D(1) show ?thesis by fast
qed
lemma chamber_preimage:
"C\<in>X \<Longrightarrow> codomain.chamber (f`C) \<Longrightarrow> domain.chamber C"
using chamber_in_image maxsimp_preimage by simp
lemma chambercomplex_image: "ChamberComplex (f\<turnstile>X)"
proof (intro_locales, rule simplicialcomplex_image, unfold_locales)
show "\<And>y. y\<in>f\<turnstile>X \<Longrightarrow> \<exists>x. SimplicialComplex.maxsimp (f\<turnstile>X) x \<and> y \<subseteq> x"
using domain.simplex_in_max maxsimp_map_into_image by fast
next
fix x y
assume xy: "x\<noteq>y" "SimplicialComplex.maxsimp (f\<turnstile>X) x"
"SimplicialComplex.maxsimp (f\<turnstile>X) y"
from xy(2,3) obtain zx zy where zxy: "zx\<in>X" "x = f`zx" "zy\<in>X" "y = f`zy "
using SimplicialComplex.maxsimpD_simplex[OF simplicialcomplex_image, of x]
SimplicialComplex.maxsimpD_simplex[OF simplicialcomplex_image, of y]
by fast
with xy obtain ws where ws: "domain.gallery (zx#ws@[zy])"
using maxsimp_preimage domain.maxsimp_connect[of zx zy] by auto
with ws zxy(2,4) have "SimplicialComplex.maxsimpchain (f\<turnstile>X) (x#(f\<Turnstile>ws)@[y])"
using gallery_map[of "zx#ws@[zy]"] domain.galleryD_chamber
domain.chamberD_simplex codomain.galleryD_chamber
codomain.max_in_subcomplex[OF subcomplex_image]
codomain.galleryD_adj
SimplicialComplex.maxsimpchain_def[OF simplicialcomplex_image]
by auto
thus "\<exists>xs. SimplicialComplex.maxsimpchain (f\<turnstile>X) (x#xs@[y])" by fast
qed
lemma chambersubcomplex_image: "codomain.ChamberSubcomplex (f\<turnstile>X)"
using simplices_map chambercomplex_image ChamberComplex.chamberD_simplex
chambercomplex_image maxsimp_preimage chamber_map
by (force intro: codomain.ChamberSubcomplexI)
lemma restrict_codomain_to_image: "ChamberComplexMorphism X (f\<turnstile>X) f"
using restrict_codomain chambersubcomplex_image by fast
end (* context ChamberComplexMorphism *)
subsubsection \<open>Action on the chamber system\<close>
context ChamberComplexMorphism
begin
lemma chamber_system_into: "f\<turnstile>domain.\<C> \<subseteq> codomain.\<C>"
using chamber_map domain.chamber_system_def codomain.chamber_system_def
by auto
lemma image_chamber_system: "ChamberComplex.\<C> (f\<turnstile>X) = f \<turnstile> domain.\<C>"
using ChamberComplex.chamber_system_def codomain.subcomplex_chamber
ChamberComplex.chamberD_simplex chambercomplex_image
chambersubcomplex_image chamber_system_image
codomain.chamber_in_subcomplex codomain.chamber_system_def
by auto
lemma image_chamber_system_image:
"ChamberComplex.\<C> (f\<turnstile>X) = codomain.\<C> \<inter> (f\<turnstile>X)"
using image_chamber_system chamber_system_image by simp
lemma face_distance_eq_chamber_distance_map:
assumes "domain.chamber C" "domain.chamber D" "C\<noteq>D" "z\<subseteq>C"
"codomain.face_distance (f`z) (f`D) = domain.face_distance z D"
"domain.face_distance z D = domain.chamber_distance C D"
shows "codomain.face_distance (f`z) (f`D) =
codomain.chamber_distance (f`C) (f`D)"
using assms codomain.face_distance_le[of "f`C" "f`z" "f`D"] chamber_map
codomain.chamber_distance_le
gallery_betw_map[OF domain.gallery_least_length, of C D]
domain.chamber_distance_def
by force
lemma face_distance_eq_chamber_distance_min_gallery_betw_map:
assumes "domain.chamber C" "domain.chamber D" "C\<noteq>D" "z\<subseteq>C"
"codomain.face_distance (f`z) (f`D) = domain.face_distance z D"
"domain.face_distance z D = domain.chamber_distance C D"
"domain.min_gallery (C#Cs@[D])"
shows "codomain.min_gallery (f\<Turnstile>(C#Cs@[D]))"
using assms face_distance_eq_chamber_distance_map[of C D z]
gallery_map[OF domain.min_galleryD_gallery, OF assms(7)]
domain.min_gallery_betw_chamber_distance[OF assms(7)]
codomain.min_galleryI_chamber_distance_betw[of "f`C" "f\<Turnstile>Cs" "f`D"]
by auto
end (* context ChamberComplexMorphism *)
subsubsection \<open>Isomorphisms\<close>
locale ChamberComplexIsomorphism = ChamberComplexMorphism X Y f
for X :: "'a set set"
and Y :: "'b set set"
and f :: "'a\<Rightarrow>'b"
+ assumes bij_betw_vertices: "bij_betw f (\<Union>X) (\<Union>Y)"
and surj_simplex_map : "f\<turnstile>X = Y"
lemma (in ChamberComplexIsomorphism) inj: "inj_on f (\<Union>X)"
using bij_betw_vertices bij_betw_def by fast
sublocale ChamberComplexIsomorphism < SimplicialComplexIsomorphism
using inj by (unfold_locales) fast
lemma (in ChamberComplex) trivial_isomorphism:
"ChamberComplexIsomorphism X X id"
using trivial_morphism bij_betw_id
by unfold_locales (auto intro: ChamberComplexIsomorphism.intro)
lemma (in ChamberComplexMorphism) isoI_inverse:
assumes "ChamberComplexMorphism Y X g"
"fixespointwise (g\<circ>f) (\<Union>X)" "fixespointwise (f\<circ>g) (\<Union>Y)"
shows "ChamberComplexIsomorphism X Y f"
proof (rule ChamberComplexIsomorphism.intro)
show "ChamberComplexMorphism X Y f" ..
show "ChamberComplexIsomorphism_axioms X Y f"
proof
from assms show "bij_betw f (\<Union>X) (\<Union>Y)"
using vertex_map ChamberComplexMorphism.vertex_map
comps_fixpointwise_imp_bij_betw[of f "\<Union>X" "\<Union>Y" g]
by fast
show "f\<turnstile>X = Y"
proof (rule order.antisym, rule simplices_map, rule subsetI)
fix y assume "y\<in>Y"
moreover hence "(f\<circ>g) ` y \<in> f\<turnstile>X"
using ChamberComplexMorphism.simplex_map[OF assms(1)]
by (simp add: image_comp[THEN sym])
ultimately show "y \<in> f\<turnstile>X"
using fixespointwise_subset[OF assms(3), of y] fixespointwise_im by fastforce
qed
qed
qed
context ChamberComplexIsomorphism
begin
lemmas domain_complex = domain_complex
lemmas chamber_map = chamber_map
lemmas dim_map = dim_map
lemmas gallery_map = gallery_map
lemmas simplex_map = simplex_map
lemmas chamber_preimage = chamber_preimage
lemma chamber_morphism: "ChamberComplexMorphism X Y f" ..
lemma pgallery_map: "domain.pgallery Cs \<Longrightarrow> codomain.pgallery (f\<Turnstile>Cs)"
using pmaxsimpchain_map surj_simplex_map by simp
lemma iso_cong:
assumes "fun_eq_on g f (\<Union>X)"
shows "ChamberComplexIsomorphism X Y g"
proof (
rule ChamberComplexIsomorphism.intro, rule cong, rule assms,
unfold_locales
)
from assms show "bij_betw g (\<Union>X) (\<Union>Y)"
using bij_betw_vertices fun_eq_on_bij_betw by blast
show "g \<turnstile> X = Y" using setsetmapim_cong[OF assms] surj_simplex_map by simp
qed
lemma iso_comp:
assumes "ChamberComplexIsomorphism Y Z g"
shows "ChamberComplexIsomorphism X Z (g\<circ>f)"
by (
rule ChamberComplexIsomorphism.intro, rule comp,
rule ChamberComplexIsomorphism.axioms(1),
rule assms, unfold_locales, rule bij_betw_trans,
rule bij_betw_vertices,
rule ChamberComplexIsomorphism.bij_betw_vertices,
rule assms
)
(simp add:
setsetmapim_comp surj_simplex_map assms
ChamberComplexIsomorphism.surj_simplex_map
)
lemma inj_on_chamber_system: "inj_on ((`) f) domain.\<C>"
proof (rule inj_onI)
fix C D show "\<lbrakk> C \<in> domain.\<C>; D \<in> domain.\<C>; f`C = f`D \<rbrakk> \<Longrightarrow> C=D"
using domain.chamber_system_def domain.chamber_pconnect[of C D]
pgallery_map codomain.pgalleryD_distinct
by fastforce
qed
lemma inv: "ChamberComplexIsomorphism Y X (the_inv_into (\<Union>X) f)"
proof
show "bij_betw (the_inv_into (\<Union>X) f) (\<Union>Y) (\<Union>X)"
using bij_betw_vertices bij_betw_the_inv_into by fast
show 4: "(the_inv_into (\<Union>X) f) \<turnstile> Y = X"
using bij_betw_imp_inj_on[OF bij_betw_vertices] surj_simplex_map
setsetmapim_the_inv_into
by force
next
fix C assume C: "codomain.chamber C"
hence C': "C\<in>f\<turnstile>X" using codomain.chamberD_simplex surj_simplex_map by fast
show "domain.chamber (the_inv_into (\<Union>X) f ` C)"
proof (rule domain.chamberI)
from C' obtain D where "D\<in>X" "the_inv_into (\<Union>X) f ` C = D"
using the_inv_into_f_im_f_im[OF inj] by blast
thus "the_inv_into (\<Union>X) f ` C \<in> X" by simp
fix z assume z: "z\<in>X" "the_inv_into (\<Union>X) f ` C \<subseteq> z"
with C have "f`z = C"
using C' f_im_the_inv_into_f_im[OF inj, of C] surj_simplex_map
codomain.chamberD_maximal[of C "f`z"]
by blast
with z(1) show "z = the_inv_into (\<Union>X) f ` C"
using the_inv_into_f_im_f_im[OF inj] by auto
qed
from C show "card (the_inv_into (\<Union>X) f ` C) = card C"
using C' codomain.finite_chamber
subset_inj_on[OF inj_on_the_inv_into, OF inj, of C]
by (fast intro: inj_on_iff_eq_card[THEN iffD1])
qed
lemma chamber_distance_map:
assumes "domain.chamber C" "domain.chamber D"
shows "codomain.chamber_distance (f`C) (f`D) =
domain.chamber_distance C D"
proof (cases "f`C=f`D")
case True
moreover with assms have "C=D"
using inj_onD[OF inj_on_chamber_system] domain.chamber_system_def
by simp
ultimately show ?thesis
using domain.chamber_distance_def codomain.chamber_distance_def by simp
next
case False
define Cs Ds where "Cs = (ARG_MIN length Cs. domain.gallery (C#Cs@[D]))"
and "Ds = (ARG_MIN length Ds. codomain.gallery (f`C # Ds @ [f`D]))"
from assms False Cs_def have "codomain.gallery (f`C # f\<Turnstile>Cs @ [f`D])"
using gallery_map domain.maxsimp_connect[of C D]
arg_min_natI[of "\<lambda>Cs. domain.gallery (C#Cs@[D])"]
by fastforce
moreover from assms Cs_def
have "\<And>Es. codomain.gallery (f`C # Es @ [f`D]) \<Longrightarrow>
length (f\<Turnstile>Cs) \<le> length Es"
using ChamberComplexIsomorphism.gallery_map[OF inv]
the_inv_into_f_im_f_im[OF inj, of C] the_inv_into_f_im_f_im[OF inj, of D]
domain.chamberD_simplex[of C] domain.chamberD_simplex[of D]
domain.maxsimp_connect[of C D]
arg_min_nat_le[of "\<lambda>Cs. domain.gallery (C#Cs@[D])" _ length]
by force
ultimately have "length Ds = length (f\<Turnstile>Cs)"
unfolding Ds_def by (fast intro: arg_min_equality)
with False Cs_def Ds_def show ?thesis
using domain.chamber_distance_def codomain.chamber_distance_def by auto
qed
lemma face_distance_map:
assumes "domain.chamber C" "F\<in>X"
shows "codomain.face_distance (f`F) (f`C) = domain.face_distance F C"
proof-
define D D' invf where "D = domain.closest_supchamber F C"
and "D' = codomain.closest_supchamber (f`F) (f`C)"
and "invf = the_inv_into (\<Union>X) f"
from assms D_def D'_def invf_def have chambers:
"codomain.chamber (f`C)" "domain.chamber D" "codomain.chamber D'"
"codomain.chamber (f`D)" "domain.chamber (invf`D')"
using domain.closest_supchamberD(1) simplex_map
codomain.closest_supchamberD(1) chamber_map
ChamberComplexIsomorphism.chamber_map[OF inv]
by auto
have "codomain.chamber_distance D' (f`C) \<le> domain.chamber_distance D C"
proof-
from assms D_def D'_def
have "codomain.chamber_distance D' (f`C) \<le>
codomain.chamber_distance (f`D) (f`C)"
using chambers(4) domain.closest_supchamberD(2)
codomain.closest_supchamber_def
by (fastforce intro: arg_min_nat_le)
with assms D_def D'_def show ?thesis
using chambers(2) chamber_distance_map by simp
qed
moreover
have "domain.chamber_distance D C \<le> codomain.chamber_distance D' (f`C)"
proof-
from assms D'_def have "invf`f`F \<subseteq> invf`D'"
using chambers(1) simplex_map codomain.closest_supchamberD(2) by fast
with assms(2) invf_def have "F \<subseteq> invf`D'"
using the_inv_into_f_im_f_im[OF inj, of F] by fastforce
with D_def
have "domain.chamber_distance D C \<le>
domain.chamber_distance (invf ` D') C"
using chambers(5) domain.closest_supchamber_def
by (auto intro: arg_min_nat_le)
with assms(1) invf_def show ?thesis
using chambers(3,5) surj_simplex_map codomain.chamberD_simplex
f_im_the_inv_into_f_im[OF inj, of D']
chamber_distance_map[of "invf`D'" C]
by fastforce
qed
ultimately show ?thesis
using D_def D'_def domain.face_distance_def codomain.face_distance_def
by simp
qed
end (* context ChamberComplexIsomorphism *)
subsubsection \<open>Endomorphisms\<close>
locale ChamberComplexEndomorphism = ChamberComplexMorphism X X f
for X :: "'a set set"
and f :: "'a\<Rightarrow>'a"
+ assumes trivial_outside : "v\<notin>\<Union>X \<Longrightarrow> f v = v"
\<comment> \<open>to facilitate uniqueness arguments\<close>
lemma (in ChamberComplex) trivial_endomorphism:
"ChamberComplexEndomorphism X id"
by (
rule ChamberComplexEndomorphism.intro, rule trivial_morphism,
unfold_locales
)
simp
context ChamberComplexEndomorphism
begin
abbreviation "ChamberSubcomplex \<equiv> domain.ChamberSubcomplex"
abbreviation "Subcomplex \<equiv> domain.Subcomplex"
abbreviation "chamber \<equiv> domain.chamber"
abbreviation "gallery \<equiv> domain.gallery"
abbreviation "\<C> \<equiv> domain.chamber_system"
abbreviation "label_wrt \<equiv> domain.label_wrt"
lemmas dim_map = dim_map
lemmas simplex_map = simplex_map
lemmas vertex_map = vertex_map
lemmas chamber_map = chamber_map
lemmas adj_map = adj_map
lemmas facet_map = facet_map
lemmas bij_betw_chambers = bij_betw_chambers
lemmas chamber_system_into = chamber_system_into
lemmas chamber_system_image = chamber_system_image
lemmas image_chamber_system = image_chamber_system
lemmas chambercomplex_image = chambercomplex_image
lemmas chambersubcomplex_image = chambersubcomplex_image
lemmas face_distance_eq_chamber_distance_map =
face_distance_eq_chamber_distance_map
lemmas face_distance_eq_chamber_distance_min_gallery_betw_map =
face_distance_eq_chamber_distance_min_gallery_betw_map
lemmas facedist_chdist_mingal_btwmap =
face_distance_eq_chamber_distance_min_gallery_betw_map
lemmas trivial_endomorphism = domain.trivial_endomorphism
lemmas finite_simplices = domain.finite_simplices
lemmas faces = domain.faces
lemmas maxsimp_connect = domain.maxsimp_connect
lemmas simplex_in_max = domain.simplex_in_max
lemmas chamberD_simplex = domain.chamberD_simplex
lemmas chamber_system_def = domain.chamber_system_def
lemmas chamber_system_simplices = domain.chamber_system_simplices
lemmas galleryD_chamber = domain.galleryD_chamber
lemmas galleryD_adj = domain.galleryD_adj
lemmas gallery_append_reduce1 = domain.gallery_append_reduce1
lemmas gallery_Cons_reduce = domain.gallery_Cons_reduce
lemmas gallery_chamber_system = domain.gallery_chamber_system
lemmas label_wrtD = domain.label_wrtD
lemmas label_wrt_adjacent = domain.label_wrt_adjacent
lemma endo_comp:
assumes "ChamberComplexEndomorphism X g"
shows "ChamberComplexEndomorphism X (g\<circ>f)"
proof (rule ChamberComplexEndomorphism.intro)
from assms show "ChamberComplexMorphism X X (g\<circ>f)"
using comp ChamberComplexEndomorphism.axioms by fast
from assms show "ChamberComplexEndomorphism_axioms X (g\<circ>f)"
using trivial_outside ChamberComplexEndomorphism.trivial_outside
by unfold_locales auto
qed
lemma funpower_endomorphism:
"ChamberComplexEndomorphism X (f^^n)"
proof (induct n)
case 0 show ?case using trivial_endomorphism subst[of id] by fastforce
next
case (Suc m)
hence "ChamberComplexEndomorphism X (f^^m \<circ> f)"
using endo_comp by auto
moreover have "f^^m \<circ> f = f^^(Suc m)"
by (simp add: funpow_Suc_right[THEN sym])
ultimately show ?case
using subst[of _ _ "\<lambda>f. ChamberComplexEndomorphism X f"] by fast
qed
end (* context ChamberComplexEndomorphism *)
lemma (in ChamberComplex) fold_chamber_complex_endomorph_list:
"\<forall>x\<in>set xs. ChamberComplexEndomorphism X (f x) \<Longrightarrow>
ChamberComplexEndomorphism X (fold f xs)"
proof (induct xs)
case Nil show ?case using trivial_endomorphism subst[of id] by fastforce
next
case (Cons x xs)
hence "ChamberComplexEndomorphism X (fold f xs \<circ> f x)"
using ChamberComplexEndomorphism.endo_comp by auto
moreover have "fold f xs \<circ> f x = fold f (x#xs)" by simp
ultimately show ?case
using subst[of _ _ "\<lambda>f. ChamberComplexEndomorphism X f"] by fast
qed
context ChamberComplexEndomorphism
begin
lemma split_gallery:
"\<lbrakk> C\<in>f\<turnstile>\<C>; D\<in>\<C>-f\<turnstile>\<C>; gallery (C#Cs@[D]) \<rbrakk> \<Longrightarrow>
\<exists>As A B Bs. A\<in>f\<turnstile>\<C> \<and> B\<in>\<C>-f\<turnstile>\<C> \<and> C#Cs@[D] = As@A#B#Bs"
proof (induct Cs arbitrary: C)
case Nil
define As :: "'a set list" where "As = []"
hence "C#[]@[D] = As@C#D#As" by simp
with Nil(1,2) show ?case by auto
next
case (Cons E Es)
show ?case
proof (cases "E\<in>f\<turnstile>\<C>")
case True
from Cons(4) have "gallery (E#Es@[D])"
using gallery_Cons_reduce by simp
with True obtain As A B Bs
where 1: "A\<in>f\<turnstile>\<C>" "B\<in>\<C>-f\<turnstile>\<C>" "E#Es@[D] = As@A#B#Bs"
using Cons(1)[of E] Cons(3)
by blast
from 1(3) have "C#(E#Es)@[D] = (C#As)@A#B#Bs" by simp
with 1(1,2) show ?thesis by blast
next
case False
hence "E\<in>\<C>-f\<turnstile>\<C>" using gallery_chamber_system[OF Cons(4)] by simp
moreover have "C#(E#Es)@[D] = []@C#E#(Es@[D])" by simp
ultimately show ?thesis using Cons(2) by blast
qed
qed
lemma respects_labels_adjacent:
assumes "label_wrt B \<phi>" "chamber C" "chamber D" "C\<sim>D" "\<forall>v\<in>C. \<phi> (f v) = \<phi> v"
shows "\<forall>v\<in>D. \<phi> (f v) = \<phi> v"
proof (cases "C=D")
case False have CD: "C\<noteq>D" by fact
with assms(4) obtain w where w: "w\<notin>D" "C = insert w (C\<inter>D)"
using adjacent_int_decomp by fast
with assms(2) have fC: "f w \<notin> f`(C\<inter>D)" "f`C = insert (f w) (f`(C\<inter>D))"
using chamber_vertex_outside_facet_image[of w "C\<inter>D"] by auto
show ?thesis
proof
fix v assume v: "v\<in>D"
show "\<phi> (f v) = \<phi> v"
proof (cases "v\<in>C")
case False
with assms(3,4) v have fD: "f v \<notin> f`(D\<inter>C)" "f`D = insert (f v) (f`(D\<inter>C))"
using adjacent_sym[of C D] adjacent_conv_insert[of D C v]
chamber_vertex_outside_facet_image[of v "D\<inter>C"]
by auto
have "\<phi> (f v) = \<phi> (f w)"
proof (cases "f`C=f`D")
case True
with fC fD have "f v = f w" by (auto simp add: Int_commute)
thus ?thesis by simp
next
case False
from assms(2-4) have "chamber (f`C)" "chamber (f`D)" and fCfD: "f`C\<sim>f`D"
using chamber_map adj_map by auto
moreover from assms(4) fC fCfD False have "f w \<in> f`C - f`D"
using adjacent_to_adjacent_int[of C D f] by auto
ultimately show ?thesis
using assms(4) fD fCfD False adjacent_sym
adjacent_to_adjacent_int[of D C f]
label_wrt_adjacent[OF assms(1), of "f`C" "f`D" "f w" "f v", THEN sym]
by auto
qed
with False v w assms(5) show ?thesis
using label_wrt_adjacent[OF assms(1-4), of w v, THEN sym] by fastforce
qed (simp add: assms(5))
qed
qed (simp add: assms(5))
lemma respects_labels_gallery:
assumes "label_wrt B \<phi>" "\<forall>v\<in>C. \<phi> (f v) = \<phi> v"
shows "gallery (C#Cs@[D]) \<Longrightarrow> \<forall>v\<in>D. \<phi> (f v) = \<phi> v"
proof (induct Cs arbitrary: D rule: rev_induct)
case Nil with assms(2) show ?case
using galleryD_chamber galleryD_adj
respects_labels_adjacent[OF assms(1), of C D]
by force
next
case (snoc E Es)
with assms(2) show ?case
using gallery_append_reduce1[of "C#Es@[E]"] galleryD_chamber galleryD_adj
binrelchain_append_reduce2[of adjacent "C#Es" "[E,D]"]
respects_labels_adjacent[OF assms(1), of E D]
by force
qed
lemma respect_label_fix_chamber_imp_fun_eq_on:
assumes label : "label_wrt B \<phi>"
and chamber: "chamber C" "f`C = g`C"
and respect: "\<forall>v\<in>C. \<phi> (f v) = \<phi> v" "\<forall>v\<in>C. \<phi> (g v) = \<phi> v"
shows "fun_eq_on f g C"
proof (rule fun_eq_onI)
fix v assume "v\<in>C"
moreover with respect have "\<phi> (f v) = \<phi> (g v)" by simp
ultimately show "f v = g v"
using label chamber chamber_map chamber_system_def label_wrtD[of B \<phi> "f`C"]
bij_betw_imp_inj_on[of \<phi>] inj_onD
by fastforce
qed
lemmas respects_label_fixes_chamber_imp_fixespointwise =
respect_label_fix_chamber_imp_fun_eq_on[of _ _ _ id, simplified]
end (* context ChamberComplexEndomorphism *)
subsubsection \<open>Automorphisms\<close>
locale ChamberComplexAutomorphism = ChamberComplexIsomorphism X X f
for X :: "'a set set"
and f :: "'a\<Rightarrow>'a"
+ assumes trivial_outside : "v\<notin>\<Union>X \<Longrightarrow> f v = v"
\<comment> \<open>to facilitate uniqueness arguments\<close>
sublocale ChamberComplexAutomorphism < ChamberComplexEndomorphism
using trivial_outside by unfold_locales fast
lemma (in ChamberComplex) trivial_automorphism:
"ChamberComplexAutomorphism X id"
using trivial_isomorphism
by unfold_locales (auto intro: ChamberComplexAutomorphism.intro)
context ChamberComplexAutomorphism
begin
lemmas facet_map = facet_map
lemmas chamber_map = chamber_map
lemmas chamber_morphism = chamber_morphism
lemmas bij_betw_vertices = bij_betw_vertices
lemmas surj_simplex_map = surj_simplex_map
lemma bij: "bij f"
proof (rule bijI)
show "inj f"
proof (rule injI)
fix x y assume "f x = f y" thus "x = y"
using bij_betw_imp_inj_on[OF bij_betw_vertices] inj_onD[of f "\<Union>X" x y]
vertex_map trivial_outside
by (cases "x\<in>\<Union>X" "y\<in>\<Union>X" rule: two_cases) auto
qed
show "surj f" unfolding surj_def
proof
fix y show "\<exists>x. y = f x"
using bij_betw_imp_surj_on[OF bij_betw_vertices]
trivial_outside[THEN sym, of y]
by (cases "y\<in>\<Union>X") auto
qed
qed
lemma comp:
assumes "ChamberComplexAutomorphism X g"
shows "ChamberComplexAutomorphism X (g\<circ>f)"
proof (
rule ChamberComplexAutomorphism.intro,
rule ChamberComplexIsomorphism.intro,
rule ChamberComplexMorphism.comp
)
from assms show "ChamberComplexMorphism X X g"
using ChamberComplexAutomorphism.chamber_morphism by fast
show "ChamberComplexIsomorphism_axioms X X (g \<circ> f)"
proof
from assms show "bij_betw (g\<circ>f) (\<Union>X) (\<Union>X)"
using bij_betw_vertices ChamberComplexAutomorphism.bij_betw_vertices
bij_betw_trans
by fast
from assms show "(g\<circ>f) \<turnstile> X = X"
using surj_simplex_map ChamberComplexAutomorphism.surj_simplex_map
by (force simp add: setsetmapim_comp)
qed
show "ChamberComplexAutomorphism_axioms X (g \<circ> f)"
using trivial_outside ChamberComplexAutomorphism.trivial_outside[OF assms]
by unfold_locales auto
qed unfold_locales
lemma equality:
assumes "ChamberComplexAutomorphism X g" "fun_eq_on f g (\<Union>X)"
shows "f = g"
proof
fix x show "f x = g x"
using trivial_outside fun_eq_onD[OF assms(2)]
ChamberComplexAutomorphism.trivial_outside[OF assms(1)]
by force
qed
end (* context ChamberComplexAutomorphism *)
subsubsection \<open>Retractions\<close>
text \<open>A retraction of a chamber complex is an endomorphism that is the identity on its image.\<close>
locale ChamberComplexRetraction = ChamberComplexEndomorphism X f
for X :: "'a set set"
and f :: "'a\<Rightarrow>'a"
+ assumes retraction: "v\<in>\<Union>X \<Longrightarrow> f (f v) = f v"
begin
lemmas simplex_map = simplex_map
lemmas chamber_map = chamber_map
lemmas gallery_map = gallery_map
lemma vertex_retraction: "v\<in>f`(\<Union>X) \<Longrightarrow> f v = v"
using retraction by fast
lemma simplex_retraction1: "x\<in>f\<turnstile>X \<Longrightarrow> fixespointwise f x"
using retraction fixespointwiseI[of x f] by auto
lemma simplex_retraction2: "x\<in>f\<turnstile>X \<Longrightarrow> f`x = x"
using retraction retraction[THEN sym] by blast
lemma chamber_retraction1: "C\<in>f\<turnstile>\<C> \<Longrightarrow> fixespointwise f C"
using chamber_system_simplices simplex_retraction1 by auto
lemma chamber_retraction2: "C\<in>f\<turnstile>\<C> \<Longrightarrow> f`C = C"
using chamber_system_simplices simplex_retraction2[of C] by auto
lemma respects_labels:
assumes "label_wrt B \<phi>" "v\<in>(\<Union>X)"
shows "\<phi> (f v) = \<phi> v"
proof-
from assms(2) obtain C where "chamber C" "v\<in>C" using simplex_in_max by fast
thus ?thesis
using chamber_retraction1[of C] chamber_system_def chamber_map
maxsimp_connect[of "f`C" C] chamber_retraction1[of "f`C"]
respects_labels_gallery[OF assms(1), THEN bspec, of "f`C" _ C v]
by (force simp add: fixespointwiseD)
qed
end (* context ChamberComplexRetraction *)
subsubsection \<open>Foldings of chamber complexes\<close>
text \<open>
A folding of a chamber complex is a retraction that literally folds the complex in half, in that
each chamber in the image is the image of precisely two chambers: itself (since a folding is a
retraction) and a unique chamber outside the image.
\<close>
paragraph \<open>Locale definition\<close>
text \<open>
Here we define the locale and collect some lemmas inherited from the
@{const ChamberComplexRetraction} locale.
\<close>
locale ChamberComplexFolding = ChamberComplexRetraction X f
for X :: "'a set set"
and f :: "'a\<Rightarrow>'a"
+ assumes folding:
"chamber C \<Longrightarrow> C\<in>f\<turnstile>X \<Longrightarrow>
\<exists>!D. chamber D \<and> D\<notin>f\<turnstile>X \<and> f`D = C"
begin
lemmas folding_ex = ex1_implies_ex[OF folding]
lemmas chamber_system_into = chamber_system_into
lemmas gallery_map = gallery_map
lemmas chamber_retraction1 = chamber_retraction1
lemmas chamber_retraction2 = chamber_retraction2
end (* context ChamberComplexFolding *)
paragraph \<open>Decomposition into half chamber systems and half apartments\<close>
text \<open>
Here we describe how a folding splits the chamber system of the complex into its image and the
complement of its image. The chamber subcomplex consisting of all simplices contained in a
chamber of a given half of the chamber system is called a half-apartment.
\<close>
context ChamberComplexFolding
begin
definition opp_half_apartment :: "'a set set"
where "opp_half_apartment \<equiv> {x\<in>X. \<exists>C\<in>\<C>-f\<turnstile>\<C>. x\<subseteq>C}"
abbreviation "Y \<equiv> opp_half_apartment"
lemma opp_half_apartment_subset_complex: "Y\<subseteq>X"
using opp_half_apartment_def by fast
lemma simplicialcomplex_opp_half_apartment: "SimplicialComplex Y"
proof
show "\<forall>x\<in>Y. finite x"
using opp_half_apartment_subset_complex finite_simplices by fast
next
fix x y assume "x\<in>Y" "y\<subseteq>x" thus "y\<in>Y"
using opp_half_apartment_subset_complex faces[of x y]
unfolding opp_half_apartment_def
by auto
qed
lemma subcomplex_opp_half_apartment: "Subcomplex Y"
using opp_half_apartment_subset_complex simplicialcomplex_opp_half_apartment
by fast
lemma opp_half_apartmentI: "\<lbrakk> x\<in>X; C\<in>\<C>-f\<turnstile>\<C>; x\<subseteq>C \<rbrakk> \<Longrightarrow> x\<in>Y"
using opp_half_apartment_def by auto
lemma opp_chambers_subset_opp_half_apartment: "\<C>-f\<turnstile>\<C> \<subseteq> Y"
proof
fix C assume "C \<in> \<C>-f\<turnstile>\<C>"
thus "C \<in> Y" using chamber_system_simplices opp_half_apartmentI by auto
qed
end (* context ChamberComplexFolding *)
paragraph \<open>Mapping between half chamber systems for foldings\<close>
text \<open>
Since each chamber in the image of the folding is the image of a unique chamber in the complement
of the image, we obtain well-defined functions from one half chamber system to the other.
\<close>
context ChamberComplexFolding
begin
abbreviation "opp_chamber C \<equiv> THE D. D\<in>\<C>-f\<turnstile>\<C> \<and> f`D = C"
abbreviation "flop C \<equiv> if C \<in> f\<turnstile>\<C> then opp_chamber C else f`C"
lemma inj_on_opp_chambers':
assumes "chamber C" "C\<notin>f\<turnstile>X" "chamber D" "D\<notin>f\<turnstile>X" "f`C = f`D"
shows "C=D"
proof-
from assms(1) folding have ex1: "\<exists>!B. chamber B \<and> B\<notin>f\<turnstile>X \<and> f`B = f`C"
using chamberD_simplex chamber_map by auto
from assms show ?thesis using ex1_unique[OF ex1, of C D] by blast
qed
lemma inj_on_opp_chambers'':
"\<lbrakk> C \<in> \<C>-f\<turnstile>\<C>; D \<in> \<C>-f\<turnstile>\<C>; f`C = f`D \<rbrakk> \<Longrightarrow> C=D"
using chamber_system_def chamber_system_image inj_on_opp_chambers' by auto
lemma inj_on_opp_chambers: "inj_on ((`) f) (\<C>-f\<turnstile>\<C>)"
using inj_on_opp_chambers'' inj_onI[of "\<C>-f\<turnstile>\<C>" "(`) f"] by fast
lemma opp_chambers_surj: "f\<turnstile>(\<C>-(f\<turnstile>\<C>)) = f\<turnstile>\<C>"
proof (rule seteqI)
fix D assume D: "D \<in> f\<turnstile>\<C>"
from this obtain B where "chamber B" "B\<notin>f\<turnstile>X" "f`B = D"
using chamber_system_def chamber_map chamberD_simplex folding_ex[of D]
by auto
thus "D \<in> f\<turnstile>(\<C> - f\<turnstile>\<C>)"
using chamber_system_image chamber_system_def by auto
qed fast
lemma opp_chambers_bij: "bij_betw ((`) f) (\<C>-(f\<turnstile>\<C>)) (f\<turnstile>\<C>)"
using inj_on_opp_chambers opp_chambers_surj bij_betw_def[of "(`) f"] by auto
lemma folding':
assumes "C\<in>f\<turnstile>\<C>"
shows "\<exists>!D\<in>\<C>-f\<turnstile>\<C>. f`D = C"
proof (rule ex_ex1I)
from assms show "\<exists>D. D \<in> \<C>-f\<turnstile>\<C> \<and> f`D = C"
using chamber_system_image chamber_system_def folding_ex[of C] by auto
next
fix B D assume "B \<in> \<C>-f\<turnstile>\<C> \<and> f`B = C" "D \<in> \<C>-f\<turnstile>\<C> \<and> f`D = C"
with assms show "B=D"
using chamber_system_def chamber_system_image chamber_map
chamberD_simplex ex1_unique[OF folding, of C B D]
by auto
qed
lemma opp_chambers_distinct_map:
"set Cs \<subseteq> \<C>-f\<turnstile>\<C> \<Longrightarrow> distinct Cs \<Longrightarrow> distinct (f\<Turnstile>Cs)"
using distinct_map subset_inj_on[OF inj_on_opp_chambers] by auto
lemma opp_chamberD1: "C\<in>f\<turnstile>\<C> \<Longrightarrow> opp_chamber C \<in> \<C>-f\<turnstile>\<C>"
using theI'[OF folding'] by simp
lemma opp_chamberD2: "C\<in>f\<turnstile>\<C> \<Longrightarrow> f`(opp_chamber C) = C"
using theI'[OF folding'] by simp
lemma opp_chamber_reverse: "C\<in>\<C>-f\<turnstile>\<C> \<Longrightarrow> opp_chamber (f`C) = C"
using the1_equality[OF folding'] by simp
lemma f_opp_chamber_list:
"set Cs \<subseteq> f\<turnstile>\<C> \<Longrightarrow> f\<Turnstile>(map opp_chamber Cs) = Cs"
using opp_chamberD2 by (induct Cs) auto
lemma flop_chamber: "chamber C \<Longrightarrow> chamber (flop C)"
using chamber_map opp_chamberD1 chamber_system_def by auto
end (* context ChamberComplexFolding *)
subsection \<open>Thin chamber complexes\<close>
text \<open>
A thin chamber complex is one in which every facet is a facet of exactly two chambers. Slightly
more generally, we first consider the case of a chamber complex in which every facet is a facet
of at most two chambers. One of the main results obtained at this point is the so-called standard
uniqueness argument, which essentially states that two chamber complex morphisms into a thin
chamber complex that agree on a particular chamber must in fact agree on the whole of their common
domain. Following that, foldings of thin chamber complexes are investigated. In particular, we are
interested in pairs of opposed foldings.
\<close>
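text \<open>
As an informal illustration (not formalised here): the complex on vertex set @{text "\<int>"} whose
maximal simplices are the pairs @{text "{n, n+1}"} is a thin chamber complex, since each facet,
a single vertex @{text "{n}"}, is a facet of exactly the two chambers @{text "{n-1, n}"} and
@{text "{n, n+1}"}.
\<close>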
subsubsection \<open>Locales and basic facts\<close>
locale ThinishChamberComplex = ChamberComplex X
for X :: "'a set set"
+ assumes thinish:
"\<lbrakk> chamber C; z\<lhd>C; \<exists>D\<in>X-{C}. z\<lhd>D \<rbrakk> \<Longrightarrow> \<exists>!D\<in>X-{C}. z\<lhd>D"
\<comment> \<open>being adjacent to a chamber, such a @{term D} would also be a chamber (see lemma
@{text "chamber_adj"})\<close>
begin
lemma facet_unique_other_chamber:
"\<lbrakk> chamber B; z\<lhd>B; chamber C; z\<lhd>C; chamber D; z\<lhd>D; C\<noteq>B; D\<noteq>B \<rbrakk>
\<Longrightarrow> C=D"
using chamberD_simplex bex1_equality[OF thinish, OF _ _ bexI, of B z C C D]
by auto
lemma finite_adjacentset:
assumes "chamber C"
shows "finite (adjacentset C)"
proof (cases "X = {{}}")
case True thus ?thesis using adjacentset_def by simp
next
case False
moreover have "finite (\<Union>v\<in>C. {D\<in>X. C-{v}\<lhd>D})"
proof
from assms show "finite C" using finite_chamber by simp
next
fix v assume "v\<in>C"
with assms have Cv: "C-{v}\<lhd>C"
using chamberD_simplex facetrel_diff_vertex by fast
with assms have C: "C\<in>{D\<in>X. C-{v}\<lhd>D}"
using chamberD_simplex by fast
show "finite {D\<in>X. C-{v}\<lhd>D}"
proof (cases "{D\<in>X. C-{v}\<lhd>D} - {C} = {}")
case True
hence 1: "{D\<in>X. C-{v}\<lhd>D} = {C}" using C by auto
show ?thesis using ssubst[OF 1, of finite] by simp
next
case False
from this obtain D where D: "D\<in>X-{C}" "C-{v}\<lhd>D" by fast
with assms have 2: "{D\<in>X. C-{v}\<lhd>D} \<subseteq> {C,D}"
using Cv chamber_shared_facet[of C] facet_unique_other_chamber[of C _ D]
by fastforce
show ?thesis using finite_subset[OF 2] by simp
qed
qed
ultimately show ?thesis
using assms adjacentset_conv_facetchambersets by simp
qed
lemma label_wrt_eq_on_adjacent_vertex:
fixes v v' :: 'a
and z z' :: "'a set"
defines D : "D \<equiv> insert v z"
and D': "D' \<equiv> insert v' z'"
assumes label : "label_wrt B f" "f v = f v'"
and chambers: "chamber C" "chamber D" "chamber D'" "z\<lhd>C" "z'\<lhd>C" "D\<noteq>C" "D'\<noteq>C"
shows "D = D'"
proof (
rule facet_unique_other_chamber, rule chambers(1), rule chambers(4),
rule chambers(2)
)
from D D' chambers(1-5) have z: "z\<lhd>D" and z': "z'\<lhd>D'"
using chambers_share_facet by auto
show "z\<lhd>D" by fact
from chambers(4,5) obtain w w'
where w : "w \<notin> z " "C = insert w z"
and w': "w'\<notin> z'" "C = insert w' z'"
unfolding facetrel_def
by fastforce
from w' D' chambers(1,3) have "f`z' = f`C - {f v'}"
using z' label_wrtD'[OF label(1), of C] bij_betw_imp_inj_on[of f C]
facetrel_complement_vertex[of z']
label_wrt_adjacent_shared_facet[OF label(1), of v']
by simp
moreover from w D chambers(1,2) have "f`z = f`C - {f v}"
using z label_wrtD'[OF label(1), of C] bij_betw_imp_inj_on[of f C]
facetrel_complement_vertex[of z]
label_wrt_adjacent_shared_facet[OF label(1), of v]
by simp
ultimately show "z\<lhd>D'"
using z' chambers(1,4,5) label(2) facetrel_subset
label_wrtD'[OF label(1), of C]
bij_betw_imp_inj_on[of f] inj_on_eq_image[of f C z' z]
by force
qed (rule chambers(3), rule chambers(6), rule chambers(7))
lemma face_distance_eq_chamber_distance_compare_other_chamber:
assumes "chamber C" "chamber D" "z\<lhd>C" "z\<lhd>D" "C\<noteq>D"
"chamber_distance C E \<le> chamber_distance D E"
shows "face_distance z E = chamber_distance C E"
unfolding face_distance_def closest_supchamber_def
proof (
rule arg_min_equality, rule conjI, rule assms(1), rule facetrel_subset,
rule assms(3)
)
from assms
show "\<And>B. chamber B \<and> z \<subseteq> B \<Longrightarrow>
chamber_distance C E \<le> chamber_distance B E"
using chamber_facet_is_chamber_facet facet_unique_other_chamber
by blast
qed
end (* context ThinishChamberComplex *)
locale ThinChamberComplex = ChamberComplex X
for X :: "'a set set"
+ assumes thin: "chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> \<exists>!D\<in>X-{C}. z\<lhd>D"
sublocale ThinChamberComplex < ThinishChamberComplex
using thin by unfold_locales simp
context ThinChamberComplex
begin
lemma thinish: "ThinishChamberComplex X" ..
lemmas face_distance_eq_chamber_distance_compare_other_chamber =
face_distance_eq_chamber_distance_compare_other_chamber
abbreviation "the_adj_chamber C z \<equiv> THE D. D\<in>X-{C} \<and> z \<lhd> D"
lemma the_adj_chamber_simplex:
"chamber C \<Longrightarrow> z \<lhd> C \<Longrightarrow> the_adj_chamber C z \<in> X"
using theI'[OF thin] by fast
lemma the_adj_chamber_facet: "chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> z \<lhd> the_adj_chamber C z"
using theI'[OF thin] by fast
lemma the_adj_chamber_is_adjacent:
"chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> C \<sim> the_adj_chamber C z"
using the_adj_chamber_facet by (auto intro: adjacentI)
lemma the_adj_chamber:
"chamber C \<Longrightarrow> z \<lhd> C \<Longrightarrow> chamber (the_adj_chamber C z)"
using the_adj_chamber_simplex the_adj_chamber_is_adjacent
by (fast intro: chamber_adj)
lemma the_adj_chamber_neq:
"chamber C \<Longrightarrow> z \<lhd> C \<Longrightarrow> the_adj_chamber C z \<noteq> C"
using theI'[OF thin] by fast
lemma the_adj_chamber_adjacentset:
"chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> the_adj_chamber C z \<in> adjacentset C"
using adjacentset_def the_adj_chamber_simplex the_adj_chamber_is_adjacent
by fast
end (* context ThinChamberComplex *)
lemmas (in ChamberComplexIsomorphism) thin_image_shared_facet =
thinish_image_shared_facet[OF _ _ _ _ _ ThinChamberComplex.thinish]
subsubsection \<open>The standard uniqueness argument for chamber morphisms of thin chamber complexes\<close>
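text \<open>
The argument proceeds in three steps. Lemma @{text standard_uniqueness_dbl} propagates agreement
of two chamber morphisms from a chamber to an adjacent chamber, using the uniqueness of the
chamber on the other side of a facet in the codomain to pin down the image of the new chamber.
Lemma @{text standard_uniqueness_pgallery_betw} iterates this step along a gallery, and lemma
@{text standard_uniqueness} then extends the agreement to every vertex of the domain by connecting
an arbitrary chamber to the given one by a gallery.
\<close>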
context ThinishChamberComplex
begin
lemma standard_uniqueness_dbl:
assumes morph : "ChamberComplexMorphism W X f"
"ChamberComplexMorphism W X g"
and chambers: "ChamberComplex.chamber W C"
"ChamberComplex.chamber W D"
"C\<sim>D" "f`D \<noteq> f`C" "g`D \<noteq> g`C" "chamber (g`D)"
and funeq : "fun_eq_on f g C"
shows "fun_eq_on f g D"
proof (rule fun_eq_onI)
fix v assume v: "v\<in>D"
show "f v = g v"
proof (cases "v\<in>C")
case True with funeq show ?thesis using fun_eq_onD by fast
next
case False
define F G where "F = f`C \<inter> f`D" and "G = g`C \<inter> g`D"
from morph(1) chambers(1-4) have 1: "f`C \<sim> f`D"
using ChamberComplexMorphism.adj_map' by fast
with F_def chambers(4) have F_facet: "F\<lhd>f`C" "F\<lhd>f`D"
using adjacent_int_facet1[of "f`C"] adjacent_int_facet2[of "f`C"] by auto
from F_def G_def chambers have "G = F"
using ChamberComplexMorphism.adj_map'[OF morph(2)]
adjacent_to_adjacent_int[of C D g] 1
adjacent_to_adjacent_int[of C D f] funeq fun_eq_on_im[of f g]
by force
with G_def morph(2) chambers have F_facet': "F\<lhd>g`D"
using ChamberComplexMorphism.adj_map' adjacent_int_facet2 by blast
with chambers(1,2,4,5) have 2: "g`D = f`D"
using ChamberComplexMorphism.chamber_map[OF morph(1)] F_facet
ChamberComplexMorphism.chamber_map[OF morph(2)]
fun_eq_on_im[OF funeq]
facet_unique_other_chamber[of "f`C" F "g`D" "f`D"]
by auto
from chambers(3) v False have 3: "D = insert v (D\<inter>C)"
using adjacent_sym adjacent_conv_insert by fast
from chambers(4) obtain w where w: "w \<notin> f`C" "w \<in> f`D"
using adjacent_int_decomp[OF adjacent_sym, OF 1] by blast
with 3 have "w = f v" by fast
moreover from 2 w(2) obtain v' where "v'\<in>D" "w = g v'" by auto
ultimately show ?thesis
using w(1) 3 funeq by (fastforce simp add: fun_eq_on_im)
qed
qed
lemma standard_uniqueness_pgallery_betw:
assumes morph : "ChamberComplexMorphism W X f"
"ChamberComplexMorphism W X g"
and chambers: "fun_eq_on f g C" "ChamberComplex.gallery W (C#Cs@[D])"
"pgallery (f\<Turnstile>(C#Cs@[D]))" "pgallery (g\<Turnstile>(C#Cs@[D]))"
shows "fun_eq_on f g D"
proof-
from morph(1) have W: "ChamberComplex W"
using ChamberComplexMorphism.domain_complex by fast
have "\<lbrakk> fun_eq_on f g C; ChamberComplex.gallery W (C#Cs@[D]);
pgallery (f\<Turnstile>(C#Cs@[D])); pgallery (g\<Turnstile>(C#Cs@[D])) \<rbrakk> \<Longrightarrow>
fun_eq_on f g D"
proof (induct Cs arbitrary: C)
case Nil from assms Nil(1) show ?case
using ChamberComplex.galleryD_chamber[OF W Nil(2)]
ChamberComplex.galleryD_adj[OF W Nil(2)]
pgalleryD_distinct[OF Nil(3)] pgalleryD_distinct[OF Nil(4)]
pgalleryD_chamber[OF Nil(4)] standard_uniqueness_dbl[of W f g C D]
by auto
next
case (Cons B Bs)
have "fun_eq_on f g B"
proof (rule standard_uniqueness_dbl, rule morph(1), rule morph(2))
show "ChamberComplex.chamber W C" "ChamberComplex.chamber W B" "C\<sim>B"
using ChamberComplex.galleryD_chamber[OF W Cons(3)]
ChamberComplex.galleryD_adj[OF W Cons(3)]
by auto
show "f`B \<noteq> f`C" using pgalleryD_distinct[OF Cons(4)] by fastforce
show "g`B \<noteq> g`C" using pgalleryD_distinct[OF Cons(5)] by fastforce
show "chamber (g`B)" using pgalleryD_chamber[OF Cons(5)] by fastforce
qed (rule Cons(2))
with Cons(1,3-5) show ?case
using ChamberComplex.gallery_Cons_reduce[OF W, of C "B#Bs@[D]"]
pgallery_Cons_reduce[of "f`C" "f\<Turnstile>(B#Bs@[D])"]
pgallery_Cons_reduce[of "g`C" "g\<Turnstile>(B#Bs@[D])"]
by force
qed
with chambers show ?thesis by simp
qed
lemma standard_uniqueness:
assumes morph : "ChamberComplexMorphism W X f"
"ChamberComplexMorphism W X g"
and chamber : "ChamberComplex.chamber W C" "fun_eq_on f g C"
and map_gals:
"\<And>Cs. ChamberComplex.min_gallery W (C#Cs) \<Longrightarrow> pgallery (f\<Turnstile>(C#Cs))"
"\<And>Cs. ChamberComplex.min_gallery W (C#Cs) \<Longrightarrow> pgallery (g\<Turnstile>(C#Cs))"
shows "fun_eq_on f g (\<Union>W)"
proof (rule fun_eq_onI)
from morph(1) have W: "ChamberComplex W"
using ChamberComplexMorphism.axioms(1) by fast
fix v assume "v \<in> \<Union>W"
from this obtain D where "ChamberComplex.chamber W D" "v\<in>D"
using ChamberComplex.simplex_in_max[OF W] by auto
moreover define Cs where "Cs = (ARG_MIN length Cs. ChamberComplex.gallery W (C#Cs@[D]))"
ultimately show "f v = g v"
using chamber map_gals[of "Cs@[D]"]
ChamberComplex.gallery_least_length[OF W]
ChamberComplex.min_gallery_least_length[OF W]
standard_uniqueness_pgallery_betw[OF morph(1,2) chamber(2), of Cs]
fun_eq_onD[of f g D]
by (cases "D=C") auto
qed
lemma standard_uniqueness_isomorphs:
assumes "ChamberComplexIsomorphism W X f"
"ChamberComplexIsomorphism W X g"
"ChamberComplex.chamber W C" "fun_eq_on f g C"
shows "fun_eq_on f g (\<Union>W)"
using assms ChamberComplexIsomorphism.chamber_morphism
ChamberComplexIsomorphism.domain_complex
ChamberComplex.min_gallery_pgallery
ChamberComplexIsomorphism.pgallery_map
by (blast intro: standard_uniqueness)
lemma standard_uniqueness_automorphs:
assumes "ChamberComplexAutomorphism X f"
"ChamberComplexAutomorphism X g"
"chamber C" "fun_eq_on f g C"
shows "f=g"
using assms ChamberComplexAutomorphism.equality
standard_uniqueness_isomorphs
ChamberComplexAutomorphism.axioms(1)
by blast
end (* context ThinishChamberComplex *)
context ThinChamberComplex
begin
lemmas standard_uniqueness = standard_uniqueness
lemmas standard_uniqueness_isomorphs = standard_uniqueness_isomorphs
lemmas standard_uniqueness_pgallery_betw = standard_uniqueness_pgallery_betw
end (* context ThinChamberComplex *)
subsection \<open>Foldings of thin chamber complexes\<close>
subsubsection \<open>Locale definition and basic facts\<close>
locale ThinishChamberComplexFolding =
ThinishChamberComplex X + folding: ChamberComplexFolding X f
for X :: "'a set set"
and f :: "'a\<Rightarrow>'a"
begin
abbreviation "opp_chamber \<equiv> folding.opp_chamber"
lemma adjacent_half_chamber_system_image:
assumes chambers: "C \<in> f\<turnstile>\<C>" "D \<in> \<C>-f\<turnstile>\<C>"
and adjacent: "C\<sim>D"
shows "f`D = C"
proof-
from adjacent obtain z where z: "z\<lhd>C" "z\<lhd>D" using adjacent_def by fast
moreover from z(1) chambers(1) have fz: "f`z = z"
using facetrel_subset[of z C] chamber_system_simplices
folding.simplicialcomplex_image
SimplicialComplex.faces[of "f\<turnstile>X" C z]
folding.simplex_retraction2[of z]
by auto
moreover from chambers have "f`D \<noteq> D" "C\<noteq>D" by auto
ultimately show ?thesis
using chambers chamber_system_def folding.chamber_map
folding.facet_map[of D z]
facet_unique_other_chamber[of D z "f`D" C]
by force
qed
lemma adjacent_half_chamber_system_image_reverse:
"\<lbrakk> C \<in> f\<turnstile>\<C>; D \<in> \<C>-f\<turnstile>\<C>; C\<sim>D \<rbrakk> \<Longrightarrow> opp_chamber C = D"
using adjacent_half_chamber_system_image[of C D]
the1_equality[OF folding.folding']
by fastforce
lemma chamber_image_closer:
assumes "D\<in>\<C>-f\<turnstile>\<C>" "B\<in>f\<turnstile>\<C>" "B\<noteq>f`D" "gallery (B#Ds@[D])"
shows "\<exists>Cs. gallery (B#Cs@[f`D]) \<and> length Cs < length Ds"
proof-
from assms(1,2,4) obtain As A E Es
where split: "A\<in>f\<turnstile>\<C>" "E\<in>\<C>-f\<turnstile>\<C>" "B#Ds@[D] = As@A#E#Es"
using folding.split_gallery[of B D Ds]
by blast
from assms(4) split(3) have "A\<sim>E"
using gallery_append_reduce2[of As "A#E#Es"] galleryD_adj[of "A#E#Es"]
by simp
with assms(2) split(1,2)
have fB: "f`B = B" and fA: "f`A = A" and fE: "f`E = A"
using folding.chamber_retraction2 adjacent_half_chamber_system_image[of A E]
by auto
show "\<exists>Cs. gallery (B#Cs@[f`D]) \<and> length Cs < length Ds"
proof (cases As)
case Nil have As: "As = []" by fact
show ?thesis
proof (cases Es rule: rev_cases)
case Nil with split(3) As assms(3) fE show ?thesis by simp
next
case (snoc Fs F)
with assms(4) split(3) As fE
have "Ds = E#Fs" "gallery (B # f\<Turnstile>Fs @ [f`D])"
using fB folding.gallery_map[of "B#E#Fs@[D]"] gallery_Cons_reduce
by auto
thus ?thesis by auto
qed
next
case (Cons H Hs)
show ?thesis
proof (cases Es rule: rev_cases)
case Nil
with assms(4) Cons split(3)
have decomp: "Ds = Hs@[A]" "D=E" "gallery (B#Hs@[A,D])"
by auto
from decomp(2,3) fB fA fE have "gallery (B # f\<Turnstile>Hs @ [f`D])"
using folding.gallery_map gallery_append_reduce1[of "B # f\<Turnstile>Hs @ [f`D]"]
by force
with decomp(1) show ?thesis by auto
next
case (snoc Fs F)
with split(3) Cons assms(4) fB fA fE
have decomp: "Ds = Hs@A#E#Fs" "gallery (B # f\<Turnstile>(Hs@A#Fs) @ [f`D])"
using folding.gallery_map[of "B#Hs@A#E#Fs@[D]"]
gallery_remdup_adj[of "B#f\<Turnstile>Hs" A "f\<Turnstile>Fs@[f`D]"]
by auto
from decomp(1) have "length (f\<Turnstile>(Hs@A#Fs)) < length Ds" by simp
with decomp(2) show ?thesis by blast
qed
qed
qed
lemma chamber_image_subset:
assumes D: "D\<in>\<C>-f\<turnstile>\<C>"
defines C: "C \<equiv> f`D"
defines "closerToC \<equiv> {B\<in>\<C>. chamber_distance B C < chamber_distance B D}"
shows "f\<turnstile>\<C> \<subseteq> closerToC"
proof
fix B assume B: "B\<in>f\<turnstile>\<C>"
hence B': "B\<in>\<C>" using folding.chamber_system_into by fast
show "B \<in> closerToC"
proof (cases "B=C")
case True with B D closerToC_def show ?thesis
using B' chamber_distance_def by auto
next
case False
define Ds where "Ds = (ARG_MIN length Ds. gallery (B#Ds@[D]))"
with B C D False closerToC_def show ?thesis
using chamber_system_def folding.chamber_map gallery_least_length[of B D]
chamber_image_closer[of D B Ds]
chamber_distance_le chamber_distance_def[of B D]
by fastforce
qed
qed
lemma gallery_double_cross_not_minimal_Cons1:
"\<lbrakk> B\<in>f\<turnstile>\<C>; C\<in>\<C>-f\<turnstile>\<C>; D\<in>f\<turnstile>\<C>; gallery (B#C#Cs@[D]) \<rbrakk> \<Longrightarrow>
\<not> min_gallery (B#C#Cs@[D])"
using galleryD_adj[of "B#C#Cs@[D]"]
adjacent_half_chamber_system_image[of B C]
folding.gallery_map[of "B#C#Cs@[D]"]
gallery_Cons_reduce[of B "B # f\<Turnstile>Cs @ [D]"]
is_arg_minD2[of length "(\<lambda>Ds. maxsimpchain (B#Ds@[D]))" _ "f\<Turnstile>Cs"]
min_maxsimpchain.simps(3)[of B "C#Cs" D]
by(simp add: folding.chamber_retraction2)(meson impossible_Cons not_less)
lemma gallery_double_cross_not_minimal1:
"\<lbrakk> B\<in>f\<turnstile>\<C>; C\<in>\<C>-f\<turnstile>\<C>; D\<in>f\<turnstile>\<C>; gallery (B#Bs@C#Cs@[D]) \<rbrakk> \<Longrightarrow>
\<not> min_gallery (B#Bs@C#Cs@[D])"
proof (induct Bs arbitrary: B)
case Nil thus ?case using gallery_double_cross_not_minimal_Cons1 by simp
next
case (Cons E Es)
show ?case
proof (cases "E\<in>f\<turnstile>\<C>")
case True
with Cons(1,3-5) show ?thesis
using gallery_Cons_reduce[of B "E#Es@C#Cs@[D]"]
min_gallery_betw_CCons_reduce[of B E "Es@C#Cs" D]
by auto
next
case False with Cons(2,4,5) show ?thesis
using gallery_chamber_system
gallery_double_cross_not_minimal_Cons1[of B E D "Es@C#Cs"]
by force
qed
qed
end (* ThinishChamberComplexFolding *)
locale ThinChamberComplexFolding =
ThinChamberComplex X + folding: ChamberComplexFolding X f
for X :: "'a set set"
and f :: "'a\<Rightarrow>'a"
sublocale ThinChamberComplexFolding < ThinishChamberComplexFolding ..
context ThinChamberComplexFolding
begin
abbreviation "flop \<equiv> folding.flop"
lemmas adjacent_half_chamber_system_image = adjacent_half_chamber_system_image
lemmas gallery_double_cross_not_minimal1 = gallery_double_cross_not_minimal1
lemmas gallery_double_cross_not_minimal_Cons1 =
gallery_double_cross_not_minimal_Cons1
lemma adjacent_preimage:
assumes chambers: "C \<in> \<C>-f\<turnstile>\<C>" "D \<in> \<C>-f\<turnstile>\<C>"
and adjacent: "f`C \<sim> f`D"
shows "C \<sim> D"
proof (cases "f`C=f`D")
case True
with chambers show "C \<sim> D"
using folding.inj_on_opp_chambers''[of C D] adjacent_refl[of C] by auto
next
case False
from chambers have CD: "chamber C" "chamber D"
using chamber_system_def by auto
hence ch_fCD: "chamber (f`C)" "chamber (f`D)"
using chamber_system_def folding.chamber_map by auto
from adjacent obtain z where z: "z \<lhd> f`C" "z \<lhd> f`D"
using adjacent_def by fast
from chambers(1) z(1) obtain y where y: "y \<lhd> C" "f`y = z"
using chamber_system_def folding.inj_on_chamber[of C]
inj_on_pullback_facet[of f C z]
by auto
define B where "B = the_adj_chamber C y"
with CD(1) y(1) have B: "chamber B" "y\<lhd>B" "B\<noteq>C"
using the_adj_chamber the_adj_chamber_facet the_adj_chamber_neq by auto
have "f`B \<noteq> f`C"
proof (cases "B \<in> f\<turnstile>\<C>")
case False with chambers(1) show ?thesis
using B(1,3) chamber_system_def folding.inj_on_opp_chambers''[of B]
by auto
next
case True show ?thesis
proof
assume fB_fC: "f`B = f`C"
with True have "B = f`C" using folding.chamber_retraction2 by auto
with z(1) y(2) B(2) chambers(1) have "y = z"
using facetrel_subset[of y B] chamber_system_def chamberD_simplex face_im
folding.simplex_retraction2[of y]
by force
with chambers y(1) z(2) have "f`D = B"
using CD(1) ch_fCD(2) B facet_unique_other_chamber[of C y] by auto
with z(2) chambers fB_fC False show False
using folding.chamber_retraction2 by force
qed
qed
with False z y(2) have fB_fD: "f`B = f`D"
using ch_fCD B(1,2) folding.chamber_map folding.facet_map
facet_unique_other_chamber[of "f`C" z]
by force
have "B = D"
proof (cases "B \<in> f\<turnstile>\<C>")
case False
with B(1) chambers(2) show ?thesis
using chamber_system_def fB_fD folding.inj_on_opp_chambers'' by simp
next
case True
with fB_fD have "B = f`D" using folding.chamber_retraction2 by auto
moreover with z(1) y(2) B(2) chambers(2) have "y = z"
using facetrel_subset[of y B] chamber_system_def chamberD_simplex face_im
folding.simplex_retraction2[of y]
by force
ultimately show ?thesis
using CD y(1) B ch_fCD(1) z(1) False chambers(1)
facet_unique_other_chamber[of B y C "f`C"]
by auto
qed
with y(1) B(2) show ?thesis using adjacentI by fast
qed
lemma adjacent_opp_chamber:
"\<lbrakk> C\<in>f\<turnstile>\<C>; D\<in>f\<turnstile>\<C>; C\<sim>D \<rbrakk> \<Longrightarrow> opp_chamber C \<sim> opp_chamber D"
using folding.opp_chamberD1 folding.opp_chamberD2 adjacent_preimage by simp
lemma adjacentchain_preimage:
"set Cs \<subseteq> \<C>-f\<turnstile>\<C> \<Longrightarrow> adjacentchain (f\<Turnstile>Cs) \<Longrightarrow> adjacentchain Cs"
using adjacent_preimage by (induct Cs rule: list_induct_CCons) auto
lemma gallery_preimage: "set Cs \<subseteq> \<C>-f\<turnstile>\<C> \<Longrightarrow> gallery (f\<Turnstile>Cs) \<Longrightarrow> gallery Cs"
using galleryD_adj adjacentchain_preimage chamber_system_def gallery_def
by fast
lemma chambercomplex_opp_half_apartment: "ChamberComplex folding.Y"
proof (intro_locales, rule folding.simplicialcomplex_opp_half_apartment, unfold_locales)
define Y where "Y = folding.Y"
fix y assume "y\<in>Y"
with Y_def obtain C where "C\<in>\<C>-f\<turnstile>\<C>" "y\<subseteq>C"
using folding.opp_half_apartment_def by auto
with Y_def show "\<exists>x. SimplicialComplex.maxsimp Y x \<and> y \<subseteq> x"
using folding.subcomplex_opp_half_apartment
folding.opp_chambers_subset_opp_half_apartment
chamber_system_def max_in_subcomplex[of Y]
by force
next
define Y where "Y = folding.Y"
fix C D
assume CD: "SimplicialComplex.maxsimp Y C" "SimplicialComplex.maxsimp Y D"
"C\<noteq>D"
from CD(1,2) Y_def have CD': "C \<in> \<C>-f\<turnstile>\<C>" "D \<in> \<C>-f\<turnstile>\<C>"
using folding.maxsimp_in_opp_half_apartment by auto
with CD(3) obtain Ds
where Ds: "ChamberComplex.gallery (f\<turnstile>X) ((f`C)#Ds@[f`D])"
using folding.inj_on_opp_chambers''[of C D] chamber_system_def
folding.maxsimp_map_into_image folding.chambercomplex_image
ChamberComplex.maxsimp_connect[of "f\<turnstile>X" "f`C" "f`D"]
by auto
define Cs where "Cs = map opp_chamber Ds"
from Ds have Ds': "gallery ((f`C)#Ds@[f`D])"
using folding.chambersubcomplex_image subcomplex_gallery by fast
with Ds have Ds'': "set Ds \<subseteq> f\<turnstile>\<C>"
using folding.chambercomplex_image folding.chamber_system_image
ChamberComplex.galleryD_chamber ChamberComplex.chamberD_simplex
gallery_chamber_system
by fastforce
have *: "set Cs \<subseteq> \<C>-f\<turnstile>\<C>"
proof
fix B assume "B \<in> set Cs"
with Cs_def obtain A where "A\<in>set Ds" "B = opp_chamber A" by auto
with Ds'' show "B \<in> \<C>-f\<turnstile>\<C>" using folding.opp_chamberD1[of A] by auto
qed
moreover from Cs_def CD' Ds' Ds'' * have "gallery (C#Cs@[D])"
using folding.f_opp_chamber_list gallery_preimage[of "C#Cs@[D]"] by simp
ultimately show "\<exists>Cs. SimplicialComplex.maxsimpchain Y (C # Cs @ [D])"
using Y_def CD' folding.subcomplex_opp_half_apartment
folding.opp_chambers_subset_opp_half_apartment
maxsimpchain_in_subcomplex[of Y "C#Cs@[D]"]
by fastforce
qed
lemma flop_adj:
assumes "chamber C" "chamber D" "C\<sim>D"
shows "flop C \<sim> flop D"
proof (cases "C\<in>f\<turnstile>\<C>" "D\<in>f\<turnstile>\<C>" rule: two_cases)
case both
with assms(3) show ?thesis using adjacent_opp_chamber by simp
next
case one
with assms(2,3) show ?thesis
using chamber_system_def adjacent_half_chamber_system_image[of C]
adjacent_half_chamber_system_image_reverse adjacent_sym
by simp
next
case other
with assms(1) show ?thesis
using chamber_system_def adjacent_sym[OF assms(3)]
adjacent_half_chamber_system_image[of D]
adjacent_half_chamber_system_image_reverse
by auto
qed (simp add: assms folding.adj_map)
lemma flop_gallery: "gallery Cs \<Longrightarrow> gallery (map flop Cs)"
proof (induct Cs rule: list_induct_CCons)
case (CCons B C Cs)
have "gallery (flop B # (flop C) # map flop Cs)"
proof (rule gallery_CConsI)
from CCons(2) show "chamber (flop B)"
using galleryD_chamber folding.flop_chamber by simp
from CCons(1) show "gallery (flop C # map flop Cs)"
using gallery_Cons_reduce[OF CCons(2)] by simp
from CCons(2) show "flop B \<sim> flop C"
using galleryD_chamber galleryD_adj flop_adj[of B C] by fastforce
qed
thus ?case by simp
qed (auto simp add: galleryD_chamber folding.flop_chamber gallery_def)
lemma morphism_half_apartments: "ChamberComplexMorphism folding.Y (f\<turnstile>X) f"
proof (
rule ChamberComplexMorphism.intro, rule chambercomplex_opp_half_apartment,
rule folding.chambercomplex_image, unfold_locales
)
show
"\<And>C. SimplicialComplex.maxsimp folding.Y C \<Longrightarrow>
SimplicialComplex.maxsimp (f\<turnstile>X) (f`C)"
"\<And>C. SimplicialComplex.maxsimp folding.Y C \<Longrightarrow> card (f`C) = card C"
using folding.chamber_in_opp_half_apartment folding.chamber_map
folding.chambersubcomplex_image chamber_in_subcomplex
chamberD_simplex folding.dim_map
by auto
qed
lemma chamber_image_complement_closer:
"\<lbrakk> D\<in>\<C>-f\<turnstile>\<C>; B\<in>\<C>-f\<turnstile>\<C>; B\<noteq>D; gallery (B#Cs@[f`D]) \<rbrakk> \<Longrightarrow>
\<exists>Ds. gallery (B#Ds@[D]) \<and> length Ds < length Cs"
using flop_gallery chamber_image_closer[of D "f`B" "map flop Cs"]
folding.opp_chamber_reverse folding.inj_on_opp_chambers''[of B D]
by force
lemma chamber_image_complement_subset:
assumes D: "D\<in>\<C>-f\<turnstile>\<C>"
defines C: "C \<equiv> f`D"
defines "closerToD \<equiv> {B\<in>\<C>. chamber_distance B D < chamber_distance B C}"
shows "\<C>-f\<turnstile>\<C> \<subseteq> closerToD"
proof
fix B assume B: "B\<in>\<C>-f\<turnstile>\<C>"
show "B \<in> closerToD"
proof (cases "B=D")
case True with B C closerToD_def show ?thesis
using chamber_distance_def by auto
next
case False
define Cs where "Cs = (ARG_MIN length Cs. gallery (B#Cs@[C]))"
with B C D False closerToD_def show ?thesis
using chamber_system_def folding.chamber_map[of D]
gallery_least_length[of B C] chamber_distance_le
chamber_image_complement_closer[of D B Cs]
chamber_distance_def[of B C]
by fastforce
qed
qed
lemma chamber_image_and_complement:
assumes D: "D\<in>\<C>-f\<turnstile>\<C>"
defines C: "C \<equiv> f`D"
defines "closerToC \<equiv> {B\<in>\<C>. chamber_distance B C < chamber_distance B D}"
and "closerToD \<equiv> {B\<in>\<C>. chamber_distance B D < chamber_distance B C}"
shows "f\<turnstile>\<C> = closerToC" "\<C>-f\<turnstile>\<C> = closerToD"
proof-
from closerToC_def closerToD_def have "closerToC \<inter> closerToD = {}" by auto
moreover from C D closerToC_def closerToD_def
have "\<C> = f \<turnstile> \<C> \<union> (\<C>-f\<turnstile>\<C>)" "closerToC \<subseteq> \<C>" "closerToD \<subseteq> \<C>"
using folding.chamber_system_into
by auto
moreover from assms have "f\<turnstile>\<C> \<subseteq> closerToC" "\<C>-f\<turnstile>\<C> \<subseteq> closerToD"
using chamber_image_subset chamber_image_complement_subset by auto
ultimately show "f\<turnstile>\<C> = closerToC" "\<C>-f\<turnstile>\<C> = closerToD"
using set_decomp_subset[of \<C> "f\<turnstile>\<C>"] set_decomp_subset[of \<C> "\<C>-f\<turnstile>\<C>"] by auto
qed
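text \<open>
Summarising the preceding lemmas: for any chamber @{text D} outside the image of the folding, the
image half of the chamber system consists precisely of the chambers strictly closer to
@{text "f`D"} than to @{text D}, and the complementary half consists precisely of the chambers
strictly closer to @{text D} than to @{text "f`D"}.
\<close>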
end (* context ThinChamberComplexFolding *)
subsubsection \<open>Pairs of opposed foldings\<close>
text \<open>
A pair of foldings of a thin chamber complex is said to be opposed, or opposite, if there is a
corresponding pair of adjacent chambers such that each folding maps its associated chamber onto
the other one.
\<close>
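text \<open>
In the locale below, writing @{text "D0 \<equiv> g`C0"}, the assumptions state that @{text C0} and
@{text D0} are distinct adjacent chambers with @{text "f`D0 = C0"} and @{text "g`C0 = D0"};
that is, each folding maps the chamber associated to the other folding onto its own.
\<close>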
locale OpposedThinChamberComplexFoldings =
ThinChamberComplex X
+ folding_f: ChamberComplexFolding X f
+ folding_g: ChamberComplexFolding X g
for X :: "'a set set"
and f :: "'a\<Rightarrow>'a"
and g :: "'a\<Rightarrow>'a"
+ fixes C0 :: "'a set"
assumes chambers: "chamber C0" "C0\<sim>g`C0" "C0\<noteq>g`C0" "f`g`C0 = C0"
begin
abbreviation "D0 \<equiv> g`C0"
lemmas chamber_D0 = folding_g.chamber_map[OF chambers(1)]
lemma ThinChamberComplexFolding_f: "ThinChamberComplexFolding X f" ..
lemma ThinChamberComplexFolding_g: "ThinChamberComplexFolding X g" ..
lemmas foldf = ThinChamberComplexFolding_f
lemmas foldg = ThinChamberComplexFolding_g
lemma fg_symmetric: "OpposedThinChamberComplexFoldings X g f D0"
using chambers(2-4) chamber_D0 adjacent_sym by unfold_locales auto
lemma basechambers_half_chamber_systems: "C0\<in>f\<turnstile>\<C>" "D0\<in>g\<turnstile>\<C>"
using chambers(1,4) chamber_D0 chamber_system_def by auto
lemmas basech_halfchsys =
basechambers_half_chamber_systems
lemma f_trivial_C0: "v\<in>C0 \<Longrightarrow> f v = v"
using chambers(4) chamber_D0 chamberD_simplex[of D0]
folding_f.vertex_retraction
by fast
lemmas g_trivial_D0 =
OpposedThinChamberComplexFoldings.f_trivial_C0[OF fg_symmetric]
lemma double_fold_D0:
assumes "v \<in> D0 - C0"
shows "g (f v) = v"
proof-
from assms chambers(2) have 1: "D0 = insert v (C0\<inter>D0)"
using adjacent_sym adjacent_conv_insert by fast
hence "f`D0 = insert (f v) (f`(C0\<inter>D0))" by fast
moreover have "f`(C0\<inter>D0) = C0\<inter>D0" using f_trivial_C0 by force
ultimately have "C0 = insert (f v) (C0\<inter>D0)" using chambers(4) by simp
hence "g`C0 = insert (g (f v)) (g`(C0\<inter>D0))" by force
moreover have "g`(C0\<inter>D0) = C0\<inter>D0"
using g_trivial_D0 fixespointwise_im[of g D0 "C0\<inter>D0"]
by (fastforce intro: fixespointwiseI)
ultimately have "D0 = insert (g (f v)) (C0\<inter>D0)" by simp
with assms show ?thesis using 1 by force
qed
lemmas double_fold_C0 =
OpposedThinChamberComplexFoldings.double_fold_D0[OF fg_symmetric]
lemma flopped_half_chamber_systems_fg: "\<C>-f\<turnstile>\<C> = g\<turnstile>\<C>"
proof-
from chambers(1,3,4) have "D0\<in>\<C>-f\<turnstile>\<C>" "C0\<in>\<C>-g\<turnstile>\<C>"
using chamber_system_def chamber_D0 folding_f.chamber_retraction2[of D0]
folding_g.chamber_retraction2[of C0]
by auto
with chambers(2,4) show ?thesis
using ThinChamberComplexFolding.chamber_image_and_complement[
OF ThinChamberComplexFolding_g, of C0
]
ThinChamberComplexFolding.chamber_image_and_complement[
OF ThinChamberComplexFolding_f, of D0
]
adjacent_sym[of C0 D0]
by force
qed
lemmas flopped_half_chamber_systems_gf =
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_fg[
OF fg_symmetric
]
lemma flopped_half_apartments_fg: "folding_f.opp_half_apartment = g\<turnstile>X"
proof (rule seteqI)
fix a assume "a \<in> folding_f.Y"
from this obtain C where "C\<in>g\<turnstile>\<C>" "a\<subseteq>C"
using folding_f.opp_half_apartment_def flopped_half_chamber_systems_fg by auto
thus "a\<in>g\<turnstile>X"
using chamber_system_simplices
ChamberComplex.faces[OF folding_g.chambercomplex_image, of C]
by auto
next
fix b assume b: "b \<in> g\<turnstile>X"
from this obtain C where C: "C\<in>\<C>" "b \<subseteq> g`C"
using simplex_in_max chamber_system_def by fast
from C(1) have "g`C \<in> g\<turnstile>\<C>" by fast
hence "g`C \<in> \<C>-f\<turnstile>\<C>" using flopped_half_chamber_systems_fg by simp
with C(2) have "\<exists>C\<in>\<C>-f\<turnstile>\<C>. b\<subseteq>C" by auto
moreover from b have "b\<in>X" using folding_g.simplex_map by fast
ultimately show "b \<in> folding_f.Y"
unfolding folding_f.opp_half_apartment_def by simp
qed
lemmas flopped_half_apartments_gf =
OpposedThinChamberComplexFoldings.flopped_half_apartments_fg[
OF fg_symmetric
]
lemma vertex_set_split: "\<Union>X = f`(\<Union>X) \<union> g`(\<Union>X)"
\<comment> \<open>@{term f} and @{term g} will both be the identity on the intersection\<close>
proof
show "\<Union>X \<supseteq> f`(\<Union>X) \<union> g`(\<Union>X)"
using folding_f.simplex_map folding_g.simplex_map by auto
show "\<Union>X \<subseteq> f`(\<Union>X) \<union> g`(\<Union>X)"
proof
fix a assume "a\<in>\<Union>X"
from this obtain C where C: "chamber C" "a\<in>C"
using simplex_in_max by fast
from C(1) have "C\<in>f\<turnstile>\<C> \<or> C\<in>g\<turnstile>\<C>"
using chamber_system_def flopped_half_chamber_systems_fg by auto
with C(2) show "a \<in> (f`\<Union>X) \<union> (g`\<Union>X)"
using chamber_system_simplices by fast
qed
qed
lemma half_chamber_system_disjoint_union:
"\<C> = f\<turnstile>\<C> \<union> g\<turnstile>\<C>" "(f\<turnstile>\<C>) \<inter> (g\<turnstile>\<C>) = {}"
using folding_f.chamber_system_into
flopped_half_chamber_systems_fg[THEN sym]
by auto
lemmas halfchsys_decomp =
half_chamber_system_disjoint_union
lemma chamber_in_other_half_fg: "chamber C \<Longrightarrow> C\<notin>f\<turnstile>\<C> \<Longrightarrow> C\<in>g\<turnstile>\<C>"
using chamber_system_def half_chamber_system_disjoint_union(1) by blast
lemma adjacent_half_chamber_system_image_fg:
"C\<in>f\<turnstile>\<C> \<Longrightarrow> D\<in>g\<turnstile>\<C> \<Longrightarrow> C\<sim>D \<Longrightarrow> f`D = C"
using ThinChamberComplexFolding.adjacent_half_chamber_system_image[
OF ThinChamberComplexFolding_f
]
by (simp add: flopped_half_chamber_systems_fg)
lemmas adjacent_half_chamber_system_image_gf =
OpposedThinChamberComplexFoldings.adjacent_half_chamber_system_image_fg[
OF fg_symmetric
]
lemmas adjhalfchsys_image_gf =
adjacent_half_chamber_system_image_gf
lemma switch_basechamber:
assumes "C\<in>f\<turnstile>\<C>" "C\<sim>g`C"
shows "OpposedThinChamberComplexFoldings X f g C"
proof
from assms(1) have "C\<in>\<C>-g\<turnstile>\<C>" using flopped_half_chamber_systems_gf by simp
with assms show "chamber C" "C \<noteq> g`C" "f`g`C = C"
using chamber_system_def adjacent_half_chamber_system_image_fg[of C "g`C"]
by auto
qed (rule assms(2))
lemma unique_half_chamber_system_f:
assumes "OpposedThinChamberComplexFoldings X f' g' C0" "g'`C0 = D0"
shows "f'\<turnstile>\<C> = f\<turnstile>\<C>"
proof-
have 1: "OpposedThinChamberComplexFoldings X f g' C0"
proof (rule OpposedThinChamberComplexFoldings.intro)
show "ChamberComplexFolding X f" "ThinChamberComplex X" ..
from assms(1) show "ChamberComplexFolding X g'"
using OpposedThinChamberComplexFoldings.axioms(3) by fastforce
from assms(2) chambers
show "OpposedThinChamberComplexFoldings_axioms X f g' C0"
by unfold_locales auto
qed
define a b where "a = f'\<turnstile>\<C>" and "b = f\<turnstile>\<C>"
hence "a\<subseteq>\<C>" "b\<subseteq>\<C>" "\<C>-a = \<C>-b"
using OpposedThinChamberComplexFoldings.axioms(2)[OF assms(1)]
OpposedThinChamberComplexFoldings.axioms(2)[OF 1]
ChamberComplexFolding.chamber_system_into[of X f]
ChamberComplexFolding.chamber_system_into[of X f']
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_fg[
OF assms(1)
]
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_fg[
OF 1
]
by auto
hence "a=b" by fast
with a_def b_def show ?thesis by simp
qed
lemma unique_half_chamber_system_g:
"OpposedThinChamberComplexFoldings X f' g' C0 \<Longrightarrow> g'`C0 = D0 \<Longrightarrow>
g'\<turnstile>\<C> = g\<turnstile>\<C>"
using unique_half_chamber_system_f flopped_half_chamber_systems_fg
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_fg[
of X f' g'
]
by simp
lemma split_gallery_fg:
"\<lbrakk> C\<in>f\<turnstile>\<C>; D\<in>g\<turnstile>\<C>; gallery (C#Cs@[D]) \<rbrakk> \<Longrightarrow>
\<exists>As A B Bs. A\<in>f\<turnstile>\<C> \<and> B\<in>g\<turnstile>\<C> \<and> C#Cs@[D] = As@A#B#Bs"
using folding_f.split_gallery flopped_half_chamber_systems_fg by simp
lemmas split_gallery_gf =
OpposedThinChamberComplexFoldings.split_gallery_fg[OF fg_symmetric]
end (* context OpposedThinChamberComplexFoldings *)
subsubsection \<open>The automorphism induced by a pair of opposed foldings\<close>
text \<open>
Recall that a folding of a chamber complex is a special kind of chamber complex retraction, and
so is the identity on its image. Hence each folding in a pair of opposed foldings is the identity
on the intersection of their images, and so we can stitch the two foldings together to create an
automorphism of the chamber complex by letting each folding act on the complement of its image.
This automorphism will be of order two, and it will be the unique non-identity automorphism of the
chamber complex that fixes pointwise the facet shared by the pair of adjacent chambers associated
to the opposed foldings.
\<close>
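text \<open>
The definition below proceeds by cases: on the image of @{text f} the map acts as @{text g}, on
the image of @{text g} it acts as @{text f}, and elsewhere it is the identity. On the intersection
of the two images both foldings restrict to the identity (being retractions), so the two cases
agree there and the map is well defined.
\<close>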
context OpposedThinChamberComplexFoldings
begin
definition induced_automorphism :: "'a\<Rightarrow>'a"
where "induced_automorphism v \<equiv>
if v\<in>f`(\<Union>X) then g v else if v\<in>g`(\<Union>X) then f v else v"
\<comment> \<open>@{term f} and @{term g} will both be the identity on the intersection of their images\<close>
abbreviation "\<s> \<equiv> induced_automorphism"
lemma induced_automorphism_fg_symmetric:
"\<s> = OpposedThinChamberComplexFoldings.\<s> X g f"
by (auto simp add:
folding_f.vertex_retraction folding_g.vertex_retraction
induced_automorphism_def
OpposedThinChamberComplexFoldings.induced_automorphism_def[
OF fg_symmetric
]
)
lemma induced_automorphism_on_simplices_fg: "x\<in>f\<turnstile>X \<Longrightarrow> v\<in>x \<Longrightarrow> \<s> v = g v"
using induced_automorphism_def by auto
lemma induced_automorphism_eq_foldings_on_chambers_fg:
"C\<in>f\<turnstile>\<C> \<Longrightarrow> fun_eq_on \<s> g C"
using chamber_system_simplices induced_automorphism_on_simplices_fg[of C]
by (fast intro: fun_eq_onI)
lemmas indaut_eq_foldch_fg =
induced_automorphism_eq_foldings_on_chambers_fg
lemma induced_automorphism_eq_foldings_on_chambers_gf:
"C\<in>g\<turnstile>\<C> \<Longrightarrow> fun_eq_on \<s> f C"
by (auto simp add:
OpposedThinChamberComplexFoldings.indaut_eq_foldch_fg[
OF fg_symmetric
]
induced_automorphism_fg_symmetric
)
lemma induced_automorphism_on_chamber_vertices_f:
"chamber C \<Longrightarrow> v\<in>C \<Longrightarrow> \<s> v = (if C\<in>f\<turnstile>\<C> then g v else f v)"
using chamber_system_def induced_automorphism_eq_foldings_on_chambers_fg
induced_automorphism_eq_foldings_on_chambers_gf
flopped_half_chamber_systems_fg[THEN sym]
fun_eq_onD[of \<s> g C] fun_eq_onD[of \<s> f C]
by auto
lemma induced_automorphism_simplex_image:
"C\<in>f\<turnstile>\<C> \<Longrightarrow> x\<subseteq>C \<Longrightarrow> \<s>`x = g`x" "C\<in>g\<turnstile>\<C> \<Longrightarrow> x\<subseteq>C \<Longrightarrow> \<s>`x = f`x"
using fun_eq_on_im[of \<s> g C] fun_eq_on_im[of \<s> f C]
induced_automorphism_eq_foldings_on_chambers_fg
induced_automorphism_eq_foldings_on_chambers_gf
by auto
lemma induced_automorphism_chamber_list_image_fg:
"set Cs \<subseteq> f\<turnstile>\<C> \<Longrightarrow> \<s>\<Turnstile>Cs = g\<Turnstile>Cs"
proof (induct Cs)
case (Cons C Cs) thus ?case
using induced_automorphism_simplex_image(1)[of C] by simp
qed simp
lemma induced_automorphism_chamber_image_fg:
"chamber C \<Longrightarrow> \<s>`C = (if C\<in>f\<turnstile>\<C> then g`C else f`C)"
using chamber_system_def induced_automorphism_simplex_image
flopped_half_chamber_systems_fg[THEN sym]
by auto
lemma induced_automorphism_C0: "\<s>`C0 = D0"
using chambers(1,4) basechambers_half_chamber_systems(1)
induced_automorphism_chamber_image_fg
by auto
lemma induced_automorphism_fixespointwise_C0_int_D0:
"fixespointwise \<s> (C0\<inter>D0)"
using fun_eq_on_trans[of \<s> g] fun_eq_on_subset[of \<s> g C0]
fixespointwise_subset[of g D0]
induced_automorphism_eq_foldings_on_chambers_fg
basechambers_half_chamber_systems
folding_g.chamber_retraction1
by auto
lemmas indaut_fixes_fundfacet =
induced_automorphism_fixespointwise_C0_int_D0
lemma induced_automorphism_adjacent_half_chamber_system_image_fg:
"\<lbrakk> C\<in>f\<turnstile>\<C>; D\<in>g\<turnstile>\<C>; C\<sim>D \<rbrakk> \<Longrightarrow> \<s>`D = C"
using adjacent_half_chamber_system_image_fg[of C D]
induced_automorphism_simplex_image(2)
by auto
lemmas indaut_adj_halfchsys_im_fg =
induced_automorphism_adjacent_half_chamber_system_image_fg
lemma induced_automorphism_chamber_map: "chamber C \<Longrightarrow> chamber (\<s>`C)"
using induced_automorphism_chamber_image_fg folding_f.chamber_map
folding_g.chamber_map
by auto
lemmas indaut_chmap = induced_automorphism_chamber_map
lemma induced_automorphism_ntrivial: "\<s> \<noteq> id"
proof
assume \<s>: "\<s> = id"
from chambers(2,3) obtain v where v: "v \<notin> D0" "C0 = insert v (C0\<inter>D0)"
using adjacent_int_decomp[of C0 D0] by fast
from chambers(4) \<s> v(2) have gv: "g v = v"
using chamberD_simplex[OF chamber_D0]
induced_automorphism_on_simplices_fg[of C0 v, THEN sym]
by auto
have "g`C0 = C0"
proof (rule seteqI)
from v(2) gv show "\<And>x. x\<in>C0 \<Longrightarrow> x\<in>g`C0" using g_trivial_D0 by force
next
fix x assume "x\<in>g`C0"
from this obtain y where y: "y\<in>C0" "x = g y" by fast
moreover from y(1) v(2) gv have "g y = y"
using g_trivial_D0[of y] by (cases "y=v") auto
ultimately show "x\<in>C0" using y by simp
qed
with chambers(3) show False by fast
qed
lemma induced_automorphism_bij_between_half_chamber_systems_f:
"bij_betw ((`) \<s>) (\<C>-f\<turnstile>\<C>) (f\<turnstile>\<C>)"
using induced_automorphism_simplex_image(2)
flopped_half_chamber_systems_fg
folding_f.opp_chambers_bij bij_betw_cong[of "\<C>-f\<turnstile>\<C>" "(`) \<s>"]
by auto
lemmas indaut_bij_btw_halfchsys_f =
induced_automorphism_bij_between_half_chamber_systems_f
lemma induced_automorphism_bij_between_half_chamber_systems_g:
"bij_betw ((`) \<s>) (\<C>-g\<turnstile>\<C>) (g\<turnstile>\<C>)"
using induced_automorphism_fg_symmetric
OpposedThinChamberComplexFoldings.indaut_bij_btw_halfchsys_f[
OF fg_symmetric
]
by simp
lemma induced_automorphism_halfmorphism_fopp_to_fimage:
"ChamberComplexMorphism folding_f.opp_half_apartment (f\<turnstile>X) \<s>"
proof (
rule ChamberComplexMorphism.cong,
rule ThinChamberComplexFolding.morphism_half_apartments,
rule ThinChamberComplexFolding_f, rule fun_eq_onI
)
show "\<And>v. v \<in> \<Union>folding_f.Y \<Longrightarrow> \<s> v = f v"
using folding_f.opp_half_apartment_def chamber_system_simplices
by (force simp add:
flopped_half_chamber_systems_fg
induced_automorphism_fg_symmetric
OpposedThinChamberComplexFoldings.induced_automorphism_def[
OF fg_symmetric
]
)
qed
lemmas indaut_halfmorph_fopp_fim =
induced_automorphism_halfmorphism_fopp_to_fimage
lemma induced_automorphism_half_chamber_system_gallery_map_f:
"set Cs \<subseteq> f\<turnstile>\<C> \<Longrightarrow> gallery Cs \<Longrightarrow> gallery (\<s>\<Turnstile>Cs)"
using folding_g.gallery_map[of Cs]
induced_automorphism_chamber_list_image_fg[THEN sym]
by auto
lemma induced_automorphism_half_chamber_system_pgallery_map_f:
"set Cs \<subseteq> f\<turnstile>\<C> \<Longrightarrow> pgallery Cs \<Longrightarrow> pgallery (\<s>\<Turnstile>Cs)"
using induced_automorphism_half_chamber_system_gallery_map_f pgallery
flopped_half_chamber_systems_gf pgalleryD_distinct
folding_g.opp_chambers_distinct_map
induced_automorphism_chamber_list_image_fg[THEN sym]
by (auto intro: pgalleryI_gallery)
lemmas indaut_halfchsys_pgal_map_f =
induced_automorphism_half_chamber_system_pgallery_map_f
lemma induced_automorphism_half_chamber_system_pgallery_map_g:
"set Cs \<subseteq> g\<turnstile>\<C> \<Longrightarrow> pgallery Cs \<Longrightarrow> pgallery (\<s>\<Turnstile>Cs)"
using induced_automorphism_fg_symmetric
OpposedThinChamberComplexFoldings.indaut_halfchsys_pgal_map_f[
OF fg_symmetric
]
by simp
lemma induced_automorphism_halfmorphism_fimage_to_fopp:
"ChamberComplexMorphism (f\<turnstile>X) folding_f.opp_half_apartment \<s>"
using OpposedThinChamberComplexFoldings.indaut_halfmorph_fopp_fim[
OF fg_symmetric
]
by (auto simp add:
flopped_half_apartments_gf flopped_half_apartments_fg
induced_automorphism_fg_symmetric
)
lemma induced_automorphism_selfcomp_halfmorphism_f:
"ChamberComplexMorphism (f\<turnstile>X) (f\<turnstile>X) (\<s>\<circ>\<s>)"
using induced_automorphism_halfmorphism_fimage_to_fopp
induced_automorphism_halfmorphism_fopp_to_fimage
by (auto intro: ChamberComplexMorphism.comp)
lemma induced_automorphism_selfcomp_halftrivial_f: "fixespointwise (\<s>\<circ>\<s>) (\<Union>(f\<turnstile>X))"
proof (
rule standard_uniqueness, rule ChamberComplexMorphism.expand_codomain,
rule induced_automorphism_selfcomp_halfmorphism_f
)
show "ChamberComplexMorphism (f\<turnstile>X) X id"
using folding_f.chambersubcomplex_image inclusion_morphism by fast
show "SimplicialComplex.maxsimp (f\<turnstile>X) C0"
using chambers(1,4) chamberD_simplex[OF chamber_D0]
chamber_in_subcomplex[OF folding_f.chambersubcomplex_image, of C0]
by auto
show "fixespointwise (\<s>\<circ>\<s>) C0"
proof (rule fixespointwiseI)
fix v assume v: "v\<in>C0"
with chambers(4) have "v\<in>f`(\<Union>X)"
using chamber_D0 chamberD_simplex by fast
hence 1: "\<s> v = g v" using induced_automorphism_def by simp
show "(\<s>\<circ>\<s>) v = id v"
proof (cases "v\<in>D0")
case True with v show ?thesis using 1 g_trivial_D0 by simp
next
case False
from v chambers(1,4) have "\<s> (g v) = f (g v)"
using chamberD_simplex induced_automorphism_fg_symmetric
OpposedThinChamberComplexFoldings.induced_automorphism_def[
OF fg_symmetric, of "g v"
]
by force
with v False chambers(4) show ?thesis using double_fold_C0 1 by simp
qed
qed
next
fix Cs assume "ChamberComplex.min_gallery (f\<turnstile>X) (C0#Cs)"
hence Cs: "ChamberComplex.pgallery (f\<turnstile>X) (C0#Cs)"
using ChamberComplex.min_gallery_pgallery folding_f.chambercomplex_image
by fast
hence pCs: "pgallery (C0#Cs)"
using folding_f.chambersubcomplex_image subcomplex_pgallery by auto
thus "pgallery (id\<Turnstile>(C0#Cs))" by simp
have set_Cs: "set (C0#Cs) \<subseteq> f\<turnstile>\<C>"
using Cs pCs folding_f.chambersubcomplex_image
ChamberSubcomplexD_complex ChamberComplex.pgalleryD_chamber
ChamberComplex.chamberD_simplex pgallery_chamber_system
folding_f.chamber_system_image
by fastforce
hence "pgallery (\<s>\<Turnstile>(C0#Cs))"
using pCs induced_automorphism_half_chamber_system_pgallery_map_f[of "C0#Cs"]
by auto
moreover have "set (\<s>\<Turnstile>(C0#Cs)) \<subseteq> g\<turnstile>\<C>"
proof-
have "set (\<s>\<Turnstile>(C0#Cs)) \<subseteq> \<s>\<turnstile>(\<C>-g\<turnstile>\<C>)"
using set_Cs flopped_half_chamber_systems_gf by auto
thus ?thesis
using bij_betw_imp_surj_on[
OF induced_automorphism_bij_between_half_chamber_systems_g
]
by simp
qed
ultimately have "pgallery (\<s>\<Turnstile>(\<s>\<Turnstile>(C0#Cs)))"
using induced_automorphism_half_chamber_system_pgallery_map_g[
of "\<s>\<Turnstile>(C0#Cs)"
]
by auto
thus "pgallery ((\<s>\<circ>\<s>)\<Turnstile>(C0#Cs))"
using ssubst[OF setlistmapim_comp, of pgallery, of \<s> \<s> "C0#Cs"] by fast
qed (unfold_locales, rule folding_f.chambersubcomplex_image)
lemmas indaut_selfcomp_halftriv_f =
induced_automorphism_selfcomp_halftrivial_f
lemma induced_automorphism_selfcomp_halftrivial_g: "fixespointwise (\<s>\<circ>\<s>) (\<Union>(g\<turnstile>X))"
using induced_automorphism_fg_symmetric
OpposedThinChamberComplexFoldings.indaut_selfcomp_halftriv_f[
OF fg_symmetric
]
by simp
lemma induced_automorphism_trivial_outside:
assumes "v\<notin>\<Union>X"
shows "\<s> v = v"
proof-
from assms have "v \<notin> f`(\<Union>X) \<and> v \<notin> g`(\<Union>X)" using vertex_set_split by fast
thus "\<s> v = v" using induced_automorphism_def by simp
qed
lemmas indaut_morph = induced_automorphism_morphism
lemma induced_automorphism_morphism_order2: "\<s>\<circ>\<s> = id"
proof
fix v
show "(\<s>\<circ>\<s>) v = id v"
proof (cases "v\<in>f`(\<Union>X)" "v\<in>g`(\<Union>X)" rule: two_cases)
case both
from both(1) show ?thesis
using induced_automorphism_selfcomp_halftrivial_f fixespointwiseD[of "\<s>\<circ>\<s>"]
by auto
next
case one thus ?thesis
using induced_automorphism_selfcomp_halftrivial_f fixespointwiseD[of "\<s>\<circ>\<s>"]
by fastforce
next
case other thus ?thesis
using induced_automorphism_selfcomp_halftrivial_g fixespointwiseD[of "\<s>\<circ>\<s>"]
by fastforce
qed (simp add: induced_automorphism_def)
qed
lemmas indaut_order2 = induced_automorphism_morphism_order2
lemmas induced_automorphism_bij =
o_bij[OF
induced_automorphism_morphism_order2
induced_automorphism_morphism_order2
]
lemma induced_automorphism_surj_on_vertexset: "\<s>`(\<Union>X) = \<Union>X"
proof
show "\<s>`(\<Union>X) \<subseteq> \<Union>X"
using induced_automorphism_morphism
ChamberComplexEndomorphism.vertex_map
by fast
hence "(\<s>\<circ>\<s>)`(\<Union>X) \<subseteq> \<s>`(\<Union>X)" by fastforce
thus "\<Union>X \<subseteq> \<s>`(\<Union>X)" using induced_automorphism_morphism_order2 by simp
qed
lemma induced_automorphism_bij_betw_vertexset: "bij_betw \<s> (\<Union>X) (\<Union>X)"
using induced_automorphism_bij induced_automorphism_surj_on_vertexset
by (auto intro: bij_betw_subset)
lemma induced_automorphism_surj_on_simplices: "\<s>\<turnstile>X = X"
proof
show "\<s>\<turnstile>X \<subseteq> X"
using induced_automorphism_morphism
ChamberComplexEndomorphism.simplex_map
by fast
hence "\<s>\<turnstile>(\<s>\<turnstile>X) \<subseteq> \<s>\<turnstile>X" by auto
thus "X \<subseteq> \<s>\<turnstile>X"
by (simp add:
setsetmapim_comp[THEN sym] induced_automorphism_morphism_order2
)
qed
lemma induced_automorphism_automorphism:
"ChamberComplexAutomorphism X \<s>"
using induced_automorphism_chamber_map
ChamberComplexEndomorphism.dim_map
induced_automorphism_morphism
induced_automorphism_bij_betw_vertexset
induced_automorphism_surj_on_simplices
induced_automorphism_trivial_outside
by (intro_locales, unfold_locales, fast)
lemmas indaut_aut = induced_automorphism_automorphism
lemma induced_automorphism_unique_automorphism':
assumes "ChamberComplexAutomorphism X s" "s\<noteq>id" "fixespointwise s (C0\<inter>D0)"
shows "fun_eq_on s \<s> C0"
proof (rule fun_eq_on_subset_and_diff_imp_eq_on)
from assms(3) show "fun_eq_on s \<s> (C0\<inter>D0)"
using induced_automorphism_fixespointwise_C0_int_D0
fixespointwise2_imp_eq_on
by fast
show "fun_eq_on s \<s> (C0 - (C0\<inter>D0))"
proof (rule fun_eq_onI)
fix v assume v: "v \<in> C0 - C0\<inter>D0"
with chambers(2) have C0_insert: "C0 = insert v (C0\<inter>D0)"
using adjacent_conv_insert by fast
hence "s`C0 = insert (s v) (s`(C0\<inter>D0))" "\<s>`C0 = insert (\<s> v) (\<s>`(C0\<inter>D0))"
by auto
with assms(3)
have insert: "s`C0 = insert (s v) (C0\<inter>D0)" "D0 = insert (\<s> v) (C0\<inter>D0)"
using basechambers_half_chamber_systems
induced_automorphism_fixespointwise_C0_int_D0
induced_automorphism_simplex_image(1)
by (auto simp add: fixespointwise_im)
from chambers(2,3) have C0D0_C0: "(C0\<inter>D0) \<lhd> C0"
using adjacent_int_facet1 by fast
with assms(1) chambers(1) have "s`(C0\<inter>D0) \<lhd> s`C0"
using ChamberComplexAutomorphism.facet_map by fast
with assms(3) have C0D0_sC0: "(C0\<inter>D0) \<lhd> s`C0"
by (simp add: fixespointwise_im)
hence sv_nin_C0D0: "s v \<notin> C0\<inter>D0" using insert(1) facetrel_psubset by auto
from assms(1) chambers(1) have "chamber (s`C0)"
using ChamberComplexAutomorphism.chamber_map by fast
moreover from chambers(2,3) have C0D0_D0: "(C0\<inter>D0) \<lhd> D0"
using adjacent_sym adjacent_int_facet1 by (fastforce simp add: Int_commute)
ultimately have "s`C0 = C0 \<or> s`C0 = D0"
using chambers(1,3) chamber_D0 C0D0_C0 C0D0_sC0
facet_unique_other_chamber[of "s`C0" "C0\<inter>D0" C0 D0]
by auto
moreover have "\<not> s`C0 = C0"
proof
assume sC0: "s`C0 = C0"
have "s = id"
proof (
rule standard_uniqueness_automorphs, rule assms(1),
rule trivial_automorphism, rule chambers(1),
rule fixespointwise_subset_and_diff_imp_eq_on,
rule Int_lower1, rule assms(3), rule fixespointwiseI
)
fix a assume "a \<in> C0-(C0\<inter>D0)"
with v have "a = v" using C0_insert by fast
with sC0 show "s a = id a" using C0_insert sv_nin_C0D0 by auto
qed
with assms(1,2) show False by fast
qed
ultimately have sC0_D0: "s`C0 = D0" by fast
have "\<s> v \<notin> C0\<inter>D0" using insert(2) C0D0_D0 facetrel_psubset by force
thus "s v = \<s> v" using insert sC0_D0 sv_nin_C0D0 by auto
qed
qed simp
lemma induced_automorphism_unique_automorphism:
"\<lbrakk> ChamberComplexAutomorphism X s; s\<noteq>id; fixespointwise s (C0\<inter>D0) \<rbrakk>
\<Longrightarrow> s = \<s>"
using chambers(1) induced_automorphism_unique_automorphism'
standard_uniqueness_automorphs induced_automorphism_automorphism
by fastforce
lemmas indaut_uniq_aut =
induced_automorphism_unique_automorphism
lemma induced_automorphism_unique:
"OpposedThinChamberComplexFoldings X f' g' C0 \<Longrightarrow> g'`C0 = g`C0 \<Longrightarrow>
OpposedThinChamberComplexFoldings.induced_automorphism X f' g' = \<s>"
using induced_automorphism_automorphism induced_automorphism_ntrivial
induced_automorphism_fixespointwise_C0_int_D0
by (auto intro:
OpposedThinChamberComplexFoldings.indaut_uniq_aut[
THEN sym
]
)
lemma induced_automorphism_sym:
"OpposedThinChamberComplexFoldings.induced_automorphism X g f = \<s>"
using OpposedThinChamberComplexFoldings.indaut_aut[
OF fg_symmetric
]
OpposedThinChamberComplexFoldings.induced_automorphism_ntrivial[
OF fg_symmetric
]
OpposedThinChamberComplexFoldings.indaut_fixes_fundfacet[
OF fg_symmetric
]
induced_automorphism_unique_automorphism
by (simp add: chambers(4) Int_commute)
lemma induced_automorphism_respects_labels:
assumes "label_wrt B \<phi>" "v\<in>(\<Union>X)"
shows "\<phi> (\<s> v) = \<phi> v"
proof-
from assms(2) obtain C where "chamber C" "v\<in>C" using simplex_in_max by fast
with assms show ?thesis
by (simp add:
induced_automorphism_on_chamber_vertices_f folding_f.respects_labels
folding_g.respects_labels
)
qed
lemmas indaut_resplabels =
induced_automorphism_respects_labels
end (* context OpposedThinChamberComplexFoldings *)
subsubsection \<open>Walls\<close>
text \<open>
A pair of opposed foldings of a thin chamber complex decomposes the chamber system into the two
disjoint half chamber systems given by the images of the foldings. We call such a decomposition a
wall, as we imagine that this disjointness erects a wall between the two half chamber systems. By
considering the collection of all possible pairs of opposed foldings, together with their
associated walls, we can obtain information about the minimality of galleries by examining the
walls they cross.
\<close>
context ThinChamberComplex
begin
definition foldpairs :: "(('a\<Rightarrow>'a) \<times> ('a\<Rightarrow>'a)) set"
where "foldpairs \<equiv> {(f,g). \<exists>C. OpposedThinChamberComplexFoldings X f g C}"
abbreviation "walls \<equiv> \<Union>(f,g)\<in>foldpairs. {{f\<turnstile>\<C>,g\<turnstile>\<C>}}"
abbreviation "the_wall_betw C D \<equiv>
THE_default {} (\<lambda>H. H\<in>walls \<and> separated_by H C D)"
definition walls_betw :: "'a set \<Rightarrow> 'a set \<Rightarrow> 'a set set set set"
where "walls_betw C D \<equiv> {H\<in>walls. separated_by H C D}"
fun wall_crossings :: "'a set list \<Rightarrow> 'a set set set list"
where "wall_crossings [] = []"
| "wall_crossings [C] = []"
| "wall_crossings (B#C#Cs) = the_wall_betw B C # wall_crossings (C#Cs)"
lemma foldpairs_sym: "(f,g)\<in>foldpairs \<Longrightarrow> (g,f)\<in>foldpairs"
using foldpairs_def OpposedThinChamberComplexFoldings.fg_symmetric by fastforce
lemma not_self_separated_by_wall: "H\<in>walls \<Longrightarrow> \<not> separated_by H C C"
using foldpairs_def OpposedThinChamberComplexFoldings.halfchsys_decomp(2)
not_self_separated_by_disjoint
by force
lemma the_wall_betw_nempty:
assumes "the_wall_betw C D \<noteq> {}"
shows "the_wall_betw C D \<in> walls" "separated_by (the_wall_betw C D) C D"
proof-
from assms have 1: "\<exists>!H'\<in>walls. separated_by H' C D"
using THE_default_none[of "\<lambda>H. H\<in>walls \<and> separated_by H C D" "{}"] by fast
show "the_wall_betw C D \<in> walls" "separated_by (the_wall_betw C D) C D"
using THE_defaultI'[OF 1] by auto
qed
lemma the_wall_betw_self_empty: "the_wall_betw C C = {}"
proof-
{
assume *: "the_wall_betw C C \<noteq> {}"
then obtain f g
where "(f,g)\<in>foldpairs" "the_wall_betw C C = {f\<turnstile>\<C>,g\<turnstile>\<C>}"
using the_wall_betw_nempty(1)[of C C]
by blast
with * have False
using the_wall_betw_nempty(2)[of C C] foldpairs_def
OpposedThinChamberComplexFoldings.halfchsys_decomp(2)[
of X
]
not_self_separated_by_disjoint[of "f\<turnstile>\<C>" "g\<turnstile>\<C>"]
by auto
}
thus ?thesis by fast
qed
lemma length_wall_crossings: "length (wall_crossings Cs) = length Cs - 1"
by (induct Cs rule: list_induct_CCons) auto
lemma wall_crossings_snoc:
"wall_crossings (Cs@[D,E]) = wall_crossings (Cs@[D]) @ [the_wall_betw D E]"
by (induct Cs rule: list_induct_CCons) auto
lemma wall_crossings_are_walls:
"H\<in>set (wall_crossings Cs) \<Longrightarrow> H\<noteq>{} \<Longrightarrow> H\<in>walls"
proof (induct Cs arbitrary: H rule: list_induct_CCons)
case (CCons B C Cs) thus ?case
using the_wall_betw_nempty(1)
by (cases "H\<in>set (wall_crossings (C#Cs))") auto
qed auto
lemma in_set_wall_crossings_decomp:
"H\<in>set (wall_crossings Cs) \<Longrightarrow>
\<exists>As A B Bs. Cs = As@[A,B]@Bs \<and> H = the_wall_betw A B"
proof (induct Cs rule: list_induct_CCons)
case (CCons C D Ds)
show ?case
proof (cases "H \<in> set (wall_crossings (D#Ds))")
case True
with CCons(1) obtain As A B Bs
where "C#(D#Ds) = (C#As)@[A,B]@Bs" "H = the_wall_betw A B"
by fastforce
thus ?thesis by fast
next
case False
with CCons(2) have "C#(D#Ds) = []@[C,D]@Ds" "H = the_wall_betw C D"
by auto
thus ?thesis by fast
qed
qed auto
end (* context ThinChamberComplex *)
context OpposedThinChamberComplexFoldings
begin
lemma foldpair: "(f,g)\<in>foldpairs"
unfolding foldpairs_def
proof-
have "OpposedThinChamberComplexFoldings X f g C0" ..
thus "(f, g) \<in> {(f, g).
\<exists>C. OpposedThinChamberComplexFoldings X f g C}"
by fast
qed
lemma separated_by_this_wall_fg:
"separated_by {f\<turnstile>\<C>,g\<turnstile>\<C>} C D \<Longrightarrow> C\<in>f\<turnstile>\<C> \<Longrightarrow> D\<in>g\<turnstile>\<C>"
using separated_by_disjoint[
OF _ half_chamber_system_disjoint_union(2), of C D
]
by fast
lemmas separated_by_this_wall_gf =
OpposedThinChamberComplexFoldings.separated_by_this_wall_fg[
OF fg_symmetric
]
lemma induced_automorphism_this_wall_vertex:
assumes "C\<in>f\<turnstile>\<C>" "D\<in>g\<turnstile>\<C>" "v\<in>C\<inter>D"
shows "\<s> v = v"
proof-
from assms have "\<s> v = g v"
using chamber_system_simplices induced_automorphism_on_simplices_fg
by auto
with assms(2,3) show "\<s> v = v"
using chamber_system_simplices folding_g.retraction by auto
qed
lemmas indaut_wallvertex =
induced_automorphism_this_wall_vertex
lemma unique_wall:
assumes opp' : "OpposedThinChamberComplexFoldings X f' g' C'"
and chambers: "A\<in>f\<turnstile>\<C>" "A\<in>f'\<turnstile>\<C>" "B\<in>g\<turnstile>\<C>" "B\<in>g'\<turnstile>\<C>" "A\<sim>B"
shows "{f\<turnstile>\<C>,g\<turnstile>\<C>} = {f'\<turnstile>\<C>,g'\<turnstile>\<C>}"
proof-
from chambers have B: "B=g`A" "B=g'`A"
using adjacent_sym[of A B] adjacent_half_chamber_system_image_gf
OpposedThinChamberComplexFoldings.adjhalfchsys_image_gf[
OF opp'
]
by auto
with chambers(1,2,5)
have A : "OpposedThinChamberComplexFoldings X f g A"
and A': "OpposedThinChamberComplexFoldings X f' g' A"
using switch_basechamber[of A]
OpposedThinChamberComplexFoldings.switch_basechamber[
OF opp', of A
]
by auto
with B show ?thesis
using OpposedThinChamberComplexFoldings.unique_half_chamber_system_f[
OF A A'
]
OpposedThinChamberComplexFoldings.unique_half_chamber_system_g[
OF A A'
]
by auto
qed
end (* context OpposedThinChamberComplexFoldings *)
context ThinChamberComplex
begin
lemma separated_by_wall_ex_foldpair:
assumes "H\<in>walls" "separated_by H C D"
shows "\<exists>(f,g)\<in>foldpairs. H = {f\<turnstile>\<C>,g\<turnstile>\<C>} \<and> C\<in>f\<turnstile>\<C> \<and> D\<in>g\<turnstile>\<C>"
proof-
from assms(1) obtain f g where fg: "(f,g)\<in>foldpairs" "H = {f\<turnstile>\<C>,g\<turnstile>\<C>}" by auto
show ?thesis
proof (cases "C\<in>f\<turnstile>\<C>")
case True
moreover with fg assms(2) have "D\<in>g\<turnstile>\<C>"
using foldpairs_def
OpposedThinChamberComplexFoldings.separated_by_this_wall_fg[
of X f g _ C D
]
by auto
ultimately show ?thesis using fg by auto
next
case False with assms(2) fg show ?thesis
using foldpairs_sym[of f g] separated_by_in_other[of "f\<turnstile>\<C>" "g\<turnstile>\<C>" C D] by auto
qed
qed
lemma not_separated_by_wall_ex_foldpair:
assumes chambers: "chamber C" "chamber D"
and wall : "H\<in>walls" "\<not> separated_by H C D"
shows "\<exists>(f,g)\<in>foldpairs. H = {f\<turnstile>\<C>,g\<turnstile>\<C>} \<and> C\<in>f\<turnstile>\<C> \<and> D\<in>f\<turnstile>\<C>"
proof-
from wall(1) obtain f g where fg: "(f,g)\<in>foldpairs" "H = {f\<turnstile>\<C>,g\<turnstile>\<C>}" by auto
from fg(1) obtain A where A: "OpposedThinChamberComplexFoldings X f g A"
using foldpairs_def by fast
from chambers have chambers': "C\<in>f\<turnstile>\<C> \<or> C\<in>g\<turnstile>\<C>" "D\<in>f\<turnstile>\<C> \<or> D\<in>g\<turnstile>\<C>"
using chamber_system_def
OpposedThinChamberComplexFoldings.halfchsys_decomp(1)[
OF A
]
by auto
show ?thesis
proof (cases "C\<in>f\<turnstile>\<C>")
case True
moreover with chambers'(2) fg(2) wall(2) have "D\<in>f\<turnstile>\<C>"
unfolding separated_by_def by auto
ultimately show ?thesis using fg by auto
next
case False
with chambers'(1) have "C\<in>g\<turnstile>\<C>" by simp
moreover with chambers'(2) fg(2) wall(2) have "D\<in>g\<turnstile>\<C>"
using insert_commute[of "f\<turnstile>\<C>" "g\<turnstile>\<C>" "{}"] unfolding separated_by_def by auto
ultimately show ?thesis using fg foldpairs_sym[of f g] by auto
qed
qed
lemma adj_wall_imp_ex1_wall:
assumes adj : "C\<sim>D"
and wall: "H0\<in>walls" "separated_by H0 C D"
shows "\<exists>!H\<in>walls. separated_by H C D"
proof (rule ex1I, rule conjI, rule wall(1), rule wall(2))
fix H assume H: "H\<in>walls \<and> separated_by H C D"
from this obtain f g
where fg: "(f,g)\<in>foldpairs" "H={f\<turnstile>\<C>,g\<turnstile>\<C>}" "C\<in>f\<turnstile>\<C>" "D\<in>g\<turnstile>\<C>"
using separated_by_wall_ex_foldpair[of H C D]
by auto
from wall obtain f0 g0
where f0g0: "(f0,g0)\<in>foldpairs" "H0={f0\<turnstile>\<C>,g0\<turnstile>\<C>}" "C\<in>f0\<turnstile>\<C>" "D\<in>g0\<turnstile>\<C>"
using separated_by_wall_ex_foldpair[of H0 C D]
by auto
from fg(1) f0g0(1) obtain A A0
where A : "OpposedThinChamberComplexFoldings X f g A"
and A0: "OpposedThinChamberComplexFoldings X f0 g0 A0"
using foldpairs_def
by auto
from fg(2-4) f0g0(2-4) adj show "H = H0"
using OpposedThinChamberComplexFoldings.unique_wall[OF A0 A] by auto
qed
end (* context ThinChamberComplex *)
context OpposedThinChamberComplexFoldings
begin
lemma this_wall_betwI:
assumes "C\<in>f\<turnstile>\<C>" "D\<in>g\<turnstile>\<C>" "C\<sim>D"
shows "the_wall_betw C D = {f\<turnstile>\<C>,g\<turnstile>\<C>}"
proof (rule THE_default1_equality, rule adj_wall_imp_ex1_wall)
have "OpposedThinChamberComplexFoldings X f g C0" ..
thus "{f\<turnstile>\<C>,g\<turnstile>\<C>}\<in>walls" using foldpairs_def by auto
moreover from assms(1,2) show "separated_by {f\<turnstile>\<C>,g\<turnstile>\<C>} C D"
by (auto intro: separated_byI)
ultimately show "{f\<turnstile>\<C>,g\<turnstile>\<C>}\<in>walls \<and> separated_by {f\<turnstile>\<C>,g\<turnstile>\<C>} C D" by simp
qed (rule assms(3))
lemma this_wall_betw_basechambers:
"the_wall_betw C0 D0 = {f\<turnstile>\<C>,g\<turnstile>\<C>}"
using basechambers_half_chamber_systems chambers(2) this_wall_betwI by auto
lemma this_wall_in_crossingsI_fg:
defines H: "H \<equiv> {f\<turnstile>\<C>,g\<turnstile>\<C>}"
assumes D: "D\<in>g\<turnstile>\<C>"
shows "C\<in>f\<turnstile>\<C> \<Longrightarrow> gallery (C#Cs@[D]) \<Longrightarrow> H \<in> set (wall_crossings (C#Cs@[D]))"
proof (induct Cs arbitrary: C)
case Nil
from Nil(1) assms have "H\<in>walls" "separated_by H C D"
using foldpair by (auto intro: separated_byI)
thus ?case
using galleryD_adj[OF Nil(2)]
THE_default1_equality[OF adj_wall_imp_ex1_wall]
by auto
next
case (Cons B Bs)
show ?case
proof (cases "B\<in>f\<turnstile>\<C>")
case True with Cons(1,3) show ?thesis using gallery_Cons_reduce by simp
next
case False
with Cons(2,3) H have "H\<in>walls" "separated_by H C B"
using galleryD_chamber[OF Cons(3)] chamber_in_other_half_fg[of B] foldpair
by (auto intro: separated_byI)
thus ?thesis
using galleryD_adj[OF Cons(3)]
THE_default1_equality[OF adj_wall_imp_ex1_wall]
by auto
qed
qed
end (* context OpposedThinChamberComplexFoldings *)
lemma (in ThinChamberComplex) walls_betw_subset_wall_crossings:
assumes "gallery (C#Cs@[D])"
shows "walls_betw C D \<subseteq> set (wall_crossings (C#Cs@[D]))"
proof
fix H assume "H \<in> walls_betw C D"
hence H: "H\<in>walls" "separated_by H C D" using walls_betw_def by auto
from this obtain f g
where fg: "(f,g)\<in>foldpairs" "H = {f\<turnstile>\<C>,g\<turnstile>\<C>}" "C\<in>f\<turnstile>\<C>" "D\<in>g\<turnstile>\<C>"
using separated_by_wall_ex_foldpair[of H C D]
by auto
from fg(1) obtain Z where Z: "OpposedThinChamberComplexFoldings X f g Z"
using foldpairs_def by fast
from assms H(2) fg(2-4) show "H \<in> set (wall_crossings (C#Cs@[D]))"
using OpposedThinChamberComplexFoldings.this_wall_in_crossingsI_fg[OF Z]
by auto
qed
context OpposedThinChamberComplexFoldings
begin
lemma same_side_this_wall_wall_crossings_not_distinct_f:
"gallery (C#Cs@[D]) \<Longrightarrow> C\<in>f\<turnstile>\<C> \<Longrightarrow> D\<in>f\<turnstile>\<C> \<Longrightarrow>
{f\<turnstile>\<C>,g\<turnstile>\<C>}\<in>set (wall_crossings (C#Cs@[D])) \<Longrightarrow>
\<not> distinct (wall_crossings (C#Cs@[D]))"
proof (induct Cs arbitrary: C)
case Nil
hence "{f\<turnstile>\<C>,g\<turnstile>\<C>} = the_wall_betw C D" by simp
moreover hence "the_wall_betw C D \<noteq> {}" by fast
ultimately show ?case
using Nil(2,3) the_wall_betw_nempty(2) separated_by_this_wall_fg[of C D]
half_chamber_system_disjoint_union(2)
by auto
next
case (Cons E Es)
show ?case
proof
assume 1: "distinct (wall_crossings (C # (E # Es) @ [D]))"
show False
proof (
cases "E\<in>f\<turnstile>\<C>" "{f\<turnstile>\<C>,g\<turnstile>\<C>} \<in> set (wall_crossings (E#Es@[D]))"
rule: two_cases
)
case both with Cons(1,2,4) 1 show False
using gallery_Cons_reduce by simp
next
case one
from one(2) Cons(5) have "{f\<turnstile>\<C>,g\<turnstile>\<C>} = the_wall_betw C E" by simp
moreover hence "the_wall_betw C E \<noteq> {}" by fast
ultimately show False
using Cons(3) one(1) the_wall_betw_nempty(2)
separated_by_this_wall_fg[of C E]
half_chamber_system_disjoint_union(2)
by auto
next
case other with Cons(3) show False
using 1 galleryD_chamber[OF Cons(2)] galleryD_adj[OF Cons(2)]
chamber_in_other_half_fg this_wall_betwI
by force
next
case neither
from Cons(2) neither(1) have "E\<in>g\<turnstile>\<C>"
using galleryD_chamber chamber_in_other_half_fg by auto
with Cons(4) have "separated_by {g\<turnstile>\<C>,f\<turnstile>\<C>} E D"
by (blast intro: separated_byI)
hence "{f\<turnstile>\<C>,g\<turnstile>\<C>} \<in> walls_betw E D"
using foldpair walls_betw_def by (auto simp add: insert_commute)
with neither(2) show False
using gallery_Cons_reduce[OF Cons(2)] walls_betw_subset_wall_crossings
by auto
qed
qed
qed
lemmas sside_wcrossings_ndistinct_f =
same_side_this_wall_wall_crossings_not_distinct_f
lemma separated_by_this_wall_chain3_fg:
assumes "B\<in>f\<turnstile>\<C>" "chamber C" "chamber D"
"separated_by {f\<turnstile>\<C>,g\<turnstile>\<C>} B C" "separated_by {f\<turnstile>\<C>,g\<turnstile>\<C>} C D"
shows "C\<in>g\<turnstile>\<C>" "D\<in>f\<turnstile>\<C>"
using assms separated_by_this_wall_fg separated_by_this_wall_gf
by (auto simp add: insert_commute)
lemmas sepwall_chain3_fg =
separated_by_this_wall_chain3_fg
end (* context OpposedThinChamberComplexFoldings *)
context ThinChamberComplex
begin
lemma wall_crossings_min_gallery_betwI:
assumes "gallery (C#Cs@[D])"
"distinct (wall_crossings (C#Cs@[D]))"
"\<forall>H\<in>set (wall_crossings (C#Cs@[D])). separated_by H C D"
shows "min_gallery (C#Cs@[D])"
proof (rule min_galleryI_betw)
obtain B Bs where BBs: "Cs@[D] = B#Bs" using snoc_conv_cons by fast
define H where "H = the_wall_betw C B"
with BBs assms(3) have 1: "separated_by H C D" by simp
show "C\<noteq>D"
proof (cases "H={}")
case True thus ?thesis
using 1 unfolding separated_by_def by simp
next
case False
with H_def have "H \<in> walls" using the_wall_betw_nempty(1) by simp
from this obtain f g
where fg: "(f,g)\<in>foldpairs" "H = {f\<turnstile>\<C>,g\<turnstile>\<C>}" "C\<in>f\<turnstile>\<C>" "D\<in>g\<turnstile>\<C>"
using 1 separated_by_wall_ex_foldpair[of H C D]
by auto
thus ?thesis
using foldpairs_def
OpposedThinChamberComplexFoldings.halfchsys_decomp(2)[
of X f g
]
by auto
qed
next
fix Ds assume Ds: "gallery (C # Ds @ [D])"
have "Suc (length Cs) = card (walls_betw C D)"
proof-
from assms(1,3) have "set (wall_crossings (C#Cs@[D])) = walls_betw C D"
using separated_by_not_empty wall_crossings_are_walls[of _ "C#Cs@[D]"]
walls_betw_def
walls_betw_subset_wall_crossings[OF assms(1)]
unfolding separated_by_def
by auto
with assms(2) show ?thesis
using distinct_card[THEN sym] length_wall_crossings by fastforce
qed
moreover have "card (walls_betw C D) \<le> Suc (length Ds)"
proof-
from Ds have "card (walls_betw C D) \<le> card (set (wall_crossings (C#Ds@[D])))"
using walls_betw_subset_wall_crossings finite_set card_mono by force
also have "\<dots> \<le> length (wall_crossings (C#Ds@[D]))"
using card_length by auto
finally show ?thesis using length_wall_crossings by simp
qed
ultimately show "length Cs \<le> length Ds" by simp
qed (rule assms(1))
lemma ex_nonseparating_wall_imp_wall_crossings_not_distinct:
assumes gal : "gallery (C#Cs@[D])"
and wall: "H\<in>set (wall_crossings (C#Cs@[D]))" "H\<noteq>{}"
"\<not> separated_by H C D"
shows "\<not> distinct (wall_crossings (C#Cs@[D]))"
proof-
from assms obtain f g
where fg: "(f,g)\<in>foldpairs" "H = {f\<turnstile>\<C>,g\<turnstile>\<C>}" "C\<in>f\<turnstile>\<C>" "D\<in>f\<turnstile>\<C>"
using wall_crossings_are_walls[of H]
not_separated_by_wall_ex_foldpair[of C D H]
galleryD_chamber
by auto
from fg(1) obtain Z where Z: "OpposedThinChamberComplexFoldings X f g Z"
using foldpairs_def by fast
from wall fg(2-4) show ?thesis
using OpposedThinChamberComplexFoldings.sside_wcrossings_ndistinct_f [
OF Z gal
]
by blast
qed
lemma not_min_gallery_double_crosses_wall:
assumes "gallery Cs" "\<not> min_gallery Cs" "{} \<notin> set (wall_crossings Cs)"
shows "\<not> distinct (wall_crossings Cs)"
proof (cases Cs rule: list_cases_Cons_snoc)
case Nil with assms(2) show ?thesis by simp
next
case Single with assms(1,2) show ?thesis using galleryD_chamber by simp
next
case (Cons_snoc B Bs C)
show ?thesis
proof (cases "B=C")
case True show ?thesis
proof (cases Bs)
case Nil with True Cons_snoc assms(3) show ?thesis
using the_wall_betw_self_empty by simp
next
case (Cons E Es)
define H where "H = the_wall_betw B E"
with Cons have *: "H \<in> set (wall_crossings (B#Bs@[C]))" by simp
moreover from assms(3) Cons_snoc * have "H \<noteq> {}" by fast
ultimately show ?thesis
using assms(1) Cons_snoc Cons True H_def
the_wall_betw_nempty(1)[of B E] not_self_separated_by_wall[of H B]
ex_nonseparating_wall_imp_wall_crossings_not_distinct[of B Bs C H]
by fast
qed
next
case False
with assms Cons_snoc
have 1: "\<not> distinct (wall_crossings Cs) \<or>
\<not> (\<forall>H\<in>set (wall_crossings Cs). separated_by H B C)"
using wall_crossings_min_gallery_betwI
by force
moreover {
assume "\<not> (\<forall>H\<in>set (wall_crossings Cs). separated_by H B C)"
from this obtain H
where H: "H\<in>set (wall_crossings Cs)" "\<not> separated_by H B C"
by auto
moreover from H(1) assms(3) have "H\<noteq>{}" by fast
ultimately have ?thesis
using assms(1) Cons_snoc
ex_nonseparating_wall_imp_wall_crossings_not_distinct
by simp
}
ultimately show ?thesis by fast
qed
qed
lemma not_distinct_crossings_split_gallery:
"\<lbrakk> gallery Cs; {} \<notin> set (wall_crossings Cs); \<not> distinct (wall_crossings Cs) \<rbrakk> \<Longrightarrow>
\<exists>f g As A B Bs E F Fs.
(f,g)\<in>foldpairs \<and> A\<in>f\<turnstile>\<C> \<and> B\<in>g\<turnstile>\<C> \<and> E\<in>g\<turnstile>\<C> \<and> F\<in>f\<turnstile>\<C> \<and>
( Cs = As@[A,B,F]@Fs \<or> Cs = As@[A,B]@Bs@[E,F]@Fs )"
proof (induct Cs rule: list_induct_CCons)
case (CCons C J Js)
show ?case
proof (cases "distinct (wall_crossings (J#Js))")
case False
moreover from CCons(2) have "gallery (J#Js)"
using gallery_Cons_reduce by simp
moreover from CCons(3) have "{} \<notin> set (wall_crossings (J#Js))" by simp
ultimately obtain f g As A B Bs E F Fs where split:
"(f,g)\<in>foldpairs" "A\<in>f\<turnstile>\<C>" "B\<in>g\<turnstile>\<C>" "E\<in>g\<turnstile>\<C>" "F\<in>f\<turnstile>\<C>"
"J#Js = As@[A,B,F]@Fs \<or> J#Js = As@[A,B]@Bs@[E,F]@Fs"
using CCons(1)
by blast
from split(6)
have "C#J#Js = (C#As)@[A,B,F]@Fs \<or>
C#J#Js = (C#As)@[A,B]@Bs@[E,F]@Fs"
by simp
with split(1-5) show ?thesis by blast
next
case True
define H where "H = the_wall_betw C J"
with True CCons(4) have "H\<in>set (wall_crossings (J#Js))" by simp
from this obtain Bs E F Fs
where split1: "J#Js = Bs@[E,F]@Fs" "H = the_wall_betw E F"
using in_set_wall_crossings_decomp
by fast
from H_def split1(2) CCons(3)
have Hwall: "H \<in> walls" "separated_by H C J" "separated_by H E F"
using the_wall_betw_nempty[of C J] the_wall_betw_nempty[of E F]
by auto
from Hwall(1,2) obtain f g
where fg: "(f,g)\<in>foldpairs" "H={f\<turnstile>\<C>,g\<turnstile>\<C>}" "C\<in>f\<turnstile>\<C>" "J\<in>g\<turnstile>\<C>"
using separated_by_wall_ex_foldpair[of H C J]
by auto
from fg(1) obtain Z
where Z: "OpposedThinChamberComplexFoldings X f g Z"
using foldpairs_def
by fast
show ?thesis
proof (cases Bs)
case Nil
with CCons(2) Hwall(2,3) fg(2-4) split1(1)
have "F\<in>f\<turnstile>\<C>" "C#J#Js = []@[C,J,F]@Fs"
using galleryD_chamber
OpposedThinChamberComplexFoldings.sepwall_chain3_fg(2)[
OF Z, of C J F
]
by auto
with fg(1,3,4) show ?thesis by blast
next
case (Cons K Ks) have Bs: "Bs = K#Ks" by fact
show ?thesis
proof (cases "E\<in>f\<turnstile>\<C>")
case True
from CCons(2) split1(1) Bs have "gallery (J#Ks@[E])"
using gallery_Cons_reduce[of C "J#Ks@E#F#Fs"]
gallery_append_reduce1[of "J#Ks@[E]" "F#Fs"]
by simp
with fg(4) True obtain Ls L M Ms
where LsLMMs: "L\<in>g\<turnstile>\<C>" "M\<in>f\<turnstile>\<C>" "J#Ks@[E] = Ls@L#M#Ms"
using OpposedThinChamberComplexFoldings.split_gallery_gf[
OF Z, of J E Ks
]
by blast
show ?thesis
proof (cases Ls)
case Nil
with split1(1) Bs LsLMMs(3)
have "C#J#Js = []@[C,J,M]@(Ms@F#Fs)"
by simp
with fg(1,3,4) LsLMMs(2) show ?thesis by blast
next
case (Cons N Ns)
with split1(1) Bs LsLMMs(3)
have "C#J#Js = []@[C,J]@Ns@[L,M]@(Ms@F#Fs)"
by simp
with fg(1,3,4) LsLMMs(1,2) show ?thesis by blast
qed
next
case False
with Hwall(2,3) fg(2) split1(1) Cons
have "E\<in>g\<turnstile>\<C>" "F\<in>f\<turnstile>\<C>" "C#J#Js = []@[C,J]@Ks@[E,F]@Fs"
using OpposedThinChamberComplexFoldings.separated_by_this_wall_fg[
OF Z
]
separated_by_in_other[of "f\<turnstile>\<C>" "g\<turnstile>\<C>"]
by auto
with fg(1,3,4) show ?thesis by blast
qed
qed
qed
qed auto
lemma not_min_gallery_double_split:
"\<lbrakk> gallery Cs; \<not> min_gallery Cs; {} \<notin> set (wall_crossings Cs) \<rbrakk> \<Longrightarrow>
\<exists>f g As A B Bs E F Fs.
(f,g)\<in>foldpairs \<and> A\<in>f\<turnstile>\<C> \<and> B\<in>g\<turnstile>\<C> \<and> E\<in>g\<turnstile>\<C> \<and> F\<in>f\<turnstile>\<C> \<and>
( Cs = As@[A,B,F]@Fs \<or> Cs = As@[A,B]@Bs@[E,F]@Fs )"
using not_min_gallery_double_crosses_wall not_distinct_crossings_split_gallery
by simp
end (* context ThinChamberComplex *)
subsection \<open>Thin chamber complexes with many foldings\<close>
text \<open>
Here we begin to examine thin chamber complexes in which every pair of adjacent chambers affords a
pair of opposed foldings of the complex. This condition will ultimately be shown to be sufficient
to ensure that a thin chamber complex is isomorphic to some Coxeter complex.
\<close>
subsubsection \<open>Locale definition and basic facts\<close>
locale ThinChamberComplexManyFoldings = ThinChamberComplex X
for X :: "'a set set"
+ fixes C0 :: "'a set"
assumes fundchamber: "chamber C0"
and ex_walls :
"\<lbrakk> chamber C; chamber D; C\<sim>D; C\<noteq>D \<rbrakk> \<Longrightarrow>
\<exists>f g. OpposedThinChamberComplexFoldings X f g C \<and> D=g`C"
lemma (in ThinChamberComplex) ThinChamberComplexManyFoldingsI:
assumes "chamber C0"
and "\<And>C D. \<lbrakk> chamber C; chamber D; C\<sim>D; C\<noteq>D \<rbrakk> \<Longrightarrow>
\<exists>f g. OpposedThinChamberComplexFoldings X f g C \<and> D=g`C"
shows "ThinChamberComplexManyFoldings X C0"
using assms
by (intro_locales, unfold_locales, fast)
lemma (in ThinChamberComplexManyFoldings) wall_crossings_subset_walls_betw:
assumes "min_gallery (C#Cs@[D])"
shows "set (wall_crossings (C#Cs@[D])) \<subseteq> walls_betw C D"
proof
fix H assume "H \<in> set (wall_crossings (C#Cs@[D]))"
from this obtain As A B Bs
where H: "C#Cs@[D] = As@[A,B]@Bs" "H=the_wall_betw A B"
using in_set_wall_crossings_decomp
by blast
from assms have pgal: "pgallery (C#Cs@[D])"
using min_gallery_pgallery by fast
with H(1) obtain f g
where fg: "OpposedThinChamberComplexFoldings X f g A" "B=g`A"
using pgalleryD_chamber pgalleryD_adj
binrelchain_append_reduce2[of adjacent As "[A,B]@Bs"]
pgalleryD_distinct[of "As@[A,B]@Bs"] ex_walls[of A B]
by auto
from H(2) fg have H': "A\<in>f\<turnstile>\<C>" "B\<in>g\<turnstile>\<C>" "H = {f\<turnstile>\<C>,g\<turnstile>\<C>}" "H\<in>walls"
using OpposedThinChamberComplexFoldings.basech_halfchsys[
OF fg(1)
]
OpposedThinChamberComplexFoldings.chambers(2)[OF fg(1)]
OpposedThinChamberComplexFoldings.this_wall_betwI[OF fg(1)]
foldpairs_def
by auto
have CD: "C \<in> f\<turnstile>\<C> \<union> g\<turnstile>\<C>" "D \<in> f\<turnstile>\<C> \<union> g\<turnstile>\<C>"
using pgal pgalleryD_chamber chamber_system_def
OpposedThinChamberComplexFoldings.halfchsys_decomp(1)[
OF fg(1)
]
by auto
show "H \<in> walls_betw C D"
proof (cases Bs As rule: two_lists_cases_snoc_Cons')
case both_Nil with H show ?thesis
using H'(3) the_wall_betw_nempty[of A B] unfolding walls_betw_def by force
next
case (Nil1 E Es)
show ?thesis
proof (cases "C\<in>f\<turnstile>\<C>")
case True
with Nil1 H(1) have "separated_by H C D"
using H'(2,3) by (auto intro: separated_byI)
thus ?thesis using H'(4) unfolding walls_betw_def by simp
next
case False with assms Nil1 H(1) show ?thesis
using OpposedThinChamberComplexFoldings.foldg[
OF fg(1)
]
CD(1) H'(1,2) pgal pgallery
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_gf[
OF fg(1)
]
ThinChamberComplexFolding.gallery_double_cross_not_minimal1[
of X g E A B Es "[]"
]
by force
qed
next
case (Nil2 Fs F)
show ?thesis
proof (cases "D\<in>f\<turnstile>\<C>")
case True
with assms Nil2 H(1) show ?thesis
using OpposedThinChamberComplexFoldings.foldf[
OF fg(1)
]
H'(1,2) pgal pgallery
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_fg[
OF fg(1)
]
ThinChamberComplexFolding.gallery_double_cross_not_minimal_Cons1[
of X f
]
by force
next
case False with Nil2 H(1) have "separated_by H C D"
using CD(2) H'(1,3) by (auto intro: separated_byI)
thus ?thesis using H'(4) unfolding walls_betw_def by simp
qed
next
case (snoc_Cons Fs F E Es) show ?thesis
proof (cases "C\<in>f\<turnstile>\<C>" "D\<in>g\<turnstile>\<C>" rule: two_cases)
case both thus ?thesis
using H'(3,4) walls_betw_def unfolding separated_by_def by auto
next
case one
with snoc_Cons assms H(1) show ?thesis
using OpposedThinChamberComplexFoldings.foldf[
OF fg(1)
]
CD(2) H'(2) pgal pgallery
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_fg[
OF fg(1)
]
ThinChamberComplexFolding.gallery_double_cross_not_minimal1[
of X f C B D "Es@[A]"
]
by fastforce
next
case other
with snoc_Cons assms H(1) show ?thesis
using OpposedThinChamberComplexFoldings.ThinChamberComplexFolding_g[
OF fg(1)
]
CD(1) H'(1) pgal pgallery
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_gf[
OF fg(1)
]
ThinChamberComplexFolding.gallery_double_cross_not_minimal1[
of X g E A F Es "B#Fs"
]
by force
next
case neither
hence "separated_by {g\<turnstile>\<C>,f\<turnstile>\<C>} C D" using CD by (auto intro: separated_byI)
thus ?thesis
using H'(3,4) walls_betw_def by (auto simp add: insert_commute)
qed
qed
qed
subsubsection \<open>The group of automorphisms\<close>
text \<open>
Recall that a pair of opposed foldings of a thin chamber complex can be stitched together to form
an automorphism of the complex. Choosing an arbitrary chamber to act as a sort of centre of the
complex (referred to as the fundamental chamber), we consider the group (under composition)
generated by the automorphisms afforded by the chambers adjacent to the fundamental chamber via
the pairs of opposed foldings that we have assumed to exist.
\<close>
context ThinChamberComplexManyFoldings
begin
definition fundfoldpairs :: "(('a\<Rightarrow>'a)\<times>('a\<Rightarrow>'a)) set"
where "fundfoldpairs \<equiv> {(f,g). OpposedThinChamberComplexFoldings X f g C0}"
abbreviation "fundadjset \<equiv> adjacentset C0 - {C0}"
abbreviation induced_automorph :: "('a\<Rightarrow>'a) \<Rightarrow> ('a\<Rightarrow>'a) \<Rightarrow> ('a\<Rightarrow>'a)"
where "induced_automorph f g \<equiv>
OpposedThinChamberComplexFoldings.induced_automorphism X f g"
abbreviation Abs_induced_automorph :: "('a\<Rightarrow>'a) \<Rightarrow> ('a\<Rightarrow>'a) \<Rightarrow> 'a permutation"
where "Abs_induced_automorph f g \<equiv> Abs_permutation (induced_automorph f g)"
abbreviation "S \<equiv> \<Union>(f,g)\<in>fundfoldpairs. {Abs_induced_automorph f g}"
abbreviation "W \<equiv> \<langle>S\<rangle>"
lemma fundfoldpairs_induced_autormorph_bij:
"(f,g) \<in> fundfoldpairs \<Longrightarrow> bij (induced_automorph f g)"
using OpposedThinChamberComplexFoldings.induced_automorphism_bij
unfolding fundfoldpairs_def
by fast
lemmas permutation_conv_induced_automorph =
Abs_permutation_inverse[OF CollectI, OF fundfoldpairs_induced_autormorph_bij]
lemma fundfoldpairs_induced_autormorph_ntrivial:
"(f,g) \<in> fundfoldpairs \<Longrightarrow> induced_automorph f g \<noteq> id"
using OpposedThinChamberComplexFoldings.induced_automorphism_ntrivial
unfolding fundfoldpairs_def
by fast
lemma fundfoldpairs_fundchamber_image:
"(f,g)\<in>fundfoldpairs \<Longrightarrow> Abs_induced_automorph f g `\<rightarrow> C0 = g`C0"
using fundfoldpairs_def
by (simp add:
permutation_conv_induced_automorph
OpposedThinChamberComplexFoldings.induced_automorphism_C0
)
lemma fundfoldpair_fundchamber_in_half_chamber_system_f:
"(f,g)\<in>fundfoldpairs \<Longrightarrow> C0\<in>f\<turnstile>\<C>"
using fundfoldpairs_def
OpposedThinChamberComplexFoldings.basech_halfchsys(1)
by fast
lemma fundfoldpair_unique_half_chamber_system_f:
assumes "(f,g)\<in>fundfoldpairs" "(f',g')\<in>fundfoldpairs"
"Abs_induced_automorph f' g' = Abs_induced_automorph f g"
shows "f'\<turnstile>\<C> = f\<turnstile>\<C>"
proof-
from assms have "g'`C0 = g`C0"
using fundfoldpairs_fundchamber_image[OF assms(1)]
fundfoldpairs_fundchamber_image[OF assms(2)]
by simp
with assms show "f'\<turnstile>\<C> = f\<turnstile>\<C>"
using fundfoldpairs_def
OpposedThinChamberComplexFoldings.unique_half_chamber_system_f[
of X f g C0 f' g'
]
by auto
qed
lemma fundfoldpair_unique_half_chamber_systems_chamber_ng_f:
assumes "(f,g)\<in>fundfoldpairs" "(f',g')\<in>fundfoldpairs"
"Abs_induced_automorph f' g' = Abs_induced_automorph f g"
"chamber C" "C\<notin>g\<turnstile>\<C>"
shows "C\<in>f'\<turnstile>\<C>"
using assms(1,3-5) fundfoldpairs_def chamber_system_def
OpposedThinChamberComplexFoldings.flopped_half_chamber_systems_gf[
THEN sym
]
fundfoldpair_unique_half_chamber_system_f[OF assms(1,2)]
by fastforce
lemma the_wall_betw_adj_fundchamber:
"(f,g)\<in>fundfoldpairs \<Longrightarrow>
the_wall_betw C0 (Abs_induced_automorph f g `\<rightarrow> C0) = {f\<turnstile>\<C>,g\<turnstile>\<C>}"
using fundfoldpairs_def
OpposedThinChamberComplexFoldings.this_wall_betw_basechambers
OpposedThinChamberComplexFoldings.induced_automorphism_C0
by (fastforce simp add: permutation_conv_induced_automorph)
lemma zero_notin_S: "0\<notin>S"
proof
assume "0\<in>S"
from this obtain f g
where "(f,g)\<in>fundfoldpairs" "0 = Abs_induced_automorph f g"
by fast
thus False
using Abs_permutation_inject[of id "induced_automorph f g"]
bij_id fundfoldpairs_induced_autormorph_bij
fundfoldpairs_induced_autormorph_ntrivial
by (force simp add: zero_permutation.abs_eq)
qed
lemma S_order2_add: "s\<in>S \<Longrightarrow> s + s = 0"
using fundfoldpairs_induced_autormorph_bij zero_permutation.abs_eq
by (fastforce simp add:
plus_permutation_abs_eq fundfoldpairs_induced_autormorph_order2
)
lemma S_add_order2:
assumes "s\<in>S"
shows "add_order s = 2"
proof (rule add_order_equality)
from assms show "s+^2 = 0" using S_order2_add by (simp add: nataction_2)
next
fix m assume "0 < m" "s+^m = 0"
with assms show "2 \<le> m" using zero_notin_S by (cases "m=1") auto
qed simp
lemmas S_uminus = minus_unique[OF S_order2_add]
lemma S_sym: "uminus ` S \<subseteq> S"
using S_uminus by auto
lemmas sum_list_S_in_W = sum_list_lists_in_genby_sym[OF S_sym]
lemmas W_conv_sum_lists = genby_sym_eq_sum_lists[OF S_sym]
lemma S_endomorphism:
"s\<in>S \<Longrightarrow> ChamberComplexEndomorphism X (permutation s)"
using fundfoldpairs_def
OpposedThinChamberComplexFoldings.induced_automorphism_morphism
by (fastforce simp add: permutation_conv_induced_automorph)
lemma S_list_endomorphism:
"ss\<in>lists S \<Longrightarrow> ChamberComplexEndomorphism X (permutation (sum_list ss))"
by (induct ss)
(auto simp add:
zero_permutation.rep_eq trivial_endomorphism plus_permutation.rep_eq
S_endomorphism ChamberComplexEndomorphism.endo_comp
)
lemma W_endomorphism:
"w\<in>W \<Longrightarrow> ChamberComplexEndomorphism X (permutation w)"
using W_conv_sum_lists S_list_endomorphism by auto
lemma S_automorphism:
"s\<in>S \<Longrightarrow> ChamberComplexAutomorphism X (permutation s)"
using fundfoldpairs_def
OpposedThinChamberComplexFoldings.induced_automorphism_automorphism
by (fastforce simp add: permutation_conv_induced_automorph)
lemma S_list_automorphism:
"ss\<in>lists S \<Longrightarrow> ChamberComplexAutomorphism X (permutation (sum_list ss))"
by (induct ss)
(auto simp add:
zero_permutation.rep_eq trivial_automorphism plus_permutation.rep_eq
S_automorphism ChamberComplexAutomorphism.comp
)
lemma W_automorphism:
"w\<in>W \<Longrightarrow> ChamberComplexAutomorphism X (permutation w)"
using W_conv_sum_lists S_list_automorphism by auto
lemma S_respects_labels: "\<lbrakk> label_wrt B \<phi>; s\<in>S; v\<in>(\<Union>X) \<rbrakk> \<Longrightarrow> \<phi> (s \<rightarrow> v) = \<phi> v"
using fundfoldpairs_def
OpposedThinChamberComplexFoldings.indaut_resplabels[
of X _ _ C0 B \<phi> v
]
by (auto simp add: permutation_conv_induced_automorph)
lemma S_list_respects_labels:
"\<lbrakk> label_wrt B \<phi>; ss\<in>lists S; v\<in>(\<Union>X) \<rbrakk> \<Longrightarrow> \<phi> (sum_list ss \<rightarrow> v) = \<phi> v"
using S_endomorphism ChamberComplexEndomorphism.vertex_map[of X]
by (induct ss arbitrary: v rule: rev_induct)
(auto simp add:
plus_permutation.rep_eq S_respects_labels zero_permutation.rep_eq
)
lemma W_respects_labels:
"\<lbrakk> label_wrt B \<phi>; w\<in>W; v\<in>(\<Union>X) \<rbrakk> \<Longrightarrow> \<phi> (w\<rightarrow>v) = \<phi> v"
using W_conv_sum_lists S_list_respects_labels[of B \<phi> _ v] by auto
end (* context ThinChamberComplexManyFoldings *)
subsubsection \<open>Action of the group of automorphisms on the chamber system\<close>
text \<open>
Now we examine the action of the group @{term W} on the chamber system. In particular, we show
that the action is transitive.
\<close>
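
text \<open>
  The key step is @{text pgallery_last_eq_W_image} below, which proceeds by induction along a
  gallery starting at the fundamental chamber: each successive chamber is adjacent to the image
  of the fundamental chamber under the group element accumulated so far, and so is reached by
  appending a single generator to that element. Together with connectedness of the chamber
  complex (@{text chamber_pconnect}), this yields transitivity of the action.
\<close>
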
context ThinChamberComplexManyFoldings
begin
lemma fundchamber_S_chamber: "s\<in>S \<Longrightarrow> chamber (s`\<rightarrow>C0)"
using fundfoldpairs_def
by (fastforce simp add:
fundfoldpairs_fundchamber_image
OpposedThinChamberComplexFoldings.chamber_D0
)
lemma fundchamber_W_image_chamber:
"w\<in>W \<Longrightarrow> chamber (w`\<rightarrow>C0)"
using fundchamber W_endomorphism
ChamberComplexEndomorphism.chamber_map
by auto
lemma fundchamber_S_adjacent: "s\<in>S \<Longrightarrow> C0 \<sim> (s`\<rightarrow>C0)"
using fundfoldpairs_def
by (auto simp add:
fundfoldpairs_fundchamber_image
OpposedThinChamberComplexFoldings.chambers(2)
)
lemma fundchamber_WS_image_adjacent:
"w\<in>W \<Longrightarrow> s\<in>S \<Longrightarrow> (w`\<rightarrow>C0) \<sim> ((w+s)`\<rightarrow>C0)"
using fundchamber fundchamber_S_adjacent fundchamber_S_chamber
W_endomorphism
ChamberComplexEndomorphism.adj_map[of X "permutation w" C0 "s`\<rightarrow>C0"]
by (auto simp add: image_comp plus_permutation.rep_eq)
lemma fundchamber_S_image_neq_fundchamber: "s\<in>S \<Longrightarrow> s`\<rightarrow>C0 \<noteq> C0"
using fundfoldpairs_def OpposedThinChamberComplexFoldings.chambers(3)
by (fastforce simp add: fundfoldpairs_fundchamber_image)
lemma fundchamber_next_WS_image_neq:
assumes "s\<in>S"
shows "(w+s) `\<rightarrow> C0 \<noteq> w `\<rightarrow> C0"
proof
assume "(w+s) `\<rightarrow> C0 = w `\<rightarrow> C0"
with assms show False
using fundchamber_S_image_neq_fundchamber[of s]
by (auto simp add: plus_permutation.rep_eq image_comp permutation_eq_image)
qed
lemma fundchamber_S_fundadjset: "s\<in>S \<Longrightarrow> s`\<rightarrow>C0 \<in> fundadjset"
using fundchamber_S_adjacent fundchamber_S_image_neq_fundchamber
fundchamber_S_chamber chamberD_simplex adjacentset_def
by simp
lemma fundadjset_eq_S_image: "D\<in>fundadjset \<Longrightarrow> \<exists>s\<in>S. D = s`\<rightarrow>C0"
using fundchamber adjacentsetD_adj adjacentset_chamber ex_walls[of C0 D]
fundfoldpairs_def fundfoldpairs_fundchamber_image
by blast
lemma S_fixespointwise_fundchamber_image_int:
assumes "s\<in>S"
shows "fixespointwise ((\<rightarrow>) s) (C0\<inter>s`\<rightarrow>C0)"
proof-
from assms(1) obtain f g
where fg: "(f,g)\<in>fundfoldpairs" "s = Abs_induced_automorph f g"
by fast
show ?thesis
proof (rule fixespointwise_cong)
from fg show "fun_eq_on ((\<rightarrow>) s) (induced_automorph f g) (C0\<inter>s`\<rightarrow>C0)"
using permutation_conv_induced_automorph fun_eq_onI by fastforce
from fg show "fixespointwise (induced_automorph f g) (C0\<inter>s`\<rightarrow>C0)"
using fundfoldpairs_fundchamber_image fundfoldpairs_def
OpposedThinChamberComplexFoldings.indaut_fixes_fundfacet
by auto
qed
qed
lemma S_fixes_fundchamber_image_int:
"s\<in>S \<Longrightarrow> s`\<rightarrow>(C0\<inter>s`\<rightarrow>C0) = C0\<inter>s`\<rightarrow>C0"
using fixespointwise_im[OF S_fixespointwise_fundchamber_image_int] by simp
lemma fundfacets:
assumes "s\<in>S"
shows "C0\<inter>s`\<rightarrow>C0 \<lhd> C0" "C0\<inter>s`\<rightarrow>C0 \<lhd> s`\<rightarrow>C0"
using assms fundchamber_S_adjacent[of s]
fundchamber_S_image_neq_fundchamber[of s]
adjacent_int_facet1[of C0] adjacent_int_facet2[of C0]
by auto
lemma fundadjset_ex1_eq_S_image:
assumes "D\<in>fundadjset"
shows "\<exists>!s\<in>S. D = s`\<rightarrow>C0"
proof (rule ex_ex1I)
from assms show "\<exists>s. s\<in>S \<and> D = s `\<rightarrow> C0"
using fundadjset_eq_S_image by fast
next
fix s t assume "s\<in>S \<and> D = s`\<rightarrow>C0" "t\<in>S \<and> D = t`\<rightarrow>C0"
hence s: "s\<in>S" "D = s`\<rightarrow>C0"
and t: "t\<in>S" "D = t`\<rightarrow>C0"
by auto
from s(1) t(1) obtain f g f' g'
where "(f,g)\<in>fundfoldpairs" "s = Abs_induced_automorph f g"
and "(f',g')\<in>fundfoldpairs" "t = Abs_induced_automorph f' g'"
by auto
with s(2) t(2) show "s=t"
using fundfoldpairs_def fundfoldpairs_fundchamber_image
OpposedThinChamberComplexFoldings.induced_automorphism_unique[
of X f' g' C0 f g
]
by auto
qed
lemma fundchamber_S_image_inj_on: "inj_on (\<lambda>s. s`\<rightarrow>C0) S"
proof (rule inj_onI)
fix s t assume "s\<in>S" "t\<in>S" "s`\<rightarrow>C0 = t`\<rightarrow>C0" thus "s=t"
using fundchamber_S_fundadjset
bex1_equality[OF fundadjset_ex1_eq_S_image, of "s`\<rightarrow>C0" s t]
by simp
qed
lemma S_list_image_gallery:
"ss\<in>lists S \<Longrightarrow> gallery (map (\<lambda>w. w`\<rightarrow>C0) (sums ss))"
proof (induct ss rule: list_induct_ssnoc)
case (Single s) thus ?case
using fundchamber fundchamber_S_chamber fundchamber_S_adjacent
gallery_def
by (fastforce simp add: zero_permutation.rep_eq)
next
case (ssnoc ss s t)
define Cs D E where "Cs = map (\<lambda>w. w `\<rightarrow> C0) (sums ss)"
and "D = sum_list (ss@[s]) `\<rightarrow> C0"
and "E = sum_list (ss@[s,t]) `\<rightarrow> C0"
with ssnoc have "gallery (Cs@[D,E])"
using sum_list_S_in_W[of "ss@[s,t]"] sum_list_S_in_W[of "ss@[s]"]
fundchamber_W_image_chamber
fundchamber_WS_image_adjacent[of "sum_list (ss@[s])" t]
sum_list_append[of "ss@[s]" "[t]"]
by (auto intro: gallery_snocI simp add: sums_snoc)
with Cs_def D_def E_def show ?case using sums_snoc[of "ss@[s]" t] by (simp add: sums_snoc)
qed (auto simp add: gallery_def fundchamber zero_permutation.rep_eq)
lemma pgallery_last_eq_W_image:
"pgallery (C0#Cs@[C]) \<Longrightarrow> \<exists>w\<in>W. C = w`\<rightarrow>C0"
proof (induct Cs arbitrary: C rule: rev_induct)
case Nil
hence "C\<in>fundadjset"
using pgallery_def chamberD_simplex adjacentset_def by fastforce
from this obtain s where "s\<in>S" "C = s`\<rightarrow>C0"
using fundadjset_eq_S_image[of C] by auto
thus ?case using genby_genset_closed[of s S] by fast
next
case (snoc D Ds)
have DC: "chamber D" "chamber C" "D\<sim>C" "D\<noteq>C"
using pgallery_def snoc(2)
binrelchain_append_reduce2[of adjacent "C0#Ds" "[D,C]"]
by auto
from snoc obtain w where w: "w\<in>W" "D = w`\<rightarrow>C0"
using pgallery_append_reduce1[of "C0#Ds@[D]" "[C]"] by force
from w(2) have "(-w)`\<rightarrow>D = C0"
by (simp add:
image_comp plus_permutation.rep_eq[THEN sym]
zero_permutation.rep_eq
)
with DC w(1) have "C0 \<sim> (-w)`\<rightarrow>C" "C0 \<noteq> (-w)`\<rightarrow>C" "(-w)`\<rightarrow>C \<in> X"
using genby_uminus_closed W_endomorphism[of "-w"]
ChamberComplexEndomorphism.adj_map[of X _ D C]
permutation_eq_image[of "-w" D] chamberD_simplex[of C]
ChamberComplexEndomorphism.simplex_map[of X "permutation (-w)" C]
by auto
hence "(-w)`\<rightarrow>C \<in> fundadjset" using adjacentset_def by fast
from this obtain s where s: "s\<in>S" "(-w)`\<rightarrow>C = s`\<rightarrow>C0"
using fundadjset_eq_S_image by force
from s(2) have
"(permutation w \<circ> permutation (-w))`C = (permutation w \<circ> permutation s)`C0"
by (simp add: image_comp[THEN sym])
hence "C = (w+s)`\<rightarrow>C0"
by (simp add: plus_permutation.rep_eq[THEN sym] zero_permutation.rep_eq)
with w(1) s(1) show ?case
using genby_genset_closed[of s S] genby_add_closed by blast
qed
lemma chamber_eq_W_image:
assumes "chamber C"
shows "\<exists>w\<in>W. C = w`\<rightarrow>C0"
proof (cases "C=C0")
case True
hence "0\<in>W" "C = 0`\<rightarrow>C0"
using genby_0_closed by (auto simp add: zero_permutation.rep_eq)
thus ?thesis by fast
next
case False with assms show ?thesis
using fundchamber chamber_pconnect pgallery_last_eq_W_image by blast
qed
lemma S_list_image_crosses_walls:
"ss \<in> lists S \<Longrightarrow> {} \<notin> set (wall_crossings (map (\<lambda>w. w`\<rightarrow>C0) (sums ss)))"
proof (induct ss rule: list_induct_ssnoc)
case (Single s) thus ?case
using fundchamber fundchamber_S_chamber fundchamber_S_adjacent
fundchamber_S_image_neq_fundchamber[of s] ex_walls[of C0 "s`\<rightarrow>C0"]
OpposedThinChamberComplexFoldings.this_wall_betw_basechambers
by (force simp add: zero_permutation.rep_eq)
next
case (ssnoc ss s t)
moreover
define A B where "A = sum_list (ss@[s]) `\<rightarrow> C0" and "B = sum_list (ss@[s,t]) `\<rightarrow> C0"
moreover from ssnoc(2) A_def B_def obtain f g
where "OpposedThinChamberComplexFoldings X f g A" "B=g`A"
using sum_list_S_in_W[of "ss@[s]"] sum_list_S_in_W[of "ss@[s,t]"]
fundchamber_W_image_chamber sum_list_append[of "ss@[s]" "[t]"]
fundchamber_next_WS_image_neq[of t "sum_list (ss@[s])"]
fundchamber_WS_image_adjacent[of "sum_list (ss@[s])" t]
ex_walls[of A B]
by auto
ultimately show ?case
using OpposedThinChamberComplexFoldings.this_wall_betw_basechambers
sums_snoc[of "ss@[s]" t]
by (force simp add: sums_snoc wall_crossings_snoc)
qed (simp add: zero_permutation.rep_eq)
end (* context ThinChamberComplexManyFoldings *)
subsubsection \<open>A labelling by the vertices of the fundamental chamber\<close>
text \<open>
Here we show that by repeatedly applying the composition of one folding from each of the pairs of
opposed foldings associated to the elements of the collection @{term S} of fundamental
automorphisms, we can retract the entire chamber complex onto the fundamental chamber. This
retraction provides a means of labelling the chamber complex, using the vertices of the
fundamental chamber as labels.
\<close>
context ThinChamberComplexManyFoldings
begin
definition Spair :: "'a permutation \<Rightarrow> ('a\<Rightarrow>'a)\<times>('a\<Rightarrow>'a)"
where "Spair s \<equiv>
SOME fg. fg \<in> fundfoldpairs \<and> s = case_prod Abs_induced_automorph fg"
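
text \<open>
  The function @{term Spair} uses Hilbert choice to fix, for each generator, one particular pair
  of opposed foldings inducing it; the two lemmas that follow extract the defining properties of
  this choice. In outline (a sketch of the construction carried out below), composing the first
  folding of each chosen pair yields a map under which every chamber is sent onto the fundamental
  chamber after finitely many iterations, and labelling each vertex by its image under the first
  iterate that lands in @{term C0} produces a labelling of the chamber complex that fixes
  @{term C0} pointwise.
\<close>
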
lemma Spair_fundfoldpair: "s\<in>S \<Longrightarrow> Spair s \<in> fundfoldpairs"
using Spair_def
someI_ex[of
"\<lambda>fg. fg \<in> fundfoldpairs \<and>
s = case_prod Abs_induced_automorph fg"
]
by auto
lemma Spair_induced_automorph:
"s\<in>S \<Longrightarrow> s = case_prod Abs_induced_automorph (Spair s)"
using Spair_def
someI_ex[of
"\<lambda>fg. fg \<in> fundfoldpairs \<and>
s = case_prod Abs_induced_automorph fg"
]
by auto
lemma S_list_pgallery_decomp1:
assumes ss: "set ss = S" and gal: "Cs\<noteq>[]" "pgallery (C0#Cs)"
shows "\<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
s = Abs_induced_automorph f g \<longrightarrow> C \<in> g\<turnstile>\<C>"
proof (cases Cs)
case (Cons D Ds)
with gal(2) have "D\<in>fundadjset"
using pgallery_def chamberD_simplex adjacentset_def by fastforce
from this obtain s where s: "s\<in>S" "D = s`\<rightarrow>C0"
using fundadjset_eq_S_image by blast
from s(2) have
"\<forall>(f,g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> D\<in>g\<turnstile>\<C>"
using fundfoldpairs_def fundfoldpairs_fundchamber_image
OpposedThinChamberComplexFoldings.basechambers_half_chamber_systems(2)
by auto
with s(1) ss Cons show ?thesis by auto
qed (simp add: gal(1))
lemma S_list_pgallery_decomp2:
assumes "set ss = S" "Cs\<noteq>[]" "pgallery (C0#Cs)"
shows
"\<exists>rs s ts. ss = rs@s#ts \<and>
(\<exists>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
s = Abs_induced_automorph f g \<longrightarrow> C \<in> g\<turnstile>\<C>) \<and>
(\<forall>r\<in>set rs. \<forall>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> C\<in>f\<turnstile>\<C>)"
proof-
from assms obtain rs s ts where rs_s_ts:
"ss = rs@s#ts"
"\<exists>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
s = Abs_induced_automorph f g \<longrightarrow> C \<in> g\<turnstile>\<C>"
"\<forall>r\<in>set rs. \<forall>C\<in>set Cs.
\<not> (\<forall>(f,g)\<in>fundfoldpairs. r = Abs_induced_automorph f g \<longrightarrow> C \<in> g\<turnstile>\<C>)"
using split_list_first_prop[OF S_list_pgallery_decomp1, of ss Cs]
by auto
have "\<forall>r\<in>set rs. \<forall>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> C\<in>f\<turnstile>\<C>"
proof (rule ballI, rule ballI, rule prod_ballI, rule impI)
fix r C f g
assume "r \<in> set rs" "C \<in> set Cs" "(f,g)\<in>fundfoldpairs"
"r = Abs_induced_automorph f g"
with rs_s_ts(3) assms(3) show "C\<in>f\<turnstile>\<C>"
using pgalleryD_chamber
fundfoldpair_unique_half_chamber_systems_chamber_ng_f[
of _ _ f g C
]
by fastforce
qed
with rs_s_ts(1,2) show ?thesis by auto
qed
lemma S_list_pgallery_decomp3:
assumes "set ss = S" "Cs\<noteq>[]" "pgallery (C0#Cs)"
shows
"\<exists>rs s ts As B Bs. ss = rs@s#ts \<and> Cs = As@B#Bs \<and>
(\<forall>(f,g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> B\<in>g\<turnstile>\<C>) \<and>
(\<forall>A\<in>set As. \<forall>(f,g)\<in>fundfoldpairs.
s = Abs_induced_automorph f g \<longrightarrow> A\<in>f\<turnstile>\<C>) \<and>
(\<forall>r\<in>set rs. \<forall>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> C\<in>f\<turnstile>\<C>)"
proof-
from assms obtain rs s ts where rs_s_ts:
"ss = rs@s#ts"
"\<exists>B\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> B \<in> g\<turnstile>\<C>"
"\<forall>r\<in>set rs. \<forall>B\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> B\<in>f\<turnstile>\<C>"
using S_list_pgallery_decomp2[of ss Cs]
by auto
obtain As B Bs where As_B_Bs:
"Cs = As@B#Bs"
"\<forall>(f,g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> B \<in> g\<turnstile>\<C>"
"\<forall>A\<in>set As. \<exists>(f,g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<and> A\<notin>g\<turnstile>\<C>"
using split_list_first_prop[OF rs_s_ts(2)]
by fastforce
from As_B_Bs(1,3) assms(3)
have "\<forall>A\<in>set As. \<forall>(f,g)\<in>fundfoldpairs.
s = Abs_induced_automorph f g \<longrightarrow> A\<in>f\<turnstile>\<C>"
using pgalleryD_chamber
fundfoldpair_unique_half_chamber_systems_chamber_ng_f
by auto
with rs_s_ts(1,3) As_B_Bs(1,2) show ?thesis by fast
qed
lemma fundfold_trivial_f\<C>:
"r\<in>S \<Longrightarrow> \<forall>(f,g)\<in>fundfoldpairs. r = Abs_induced_automorph f g \<longrightarrow> C\<in>f\<turnstile>\<C> \<Longrightarrow>
fst (Spair r) ` C = C"
using Spair_fundfoldpair[of r] Spair_induced_automorph[of r] fundfoldpairs_def
OpposedThinChamberComplexFoldings.axioms(2)[
of X "fst (Spair r)" "snd (Spair r)" C0
]
ChamberComplexFolding.chamber_retraction2[of X "fst (Spair r)" C]
by fastforce
lemma fundfold_comp_trivial_f\<C>:
"set rs \<subseteq> S \<Longrightarrow>
\<forall>r\<in>set rs. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> C\<in>f\<turnstile>\<C> \<Longrightarrow>
fold fst (map Spair rs) ` C = C"
proof (induct rs)
case (Cons r rs)
have "fold fst (map Spair (r#rs)) ` C =
fold fst (map Spair rs) ` fst (Spair r) ` C"
by (simp add: image_comp)
also from Cons have "\<dots> = C" by (simp add: fundfold_trivial_f\<C>)
finally show ?case by fast
qed simp
lemma fundfold_trivial_f\<C>_list:
"r\<in>S \<Longrightarrow>
\<forall>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> C\<in>f\<turnstile>\<C> \<Longrightarrow>
fst (Spair r) \<Turnstile> Cs = Cs"
using fundfold_trivial_f\<C> by (induct Cs) auto
lemma fundfold_comp_trivial_f\<C>_list:
"set rs \<subseteq> S \<Longrightarrow>
\<forall>r\<in>set rs. \<forall>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> C\<in>f\<turnstile>\<C> \<Longrightarrow>
fold fst (map Spair rs) \<Turnstile> Cs = Cs"
proof (induct rs Cs rule: list_induct2')
case (4 r rs C Cs)
from 4(3)
have r: "\<forall>D\<in>set (C#Cs). \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> D\<in>f\<turnstile>\<C>"
by simp
from 4(2)
have "fold fst (map Spair (r#rs)) \<Turnstile> (C#Cs) =
map ((`) (fold fst (map Spair rs))) (fst (Spair r) \<Turnstile> (C#Cs))"
by (auto simp add: image_comp)
also from 4 have "\<dots> = C#Cs"
using fundfold_trivial_f\<C>_list[of r "C#Cs"]
by (simp add: fundfold_comp_trivial_f\<C>)
finally show ?case by fast
qed auto
lemma fundfold_gallery_map:
"s\<in>S \<Longrightarrow> gallery Cs \<Longrightarrow> gallery (fst (Spair s) \<Turnstile> Cs)"
using Spair_fundfoldpair fundfoldpairs_def
OpposedThinChamberComplexFoldings.axioms(2)
ChamberComplexFolding.gallery_map[of X "fst (Spair s)"]
by fastforce
lemma fundfold_comp_gallery_map:
assumes pregal: "gallery Cs"
shows "set ss \<subseteq> S \<Longrightarrow> gallery (fold fst (map Spair ss) \<Turnstile> Cs)"
proof (induct ss rule: rev_induct)
case (snoc s ss)
hence 1: "gallery (fst (Spair s) \<Turnstile> (fold fst (map Spair ss) \<Turnstile> Cs))"
using fundfold_gallery_map by fastforce
have 2: "fst (Spair s) \<Turnstile> (fold fst (map Spair ss) \<Turnstile> Cs) =
fold fst (map Spair (ss@[s])) \<Turnstile> Cs"
by (simp add: image_comp)
show ?case using 1 subst[OF 2, of gallery, OF 1] by fast
qed (simp add: pregal galleryD_adj)
lemma fundfold_comp_pgallery_ex_funpow:
assumes ss: "set ss = S"
shows "pgallery (C0#Cs@[C]) \<Longrightarrow>
\<exists>n. (fold fst (map Spair ss) ^^ n) ` C = C0"
proof (induct Cs arbitrary: C rule: length_induct)
fix Cs C
assume step : "\<forall>ys. length ys < length Cs \<longrightarrow>
(\<forall>x. pgallery (C0 # ys @ [x]) \<longrightarrow>
(\<exists>n. (fold fst (map Spair ss) ^^ n) ` x = C0))"
and set_up: "pgallery (C0#Cs@[C])"
from ss set_up obtain rs s ts As B Bs where decomps:
"ss = rs@s#ts" "Cs@[C] = As@B#Bs"
"\<forall>(f,g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> B\<in>g\<turnstile>\<C>"
"\<forall>A\<in>set As. \<forall>(f,g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> A\<in>f\<turnstile>\<C>"
"\<forall>r\<in>set rs. \<forall>D\<in>set (Cs@[C]). \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> D\<in>f\<turnstile>\<C>"
using S_list_pgallery_decomp3[of ss "Cs@[C]"]
by fastforce
obtain Es E where EsE: "C0#As = Es@[E]" using cons_conv_snoc by fast
have EsE_s_f\<C>:
"\<forall>A\<in>set (Es@[E]). \<forall>(f,g)\<in>fundfoldpairs.
s = Abs_induced_automorph f g \<longrightarrow> A\<in>f\<turnstile>\<C>"
proof (rule ballI)
fix A assume "A\<in>set (Es@[E])"
with EsE decomps(4)
show "\<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> A \<in> f \<turnstile> \<C>"
using fundfoldpair_fundchamber_in_half_chamber_system_f
set_ConsD[of A C0 As]
by auto
qed
moreover from decomps(2) EsE
have decomp2: "C0#Cs@[C] = Es@E#B#Bs"
by simp
moreover from ss decomps(1) have "s\<in>S" by auto
ultimately have sB: "fst (Spair s) ` B = E"
using set_up decomps(3) Spair_fundfoldpair[of s]
Spair_induced_automorph[of s] fundfoldpairs_def
pgalleryD_adj
binrelchain_append_reduce2[of adjacent Es "E#B#Bs"]
OpposedThinChamberComplexFoldings.adjacent_half_chamber_system_image_fg[
of X "fst (Spair s)" "snd (Spair s)" C0 E B
]
by auto
show "\<exists>n. (fold fst (map Spair ss) ^^ n) ` C = C0"
proof (cases "Es=[] \<and> Bs = []")
case True
from decomps(5) have
"\<forall>r\<in>set rs. \<forall>(f,g)\<in>fundfoldpairs. r = Abs_induced_automorph f g \<longrightarrow> C\<in>f\<turnstile>\<C>"
by auto
with decomps(1) ss
have "fold fst (map Spair ss) ` C = fold fst (map Spair ts) ` fst (Spair s) ` C"
using fundfold_comp_trivial_f\<C>[of rs C]
by (fastforce simp add: image_comp[THEN sym])
moreover
have "\<forall>r\<in>set ts. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> C0\<in>f\<turnstile>\<C>"
using fundfoldpair_fundchamber_in_half_chamber_system_f
by fast
ultimately have "(fold fst (map Spair ss) ^^ 1) ` C = C0"
using True decomps(1,2) ss EsE sB fundfold_comp_trivial_f\<C>[of ts C0]
fundfold_comp_trivial_f\<C>[of ts C0]
by fastforce
thus ?thesis by fast
next
case False have EsBs: "\<not> (Es = [] \<and> Bs = [])" by fact
show ?thesis
proof (cases "fold fst (map Spair ss) ` C = C0")
case True
hence "(fold fst (map Spair ss) ^^ 1) ` C = C0" by simp
thus ?thesis by fast
next
case False
from decomps(5) have C0CsC_rs_f\<C>:
"\<forall>r\<in>set rs. \<forall>D\<in>set (C0#Cs@[C]). \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> D\<in>f\<turnstile>\<C>"
using fundfoldpair_fundchamber_in_half_chamber_system_f
by auto
from decomps(1)
have "fold fst (map Spair (rs@[s])) \<Turnstile> (C0#Cs@[C]) =
fst (Spair s) \<Turnstile> (fold fst (map Spair rs) \<Turnstile> (C0#Cs@[C]))"
by (simp add: image_comp)
also from ss decomps(1)
have "\<dots> = fst (Spair s) \<Turnstile> (C0#Cs@[C])"
using C0CsC_rs_f\<C> fundfold_comp_trivial_f\<C>_list[of rs "C0#Cs@[C]"]
by fastforce
also from decomp2 have "\<dots> = fst (Spair s) \<Turnstile> (Es@E#B#Bs)"
by (simp add: image_comp)
finally
have "fold fst (map Spair (rs@[s])) \<Turnstile> (C0#Cs@[C]) =
Es @ E # E # fst (Spair s) \<Turnstile> Bs"
using decomps(1) ss sB EsE_s_f\<C> fundfold_trivial_f\<C>_list[of s "Es@[E]"]
by fastforce
with set_up ss decomps(1)
have gal: "gallery (Es @ E # fst (Spair s) \<Turnstile> Bs)"
using pgallery fundfold_comp_gallery_map[of _ "rs@[s]"]
gallery_remdup_adj[of Es E "fst (Spair s) \<Turnstile> Bs"]
by fastforce
from EsBs decomp2 EsE
have "\<exists>Zs. length Zs < length Cs \<and>
Es @ E # fst (Spair s) \<Turnstile> Bs = C0 # Zs @ [fst (Spair s) ` C]"
using sB
by (cases Bs Es rule: two_lists_cases_snoc_Cons') auto
from this obtain Zs where Zs:
"length Zs < length Cs"
"Es @ E # fst (Spair s) \<Turnstile> Bs = C0 # Zs @ [fst (Spair s) ` C]"
by fast
define Ys where "Ys = fold fst (map Spair ts) \<Turnstile> Zs"
with Zs(2) have
"fold fst (map Spair ts) \<Turnstile> (Es @ E # fst (Spair s) \<Turnstile> Bs) =
fold fst (map Spair ts) ` C0 # Ys @ [fold fst (map Spair (s#ts)) ` C]"
by (simp add: image_comp)
moreover
have "\<forall>r\<in>set ts. \<forall>(f,g)\<in>fundfoldpairs.
r = Abs_induced_automorph f g \<longrightarrow> C0\<in>f\<turnstile>\<C>"
using fundfoldpair_fundchamber_in_half_chamber_system_f
by fast
ultimately have
"fold fst (map Spair ts) \<Turnstile> (Es @ E # fst (Spair s) \<Turnstile> Bs) =
C0 # Ys @ [fold fst (map Spair (s#ts)) ` fold fst (map Spair rs) ` C]"
using decomps(1) ss C0CsC_rs_f\<C> fundfold_comp_trivial_f\<C>[of ts C0]
fundfold_comp_trivial_f\<C>[of rs C]
by fastforce
with decomps(1) ss obtain Xs where Xs:
"length Xs \<le> length Ys"
"pgallery (C0 # Xs @ [fold fst (map Spair ss) ` C])"
using gal fundfold_comp_gallery_map[of "Es @ E # fst (Spair s) \<Turnstile> Bs" ts]
gallery_obtain_pgallery[OF False[THEN not_sym]]
by (fastforce simp add: image_comp)
from Ys_def Xs(1) Zs(1) have "length Xs < length Cs" by simp
with Xs(2) obtain n where "(fold fst (map Spair ss) ^^ (Suc n)) ` C = C0"
using step by (force simp add: image_comp funpow_Suc_right[THEN sym])
thus ?thesis by fast
qed
qed
qed
lemma fundfold_comp_chamber_ex_funpow:
assumes ss: "set ss = S" and C: "chamber C"
shows "\<exists>n. (fold fst (map Spair ss) ^^ n) ` C = C0"
proof (cases "C=C0")
case True
hence "(fold fst (map Spair ss) ^^ 0) ` C = C0" by simp
thus ?thesis by fast
next
case False with fundchamber assms show ?thesis
using chamber_pconnect[of C0 C] fundfold_comp_pgallery_ex_funpow
by fastforce
qed
lemma fundfold_comp_endomorphism:
assumes "set ss \<subseteq> S"
shows "ChamberComplexEndomorphism X (fold fst (map Spair ss))"
proof (rule fold_chamber_complex_endomorph_list, rule ballI)
fix fg assume fg: "fg \<in>set (map Spair ss)"
from this obtain s where "s\<in>set ss" "fg = Spair s" by auto
with assms show "ChamberComplexEndomorphism X (fst fg)"
using Spair_fundfoldpair
OpposedThinChamberComplexFoldings.axioms(2)[of X]
ChamberComplexFolding.axioms(1)[of X]
ChamberComplexRetraction.axioms(1)[of X]
unfolding fundfoldpairs_def
by fastforce
qed
lemma finite_S: "finite S"
using fundchamber_S_fundadjset fundchamber finite_adjacentset
by (blast intro: inj_on_finite fundchamber_S_image_inj_on)
lemma ex_label_retraction: "\<exists>\<phi>. label_wrt C0 \<phi> \<and> fixespointwise \<phi> C0"
proof-
obtain ss where ss: "set ss = S" using finite_S finite_list by fastforce
define fgs where "fgs = map Spair ss"
\<comment> \<open>for @{term "fg \<in> set fgs"}, we have @{term "(fst fg) ` D = C0"} for some @{term "D \<in> fundadjset"}\<close>
define \<psi> where "\<psi> = fold fst fgs" (* \<psi> = fn \<circ> \<dots> \<circ> f1 *)
define vdist where "vdist v = (LEAST n. (\<psi>^^n) v \<in> C0)" for v
define \<phi> where "\<phi> v = (\<psi>^^(vdist v)) v" for v
have "label_wrt C0 \<phi>"
unfolding label_wrt_def
proof
fix C assume C: "C\<in>\<C>"
show "bij_betw \<phi> C C0"
proof-
from \<psi>_def fgs_def ss C obtain m where m: "(\<psi>^^m)`C = C0"
using chamber_system_def fundfold_comp_chamber_ex_funpow by fastforce
have "\<And>v. v\<in>C \<Longrightarrow> (\<psi>^^m) v = \<phi> v"
proof-
fix v assume v: "v\<in>C"
define n where "n = (LEAST n. (\<psi>^^n) v \<in> C0)"
from v m \<phi>_def vdist_def n_def have "m \<ge> n" "\<phi> v \<in> C0"
using Least_le[of "\<lambda>n. (\<psi>^^n) v \<in> C0" m]
LeastI_ex[of "\<lambda>n. (\<psi>^^n) v \<in> C0"]
by auto
then show "(\<psi>^^m) v = \<phi> v"
using ss \<psi>_def fgs_def \<phi>_def vdist_def n_def funpow_add[of "m-n" n \<psi>]
fundfold_comp_fixespointwise_C0
funpower_fixespointwise fixespointwiseD
by fastforce
qed
with C m ss \<psi>_def fgs_def show ?thesis
using chamber_system_def fundchamber fundfold_comp_endomorphism
ChamberComplexEndomorphism.funpower_endomorphism[of X]
ChamberComplexEndomorphism.bij_betw_chambers[of X]
bij_betw_cong[of C "\<psi>^^m" \<phi> C0]
by fastforce
qed
qed
moreover from vdist_def \<phi>_def have "fixespointwise \<phi> C0"
using Least_eq_0 by (fastforce intro: fixespointwiseI)
ultimately show ?thesis by fast
qed
lemma ex_label_map: "\<exists>\<phi>. label_wrt C0 \<phi>"
using ex_label_retraction by fast
end (* context ThinChamberComplexManyFoldings *)
subsubsection \<open>More on the action of the group of automorphisms on chambers\<close>
text \<open>
Recall that we have already verified that @{term W} acts transitively on the chamber system. We
now use the labelling of the chamber complex examined in the previous section to show that this
action is simply transitive.
\<close>
context ThinChamberComplexManyFoldings
begin
lemma fundchamber_W_image_ker:
assumes "w\<in>W" "w`\<rightarrow>C0 = C0"
shows "w = 0"
proof-
obtain \<phi> where \<phi>: "label_wrt C0 \<phi>" using ex_label_map by fast
have "fixespointwise (permutation w) C0"
using W_respects_labels[OF \<phi> assms(1)] chamberD_simplex[OF fundchamber]
ChamberComplexEndomorphism.respects_label_fixes_chamber_imp_fixespointwise[
OF W_endomorphism, OF assms(1) \<phi> fundchamber assms(2)
]
by fast
with assms(1) show ?thesis
using fundchamber W_automorphism trivial_automorphism
standard_uniqueness_automorphs
permutation_inject[of w 0]
by (auto simp add: zero_permutation.rep_eq)
qed
lemma fundchamber_W_image_inj_on:
"inj_on (\<lambda>w. w`\<rightarrow>C0) W"
proof (rule inj_onI)
fix w w' assume ww': "w\<in>W" "w'\<in>W" "w`\<rightarrow>C0 = w'`\<rightarrow>C0"
from ww'(3) have "(-w')`\<rightarrow>w`\<rightarrow>C0 = (-w')`\<rightarrow>w'`\<rightarrow>C0" by simp
with ww'(1,2) show "w = w'"
using fundchamber_W_image_ker[of "-w'+w"] add.assoc[of w' "-w'" w]
by (simp add:
image_comp plus_permutation.rep_eq[THEN sym]
zero_permutation.rep_eq genby_uminus_add_closed
)
qed
end (* context ThinChamberComplexManyFoldings *)
subsubsection \<open>A bijection between the fundamental chamber and the set of generating automorphisms\<close>
text \<open>
Removing a single vertex from the fundamental chamber determines a facet, a facet in the
fundamental chamber determines an adjacent chamber (since our complex is thin), and a chamber
adjacent to the fundamental chamber determines an automorphism (via some pair of opposed foldings)
in our generating set @{term S}. Here we show that this correspondence is bijective.
\<close>
context ThinChamberComplexManyFoldings
begin
definition fundantivertex :: "'a permutation \<Rightarrow> 'a"
where "fundantivertex s \<equiv> (THE v. v \<in> C0-s`\<rightarrow>C0)"
abbreviation "fundantipermutation \<equiv> the_inv_into S fundantivertex"
lemma fundantivertex: "s\<in>S \<Longrightarrow> fundantivertex s \<in> C0-s`\<rightarrow>C0"
using fundchamber_S_adjacent[of s]
fundchamber_S_image_neq_fundchamber[of s]
fundantivertex_def[of s] theI'[OF adj_antivertex]
by auto
lemma fundantivertex_fundchamber_decomp:
"s\<in>S \<Longrightarrow> C0 = insert (fundantivertex s) (C0\<inter>s`\<rightarrow>C0)"
using fundchamber_S_adjacent[of s]
fundchamber_S_image_neq_fundchamber[of s]
fundantivertex[of s] adjacent_conv_insert[of C0]
by auto
lemma fundantivertex_unstable:
"s\<in>S \<Longrightarrow> s \<rightarrow> fundantivertex s \<noteq> fundantivertex s"
using fundantivertex_fundchamber_decomp[of s]
image_insert[of "(\<rightarrow>) s" "fundantivertex s" "C0\<inter>s`\<rightarrow>C0"]
S_fixes_fundchamber_image_int fundchamber_S_image_neq_fundchamber
by fastforce
lemma fundantivertex_inj_on: "inj_on fundantivertex S"
proof (rule inj_onI)
fix s t assume st: "s\<in>S" "t\<in>S" "fundantivertex s = fundantivertex t"
hence "insert (fundantivertex s) (C0\<inter>s`\<rightarrow>C0) =
insert (fundantivertex s) (C0\<inter>t`\<rightarrow>C0)"
using fundantivertex_fundchamber_decomp[of s]
fundantivertex_fundchamber_decomp[of t]
by auto
moreover from st
have "fundantivertex s \<notin> C0\<inter>s`\<rightarrow>C0" "fundantivertex s \<notin> C0\<inter>t`\<rightarrow>C0"
using fundantivertex[of s] fundantivertex[of t]
by auto
ultimately have "C0\<inter>s`\<rightarrow>C0 = C0\<inter>t`\<rightarrow>C0"
using insert_subset_equality[of "fundantivertex s"] by simp
with st(1,2) show "s=t"
using fundchamber fundchamber_S_chamber[of s] fundchamber_S_chamber[of t]
fundfacets[of s] fundfacets(2)[of t]
fundchamber_S_image_neq_fundchamber[of s]
fundchamber_S_image_neq_fundchamber[of t]
facet_unique_other_chamber[of C0 "C0\<inter>s`\<rightarrow>C0" "s`\<rightarrow>C0" "t`\<rightarrow>C0"]
genby_genset_closed[of _ S]
inj_onD[OF fundchamber_W_image_inj_on, of s t]
by auto
qed
lemma fundantivertex_surj_on: "fundantivertex ` S = C0"
proof (rule seteqI)
show "\<And>v. v \<in> fundantivertex ` S \<Longrightarrow> v\<in>C0" using fundantivertex by fast
next
fix v assume v: "v\<in>C0"
define D where "D = the_adj_chamber C0 (C0-{v})"
with v have "D\<in>fundadjset"
using fundchamber facetrel_diff_vertex the_adj_chamber_adjacentset
the_adj_chamber_neq
by fastforce
from this obtain s where s: "s\<in>S" "D = s`\<rightarrow>C0"
using fundadjset_eq_S_image by blast
with v D_def [abs_def] have "fundantivertex s = v"
using fundchamber fundchamber_S_adjacent
fundchamber_S_image_neq_fundchamber[of s]
facetrel_diff_vertex[of v C0]
the_adj_chamber_facet facetrel_def[of "C0-{v}" D]
unfolding fundantivertex_def
by (force intro: the1_equality[OF adj_antivertex])
with s(1) show "v \<in> fundantivertex ` S" by fast
qed
lemma fundantivertex_bij_betw: "bij_betw fundantivertex S C0"
unfolding bij_betw_def
using fundantivertex_inj_on fundantivertex_surj_on
by fast
lemma card_S_fundchamber: "card S = card C0"
using bij_betw_same_card[OF fundantivertex_bij_betw] by fast
lemma card_S_chamber:
"chamber C \<Longrightarrow> card C = card S"
using fundchamber chamber_card[of C0 C] card_S_fundchamber by auto
lemma fundantipermutation1:
"v\<in>C0 \<Longrightarrow> fundantipermutation v \<in> S"
using fundantivertex_surj_on the_inv_into_into[OF fundantivertex_inj_on] by blast
end (* context ThinChamberComplexManyFoldings *)
subsection \<open>Thick chamber complexes\<close>
text \<open>
A thick chamber complex is one in which every facet is a facet of at least three chambers.
\<close>
locale ThickChamberComplex = ChamberComplex X
for X :: "'a set set"
+ assumes thick:
"chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow>
\<exists>D E. D\<in>X-{C} \<and> z\<lhd>D \<and> E\<in>X-{C,D} \<and> z\<lhd>E"
begin
definition some_third_chamber :: "'a set \<Rightarrow> 'a set \<Rightarrow> 'a set \<Rightarrow> 'a set"
where "some_third_chamber C D z \<equiv> SOME E. E\<in>X-{C,D} \<and> z\<lhd>E"
lemma facet_ex_third_chamber: "chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> \<exists>E\<in>X-{C,D}. z\<lhd>E"
using thick[of C z] by auto
lemma some_third_chamberD_facet:
"chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> z \<lhd> some_third_chamber C D z"
using facet_ex_third_chamber[of C z D] someI_ex[of "\<lambda>E. E\<in>X-{C,D} \<and> z\<lhd>E"]
some_third_chamber_def
by auto
lemma some_third_chamberD_simplex:
"chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> some_third_chamber C D z \<in> X"
using facet_ex_third_chamber[of C z D] someI_ex[of "\<lambda>E. E\<in>X-{C,D} \<and> z\<lhd>E"]
some_third_chamber_def
by auto
lemma some_third_chamberD_adj:
"chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> C \<sim> some_third_chamber C D z"
using some_third_chamberD_facet by (fast intro: adjacentI)
lemma chamber_some_third_chamber:
"chamber C \<Longrightarrow> z\<lhd>C \<Longrightarrow> chamber (some_third_chamber C D z)"
using chamber_adj some_third_chamberD_simplex some_third_chamberD_adj
by fast
lemma some_third_chamberD_ne:
assumes "chamber C" "z\<lhd>C"
shows "some_third_chamber C D z \<noteq> C" "some_third_chamber C D z \<noteq> D"
using assms facet_ex_third_chamber[of C z D]
someI_ex[of "\<lambda>E. E\<in>X-{C,D} \<and> z\<lhd>E"] some_third_chamber_def
by auto
end (* context ThickChamberComplex *)
end (* theory *)
|
import Structure.Generic.Axioms.Universes
import Structure.Generic.Axioms.AbstractFunctors
import Structure.Generic.Axioms.AbstractEquivalences
import Structure.Generic.Lemmas.DerivedFunctors
import Structure.Generic.Notation
set_option autoBoundImplicitLocal false
--set_option pp.universes true
universes u v
def GeneralizedProperty (α : Sort u) (V : Universe.{v}) := α → V
namespace GeneralizedProperty
variable {α : Sort u} {V : Universe.{v}}
instance hasInstances : HasInstances (GeneralizedProperty α V) := ⟨λ P => ∀ a, P a⟩
section Properties
variable (P : GeneralizedProperty α V)
class HasInst where
(inst (a : α) : P a)
end Properties
end GeneralizedProperty
open GeneralizedProperty
-- TODO: Update comments
-- We want to formalize a very general "structure with equivalences", so we start with a very basic
-- abstraction for something that looks like an equivalence relation except that the codomain is
-- generalized to `Sort u` instead of `Prop`. Therefore, `⟨Equiv.refl, Equiv.symm, Equiv.trans⟩`, where
-- `Equiv` is the Lean 4 version of the `equiv` type in Lean 3 mathlib, is also an instance of this type
-- (with the restriction that both arguments must live in the same universe).
--
-- We actually need to generalize slightly further to a codomain that is not necessarily a sort but can be
-- coerced to a sort. This way, the codomain can be any Lean structure that bundles a sort, in particular
-- it can be our `Structure` type.
def GeneralizedRelation (α : Sort u) (V : Universe.{v}) := α → α → V
namespace GeneralizedRelation
variable {α : Sort u} {V : Universe.{v}}
instance hasInstances : HasInstances (GeneralizedRelation α V) := ⟨λ R => ∀ a b, R a b⟩
section Properties
variable (R : GeneralizedRelation α V)
class HasRefl where
(refl (a : α) : R a a)
variable [HasInternalFunctors V]
class HasTrans where
(trans {a b c : α} : R a b ⟶ R b c ⟶ R a c)
class IsPreorder extends HasRefl R, HasTrans R
variable [HasInternalEquivalences V]
class HasSymm where
(symm {a b : α} : R a b ⟷ R b a)
class IsEquivalence extends IsPreorder R, HasSymm R
end Properties
def HasTrans.revTrans {R : GeneralizedRelation α V} [HasInternalFunctors V] [HasLinearFunOp V] [h : HasTrans R]
{a b c : α} : R b c ⟶ R a b ⟶ R a c :=
HasLinearFunOp.swapFunFun h.trans
@[simp] theorem HasTrans.revTrans.eff {R : GeneralizedRelation α V} [HasInternalFunctors V] [HasLinearFunOp V] [h : HasTrans R]
{a b c : α} (g : R b c) (f : R a b) :
h.revTrans g f = h.trans f g :=
by apply HasLinearFunOp.swapFunFun.effEff
def HasTrans.trans' {R : GeneralizedRelation α V} [HasInternalFunctors V] [h : HasTrans R]
{a b c : α} (f : R a b) (g : R b c) : R a c := h.trans f g
def HasSymm.symm' {R : GeneralizedRelation α V} [HasInternalFunctors V] [HasInternalEquivalences V] [h : HasSymm R]
{a b : α} (f : R a b) : R b a := HasInternalEquivalences.to h.symm f
-- When reasoning about instances of `R a b`, we would like to write `trans` as composition, `refl` as
-- identity, and `symm` as inverse.
-- Note that `R` can be inferred from `f : R a b` by elaboration.
section Notation
@[reducible] def revComp {R : GeneralizedRelation α V} [HasInternalFunctors V] [h : HasTrans R] {a b c : α} (g : R b c) (f : R a b) : R a c := h.trans' f g
infixr:90 " • " => revComp
@[reducible] def ident (R : GeneralizedRelation α V) [h : HasRefl R] (a : α) : R a a := h.refl a
@[reducible] def inv {R : GeneralizedRelation α V} [HasInternalFunctors V] [HasInternalEquivalences V] [h : HasSymm R] {a b : α} (f : R a b) : R b a := h.symm' f
postfix:max "⁻¹" => inv
end Notation
end GeneralizedRelation
open GeneralizedRelation
-- We can attach products, arrows, and/or equivalences to a given sort, in the form of generalized
-- relations satisfying appropriate properties.
section AttachedRelations
variable (α : Sort u) (V : Universe.{v}) [HasInternalFunctors V]
class HasArrows where
(Arrow : GeneralizedRelation α V)
[isPreorder : IsPreorder Arrow]
namespace HasArrows
variable [h : HasArrows α V]
instance arrowPreorder : IsPreorder h.Arrow := h.isPreorder
instance hasArrow : HasArrow α α := ⟨h.Arrow⟩
instance : HasInstances (HasArrow.γ α α) := Universe.instInst V
instance : IsPreorder (@HasArrow.Arrow α α (hasArrow α V)) := h.isPreorder
end HasArrows
variable [HasInternalEquivalences V]
class HasEquivalences where
(Equiv : GeneralizedRelation α V)
[isEquiv : IsEquivalence Equiv]
namespace HasEquivalences
variable [h : HasEquivalences α V]
instance equivEquivalence : IsEquivalence h.Equiv := h.isEquiv
instance hasEquivalence : HasEquivalence α α := ⟨h.Equiv⟩
instance : HasInstances (HasEquivalence.γ α α) := Universe.instInst V
instance : IsEquivalence (@HasEquivalence.Equiv α α (hasEquivalence α V)) := h.isEquiv
end HasEquivalences
class HasProducts where
(Product : GeneralizedRelation α V)
[hasSymm : HasSymm Product]
namespace HasProducts
variable [h : HasProducts α V]
instance productSymm : HasSymm h.Product := h.hasSymm
instance hasProduct : HasProduct α α := ⟨h.Product⟩
instance : HasInstances (HasProduct.γ α α) := Universe.instInst V
instance : HasSymm (@HasProduct.Product α α (hasProduct α V)) := h.hasSymm
end HasProducts
end AttachedRelations
|
# Model Solution for Exercise Sheet 1
* [Exercise 1](#Exercise-1)
* [Exercise 2](#Exercise-2)
* [Exercise 3](#Exercise-3)
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
plt.style.use('ggplot')
```
---
## Exercise 1
Given a parametric function $y = f(x)$, $y = 1 + a_1x + a_2x^2$ with parameters $a_1 = 2.0 ± 0.2$, $a_2 = 1.0 ± 0.1$ and correlation coefficient $ρ = −0.8$.
---
```python
a1, a1_err = 2.0, 0.2
a2, a2_err = 1.0, 0.1
rho = -0.8
```
---
### 1.1
State the covariance matrix of $a_1$ and $a_2$.
---
The covariance matrix of $a_1$ and $a_2$ can be computed as follows:
$$\mathrm{Cov}(a) = \pmatrix{\sigma^2_{a_1} & \rho\sigma_{a_1}\sigma_{a_2} \\
\rho\sigma_{a_1}\sigma_{a_2} & \sigma^2_{a_2}}$$
```python
c12 = rho * a1_err * a2_err
covariance = np.matrix([[a1_err ** 2, c12], [c12, a2_err ** 2]])
covariance
```
matrix([[ 0.04 , -0.016],
[-0.016, 0.01 ]])
---
### 1.2
Determine analytically the uncertainty of $y$ as a function of $x$:
---
To do so, we first compute the derivatives of $y$ with respect to $a_1$ and $a_2$,
$$\frac{\partial{}y}{\partial{}a_1} = x \,,\quad \frac{\partial{}y}{\partial{}a_2} = x^2 \,.$$
From these, the variance of $y$ can be determined via
$$\sigma^2_y = \mathrm{Cov}(y) = \sum_{ij}\frac{\partial{}y}{\partial{}a_i}\frac{\partial{}y}{\partial{}a_j}\mathrm{Cov}(a)_{ij}$$
Inserting the derivatives computed above yields
$$\sigma^2_y = c_{11}x^2 + 2c_{12}x^3 + c_{22}x^4$$
where the $c_{ij}$ are the entries of $\mathrm{Cov}(a)$. The expression can be simplified further to
$$\sigma^2_y = x^2\left(\sigma^2_{a_1} + \sigma^2_{a_2}x^2 + 2\rho\sigma_{a_1}\sigma_{a_2}x\right) \,.$$
From this, the uncertainty is
$$ \sigma_y = \lvert{}x\rvert\sqrt{\sigma^2_{a_1} + \sigma^2_{a_2}x^2 + 2\rho\sigma_{a_1}\sigma_{a_2}x} \,.$$
---
#### 1.2.1
neglecting the correlation
---
(i.e. for $\rho = 0$), the expression above simplifies to
$$\sigma_y = \lvert{}x\rvert\sqrt{\sigma^2_{a_1} + \sigma^2_{a_2}x^2} \,.$$
With the values of `covariance` computed above, this gives
$$\sigma_y = \lvert{}x\rvert\sqrt{0.04 + 0.01x^2} = 0.2\lvert{}x\rvert{}\sqrt{1 + 0.25x^2} \,.$$
```python
def err_ana_wo(x):
return 0.2 * np.abs(x) * np.sqrt(1 + 0.25 * x ** 2)
xs = np.linspace(-3, 3, 10000)
ys = 1 + a1 * xs + a2 * xs ** 2
errs = err_ana_wo(xs)
plt.plot(xs, ys)
plt.fill_between(xs, ys - errs, ys + errs, alpha=0.5)
plt.show()
```
---
#### 1.2.2
taking the correlation into account
---
$$\sigma_y = \lvert{}x\rvert\sqrt{0.04 + 0.01x^2 - 0.032x} = 0.2\lvert{}x\rvert\sqrt{1 - 0.8x + 0.25x^2} \,.$$
```python
def err_ana(x):
    return 0.2 * np.abs(x) * np.sqrt(1 + 0.25 * x ** 2 - 0.8 * x)
errs = err_ana(xs)
plt.plot(xs, ys)
plt.fill_between(xs, ys - errs, ys + errs, alpha=0.5)
plt.show()
```
---
### 1.3
Determine the uncertainty of $y$ as a function of $x$ via Monte Carlo:
#### 1.3.1
Generate value pairs $(a_1, a_2)$ according to their covariance matrix and visualize them, e.g. with a scatter plot.
_Hint_: If $x_1$ and $x_2$ are two Gaussian-distributed random numbers with mean zero and variance one, one obtains a pair of correlated Gaussian-distributed random numbers $(y_1, y_2)$ with mean zero and variance one via $(y_1 = x_1; y_2 = x_1ρ + x_2\sqrt{1 − \rho^2})$.
---
```python
x1s, x2s = np.random.normal(size=(2, 10000))
plt.hist2d(x1s, x2s, bins=40)
plt.title('2-dim Normalverteilung')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.show()
y1s = x1s
y2s = x1s * rho + x2s * np.sqrt(1 - rho ** 2)
plt.hist2d(y1s, y2s, bins=40)
plt.title('2-dim Normalverteilung mit Korrelation')
plt.xlabel('$y_1$')
plt.ylabel('$y_2$')
plt.show()
a1s = a1 + y1s * a1_err
a2s = a2 + y2s * a2_err
plt.hist2d(a1s, a2s, bins=40)
plt.title('Verteilung der $a_1$ und $a_2$')
plt.xlabel('$a_1$')
plt.ylabel('$a_2$')
plt.show()
```
---
#### 1.3.2
Determine the distribution of $y$ for $x = \{−1, 0, +1\}$ and compare the mean and variance (standard deviation) with the results of the analytic calculation.
---
```python
def y(x, a1, a2):
return 1 + a1 * x + a2 * x ** 2
def var_analytical(x):
xx = x ** 2
#return xx * (covariance[0, 0] + covariance[1, 1] * xx + 2 * covariance[0, 1] * x)
return 0.04 * xx * (1 + 0.25 * xx - 0.8 * x)
for x in (-1, 0, 1):
ys = y(x, a1s, a2s)
mean = np.mean(ys)
var = np.var(ys)
print('〈y({})〉= {:.3f}'.format(x, mean))
print(' σ² = {:.3f}'.format(var))
print(' analytical = {:.3f}'.format(var_analytical(x)))
plt.hist(ys, bins=100)
plt.xlabel('y({})'.format(x))
plt.ylabel('Absolute Häufigkeit')
plt.show()
```
The case $x = 0$ is special here. Since all parameters multiply only powers of $x$, this case always gives $y = 1$, independently of the $a_i$; the sampled variance is therefore exactly zero and the comparison with the analytic value is trivial.
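As a quick numerical sanity check (a small sketch, not part of the original solution sheet), the Monte Carlo sample confirms this:
```python
# Sanity check (sketch): at x = 0 every sampled curve passes through y = 1,
# so the sample variance is exactly zero.
ys0 = y(0, a1s, a2s)
print(np.mean(ys0), np.var(ys0))  # expected output: 1.0 0.0
```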
---
## Exercise 2
Consider the following reparametrization of $y = f(x)$:
$$y = 1 + \frac{x(1+x)}{b_1} + \frac{x(1-x)}{b_2}$$
### 2.1
Determine analytically the transformed parameters $b_1$ and $b_2$ and their covariance matrix
---
We match the reparametrization against the coefficients of the powers of $x$. The constant term $1$ can be dropped, since it appears identically in both definitions.
\begin{align}
a_1 x + a_2 x^2 &= \frac{x(1 + x)}{b_1} + \frac{x(1 - x)}{b_2} \\
&= \frac{x}{b_1} + \frac{x^2}{b_1} + \frac{x}{b_2} - \frac{x^2}{b_2} \\
&= x\left(\frac{1}{b_1} + \frac{1}{b_2}\right) + x^2\left(\frac{1}{b_1} - \frac{1}{b_2}\right)
\end{align}
This gives
$$a_1 = \left(\frac{1}{b_1} + \frac{1}{b_2}\right) \quad\text{and}\quad a_2 = \left(\frac{1}{b_1} - \frac{1}{b_2}\right)$$
and hence
$$b_1 = \frac{2}{a_1 + a_2} \quad\text{and}\quad b_2 = \frac{2}{a_1 - a_2} \,.$$
For the Jacobian matrix of the transformation we obtain
\begin{equation}
M = \pmatrix{
\frac{-2}{(a_1 + a_2)^2} & \frac{-2}{(a_1 + a_2)^2} \\
\frac{-2}{(a_1 - a_2)^2} & \frac{+2}{(a_1 - a_2)^2}
} \quad\text{where}\quad m_{ij} = \frac{\partial b_i}{\partial a_j}
\end{equation}
\begin{equation}
M^T = \pmatrix{
\frac{-2}{(a_1 + a_2)^2} & \frac{-2}{(a_1 - a_2)^2} \\
\frac{-2}{(a_1 + a_2)^2} & \frac{+2}{(a_1 - a_2)^2}
}
\end{equation}
The transformed covariance matrix is then $\mathrm{Cov}(b) = M\mathrm{Cov}(a)M^T$.
```python
b1 = 2 / (a1 + a2)
b2 = 2 / (a1 - a2)
denom1 = (a1 + a2) ** 2
denom2 = (a1 - a2) ** 2
M = np.matrix([[-2 / denom1, -2 / denom1],
[-2 / denom2, 2 / denom2]])
cov_b = M * covariance * M.T
cov_b
```
matrix([[ 0.00088889, 0.01333333],
[ 0.01333333, 0.328 ]])
---
### 2.2
Determine the covariance matrix of the transformed parameters via Monte Carlo
---
```python
b1s = 2 / (a1s + a2s)
b2s = 2 / (a1s - a2s)
print('b1 = {}'.format(np.mean(b1s)))
print('var = {}'.format(np.var(b1s)))
print('b2 = {}'.format(np.mean(b2s)))
print('var = {}'.format(np.var(b2s)))
plt.hist(b1s, bins=100)
plt.show()
plt.hist(b2s, bins=100)
plt.show()
```
Here the problem arises that for some combinations of values of $a_1$ and $a_2$ the denominator comes very close to `0`. This produces very large (unrealistic) values for $b_2$. We can counteract this by restricting the values of $b_2$ to the range in which we expect them.
```python
cut = np.logical_and(b2s < 5, b2s > 0)
b1s_ = b1s[cut]
b2s_ = b2s[cut]
print('b2 gefiltert = {}'.format(np.mean(b2s_)))
print('var = {}'.format(np.var(b2s_)))
plt.hist(b1s_, bins=100)
plt.show()
plt.hist2d(b1s_, b2s_, bins=100)
plt.show()
```
If we want to make life a bit easier, we can simply use the NumPy function `cov`, which directly computes the covariance matrix of two arrays:
```python
ncov_b = np.cov(b1s_, b2s_)
ncov_b
```
array([[ 0.00083908, 0.01394795],
[ 0.01394795, 0.44603336]])
---
### 2.3
Determine analytically the uncertainty of $y$ as a function of $x$:
#### 2.3.1
using the analytically determined covariance matrix of $(b_1, b_2)$
---
First we compute the partial derivatives of $y$ with respect to the coefficients $b_1$ and $b_2$, and from them the Jacobian matrix $M$.
\begin{equation}
M = \pmatrix{\frac{\partial y}{\partial b_1} \\
\frac{\partial y}{\partial b_2}}
= \pmatrix{\frac{-x(1+x)}{b_1^2}\\
\frac{-x(1-x)}{b_2^2}}
\end{equation}
This yields for the variance
\begin{align}
\sigma_y^2 &= M^T \mathrm{Cov}(b) M \\
&= x^2\left[\frac{c_{11}}{b_1^4}(1+x)^2 + \frac{2c_{12}}{b_1^2b_2^2}(1 - x^2) + \frac{c_{22}}{b_2^4}(1-x)^2\right] \\
&= x^2\left(\alpha + \beta x + \gamma x^2 \right)
\end{align}
with the coefficients
\begin{equation}
\alpha = \left(\frac{c_{11}}{b_1^4} + \frac{2c_{12}}{b_1^2b_2^2} + \frac{c_{22}}{b_2^4}\right) \quad,\quad
\beta = 2\left(\frac{c_{11}}{b_1^4} - \frac{c_{22}}{b_2^4}\right) \quad\text{and}\quad
\gamma = \left(\frac{c_{11}}{b_1^4} - \frac{2c_{12}}{b_1^2b_2^2} + \frac{c_{22}}{b_2^4}\right) \,.
\end{equation}
Expressed in numbers, the coefficients are
```python
c11 = cov_b[0, 0]
c12 = cov_b[0, 1]
c22 = cov_b[1, 1]
print(cov_b)
a = c11 / b1 ** 4
b = 2 * c12 / b1 ** 2 / b2 ** 2
c = c22 / b2 ** 4
alpha = a + b + c
beta = 2 * (a - c)
gamma = a - b + c
np.sqrt(alpha), alpha/alpha, beta/alpha, gamma/alpha
```
[[ 0.00088889 0.01333333]
[ 0.01333333 0.328 ]]
(0.20000000000000001, 1.0, -0.80000000000000004, 0.25)
Hence
\begin{equation}
\sigma_y = 0.2\lvert{}x\rvert\sqrt{1 - 0.8x + 0.25x^2}
\end{equation}
which is exactly the same result as for the original parametrization.
```python
def err_ana(x):
return np.abs(x) * np.sqrt(alpha + beta * x + gamma * x ** 2)
xs = np.linspace(-10, 10, 10000)
ys = 1 + xs * (xs + 1) / b1 + xs * (xs - 1) / b2
errs = err_ana(xs)
plt.plot(xs, ys)
plt.fill_between(xs, ys - errs, ys + errs, alpha=0.5)
plt.show()
```
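As an additional cross-check (a sketch, not part of the original solution), we can verify numerically that `err_ana` in the $(b_1, b_2)$ parametrization reproduces the uncertainty from 1.2.2:
```python
# Cross-check (sketch): the reparametrized uncertainty agrees with
# sigma_y = 0.2*|x|*sqrt(1 - 0.8*x + 0.25*x**2) from the original parametrization.
xs_check = np.linspace(-3, 3, 7)
sigma_orig = 0.2 * np.abs(xs_check) * np.sqrt(1 - 0.8 * xs_check + 0.25 * xs_check ** 2)
print(np.allclose(err_ana(xs_check), sigma_orig))  # True
```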
---
#### 2.3.2
using the numerically determined covariance matrix of $(b_1, b_2)$
---
The same calculation with the values determined via Monte Carlo:
```python
c11_n = ncov_b[0, 0]
c12_n = ncov_b[0, 1]
c22_n = ncov_b[1, 1]
a_n = c11_n / b1 ** 4
b_n = 2 * c12_n / b1 ** 2 / b2 ** 2
c_n = c22_n / b2 ** 4
alpha_n = a_n + b_n + c_n
beta_n = 2 * (a_n - c_n)
gamma_n = a_n - b_n + c_n
np.sqrt(alpha_n), alpha_n/alpha_n, beta_n/alpha_n, gamma_n/alpha_n
```
(0.21866956831133924, 1.0, -0.98833238462782003, 0.3436789259019577)
```python
def err_num(x):
return np.abs(x) * np.sqrt(alpha_n + beta_n * x + gamma_n * x ** 2)
errs = err_num(xs)
plt.plot(xs, ys)
plt.fill_between(xs, ys - errs, ys + errs, alpha=0.5)
plt.show()
```
---
## Exercise 3
Solve the sub-tasks above for $y = f(x)$ with
$$y = \ln\left(1 + a_1x + a_2x^2\right) \quad \text{and} \quad y = \ln\left(1 + \frac{x(1+x)}{b_1} + \frac{x(x-1)}{b_2}\right)$$
In the following we call the new $y$ simply $z$ for brevity.
\begin{equation}
z = \ln(1 + a_1x + a_2x^2) = \ln(y)
\end{equation}
For the uncertainty of $z$ we obtain
\begin{align}
\sigma_z &= \sqrt{\left(\frac{\partial z}{\partial y}\right)^2\sigma_y^2} \\
&= \sqrt{\left(\frac{1}{y}\right)^2\sigma_y^2} \\
&= \left\lvert\frac{\sigma_y}{y}\right\rvert \\
&= \frac{0.2\lvert x\rvert\sqrt{1 + 0.25x^2 - 0.8x}}{\lvert 1 + a_1 x + a_2 x^2 \rvert} \,.
\end{align}
The calculation for the reparametrization is completely analogous. Here we obtain
\begin{align}
\sigma_z &= \left\lvert\frac{\sigma_y}{y}\right\rvert \\
&= \frac{0.2\lvert x\rvert\sqrt{1 + 0.25x^2 - 0.8x}}{\left\lvert 1 + \frac{x(1+x)}{b_1} + \frac{x(x-1)}{b_2}\right\rvert} \,.
\end{align}
```python
def err1(x):
return err_ana(x) / np.abs(1 + a1 * x + a2 * x ** 2)
def err2(x):
return err_ana(x) / np.abs(1 + xs * (xs + 1) / b1 + xs * (xs - 1) / b2)
xs = np.linspace(-3, 3, 100)
ys1 = np.log(1 + a1 * xs + a2 * xs ** 2)
ys2 = np.log(1 + xs * (xs + 1) / b1 + xs * (xs - 1) / b2)
errs1 = err1(xs)
errs2 = err2(xs)
plt.plot(xs, ys1)
plt.fill_between(xs, ys1 - errs1, ys1 + errs1, alpha=0.5)
plt.show()
plt.plot(xs, ys2)
plt.fill_between(xs, ys2 - errs2, ys2 + errs2, alpha=0.5)
plt.show()
```
|
% Conclusions
\chapter{Conclusions}
This chapter concludes the project by briefly discussing possible optimization methods.
As mentioned before, the main goal of the design was simplicity, so many optimizations were not applied.
The main bottleneck in the current design, as in many computing systems, is the I/O bandwidth. During
each clock cycle, only one unit of data is read or written, which is rather inefficient. Therefore, most of the
optimization methods naturally focus on improving the I/O efficiency.
The first obvious optimization is to transfer all weight data from the Dual-SPI Flash to the \gls{psram} once
the \gls{fpga} is configured. Weights will be loaded from \gls{psram} subsequently, which has a parallel
interface and will result in much faster loading of weights.
Burst transfers can be used to read data from \gls{psram} and further reduce I/O latency. This is done by
placing the starting address on the address bus, after which a fixed amount of data is read in consecutive
cycles as a single ``burst''.
Another optimization, mentioned earlier, is to widen the data buses connected to the \gls{bram} buffers.
Currently, the data port of the input buffer is only 8 bits wide. This can be increased to 32 bits or even
more, so that multiple data words can be loaded at the same time and the corresponding calculations can be
performed on them in parallel. The current design is highly sequential and only utilizes around $15\%$ of the
\gls{dsp} slices.
Yet another optimization is to introduce several small caches using distributed RAM. These caches are
implemented using \glspl{lut} and are faster than \glspl{bram}, since they can be read asynchronously.
Once inputs and weights are loaded into these caches, more parallelism can be achieved, utilizing more \gls{dsp} slices.
Finally, on an \gls{fpga} chip with more \gls{bram} capacity, layer-level parallelism can be exploited, i.e.,
several layers can be calculated at the same time. This involves modifying the current ring structure and
forwarding data to subsequent layers in a single pass. Each layer will work on its input concurrently, but
some global coordination and scheduling is needed in order to avoid data overrun.
More discussion on efficient processing of deep neural networks can be found in
\cite{DBLP:journals/corr/SzeCYE17}, which includes a section on hardware acceleration.
Due to the limitations of the development board used in this project, the design runs in a way that resembles a temporal architecture; however, it can easily be modified into a spatial architecture.
Memory hierarchy and an optimized dataflow can be utilized to greatly improve overall efficiency and reduce
power.
\clearpage %force the next chapter to start on a new page. Keep that as the last line of your chapter!
|
%% FUNCTION split_data
% Splitting multi-task data into training / testing by percentage.
%
%% INPUT
% X: {n * d} * t - input matrix
% Y: {n * 1} * t - output matrix
% percent: percentage of the split, in the range (0, 1)
%
%% OUTPUT
% X_train: the split of X that has the specified percent of samples
% Y_train: the split of Y that has the specified percent of samples
% X_test: the split of X that has the remaining samples
% Y_test: the split of Y that has the remaining samples
% selIdx: the selection index for X_train and Y_train for each task
%%
function [X_train, Y_train, X_test, Y_test, selIdx] = split_data(X, Y, percent)
if percent > 1 || percent < 0
error('splitting percentage error')
end
task_num = length(X);
selIdx = cell(task_num, 0);
X_train = cell(task_num, 0);
Y_train = cell(task_num, 0);
X_test = cell(task_num, 0);
Y_test = cell(task_num, 0);
for t = 1:task_num
task_sample_size = length(Y{t});
tSelIdx = randperm(task_sample_size) < task_sample_size * percent;
selIdx{t} = tSelIdx;
X_train{t} = X{t}(tSelIdx,:);
Y_train{t} = Y{t}(tSelIdx,:);
X_test{t} = X{t}(~tSelIdx,:);
Y_test{t} = Y{t}(~tSelIdx,:);
end |
{-# OPTIONS --without-K #-}
open import HoTT hiding (_::_)
module algebra.Word {i} (A : Type i) where
data Word : Type i where
nil : Word
_::_ : A → Word → Word
_inv::_ : A → Word → Word
infixr 60 _::_ _inv::_
-- The following six functions prove things like if [x ∷ v ≡ y ∷ w],
-- then [x ≡ y].
-- This is not as easy as it sounds: you cannot directly induct on the equality
-- (because [x ∷ v] is not a general element of type [Word]), so you have to
-- extract the head, but it’s not always possible…
Word= : (v w : Word) → Type i
Word= nil nil = Lift ⊤
Word= nil (y :: w) = Lift ⊥
Word= nil (y inv:: w) = Lift ⊥
Word= (x :: v) nil = Lift ⊥
Word= (x :: v) (y :: w) = (x == y) × (v == w)
Word= (x :: v) (y inv:: w) = Lift ⊥
Word= (x inv:: v) nil = Lift ⊥
Word= (x inv:: v) (y :: w) = Lift ⊥
Word= (x inv:: v) (y inv:: w) = (x == y) × (v == w)
Word=-out : {v w : Word} (p : v == w) → Word= v w
Word=-out {v = nil} idp = lift unit
Word=-out {v = x :: v} idp = (idp , idp)
Word=-out {v = x inv:: v} idp = (idp , idp)
Word=-in : {v w : Word} → Word= v w → v == w
Word=-in {nil} {nil} _ = idp
Word=-in {nil} {y :: w} (lift ())
Word=-in {nil} {y inv:: w} (lift ())
Word=-in {x :: v} {nil} (lift ())
Word=-in {x :: v} {y :: w} (p , q) = ap2 _::_ p q
Word=-in {x :: v} {y inv:: w} (lift ())
Word=-in {x inv:: v} {nil} (lift ())
Word=-in {x inv:: v} {y :: w} (lift ())
Word=-in {x inv:: v} {y inv:: w} (p , q) = ap2 _inv::_ p q
Word-fst= : {x y : A} {v w : Word} (p : x :: v == y :: w) → x == y
Word-fst= p = fst (Word=-out p)
Word-snd= : {x y : A} {v w : Word} (p : x :: v == y :: w) → v == w
Word-snd= p = snd (Word=-out p)
Word-fst=' : {x y : A} {v w : Word} (p : x inv:: v == y inv:: w) → x == y
Word-fst=' p = fst (Word=-out p)
Word-snd=' : {x y : A} {v w : Word} (p : x inv:: v == y inv:: w) → v == w
Word-snd=' p = snd (Word=-out p)
|
[STATEMENT]
lemma unique_continuation:
"z \<cdot> g r = z' \<cdot> h s \<Longrightarrow> z \<cdot> g r' = z' \<cdot> h s' \<Longrightarrow> z \<cdot> g (r \<and>\<^sub>p r') = z' \<cdot> h (s \<and>\<^sub>p s')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>z \<cdot> g r = z' \<cdot> h s; z \<cdot> g r' = z' \<cdot> h s'\<rbrakk> \<Longrightarrow> z \<cdot> g (r \<and>\<^sub>p r') = z' \<cdot> h (s \<and>\<^sub>p s')
[PROOF STEP]
using lcp_ext_left g.marked_morph_lcp h.marked_morph_lcp
[PROOF STATE]
proof (prove)
using this:
?z \<cdot> ?u \<and>\<^sub>p ?z \<cdot> ?v = ?z \<cdot> (?u \<and>\<^sub>p ?v)
g (?r \<and>\<^sub>p ?s) = g ?r \<and>\<^sub>p g ?s
h (?r \<and>\<^sub>p ?s) = h ?r \<and>\<^sub>p h ?s
goal (1 subgoal):
1. \<lbrakk>z \<cdot> g r = z' \<cdot> h s; z \<cdot> g r' = z' \<cdot> h s'\<rbrakk> \<Longrightarrow> z \<cdot> g (r \<and>\<^sub>p r') = z' \<cdot> h (s \<and>\<^sub>p s')
[PROOF STEP]
by metis |
State Before: l : Type ?u.54869
m : Type ?u.54872
n : Type u_2
o : Type ?u.54878
m' : o → Type ?u.54883
n' : o → Type ?u.54888
R : Type u_1
S : Type ?u.54894
α : Type v
β : Type w
γ : Type ?u.54901
inst✝³ : DecidableEq n
inst✝² : Monoid R
inst✝¹ : AddMonoid α
inst✝ : DistribMulAction R α
r : R
d : n → α
⊢ diagonal (r • d) = r • diagonal d State After: case a.h
l : Type ?u.54869
m : Type ?u.54872
n : Type u_2
o : Type ?u.54878
m' : o → Type ?u.54883
n' : o → Type ?u.54888
R : Type u_1
S : Type ?u.54894
α : Type v
β : Type w
γ : Type ?u.54901
inst✝³ : DecidableEq n
inst✝² : Monoid R
inst✝¹ : AddMonoid α
inst✝ : DistribMulAction R α
r : R
d : n → α
i j : n
⊢ diagonal (r • d) i j = (r • diagonal d) i j Tactic: ext i j State Before: case a.h
l : Type ?u.54869
m : Type ?u.54872
n : Type u_2
o : Type ?u.54878
m' : o → Type ?u.54883
n' : o → Type ?u.54888
R : Type u_1
S : Type ?u.54894
α : Type v
β : Type w
γ : Type ?u.54901
inst✝³ : DecidableEq n
inst✝² : Monoid R
inst✝¹ : AddMonoid α
inst✝ : DistribMulAction R α
r : R
d : n → α
i j : n
⊢ diagonal (r • d) i j = (r • diagonal d) i j State After: no goals Tactic: by_cases h : i = j <;>
simp [h] |
{-# OPTIONS --without-K --safe #-}
module Categories.Category.Instance.Properties.Setoids.Choice where
open import Categories.Category using (Category)
open import Categories.Category.Exact using (Exact)
open import Categories.Category.Instance.Setoids using (Setoids)
open import Data.Product using (∃; proj₁; proj₂; _,_; Σ-syntax; _×_; -,_; map; zip; swap; map₂)
open import Data.Sum using (_⊎_; inj₁; inj₂)
open import Function.Equality as SΠ using (Π; _⇨_) renaming (id to ⟶-id; _∘_ to _∘⟶_)
open import Level
open import Relation.Binary using (Setoid; Rel; REL; IsEquivalence)
import Relation.Binary.Reasoning.Setoid as SR
open import Data.Nat.Base
import Relation.Binary.PropositionalEquality.Core as P
open Setoid renaming (_≈_ to [_][_≈_]; Carrier to ∣_∣) using (isEquivalence; refl; sym; trans)
open Π using (_⟨$⟩_; cong)
module _ ℓ where
private
S = Setoids ℓ ℓ
open Category S hiding (_≈_)
module S = Category S
open import Categories.Category.Instance.Properties.Setoids.Exact
open import Categories.Object.InternalRelation using (Relation)
-- Presentation axiom, aka CoSHEP (http://nlab-pages.s3.us-east-2.amazonaws.com/nlab/show/presentation+axiom)
record CoSHEP (A : Setoid ℓ ℓ) : Set (Level.suc ℓ) where
field
{P} : Setoid ℓ ℓ
pre : P ⇒ A
surj : SSurj ℓ pre
split : {X : Setoid ℓ ℓ} (f : X ⇒ P) → SSurj ℓ f → Σ[ g ∈ ∣ P ⇨ X ∣ ] [ P ⇨ P ][ f ∘ g ≈ id ]
Setoid-CoSHEP : (A : Setoid ℓ ℓ) → CoSHEP A
Setoid-CoSHEP A = record
{ P = record
{ Carrier = ∣ A ∣
; _≈_ = P._≡_
; isEquivalence = record { refl = P.refl ; sym = P.sym ; trans = P.trans }
}
; pre = record
{ _⟨$⟩_ = λ x → x
; cong = λ {x} eq → P.subst (λ z → [ A ][ x ≈ z ]) eq (refl A)
}
; surj = λ x → x , refl A
; split = λ {X} f surj → record
{ _⟨$⟩_ = λ y → let x , _ = surj y in x
; cong = λ {x}{y} x≡y → P.subst (λ z → [ X ][ proj₁ (surj x) ≈ proj₁ (surj z) ]) x≡y (refl X)
}
, λ {x}{y} x≡y → let z , eq = surj x in P.trans eq x≡y
}
entire : {A B : Setoid ℓ ℓ} → (R : Relation S A B) → Set ℓ
entire {A} R = ∀ (x : ∣ A ∣) → Σ[ e ∈ ∣ dom ∣ ] [ A ][ p₁ ⟨$⟩ e ≈ x ]
where open Relation R
ℕ-Setoid : Setoid ℓ ℓ
ℕ-Setoid = record { Carrier = Lift _ ℕ ; _≈_ = P._≡_ ; isEquivalence = record { refl = P.refl ; sym = P.sym ; trans = P.trans } }
record DepChoice {A : Setoid ℓ ℓ} (R : Relation S A A) (inhb : ∣ A ∣) (ent : entire R) : Set (Level.suc ℓ) where
open Relation R
field
pair : ℕ → ∣ dom ∣
chain : ∀ (n : ℕ) → [ A ][ p₁ ⟨$⟩ pair (ℕ.suc n) ≈ p₂ ⟨$⟩ pair n ]
-- Dependent choice for setoids
Setoid-DepChoice : {A : Setoid ℓ ℓ} (R : Relation S A A) (inhb : ∣ A ∣) (ent : entire R) → DepChoice R inhb ent
Setoid-DepChoice {A} R inhb ent = record
{ pair = pair
; chain = chain
}
where
open Relation R
pair : ℕ → ∣ dom ∣
pair ℕ.zero = proj₁ (ent inhb)
pair (ℕ.suc n) = let x , _ = ent (p₂ ⟨$⟩ pair n) in x
chain : (n : ℕ) → [ A ][ p₁ ⟨$⟩ proj₁ (ent (p₂ ⟨$⟩ pair n)) ≈ p₂ ⟨$⟩ pair n ]
chain ℕ.zero = let _ , eq = ent (p₂ ⟨$⟩ proj₁ (ent inhb)) in eq
chain (ℕ.suc n) = let x , eq = ent (p₂ ⟨$⟩ proj₁ (ent (p₂ ⟨$⟩ pair n))) in eq
-- Countable choice for setoids
ℕ-Choice : ∀ {A : Setoid ℓ ℓ} (f : A ⇒ ℕ-Setoid) → SSurj ℓ f → Σ[ g ∈ ∣ ℕ-Setoid ⇨ A ∣ ] [ ℕ-Setoid ⇨ ℕ-Setoid ][ f ∘ g ≈ id ]
ℕ-Choice {A} f surj = record
{ _⟨$⟩_ = λ n → let x , eq = surj n in x
; cong = λ {n}{m} eq → let x , _ = surj n; y , _ = surj m in P.subst (λ m → [ A ][ proj₁ (surj n) ≈ proj₁ (surj m) ]) eq (refl A)
}
, λ {n}{m} n≡m → let _ , eq = surj n in P.trans eq n≡m
|
Formal statement is: lemma has_contour_integral_eq: "\<lbrakk>(f has_contour_integral y) p; \<And>x. x \<in> path_image p \<Longrightarrow> f x = g x\<rbrakk> \<Longrightarrow> (g has_contour_integral y) p" Informal statement is: If $f$ and $g$ are functions that agree on the image of a path $p$, and if $f$ has a contour integral along $p$, then $g$ has the same contour integral along $p$. |
State Before: R : Type u
S : Type v
T : Type w
a b : R
n : ℕ
inst✝¹ : CommRing R
inst✝ : IsDomain R
p✝ q p : R[X]
⊢ ↑Multiset.card (roots p) ≤ natDegree p State After: case pos
R : Type u
S : Type v
T : Type w
a b : R
n : ℕ
inst✝¹ : CommRing R
inst✝ : IsDomain R
p✝ q p : R[X]
hp0 : p = 0
⊢ ↑Multiset.card (roots p) ≤ natDegree p
case neg
R : Type u
S : Type v
T : Type w
a b : R
n : ℕ
inst✝¹ : CommRing R
inst✝ : IsDomain R
p✝ q p : R[X]
hp0 : ¬p = 0
⊢ ↑Multiset.card (roots p) ≤ natDegree p Tactic: by_cases hp0 : p = 0 State Before: case neg
R : Type u
S : Type v
T : Type w
a b : R
n : ℕ
inst✝¹ : CommRing R
inst✝ : IsDomain R
p✝ q p : R[X]
hp0 : ¬p = 0
⊢ ↑Multiset.card (roots p) ≤ natDegree p State After: no goals Tactic: exact WithBot.coe_le_coe.1 (le_trans (card_roots hp0) (le_of_eq <| degree_eq_natDegree hp0)) State Before: case pos
R : Type u
S : Type v
T : Type w
a b : R
n : ℕ
inst✝¹ : CommRing R
inst✝ : IsDomain R
p✝ q p : R[X]
hp0 : p = 0
⊢ ↑Multiset.card (roots p) ≤ natDegree p State After: no goals Tactic: simp [hp0] |
If $r$ is a real number and $M$ is a measure, then $r \cdot M$ is the null measure if and only if $M$ is the null measure. |
lemma strict_mono_leD: "strict_mono r \<Longrightarrow> m \<le> n \<Longrightarrow> r m \<le> r n" |
[STATEMENT]
lemma (in is_tm_functor) cf_cf_obj_comma_proj_is_tm_functor'[cat_comma_cs_intros]:
assumes "b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr>" and "\<FF>b = \<FF> \<^sub>C\<^sub>F\<down> b"
shows "\<FF> \<^sub>C\<^sub>F\<Sqinter>\<^sub>O b : \<FF>b \<mapsto>\<mapsto>\<^sub>C\<^sub>.\<^sub>t\<^sub>m\<^bsub>\<alpha>\<^esub> \<AA>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<FF> \<^sub>C\<^sub>F\<Sqinter>\<^sub>O b : \<FF>b \<mapsto>\<mapsto>\<^sub>C\<^sub>.\<^sub>t\<^sub>m\<^bsub>\<alpha>\<^esub> \<AA>
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr>
goal (1 subgoal):
1. \<FF> \<^sub>C\<^sub>F\<Sqinter>\<^sub>O b : \<FF>b \<mapsto>\<mapsto>\<^sub>C\<^sub>.\<^sub>t\<^sub>m\<^bsub>\<alpha>\<^esub> \<AA>
[PROOF STEP]
unfolding assms(2)
[PROOF STATE]
proof (prove)
using this:
b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr>
goal (1 subgoal):
1. \<FF> \<^sub>C\<^sub>F\<Sqinter>\<^sub>O b : \<FF> \<^sub>C\<^sub>F\<down> b \<mapsto>\<mapsto>\<^sub>C\<^sub>.\<^sub>t\<^sub>m\<^bsub>\<alpha>\<^esub> \<AA>
[PROOF STEP]
by (rule cf_cf_obj_comma_proj_is_tm_functor) |
(* Title: HOL/IMPP/EvenOdd.thy
Author: David von Oheimb, TUM
*)
section \<open>Example of mutually recursive procedures verified with Hoare logic\<close>
theory EvenOdd
imports Main Misc
begin
axiomatization
Even :: pname and
Odd :: pname
where
Even_neq_Odd: "Even ~= Odd" and
Arg_neq_Res: "Arg ~= Res"
definition
evn :: com where
"evn = (IF (%s. s<Arg> = 0)
THEN Loc Res:==(%s. 0)
ELSE(Loc Res:=CALL Odd(%s. s<Arg> - 1);;
Loc Arg:=CALL Odd(%s. s<Arg> - 1);;
Loc Res:==(%s. s<Res> * s<Arg>)))"
definition
odd :: com where
"odd = (IF (%s. s<Arg> = 0)
THEN Loc Res:==(%s. 1)
ELSE(Loc Res:=CALL Even (%s. s<Arg> - 1)))"
overloading bodies \<equiv> bodies
begin
definition "bodies == [(Even,evn),(Odd,odd)]"
end
definition
Z_eq_Arg_plus :: "nat => nat assn" ("Z=Arg+_" [50]50) where
"Z=Arg+n = (%Z s. Z = s<Arg>+n)"
definition
Res_ok :: "nat assn" where
"Res_ok = (%Z s. even Z = (s<Res> = 0))"
subsection "Arg, Res"
declare Arg_neq_Res [simp] Arg_neq_Res [THEN not_sym, simp]
declare Even_neq_Odd [simp] Even_neq_Odd [THEN not_sym, simp]
lemma Z_eq_Arg_plus_def2: "(Z=Arg+n) Z s = (Z = s<Arg>+n)"
apply (unfold Z_eq_Arg_plus_def)
apply (rule refl)
done
lemma Res_ok_def2: "Res_ok Z s = (even Z = (s<Res> = 0))"
apply (unfold Res_ok_def)
apply (rule refl)
done
lemmas Arg_Res_simps = Z_eq_Arg_plus_def2 Res_ok_def2
lemma body_Odd [simp]: "body Odd = Some odd"
apply (unfold body_def bodies_def)
apply auto
done
lemma body_Even [simp]: "body Even = Some evn"
apply (unfold body_def bodies_def)
apply auto
done
subsection "verification"
lemma Odd_lemma: "{{Z=Arg+0}. BODY Even .{Res_ok}}|-{Z=Arg+Suc 0}. odd .{Res_ok}"
apply (unfold odd_def)
apply (rule hoare_derivs.If)
apply (rule hoare_derivs.Ass [THEN conseq1])
apply (clarsimp simp: Arg_Res_simps)
apply (rule export_s)
apply (rule hoare_derivs.Call [THEN conseq1])
apply (rule_tac P = "Z=Arg+Suc (Suc 0) " in conseq12)
apply (rule single_asm)
apply (auto simp: Arg_Res_simps)
done
lemma Even_lemma: "{{Z=Arg+1}. BODY Odd .{Res_ok}}|-{Z=Arg+0}. evn .{Res_ok}"
apply (unfold evn_def)
apply (rule hoare_derivs.If)
apply (rule hoare_derivs.Ass [THEN conseq1])
apply (clarsimp simp: Arg_Res_simps)
apply (rule hoare_derivs.Comp)
apply (rule_tac [2] hoare_derivs.Ass)
apply clarsimp
apply (rule_tac Q = "%Z s. P Z s & Res_ok Z s" and P = P for P in hoare_derivs.Comp)
apply (rule export_s)
apply (rule_tac I1 = "%Z l. Z = l Arg & 0 < Z" and Q1 = "Res_ok" in Call_invariant [THEN conseq12])
apply (rule single_asm [THEN conseq2])
apply (clarsimp simp: Arg_Res_simps)
apply (force simp: Arg_Res_simps)
apply (rule export_s)
apply (rule_tac I1 = "%Z l. even Z = (l Res = 0) " and Q1 = "%Z s. even Z = (s<Arg> = 0) " in Call_invariant [THEN conseq12])
apply (rule single_asm [THEN conseq2])
apply (clarsimp simp: Arg_Res_simps)
apply (force simp: Arg_Res_simps)
done
lemma Even_ok_N: "{}|-{Z=Arg+0}. BODY Even .{Res_ok}"
apply (rule BodyN)
apply (simp (no_asm))
apply (rule Even_lemma [THEN hoare_derivs.cut])
apply (rule BodyN)
apply (simp (no_asm))
apply (rule Odd_lemma [THEN thin])
apply (simp (no_asm))
done
lemma Even_ok_S: "{}|-{Z=Arg+0}. BODY Even .{Res_ok}"
apply (rule conseq1)
apply (rule_tac Procs = "{Odd, Even}" and pn = "Even" and P = "%pn. Z=Arg+ (if pn = Odd then 1 else 0) " and Q = "%pn. Res_ok" in Body1)
apply auto
apply (rule hoare_derivs.insert)
apply (rule Odd_lemma [THEN thin])
apply (simp (no_asm))
apply (rule Even_lemma [THEN thin])
apply (simp (no_asm))
done
end
|
* This notebook implements the SSL model in [Kingma (2014)](https://arxiv.org/abs/1406.5298).
* C.f. [this implementation](https://github.com/wiseodd/generative-models/blob/master/VAE/conditional_vae/cvae_tensorflow.py).
> What I cannot create I cannot understand.
## Conditional Variational Auto-encoder
### Motivation
* Conditional variational auto-encoder extends variational auto-encoder.
### Definitions
* Let $X$ be the observed random variable and $Y, Z$ the latent. Let
\begin{equation}
Y, Z \sim P_{Y, Z}
\end{equation}
be some given prior.
* And let
\begin{equation}
X \mid Y, Z \sim P_{X \mid Y, Z; \phi}
\end{equation}
for some distribution in a family parameterized by $\phi$.
For instance, if $X \in \mathbb{R}^n$, it is common to assume a multivariate Gaussian distribution with diagonal (or block-diagonal) covariance, in which case we have
\begin{equation}
X \mid Y, Z \sim \mathcal{N} \left( \mu(Y, Z; \phi_1), \sigma(Y, Z; \phi_2) \right),
\end{equation}
for some universal function approximators $\mu(\cdot; \phi_1)$ and $\sigma(\cdot; \phi_2)$, with $\phi := (\phi_1, \phi_2)$.
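As an illustration only, such a diagonal-Gaussian decoder could be sketched as below. This is a sketch under assumptions, not the decoder used in this notebook (which models $X$ as Bernoulli), and it relies on the `tensorflow`/`tfd` imports introduced later in the Implementation section.
```python
# Sketch (not used later): a diagonal-Gaussian decoder p(X | Y, Z) for real-valued X.
def get_gaussian_p_X_yz(y, z, X_dim, name='gaussian_p_X_yz', reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        hidden = tf.layers.dense(tf.concat([y, z], axis=1), 128,
                                 activation=tf.nn.relu)
        mu = tf.layers.dense(hidden, X_dim, activation=None)
        log_var = tf.layers.dense(hidden, X_dim, activation=None)
        return tfd.MultivariateNormalDiag(mu, tf.exp(0.5 * log_var))
```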
### Variational Inference
Since the dataset consists of pairs $(x, y)$, we employ some distribution $q(z \mid x, y)$ (to be determined) to approximate $p(z \mid x, y)$ (if the dataset consisted of pairs $(y, z)$ instead, we would be forced to approximate $p(x \mid y, z)$). We then have the bound via the KL-divergence
\begin{align}
\text{KL} \left( q(z \mid x,y) \| p(z \mid x,y) \right)
& = \mathbb{E}_{z \sim q(z \mid x,y)} \left[
\ln q(z \mid x,y) - \ln p(x \mid y,z) - \ln p(z \mid y) \right] \\
& + \ln p(x \mid y) \\
& \geq 0.
\end{align}
Thus, a loss
\begin{align}
L(x, y)
& := \mathbb{E}_{z \sim q(z \mid x,y)} \left[
\ln q(z \mid x,y) - \ln p(x \mid y,z) - \ln p(z \mid y) \right] \\
& \geq - \ln p(x \mid y).
\end{align}
The equality can be reached if and only if $q(z \mid x, y) = p(z \mid x, y)$.
### Relation with Auto-encoder
* The $\mathbb{E}_{z \sim q(z \mid x, y)} \left[ - \ln p(x \mid y, z) \right]$ part can be interpreted as a reconstruction loss.
* The $\mathbb{E}_{z \sim q(z \mid x, y)} \left[ - \ln p(z \mid y) \right]$ part serves as a regularization term.
* It seems that the $\mathbb{E}_{z \sim q(z \mid x, y)} \left[ \ln q(z \mid x, y) \right]$ part has no counterpart in a plain auto-encoder.
### Example
In this very example on the MNIST dataset:
\begin{align}
Y & \sim \text{Categorical uniform}; \\
Z & \sim \mathcal{N} (0, 1); \\
X \mid Y, Z & \sim \text{Bernoulli}\left( p(y, z; \theta) \right).
\end{align}
And inference distributions:
\begin{align}
Z_0 \mid X, Y & \sim \mathcal{N}\left( \mu(x, y; \phi_1), \sigma(x, y; \phi_2) \right); \\
Z \mid Z_0 & \sim T(z_0).
\end{align}
where $Z_0$ follows the "base" inference distribution, here a Gaussian, and $T$ is a bijective transformation with universal approximation capacity (e.g. a normalizing flow).
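For concreteness, the two priors named above can be written down directly. This is a sketch only: it assumes `tfd` and the `y_dim`/`z_dim` values set later in the Loss section, and is not needed for the rest of the notebook.
```python
# Sketch of the priors (assumption: y_dim and z_dim as in the Loss section below).
p_y = tfd.OneHotCategorical(logits=tf.zeros([y_dim]))  # uniform over the 10 classes
p_z = tfd.MultivariateNormalDiag(tf.zeros([z_dim]))    # standard normal prior on Z
```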
## Semi-supervised Learning
### Motivation
* Semi-supervised learning utilizes the conditional variational auto-encoder.
### Definitions
* Let
\begin{equation}
Y \mid Z \sim P_{Y \mid Z; \theta}.
\end{equation}
for some distribution in a family parameterized by $\theta$.
### Variational Inference (Something Wrong!)
By the conditional probability formula,
\begin{equation}
p(y, z \mid x) = p(z \mid x, y) p(y \mid x).
\end{equation}
With this formula, we have the bound by KL-divergence,
\begin{align}
\text{KL} \left( q(y, z \mid x) \| p(y, z \mid x) \right)
& = \int q(y, z \mid x) \left[
\ln q(y, z \mid x) - \ln p(y, z \mid x)
\right] dy dz \\
& = \int q(z \mid x, y) q(y \mid x) \left[
\ln q(z \mid x, y) + \ln q(y \mid x) - \ln p(x \mid y, z) - \ln p(y, z)
\right] dy dz \\
& + \ln p(x).
\end{align}
Integrating out $z$ gives
\begin{align}
\text{KL} \left( q(y, z \mid x) \| p(y, z \mid x) \right)
& = \int q(y \mid x) \left[ L(x, y) + \ln q(y \mid x) \right] dy + \ln p(x) \\
& = \mathbb{E}_{y \sim q(y \mid x)} \left[ L(x, y) \right]
- \mathcal{H} \left[ q(y \mid x) \right]
+ \ln p(x) \\
& \geq 0.
\end{align}
Thus a loss
\begin{align}
U(x) & := \mathbb{E}_{y \sim q(y \mid x)} \left[ L(x, y) \right]
- \mathcal{H} \left[ q(y \mid x) \right] \\
& \geq - \ln p(x).
\end{align}
The equality can be reached if and only if $q(y \mid x) = p(y \mid x)$, then $q(y, z \mid x) = p(y, z \mid x)$.
This loss can be derived in a more concise way. By approximation $-\ln p(x, y) \approx L(x, y)$,
\begin{align}
\text{KL} \left( q(y \mid x) \| p(y \mid x) \right)
& = \int q(y \mid x) \left[ \ln q(y \mid x) - \ln p(y \mid x) \right] dy \\
& = \int q(y \mid x) \left[ \ln q(y \mid x) - \ln p(x, y) \right] dy + \ln p(x) \\
& \approx \int q(y \mid x) \left[ \ln q(y \mid x) + L(x, y) \right] dy
+ \ln p(x) \\
& = \mathbb{E}_{y \sim q(y \mid x)} \left[ L(x, y) \right]
- \mathcal{H} \left[ q(y \mid x) \right].
\end{align}
Thus, again,
\begin{align}
U(x) & := \mathbb{E}_{y \sim q(y \mid x)} \left[ L(x, y) \right]
- \mathcal{H} \left[ q(y \mid x) \right] \\
& \geq - \ln p(x).
\end{align}
The equality can be reached if and only if $q(y \mid x) = p(y \mid x)$.
The trick in the training is that the minimization of $L(x, y)$ already **completes** the training of the variables in $p(x \mid y, z)$ and $q(z \mid x, y)$, which therefore shall
**not** be trained further in the minimization of $U(x)$.
(The strange thing is that, after training the variables in $p(x \mid y, z)$ and $q(z \mid x, y)$ by minimizing $L(x, y)$, training them further while minimizing $U(x)$ leads to a **higher** $L(x, y)$ than the minimization of $L(x, y)$ alone does. Why?)
### Example
In this very example:
\begin{equation}
Y \mid Z \sim \text{Categorical} \left( \pi(Z; \theta) \right).
\end{equation}
## Implementation
In this implementation, we follow the spirit of TensorFlow, hiding the parameters of functions. That is, define `L(X, y; phi)` as `L(phi)(X, y)`.
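A tiny, self-contained illustration of this convention (the names below are hypothetical and not used elsewhere in the notebook):
```python
# Sketch: parameters are captured in a closure, so `f(x; theta)` is written as
# `get_f(theta)`, which returns a function of `x` alone.
def get_affine(theta):
    w, b = theta
    def affine(x):
        return w * x + b
    return affine

affine = get_affine((2.0, 1.0))
print(affine(3.0))  # 7.0
```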
```python
%matplotlib inline
import os
from IPython.display import display
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
import numpy as np
import tensorflow as tf
import tensorflow.contrib.distributions as tfd
from tensorflow.contrib.distributions.python.ops import bijectors as tfb
from tensorflow.examples.tutorials.mnist import input_data
# For reproducibility
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)
```
### Functions
```python
def get_q_z_Xy(X, y, z_dim, hidden_layers=None, bijectors=None,
dtype='float32', name='q_z_Xy', reuse=None):
"""Returns the distribution of Z | X, Y.
Z = bijector(Z_0), and
Z_0 | X, Y ~ Normal(mu(X,Y;phi), sigma(X,Y;phi)).
Args:
X: Tensor with shape `[batch_size, X_dim]`.
y: Tensor with shape `[batch_size, y_dim]`.
hidden_layers: List of positive integers. Defaults to
`[512, 256, 128]`.
bijectors: List of `tfb.Bijector`s. Defaults to an empty
list.
dtype: String.
name: String.
reuse: Boolean.
Returns:
An instance of `tfd.Distribution` with batch-shape `batch_size`
and event-shape `z_dim`.
"""
if bijectors is None:
bijectors = []
if hidden_layers is None:
hidden_layers = [512, 256, 128]
with tf.variable_scope(name, reuse=reuse):
# For concatenating, the dtype shall be the same
X = tf.cast(X, dtype)
y = tf.cast(y, dtype)
input_ = tf.concat([X, y], axis=1)
hidden = input_
for hidden_layer in hidden_layers:
hidden = tf.layers.dense(hidden, hidden_layer,
activation=tf.nn.relu)
# Outputs in the fiber-bundle space
output = tf.layers.dense(hidden, z_dim * 2, activation=None)
# shape: [batch_size, z_dim]
mu, log_var = tf.split(output, [z_dim, z_dim], axis=1)
q_z0_Xy = tfd.MultivariateNormalDiag(mu, tf.exp(log_var))
chain = tfb.Chain(bijectors)
q_z_Xy = tfd.TransformedDistribution(distribution=q_z0_Xy,
bijector=chain)
return q_z_Xy
```
```python
def get_bijectors(bijector_layers=None, name='bjiectors', reuse=None):
"""Complexify the inference distribution by extra-bijectors like
normalizing flows.
Args:
bijector_layers: List of lists of integers. Each sub-list represents
the hidden_layers of the corresponding bijector. Defaults to ten
`[128]` lists, i.e. ten bijectors with each having one hidden layer
with 128 perceptrons.
name: String.
reuse: Boolean.
Returns:
List of `Bijector`s.
"""
if bijector_layers is None:
n_bijectors = 10
bijector_layers = [[128] for _ in range(n_bijectors)]
with tf.variable_scope(name, reuse=reuse):
bijectors = []
for bijector_layer in bijector_layers:
shift_and_log_scale_fn = \
tfb.masked_autoregressive_default_template(bijector_layer)
            # MAF is extremely slow in training. Use IAF instead.
bijector = tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn))
bijectors.append(bijector)
return bijectors
```
```python
def get_p_X_yz(y, z, X_dim, hidden_layers=None,
dtype='float32', name='p_X_yz', reuse=None):
"""Returns the distribution of X | Y, Z.
X | Y, Z ~ Bernoulli( p(Y, Z; theta) ).
Args:
y: Tensor with shape `[batch_size, y_dim]` and dtype `float32`.
z: Tensor with shape `[batch_size, z_dim]` and dtype `float32`.
X_dim: Positive integer.
hidden_layers: List of positive integers. Defaults to
`[128, 256, 512]`.
dtype: String.
name: String.
reuse: Boolean.
Returns:
An instance of `tfd.Distribution` with batch-shape `batch_size`
and event-shape `X_dim`.
"""
if hidden_layers is None:
hidden_layers = [128, 256, 512]
with tf.variable_scope(name, reuse=reuse):
# For concatenating, the dtype shall be the same
y = tf.cast(y, dtype)
z = tf.cast(z, dtype)
input_ = tf.concat([y, z], axis=1)
hidden = input_
for hidden_layer in hidden_layers:
hidden = tf.layers.dense(hidden, hidden_layer,
activation=tf.nn.relu)
logits = tf.layers.dense(hidden, X_dim, activation=None)
p_X_yz = tfd.Independent(tfd.Bernoulli(logits=logits))
return p_X_yz
```
```python
def get_loss_Xy(get_q_z_Xy, get_p_X_yz, p_z_y, reuse=None):
"""L(X,y) := E_{z ~ q(z|X,y)} [ log_q(z|X,y) - log_p(X|y,z) - log_p(z|y) ].
Args:
get_q_z_Xy: Callable with the signature:
Args:
X: Tensor with shape `[batch_size, X_dim]`.
y: Tensor with shape `[batch_size, y_dim]`.
reuse: Boolean.
Returns:
An instance of `tfd.Distribution` with batch-shape `batch_size`
and event-shape `z_dim`.
get_p_X_yz: Callable with the signature:
Args:
y: Tensor with shape `[batch_size, y_dim]` and dtype `float32`.
z: Tensor with shape `[batch_size, z_dim]` and dtype `float32`.
reuse: Boolean.
Returns:
An instance of `tfd.Distribution` with batch-shape `batch_size`
and event-shape `X_dim`.
p_z_y: An instance of `tfd.Distribution` with batch-shape `batch_size`
and event-shape `z_dim`.
reuse: If reuse the variables in `get_q_z_Xy` and `get_p_X_yz`.
Returns:
Callable with the signature:
Args:
X: Tensor of the shape `[batch_size, X_dim]`.
y: Tensor of the shape `[batch_size, y_dim]`.
name: String.
Returns:
Tensor of the shape `[batch_size]`.
"""
def loss_Xy(X, y, name='loss_Xy'):
"""
Args:
X: Tensor of the shape `[batch_size, X_dim]`.
y: Tensor of the shape `[batch_size, y_dim]`.
name: String.
Returns:
Tensor of the shape `[batch_size]`.
"""
with tf.name_scope(name):
# Get the distribution q(z|X,y)
q_z_Xy = get_q_z_Xy(X, y, reuse=reuse)
# Get the distribution p(X|y,z)
z_samples = q_z_Xy.sample()
p_X_yz = get_p_X_yz(y, z_samples, reuse=reuse)
# Compute the tensor of L(X,y)
loss_Xy_tensor = tf.zeros([batch_size]) # initialize.
# E_{z ~ q(z|X,y)} [ log_q(z|X,y) ]
loss_Xy_tensor += q_z_Xy.log_prob(z_samples)
# E_{z ~ q(z|X,y)} [ - log_p(z|y) ]
loss_Xy_tensor += -1 * p_z_y.log_prob(z_samples)
# E_{z ~ q(z|X,y)} [ - log_p(X|y,z) ]
loss_Xy_tensor += -1 * p_X_yz.log_prob(X)
return loss_Xy_tensor
return loss_Xy
```
```python
def get_q_y_X(X, name='q_y_X', reuse=None):
"""Returns the distribution of Y | X.
Y | X ~ Categorical( pi(X; alpha) ).
Args:
X: Tensor of the shape `[batch_size, X_dim]`.
Returns:
An instance of `tfd.Distribution` with batch-shape `batch_size`.
and event-shape `()`.
"""
with tf.variable_scope(name, reuse=reuse):
hidden = tf.layers.dense(X, 512, activation=tf.nn.relu)
hidden = tf.layers.dense(hidden, 256, activation=tf.nn.relu)
hidden = tf.layers.dense(hidden, 128, activation=tf.nn.relu)
# Outputs in the fiber-bundle space
logits = tf.layers.dense(hidden, y_dim, activation=None)
q_y_X = tfd.Independent(tfd.OneHotCategorical(logits=logits))
return q_y_X
```
```python
def get_loss_X(q_y_X, loss_Xy, name='loss_X'):
"""U(X) := E_{y ~ q(y|X)} [ L(X,y) ] - H(q(y|X)).
Args:
q_y_X: An instance of `tfd.Distribution` with batch-shape `batch_size`
and event-shape `()`.
loss_Xy: Callable with the signature:
Args:
X: Tensor of the shape `[batch_size, X_dim]`.
y: Tensor of the shape `[batch_size, y_dim]`.
Returns:
Tensor of the shape `[batch_size]`.
Returns:
Callable with signature:
Args:
X: Tensor of the shape `[batch_size, X_dim]`.
Returns:
Tensor of the shape `[batch_size]`.
"""
def loss_X(X):
"""
Args:
X: Tensor of the shape `[batch_size, X_dim]`.
Returns:
Tensor of the shape `[batch_size]`.
"""
with tf.name_scope(name):
# shape: `[batch_size, y_dim]`
y_samples = q_y_X.sample()
# shape: `[batch_size]`
loss_X_tensor = loss_Xy(X, y_samples) - q_y_X.entropy()
return loss_X_tensor
return loss_X
```
```python
import collections
def get_dependent_variables(tensor_or_tensors):
"""Returns all variables that the tensor `tensor` depends on.
Forked from: https://stackoverflow.com/a/42861919/1218716
Args:
tensor_or_tensors: Tensor or a collection of tensors.
Returns:
List of variables.
"""
if isinstance(tensor_or_tensors, (list, tuple, set)): # a collection.
tensors = tensor_or_tensors
dependent_vars = []
for tensor in tensors:
dependent_vars += get_dependent_variables(tensor)
return list(set(dependent_vars))
# When not a collection,
tensor = tensor_or_tensors
# Initialize
starting_op = tensor.op
dependent_vars = []
queue = collections.deque()
queue.append(starting_op)
op_to_var = {var.op: var for var in tf.trainable_variables()}
visited = set([starting_op])
while queue:
op = queue.popleft()
try:
dependent_vars.append(op_to_var[op])
except KeyError:
# `op` is not a variable, so search its inputs (if any).
for op_input in op.inputs:
if op_input.op not in visited:
queue.append(op_input.op)
visited.add(op_input.op)
return dependent_vars
```
### Loss
```python
batch_size = 128
X_dim = 28 * 28
y_dim = 10
X = tf.placeholder(shape=[batch_size, X_dim], dtype='float32', name='X')
y = tf.placeholder(shape=[batch_size, y_dim], dtype='float32', name='y')
```
```python
z_dim = 64
def _get_q_z_Xy(X, y, reuse):
bijectors = get_bijectors(reuse=reuse)
#bijectors = [] # for comparison
return get_q_z_Xy(X, y, z_dim, bijectors=bijectors, reuse=reuse)
def _get_p_X_yz(y, z, reuse):
return get_p_X_yz(y, z, X_dim=X_dim, reuse=reuse)
p_z_y = tfd.MultivariateNormalDiag(tf.zeros([batch_size, z_dim]),
name='p_z_y')
loss_Xy = get_loss_Xy(_get_q_z_Xy, _get_p_X_yz, p_z_y,
reuse=tf.AUTO_REUSE)
loss_Xy_tensor = loss_Xy(X, y)
loss_Xy_scalar = tf.reduce_mean(loss_Xy_tensor)
```
```python
q_y_X = get_q_y_X(X, reuse=tf.AUTO_REUSE)
y_samples = q_y_X.sample()
```
```python
loss_X = get_loss_X(q_y_X, loss_Xy)
loss_X_tensor = loss_X(X)
loss_X_scalar = tf.reduce_mean(loss_X_tensor)
```
### Training
```python
# Minimize L(X,y)
optimizer = tf.train.AdamOptimizer(epsilon=1e-3)
minimize_loss_Xy = optimizer.minimize(loss_Xy_scalar)
```
```python
# Minimize U(X), while fixing the variables in L(X,y)
optimizer = tf.train.AdamOptimizer(epsilon=1e-3, learning_rate=1e-5)
loss_X_vars = [_ for _ in get_dependent_variables(loss_X_scalar)
if _ not in get_dependent_variables(loss_Xy_scalar)]
minimize_loss_X = optimizer.minimize(loss_X_scalar, var_list=loss_X_vars)
```
### Generating
```python
# X | Y,Z
z_samples_ph = tf.placeholder(shape=[batch_size, z_dim], dtype='float32')
p_X_yz_gen = _get_p_X_yz(y, z_samples_ph, reuse=tf.AUTO_REUSE)
X_samples = p_X_yz_gen.sample()
```
```python
def get_image(array):
"""
Args:
array: Numpy array with shape `[28*28]`.
Returns:
An image.
"""
array = 255 * array
array = array.reshape([28, 28])
array = array.astype(np.uint8)
return Image.fromarray(array)
```
### Having Fun
```python
sess = tf.Session()
sess.run(tf.global_variables_initializer())
```
```python
DATA_DIR = './dat'
mnist = input_data.read_data_sets(
os.path.join(DATA_DIR, 'MNIST'),
one_hot=True,
source_url='http://yann.lecun.com/exdb/mnist/')
def get_X_y_batch(source='train'):
if source == 'train':
data = mnist.train
elif source == 'test':
data = mnist.test
else:
raise ValueError('Argument source can either be "train" or "test".')
X_batch, y_batch = data.next_batch(batch_size)
# Since X | Y,Z ~ Bernoulli, the observed values of X must be
# either 0 or 1, so binarize the pixels.
X_batch = np.where(X_batch >= 0.5, np.ones_like(X_batch),
np.zeros_like(X_batch))
return X_batch, y_batch
```
Extracting ./dat/MNIST/train-images-idx3-ubyte.gz
Extracting ./dat/MNIST/train-labels-idx1-ubyte.gz
Extracting ./dat/MNIST/t10k-images-idx3-ubyte.gz
Extracting ./dat/MNIST/t10k-labels-idx1-ubyte.gz
#### Minimizing $L(x, y)$ over batches
```python
# Training by minimizing `loss_Xy_scalar`
loss_Xy_vals = []
smeared_loss_Xy_vals = []
smear_steps = 300
for i in tqdm(range(100000)):
X_batch, y_batch = get_X_y_batch()
_, loss_Xy_val = sess.run([minimize_loss_Xy, loss_Xy_scalar],
feed_dict={X: X_batch, y: y_batch})
if np.isnan(loss_Xy_val):
raise ValueError('Loss has been NaN.')
loss_Xy_vals.append(loss_Xy_val)
smeared_loss_Xy_vals.append(np.mean(loss_Xy_vals[-smear_steps:]))
print('Mean L(X,y):', np.mean(loss_Xy_vals))
print('Final L(X,y):', smeared_loss_Xy_vals[-1])
# Visualize the training loss
plt.plot(smeared_loss_Xy_vals)
plt.xlabel('steps')
plt.ylabel('L(X,y) (smeared)')
plt.show()
# Visualize the training loss in the last several steps
last_steps = 20000
plt.plot(smeared_loss_Xy_vals[-last_steps:])
plt.xlabel('last steps')
plt.ylabel('L(X,y) (smeared)')
plt.show()
```
```python
_, y_batch = get_X_y_batch()
# Generate X
z_samples_val = np.random.normal(size=[batch_size, z_dim])
X_sample_vals = sess.run(X_samples,
feed_dict={y: y_batch, z_samples_ph: z_samples_val})
# Display the results
n_display = 5
for i in range(n_display):
print('Label:', np.argmax(y_batch[i]))
print('Generated:')
display(get_image(X_sample_vals[i]))
print()
```
#### Minimizing $U(x)$ over batches
```python
'''
# Training by minimizing `loss_X_scalar`
loss_X_vals = []
smeared_loss_X_vals = []
for i in tqdm(range(100000)):
X_batch, y_batch = get_X_y_batch()
_, loss_X_val = sess.run([minimize_loss_X, loss_X_scalar],
feed_dict={X: X_batch})
if np.isnan(loss_X_val):
raise ValueError('Loss has been NaN.')
loss_X_vals.append(loss_X_val)
smeared_loss_X_vals.append(np.mean(loss_X_vals[-smear_steps:]))
print('Mean U(X):', np.mean(loss_X_vals))
print('Final U(X):', smeared_loss_X_vals[-1])
# Visualize the training loss
plt.plot(smeared_loss_X_vals)
plt.xlabel('steps')
plt.ylabel('U(X) (smeared)')
plt.show()
# Visualize the training loss in the last several steps
plt.plot(smeared_loss_X_vals[-last_steps:])
plt.xlabel('last steps')
plt.ylabel('U(X) (smeared)')
plt.show()
'''
```
"\n# Training by minimizing `loss_X_scalar`\nloss_X_vals = []\nsmeared_loss_X_vals = []\nfor i in tqdm(range(100000)):\n X_batch, y_batch = get_X_y_batch()\n _, loss_X_val = sess.run([minimize_loss_X, loss_X_scalar],\n feed_dict={X: X_batch})\n if np.isnan(loss_X_val):\n raise ValueError('Loss has been NaN.')\n loss_X_vals.append(loss_X_val)\n smeared_loss_X_vals.append(np.mean(loss_X_vals[-smear_steps:]))\nprint('Mean U(X):', np.mean(loss_X_vals))\nprint('Final U(X):', smeared_loss_X_vals[-1])\n\n# Visualize the training loss\nplt.plot(smeared_loss_X_vals)\nplt.xlabel('steps')\nplt.ylabel('U(X) (smeared)')\nplt.show()\n\n# Visualize the training loss in the last several steps\nplt.plot(smeared_loss_X_vals[-last_steps:])\nplt.xlabel('last steps')\nplt.ylabel('U(X) (smeared)')\nplt.show()\n"
```python
'''
X_batch, y_batch = get_X_y_batch()
# Predict y
y_sample_vals = sess.run(y_samples, feed_dict={X: X_batch})
# Display the results
n_display = 5
for i in range(n_display):
print('Label:', np.argmax(y_batch[i]))
print('Predicted:', np.argmax(y_sample_vals[i]))
print('Original:')
display(get_image(X_batch[i]))
'''
```
"\nX_batch, y_batch = get_X_y_batch()\n\n# Predict y\ny_sample_vals = sess.run(y_samples, feed_dict={X: X_batch})\n\n# Display the results\nn_display = 5\nfor i in range(n_display):\n print('Label:', np.argmax(y_batch[i]))\n print('Predicted:', np.argmax(y_sample_vals[i]))\n print('Original:')\n display(get_image(X_batch[i]))\n"
* We find that minimizing $U(x)$ is consistently ineffective. Why?
### Effect of Normalizing-flow
* Without normalizing flows, 100000 iterations give a final L(X,y) of about 70.0.
* With 10 normalizing flows, 100000 iterations give a final L(X,y) of about 65.5 (an improvement of roughly 4.5).
```python
np.exp(4)
```
54.598150033144236
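* If the `np.exp` cell above is meant to translate this gap in the loss into a per-example likelihood ratio (an interpretation, not stated explicitly in the text), the relevant factor would be $e^{4.5} \approx 90$, since $L(X,y)$ approximates $-\ln p(X \mid y)$.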
### Discrimination (Something Wrong!)
Training by minimizing $L(x, y)$ over data-batches yields the approximation
\begin{equation}
\ln p(x \mid y) \approx - L(x, y).
\end{equation}
Thus,
\begin{align}
\ln p(y \mid x)
& = \ln p(x \mid y) + \ln p(y) - \ln p(x) \\
& \approx -L(x, y) + \ln p(y) - \ln p(x).
\end{align}
That is, given $x$, the most probable value of $y$ is the one that minimizes $L(x, y) - \ln p(y)$.
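As a minimal sketch of this decision rule (assuming a uniform prior $p(y)$ over the ten digit classes, and a helper like the `get_loss_Xy_val` defined a couple of cells below, which evaluates $L(x, y)$ on numpy arrays), classification could look like:
```python
import numpy as np

def classify_by_loss(get_loss_Xy_val, X_batch, y_dim=10, log_prior=None):
    """Pick argmax_y [ -L(X, y) + log p(y) ] for each row of `X_batch`.

    Sketch only: assumes `get_loss_Xy_val(X, y)` returns an array of
    shape `[batch_size]`; defaults to a uniform prior over labels.
    """
    batch_size = X_batch.shape[0]
    if log_prior is None:
        log_prior = np.full([y_dim], -np.log(y_dim))  # uniform p(y)
    scores = []
    for i in range(y_dim):
        y_batch = np.zeros([batch_size, y_dim])
        y_batch[:, i] = 1.0  # one-hot label i
        # -L(X, y=i) + log p(y=i); the -log p(X) term is constant in y.
        scores.append(-get_loss_Xy_val(X_batch, y_batch) + log_prior[i])
    return np.argmax(np.stack(scores, axis=1), axis=1)  # shape: [batch_size]
```
With a uniform prior the $\ln p(y)$ term is constant in $y$, so this reduces to taking $\arg\min_y L(x, y)$, which is what the `predict` sketch in the next cell effectively does.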
```python
''' # TODO
def predict(get_loss_Xy_val, X_batch, y_dim=y_dim):
"""Returns the log-probability of discrimination for each element
in the X's batch `X_batch`.
Args:
get_loss_Xy_val: Callable that maps two arrays of the shapes
`[batch_size, X_dim]` and `[batch_size, y_dim]`, respectively,
to an array of the shape `[batch_size]`.
X_batch: Array of the shape `[batch_size, X_dim]`.
Returns:
An array of the shape `[batch_size, y_dim]`.
"""
batch_size, _ = X_batch.shape
loss_Xy_vals = []
for i in range(y_dim):
y_batch = np.zeros([batch_size, y_dim])
y_batch[:,i] = np.ones([batch_size])
loss_Xy_vals.append(get_loss_Xy_val(X_batch, y_batch))
# shape: `[batch_size, y_dim]`
log_prob_Xy = -np.stack(loss_Xy_vals, axis=1)
# shape: `[batch_size]`
log_prob_X = logsumexp(log_prob_Xy, axis=1)
# shape: `[batch_size, 1]`
log_prob_X = np.expand_dims(log_prob_X, axis=1)
# shape: `[batch_size, y_dim]`
log_probs = log_prob_Xy - log_prob_X
return log_probs
'''
```
```python
def get_loss_Xy_val(X_batch, y_batch):
return sess.run(loss_Xy_tensor, {X: X_batch, y: y_batch})
```
```python
''' # TODO
X_batch, y_batch = get_X_y_batch(source='test')
predicted = predict(get_loss_Xy_val, X_batch)
count = 0
for i, X_val in enumerate(X_batch):
y_val = np.argmax(y_batch, axis=1)[i]
p_val = np.argmax(predicted, axis=1)[i]
if y_val != p_val:
display(get_image(X_val))
print('Difference:', y_val, p_val)
print('Probabilities:', ', '.join(['{0:.3f}'.format(np.exp(_))
for _ in predicted[i]]))
print()
count += 1
print('Accuracy:', 1 - count / len(y_batch))
'''
```
" # TODO\nX_batch, y_batch = get_X_y_batch(source='test')\npredicted = predict(get_loss_Xy_val, X_batch)\n\ncount = 0\nfor i, X_val in enumerate(X_batch):\n y_val = np.argmax(y_batch, axis=1)[i]\n p_val = np.argmax(predicted, axis=1)[i]\n \n if y_val != p_val:\n display(get_image(X_val))\n print('Difference:', y_val, p_val)\n print('Probabilities:', ', '.join(['{0:.3f}'.format(np.exp(_))\n for _ in predicted[i]]))\n print()\n count += 1\nprint('Accuracy:', 1 - count / len(y_batch))\n"
```python
```
|
[STATEMENT]
lemma surj_on_imageI: "surj_on (g \<circ> f) A B \<Longrightarrow> surj_on g (f ` A) B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. surj_on (g \<circ> f) A B \<Longrightarrow> surj_on g (f ` A) B
[PROOF STEP]
unfolding surj_on_conv
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>b\<in>B. \<exists>a\<in>A. b = (g \<circ> f) a \<Longrightarrow> \<forall>b\<in>B. \<exists>a\<in>f ` A. b = g a
[PROOF STEP]
by fastforce |
If $c \neq 0$, then $a = b/c$ if and only if $c \cdot a = b$. |
[STATEMENT]
lemma M_simps:
"edge_succ M = lists_succ xss"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. edge_succ (to_map' (snd G_list) xss) = lists_succ xss
[PROOF STEP]
unfolding to_map_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. edge_succ \<lparr>edge_rev = swap_in (set (snd G_list)), edge_succ = lists_succ xss\<rparr> = lists_succ xss
[PROOF STEP]
by (cases G_list) auto |
toy_input := "start-A
start-b
A-c
A-b
b-d
A-end
b-end":
toy2_input:="dc-end
HN-start
start-kj
dc-start
dc-HN
LN-dc
HN-end
kj-sa
kj-HN
kj-dc":
sample_input := "fs-end
he-DX
fs-he
start-DX
pj-DX
end-zg
zg-sl
zg-pj
pj-he
RW-he
fs-DX
pj-RW
zg-RW
start-pj
he-WI
zg-he
pj-fs
start-RW":
input := FileTools:-Text:-ReadFile("AoC-2021-12-input.txt" ):
nodes := map(s->{StringTools:-Split(s,"-")[]}, StringTools:-Split(input)):
G := GraphTheory:-Graph( {nodes[]} );
small := select(StringTools:-IsLower,{map(op, nodes)[]}) minus {"end"};
numpaths := proc(a, s2:=FAIL, visited_s2:=false,
excluding:={"start"}, path:=[] )
global G, small;
local newex, newvs2;
if a = "end" then
if s2<>FAIL and numboccur(path, s2) <> 2 then
return 0;
end if;
# print([path[],"end"]);
return 1;
end if;
local nb := {GraphTheory:-Neighborhood(G, a)[]} minus excluding;
if a = s2 and visited_s2 = false then
newex := excluding;
newvs2 := true;
elif a in small then
newex := excluding union {a};
newvs2 := visited_s2;
else
newex := excluding;
newvs2 := visited_s2;
end if;
return add(numpaths(i, s2, newvs2, newex, [path[],a]), i in nb);
end proc:
answer1 := numpaths("start");
answer2 := answer1 + add(numpaths("start", pp), pp in small minus {"start"});
|
#include "../coroutine.h"
#include "../controller.h"
#include "os.h"
#include "process.h"
#include <filesystem>
// Obtain the local network interface addresses
#include <sys/types.h>
#include <sys/socket.h>
#include <ifaddrs.h>
#include <netdb.h>
#include <boost/process.hpp>
#include <boost/process/async.hpp>
namespace flame::os {
void declare(php::extension_entry &ext) {
ext
.function<interfaces>("flame\\os\\interfaces")
.function<spawn>("flame\\os\\spawn", {
{"executable", php::TYPE::STRING},
{"arguments", php::TYPE::ARRAY, false, true},
{"options", php::TYPE::ARRAY, false, true},
})
.function<exec>("flame\\os\\exec", {
{"executable", php::TYPE::STRING},
{"arguments", php::TYPE::ARRAY, false, true},
{"options", php::TYPE::ARRAY, false, true},
});
process::declare(ext);
}
static void interface(php::array& data, const struct ifaddrs *addr) {
php::string name(addr->ifa_name);
php::array info(2);
info.set("family", addr->ifa_addr->sa_family == AF_INET ? "IPv4" : "IPv6");
char address[NI_MAXHOST];
// getnameinfo() reports failures via its return value, not errno
int rc = getnameinfo(addr->ifa_addr,
addr->ifa_addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6),
address, NI_MAXHOST, nullptr, 0, NI_NUMERICHOST);
if (rc != 0)
throw php::exception(zend_ce_exception, gai_strerror(rc), rc);
info.set("address", address);
if (data.exists(name)) {
php::array iface = data.get(name);
iface.set(iface.size(), info);
}
else {
php::array iface(2);
iface.set(iface.size(), info);
data.set(name, iface);
}
}
php::value interfaces(php::parameters ¶ms) {
struct ifaddrs *addr;
if (getifaddrs(&addr) != 0) throw php::exception(zend_ce_exception, std::strerror(errno), errno);
else if (!addr) return php::array(0);
php::array data(4);
// Use a shared_ptr so the interface list is freed automatically
std::shared_ptr<struct ifaddrs> autofree(addr, freeifaddrs);
do {
if (addr->ifa_addr == nullptr) continue; // guard against entries with no address
if (addr->ifa_addr->sa_family != AF_INET && addr->ifa_addr->sa_family != AF_INET6) continue;
interface(data, addr);
} while ((addr = addr->ifa_next) != nullptr);
return std::move(data);
}
php::value spawn(php::parameters ¶ms) {
php::object proc(php::class_entry<process>::entry());
process *proc_ = static_cast<process *>(php::native(proc));
proc_->exit_ = false;
auto env = boost::this_process::environment();
std::string exec = params[0].to_string();
if (exec[0] != '.' && exec[0] != '/')
exec = boost::process::search_path(exec).native();
std::vector<std::string> argv;
std::string cwdv = boost::filesystem::current_path().native();
if (params.size() > 1) {
php::array args = params[1];
for (auto i = args.begin(); i != args.end(); ++i)
argv.push_back(i->second.to_string());
}
if (params.size() > 2) {
php::array opts = params[2];
php::array envs = opts.get("env");
if (envs.type_of(php::TYPE::ARRAY)) {
for (auto i = envs.begin(); i != envs.end(); ++i)
env[i->first.to_string()] = i->second.to_string();
}
php::string cwds = opts.get("cwd");
if (cwds.type_of(php::TYPE::STRING)) cwdv = cwds.to_string();
}
auto c = std::make_unique<boost::process::child>(
exec, boost::process::args = argv, env, gcontroller->context_x,
boost::process::start_dir = cwdv,
boost::process::std_out > proc_->out_,
boost::process::std_err > proc_->err_,
boost::process::on_exit = [proc, proc_] (int exit_code, const std::error_code &) {
proc_->exit_ = true;
if (proc_->ch_) proc_->ch_.resume();
});
if(c->valid()) {
proc.set("pid", c->id());
proc_->c_ = std::move(c);
return proc;
}
else {
return nullptr;
}
}
php::value exec(php::parameters ¶ms) {
php::object proc = spawn(params);
process *proc_ = static_cast<process *>(php::native(proc));
return proc.call("stdout");
}
} // namespace flame::os
|
/-
Copyright (c) 2022 Joachim Breitner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joachim Breitner
-/
import group_theory.order_of_element
import data.finset.noncomm_prod
import data.fintype.big_operators
import data.nat.gcd.big_operators
import order.sup_indep
/-!
# Canonical homomorphism from a finite family of monoids
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines the construction of the canonical homomorphism from a family of monoids.
Given a family of morphisms `ϕ i : N i →* M` for each `i : ι` where elements in the
images of different morphisms commute, we obtain a canonical morphism
`monoid_hom.noncomm_pi_coprod : (Π i, N i) →* M` that coincides with `ϕ`
## Main definitions
* `monoid_hom.noncomm_pi_coprod : (Π i, N i) →* M` is the main homomorphism
* `subgroup.noncomm_pi_coprod : (Π i, H i) →* G` is the specialization to `H i : subgroup G`
and the subgroup embedding.
## Main theorems
* `monoid_hom.noncomm_pi_coprod` coincides with `ϕ i` when restricted to `N i`
* `monoid_hom.noncomm_pi_coprod_mrange`: The range of `monoid_hom.noncomm_pi_coprod` is
`⨆ (i : ι), (ϕ i).mrange`
* `monoid_hom.noncomm_pi_coprod_range`: The range of `monoid_hom.noncomm_pi_coprod` is
`⨆ (i : ι), (ϕ i).range`
* `subgroup.noncomm_pi_coprod_range`: The range of `subgroup.noncomm_pi_coprod` is `⨆ (i : ι), H i`.
* `monoid_hom.injective_noncomm_pi_coprod_of_independent`: in the case of groups, `pi_hom.hom` is
injective if the `ϕ` are injective and the ranges of the `ϕ` are independent.
* `monoid_hom.independent_range_of_coprime_order`: If the `N i` have coprime orders, then the ranges
of the `ϕ` are independent.
* `subgroup.independent_of_coprime_order`: If commuting normal subgroups `H i` have coprime orders,
they are independent.
-/
open_locale big_operators
namespace subgroup
variables {G : Type*} [group G]
/-- `finset.noncomm_prod` is “injective” in `f` if `f` maps into independent subgroups. This
generalizes (one direction of) `subgroup.disjoint_iff_mul_eq_one`. -/
@[to_additive "`finset.noncomm_sum` is “injective” in `f` if `f` maps into independent subgroups.
This generalizes (one direction of) `add_subgroup.disjoint_iff_add_eq_zero`. "]
lemma eq_one_of_noncomm_prod_eq_one_of_independent {ι : Type*} (s : finset ι) (f : ι → G) (comm)
(K : ι → subgroup G) (hind : complete_lattice.independent K) (hmem : ∀ (x ∈ s), f x ∈ K x)
(heq1 : s.noncomm_prod f comm = 1) : ∀ (i ∈ s), f i = 1 :=
begin
classical,
revert heq1,
induction s using finset.induction_on with i s hnmem ih,
{ simp, },
{ have hcomm := comm.mono (finset.coe_subset.2 $ finset.subset_insert _ _),
simp only [finset.forall_mem_insert] at hmem,
have hmem_bsupr: s.noncomm_prod f hcomm ∈ ⨆ (i ∈ (s : set ι)), K i,
{ refine subgroup.noncomm_prod_mem _ _ _,
intros x hx,
have : K x ≤ ⨆ (i ∈ (s : set ι)), K i := le_supr₂ x hx,
exact this (hmem.2 x hx), },
intro heq1,
rw finset.noncomm_prod_insert_of_not_mem _ _ _ _ hnmem at heq1,
have hnmem' : i ∉ (s : set ι), by simpa,
obtain ⟨heq1i : f i = 1, heq1S : s.noncomm_prod f _ = 1⟩ :=
subgroup.disjoint_iff_mul_eq_one.mp (hind.disjoint_bsupr hnmem') hmem.1 hmem_bsupr heq1,
intros i h,
simp only [finset.mem_insert] at h,
rcases h with ⟨rfl | _⟩,
{ exact heq1i },
{ exact ih hcomm hmem.2 heq1S _ h } }
end
end subgroup
section family_of_monoids
variables {M : Type*} [monoid M]
-- We have a family of monoids
-- The fintype assumption is not always used, but declared here, to keep things in order
variables {ι : Type*} [hdec : decidable_eq ι] [fintype ι]
variables {N : ι → Type*} [∀ i, monoid (N i)]
-- And morphisms ϕ into G
variables (ϕ : Π (i : ι), N i →* M)
-- We assume that the elements of different morphism commute
variables (hcomm : pairwise $ λ i j, ∀ x y, commute (ϕ i x) (ϕ j y))
include hcomm
-- We use `f` and `g` to denote elements of `Π (i : ι), N i`
variables (f g : Π (i : ι), N i)
namespace monoid_hom
/-- The canonical homomorphism from a family of monoids. -/
@[to_additive "The canonical homomorphism from a family of additive monoids.
See also `linear_map.lsum` for a linear version without the commutativity assumption."]
def noncomm_pi_coprod : (Π (i : ι), N i) →* M :=
{ to_fun := λ f, finset.univ.noncomm_prod (λ i, ϕ i (f i)) $ λ i _ j _ h, hcomm h _ _,
map_one' := by {apply (finset.noncomm_prod_eq_pow_card _ _ _ _ _).trans (one_pow _), simp},
map_mul' := λ f g,
begin
classical,
convert @finset.noncomm_prod_mul_distrib _ _ _ _ (λ i, ϕ i (f i)) (λ i, ϕ i (g i)) _ _ _,
{ ext i, exact map_mul (ϕ i) (f i) (g i), },
{ rintros i - j - h, exact hcomm h _ _ },
end }
variable {hcomm}
include hdec
@[simp, to_additive]
lemma noncomm_pi_coprod_mul_single (i : ι) (y : N i):
noncomm_pi_coprod ϕ hcomm (pi.mul_single i y) = ϕ i y :=
begin
change finset.univ.noncomm_prod (λ j, ϕ j (pi.mul_single i y j)) _ = ϕ i y,
simp only [←finset.insert_erase (finset.mem_univ i)] {single_pass := tt},
rw finset.noncomm_prod_insert_of_not_mem _ _ _ _ (finset.not_mem_erase i _),
rw pi.mul_single_eq_same,
rw finset.noncomm_prod_eq_pow_card,
{ rw one_pow, exact mul_one _ },
{ intros j hj, simp only [finset.mem_erase] at hj, simp [hj], },
end
omit hcomm
/-- The universal property of `noncomm_pi_coprod` -/
@[to_additive "The universal property of `noncomm_pi_coprod`"]
def noncomm_pi_coprod_equiv :
{ϕ : Π i, N i →* M // pairwise (λ i j, ∀ x y, commute (ϕ i x) (ϕ j y)) }
≃ ((Π i, N i) →* M) :=
{ to_fun := λ ϕ, noncomm_pi_coprod ϕ.1 ϕ.2,
inv_fun := λ f,
⟨ λ i, f.comp (monoid_hom.single N i),
λ i j hij x y, commute.map (pi.mul_single_commute hij x y) f ⟩,
left_inv := λ ϕ, by { ext, simp, },
right_inv := λ f, pi_ext (λ i x, by simp) }
omit hdec
include hcomm
@[to_additive]
lemma noncomm_pi_coprod_mrange : (noncomm_pi_coprod ϕ hcomm).mrange = ⨆ i : ι, (ϕ i).mrange :=
begin
classical,
apply le_antisymm,
{ rintro x ⟨f, rfl⟩,
refine submonoid.noncomm_prod_mem _ _ _ _ _,
intros i hi,
apply submonoid.mem_Sup_of_mem, { use i },
simp, },
{ refine supr_le _,
rintro i x ⟨y, rfl⟩,
refine ⟨pi.mul_single i y, noncomm_pi_coprod_mul_single _ _ _⟩, },
end
end monoid_hom
end family_of_monoids
section family_of_groups
variables {G : Type*} [group G]
variables {ι : Type*} [hdec : decidable_eq ι] [hfin : fintype ι]
variables {H : ι → Type*} [∀ i, group (H i)]
variables (ϕ : Π (i : ι), H i →* G)
variables {hcomm : ∀ (i j : ι), i ≠ j → ∀ (x : H i) (y : H j), commute (ϕ i x) (ϕ j y)}
include hcomm
-- We use `f` and `g` to denote elements of `Π (i : ι), H i`
variables (f g : Π (i : ι), H i)
include hfin
namespace monoid_hom
-- The subgroup version of `noncomm_pi_coprod_mrange`
@[to_additive]
lemma noncomm_pi_coprod_range : (noncomm_pi_coprod ϕ hcomm).range = ⨆ i : ι, (ϕ i).range :=
begin
classical,
apply le_antisymm,
{ rintro x ⟨f, rfl⟩,
refine subgroup.noncomm_prod_mem _ _ _,
intros i hi,
apply subgroup.mem_Sup_of_mem, { use i },
simp, },
{ refine supr_le _,
rintro i x ⟨y, rfl⟩,
refine ⟨pi.mul_single i y, noncomm_pi_coprod_mul_single _ _ _⟩, },
end
@[to_additive]
lemma injective_noncomm_pi_coprod_of_independent
(hind : complete_lattice.independent (λ i, (ϕ i).range))
(hinj : ∀ i, function.injective (ϕ i)) :
function.injective (noncomm_pi_coprod ϕ hcomm):=
begin
classical,
apply (monoid_hom.ker_eq_bot_iff _).mp,
apply eq_bot_iff.mpr,
intros f heq1,
change finset.univ.noncomm_prod (λ i, ϕ i (f i)) _ = 1 at heq1,
change f = 1,
have : ∀ i, i ∈ finset.univ → ϕ i (f i) = 1 :=
subgroup.eq_one_of_noncomm_prod_eq_one_of_independent _ _ _ _ hind (by simp) heq1,
ext i,
apply hinj,
simp [this i (finset.mem_univ i)],
end
variable (hcomm)
omit hfin
@[to_additive]
lemma independent_range_of_coprime_order [finite ι] [Π i, fintype (H i)]
(hcoprime : ∀ i j, i ≠ j → nat.coprime (fintype.card (H i)) (fintype.card (H j))) :
complete_lattice.independent (λ i, (ϕ i).range) :=
begin
casesI nonempty_fintype ι,
classical,
rintros i,
rw disjoint_iff_inf_le,
rintros f ⟨hxi, hxp⟩, dsimp at hxi hxp,
rw [supr_subtype', ← noncomm_pi_coprod_range] at hxp,
rotate, { intros _ _ hj, apply hcomm, exact hj ∘ subtype.ext },
cases hxp with g hgf, cases hxi with g' hg'f,
have hxi : order_of f ∣ fintype.card (H i),
{ rw ← hg'f, exact (order_of_map_dvd _ _).trans order_of_dvd_card_univ },
have hxp : order_of f ∣ ∏ j : {j // j ≠ i}, fintype.card (H j),
{ rw [← hgf, ← fintype.card_pi], exact (order_of_map_dvd _ _).trans order_of_dvd_card_univ },
change f = 1, rw [← pow_one f, ← order_of_dvd_iff_pow_eq_one],
convert ← nat.dvd_gcd hxp hxi, rw ← nat.coprime_iff_gcd_eq_one,
apply nat.coprime_prod_left, intros j _, apply hcoprime, exact j.2,
end
end monoid_hom
end family_of_groups
namespace subgroup
-- We have a family of subgroups
variables {G : Type*} [group G]
variables {ι : Type*} [hdec : decidable_eq ι] [hfin : fintype ι] {H : ι → subgroup G}
-- Elements of `Π (i : ι), H i` are called `f` and `g` here
variables (f g : Π (i : ι), H i)
section commuting_subgroups
-- We assume that the elements of different subgroups commute
variables (hcomm : ∀ (i j : ι), i ≠ j → ∀ (x y : G), x ∈ H i → y ∈ H j → commute x y)
include hcomm
@[to_additive]
lemma commute_subtype_of_commute (i j : ι) (hne : i ≠ j) :
∀ (x : H i) (y : H j), commute ((H i).subtype x) ((H j).subtype y) :=
by { rintros ⟨x, hx⟩ ⟨y, hy⟩, exact hcomm i j hne x y hx hy }
include hfin
/-- The canonical homomorphism from a family of subgroups where elements from different subgroups
commute -/
@[to_additive "The canonical homomorphism from a family of additive subgroups where elements from
different subgroups commute"]
def noncomm_pi_coprod : (Π (i : ι), H i) →* G :=
monoid_hom.noncomm_pi_coprod (λ i, (H i).subtype) (commute_subtype_of_commute hcomm)
variable {hcomm}
include hdec
@[simp, to_additive]
lemma noncomm_pi_coprod_mul_single (i : ι) (y : H i) :
noncomm_pi_coprod hcomm (pi.mul_single i y) = y :=
by apply monoid_hom.noncomm_pi_coprod_mul_single
omit hdec
@[to_additive]
lemma noncomm_pi_coprod_range : (noncomm_pi_coprod hcomm).range = ⨆ i : ι, H i :=
by simp [noncomm_pi_coprod, monoid_hom.noncomm_pi_coprod_range]
@[to_additive]
lemma injective_noncomm_pi_coprod_of_independent (hind : complete_lattice.independent H) :
function.injective (noncomm_pi_coprod hcomm) :=
begin
apply monoid_hom.injective_noncomm_pi_coprod_of_independent,
{ simpa using hind },
{ intro i, exact subtype.coe_injective }
end
variable (hcomm)
omit hfin
@[to_additive]
lemma independent_of_coprime_order [finite ι] [∀ i, fintype (H i)]
(hcoprime : ∀ i j, i ≠ j → nat.coprime (fintype.card (H i)) (fintype.card (H j))) :
complete_lattice.independent H :=
by simpa using monoid_hom.independent_range_of_coprime_order (λ i, (H i).subtype)
(commute_subtype_of_commute hcomm) hcoprime
end commuting_subgroups
end subgroup
|
# indep.t.test.unequal.var.r
# Welch's two-sample t-test (unequal variances), computed by hand.
x <- c(21.6,20.8,17.6,20.1,20.1,21.9,20.6,19.4,21.5,26.1)
y <- c(20.6,20.4,20.2,20.2,18.0,19.8,20.9,19.7,20.3,19.7,22.7)
mx <- mean(x); my <- mean(y)
sdx <- sd(x); sdy <- sd(y)
# Welch t-statistic (n_x = 10, n_y = 11)
t0 <- (mx-my)/sqrt(sdx^2/10+sdy^2/11)
t0
# Welch-Satterthwaite approximation to the degrees of freedom
sw.df <- (sdx^2/10+sdy^2/11)^2/((sdx^2/10)^2/9 + (sdy^2/11)^2/10)
# One-sided p-value for the alternative mean(x) > mean(y)
1-pt(t0, sw.df)
# 99% confidence interval for the difference in means
lbd <- (mx-my) - qt(0.995, sw.df)*sqrt(sdx^2/10+sdy^2/11)
lbd
ubd <- (mx-my) + qt(0.995, sw.df)*sqrt(sdx^2/10+sdy^2/11)
ubd
|
lemma convergentI: "X \<longlonglongrightarrow> L \<Longrightarrow> convergent X" |
# -*- coding: utf-8 -*-
"""
@author: alexyang
@contact: [email protected]
@file: inference.py
@time: 2018/4/18 8:53
@desc:
"""
import os
import sys
import pickle
import numpy as np
from gen_data import read_data
import tensorflow as tf
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
def pickle_load(data_path):
return pickle.load(open(data_path, 'rb'))
def compute_accuracy(model_path, data):
""""compute top k hits accuracy"""
sess = tf.Session()
saver = tf.train.import_meta_graph(model_path + '.meta')
saver.restore(sess, model_path)
graph = tf.get_default_graph()
# input
question_ids = graph.get_tensor_by_name('question_ids:0')
sequence_len = graph.get_tensor_by_name('sequence_len:0')
pos_sub_ids = graph.get_tensor_by_name('pos_sub_ids:0')
gru_keep_prob = graph.get_tensor_by_name('gru_keep_prob:0')
# output
pos_similarity = graph.get_tensor_by_name('pos_similarity:0')
q_word_ids, q_seq_len, q_pos_sub_ids, cand_pos_ids = data
top1 = top3 = top5 = top10 = 0
for i in range(len(q_word_ids)):
if i % 1000 == 0:
print(i, top1)
score = {}
# compute score for each candidate relation
q_word_ids_npy = np.zeros([1, 60])
q_word_ids_npy[0, :len(q_word_ids[i])] = q_word_ids[i]
mul_q_word_ids_npy = np.tile(q_word_ids_npy, (len(cand_pos_ids[i]), 1))
mul_q_seq_len = np.tile(q_seq_len[i:i + 1], len(cand_pos_ids[i]))
# print(mul_q_word_ids_npy.shape, mul_q_seq_len.shape, len(cand_pos_ids[i]))
feed_dict = {question_ids: mul_q_word_ids_npy,
sequence_len: mul_q_seq_len,
pos_sub_ids: cand_pos_ids[i],
gru_keep_prob: 1.0
}
similarity = sess.run(pos_similarity, feed_dict=feed_dict)
for j in range(len(cand_pos_ids[i])):
score[cand_pos_ids[i][j]] = similarity[j]
# rank
sorted_score = sorted(score.items(), key=lambda x: x[1], reverse=True)
sorted_rel = [rel for rel, _ in sorted_score]
if q_pos_sub_ids[i] in sorted_rel[:1]:
top1 += 1
if q_pos_sub_ids[i] in sorted_rel[:3]:
top3 += 1
if q_pos_sub_ids[i] in sorted_rel[:5]:
top5 += 1
if q_pos_sub_ids[i] in sorted_rel[:10]:
top10 += 1
print('accuracy: hits@1: %f hits@3: %f hits@5: %f hits@10: %f' % (top1 / len(q_word_ids), top3 / len(q_word_ids),
top5 / len(q_word_ids), top10 / len(q_word_ids)))
if __name__ == '__main__':
assert len(sys.argv) == 3, 'arguments error!'
word2idx = pickle_load('../../../data/fb_word2idx.pkl')
subject2idx = pickle_load('../../../data/FB5M_subject2idx.pkl')
idx2subject = pickle_load('../../../data/FB5M_idx2subject.pkl')
_, test_data_metric = read_data(sys.argv[2], word2idx, subject2idx)
compute_accuracy(sys.argv[1], test_data_metric)
|
-- ----------------------------------------------------------------- [ DOM.idr ]
-- Module : XML.DOM
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
--
--
-- --------------------------------------------------------------------- [ EOH ]
module XML.DOM
import public Data.DList
import public Data.PList
import public Commons.Text.Display
%access export
%default total
public export
data NodeTy = DOCUMENT | ELEMENT | TEXT | CDATA | INSTRUCTION | COMMENT | QNAME | INFO | IDENT | DOCTYPE
Eq NodeTy where
(==) DOCUMENT DOCUMENT = True
(==) ELEMENT ELEMENT = True
(==) TEXT TEXT = True
(==) CDATA CDATA = True
(==) INSTRUCTION INSTRUCTION = True
(==) COMMENT COMMENT = True
(==) QNAME QNAME = True
(==) INFO INFO = True
(==) IDENT IDENT = True
(==) DOCTYPE DOCTYPE = True
(==) _ _ = False
public export
data ValidNode : NodeTy -> Type where
ValidElem : ValidNode ELEMENT
ValidCData : ValidNode CDATA
ValidText : ValidNode TEXT
ValidDoc : ValidNode COMMENT
ValidInstr : ValidNode INSTRUCTION
data Document : NodeTy -> Type where
MkDocument : (info : Document INFO)
-> (doctype : Maybe $ Document DOCTYPE)
-> (instructions : List (Document INSTRUCTION))
-> (comment : Maybe (Document COMMENT))
-> (root : Document ELEMENT)
-> Document DOCUMENT
XMLInfo : (version : String)
-> (encoding : String)
-> (standalone : Bool)
-> Document INFO
DocType : (name : String)
-> (ident : Maybe $ Document IDENT)
-> Document DOCTYPE
SystemID : (ident : String) -> Document IDENT
PublicID : (ident : String) -> (ident_sys : String) -> Document IDENT
QName : (name : String)
-> (nspace : Maybe String)
-> (nprefix : Maybe String)
-> Document QNAME
Element : (qName : Document QNAME)
-> (attributes : List (Document QNAME, String))
-> (children : PList NodeTy Document ValidNode ts prfs)
-> Document ELEMENT
Comment : (comment : String) -> Document COMMENT
Text : (text : String) -> Document TEXT
CData : (cdata : String) -> Document CDATA
Instruction : (name : String)
-> (attributes : List (String, String))
-> Document INSTRUCTION
-- ---------------------------------------------------------------------- [ Eq ]
private
maybeEq : (Document a -> Document b -> Bool)
-> Maybe (Document a)
-> Maybe (Document b)
-> Bool
maybeEq _ Nothing Nothing = True
maybeEq f (Just x) (Just y) = f x y
maybeEq _ _ _ = False
partial
eqDoc : Document a -> Document b -> Bool
eqDoc (MkDocument iA dA inA cA rA) (MkDocument iB dB inB cB rB) =
eqDoc iA iB
&& maybeEq eqDoc dA dB
&& (and (zipWith (\x, y => eqDoc x y) inA inB))
&& maybeEq eqDoc cA cB
&& eqDoc rA rB
eqDoc (XMLInfo vA eA sA) (XMLInfo vB eB sB) = vA == vB && eA == eB && sA == sB
eqDoc (DocType nA iA) (DocType nB iB) = nA == nB && maybeEq eqDoc iA iB
eqDoc (SystemID x) (SystemID y) = x == y
eqDoc (PublicID x xloc) (PublicID y yloc) = x == y && xloc == yloc
eqDoc (QName x xs _) (QName y ys _) = x == y && xs == ys
eqDoc (Element nA aA cA) (Element nB aB cB) =
eqDoc nA nB
&& (and (zipWith (\(a,b), (x,y) => eqDoc a x && b == y) aA aB))
&& eqPList eqDoc cA cB
eqDoc (Comment a) (Comment b) = a == b
eqDoc (Text a) (Text b) = a == b
eqDoc (CData a) (CData b) = a == b
eqDoc (Instruction at ad) (Instruction bt bd) = at == bt && ad == bd
eqDoc _ _ = False
-- -------------------------------------------------------------------- [ Show ]
partial
Show (Document a) where
show (MkDocument info doctype instructions comment root) =
unwords ["[MkDocument"
, show info
, show doctype
, show instructions
, show comment
, show root
,"]"
]
show (XMLInfo version encoding standalone) =
unwords ["[XMLInfo"
, show version
, show encoding
, show standalone
, "]"
]
show (DocType name ident) =
unwords ["[DocType"
, show name
, show ident
, "]"
]
show (SystemID ident) = unwords ["SystemID", show ident]
show (PublicID ident loc) = unwords ["PublicID", show ident, show loc]
show (QName name nspace nprefix) =
unwords ["[QName"
, show name
, show nspace
, show nprefix
, "]"
]
show (Element qName attributes children) =
unwords ["[Element "
, show qName
, show attributes
, showPList show children
, "]"
]
show (Comment comment) = unwords ["[Comment ", show comment, "]"]
show (Text text) = unwords ["[Text", show text, "]"]
show (CData cdata) = unwords ["[CData", show cdata, "]"]
show (Instruction name attributes) =
unwords ["[Instruction"
, show name
, show attributes
,"]"
]
partial
displayDoc : Document a -> String
displayDoc (MkDocument info doctype instructions comment root) =
unwords ["[MkDocument"
, displayDoc info
, maybe "" displayDoc doctype
, unwords $ Functor.map displayDoc instructions
, maybe "" displayDoc comment
, displayDoc root
,"]"
]
displayDoc (XMLInfo version encoding standalone) =
unwords ["<?xml"
, concat ["version=", show version]
, concat ["encoding=", show encoding]
-- , concat ["standalone=", show standalone
, "?>"
]
displayDoc (DocType name ident) =
unwords ["<!DOCTYPE"
, name
, maybe "" displayDoc ident
, ">"
]
displayDoc (SystemID ident) = ident
displayDoc (PublicID ident loc) = unwords [display ident, display loc]
displayDoc (QName name nspace nprefix) =
concat [maybe "" display nspace
, if isJust nspace then ":" else ""
, display name
]
displayDoc (Element qName attributes Nil) =
concat [ "<"
, displayDoc qName
, unwords $ Functor.map (\(k,v) => concat [displayDoc k, "=", show v]) attributes
, "/>"
]
displayDoc (Element qName attributes children) =
concat ["[Element "
, "<", displayDoc qName, ">"
, unwords $ Functor.map (\(k,v) => concat [displayDoc k, "=", show v]) attributes
, concat $ map displayDoc children
,"</", displayDoc qName, ">"
]
displayDoc (Comment comment) = unwords ["<!--", comment, "-->"]
displayDoc (Text text) = text
displayDoc (CData cdata) = unwords ["<![CData[", cdata, "]]>"]
displayDoc (Instruction name attributes) =
unwords ["<?"
, name
, show attributes
,"?>"
]
Display (Document a) where
display d = assert_total $ displayDoc d -- nasty hack
-- -------------------------------------------------------------------- [ Misc ]
getDocElemTy : {a : NodeTy} -> Document a -> NodeTy
getDocElemTy {a} _ = a
public export
NodeList : (types : List NodeTy)
-> (prfs : DList NodeTy ValidNode types)
-> Type
NodeList = PList NodeTy Document ValidNode
-- --------------------------------------------------------------------- [ API ]
setRoot : Document ELEMENT -> Document DOCUMENT -> Document DOCUMENT
setRoot newe (MkDocument info dtype ins doc e) = MkDocument info dtype ins doc newe
getRoot : Document DOCUMENT -> Document ELEMENT
getRoot (MkDocument info doctype instructions comment root) = root
mkXMLInfo : String -> String -> Bool -> Document INFO
mkXMLInfo = XMLInfo
defaultXMLInfo : Document INFO
defaultXMLInfo = mkXMLInfo "1.2" "UTF-8" True
emptyNodeList : PList NodeTy Document ValidNode Nil Nil
emptyNodeList = Nil
mkSystemID : String -> Document IDENT
mkSystemID = SystemID
mkPublicID : String -> String -> Document IDENT
mkPublicID = PublicID
-- [ DocTypes ]
mkDocType : String -> Maybe (Document IDENT) -> Document DOCTYPE
mkDocType = DocType
-- ------------------------------------------------------------- [ DOM Objects ]
-- ------------------------------------------------------------------ [ QNames ]
mkQName : String -> Maybe String -> Maybe String -> Document QNAME
mkQName = QName
namespace Simple
||| Create a local name
mkQName : String -> Document QNAME
mkQName n = QName n Nothing Nothing
||| Create a qualified name with a name space
|||
||| @n The name
||| @ns The name space.
mkQNameNS : (n : String) -> (ns : String) -> Document QNAME
mkQNameNS n ns = QName n (Just ns) Nothing
||| Create a tag with a name and a prefix
|||
||| @n The name
||| @pre The prefix
mkQNamePrefix : (n : String) -> (pre : String) -> Document QNAME
mkQNamePrefix n pre = QName n Nothing (Just pre)
||| Create a tag with namespace and prefix.
|||
||| @n The name
||| @pre The prefix.
||| @ns The namespace
mkQNameNSPrefix : (n : String) -> (pre : String) -> (ns : String) -> Document QNAME
mkQNameNSPrefix n pre ns = QName n (Just ns) (Just pre)
||| Create a prefixed qualified name, intended for use with
||| attributes.
mkAttrNamePrefix : String -> String -> Document QNAME
mkAttrNamePrefix n pre = QName n Nothing (Just pre)
setNameSpace : Maybe String -> Document QNAME -> Document QNAME
setNameSpace s (QName n _ pre) = QName n s pre
-- ---------------------------------------------------------------- [ Elements ]
||| Create a element with a local qualified name.
mkSimpleElement : String -> Document ELEMENT
mkSimpleElement name = Element tag Nil Nil
where
tag : Document QNAME
tag = mkQName name
||| Create a element with a Namespace.
|||
||| @n The name
||| @ns The namespace
mkElementNS : (n : String) -> (ns : String) -> Document ELEMENT
mkElementNS n ns = Element tag Nil Nil
where
tag = mkQNameNS n ns
||| Create an element with a prefix
|||
||| @n The name
||| @pre The prefix.
mkElementPrefix : (n : String) -> (pre : String) -> Document ELEMENT
mkElementPrefix n p = Element (mkQNamePrefix n p) Nil Nil
||| Create an element with a namespace and a prefix
|||
||| @n The name
||| @pre The prefix.
||| @ns The namespace
mkElementNSPrefix : (n : String)
-> (pre : String)
-> (ns : String)
-> Document ELEMENT
mkElementNSPrefix n pre ns = Element (mkQNameNSPrefix n pre ns) Nil Nil
||| Create a key value pair
mkAttribute : String -> String -> (Document QNAME, String)
mkAttribute k v = (mkQName k, v)
mkAttributePrefix : String -> String -> String -> (Document QNAME, String)
mkAttributePrefix k p v = (mkQNamePrefix k p, v)
||| Creates a Document with an empty root node
mkEmptyDocument : Document QNAME -> Maybe (Document DOCTYPE) -> Document DOCUMENT
mkEmptyDocument n dtd =
MkDocument defaultXMLInfo
dtd
Nil
Nothing
(Element n Nil emptyNodeList)
mkSimpleDocument : Document ELEMENT -> Document DOCUMENT
mkSimpleDocument root = MkDocument (defaultXMLInfo) Nothing Nil Nothing root
mkDocument : (info : Document INFO)
-> (doctype : Maybe $ Document DOCTYPE)
-> (instructions : List (Document INSTRUCTION))
-> (comment : Maybe (Document COMMENT))
-> (root : Document ELEMENT)
-> Document DOCUMENT
mkDocument = MkDocument
-- ----------------------------------------------------------- [ Node Creation ]
||| Create an XML Comment
mkComment : String -> Document COMMENT
mkComment = Comment
mkCData : String -> Document CDATA
mkCData = CData
mkText : String -> Document TEXT
mkText = Text
mkInstruction : String -> List (String, String) -> Document INSTRUCTION
mkInstruction = Instruction
mkEmptyElement : Document QNAME -> List (Document QNAME, String) -> Document ELEMENT
mkEmptyElement n as = Element n as Nil
mkElement : Document QNAME -> List (Document QNAME, String) -> NodeList ts prfs -> Document ELEMENT
mkElement = Element
-- -------------------------------------------------------------- [ Attributes ]
getAttrName : (Document QNAME, String) -> String
getAttrName (QName n ns prefix', v) = n
getAttrPrefix : (Document QNAME, String) -> Maybe String
getAttrPrefix (QName n ns prefix', v) = prefix'
getAttrValue : (Document QNAME, String) -> String
getAttrValue (k,v) = v
-- --------------------------------------------------------------- [ Accessors ]
||| Get the attributes of the node
getAttributes : (node : Document a )
-> {auto prf : ValidNode a}
-> List (Document QNAME, String)
getAttributes (Element x xs y) {prf=ValidElem} = xs
getAttributes node {prf} = Nil
||| Does node have attributes
hasAttributes : Document a
-> {auto prf : ValidNode a}
-> Bool
hasAttributes n {prf} = isCons (getAttributes n {prf=prf})
||| Get the children
getNodes : Document a
-> {auto prf : ValidNode a}
-> (ts ** prfs ** NodeList ts prfs)
getNodes (Element x xs ys) {prf=ValidElem} = (_ ** _ ** ys)
getNodes x {prf} = ([] ** [] ** [])
||| Does element have child nodes
hasNodes : Document a
-> {auto prf : ValidNode a}
-> Bool
hasNodes n = let (ts ** prfs ** nodes) = getNodes n
in isCons nodes
||| Get node name
||| http://docs.oracle.com/javase/7/docs/api/org/w3c/dom/Node.html
getNodeName : Document a
-> {auto prf : ValidNode a}
-> String
getNodeName (Element (QName name nspace nprefix) xs y) {prf = ValidElem} = name
getNodeName (CData x) {prf = ValidCData} = "#cdata-section"
getNodeName (Text x) {prf = ValidText} = "#text"
getNodeName (Comment x) {prf = ValidDoc} = "#comment"
getNodeName (Instruction x xs) {prf = ValidInstr} = x
||| Return the element's value
getNodeValue : Document a
-> {auto prf : ValidNode a}
-> Maybe String
getNodeValue x {prf = ValidElem} = Nothing
getNodeValue (CData x) {prf = ValidCData} = Just x
getNodeValue (Text x) {prf = ValidText} = Just x
getNodeValue (Comment x) {prf = ValidDoc} = Just x
getNodeValue (Instruction x xs) {prf = ValidInstr} =
Just $ unwords $ Functor.map show xs
getTag : Document ELEMENT -> Document QNAME
getTag (Element n _ _) = n
||| Get tag name
getTagName : Document ELEMENT -> String
getTagName (Element (QName name nspace nprefix) _ _) = name
||| Return an element's prefix
getTagPrefix : Document ELEMENT -> Maybe String
getTagPrefix (Element (QName name nspace nprefix) _ _) = nprefix
||| Return an element's namespace
getTagNS : Document ELEMENT -> Maybe String
getTagNS (Element (QName name nspace nprefix) _ _ ) = nspace
||| Get value for a given attribute
getAttribute : String -> Document ELEMENT -> Maybe String
getAttribute key (Element x xs y) =
lookupBy cmpQName (mkQName key) xs
where
cmpQName (QName a _ _) (QName b _ _) = a == b
||| Remove the first occurrence of an attribute.
removeAttribute : String -> Document ELEMENT -> Document ELEMENT
removeAttribute key (Element n as ns) = Element n attrs' ns
where
cmpQNameKVPair : (Document QNAME, String)
-> (Document QNAME, String)
-> Bool
cmpQNameKVPair (QName a _ _, b) (QName x _ _, y) = a == x
attrs' : List (Document QNAME, String)
attrs' = deleteBy cmpQNameKVPair
(mkQName key, "")
(as)
||| Set the first occurrence of an attribute to a new value.
setAttribute : (key : String)
-> (value : String)
-> Document ELEMENT
-> Document ELEMENT
setAttribute k v e@(Element n as ns) = Element n (newAS e) ns
where
newAS : Document ELEMENT -> List (Document QNAME, String)
newAS e = mkAttribute k v :: getAttributes (removeAttribute k e)
-- ------------------------------------------------------------- [ Element Ops ]
ContainsChild : (child : Document a)
-> (node : NodeList ts prfs)
-> (prfI : Elem a ts)
-> (prfP : DElem NodeTy ValidNode prf prfs prfI)
-> Type
ContainsChild c n prfI prfP {a} {prf} =
Elem NodeTy Document ValidNode a c prf n prfI prfP
data HasElem : (elem : Document a)
-> (prf : ValidNode a)
-> (node : Document ELEMENT)
-> Type
where
HasElemProof : (prf : ContainsChild elem children prfI prfP)
-> HasElem elem vnode (Element n as children)
infixl 2 <++> -- Append Child
infixl 2 <--> -- Remove Child
infixl 2 <=> -- Add Text Node
infixl 2 <+=> -- Create and add text node
private
appendToNode : Document a
-> Document ELEMENT
-> (prf : ValidNode a)
-> Document ELEMENT
appendToNode c (Element n as ns) prf = Element n as (add c ns)
private
removeFromNodeList : (elem : Document a)
-> (nodes : NodeList ts prfs)
-> {auto idx : Elem a ts}
-> {auto vprf : DElem NodeTy ValidNode prf prfs idx}
-> NodeList (dropElem ts idx) (dropElem prfs vprf)
removeFromNodeList elem nodes {idx} {vprf} = delete' elem nodes idx vprf
private
removeFromNode : (elem : Document a)
-> (node : Document ELEMENT)
-> (vnode : ValidNode a)
-> (prf : HasElem elem vnode node)
-> Document ELEMENT
removeFromNode c (Element n as ns {ts = ts}) vnode (HasElemProof prf) =
Element n as (dropElem ns prf)
||| Set Value
addText : String -> Document ELEMENT -> Document ELEMENT
addText s e = appendToNode (Text s) e ValidText
-- -------------------------------------------------------------------- [ Ops ]
||| Append
(<++>) : Document ELEMENT
-> Document a
-> {auto prf : ValidNode a}
-> Document ELEMENT
(<++>) p c {prf} = appendToNode c p prf
||| Remove
(<-->) : (node : Document ELEMENT)
-> (elem : Document a)
-> {auto prfV : ValidNode a}
-> {auto prfN : HasElem elem prfV node}
-> Document ELEMENT
(<-->) p c {prfV} {prfN} = removeFromNode c p prfV prfN
||| Add text value
(<=>) : Document ELEMENT -> String -> Document ELEMENT
(<=>) e s = e <++> (Text s)
||| Create and add text value
(<+=>) : String -> String -> Document ELEMENT
(<+=>) n v = (mkSimpleElement n) <=> v
-- ------------------------------------------------------------ [ Node Queries ]
namespace NodeList
||| getElements
getElements : NodeList ts prfs
-> List $ Document ELEMENT
getElements [] = []
getElements ((::) elem {prf = ValidElem} rest) = elem :: getElements rest
getElements ((::) elem {prf} rest) = getElements rest
getText : NodeList ts prfs
-> List $ Document TEXT
getText [] = []
getText ((::) elem {prf = ValidText} rest) = elem :: getText rest
getText ((::) elem {prf} rest) = getText rest
getComments : NodeList ts prfs
-> List $ Document COMMENT
getComments Nil = Nil
getComments ((::) elem {prf = ValidDoc} rest) = elem :: getComments rest
getComments ((::) elem {prf} rest) = getComments rest
getCData : NodeList ts prfs -> List $ Document CDATA
getCData Nil = Nil
getCData ((::) elem {prf = ValidCData} rest) = elem :: getCData rest
getCData ((::) elem {prf} rest) = getCData rest
getElements : Document ELEMENT -> List $ Document ELEMENT
getElements (Element _ _ ns) = getElements ns
getText : Document ELEMENT -> List $ Document TEXT
getText (Element _ _ ns) = getText ns
getComments : Document ELEMENT -> List $ Document COMMENT
getComments (Element _ _ ns) = getComments ns
getCData : Document ELEMENT -> List $ Document CDATA
getCData (Element _ _ ns) = getCData ns
-- --------------------------------------------------------- [ Element Queries ]
public export
data CanQuery : NodeTy -> Type where
QueryDoc : CanQuery DOCUMENT
QueryElem : CanQuery ELEMENT
||| Get the immediate child elements
getChildElements : Document a -> {auto prf : CanQuery a} -> List $ Document ELEMENT
getChildElements (Element _ _ ns) {prf=QueryElem} = getElements ns
getChildElements (MkDocument x y xs z (Element _ _ ns)) {prf=QueryDoc} = getElements ns
private
doTest : (eqFunc : Document QNAME -> Document QNAME -> Bool)
-> (name : Document QNAME)
-> (node : Document ELEMENT)
-> Maybe (Document ELEMENT)
doTest eqFunc name node@(Element n attrs children) =
if eqFunc name n
then Just node
else Nothing
private partial
getAllElements : (node : Document ELEMENT)
-> List $ Document ELEMENT
getAllElements node@(Element x as Nil) = node :: Nil
getAllElements node@(Element x as xs) = node :: concatMap getAllElements (getElements xs)
private
getElementsBy' : (func : Document ELEMENT -> Bool)
-> (node : Document a)
-> (prf : CanQuery a)
-> List (Document ELEMENT)
getElementsBy' func (MkDocument x y xs z w) QueryDoc = filter func (assert_total $ getAllElements w)
getElementsBy' func node QueryElem = filter func (assert_total $ getAllElements node)
getElementsBy : (func : Document ELEMENT -> Bool)
-> (node : Document a)
-> {auto prf : CanQuery a}
-> List (Document ELEMENT)
getElementsBy func node {prf} = getElementsBy' func node prf
||| Get all Elements with a given QName
getElementsByQName : Document QNAME
-> Document a
-> {auto prf : CanQuery a}
-> List $ Document ELEMENT
getElementsByQName (QName x ns p) node = getElementsBy (\(Element (QName y _ _) as xs) => x == y) node
-- change to ==
||| Get all Elements with a given name. This ignores prefixes and namespaces.
getElementsByName : String
-> Document a
-> {auto prf : CanQuery a}
-> List $ Document ELEMENT
getElementsByName naam e = getElementsBy (\(Element (QName x _ _) _ _) => naam == x) e
||| Get All Child Elements with a given QName.
getChildElementsByQName : Document QNAME
-> Document a
-> {auto prf : CanQuery a}
-> List $ Document ELEMENT
getChildElementsByQName (QName y _ _) node {prf} =
filter (\(Element (QName x _ _) _ _) => x == y) (getChildElements node) -- change to ==
||| Get All Child Elements with a local name
getChildElementsByName : String
-> Document a
-> {auto prf : CanQuery a}
-> List $ Document ELEMENT
getChildElementsByName name node {prf} =
filter (\(Element (QName x _ _) _ _) => x == name) (getChildElements node)
||| Get All child elements regardless of name.
getAllChildren : Document a
-> {auto prf : CanQuery a}
-> List $ Document ELEMENT
getAllChildren (MkDocument x y xs z w) {prf = QueryDoc} = assert_total $ getAllElements w
getAllChildren node {prf = QueryElem} = assert_total $ getAllElements node
-- ----------------------------------------------------------------- [ Aliases ]
public export
XMLDoc : Type
XMLDoc = Document DOCUMENT
public export
XMLElem : Type
XMLElem = Document ELEMENT
-- --------------------------------------------------------------------- [ EOF ]
|
open import Prelude
open import Data.Nat
open import Data.Nat.Properties
open import Induction.Nat
module Implicits.Resolution.GenericFinite.Examples.MaximumDepth
where
open import Implicits.Resolution.GenericFinite.TerminationCondition
_<′?_ : (x y : ℕ) → Dec (x <′ y)
x <′? y with (suc x) ≤? y
x <′? y | yes p = yes (≤⇒≤′ p)
x <′? y | no ¬p = no (λ p → ¬p (≤′⇒≤ p))
MaxDepthCondition : TerminationCondition
MaxDepthCondition = record
{ TCtx = ℕ
; _<_ = _<′_
; _<?_ = _<′?_
; step = λ n _ _ _ _ → n ∸ 1
; wf-< = <-well-founded
}
|
{-# OPTIONS --cubical --safe #-}
module Cubical.Data.List.Base where
open import Agda.Builtin.List public
open import Cubical.Core.Everything
module _ {ℓ} {A : Type ℓ} where
infixr 5 _++_
[_] : A → List A
[ a ] = a ∷ []
_++_ : List A → List A → List A
[] ++ ys = ys
(x ∷ xs) ++ ys = x ∷ xs ++ ys
rev : List A → List A
rev [] = []
rev (x ∷ xs) = rev xs ++ [ x ]
|
Christina Taylor is creative, diligent, and responsive. She was raised in Lexington, SC, but now calls the Upstate home. Christina graduated from the College of Charleston with a Bachelor’s degree in Historic Preservation and a minor in Art History. Her passion for architecture has led her to pursue a career in real estate. She loves old, historic homes, but is eager to help you find a new home to love and call your own. Nothing gives her more joy than knowing that she has impacted someone’s life in some way. Let her assist you as you begin the process of selling your home. She looks forward to working with you! |
# Sympy
```python
from sympy import *
# init_printing()
x, y, z = symbols("x y z")
```
```python
simplify(sin(x) ** 2 + cos(x) ** 2)
```
$\displaystyle 1$
```python
expand((x + 1) ** 3)
```
$\displaystyle x^{3} + 3 x^{2} + 3 x + 1$
```python
a = 3
b = 8
c = 2
y = a * x ** 2 + b * x + c
plot(y)
```
```python
solveset(y)
```
$\displaystyle \left\{- \frac{4}{3} - \frac{\sqrt{10}}{3}, - \frac{4}{3} + \frac{\sqrt{10}}{3}\right\}$
```python
dy = y.diff(x)
plot(dy)
```
```python
iy = integrate(y, x)
plot(iy)
```
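As a further small check (a sketch assuming the symbols `x`, `a`, `b`, `c`, `y`, and `dy` defined above), the critical point of the parabola can be recovered from the derivative and compared with the closed form $-b/(2a)$:
```python
from sympy import Eq, Rational, solveset

# Critical point of y = a*x**2 + b*x + c, found by solving dy/dx = 0
print(solveset(Eq(dy, 0), x))  # {-4/3}
print(Rational(-b, 2 * a))     # -4/3, the vertex formula -b/(2*a)
```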
|
= Norman Finkelstein =
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj20eqsynthconj1 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus lv0 (plus lv1 lv2)) (plus (plus lv1 lv2) lv0)).
Admitted.
QuickChick conj20eqsynthconj1.
|
\hypertarget{section}{%
\section{1}\label{section}}
\bibleverse{1} Now after the death of Moses the servant of the LORD it
came to pass, that the LORD spake unto Joshua the son of Nun, Moses'
minister, saying, \bibleverse{2} Moses my servant is dead; now therefore
arise, go over this Jordan, thou, and all this people, unto the land
which I do give to them, even to the children of Israel. \bibleverse{3}
Every place that the sole of your foot shall tread upon, that have I
given unto you, as I said unto Moses. \bibleverse{4} From the wilderness
and this Lebanon even unto the great river, the river Euphrates, all the
land of the Hittites, and unto the great sea toward the going down of
the sun, shall be your coast. \bibleverse{5} There shall not any man be
able to stand before thee all the days of thy life: as I was with Moses,
so I will be with thee: I will not fail thee, nor forsake thee.
\bibleverse{6} Be strong and of a good courage: for unto this people
shalt thou divide for an inheritance the land, which I sware unto their
fathers to give them.\footnote{\textbf{1:6} unto this\ldots: or, thou
shalt cause this people to inherit the land, etc} \bibleverse{7} Only
be thou strong and very courageous, that thou mayest observe to do
according to all the law, which Moses my servant commanded thee: turn
not from it to the right hand or to the left, that thou mayest prosper
whithersoever thou goest.\footnote{\textbf{1:7} prosper: or, do wisely}
\bibleverse{8} This book of the law shall not depart out of thy mouth;
but thou shalt meditate therein day and night, that thou mayest observe
to do according to all that is written therein: for then thou shalt make
thy way prosperous, and then thou shalt have good success.\footnote{\textbf{1:8}
have\ldots: or, do wisely} \bibleverse{9} Have not I commanded thee?
Be strong and of a good courage; be not afraid, neither be thou
dismayed: for the LORD thy God is with thee whithersoever thou goest.
\bibleverse{10} Then Joshua commanded the officers of the people,
saying, \bibleverse{11} Pass through the host, and command the people,
saying, Prepare you victuals; for within three days ye shall pass over
this Jordan, to go in to possess the land, which the LORD your God
giveth you to possess it.
\bibleverse{12} And to the Reubenites, and to the Gadites, and to half
the tribe of Manasseh, spake Joshua, saying, \bibleverse{13} Remember
the word which Moses the servant of the LORD commanded you, saying, The
LORD your God hath given you rest, and hath given you this land.
\bibleverse{14} Your wives, your little ones, and your cattle, shall
remain in the land which Moses gave you on this side Jordan; but ye
shall pass before your brethren armed, all the mighty men of valour, and
help them;\footnote{\textbf{1:14} armed: Heb. marshalled by five}
\bibleverse{15} Until the LORD have given your brethren rest, as he hath
given you, and they also have possessed the land which the LORD your God
giveth them: then ye shall return unto the land of your possession, and
enjoy it, which Moses the LORD's servant gave you on this side Jordan
toward the sunrising.
\bibleverse{16} And they answered Joshua, saying, All that thou
commandest us we will do, and whithersoever thou sendest us, we will go.
\bibleverse{17} According as we hearkened unto Moses in all things, so
will we hearken unto thee: only the LORD thy God be with thee, as he was
with Moses. \bibleverse{18} Whosoever he be that doth rebel against thy
commandment, and will not hearken unto thy words in all that thou
commandest him, he shall be put to death: only be strong and of a good
courage.
\hypertarget{section-1}{%
\section{2}\label{section-1}}
\bibleverse{1} And Joshua the son of Nun sent out of Shittim two men to
spy secretly, saying, Go view the land, even Jericho. And they went, and
came into an harlot's house, named Rahab, and lodged
there.\textsuperscript{{[}\textbf{2:1} sent: or, had
sent{]}}{[}\textbf{2:1} lodged: Heb. lay{]} \bibleverse{2} And it was
told the king of Jericho, saying, Behold, there came men in hither to
night of the children of Israel to search out the country.
\bibleverse{3} And the king of Jericho sent unto Rahab, saying, Bring
forth the men that are come to thee, which are entered into thine house:
for they be come to search out all the country. \bibleverse{4} And the
woman took the two men, and hid them, and said thus, There came men unto
me, but I wist not whence they were: \bibleverse{5} And it came to pass
about the time of shutting of the gate, when it was dark, that the men
went out: whither the men went I wot not: pursue after them quickly; for
ye shall overtake them. \bibleverse{6} But she had brought them up to
the roof of the house, and hid them with the stalks of flax, which she
had laid in order upon the roof. \bibleverse{7} And the men pursued
after them the way to Jordan unto the fords: and as soon as they which
pursued after them were gone out, they shut the gate.
\bibleverse{8} And before they were laid down, she came up unto them
upon the roof; \bibleverse{9} And she said unto the men, I know that the
LORD hath given you the land, and that your terror is fallen upon us,
and that all the inhabitants of the land faint because of
you.\footnote{\textbf{2:9} faint: Heb. melt} \bibleverse{10} For we have
heard how the LORD dried up the water of the Red sea for you, when ye
came out of Egypt; and what ye did unto the two kings of the Amorites,
that were on the other side Jordan, Sihon and Og, whom ye utterly
destroyed. \bibleverse{11} And as soon as we had heard these things, our
hearts did melt, neither did there remain any more courage in any man,
because of you: for the LORD your God, he is God in heaven above, and in
earth beneath.\footnote{\textbf{2:11} did there\ldots: Heb. rose up}
\bibleverse{12} Now therefore, I pray you, swear unto me by the LORD,
since I have shewed you kindness, that ye will also shew kindness unto
my father's house, and give me a true token: \bibleverse{13} And that ye
will save alive my father, and my mother, and my brethren, and my
sisters, and all that they have, and deliver our lives from death.
\bibleverse{14} And the men answered her, Our life for yours, if ye
utter not this our business. And it shall be, when the LORD hath given
us the land, that we will deal kindly and truly with thee.\footnote{\textbf{2:14}
for\ldots: Heb. instead of you to die} \bibleverse{15} Then she let
them down by a cord through the window: for her house was upon the town
wall, and she dwelt upon the wall. \bibleverse{16} And she said unto
them, Get you to the mountain, lest the pursuers meet you; and hide
yourselves there three days, until the pursuers be returned: and
afterward may ye go your way. \bibleverse{17} And the men said unto her,
We will be blameless of this thine oath which thou hast made us swear.
\bibleverse{18} Behold, when we come into the land, thou shalt bind this
line of scarlet thread in the window which thou didst let us down by:
and thou shalt bring thy father, and thy mother, and thy brethren, and
all thy father's household, home unto thee.\footnote{\textbf{2:18}
bring: Heb. gather} \bibleverse{19} And it shall be, that whosoever
shall go out of the doors of thy house into the street, his blood shall
be upon his head, and we will be guiltless: and whosoever shall be with
thee in the house, his blood shall be on our head, if any hand be upon
him. \bibleverse{20} And if thou utter this our business, then we will
be quit of thine oath which thou hast made us to swear. \bibleverse{21}
And she said, According unto your words, so be it. And she sent them
away, and they departed: and she bound the scarlet line in the window.
\bibleverse{22} And they went, and came unto the mountain, and abode
there three days, until the pursuers were returned: and the pursuers
sought them throughout all the way, but found them not.
\bibleverse{23} So the two men returned, and descended from the
mountain, and passed over, and came to Joshua the son of Nun, and told
him all things that befell them: \bibleverse{24} And they said unto
Joshua, Truly the LORD hath delivered into our hands all the land; for
even all the inhabitants of the country do faint because of
us.\footnote{\textbf{2:24} faint: Heb. melt}
\hypertarget{section-2}{%
\section{3}\label{section-2}}
\bibleverse{1} And Joshua rose early in the morning; and they removed
from Shittim, and came to Jordan, he and all the children of Israel, and
lodged there before they passed over. \bibleverse{2} And it came to pass
after three days, that the officers went through the host;
\bibleverse{3} And they commanded the people, saying, When ye see the
ark of the covenant of the LORD your God, and the priests the Levites
bearing it, then ye shall remove from your place, and go after it.
\bibleverse{4} Yet there shall be a space between you and it, about two
thousand cubits by measure: come not near unto it, that ye may know the
way by which ye must go: for ye have not passed this way
heretofore.\footnote{\textbf{3:4} heretofore: Heb. since yesterday, and
the third day} \bibleverse{5} And Joshua said unto the people,
Sanctify yourselves: for to morrow the LORD will do wonders among you.
\bibleverse{6} And Joshua spake unto the priests, saying, Take up the
ark of the covenant, and pass over before the people. And they took up
the ark of the covenant, and went before the people.
\bibleverse{7} And the LORD said unto Joshua, This day will I begin to
magnify thee in the sight of all Israel, that they may know that, as I
was with Moses, so I will be with thee. \bibleverse{8} And thou shalt
command the priests that bear the ark of the covenant, saying, When ye
are come to the brink of the water of Jordan, ye shall stand still in
Jordan.
\bibleverse{9} And Joshua said unto the children of Israel, Come hither,
and hear the words of the LORD your God. \bibleverse{10} And Joshua
said, Hereby ye shall know that the living God is among you, and that he
will without fail drive out from before you the Canaanites, and the
Hittites, and the Hivites, and the Perizzites, and the Girgashites, and
the Amorites, and the Jebusites. \bibleverse{11} Behold, the ark of the
covenant of the Lord of all the earth passeth over before you into
Jordan. \bibleverse{12} Now therefore take you twelve men out of the
tribes of Israel, out of every tribe a man. \bibleverse{13} And it shall
come to pass, as soon as the soles of the feet of the priests that bear
the ark of the LORD, the Lord of all the earth, shall rest in the waters
of Jordan, that the waters of Jordan shall be cut off from the waters
that come down from above; and they shall stand upon an heap.
\bibleverse{14} And it came to pass, when the people removed from their
tents, to pass over Jordan, and the priests bearing the ark of the
covenant before the people; \bibleverse{15} And as they that bare the
ark were come unto Jordan, and the feet of the priests that bare the ark
were dipped in the brim of the water, (for Jordan overfloweth all his
banks all the time of harvest,) \bibleverse{16} That the waters which
came down from above stood and rose up upon an heap very far from the
city Adam, that is beside Zaretan: and those that came down toward the
sea of the plain, even the salt sea, failed, and were cut off: and the
people passed over right against Jericho. \bibleverse{17} And the
priests that bare the ark of the covenant of the LORD stood firm on dry
ground in the midst of Jordan, and all the Israelites passed over on dry
ground, until all the people were passed clean over Jordan.
\hypertarget{section-3}{%
\section{4}\label{section-3}}
\bibleverse{1} And it came to pass, when all the people were clean
passed over Jordan, that the LORD spake unto Joshua, saying,
\bibleverse{2} Take you twelve men out of the people, out of every tribe
a man, \bibleverse{3} And command ye them, saying, Take you hence out of
the midst of Jordan, out of the place where the priests' feet stood
firm, twelve stones, and ye shall carry them over with you, and leave
them in the lodging place, where ye shall lodge this night.
\bibleverse{4} Then Joshua called the twelve men, whom he had prepared
of the children of Israel, out of every tribe a man: \bibleverse{5} And
Joshua said unto them, Pass over before the ark of the LORD your God
into the midst of Jordan, and take ye up every man of you a stone upon
his shoulder, according unto the number of the tribes of the children of
Israel: \bibleverse{6} That this may be a sign among you, that when your
children ask their fathers in time to come, saying, What mean ye by
these stones?\footnote{\textbf{4:6} in time\ldots: Heb. to morrow}
\bibleverse{7} Then ye shall answer them, That the waters of Jordan were
cut off before the ark of the covenant of the LORD; when it passed over
Jordan, the waters of Jordan were cut off: and these stones shall be for
a memorial unto the children of Israel for ever. \bibleverse{8} And the
children of Israel did so as Joshua commanded, and took up twelve stones
out of the midst of Jordan, as the LORD spake unto Joshua, according to
the number of the tribes of the children of Israel, and carried them
over with them unto the place where they lodged, and laid them down
there. \bibleverse{9} And Joshua set up twelve stones in the midst of
Jordan, in the place where the feet of the priests which bare the ark of
the covenant stood: and they are there unto this day.
\bibleverse{10} For the priests which bare the ark stood in the midst of
Jordan, until every thing was finished that the LORD commanded Joshua to
speak unto the people, according to all that Moses commanded Joshua: and
the people hasted and passed over. \bibleverse{11} And it came to pass,
when all the people were clean passed over, that the ark of the LORD
passed over, and the priests, in the presence of the people.
\bibleverse{12} And the children of Reuben, and the children of Gad, and
half the tribe of Manasseh, passed over armed before the children of
Israel, as Moses spake unto them: \bibleverse{13} About forty thousand
prepared for war passed over before the LORD unto battle, to the plains
of Jericho.\footnote{\textbf{4:13} prepared: or, ready armed}
\bibleverse{14} On that day the LORD magnified Joshua in the sight of
all Israel; and they feared him, as they feared Moses, all the days of
his life. \bibleverse{15} And the LORD spake unto Joshua, saying,
\bibleverse{16} Command the priests that bear the ark of the testimony,
that they come up out of Jordan. \bibleverse{17} Joshua therefore
commanded the priests, saying, Come ye up out of Jordan. \bibleverse{18}
And it came to pass, when the priests that bare the ark of the covenant
of the LORD were come up out of the midst of Jordan, and the soles of
the priests' feet were lifted up unto the dry land, that the waters of
Jordan returned unto their place, and flowed over all his banks, as they
did before.\footnote{\textbf{4:18} lifted\ldots: Heb. plucked
up}\footnote{\textbf{4:18} flowed: Heb. went}
\bibleverse{19} And the people came up out of Jordan on the tenth day of
the first month, and encamped in Gilgal, in the east border of Jericho.
\bibleverse{20} And those twelve stones, which they took out of Jordan,
did Joshua pitch in Gilgal. \bibleverse{21} And he spake unto the
children of Israel, saying, When your children shall ask their fathers
in time to come, saying, What mean these stones?\footnote{\textbf{4:21}
in time\ldots: Heb. to morrow} \bibleverse{22} Then ye shall let your
children know, saying, Israel came over this Jordan on dry land.
\bibleverse{23} For the LORD your God dried up the waters of Jordan from
before you, until ye were passed over, as the LORD your God did to the
Red sea, which he dried up from before us, until we were gone over:
\bibleverse{24} That all the people of the earth might know the hand of
the LORD, that it is mighty: that ye might fear the LORD your God for
ever.\footnote{\textbf{4:24} for ever: Heb. all days}
\hypertarget{section-4}{%
\section{5}\label{section-4}}
\bibleverse{1} And it came to pass, when all the kings of the Amorites,
which were on the side of Jordan westward, and all the kings of the
Canaanites, which were by the sea, heard that the LORD had dried up the
waters of Jordan from before the children of Israel, until we were
passed over, that their heart melted, neither was there spirit in them
any more, because of the children of Israel.
\bibleverse{2} At that time the LORD said unto Joshua, Make thee sharp
knives, and circumcise again the children of Israel the second
time.\footnote{\textbf{5:2} sharp\ldots: or, knives of flints}
\bibleverse{3} And Joshua made him sharp knives, and circumcised the
children of Israel at the hill of the
foreskins.\footnote{\textbf{5:3} sharp\ldots: or, knives of
flints}\footnote{\textbf{5:3} the hill\ldots: or, Gibeah-haaraloth}
\bibleverse{4} And this is the cause why Joshua did circumcise: All the
people that came out of Egypt, that were males, even all the men of war,
died in the wilderness by the way, after they came out of Egypt.
\bibleverse{5} Now all the people that came out were circumcised: but
all the people that were born in the wilderness by the way as they came
forth out of Egypt, them they had not circumcised. \bibleverse{6} For
the children of Israel walked forty years in the wilderness, till all
the people that were men of war, which came out of Egypt, were consumed,
because they obeyed not the voice of the LORD: unto whom the LORD sware
that he would not shew them the land, which the LORD sware unto their
fathers that he would give us, a land that floweth with milk and honey.
\bibleverse{7} And their children, whom he raised up in their stead,
them Joshua circumcised: for they were uncircumcised, because they had
not circumcised them by the way. \bibleverse{8} And it came to pass,
when they had done circumcising all the people, that they abode in their
places in the camp, till they were whole.\footnote{\textbf{5:8} they
had\ldots: Heb. the people had made an end to be circumcised}
\bibleverse{9} And the LORD said unto Joshua, This day have I rolled
away the reproach of Egypt from off you. Wherefore the name of the place
is called Gilgal unto this day.\footnote{\textbf{5:9} Gilgal: that is
Rolling}
\bibleverse{10} And the children of Israel encamped in Gilgal, and kept
the passover on the fourteenth day of the month at even in the plains of
Jericho. \bibleverse{11} And they did eat of the old corn of the land on
the morrow after the passover, unleavened cakes, and parched corn in the
selfsame day.
\bibleverse{12} And the manna ceased on the morrow after they had eaten
of the old corn of the land; neither had the children of Israel manna
any more; but they did eat of the fruit of the land of Canaan that year.
\bibleverse{13} And it came to pass, when Joshua was by Jericho, that he
lifted up his eyes and looked, and, behold, there stood a man over
against him with his sword drawn in his hand: and Joshua went unto him,
and said unto him, Art thou for us, or for our adversaries?
\bibleverse{14} And he said, Nay; but as captain of the host of the LORD
am I now come. And Joshua fell on his face to the earth, and did
worship, and said unto him, What saith my lord unto his
servant?\footnote{\textbf{5:14} captain: or, prince} \bibleverse{15} And
the captain of the LORD's host said unto Joshua, Loose thy shoe from off
thy foot; for the place whereon thou standest is holy. And Joshua did
so.
\hypertarget{section-5}{%
\section{6}\label{section-5}}
\bibleverse{1} Now Jericho was straitly shut up because of the children
of Israel: none went out, and none came in.\footnote{\textbf{6:1}
was\ldots: Heb. did shut up, and was shut up} \bibleverse{2} And the
LORD said unto Joshua, See, I have given into thine hand Jericho, and
the king thereof, and the mighty men of valour. \bibleverse{3} And ye
shall compass the city, all ye men of war, and go round about the city
once. Thus shalt thou do six days. \bibleverse{4} And seven priests
shall bear before the ark seven trumpets of rams' horns: and the seventh
day ye shall compass the city seven times, and the priests shall blow
with the trumpets. \bibleverse{5} And it shall come to pass, that when
they make a long blast with the ram's horn, and when ye hear the sound
of the trumpet, all the people shall shout with a great shout; and the
wall of the city shall fall down flat, and the people shall ascend up
every man straight before him.\footnote{\textbf{6:5} flat: Heb. under it}
\bibleverse{6} And Joshua the son of Nun called the priests, and said
unto them, Take up the ark of the covenant, and let seven priests bear
seven trumpets of rams' horns before the ark of the LORD. \bibleverse{7}
And he said unto the people, Pass on, and compass the city, and let him
that is armed pass on before the ark of the LORD.
\bibleverse{8} And it came to pass, when Joshua had spoken unto the
people, that the seven priests bearing the seven trumpets of rams' horns
passed on before the LORD, and blew with the trumpets: and the ark of
the covenant of the LORD followed them.
\bibleverse{9} And the armed men went before the priests that blew with
the trumpets, and the rereward came after the ark, the priests going on,
and blowing with the trumpets.\footnote{\textbf{6:9} rereward: Heb.
gathering host} \bibleverse{10} And Joshua had commanded the people,
saying, Ye shall not shout, nor make any noise with your voice, neither
shall any word proceed out of your mouth, until the day I bid you shout;
then shall ye shout.\footnote{\textbf{6:10} any noise\ldots: Heb. your
voice to be heard} \bibleverse{11} So the ark of the LORD compassed
the city, going about it once: and they came into the camp, and lodged
in the camp.
\bibleverse{12} And Joshua rose early in the morning, and the priests
took up the ark of the LORD. \bibleverse{13} And seven priests bearing
seven trumpets of rams' horns before the ark of the LORD went on
continually, and blew with the trumpets: and the armed men went before
them; but the rereward came after the ark of the LORD, the priests going
on, and blowing with the trumpets. \bibleverse{14} And the second day
they compassed the city once, and returned into the camp: so they did
six days. \bibleverse{15} And it came to pass on the seventh day, that
they rose early about the dawning of the day, and compassed the city
after the same manner seven times: only on that day they compassed the
city seven times. \bibleverse{16} And it came to pass at the seventh
time, when the priests blew with the trumpets, Joshua said unto the
people, Shout; for the LORD hath given you the city.
\bibleverse{17} And the city shall be accursed, even it, and all that
are therein, to the LORD: only Rahab the harlot shall live, she and all
that are with her in the house, because she hid the messengers that we
sent.\footnote{\textbf{6:17} accursed: or, devoted} \bibleverse{18} And
ye, in any wise keep yourselves from the accursed thing, lest ye make
yourselves accursed, when ye take of the accursed thing, and make the
camp of Israel a curse, and trouble it.\footnote{\textbf{6:18}
accursed: or, devoted}\footnote{\textbf{6:18} a curse: or, devoted}
\bibleverse{19} But all the silver, and gold, and vessels of brass and
iron, are consecrated unto the LORD: they shall come into the treasury
of the LORD.\footnote{\textbf{6:19} consecrated: Heb. holiness}
\bibleverse{20} So the people shouted when the priests blew with the
trumpets: and it came to pass, when the people heard the sound of the
trumpet, and the people shouted with a great shout, that the wall fell
down flat, so that the people went up into the city, every man straight
before him, and they took the city.\footnote{\textbf{6:20} flat: Heb.
under it} \bibleverse{21} And they utterly destroyed all that was in
the city, both man and woman, young and old, and ox, and sheep, and ass,
with the edge of the sword. \bibleverse{22} But Joshua had said unto the
two men that had spied out the country, Go into the harlot's house, and
bring out thence the woman, and all that she hath, as ye sware unto her.
\bibleverse{23} And the young men that were spies went in, and brought
out Rahab, and her father, and her mother, and her brethren, and all
that she had; and they brought out all her kindred, and left them
without the camp of Israel.\footnote{\textbf{6:23} kindred: Heb.
families} \bibleverse{24} And they burnt the city with fire, and all
that was therein: only the silver, and the gold, and the vessels of
brass and of iron, they put into the treasury of the house of the LORD.
\bibleverse{25} And Joshua saved Rahab the harlot alive, and her
father's household, and all that she had; and she dwelleth in Israel
even unto this day; because she hid the messengers, which Joshua sent to
spy out Jericho.
\bibleverse{26} And Joshua adjured them at that time, saying, Cursed be
the man before the LORD, that riseth up and buildeth this city Jericho:
he shall lay the foundation thereof in his firstborn, and in his
youngest son shall he set up the gates of it. \bibleverse{27} So the
LORD was with Joshua; and his fame was noised throughout all the
country.
\hypertarget{section-6}{%
\section{7}\label{section-6}}
\bibleverse{1} But the children of Israel committed a trespass in the
accursed thing: for Achan, the son of Carmi, the son of Zabdi, the son
of Zerah, of the tribe of Judah, took of the accursed thing: and the
anger of the LORD was kindled against the children of
Israel.\footnote{\textbf{7:1} Achan: also called,
Achar}\footnote{\textbf{7:1} Zabdi: also called, Zimri} \bibleverse{2} And
Joshua sent men from Jericho to Ai, which is beside Beth-aven, on the
east side of Beth-el, and spake unto them, saying, Go up and view the
country. And the men went up and viewed Ai. \bibleverse{3} And they
returned to Joshua, and said unto him, Let not all the people go up; but
let about two or three thousand men go up and smite Ai; and make not all
the people to labour thither; for they are but few.\footnote{\textbf{7:3}
about\ldots: Heb. about two thousand men, or, about three thousand men}
\bibleverse{4} So there went up thither of the people about three
thousand men: and they fled before the men of Ai. \bibleverse{5} And the
men of Ai smote of them about thirty and six men: for they chased them
from before the gate even unto Shebarim, and smote them in the going
down: wherefore the hearts of the people melted, and became as
water.\footnote{\textbf{7:5} in\ldots: or, in Morad}
\bibleverse{6} And Joshua rent his clothes, and fell to the earth upon
his face before the ark of the LORD until the eventide, he and the
elders of Israel, and put dust upon their heads. \bibleverse{7} And
Joshua said, Alas, O Lord GOD, wherefore hast thou at all brought this
people over Jordan, to deliver us into the hand of the Amorites, to
destroy us? would to God we had been content, and dwelt on the other
side Jordan! \bibleverse{8} O Lord, what shall I say, when Israel
turneth their backs before their enemies!\footnote{\textbf{7:8} backs:
Heb. necks} \bibleverse{9} For the Canaanites and all the inhabitants
of the land shall hear of it, and shall environ us round, and cut off
our name from the earth: and what wilt thou do unto thy great name?
\bibleverse{10} And the LORD said unto Joshua, Get thee up; wherefore
liest thou thus upon thy face?\footnote{\textbf{7:10} liest: Heb.
fallest} \bibleverse{11} Israel hath sinned, and they have also
transgressed my covenant which I commanded them: for they have even
taken of the accursed thing, and have also stolen, and dissembled also,
and they have put it even among their own stuff. \bibleverse{12}
Therefore the children of Israel could not stand before their enemies,
but turned their backs before their enemies, because they were accursed:
neither will I be with you any more, except ye destroy the accursed from
among you. \bibleverse{13} Up, sanctify the people, and say, Sanctify
yourselves against to morrow: for thus saith the LORD God of Israel,
There is an accursed thing in the midst of thee, O Israel: thou canst
not stand before thine enemies, until ye take away the accursed thing
from among you. \bibleverse{14} In the morning therefore ye shall be
brought according to your tribes: and it shall be, that the tribe which
the LORD taketh shall come according to the families thereof; and the
family which the LORD shall take shall come by households; and the
household which the LORD shall take shall come man by man.
\bibleverse{15} And it shall be, that he that is taken with the accursed
thing shall be burnt with fire, he and all that he hath: because he hath
transgressed the covenant of the LORD, and because he hath wrought folly
in Israel.\footnote{\textbf{7:15} folly: or, wickedness}
\bibleverse{16} So Joshua rose up early in the morning, and brought
Israel by their tribes; and the tribe of Judah was taken:
\bibleverse{17} And he brought the family of Judah; and he took the
family of the Zarhites: and he brought the family of the Zarhites man by
man; and Zabdi was taken: \bibleverse{18} And he brought his household
man by man; and Achan, the son of Carmi, the son of Zabdi, the son of
Zerah, of the tribe of Judah, was taken. \bibleverse{19} And Joshua said
unto Achan, My son, give, I pray thee, glory to the LORD God of Israel,
and make confession unto him; and tell me now what thou hast done; hide
it not from me. \bibleverse{20} And Achan answered Joshua, and said,
Indeed I have sinned against the LORD God of Israel, and thus and thus
have I done: \bibleverse{21} When I saw among the spoils a goodly
Babylonish garment, and two hundred shekels of silver, and a wedge of
gold of fifty shekels weight, then I coveted them, and took them; and,
behold, they are hid in the earth in the midst of my tent, and the
silver under it.\footnote{\textbf{7:21} wedge: Heb. tongue}
\bibleverse{22} So Joshua sent messengers, and they ran unto the tent;
and, behold, it was hid in his tent, and the silver under it.
\bibleverse{23} And they took them out of the midst of the tent, and
brought them unto Joshua, and unto all the children of Israel, and laid
them out before the LORD.\footnote{\textbf{7:23} laid: Heb. poured}
\bibleverse{24} And Joshua, and all Israel with him, took Achan the son
of Zerah, and the silver, and the garment, and the wedge of gold, and
his sons, and his daughters, and his oxen, and his asses, and his sheep,
and his tent, and all that he had: and they brought them unto the valley
of Achor. \bibleverse{25} And Joshua said, Why hast thou troubled us?
the LORD shall trouble thee this day. And all Israel stoned him with
stones, and burned them with fire, after they had stoned them with
stones. \bibleverse{26} And they raised over him a great heap of stones
unto this day. So the LORD turned from the fierceness of his anger.
Wherefore the name of that place was called, The valley of Achor, unto
this day.\footnote{\textbf{7:26} Achor: that is, Trouble}
\hypertarget{section-7}{%
\section{8}\label{section-7}}
\bibleverse{1} And the LORD said unto Joshua, Fear not, neither be thou
dismayed: take all the people of war with thee, and arise, go up to Ai:
see, I have given into thy hand the king of Ai, and his people, and his
city, and his land: \bibleverse{2} And thou shalt do to Ai and her king
as thou didst unto Jericho and her king: only the spoil thereof, and the
cattle thereof, shall ye take for a prey unto yourselves: lay thee an
ambush for the city behind it.
\bibleverse{3} So Joshua arose, and all the people of war, to go up
against Ai: and Joshua chose out thirty thousand mighty men of valour,
and sent them away by night. \bibleverse{4} And he commanded them,
saying, Behold, ye shall lie in wait against the city, even behind the
city: go not very far from the city, but be ye all ready: \bibleverse{5}
And I, and all the people that are with me, will approach unto the city:
and it shall come to pass, when they come out against us, as at the
first, that we will flee before them, \bibleverse{6} (For they will come
out after us) till we have drawn them from the city; for they will say,
They flee before us, as at the first: therefore we will flee before
them.\footnote{\textbf{8:6} drawn: Heb. pulled} \bibleverse{7} Then ye
shall rise up from the ambush, and seize upon the city: for the LORD
your God will deliver it into your hand. \bibleverse{8} And it shall be,
when ye have taken the city, that ye shall set the city on fire:
according to the commandment of the LORD shall ye do. See, I have
commanded you.
\bibleverse{9} Joshua therefore sent them forth: and they went to lie in
ambush, and abode between Beth-el and Ai, on the west side of Ai: but
Joshua lodged that night among the people. \bibleverse{10} And Joshua
rose up early in the morning, and numbered the people, and went up, he
and the elders of Israel, before the people to Ai. \bibleverse{11} And
all the people, even the people of war that were with him, went up, and
drew nigh, and came before the city, and pitched on the north side of
Ai: now there was a valley between them and Ai. \bibleverse{12} And he
took about five thousand men, and set them to lie in ambush between
Beth-el and Ai, on the west side of the city.\footnote{\textbf{8:12}
of\ldots: or, of Ai} \bibleverse{13} And when they had set the people,
even all the host that was on the north of the city, and their liers in
wait on the west of the city, Joshua went that night into the midst of
the valley.\footnote{\textbf{8:13} liers\ldots: Heb. lying in wait}
\bibleverse{14} And it came to pass, when the king of Ai saw it, that
they hasted and rose up early, and the men of the city went out against
Israel to battle, he and all his people, at a time appointed, before the
plain; but he wist not that there were liers in ambush against him
behind the city. \bibleverse{15} And Joshua and all Israel made as if
they were beaten before them, and fled by the way of the wilderness.
\bibleverse{16} And all the people that were in Ai were called together
to pursue after them: and they pursued after Joshua, and were drawn away
from the city. \bibleverse{17} And there was not a man left in Ai or
Beth-el, that went not out after Israel: and they left the city open,
and pursued after Israel. \bibleverse{18} And the LORD said unto Joshua,
Stretch out the spear that is in thy hand toward Ai; for I will give it
into thine hand. And Joshua stretched out the spear that he had in his
hand toward the city. \bibleverse{19} And the ambush arose quickly out
of their place, and they ran as soon as he had stretched out his hand:
and they entered into the city, and took it, and hasted and set the city
on fire. \bibleverse{20} And when the men of Ai looked behind them, they
saw, and, behold, the smoke of the city ascended up to heaven, and they
had no power to flee this way or that way: and the people that fled to
the wilderness turned back upon the pursuers.\footnote{\textbf{8:20}
power: Heb. hand} \bibleverse{21} And when Joshua and all Israel saw
that the ambush had taken the city, and that the smoke of the city
ascended, then they turned again, and slew the men of Ai.
\bibleverse{22} And the other issued out of the city against them; so
they were in the midst of Israel, some on this side, and some on that
side: and they smote them, so that they let none of them remain or
escape.
\bibleverse{23} And the king of Ai they took alive, and brought him to
Joshua. \bibleverse{24} And it came to pass, when Israel had made an end
of slaying all the inhabitants of Ai in the field, in the wilderness
wherein they chased them, and when they were all fallen on the edge of
the sword, until they were consumed, that all the Israelites returned
unto Ai, and smote it with the edge of the sword. \bibleverse{25} And so
it was, that all that fell that day, both of men and women, were twelve
thousand, even all the men of Ai. \bibleverse{26} For Joshua drew not
his hand back, wherewith he stretched out the spear, until he had
utterly destroyed all the inhabitants of Ai. \bibleverse{27} Only the
cattle and the spoil of that city Israel took for a prey unto
themselves, according unto the word of the LORD which he commanded
Joshua. \bibleverse{28} And Joshua burnt Ai, and made it an heap for
ever, even a desolation unto this day. \bibleverse{29} And the king of
Ai he hanged on a tree until eventide: and as soon as the sun was down,
Joshua commanded that they should take his carcase down from the tree,
and cast it at the entering of the gate of the city, and raise thereon a
great heap of stones, that remaineth unto this day.
\bibleverse{30} Then Joshua built an altar unto the LORD God of Israel
in mount Ebal, \bibleverse{31} As Moses the servant of the LORD
commanded the children of Israel, as it is written in the book of the
law of Moses, an altar of whole stones, over which no man hath lift up
any iron: and they offered thereon burnt offerings unto the LORD, and
sacrificed peace offerings.
\bibleverse{32} And he wrote there upon the stones a copy of the law of
Moses, which he wrote in the presence of the children of Israel.
\bibleverse{33} And all Israel, and their elders, and officers, and
their judges, stood on this side the ark and on that side before the
priests the Levites, which bare the ark of the covenant of the LORD, as
well the stranger, as he that was born among them; half of them over
against mount Gerizim, and half of them over against mount Ebal; as
Moses the servant of the LORD had commanded before, that they should
bless the people of Israel. \bibleverse{34} And afterward he read all
the words of the law, the blessings and cursings, according to all that
is written in the book of the law. \bibleverse{35} There was not a word
of all that Moses commanded, which Joshua read not before all the
congregation of Israel, with the women, and the little ones, and the
strangers that were conversant among them.\footnote{\textbf{8:35}
were\ldots: Heb. walked}
\hypertarget{section-8}{%
\section{9}\label{section-8}}
\bibleverse{1} And it came to pass, when all the kings which were on
this side Jordan, in the hills, and in the valleys, and in all the
coasts of the great sea over against Lebanon, the Hittite, and the
Amorite, the Canaanite, the Perizzite, the Hivite, and the Jebusite,
heard thereof; \bibleverse{2} That they gathered themselves together, to
fight with Joshua and with Israel, with one accord.\footnote{\textbf{9:2}
accord: Heb. mouth}
\bibleverse{3} And when the inhabitants of Gibeon heard what Joshua had
done unto Jericho and to Ai, \bibleverse{4} They did work wilily, and
went and made as if they had been ambassadors, and took old sacks upon
their asses, and wine bottles, old, and rent, and bound up;
\bibleverse{5} And old shoes and clouted upon their feet, and old
garments upon them; and all the bread of their provision was dry and
mouldy. \bibleverse{6} And they went to Joshua unto the camp at Gilgal,
and said unto him, and to the men of Israel, We be come from a far
country: now therefore make ye a league with us. \bibleverse{7} And the
men of Israel said unto the Hivites, Peradventure ye dwell among us; and
how shall we make a league with you? \bibleverse{8} And they said unto
Joshua, We are thy servants. And Joshua said unto them, Who are ye? and
from whence come ye? \bibleverse{9} And they said unto him, From a very
far country thy servants are come because of the name of the LORD thy
God: for we have heard the fame of him, and all that he did in Egypt,
\bibleverse{10} And all that he did to the two kings of the Amorites,
that were beyond Jordan, to Sihon king of Heshbon, and to Og king of
Bashan, which was at Ashtaroth. \bibleverse{11} Wherefore our elders and
all the inhabitants of our country spake to us, saying, Take victuals
with you for the journey, and go to meet them, and say unto them, We are
your servants: therefore now make ye a league with us.\footnote{\textbf{9:11}
with you: Heb. in your hand} \bibleverse{12} This our bread we took
hot for our provision out of our houses on the day we came forth to go
unto you; but now, behold, it is dry, and it is mouldy: \bibleverse{13}
And these bottles of wine, which we filled, were new; and, behold, they
be rent: and these our garments and our shoes are become old by reason
of the very long journey. \bibleverse{14} And the men took of their
victuals, and asked not counsel at the mouth of the LORD.\footnote{\textbf{9:14}
the men\ldots: or, they received the men by reason of their victuals}
\bibleverse{15} And Joshua made peace with them, and made a league with
them, to let them live: and the princes of the congregation sware unto
them.
\bibleverse{16} And it came to pass at the end of three days after they
had made a league with them, that they heard that they were their
neighbours, and that they dwelt among them. \bibleverse{17} And the
children of Israel journeyed, and came unto their cities on the third
day. Now their cities were Gibeon, and Chephirah, and Beeroth, and
Kirjath-jearim. \bibleverse{18} And the children of Israel smote them
not, because the princes of the congregation had sworn unto them by the
LORD God of Israel. And all the congregation murmured against the
princes. \bibleverse{19} But all the princes said unto all the
congregation, We have sworn unto them by the LORD God of Israel: now
therefore we may not touch them. \bibleverse{20} This we will do to
them; we will even let them live, lest wrath be upon us, because of the
oath which we sware unto them. \bibleverse{21} And the princes said unto
them, Let them live; but let them be hewers of wood and drawers of water
unto all the congregation; as the princes had promised them.
\bibleverse{22} And Joshua called for them, and he spake unto them,
saying, Wherefore have ye beguiled us, saying, We are very far from you;
when ye dwell among us? \bibleverse{23} Now therefore ye are cursed, and
there shall none of you be freed from being bondmen, and hewers of wood
and drawers of water for the house of my God.\footnote{\textbf{9:23}
none\ldots: Heb. not be cut off from you} \bibleverse{24} And they
answered Joshua, and said, Because it was certainly told thy servants,
how that the LORD thy God commanded his servant Moses to give you all
the land, and to destroy all the inhabitants of the land from before
you, therefore we were sore afraid of our lives because of you, and have
done this thing. \bibleverse{25} And now, behold, we are in thine hand:
as it seemeth good and right unto thee to do unto us, do.
\bibleverse{26} And so did he unto them, and delivered them out of the
hand of the children of Israel, that they slew them not. \bibleverse{27}
And Joshua made them that day hewers of wood and drawers of water for
the congregation, and for the altar of the LORD, even unto this day, in
the place which he should choose.\footnote{\textbf{9:27} made: Heb.
gave, or, delivered to be}
\hypertarget{section-9}{%
\section{10}\label{section-9}}
\bibleverse{1} Now it came to pass, when Adoni-zedek king of Jerusalem
had heard how Joshua had taken Ai, and had utterly destroyed it; as he
had done to Jericho and her king, so he had done to Ai and her king; and
how the inhabitants of Gibeon had made peace with Israel, and were among
them; \bibleverse{2} That they feared greatly, because Gibeon was a
great city, as one of the royal cities, and because it was greater than
Ai, and all the men thereof were mighty.\footnote{\textbf{10:2}
royal\ldots: Heb. cities of the kingdom} \bibleverse{3} Wherefore
Adoni-zedek king of Jerusalem sent unto Hoham king of Hebron, and unto
Piram king of Jarmuth, and unto Japhia king of Lachish, and unto Debir
king of Eglon, saying, \bibleverse{4} Come up unto me, and help me, that
we may smite Gibeon: for it hath made peace with Joshua and with the
children of Israel. \bibleverse{5} Therefore the five kings of the
Amorites, the king of Jerusalem, the king of Hebron, the king of
Jarmuth, the king of Lachish, the king of Eglon, gathered themselves
together, and went up, they and all their hosts, and encamped before
Gibeon, and made war against it.
\bibleverse{6} And the men of Gibeon sent unto Joshua to the camp to
Gilgal, saying, Slack not thy hand from thy servants; come up to us
quickly, and save us, and help us: for all the kings of the Amorites
that dwell in the mountains are gathered together against us.
\bibleverse{7} So Joshua ascended from Gilgal, he, and all the people of
war with him, and all the mighty men of valour.
\bibleverse{8} And the LORD said unto Joshua, Fear them not: for I have
delivered them into thine hand; there shall not a man of them stand
before thee. \bibleverse{9} Joshua therefore came unto them suddenly,
and went up from Gilgal all night. \bibleverse{10} And the LORD
discomfited them before Israel, and slew them with a great slaughter at
Gibeon, and chased them along the way that goeth up to Beth-horon, and
smote them to Azekah, and unto Makkedah. \bibleverse{11} And it came to
pass, as they fled from before Israel, and were in the going down to
Beth-horon, that the LORD cast down great stones from heaven upon them
unto Azekah, and they died: they were more which died with hailstones
than they whom the children of Israel slew with the sword.
\bibleverse{12} Then spake Joshua to the LORD in the day when the LORD
delivered up the Amorites before the children of Israel, and he said in
the sight of Israel, Sun, stand thou still upon Gibeon; and thou, Moon,
in the valley of Ajalon.\footnote{\textbf{10:12} stand\ldots: Heb. be
silent} \bibleverse{13} And the sun stood still, and the moon stayed,
until the people had avenged themselves upon their enemies. Is not this
written in the book of Jasher? So the sun stood still in the midst of
heaven, and hasted not to go down about a whole day.\footnote{\textbf{10:13}
Jasher: or, the upright?} \bibleverse{14} And there was no day like
that before it or after it, that the LORD hearkened unto the voice of a
man: for the LORD fought for Israel.
\bibleverse{15} And Joshua returned, and all Israel with him, unto the
camp to Gilgal. \bibleverse{16} But these five kings fled, and hid
themselves in a cave at Makkedah. \bibleverse{17} And it was told
Joshua, saying, The five kings are found hid in a cave at Makkedah.
\bibleverse{18} And Joshua said, Roll great stones upon the mouth of the
cave, and set men by it for to keep them: \bibleverse{19} And stay ye
not, but pursue after your enemies, and smite the hindmost of them;
suffer them not to enter into their cities: for the LORD your God hath
delivered them into your hand.\footnote{\textbf{10:19} smite\ldots: Heb.
cut off the tail} \bibleverse{20} And it came to pass, when Joshua and
the children of Israel had made an end of slaying them with a very great
slaughter, till they were consumed, that the rest which remained of them
entered into fenced cities. \bibleverse{21} And all the people returned
to the camp to Joshua at Makkedah in peace: none moved his tongue
against any of the children of Israel. \bibleverse{22} Then said Joshua,
Open the mouth of the cave, and bring out those five kings unto me out
of the cave. \bibleverse{23} And they did so, and brought forth those
five kings unto him out of the cave, the king of Jerusalem, the king of
Hebron, the king of Jarmuth, the king of Lachish, and the king of Eglon.
\bibleverse{24} And it came to pass, when they brought out those kings
unto Joshua, that Joshua called for all the men of Israel, and said unto
the captains of the men of war which went with him, Come near, put your
feet upon the necks of these kings. And they came near, and put their
feet upon the necks of them. \bibleverse{25} And Joshua said unto them,
Fear not, nor be dismayed, be strong and of good courage: for thus shall
the LORD do to all your enemies against whom ye fight. \bibleverse{26}
And afterward Joshua smote them, and slew them, and hanged them on five
trees: and they were hanging upon the trees until the evening.
\bibleverse{27} And it came to pass at the time of the going down of the
sun, that Joshua commanded, and they took them down off the trees, and
cast them into the cave wherein they had been hid, and laid great stones
in the cave's mouth, which remain until this very day.
\bibleverse{28} And that day Joshua took Makkedah, and smote it with the
edge of the sword, and the king thereof he utterly destroyed, them, and
all the souls that were therein; he let none remain: and he did to the
king of Makkedah as he did unto the king of Jericho. \bibleverse{29}
Then Joshua passed from Makkedah, and all Israel with him, unto Libnah,
and fought against Libnah: \bibleverse{30} And the LORD delivered it
also, and the king thereof, into the hand of Israel; and he smote it
with the edge of the sword, and all the souls that were therein; he let
none remain in it; but did unto the king thereof as he did unto the king
of Jericho.
\bibleverse{31} And Joshua passed from Libnah, and all Israel with him,
unto Lachish, and encamped against it, and fought against it:
\bibleverse{32} And the LORD delivered Lachish into the hand of Israel,
which took it on the second day, and smote it with the edge of the
sword, and all the souls that were therein, according to all that he had
done to Libnah.
\bibleverse{33} Then Horam king of Gezer came up to help Lachish; and
Joshua smote him and his people, until he had left him none remaining.
\bibleverse{34} And from Lachish Joshua passed unto Eglon, and all
Israel with him; and they encamped against it, and fought against it:
\bibleverse{35} And they took it on that day, and smote it with the edge
of the sword, and all the souls that were therein he utterly destroyed
that day, according to all that he had done to Lachish. \bibleverse{36}
And Joshua went up from Eglon, and all Israel with him, unto Hebron; and
they fought against it: \bibleverse{37} And they took it, and smote it
with the edge of the sword, and the king thereof, and all the cities
thereof, and all the souls that were therein; he left none remaining,
according to all that he had done to Eglon; but destroyed it utterly,
and all the souls that were therein.
\bibleverse{38} And Joshua returned, and all Israel with him, to Debir;
and fought against it: \bibleverse{39} And he took it, and the king
thereof, and all the cities thereof; and they smote them with the edge
of the sword, and utterly destroyed all the souls that were therein; he
left none remaining: as he had done to Hebron, so he did to Debir, and
to the king thereof; as he had done also to Libnah, and to her king.
\bibleverse{40} So Joshua smote all the country of the hills, and of the
south, and of the vale, and of the springs, and all their kings: he left
none remaining, but utterly destroyed all that breathed, as the LORD God
of Israel commanded. \bibleverse{41} And Joshua smote them from
Kadesh-barnea even unto Gaza, and all the country of Goshen, even unto
Gibeon. \bibleverse{42} And all these kings and their land did Joshua
take at one time, because the LORD God of Israel fought for Israel.
\bibleverse{43} And Joshua returned, and all Israel with him, unto the
camp to Gilgal.
\hypertarget{section-10}{%
\section{11}\label{section-10}}
\bibleverse{1} And it came to pass, when Jabin king of Hazor had heard
those things, that he sent to Jobab king of Madon, and to the king of
Shimron, and to the king of Achshaph, \bibleverse{2} And to the kings
that were on the north of the mountains, and of the plains south of
Chinneroth, and in the valley, and in the borders of Dor on the west,
\bibleverse{3} And to the Canaanite on the east and on the west, and to
the Amorite, and the Hittite, and the Perizzite, and the Jebusite in the
mountains, and to the Hivite under Hermon in the land of Mizpeh.
\bibleverse{4} And they went out, they and all their hosts with them,
much people, even as the sand that is upon the sea shore in multitude,
with horses and chariots very many. \bibleverse{5} And when all these
kings were met together, they came and pitched together at the waters of
Merom, to fight against Israel.\footnote{\textbf{11:5} met\ldots: Heb.
assembled by appointment}
\bibleverse{6} And the LORD said unto Joshua, Be not afraid because of
them: for to morrow about this time will I deliver them up all slain
before Israel: thou shalt hough their horses, and burn their chariots
with fire. \bibleverse{7} So Joshua came, and all the people of war with
him, against them by the waters of Merom suddenly; and they fell upon
them. \bibleverse{8} And the LORD delivered them into the hand of
Israel, who smote them, and chased them unto great Zidon, and unto
Misrephoth-maim, and unto the valley of Mizpeh eastward; and they smote
them, until they left them none
remaining.\footnote{\textbf{11:8} great Zidon: or,
Zidonrabbah}\footnote{\textbf{11:8} Misrephoth-maim: or, Salt pits: Heb.
Burnings of waters} \bibleverse{9} And Joshua did unto them as the
LORD bade him: he houghed their horses, and burnt their chariots with
fire.
\bibleverse{10} And Joshua at that time turned back, and took Hazor, and
smote the king thereof with the sword: for Hazor beforetime was the head
of all those kingdoms. \bibleverse{11} And they smote all the souls that
were therein with the edge of the sword, utterly destroying them: there
was not any left to breathe: and he burnt Hazor with fire.\footnote{\textbf{11:11}
any\ldots: Heb. any breath} \bibleverse{12} And all the cities of
those kings, and all the kings of them, did Joshua take, and smote them
with the edge of the sword, and he utterly destroyed them, as Moses the
servant of the LORD commanded. \bibleverse{13} But as for the cities
that stood still in their strength, Israel burned none of them, save
Hazor only; that did Joshua burn.\footnote{\textbf{11:13} in
their\ldots: Heb. on their heap} \bibleverse{14} And all the spoil of
these cities, and the cattle, the children of Israel took for a prey
unto themselves; but every man they smote with the edge of the sword,
until they had destroyed them, neither left they any to breathe.
\bibleverse{15} As the LORD commanded Moses his servant, so did Moses
command Joshua, and so did Joshua; he left nothing undone of all that
the LORD commanded Moses.\footnote{\textbf{11:15} left\ldots: Heb.
removed nothing} \bibleverse{16} So Joshua took all that land, the
hills, and all the south country, and all the land of Goshen, and the
valley, and the plain, and the mountain of Israel, and the valley of the
same; \bibleverse{17} Even from the mount Halak, that goeth up to Seir,
even unto Baal-gad in the valley of Lebanon under mount Hermon: and all
their kings he took, and smote them, and slew them.\footnote{\textbf{11:17}
the mount\ldots: or, the smooth mountain} \bibleverse{18} Joshua made
war a long time with all those kings. \bibleverse{19} There was not a
city that made peace with the children of Israel, save the Hivites the
inhabitants of Gibeon: all other they took in battle. \bibleverse{20}
For it was of the LORD to harden their hearts, that they should come
against Israel in battle, that he might destroy them utterly, and that
they might have no favour, but that he might destroy them, as the LORD
commanded Moses.
\bibleverse{21} And at that time came Joshua, and cut off the Anakims
from the mountains, from Hebron, from Debir, from Anab, and from all the
mountains of Judah, and from all the mountains of Israel: Joshua
destroyed them utterly with their cities. \bibleverse{22} There was none
of the Anakims left in the land of the children of Israel: only in Gaza,
in Gath, and in Ashdod, there remained. \bibleverse{23} So Joshua took
the whole land, according to all that the LORD said unto Moses; and
Joshua gave it for an inheritance unto Israel according to their
divisions by their tribes. And the land rested from war.
\hypertarget{section-11}{%
\section{12}\label{section-11}}
\bibleverse{1} Now these are the kings of the land, which the children
of Israel smote, and possessed their land on the other side Jordan
toward the rising of the sun, from the river Arnon unto mount Hermon,
and all the plain on the east: \bibleverse{2} Sihon king of the
Amorites, who dwelt in Heshbon, and ruled from Aroer, which is upon the
bank of the river Arnon, and from the middle of the river, and from half
Gilead, even unto the river Jabbok, which is the border of the children
of Ammon; \bibleverse{3} And from the plain to the sea of Chinneroth on
the east, and unto the sea of the plain, even the salt sea on the east,
the way to Beth-jeshimoth; and from the south, under
Ashdoth-pisgah:\footnote{\textbf{12:3} the south: or,
Teman}\footnote{\textbf{12:3} Ashdoth-pisgah: or, the springs of Pisgah, or,
the hill}
\bibleverse{4} And the coast of Og king of Bashan, which was of the
remnant of the giants, that dwelt at Ashtaroth and at Edrei,
\bibleverse{5} And reigned in mount Hermon, and in Salcah, and in all
Bashan, unto the border of the Geshurites and the Maachathites, and half
Gilead, the border of Sihon king of Heshbon. \bibleverse{6} Them did
Moses the servant of the LORD and the children of Israel smite: and
Moses the servant of the LORD gave it for a possession unto the
Reubenites, and the Gadites, and the half tribe of Manasseh.
\bibleverse{7} And these are the kings of the country which Joshua and
the children of Israel smote on this side Jordan on the west, from
Baal-gad in the valley of Lebanon even unto the mount Halak, that goeth
up to Seir; which Joshua gave unto the tribes of Israel for a possession
according to their divisions; \bibleverse{8} In the mountains, and in
the valleys, and in the plains, and in the springs, and in the
wilderness, and in the south country; the Hittites, the Amorites, and
the Canaanites, the Perizzites, the Hivites, and the Jebusites:
\bibleverse{9} The king of Jericho, one; the king of Ai, which is beside
Beth-el, one; \bibleverse{10} The king of Jerusalem, one; the king of
Hebron, one; \bibleverse{11} The king of Jarmuth, one; the king of
Lachish, one; \bibleverse{12} The king of Eglon, one; the king of Gezer,
one; \bibleverse{13} The king of Debir, one; the king of Geder, one;
\bibleverse{14} The king of Hormah, one; the king of Arad, one;
\bibleverse{15} The king of Libnah, one; the king of Adullam, one;
\bibleverse{16} The king of Makkedah, one; the king of Beth-el, one;
\bibleverse{17} The king of Tappuah, one; the king of Hepher, one;
\bibleverse{18} The king of Aphek, one; the king of Lasharon,
one;\footnote{\textbf{12:18} Lasharon: or, Sharon} \bibleverse{19} The
king of Madon, one; the king of Hazor, one; \bibleverse{20} The king of
Shimron-meron, one; the king of Achshaph, one; \bibleverse{21} The king
of Taanach, one; the king of Megiddo, one; \bibleverse{22} The king of
Kedesh, one; the king of Jokneam of Carmel, one; \bibleverse{23} The
king of Dor in the coast of Dor, one; the king of the nations of Gilgal,
one; \bibleverse{24} The king of Tirzah, one: all the kings thirty and
one.
\hypertarget{section-12}{%
\section{13}\label{section-12}}
\bibleverse{1} Now Joshua was old and stricken in years; and the LORD
said unto him, Thou art old and stricken in years, and there remaineth
yet very much land to be possessed.\footnote{\textbf{13:1} to\ldots:
Heb. to possess it} \bibleverse{2} This is the land that yet
remaineth: all the borders of the Philistines, and all Geshuri,
\bibleverse{3} From Sihor, which is before Egypt, even unto the borders
of Ekron northward, which is counted to the Canaanite: five lords of the
Philistines; the Gazathites, and the Ashdothites, the Eshkalonites, the
Gittites, and the Ekronites; also the Avites: \bibleverse{4} From the
south, all the land of the Canaanites, and Mearah that is beside the
Sidonians, unto Aphek, to the borders of the Amorites:\footnote{\textbf{13:4}
Mearah: or, the cave} \bibleverse{5} And the land of the Giblites, and
all Lebanon, toward the sunrising, from Baal-gad under mount Hermon unto
the entering into Hamath. \bibleverse{6} All the inhabitants of the hill
country from Lebanon unto Misrephoth-maim, and all the Sidonians, them
will I drive out from before the children of Israel: only divide thou it
by lot unto the Israelites for an inheritance, as I have commanded thee.
\bibleverse{7} Now therefore divide this land for an inheritance unto
the nine tribes, and the half tribe of Manasseh, \bibleverse{8} With
whom the Reubenites and the Gadites have received their inheritance,
which Moses gave them, beyond Jordan eastward, even as Moses the servant
of the LORD gave them; \bibleverse{9} From Aroer, that is upon the bank
of the river Arnon, and the city that is in the midst of the river, and
all the plain of Medeba unto Dibon; \bibleverse{10} And all the cities
of Sihon king of the Amorites, which reigned in Heshbon, unto the border
of the children of Ammon; \bibleverse{11} And Gilead, and the border of
the Geshurites and Maachathites, and all mount Hermon, and all Bashan
unto Salcah; \bibleverse{12} All the kingdom of Og in Bashan, which
reigned in Ashtaroth and in Edrei, who remained of the remnant of the
giants: for these did Moses smite, and cast them out. \bibleverse{13}
Nevertheless the children of Israel expelled not the Geshurites, nor the
Maachathites: but the Geshurites and the Maachathites dwell among the
Israelites until this day. \bibleverse{14} Only unto the tribe of Levi
he gave none inheritance; the sacrifices of the LORD God of Israel made
by fire are their inheritance, as he said unto them.
\bibleverse{15} And Moses gave unto the tribe of the children of Reuben
inheritance according to their families. \bibleverse{16} And their coast
was from Aroer, that is on the bank of the river Arnon, and the city
that is in the midst of the river, and all the plain by Medeba;
\bibleverse{17} Heshbon, and all her cities that are in the plain;
Dibon, and Bamoth-baal, and Beth-baal-meon,\footnote{\textbf{13:17}
Bamoth-baal\ldots: or, the high places of Baal, and house of Baal-meon}
\bibleverse{18} And Jahazah, and Kedemoth, and Mephaath, \bibleverse{19}
And Kirjathaim, and Sibmah, and Zareth-shahar in the mount of the
valley, \bibleverse{20} And Beth-peor, and Ashdoth-pisgah, and
Beth-jeshimoth,\footnote{\textbf{13:20} Ashdoth-pisgah: or, springs of
Pisgah, or, the hill} \bibleverse{21} And all the cities of the plain,
and all the kingdom of Sihon king of the Amorites, which reigned in
Heshbon, whom Moses smote with the princes of Midian, Evi, and Rekem,
and Zur, and Hur, and Reba, which were dukes of Sihon, dwelling in the
country.
\bibleverse{22} Balaam also the son of Beor, the soothsayer, did the
children of Israel slay with the sword among them that were slain by
them.\footnote{\textbf{13:22} soothsayer: or, diviner} \bibleverse{23}
And the border of the children of Reuben was Jordan, and the border
thereof. This was the inheritance of the children of Reuben after their
families, the cities and the villages thereof. \bibleverse{24} And Moses
gave inheritance unto the tribe of Gad, even unto the children of Gad
according to their families. \bibleverse{25} And their coast was Jazer,
and all the cities of Gilead, and half the land of the children of
Ammon, unto Aroer that is before Rabbah; \bibleverse{26} And from
Heshbon unto Ramath-mizpeh, and Betonim; and from Mahanaim unto the
border of Debir; \bibleverse{27} And in the valley, Beth-aram, and
Beth-nimrah, and Succoth, and Zaphon, the rest of the kingdom of Sihon
king of Heshbon, Jordan and his border, even unto the edge of the sea of
Chinnereth on the other side Jordan eastward. \bibleverse{28} This is
the inheritance of the children of Gad after their families, the cities,
and their villages.
\bibleverse{29} And Moses gave inheritance unto the half tribe of
Manasseh: and this was the possession of the half tribe of the children
of Manasseh by their families. \bibleverse{30} And their coast was from
Mahanaim, all Bashan, all the kingdom of Og king of Bashan, and all the
towns of Jair, which are in Bashan, threescore cities: \bibleverse{31}
And half Gilead, and Ashtaroth, and Edrei, cities of the kingdom of Og
in Bashan, were pertaining unto the children of Machir the son of
Manasseh, even to the one half of the children of Machir by their
families. \bibleverse{32} These are the countries which Moses did
distribute for inheritance in the plains of Moab, on the other side
Jordan, by Jericho, eastward. \bibleverse{33} But unto the tribe of Levi
Moses gave not any inheritance: the LORD God of Israel was their
inheritance, as he said unto them.
\hypertarget{section-13}{%
\section{14}\label{section-13}}
\bibleverse{1} And these are the countries which the children of Israel
inherited in the land of Canaan, which Eleazar the priest, and Joshua
the son of Nun, and the heads of the fathers of the tribes of the
children of Israel, distributed for inheritance to them. \bibleverse{2}
By lot was their inheritance, as the LORD commanded by the hand of
Moses, for the nine tribes, and for the half tribe. \bibleverse{3} For
Moses had given the inheritance of two tribes and an half tribe on the
other side Jordan: but unto the Levites he gave none inheritance among
them. \bibleverse{4} For the children of Joseph were two tribes,
Manasseh and Ephraim: therefore they gave no part unto the Levites in
the land, save cities to dwell in, with their suburbs for their cattle
and for their substance. \bibleverse{5} As the LORD commanded Moses, so
the children of Israel did, and they divided the land.
\bibleverse{6} Then the children of Judah came unto Joshua in Gilgal:
and Caleb the son of Jephunneh the Kenezite said unto him, Thou knowest
the thing that the LORD said unto Moses the man of God concerning me and
thee in Kadesh-barnea. \bibleverse{7} Forty years old was I when Moses
the servant of the LORD sent me from Kadesh-barnea to espy out the land;
and I brought him word again as it was in mine heart. \bibleverse{8}
Nevertheless my brethren that went up with me made the heart of the
people melt: but I wholly followed the LORD my God. \bibleverse{9} And
Moses sware on that day, saying, Surely the land whereon thy feet have
trodden shall be thine inheritance, and thy children's for ever, because
thou hast wholly followed the LORD my God. \bibleverse{10} And now,
behold, the LORD hath kept me alive, as he said, these forty and five
years, even since the LORD spake this word unto Moses, while the
children of Israel wandered in the wilderness: and now, lo, I am this
day fourscore and five years old.\footnote{\textbf{14:10} wandered: Heb.
walked} \bibleverse{11} As yet I am as strong this day as I was in the
day that Moses sent me: as my strength was then, even so is my strength
now, for war, both to go out, and to come in. \bibleverse{12} Now
therefore give me this mountain, whereof the LORD spake in that day; for
thou heardest in that day how the Anakims were there, and that the
cities were great and fenced: if so be the LORD will be with me, then I
shall be able to drive them out, as the LORD said. \bibleverse{13} And
Joshua blessed him, and gave unto Caleb the son of Jephunneh Hebron for
an inheritance. \bibleverse{14} Hebron therefore became the inheritance
of Caleb the son of Jephunneh the Kenezite unto this day, because that
he wholly followed the LORD God of Israel. \bibleverse{15} And the name
of Hebron before was Kirjath-arba; which Arba was a great man among the
Anakims. And the land had rest from war.
\hypertarget{section-14}{%
\section{15}\label{section-14}}
\bibleverse{1} This then was the lot of the tribe of the children of
Judah by their families; even to the border of Edom the wilderness of
Zin southward was the uttermost part of the south coast. \bibleverse{2}
And their south border was from the shore of the salt sea, from the bay
that looketh southward:\footnote{\textbf{15:2} bay: Heb. tongue}
\bibleverse{3} And it went out to the south side to Maaleh-acrabbim, and
passed along to Zin, and ascended up on the south side unto
Kadesh-barnea, and passed along to Hezron, and went up to Adar, and
fetched a compass to Karkaa:\footnote{\textbf{15:3} Maaleh-acrabbim: or,
the going up to Acrabbim} \bibleverse{4} From thence it passed toward
Azmon, and went out unto the river of Egypt; and the goings out of that
coast were at the sea: this shall be your south coast. \bibleverse{5}
And the east border was the salt sea, even unto the end of Jordan. And
their border in the north quarter was from the bay of the sea at the
uttermost part of Jordan: \bibleverse{6} And the border went up to
Beth-hogla, and passed along by the north of Beth-arabah; and the border
went up to the stone of Bohan the son of Reuben: \bibleverse{7} And the
border went up toward Debir from the valley of Achor, and so northward,
looking toward Gilgal, that is before the going up to Adummim, which is
on the south side of the river: and the border passed toward the waters
of En-shemesh, and the goings out thereof were at En-rogel:
\bibleverse{8} And the border went up by the valley of the son of Hinnom
unto the south side of the Jebusite; the same is Jerusalem: and the
border went up to the top of the mountain that lieth before the valley
of Hinnom westward, which is at the end of the valley of the giants
northward: \bibleverse{9} And the border was drawn from the top of the
hill unto the fountain of the water of Nephtoah, and went out to the
cities of mount Ephron; and the border was drawn to Baalah, which is
Kirjath-jearim: \bibleverse{10} And the border compassed from Baalah
westward unto mount Seir, and passed along unto the side of mount
Jearim, which is Chesalon, on the north side, and went down to
Beth-shemesh, and passed on to Timnah: \bibleverse{11} And the border
went out unto the side of Ekron northward: and the border was drawn to
Shicron, and passed along to mount Baalah, and went out unto Jabneel;
and the goings out of the border were at the sea. \bibleverse{12} And
the west border was to the great sea, and the coast thereof. This is the
coast of the children of Judah round about according to their families.
\bibleverse{13} And unto Caleb the son of Jephunneh he gave a part among
the children of Judah, according to the commandment of the LORD to
Joshua, even the city of Arba the father of Anak, which city is
Hebron.\footnote{\textbf{15:13} the city\ldots: or, Kirjath-arba}
\bibleverse{14} And Caleb drove thence the three sons of Anak, Sheshai,
and Ahiman, and Talmai, the children of Anak. \bibleverse{15} And he
went up thence to the inhabitants of Debir: and the name of Debir before
was Kirjath-sepher.
\bibleverse{16} And Caleb said, He that smiteth Kirjath-sepher, and
taketh it, to him will I give Achsah my daughter to wife.
\bibleverse{17} And Othniel the son of Kenaz, the brother of Caleb, took
it: and he gave him Achsah his daughter to wife. \bibleverse{18} And it
came to pass, as she came unto him, that she moved him to ask of her
father a field: and she lighted off her ass; and Caleb said unto her,
What wouldest thou? \bibleverse{19} Who answered, Give me a blessing;
for thou hast given me a south land; give me also springs of water. And
he gave her the upper springs, and the nether springs.
\bibleverse{20} This is the inheritance of the tribe of the children of
Judah according to their families. \bibleverse{21} And the uttermost
cities of the tribe of the children of Judah toward the coast of Edom
southward were Kabzeel, and Eder, and Jagur, \bibleverse{22} And Kinah,
and Dimonah, and Adadah, \bibleverse{23} And Kedesh, and Hazor, and
Ithnan, \bibleverse{24} Ziph, and Telem, and Bealoth, \bibleverse{25}
And Hazor, Hadattah, and Kerioth, and Hezron, which is Hazor,
\bibleverse{26} Amam, and Shema, and Moladah, \bibleverse{27} And
Hazar-gaddah, and Heshmon, and Beth-palet, \bibleverse{28} And
Hazar-shual, and Beer-sheba, and Bizjothjah, \bibleverse{29} Baalah, and
Iim, and Azem, \bibleverse{30} And Eltolad, and Chesil, and Hormah,
\bibleverse{31} And Ziklag, and Madmannah, and Sansannah,
\bibleverse{32} And Lebaoth, and Shilhim, and Ain, and Rimmon: all the
cities are twenty and nine, with their villages: \bibleverse{33} And in
the valley, Eshtaol, and Zoreah, and Ashnah, \bibleverse{34} And Zanoah,
and En-gannim, Tappuah, and Enam, \bibleverse{35} Jarmuth, and Adullam,
Socoh, and Azekah, \bibleverse{36} And Sharaim, and Adithaim, and
Gederah, and Gederothaim; fourteen cities with their
villages:\footnote{\textbf{15:36} and Gederothaim: or, or Gederothaim}
\bibleverse{37} Zenan, and Hadashah, and Migdal-gad, \bibleverse{38} And
Dilean, and Mizpeh, and Joktheel, \bibleverse{39} Lachish, and Bozkath,
and Eglon, \bibleverse{40} And Cabbon, and Lahmam, and Kithlish,
\bibleverse{41} And Gederoth, Beth-dagon, and Naamah, and Makkedah;
sixteen cities with their villages: \bibleverse{42} Libnah, and Ether,
and Ashan, \bibleverse{43} And Jiphtah, and Ashnah, and Nezib,
\bibleverse{44} And Keilah, and Achzib, and Mareshah; nine cities with
their villages: \bibleverse{45} Ekron, with her towns and her villages:
\bibleverse{46} From Ekron even unto the sea, all that lay near Ashdod,
with their villages:\footnote{\textbf{15:46} near: Heb. by the place of}
\bibleverse{47} Ashdod with her towns and her villages, Gaza with her
towns and her villages, unto the river of Egypt, and the great sea, and
the border thereof:
\bibleverse{48} And in the mountains, Shamir, and Jattir, and Socoh,
\bibleverse{49} And Dannah, and Kirjath-sannah, which is Debir,
\bibleverse{50} And Anab, and Eshtemoh, and Anim, \bibleverse{51} And
Goshen, and Holon, and Giloh; eleven cities with their villages:
\bibleverse{52} Arab, and Dumah, and Eshean, \bibleverse{53} And Janum,
and Beth-tappuah, and Aphekah,\footnote{\textbf{15:53} Janum: or, Janus}
\bibleverse{54} And Humtah, and Kirjath-arba, which is Hebron, and Zior;
nine cities with their villages: \bibleverse{55} Maon, Carmel, and Ziph,
and Juttah, \bibleverse{56} And Jezreel, and Jokdeam, and Zanoah,
\bibleverse{57} Cain, Gibeah, and Timnah; ten cities with their
villages: \bibleverse{58} Halhul, Beth-zur, and Gedor, \bibleverse{59}
And Maarath, and Beth-anoth, and Eltekon; six cities with their
villages: \bibleverse{60} Kirjath-baal, which is Kirjath-jearim, and
Rabbah; two cities with their villages: \bibleverse{61} In the
wilderness, Beth-arabah, Middin, and Secacah, \bibleverse{62} And
Nibshan, and the city of Salt, and En-gedi; six cities with their
villages.
\bibleverse{63} As for the Jebusites the inhabitants of Jerusalem, the
children of Judah could not drive them out: but the Jebusites dwell with
the children of Judah at Jerusalem unto this day.
\hypertarget{section-15}{%
\section{16}\label{section-15}}
\bibleverse{1} And the lot of the children of Joseph fell from Jordan by
Jericho, unto the water of Jericho on the east, to the wilderness that
goeth up from Jericho throughout mount Beth-el,\footnote{\textbf{16:1}
fell: Heb. went forth} \bibleverse{2} And goeth out from Beth-el to
Luz, and passeth along unto the borders of Archi to Ataroth,
\bibleverse{3} And goeth down westward to the coast of Japhleti, unto
the coast of Beth-horon the nether, and to Gezer: and the goings out
thereof are at the sea. \bibleverse{4} So the children of Joseph,
Manasseh and Ephraim, took their inheritance.
\bibleverse{5} And the border of the children of Ephraim according to
their families was thus: even the border of their inheritance on the
east side was Ataroth-addar, unto Beth-horon the upper; \bibleverse{6}
And the border went out toward the sea to Michmethah on the north side;
and the border went about eastward unto Taanath-shiloh, and passed by it
on the east to Janohah; \bibleverse{7} And it went down from Janohah to
Ataroth, and to Naarath, and came to Jericho, and went out at Jordan.
\bibleverse{8} The border went out from Tappuah westward unto the river
Kanah; and the goings out thereof were at the sea. This is the
inheritance of the tribe of the children of Ephraim by their families.
\bibleverse{9} And the separate cities for the children of Ephraim were
among the inheritance of the children of Manasseh, all the cities with
their villages. \bibleverse{10} And they drave not out the Canaanites
that dwelt in Gezer: but the Canaanites dwell among the Ephraimites unto
this day, and serve under tribute.
\hypertarget{section-16}{%
\section{17}\label{section-16}}
\bibleverse{1} There was also a lot for the tribe of Manasseh; for he
was the firstborn of Joseph; to wit, for Machir the firstborn of
Manasseh, the father of Gilead: because he was a man of war, therefore
he had Gilead and Bashan. \bibleverse{2} There was also a lot for the
rest of the children of Manasseh by their families; for the children of
Abiezer, and for the children of Helek, and for the children of Asriel,
and for the children of Shechem, and for the children of Hepher, and for
the children of Shemida: these were the male children of Manasseh the
son of Joseph by their families.\footnote{\textbf{17:2} Abiezer: also
called, Jeezer}
\bibleverse{3} But Zelophehad, the son of Hepher, the son of Gilead, the
son of Machir, the son of Manasseh, had no sons, but daughters: and
these are the names of his daughters, Mahlah, and Noah, Hoglah, Milcah,
and Tirzah. \bibleverse{4} And they came near before Eleazar the priest,
and before Joshua the son of Nun, and before the princes, saying, The
LORD commanded Moses to give us an inheritance among our brethren.
Therefore according to the commandment of the LORD he gave them an
inheritance among the brethren of their father. \bibleverse{5} And there
fell ten portions to Manasseh, beside the land of Gilead and Bashan,
which were on the other side Jordan; \bibleverse{6} Because the
daughters of Manasseh had an inheritance among his sons: and the rest of
Manasseh's sons had the land of Gilead.
\bibleverse{7} And the coast of Manasseh was from Asher to Michmethah,
that lieth before Shechem; and the border went along on the right hand
unto the inhabitants of En-tappuah. \bibleverse{8} Now Manasseh had the
land of Tappuah: but Tappuah on the border of Manasseh belonged to the
children of Ephraim; \bibleverse{9} And the coast descended unto the
river Kanah, southward of the river: these cities of Ephraim are among
the cities of Manasseh: the coast of Manasseh also was on the north side
of the river, and the outgoings of it were at the sea:\footnote{\textbf{17:9}
river Kanah: or, brook of reeds} \bibleverse{10} Southward it was
Ephraim's, and northward it was Manasseh's, and the sea is his border;
and they met together in Asher on the north, and in Issachar on the
east. \bibleverse{11} And Manasseh had in Issachar and in Asher
Beth-shean and her towns, and Ibleam and her towns, and the inhabitants
of Dor and her towns, and the inhabitants of Endor and her towns, and
the inhabitants of Taanach and her towns, and the inhabitants of Megiddo
and her towns, even three countries. \bibleverse{12} Yet the children of
Manasseh could not drive out the inhabitants of those cities; but the
Canaanites would dwell in that land. \bibleverse{13} Yet it came to
pass, when the children of Israel were waxen strong, that they put the
Canaanites to tribute; but did not utterly drive them out.
\bibleverse{14} And the children of Joseph spake unto Joshua, saying,
Why hast thou given me but one lot and one portion to inherit, seeing I
am a great people, forasmuch as the LORD hath blessed me hitherto?
\bibleverse{15} And Joshua answered them, If thou be a great people,
then get thee up to the wood country, and cut down for thyself there in
the land of the Perizzites and of the giants, if mount Ephraim be too
narrow for thee.\footnote{\textbf{17:15} giants: or, Rephaims}
\bibleverse{16} And the children of Joseph said, The hill is not enough
for us: and all the Canaanites that dwell in the land of the valley have
chariots of iron, both they who are of Beth-shean and her towns, and
they who are of the valley of Jezreel. \bibleverse{17} And Joshua spake
unto the house of Joseph, even to Ephraim and to Manasseh, saying, Thou
art a great people, and hast great power: thou shalt not have one lot
only: \bibleverse{18} But the mountain shall be thine; for it is a wood,
and thou shalt cut it down: and the outgoings of it shall be thine: for
thou shalt drive out the Canaanites, though they have iron chariots, and
though they be strong.
\hypertarget{section-17}{%
\section{18}\label{section-17}}
\bibleverse{1} And the whole congregation of the children of Israel
assembled together at Shiloh, and set up the tabernacle of the
congregation there. And the land was subdued before them.
\bibleverse{2} And there remained among the children of Israel seven
tribes, which had not yet received their inheritance. \bibleverse{3} And
Joshua said unto the children of Israel, How long are ye slack to go to
possess the land, which the LORD God of your fathers hath given you?
\bibleverse{4} Give out from among you three men for each tribe: and I
will send them, and they shall rise, and go through the land, and
describe it according to the inheritance of them; and they shall come
again to me. \bibleverse{5} And they shall divide it into seven parts:
Judah shall abide in their coast on the south, and the house of Joseph
shall abide in their coasts on the north. \bibleverse{6} Ye shall
therefore describe the land into seven parts, and bring the description
hither to me, that I may cast lots for you here before the LORD our God.
\bibleverse{7} But the Levites have no part among you; for the
priesthood of the LORD is their inheritance: and Gad, and Reuben, and
half the tribe of Manasseh, have received their inheritance beyond
Jordan on the east, which Moses the servant of the LORD gave them.
\bibleverse{8} And the men arose, and went away: and Joshua charged them
that went to describe the land, saying, Go and walk through the land,
and describe it, and come again to me, that I may here cast lots for you
before the LORD in Shiloh. \bibleverse{9} And the men went and passed
through the land, and described it by cities into seven parts in a book,
and came again to Joshua to the host at Shiloh.
\bibleverse{10} And Joshua cast lots for them in Shiloh before the LORD:
and there Joshua divided the land unto the children of Israel according
to their divisions.
\bibleverse{11} And the lot of the tribe of the children of Benjamin
came up according to their families: and the coast of their lot came
forth between the children of Judah and the children of Joseph.
\bibleverse{12} And their border on the north side was from Jordan; and
the border went up to the side of Jericho on the north side, and went up
through the mountains westward; and the goings out thereof were at the
wilderness of Beth-aven. \bibleverse{13} And the border went over from
thence toward Luz, to the side of Luz, which is Beth-el, southward; and
the border descended to Ataroth-adar, near the hill that lieth on the
south side of the nether Beth-horon. \bibleverse{14} And the border was
drawn thence, and compassed the corner of the sea southward, from the
hill that lieth before Beth-horon southward; and the goings out thereof
were at Kirjath-baal, which is Kirjath-jearim, a city of the children of
Judah: this was the west quarter. \bibleverse{15} And the south quarter
was from the end of Kirjath-jearim, and the border went out on the west,
and went out to the well of waters of Nephtoah: \bibleverse{16} And the
border came down to the end of the mountain that lieth before the valley
of the son of Hinnom, and which is in the valley of the giants on the
north, and descended to the valley of Hinnom, to the side of Jebusi on
the south, and descended to En-rogel, \bibleverse{17} And was drawn from
the north, and went forth to En-shemesh, and went forth toward Geliloth,
which is over against the going up of Adummim, and descended to the
stone of Bohan the son of Reuben, \bibleverse{18} And passed along
toward the side over against Arabah northward, and went down unto
Arabah:\footnote{\textbf{18:18} Arabah: or, the plain} \bibleverse{19}
And the border passed along to the side of Beth-hoglah northward: and
the outgoings of the border were at the north bay of the salt sea at the
south end of Jordan: this was the south coast.\footnote{\textbf{18:19}
bay: Heb. tongue} \bibleverse{20} And Jordan was the border of it on
the east side. This was the inheritance of the children of Benjamin, by
the coasts thereof round about, according to their families.
\bibleverse{21} Now the cities of the tribe of the children of Benjamin
according to their families were Jericho, and Beth-hoglah, and the
valley of Keziz, \bibleverse{22} And Beth-arabah, and Zemaraim, and
Beth-el, \bibleverse{23} And Avim, and Parah, and Ophrah,
\bibleverse{24} And Chephar-haammonai, and Ophni, and Gaba; twelve
cities with their villages: \bibleverse{25} Gibeon, and Ramah, and
Beeroth, \bibleverse{26} And Mizpeh, and Chephirah, and Mozah,
\bibleverse{27} And Rekem, and Irpeel, and Taralah, \bibleverse{28} And
Zelah, Eleph, and Jebusi, which is Jerusalem, Gibeath, and Kirjath;
fourteen cities with their villages. This is the inheritance of the
children of Benjamin according to their families.
\hypertarget{section-18}{%
\section{19}\label{section-18}}
\bibleverse{1} And the second lot came forth to Simeon, even for the
tribe of the children of Simeon according to their families: and their
inheritance was within the inheritance of the children of Judah.
\bibleverse{2} And they had in their inheritance Beer-sheba, or Sheba,
and Moladah, \bibleverse{3} And Hazar-shual, and Balah, and Azem,
\bibleverse{4} And Eltolad, and Bethul, and Hormah, \bibleverse{5} And
Ziklag, and Beth-marcaboth, and Hazar-susah, \bibleverse{6} And
Beth-lebaoth, and Sharuhen; thirteen cities and their villages:
\bibleverse{7} Ain, Remmon, and Ether, and Ashan; four cities and their
villages: \bibleverse{8} And all the villages that were round about
these cities to Baalath-beer, Ramath of the south. This is the
inheritance of the tribe of the children of Simeon according to their
families. \bibleverse{9} Out of the portion of the children of Judah was
the inheritance of the children of Simeon: for the part of the children
of Judah was too much for them: therefore the children of Simeon had
their inheritance within the inheritance of them.
\bibleverse{10} And the third lot came up for the children of Zebulun
according to their families: and the border of their inheritance was
unto Sarid: \bibleverse{11} And their border went up toward the sea, and
Maralah, and reached to Dabbasheth, and reached to the river that is
before Jokneam; \bibleverse{12} And turned from Sarid eastward toward
the sunrising unto the border of Chisloth-tabor, and then goeth out to
Daberath, and goeth up to Japhia, \bibleverse{13} And from thence
passeth on along on the east to Gittah-hepher, to Ittah-kazin, and goeth
out to Remmon-methoar to Neah;\footnote{\textbf{19:13} methoar: or,
which is drawn} \bibleverse{14} And the border compasseth it on the
north side to Hannathon: and the outgoings thereof are in the valley of
Jiphthah-el: \bibleverse{15} And Kattath, and Nahallal, and Shimron, and
Idalah, and Beth-lehem: twelve cities with their villages.
\bibleverse{16} This is the inheritance of the children of Zebulun
according to their families, these cities with their villages.
\bibleverse{17} And the fourth lot came out to Issachar, for the
children of Issachar according to their families. \bibleverse{18} And
their border was toward Jezreel, and Chesulloth, and Shunem,
\bibleverse{19} And Hapharaim, and Shion, and Anaharath, \bibleverse{20}
And Rabbith, and Kishion, and Abez, \bibleverse{21} And Remeth, and
En-gannim, and En-haddah, and Beth-pazzez; \bibleverse{22} And the coast
reacheth to Tabor, and Shahazimah, and Beth-shemesh; and the outgoings
of their border were at Jordan: sixteen cities with their villages.
\bibleverse{23} This is the inheritance of the tribe of the children of
Issachar according to their families, the cities and their villages.
\bibleverse{24} And the fifth lot came out for the tribe of the children
of Asher according to their families. \bibleverse{25} And their border
was Helkath, and Hali, and Beten, and Achshaph, \bibleverse{26} And
Alammelech, and Amad, and Misheal; and reacheth to Carmel westward, and
to Shihor-libnath; \bibleverse{27} And turneth toward the sunrising to
Beth-dagon, and reacheth to Zebulun, and to the valley of Jiphthah-el
toward the north side of Beth-emek, and Neiel, and goeth out to Cabul on
the left hand, \bibleverse{28} And Hebron, and Rehob, and Hammon, and
Kanah, even unto great Zidon; \bibleverse{29} And then the coast turneth
to Ramah, and to the strong city Tyre; and the coast turneth to Hosah;
and the outgoings thereof are at the sea from the coast to
Achzib:\footnote{\textbf{19:29} Tyre: Heb. Tzor} \bibleverse{30} Ummah
also, and Aphek, and Rehob: twenty and two cities with their villages.
\bibleverse{31} This is the inheritance of the tribe of the children of
Asher according to their families, these cities with their villages.
\bibleverse{32} The sixth lot came out to the children of Naphtali, even
for the children of Naphtali according to their families.
\bibleverse{33} And their coast was from Heleph, from Allon to
Zaanannim, and Adami, Nekeb, and Jabneel, unto Lakum; and the outgoings
thereof were at Jordan: \bibleverse{34} And then the coast turneth
westward to Aznoth-tabor, and goeth out from thence to Hukkok, and
reacheth to Zebulun on the south side, and reacheth to Asher on the west
side, and to Judah upon Jordan toward the sunrising. \bibleverse{35} And
the fenced cities are Ziddim, Zer, and Hammath, Rakkath, and Chinnereth,
\bibleverse{36} And Adamah, and Ramah, and Hazor, \bibleverse{37} And
Kedesh, and Edrei, and En-hazor, \bibleverse{38} And Iron, and
Migdal-el, Horem, and Beth-anath, and Beth-shemesh; nineteen cities with
their villages. \bibleverse{39} This is the inheritance of the tribe of
the children of Naphtali according to their families, the cities and
their villages.
\bibleverse{40} And the seventh lot came out for the tribe of the
children of Dan according to their families. \bibleverse{41} And the
coast of their inheritance was Zorah, and Eshtaol, and Ir-shemesh,
\bibleverse{42} And Shaalabbin, and Ajalon, and Jethlah, \bibleverse{43}
And Elon, and Thimnathah, and Ekron, \bibleverse{44} And Eltekeh, and
Gibbethon, and Baalath, \bibleverse{45} And Jehud, and Bene-berak, and
Gath-rimmon, \bibleverse{46} And Me-jarkon, and Rakkon, with the border
before Japho.\footnote{\textbf{19:46} before: or, over
against}\footnote{\textbf{19:46} Japho: or, Joppa} \bibleverse{47} And the
coast of the children of Dan went out too little for them: therefore the
children of Dan went up to fight against Leshem, and took it, and smote
it with the edge of the sword, and possessed it, and dwelt therein, and
called Leshem, Dan, after the name of Dan their father. \bibleverse{48}
This is the inheritance of the tribe of the children of Dan according to
their families, these cities with their villages.
\bibleverse{49} When they had made an end of dividing the land for
inheritance by their coasts, the children of Israel gave an inheritance
to Joshua the son of Nun among them: \bibleverse{50} According to the
word of the LORD they gave him the city which he asked, even
Timnath-serah in mount Ephraim: and he built the city, and dwelt
therein. \bibleverse{51} These are the inheritances, which Eleazar the
priest, and Joshua the son of Nun, and the heads of the fathers of the
tribes of the children of Israel, divided for an inheritance by lot in
Shiloh before the LORD, at the door of the tabernacle of the
congregation. So they made an end of dividing the country.
\hypertarget{section-19}{%
\section{20}\label{section-19}}
\bibleverse{1} The LORD also spake unto Joshua, saying, \bibleverse{2}
Speak to the children of Israel, saying, Appoint out for you cities of
refuge, whereof I spake unto you by the hand of Moses: \bibleverse{3}
That the slayer that killeth any person unawares and unwittingly may
flee thither: and they shall be your refuge from the avenger of blood.
\bibleverse{4} And when he that doth flee unto one of those cities shall
stand at the entering of the gate of the city, and shall declare his
cause in the ears of the elders of that city, they shall take him into
the city unto them, and give him a place, that he may dwell among them.
\bibleverse{5} And if the avenger of blood pursue after him, then they
shall not deliver the slayer up into his hand; because he smote his
neighbour unwittingly, and hated him not beforetime. \bibleverse{6} And
he shall dwell in that city, until he stand before the congregation for
judgment, and until the death of the high priest that shall be in those
days: then shall the slayer return, and come unto his own city, and unto
his own house, unto the city from whence he fled.
\bibleverse{7} And they appointed Kedesh in Galilee in mount Naphtali,
and Shechem in mount Ephraim, and Kirjath-arba, which is Hebron, in the
mountain of Judah.\footnote{\textbf{20:7} appointed: Heb. sanctified}
\bibleverse{8} And on the other side Jordan by Jericho eastward, they
assigned Bezer in the wilderness upon the plain out of the tribe of
Reuben, and Ramoth in Gilead out of the tribe of Gad, and Golan in
Bashan out of the tribe of Manasseh. \bibleverse{9} These were the
cities appointed for all the children of Israel, and for the stranger
that sojourneth among them, that whosoever killeth any person at
unawares might flee thither, and not die by the hand of the avenger of
blood, until he stood before the congregation.
\hypertarget{section-20}{%
\section{21}\label{section-20}}
\bibleverse{1} Then came near the heads of the fathers of the Levites
unto Eleazar the priest, and unto Joshua the son of Nun, and unto the
heads of the fathers of the tribes of the children of Israel;
\bibleverse{2} And they spake unto them at Shiloh in the land of Canaan,
saying, The LORD commanded by the hand of Moses to give us cities to
dwell in, with the suburbs thereof for our cattle. \bibleverse{3} And
the children of Israel gave unto the Levites out of their inheritance,
at the commandment of the LORD, these cities and their suburbs.
\bibleverse{4} And the lot came out for the families of the Kohathites:
and the children of Aaron the priest, which were of the Levites, had by
lot out of the tribe of Judah, and out of the tribe of Simeon, and out
of the tribe of Benjamin, thirteen cities. \bibleverse{5} And the rest
of the children of Kohath had by lot out of the families of the tribe of
Ephraim, and out of the tribe of Dan, and out of the half tribe of
Manasseh, ten cities. \bibleverse{6} And the children of Gershon had by
lot out of the families of the tribe of Issachar, and out of the tribe
of Asher, and out of the tribe of Naphtali, and out of the half tribe of
Manasseh in Bashan, thirteen cities. \bibleverse{7} The children of
Merari by their families had out of the tribe of Reuben, and out of the
tribe of Gad, and out of the tribe of Zebulun, twelve cities.
\bibleverse{8} And the children of Israel gave by lot unto the Levites
these cities with their suburbs, as the LORD commanded by the hand of
Moses.
\bibleverse{9} And they gave out of the tribe of the children of Judah,
and out of the tribe of the children of Simeon, these cities which are
here mentioned by name,\footnote{\textbf{21:9} mentioned: Heb. called}
\bibleverse{10} Which the children of Aaron, being of the families of
the Kohathites, who were of the children of Levi, had: for theirs was
the first lot. \bibleverse{11} And they gave them the city of Arba the
father of Anak, which city is Hebron, in the hill country of Judah, with
the suburbs thereof round about it.\footnote{\textbf{21:11} the
city\ldots: or, Kirjath-arba} \bibleverse{12} But the fields of the
city, and the villages thereof, gave they to Caleb the son of Jephunneh
for his possession.
\bibleverse{13} Thus they gave to the children of Aaron the priest
Hebron with her suburbs, to be a city of refuge for the slayer; and
Libnah with her suburbs, \bibleverse{14} And Jattir with her suburbs,
and Eshtemoa with her suburbs, \bibleverse{15} And Holon with her
suburbs, and Debir with her suburbs,\footnote{\textbf{21:15} Holon: also
called, Hilen} \bibleverse{16} And Ain with her suburbs, and Juttah
with her suburbs, and Beth-shemesh with her suburbs; nine cities out of
those two tribes.\footnote{\textbf{21:16} Ain: also called, Ashan}
\bibleverse{17} And out of the tribe of Benjamin, Gibeon with her
suburbs, Geba with her suburbs,\footnote{\textbf{21:17} Geba: also
called, Gaba} \bibleverse{18} Anathoth with her suburbs, and Almon
with her suburbs; four cities.\footnote{\textbf{21:18} Almon: also
called, Alemeth} \bibleverse{19} All the cities of the children of
Aaron, the priests, were thirteen cities with their suburbs.
\bibleverse{20} And the families of the children of Kohath, the Levites
which remained of the children of Kohath, even they had the cities of
their lot out of the tribe of Ephraim. \bibleverse{21} For they gave
them Shechem with her suburbs in mount Ephraim, to be a city of refuge
for the slayer; and Gezer with her suburbs, \bibleverse{22} And Kibzaim
with her suburbs, and Beth-horon with her suburbs; four cities.
\bibleverse{23} And out of the tribe of Dan, Eltekeh with her suburbs,
Gibbethon with her suburbs, \bibleverse{24} Aijalon with her suburbs,
Gath-rimmon with her suburbs; four cities. \bibleverse{25} And out of
the half tribe of Manasseh, Tanach with her suburbs, and Gath-rimmon
with her suburbs; two cities. \bibleverse{26} All the cities were ten
with their suburbs for the families of the children of Kohath that
remained.
\bibleverse{27} And unto the children of Gershon, of the families of the
Levites, out of the other half tribe of Manasseh they gave Golan in
Bashan with her suburbs, to be a city of refuge for the slayer; and
Beesh-terah with her suburbs; two cities. \bibleverse{28} And out of the
tribe of Issachar, Kishon with her suburbs, Dabareh with her suburbs,
\bibleverse{29} Jarmuth with her suburbs, En-gannim with her suburbs;
four cities. \bibleverse{30} And out of the tribe of Asher, Mishal with
her suburbs, Abdon with her suburbs, \bibleverse{31} Helkath with her
suburbs, and Rehob with her suburbs; four cities. \bibleverse{32} And
out of the tribe of Naphtali, Kedesh in Galilee with her suburbs, to be
a city of refuge for the slayer; and Hammoth-dor with her suburbs, and
Kartan with her suburbs; three cities. \bibleverse{33} All the cities of
the Gershonites according to their families were thirteen cities with
their suburbs.
\bibleverse{34} And unto the families of the children of Merari, the
rest of the Levites, out of the tribe of Zebulun, Jokneam with her
suburbs, and Kartah with her suburbs, \bibleverse{35} Dimnah with her
suburbs, Nahalal with her suburbs; four cities. \bibleverse{36} And out
of the tribe of Reuben, Bezer with her suburbs, and Jahazah with her
suburbs, \bibleverse{37} Kedemoth with her suburbs, and Mephaath with
her suburbs; four cities. \bibleverse{38} And out of the tribe of Gad,
Ramoth in Gilead with her suburbs, to be a city of refuge for the
slayer; and Mahanaim with her suburbs, \bibleverse{39} Heshbon with her
suburbs, Jazer with her suburbs; four cities in all. \bibleverse{40} So
all the cities for the children of Merari by their families, which were
remaining of the families of the Levites, were by their lot twelve
cities. \bibleverse{41} All the cities of the Levites within the
possession of the children of Israel were forty and eight cities with
their suburbs. \bibleverse{42} These cities were every one with their
suburbs round about them: thus were all these cities.
\bibleverse{43} And the LORD gave unto Israel all the land which he
sware to give unto their fathers; and they possessed it, and dwelt
therein. \bibleverse{44} And the LORD gave them rest round about,
according to all that he sware unto their fathers: and there stood not a
man of all their enemies before them; the LORD delivered all their
enemies into their hand. \bibleverse{45} There failed not ought of any
good thing which the LORD had spoken unto the house of Israel; all came
to pass.
\hypertarget{section-21}{%
\section{22}\label{section-21}}
\bibleverse{1} Then Joshua called the Reubenites, and the Gadites, and
the half tribe of Manasseh, \bibleverse{2} And said unto them, Ye have
kept all that Moses the servant of the LORD commanded you, and have
obeyed my voice in all that I commanded you: \bibleverse{3} Ye have not
left your brethren these many days unto this day, but have kept the
charge of the commandment of the LORD your God. \bibleverse{4} And now
the LORD your God hath given rest unto your brethren, as he promised
them: therefore now return ye, and get you unto your tents, and unto the
land of your possession, which Moses the servant of the LORD gave you on
the other side Jordan. \bibleverse{5} But take diligent heed to do the
commandment and the law, which Moses the servant of the LORD charged
you, to love the LORD your God, and to walk in all his ways, and to keep
his commandments, and to cleave unto him, and to serve him with all your
heart and with all your soul. \bibleverse{6} So Joshua blessed them, and
sent them away: and they went unto their tents.
\bibleverse{7} Now to the one half of the tribe of Manasseh Moses had
given possession in Bashan: but unto the other half thereof gave Joshua
among their brethren on this side Jordan westward. And when Joshua sent
them away also unto their tents, then he blessed them, \bibleverse{8}
And he spake unto them, saying, Return with much riches unto your tents,
and with very much cattle, with silver, and with gold, and with brass,
and with iron, and with very much raiment: divide the spoil of your
enemies with your brethren.
\bibleverse{9} And the children of Reuben and the children of Gad and
the half tribe of Manasseh returned, and departed from the children of
Israel out of Shiloh, which is in the land of Canaan, to go unto the
country of Gilead, to the land of their possession, whereof they were
possessed, according to the word of the LORD by the hand of Moses.
\bibleverse{10} And when they came unto the borders of Jordan, that are
in the land of Canaan, the children of Reuben and the children of Gad
and the half tribe of Manasseh built there an altar by Jordan, a great
altar to see to.
\bibleverse{11} And the children of Israel heard say, Behold, the
children of Reuben and the children of Gad and the half tribe of
Manasseh have built an altar over against the land of Canaan, in the
borders of Jordan, at the passage of the children of Israel.
\bibleverse{12} And when the children of Israel heard of it, the whole
congregation of the children of Israel gathered themselves together at
Shiloh, to go up to war against them. \bibleverse{13} And the children
of Israel sent unto the children of Reuben, and to the children of Gad,
and to the half tribe of Manasseh, into the land of Gilead, Phinehas the
son of Eleazar the priest, \bibleverse{14} And with him ten princes, of
each chief house a prince throughout all the tribes of Israel; and each
one was an head of the house of their fathers among the thousands of
Israel.\footnote{\textbf{22:14} chief\ldots: Heb. house of the father}
\bibleverse{15} And they came unto the children of Reuben, and to the
children of Gad, and to the half tribe of Manasseh, unto the land of
Gilead, and they spake with them, saying, \bibleverse{16} Thus saith the
whole congregation of the LORD, What trespass is this that ye have
committed against the God of Israel, to turn away this day from
following the LORD, in that ye have builded you an altar, that ye might
rebel this day against the LORD? \bibleverse{17} Is the iniquity of Peor
too little for us, from which we are not cleansed until this day,
although there was a plague in the congregation of the LORD,
\bibleverse{18} But that ye must turn away this day from following the
LORD? and it will be, seeing ye rebel to day against the LORD, that to
morrow he will be wroth with the whole congregation of Israel.
\bibleverse{19} Notwithstanding, if the land of your possession be
unclean, then pass ye over unto the land of the possession of the LORD,
wherein the LORD's tabernacle dwelleth, and take possession among us:
but rebel not against the LORD, nor rebel against us, in building you an
altar beside the altar of the LORD our God. \bibleverse{20} Did not
Achan the son of Zerah commit a trespass in the accursed thing, and
wrath fell on all the congregation of Israel? and that man perished not
alone in his iniquity.
\bibleverse{21} Then the children of Reuben and the children of Gad and
the half tribe of Manasseh answered, and said unto the heads of the
thousands of Israel, \bibleverse{22} The LORD God of gods, the LORD God
of gods, he knoweth, and Israel he shall know; if it be in rebellion, or
if in transgression against the LORD, (save us not this day,)
\bibleverse{23} That we have built us an altar to turn from following
the LORD, or if to offer thereon burnt offering or meat offering, or if
to offer peace offerings thereon, let the LORD himself require it;
\bibleverse{24} And if we have not rather done it for fear of this
thing, saying, In time to come your children might speak unto our
children, saying, What have ye to do with the LORD God of
Israel?\footnote{\textbf{22:24} In time\ldots: Heb. To morrow}
\bibleverse{25} For the LORD hath made Jordan a border between us and
you, ye children of Reuben and children of Gad; ye have no part in the
LORD: so shall your children make our children cease from fearing the
LORD. \bibleverse{26} Therefore we said, Let us now prepare to build us
an altar, not for burnt offering, nor for sacrifice: \bibleverse{27} But
that it may be a witness between us, and you, and our generations after
us, that we might do the service of the LORD before him with our burnt
offerings, and with our sacrifices, and with our peace offerings; that
your children may not say to our children in time to come, Ye have no
part in the LORD. \bibleverse{28} Therefore said we, that it shall be,
when they should so say to us or to our generations in time to come,
that we may say again, Behold the pattern of the altar of the LORD,
which our fathers made, not for burnt offerings, nor for sacrifices; but
it is a witness between us and you. \bibleverse{29} God forbid that we
should rebel against the LORD, and turn this day from following the
LORD, to build an altar for burnt offerings, for meat offerings, or for
sacrifices, beside the altar of the LORD our God that is before his
tabernacle.
\bibleverse{30} And when Phinehas the priest, and the princes of the
congregation and heads of the thousands of Israel which were with him,
heard the words that the children of Reuben and the children of Gad and
the children of Manasseh spake, it pleased them.\footnote{\textbf{22:30}
pleased\ldots: Heb. was good in their eyes} \bibleverse{31} And
Phinehas the son of Eleazar the priest said unto the children of Reuben,
and to the children of Gad, and to the children of Manasseh, This day we
perceive that the LORD is among us, because ye have not committed this
trespass against the LORD: now ye have delivered the children of Israel
out of the hand of the LORD.\footnote{\textbf{22:31} now: Heb. then}
\bibleverse{32} And Phinehas the son of Eleazar the priest, and the
princes, returned from the children of Reuben, and from the children of
Gad, out of the land of Gilead, unto the land of Canaan, to the children
of Israel, and brought them word again. \bibleverse{33} And the thing
pleased the children of Israel; and the children of Israel blessed God,
and did not intend to go up against them in battle, to destroy the land
wherein the children of Reuben and Gad dwelt. \bibleverse{34} And the
children of Reuben and the children of Gad called the altar Ed: for it
shall be a witness between us that the LORD is God.\footnote{\textbf{22:34}
Ed: that is, A witness}
\hypertarget{section-22}{%
\section{23}\label{section-22}}
\bibleverse{1} And it came to pass a long time after that the LORD had
given rest unto Israel from all their enemies round about, that Joshua
waxed old and stricken in age.\footnote{\textbf{23:1} stricken\ldots:
Heb. come into days} \bibleverse{2} And Joshua called for all Israel,
and for their elders, and for their heads, and for their judges, and for
their officers, and said unto them, I am old and stricken in age:
\bibleverse{3} And ye have seen all that the LORD your God hath done
unto all these nations because of you; for the LORD your God is he that
hath fought for you. \bibleverse{4} Behold, I have divided unto you by
lot these nations that remain, to be an inheritance for your tribes,
from Jordan, with all the nations that I have cut off, even unto the
great sea westward.\footnote{\textbf{23:4} westward: Heb. at the sunset}
\bibleverse{5} And the LORD your God, he shall expel them from before
you, and drive them from out of your sight; and ye shall possess their
land, as the LORD your God hath promised unto you. \bibleverse{6} Be ye
therefore very courageous to keep and to do all that is written in the
book of the law of Moses, that ye turn not aside therefrom to the right
hand or to the left; \bibleverse{7} That ye come not among these
nations, these that remain among you; neither make mention of the name
of their gods, nor cause to swear by them, neither serve them, nor bow
yourselves unto them: \bibleverse{8} But cleave unto the LORD your God,
as ye have done unto this day.\footnote{\textbf{23:8} But cleave\ldots:
or, For if ye will cleave, etc} \bibleverse{9} For the LORD hath
driven out from before you great nations and strong: but as for you, no
man hath been able to stand before you unto this day.\footnote{\textbf{23:9}
For the LORD\ldots: or, Then the LORD will drive} \bibleverse{10} One
man of you shall chase a thousand: for the LORD your God, he it is that
fighteth for you, as he hath promised you.
\bibleverse{11} Take good heed therefore unto yourselves, that ye love
the LORD your God.\footnote{\textbf{23:11} yourselves: Heb. your souls}
\bibleverse{12} Else if ye do in any wise go back, and cleave unto the
remnant of these nations, even these that remain among you, and shall
make marriages with them, and go in unto them, and they to you:
\bibleverse{13} Know for a certainty that the LORD your God will no more
drive out any of these nations from before you; but they shall be snares
and traps unto you, and scourges in your sides, and thorns in your eyes,
until ye perish from off this good land which the LORD your God hath
given you. \bibleverse{14} And, behold, this day I am going the way of
all the earth: and ye know in all your hearts and in all your souls,
that not one thing hath failed of all the good things which the LORD
your God spake concerning you; all are come to pass unto you, and not
one thing hath failed thereof. \bibleverse{15} Therefore it shall come
to pass, that as all good things are come upon you, which the LORD your
God promised you; so shall the LORD bring upon you all evil things,
until he have destroyed you from off this good land which the LORD your
God hath given you. \bibleverse{16} When ye have transgressed the
covenant of the LORD your God, which he commanded you, and have gone and
served other gods, and bowed yourselves to them; then shall the anger of
the LORD be kindled against you, and ye shall perish quickly from off
the good land which he hath given unto you.
\hypertarget{section-23}{%
\section{24}\label{section-23}}
\bibleverse{1} And Joshua gathered all the tribes of Israel to Shechem,
and called for the elders of Israel, and for their heads, and for their
judges, and for their officers; and they presented themselves before
God. \bibleverse{2} And Joshua said unto all the people, Thus saith the
LORD God of Israel, Your fathers dwelt on the other side of the flood in
old time, even Terah, the father of Abraham, and the father of Nachor:
and they served other gods. \bibleverse{3} And I took your father
Abraham from the other side of the flood, and led him throughout all the
land of Canaan, and multiplied his seed, and gave him Isaac.
\bibleverse{4} And I gave unto Isaac Jacob and Esau: and I gave unto
Esau mount Seir, to possess it; but Jacob and his children went down
into Egypt. \bibleverse{5} I sent Moses also and Aaron, and I plagued
Egypt, according to that which I did among them: and afterward I brought
you out. \bibleverse{6} And I brought your fathers out of Egypt: and ye
came unto the sea; and the Egyptians pursued after your fathers with
chariots and horsemen unto the Red sea. \bibleverse{7} And when they
cried unto the LORD, he put darkness between you and the Egyptians, and
brought the sea upon them, and covered them; and your eyes have seen
what I have done in Egypt: and ye dwelt in the wilderness a long season.
\bibleverse{8} And I brought you into the land of the Amorites, which
dwelt on the other side Jordan; and they fought with you: and I gave
them into your hand, that ye might possess their land; and I destroyed
them from before you. \bibleverse{9} Then Balak the son of Zippor, king
of Moab, arose and warred against Israel, and sent and called Balaam the
son of Beor to curse you: \bibleverse{10} But I would not hearken unto
Balaam; therefore he blessed you still: so I delivered you out of his
hand. \bibleverse{11} And ye went over Jordan, and came unto Jericho:
and the men of Jericho fought against you, the Amorites, and the
Perizzites, and the Canaanites, and the Hittites, and the Girgashites,
the Hivites, and the Jebusites; and I delivered them into your hand.
\bibleverse{12} And I sent the hornet before you, which drave them out
from before you, even the two kings of the Amorites; but not with thy
sword, nor with thy bow. \bibleverse{13} And I have given you a land for
which ye did not labour, and cities which ye built not, and ye dwell in
them; of the vineyards and oliveyards which ye planted not do ye eat.
\bibleverse{14} Now therefore fear the LORD, and serve him in sincerity
and in truth: and put away the gods which your fathers served on the
other side of the flood, and in Egypt; and serve ye the LORD.
\bibleverse{15} And if it seem evil unto you to serve the LORD, choose
you this day whom ye will serve; whether the gods which your fathers
served that were on the other side of the flood, or the gods of the
Amorites, in whose land ye dwell: but as for me and my house, we will
serve the LORD. \bibleverse{16} And the people answered and said, God
forbid that we should forsake the LORD, to serve other gods;
\bibleverse{17} For the LORD our God, he it is that brought us up and
our fathers out of the land of Egypt, from the house of bondage, and
which did those great signs in our sight, and preserved us in all the
way wherein we went, and among all the people through whom we passed:
\bibleverse{18} And the LORD drave out from before us all the people,
even the Amorites which dwelt in the land: therefore will we also serve
the LORD; for he is our God. \bibleverse{19} And Joshua said unto the
people, Ye cannot serve the LORD: for he is an holy God; he is a jealous
God; he will not forgive your transgressions nor your sins.\footnote{\textbf{24:19}
holy God: Heb. holy Gods} \bibleverse{20} If ye forsake the LORD, and
serve strange gods, then he will turn and do you hurt, and consume you,
after that he hath done you good. \bibleverse{21} And the people said
unto Joshua, Nay; but we will serve the LORD. \bibleverse{22} And Joshua
said unto the people, Ye are witnesses against yourselves that ye have
chosen you the LORD, to serve him. And they said, We are witnesses.
\bibleverse{23} Now therefore put away, said he, the strange gods which
are among you, and incline your heart unto the LORD God of Israel.
\bibleverse{24} And the people said unto Joshua, The LORD our God will
we serve, and his voice will we obey. \bibleverse{25} So Joshua made a
covenant with the people that day, and set them a statute and an
ordinance in Shechem.
\bibleverse{26} And Joshua wrote these words in the book of the law of
God, and took a great stone, and set it up there under an oak, that was
by the sanctuary of the LORD. \bibleverse{27} And Joshua said unto all
the people, Behold, this stone shall be a witness unto us; for it hath
heard all the words of the LORD which he spake unto us: it shall be
therefore a witness unto you, lest ye deny your God. \bibleverse{28} So
Joshua let the people depart, every man unto his inheritance.
\bibleverse{29} And it came to pass after these things, that Joshua the
son of Nun, the servant of the LORD, died, being an hundred and ten
years old. \bibleverse{30} And they buried him in the border of his
inheritance in Timnath-serah, which is in mount Ephraim, on the north
side of the hill of Gaash. \bibleverse{31} And Israel served the LORD
all the days of Joshua, and all the days of the elders that overlived
Joshua, and which had known all the works of the LORD, that he had done
for Israel.\footnote{\textbf{24:31} overlived\ldots: Heb. prolonged
their days after Joshua}
\bibleverse{32} And the bones of Joseph, which the children of Israel
brought up out of Egypt, buried they in Shechem, in a parcel of ground
which Jacob bought of the sons of Hamor the father of Shechem for an
hundred pieces of silver: and it became the inheritance of the children
of Joseph.\footnote{\textbf{24:32} pieces\ldots: or, lambs} \bibleverse{33} And Eleazar
the son of Aaron died; and they buried him in a hill that pertained to
Phinehas his son, which was given him in mount Ephraim.
|
-- Term matching.
module Tactic.Reflection.Match where
open import Prelude
open import Builtin.Reflection
open import Tactic.Reflection.Equality
open import Control.Monad.Zero
open import Control.Monad.State
open import Container.Traversable
private
M : Nat → Set → Set
M n = StateT (Vec (Maybe Term) n) Maybe
fN : ∀ {n} (i : Nat) → IsTrue (lessNat i n) → Fin n
fN i lt = fromNat i {{lt}}
patVar : ∀ {n} → Nat → Nat → Maybe (Fin n)
patVar {n} zero i with lessNat i n | fN {n} i
... | true | toFin = just (toFin true)
... | false | _ = nothing
patVar (suc k) zero = nothing
patVar (suc k) (suc i) = patVar k i
upd : ∀ {a} {A : Set a} {n} → Fin n → A → Vec A n → Vec A n
upd zero x (_ ∷ xs) = x ∷ xs
upd (suc i) x (y ∷ xs) = y ∷ upd i x xs
matchVar : ∀ {n} → Fin n → Term → M n ⊤
matchVar i v =
caseM flip indexVec i <$> get of λ
{ (just u) → guard (u == v) (pure _)
; nothing → _ <$ modify (upd i (just v)) }
MatchFun : Set → Set
MatchFun A = ∀ {n} → Nat → A → A → M n ⊤
matchTerm matchTerm′ : MatchFun Term
matchArgs : MatchFun (List (Arg Term))
matchArg : MatchFun (Arg Term)
matchAbs : MatchFun (Abs Term)
matchTerm k (var i []) v with patVar k i
... | just x = matchVar x v
... | nothing = matchTerm′ k (var i []) v
matchTerm k p v = matchTerm′ k p v
matchTerm′ {n} k (var i args) (var j args₁) =
guard! (j <? k && isYes (i == j) || j ≥? k && isYes (j + n == i))
(matchArgs k args args₁)
matchTerm′ k (con c args) (con c₁ args₁) = guard (c == c₁) (matchArgs k args args₁)
matchTerm′ k (def f args) (def g args₁) = guard (f == g) (matchArgs k args args₁)
matchTerm′ k (lam h p) (lam h₁ v) = guard (h == h₁) (matchAbs k p v)
matchTerm′ k (pat-lam cs args) v = empty -- todo
matchTerm′ k (pi a b) (pi a₁ b₁) = matchArg k a a₁ >> matchAbs k b b₁
matchTerm′ k (lit l) (lit l₁) = guard (l == l₁) (pure _)
matchTerm′ k (agda-sort _) _ = pure _ -- ignore sorts
matchTerm′ k (meta _ _) _ = pure _
matchTerm′ k unknown _ = pure _
matchTerm′ k p v = empty
matchArgs k (x ∷ xs) (y ∷ ys) = matchArg k x y >> matchArgs k xs ys
matchArgs k [] [] = pure _
matchArgs k _ _ = empty
matchAbs k (abs _ x) (abs _ y) = matchTerm (suc k) x y
matchArg k (arg i x) (arg j y) = guard (i == j) (matchTerm k x y)
-- match |Δ| p v = just σ
-- where Γ, Δ ⊢ p
-- Γ ⊢ v
-- Γ ⊢ σ : Δ
-- p σ ≡ v
match : (n : Nat) → Term → Term → Maybe (Vec Term n)
match n pat v = do
env ← snd <$> runStateT (matchTerm 0 pat v) (pure nothing ofType Vec (Maybe Term) n)
traverse id env
|
%---------------------------------------------------------------------------------
\chapter{Fitzhugh-Nagumo Model Example}
\label{chap:fitzhugh-nagumo}
%---------------------------------------------------------------------------------
\section{Background}
\label{sec:background}
We will look into the Fitzhugh-Nagumo model as an application of the Python package. The Fitzhugh-Nagumo model describes an excitable system, such as the action potential of cardiac cells. The action potential was first described by Hodgkin and Huxley. Their model was later simplified into the Fitzhugh-Nagumo model, which retains the fast-slow phase and the excitability of the Hodgkin \& Huxley model. \cite{Keener2009}
This model is relevant to my D.Phil.~project, which concerns the action potential of cardiac muscle cells, an excitable system. I will be working on sodium ion channels of cardiac muscle cells, studying the effect of the flow of sodium ions across the cell membrane on the cell's action potential. Moreover, the Fitzhugh-Nagumo model captures the important features of an action potential, namely the excitability and the fast-slow phase. It is therefore a good simple model to start with.
\section{Fitzhugh-Nagumo model}
\label{sec:FHN}
The Fitzhugh-Nagumo model is defined as
\begin{align}
\label{eqn:FHN}
\epsilon \frac{dv}{dt} &= f(v) - w + I_{app} \\
\frac{dw}{dt} &= v - \gamma w \label{eqn:FHN-end}
\end{align}
where $f(v) = v(1-v)(v-\alpha)$, $0 < \alpha < 1$, $\epsilon \ll 1$, $I_{app}$ is the applied current and $t$ is time. The fast variable $v$ is the excitation variable, while the slow variable $w$ is the recovery variable.
In this implementation, the parameters are chosen to be $\alpha = 0.1$, $\gamma = 0.5$, $\epsilon = 0.01$ and $I_{app} = 0.026$, following \cite{Chapwanya2018}.
The initial values are taken near the origin, at $(v_0, w_0) = (0.01, 0.01)$, and the model is solved for time $t$ from 0 to 1.
The Fitzhugh-Nagumo model is solved with the various numerical methods implemented in the software that I have developed. The solutions of the model are shown in a notebook at the link: \href{https://nbviewer.jupyter.org/github/FarmHJ/numerical-solver/blob/main/examples/fitzhugh_nagumo.ipynb}{\underline{\emph{Fitzhugh-Nagumo model notebook}}}.
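As a point of reference for the discussion below, a forward (explicit) Euler discretisation of Eqs.~\eqref{eqn:FHN}--\eqref{eqn:FHN-end} with step size $\Delta t$ can be written as follows; the notation here is illustrative only, and the actual implementation is the one in the solver package.
\begin{align*}
v_{k+1} &= v_k + \frac{\Delta t}{\epsilon} \left( f(v_k) - w_k + I_{app} \right), \\
w_{k+1} &= w_k + \Delta t \left( v_k - \gamma w_k \right).
\end{align*}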
\begin{figure}
\includegraphics[width=0.95\columnwidth]{FHN_Euler_explicit}
\caption{(\textit{A}) Phase plane of $v$ and $w$ by Euler's explicit method. (\textit{B}) Graph of $v$ and $w$ against time by Euler's explicit method.}
\label{fig:FHN_Euler_explicit}
\end{figure}
From Figure \ref{fig:FHN_Euler_explicit}B, we can see that $v$, the excitation variable, is excited at an early stage. While $v$ increases significantly, the change in $w$ is small. After $v$ reaches its peak and starts to decrease, $w$ increases slowly. This can be observed in both the phase plane (Figure \ref{fig:FHN_Euler_explicit}A) and the time-course plot (Figure \ref{fig:FHN_Euler_explicit}B). When $v$ starts to recover towards its original value, $w$ is at its maximum. The scattering of points in the phase plane reflects a key feature of the model: the change in $v$ is rapid while the change in $w$ is slow. Where the change in $v$ is significantly larger than the change in $w$, the points are sparse; conversely, the points are densely packed where $w$ changes more than $v$. For the adaptive methods, however, such insights cannot be read directly from the phase plane, because the step size itself varies. Therefore, green triangles are plotted in Figure \ref{fig:FHN_adaptive}B to indicate the adapted mesh points. The mesh points cluster where $v$ or $w$ changes strongly over a short period of time.
\begin{figure}
\includegraphics[width=0.95\columnwidth]{FHN_adaptive}
\caption{(\textit{A}) Phase plane of $v$ and $w$ for the adaptive method BS23. (\textit{B}) Graph of $v$ and $w$ against time for the adaptive method BS23.}
\label{fig:FHN_adaptive}
\end{figure}
\section{Convergence of Fitzhugh-Nagumo model}
\label{sec:FHN-convergence}
A notebook (accessible from the link: \href{https://nbviewer.jupyter.org/github/FarmHJ/numerical-solver/blob/main/examples/fhn_model_convergence.ipynb}{\underline{\emph{Fitzhugh-Nagumo convergence notebook}}}) was created to test the convergence of the solution of the Fitzhugh-Nagumo model. Since the model has no analytical solution, the numerical solutions are tested against a reference solution, which is assumed to be sufficiently accurate. For methods with a fixed step size, i.e.\ the one-step methods and the predictor-corrector method, the reference solution is constructed using a much smaller step size of $10^{-7}$, compared to $10^{-5}$, the smallest step size used for the other numerical solutions. For methods with an adaptive step size, the reference solution is obtained using a much smaller tolerance value of $10^{-8}$, compared to $10^{-5}$, the smallest tolerance value used for the other numerical solutions. The notebook shows the numerical solution computed by the different methods at different step sizes or tolerance values. The numerical solutions are then compared with their respective reference solutions. In both cases, the error decreases as the step size or the tolerance value decreases, as shown in Figure \ref{fig:Euler_explicit_error} and Figure \ref{fig:adaptive_error}.
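For orientation (this is the standard textbook expectation rather than a result taken from the notebook), a fixed-step method of order $p$ has a global error at a fixed time that scales approximately as
\begin{equation*}
E(\Delta t) \approx C \, \Delta t^{\,p},
\end{equation*}
so Euler's explicit method ($p = 1$) should show an error roughly proportional to the step size, which is the behaviour against which Figure \ref{fig:Euler_explicit_error} can be read.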
\begin{figure}
\includegraphics[width=0.95\columnwidth]{FHN_Euler_explicit_error_behaviour}
\caption{Error at a mesh point for Euler's explicit method. The Fitzhugh-Nagumo model Eqs.~\eqref{eqn:FHN}-\eqref{eqn:FHN-end} is solved with Euler's explicit method at different step sizes. The error is the absolute difference between the reference solution and the numerical solution at $t=0.7$. The reference solution is computed with a step size of $10^{-7}$. Note that $\log(10^{-7})\simeq-16.118$.}
\label{fig:Euler_explicit_error}
\end{figure}
\begin{figure}
\includegraphics[width=0.95\columnwidth]{FHN_adaptive_error_behaviour}
\caption{Sum of errors for the adaptive method BS23. The Fitzhugh-Nagumo model Eqs.~\eqref{eqn:FHN}-\eqref{eqn:FHN-end} is solved with the BS23 method at different absolute tolerance values. The relative tolerance is fixed at $10^{-15}$. The error is the sum of the absolute differences between the reference solution and the numerical solution at all mesh points. The reference solution is computed with an absolute tolerance of $10^{-8}$. Note that $\log(10^{-8})\simeq-18.421$.}
\label{fig:adaptive_error}
\end{figure} |
Formal statement is: lemma (in topological_space) filterlim_within_subset: "filterlim f l (at x within S) \<Longrightarrow> T \<subseteq> S \<Longrightarrow> filterlim f l (at x within T)" Informal statement is: If $f$ converges to $l$ at $x$ within $S$, then $f$ converges to $l$ at $x$ within $T$ for any $T \subseteq S$. |
Formal statement is: lemma has_contour_integral_unique: "(f has_contour_integral i) g \<Longrightarrow> (f has_contour_integral j) g \<Longrightarrow> i = j" Informal statement is: If a function $f$ has a contour integral $i$ along a contour $g$, and $f$ has a contour integral $j$ along the same contour $g$, then $i = j$. |
-- -------------------------------------------------------------- [ Lens.idr ]
-- Description : Idris port of Control.Lens
-- Copyright : (c) Huw Campbell
-- --------------------------------------------------------------------- [ EOH ]
module Control.Lens.Tuple
import Control.Lens.Types
import Control.Lens.Lens
import Data.Profunctor
import Data.Bitraversable
%default total
--
-- Note: Tuples in Idris are nested `Pair`s, so these don't currently act like
-- their Haskell counterparts, e.g., `_2` on `(1,2,3)` will focus on `(2,3)`.
--
public export
fst_ : Lens (a,c) (b,c) a b
fst_ = lens (\(a,_) => a)
(\(_,c),b => (b,c))
public export
snd_ : Lens (c,a) (c,b) a b
snd_ = lens (\(_,a) => a)
(\(c,_),b => (c,b))
public export
both : Bitraversable r => Traversal (r a a) (r b b) a b
both (Mor f) = Mor (bitraverse f f)
-- --------------------------------------------------------------------- [ EOF ]
|
ICB’s innovative green roofing solution, EvaGreen, has played its part in the radical, award-winning Clay Fields housing project in Suffolk, providing the ultimate sustainable roof.
The ambitious housing development in Elmswell, Suffolk, comprises 26 homes and four storage buildings that focus heavily on unique and effective sustainable strategies and lifetime energy use. Since completion, it has picked up a Housing Design Award, as well as being shortlisted for the Sustainable Housing Awards. |
#' ---
#' title: "Introduction to R"
#' author:
#' - "Milutin Pejovic, Petar Bursac"
#' date: "`r format(Sys.time(), '%d %B %Y')`"
#' output:
#' html_document:
#' keep_md: true
#' theme: "simplex"
#' highlight: tango
#' toc: true
#' toc_depth: 5
#' toc_float: true
#' fig_caption: yes
#' ---
#'
#' # Data structures
#'
#' We begin by recalling the basic data structures in R.
#'
#+ echo = FALSE, warning = FALSE, message = FALSE, fig.width = 10, fig.height = 8, fig.align='center'
knitr::include_graphics("Figures/strukture_podataka.jpg")
#'
#' # Basic data types and classes in R
#'
#'
#'
#' ## Atomic vectors
#'
v1 <- c(1, 2, 3, 4, 5, 6)
v2 <- 1:6
is.vector(v1)
#' ### Data types
#'
#' R distinguishes six basic data types: *doubles, integers, characters, logicals, complex, and raw*. The type is usually known to the author of the code, but if you need to find out which type is stored in a given vector, you can do so with the `typeof` command:
#'
#' #### Double
#'
typeof(v1)
#'
#' #### Integer
#'
v3 <- c(1L, 2L, 3L, 4L, 5L, 6L)
typeof(v3)
#'
#' #### Characters
#'
v4 <- c("Milutin", "Vojkan", "Petar")
typeof(v4)
prof_name <- paste("Milutin", "Pejovic", sep = " ")
prof_name
v5 <- paste(c("Milutin", "Vojkan", "Petar"), "GRF", sep = "_")
typeof(v5)
#'
#'
#' #### Logicals
#'
v6 <-c(TRUE, FALSE, TRUE)
typeof(v6)
v7 <-c(T, F, T)
v7
typeof(v7)
#'
#' #### Missing values (`NA`)
#'
#' Missing or unknown values in R are represented by `NA`. Keep in mind that a missing value is not a zero value; any operation involving a missing value also results in a missing value.
#'
#+ include = TRUE
1 > 5
NA > 5
10 * NA
#'
#' The only exceptions are the following operations:
#'
#+ include = TRUE
NA ^ 0
NA | TRUE # the | operator means "or"
NA & FALSE
#'
#'
#' #### Checking the data type
#'
#' The data type can be checked by calling one of the functions from the `is.*()` family, such as `is.logical()`, `is.integer()`, `is.double()`, or `is.character()`. There are also functions such as `is.vector()`, `is.atomic()`, or `is.numeric()`, but they do not serve this purpose.
#'
#+
is.numeric(v1)
is.integer(v1)
is.logical(v1)
#'
#'
#' #### Coercion between data types
#'
#' As is well known, a vector, matrix or array is a data structure that can hold only one type of data. Consequently, if two different types end up in the same vector, R coerces the values according to its built-in precedence rules (`character → double → integer → logical`).
#'
#+ echo = FALSE, warning = FALSE, message = FALSE, fig.width = 10, fig.height = 10, fig.align='center'
knitr::include_graphics("Figures/prinudna_promena.jpg")
#
str(c("a", 1)) # komanda `str` pokazuje strukturu podataka
#'
#' ## Attributes
#'
#' ### `Names`
#'
names(v1) <-c("jedan", "dva", "tri", "cetiri", "pet", "sest")
v1+1 # the names are carried along, but they are not tied to the values they label
names(v1) <- NULL # removing the attribute
#'
#' ### `Dim` (dimensions)
#'
#' A vector can be transformed into a two-dimensional data structure - a matrix - by assigning appropriate dimensions with the `dim` command:
#'
#+ include = TRUE
dim(v1) <- c(2, 3)
v1
#'
#' In the same way, a vector can be transformed into an `array`:
#'
#+ include = TRUE
v1 <- 1:6
dim(v1) <- c(1, 2, 3)
v1
#'
#'
#'
#'
#' ### Data classes
#'
#' One of the most important attributes attached to the basic data structures in R is the `class`, which underpins the object-oriented approach to structuring data known as `S3` classes. R supports several object-oriented systems, such as `S3`, `S4` and `R6`. `S3` is the basic system and is supported by the base installation of R.
#' By adding a `class` attribute, an R object becomes an `S3` object, and this determines how certain basic functions (`generic functions`) treat the object. In other words, the result of an operation depends on the class of the data.
#'
#' In this section, we consider three basic classes of vector data:
#'
#' #### `factors`
#'
#' A factor is a vector that can contain only a fixed number of predefined values and is used for storing categorical variables. Internally, a factor vector contains integer values, each of which is assigned a name, i.e. a level (`level`). For example:
#'
v7 <- factor(c("a", "b", "b", "a"))
v7
typeof(v7)
attributes(v7)
#' A factor vector is convenient for grouping data, which allows us to analyse the data according to the group it belongs to; a small illustration follows below.
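#'
#' As a small illustration (base R only): `table()` counts the occurrences of each level, and `tapply()` applies a function to the values within each group.
#'
table(v7)
tapply(c(10, 20, 30, 40), v7, mean) # mean of the numeric values within each level of v7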
#'
#'
#' #### `Dates` vectors
#'
#' `Dates` vectors contain time information at daily resolution and are built on top of `double` values. They represent the number of days since `1970-01-01`.
#'
v8 <- Sys.Date()
typeof(v8)
attributes(v8)
as.numeric(v8)
v9 <- as.Date("1983-03-30")
v9
as.numeric(v9)
#'
#' #### `Date-time` vectors
#'
#' Within the `S3` class system, R supports two ways of storing date-time information: POSIXct and POSIXlt. POSIX stands for Portable Operating System Interface, a family of standards for exchanging time information. `ct` stands for `calendar time` and `lt` for `local time`. A POSIXct vector is built on top of a `double` vector and represents the number of seconds since `1970-01-01`.
#'
v10 <- as.POSIXct("2020-11-04 10:00", tz = "UTC")
typeof(v10)
attributes(v10)
#'
#'
#'
#' # Setting the working directory
#'
#' If a script needs to be tied to a particular set of data located in a particular folder, it is often necessary to define a working directory. This effectively defines the `default` path used by all functions that take a folder path as an argument, unless configured otherwise. The working directory is set by calling `setwd()`.
#'
#?setwd()
#setwd(dir = "C:/R_projects/Nauka_R/Slides")
#'
#' If you need to check what the current path is, i.e. the current working directory, you can do so by calling `getwd()`.
#'
#getwd()
#'
#' Listing the files located in a directory is done by calling `list.files()` (note that `ls()`, by contrast, lists the objects in the current R environment rather than files on disk).
#'
#' > <h3>Exercise 1</h3>
#' > + Set the working directory.
#' > + List all the files located in the working directory.
#'
#' Setting the working directory is useful when we consistently work from a single directory containing scripts, data and other required files. We can then use relative paths to sub-folders where needed; otherwise a given path is interpreted as absolute. A minimal sketch follows below.
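#'
#' A minimal sketch (the path below is only a placeholder taken from the examples in this document; adjust it to your own machine):
#'
#+ eval = FALSE
setwd("C:/R_projects/Nauka_R/Slides") # set the working directory
getwd() # check the current working directory
list.files() # list the files in the working directory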
#'
#'
#' # Loading data into R
#'
#' Data are loaded into the working environment with functions that take the path to the data and, where needed, the data format, which does not always have to be stated explicitly. Some of the basic functions are:
#'
#'
#+ include = TRUE
studenti <- read.table(file = "C:/R_projects/Nauka_R/Slides/data/Students_IG1.txt", sep = ",", header = TRUE)
studenti <- read.csv(file = "C:/R_projects/Nauka_R/Slides/data/Students_IG1.txt", header = TRUE, stringsAsFactors = FALSE)
#'
#' ## The `readxl` package
#'
#' Excel spreadsheets can be loaded using the "readxl" package:
#+ eval = FALSE
install.packages("readxl")
library(readxl)
studenti <- readxl::read_xlsx(path = "C:/R_projects/Nauka_R/Slides/data/Students_IG1.xlsx", sheet = "Students")
#'
#'
#'
#' ## Inspecting the data
#'
str(studenti) # note whether the students' names are stored as factor columns in the data.frame
#' If we do not want certain columns to be converted to factors on loading, the `stringsAsFactors` option must be set to `FALSE`.
#'
class(studenti)
head(studenti, 5)
tail(studenti, 5)
dim(studenti)
#'
#'
#'
#' ## Selecting data
#'
#' R has a dedicated indexing notation for accessing the values of an object. To access a data point or a set of data (row(s) and/or column(s)), the following notation with [] brackets is used:
# studenti[ , ]
#' Inside the brackets, two indices are written separated by a comma: the first gives the **row** number and the second the **column** number. The indices can be written in several ways:
#'
#' - Positive integers
#' - Negative integers
#' - Zero
#' - Blank space
#' - Logical values
#' - Names
#'
#' #### Positive integers
#'
studenti[1, ]
studenti[, 2]
studenti[1, 2]
#' This selects the first row and the second column. Besides a single value, we can also select data with a set of indices.
studenti[1, c(2,3)]
studenti[1, c(2:5)]
#' The result of such a query is only a display - a copy of the values. The result can be assigned to a new variable:
Boris <- studenti[1, c(1:14)]
Boris
#' The same notation is used with other data types, e.g. with vectors:
vec <- c(6, 1, 3, 6, 10, 5)
vec[1:3]
#' It is important to remember that indexing in R **always starts from 1**, unlike in some other programming languages, where it starts from 0.
#'
#' #### Negative integers
#' Negative values give the opposite result to positive integers: the result is everything except the elements named by the index:
studenti[-c(2:35), 2:5]
#' Combining positive and negative indices is possible, but a positive and a negative value cannot be mixed within the same index:
# studenti[-c(-1,1), 2]
# Error in x[i] : only 0's may be mixed with negative subscripts
studenti[1:5, -1]
#'
#' #### Zero
#' As mentioned, element indexing starts from 1; the index 0 is not an error, but the result is an object with no elements:
#'
studenti[0, 0]
#'
#' #### Blank space
#' Using a blank space - an empty index - yields a result containing all the elements of the given row or column:
#'
studenti[1, ]
#'
#' #### Logical values
#' An index can also be a logical value, in which case the result is the row and/or column corresponding to the value TRUE:
#'
studenti[1, c(FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE)]
#' A vector of logical values can be created with a logical query. That is, a logical query yields a vector of TRUE and FALSE values, which can be used to pick out the positions of the values we want to work with:
#'
generacija_2017 <- studenti$god.upisa == 2017
generacija_2017 # a vector of logical values
studenti[generacija_2017, ]
#'
#'
#' Logical operators are a very efficient way of selecting data, and in R they are defined as follows:
#'
#'
#' #### Boolean operators
#'
#' Boolean operators allow us to combine logical expressions. Bear in mind, however, that `NA` values can affect the result of an expression: any expression involving an `NA` value returns `NA`.
#'
#+ echo = FALSE, warning = FALSE, message = FALSE, fig.width = 10, fig.height = 8, fig.align='center'
knitr::include_graphics("Figures/logicki operatori.jpg")
#'
#'
#' For example, if we want to see which students scored 100 points in the June exam period and have a final grade of 9, we will not get the expected result, precisely because of the `NA` values:
#'
studenti[studenti$Jun == 100 & studenti$Ocena == 9, ]
studenti[!is.na(studenti$Jun) & !is.na(studenti$Ocena) & studenti$Jun == 100 & studenti$Ocena == 9, ]
#'
#'
#'
#' #### Names
#'
#' Data elements can also be selected by column and/or row names, if available:
names(studenti)
studenti[, "Prezime"]
studenti[1:5, c("Prezime", "Ime", "br.ind", "god.upisa")]
#' As well as by a combination of the above:
studenti[1:5, c(names(studenti[, c(2:5)]))]
#'
#' ### Selecting data with the $ syntax
#' The previous examples showed the basic way of selecting elements from a data set. The most commonly used way of selecting data, however, is the $ syntax.
#' Write the name of the object - the data frame - followed by the column name, separated by the "$" sign:
#'
studenti$Prezime
#' *Tip: after typing the "$" sign you can press the TAB key to get a list of column names.*
#' To query by row, add [] brackets and give the row index.
#'
studenti$Prezime[1]
studenti$Prezime[1:5]
#'
#'
#' ### Selecting data within a `list`
#'
#' Data within a list are selected using the `[]` and `[[]]` operators, or by the name of the list element.
#' For example, we will create a list of students with two elements: those who passed the practical and those who did not.
#'
praksa_list <- list(polozili = studenti[!is.na(studenti$Praksa), ], nisu_polozili = studenti[is.na(studenti$Praksa), ])
#'
#' If we select the first element of the list with the `[]` operator, the result is again a list!
#'
praksa_list[1]
class(praksa_list[1])
#'
#+ echo = FALSE, warning = FALSE, message = FALSE, fig.width = 10, fig.height = 7, fig.align='center'
knitr::include_graphics("Figures/selektovanje_liste.jpg")
#'
#' However, if we select the first element of the list with the `[[]]` operator, the result is a `data.frame`!
#'
praksa_list[[1]]
class(praksa_list[[1]])
#'
#' A list element can also be selected by giving the element's name inside the `[]` operator:
#'
praksa_list["polozili"]
praksa_list[["polozili"]]
#'
#'
praksa_list[1]
class(praksa_list[1])
#'
#' > <h3>Exercise</h3>
#' > + Create a list in which the students enrolled in the same year form one list element. For this purpose, use the command `list(first data.frame, second data.frame, third data.frame)`. One possible solution is sketched below.
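#'
#' One possible solution sketch (not the only approach): `split()` breaks a data.frame into a list according to the levels of a grouping column, which is equivalent to listing the data.frames by hand. The variable name below is arbitrary.
#'
studenti_po_godini <- split(studenti, studenti$god.upisa) # one list element per enrollment year
length(studenti_po_godini)
names(studenti_po_godini)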
#'
#'
#'
#'
#'
#' ## Modifying data
#'
#'
#' ### Changing values
#'
#' Modifying data means changing the value of some data point. To be able to change a value, we first have to specify the exact position of the value we want to change. For example, if we want to record the enrollment year as 2017 instead of 17, and 2016 instead of 16, we do it as follows:
studenti[studenti$god.upisa == 17, "god.upisa"] <- 2017
studenti[studenti$god.upisa == 16, "god.upisa"] <- 2016
studenti$god.upisa
#' If we want to assign all students who enrolled in 2017 a grade of 5 in IG1, we can do it as follows:
studenti[studenti$god.upisa == 2017, "Ocena"] <- 5
studenti <- read.csv(file = "C:/R_projects/Nauka_R/Slides/data/Students_IG1.txt", header = TRUE, stringsAsFactors = FALSE) # reload the original data before the next examples
#'
#' > <h3>Exercise</h3>
#' > + Change the final grade and the practical result for your own name by assigning a grade of 10. A possible sketch follows below.
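#'
#' A possible sketch (the name used here is only a placeholder; replace it with your own):
#'
#+ eval = FALSE
studenti[studenti$Ime == "Petar", c("Ocena", "Praksa")] <- 10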
#'
#' #### Modifying the data type
#'
str(studenti)
unique(studenti$god.upisa)
studenti$god.upisa <- factor(studenti$god.upisa, labels = c("2015", "2016", "2017"))
#' #### Modifying the order of the data
#'
#' If we want to order the rows of a `data.frame` by the values in some column, we can do so as follows:
#'
studenti[order(studenti$Ocena, studenti$Praksa),]
#'
#' #### Combining data
#'
#' Combining data refers to joining two tables. For this purpose, we will load the students' results (final grades) for the courses IG1, Praksa and IG2.
#'
ig1 <- readxl::read_xlsx(path = "C:/R_projects/Nauka_R/Slides/data/Students_IG1.xlsx", sheet = "Students")
head(ig1)
ig1 <- ig1[, c("Prezime", "Ime", "Ocena", "Praksa")] # Selektovali smo samo kolone koje nas zanimaju.
names(ig1) <- c("prezime", "ime", "ocena_ig1", "ocena_praksa")
ig2 <- readxl::read_xlsx(path = "C:/R_projects/Nauka_R/Slides/data/Students_IG2.xlsx", sheet = "Students")
head(ig2)
ig2 <- ig2[, c("Prezime", "Ime", "Ocena")] # keep only the columns of interest
names(ig2) <- c("prezime", "ime", "ocena_ig2")
#' The `cbind` command joins two tables by "gluing" the second onto the first. The result is a table in which the shared columns are repeated. A requirement for `cbind` is that the two tables have the same number of rows.
ig <- cbind(ig1, ig2)
ig
#' Analogously to `cbind`, the `rbind` command joins two tables by stacking one below the other. A requirement for `rbind` is that the two tables have the same number of columns.
#' If there are shared columns, it is preferable that they are not repeated. For that purpose we will use the `merge` command:
#'
ig <- merge(ig1, ig2, by = c("prezime", "ime"))
ig
#'
#'
#' ## Summarising
#'
#' Data can be summarised over rows and/or columns. There is a large number of functions, as well as packages with their own functions, for summarising according to particular rules. Some of the basic functions from the base package are:
#'
#' ### Base functions
summary(studenti)
mean(studenti$Ocena, na.rm = TRUE) # the "na.rm" parameter is used to ignore NA values in the data
min(studenti$kol.1, na.rm = TRUE)
max(studenti$kol.1, na.rm = TRUE)
median(studenti$Praksa, na.rm = T)
#'
#' ### The apply and lapply functions
#' The apply and lapply functions take a data.frame or a matrix as input and return a vector, a list or an array.
## ?apply()
## ?lapply()
#' apply can be used to summarise over all rows (the second argument of the function is 1) or over all columns (the second argument is 2).
#'
studenti_bez_na <- studenti
studenti_bez_na[is.na(studenti_bez_na)] <- 0
apply(studenti_bez_na[, 6:14], 2, mean)
#' lapply is used when we want to summarise each element of a list, or e.g. each column of a data.frame. The difference compared to apply is that the result is a list.
#'
lapply(studenti_bez_na[, 6:14], mean)
#' ### The colSums() and rowSums() functions
#'
colSums(studenti[, 6:14], na.rm = T)
rowSums(studenti[, 6:7], na.rm = T) # the result is the sum of the points from the first and second midterm
colMeans(studenti[, 6:14], na.rm = T) # note the difference between the column means obtained with apply and with colMeans: in the apply example the NA values were replaced by 0 (so the divisor is larger), whereas here we simply ignore the NA values
rowMeans(studenti[, 6:7], na.rm = T)
#'
#' ### The by() function
#' Using the by() function, we can easily summarise the data by a given factor column.
by(studenti, studenti[, 5], summary) # summary results by enrollment year
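#' As a brief alternative sketch (base R only): `aggregate()` computes a statistic of one column for each group defined by another.
#'
aggregate(Ocena ~ god.upisa, data = studenti, FUN = mean, na.rm = TRUE)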
#' ### Writing a simple function
#' In order to summarise data according to a particular rule, we can write a simple function:
#'
racunajSrednjuVrednost <- function(data_frame = data_frame, izbaciNA = TRUE){ # function arguments
sr_vrednost <- mean(data_frame$Ocena, na.rm = izbaciNA)
return(sr_vrednost)
}
racunajSrednjuVrednost(data_frame = studenti, izbaciNA = TRUE)
#'
#' > <h3>Exercise</h3>
#' > + Summarise the data and compute the total number of students who passed the first midterm.
#' > + Summarise the data and compute the total number of students who passed both midterms.
#' > + Summarise the data and compute how strongly the points from the first and second midterm correlate with the final grade.
#'
#' Solution:
#'
#' Exercise 1:
length(studenti_bez_na$kol.1[studenti_bez_na$kol.1 > 0])
#' Exercise 2:
length(studenti_bez_na$kol.1[studenti_bez_na$kol.1 > 0 & studenti_bez_na$kol.2 > 0])
#' Exercise 3:
cor(studenti_bez_na$kol.1, studenti_bez_na$Ocena)
plot(studenti_bez_na$kol.1, studenti_bez_na$Ocena)
cor(studenti_bez_na$kol.2, studenti_bez_na$Ocena)
plot(studenti_bez_na$kol.2, studenti_bez_na$Ocena)
#'
#'
#'
#' # Exporting data from R
#'
#' The basic function for exporting data from R is `write.table`. More often, however, so-called `wrapper` functions are used, which call `write.table` with predefined `default` parameters, for example `write.csv`.
#'
#'
write.csv(studenti, file = "studenti_export.csv", row.names = FALSE)
#'
#' ## The `writexl` package
#'
#' The `writexl` package is a very simple package for creating `excel` files. It has only a few, very useful functions.
#+ eval = FALSE
install.packages("writexl")
library(writexl)
#+ eval = FALSE
writexl::write_xlsx(studenti, path = "studenti_excel.xlsx")
#'
#'
#' ## Saving and loading R data
#'
#' Data (objects of all kinds) created in R can be saved as `Rdata` files (for several objects) or `rda` (`rds`) files (for a single object).
#+ eval = FALSE
save(studenti, file = "C:/R_projects/Nauka_R/Slides/studenti_export.rda")
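#'
#' Loading a saved object back, as a minimal sketch (the path mirrors the one used above); for a single object, the `saveRDS()`/`readRDS()` pair is also commonly used:
#+ eval = FALSE
load("C:/R_projects/Nauka_R/Slides/studenti_export.rda") # restores the object under its original name, studenti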
#'
#'
|
/**
* @file redis.cc
* @author sun-moon-star-star
*/
#include "redis.h"
#include <functional>
#include <boost/format.hpp>
namespace utils {
namespace redis {
const Config Config::NIL;
std::string Explain(ErrorCode code) {
switch (code) {
case ErrorCode::OK:
return "OK";
case ErrorCode::CONFIG_NIL_ERROR:
return "Config is nil";
case ErrorCode::CONFIGS_EMPTY_ERROR:
return "the number of Configs is 0";
case ErrorCode::CLIENT_NUM_ERROR:
return "invalid number of Clients";
case ErrorCode::ALLOCATE_REDIS_CONTEXT_ERROR:
return "failed to allocate memory for RedisContext";
case ErrorCode::CONNECTION_ERROR:
return "failed to connect to the Redis server";
case ErrorCode::COMMAND_EXECUTE_ERROR:
return "failed to execute the Redis command";
case ErrorCode::UNKNOWN_ERROR:
return "unknown error";
case ErrorCode::BUG:
return "unexpected behavior at runtime; needs to be fixed";
}
return "unrecognized error code";
}
Error Client::Init(const Config& cfg) {
if (cfg == Config::NIL) {
return Error(ErrorCode::CONFIG_NIL_ERROR, "can't init with nil config");
}
this->config = cfg;
return this->FreshConnection();
}
RedisContextPtr Client::CreateRedisContext() {
std::function<void(redisContext*)> delete_func =
[](redisContext* raw_context) {
if (raw_context == nullptr) {
return;
}
redisFree(raw_context);
raw_context = nullptr;
};
RedisContextPtr new_context(
static_cast<redisContext*>(redisConnect(config.ip.c_str(), config.port)),
delete_func);
return new_context;
}
Error Client::CheckRedisContext(RedisContextPtr new_context) {
if (new_context == NULL) {
return Error(ErrorCode::ALLOCATE_REDIS_CONTEXT_ERROR,
"can't allocate redis context");
}
if (new_context->err) {
return Error(ErrorCode::CONNECTION_ERROR,
std::string("fail to connect to redis-server") +
", err: " + std::to_string(new_context->err) +
", errstr: " + new_context->errstr);
}
return Error();
}
Error Client::FreshConnection() {
RedisContextPtr new_context = this->CreateRedisContext();
Error err = this->CheckRedisContext(new_context);
if (err) {
return err;
}
this->context = new_context;
return Error();
}
RedisReplyPtr Client::CreateRedisReply(const std::string& command) {
std::function<void(redisReply*)> delete_func = [](redisReply* raw_reply) {
if (raw_reply == nullptr) {
return;
}
freeReplyObject(raw_reply);
raw_reply = nullptr;
};
RedisReplyPtr new_reply(
static_cast<redisReply*>(redisCommand(context.get(), command.c_str())),
delete_func);
return new_reply;
}
Error Client::CheckRedisReply(RedisReplyPtr reply) {
if (reply == nullptr) {
return Error(ErrorCode::COMMAND_EXECUTE_ERROR,
std::string("redisCommand return nil redisReply") +
", err: " + std::to_string(context->err) +
", errstr: " + context->errstr);
}
if (reply->type == REDIS_REPLY_ERROR) {
return Error(ErrorCode::COMMAND_EXECUTE_ERROR,
std::string("redisCommand meet REDIS_REPLY_ERROR") +
", reply-str: " + reply->str);
}
return Error();
}
const bool NeedSaveResult(std::list<std::string>* result) {
return result != nullptr;
}
void SaveResult(RedisReplyPtr reply, std::list<std::string>* result) {
switch (reply->type) {
case REDIS_REPLY_ARRAY:
for (int i = 0; i < reply->elements; ++i) {
result->push_back(reply->element[i]->str);
}
break;
case REDIS_REPLY_INTEGER:
result->push_back(std::to_string(reply->integer));
break;
case REDIS_REPLY_NIL:
result->push_back("");
break;
case REDIS_REPLY_STRING:
case REDIS_REPLY_STATUS:
default:
result->push_back(reply->str);
break;
}
}
Error Client::ExecOnce(const std::string& command,
std::list<std::string>* result) {
RedisReplyPtr reply = this->CreateRedisReply(command);
Error err = this->CheckRedisReply(reply);
if (err) {
// propagate the error from CheckRedisReply; only save the result on success
return err;
}
if (NeedSaveResult(result)) {
SaveResult(reply, result);
}
return Error();
}
Error Client::Exec(const std::string& command, std::list<std::string>* result) {
Error err = this->ExecOnce(command, result);
int real_retry_times = 0;
while (err && real_retry_times < config.retry) {
real_retry_times++;
err = this->FreshConnection();
if (err) {
continue;
}
err = this->ExecOnce(command, result);
}
return err;
}
Error Client::Get(const std::string& key, std::string* value) {
std::string command = std::string("GET ") + key;
std::list<std::string> result;
Error err = this->Exec(command, &result);
if (err) {
return err;
}
if (value != nullptr && result.size() > 0) {
*value = result.front();  // copy the first (and only) reply string
}
return err;
}
Error Client::MGet(const std::list<std::string>& key,
std::list<std::string>* value) {
std::string command = "MGET";
for (auto it = key.begin(); it != key.end(); ++it) {
command = command + " " + *it;
}
return this->Exec(command, value);
}
Error Client::Set(const std::string& key, const std::string& value) {
boost::format formater = boost::format("SET %1% %2%") % key % value;
std::string command = formater.str();
return this->Exec(command, nullptr);
}
} // namespace redis
} // namespace utils |
From Perennial.program_proof Require Import grove_prelude.
From Goose.github_com.mit_pdos.gokv Require Import aof.
From Perennial.algebra Require Import mlist.
From iris.algebra Require Import mono_nat.
From Perennial.Helpers Require Import ModArith.
From Perennial.program_proof.fencing Require Import map.
From Perennial.goose_lang Require Import crash_borrow.
From Perennial.program_proof Require Import marshal_stateless_proof.
Section aof_proof.
Class aofG Σ := AofG {
aof_flistG :> fmlistG u8 Σ ;
aof_mnatG :> inG Σ mono_natUR ;
aof_mapG :> mapG Σ u64 unit ;
aof_tokG :> inG Σ (exclR unitO) ;
aof_stagedG :> stagedG Σ ; (* for crash borrows? *)
aof_ghostG :> ghost_varG Σ unit ;
aof_curdataG :> ghost_varG Σ (list u8) ;
}.
Definition aofΣ := #[
fmlistΣ u8 ; GFunctor (mono_natUR) ;
mapΣ u64 unit ;
GFunctor (exclR unitO) ;
stagedΣ ;
ghost_varΣ unit ;
ghost_varΣ (list u8)
].
Global Instance subG_aofΣ {Σ} : subG aofΣ Σ → aofG Σ.
Proof. solve_inG. Qed.
Context `{!heapGS Σ}.
Context `{!filesysG Σ}.
Record aof_vol_names := mk_aof_vol_names {
logdata : gname ;
predurabledata : gname ;
durabledata : gname ;
len : gname ;
len_toks : gname ;
close_tok: gname ;
close_req_tok: gname ;
curdata : gname ;
crashtok: gname ;
initdata : list u8 ;
}.
Context `{aofG Σ}.
Implicit Types γ : aof_vol_names.
Implicit Types P: (list u8) → iProp Σ.
Definition aof_lenN := nroot .@ "aof_len".
Definition aof_len_invariant γ : iProp Σ :=
∃ (l:u64),
own γ.(len) (●MN{#1/2} (int.nat l)) ∗
[∗ set] x ∈ (fin_to_set u64), x ⤳[γ.(len_toks)] () ∨ ⌜int.nat x > int.nat l⌝
.
Definition aof_length_lb γ (l:u64) : iProp Σ :=
own γ.(len) (mono_nat_lb (int.nat l)).
Definition list_safe_size (l:list u8) := int.nat (length l) = length l.
Definition aof_log_own γ data : iProp Σ :=
fmlist γ.(logdata) (DfracOwn (1/2)) data ∗
ghost_var γ.(close_req_tok) 1 () ∗
ghost_var γ.(close_tok) 1 ()
.
Definition aof_durable_lb γ data : iProp Σ :=
fmlist_lb γ.(durabledata) data
.
(* TODO: make this a parameter to the library? *)
Definition aofN := nroot .@ "aof".
Definition aofN1 := aofN .@ "1".
Definition aofN2 := aofN .@ "2".
Definition aofNlk := aofN .@ "aoflk".
Definition is_aof_ctx_inv γ P :=
inv aofN1 (C ∗ ghost_var γ.(crashtok) 1 () ∨ (* either the node has crashed, *)
∃ (data:list u8),
(fmlist γ.(logdata) DfracDiscarded data) ∗ ghost_var γ.(curdata) 1 data ∗ ghost_var γ.(close_req_tok) 1 ()
∨ (* or there's a witness that the user closed the file *)
(* the fmlist_lb and the 1/2 durabledata ownership are to help prove a strong accessP lemma *)
fmlist_lb γ.(logdata) data ∗ fmlist γ.(durabledata) (DfracOwn (1/2)) data ∗
ghost_var γ.(curdata) (1/2) data ∗ P data) (* or P is available *)
.
Definition aof_close_resources (aof_ptr:loc) γ P Pcrash fname : iProp Σ :=
∃ (isClosed closeRequested:bool),
"HcloseRequested" ∷ aof_ptr ↦[AppendOnlyFile :: "closeRequested"] #closeRequested ∗
"Hclosed" ∷ aof_ptr ↦[AppendOnlyFile :: "closed"]{1/2} #isClosed ∗ (* other half owned by background thread *)
"#HexpectedData" ∷ (if closeRequested then ∃ expectedData, (fmlist γ.(logdata) (DfracDiscarded) expectedData) else True) ∗
"Hreq_tok" ∷ (if closeRequested && negb isClosed then ghost_var γ.(close_req_tok) 1 () else True) ∗
"HfileEscrow" ∷ (if isClosed then
inv aofN2 (∃ data, crash_borrow (fname f↦ data ∗ P data) (|C={⊤}=> ∃ data', fname f↦ data' ∗ ▷ Pcrash data') ∗
fmlist γ.(logdata) DfracDiscarded data ∨
ghost_var γ.(close_tok) 1 ()
)
else
True)
.
(* FIXME: the membuf fupd will need to be run while is_aof_ctx_inv is open, so
it can't use aofN. Its mask needs adjustment. *)
Definition aof_mu_invariant (aof_ptr:loc) mu γ fname P Pcrash : iProp Σ :=
∃ membuf_sl membufC predurableC (durlen:u64) (durCond_ptr:loc),
let memlen := length (predurableC ++ membufC) in
"#Hcrash_wand" ∷ □ (∀ data, ▷ P data ={⊤}=∗ ▷ Pcrash data) ∗
"Hmembuf" ∷ aof_ptr ↦[AppendOnlyFile :: "membuf"] (slice_val membuf_sl) ∗
"HdurableLength" ∷ aof_ptr ↦[AppendOnlyFile :: "durableLength"]{1/2} #durlen ∗
"Hlength" ∷ aof_ptr ↦[AppendOnlyFile :: "length"] #(U64 memlen) ∗
"Hmembuf_sl" ∷ typed_slice.is_slice membuf_sl byteT 1 membufC ∗
"Hpredurable" ∷ fmlist γ.(predurabledata) (DfracOwn (1/2)) (γ.(initdata) ++ predurableC) ∗
"Hlogdata" ∷ fmlist γ.(logdata) (DfracOwn (1/2)) (γ.(initdata) ++ predurableC ++ membufC) ∗
"%Hlengthsafe" ∷ ⌜list_safe_size (predurableC ++ membufC)⌝ ∗
"Hlen_toks" ∷ ([∗ set] x ∈ (fin_to_set u64), x ⤳[γ.(len_toks)] () ∨ ⌜int.nat x ≤ memlen⌝) ∗
"Hmembuf_fupd" ∷ (P (γ.(initdata) ++ predurableC) -∗ fmlist_lb γ.(durabledata) (γ.(initdata) ++ predurableC ++ membufC)
={⊤∖↑aofN}=∗ P (γ.(initdata) ++ predurableC ++ membufC)
∗ (own γ.(len) (●MN{#1/2} (length predurableC)) ={⊤}=∗ own γ.(len) (●MN{#1/2} memlen)
)
) ∗
"#Hdurlen_lb" ∷ aof_length_lb γ durlen ∗
"Hclose" ∷ aof_close_resources aof_ptr γ P Pcrash fname ∗
"HdurableCond" ∷ aof_ptr ↦[AppendOnlyFile :: "durableCond"] #durCond_ptr ∗
"#HdurCond" ∷ is_cond durCond_ptr mu
.
Definition is_aof aof_ptr γ fname (P : (list u8) → iProp Σ) Pcrash : iProp Σ :=
∃ mu_ptr (lenCond_ptr cloCond_ptr:loc),
"#Hmu" ∷ readonly (aof_ptr ↦[AppendOnlyFile :: "mu"] mu_ptr) ∗
"#HlengthCond" ∷ readonly (aof_ptr ↦[AppendOnlyFile :: "lengthCond"] #lenCond_ptr) ∗
"#HclosedCond" ∷ readonly (aof_ptr ↦[AppendOnlyFile :: "closedCond"] #cloCond_ptr) ∗
"#HlenCond" ∷ is_cond lenCond_ptr mu_ptr ∗
"#HcloCond" ∷ is_cond cloCond_ptr mu_ptr ∗
"#Hmu_inv" ∷ is_lock aofNlk mu_ptr (aof_mu_invariant aof_ptr mu_ptr γ fname P Pcrash) ∗
"#Haof_len_inv" ∷ inv aof_lenN (aof_len_invariant γ) ∗
"#Hctx_inv" ∷ is_aof_ctx_inv γ P
.
Lemma ctx_inv_crash γ P data :
is_aof_ctx_inv γ P -∗
ghost_var γ.(curdata) (1 / 2) data -∗ ghost_var γ.(crashtok) 1 () -∗
|C={⊤}=> ▷ P data.
Proof.
iIntros "#Hinv Hcurdata Hcrashtok".
iInv "Hinv" as "Hctx" "Hctx_close".
iDestruct "Hctx" as "[[_ >Hbad]|Hctx]".
{ iDestruct (ghost_var_valid_2 with "Hcrashtok Hbad") as %Hbad.
exfalso.
naive_solver. }
iDestruct "Hctx" as (?) "Hctx".
iDestruct "Hctx" as "[[_ [>Hbad _]]|Hctx]".
{ iDestruct (ghost_var_valid_2 with "Hcurdata Hbad") as %Hbad.
exfalso.
naive_solver. }
iDestruct "Hctx" as "(_ & _ & >Hcurdata2 & Hctx)".
iDestruct (ghost_var_agree with "Hcurdata Hcurdata2") as %->.
iIntros "#Hc".
iMod ("Hctx_close" with "[Hcrashtok]").
{
iLeft. iFrame "∗#".
}
iFrame. done.
Qed.
Lemma ctx_inv_update γ P data data' Q :
prefix data data' →
is_aof_ctx_inv γ P -∗
£ 1 -∗
fmlist_lb γ.(logdata) data' -∗
(P data -∗ fmlist_lb γ.(durabledata) data' ={⊤∖↑aofN}=∗ P data' ∗ Q) -∗
fmlist γ.(durabledata) (DfracOwn (1/2)) data -∗
ghost_var γ.(curdata) (1/2) data -∗
ghost_var γ.(crashtok) 1 () ={⊤}=∗
ghost_var γ.(curdata) (1 / 2) data' ∗ ghost_var γ.(crashtok) 1 () ∗
fmlist γ.(durabledata) (DfracOwn (1/2)) data' ∗ Q
.
Proof.
intros.
iIntros "#Hinv Hlc #Hloglb Hupd Hdurdata Hcurdata Hcrashtok".
iInv "Hinv" as "Hctx" "Hctx_close".
iMod (lc_fupd_elim_later with "Hlc Hctx") as "Hctx".
iDestruct "Hctx" as "[[_ Hbad]|Hctx]".
{ iDestruct (ghost_var_valid_2 with "Hcrashtok Hbad") as %Hbad.
exfalso.
naive_solver. }
iDestruct "Hctx" as (?) "Hctx".
iDestruct "Hctx" as "[[_ [Hbad _]]|Hctx]".
{ iDestruct (ghost_var_valid_2 with "Hcurdata Hbad") as %Hbad.
exfalso.
naive_solver. }
iDestruct "Hctx" as "(_ & Hdurdata2 & Hcurdata2 & Hctx)".
iDestruct (ghost_var_agree with "Hcurdata Hcurdata2") as %<-.
iCombine "Hdurdata Hdurdata2" as "Hdurdata".
iMod (fmlist_update with "Hdurdata") as "[[Hdurdata Hdurdata2] Hdurdata_lb]".
{ done. }
iMod (fmlist_get_lb with "Hdurdata") as "[Hdurdata #Hlog_lb]".
iApply (fupd_mask_weaken (⊤ ∖ ↑aofN)); first by solve_ndisj.
iIntros "Hfupd_clo".
iMod ("Hupd" with "Hctx Hlog_lb") as "(Hctx & HQ)".
iMod (ghost_var_update_2 with "Hcurdata Hcurdata2") as "[Hcurdata Hcurdata2]".
{ by rewrite Qp.half_half. }
iMod "Hfupd_clo".
iMod ("Hctx_close" with "[Hctx Hcurdata2 Hdurdata2]").
{
iRight. iExists _.
iRight. iFrame "∗#".
}
iFrame.
done.
Qed.
Lemma ctx_inv_close γ P data :
is_aof_ctx_inv γ P -∗
£ 1 -∗
fmlist γ.(logdata) DfracDiscarded data -∗
ghost_var γ.(curdata) (1 / 2) data -∗
ghost_var γ.(close_req_tok) 1 () -∗
ghost_var γ.(crashtok) 1 ()
={⊤}=∗
P data ∗ ghost_var γ.(crashtok) 1 ()
.
Proof.
iIntros "#Hinv Hlc Hclosed Hcurdata Hclose_req Hcrashtok".
iInv "Hinv" as "Hctx" "Hctx_close".
iMod (lc_fupd_elim_later with "Hlc Hctx") as "Hctx".
iDestruct "Hctx" as "[[_ Hbad]|Hctx]".
{ iDestruct (ghost_var_valid_2 with "Hcrashtok Hbad") as %Hbad.
exfalso.
naive_solver. }
iDestruct "Hctx" as (?) "Hctx".
iDestruct "Hctx" as "[[_ [Hbad _]]|Hctx]".
{ iDestruct (ghost_var_valid_2 with "Hcurdata Hbad") as %Hbad.
exfalso.
naive_solver. }
iDestruct "Hctx" as "(_ & _ & Hcurdata2 & Hctx)".
iDestruct (ghost_var_agree with "Hcurdata Hcurdata2") as %->.
iCombine "Hcurdata Hcurdata2" as "Hcurdata".
iFrame.
iMod ("Hctx_close" with "[-]").
{
iNext.
iRight.
iExists _; iLeft.
iFrame.
}
done.
Qed.
Lemma wp_CreateAppendOnlyFile (fname:string) data P Pcrash :
□(∀ data, ▷ P data ={⊤}=∗ ▷ Pcrash data) -∗
{{{
crash_borrow (fname f↦ data ∗ P data)
(|C={⊤}=> ∃ data', fname f↦ data' ∗ ▷ Pcrash data')
}}}
CreateAppendOnlyFile #(str fname)
{{{
aof_ptr γ, RET #aof_ptr; is_aof aof_ptr γ fname P Pcrash ∗ aof_log_own γ data ∗ aof_durable_lb γ data
}}}.
Proof.
iIntros "#Hcrash_wand !#" (Φ) "Hpre HΦ".
wp_lam.
wp_apply (wp_allocStruct).
{ Transparent slice.T. repeat econstructor. Opaque slice.T. }
iIntros (l) "Hl".
iDestruct (struct_fields_split with "Hl") as "Hl".
iNamed "Hl".
wp_pures.
wp_apply (wp_new_free_lock).
iIntros (mu) "Hmu_free".
wp_pures.
wp_storeField.
wp_loadField.
wp_apply (wp_newCond' with "Hmu_free").
iIntros (lengthCond) "[Hmu_free #HlenCond]".
wp_storeField.
wp_loadField.
wp_apply (wp_newCond' with "Hmu_free").
iIntros (durableCond) "[Hmu_free #HdurCond]".
wp_storeField.
wp_loadField.
wp_apply (wp_newCond' with "Hmu_free").
iIntros (closedCond) "[Hmu_free #HcloCond]".
(* Allocate ghost state and invariants *)
iMod (fmlist_alloc data) as (γlogdata) "[Hlogdata Hlogdata2]".
iMod (fmlist_get_lb with "Hlogdata2") as "[Hlogdata2 #Hlog_lb]".
iMod (fmlist_alloc data) as (γpredurabledata) "[Hpredurable Hpredurable2]".
iMod (fmlist_alloc data) as (γdurabledata) "[Hdurabledata Hdurabledata2]".
iMod (fmlist_get_lb with "Hdurabledata") as "[Hdurabledata #Hdurable_lb]".
iMod (ghost_map_alloc_fin ()) as (γlen_toks) "Hlen_toks".
iMod (own_alloc (●MN 0)) as (γlen) "[Hlen Hlen2]".
{ apply mono_nat_auth_valid. }
iDestruct (own_mono _ _ (◯MN 0) with "Hlen2") as "#Hdurlen_lb".
{ apply mono_nat_included. }
iDestruct "durableLength" as "[HdurableLength HdurableLength2]".
iMod (ghost_var_alloc ()) as (γclose_tok) "Hclose_tok".
iMod (ghost_var_alloc ()) as (γclose_req_tok) "Hclose_req_tok".
iMod (ghost_var_alloc data) as (γcurdata) "[Hcurdata Hcurdata2]".
iMod (ghost_var_alloc ()) as (γcrashtok) "Hcrashtok".
set (γ:=mk_aof_vol_names γlogdata γpredurabledata γdurabledata γlen γlen_toks γclose_tok γclose_req_tok γcurdata γcrashtok data).
iDestruct (crash_borrow_wpc_nval _ _ _ (fname f↦data ∗ ghost_var γcurdata (1/2) data ∗ ghost_var γ.(crashtok) 1 ())
(is_aof_ctx_inv γ P) with "Hpre [Hcurdata2 Hcurdata Hcrashtok Hdurabledata2]") as "Hnval_pre".
{
iNext.
iIntros "[Hf HP]".
iMod (inv_alloc with "[HP Hcurdata2 Hdurabledata2]") as "#Hctx_inv"; last iFrame "Hctx_inv".
{
iNext. iRight.
iExists data.
iRight.
iFrame "∗#".
}
iFrame.
instantiate (1:=⊤).
iModIntro.
iModIntro.
iIntros "(Hf & Hcurdata & Htok)".
iMod (ctx_inv_crash with "Hctx_inv Hcurdata Htok") as "HP".
iMod ("Hcrash_wand" with "HP") as "HP".
iModIntro.
iExists _; iFrame.
}
wp_bind (struct.storeF _ _ _ _).
iApply (wpc_wp _ _ _ _ True).
wpc_apply (wpc_nval_elim with "Hnval_pre").
{ done. }
{ done. }
iApply wp_wpc.
wp_storeField.
iIntros "[#Hctx_inv Hpre]".
iDestruct "closed" as "(closed1&closed2)".
iAssert ((|={⊤}=> is_aof l γ fname P Pcrash ∗
fmlist γ.(predurabledata) (DfracOwn (1/2)) γ.(initdata) ∗
fmlist γ.(durabledata) (DfracOwn (1/2)) γ.(initdata) ∗
l ↦[AppendOnlyFile :: "durableLength"]{1/2} #0 ∗
own γ.(len) (●MN{#1/2} 0) ∗
aof_log_own γ γ.(initdata) ∗
⌜data = γ.(initdata)⌝
)
)%I with "[-Hpre closed2 HΦ]" as ">HH".
{
iAssert (
([∗ set] x ∈ fin_to_set u64,
(x ⤳[γlen_toks] () ∨ ⌜int.nat x ≤ 0%nat⌝) ∗
(x ⤳[γlen_toks] () ∨ ⌜int.nat x > 0%nat⌝))
)%I with "[Hlen_toks]" as "HH".
{
iApply (big_sepS_impl with "Hlen_toks").
iModIntro.
iIntros.
destruct (decide (int.nat x <= 0%nat)).
{
iSplitR; iFrame.
iRight. iPureIntro. done.
}
assert (int.nat x > 0%nat) by word.
iSplitL; iFrame.
iRight. iPureIntro. done.
}
iDestruct (big_sepS_sep with "HH") as "[Hlen_toks Hlen_toks2]".
iMod (inv_alloc aof_lenN _ (aof_len_invariant γ) with "[Hlen2 Hlen_toks2]") as "#Hinv".
{
iNext.
iExists (U64 0).
iFrame.
}
iMod (readonly_alloc_1 with "mu") as "#Hmu".
iMod (readonly_alloc_1 with "lengthCond") as "#HlengthCond".
iMod (readonly_alloc_1 with "closedCond") as "#HclosedCond".
iMod (alloc_lock _ _ _ (aof_mu_invariant l (#mu) γ fname P Pcrash) with "Hmu_free [-Hlogdata2 HdurableLength2 Hpredurable2 Hdurabledata Hlen Hclose_tok Hclose_req_tok]") as "#HmuInv".
{
iNext.
iExists (Slice.nil), [], [], (U64 0), _.
iDestruct is_slice_zero as "$".
simpl.
rewrite app_nil_r.
iFrame "∗#".
iSplitR; first done.
iSplitR.
{ by iIntros "$ _ !> $ !>". }
iExists false, false.
iFrame.
done.
}
iModIntro.
iFrame.
iSplitL; last done.
repeat iExists _.
iFrame "#".
}
iDestruct "HH" as "(#His_aof & Hpredur & Hdur & HdurLen & Hlen & Hlog_own & %Hre)".
rewrite Hre.
replace (data) with (γ.(initdata)) by done.
clear Hre.
rename data into initial_data.
wp_apply (wp_fork with "[-HΦ Hlog_own]").
{
iNext.
iEval (simpl) in "closed2".
iClear "HlenCond HdurCond HcloCond Hcrash_wand Hctx_inv".
iNamed "His_aof".
wp_loadField.
wp_apply (acquire_spec with "Hmu_inv").
iIntros "[Hlocked Haof_own]".
wp_pures.
iAssert (∃ data',
crash_borrow
(fname f↦(γ.(initdata) ++ data') ∗ ghost_var γcurdata (1 / 2) (γ.(initdata) ++ data') ∗
ghost_var γ.(crashtok) 1 ())
(|C={⊤}=> ∃ data' : list u8, fname f↦data' ∗ ▷ Pcrash data') ∗
fmlist γ.(predurabledata) (DfracOwn (1/2)) (γ.(initdata) ++ data') ∗
fmlist γ.(durabledata) (DfracOwn (1/2)) (γ.(initdata) ++ data') ∗
l ↦[AppendOnlyFile :: "durableLength"]{1 / 2} #(U64 (length data')) ∗
own γ.(len) (●MN{#1/2} (length (data')))
)%I with "[Hpre Hdur Hpredur HdurLen Hlen]" as "Hfile_ctx".
{ iExists []; iFrame. rewrite app_nil_r. iFrame. }
wp_forBreak.
wp_pure1_credit "Hlc".
iClear "Hdurlen_lb".
iNamed "Haof_own".
wp_loadField.
wp_apply (wp_slice_len).
wp_pure1_credit "Hlc2".
iNamed "Hclose".
wp_apply (wp_and with "[HcloseRequested]").
{ iNamedAccu. }
{
wp_pures.
done.
}
{
iIntros.
wp_loadField.
wp_pures.
iFrame.
instantiate (2:=(closeRequested = false)).
instantiate (1:=decide_rel eq _ _).
iPureIntro.
f_equal.
destruct closeRequested.
{ rewrite bool_decide_false; done. }
{ rewrite bool_decide_true; done. }
}
iNamed 1.
wp_if_destruct.
{
wp_loadField.
wp_apply (wp_condWait with "[- closed2 Hfile_ctx]").
{ iFrame "#∗". repeat iExists _; iFrame "∗#". iSplitR; first done.
iExists _, _; iFrame "∗#".
}
iIntros "[Hlocked Haof_own]".
wp_pures.
iLeft.
iFrame.
done.
}
wp_loadField.
(* Prepare ghost state for writing to file; whether we close the file or
not, we're going to do the FileAppend *)
iDestruct "Hfile_ctx" as (data') "(Hfile_ctx & Hpredur & Hdur & HdurLen & Hlen)".
iDestruct (fmlist_agree_1 with "Hpredur Hpredurable") as %Hpredur.
apply app_inv_head in Hpredur.
rewrite Hpredur.
iCombine "Hpredur Hpredurable" as "Hpredur".
iMod (fmlist_update (γ.(initdata) ++ predurableC ++ membufC) with "Hpredur") as "[Hpredur _]".
{ rewrite app_assoc. apply prefix_app_r. done. }
iDestruct "Hpredur" as "[Hpredur Hpredurable]".
iClear "Hlog_lb".
iMod (fmlist_get_lb with "Hlogdata") as "[Hlogdata #Hlog_lb]".
wp_if_destruct.
{ (* case: close aof; write the remaining stuff first, using the same
argument as when we write with the lock *)
wp_loadField.
wp_bind (FileAppend _ _).
iApply (wpc_wp _ _ _ _ True).
wpc_apply (wpc_crash_borrow_open_modify with "Hfile_ctx").
{ done. }
iSplit; first done.
iIntros "(Hfile & Hcurdata & Hcrashtok)".
iApply wpc_fupd.
iDestruct (is_slice_to_small with "Hmembuf_sl") as "Hmembuf_sl".
wpc_apply (wpc_FileAppend with "[$Hfile $Hmembuf_sl]").
iSplit.
{ (* This is the case in which the node crashes during the FileAppend. *)
iIntros "[Hbefore|Hafter]".
{
iSplitR; first done.
iMod (ctx_inv_crash with "Hctx_inv Hcurdata Hcrashtok") as "HP".
iMod ("Hcrash_wand" with "HP") as "HP".
iModIntro.
iExists _. iFrame.
}
{
iSplitR; first done.
repeat rewrite -app_assoc.
iMod (ctx_inv_update with "Hctx_inv Hlc Hlog_lb [Hmembuf_fupd] Hdur Hcurdata Hcrashtok") as "(Hcurdata & Hcrashtok & _)".
{
rewrite app_assoc.
apply prefix_app_r.
done.
}
{
instantiate (1:=True%I).
iIntros "HP Hlb".
iMod ("Hmembuf_fupd" with "HP Hlb") as "[$ _]".
done.
}
iMod (ctx_inv_crash with "Hctx_inv Hcurdata Hcrashtok") as "HP".
iMod ("Hcrash_wand" with "HP") as "HP".
iModIntro.
iExists _. iFrame.
}
}
iNext.
iIntros "[Hfile _]".
(* commit remaining operations before we close the file. *)
iMod (ctx_inv_update with "Hctx_inv Hlc Hlog_lb [Hmembuf_fupd] Hdur Hcurdata Hcrashtok") as "(Hcurdata & Hcrashtok & Hdur & Hlen_fupd)".
{ rewrite app_assoc. apply prefix_app_r. done. }
{
iIntros "HP Hlb".
iMod ("Hmembuf_fupd" with "HP Hlb") as "[$ Hlen_fupd]".
iModIntro.
iExact "Hlen_fupd".
}
iMod ("Hlen_fupd" with "Hlen") as "Hlen".
iEval (rewrite mono_nat_auth_lb_op) in "Hlen".
iDestruct "Hlen" as "[Hlen #Hlenlb]".
(* Going to return the crash borrow to the user, so get it back into the
form they want. *)
iNamed "Hclose".
iDestruct "HexpectedData" as (?) "HexpectedData".
iDestruct (fmlist_agree_1 with "HexpectedData Hlogdata") as %->.
iDestruct (struct_field_mapsto_agree with "Hclosed closed2") as %Heq.
inversion Heq.
iMod (ctx_inv_close with "Hctx_inv Hlc2 HexpectedData Hcurdata Hreq_tok Hcrashtok") as "[HP Hcrashtok]".
rewrite -app_assoc.
iModIntro.
iExists _.
iSplitL "Hfile HP".
{ iAccu. }
iSplit.
{
iModIntro. iIntros "(Hfile & HP)".
iMod ("Hcrash_wand" with "HP") as "HP".
iModIntro.
iExists _. iFrame.
}
iIntros "Hfile_ctx".
iSplit; first done.
wp_pures.
wp_apply (wp_NewSlice).
iIntros (empty_membuf_sl) "Hmembuf_empty".
wp_storeField.
wp_loadField.
iDestruct (struct_field_mapsto_agree with "HdurLen HdurableLength") as %Heq'.
rewrite Heq'.
iCombine "HdurLen HdurableLength" as "HdurLen".
wp_storeField.
wp_loadField.
wp_apply (wp_condBroadcast).
{ iFrame "#". }
wp_pures.
iCombine "closed2 Hclosed" as "Hclosed".
wp_storeField.
iDestruct "Hclosed" as "(closed2&Hclosed)".
wp_loadField.
wp_apply (wp_condBroadcast).
{ iFrame "#". }
wp_pures.
wp_loadField.
iMod (inv_alloc with "[Hfile_ctx]") as "#HfileEscrow2".
{
instantiate (1:=(∃ data : list u8,
crash_borrow (fname f↦data ∗ P data)
(|C={⊤}=> ∃ data'0 : list u8, fname f↦data'0 ∗ ▷ Pcrash data'0) ∗
fmlist γ.(logdata) DfracDiscarded data ∨ ghost_var γ.(close_tok) 1 ())%I).
iNext.
iExists _.
iLeft.
iFrame "∗#".
}
wp_apply (release_spec with "[-Hlen closed2]").
{
iFrame "#∗".
iNext.
iDestruct "HdurLen" as "[HdurableLength HdurLen]".
iExists _, [], _, _, _.
rewrite app_nil_r.
iFrame "∗#%".
iSplitR.
{ by iIntros "$ _ !> $ !>". }
unfold aof_length_lb.
rewrite Hlengthsafe.
iFrame "#".
iExists _, _.
iFrame "Hclosed ∗".
iFrame "#".
iExists _; iFrame "#".
}
wp_pures.
iRight.
done.
}
(* case: don't want to close the file, do a FileAppend after letting go of
mutex and loop *)
wp_loadField.
wp_pures.
wp_loadField.
wp_pures.
wp_apply (wp_new_slice).
{ done. }
iIntros (empty_membuf_sl) "Hmembuf_empty".
wp_apply (wp_storeField with "Hmembuf").
{ unfold AppendOnlyFile. unfold field_ty. simpl. apply slice_val_ty. }
iIntros "Hmembuf".
wp_pures.
wp_loadField.
wp_pures.
wp_loadField.
wp_apply (wp_newCond with "Hmu_inv").
iIntros (newcond) "#Hnewcond".
wp_storeField.
wp_loadField.
wp_apply (release_spec with "[-Hfile_ctx Hpredur Hdur Hmembuf_fupd Hmembuf_sl HdurLen Hlen closed2]").
{ iFrame "#∗". iNext. iExists _, [], (predurableC ++ membufC),_, _. iFrame "∗#".
rewrite app_nil_r.
iFrame.
iSplitL ""; first done.
iSplitR.
{ iIntros "$ _ !> $ !> //". }
iExists _, _; iFrame "∗#".
}
wp_pure1_credit "Hlc".
wp_pures.
iDestruct (typed_slice.is_slice_sz with "Hmembuf_sl") as %Hsz.
iDestruct (is_slice_to_small with "Hmembuf_sl") as "Hmembuf_sl".
wp_bind (FileAppend _ _).
iApply (wpc_wp _ _ _ _ True).
wpc_apply (wpc_crash_borrow_open_modify with "Hfile_ctx").
{ done. }
iSplit; first done.
iIntros "(Hfile & Hcurdata & Hcrashtok)".
iApply wpc_fupd.
wpc_apply (wpc_FileAppend with "[$Hfile $Hmembuf_sl]").
iSplit.
{ (* This is the case in which the node crashes during the FileAppend. *)
iIntros "[Hbefore|Hafter]".
{
iSplitR; first done.
iMod (ctx_inv_crash with "Hctx_inv Hcurdata Hcrashtok") as "HP".
iMod ("Hcrash_wand" with "HP") as "HP".
iModIntro.
iExists _. iFrame.
}
{
iSplitR; first done.
repeat rewrite -app_assoc.
iMod (ctx_inv_update with "Hctx_inv Hlc Hlog_lb [Hmembuf_fupd] Hdur Hcurdata Hcrashtok") as "(Hcurdata & Hcrashtok & _)".
{
rewrite app_assoc.
apply prefix_app_r.
done.
}
{
instantiate (1:=True%I).
iIntros "HP Hlb".
iMod ("Hmembuf_fupd" with "HP Hlb") as "[$ _]".
done.
}
iMod (ctx_inv_crash with "Hctx_inv Hcurdata Hcrashtok") as "HP".
iMod ("Hcrash_wand" with "HP") as "HP".
iModIntro.
iExists _. iFrame.
}
}
iNext.
iIntros "[Hfile _]".
(* commit the remaining operations now that the write has hit disk. *)
iMod (ctx_inv_update with "Hctx_inv Hlc Hlog_lb [Hmembuf_fupd] Hdur Hcurdata Hcrashtok") as "(Hcurdata & Hcrashtok & Hdur & Hlen_fupd)".
{ rewrite app_assoc. apply prefix_app_r. done. }
{
iIntros "HP Hlb".
iMod ("Hmembuf_fupd" with "HP Hlb") as "[$ Hlen_fupd]".
iModIntro.
iExact "Hlen_fupd".
}
iMod ("Hlen_fupd" with "Hlen") as "Hlen".
iEval (rewrite mono_nat_auth_lb_op) in "Hlen".
iDestruct "Hlen" as "[Hlen #Hlenlb]".
iModIntro.
iExists _.
iSplitL "Hfile Hcurdata Hcrashtok".
{ iNamedAccu. }
iSplit.
{
iModIntro. iNamed 1.
iMod (ctx_inv_crash with "Hctx_inv Hcurdata Hcrashtok") as "HP".
iMod ("Hcrash_wand" with "HP") as "HP".
iModIntro.
iExists _. rewrite -app_assoc.
iFrame.
}
iIntros "Hfile_ctx".
iSplit; first done.
wp_pures.
wp_loadField.
wp_apply (acquire_spec with "Hmu_inv").
iIntros "[Hlocked Haof_own]".
iRename "Hdurlen_lb" into "Hdurlen_lb_old".
iClear "Hcrash_wand".
iRename "HdurCond" into "HoldDurCond".
iNamed "Haof_own".
wp_pures.
iDestruct (struct_field_mapsto_agree with "HdurLen HdurableLength") as %Heq.
rewrite Heq.
iCombine "HdurLen HdurableLength" as "HdurLen".
wp_storeField.
wp_apply (wp_condBroadcast).
{ iFrame "#". }
wp_pures.
iLeft.
iFrame.
iSplitL ""; first done.
iDestruct "HdurLen" as "[HdurableLength HdurLen]".
iSplitR "Hpredur Hdur HdurLen Hlen Hfile_ctx".
{
repeat iExists _; iFrame "∗#".
unfold aof_length_lb.
iSplitL ""; first done.
unfold aof_length_lb.
rewrite Hlengthsafe.
iFrame "#".
done.
}
{
rewrite -Hpredur.
repeat rewrite -app_assoc.
iExists _; iFrame.
iModIntro.
done.
}
}
wp_pures.
iApply "HΦ".
iFrame "∗#".
done.
Qed.
Lemma wp_AppendOnlyFile__Append Q aof_ptr γ data_sl P Pcrash fname (oldData newData:list u8) q :
length newData > 0 →
list_safe_size newData →
is_aof aof_ptr γ fname P Pcrash -∗
{{{
typed_slice.is_slice_small data_sl byteT q newData ∗ aof_log_own γ oldData ∗
(P oldData ={⊤∖↑aofN}=∗ P (oldData ++ newData) ∗ Q)
}}}
AppendOnlyFile__Append #aof_ptr (slice_val data_sl)
{{{
(l:u64), RET #l; aof_log_own γ (oldData ++ newData) ∗
(aof_length_lb γ l ={⊤}=∗ ▷ Q ∗ aof_durable_lb γ (oldData ++ newData))
}}}.
Proof.
intros HnewDataLen HnewDataSafe.
iIntros "#Haof" (Φ) "!# Hpre HΦ".
iNamed "Haof".
wp_lam.
wp_pures.
wp_loadField.
wp_apply (acquire_spec with "Hmu_inv").
iIntros "[Hlocked Haof]".
iNamed "Haof".
iDestruct "Hpre" as "(HnewData & Haof_log & Hfupd)".
wp_pures.
wp_loadField.
iDestruct (is_slice_small_sz with "HnewData") as %Hsz.
wp_apply (wp_WriteBytes with "[$Hmembuf_sl $HnewData]").
iIntros (membuf_sl') "[Hmembuf_sl HnewData]".
wp_apply (wp_storeField with "Hmembuf").
{ unfold AppendOnlyFile. unfold field_ty. simpl. apply slice_val_ty. }
iIntros "Hmembuf".
wp_pures.
(* overflow guard *)
wp_apply (wp_slice_len).
wp_loadField.
wp_apply (std_proof.wp_SumAssumeNoOverflow).
iIntros (HnoOverflow).
wp_storeField.
wp_loadField.
wp_pures.
rewrite -HnewDataSafe in Hsz.
assert (U64 (length newData) = data_sl.(Slice.sz)) as HH.
{
apply Z2Nat.inj in Hsz.
{ word_cleanup. naive_solver. }
{ word_cleanup. naive_solver. }
word.
}
rewrite -HH.
rewrite -HH in HnoOverflow.
wp_pures.
wp_loadField.
wp_apply (wp_condSignal).
{ iFrame "#". }
wp_pures.
unfold aof_log_own.
iDestruct "Haof_log" as "[Haof_log Htok]".
iDestruct (fmlist_agree_1 with "Haof_log Hlogdata") as %->.
iCombine "Haof_log Hlogdata" as "Haof_log".
iMod (fmlist_update ((γ.(initdata) ++ predurableC ++ membufC) ++ newData) with "Haof_log") as "[Haof_log _]".
{ apply prefix_app_r. done. }
iDestruct "Haof_log" as "[Hlogdata Haof_log]".
rewrite -app_assoc.
(* Want to prove membuf_fupd, and the postcondition *)
set (membufC' := membufC ++ newData) in *.
iAssert (([∗ set] x ∈ (fin_to_set u64 : gset u64), x ⤳[γ.(len_toks)] () ∨
⌜int.nat x <= length (predurableC ++ membufC)⌝ ∨
⌜length (predurableC ++ membufC') < int.nat x⌝
) ∗
([∗ set] x ∈ (fin_to_set u64 : gset u64), x ⤳[γ.(len_toks)] () ∨
⌜int.nat x ≤ length (predurableC ++ membufC')⌝
))%I
with "[Hlen_toks]"
as "HH".
{
iApply big_sepS_sep.
iApply (big_sepS_impl with "Hlen_toks").
iModIntro.
iIntros (x ?) "Hx".
iDestruct "Hx" as "[Hx|%Hineq]".
{
destruct (bool_decide (length (predurableC ++ membufC') < int.nat x)) as [|] eqn:Hineq.
{
apply bool_decide_eq_true in Hineq.
iSplitR "Hx".
{ iRight; iRight. done. }
iLeft. iFrame.
}
{
apply bool_decide_eq_false in Hineq.
iSplitL "Hx".
{ iFrame. }
iRight.
iPureIntro.
word.
}
}
{
iSplitL.
{
iRight; iLeft. done.
}
iRight. iPureIntro.
replace (membufC') with (membufC ++ newData) by done.
rewrite app_assoc.
rewrite app_length.
word.
}
}
iDestruct "HH" as "[Htoks Hlen_toks]".
assert (int.nat (U64 (length (predurableC ++ membufC'))) = (length (predurableC ++ membufC'))) as Hsafesize'.
{
replace (membufC') with (membufC ++ newData) by done.
rewrite app_assoc.
rewrite app_length.
word_cleanup.
rewrite -Hlengthsafe.
repeat (rewrite Nat2Z.inj_add).
replace (length newData) with (Z.to_nat (Z.of_nat (length newData))) by lia.
rewrite -Z2Nat.inj_add.
{
rewrite Z2Nat.inj_iff.
{
rewrite Z2Nat.id.
{
rewrite wrap_small; first word.
split.
{
apply Z.add_nonneg_nonneg; word_cleanup; naive_solver.
}
{
rewrite Nat2Z.id.
rewrite -HnewDataSafe.
replace (Z.of_nat (int.nat (length newData))) with (int.Z (length newData)); last first.
{ rewrite u64_Z_through_nat. done. }
destruct (bool_decide (int.Z (length (predurableC ++ membufC)) + (int.Z (length newData)) < 2 ^ 64))%Z eqn:Hnov.
{ apply bool_decide_eq_true in Hnov. done. }
{
apply bool_decide_eq_false in Hnov.
word.
}
}
}
naive_solver.
}
{
word_cleanup.
unfold word.wrap.
by apply Z_mod_lt.
}
{
word_cleanup.
apply Z.add_nonneg_nonneg; word_cleanup; naive_solver.
}
}
{ naive_solver. }
{ lia. }
}
iAssert (|={⊤}=> (P (γ.(initdata) ++ predurableC) -∗ fmlist_lb γ.(durabledata) (γ.(initdata) ++ predurableC ++ membufC')
={⊤∖↑aofN}=∗ P (γ.(initdata) ++ predurableC ++ membufC')
∗ (own γ.(len) (●MN{#1/2} (length predurableC))
={⊤}=∗ own γ.(len)
(●MN{#1/2} (length (predurableC ++ membufC'))))
) ∗ (aof_length_lb γ (U64 (length (predurableC ++ membufC'))) ={⊤}=∗ ▷ Q ∗ aof_durable_lb γ (γ.(initdata) ++ predurableC ++ membufC')))%I with "[Hmembuf_fupd Hfupd Htoks]" as "HH".
{
(* allocate invariant to escrow Q *)
iMod (own_alloc (Excl ())) as "HQtok".
{ done. }
iDestruct "HQtok" as (γtok) "Htok".
iMod (own_alloc (Excl ())) as "HQexcl".
{ done. }
iDestruct "HQexcl" as (γq) "HQexcl".
iDestruct (big_sepS_elem_of_acc _ _ (U64 (length (predurableC ++ membufC'))) with "Htoks") as "[Hlen_tok Hlen_toks_rest]".
{ set_solver. }
iDestruct "Hlen_tok" as "[Hlen_tok|%Hbad]"; last first.
{
exfalso.
rewrite Hsafesize' in Hbad.
rewrite app_length in Hbad.
rewrite app_length in Hbad.
rewrite app_length in Hbad.
word.
}
iMod (inv_alloc aofN _ (own γtok (Excl ()) ∗ aof_length_lb γ (U64 (length (predurableC ++ membufC'))) ∨ (U64 (length (predurableC ++ membufC')) ⤳[γ.(len_toks)] ()) ∨ (Q ∗ aof_durable_lb γ (γ.(initdata) ++ predurableC ++ membufC')) ∗ own γq (Excl ())) with "[Hlen_tok]") as "#HQinv".
{
iRight. iLeft.
iFrame.
}
iSplitR "Htok"; last first.
{
iModIntro.
iIntros "Haof_lb".
iInv "HQinv" as "Hq" "Hqclose".
iDestruct "Hq" as "[>[Htok2 _]|Hq]".
{ iDestruct (own_valid_2 with "Htok Htok2") as %Hbad. contradiction. }
iDestruct "Hq" as "[>Hlentok|Hq]".
{
iInv "Haof_len_inv" as ">Ha" "Haclose".
unfold aof_len_invariant.
iDestruct "Ha" as (l) "[Hlen Ha]".
iDestruct (own_valid_2 with "Hlen Haof_lb") as %Hineq.
apply mono_nat_both_dfrac_valid in Hineq as [_ Hineq].
iDestruct (big_sepS_elem_of_acc _ _ (U64 (length (predurableC ++ membufC'))) with "Ha") as "[Ha Harest]".
{ set_solver. }
iDestruct "Ha" as "[Hlentok2|%Hbad]"; last first.
{ exfalso. lia. }
iDestruct (ghost_map_points_to_valid_2 with "Hlentok Hlentok2") as %[Hbad _].
exfalso.
rewrite dfrac_op_own in Hbad.
done.
}
iMod ("Hqclose" with "[Htok Haof_lb]").
{ iLeft. iNext. iFrame. }
iDestruct "Hq" as "[[$ >$] _]".
by iModIntro.
}
iModIntro.
iIntros "Hctx #Hdur_lb".
iMod ("Hmembuf_fupd" with "Hctx [Hdur_lb]") as "[Hctx Hmembuf_fupd]".
{
iApply (fmlist_lb_mono with "Hdur_lb").
repeat rewrite app_assoc.
apply prefix_app_r.
done.
}
rewrite -app_assoc.
iMod ("Hfupd" with "Hctx") as "[$ HQ]".
iModIntro.
(* length stuff *)
iIntros "Hlen".
iInv "HQinv" as "Hq" "Hqclose".
iDestruct "Hq" as "[[_ >Hlb]|Hq]".
{
iDestruct (own_valid_2 with "Hlen Hlb") as %Hbad.
exfalso.
apply mono_nat_both_dfrac_valid in Hbad as [_ Hbad].
rewrite Hsafesize' in Hbad.
rewrite app_length in Hbad.
rewrite app_length in Hbad.
lia.
}
iDestruct "Hq" as "[>Hlen_tok|[_ >HQexcl2]]"; last first.
{ iDestruct (own_valid_2 with "HQexcl HQexcl2") as %Hbad. contradiction. }
iDestruct ("Hlen_toks_rest" with "[$Hlen_tok]") as "Hlen_toks".
iMod ("Hqclose" with "[HQexcl HQ]") as "_".
{ iRight; iRight; iFrame. iFrame "∗#". }
iMod ("Hmembuf_fupd" with "Hlen") as "Hlen".
(* Use tokens to update mono_nat counter *)
iInv "Haof_len_inv" as ">Ha" "Haclose".
iDestruct "Ha" as (len) "[Hlen2 Ha]".
iDestruct (own_valid_2 with "Hlen Hlen2") as %Hleneq.
apply mono_nat_auth_dfrac_op_valid in Hleneq as [_ <-].
iCombine "Hlen Hlen2" as "Hlen".
iMod (own_update _ _ (mono_nat_auth _ (length (predurableC ++ membufC'))) with "Hlen") as "Hlen".
{
apply mono_nat_update.
repeat rewrite app_length.
lia.
}
iDestruct "Hlen" as "[Hlen Hlen2]".
iMod ("Haclose" with "[Ha Hlen_toks Hlen2]") as "_".
{
iNext. iExists _. rewrite -Hsafesize'.
iFrame.
iApply (big_sepS_impl with "[Ha Hlen_toks]").
{ iApply big_sepS_sep. iFrame. }
iModIntro.
iIntros (x ?) "Hx".
destruct (bool_decide (int.nat (length (predurableC ++ membufC')) < int.nat x)) as [|] eqn:Hineq.
{
apply bool_decide_eq_true in Hineq.
iRight.
iPureIntro.
word.
}
{
apply bool_decide_eq_false in Hineq.
iLeft.
iDestruct "Hx" as "[[$|%Hbad] [$|%Hineq2]]".
exfalso.
word.
}
}
iFrame.
by iModIntro.
}
iMod "HH" as "[Hmembuf_fupd HfupdQ]".
wp_loadField.
wp_apply (release_spec with "[-HΦ Haof_log HfupdQ Htok]").
{
iFrame "#∗".
iNext.
repeat iExists _.
rewrite -app_assoc.
iFrame "∗#".
replace (word.add (length (predurableC ++ membufC)) (length newData)) with
(U64 (length (predurableC ++ membufC'))); last first.
{
repeat rewrite app_length.
rewrite -word.ring_morph_add.
word_cleanup.
repeat (rewrite Nat2Z.inj_add).
rewrite Z.add_assoc.
done.
}
iFrame.
done.
}
wp_pures.
iApply "HΦ".
iFrame.
iModIntro.
iIntros "#Hlb".
rewrite -app_assoc.
iMod ("HfupdQ" with "[Hlb]") as "$"; last by iModIntro.
replace (U64 (length (predurableC ++ membufC'))) with
(word.add (length (predurableC ++ membufC)) (length newData)).
{ iFrame "#". }
repeat rewrite app_length.
repeat (rewrite Nat2Z.inj_add).
rewrite Z.add_assoc.
rewrite -word.ring_morph_add.
unfold U64.
done.
Qed.
Lemma wp_AppendOnlyFile__WaitAppend aof_ptr γ (l:u64) fname P Pcrash :
is_aof aof_ptr γ fname P Pcrash -∗
{{{
True
}}}
AppendOnlyFile__WaitAppend #aof_ptr #l
{{{
RET #(); aof_length_lb γ l
}}}.
Proof.
iIntros "#Haof" (Φ) "!# _ HΦ".
wp_lam.
wp_pures.
iNamed "Haof".
wp_loadField.
wp_apply (acquire_spec with "Hmu_inv").
iIntros "[Hlocked Haof_own]".
wp_pures.
wp_apply (wp_forBreak_cond' with "[-]").
{
iNamedAccu.
}
iModIntro.
iNamed 1.
wp_pures.
iNamed "Haof_own".
wp_loadField.
wp_pures.
wp_if_destruct.
{
wp_pures.
wp_loadField.
wp_apply (wp_condWait with "[- HΦ]").
{
iFrame "#∗".
repeat iExists _. iFrame "#∗".
done.
}
iIntros "[Hlocked Haof_own]".
wp_pures.
iLeft.
iFrame.
done.
}
iSpecialize ("HΦ" with "[Hdurlen_lb]").
{
assert (int.nat l ≤ int.nat durlen) as Hineq.
{
word.
}
unfold aof_length_lb.
replace (int.nat durlen)%nat with ((int.nat durlen) `max` int.nat l)%nat by word.
rewrite mono_nat_lb_op.
iDestruct "Hdurlen_lb" as "[_ $]".
}
iRight.
iSplitL ""; first done.
iModIntro.
wp_pures.
wp_loadField.
wp_apply (release_spec with "[- HΦ]").
{
iFrame "#∗".
repeat iExists _; iFrame "#∗".
done.
}
wp_pures.
iApply "HΦ".
Qed.
Lemma wp_AppendOnlyFile__Close aof_ptr γ P Pcrash fname data :
is_aof aof_ptr γ fname P Pcrash -∗
{{{
aof_log_own γ data
}}}
AppendOnlyFile__Close #aof_ptr
{{{
RET #(); crash_borrow (fname f↦ data ∗ P data) (|C={⊤}=> ∃ data', fname f↦ data' ∗ ▷ Pcrash data')
}}}.
Proof.
iIntros "#Haof" (Φ) "!# Haof_log HΦ".
wp_lam.
wp_pures.
iNamed "Haof".
wp_loadField.
wp_apply (acquire_spec with "Hmu_inv").
iIntros "[Hlocked Haof_own]".
wp_pures.
iNamed "Haof_own".
iNamed "Hclose".
wp_storeField.
wp_loadField.
wp_apply (wp_condSignal).
{ iFrame "#". }
wp_pures.
iDestruct "Haof_log" as "[Haof_log [Hreq_tok Htok]]".
iMod (fmlist_freeze with "Haof_log") as "#Hexpected".
iAssert (aof_mu_invariant aof_ptr mu_ptr γ fname P Pcrash) with "[-Htok HΦ Hlocked]" as "Haof_own".
{
repeat iExists _; iFrame "∗#%".
iExists _, _.
iFrame "HcloseRequested ∗%".
iDestruct "Hclose" as "[_ [_ $]]".
iSplit.
{ iExists _; iFrame "#". }
destruct isClosed; iEval simpl; auto; by iFrame.
}
wp_forBreak_cond.
iClear "Hdurlen_lb Hcrash_wand HdurCond".
iNamed "Haof_own".
iNamed "Hclose".
wp_loadField.
wp_if_destruct.
{ (* aof not closed yet, keep looping *)
wp_pures.
wp_loadField.
wp_apply (wp_condWait with "[-Htok HΦ]").
{
iFrame "#∗".
repeat iExists _; iFrame "∗#%".
iExists _, _.
iFrame "∗#∗".
}
iIntros "[Hlocked Haof_own]".
wp_pures.
iLeft.
iSplitR; first done.
iModIntro.
iFrame.
}
(* aof closed *)
iRight.
iModIntro.
iSplitR; first done.
iDestruct "Hclose" as "[HcloseRest [Hreq #H]]".
iNamed "H".
wp_pure1_credit "Hlc".
iApply fupd_wp.
iInv "HfileEscrow" as "Hi" "Hiclose".
iMod (lc_fupd_elim_later with "Hlc Hi") as "Hi".
iDestruct "Hi" as (?) "[[Hf #Hdata]|Hbad]"; last first.
{
iDestruct (ghost_var_valid_2 with "Htok Hbad") as %Hbad.
exfalso. naive_solver.
}
iMod ("Hiclose" with "[$Htok]").
{ done. }
iModIntro.
iDestruct (fmlist_agree_1 with "Hexpected Hdata") as %->.
wp_pures.
wp_loadField.
wp_apply (release_spec with "[-HΦ Hf]").
{
iFrame "#∗".
iNext.
repeat iExists _; iFrame "∗#%".
iExists _, _. iFrame "∗#".
simpl. iFrame.
}
wp_pures.
iModIntro.
iApply "HΦ".
iFrame.
Qed.
Lemma accessP aof_ptr fname γ P Pcrash data durablePrefix :
is_aof aof_ptr γ fname P Pcrash -∗
aof_durable_lb γ durablePrefix -∗
aof_log_own γ data -∗
(|NC={⊤,⊤∖↑aofN}=> ∃ durableData, ⌜prefix durableData data⌝ ∗
⌜prefix durablePrefix durableData⌝ ∗
▷ P durableData ∗ (▷ P durableData -∗ |NC={⊤∖↑aofN,⊤}=> aof_log_own γ data)
)
.
Proof.
iIntros "#Hinv #Hlb Hlog".
iNamed "Hinv".
iInv "Hctx_inv" as "Hctx" "Hctx_close".
iDestruct "Hctx" as "[[>Hbad _]|Hctx]".
{
rewrite ncfupd_eq.
unfold ncfupd_def.
iIntros (?) "Hnc".
iDestruct (NC_C with "Hnc Hbad") as %Hbad.
by exfalso.
}
iDestruct "Hctx" as (?) "Hctx".
iDestruct "Hctx" as "[[>Hbad [_ >Hbad']]|Hctx]".
{
iDestruct "Hlog" as "[Hlog [Hlog' Hreq]]".
iDestruct (ghost_var_valid_2 with "[$] [$]") as %Hbad.
exfalso.
naive_solver.
}
iDestruct "Hctx" as "(>Hlb1 & >Hl & >Hcurdata2 & Hctx)".
iDestruct "Hlog" as "(Hl'&?)".
iDestruct (fmlist_agree_2 with "Hl' [$]") as %Hpref.
rewrite ncfupd_eq.
unfold ncfupd_def.
iIntros (q) "HNC".
iApply (fupd_mask_weaken (⊤ ∖ ↑aofN)).
{ solve_ndisj. }
iIntros "Hclo".
iModIntro. iFrame "HNC".
iExists _. iFrame.
iSplit; auto.
iDestruct (fmlist_agree_2 with "Hl Hlb") as %Hprefix.
iSplit.
{ iPureIntro.
destruct Hprefix as [??].
destruct Hpref as [??].
eexists _.
rewrite H0.
repeat rewrite -app_assoc.
done.
}
iIntros "HP". iIntros (?) "HNC".
iMod "Hclo".
iMod ("Hctx_close" with "[-HNC]"); last by eauto.
iRight. iExists _. iNext. iFrame.
iRight. iFrame.
Qed.
Lemma accessP_weak aof_ptr fname γ P Pcrash data :
is_aof aof_ptr γ fname P Pcrash -∗
aof_log_own γ data -∗
(|NC={⊤,⊤∖↑aofN}=> ∃ durableData, ⌜prefix durableData data⌝ ∗
▷ P durableData ∗ (▷ P durableData -∗ |NC={⊤∖↑aofN,⊤}=> aof_log_own γ data)
)
.
Proof.
iIntros "#Hinv Hlog".
iNamed "Hinv".
iInv "Hctx_inv" as "Hctx" "Hctx_close".
iDestruct "Hctx" as "[[>Hbad _]|Hctx]".
{
rewrite ncfupd_eq.
unfold ncfupd_def.
iIntros (?) "Hnc".
iDestruct (NC_C with "Hnc Hbad") as %Hbad.
by exfalso.
}
iDestruct "Hctx" as (?) "Hctx".
iDestruct "Hctx" as "[[>Hbad [_ >Hbad']]|Hctx]".
{
iDestruct "Hlog" as "[Hlog [Hlog' Hreq]]".
iDestruct (ghost_var_valid_2 with "[$] [$]") as %Hbad.
exfalso.
naive_solver.
}
iDestruct "Hctx" as "(>Hlb1 & >Hl & >Hcurdata2 & Hctx)".
iDestruct "Hlog" as "(Hl'&?)".
iDestruct (fmlist_agree_2 with "Hl' [$]") as %Hpref.
rewrite ncfupd_eq.
unfold ncfupd_def.
iIntros (q) "HNC".
iApply (fupd_mask_weaken (⊤ ∖ ↑aofN)).
{ solve_ndisj. }
iIntros "Hclo".
iModIntro. iFrame "HNC".
iExists _. iFrame.
iSplit; auto.
iIntros "HP". iIntros (?) "HNC".
iMod "Hclo".
iMod ("Hctx_close" with "[-HNC]"); last by eauto.
iRight. iExists _. iNext. iFrame.
iRight. iFrame.
Qed.
End aof_proof.
|
module Minecraft.Base.PreClassic.Cobblestone.Export
import public Minecraft.Base.PreClassic.Cobblestone.Block.Export
import public Minecraft.Base.PreClassic.Cobblestone.Item.Export
import public Minecraft.Base.PreClassic.Cobblestone.ItemEntity.Export
%default total
|
corollary\<^marker>\<open>tag unimportant\<close> orthogonal_to_subspace_exists: fixes S :: "'a :: euclidean_space set" assumes "dim S < DIM('a)" obtains x where "x \<noteq> 0" "\<And>y. y \<in> span S \<Longrightarrow> orthogonal x y" |
function cnn_toy_data_generator(dataDir)
%CNN_TOY_DATA_GENERATOR
% Generates toy data in the given path: random images of triangles,
% squares and circles.
%
% The directory format is: '<dataDir>/<set>/<label>/<sample>.png', where
% <set> is 'train' or 'val', <label> is an integer between 1 and 3, and
% <sample> is the sample index.
% Copyright (C) 2017 Joao F. Henriques.
% All rights reserved.
%
% This file is part of the VLFeat library and is made available under
% the terms of the BSD license (see the COPYING file).
% Set random seed
rng('default') ;
rng(0) ;
% The sets, and number of samples per label in each set
sets = {'train', 'val'} ;
numSamples = [1500, 150] ;
% Number of polygon points in each class. The circle is created with 50
% points.
numPoints = [3, 4, 50] ;
for s = 1:2 % Iterate sets
for label = 1:3 % Iterate labels
fprintf('Generating images for set %s, label %i...\n', sets{s}, label) ;
mkdir(sprintf('%s/%s/%i', dataDir, sets{s}, label)) ;
for i = 1:numSamples(s) % Iterate samples
% Points of a regular polygon, with random rotation and scale
radius = randi([11, 14]) ;
angles = rand(1) * 2 * pi + (0 : 2 * pi / numPoints(label) : 2 * pi) ;
xs = 16.5 + cos(angles) * radius ;
ys = 16.5 + sin(angles) * radius ;
% Generate image
image = poly2mask(xs, ys, 32, 32) ;
% Save it
imwrite(image, sprintf('%s/%s/%i/%04i.png', dataDir, sets{s}, label, i)) ;
end
end
end
end
|
module Minecraft.Core.Univ
import public Minecraft.Data.Deserialize
import public Minecraft.Data.Type.Ctx
import public Minecraft.Data.Type.Vtbl
%default total
public export
record Univ where
constructor MkUniv
exists : String -> Maybe (Ctx Deserialize)
reflect : (ty : Type) -> Maybe (Vtbl ty)
|
(**
CoLoR, a Coq library on rewriting and termination.
See the COPYRIGHTS and LICENSE files.
- Adam Koprowski, 2004-09-06
- William Delobel, 2005-10-07
This file provides some basic results concerning relations that were
missing in the standard library.
*)
Set Implicit Arguments.
Require Import RelUtil LogicUtil Max Arith Setoid.
Require Export Relations.
(***********************************************************************)
(** strict order *)
Section StrictOrder.
Variable A : Type.
Variable R : relation A.
Record strict_order : Prop := {
sord_trans : transitive R;
sord_irrefl : irreflexive R
}.
Variable so : strict_order.
Lemma so_not_symmetric : forall a b, R a b -> R b a -> False.
Proof.
unfold not; intros a b Rab Rba.
exact (sord_irrefl so (sord_trans so Rab Rba)).
Qed.
Variable eq : relation A.
Variable Req_compat : forall x x' y y',
eq x x' -> eq y y' -> R x y -> R x' y'.
Variable eq_setoid : Setoid_Theory A eq.
Lemma so_strict : forall x y, eq x y -> R x y -> False.
Proof.
intros.
assert (R y x).
apply Req_compat with x y; auto.
apply (Seq_sym A eq eq_setoid); trivial.
absurd (R x x).
unfold not; intro xx; exact (sord_irrefl so xx).
exact (sord_trans so H0 H1).
Qed.
End StrictOrder.
(***********************************************************************)
(** module types for setoids with decidable equality *)
Module Type SetA.
Parameter A : Type.
End SetA.
Module Type Eqset.
Parameter A : Type.
Parameter eqA : relation A.
Notation "X =A= Y" := (eqA X Y) (at level 70).
Parameter sid_theoryA : Setoid_Theory A eqA.
Hint Resolve (Seq_refl A eqA sid_theoryA) : sets.
Hint Resolve (Seq_trans A eqA sid_theoryA) : sets.
Hint Resolve (Seq_sym A eqA sid_theoryA) : sets.
End Eqset.
Module Type Eqset_dec.
Declare Module Export Eq : Eqset.
Parameter eqA_dec : forall x y, {x =A= y} + {~x =A= y}.
End Eqset_dec.
Module Eqset_def (A : SetA) <: Eqset.
Definition A := A.A.
Definition eqA := eq (A:=A).
Definition sid_theoryA := Build_Setoid_Theory _ eqA
(refl_equal (A:=A)) (sym_eq (A:=A)) (trans_eq (A:=A)).
Hint Resolve (Seq_refl A eqA sid_theoryA) : sets.
Hint Resolve (Seq_trans A eqA sid_theoryA) : sets.
Hint Resolve (Seq_sym A eqA sid_theoryA) : sets.
End Eqset_def.
(***********************************************************************)
(** module types for ordered setoids *)
Section Eqset_def_gtA_eqA_compat.
Variable A : Type.
Variable gtA : relation A.
Lemma Eqset_def_gtA_eqA_compat :
forall x x' y y', x = x' -> y = y' -> gtA x y -> gtA x' y'.
Proof.
intros x x' y y' x_x' y_y' x_y.
rewrite <- x_x'; rewrite <- y_y'; trivial.
Qed.
End Eqset_def_gtA_eqA_compat.
Module Type Ord.
Parameter A : Type.
Declare Module Export S : Eqset with Definition A := A.
Parameter gtA : relation A.
Notation "X >A Y" := (gtA X Y) (at level 70).
Parameter gtA_eqA_compat : forall x x' y y',
x =A= x' -> y =A= y' -> x >A y -> x' >A y'.
Hint Resolve gtA_eqA_compat : sets.
End Ord.
Module OrdLemmas (P : Ord).
Export P.
Definition ltA := transp gtA.
Definition geA x y := ~ ltA x y.
Definition leA x y := ~ gtA x y.
Definition AccA := Acc ltA.
Notation "X <A Y" := (ltA X Y) (at level 70).
Notation "X >=A Y" := (geA X Y) (at level 70).
Notation "X <=A Y" := (leA X Y) (at level 70).
Hint Unfold ltA geA leA AccA : sets.
Add Setoid A eqA sid_theoryA as sidA.
Add Morphism gtA
with signature eqA ==> eqA ==> iff
as gtA_morph.
Proof.
split; eauto with sets.
Qed.
Add Morphism ltA
with signature eqA ==> eqA ==> iff
as ltA_morph.
Proof.
split. eauto with sets.
cut (y0 =A= x0). intro. eauto with sets.
apply (Seq_sym _ _ sid_theoryA). hyp.
Qed.
Add Morphism AccA
with signature eqA ==> iff
as AccA_morph.
Proof.
intros a b eq_ab. split.
intro acc_a. inversion acc_a. constructor. intros.
apply H. rewrite eq_ab. hyp.
intros acc_b. inversion acc_b. constructor. intros.
apply H. rewrite <- eq_ab. hyp.
Qed.
End OrdLemmas.
Module Type Poset.
Parameter A : Type.
Declare Module Export O : Ord with Definition A := A.
Parameter gtA_so : strict_order gtA.
Hint Resolve (sord_trans gtA_so) : sets.
Hint Resolve (sord_irrefl gtA_so) : sets.
Hint Resolve (so_not_symmetric gtA_so) : sets.
Hint Resolve (so_strict gtA_so gtA_eqA_compat sid_theoryA) : sets.
End Poset.
Module nat_ord <: Ord.
Module natSet <: SetA.
Definition A := nat.
Definition eqA_dec := eq_nat_dec.
End natSet.
Module S := Eqset_def natSet.
Definition A := nat.
Definition gtA := gt.
Lemma gtA_eqA_compat : forall x x' y y',
x = x' -> y = y' -> x > y -> x' > y'.
Proof.
intros x x' y y' xx' yy'.
rewrite <- xx'; rewrite <- yy'; trivial.
Qed.
End nat_ord.
(***********************************************************************)
(** lemmas on transitive closure *)
Section Transitive_Closure.
Variable A : Type.
Variable R : relation A.
Lemma trans_clos_step_l : forall x y,
R! x y -> R x y \/ (exists2 z, R x z & R! z y).
Proof.
intros x y; compute; intro xy; induction xy.
left; trivial.
inversion IHxy1; inversion IHxy2; right; solve [eauto |
inversion H; try inversion H0; exists x0;
[trivial | constructor 2 with y; auto]].
Qed.
Lemma trans_clos_step_r : forall x y,
R! x y -> R x y \/ (exists2 z, R! x z & R z y).
Proof.
intros x y; compute; intro xy; induction xy.
left; trivial.
inversion_clear IHxy1; inversion_clear IHxy2; right;
solve [eauto | inversion H0; exists x0;
[constructor 2 with y; auto | trivial]].
Qed.
Variable eqA : relation A.
Parameter sid_theoryA : Setoid_Theory A eqA.
Parameter R_eqA_comp : forall x y x' y',
eqA x x' -> eqA y y' -> R x y -> R x' y'.
Parameter R_so : strict_order R.
Hint Resolve R_eqA_comp.
Lemma trans_clos_mirror : forall x y x' y',
eqA x x' -> eqA y y' -> R! x y -> R! x' y'.
Proof.
intros x y x' y' eq_xx' eq_yy' R_xy.
case (trans_clos_step_l R_xy).
intro Rxy; constructor 1; eauto.
intro R_xzy; destruct R_xzy as [w Rxw R_wy].
case (trans_clos_step_r R_wy).
intro Rwy; constructor 1; apply R_eqA_comp with x y;
solve [trivial | apply (sord_trans R_so) with w; trivial].
intro R_wpy; destruct R_wpy as [p Rwp R_py].
constructor 2 with w.
constructor 1; apply R_eqA_comp with x w;
solve [trivial | apply (Seq_refl A eqA sid_theoryA)].
constructor 2 with p.
trivial.
constructor 1; apply R_eqA_comp with p y;
solve [trivial | apply (Seq_refl A eqA sid_theoryA)].
Qed.
Lemma trans_clos_transp : forall x y, transp (R!) x y <-> (transp R)! x y.
Proof.
intros; split; compute.
induction 1.
constructor; trivial.
constructor 2 with y; auto.
induction 1.
constructor; trivial.
constructor 2 with y; auto.
Qed.
End Transitive_Closure.
(***********************************************************************)
(** specification *)
Section Specif.
Inductive sigPS2 (A : Type) (P: A -> Prop) (Q: A -> Set) : Type :=
existPS2: forall x:A, P x -> Q x -> sigPS2 (A:=A) P Q.
Notation "{ x : A # P & Q }"
:= (sigPS2 (fun x:A => P) (fun x:A => Q)) : type_scope.
Variable A : Type.
Variables P Q : A -> Prop.
Definition proj1_sig2 (e: sig2 P Q) :=
match e with
| exist2 a p q => a
end.
End Specif.
(***********************************************************************)
(** lemmas on the option type *)
Section option.
Variable A : Type.
Lemma option_dec : forall (el: option A),
el = None \/ exists w: A, el = Some w.
Proof.
intros.
destruct el.
right; exists a; trivial.
left; trivial.
Qed.
End option.
(***********************************************************************)
(** tactics *)
Ltac pair_destruct t0 t1 :=
first [destruct t0 | intros until t0; destruct t0];
first [destruct t1 | intros until t1; destruct t1];
try contr; simpl.
Ltac rewrite_lr term := apply (proj1 term).
Ltac rewrite_rl term := apply (proj2 term).
Ltac try_solve :=
simpl in *; try (intros; solve
[ contr
| discr
| auto with terms
| tauto
| congruence
]
).
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal4.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Derive Show for lst.
Derive Arbitrary for lst.
Instance Dec_Eq_lst : Dec_Eq lst.
Proof. dec_eq. Qed.
Lemma conj13synthconj5 : forall (lv0 : lst) (lv1 : natural), (@eq natural (len (append lv0 (Cons lv1 lv0))) (Succ (double (len lv0)))).
Admitted.
QuickChick conj13synthconj5.
|
State Before: l : Type ?u.36987
m : Type u_1
n : Type u_2
o : Type ?u.36996
m' : o → Type ?u.37001
n' : o → Type ?u.37006
R : Type ?u.37009
S : Type ?u.37012
α : Type v
β : Type w
γ : Type ?u.37019
inst✝¹ : Zero α
inst✝ : Zero β
f : α → β
h : f 0 = 0
⊢ map 0 f = 0 State After: case a.h
l : Type ?u.36987
m : Type u_1
n : Type u_2
o : Type ?u.36996
m' : o → Type ?u.37001
n' : o → Type ?u.37006
R : Type ?u.37009
S : Type ?u.37012
α : Type v
β : Type w
γ : Type ?u.37019
inst✝¹ : Zero α
inst✝ : Zero β
f : α → β
h : f 0 = 0
i✝ : m
x✝ : n
⊢ map 0 f i✝ x✝ = OfNat.ofNat 0 i✝ x✝ Tactic: ext State Before: case a.h
l : Type ?u.36987
m : Type u_1
n : Type u_2
o : Type ?u.36996
m' : o → Type ?u.37001
n' : o → Type ?u.37006
R : Type ?u.37009
S : Type ?u.37012
α : Type v
β : Type w
γ : Type ?u.37019
inst✝¹ : Zero α
inst✝ : Zero β
f : α → β
h : f 0 = 0
i✝ : m
x✝ : n
⊢ map 0 f i✝ x✝ = OfNat.ofNat 0 i✝ x✝ State After: no goals Tactic: simp [h] |
<!-- dom:TITLE: Learning from data: Bayesian Parameter Estimation -->
# Learning from data: Bayesian Parameter Estimation
<!-- dom:AUTHOR: Christian Forssén at Department of Physics, Chalmers University of Technology, Sweden -->
<!-- Author: -->
**Christian Forssén**, Department of Physics, Chalmers University of Technology, Sweden
Date: **Sep 16, 2019**
Copyright 2018-2019, Christian Forssén. Released under CC Attribution-NonCommercial 4.0 license
<!-- dom:FIGURE:[fig/m1m2.png, width=400 frac=0.8] Joint pdf for the masses of two black holes merging obtained from the data analysis of a gravitational wave signal. This representation of a joint pdf is known as a corner plot. <div id="fig:gw"></div> -->
<!-- begin figure -->
<div id="fig:gw"></div>
<p>Joint pdf for the masses of two black holes merging obtained from the data analysis of a gravitational wave signal. This representation of a joint pdf is known as a corner plot.</p>
<!-- end figure -->
# Inference With Parametric Models
Inductive inference with parametric models is a very important tool in the natural sciences.
* Consider $N$ different models $M_i$ ($i = 1, \ldots, N$), each with parameters $\boldsymbol{\theta}_i$. Each of them implies a sampling distribution for possible data
$$
p(D|\boldsymbol{\theta}_i, M_i)
$$
* The likelihood function is the pdf of the actual, observed data ($D_\mathrm{obs}$) given a set of parameters $\boldsymbol{\theta}_i$:
$$
\mathcal{L}_i (\boldsymbol{\theta}_i) \equiv p(D_\mathrm{obs}|\boldsymbol{\theta}_i, M_i)
$$
* We may be uncertain about $M_i$ (model uncertainty),
* or uncertain about $\boldsymbol{\theta}_i$ (parameter uncertainty).
Parameter Estimation:
:
Premise = We have chosen a model (say $M_1$)
$\Rightarrow$ What can we say about its parameters $\boldsymbol{\theta}_1$?
Model comparison:
:
Premise = We have a set of different models $\{M_i\}$
$\Rightarrow$ How do they compare with each other? Do we have evidence to say that, e.g. $M_1$, is better than the other models?
Model adequacy:
:
Premise = We have a model $M_1$
$\Rightarrow$ Is $M_1$ adequate?
Hybrid Uncertainty:
:
Models share some common params: $\boldsymbol{\theta}_i = \{ \boldsymbol{\varphi}, \boldsymbol{\eta}_i\}$
$\Rightarrow$ What can we say about $\boldsymbol{\varphi}$? (Systematic error is an example)
## Parameter estimation
Overview comments:
* In general terms, "parameter estimation" in physics means obtaining values for parameters (constants) that appear in a theoretical model which describes data (exceptions to this general definition exist of course).
* Conventionally this process is known as "parameter fitting" and the goal is to find the "best fit".
* We will make particular interpretations of these phrases from our Bayesian point of view.
* We will also see how familiar ideas like "least-squares optimization" show up from a Bayesian perspective.
## Bayesian parameter estimation
We will now consider the very important task of model parameter estimation using statistical inference.
Let us first remind ourselves what can go wrong in a fit. We have encountered both **underfitting** (model is not complex enough to describe the variability in the data) and **overfitting** (model tunes to data fluctuations, or terms are underdetermined, causing them to play off against each other). Bayesian methods can prevent/identify both of these situations.
<!-- ===== Example: Measured flux from a star (single parameter) ===== -->
# Example: Measured flux from a star (single parameter)
Adapted from the blog [Pythonic Perambulations](http://jakevdp.github.io) by Jake VanderPlas.
Imagine that we point our telescope to the sky, and observe the light coming from a single star. Our physics model will be that the star's true flux is constant with time, i.e. that it has a fixed value $F_\mathrm{true}$ (we'll also ignore effects like sky noise and other sources of systematic error). Thus, we have a single model parameter: $F_\mathrm{true}$.
We'll assume that we perform a series of $N$ measurements with our telescope, where the $i$-th measurement reports an observed photon flux $F_i$ and is accompanied by an error model given by $e_i$[^errors].
The question is, given this set of measurements $D = \{F_i\}_{i=0}^{N-1}$, and the statistical model $F_i = F_\mathrm{true} + e_i$, what is our best estimate of the true flux $F_\mathrm{true}$?
[^errors]: We'll make the reasonable assumption that errors are Gaussian. In a Frequentist perspective, $e_i$ is the standard deviation of the results of a single measurement event in the limit of repetitions of *that event*. In the Bayesian perspective, $e_i$ is the standard deviation of the (Gaussian) probability distribution describing our knowledge of that particular measurement given its observed value.
Because the measurements are number counts, a Poisson distribution is a good approximation to the measurement process:
```python
%matplotlib inline
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import emcee
```
```python
np.random.seed(1) # for repeatability
F_true = 1000 # true flux, say number of photons measured in 1 second
N = 50 # number of measurements
F = stats.poisson(F_true).rvs(N)
# N measurements of the flux
e = np.sqrt(F) # errors on Poisson counts estimated via square root
```
Now let's make a simple visualization of the "observed" data, see Fig. [fig:flux](#fig:flux).
```python
fig, ax = plt.subplots()
ax.errorbar(F, np.arange(N), xerr=e, fmt='ok', ecolor='gray', alpha=0.5)
ax.vlines([F_true], 0, N, linewidth=5, alpha=0.2)
ax.set_xlabel("Flux");ax.set_ylabel("measurement number");
```
<!-- dom:FIGURE:[fig/singlephotoncount_fig_1.png, width=400 frac=0.8] Single photon counts (flux measurements). <div id="fig:flux"></div> -->
<!-- begin figure -->
<div id="fig:flux"></div>
<p>Single photon counts (flux measurements).</p>
<!-- end figure -->
These measurements each have a different error $e_i$ which is estimated from Poisson statistics using the standard square-root rule. In this toy example we know the true flux that was used to generate the data, but the question is this: given our measurements and statistical model, what is our best estimate of $F_\mathrm{true}$?
Let's take a look at the frequentist and Bayesian approaches to solving this.
### Simple Photon Counts: Frequentist Approach
We'll start with the classical frequentist maximum likelihood approach. Given a single observation $D_i = F_i$, we can compute the probability distribution of the measurement given the true flux $F_\mathrm{true}$, under our assumption of Gaussian errors
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
p(D_i | F_\mathrm{true}, I) = \frac{1}{\sqrt{2\pi e_i^2}} \exp \left( \frac{-(F_i-F_\mathrm{true})^2}{2e_i^2} \right).
\label{_auto1} \tag{1}
\end{equation}
$$
This should be read "the probability of $D_i$ given $F_\mathrm{true}$
equals ...". You should recognize this as a normal distribution with mean $F_\mathrm{true}$ and standard deviation $e_i$.
We construct the *likelihood function* by computing the product of the probabilities for each data point
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
\mathcal{L}(F_\mathrm{true}) = \prod_{i=1}^N p(D_i | F_\mathrm{true}, I),
\label{_auto2} \tag{2}
\end{equation}
$$
where $D = \{D_i\}$ represents the entire set of measurements. Because the value of the likelihood can become very small, it is often more convenient to instead compute the log-likelihood.
**Notice.**
In the following we will use $\log$ to denote the natural logarithm. We will write $\log_{10}$ if we specifically mean the logarithm with base 10.
Combining the previous two equations and computing the log, we have
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
\log\mathcal{L} = -\frac{1}{2} \sum_{i=1}^N \left[ \log(2\pi e_i^2) + \frac{(F_i-F_\mathrm{true})^2}{e_i^2} \right].
\label{_auto3} \tag{3}
\end{equation}
$$
What we'd like to do is determine $F_\mathrm{true}$ such that the likelihood is maximized. At this point we can note that the problem of maximizing the likelihood is equivalent to minimizing the sum
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
\sum_{i=1}^N \frac{(F_i-F_\mathrm{true})^2}{e_i^2},
\label{_auto4} \tag{4}
\end{equation}
$$
which you should recognize as the chi-squared function encountered in the linear regression model.
Therefore, it is not surprising that this particular maximization problem can be solved analytically (i.e. by setting $d\log\mathcal{L}/d F_\mathrm{true} = 0$). This results in the following observed estimate of $F_\mathrm{true}$
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
F_\mathrm{est} = \frac{ \sum_{i=1}^N w_i F_i }{ \sum_{i=1}^N w_i}, \quad w_i = 1/e_i^2.
\label{_auto5} \tag{5}
\end{equation}
$$
Notice that in the special case of all errors $e_i$ being equal, this reduces to
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
F_\mathrm{est} = \frac{1}{N} \sum_{i=1}^N F_i.
\label{_auto6} \tag{6}
\end{equation}
$$
That is, in agreement with intuition, $F_\mathrm{est}$ is simply the mean of the observed data when errors are equal.
We can go further and ask what the error of our estimate is. In the frequentist approach, this can be accomplished by fitting a Gaussian approximation to the likelihood curve at maximum; in this simple case this can also be solved analytically (the sum of Gaussians is also a Gaussian). It can be shown that the standard deviation of this Gaussian approximation is $\sigma_\mathrm{est}$, which is given by
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
\frac{ 1 } {\sigma_\mathrm{est}^2} = \sum_{i=1}^N w_i .
\label{_auto7} \tag{7}
\end{equation}
$$
These results are fairly simple calculations; let's evaluate them for our toy dataset:
```python
w=1./e**2
print(f"""
F_true = {F_true}
F_est = {(w * F).sum() / w.sum():.0f} +/- { w.sum() ** -0.5:.0f} (based on {N} measurements) """)
```
`F_true = 1000`
`F_est = 998 +/- 4 (based on 50 measurements)`
We find that for 50 measurements of the flux, our estimate has an error of about 0.4% and is consistent with the input value.
### Simple Photon Counts: Bayesian Approach
The Bayesian approach, as you might expect, begins and ends with probabilities. Our hypothesis is that the star has a constant flux $F_\mathrm{true}$. It recognizes that what we fundamentally want to compute is our knowledge of the parameter in question given the data and other information (such as our knowledge of uncertainties for the observed values), i.e. in this case, $p(F_\mathrm{true} | D,I)$.
Note that this formulation of the problem is fundamentally contrary to the frequentist philosophy, which says that probabilities have no meaning for model parameters like $F_\mathrm{true}$. Nevertheless, within the Bayesian philosophy this is perfectly acceptable.
To compute this pdf, Bayesians next apply Bayes' Theorem.
If we set the prior $p(F_\mathrm{true}|I) \propto 1$ (a flat prior), we find
$p(F_\mathrm{true}|D,I) \propto p(D | F_\mathrm{true},I) \equiv \mathcal{L}(F_\mathrm{true})$
and the Bayesian probability is maximized at precisely the same value as the frequentist result! So despite the philosophical differences, we see that (for this simple problem at least) the Bayesian and frequentist point estimates are equivalent.
### A note about priors
The prior allows inclusion of other information into the computation, which becomes very useful in cases where multiple measurement strategies are being combined to constrain a single model. The necessity to specify a prior, however, is one of the more controversial pieces of Bayesian analysis.
A frequentist will point out that the prior is problematic when no true prior information is available. Though it might seem straightforward to use a noninformative prior like the flat prior mentioned above, there are some [surprising subtleties](http://normaldeviate.wordpress.com/2013/07/13/lost-causes-in-statistics-ii-noninformative-priors/comment-page-1/) involved. It turns out that in many situations, a truly noninformative prior does not exist! Frequentists point out that the subjective choice of a prior which necessarily biases your result has no place in statistical data analysis.
A Bayesian would counter that frequentism doesn't solve this problem, but simply skirts the question. Frequentism can often be viewed as simply a special case of the Bayesian approach for some (implicit) choice of the prior: a Bayesian would say that it's better to make this implicit choice explicit, even if the choice might include some subjectivity.
### Simple Photon Counts: Bayesian approach in practice
Leaving these philosophical debates aside for the time being, let's address how Bayesian results are generally computed in practice. For a one parameter problem like the one considered here, it's as simple as computing the posterior probability $p(F_\mathrm{true} | D,I)$ as a function of $F_\mathrm{true}$: this is the distribution reflecting our knowledge of the parameter $F_\mathrm{true}$.
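To make this concrete, here is a minimal sketch (not part of the original analysis) of such a direct evaluation: it computes the unnormalized log-posterior for $F_\mathrm{true}$ on a grid, reusing the arrays `F` and `e` generated above. The grid limits are an arbitrary choice made only for illustration.
```python
# Direct grid evaluation of the posterior for the single parameter F_true.
# Since the prior is flat, the log-posterior equals the log-likelihood up to a constant.
F_grid = np.linspace(950, 1050, 1000)
log_post = np.array([-0.5 * np.sum(np.log(2 * np.pi * e**2) + (F - F0)**2 / e**2)
                     for F0 in F_grid])
post = np.exp(log_post - log_post.max())   # subtract the maximum to avoid underflow
post /= np.trapz(post, F_grid)             # normalize to unit area

fig, ax = plt.subplots()
ax.plot(F_grid, post)
ax.set_xlabel(r'$F_\mathrm{true}$')
ax.set_ylabel(r'$p(F_\mathrm{true}|D,I)$');
```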
But as the dimension of the model grows, this direct approach becomes increasingly intractable. For this reason, Bayesian calculations often depend on sampling methods such as Markov Chain Monte Carlo (MCMC). For this practical example, let us apply an MCMC approach using Dan Foreman-Mackey's [emcee](http://dan.iel.fm/emcee/current/) package. Keep in mind here that the goal is to generate a set of points drawn from the posterior probability distribution, and to use those points to determine the answer we seek.
To perform this MCMC, we start by defining Python functions for the prior $p(F_\mathrm{true} | I)$, the likelihood $p(D | F_\mathrm{true},I)$, and the posterior $p(F_\mathrm{true} | D,I)$, noting that none of these need be properly normalized. Our model here is one-dimensional, but to handle multi-dimensional models we'll define the model in terms of an array of parameters $\boldsymbol{\theta}$, which in this case is $\boldsymbol{\theta} = [F_\mathrm{true}]$
```python
def log_prior(theta):
    # theta is an array of parameters; here theta[0] = F_true
    if theta[0] > 0 and theta[0] < 10000:
        return 0  # flat prior within a generous range
    else:
        return -np.inf

def log_likelihood(theta, F, e):
    return -0.5 * np.sum(np.log(2 * np.pi * e ** 2)
                         + (F - theta[0]) ** 2 / e ** 2)

def log_posterior(theta, F, e):
    return log_prior(theta) + log_likelihood(theta, F, e)
```
Now we set up the problem, including generating some random starting guesses for the multiple chains of points.
```python
ndim = 1 # number of parameters in the model
nwalkers = 50 # number of MCMC walkers
nwarm = 1000 # "warm-up" period to let chains stabilize
nsteps = 2000 # number of MCMC steps to take
# we'll start at random locations between 0 and 2000
starting_guesses = 2000 * np.random.rand(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[F,e])
sampler.run_mcmc(starting_guesses, nsteps)
# Shape of sampler.chain = (nwalkers, nsteps, ndim)
# Flatten the sampler chain and discard warm-up points:
samples = sampler.chain[:, nwarm:, :].reshape((-1, ndim))
```
If this all worked correctly, the array `samples` should contain a series of 50,000 points drawn from the posterior. Let's plot them and check. See results in Fig. [fig:flux-bayesian](#fig:flux-bayesian).
```python
fig, ax = plt.subplots()
ax.hist(samples, bins=50, histtype="stepfilled", alpha=0.3, density=True)
ax.set_xlabel(r'$F_\mathrm{est}$')
ax.set_ylabel(r'$p(F_\mathrm{est}|D,I)$');
```
<!-- dom:FIGURE:[fig/singlephotoncount_fig_2.png, width=400 frac=0.8] Bayesian posterior pdf (represented by a histogram of MCMC samples) from flux measurements. <div id="fig:flux-bayesian"></div> -->
<!-- begin figure -->
<div id="fig:flux-bayesian"></div>
<p>Bayesian posterior pdf (represented by a histogram of MCMC samples) from flux measurements.</p>
<!-- end figure -->
## Aside: Best estimates and confidence intervals
The posterior distribution from our Bayesian data analysis is the key quantity that encodes our inference about the values of the model parameters, given the data and the relevant background information. Often, however, we wish to summarize this result with just a few numbers: the best estimate and a measure of its reliability.
There are a few different options for this. The choice of the most appropriate one depends mainly on the shape of the posterior distribution:
### Symmetric posterior pdfs
Since the probability (density) associated with any particular value of the parameter is a measure of how much we believe that it lies in the neighbourhood of that point, our best estimate is given by the maximum of the posterior pdf. If we denote the quantity of interest by $\theta$, with a posterior pdf $P =p(\theta|D,I)$, then the best estimate of its value $\theta_0$ is given by the condition $dP/d\theta|_{\theta=\theta_0}=0$. Strictly speaking, we should also check the sign of the second derivative to ensure that $\theta_0$ represents a maximum.
To obtain a measure of the reliability of this best estimate, we need to look at the width or spread of the posterior pdf about $\theta_0$. When considering the behaviour of any function in the neighbourhood of a particular point, it is often helpful to carry out a Taylor series expansion; this is simply a standard tool for (locally) approximating a complicated function by a low-order polynomial. The linear term is zero at the maximum and the quadratic term is often the dominating one determining the width of the posterior pdf. Ignoring all the higher-order terms we arrive at the Gaussian approximation (see more details below)
<!-- Equation labels as ordinary links -->
<div id="_auto8"></div>
$$
\begin{equation}
p(\theta|D,I) \approx \frac{1}{\sigma\sqrt{2\pi}} \exp \left[ -\frac{(\theta-\mu)^2}{2\sigma^2} \right],
\label{_auto8} \tag{8}
\end{equation}
$$
where the mean $\mu = \theta_0$ and the standard deviation $\sigma = \left( - \left. \frac{d^2L}{d\theta^2} \right|_{\theta_0} \right)^{-1/2}$, where $L$ is the logarithm of the posterior $P$. Our inference about the quantity of interest is conveyed very concisely, therefore, by the 67% Bayesian confidence interval $\theta = \theta_0 \pm \sigma$, and
$$
p(\theta_0-\sigma < \theta < \theta_0+\sigma | D,I) = \int_{\theta_0-\sigma}^{\theta_0+\sigma} p(\theta|D,I) d\theta \approx 0.67.
$$
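As a rough numerical illustration (a sketch only, with arbitrary grid limits and finite-difference step), one can extract $\theta_0$ and $\sigma$ for the photon-count example above directly from the curvature of its log-likelihood, which has the same shape as the log-posterior because the prior was flat:
```python
# Laplace (Gaussian) approximation for the flux example: locate the maximum of
# the log-likelihood and estimate sigma from a central finite-difference
# second derivative.
def logL(F0):
    return log_likelihood([F0], F, e)

theta_grid = np.linspace(950, 1050, 2001)
theta0 = theta_grid[np.argmax([logL(t) for t in theta_grid])]
dtheta = 0.01
d2L = (logL(theta0 + dtheta) - 2 * logL(theta0) + logL(theta0 - dtheta)) / dtheta**2
sigma = (-d2L) ** -0.5
print(f"theta0 = {theta0:.1f}, sigma = {sigma:.1f}  (67% interval: theta0 +/- sigma)")
```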
### Asymmetric posterior pdfs
While the maximum of the posterior ($\theta_0$) can still be regarded as giving the best estimate, the true value is now more likely to be on one side of this rather than the other. Alternatively one can compute the mean value, $\langle \theta \rangle = \int \theta p(\theta|D,I) d\theta$, although this tends to overemphasise very long tails. The best option is probably a compromise that can be employed when having access to a large sample from the posterior (as provided by an MCMC), namely to give the median of this ensemble.
Furthermore, the concept of an error-bar does not seem appropriate in this case, as it implicitly entails the idea of symmetry. A good way of expressing the reliability with which a parameter can be inferred, for an asymmetric posterior pdf, is rather through a *confidence interval*. Since the area under the posterior pdf between $\theta_1$ and $\theta_2$ is proportional to how much we believe that $\theta$ lies in that range, the shortest interval that encloses 67% of the area represents a sensible measure of the uncertainty of the estimate. Obviously we can choose to provide some other degree-of-belief that we think is relevant for the case at hand. Assuming that the posterior pdf has been normalized, to have unit area, we need to find $\theta_1$ and $\theta_2$ such that:
$$
p(\theta_1 < \theta < \theta_2 | D,I) = \int_{\theta_1}^{\theta_2} p(\theta|D,I) d\theta \approx 0.67,
$$
where the difference $\theta_2 - \theta_1$ is as small as possible. The region $\theta_1 < \theta < \theta_2$ is then called the shortest 67% confidence interval.
### Multimodal posterior pdfs
We can sometimes obtain posteriors which are multimodal; i.e. contains several disconnected regions with large probabilities. There is no difficulty when one of the maxima is very much larger than the others: we can simply ignore the subsidiary solutions, to a good approximation, and concentrate on the global maximum. The problem arises when there are several maxima of comparable magnitude. What do we now mean by a best estimate, and how should we quantify its reliability? The idea of a best estimate and an error-bar, or even a confidence interval, is merely an attempt to summarize the posterior with just two or three numbers; sometimes this just can’t be done, and so these concepts are not valid. For the bimodal case we might be able to characterize the posterior in terms of a few numbers: two best estimates and their associated error-bars, or disjoint confidence intervals. For a general multimodal pdf, the most honest thing we can do is just display the posterior itself.
Two options for assigning credible intervals to asymmetric and multimodal pdfs (both are computed from posterior samples in the sketch below):
* Equal-tailed interval: the probability area above and below the interval are equal.
* Highest posterior density (HPD) interval: The posterior density for any point within the interval is larger than the posterior density for any point outside the interval.
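As a concrete comparison of the two conventions, here is a minimal sketch that assumes the flat array `samples` from the flux example above; the helper `hpd_interval` is a name introduced only for this illustration.
```python
def hpd_interval(draws, level=0.67):
    """Shortest interval that contains a fraction `level` of the draws."""
    x = np.sort(np.asarray(draws).ravel())
    n_in = int(np.ceil(level * len(x)))
    # width of every candidate interval containing n_in consecutive sorted draws
    widths = x[n_in - 1:] - x[:len(x) - n_in + 1]
    imin = np.argmin(widths)
    return x[imin], x[imin + n_in - 1]

lo_et, hi_et = np.percentile(samples, [16.5, 83.5])   # equal-tailed 67% interval
lo_hpd, hi_hpd = hpd_interval(samples, 0.67)          # highest posterior density 67%
print(f"equal-tailed: [{lo_et:.0f}, {hi_et:.0f}],  HPD: [{lo_hpd:.0f}, {hi_hpd:.0f}]")
```
For a nearly symmetric, unimodal posterior such as this one the two intervals essentially coincide; they differ noticeably for skewed posteriors.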
### Different views on confidence intervals
A Bayesian confidence interval, or credible interval, or degree-of-belief (DOB) interval is the following: Given this data and other information there is $d \%$ probability that this interval contains the true value of the parameter. E.g. a 95% DOB interval implies that the Bayesian data analyser would bet 20-to-1 that the true result is inside the interval.
A frequentist 95% confidence interval should be understood as follows:
"There is a 95% probability that when I compute a confidence interval from data of this sort that he true value of the parameter will fall within the (hypothetical) space of observations". So the parameter is fixed (no pdf) and the confidence interval is based on random sampling of data.
Let's try again to understand this: If we make a large number of repeated samples, then 95% of the intervals extracted in this way will include the true value of the parameter.
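A quick way to internalize this statement is a small coverage experiment. The sketch below (purely illustrative; the number of repetitions and the use of the Gaussian-approximation interval $F_\mathrm{est} \pm 1.96\,\sigma_\mathrm{est}$ are assumptions) repeats the photon-count experiment many times and counts how often the 95% interval contains the true flux.
```python
# Frequentist coverage check: regenerate the data many times and count how
# often the 95% interval around the weighted mean contains F_true.
n_repeat, n_cover = 1000, 0
for _ in range(n_repeat):
    F_rep = stats.poisson(F_true).rvs(N)
    e_rep = np.sqrt(F_rep)
    w_rep = 1. / e_rep**2
    F_est_rep = np.sum(w_rep * F_rep) / np.sum(w_rep)
    sigma_rep = np.sum(w_rep) ** -0.5
    if abs(F_est_rep - F_true) < 1.96 * sigma_rep:
        n_cover += 1
print(f"fraction of 95% intervals containing F_true: {n_cover / n_repeat:.2f}")
```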
### Simple Photon Counts: Best estimates and confidence intervals
To compute these numbers for our example, you would run:
```python
sampper=np.percentile(samples, [2.5, 16.5, 50, 83.5, 97.5],axis=0).flatten()
print(f"""
F_true = {F_true}
Based on {N} measurements the posterior point estimates are:
...F_est = { np.mean(samples):.0f} +/- { np.std(samples):.0f}
or using credibility intervals:
...F_est = {sampper[2]:.0f} (posterior median)
...F_est in [{sampper[1]:.0f}, {sampper[3]:.0f}] (67% credibility interval)
...F_est in [{sampper[0]:.0f}, {sampper[4]:.0f}] (95% credibility interval) """)
```
`F_true = 1000`
`Based on 50 measurements the posterior point estimates are:`
`...F_est = 998 +/- 4`
`or using credibility intervals:`
`...F_est = 998 (posterior median)`
`...F_est in [993, 1002] (67% credibility interval)`
`...F_est in [989, 1006] (95% credibility interval)`
In this particular example, the posterior pdf is actually a Gaussian (since it is constructed as a product of Gaussians), and the mean and variance from the quadratic approximation will agree exactly with the frequentist approach.
From this final result you might come away with the impression that the Bayesian method is unnecessarily complicated, and in this case it certainly is. Using an MCMC sampler to characterize a one-dimensional normal distribution is a bit like using the Death Star to destroy a beach ball, but we did this here because it demonstrates an approach that can scale to complicated posteriors in many, many dimensions, and can provide nice results in more complicated situations where an analytic likelihood approach is not possible.
Furthermore, as data and models grow in complexity, the two approaches can diverge greatly.
# Example: Gaussian noise and averages
The example in the demonstration notebook is from Sivia's book. How do we infer the mean and standard deviation of a Gaussian distribution from $M$ measurements $D = \{ x_k \}_{k=0}^{M-1}$ that should be distributed according to a normal distribution $p( D | \mu,\sigma,I)$?
Start from Bayes theorem
$$
p(\mu,\sigma | D, I) = \frac{p(D|\mu,\sigma,I) p(\mu,\sigma|I)}{p(D|I)}
$$
* Remind yourself about the names of the different terms.
* It should become intuitive what the different probabilities (pdfs) describe.
* Bayes theorem tells you how to flip from (hard-to-compute) $p(\mu,\sigma | D, I) \Leftrightarrow p(D|\mu,\sigma,I)$ (easier-to-compute).
Aside on the denominator, which is known as the "data probability" or "marginalized likelihood" or "evidence".
* With $\theta$ denoting a general vector of parameters we must have
$$
p(D|I) = \int d\theta p(D|\theta,I) p(\theta|I).
$$
* This integration (or marginalization) over all parameters is often difficult to perform.
* Fortunately, for **parameter estimation** we don't need $p(D|I)$ since it doesn't depend on $\theta$. We usually only need relative probabilities, or we can determine the normalization $N$ after we have computed the unnormalized posterior
$$
p(\theta | D,I) = \frac{1}{N} p(D|\theta,I) p(\theta|I).
$$
If we use a uniform prior $p(\theta | I ) \propto 1$ (in a finite volume), then the posterior is proportional to the **likelihood**
$$
p(\theta | D,I) \propto p(D|\theta,I) = \mathcal{L}(\theta)
$$
In this particular situation, the mode of the likelihood (which would correspond to the point estimate of maximum likelihood) is equivalent to the mode of the posterior pdf in the Bayesian analysis.
The real use of the prior, however, is to include into the analysis any additional information that you might have. The prior statement makes such additional assumptions and information very explicit.
But how do we actually compute the posterior in practice? Most often we won't be able to get an analytical expression, but we can sample the distribution using a method known as Markov Chain Monte Carlo (MCMC).
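As a minimal sketch of what such a sampling calculation might look like for this two-parameter problem (this is not the demonstration notebook: the synthetic data, the simple flat priors, and the sampler settings below are assumptions made only for illustration):
```python
# MCMC sketch for inferring (mu, sigma) of a Gaussian from M measurements x_k.
M = 100
x_k = stats.norm(loc=2.0, scale=0.5).rvs(M)        # synthetic data for the sketch

def log_posterior_gauss(theta, x):
    mu, sigma = theta
    if sigma <= 0 or not (-100 < mu < 100):        # flat prior within simple bounds
        return -np.inf
    return -0.5 * np.sum(np.log(2 * np.pi * sigma**2) + (x - mu)**2 / sigma**2)

ndim, nwalkers = 2, 50
start = np.column_stack([np.random.normal(2.0, 0.1, nwalkers),
                         np.random.uniform(0.3, 0.8, nwalkers)])
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior_gauss, args=[x_k])
sampler.run_mcmc(start, 2000)
mu_sigma_samples = sampler.chain[:, 1000:, :].reshape((-1, ndim))
print(np.percentile(mu_sigma_samples, [16.5, 50, 83.5], axis=0))
```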
# Example: Fitting a straight line
The next example that we will study is the well-known fit of a straight line. A minimal sampling sketch follows the model specification below.
* Here the theoretical model is
$$
y_\mathrm{th}(x; \theta) = m x + b,
$$
with parameters $\theta = [b,m]$.
* The statistical model for the data is
$$
y_{\mathrm{exp},i} = y_{\mathrm{th},i} + \delta y_{\mathrm{exp},i},
$$
where we often assume that the experimental errors are independent and normally distributed so that
$$
y_i \sim \mathcal{N} \left( y_\mathrm{th}(x_i; \theta), e_i^2 \right).
$$
* Are independent errors always a good approximation?
* An even better statistical model for theoretical models with a quantified, finite resolution would be
$$
y_\mathrm{exp} = y_\mathrm{th} + \delta y_\mathrm{exp} + \delta y_\mathrm{th}.
$$
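Here is the minimal sampling sketch referred to above. It follows the same pattern as the flux example; the synthetic data, the flat box prior, and the sampler settings are assumptions made only for illustration.
```python
# MCMC sketch for the straight-line model y_th = m*x + b with known errors e_y.
b_true, m_true = 1.0, 2.0
x = np.linspace(0, 10, 20)
e_y = 0.5 * np.ones_like(x)
y = m_true * x + b_true + np.random.normal(0., e_y)

def log_posterior_line(theta, x, y, e_y):
    b, m = theta
    if not (-100 < b < 100 and -100 < m < 100):    # flat prior inside a box
        return -np.inf
    y_th = m * x + b
    return -0.5 * np.sum(np.log(2 * np.pi * e_y**2) + (y - y_th)**2 / e_y**2)

ndim, nwalkers = 2, 50
start = np.random.normal([1.0, 2.0], 0.1, size=(nwalkers, ndim))
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior_line, args=[x, y, e_y])
sampler.run_mcmc(start, 2000)
line_samples = sampler.chain[:, 1000:, :].reshape((-1, ndim))   # columns: [b, m]
```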
### Why normal distributions?
Let us give a quick motivation for why Gaussian distributions show up so often. Say that we have a pdf $p(\theta | D,I)$. Our best estimate from this pdf will be the mode $\theta_0$, where
$$
\left.
\frac{ \partial p }{ \partial \theta }
\right|_{\theta_0} = 0, \qquad
\left. \frac{ \partial^2 p }{ \partial \theta^2 }
\right|_{\theta_0} < 0.
$$
The distribution usually varies very rapidly so we study $L(\theta) \equiv \log p$ instead.
Near the peak, it behaves as
$$
L(\theta) = L(\theta_0) + \frac{1}{2} \left. \frac{\partial^2 L}{\partial \theta^2} \right|_{\theta_0} \left( \theta - \theta_0 \right)^2 + \ldots,
$$
where the first-order term is zero since we are expanding around a maximum and $\partial L / \partial\theta = 0$.
If we neglect higher-order terms we find that
$$
p(\theta|D,I) \approx A \exp \left[ \frac{1}{2} \left. \frac{\partial^2 L}{\partial \theta^2} \right|_{\theta_0} \left( \theta - \theta_0 \right)^2 \right],
$$
which is a Gaussian $\mathcal{N}(\mu,\sigma^2)$ with
$$
\mu = \theta_0, \qquad \sigma^2 = \left( - \left. \frac{\partial^2 L}{\partial \theta^2} \right|_{\theta_0} \right)^{-1}.
$$
## Correlations
In the "fitting a straight-line" example you should find that the joint pdf for the slope and the intercept $[m, b]$ corresponds to a slanted ellipse. That result implies that the model parameters are **correlated**.
* Try to understand the correlation that you find in this example.
Let us explore correlations by studying the behavior of the pdf at the maximum.
A Taylor expansion for a bivariate pdf $p(x,y)$ around the mode $(x_0,y_0)$ gives
$$
p(x,y) \approx p(x_0,y_0) + \frac{1}{2} \begin{pmatrix} x-x_0 & y-y_0 \end{pmatrix}
H
\begin{pmatrix} x-x_0 \\ y-y_0 \end{pmatrix},
$$
where $H$ is the symmetric Hessian matrix
$$
\begin{pmatrix}
A & C \\ C & B
\end{pmatrix},
$$
with elements
$$
A = \left. \frac{\partial^2 p}{\partial x^2} \right|_{x_0,y_0}, \quad
B = \left. \frac{\partial^2 p}{\partial y^2} \right|_{x_0,y_0}, \quad
C = \left. \frac{\partial^2 p}{\partial x \partial y} \right|_{x_0,y_0}.
$$
* So in this quadratic approximation the contour is an ellipse centered at $(x_0,y_0)$ with orientation and eccentricity determined by $A,B,C$.
* The principal axes are found from the eigenvectors of $H$.
* Depending on the orientation of the ellipse (in particular the sign of the off-diagonal element $C$), the parameters are either (i) not correlated, (ii) correlated, or (iii) anti-correlated; the sketch below extracts these quantities from the MCMC samples.
* Take a minute to consider what that implies.
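Here is a short numerical sketch of these statements. To keep the connection to the one-dimensional case, the quadratic expansion is applied to the log-pdf rather than to $p$ itself, so that the negative inverse Hessian plays the role of the covariance matrix; the values of $A$, $B$, $C$ are made up purely for illustration.

```python
import numpy as np

# Hypothetical second derivatives of log p at the mode (illustrative values only).
A, B, C = -4.0, -2.5, 1.5
H = np.array([[A, C],
              [C, B]])

cov = -np.linalg.inv(H)                    # covariance of the Gaussian approximation
corr = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
eigvals, eigvecs = np.linalg.eigh(cov)     # principal axes of the error ellipse

print(f"correlation coefficient: {corr:+.2f}")  # its sign follows the sign of C
print("semi-axis lengths:", np.sqrt(eigvals))
print("axis directions (columns):")
print(eigvecs)
```

With $C > 0$ the ellipse is tilted so that the two parameters are correlated; flipping the sign of $C$ gives the anti-correlated case, and $C = 0$ gives an axis-aligned ellipse with uncorrelated parameters.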
|
Formal statement is: lemma uniformly_continuous_on_Cauchy: fixes f :: "'a::metric_space \<Rightarrow> 'b::metric_space" assumes "uniformly_continuous_on S f" "Cauchy X" "\<And>n. X n \<in> S" shows "Cauchy (\<lambda>n. f (X n))" Informal statement is: If $f$ is uniformly continuous on a set $S$, and if the sequence $X$ is Cauchy and takes values in $S$, then the sequence $f(X)$ is Cauchy. |
\documentclass[12pt]{article}
\usepackage[]{algorithm2e}
\usepackage{ amssymb }
\usepackage{amsmath}
\usepackage[hyphens]{url}
\usepackage{listings}
\usepackage{xcolor}
\definecolor{listinggray}{gray}{0.9}
\definecolor{lbcolor}{rgb}{0.9,0.9,0.9}
\lstset{
backgroundcolor=\color{lbcolor},
tabsize=4,
language=C++,
captionpos=b,
tabsize=3,
frame=lines,
numbers=left,
numberstyle=\tiny,
numbersep=5pt,
breaklines=true,
showstringspaces=false,
basicstyle=\footnotesize,
% identifierstyle=\color{magenta},
keywordstyle=\color[rgb]{0,0,1},
commentstyle=\color[rgb]{0,0.5,0},
stringstyle=\color{red}
}
\begin{document}
\title{Project 01 CS-790}
\author{Brandon Bluemner}
\date{2017}
\maketitle
% //Start
\begin{abstract}
Analysis of a C++11 implementation of Dijkstra's algorithm.
\end{abstract}
% ==========================================================================================================================
% Algorithm Section 1
% ==========================================================================================================================
\section[Algorithm]{Algorithm}
Algorithm taken from class, with some additional information from the Wikipedia article \cite{ Wikipedia}.
\begin{algorithm}
\KwData{ \textit{source:} source node, \textit{$goal_v$:} collection or goals,\\ \textit{cost:} function that returns cost between edge $u \to u_i$, \\ \textit{succ:} function that returns the neighbors of node u }
\KwResult{ \textit{path:} collection to store path in}
\SetKwProg{Fn}{Function}{}{}
\Fn{run(source, $goal_v$, path, cost, succ)}{
$I$ u;
$T$ \_cost $\leftarrow \infty$;
$frontier.push( {0,source})$ // priority queue \\
$current.insert(source)$ // keep track of the nodes currently in the frontier \\
$map<I,T> g $ // maps each node index I to its cost of type T\\
$g[source]$ $\leftarrow$ 0;
$I$ \_goal $\leftarrow goal_v.at(0)$\\
\While{ $frontier.empty() = false$}{
$u$ $\leftarrow$ frontier.top() // get next node \\
$frontier.pop()$ // remove the next node from the queue \\
$current.erase(current.find(u))$ // remove u from current \\
\For{$auto goal$ $\in$ $goal_v$}{
\If{$ u = goal$ and $g[goal]<$\_cost}{
\_cost $\leftarrow g[goal]$;
\_goal $\leftarrow goal$ \\
}
}
$explored.insert(u)$;
$vector<Edge> successor$; $ succ(u,successor)$;
\\
\For{$ auto$ $s : successor $} {
$I ui \leftarrow s.get_target()$\\
// ui not in explored and ui not in frontier\\
\If{$explored.find(ui) = explored.end() and current.find(ui) = current.end()$}{
$g[ui] \leftarrow g[u] + cost(u,ui)$; $frontier.push({g[ui],ui})$\\
$current.insert(ui)$;
$path[ui] \leftarrow u$
}
\ElseIf{$current.find(ui) \neq current.end()$}{
\If{ $g[u] + cost(u,ui) < g[ui]$ }{
$g[ui] \leftarrow g[u] + cost(u,ui)$;
$path[ui] \leftarrow u$
}
}
\ElseIf{$explored.find(ui) \neq explored.end()$}
{
\If{ $g[u] + cost(u,ui) < g[ui] $}{
$explored.erase(explored.find(ui))$;
$frontier.push({g[ui],ui})$;
$current.insert(source)$;
}
}
}
}
}
\end{algorithm}
\clearpage
% ==========================================================================================================================
% Implementation Section 2
% ==========================================================================================================================
\section[Implementation]{Implementation}
The implementation of Dijkstra's algorithm utilizes several of the C++11 standard library data structures.
Below is how each data structure is defined, along with the running time of its operations\cite{ northwestern}.
The core algorithm is implemented under the betacore namespace.
%----------------------------------------
% Std::Vector
% ---------------------------------------
\subsection{std::vector}
\begin{lstlisting}
template<
class T,
class Allocator = std::allocator<T>
> class vector;
\end{lstlisting}
\begin{tabular}{ l l l }
\textbf{Constructors} & \textbf{Description} & \textbf{Big - OH}\\
vector$<$T$>$ v; & Make an empty vector. & O(1)\\
vector$<$T$>$ v(n); & Make a vector with N elements. & O(n)\\
vector$<$T$>$ v(n, value); & Make a vector with N elements, initialized to value. & O(n)\\
vector$<$T$>$ v(begin, end); & Make a vector and copy the elements from begin to end & O(n)\\
\end{tabular}
\\
\\
\\
\begin{tabular}{ l l l }
\textbf{Accessors} & \textbf{Description} & \textbf{Big - OH}\\
v[i]; & Return (or set) the I'th element. & O(1)\\
v.at(i); & Return (or set) the I'th element, with bounds checking. & O(1)\\
v.size(); & Return current number of elements. & O(1)\\
v.empty(); & Return true if vector is empty. & O(1)\\
v.begin(); & Return random access iterator to start. & O(1)\\
v.end(); & Return random access iterator to end. & O(1)\\
v.front(); & Return the first element. & O(1)\\
v.back(); & Return the last element. & O(1)\\
v.capacity(); & Return maximum number of elements. & O(1)\\
\end{tabular}
\\
\\
\\
\begin{tabular}{ l l l }
\textbf{Modifiers} & \textbf{Description} & \textbf{Big - OH}\\
v.push\_back(value); & Add value to end. & O(1) (amortized)\\
v.insert(iterator, value); & Insert value at the position indexed by iterator. & O(n)\\
v.pop\_back(); & Remove value from end. & O(1)\\
v.erase(iterator); & Erase value indexed by iterator. & O(n)\\
v.erase(begin, end); & Erase the elements from begin to end & O(n)\\
\end{tabular}
\clearpage
%----------------------------------------
% Std::priority queue
% ---------------------------------------
\subsection{std::priority\_queue}\label{pq}
\begin{tabular}{ p{5cm} p{7cm} p{5cm}}
\textbf{Constructors} & \textbf{Description} & \textbf{Big - OH}\\
priority\_queue$<$T, container$<$T$>$, comparison$<$T$>$ $>$ q & Make an empty priority queue using the given container to hold values, and comparison to compare values. container defaults to vector$<$T$>$ and comparison defaults to less$<$T$>$. & O(1)\end{tabular}
\\
\\
\\
\begin{tabular}{ l l l }
\textbf{Accessors} & \textbf{Description} & \textbf{Big - OH}\\
q.top(); & Return the ``biggest'' element. & O(1)\\
q.size(); & Return current number of elements. & O(1)\\
q.empty(); & Return true if priority queue is empty. & O(1)
\end{tabular}
\\
\\
\\
\begin{tabular}{ l l l }
\textbf{Modifiers} & \textbf{Description} & \textbf{Big - OH}\\
q.push(value); & Add value to priority queue. & O(log n)\\
q.pop(); & Remove biggest value. & O(log n)\\
\end{tabular}
\clearpage
%----------------------------------------
% Std::Set
% ---------------------------------------
\subsection{std::set} \label{stdset}
\begin{tabular}{ p{5cm} p{7cm} p{5cm}}
\textbf{Constructors} & \textbf{Description} & \textbf{Big - OH}\\
set $<$type, compare$>$ s; & Make an empty set. compare should be a binary predicate for ordering the set. It's optional and will default to a function that uses operator$<$. & O(1)\\
set $<$type, compare$>$ s(begin, end); & Make a set and copy the values from begin to end. & O(n log n)
\end{tabular}
\\
\\
\\
\begin{tabular}{ p{5cm} p{7cm} p{5cm}}
\textbf{Accessors} & \textbf{Description} & \textbf{Big - OH}\\
s.find(key) & Return an iterator pointing to an occurrence of key in s, or s.end() if key is not in s. & O(log n)\\
s.lower\_bound(key) & Return an iterator pointing to the first occurrence of an item in s not less than key, or s.end() if no such item is found. & O(log n)\\
s.upper\_bound(key) & Return an iterator pointing to the first occurrence of an item greater than key in s, or s.end() if no such item is found. & O(log n)\\
s.equal\_range(key) & Returns pair$<$lower\_bound(key), upper\_bound(key)$>$. & O(log n)\\
s.count(key) & Returns the number of items equal to key in s. & O(log n)\\
s.size() & Return current number of elements. & O(1)\\
s.empty() & Return true if set is empty. & O(1)\\
s.begin() & Return an iterator pointing to the first element. & O(1)\\
s.end() & Return an iterator pointing one past the last element. & O(1)
\end{tabular}
\clearpage
%----------------------------------------
% Std::Map
% ---------------------------------------
\subsection{std::Map}
\begin{tabular}{ p{5cm} p{7cm} p{5cm} }
\textbf{Constructors} & \textbf{Description} & \textbf{Big - OH}\\
map$<$ key\_type, value\_type, key\_compare $>$ m; & Make an empty map. key\_compare should be a binary predicate for ordering the keys. It's optional and will default to a function that uses operator$<$. & O(1)\\
map$<$ key\_type, value\_type, key\_compare $>$ m(begin, end); & Make a map and copy the values from begin to end. & O(n log n)
\end{tabular}
\\
\\
\\
\begin{tabular}{ p{5cm} p{7cm} p{5cm} }
\textbf{Accessors} & \textbf{Description} & \textbf{Big - OH}\\
m[key] & Return the value stored for key. This adds a default value if key not in map. & O(log n)\\
m.find(key) & Return an iterator pointing to a key-value pair, or m.end() if key is not in map. & O(log n)\\
m.lower\_bound(key) & Return an iterator pointing to the first pair containing key, or m.end() if key is not in map. & O(log n)\\
m.upper\_bound(key) & Return an iterator pointing one past the last pair containing key, or m.end() if key is not in map. & O(log n)\\
m.equal\_range(key) & Return a pair containing the lower and upper bounds for key. This may be more efficient than calling those functions separately. & O(log n)\\
m.size(); & Return current number of elements. & O(1)\\
m.empty(); & Return true if map is empty. & O(1)\\
m.begin() & Return an iterator pointing to the first pair. & O(1)\\
m.end() & Return an iterator pointing one past the last pair. & O(1)
\end{tabular}
\\
\\
\\
\begin{tabular}{ p{5cm} p{7cm} p{5cm} }
\textbf{Modifiers} & \textbf{Description} & \textbf{Big - OH}\\
m[key] = value; & Store value under key in map. & O(log n)\\
m.insert(pair) & Inserts the <key, value> pair into the map. Equivalent to the above operation. & O(log n)
\end{tabular}
\subsection{std::pair}
\begin{tabular}{ p{5cm} p{10cm} }
default (1) & constexpr pair\\
copy / move (2) & template$<$class U, class V$>$ pair (const pair$<$U,V$>$\& pr);
template$<$class U, class V$>$ pair (pair$<$U,V$>$\&\& pr);
pair (const pair\& pr) = default;
pair (pair\&\& pr) = default;\\
initialization (3) & pair (const first\_type\& a, const second\_type\& b);
template$<$class U, class V$>$ pair (U\&\& a, V\&\& b);\\
piecewise (4) &
template $<$class... Args1, class... Args2$>$
pair (piecewise\_construct\_t pwc, tuple$<$Args1...$>$ first\_args,
tuple$<$Args2...$>$ second\_args);
\end{tabular}
\begin{lstlisting}
template <class T1,class T2>
pair<T1,T2> make_pair (T1 x, T2 y)
{
return ( pair<T1,T2>(x,y) );
}
\end{lstlisting}
Helper function template used to construct a std::pair.
%----------------------------------------
% Betacore::Node
% ---------------------------------------
\subsection{Betacore::Node}
\begin{lstlisting}
template<typename I>
class Node{
private:
I id;
std::string name;
public:
Node(I &id,std::string &name)
std::string get_name()
void setName(const std::string &name)
I get_id()
};
\end{lstlisting}
Helper class that stores minimal information about a node;
more information can be stored in the CSV file.
%----------------------------------------
% Betacore::Edge
% ---------------------------------------
\subsection{Betacore::Edge}
\begin{lstlisting}
template<typename T, typename I>
class Edge{
private:
I id;
I source;
I target;
T cost;
public:
Edge(){}
Edge(I &source, I &target, T &cost)
I get_source()
I get_target()
T get_cost()
};
\end{lstlisting}
Helper class that stores minimal information about an Edge;
more information can be stored in the CSV file.
%----------------------------------------
% Betacore::Graph
% ---------------------------------------
\subsection{Betacore::Graph}
\begin{lstlisting}
template<typename T, typename I>
class Graph{
private:
std::vector<betacore::Node<I>> nodes
std::vector<betacore::Edge<T,I>> edges
betacore::Node<I> * source
std::vector<betacore::Node<I>> targets
void parse_line(std::string &line)
public:
Graph()
void successor(I &node, std::vector<Edge<T,I>> &result)
void get_edges(std::vector<betacore::Edge<T,I>> &edges)
Node<I> get_node(I id)
void add_node(Node<I> &node)
void remove_node(I id)
load_from_file(std::string file_path)
T cost(I u, I ui)
void print()
};
\end{lstlisting}
Betacore::Graph is an implementation of a directed graph that reads from a
modified comma-delimited file. This allows the graph state to change
without recompiling the code; however, it is a potential bottleneck due to
the read speed of the hard disk (for gigabyte-sized files this should be
modified to stream the input and split the graph into regions, loading each region
only when needed, thus saving memory).
Template$<$typename T, typename I$>$ allows the precision of the graph types to
be determined by the implementation: T (a float, double, or long double) is
used for floating-point arithmetic, and I (a char, int, long, or long long int) is
used for indexing the nodes. Depending on the size of the graph, changing the
data types for T and I can have a performance and precision impact.
%----------------------------------------
% Betacore::Dijkstra
% ---------------------------------------
\subsection{Betacore::Dijkstra} \label{betadij}
\begin{lstlisting}
struct Dijkstra_Exception : public std::exception {
const char * what () const throw () {
return "Dijkstra Exception";
}
};
template<typename T, typename I>
class Dijkstra{
private:
std::priority_queue<std::pair<T,I>,std::vector<std::pair<T,I>>, std::greater<std::pair<T,I>> > frontier;
std::set<I> current;
std::set<I> explored;
T cost();
public:
Dijkstra(){
}
~Dijkstra(){
}
void run (
I source,
I goal,
std::map<I,I> &path,
std::function<T( I u, I ui)> &cost,
std::function<void(I &node, std::vector<Edge<T,I>> &result)> &Succ
);
void run (
I source,
std::vector<I> &goal_v,
std::map<I,I> &path,
std::function<T( I u, I ui)> &cost,
std::function<void(I &node, std::vector<Edge<T,I>> &result)> &Succ
);
};
\end{lstlisting}
The Betacore::Dijkstra implementation utilizes std::priority\_queue (see section \ref{pq}).
The priority\_queue uses the std::greater comparator, which orders the queue by the edge cost$<$T$>$. The goal is the minimum value, so using std::greater
puts the smallest value in the first position, i.e.\ the shortest edge$<$T$>$.
A limitation of std::priority\_queue is that there is no contains method. That
problem is overcome by keeping a std::set (see section \ref{stdset}) to track which nodes are in the frontier.
So the std::priority\_queue frontier keeps the edges in order, and the std::set current answers the containment queries.
This doubles the space complexity but avoids having to implement a new child class of std::priority\_queue. Nodes
are known by their index type I and the edges by the Edge class.
\\
\\
\begin{lstlisting}
std::function<T( I u, I ui)> &cost
\end{lstlisting}
This function, passed into the algorithm, determines the cost of the edge from node $u \to u_i$. It
must be provided by the user, which allows for the most flexibility.
\\
\begin{lstlisting}
std::function<void(I &node, std::vector<Edge<T,I>> &result)> &Succ
\end{lstlisting}
This function, passed into the algorithm, determines the neighbor nodes of $u$ and
stores the result in the std::vector \&result. To change the implementation, just make sure
that you add the index I of each neighboring Edge$<$T,I$>$ to the result vector.
% ==========================================================================================================================
% Proof of correctness Section 3
% ==========================================================================================================================
% \section[Proof of correctness]{Proof of correctness}
\section{Proof of correctness}
Note: the reported running time does not include loading from file; it is the running time of the algorithm only.
There is an additional graph, not required by the assignment,
that was used to verify the algorithm.
Due to the simplistic nature of graph\_01, it shows that in an ideal case (the non-ideal case being a negative cycle) the algorithm takes the shortest path,
assuming the edges are provided by the Succ function (\ref{betadij}).
\subsection{Flight Plans}
Sample Run output:
\begin{lstlisting}
_______________________________________________________________
Heuristics
_______________________________________________________________
../data/graph_gt_7_3.csv
N,1,SFO
N,2,ORD
N,3,BOS
N,4,PVD
N,5,JFK
N,6,BWI
N,7,MIA
N,8,DFW
N,9,LAX
#
E,1,2,1846
E,2,1,1846
#
E,1,8,1464
E,8,1,1464
#
E,1,9,337
E,9,1,337
#
E,2,3,867
E,3,2,867
#
E,2,4,849
E,4,2,849
#
E,2,5,740
E,5,2,740
#
E,2,6,621
E,6,2,621
#
E,2,8,802
E,8,2,802
#
E,3,5,187
E,5,3,187
#
E,3,7,1258
E,7,3,1258
#
E,4,5,144
E,5,4,144
#
E,5,6,184
E,6,5,184
#
E,5,7,1090
E,7,5,1090
#
E,5,8,1391
E,8,5,1391
#
E,6,7,946
E,7,6,946
#
E,7,8,1121
E,8,7,1121
#
E,7,9,2342
E,9,7,2342
#
E,8,9,1235
E,9,8,1235
#
T,1,SFO
T,9,LAX
S,6,BWI
_______________________________________________________________
Edge:1->2 cost:1846
Edge:2->1 cost:1846
Edge:1->8 cost:1464
Edge:8->1 cost:1464
Edge:1->9 cost:337
Edge:9->1 cost:337
Edge:2->3 cost:867
Edge:3->2 cost:867
Edge:2->4 cost:849
Edge:4->2 cost:849
Edge:2->5 cost:740
Edge:5->2 cost:740
Edge:2->6 cost:621
Edge:6->2 cost:621
Edge:2->8 cost:802
Edge:8->2 cost:802
Edge:3->5 cost:187
Edge:5->3 cost:187
Edge:3->7 cost:1258
Edge:7->3 cost:1258
Edge:4->5 cost:144
Edge:5->4 cost:144
Edge:5->6 cost:184
Edge:6->5 cost:184
Edge:5->7 cost:1090
Edge:7->5 cost:1090
Edge:5->8 cost:1391
Edge:8->5 cost:1391
Edge:6->7 cost:946
Edge:7->6 cost:946
Edge:7->8 cost:1121
Edge:8->7 cost:1121
Edge:7->9 cost:2342
Edge:9->7 cost:2342
Edge:8->9 cost:1235
Edge:9->8 cost:1235
Nodes:1
Nodes:2
Nodes:3
Nodes:4
Nodes:5
Nodes:6
Nodes:7
Nodes:8
Nodes:9
_______________________________________________________________
Node U::6
Other Goal Found:1 cost:0
Other Goal Found:9 cost:0
Adding to frontier node:2 cost:621
Adding to frontier node:5 cost:184
Adding to frontier node:7 cost:946
Node U::5
Other Goal Found:1 cost:0
Other Goal Found:9 cost:0
Adding to frontier node:3 cost:371
Adding to frontier node:4 cost:328
Adding to frontier node:8 cost:1575
Node U::4
Other Goal Found:1 cost:0
Other Goal Found:9 cost:0
Node U::3
Other Goal Found:1 cost:0
Other Goal Found:9 cost:0
Node U::2
Other Goal Found:1 cost:0
Other Goal Found:9 cost:0
Adding to frontier node:1 cost:2467
+adding edge:2 cost:1423
Node U::7
Other Goal Found:1 cost:2467
Other Goal Found:9 cost:0
Adding to frontier node:9 cost:3288
Node U::8
Other Goal Found:1 cost:2467
Other Goal Found:9 cost:3288
+adding edge:8 cost:2658
Node U::1
Min Goal Found:1 cost:2467
Other Goal Found:9 cost:2658
Node U::9
Other Goal Found:1 cost:2467
Other Goal Found:9 cost:2658
explored node count:8
explored node:1
explored node:2
explored node:3
explored node:4
explored node:5
explored node:6
explored node:7
explored node:8
frontier node count:0
path:
1<-2<-6
cost:2467
Running time(includes std::cout): 0.000282s
cpu start: 0 cpu end:1 CLOCKS_PER_SEC:1000
cpu time(includes std::cout): 0.001s
_______________________________________________________________
\end{lstlisting}
* Note: the CPU run time may not appear accurate due to the running system (see System information \ref{sysinfo}).
Doing the trace by hand yielded similar results, with the minimum path being BWI $\to$ ORD $\to$ SFO. In the output
the entire search state is dumped (which can hurt performance), so by looking at each choice the algorithm makes (and checking that it is being greedy)
we can verify the correctness for this instance (if the graph were larger, test-driven development would be advised).
\subsection{Missionaries and Cannibals}
With a run of $n=3$ and $m=2$: \\
The graph in the format:\\
Each node represents the following\\
N: Type of node\\
\#: id of the node\\
Name: LHS(Missionaries\_Cannibals)::Boat(Missionaries\_Cannibals)::RHS(Missionaries\_Cannibals)\\
\\
Each Edge is source, target, cost\\
The Goal is to move from LHS $\to$ RHS
\\
Sample Run output:
\begin{lstlisting}
_______________________________________________________________
Heuristics
_______________________________________________________________
../data/graph_can_n_3_m_2.csv
N,0,N::3_2::0_0::0_0
N,1,N::2_1::1_1::0_0
N,2,N::3_0::0_2::0_0
N,3,N::1_2::2_0::0_0
N,4,N::2_1::0_1::1_0
N,5,N::2_1::1_0::0_1
N,6,N::2_0::0_1::0_1
N,7,N::3_0::0_1::0_1
N,8,N::1_1::1_1::1_0
N,9,N::2_0::0_2::1_0
N,10,N::2_0::1_1::0_1
N,11,N::1_1::2_0::0_1
N,12,N::1_1::0_1::2_0
N,13,N::1_1::1_0::1_1
N,14,N::2_0::0_1::1_1
N,15,N::2_0::1_0::0_2
N,16,N::1_0::2_0::0_1
N,17,N::1_0::1_1::2_0
N,18,N::1_0::0_2::2_0
N,19,N::1_0::0_1::2_1
N,20,N::0_1::0_1::3_0
N,21,N::1_0::1_0::1_2
N,22,N::0_0::1_1::2_1
N,23,N::0_0_::0_2::3_0
N,24,N::0_0::0_0::3_2
E,0,1,1
E,0,2,1
E,0,3,1
E,1,4,1
E,1,5,1
E,2,6,1
E,4,7,1
E,4,8,1
E,5,9,1
E,5,10,1
E,6,9,1
E,7,11,1
E,7,12,1
E,8,13,1
E,9,13,1
E,11,15,1
E,11,16,1
E,12,16,1
E,13,17,1
E,14,18,1
E,15,19,1
E,16,20,1
E,17,19,1
E,17,21,1
E,18,21,1
E,19,22,1
E,20,23,1
E,22,24,1
E,23,24,1
S,0,N::3_2::0_0::0_0
T,24,N::0_0::0_0::3_2
_______________________________________________________________
Edge:0->1 cost:1
Edge:0->2 cost:1
Edge:0->3 cost:1
Edge:1->4 cost:1
Edge:1->5 cost:1
Edge:2->6 cost:1
Edge:4->7 cost:1
Edge:4->8 cost:1
Edge:5->9 cost:1
Edge:5->10 cost:1
Edge:6->9 cost:1
Edge:7->11 cost:1
Edge:7->12 cost:1
Edge:8->13 cost:1
Edge:9->13 cost:1
Edge:11->15 cost:1
Edge:11->16 cost:1
Edge:12->16 cost:1
Edge:13->17 cost:1
Edge:14->18 cost:1
Edge:15->19 cost:1
Edge:16->20 cost:1
Edge:17->19 cost:1
Edge:17->21 cost:1
Edge:18->21 cost:1
Edge:19->22 cost:1
Edge:20->23 cost:1
Edge:22->24 cost:1
Edge:23->24 cost:1
Nodes:0
Nodes:1
Nodes:2
Nodes:3
Nodes:4
Nodes:5
Nodes:6
Nodes:7
Nodes:8
Nodes:9
Nodes:10
Nodes:11
Nodes:12
Nodes:13
Nodes:14
Nodes:15
Nodes:16
Nodes:17
Nodes:18
Nodes:19
Nodes:20
Nodes:21
Nodes:22
Nodes:23
Nodes:24
_______________________________________________________________
Node U::0
Other Goal Found:24 cost:0
Adding to frontier node:1 cost:1
Adding to frontier node:2 cost:1
Adding to frontier node:3 cost:1
Node U::1
Other Goal Found:24 cost:0
Adding to frontier node:4 cost:2
Adding to frontier node:5 cost:2
Node U::2
Other Goal Found:24 cost:0
Adding to frontier node:6 cost:2
Node U::3
Other Goal Found:24 cost:0
Node U::4
Other Goal Found:24 cost:0
Adding to frontier node:7 cost:3
Adding to frontier node:8 cost:3
Node U::5
Other Goal Found:24 cost:0
Adding to frontier node:9 cost:3
Adding to frontier node:10 cost:3
Node U::6
Other Goal Found:24 cost:0
Node U::7
Other Goal Found:24 cost:0
Adding to frontier node:11 cost:4
Adding to frontier node:12 cost:4
Node U::8
Other Goal Found:24 cost:0
Adding to frontier node:13 cost:4
Node U::9
Other Goal Found:24 cost:0
Node U::10
Other Goal Found:24 cost:0
Node U::11
Other Goal Found:24 cost:0
Adding to frontier node:15 cost:5
Adding to frontier node:16 cost:5
Node U::12
Other Goal Found:24 cost:0
Node U::13
Other Goal Found:24 cost:0
Adding to frontier node:17 cost:5
Node U::15
Other Goal Found:24 cost:0
Adding to frontier node:19 cost:6
Node U::16
Other Goal Found:24 cost:0
Adding to frontier node:20 cost:6
Node U::17
Other Goal Found:24 cost:0
Adding to frontier node:21 cost:6
Node U::19
Other Goal Found:24 cost:0
Adding to frontier node:22 cost:7
Node U::20
Other Goal Found:24 cost:0
Adding to frontier node:23 cost:7
Node U::21
Other Goal Found:24 cost:0
Node U::22
Other Goal Found:24 cost:0
Adding to frontier node:24 cost:8
Node U::23
Other Goal Found:24 cost:8
Node U::24
Min Goal Found:24 cost:8
explored node count:23
explored node:0
explored node:1
explored node:2
explored node:3
explored node:4
explored node:5
explored node:6
explored node:7
explored node:8
explored node:9
explored node:10
explored node:11
explored node:12
explored node:13
explored node:15
explored node:16
explored node:17
explored node:19
explored node:20
explored node:21
explored node:22
explored node:23
explored node:24
frontier node count:0
path:
24<-22<-19<-15<-11<-7<-4<-1<-0
cost:8
Running time(includes std::cout): 0.0004963s
cpu start: 1 cpu end:1 CLOCKS_PER_SEC:1000
cpu time(includes std::cout): 0s
_______________________________________________________________
\end{lstlisting}
* Note: the CPU run time may not appear accurate due to the running system (see System information \ref{sysinfo}).
\\
\\
The path\\
24$\leftarrow$22$\leftarrow$19$\leftarrow$15$\leftarrow$11$\leftarrow$7$\leftarrow$4$\leftarrow$1$\leftarrow$0
matches the state tree of the problem, and its length equals the depth of the tree.
Each edge having a cost of 1 yields the correct total cost, which matches the number of edges.\\
With a run of $n=4$ and $m=3$: \\
The graph in the format:\\
Each node represents the following\\
N: Type of node\\
\#: id of the node\\
Name: LHS(Missionaries\_Cannibals)::Boat(Missionaries\_Cannibals)::RHS(Missionaries\_Cannibals)\\
\\
Each Edge is source, target, cost\\
The Goal is to move from LHS $\to$ RHS
Sample Run output:
\begin{lstlisting}
_______________________________________________________________
Heuristics
_______________________________________________________________
../data/graph_can_n_4_m_3.csv
N,0,4_3::0_0::0_0
N,1,3_2::1_1::0_0
N,2,4_1::0_2::0_0
N,3,2_3::2_0::0_0
N,4,3_2::1_0::0_1
N,5,3_2::0_1::1_0
N,6,4_1::0_1::0_1
N,7,3_1::1_1::0_1
N,8,2_2::2_0::0_1
N,9,2_2::1_1::1_0
N,10,3_1::0_2::1_0
N,11,4_0::0_2::0_1
N,12,3_1::1_0::0_2
N,13,3_1::0_1::1_1
N,14,2_2::1_0::1_1
N,15,2_2::0_1::2_0
N,16,4_0::0_1::0_2
N,17,2_1::2_0::0_2
N,18,3_0::1_1::0_2
N,19,3_0::0_2::1_1
N,20,2_1::1_1::1_1
N,21,1_2::2_0::1_1
N,22,2_1::0_2::2_0
N,23,2_1::1_0::1_2
N,24,3_0::1_0::0_3
N,25,3_0::0_1::1_2
N,26,2_1::0_1::2_1
N,27,2_1::1_0::1_2
N,28,2_0::2_0::0_3
N,29,2_0::0_2::2_1
N,30,1_1::1_1::2_1
N,31,2_0::1_0::1_3
N,32,2_0::0_1::2_2
N,33,1_1::0_1::3_1
N,34,1_1::1_0::2_2
N,35,1_0::1_1::2_2
N,36,1_0::0_2::3_1
N,37,0_1::1_1::3_1
N,38,0_1::2_0::2_2
N,39,1_0::1_0::2_3
N,40,1_0::0_1::3_2
N,41,0_1::1_0::3_2
N,42,0_1::0_1::4_1
N,43,0_0::1_1::3_2
N,44,0_0::0_2::4_1
N,45,0_0::0_0::4_3
E,0,1,1
E,0,2,1
E,0,3,1
E,1,4,1
E,1,5,1
E,2,6,1
E,4,7,1
E,4,8,1
E,5,9,1
E,5,10,1
E,6,11,1
E,7,12,1
E,7,13,1
E,8,14,1
E,9,14,1
E,9,15,1
E,10,13,1
E,11,16,1
E,12,17,1
E,12,18,1
E,13,19,1
E,13,20,1
E,14,20,1
E,14,21,1
E,15,22,1
E,15,23,1
E,16,18,1
E,17,23,1
E,18,24,1
E,18,25,1
E,19,25,1
E,20,26,1
E,20,23,1
E,22,26,1
E,24,28,1
E,26,29,1
E,26,30,1
E,28,31,1
E,29,32,1
E,30,33,1
E,30,34,1
E,32,35,1
E,33,36,1
E,33,37,1
E,34,38,1
E,35,39,1
E,35,40,1
E,36,40,1
E,37,41,1
E,37,42,1
E,38,41,1
E,40,43,1
E,41,43,1
E,42,44,1
E,43,45,1
E,44,45,1
S,0,4_3::0_0::0_0
T,45,0_0::0_0::4_3
_______________________________________________________________
Edge:0->1 cost:1
Edge:0->2 cost:1
Edge:0->3 cost:1
Edge:1->4 cost:1
Edge:1->5 cost:1
Edge:2->6 cost:1
Edge:4->7 cost:1
Edge:4->8 cost:1
Edge:5->9 cost:1
Edge:5->10 cost:1
Edge:6->11 cost:1
Edge:7->12 cost:1
Edge:7->13 cost:1
Edge:8->14 cost:1
Edge:9->14 cost:1
Edge:9->15 cost:1
Edge:10->13 cost:1
Edge:11->16 cost:1
Edge:12->17 cost:1
Edge:12->18 cost:1
Edge:13->19 cost:1
Edge:13->20 cost:1
Edge:14->20 cost:1
Edge:14->21 cost:1
Edge:15->22 cost:1
Edge:15->23 cost:1
Edge:16->18 cost:1
Edge:17->23 cost:1
Edge:18->24 cost:1
Edge:18->25 cost:1
Edge:19->25 cost:1
Edge:20->26 cost:1
Edge:20->23 cost:1
Edge:22->26 cost:1
Edge:24->28 cost:1
Edge:26->29 cost:1
Edge:26->30 cost:1
Edge:28->31 cost:1
Edge:29->32 cost:1
Edge:30->33 cost:1
Edge:30->34 cost:1
Edge:32->35 cost:1
Edge:33->36 cost:1
Edge:33->37 cost:1
Edge:34->38 cost:1
Edge:35->39 cost:1
Edge:35->40 cost:1
Edge:36->40 cost:1
Edge:37->41 cost:1
Edge:37->42 cost:1
Edge:38->41 cost:1
Edge:40->43 cost:1
Edge:41->43 cost:1
Edge:42->44 cost:1
Edge:43->45 cost:1
Edge:44->45 cost:1
Nodes:0
Nodes:1
Nodes:2
Nodes:3
Nodes:4
Nodes:5
Nodes:6
Nodes:7
Nodes:8
Nodes:9
Nodes:10
Nodes:11
Nodes:12
Nodes:13
Nodes:14
Nodes:15
Nodes:16
Nodes:17
Nodes:18
Nodes:19
Nodes:20
Nodes:21
Nodes:22
Nodes:23
Nodes:24
Nodes:25
Nodes:26
Nodes:27
Nodes:28
Nodes:29
Nodes:30
Nodes:31
Nodes:32
Nodes:33
Nodes:34
Nodes:35
Nodes:36
Nodes:37
Nodes:38
Nodes:39
Nodes:40
Nodes:41
Nodes:42
Nodes:43
Nodes:44
Nodes:45
_______________________________________________________________
Node U::0
Other Goal Found:45 cost:0
Adding to frontier node:1 cost:1
Adding to frontier node:2 cost:1
Adding to frontier node:3 cost:1
Node U::1
Other Goal Found:45 cost:0
Adding to frontier node:4 cost:2
Adding to frontier node:5 cost:2
Node U::2
Other Goal Found:45 cost:0
Adding to frontier node:6 cost:2
Node U::3
Other Goal Found:45 cost:0
Node U::4
Other Goal Found:45 cost:0
Adding to frontier node:7 cost:3
Adding to frontier node:8 cost:3
Node U::5
Other Goal Found:45 cost:0
Adding to frontier node:9 cost:3
Adding to frontier node:10 cost:3
Node U::6
Other Goal Found:45 cost:0
Adding to frontier node:11 cost:3
Node U::7
Other Goal Found:45 cost:0
Adding to frontier node:12 cost:4
Adding to frontier node:13 cost:4
Node U::8
Other Goal Found:45 cost:0
Adding to frontier node:14 cost:4
Node U::9
Other Goal Found:45 cost:0
Adding to frontier node:15 cost:4
Node U::10
Other Goal Found:45 cost:0
Node U::11
Other Goal Found:45 cost:0
Adding to frontier node:16 cost:4
Node U::12
Other Goal Found:45 cost:0
Adding to frontier node:17 cost:5
Adding to frontier node:18 cost:5
Node U::13
Other Goal Found:45 cost:0
Adding to frontier node:19 cost:5
Adding to frontier node:20 cost:5
Node U::14
Other Goal Found:45 cost:0
Adding to frontier node:21 cost:5
Node U::15
Other Goal Found:45 cost:0
Adding to frontier node:22 cost:5
Adding to frontier node:23 cost:5
Node U::16
Other Goal Found:45 cost:0
Node U::17
Other Goal Found:45 cost:0
Node U::18
Other Goal Found:45 cost:0
Adding to frontier node:24 cost:6
Adding to frontier node:25 cost:6
Node U::19
Other Goal Found:45 cost:0
Node U::20
Other Goal Found:45 cost:0
Adding to frontier node:26 cost:6
Node U::21
Other Goal Found:45 cost:0
Node U::22
Other Goal Found:45 cost:0
Node U::23
Other Goal Found:45 cost:0
Node U::24
Other Goal Found:45 cost:0
Adding to frontier node:28 cost:7
Node U::25
Other Goal Found:45 cost:0
Node U::26
Other Goal Found:45 cost:0
Adding to frontier node:29 cost:7
Adding to frontier node:30 cost:7
Node U::28
Other Goal Found:45 cost:0
Adding to frontier node:31 cost:8
Node U::29
Other Goal Found:45 cost:0
Adding to frontier node:32 cost:8
Node U::30
Other Goal Found:45 cost:0
Adding to frontier node:33 cost:8
Adding to frontier node:34 cost:8
Node U::31
Other Goal Found:45 cost:0
Node U::32
Other Goal Found:45 cost:0
Adding to frontier node:35 cost:9
Node U::33
Other Goal Found:45 cost:0
Adding to frontier node:36 cost:9
Adding to frontier node:37 cost:9
Node U::34
Other Goal Found:45 cost:0
Adding to frontier node:38 cost:9
Node U::35
Other Goal Found:45 cost:0
Adding to frontier node:39 cost:10
Adding to frontier node:40 cost:10
Node U::36
Other Goal Found:45 cost:0
Node U::37
Other Goal Found:45 cost:0
Adding to frontier node:41 cost:10
Adding to frontier node:42 cost:10
Node U::38
Other Goal Found:45 cost:0
Node U::39
Other Goal Found:45 cost:0
Node U::40
Other Goal Found:45 cost:0
Adding to frontier node:43 cost:11
Node U::41
Other Goal Found:45 cost:0
Node U::42
Other Goal Found:45 cost:0
Adding to frontier node:44 cost:11
Node U::43
Other Goal Found:45 cost:0
Adding to frontier node:45 cost:12
Node U::44
Other Goal Found:45 cost:12
Node U::45
Min Goal Found:45 cost:12
explored node count:45
explored node:0
explored node:1
explored node:2
explored node:3
explored node:4
explored node:5
explored node:6
explored node:7
explored node:8
explored node:9
explored node:10
explored node:11
explored node:12
explored node:13
explored node:14
explored node:15
explored node:16
explored node:17
explored node:18
explored node:19
explored node:20
explored node:21
explored node:22
explored node:23
explored node:24
explored node:25
explored node:26
explored node:28
explored node:29
explored node:30
explored node:31
explored node:32
explored node:33
explored node:34
explored node:35
explored node:36
explored node:37
explored node:38
explored node:39
explored node:40
explored node:41
explored node:42
explored node:43
explored node:44
explored node:45
frontier node count:0
path:
45<-43<-40<-35<-32<-29<-26<-20<-13<-7<-4<-1<-0
cost:12
Running time(includes std::cout): 0.0010232s
cpu start: 1 cpu end:2 CLOCKS_PER_SEC:1000
cpu time(includes std::cout): 0.001s
_______________________________________________________________
\end{lstlisting}
* Note: the CPU run time may not appear accurate due to the running system (see System information \ref{sysinfo}).
45$\leftarrow$43$\leftarrow$40$\leftarrow$35$\leftarrow$32$\leftarrow$29$\leftarrow$26$\leftarrow$20$\leftarrow$13$\leftarrow$7$\leftarrow$4$\leftarrow$1$\leftarrow$0
the path can be traced from the source to the sink, with the sink/target state matching the goal state.
\\
The output cost of 12 is correct because the depth of the tree that represents
this problem is 12, and since each edge has a weight of 1, the total cost should equal the depth of the tree.
\subsection{System information}\label{sysinfo}
clinfo.exe dump of system information.
Note: the above runs were executed with cl.exe in debug mode on Windows 10 x64.
\begin{lstlisting}
Number of platforms: 1
Platform Profile: FULL_PROFILE
Platform Version: OpenCL 2.0 AMD-APP (2264.11)
Platform Name: AMD Accelerated Parallel Processing
Platform Vendor: Advanced Micro Devices, Inc.
Platform Extensions: cl_khr_icd cl_khr_d3d10_sharing cl_khr_d3d11_sharing cl_khr_dx9_media_sharing cl_amd_event_callback cl_amd_offline_devices
Platform Name: AMD Accelerated Parallel Processing
Number of devices: 2
Device Type: CL_DEVICE_TYPE_GPU
Vendor ID: 1002h
Board name: AMD Radeon HD 7800 Series
Device Topology: PCI[ B#1, D#0, F#0 ]
Max compute units: 16
Max work items dimensions: 3
Max work items[0]: 256
Max work items[1]: 256
Max work items[2]: 256
Max work group size: 256
Preferred vector width char: 4
Preferred vector width short: 2
Preferred vector width int: 1
Preferred vector width long: 1
Preferred vector width float: 1
Preferred vector width double: 1
Native vector width char: 4
Native vector width short: 2
Native vector width int: 1
Native vector width long: 1
Native vector width float: 1
Native vector width double: 1
Max clock frequency: 860Mhz
Address bits: 32
Max memory allocation: 1409286144
Image support: Yes
Max number of images read arguments: 128
Max number of images write arguments: 8
Max image 2D width: 16384
Max image 2D height: 16384
Max image 3D width: 2048
Max image 3D height: 2048
Max image 3D depth: 2048
Max samplers within kernel: 16
Max size of kernel argument: 1024
Alignment (bits) of base address: 2048
Minimum alignment (bytes) for any datatype: 128
Single precision floating point capability
Denorms: No
Quiet NaNs: Yes
Round to nearest even: Yes
Round to zero: Yes
Round to +ve and infinity: Yes
IEEE754-2008 fused multiply-add: Yes
Cache type: Read/Write
Cache line size: 64
Cache size: 16384
Global memory size: 2147483648
Constant buffer size: 65536
Max number of constant args: 8
Local memory type: Scratchpad
Local memory size: 32768
Max pipe arguments: 0
Max pipe active reservations: 0
Max pipe packet size: 0
Max global variable size: 0
Max global variable preferred total size: 0
Max read/write image args: 0
Max on device events: 0
Queue on device max size: 0
Max on device queues: 0
Queue on device preferred size: 0
SVM capabilities:
Coarse grain buffer: No
Fine grain buffer: No
Fine grain system: No
Atomics: No
Preferred platform atomic alignment: 0
Preferred global atomic alignment: 0
Preferred local atomic alignment: 0
Kernel Preferred work group size multiple: 64
Error correction support: 0
Unified memory for Host and Device: 0
Profiling timer resolution: 1
Device endianess: Little
Available: Yes
Compiler available: Yes
Execution capabilities:
Execute OpenCL kernels: Yes
Execute native function: No
Queue on Host properties:
Out-of-Order: No
Profiling : Yes
Queue on Device properties:
Out-of-Order: No
Profiling : No
Platform ID: 00007FFE8904B188
Name: Pitcairn
Vendor: Advanced Micro Devices, Inc.
Device OpenCL C version: OpenCL C 1.2
Driver version: 2264.11
Profile: FULL_PROFILE
Version: OpenCL 1.2 AMD-APP (2264.11)
Extensions: cl_khr_fp64 cl_amd_fp64 cl_khr_global_int32_base_atomics cl_khr_global_int32_extended_atomics cl_khr_local_int32_base_atomics cl_khr_local_int32_extended_atomics cl_khr_int64_base_atomics cl_khr_int64_extended_atomics cl_khr_3d_image_writes cl_khr_byte_addressable_store cl_khr_gl_sharing cl_amd_device_attribute_query cl_amd_vec3 cl_amd_printf cl_amd_media_ops cl_amd_media_ops2 cl_amd_popcnt cl_khr_d3d10_sharing cl_khr_d3d11_sharing cl_khr_dx9_media_sharing cl_khr_image2d_from_buffer cl_khr_spir cl_khr_gl_event cl_amd_liquid_flash
Device Type: CL_DEVICE_TYPE_CPU
Vendor ID: 1002h
Board name:
Max compute units: 4
Max work items dimensions: 3
Max work items[0]: 1024
Max work items[1]: 1024
Max work items[2]: 1024
Max work group size: 1024
Preferred vector width char: 16
Preferred vector width short: 8
Preferred vector width int: 4
Preferred vector width long: 2
Preferred vector width float: 8
Preferred vector width double: 4
Native vector width char: 16
Native vector width short: 8
Native vector width int: 4
Native vector width long: 2
Native vector width float: 8
Native vector width double: 4
Max clock frequency: 3504Mhz
Address bits: 64
Max memory allocation: 4275856384
Image support: Yes
Max number of images read arguments: 128
Max number of images write arguments: 64
Max image 2D width: 8192
Max image 2D height: 8192
Max image 3D width: 2048
Max image 3D height: 2048
Max image 3D depth: 2048
Max samplers within kernel: 16
Max size of kernel argument: 4096
Alignment (bits) of base address: 1024
Minimum alignment (bytes) for any datatype: 128
Single precision floating point capability
Denorms: Yes
Quiet NaNs: Yes
Round to nearest even: Yes
Round to zero: Yes
Round to +ve and infinity: Yes
IEEE754-2008 fused multiply-add: Yes
Cache type: Read/Write
Cache line size: 64
Cache size: 32768
Global memory size: 17103425536
Constant buffer size: 65536
Max number of constant args: 8
Local memory type: Global
Local memory size: 32768
Max pipe arguments: 16
Max pipe active reservations: 16
Max pipe packet size: 4275856384
Max global variable size: 1879048192
Max global variable preferred total size: 1879048192
Max read/write image args: 64
Max on device events: 0
Queue on device max size: 0
Max on device queues: 0
Queue on device preferred size: 0
SVM capabilities:
Coarse grain buffer: No
Fine grain buffer: No
Fine grain system: No
Atomics: No
Preferred platform atomic alignment: 0
Preferred global atomic alignment: 0
Preferred local atomic alignment: 0
Kernel Preferred work group size multiple: 1
Error correction support: 0
Unified memory for Host and Device: 1
Profiling timer resolution: 292
Device endianess: Little
Available: Yes
Compiler available: Yes
Execution capabilities:
Execute OpenCL kernels: Yes
Execute native function: Yes
Queue on Host properties:
Out-of-Order: No
Profiling : Yes
Queue on Device properties:
Out-of-Order: No
Profiling : No
Platform ID: 00007FFE8904B188
Name: Intel(R) Core(TM) i5-6600K CPU @ 3.50GHz
Vendor: GenuineIntel
Device OpenCL C version: OpenCL C 1.2
Driver version: 2264.11 (sse2,avx)
Profile: FULL_PROFILE
Version: OpenCL 1.2 AMD-APP (2264.11)
Extensions: cl_khr_fp64 cl_amd_fp64 cl_khr_global_int32_base_atomics cl_khr_global_int32_extended_atomics cl_khr_local_int32_base_atomics cl_khr_local_int32_extended_atomics cl_khr_int64_base_atomics cl_khr_int64_extended_atomics cl_khr_3d_image_writes cl_khr_byte_addressable_store cl_khr_gl_sharing cl_ext_device_fission cl_amd_device_attribute_query cl_amd_vec3 cl_amd_printf cl_amd_media_ops cl_amd_media_ops2 cl_amd_popcnt cl_khr_d3d10_sharing cl_khr_spir cl_khr_gl_event
\end{lstlisting}
% //END
\section{ Sources of Error}
There could be some sources of error in my graph CSV files,
such as incorrectly added edges or a mistyped graph state for the Missionaries and Cannibals problem.
\bibliographystyle{unsrt}
\bibliography{bib}
\end{document}
|
function P = vecRt_r()
% vec(R') = P * r
P = sparse([1,2,3,4,5,6,7,8,9],...
[1,4,7,2,5,8,3,6,9],...
[1,1,1,1,1,1,1,1,1],...
9,9);
end |
// Copyright Yamaha 2021
// MIT License
// https://github.com/yamaha-bps/ros2_pcl_utils/blob/master/LICENSE
#include "pcl_utils/pcl_seg_node.hpp"
#include <boost/circular_buffer.hpp>
#include <rclcpp_components/register_node_macro.hpp>
#include <condition_variable>
#include <deque>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "pcl_utils/filter.hpp"
namespace cbr
{
using Pose = std::pair<Eigen::Vector3f, Eigen::Quaternionf>;
struct PclSegNode::Impl
{
std::mutex pcl_queue_mtx;
boost::circular_buffer<sensor_msgs::msg::PointCloud2::UniquePtr> pcl_queue{};
std::mutex img_queue_mtx;
std::deque<sensor_msgs::msg::Image::UniquePtr> img_queue{};
std::condition_variable condition_;
std::atomic<bool> calib_received_;
sensor_msgs::msg::CameraInfo calib;
std::atomic<bool> camera_frame_received;
std::string camera_frame;
// class indices to pass through
std::unordered_set<uint8_t> classes_;
std::mutex frames_mtx;
std::unordered_map<
std::string, Pose, std::hash<std::string>,
std::equal_to<std::string>,
Eigen::aligned_allocator<std::pair<std::string, Pose>>>
frames;
};
PclSegNode::PclSegNode(const rclcpp::NodeOptions & opts)
: Node("pcl_seg", opts), pImpl(std::make_unique<Impl>())
{
declare_parameter<std::vector<int64_t>>("classes", std::vector<int64_t>{0});
auto classes_vec = get_parameter("classes").as_integer_array();
declare_parameter<int>("n_pclbuf", 500);
pImpl->pcl_queue.set_capacity(get_parameter("n_pclbuf").as_int());
for (auto c : classes_vec) {
if (c < 0 || c > std::numeric_limits<uint8_t>::max()) {
RCLCPP_ERROR(get_logger(), "Class index %lu not a uint8", c);
} else {
pImpl->classes_.insert(static_cast<uint8_t>(c));
}
}
sub_pcl_ = create_subscription<sensor_msgs::msg::PointCloud2>(
"pointcloud", rclcpp::SensorDataQoS(),
std::bind(&PclSegNode::cb_pcl_, this, std::placeholders::_1));
sub_img_ = create_subscription<sensor_msgs::msg::Image>(
"segmentation", rclcpp::SensorDataQoS(),
std::bind(&PclSegNode::cb_img_, this, std::placeholders::_1));
sub_calib_ = create_subscription<sensor_msgs::msg::CameraInfo>(
"camera_info", rclcpp::SystemDefaultsQoS(),
[this](const sensor_msgs::msg::CameraInfo::SharedPtr msg) {
if (!pImpl->calib_received_) {
pImpl->calib = *msg;
pImpl->calib_received_ = true;
RCLCPP_INFO(get_logger(), "Received camera info");
}
});
pub_pcl_ = create_publisher<sensor_msgs::msg::PointCloud2>(
"pointcloud_filtered", rclcpp::SensorDataQoS());
tf2_buf_ = std::make_shared<tf2_ros::Buffer>(get_clock());
tf2_listener_ = std::make_shared<tf2_ros::TransformListener>(*tf2_buf_);
worker_thread_ = std::thread(std::bind(&PclSegNode::work, this));
RCLCPP_INFO(get_logger(), "Started node");
}
PclSegNode::~PclSegNode()
{
canceled_.store(true);
pImpl->condition_.notify_one();
if (worker_thread_.joinable()) {
worker_thread_.join();
}
RCLCPP_INFO(get_logger(), "Closing node");
}
void PclSegNode::cb_pcl_(sensor_msgs::msg::PointCloud2::UniquePtr msg)
{
if (!pImpl->calib_received_) {
RCLCPP_INFO_THROTTLE(
get_logger(), *get_clock(), 3000,
"Waiting for camera info");
return;
}
if (!pImpl->camera_frame_received) {
RCLCPP_INFO_THROTTLE(
get_logger(), *get_clock(), 3000,
"Waiting for segmentation image");
return;
}
// look up transform
if (pImpl->frames.find(msg->header.frame_id) == pImpl->frames.end()) {
if (!tf2_buf_->canTransform(
pImpl->camera_frame, msg->header.frame_id,
msg->header.stamp))
{
RCLCPP_WARN(
get_logger(), "Can not transform from %s to %s",
msg->header.frame_id.c_str(), pImpl->camera_frame.c_str());
return;
}
auto tf = tf2_buf_->lookupTransform(
pImpl->camera_frame, msg->header.frame_id, msg->header.stamp);
std::lock_guard lock(pImpl->frames_mtx);
pImpl->frames[msg->header.frame_id] = std::make_pair(
Eigen::Vector3f(
tf.transform.translation.x, tf.transform.translation.y,
tf.transform.translation.z),
Eigen::Quaternionf(
tf.transform.rotation.w, tf.transform.rotation.x,
tf.transform.rotation.y, tf.transform.rotation.z)
);
}
{
std::lock_guard lock(pImpl->pcl_queue_mtx);
if (pImpl->pcl_queue.full()) {
RCLCPP_WARN(
get_logger(),
"PCL buffer reached capacity %lu, dropping oldest",
pImpl->pcl_queue.capacity());
}
pImpl->pcl_queue.push_back(std::move(msg));
}
pImpl->condition_.notify_one();
}
void PclSegNode::cb_img_(sensor_msgs::msg::Image::UniquePtr msg)
{
std::lock_guard lock(pImpl->img_queue_mtx);
rclcpp::Time msg_t(msg->header.stamp);
if (!pImpl->img_queue.empty() &&
msg_t < rclcpp::Time(pImpl->img_queue.back()->header.stamp))
{
RCLCPP_WARN(get_logger(), "Seg image arrived out of order, discarding...");
return;
}
if (!pImpl->camera_frame_received) {
pImpl->camera_frame = msg->header.frame_id;
pImpl->camera_frame_received = true;
}
pImpl->img_queue.push_back(std::move(msg));
}
void PclSegNode::work()
{
while (!canceled_) {
std::unique_ptr<sensor_msgs::msg::PointCloud2> pcl;
// wait only if queue is empty
{
std::unique_lock<std::mutex> lock(pImpl->pcl_queue_mtx);
if (pImpl->pcl_queue.empty()) {
pImpl->condition_.wait(
lock, [this] {return canceled_ || !pImpl->pcl_queue.empty();});
}
}
if (canceled_) {
break;
}
// here we are guaranteed the following:
// - all pcls in queue have frame transform information stored in
// pImpl->frames
// - pImpl->calib exists
if (!pImpl->calib_received_) {
RCLCPP_WARN_THROTTLE(
get_logger(), *get_clock(), 3000,
"Waiting for camera info");
continue;
}
{
// discard old pointclouds (should only occur at the beginning)
std::lock_guard l1(pImpl->img_queue_mtx);
std::lock_guard l2(pImpl->pcl_queue_mtx);
while (!pImpl->img_queue.empty() && !pImpl->pcl_queue.empty()) {
rclcpp::Time img_t(pImpl->img_queue.front()->header.stamp);
rclcpp::Time pcl_t(pImpl->pcl_queue.front()->header.stamp);
if (pcl_t < img_t) {
pImpl->pcl_queue.pop_front();
} else {
break;
}
}
// discard non-relevant images
while (pImpl->img_queue.size() >= 2 &&
rclcpp::Time(pImpl->img_queue[1]->header.stamp) <=
rclcpp::Time(pImpl->pcl_queue.front()->header.stamp))
{
pImpl->img_queue.pop_front();
}
// ensure that we have one image before and after the pointcloud
if (pImpl->img_queue.size() < 2) {
continue;
}
// grab next pointcloud
pcl = std::move(pImpl->pcl_queue.front());
pImpl->pcl_queue.pop_front();
}
// it's fine to not lock img_queue_mtx here since we are only reading,
// and other thread is just adding which does not invalidate pointers
// (https://en.cppreference.com/w/cpp/container/deque)
rclcpp::Time pcl_t(pcl->header.stamp);
std::lock_guard flock(pImpl->frames_mtx);
auto it = pImpl->frames.find(pcl->header.frame_id);
if (it != pImpl->frames.end()) {
const auto [T_CAM_LID, R_CAM_LID] = it->second;
// here we are guaranteed to have one image before and one image after the
// pcl
rclcpp::Time img0_t(pImpl->img_queue[0]->header.stamp);
rclcpp::Time img1_t(pImpl->img_queue[1]->header.stamp);
if (!(img0_t <= pcl_t && pcl_t <= img1_t)) {
// something went wrong (maybe pointclouds arrived out of order,
// discard)
continue;
}
// pick segmentation image that is closest to the pointcloud in time
std::size_t idx = std::abs((img0_t - pcl_t).seconds()) <
std::abs((img1_t - pcl_t).seconds()) ?
0 :
1;
// filter the pointcloud with respect to the image
filter(
*pImpl->img_queue[idx], pImpl->calib, pImpl->classes_,
T_CAM_LID, R_CAM_LID, *pcl);
// re-publish filtered pointcloud
pub_pcl_->publish(std::move(pcl));
}
}
}
} // namespace cbr
RCLCPP_COMPONENTS_REGISTER_NODE(cbr::PclSegNode)
|
{-# OPTIONS --without-K --safe #-}
open import Level using (Level)
open import FLA.Algebra.Structures
open import Relation.Binary.PropositionalEquality
open ≡-Reasoning
module FLA.Algebra.Properties.Field {ℓ : Level } {A : Set ℓ} ⦃ F : Field A ⦄ where
open Field F
0ᶠ+0ᶠ≡0ᶠ : 0ᶠ + 0ᶠ ≡ 0ᶠ
0ᶠ+0ᶠ≡0ᶠ = +0ᶠ 0ᶠ
0ᶠ+ : (a : A) → 0ᶠ + a ≡ a
0ᶠ+ a rewrite +-comm 0ᶠ a = +0ᶠ a
a*0ᶠ≡0ᶠ : (a : A) → a * 0ᶠ ≡ 0ᶠ
a*0ᶠ≡0ᶠ a = begin
a * 0ᶠ ≡˘⟨ 0ᶠ+ (a * 0ᶠ) ⟩
0ᶠ + a * 0ᶠ ≡⟨ cong (_+ a * 0ᶠ) (sym (+-inv a)) ⟩
- a + a + a * 0ᶠ ≡⟨ cong (λ x → - a + x + a * 0ᶠ) (sym (*1ᶠ a)) ⟩
- a + a * 1ᶠ + a * 0ᶠ ≡˘⟨ +-assoc (- a) (a * 1ᶠ) (a * 0ᶠ) ⟩
- a + (a * 1ᶠ + a * 0ᶠ) ≡⟨ cong (- a +_) (sym (*-distr-+ a 1ᶠ 0ᶠ)) ⟩
- a + (a * (1ᶠ + 0ᶠ)) ≡⟨ cong (λ x → - a + (a * x)) (+0ᶠ 1ᶠ) ⟩
- a + (a * 1ᶠ) ≡⟨ cong (- a +_) (*1ᶠ a) ⟩
- a + a ≡⟨ +-inv a ⟩
0ᶠ ∎
0ᶠ*a≡0ᶠ : (a : A) → 0ᶠ * a ≡ 0ᶠ
0ᶠ*a≡0ᶠ a = trans (*-comm 0ᶠ a) (a*0ᶠ≡0ᶠ a)
-a≡-1ᶠ*a : (a : A) → - a ≡ - 1ᶠ * a
-a≡-1ᶠ*a a = begin
- a ≡˘⟨ +0ᶠ (- a) ⟩
- a + 0ᶠ ≡⟨ cong (- a +_) (sym (a*0ᶠ≡0ᶠ a)) ⟩
- a + (a * 0ᶠ) ≡⟨ cong (λ x → - a + (a * x)) (sym (+-inv 1ᶠ)) ⟩
- a + (a * (- 1ᶠ + 1ᶠ)) ≡⟨ cong (- a +_) (*-distr-+ a (- 1ᶠ) 1ᶠ) ⟩
- a + (a * - 1ᶠ + a * 1ᶠ) ≡⟨ cong (- a +_) (+-comm (a * - 1ᶠ) (a * 1ᶠ)) ⟩
- a + (a * 1ᶠ + a * - 1ᶠ ) ≡⟨ +-assoc (- a) (a * 1ᶠ) (a * - 1ᶠ) ⟩
- a + a * 1ᶠ + a * - 1ᶠ ≡⟨ cong (λ x → - a + x + a * - 1ᶠ) (*1ᶠ a) ⟩
- a + a + a * - 1ᶠ ≡⟨ cong (_+ a * - 1ᶠ) (+-inv a) ⟩
0ᶠ + a * - 1ᶠ ≡⟨ 0ᶠ+ (a * - 1ᶠ) ⟩
a * - 1ᶠ ≡⟨ *-comm a (- 1ᶠ) ⟩
- 1ᶠ * a ∎
-a*b≡-[a*b] : (a b : A) → - a * b ≡ - (a * b)
-a*b≡-[a*b] a b = begin
- a * b ≡⟨ cong (_* b) (-a≡-1ᶠ*a a) ⟩
(- 1ᶠ * a) * b ≡˘⟨ *-assoc (- 1ᶠ) a b ⟩
- 1ᶠ * (a * b) ≡˘⟨ -a≡-1ᶠ*a ((a * b)) ⟩
- (a * b) ∎
a*-b≡-[a*b] : (a b : A) → a * - b ≡ - (a * b)
a*-b≡-[a*b] a b = begin
a * - b ≡⟨ *-comm a (- b) ⟩
- b * a ≡⟨ -a*b≡-[a*b] b a ⟩
- (b * a) ≡⟨ cong -_ (*-comm b a) ⟩
- (a * b) ∎
|
import os
import pywt
import matplotlib.pyplot as plt
import numpy as np
import librosa
import scipy.io as sio
import scipy.io.wavfile
def checkPath(target) :
if not os.path.exists(target): os.makedirs(target)
#### Check Dataset & Output Directory
ROOT_INPUT_PATH = os.path.join(os.path.abspath(__file__+ "../../"), '.dataset/')
ROOT_OUT_PATH = os.path.join(os.path.abspath(__file__+ "../../"), '.output/')
ROOT_FIGURE_PATH = ROOT_OUT_PATH+".WaveLetDWT/"
fileName = "Loop_0"
fileExt = ".wav"
inputFile = ROOT_INPUT_PATH+fileName+ fileExt
transFile = ROOT_INPUT_PATH+fileName+"_32" + fileExt
checkPath(ROOT_OUT_PATH)
checkPath(ROOT_INPUT_PATH)
checkPath(ROOT_FIGURE_PATH)
if not os.path.exists(transFile):
data, samplerate = librosa.load(inputFile, dtype='float32')
librosa.output.write_wav(transFile, data, samplerate)
#### Load
# Return the sample rate (in samples/sec), data from a WAV file, Wave Format PCM
fs, samples_murmur = sio.wavfile.read(transFile)
print("Wave Info\n Sample Rate={0}, ".format(fs)) # 22.050 kHz; number of samples extracted per second
print(" Data Length={0}\n Data={1}".format(len(samples_murmur), samples_murmur))
### Discrete Wavelet Info
# pywt.Wavelet: Describes properties of a discrete wavelet identified by the specified wavelet name, must be a valid wavelet name from the pywt.wavelist() list.
# wavelist: 'haar', 'db', 'sym', 'coif', 'bior', 'rbio', 'dmey', 'gaus', 'mexh', 'morl', 'cgau', 'shan', 'fbsp', 'cmor'
discrete_wavelet = pywt.Wavelet('db2')
print(discrete_wavelet)
max_level = pywt.dwt_max_level(len(samples_murmur), discrete_wavelet)
print('MAXIMUM DECOMPOSE LEVEL = ', max_level)
targetData = samples_murmur.copy() # NO read only
#### Discrete Wavelet Transform
# pywt.wavedec: Multilevel 1D Discrete Wavelet Transform of data.
# Parameters: data, wavelet, mode='symmetric', level=None, axis=-1
# Returns: [cA_n, cD_n, cD_n-1, …, cD2, cD1] : list
originalMatrix = pywt.wavedec(data=targetData, wavelet='db2', level=3)
cA3, cD3, cD2, cD1 = originalMatrix
print("< Discrete Wavelet Transform >\n" + " cD1: {0}\n cD2: {1}\n cD3: {2}\n cA3: {3}\n".format(cD1,cD2,cD3,cA3))
print(cA3.size, cD3.size, cD2.size, cD1.size);
#### Reconstruct
reconstructMatrix = [cA3, cD3, cD2, cD1];
reconstruct_sample = pywt.waverec(reconstructMatrix, 'db2')
print("< Reconstruct >\n" + " Length={0}\n Data={1}".format(len(reconstruct_sample), reconstruct_sample))
sio.wavfile.write(ROOT_FIGURE_PATH+fileName+fileExt, fs, reconstruct_sample)
rec_to_orig = pywt.idwt(None, cD1, 'db2', 'smooth')
rec_to_level1 = pywt.idwt(None, cD2, 'db2', 'smooth')
rec_to_level2_from_detail = pywt.idwt(None, cD3, 'db2', 'smooth')
rec_to_level2_from_approx = pywt.idwt(cA3, None, 'db2', 'smooth')
# print(rec_to_orig,rec_to_level1,rec_to_level2_from_detail,rec_to_level2_from_approx)
#### visualize
# plt.figure(figsize=(4,4))
# (phi, psi, x) = discrete_wavelet.wavefun()
# plt.plot(x, phi)
# plt.savefig(ROOT_FIGURE_PATH+fileName+"_Info_DWT.png")
# plt.show()
plt.figure(figsize=(15,10))
plt.subplot(6,1,1)
plt.title('Sample')
plt.plot(np.linspace(0.0, len(samples_murmur),len(samples_murmur)), samples_murmur)
plt.grid()
plt.subplot(6,1,2)
plt.title('cD1')
plt.plot(np.linspace(0.0, len(rec_to_orig),len(rec_to_orig)), rec_to_orig)
plt.grid()
plt.subplot(6,1,3)
plt.title('cD2')
plt.plot(np.linspace(0.0, len(rec_to_level1),len(rec_to_level1)), rec_to_level1)
plt.grid()
plt.subplot(6,1,4)
plt.title('cD3')
plt.plot(np.linspace(0.0, len(rec_to_level2_from_detail),len(rec_to_level2_from_detail)), rec_to_level2_from_detail)
plt.grid()
plt.subplot(6,1,5)
plt.title('cA3')
plt.plot(np.linspace(0.0, len(rec_to_level2_from_approx),len(rec_to_level2_from_approx)), rec_to_level2_from_approx)
plt.grid()
plt.subplot(6,1,6)
plt.title('reconstruct_sample')
plt.plot(np.linspace(0.0, len(reconstruct_sample),len(reconstruct_sample)), reconstruct_sample)
plt.grid()
plt.tight_layout()
plt.savefig(ROOT_FIGURE_PATH+fileName+"_Figure_DWT.png")
plt.show() |
The convoy reached a position 65 nautical miles (120 km; 75 mi) off St Nazaire at 21:00 and changed course toward the estuary, leaving Atherstone and Tynedale as a sea patrol. The convoy adopted a new formation with the MGB and two torpedo MLs in the lead, followed by Campbeltown. The rest of the MLs formed two columns on either side and astern of the destroyer, with the MTB bringing up the rear. The first casualty of the raid was ML 341, which had developed engine trouble and was abandoned. At 22:00 the submarine Sturgeon directed her navigation beacon out to sea to guide the convoy in. At about the same time Campbeltown raised the German naval ensign in an attempt to deceive any German lookouts into thinking she was a German destroyer.
|
State Before: V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
⊢ IsTrail p State After: V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
⊢ ∀ (a : Sym2 V), List.count a (edges p) ≤ 1 Tactic: rw [isTrail_def, List.nodup_iff_count_le_one] State Before: V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
⊢ ∀ (a : Sym2 V), List.count a (edges p) ≤ 1 State After: V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
e : Sym2 V
⊢ List.count e (edges p) ≤ 1 Tactic: intro e State Before: V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
e : Sym2 V
⊢ List.count e (edges p) ≤ 1 State After: case pos
V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
e : Sym2 V
he : e ∈ edges p
⊢ List.count e (edges p) ≤ 1
case neg
V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
e : Sym2 V
he : ¬e ∈ edges p
⊢ List.count e (edges p) ≤ 1 Tactic: by_cases he : e ∈ p.edges State Before: case pos
V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
e : Sym2 V
he : e ∈ edges p
⊢ List.count e (edges p) ≤ 1 State After: no goals Tactic: exact (h e (edges_subset_edgeSet _ he)).le State Before: case neg
V : Type u_1
G : SimpleGraph V
inst✝ : DecidableEq V
u v : V
p : Walk G u v
h : IsEulerian p
e : Sym2 V
he : ¬e ∈ edges p
⊢ List.count e (edges p) ≤ 1 State After: no goals Tactic: simp [he] |
c Copyright 1993 Colorado State University
c All Rights Reserved
subroutine eachyr
implicit none
include 'comput.inc'
include 'const.inc'
include 'dovars.inc'
include 'isovar.inc'
include 'jday.inc'
include 'ligvar.inc'
include 'param.inc'
include 'parcp.inc'
include 'parfs.inc'
include 'plot1.inc'
include 'plot2.inc'
include 'plot3.inc'
include 'seq.inc'
include 'site.inc'
include 'wth.inc'
include 'zztim.inc'
c ... Perform tasks that only need to be done once a year.
c ... Local variables
integer imo
c ... Correct for the rounding problem with time. The value for time
c ... drifts downward during long runs since dt=1/12 cannot be represented
c ... precisely. At this point in the run, time should be a whole number.
c ... Changed increment value from .001 to .5 to correct error in time calculation
c ... occuring after year 8192. (mse 3/95). New code if from Kendrick Killian.
time = sign(int(abs(time)+.5),int(time))
c ... Reset annual accumulators to zero
call annacc
c ... Wet-dry fixation of N
c ... Determine annual precipitation and annual PET
c ... For RAMS/Daily Century, prcann = average annual precip.
c ... petann (output var) is computed in dailymoist. -mdh 12/96
do 10 imo = 1, MONTHS
prcann = prcann + precip(imo)
agdefacm(imo) = -1.
bgdefacm(imo) = -1.
10 continue
c ... N fixation in atmosphere
wdfxa = epnfa(INTCPT)+epnfa(SLOPE)*MIN(prcann,80.0)
if (wdfxa .lt. 0.) then
wdfxa = 0.0
endif
c ... Non-symbiotic N fixation in the soil
c ... Use annual ET unless it is the first timestep
c ... No longer using the intercept in the calculation.
c ... wdfxs = epnfs(INTCPT)+epnfs(SLOPE)*MIN(prcann,100.0)
if (annet .eq. 0.0) then
wdfxs = epnfs(SLOPE)*MIN(prcann,100.0)
else
wdfxs = epnfs(2) * (annet - epnfs(1))
endif
c ... Reset annual accumulator for evapotranspiration
annet = 0
if (wdfxs .lt. 0.) then
wdfxs = 0.0
endif
wdfx = wdfxa+wdfxs
c ... Atmospheric S deposition
satmt = max(0.0, satmos(1) + satmos(2)*prcann)
cisofr = 0.0
cisotf = 0.0
c ... Initialize co2 effects
call co2eff(time)
if (cursys .ne. FORSYS) then
c ..... Determine what fraction of plant residue added this year will be lignin.
call cmplig(cursys,fligni,wdlig,pltlig)
endif
c ... Determine the number of days in each month. The leap year exception
c ... will be handled in getwth.
do 110 imo = 1, 12
dysimo(imo) = idysimo(imo)
lstdy(imo) = ilstdy(imo)
frstdy(imo) = ifrstdy(imo)
110 continue
return
end
module nc4fortran
!! NetCDF4 object-oriented polymorphic interface
use, intrinsic :: iso_c_binding, only : c_ptr, c_loc
use, intrinsic :: iso_fortran_env, only : real32, real64, int32, int64, stderr=>error_unit
use netcdf, only : nf90_create, nf90_open, NF90_WRITE, NF90_CLOBBER, NF90_NETCDF4, NF90_MAX_NAME, &
NF90_NOERR, NF90_EHDFERR, NF90_EBADNAME, NF90_EBADDIM, NF90_EBADTYPE, NF90_EBADGRPID, NF90_ENOTNC, NF90_ENOTVAR, &
NF90_ECHAR, NF90_EEDGE, NF90_ENAMEINUSE, NF90_EBADID, NF90_EINDEFINE, NF90_NOWRITE, NF90_EDIMSIZE, &
nf90_open, nf90_close, nf90_estride, nf90_inq_varid, nf90_inq_dimid, nf90_inquire_dimension, &
nf90_def_dim, nf90_def_var, nf90_get_var, nf90_put_var, &
nf90_inq_libvers, nf90_sync, nf90_inquire_variable
implicit none (type, external)
private
public :: netcdf_file, NF90_MAX_NAME, NF90_NOERR, check_error, is_netcdf, nc_exist, nc4version
!! at this time, we assume up to 7 dimension NetCDF variable.
integer, parameter :: NC_MAXDIM = 7
!> main type
type :: netcdf_file
character(:), allocatable :: filename
integer :: file_id !< location identifier
integer :: comp_lvl = 0 !< compression level (1-9) 0: disable compression
logical :: verbose = .false.
logical :: debug = .false.
logical :: is_open = .false.
!! will be auto-deleted on close
contains
!> methods used directly without type/rank agnosticism
procedure, public :: open => nc_open
procedure, public :: close => nc_close
procedure, public :: create => nc_create
procedure, public :: shape => get_shape
procedure, public :: ndim => get_ndim
procedure, public :: ndims => get_ndim
procedure, public :: write_attribute
procedure, public :: read_attribute
procedure, public :: flush=>nc_flush
procedure, public :: deflate => get_deflate
procedure, public :: exist=>nc_check_exist
procedure, public :: exists=>nc_check_exist
procedure, public :: is_chunked
procedure, public :: is_contig
procedure, public :: chunks=>get_chunk
!> generic procedures mapped over type / rank
generic, public :: write => &
nc_write_scalar, nc_write_1d, nc_write_2d, nc_write_3d, nc_write_4d, nc_write_5d, nc_write_6d, nc_write_7d
generic, public :: read => nc_read_scalar, nc_read_1d, nc_read_2d, nc_read_3d, nc_read_4d, nc_read_5d, nc_read_6d, nc_read_7d
procedure, private :: nc_write_scalar, nc_write_1d, nc_write_2d, nc_write_3d, nc_write_4d, nc_write_5d, nc_write_6d, nc_write_7d, &
nc_read_scalar, nc_read_1d, nc_read_2d, nc_read_3d, nc_read_4d, nc_read_5d, nc_read_6d, nc_read_7d, &
def_dims
!> flush file to disk and close file if user forgets to do so.
final :: destructor
end type netcdf_file
!> Submodules
interface !< write.f90
module subroutine nc_create(self, dset_name, dtype, dims, dim_names, chunk_size, fill_value, varid)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dset_name
integer, intent(in) :: dtype
integer, intent(in) :: dims(:)
character(*), intent(in), optional :: dim_names(:)
integer, intent(in), optional :: chunk_size(:)
class(*), intent(in), optional :: fill_value
integer, intent(out), optional :: varid
end subroutine
module subroutine def_dims(self, dname, dim_names, dims, dimids)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
character(*), intent(in), optional :: dim_names(:)
integer, intent(in) :: dims(:)
integer, intent(out) :: dimids(size(dims))
end subroutine
module subroutine nc_flush(self)
class(netcdf_file), intent(in) :: self
end subroutine
end interface
interface !< writer.f90
module subroutine nc_write_scalar(self, dname, value)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(in) :: value
end subroutine
module subroutine nc_write_1d(self, dname, value, dims, istart, iend, stride, chunk_size)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(in) :: value(:)
character(*), intent(in), optional :: dims(1)
integer, intent(in), dimension(1), optional :: istart, iend, stride
integer, intent(in), dimension(1), optional :: chunk_size
end subroutine
module subroutine nc_write_2d(self, dname, value, dims, istart, iend, stride, chunk_size)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(in) :: value(:,:)
character(*), intent(in), optional :: dims(2)
integer, intent(in), dimension(2), optional :: istart, iend, stride
integer, intent(in), dimension(2), optional :: chunk_size
end subroutine
module subroutine nc_write_3d(self, dname, value, dims, istart, iend, stride, chunk_size)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(in) :: value(:,:,:)
character(*), intent(in), optional :: dims(3)
integer, intent(in), dimension(3), optional :: istart, iend, stride
integer, intent(in), dimension(3), optional :: chunk_size
end subroutine
module subroutine nc_write_4d(self, dname, value, dims, istart, iend, stride, chunk_size)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(in) :: value(:,:,:,:)
character(*), intent(in), optional :: dims(4)
integer, intent(in), dimension(4), optional :: istart, iend, stride
integer, intent(in), dimension(4), optional :: chunk_size
end subroutine
module subroutine nc_write_5d(self, dname, value, dims, istart, iend, stride, chunk_size)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(in) :: value(:,:,:,:,:)
character(*), intent(in), optional :: dims(5)
integer, intent(in), dimension(5), optional :: istart, iend, stride
integer, intent(in), dimension(5), optional :: chunk_size
end subroutine
module subroutine nc_write_6d(self, dname, value, dims, istart, iend, stride, chunk_size)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(in) :: value(:,:,:,:,:,:)
character(*), intent(in), optional :: dims(6)
integer, intent(in), dimension(6), optional :: istart, iend, stride
integer, intent(in), dimension(6), optional :: chunk_size
end subroutine
module subroutine nc_write_7d(self, dname, value, dims, istart, iend, stride, chunk_size)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(in) :: value(:,:,:,:,:,:,:)
character(*), intent(in), optional :: dims(7)
integer, intent(in), dimension(7), optional :: istart, iend, stride
integer, intent(in), dimension(7), optional :: chunk_size
end subroutine
end interface
interface !< read.f90
module subroutine get_chunk(self, dname, chunk_size)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
integer, intent(out) :: chunk_size(:)
end subroutine
module integer function get_ndim(self, dname) result (drank)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
end function get_ndim
module subroutine get_shape(self, dname, dims, dimnames)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
integer, intent(out), allocatable :: dims(:)
character(NF90_MAX_NAME), intent(out), allocatable, optional :: dimnames(:)
end subroutine
module logical function get_deflate(self, dname)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
end function
module logical function nc_check_exist(self, dname) result(exists)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
end function nc_check_exist
module logical function nc_exist(filename, dname)
character(*), intent(in) :: filename, dname
end function nc_exist
end interface
interface !< reader.f90
module subroutine nc_read_scalar(self, dname, value)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(inout) :: value
!! inout for character
end subroutine
module subroutine nc_read_1d(self, dname, value, istart, iend, stride)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(inout) :: value(:)
integer, intent(in), dimension(1), optional :: istart, iend, stride
end subroutine
module subroutine nc_read_2d(self, dname, value, istart, iend, stride)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(inout) :: value(:,:)
integer, intent(in), dimension(2), optional :: istart, iend, stride
end subroutine
module subroutine nc_read_3d(self, dname, value, istart, iend, stride)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(inout) :: value(:,:,:)
integer, intent(in), dimension(3), optional :: istart, iend, stride
end subroutine
module subroutine nc_read_4d(self, dname, value, istart, iend, stride)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(inout) :: value(:,:,:,:)
integer, intent(in), dimension(4), optional :: istart, iend, stride
end subroutine
module subroutine nc_read_5d(self, dname, value, istart, iend, stride)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(inout) :: value(:,:,:,:,:)
integer, intent(in), dimension(5), optional :: istart, iend, stride
end subroutine
module subroutine nc_read_6d(self, dname, value, istart, iend, stride)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(inout) :: value(:,:,:,:,:,:)
integer, intent(in), dimension(6), optional :: istart, iend, stride
end subroutine
module subroutine nc_read_7d(self, dname, value, istart, iend, stride)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
class(*), intent(inout) :: value(:,:,:,:,:,:,:)
integer, intent(in), dimension(7), optional :: istart, iend, stride
end subroutine
end interface
interface !< attributes.f90
module subroutine write_attribute(self, dname, attrname, value)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname, attrname
class(*), intent(in) :: value
end subroutine
module subroutine read_attribute(self, dname, attrname, value)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname, attrname
class(*), intent(inout) :: value
!! inout for character
end subroutine
end interface
interface !< utils.f90
module subroutine nc_open(self, filename, action, comp_lvl, verbose, debug)
!! Opens NetCDF file
class(netcdf_file), intent(inout) :: self
character(*), intent(in) :: filename
character(*), intent(in), optional :: action
integer, intent(in), optional :: comp_lvl
logical, intent(in), optional :: verbose, debug
end subroutine
module subroutine destructor(self)
!! Close file and handle if user forgets to do so
type(netcdf_file), intent(inout) :: self
end subroutine
module subroutine nc_close(self)
class(netcdf_file), intent(inout) :: self
end subroutine
module function nc4version()
!! get NetCDF4 library version
character(:), allocatable :: nc4version
end function
module logical function is_chunked(self, dname)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
end function
module logical function is_contig(self, dname)
class(netcdf_file), intent(in) :: self
character(*), intent(in) :: dname
end function
module logical function is_netcdf(filename)
!! is this file NetCDF4?
character(*), intent(in) :: filename
end function
module logical function check_error(code, dname)
integer, intent(in) :: code
character(*), intent(in) :: dname
end function
end interface
end module nc4fortran
\title{Fixed Point (Q) format and Fixed Point Rounding in myHDL}
\author{Steven K Armour}
\maketitle
# References
https://timetoexplore.net/blog/fixed-point-numbers-in-verilog
```python
from myhdl import *
from myhdlpeek import Peeker
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sympy import *
init_printing()
from bitstring import BitArray
#https://github.com/jrjohansson/version_information
%load_ext version_information
%version_information myhdl, myhdlpeek, numpy, pandas, matplotlib, sympy, bitstring
```
<table><tr><th>Software</th><th>Version</th></tr><tr><td>Python</td><td>3.6.5 64bit [GCC 7.2.0]</td></tr><tr><td>IPython</td><td>6.4.0</td></tr><tr><td>OS</td><td>Linux 4.15.0 30 generic x86_64 with debian buster sid</td></tr><tr><td>myhdl</td><td>0.10</td></tr><tr><td>myhdlpeek</td><td>0.0.7</td></tr><tr><td>numpy</td><td>1.14.3</td></tr><tr><td>pandas</td><td>0.23.0</td></tr><tr><td>matplotlib</td><td>2.2.2</td></tr><tr><td>sympy</td><td>1.1.1</td></tr><tr><td>bitstring</td><td>3.1.5</td></tr><tr><td colspan='2'>Sat Aug 25 17:54:05 2018 MDT</td></tr></table>
```python
#helper functions to read in the .v and .vhd generated files into python
def VerilogTextReader(loc, printresult=True):
    with open(f'{loc}.v', 'r') as vText:
        VerilogText=vText.read()
    if printresult:
        print(f'***Verilog module from {loc}.v***\n\n', VerilogText)
    return VerilogText
def VHDLTextReader(loc, printresult=True):
    with open(f'{loc}.vhd', 'r') as vText:
        VerilogText=vText.read()
    if printresult:
        print(f'***VHDL module from {loc}.vhd***\n\n', VerilogText)
    return VerilogText
```
```python
#4 bit integer part, 4 bit fractional part
Q=(4,4)
Qlen=Q[0]+Q[1]
Qscale=2**(Q[1]); Qscale
```
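With `Q=(4,4)` every value is stored as an 8-bit integer equal to the real number multiplied by `Qscale=16`: one LSB represents 1/16 = 0.0625, an unsigned Q4.4 word covers [0, 16), and a signed (two's complement) Q4.4 word covers [-8, 8).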
# Positive Addition
```python
a=3.6250; b=4.0625
c=a+b; c
```
```python
aQ=int(a*Qscale); bQ=int(b*Qscale)
f'aQ:{aQ}; bQ:{bQ}'
```
'aQ:58; bQ:65'
```python
aQBV=intbv(aQ)[Qlen:]; bQBV=intbv(bQ)[Qlen:]
f'aQBV: {bin(aQBV, Qlen)}; bQBV: {bin(bQBV, Qlen)}'
```
'aQBV: 00111010; bQBV: 01000001'
```python
cQ=aQBV+bQBV; cQ
```
```python
c==cQ/Qscale
```
True
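The quantize/add/dequantize round trip above can be wrapped in two small helpers. This is a minimal illustrative sketch (the names `float_to_q` and `q_to_float` are not used elsewhere in this notebook), assuming the unsigned Q4.4 settings defined earlier:

```python
#minimal sketch: unsigned Q4.4 helpers (illustrative names, not part of myHDL)
Qscale=2**4   #16, same value as above; one LSB is 1/16

def float_to_q(x):
    """Quantize a non-negative float to unsigned Q4.4 (truncating, like int() above)"""
    return int(x*Qscale)

def q_to_float(xQ):
    """Recover the float represented by an unsigned Q4.4 integer"""
    return xQ/Qscale

a=3.6250; b=4.0625
cQ=float_to_q(a)+float_to_q(b)   #58+65=123
q_to_float(cQ)                   #7.6875
```

Because 3.6250 and 4.0625 are both exact multiples of 1/16, no rounding error appears in this particular sum.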
```python
class AddPosTVGen():
"""
    Class to generate positive random numbers to be Qed for testing
"""
def __init__(self, Q, N):
"""
Take in arguments and create output holds
Args:
Q(tuple): Q notation tuple where Q[0] is int bit len and Q[1] is
dec bit len
N(int): number of values to generate
"""
self.Q=Q; self.N=N
self.Qlen=self.Q[0]+self.Q[1]; self.Qmax=2**self.Qlen
self.Qscale=2**self.Q[1]
self.aTV=np.zeros(0); self.aTVQ=np.zeros(0)
self.bTV=np.zeros(0); self.bTVQ=np.zeros(0)
self.cK=np.zeros(0); self.cKQ=np.zeros(0)
def Genrator(self):
"""
        Random number generator in floating point and subsequent Qed version
"""
self.V1=np.array((1/np.random.ranf())).round(decimals=self.Q[1])
        #needed to force np.random to generate a different random num
np.random.seed(np.random.randint(self.Qmax))
self.V2=np.array((1/np.random.ranf())).round(decimals=self.Q[1])
self.V1Q=(self.V1*self.Qscale).astype(int)
self.V2Q=(self.V2*self.Qscale).astype(int)
def GenratorCheckAndAdd(self):
"""
        Checks whether the sum of the two random numbers just generated would exceed Qmax;
        if it would, do not append them to the return holds
"""
self.V1pV2=(self.V1+self.V2).round(decimals=self.Q[1])
self.V1pV2Q=(self.V1pV2*self.Qscale).astype(int)
if (self.V1Q+self.V2Q)<self.Qmax:
self.aTV=np.append(self.aTV, self.V1); self.aTVQ=np.append(self.aTVQ, self.V1Q).astype(int)
            self.bTV=np.append(self.bTV, self.V2); self.bTVQ=np.append(self.bTVQ, self.V2Q).astype(int)
self.cK=np.append(self.cK, self.V1pV2); self.cKQ=np.append(self.cKQ, self.V1pV2Q).astype(int)
def MakeTVs(self):
"""
Automates the generating, testing and appending to make the TVs
Returns:
self.aTV(np.array): floating point numbers for a
self.aTVQ(np.array): fixed point Qed from self.aTV
self.bTV(np.array): floating point numbers for b
self.bTVQ(np.array): fixed point Qed from self.bTV
self.cK(np.array): known floating point rounded sum of self.aTV, self.bTV
self.cKQ(np.array): known fixed point Qed from self.cK
"""
while len(self.aTV)<=self.N:
self.Genrator()
self.GenratorCheckAndAdd()
#print('Done')
```
```python
@block
def AdderBehaverial(a, b, c):
@always_comb
def logic():
c.next=a+b
return instances()
```
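Note that the sum of two 8-bit Q4.4 values can need 9 bits (Q5.4). Since `c` here is only `Qlen` bits wide, `AddPosTVGen` keeps only operand pairs whose fixed-point sum stays below `Qmax`, so the testbench never exercises the overflow case.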
```python
Peeker.clear()
a=Signal(intbv(0)[Qlen:]); Peeker(a, 'a')
b=Signal(intbv(0)[Qlen:]); Peeker(b, 'b')
c=Signal(intbv(0)[Qlen:]); Peeker(c, 'c')
TVG=AddPosTVGen(Q, 100); TVG.MakeTVs()
aTV=TVG.aTV; aTVQ=TVG.aTVQ
bTV=TVG.bTV; bTVQ=TVG.bTVQ
cKTV=TVG.cK; cKTVQ=TVG.cKQ
DUT=AdderBehaverial(a, b, c)
def Adder_TB():
@instance
def simules():
for i in range(len(aTVQ)):
a.next=int(aTVQ[i])
b.next=int(bTVQ[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, Adder_TB(), *Peeker.instances()).run()
```
```python
Peeker.to_wavedrom()
```
```python
PAOData=Peeker.to_dataframe()
#load in the source floating values
PAOData['aTV']=aTV; PAOData['bTV']=bTV
#get the predicted floating Point Sum
PAOData['aTV+bTV']=aTV+bTV
#get the predicted fixed point sum
PAOData['aQ+bQ']=aTVQ+bTVQ
#reorder
PAOData=PAOData[['a', 'aTV', 'b', 'bTV', 'aTV+bTV', 'aQ+bQ', 'c']]
#load the sourced Qed sum
PAOData['cKTVQ']=cKTVQ
#de Q the testbench gen sum
PAOData['cdQ']=PAOData['c']/Qscale
#load the sourced floating sum
PAOData['cKTV']=cKTV
PAOData
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>a</th>
<th>aTV</th>
<th>b</th>
<th>bTV</th>
<th>aTV+bTV</th>
<th>aQ+bQ</th>
<th>c</th>
<th>cKTVQ</th>
<th>cdQ</th>
<th>cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>33</td>
<td>2.0671</td>
<td>33</td>
<td>2.0671</td>
<td>4.1342</td>
<td>66</td>
<td>66</td>
<td>76</td>
<td>4.125</td>
<td>4.7942</td>
</tr>
<tr>
<th>1</th>
<td>25</td>
<td>1.5798</td>
<td>25</td>
<td>1.5798</td>
<td>3.1596</td>
<td>50</td>
<td>50</td>
<td>50</td>
<td>3.125</td>
<td>3.1322</td>
</tr>
<tr>
<th>2</th>
<td>42</td>
<td>2.6264</td>
<td>42</td>
<td>2.6264</td>
<td>5.2528</td>
<td>84</td>
<td>84</td>
<td>97</td>
<td>5.250</td>
<td>6.0666</td>
</tr>
<tr>
<th>3</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>4</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>5</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>6</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>7</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>8</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>9</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>10</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>11</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>12</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>13</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>14</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>15</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>16</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>17</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>18</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>19</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>20</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>21</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>22</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>23</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>24</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>25</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>26</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>27</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>28</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>29</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>71</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>72</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>73</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>74</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>75</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>76</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>77</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>78</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>79</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>80</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>81</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>82</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>83</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>84</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>85</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>86</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>87</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>88</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>89</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>90</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>91</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>92</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>93</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>94</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>95</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>96</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>97</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>98</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
<tr>
<th>99</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.2822</td>
<td>2.5644</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>100</th>
<td>18</td>
<td>1.1643</td>
<td>18</td>
<td>1.1643</td>
<td>2.3286</td>
<td>36</td>
<td>36</td>
<td>228</td>
<td>2.250</td>
<td>14.2690</td>
</tr>
</tbody>
</table>
<p>101 rows × 10 columns</p>
</div>
```python
#dataframe of error measures
PAODataErr=pd.DataFrame()
PAODataErr['aQ+bQ_c']=np.abs(PAOData['aQ+bQ']-PAOData['c'])
PAODataErr['c_cKTVQ']=np.abs(PAOData['c']-PAOData['cKTVQ'])
PAODataErr['cdQ_cKTV']=np.abs(PAOData['cdQ']-PAOData['cKTV'])
PAODataErr['c_cKTVQ__cdQ_cKTV']=np.abs((PAODataErr['c_cKTVQ']/ Qscale)- PAODataErr['cdQ_cKTV'])
PAODataErr
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>aQ+bQ_c</th>
<th>c_cKTVQ</th>
<th>cdQ_cKTV</th>
<th>c_cKTVQ__cdQ_cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>10</td>
<td>0.6692</td>
<td>0.0442</td>
</tr>
<tr>
<th>1</th>
<td>0</td>
<td>0</td>
<td>0.0072</td>
<td>0.0072</td>
</tr>
<tr>
<th>2</th>
<td>0</td>
<td>13</td>
<td>0.8166</td>
<td>0.0041</td>
</tr>
<tr>
<th>3</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>4</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>5</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>6</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>7</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>8</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>9</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>10</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>11</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>12</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>13</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>14</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>15</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>16</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>17</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>18</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>19</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>20</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>21</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>22</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>23</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>24</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>25</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>26</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>27</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>28</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>29</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>71</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>72</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>73</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>74</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>75</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>76</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>77</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>78</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>79</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>80</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>81</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>82</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>83</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>84</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>85</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>86</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>87</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>88</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>89</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>90</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>91</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>92</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>93</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>94</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>95</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>96</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>97</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>98</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
<tr>
<th>99</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>100</th>
<td>0</td>
<td>192</td>
<td>12.0190</td>
<td>0.0190</td>
</tr>
</tbody>
</table>
<p>101 rows × 4 columns</p>
</div>
```python
PAODataErr.describe()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>aQ+bQ_c</th>
<th>c_cKTVQ</th>
<th>cdQ_cKTV</th>
<th>c_cKTVQ__cdQ_cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>count</th>
<td>101.0</td>
<td>101.000000</td>
<td>101.000000</td>
<td>101.000000</td>
</tr>
<tr>
<th>mean</th>
<td>0.0</td>
<td>97.742574</td>
<td>6.143760</td>
<td>0.034850</td>
</tr>
<tr>
<th>std</th>
<td>0.0</td>
<td>91.959736</td>
<td>5.732047</td>
<td>0.016739</td>
</tr>
<tr>
<th>min</th>
<td>0.0</td>
<td>0.000000</td>
<td>0.007200</td>
<td>0.004100</td>
</tr>
<tr>
<th>25%</th>
<td>0.0</td>
<td>9.000000</td>
<td>0.614200</td>
<td>0.019000</td>
</tr>
<tr>
<th>50%</th>
<td>0.0</td>
<td>10.000000</td>
<td>0.669200</td>
<td>0.019000</td>
</tr>
<tr>
<th>75%</th>
<td>0.0</td>
<td>192.000000</td>
<td>12.019000</td>
<td>0.051700</td>
</tr>
<tr>
<th>max</th>
<td>0.0</td>
<td>192.000000</td>
<td>12.019000</td>
<td>0.051700</td>
</tr>
</tbody>
</table>
</div>
```python
DUT.convert()
VerilogTextReader('AdderBehaverial');
```
***Verilog module from AdderBehaverial.v***
// File: AdderBehaverial.v
// Generated by MyHDL 0.10
// Date: Sat Aug 25 17:54:15 2018
`timescale 1ns/10ps
module AdderBehaverial (
a,
b,
c
);
input [7:0] a;
input [7:0] b;
output [7:0] c;
wire [7:0] c;
assign c = (a + b);
endmodule
# Negative Values
```python
a=3.6250; aQ=int(a*Qscale);a, aQ
```
```python
b=-1.5; bMagQ=int(abs(b)*Qscale); bMagQ
```
```python
bMagQBV=bin(bMagQ, Qlen); bMagQBV
```
'00011000'
```python
bQBVComp="".join([str(int(not(int(i)))) for i in bMagQBV]); bQBVComp
```
'11100111'
```python
bQComp=int(bQBVComp, 2); bQComp
```
```python
bQ2Comp=bQComp+1; bQ2Comp
```
```python
bQBV2Comp=bin(bQ2Comp, 2); bQBV2Comp
```
'11101000'
```python
(BitArray(bin=bQBV2Comp).int)/ Qscale
```
```python
aQBV=intbv(aQ)[Qlen:].signed()
aQBV, bin(aQBV, Qlen), aQBV.min, aQBV.max
```
(intbv(58), '00111010', -128, 128)
```python
bQBV=intbv(int(b*Qscale))[Qlen:].signed()
bQBV, bin(bQBV, Qlen)
```
(intbv(-24), '11101000')
```python
bQBV2Comp==bin(bQBV, Qlen)
```
True
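The manual invert-and-add-one steps above are essentially what `intbv(...)[Qlen:].signed()` provides for us. As a minimal sketch in plain Python (the helper names are illustrative, not part of myHDL), the same signed Q4.4 encode/decode can be written with two's-complement masking:

```python
#minimal sketch: signed Q4.4 encode/decode via two's-complement masking
Qlen=8; Qscale=16          #same values as above
MASK=(1<<Qlen)-1           #0xFF

def to_q_signed(x):
    """Encode a float as an 8-bit two's-complement Q4.4 bit pattern"""
    return int(x*Qscale) & MASK

def from_q_signed(xQ):
    """Decode an 8-bit two's-complement Q4.4 bit pattern back to a float"""
    if xQ >= 1<<(Qlen-1):  #sign bit set
        xQ-=1<<Qlen
    return xQ/Qscale

format(to_q_signed(-1.5), '08b'), from_q_signed(0b11101000)
#('11101000', -1.5), matching bQBV2Comp above
```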
```python
a+b
```
```python
c=aQBV+bQBV; c, c/Qscale
```
```python
class AddPosNegTVGen():
"""
    Class to generate positive and negative random numbers to be Qed for testing
"""
def __init__(self, Q, N):
"""
Take in arguments and create output holds
Args:
Q(tuple): Q notation tuple where Q[0] is int bit len and Q[1] is
dec bit len
N(int): number of values to generate
"""
self.Q=Q; self.N=N
self.Qlen=self.Q[0]+self.Q[1]
self.Qmin=-(2**(Qlen-1)); self.Qmax=2**(self.Qlen-1) -1
self.Qscale=2**self.Q[1]
self.aTV=np.zeros(0); self.aTVQ=np.zeros(0)
self.bTV=np.zeros(0); self.bTVQ=np.zeros(0)
self.cK=np.zeros(0); self.cKQ=np.zeros(0)
def Genrator(self):
"""
        Random number generator in floating point and subsequent Qed version
"""
self.V1=np.array((1/np.random.ranf())).round(decimals=self.Q[1])
        #needed to force np.random to generate a different random num
np.random.seed(np.random.randint(self.Qmax))
self.V2=np.array((1/np.random.ranf())).round(decimals=self.Q[1])
        #needed to force np.random to generate a different random num
np.random.seed(np.random.randint(self.Qmax))
self.Sign=np.random.randint(2)
if self.Sign==1:
self.V2=-self.V2
self.V1Q=(self.V1*self.Qscale).astype(int)
self.V2Q=(self.V2*self.Qscale).astype(int)
def GenratorCheckAndAdd(self):
"""
        Checks whether either random number or their sum falls outside the signed Q range;
        if so, do not append them to the return holds
"""
self.V1pV2=(self.V1+self.V2).round(decimals=self.Q[1])
self.V1pV2Q=(self.V1pV2*self.Qscale).astype(int)
check=self.V1Q+self.V2Q
if self.V1Q>self.Qmin and self.V1Q<self.Qmax:
if self.V2Q>self.Qmin and self.V2Q<self.Qmax:
if check>self.Qmin and check<self.Qmax:
self.aTV=np.append(self.aTV, self.V1); self.aTVQ=np.append(self.aTVQ, self.V1Q).astype(int)
                    self.bTV=np.append(self.bTV, self.V2); self.bTVQ=np.append(self.bTVQ, self.V2Q).astype(int)
self.cK=np.append(self.cK, self.V1pV2); self.cKQ=np.append(self.cKQ, self.V1pV2Q).astype(int)
def MakeTVs(self):
"""
Automates the generating, testing and appending to make the TVs
Returns:
self.aTV(np.array): floating point numbers for a
self.aTVQ(np.array): fixed point Qed from self.aTV
self.bTV(np.array): floating point numbers for b
self.bTVQ(np.array): fixed point Qed from self.bTV
self.cK(np.array): known floating point rounded sum of self.aTV, self.bTV
self.cKQ(np.array): known fixed point Qed from self.cK
"""
while len(self.aTV)<=self.N:
self.Genrator()
self.GenratorCheckAndAdd()
#print('Done')
```
```python
Peeker.clear()
a=Signal(intbv(0)[Qlen:].signed()); Peeker(a, 'a')
b=Signal(intbv(0)[Qlen:].signed()); Peeker(b, 'b')
c=Signal(intbv(0)[Qlen:].signed()); Peeker(c, 'c')
TVG=AddPosNegTVGen(Q, 100); TVG.MakeTVs()
aTV=TVG.aTV; aTVQ=TVG.aTVQ
bTV=TVG.bTV; bTVQ=TVG.bTVQ
cKTV=TVG.cK; cKTVQ=TVG.cKQ
DUT=AdderBehaverial(a, b, c)
def Adder_TB():
@instance
def simules():
for i in range(len(aTVQ)):
a.next=int(aTVQ[i])
b.next=int(bTVQ[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, Adder_TB(), *Peeker.instances()).run()
```
```python
Peeker.to_wavedrom()
```
```python
AOData=Peeker.to_dataframe()
#load in the source floating values
AOData['aTV']=aTV; AOData['bTV']=bTV
#get the predicted floating Point Sum
AOData['aTV+bTV']=aTV+bTV
#get the predicted fixed point sum
AOData['aQ+bQ']=aTVQ+bTVQ
#reorder
AOData=AOData[['a', 'aTV', 'b', 'bTV', 'aTV+bTV', 'aQ+bQ', 'c']]
#load the sourced Qed sum
AOData['cKTVQ']=cKTVQ
#de Q the testbench gen sum
AOData['cdQ']=AOData['c']/Qscale
#load the sourced floating sum
AOData['cKTV']=cKTV
AOData
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>a</th>
<th>aTV</th>
<th>b</th>
<th>bTV</th>
<th>aTV+bTV</th>
<th>aQ+bQ</th>
<th>c</th>
<th>cKTVQ</th>
<th>cdQ</th>
<th>cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>20</td>
<td>1.2822</td>
<td>20</td>
<td>1.8320</td>
<td>3.1142</td>
<td>40</td>
<td>40</td>
<td>49</td>
<td>2.500</td>
<td>3.1142</td>
</tr>
<tr>
<th>1</th>
<td>38</td>
<td>2.3776</td>
<td>38</td>
<td>-1.5560</td>
<td>0.8216</td>
<td>76</td>
<td>76</td>
<td>13</td>
<td>4.750</td>
<td>0.8216</td>
</tr>
<tr>
<th>2</th>
<td>62</td>
<td>3.8753</td>
<td>62</td>
<td>2.6655</td>
<td>6.5408</td>
<td>124</td>
<td>124</td>
<td>104</td>
<td>7.750</td>
<td>6.5408</td>
</tr>
<tr>
<th>3</th>
<td>22</td>
<td>1.4366</td>
<td>22</td>
<td>-1.1781</td>
<td>0.2585</td>
<td>44</td>
<td>44</td>
<td>4</td>
<td>2.750</td>
<td>0.2585</td>
</tr>
<tr>
<th>4</th>
<td>35</td>
<td>2.2410</td>
<td>35</td>
<td>1.9735</td>
<td>4.2145</td>
<td>70</td>
<td>70</td>
<td>67</td>
<td>4.375</td>
<td>4.2145</td>
</tr>
<tr>
<th>5</th>
<td>49</td>
<td>3.0806</td>
<td>49</td>
<td>-1.8052</td>
<td>1.2754</td>
<td>98</td>
<td>98</td>
<td>20</td>
<td>6.125</td>
<td>1.2754</td>
</tr>
<tr>
<th>6</th>
<td>23</td>
<td>1.4397</td>
<td>23</td>
<td>-1.1493</td>
<td>0.2904</td>
<td>46</td>
<td>46</td>
<td>4</td>
<td>2.875</td>
<td>0.2904</td>
</tr>
<tr>
<th>7</th>
<td>19</td>
<td>1.2206</td>
<td>19</td>
<td>-3.4958</td>
<td>-2.2752</td>
<td>38</td>
<td>38</td>
<td>-36</td>
<td>2.375</td>
<td>-2.2752</td>
</tr>
<tr>
<th>8</th>
<td>48</td>
<td>3.0613</td>
<td>48</td>
<td>3.3755</td>
<td>6.4368</td>
<td>96</td>
<td>96</td>
<td>102</td>
<td>6.000</td>
<td>6.4368</td>
</tr>
<tr>
<th>9</th>
<td>52</td>
<td>3.2753</td>
<td>52</td>
<td>1.2965</td>
<td>4.5718</td>
<td>104</td>
<td>104</td>
<td>73</td>
<td>6.500</td>
<td>4.5718</td>
</tr>
<tr>
<th>10</th>
<td>23</td>
<td>1.4900</td>
<td>23</td>
<td>-2.3142</td>
<td>-0.8242</td>
<td>46</td>
<td>46</td>
<td>-13</td>
<td>2.875</td>
<td>-0.8242</td>
</tr>
<tr>
<th>11</th>
<td>37</td>
<td>2.3231</td>
<td>37</td>
<td>-1.3656</td>
<td>0.9575</td>
<td>74</td>
<td>74</td>
<td>15</td>
<td>4.625</td>
<td>0.9575</td>
</tr>
<tr>
<th>12</th>
<td>19</td>
<td>1.2145</td>
<td>19</td>
<td>1.2858</td>
<td>2.5003</td>
<td>38</td>
<td>38</td>
<td>40</td>
<td>2.375</td>
<td>2.5003</td>
</tr>
<tr>
<th>13</th>
<td>38</td>
<td>2.3776</td>
<td>38</td>
<td>-1.5560</td>
<td>0.8216</td>
<td>76</td>
<td>76</td>
<td>13</td>
<td>4.750</td>
<td>0.8216</td>
</tr>
<tr>
<th>14</th>
<td>62</td>
<td>3.8753</td>
<td>62</td>
<td>2.6655</td>
<td>6.5408</td>
<td>124</td>
<td>124</td>
<td>104</td>
<td>7.750</td>
<td>6.5408</td>
</tr>
<tr>
<th>15</th>
<td>22</td>
<td>1.4366</td>
<td>22</td>
<td>-1.1781</td>
<td>0.2585</td>
<td>44</td>
<td>44</td>
<td>4</td>
<td>2.750</td>
<td>0.2585</td>
</tr>
<tr>
<th>16</th>
<td>35</td>
<td>2.2410</td>
<td>35</td>
<td>1.9735</td>
<td>4.2145</td>
<td>70</td>
<td>70</td>
<td>67</td>
<td>4.375</td>
<td>4.2145</td>
</tr>
<tr>
<th>17</th>
<td>49</td>
<td>3.0806</td>
<td>49</td>
<td>-1.8052</td>
<td>1.2754</td>
<td>98</td>
<td>98</td>
<td>20</td>
<td>6.125</td>
<td>1.2754</td>
</tr>
<tr>
<th>18</th>
<td>23</td>
<td>1.4397</td>
<td>23</td>
<td>-1.1493</td>
<td>0.2904</td>
<td>46</td>
<td>46</td>
<td>4</td>
<td>2.875</td>
<td>0.2904</td>
</tr>
<tr>
<th>19</th>
<td>19</td>
<td>1.2206</td>
<td>19</td>
<td>-3.4958</td>
<td>-2.2752</td>
<td>38</td>
<td>38</td>
<td>-36</td>
<td>2.375</td>
<td>-2.2752</td>
</tr>
<tr>
<th>20</th>
<td>48</td>
<td>3.0613</td>
<td>48</td>
<td>3.3755</td>
<td>6.4368</td>
<td>96</td>
<td>96</td>
<td>102</td>
<td>6.000</td>
<td>6.4368</td>
</tr>
<tr>
<th>21</th>
<td>52</td>
<td>3.2753</td>
<td>52</td>
<td>1.2965</td>
<td>4.5718</td>
<td>104</td>
<td>104</td>
<td>73</td>
<td>6.500</td>
<td>4.5718</td>
</tr>
<tr>
<th>22</th>
<td>23</td>
<td>1.4900</td>
<td>23</td>
<td>-2.3142</td>
<td>-0.8242</td>
<td>46</td>
<td>46</td>
<td>-13</td>
<td>2.875</td>
<td>-0.8242</td>
</tr>
<tr>
<th>23</th>
<td>37</td>
<td>2.3231</td>
<td>37</td>
<td>-1.3656</td>
<td>0.9575</td>
<td>74</td>
<td>74</td>
<td>15</td>
<td>4.625</td>
<td>0.9575</td>
</tr>
<tr>
<th>24</th>
<td>19</td>
<td>1.2145</td>
<td>19</td>
<td>1.2858</td>
<td>2.5003</td>
<td>38</td>
<td>38</td>
<td>40</td>
<td>2.375</td>
<td>2.5003</td>
</tr>
<tr>
<th>25</th>
<td>38</td>
<td>2.3776</td>
<td>38</td>
<td>-1.5560</td>
<td>0.8216</td>
<td>76</td>
<td>76</td>
<td>13</td>
<td>4.750</td>
<td>0.8216</td>
</tr>
<tr>
<th>26</th>
<td>62</td>
<td>3.8753</td>
<td>62</td>
<td>2.6655</td>
<td>6.5408</td>
<td>124</td>
<td>124</td>
<td>104</td>
<td>7.750</td>
<td>6.5408</td>
</tr>
<tr>
<th>27</th>
<td>22</td>
<td>1.4366</td>
<td>22</td>
<td>-1.1781</td>
<td>0.2585</td>
<td>44</td>
<td>44</td>
<td>4</td>
<td>2.750</td>
<td>0.2585</td>
</tr>
<tr>
<th>28</th>
<td>35</td>
<td>2.2410</td>
<td>35</td>
<td>1.9735</td>
<td>4.2145</td>
<td>70</td>
<td>70</td>
<td>67</td>
<td>4.375</td>
<td>4.2145</td>
</tr>
<tr>
<th>29</th>
<td>49</td>
<td>3.0806</td>
<td>49</td>
<td>-1.8052</td>
<td>1.2754</td>
<td>98</td>
<td>98</td>
<td>20</td>
<td>6.125</td>
<td>1.2754</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>71</th>
<td>37</td>
<td>2.3231</td>
<td>37</td>
<td>-1.3656</td>
<td>0.9575</td>
<td>74</td>
<td>74</td>
<td>15</td>
<td>4.625</td>
<td>0.9575</td>
</tr>
<tr>
<th>72</th>
<td>19</td>
<td>1.2145</td>
<td>19</td>
<td>1.2858</td>
<td>2.5003</td>
<td>38</td>
<td>38</td>
<td>40</td>
<td>2.375</td>
<td>2.5003</td>
</tr>
<tr>
<th>73</th>
<td>38</td>
<td>2.3776</td>
<td>38</td>
<td>-1.5560</td>
<td>0.8216</td>
<td>76</td>
<td>76</td>
<td>13</td>
<td>4.750</td>
<td>0.8216</td>
</tr>
<tr>
<th>74</th>
<td>62</td>
<td>3.8753</td>
<td>62</td>
<td>2.6655</td>
<td>6.5408</td>
<td>124</td>
<td>124</td>
<td>104</td>
<td>7.750</td>
<td>6.5408</td>
</tr>
<tr>
<th>75</th>
<td>22</td>
<td>1.4366</td>
<td>22</td>
<td>-1.1781</td>
<td>0.2585</td>
<td>44</td>
<td>44</td>
<td>4</td>
<td>2.750</td>
<td>0.2585</td>
</tr>
<tr>
<th>76</th>
<td>35</td>
<td>2.2410</td>
<td>35</td>
<td>1.9735</td>
<td>4.2145</td>
<td>70</td>
<td>70</td>
<td>67</td>
<td>4.375</td>
<td>4.2145</td>
</tr>
<tr>
<th>77</th>
<td>49</td>
<td>3.0806</td>
<td>49</td>
<td>-1.8052</td>
<td>1.2754</td>
<td>98</td>
<td>98</td>
<td>20</td>
<td>6.125</td>
<td>1.2754</td>
</tr>
<tr>
<th>78</th>
<td>23</td>
<td>1.4397</td>
<td>23</td>
<td>-1.1493</td>
<td>0.2904</td>
<td>46</td>
<td>46</td>
<td>4</td>
<td>2.875</td>
<td>0.2904</td>
</tr>
<tr>
<th>79</th>
<td>19</td>
<td>1.2206</td>
<td>19</td>
<td>-3.4958</td>
<td>-2.2752</td>
<td>38</td>
<td>38</td>
<td>-36</td>
<td>2.375</td>
<td>-2.2752</td>
</tr>
<tr>
<th>80</th>
<td>48</td>
<td>3.0613</td>
<td>48</td>
<td>3.3755</td>
<td>6.4368</td>
<td>96</td>
<td>96</td>
<td>102</td>
<td>6.000</td>
<td>6.4368</td>
</tr>
<tr>
<th>81</th>
<td>52</td>
<td>3.2753</td>
<td>52</td>
<td>1.2965</td>
<td>4.5718</td>
<td>104</td>
<td>104</td>
<td>73</td>
<td>6.500</td>
<td>4.5718</td>
</tr>
<tr>
<th>82</th>
<td>23</td>
<td>1.4900</td>
<td>23</td>
<td>-2.3142</td>
<td>-0.8242</td>
<td>46</td>
<td>46</td>
<td>-13</td>
<td>2.875</td>
<td>-0.8242</td>
</tr>
<tr>
<th>83</th>
<td>37</td>
<td>2.3231</td>
<td>37</td>
<td>-1.3656</td>
<td>0.9575</td>
<td>74</td>
<td>74</td>
<td>15</td>
<td>4.625</td>
<td>0.9575</td>
</tr>
<tr>
<th>84</th>
<td>19</td>
<td>1.2145</td>
<td>19</td>
<td>1.2858</td>
<td>2.5003</td>
<td>38</td>
<td>38</td>
<td>40</td>
<td>2.375</td>
<td>2.5003</td>
</tr>
<tr>
<th>85</th>
<td>38</td>
<td>2.3776</td>
<td>38</td>
<td>-1.5560</td>
<td>0.8216</td>
<td>76</td>
<td>76</td>
<td>13</td>
<td>4.750</td>
<td>0.8216</td>
</tr>
<tr>
<th>86</th>
<td>62</td>
<td>3.8753</td>
<td>62</td>
<td>2.6655</td>
<td>6.5408</td>
<td>124</td>
<td>124</td>
<td>104</td>
<td>7.750</td>
<td>6.5408</td>
</tr>
<tr>
<th>87</th>
<td>22</td>
<td>1.4366</td>
<td>22</td>
<td>-1.1781</td>
<td>0.2585</td>
<td>44</td>
<td>44</td>
<td>4</td>
<td>2.750</td>
<td>0.2585</td>
</tr>
<tr>
<th>88</th>
<td>35</td>
<td>2.2410</td>
<td>35</td>
<td>1.9735</td>
<td>4.2145</td>
<td>70</td>
<td>70</td>
<td>67</td>
<td>4.375</td>
<td>4.2145</td>
</tr>
<tr>
<th>89</th>
<td>49</td>
<td>3.0806</td>
<td>49</td>
<td>-1.8052</td>
<td>1.2754</td>
<td>98</td>
<td>98</td>
<td>20</td>
<td>6.125</td>
<td>1.2754</td>
</tr>
<tr>
<th>90</th>
<td>23</td>
<td>1.4397</td>
<td>23</td>
<td>-1.1493</td>
<td>0.2904</td>
<td>46</td>
<td>46</td>
<td>4</td>
<td>2.875</td>
<td>0.2904</td>
</tr>
<tr>
<th>91</th>
<td>19</td>
<td>1.2206</td>
<td>19</td>
<td>-3.4958</td>
<td>-2.2752</td>
<td>38</td>
<td>38</td>
<td>-36</td>
<td>2.375</td>
<td>-2.2752</td>
</tr>
<tr>
<th>92</th>
<td>48</td>
<td>3.0613</td>
<td>48</td>
<td>3.3755</td>
<td>6.4368</td>
<td>96</td>
<td>96</td>
<td>102</td>
<td>6.000</td>
<td>6.4368</td>
</tr>
<tr>
<th>93</th>
<td>52</td>
<td>3.2753</td>
<td>52</td>
<td>1.2965</td>
<td>4.5718</td>
<td>104</td>
<td>104</td>
<td>73</td>
<td>6.500</td>
<td>4.5718</td>
</tr>
<tr>
<th>94</th>
<td>23</td>
<td>1.4900</td>
<td>23</td>
<td>-2.3142</td>
<td>-0.8242</td>
<td>46</td>
<td>46</td>
<td>-13</td>
<td>2.875</td>
<td>-0.8242</td>
</tr>
<tr>
<th>95</th>
<td>37</td>
<td>2.3231</td>
<td>37</td>
<td>-1.3656</td>
<td>0.9575</td>
<td>74</td>
<td>74</td>
<td>15</td>
<td>4.625</td>
<td>0.9575</td>
</tr>
<tr>
<th>96</th>
<td>19</td>
<td>1.2145</td>
<td>19</td>
<td>1.2858</td>
<td>2.5003</td>
<td>38</td>
<td>38</td>
<td>40</td>
<td>2.375</td>
<td>2.5003</td>
</tr>
<tr>
<th>97</th>
<td>38</td>
<td>2.3776</td>
<td>38</td>
<td>-1.5560</td>
<td>0.8216</td>
<td>76</td>
<td>76</td>
<td>13</td>
<td>4.750</td>
<td>0.8216</td>
</tr>
<tr>
<th>98</th>
<td>62</td>
<td>3.8753</td>
<td>62</td>
<td>2.6655</td>
<td>6.5408</td>
<td>124</td>
<td>124</td>
<td>104</td>
<td>7.750</td>
<td>6.5408</td>
</tr>
<tr>
<th>99</th>
<td>22</td>
<td>1.4366</td>
<td>22</td>
<td>-1.1781</td>
<td>0.2585</td>
<td>44</td>
<td>44</td>
<td>4</td>
<td>2.750</td>
<td>0.2585</td>
</tr>
<tr>
<th>100</th>
<td>35</td>
<td>2.2410</td>
<td>35</td>
<td>1.9735</td>
<td>4.2145</td>
<td>70</td>
<td>70</td>
<td>67</td>
<td>4.375</td>
<td>4.2145</td>
</tr>
</tbody>
</table>
<p>101 rows × 10 columns</p>
</div>
```python
#dataframe of error measures
AODataErr=pd.DataFrame()
AODataErr['aQ+bQ_c']=np.abs(AOData['aQ+bQ']-AOData['c'])
AODataErr['c_cKTVQ']=np.abs(AOData['c']-AOData['cKTVQ'])
AODataErr['cdQ_cKTV']=np.abs(AOData['cdQ']-AOData['cKTV'])
AODataErr['c_cKTVQ__cdQ_cKTV']=np.abs((AODataErr['c_cKTVQ']/ Qscale)- AODataErr['cdQ_cKTV'])
AODataErr
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>aQ+bQ_c</th>
<th>c_cKTVQ</th>
<th>cdQ_cKTV</th>
<th>c_cKTVQ__cdQ_cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>9</td>
<td>0.6142</td>
<td>0.0517</td>
</tr>
<tr>
<th>1</th>
<td>0</td>
<td>63</td>
<td>3.9284</td>
<td>0.0091</td>
</tr>
<tr>
<th>2</th>
<td>0</td>
<td>20</td>
<td>1.2092</td>
<td>0.0408</td>
</tr>
<tr>
<th>3</th>
<td>0</td>
<td>40</td>
<td>2.4915</td>
<td>0.0085</td>
</tr>
<tr>
<th>4</th>
<td>0</td>
<td>3</td>
<td>0.1605</td>
<td>0.0270</td>
</tr>
<tr>
<th>5</th>
<td>0</td>
<td>78</td>
<td>4.8496</td>
<td>0.0254</td>
</tr>
<tr>
<th>6</th>
<td>0</td>
<td>42</td>
<td>2.5846</td>
<td>0.0404</td>
</tr>
<tr>
<th>7</th>
<td>0</td>
<td>74</td>
<td>4.6502</td>
<td>0.0252</td>
</tr>
<tr>
<th>8</th>
<td>0</td>
<td>6</td>
<td>0.4368</td>
<td>0.0618</td>
</tr>
<tr>
<th>9</th>
<td>0</td>
<td>31</td>
<td>1.9282</td>
<td>0.0093</td>
</tr>
<tr>
<th>10</th>
<td>0</td>
<td>59</td>
<td>3.6992</td>
<td>0.0117</td>
</tr>
<tr>
<th>11</th>
<td>0</td>
<td>59</td>
<td>3.6675</td>
<td>0.0200</td>
</tr>
<tr>
<th>12</th>
<td>0</td>
<td>2</td>
<td>0.1253</td>
<td>0.0003</td>
</tr>
<tr>
<th>13</th>
<td>0</td>
<td>63</td>
<td>3.9284</td>
<td>0.0091</td>
</tr>
<tr>
<th>14</th>
<td>0</td>
<td>20</td>
<td>1.2092</td>
<td>0.0408</td>
</tr>
<tr>
<th>15</th>
<td>0</td>
<td>40</td>
<td>2.4915</td>
<td>0.0085</td>
</tr>
<tr>
<th>16</th>
<td>0</td>
<td>3</td>
<td>0.1605</td>
<td>0.0270</td>
</tr>
<tr>
<th>17</th>
<td>0</td>
<td>78</td>
<td>4.8496</td>
<td>0.0254</td>
</tr>
<tr>
<th>18</th>
<td>0</td>
<td>42</td>
<td>2.5846</td>
<td>0.0404</td>
</tr>
<tr>
<th>19</th>
<td>0</td>
<td>74</td>
<td>4.6502</td>
<td>0.0252</td>
</tr>
<tr>
<th>20</th>
<td>0</td>
<td>6</td>
<td>0.4368</td>
<td>0.0618</td>
</tr>
<tr>
<th>21</th>
<td>0</td>
<td>31</td>
<td>1.9282</td>
<td>0.0093</td>
</tr>
<tr>
<th>22</th>
<td>0</td>
<td>59</td>
<td>3.6992</td>
<td>0.0117</td>
</tr>
<tr>
<th>23</th>
<td>0</td>
<td>59</td>
<td>3.6675</td>
<td>0.0200</td>
</tr>
<tr>
<th>24</th>
<td>0</td>
<td>2</td>
<td>0.1253</td>
<td>0.0003</td>
</tr>
<tr>
<th>25</th>
<td>0</td>
<td>63</td>
<td>3.9284</td>
<td>0.0091</td>
</tr>
<tr>
<th>26</th>
<td>0</td>
<td>20</td>
<td>1.2092</td>
<td>0.0408</td>
</tr>
<tr>
<th>27</th>
<td>0</td>
<td>40</td>
<td>2.4915</td>
<td>0.0085</td>
</tr>
<tr>
<th>28</th>
<td>0</td>
<td>3</td>
<td>0.1605</td>
<td>0.0270</td>
</tr>
<tr>
<th>29</th>
<td>0</td>
<td>78</td>
<td>4.8496</td>
<td>0.0254</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>71</th>
<td>0</td>
<td>59</td>
<td>3.6675</td>
<td>0.0200</td>
</tr>
<tr>
<th>72</th>
<td>0</td>
<td>2</td>
<td>0.1253</td>
<td>0.0003</td>
</tr>
<tr>
<th>73</th>
<td>0</td>
<td>63</td>
<td>3.9284</td>
<td>0.0091</td>
</tr>
<tr>
<th>74</th>
<td>0</td>
<td>20</td>
<td>1.2092</td>
<td>0.0408</td>
</tr>
<tr>
<th>75</th>
<td>0</td>
<td>40</td>
<td>2.4915</td>
<td>0.0085</td>
</tr>
<tr>
<th>76</th>
<td>0</td>
<td>3</td>
<td>0.1605</td>
<td>0.0270</td>
</tr>
<tr>
<th>77</th>
<td>0</td>
<td>78</td>
<td>4.8496</td>
<td>0.0254</td>
</tr>
<tr>
<th>78</th>
<td>0</td>
<td>42</td>
<td>2.5846</td>
<td>0.0404</td>
</tr>
<tr>
<th>79</th>
<td>0</td>
<td>74</td>
<td>4.6502</td>
<td>0.0252</td>
</tr>
<tr>
<th>80</th>
<td>0</td>
<td>6</td>
<td>0.4368</td>
<td>0.0618</td>
</tr>
<tr>
<th>81</th>
<td>0</td>
<td>31</td>
<td>1.9282</td>
<td>0.0093</td>
</tr>
<tr>
<th>82</th>
<td>0</td>
<td>59</td>
<td>3.6992</td>
<td>0.0117</td>
</tr>
<tr>
<th>83</th>
<td>0</td>
<td>59</td>
<td>3.6675</td>
<td>0.0200</td>
</tr>
<tr>
<th>84</th>
<td>0</td>
<td>2</td>
<td>0.1253</td>
<td>0.0003</td>
</tr>
<tr>
<th>85</th>
<td>0</td>
<td>63</td>
<td>3.9284</td>
<td>0.0091</td>
</tr>
<tr>
<th>86</th>
<td>0</td>
<td>20</td>
<td>1.2092</td>
<td>0.0408</td>
</tr>
<tr>
<th>87</th>
<td>0</td>
<td>40</td>
<td>2.4915</td>
<td>0.0085</td>
</tr>
<tr>
<th>88</th>
<td>0</td>
<td>3</td>
<td>0.1605</td>
<td>0.0270</td>
</tr>
<tr>
<th>89</th>
<td>0</td>
<td>78</td>
<td>4.8496</td>
<td>0.0254</td>
</tr>
<tr>
<th>90</th>
<td>0</td>
<td>42</td>
<td>2.5846</td>
<td>0.0404</td>
</tr>
<tr>
<th>91</th>
<td>0</td>
<td>74</td>
<td>4.6502</td>
<td>0.0252</td>
</tr>
<tr>
<th>92</th>
<td>0</td>
<td>6</td>
<td>0.4368</td>
<td>0.0618</td>
</tr>
<tr>
<th>93</th>
<td>0</td>
<td>31</td>
<td>1.9282</td>
<td>0.0093</td>
</tr>
<tr>
<th>94</th>
<td>0</td>
<td>59</td>
<td>3.6992</td>
<td>0.0117</td>
</tr>
<tr>
<th>95</th>
<td>0</td>
<td>59</td>
<td>3.6675</td>
<td>0.0200</td>
</tr>
<tr>
<th>96</th>
<td>0</td>
<td>2</td>
<td>0.1253</td>
<td>0.0003</td>
</tr>
<tr>
<th>97</th>
<td>0</td>
<td>63</td>
<td>3.9284</td>
<td>0.0091</td>
</tr>
<tr>
<th>98</th>
<td>0</td>
<td>20</td>
<td>1.2092</td>
<td>0.0408</td>
</tr>
<tr>
<th>99</th>
<td>0</td>
<td>40</td>
<td>2.4915</td>
<td>0.0085</td>
</tr>
<tr>
<th>100</th>
<td>0</td>
<td>3</td>
<td>0.1605</td>
<td>0.0270</td>
</tr>
</tbody>
</table>
<p>101 rows × 4 columns</p>
</div>
```python
AODataErr.describe()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>aQ+bQ_c</th>
<th>c_cKTVQ</th>
<th>cdQ_cKTV</th>
<th>c_cKTVQ__cdQ_cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>count</th>
<td>101.0</td>
<td>101.000000</td>
<td>101.000000</td>
<td>101.000000</td>
</tr>
<tr>
<th>mean</th>
<td>0.0</td>
<td>39.118812</td>
<td>2.438137</td>
<td>0.023496</td>
</tr>
<tr>
<th>std</th>
<td>0.0</td>
<td>26.385711</td>
<td>1.646784</td>
<td>0.016939</td>
</tr>
<tr>
<th>min</th>
<td>0.0</td>
<td>2.000000</td>
<td>0.125300</td>
<td>0.000300</td>
</tr>
<tr>
<th>25%</th>
<td>0.0</td>
<td>9.000000</td>
<td>0.614200</td>
<td>0.009100</td>
</tr>
<tr>
<th>50%</th>
<td>0.0</td>
<td>40.000000</td>
<td>2.491500</td>
<td>0.025200</td>
</tr>
<tr>
<th>75%</th>
<td>0.0</td>
<td>59.000000</td>
<td>3.699200</td>
<td>0.040400</td>
</tr>
<tr>
<th>max</th>
<td>0.0</td>
<td>78.000000</td>
<td>4.849600</td>
<td>0.061800</td>
</tr>
</tbody>
</table>
</div>
```python
DUT.convert()
VerilogTextReader('AdderBehaverial');
```
***Verilog module from AdderBehaverial.v***
// File: AdderBehaverial.v
// Generated by MyHDL 0.10
// Date: Sat Aug 25 17:54:24 2018
`timescale 1ns/10ps
module AdderBehaverial (
a,
b,
c
);
input signed [7:0] a;
input signed [7:0] b;
output signed [7:0] c;
wire signed [7:0] c;
assign c = (a + b);
endmodule
# Multiplication
```python
#Q4.4 *Q4.4 -> Q8.8
Q2=(Q[0]*2, Q[1]*2)
Q2len=Q2[0]+Q2[1]
Q2scale=2**(Q2[1]); Q2scale
```
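Why Q8.8? A Q4.4 value stores $x$ as the integer $x \cdot 2^4$, so the raw integer product of two Q4.4 values carries a factor of $2^4 \cdot 2^4 = 2^8$ and therefore has 4+4 integer bits and 4+4 fractional bits. The following is only a small sanity check of that bookkeeping (a sketch that reuses `Qscale` and `Q2scale` from the cells above; the values 1.5 and 2.25 are just illustrative):
```python
#sanity check: the raw integer product is scaled by Qscale**2 == Q2scale
x, y=1.5, 2.25
xQ, yQ=int(x*Qscale), int(y*Qscale)   #Q4.4 integers 24 and 36
rawProd=xQ*yQ                         #864, carries a factor of 2**8
rawProd/Q2scale, x*y                  #de-Qing by Q2scale recovers 3.375 exactly here
```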
```python
a=3.2500; aQ=int(a*Qscale)
b=-2.065; bQ=int(b*Qscale)
aQ, bQ
bin(aQ, Qlen), bin(bQ, Qlen)
```
('00110100', '11011111')
```python
ab=a*b; ab
abQ=int(ab*Qscale); abQ
abdQ=abQ/ Qscale; abdQ, ab
```
```python
aQBV=intbv(aQ)[Qlen:].signed(); bQBV=intbv(bQ)[Qlen:].signed()
f'aQBV: {bin(aQBV, Qlen)}; bQBV: {bin(bQBV, Qlen)}'
```
'aQBV: 00110100; bQBV: 11011111'
```python
abQ=aQBV*bQBV; abQ
```
```python
abdQ=abQ/ Qscale; abdQ, ab
```
```python
abdQ=abQ/ Q2scale; abdQ,ab
```
```python
class MultPosNegTVGen():
"""
    Class to generate positive and negative random numbers to be Qed for testing
"""
def __init__(self, Q, N):
"""
Take in arguments and create output holds
Args:
Q(tuple): Q notation tuple where Q[0] is int bit len and Q[1] is
dec bit len
N(int): number of values to generate
"""
self.Q=Q; self.N=N
self.Qlen=self.Q[0]+self.Q[1]
self.Qmin=-(2**(self.Qlen-1)); self.Qmax=2**(self.Qlen-1) -1
self.Qscale=2**self.Q[1]
#Q4.4 *Q4.4 -> Q8.8
self.Q2=(self.Q[0]*2, self.Q[1]*2)
self.Q2len=self.Q2[0]+self.Q2[1]
self.Q2min=-(2**(self.Q2len-1)); self.Q2max=2**(self.Q2len-1) -1
self.Q2scale=2**(Q2[1])
self.aTV=np.zeros(0); self.aTVQ=np.zeros(0)
self.bTV=np.zeros(0); self.bTVQ=np.zeros(0)
self.cK=np.zeros(0); self.cKQ=np.zeros(0)
def Genrator(self):
"""
        Random number generator in floating point and subsequent Qed version
"""
self.V1=np.array((1/np.random.ranf())).round(decimals=self.Q[1])
        #needed to force np.random to generate a different random num
np.random.seed(np.random.randint(self.Qmax))
self.V2=np.array((1/np.random.ranf())).round(decimals=self.Q[1])
        #needed to force np.random to generate a different random num
np.random.seed(np.random.randint(self.Qmax))
self.Sign=np.random.randint(2)
if self.Sign==1:
self.V2=-self.V2
self.V1Q=(self.V1*self.Qscale).astype(int)
self.V2Q=(self.V2*self.Qscale).astype(int)
def GenratorCheckAndMul(self):
"""
        Checks whether the product of the two random numbers generated would overflow the Q ranges;
        if it would, do not append to the return holds
"""
self.V1tV2=(self.V1*self.V2).round(decimals=self.Q2[1])
self.V1tV2Q=(self.V1tV2*self.Q2scale).astype(int)
check=self.V1Q*self.V2Q
if self.V1Q>self.Qmin and self.V1Q<self.Qmax:
if self.V2Q>self.Qmin and self.V2Q<self.Qmax:
if check>self.Q2min and check<self.Q2max:
self.aTV=np.append(self.aTV, self.V1); self.aTVQ=np.append(self.aTVQ, self.V1Q).astype(int)
self.bTV=np.append(self.bTV, self.V2); self.bTVQ=np.append(self.bTVQ, self.V2Q).astype(int)
self.cK=np.append(self.cK, self.V1tV2); self.cKQ=np.append(self.cKQ, self.V1tV2Q).astype(int)
def MakeTVs(self):
"""
Automates the generating, testing and appending to make the TVs
Returns:
self.aTV(np.array): floating point numbers for a
self.aTVQ(np.array): fixed point Qed from self.aTV
self.bTV(np.array): floating point numbers for b
self.bTVQ(np.array): fixed point Qed from self.bTV
            self.cK(np.array): known floating point rounded product of self.aTV, self.bTV
self.cKQ(np.array): known fixed point Qed from self.cK
"""
while len(self.aTV)<=self.N:
self.Genrator()
self.GenratorCheckAndMul()
#print('Done')
```
```python
@block
def MultiBehaverial(a, b, c):
@always_comb
def logic():
c.next=a*b
return instances()
```
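Note that the product needs twice the word length: two signed `Qlen`-bit operands can produce a result that only fits in `Q2len` bits, which is why `c` is declared with `Q2len` bits in the testbench below. A quick worst-case check (a sketch reusing `Qlen` and `Q2len` from the cells above):
```python
#worst-case product magnitude for two signed Qlen-bit operands
worst=(-(2**(Qlen-1)))*(-(2**(Qlen-1)))   #(-128)*(-128)=16384 for Qlen=8
worst, worst < 2**(Q2len-1)               #fits in a signed Q2len-bit word
```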
```python
Peeker.clear()
a=Signal(intbv(0)[Qlen:].signed()); Peeker(a, 'a')
b=Signal(intbv(0)[Qlen:].signed()); Peeker(b, 'b')
c=Signal(intbv(0)[Q2len:].signed()); Peeker(c, 'c')
TVG=MultPosNegTVGen(Q, 100); TVG.MakeTVs()
aTV=TVG.aTV; aTVQ=TVG.aTVQ
bTV=TVG.bTV; bTVQ=TVG.bTVQ
cKTV=TVG.cK; cKTVQ=TVG.cKQ
DUT=MultiBehaverial(a, b, c)
def Multi_TB():
@instance
def simules():
for i in range(len(aTVQ)):
a.next=int(aTVQ[i])
b.next=int(bTVQ[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, Multi_TB(), *Peeker.instances()).run()
```
```python
Peeker.to_wavedrom()
```
<div></div>
```python
MultiData=Peeker.to_dataframe()
#load in the source floating values
MultiData['aTV']=aTV; MultiData['bTV']=bTV
#get the predicted floating point product
MultiData['aTV*bTV']=aTV*bTV
#get the predicted fixed point product
MultiData['aQ*bQ']=aTVQ*bTVQ
#reorder
MultiData=MultiData[['a', 'aTV', 'b', 'bTV', 'aTV*bTV', 'aQ*bQ', 'c']]
#load the sourced Qed product
MultiData['cKTVQ']=cKTVQ
#de-Q the testbench generated product
MultiData['cdQ']=MultiData['c']/Q2scale
#load the sourced floating product
MultiData['cKTV']=cKTV
MultiData
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>a</th>
<th>aTV</th>
<th>b</th>
<th>bTV</th>
<th>aTV*bTV</th>
<th>aQ*bQ</th>
<th>c</th>
<th>cKTVQ</th>
<th>cdQ</th>
<th>cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>49</td>
<td>3.0806</td>
<td>-28</td>
<td>-1.8052</td>
<td>-5.561099</td>
<td>-1372</td>
<td>-1372</td>
<td>-1423</td>
<td>-5.359375</td>
<td>-5.561099</td>
</tr>
<tr>
<th>1</th>
<td>23</td>
<td>1.4397</td>
<td>-18</td>
<td>-1.1493</td>
<td>-1.654647</td>
<td>-414</td>
<td>-414</td>
<td>-423</td>
<td>-1.617188</td>
<td>-1.654647</td>
</tr>
<tr>
<th>2</th>
<td>19</td>
<td>1.2206</td>
<td>-55</td>
<td>-3.4958</td>
<td>-4.266973</td>
<td>-1045</td>
<td>-1045</td>
<td>-1092</td>
<td>-4.082031</td>
<td>-4.266973</td>
</tr>
<tr>
<th>3</th>
<td>48</td>
<td>3.0613</td>
<td>54</td>
<td>3.3755</td>
<td>10.333418</td>
<td>2592</td>
<td>2592</td>
<td>2645</td>
<td>10.125000</td>
<td>10.333418</td>
</tr>
<tr>
<th>4</th>
<td>52</td>
<td>3.2753</td>
<td>20</td>
<td>1.2965</td>
<td>4.246426</td>
<td>1040</td>
<td>1040</td>
<td>1087</td>
<td>4.062500</td>
<td>4.246426</td>
</tr>
<tr>
<th>5</th>
<td>23</td>
<td>1.4900</td>
<td>-37</td>
<td>-2.3142</td>
<td>-3.448158</td>
<td>-851</td>
<td>-851</td>
<td>-882</td>
<td>-3.324219</td>
<td>-3.448158</td>
</tr>
<tr>
<th>6</th>
<td>37</td>
<td>2.3231</td>
<td>-21</td>
<td>-1.3656</td>
<td>-3.172425</td>
<td>-777</td>
<td>-777</td>
<td>-812</td>
<td>-3.035156</td>
<td>-3.172425</td>
</tr>
<tr>
<th>7</th>
<td>19</td>
<td>1.2145</td>
<td>20</td>
<td>1.2858</td>
<td>1.561604</td>
<td>380</td>
<td>380</td>
<td>399</td>
<td>1.484375</td>
<td>1.561604</td>
</tr>
<tr>
<th>8</th>
<td>38</td>
<td>2.3776</td>
<td>-24</td>
<td>-1.5560</td>
<td>-3.699546</td>
<td>-912</td>
<td>-912</td>
<td>-947</td>
<td>-3.562500</td>
<td>-3.699546</td>
</tr>
<tr>
<th>9</th>
<td>62</td>
<td>3.8753</td>
<td>42</td>
<td>2.6655</td>
<td>10.329612</td>
<td>2604</td>
<td>2604</td>
<td>2644</td>
<td>10.171875</td>
<td>10.329612</td>
</tr>
<tr>
<th>10</th>
<td>22</td>
<td>1.4366</td>
<td>-18</td>
<td>-1.1781</td>
<td>-1.692458</td>
<td>-396</td>
<td>-396</td>
<td>-433</td>
<td>-1.546875</td>
<td>-1.692458</td>
</tr>
<tr>
<th>11</th>
<td>35</td>
<td>2.2410</td>
<td>31</td>
<td>1.9735</td>
<td>4.422614</td>
<td>1085</td>
<td>1085</td>
<td>1132</td>
<td>4.238281</td>
<td>4.422613</td>
</tr>
<tr>
<th>12</th>
<td>49</td>
<td>3.0806</td>
<td>-28</td>
<td>-1.8052</td>
<td>-5.561099</td>
<td>-1372</td>
<td>-1372</td>
<td>-1423</td>
<td>-5.359375</td>
<td>-5.561099</td>
</tr>
<tr>
<th>13</th>
<td>23</td>
<td>1.4397</td>
<td>-18</td>
<td>-1.1493</td>
<td>-1.654647</td>
<td>-414</td>
<td>-414</td>
<td>-423</td>
<td>-1.617188</td>
<td>-1.654647</td>
</tr>
<tr>
<th>14</th>
<td>19</td>
<td>1.2206</td>
<td>-55</td>
<td>-3.4958</td>
<td>-4.266973</td>
<td>-1045</td>
<td>-1045</td>
<td>-1092</td>
<td>-4.082031</td>
<td>-4.266973</td>
</tr>
<tr>
<th>15</th>
<td>48</td>
<td>3.0613</td>
<td>54</td>
<td>3.3755</td>
<td>10.333418</td>
<td>2592</td>
<td>2592</td>
<td>2645</td>
<td>10.125000</td>
<td>10.333418</td>
</tr>
<tr>
<th>16</th>
<td>52</td>
<td>3.2753</td>
<td>20</td>
<td>1.2965</td>
<td>4.246426</td>
<td>1040</td>
<td>1040</td>
<td>1087</td>
<td>4.062500</td>
<td>4.246426</td>
</tr>
<tr>
<th>17</th>
<td>23</td>
<td>1.4900</td>
<td>-37</td>
<td>-2.3142</td>
<td>-3.448158</td>
<td>-851</td>
<td>-851</td>
<td>-882</td>
<td>-3.324219</td>
<td>-3.448158</td>
</tr>
<tr>
<th>18</th>
<td>37</td>
<td>2.3231</td>
<td>-21</td>
<td>-1.3656</td>
<td>-3.172425</td>
<td>-777</td>
<td>-777</td>
<td>-812</td>
<td>-3.035156</td>
<td>-3.172425</td>
</tr>
<tr>
<th>19</th>
<td>19</td>
<td>1.2145</td>
<td>20</td>
<td>1.2858</td>
<td>1.561604</td>
<td>380</td>
<td>380</td>
<td>399</td>
<td>1.484375</td>
<td>1.561604</td>
</tr>
<tr>
<th>20</th>
<td>38</td>
<td>2.3776</td>
<td>-24</td>
<td>-1.5560</td>
<td>-3.699546</td>
<td>-912</td>
<td>-912</td>
<td>-947</td>
<td>-3.562500</td>
<td>-3.699546</td>
</tr>
<tr>
<th>21</th>
<td>62</td>
<td>3.8753</td>
<td>42</td>
<td>2.6655</td>
<td>10.329612</td>
<td>2604</td>
<td>2604</td>
<td>2644</td>
<td>10.171875</td>
<td>10.329612</td>
</tr>
<tr>
<th>22</th>
<td>22</td>
<td>1.4366</td>
<td>-18</td>
<td>-1.1781</td>
<td>-1.692458</td>
<td>-396</td>
<td>-396</td>
<td>-433</td>
<td>-1.546875</td>
<td>-1.692458</td>
</tr>
<tr>
<th>23</th>
<td>35</td>
<td>2.2410</td>
<td>31</td>
<td>1.9735</td>
<td>4.422614</td>
<td>1085</td>
<td>1085</td>
<td>1132</td>
<td>4.238281</td>
<td>4.422613</td>
</tr>
<tr>
<th>24</th>
<td>49</td>
<td>3.0806</td>
<td>-28</td>
<td>-1.8052</td>
<td>-5.561099</td>
<td>-1372</td>
<td>-1372</td>
<td>-1423</td>
<td>-5.359375</td>
<td>-5.561099</td>
</tr>
<tr>
<th>25</th>
<td>23</td>
<td>1.4397</td>
<td>-18</td>
<td>-1.1493</td>
<td>-1.654647</td>
<td>-414</td>
<td>-414</td>
<td>-423</td>
<td>-1.617188</td>
<td>-1.654647</td>
</tr>
<tr>
<th>26</th>
<td>19</td>
<td>1.2206</td>
<td>-55</td>
<td>-3.4958</td>
<td>-4.266973</td>
<td>-1045</td>
<td>-1045</td>
<td>-1092</td>
<td>-4.082031</td>
<td>-4.266973</td>
</tr>
<tr>
<th>27</th>
<td>48</td>
<td>3.0613</td>
<td>54</td>
<td>3.3755</td>
<td>10.333418</td>
<td>2592</td>
<td>2592</td>
<td>2645</td>
<td>10.125000</td>
<td>10.333418</td>
</tr>
<tr>
<th>28</th>
<td>52</td>
<td>3.2753</td>
<td>20</td>
<td>1.2965</td>
<td>4.246426</td>
<td>1040</td>
<td>1040</td>
<td>1087</td>
<td>4.062500</td>
<td>4.246426</td>
</tr>
<tr>
<th>29</th>
<td>23</td>
<td>1.4900</td>
<td>-37</td>
<td>-2.3142</td>
<td>-3.448158</td>
<td>-851</td>
<td>-851</td>
<td>-882</td>
<td>-3.324219</td>
<td>-3.448158</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>71</th>
<td>35</td>
<td>2.2410</td>
<td>31</td>
<td>1.9735</td>
<td>4.422614</td>
<td>1085</td>
<td>1085</td>
<td>1132</td>
<td>4.238281</td>
<td>4.422613</td>
</tr>
<tr>
<th>72</th>
<td>49</td>
<td>3.0806</td>
<td>-28</td>
<td>-1.8052</td>
<td>-5.561099</td>
<td>-1372</td>
<td>-1372</td>
<td>-1423</td>
<td>-5.359375</td>
<td>-5.561099</td>
</tr>
<tr>
<th>73</th>
<td>23</td>
<td>1.4397</td>
<td>-18</td>
<td>-1.1493</td>
<td>-1.654647</td>
<td>-414</td>
<td>-414</td>
<td>-423</td>
<td>-1.617188</td>
<td>-1.654647</td>
</tr>
<tr>
<th>74</th>
<td>19</td>
<td>1.2206</td>
<td>-55</td>
<td>-3.4958</td>
<td>-4.266973</td>
<td>-1045</td>
<td>-1045</td>
<td>-1092</td>
<td>-4.082031</td>
<td>-4.266973</td>
</tr>
<tr>
<th>75</th>
<td>48</td>
<td>3.0613</td>
<td>54</td>
<td>3.3755</td>
<td>10.333418</td>
<td>2592</td>
<td>2592</td>
<td>2645</td>
<td>10.125000</td>
<td>10.333418</td>
</tr>
<tr>
<th>76</th>
<td>52</td>
<td>3.2753</td>
<td>20</td>
<td>1.2965</td>
<td>4.246426</td>
<td>1040</td>
<td>1040</td>
<td>1087</td>
<td>4.062500</td>
<td>4.246426</td>
</tr>
<tr>
<th>77</th>
<td>23</td>
<td>1.4900</td>
<td>-37</td>
<td>-2.3142</td>
<td>-3.448158</td>
<td>-851</td>
<td>-851</td>
<td>-882</td>
<td>-3.324219</td>
<td>-3.448158</td>
</tr>
<tr>
<th>78</th>
<td>37</td>
<td>2.3231</td>
<td>-21</td>
<td>-1.3656</td>
<td>-3.172425</td>
<td>-777</td>
<td>-777</td>
<td>-812</td>
<td>-3.035156</td>
<td>-3.172425</td>
</tr>
<tr>
<th>79</th>
<td>19</td>
<td>1.2145</td>
<td>20</td>
<td>1.2858</td>
<td>1.561604</td>
<td>380</td>
<td>380</td>
<td>399</td>
<td>1.484375</td>
<td>1.561604</td>
</tr>
<tr>
<th>80</th>
<td>38</td>
<td>2.3776</td>
<td>-24</td>
<td>-1.5560</td>
<td>-3.699546</td>
<td>-912</td>
<td>-912</td>
<td>-947</td>
<td>-3.562500</td>
<td>-3.699546</td>
</tr>
<tr>
<th>81</th>
<td>62</td>
<td>3.8753</td>
<td>42</td>
<td>2.6655</td>
<td>10.329612</td>
<td>2604</td>
<td>2604</td>
<td>2644</td>
<td>10.171875</td>
<td>10.329612</td>
</tr>
<tr>
<th>82</th>
<td>22</td>
<td>1.4366</td>
<td>-18</td>
<td>-1.1781</td>
<td>-1.692458</td>
<td>-396</td>
<td>-396</td>
<td>-433</td>
<td>-1.546875</td>
<td>-1.692458</td>
</tr>
<tr>
<th>83</th>
<td>35</td>
<td>2.2410</td>
<td>31</td>
<td>1.9735</td>
<td>4.422614</td>
<td>1085</td>
<td>1085</td>
<td>1132</td>
<td>4.238281</td>
<td>4.422613</td>
</tr>
<tr>
<th>84</th>
<td>49</td>
<td>3.0806</td>
<td>-28</td>
<td>-1.8052</td>
<td>-5.561099</td>
<td>-1372</td>
<td>-1372</td>
<td>-1423</td>
<td>-5.359375</td>
<td>-5.561099</td>
</tr>
<tr>
<th>85</th>
<td>23</td>
<td>1.4397</td>
<td>-18</td>
<td>-1.1493</td>
<td>-1.654647</td>
<td>-414</td>
<td>-414</td>
<td>-423</td>
<td>-1.617188</td>
<td>-1.654647</td>
</tr>
<tr>
<th>86</th>
<td>19</td>
<td>1.2206</td>
<td>-55</td>
<td>-3.4958</td>
<td>-4.266973</td>
<td>-1045</td>
<td>-1045</td>
<td>-1092</td>
<td>-4.082031</td>
<td>-4.266973</td>
</tr>
<tr>
<th>87</th>
<td>48</td>
<td>3.0613</td>
<td>54</td>
<td>3.3755</td>
<td>10.333418</td>
<td>2592</td>
<td>2592</td>
<td>2645</td>
<td>10.125000</td>
<td>10.333418</td>
</tr>
<tr>
<th>88</th>
<td>52</td>
<td>3.2753</td>
<td>20</td>
<td>1.2965</td>
<td>4.246426</td>
<td>1040</td>
<td>1040</td>
<td>1087</td>
<td>4.062500</td>
<td>4.246426</td>
</tr>
<tr>
<th>89</th>
<td>23</td>
<td>1.4900</td>
<td>-37</td>
<td>-2.3142</td>
<td>-3.448158</td>
<td>-851</td>
<td>-851</td>
<td>-882</td>
<td>-3.324219</td>
<td>-3.448158</td>
</tr>
<tr>
<th>90</th>
<td>37</td>
<td>2.3231</td>
<td>-21</td>
<td>-1.3656</td>
<td>-3.172425</td>
<td>-777</td>
<td>-777</td>
<td>-812</td>
<td>-3.035156</td>
<td>-3.172425</td>
</tr>
<tr>
<th>91</th>
<td>19</td>
<td>1.2145</td>
<td>20</td>
<td>1.2858</td>
<td>1.561604</td>
<td>380</td>
<td>380</td>
<td>399</td>
<td>1.484375</td>
<td>1.561604</td>
</tr>
<tr>
<th>92</th>
<td>38</td>
<td>2.3776</td>
<td>-24</td>
<td>-1.5560</td>
<td>-3.699546</td>
<td>-912</td>
<td>-912</td>
<td>-947</td>
<td>-3.562500</td>
<td>-3.699546</td>
</tr>
<tr>
<th>93</th>
<td>62</td>
<td>3.8753</td>
<td>42</td>
<td>2.6655</td>
<td>10.329612</td>
<td>2604</td>
<td>2604</td>
<td>2644</td>
<td>10.171875</td>
<td>10.329612</td>
</tr>
<tr>
<th>94</th>
<td>22</td>
<td>1.4366</td>
<td>-18</td>
<td>-1.1781</td>
<td>-1.692458</td>
<td>-396</td>
<td>-396</td>
<td>-433</td>
<td>-1.546875</td>
<td>-1.692458</td>
</tr>
<tr>
<th>95</th>
<td>35</td>
<td>2.2410</td>
<td>31</td>
<td>1.9735</td>
<td>4.422614</td>
<td>1085</td>
<td>1085</td>
<td>1132</td>
<td>4.238281</td>
<td>4.422613</td>
</tr>
<tr>
<th>96</th>
<td>49</td>
<td>3.0806</td>
<td>-28</td>
<td>-1.8052</td>
<td>-5.561099</td>
<td>-1372</td>
<td>-1372</td>
<td>-1423</td>
<td>-5.359375</td>
<td>-5.561099</td>
</tr>
<tr>
<th>97</th>
<td>23</td>
<td>1.4397</td>
<td>-18</td>
<td>-1.1493</td>
<td>-1.654647</td>
<td>-414</td>
<td>-414</td>
<td>-423</td>
<td>-1.617188</td>
<td>-1.654647</td>
</tr>
<tr>
<th>98</th>
<td>19</td>
<td>1.2206</td>
<td>-55</td>
<td>-3.4958</td>
<td>-4.266973</td>
<td>-1045</td>
<td>-1045</td>
<td>-1092</td>
<td>-4.082031</td>
<td>-4.266973</td>
</tr>
<tr>
<th>99</th>
<td>48</td>
<td>3.0613</td>
<td>54</td>
<td>3.3755</td>
<td>10.333418</td>
<td>2592</td>
<td>2592</td>
<td>2645</td>
<td>10.125000</td>
<td>10.333418</td>
</tr>
<tr>
<th>100</th>
<td>52</td>
<td>3.2753</td>
<td>20</td>
<td>1.2965</td>
<td>4.246426</td>
<td>1040</td>
<td>1040</td>
<td>1087</td>
<td>4.062500</td>
<td>4.246426</td>
</tr>
</tbody>
</table>
<p>101 rows × 10 columns</p>
</div>
```python
#dataframe of error measures
MultiDataErr=pd.DataFrame()
MultiDataErr['aQ*bQ_c']=np.abs(MultiData['aQ*bQ']-MultiData['c'])
MultiDataErr['c_cKTVQ']=np.abs(MultiData['c']-MultiData['cKTVQ'])
MultiDataErr['cdQ_cKTV']=np.abs(MultiData['cdQ']-MultiData['cKTV'])
MultiDataErr['c_cKTVQ__cdQ_cKTV']=np.abs((MultiDataErr['c_cKTVQ']/ Q2scale)- MultiDataErr['cdQ_cKTV'])
MultiDataErr
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>aQ*bQ_c</th>
<th>c_cKTVQ</th>
<th>cdQ_cKTV</th>
<th>c_cKTVQ__cdQ_cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>51</td>
<td>0.201724</td>
<td>0.002505</td>
</tr>
<tr>
<th>1</th>
<td>0</td>
<td>9</td>
<td>0.037460</td>
<td>0.002303</td>
</tr>
<tr>
<th>2</th>
<td>0</td>
<td>47</td>
<td>0.184942</td>
<td>0.001348</td>
</tr>
<tr>
<th>3</th>
<td>0</td>
<td>53</td>
<td>0.208418</td>
<td>0.001387</td>
</tr>
<tr>
<th>4</th>
<td>0</td>
<td>47</td>
<td>0.183926</td>
<td>0.000333</td>
</tr>
<tr>
<th>5</th>
<td>0</td>
<td>31</td>
<td>0.123939</td>
<td>0.002845</td>
</tr>
<tr>
<th>6</th>
<td>0</td>
<td>35</td>
<td>0.137269</td>
<td>0.000550</td>
</tr>
<tr>
<th>7</th>
<td>0</td>
<td>19</td>
<td>0.077229</td>
<td>0.003010</td>
</tr>
<tr>
<th>8</th>
<td>0</td>
<td>35</td>
<td>0.137046</td>
<td>0.000327</td>
</tr>
<tr>
<th>9</th>
<td>0</td>
<td>40</td>
<td>0.157737</td>
<td>0.001487</td>
</tr>
<tr>
<th>10</th>
<td>0</td>
<td>37</td>
<td>0.145583</td>
<td>0.001052</td>
</tr>
<tr>
<th>11</th>
<td>0</td>
<td>47</td>
<td>0.184332</td>
<td>0.000738</td>
</tr>
<tr>
<th>12</th>
<td>0</td>
<td>51</td>
<td>0.201724</td>
<td>0.002505</td>
</tr>
<tr>
<th>13</th>
<td>0</td>
<td>9</td>
<td>0.037460</td>
<td>0.002303</td>
</tr>
<tr>
<th>14</th>
<td>0</td>
<td>47</td>
<td>0.184942</td>
<td>0.001348</td>
</tr>
<tr>
<th>15</th>
<td>0</td>
<td>53</td>
<td>0.208418</td>
<td>0.001387</td>
</tr>
<tr>
<th>16</th>
<td>0</td>
<td>47</td>
<td>0.183926</td>
<td>0.000333</td>
</tr>
<tr>
<th>17</th>
<td>0</td>
<td>31</td>
<td>0.123939</td>
<td>0.002845</td>
</tr>
<tr>
<th>18</th>
<td>0</td>
<td>35</td>
<td>0.137269</td>
<td>0.000550</td>
</tr>
<tr>
<th>19</th>
<td>0</td>
<td>19</td>
<td>0.077229</td>
<td>0.003010</td>
</tr>
<tr>
<th>20</th>
<td>0</td>
<td>35</td>
<td>0.137046</td>
<td>0.000327</td>
</tr>
<tr>
<th>21</th>
<td>0</td>
<td>40</td>
<td>0.157737</td>
<td>0.001487</td>
</tr>
<tr>
<th>22</th>
<td>0</td>
<td>37</td>
<td>0.145583</td>
<td>0.001052</td>
</tr>
<tr>
<th>23</th>
<td>0</td>
<td>47</td>
<td>0.184332</td>
<td>0.000738</td>
</tr>
<tr>
<th>24</th>
<td>0</td>
<td>51</td>
<td>0.201724</td>
<td>0.002505</td>
</tr>
<tr>
<th>25</th>
<td>0</td>
<td>9</td>
<td>0.037460</td>
<td>0.002303</td>
</tr>
<tr>
<th>26</th>
<td>0</td>
<td>47</td>
<td>0.184942</td>
<td>0.001348</td>
</tr>
<tr>
<th>27</th>
<td>0</td>
<td>53</td>
<td>0.208418</td>
<td>0.001387</td>
</tr>
<tr>
<th>28</th>
<td>0</td>
<td>47</td>
<td>0.183926</td>
<td>0.000333</td>
</tr>
<tr>
<th>29</th>
<td>0</td>
<td>31</td>
<td>0.123939</td>
<td>0.002845</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>71</th>
<td>0</td>
<td>47</td>
<td>0.184332</td>
<td>0.000738</td>
</tr>
<tr>
<th>72</th>
<td>0</td>
<td>51</td>
<td>0.201724</td>
<td>0.002505</td>
</tr>
<tr>
<th>73</th>
<td>0</td>
<td>9</td>
<td>0.037460</td>
<td>0.002303</td>
</tr>
<tr>
<th>74</th>
<td>0</td>
<td>47</td>
<td>0.184942</td>
<td>0.001348</td>
</tr>
<tr>
<th>75</th>
<td>0</td>
<td>53</td>
<td>0.208418</td>
<td>0.001387</td>
</tr>
<tr>
<th>76</th>
<td>0</td>
<td>47</td>
<td>0.183926</td>
<td>0.000333</td>
</tr>
<tr>
<th>77</th>
<td>0</td>
<td>31</td>
<td>0.123939</td>
<td>0.002845</td>
</tr>
<tr>
<th>78</th>
<td>0</td>
<td>35</td>
<td>0.137269</td>
<td>0.000550</td>
</tr>
<tr>
<th>79</th>
<td>0</td>
<td>19</td>
<td>0.077229</td>
<td>0.003010</td>
</tr>
<tr>
<th>80</th>
<td>0</td>
<td>35</td>
<td>0.137046</td>
<td>0.000327</td>
</tr>
<tr>
<th>81</th>
<td>0</td>
<td>40</td>
<td>0.157737</td>
<td>0.001487</td>
</tr>
<tr>
<th>82</th>
<td>0</td>
<td>37</td>
<td>0.145583</td>
<td>0.001052</td>
</tr>
<tr>
<th>83</th>
<td>0</td>
<td>47</td>
<td>0.184332</td>
<td>0.000738</td>
</tr>
<tr>
<th>84</th>
<td>0</td>
<td>51</td>
<td>0.201724</td>
<td>0.002505</td>
</tr>
<tr>
<th>85</th>
<td>0</td>
<td>9</td>
<td>0.037460</td>
<td>0.002303</td>
</tr>
<tr>
<th>86</th>
<td>0</td>
<td>47</td>
<td>0.184942</td>
<td>0.001348</td>
</tr>
<tr>
<th>87</th>
<td>0</td>
<td>53</td>
<td>0.208418</td>
<td>0.001387</td>
</tr>
<tr>
<th>88</th>
<td>0</td>
<td>47</td>
<td>0.183926</td>
<td>0.000333</td>
</tr>
<tr>
<th>89</th>
<td>0</td>
<td>31</td>
<td>0.123939</td>
<td>0.002845</td>
</tr>
<tr>
<th>90</th>
<td>0</td>
<td>35</td>
<td>0.137269</td>
<td>0.000550</td>
</tr>
<tr>
<th>91</th>
<td>0</td>
<td>19</td>
<td>0.077229</td>
<td>0.003010</td>
</tr>
<tr>
<th>92</th>
<td>0</td>
<td>35</td>
<td>0.137046</td>
<td>0.000327</td>
</tr>
<tr>
<th>93</th>
<td>0</td>
<td>40</td>
<td>0.157737</td>
<td>0.001487</td>
</tr>
<tr>
<th>94</th>
<td>0</td>
<td>37</td>
<td>0.145583</td>
<td>0.001052</td>
</tr>
<tr>
<th>95</th>
<td>0</td>
<td>47</td>
<td>0.184332</td>
<td>0.000738</td>
</tr>
<tr>
<th>96</th>
<td>0</td>
<td>51</td>
<td>0.201724</td>
<td>0.002505</td>
</tr>
<tr>
<th>97</th>
<td>0</td>
<td>9</td>
<td>0.037460</td>
<td>0.002303</td>
</tr>
<tr>
<th>98</th>
<td>0</td>
<td>47</td>
<td>0.184942</td>
<td>0.001348</td>
</tr>
<tr>
<th>99</th>
<td>0</td>
<td>53</td>
<td>0.208418</td>
<td>0.001387</td>
</tr>
<tr>
<th>100</th>
<td>0</td>
<td>47</td>
<td>0.183926</td>
<td>0.000333</td>
</tr>
</tbody>
</table>
<p>101 rows × 4 columns</p>
</div>
```python
MultiDataErr.describe()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>aQ*bQ_c</th>
<th>c_cKTVQ</th>
<th>cdQ_cKTV</th>
<th>c_cKTVQ__cdQ_cKTV</th>
</tr>
</thead>
<tbody>
<tr>
<th>count</th>
<td>101.0</td>
<td>101.000000</td>
<td>101.000000</td>
<td>101.000000</td>
</tr>
<tr>
<th>mean</th>
<td>0.0</td>
<td>37.772277</td>
<td>0.149043</td>
<td>0.001495</td>
</tr>
<tr>
<th>std</th>
<td>0.0</td>
<td>12.915015</td>
<td>0.050054</td>
<td>0.000920</td>
</tr>
<tr>
<th>min</th>
<td>0.0</td>
<td>9.000000</td>
<td>0.037460</td>
<td>0.000327</td>
</tr>
<tr>
<th>25%</th>
<td>0.0</td>
<td>35.000000</td>
<td>0.137046</td>
<td>0.000738</td>
</tr>
<tr>
<th>50%</th>
<td>0.0</td>
<td>40.000000</td>
<td>0.157737</td>
<td>0.001387</td>
</tr>
<tr>
<th>75%</th>
<td>0.0</td>
<td>47.000000</td>
<td>0.184942</td>
<td>0.002303</td>
</tr>
<tr>
<th>max</th>
<td>0.0</td>
<td>53.000000</td>
<td>0.208418</td>
<td>0.003010</td>
</tr>
</tbody>
</table>
</div>
```python
DUT.convert()
VerilogTextReader('MultiBehaverial');
```
***Verilog module from MultiBehaverial.v***
// File: MultiBehaverial.v
// Generated by MyHDL 0.10
// Date: Sat Aug 25 17:54:33 2018
`timescale 1ns/10ps
module MultiBehaverial (
a,
b,
c
);
input signed [7:0] a;
input signed [7:0] b;
output signed [15:0] c;
wire signed [15:0] c;
assign c = (a * b);
endmodule
# Truncation (Unsigned)
```python
#Q4.4 *Q4.4 -> Q8.8
Q2=(Q[0]*2, Q[1]*2)
Q2len=Q2[0]+Q2[1]
Q2scale=2**(Q2[1]); Q2scale
```
```python
a=3.2500; aQ=int(a*Qscale)
b=2.0625; bQ=int(b*Qscale)
aQ, bQ
#bin(aQ, Qlen), bin(bQ, Qlen)
```
```python
ab=a*b; ab
abQ=int(ab*Qscale); abQ
abdQ=abQ/ Qscale; abdQ, ab
```
```python
aQBV=intbv(aQ)[Qlen:]; bQBV=intbv(bQ)[Qlen:]
f'aQBV: {bin(aQBV, Qlen)}; bQBV: {bin(bQBV, Qlen)}'
```
'aQBV: 00110100; bQBV: 00100001'
```python
abQ=aQBV*bQBV; abQ
```
```python
abQBV=intbv(abQ)[Q2len:].signed(); abQBV, bin(abQBV), len(bin(abQBV))
```
(intbv(1716), '11010110100', 11)
```python
for j in range(Q2[1]):
Trunc=abQBV[Q2len:j]
TruncDQ=Trunc/(2**(Q2[1]-j))
print(bin(Trunc), TruncDQ, np.abs(ab-TruncDQ))
```
11010110100 6.703125 0.0
1101011010 6.703125 0.0
110101101 6.703125 0.0
11010110 6.6875 0.015625
1101011 6.6875 0.015625
110101 6.625 0.078125
11010 6.5 0.203125
1101 6.5 0.203125
# Truncation (Signed)
```python
a=3.2500; aQ=int(a*Qscale)
b=-2.0625; bQ=int(b*Qscale)
aQ, bQ
#bin(aQ, Qlen), bin(bQ, Qlen)
```
```python
ab=a*b; ab
abQ=int(ab*Qscale); abQ
abdQ=abQ/ Qscale; abdQ, ab
```
```python
aQBV=intbv(aQ)[Qlen:].signed(); bQBV=intbv(bQ)[Qlen:].signed()
f'aQBV: {bin(aQBV, Qlen)}; bQBV: {bin(bQBV, Qlen)}'
```
'aQBV: 00110100; bQBV: 11011111'
```python
abQ=aQBV*bQBV; abQ
```
```python
abQBV=intbv(abQ)[Q2len:].signed(); abQBV, bin(abQBV), len(bin(abQBV))
```
(intbv(-1716), '100101001100', 12)
```python
for j in range(Q2[1]):
Trunc=abQBV[Q2len:j].signed()
TruncDQ=Trunc/(2**(Q2[1]-j))
print(bin(Trunc), TruncDQ, np.abs(ab-TruncDQ))
```
100101001100 -6.703125 0.0
10010100110 -6.703125 0.0
1001010011 -6.703125 0.0
100101001 -6.71875 0.015625
10010100 -6.75 0.046875
1001010 -6.75 0.046875
100101 -6.75 0.046875
10010 -7.0 0.296875
```python
for j in range(Q2[1]):
Trunc=(abQBV>>j).signed()
TruncDQ=Trunc/(2**(Q2[1]-j))
print(bin(Trunc), TruncDQ, np.abs(ab-TruncDQ))
```
100101001100 -6.703125 0.0
10010100110 -6.703125 0.0
1001010011 -6.703125 0.0
100101001 -6.71875 0.015625
10010100 -6.75 0.046875
1001010 -6.75 0.046875
100101 -6.75 0.046875
10010 -7.0 0.296875
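Both the slice and the shift drop low-order bits of a two's-complement word, which is flooring toward negative infinity; that is why the truncated values above move from -6.703125 toward -7.0 rather than toward -6. A small check with plain Python integers (a sketch assuming `abQ = -1716` and `Q2` from the cells above) shows the shift agrees with floor division:
```python
#arithmetic right shift of a negative Python int is floor division by 2**j,
#so signed truncation always rounds toward -infinity
for j in range(Q2[1]):
    print(j, (abQ>>j)/(2**(Q2[1]-j)), (abQ//(2**j))/(2**(Q2[1]-j)))
```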
# Round Half Up
```python
a=3.2500; aQ=int(a*Qscale)
b=-2.0625; bQ=int(b*Qscale)
aQ, bQ
bin(aQ, Qlen), bin(bQ, Qlen)
```
('00110100', '11011111')
```python
ab=a*b; ab
abQ=int(ab*Qscale); abQ
abdQ=abQ/ Qscale; abdQ, ab
```
```python
aQBV=intbv(aQ)[Qlen:].signed(); bQBV=intbv(bQ)[Qlen:].signed()
f'aQBV: {bin(aQBV, Qlen)}; bQBV: {bin(bQBV, Qlen)}'
```
'aQBV: 00110100; bQBV: 11011111'
```python
abQ=aQBV*bQBV; abQ
```
```python
abQBV=intbv(abQ)[Q2len:].signed(); abQBV, bin(abQBV), len(bin(abQBV))
```
(intbv(-1716), '100101001100', 12)
```python
ab, floor(ab+.5), -ceiling(-ab-.5), ceiling(floor(2*ab)/2)
```
```python
Round=abQBV[Q2len-1:0].signed()
RoundDQ=Round/(2**(Q2[1]))
print(bin(Round), RoundDQ, np.abs(ab-RoundDQ))
```
100101001100 -6.703125 0.0
* `{ {(OWID){1'b0}}, 1'b1, {(IWID-OWID-1){1'b0}} }` corresponds to 0.5
* `i_data[(IWID-1):0] + { {(OWID){1'b0}}, 1'b1, {(IWID-OWID-1){1'b0}} }` corresponds to x + 0.5
* `w_halfup[(IWID-1):(IWID-OWID)]` corresponds to floor(x + 0.5)
```python
concat(intbv(0)[8:], True, intbv(0)[16-8-1:])
```
intbv(128)
```python
PointFive=intbv(int(.5*Q2scale))[16:]; PointFive, bin(PointFive, 16)
```
(intbv(128), '0000000010000000')
```python
abQBVP5=intbv(abQBV+PointFive)[16:].signed()
abQBVP5
```
intbv(-1588)
```python
abQBVP5=abQBVP5[Q2len-1:Q2len-Qlen].signed(); abQBVP5
```
intbv(-7)
```python
abQBVP5, floor(ab+.5)
```
(intbv(-7), -7)
# Round towards zero
```python
```
# Round Away from Zero
# Round Half to Even
```python
```
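The remaining rounding modes are only named above and their cells were left empty. As a hedged reference only (plain-Python sketches at the de-Qed value level, not MyHDL hardware like the round-half-up cell above; `-1716` is the signed Q8.8 product from the cells above):
```python
import math

def round_toward_zero(vQ, fracBits=8):
    #drop the fraction of the magnitude, keep the sign (int() semantics)
    return int(vQ/2**fracBits)

def round_away_from_zero(vQ, fracBits=8):
    #any nonzero fraction pushes the result away from zero
    v=vQ/2**fracBits
    return math.floor(v) if v < 0 else math.ceil(v)

def round_half_to_even(vQ, fracBits=8):
    #Python's round() already implements round-half-to-even (banker's rounding)
    return round(vQ/2**fracBits)

round_toward_zero(-1716), round_away_from_zero(-1716), round_half_to_even(-1716)
#(-6, -7, -7) for the value -6.703125
```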
|
(ite (> x 10)
(say 'hello world')
(say 'good bye'))
(puts "hello" name)
# hello
(wrap-ruby-domain :Uri, String)
# The following function does something
# this is a multi-line comment
(define-operator :say_hello
(signature :name String)
(returns String)
(puts 'hello')
) |
[STATEMENT]
lemma fun_typ_eq_ret_unique:
assumes "(AF_fun_typ x1 b1 c1 \<tau>1' s1') = (AF_fun_typ x2 b2 c2 \<tau>2' s2')"
shows "\<tau>1'[x1::=v]\<^sub>\<tau>\<^sub>v = \<tau>2'[x2::=v]\<^sub>\<tau>\<^sub>v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<tau>1'[x1::=v]\<^sub>\<tau>\<^sub>v = \<tau>2'[x2::=v]\<^sub>\<tau>\<^sub>v
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<tau>1'[x1::=v]\<^sub>\<tau>\<^sub>v = \<tau>2'[x2::=v]\<^sub>\<tau>\<^sub>v
[PROOF STEP]
have "[[atom x1]]lst. \<tau>1' = [[atom x2]]lst. \<tau>2'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [[atom x1]]lst. \<tau>1' = [[atom x2]]lst. \<tau>2'
[PROOF STEP]
using assms lst_fst fun_typ.eq_iff lst_snd
[PROOF STATE]
proof (prove)
using this:
AF_fun_typ x1 b1 c1 \<tau>1' s1' = AF_fun_typ x2 b2 c2 \<tau>2' s2'
[[atom ?x]]lst. (?t1.0, ?t2.0) = [[atom ?x']]lst. (?t1', ?t2') \<Longrightarrow> [[atom ?x]]lst. ?t1.0 = [[atom ?x']]lst. ?t1'
(AF_fun_typ ?x ?b ?c ?\<tau> ?s = AF_fun_typ ?xa ?ba ?ca ?\<tau>' ?sa) = ([[atom ?x]]lst. ((?c, ?\<tau>), ?s) = [[atom ?xa]]lst. ((?ca, ?\<tau>'), ?sa) \<and> ?b = ?ba)
[[atom ?x]]lst. (?t1.0, ?t2.0) = [[atom ?x']]lst. (?t1', ?t2') \<Longrightarrow> [[atom ?x]]lst. ?t2.0 = [[atom ?x']]lst. ?t2'
goal (1 subgoal):
1. [[atom x1]]lst. \<tau>1' = [[atom x2]]lst. \<tau>2'
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
[[atom x1]]lst. \<tau>1' = [[atom x2]]lst. \<tau>2'
goal (1 subgoal):
1. \<tau>1'[x1::=v]\<^sub>\<tau>\<^sub>v = \<tau>2'[x2::=v]\<^sub>\<tau>\<^sub>v
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
[[atom x1]]lst. \<tau>1' = [[atom x2]]lst. \<tau>2'
goal (1 subgoal):
1. \<tau>1'[x1::=v]\<^sub>\<tau>\<^sub>v = \<tau>2'[x2::=v]\<^sub>\<tau>\<^sub>v
[PROOF STEP]
using subst_v_flip_eq_two[of x1 \<tau>1' x2 \<tau>2' v] subst_v_\<tau>_def
[PROOF STATE]
proof (prove)
using this:
[[atom x1]]lst. \<tau>1' = [[atom x2]]lst. \<tau>2'
[[atom x1]]lst. \<tau>1' = [[atom x2]]lst. \<tau>2' \<Longrightarrow> \<tau>1'[x1::=v]\<^sub>v = \<tau>2'[x2::=v]\<^sub>v
subst_v = subst_tv_abbrev
goal (1 subgoal):
1. \<tau>1'[x1::=v]\<^sub>\<tau>\<^sub>v = \<tau>2'[x2::=v]\<^sub>\<tau>\<^sub>v
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<tau>1'[x1::=v]\<^sub>\<tau>\<^sub>v = \<tau>2'[x2::=v]\<^sub>\<tau>\<^sub>v
goal:
No subgoals!
[PROOF STEP]
qed |
/-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import analysis.specific_limits.basic
import topology.urysohns_lemma
import topology.continuous_function.bounded
import topology.uniform_space.cauchy
/-!
# Metrizability of a T₃ topological space with second countable topology
In this file we define metrizable topological spaces, i.e., topological spaces for which there
exists a metric space structure that generates the same topology.
We also show that a T₃ topological space with second countable topology `X` is metrizable.
First we prove that `X` can be embedded into `l^∞`, then use this embedding to pull back the metric
space structure.
-/
open set filter metric
open_locale bounded_continuous_function filter topology
namespace topological_space
variables {ι X Y : Type*} {π : ι → Type*} [topological_space X] [topological_space Y]
[finite ι] [Π i, topological_space (π i)]
/-- A topological space is *pseudo metrizable* if there exists a pseudo metric space structure
compatible with the topology. To endow such a space with a compatible distance, use
`letI : pseudo_metric_space X := topological_space.pseudo_metrizable_space_pseudo_metric X`. -/
class pseudo_metrizable_space (X : Type*) [t : topological_space X] : Prop :=
(exists_pseudo_metric : ∃ (m : pseudo_metric_space X), m.to_uniform_space.to_topological_space = t)
@[priority 100]
instance _root_.pseudo_metric_space.to_pseudo_metrizable_space {X : Type*}
[m : pseudo_metric_space X] :
pseudo_metrizable_space X :=
⟨⟨m, rfl⟩⟩
/-- Construct on a metrizable space a metric compatible with the topology. -/
noncomputable def pseudo_metrizable_space_pseudo_metric
(X : Type*) [topological_space X] [h : pseudo_metrizable_space X] :
pseudo_metric_space X :=
h.exists_pseudo_metric.some.replace_topology h.exists_pseudo_metric.some_spec.symm
instance pseudo_metrizable_space_prod [pseudo_metrizable_space X] [pseudo_metrizable_space Y] :
pseudo_metrizable_space (X × Y) :=
begin
letI : pseudo_metric_space X := pseudo_metrizable_space_pseudo_metric X,
letI : pseudo_metric_space Y := pseudo_metrizable_space_pseudo_metric Y,
apply_instance
end
/-- Given an inducing map of a topological space into a pseudo metrizable space, the source space
is also pseudo metrizable. -/
lemma _root_.inducing.pseudo_metrizable_space [pseudo_metrizable_space Y] {f : X → Y}
(hf : inducing f) :
pseudo_metrizable_space X :=
begin
letI : pseudo_metric_space Y := pseudo_metrizable_space_pseudo_metric Y,
exact ⟨⟨hf.comap_pseudo_metric_space, rfl⟩⟩
end
/-- Every pseudo-metrizable space is first countable. -/
@[priority 100]
instance pseudo_metrizable_space.first_countable_topology [h : pseudo_metrizable_space X] :
topological_space.first_countable_topology X :=
begin
unfreezingI { rcases h with ⟨_, hm⟩, rw ←hm },
exact @uniform_space.first_countable_topology X pseudo_metric_space.to_uniform_space
emetric.uniformity.filter.is_countably_generated,
end
instance pseudo_metrizable_space.subtype [pseudo_metrizable_space X]
(s : set X) : pseudo_metrizable_space s :=
inducing_coe.pseudo_metrizable_space
instance pseudo_metrizable_space_pi [Π i, pseudo_metrizable_space (π i)] :
pseudo_metrizable_space (Π i, π i) :=
by { casesI nonempty_fintype ι, letI := λ i, pseudo_metrizable_space_pseudo_metric (π i),
apply_instance }
/-- A topological space is metrizable if there exists a metric space structure compatible with the
topology. To endow such a space with a compatible distance, use
`letI : metric_space X := topological_space.metrizable_space_metric X` -/
class metrizable_space (X : Type*) [t : topological_space X] : Prop :=
(exists_metric : ∃ (m : metric_space X), m.to_uniform_space.to_topological_space = t)
@[priority 100]
instance _root_.metric_space.to_metrizable_space {X : Type*} [m : metric_space X] :
metrizable_space X :=
⟨⟨m, rfl⟩⟩
@[priority 100]
instance metrizable_space.to_pseudo_metrizable_space [h : metrizable_space X] :
pseudo_metrizable_space X :=
⟨let ⟨m, hm⟩ := h.1 in ⟨m.to_pseudo_metric_space, hm⟩⟩
/-- Construct on a metrizable space a metric compatible with the topology. -/
noncomputable def metrizable_space_metric (X : Type*) [topological_space X]
[h : metrizable_space X] :
metric_space X :=
h.exists_metric.some.replace_topology h.exists_metric.some_spec.symm
@[priority 100]
instance t2_space_of_metrizable_space [metrizable_space X] : t2_space X :=
by { letI : metric_space X := metrizable_space_metric X, apply_instance }
instance metrizable_space_prod [metrizable_space X] [metrizable_space Y] :
metrizable_space (X × Y) :=
begin
letI : metric_space X := metrizable_space_metric X,
letI : metric_space Y := metrizable_space_metric Y,
apply_instance
end
/-- Given an embedding of a topological space into a metrizable space, the source space is also
metrizable. -/
lemma _root_.embedding.metrizable_space [metrizable_space Y] {f : X → Y} (hf : embedding f) :
metrizable_space X :=
begin
letI : metric_space Y := metrizable_space_metric Y,
exact ⟨⟨hf.comap_metric_space f, rfl⟩⟩
end
instance metrizable_space.subtype [metrizable_space X] (s : set X) : metrizable_space s :=
embedding_subtype_coe.metrizable_space
instance metrizable_space_pi [Π i, metrizable_space (π i)] : metrizable_space (Π i, π i) :=
by { casesI nonempty_fintype ι, letI := λ i, metrizable_space_metric (π i), apply_instance }
variables (X) [t3_space X] [second_countable_topology X]
/-- A T₃ topological space with second countable topology can be embedded into `l^∞ = ℕ →ᵇ ℝ`.
-/
lemma exists_embedding_l_infty : ∃ f : X → (ℕ →ᵇ ℝ), embedding f :=
begin
haveI : normal_space X := normal_space_of_t3_second_countable X,
-- Choose a countable basis, and consider the set `s` of pairs of set `(U, V)` such that `U ∈ B`,
-- `V ∈ B`, and `closure U ⊆ V`.
rcases exists_countable_basis X with ⟨B, hBc, -, hB⟩,
set s : set (set X × set X) := {UV ∈ B ×ˢ B| closure UV.1 ⊆ UV.2},
-- `s` is a countable set.
haveI : encodable s := ((hBc.prod hBc).mono (inter_subset_left _ _)).to_encodable,
-- We don't have the space of bounded (possibly discontinuous) functions, so we equip `s`
-- with the discrete topology and deal with `s →ᵇ ℝ` instead.
letI : topological_space s := ⊥, haveI : discrete_topology s := ⟨rfl⟩,
rsuffices ⟨f, hf⟩ : ∃ f : X → (s →ᵇ ℝ), embedding f,
{ exact ⟨λ x, (f x).extend (encodable.encode' s) 0, (bounded_continuous_function.isometry_extend
(encodable.encode' s) (0 : ℕ →ᵇ ℝ)).embedding.comp hf⟩ },
have hd : ∀ UV : s, disjoint (closure UV.1.1) (UV.1.2ᶜ) :=
λ UV, disjoint_compl_right.mono_right (compl_subset_compl.2 UV.2.2),
-- Choose a sequence of `εₙ > 0`, `n : s`, that is bounded above by `1` and tends to zero
-- along the `cofinite` filter.
obtain ⟨ε, ε01, hε⟩ : ∃ ε : s → ℝ, (∀ UV, ε UV ∈ Ioc (0 : ℝ) 1) ∧ tendsto ε cofinite (𝓝 0),
{ rcases pos_sum_of_encodable zero_lt_one s with ⟨ε, ε0, c, hεc, hc1⟩,
refine ⟨ε, λ UV, ⟨ε0 UV, _⟩, hεc.summable.tendsto_cofinite_zero⟩,
exact (le_has_sum hεc UV $ λ _ _, (ε0 _).le).trans hc1 },
/- For each `UV = (U, V) ∈ s` we use Urysohn's lemma to choose a function `f UV` that is equal to
zero on `U` and is equal to `ε UV` on the complement to `V`. -/
have : ∀ UV : s, ∃ f : C(X, ℝ), eq_on f 0 UV.1.1 ∧ eq_on f (λ _, ε UV) UV.1.2ᶜ ∧
∀ x, f x ∈ Icc 0 (ε UV),
{ intro UV,
rcases exists_continuous_zero_one_of_closed is_closed_closure
(hB.is_open UV.2.1.2).is_closed_compl (hd UV) with ⟨f, hf₀, hf₁, hf01⟩,
exact ⟨ε UV • f, λ x hx, by simp [hf₀ (subset_closure hx)], λ x hx, by simp [hf₁ hx],
λ x, ⟨mul_nonneg (ε01 _).1.le (hf01 _).1, mul_le_of_le_one_right (ε01 _).1.le (hf01 _).2⟩⟩ },
choose f hf0 hfε hf0ε,
have hf01 : ∀ UV x, f UV x ∈ Icc (0 : ℝ) 1,
from λ UV x, Icc_subset_Icc_right (ε01 _).2 (hf0ε _ _),
/- The embedding is given by `F x UV = f UV x`. -/
set F : X → s →ᵇ ℝ := λ x, ⟨⟨λ UV, f UV x, continuous_of_discrete_topology⟩, 1, λ UV₁ UV₂,
real.dist_le_of_mem_Icc_01 (hf01 _ _) (hf01 _ _)⟩,
have hF : ∀ x UV, F x UV = f UV x := λ _ _, rfl,
refine ⟨F, embedding.mk' _ (λ x y hxy, _) (λ x, le_antisymm _ _)⟩,
{ /- First we prove that `F` is injective. Indeed, if `F x = F y` and `x ≠ y`, then we can find
`(U, V) ∈ s` such that `x ∈ U` and `y ∉ V`, hence `F x UV = 0 ≠ ε UV = F y UV`. -/
refine not_not.1 (λ Hne, _), -- `by_contra Hne` timeouts
rcases hB.mem_nhds_iff.1 (is_open_ne.mem_nhds Hne) with ⟨V, hVB, hxV, hVy⟩,
rcases hB.exists_closure_subset (hB.mem_nhds hVB hxV) with ⟨U, hUB, hxU, hUV⟩,
set UV : ↥s := ⟨(U, V), ⟨hUB, hVB⟩, hUV⟩,
apply (ε01 UV).1.ne,
calc (0 : ℝ) = F x UV : (hf0 UV hxU).symm
... = F y UV : by rw hxy
... = ε UV : hfε UV (λ h : y ∈ V, hVy h rfl) },
{ /- Now we prove that each neighborhood `V` of `x : X` include a preimage of a neighborhood of
`F x` under `F`. Without loss of generality, `V` belongs to `B`. Choose `U ∈ B` such that
`x ∈ V` and `closure V ⊆ U`. Then the preimage of the `(ε (U, V))`-neighborhood of `F x`
is included by `V`. -/
refine ((nhds_basis_ball.comap _).le_basis_iff hB.nhds_has_basis).2 _,
rintro V ⟨hVB, hxV⟩,
rcases hB.exists_closure_subset (hB.mem_nhds hVB hxV) with ⟨U, hUB, hxU, hUV⟩,
set UV : ↥s := ⟨(U, V), ⟨hUB, hVB⟩, hUV⟩,
refine ⟨ε UV, (ε01 UV).1, λ y (hy : dist (F y) (F x) < ε UV), _⟩,
replace hy : dist (F y UV) (F x UV) < ε UV,
from (bounded_continuous_function.dist_coe_le_dist _).trans_lt hy,
contrapose! hy,
rw [hF, hF, hfε UV hy, hf0 UV hxU, pi.zero_apply, dist_zero_right],
exact le_abs_self _ },
{ /- Finally, we prove that `F` is continuous. Given `δ > 0`, consider the set `T` of `(U, V) ∈ s`
such that `ε (U, V) ≥ δ`. Since `ε` tends to zero, `T` is finite. Since each `f` is continuous,
we can choose a neighborhood such that `dist (F y (U, V)) (F x (U, V)) ≤ δ` for any
`(U, V) ∈ T`. For `(U, V) ∉ T`, the same inequality is true because both `F y (U, V)` and
`F x (U, V)` belong to the interval `[0, ε (U, V)]`. -/
refine (nhds_basis_closed_ball.comap _).ge_iff.2 (λ δ δ0, _),
have h_fin : {UV : s | δ ≤ ε UV}.finite, by simpa only [← not_lt] using hε (gt_mem_nhds δ0),
have : ∀ᶠ y in 𝓝 x, ∀ UV, δ ≤ ε UV → dist (F y UV) (F x UV) ≤ δ,
{ refine (eventually_all_finite h_fin).2 (λ UV hUV, _),
exact (f UV).continuous.tendsto x (closed_ball_mem_nhds _ δ0) },
refine this.mono (λ y hy, (bounded_continuous_function.dist_le δ0.le).2 $ λ UV, _),
cases le_total δ (ε UV) with hle hle,
exacts [hy _ hle, (real.dist_le_of_mem_Icc (hf0ε _ _) (hf0ε _ _)).trans (by rwa sub_zero)] }
end
/-- *Urysohn's metrization theorem* (Tychonoff's version): a T₃ topological space with second
countable topology `X` is metrizable, i.e., there exists a metric space structure that generates the
same topology. -/
lemma metrizable_space_of_t3_second_countable : metrizable_space X :=
let ⟨f, hf⟩ := exists_embedding_l_infty X in hf.metrizable_space
instance : metrizable_space ennreal := metrizable_space_of_t3_second_countable ennreal
end topological_space
|
State Before: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
⊢ snd (x ^ n) = n • fst x ^ Nat.pred n • snd x State After: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
⊢ List.sum (List.map (fun i => fst x ^ (Nat.pred n - i + i) • snd x) (List.range n)) = n • fst x ^ Nat.pred n • snd x Tactic: simp_rw [snd_pow_eq_sum, this, smul_smul, ← pow_add] State Before: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n : ℕ
h : op (fst x) • snd x = fst x • snd x
⊢ ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x State After: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
n : ℕ
⊢ op (fst x ^ n) • snd x = fst x ^ n • snd x Tactic: intro n State Before: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
n : ℕ
⊢ op (fst x ^ n) • snd x = fst x ^ n • snd x State After: case zero
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n : ℕ
h : op (fst x) • snd x = fst x • snd x
⊢ op (fst x ^ Nat.zero) • snd x = fst x ^ Nat.zero • snd x
case succ
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
n : ℕ
ih : op (fst x ^ n) • snd x = fst x ^ n • snd x
⊢ op (fst x ^ Nat.succ n) • snd x = fst x ^ Nat.succ n • snd x Tactic: induction' n with n ih State Before: case zero
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n : ℕ
h : op (fst x) • snd x = fst x • snd x
⊢ op (fst x ^ Nat.zero) • snd x = fst x ^ Nat.zero • snd x State After: no goals Tactic: simp State Before: case succ
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
n : ℕ
ih : op (fst x ^ n) • snd x = fst x ^ n • snd x
⊢ op (fst x ^ Nat.succ n) • snd x = fst x ^ Nat.succ n • snd x State After: no goals Tactic: rw [pow_succ', MulOpposite.op_mul, mul_smul, mul_smul, ← h,
smul_comm (_ : R) (op x.fst) x.snd, ih] State Before: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
⊢ List.sum (List.map (fun i => fst x ^ (Nat.pred 0 - i + i) • snd x) (List.range 0)) = 0 • fst x ^ Nat.pred 0 • snd x State After: no goals Tactic: rw [Nat.pred_zero, pow_zero, List.range_zero, zero_smul, List.map_nil, List.sum_nil] State Before: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
⊢ List.sum (List.map (fun i => fst x ^ (Nat.pred (Nat.succ n) - i + i) • snd x) (List.range (Nat.succ n))) =
Nat.succ n • fst x ^ Nat.pred (Nat.succ n) • snd x State After: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
⊢ List.sum (List.map (fun i => fst x ^ (n - i + i) • snd x) (List.range (Nat.succ n))) = Nat.succ n • fst x ^ n • snd x Tactic: simp_rw [Nat.pred_succ] State Before: R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
⊢ List.sum (List.map (fun i => fst x ^ (n - i + i) • snd x) (List.range (Nat.succ n))) = Nat.succ n • fst x ^ n • snd x State After: case refine'_1
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
⊢ ∀ (x_1 : M), x_1 ∈ List.map (fun i => fst x ^ (n - i + i) • snd x) (List.range (Nat.succ n)) → x_1 = fst x ^ n • snd x
case refine'_2
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
⊢ List.length (List.map (fun i => fst x ^ (n - i + i) • snd x) (List.range (Nat.succ n))) • fst x ^ n • snd x =
Nat.succ n • fst x ^ n • snd x Tactic: refine' (List.sum_eq_card_nsmul _ (x.fst ^ n • x.snd) _).trans _ State Before: case refine'_1
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
⊢ ∀ (x_1 : M), x_1 ∈ List.map (fun i => fst x ^ (n - i + i) • snd x) (List.range (Nat.succ n)) → x_1 = fst x ^ n • snd x State After: case refine'_1
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
m : M
hm : m ∈ List.map (fun i => fst x ^ (n - i + i) • snd x) (List.range (Nat.succ n))
⊢ m = fst x ^ n • snd x Tactic: rintro m hm State Before: case refine'_1
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
m : M
hm : m ∈ List.map (fun i => fst x ^ (n - i + i) • snd x) (List.range (Nat.succ n))
⊢ m = fst x ^ n • snd x State After: case refine'_1
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
m : M
hm : ∃ a, a < Nat.succ n ∧ fst x ^ (n - a + a) • snd x = m
⊢ m = fst x ^ n • snd x Tactic: simp_rw [List.mem_map, List.mem_range] at hm State Before: case refine'_1
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
m : M
hm : ∃ a, a < Nat.succ n ∧ fst x ^ (n - a + a) • snd x = m
⊢ m = fst x ^ n • snd x State After: case refine'_1.intro.intro
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n i : ℕ
hi : i < Nat.succ n
⊢ fst x ^ (n - i + i) • snd x = fst x ^ n • snd x Tactic: obtain ⟨i, hi, rfl⟩ := hm State Before: case refine'_1.intro.intro
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n i : ℕ
hi : i < Nat.succ n
⊢ fst x ^ (n - i + i) • snd x = fst x ^ n • snd x State After: no goals Tactic: rw [tsub_add_cancel_of_le (Nat.lt_succ_iff.mp hi)] State Before: case refine'_2
R : Type u
M : Type v
inst✝⁴ : Monoid R
inst✝³ : AddMonoid M
inst✝² : DistribMulAction R M
inst✝¹ : DistribMulAction Rᵐᵒᵖ M
inst✝ : SMulCommClass R Rᵐᵒᵖ M
x : tsze R M
n✝ : ℕ
h : op (fst x) • snd x = fst x • snd x
this : ∀ (n : ℕ), op (fst x ^ n) • snd x = fst x ^ n • snd x
n : ℕ
⊢ List.length (List.map (fun i => fst x ^ (n - i + i) • snd x) (List.range (Nat.succ n))) • fst x ^ n • snd x =
Nat.succ n • fst x ^ n • snd x State After: no goals Tactic: rw [List.length_map, List.length_range] |
module Twist.Rotation
import Control.Algebra
import Twist.Cycle
import Twist.Face
import Twist.Util
%default total
%access export
data Rotation : (f : Type) -> (c : Type) -> (face : f) -> Type where
Rot : (face : (Face (S k))) -> (c : Cycle (S j)) -> Rotation (Face (S k)) (Cycle (S j)) face
Eq c => Eq (Rotation f c face) where
(Rot face cx) == (Rot face cy) = cx == cy
Show (Rotation f c face) where
show (Rot face x) = show face ++ show x
cw : (Rotation f c face) -> (Rotation f c face)
cw (Rot face cx) = Rot face (next cx)
ccw : (Rotation f c face) -> (Rotation f c face)
ccw (Rot face cx) = Rot face (prev cx)
Semigroup (Rotation f c face) where
(Rot face cx) <+> (Rot face cy) = Rot face (cx <+> cy)
rotateZ : (face : Face (S k)) -> Rotation (Face (S k)) (Cycle (S j)) face
rotateZ x = Rot x neutral
rotate : (face : Face (S k)) -> Rotation (Face (S k)) (Cycle (S j)) face
rotate x = Rot x (next neutral)
rotate' : (face : Face (S k)) -> Rotation (Face (S k)) (Cycle (S j)) face
rotate' x = Rot x (prev neutral)
Monoid (Rotation (Face (S k)) (Cycle (S j)) face) where
neutral = Rot face neutral
Group (Rotation (Face (S k)) (Cycle (S j)) face) where
inverse (Rot face cx) = Rot face (inverse cx)
|
/*
** t-tests
**
** G.Lohmann, Jan 2017
*/
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <gsl/gsl_cdf.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#define TINY 1.0e-10
#define ABS(x) ((x) > 0 ? (x) : -(x))
#define SQR(x) ((x)*(x))
/*
** convert t to z values
*/
double t2z(double t,double df)
{
double p=0,z=0;
double a,b,x;
extern double gsl_sf_beta_inc(double,double,double);
/* t to p */
x = df/(df+(t*t));
if (x <= 0 || x > 1) return 0;
a = 0.5*df;
b = 0.5;
p = 0.5 * gsl_sf_beta_inc(a,b,x);
/* p to z */
z = gsl_cdf_ugaussian_Qinv(p);
return z;
}
void avevar(double *data,int n,double *a,double *v)
{
int j;
double ave,var,nx,s,u;
nx = (double)n;
ave = 0;
for (j=0; j<n; j++) ave += data[j];
ave /= nx;
var = u = 0;
for (j=0; j<n; j++) {
s = data[j]-ave;
u += s;
var += s*s;
}
var=(var-u*u/nx)/(nx-1);
*v = var;
*a = ave;
}
/* welch test, unequal variance */
double welchtest(double *data1,double *data2,int n1,int n2)
{
double tiny=TINY;
double ave1,ave2,var1,var2;
double z=0,t=0,df=0;
double nx1 = (double)n1;
double nx2 = (double)n2;
avevar(data1,n1,&ave1,&var1);
if (var1 < tiny) return 0;
avevar(data2,n2,&ave2,&var2);
if (var2 < tiny) return 0;
t = (ave1 - ave2)/sqrt(var1/nx1 + var2/nx2);
df = SQR(var1/nx1+var2/nx2)/(SQR(var1/nx1)/(nx1-1)+SQR(var2/nx2)/(nx2-1));
z = t2z((double) t,(double) df);
if (t < 0) z = -z;
return z;
}
/* paired twosample test */
double paired_ttest(double *data1,double *data2,int n)
{
int j;
double ave,var,tiny=TINY;
for (j=0; j<n; j++) {
data1[j] -= data2[j];
}
avevar(data1,n,&ave,&var);
if (var < tiny) return 0;
double nx = (double)n;
double df = nx-1.0;
double t = sqrt(nx) * ave/sqrt(var);
double z = t2z(t,df);
if (t < 0) z = -z;
return z;
}
/* paired twosample test */
double xtest2(double *data1,double *data2,int n)
{
int j;
double nx,ave1,ave2,var1,var2,sd,u,df,cov;
double tiny=TINY;
double t=0,z=0;
avevar(data1,n,&ave1,&var1);
avevar(data2,n,&ave2,&var2);
if (var1 < tiny || var2 < tiny) return 0;
nx = (double)n;
df = nx-1;
cov = 0;
for (j=0; j<n; j++)
cov += (data1[j]-ave1)*(data2[j]-ave2);
cov /= df;
t = 0;
u = (var1+var2-2.0*cov);
if (u < tiny) return 0;
sd = sqrt(u/nx);
if (sd < tiny) return 0;
t = (double)(ave1-ave2)/sd;
z = t2z(t,df);
if (t < 0) z = -z;
return z;
}
/* twosample t-test, pooled variance */
double ttest2(double *data1,double *data2,int n1,int n2)
{
int j;
double nx1,nx2,ave1,ave2,sd,s1,s2,df;
double tiny=TINY;
double t=0,z=0;
nx1 = (double)n1;
nx2 = (double)n2;
ave1 = ave2 = 0;
for (j=0; j<n1; j++) ave1 += data1[j];
for (j=0; j<n2; j++) ave2 += data2[j];
if (ABS(ave1) < tiny) return 0;
if (ABS(ave2) < tiny) return 0;
ave1 /= nx1;
ave2 /= nx2;
s1 = 0;
for (j=0; j<n1; j++) s1 += SQR(data1[j]-ave1);
s2 = 0;
for (j=0; j<n2; j++) s2 += SQR(data2[j]-ave2);
df = nx1 + nx2 - 2.0;
sd = sqrt((1.0/nx1 + 1.0/nx2) * (s1+s2)/df);
if (sd < tiny) return 0;
t = (double)(ave1-ave2)/sd;
z = t2z(t,df);
if (t < 0) z = -z;
return z;
}
/* onesample t-test */
double ttest1(double *data,int n)
{
double ave=0,var=0,tiny=TINY;
avevar(data,n,&ave,&var);
if (var < tiny) return 0;
double nx = (double)n;
double df = nx-1.0;
double t = sqrt(nx) * ave/sqrt(var);
double z = t2z(t,df);
if (t < 0) z = -z;
return z;
}
|
module Data.Time.Calendar.MonthDay
import Data.Vect
import Data.Fin
import public Data.Time.Calendar.Types
%default total
-- --------------------------------------------------------------------------
export monthLengths : Bool -> Vect 12 DayOfMonth
monthLengths isleap = [
31, (if isleap then 29 else 28), 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ]
-- | The length of a given month in the Gregorian or Julian calendars.
-- First arg is leap year flag.
export monthLength : Bool -> MonthOfYear -> DayOfMonth
monthLength isLeap month = index (restrict 11 $ cast $ month - 1) $ monthLengths isLeap
||| Convert month and day in the Gregorian or Julian calendars to day of year.
||| First arg is leap year flag.
export monthAndDayToDayOfYear : Bool -> MonthOfYear -> DayOfMonth -> DayOfYear
monthAndDayToDayOfYear isLeap month day =
let m' = if month < 1 then 1 else if month > 12 then 12 else month
ml = monthLength isLeap m'
d' = if day < 1 then 1 else if day > ml then ml else day
k = if m' <= 2 then 0 else if isLeap then -1 else -2
in ((367 * m' - 362) `div` 12) + cast k + d'
||| Convert month and day in the Gregorian or Julian calendars to day of year.
-- First arg is leap year flag.
export monthAndDayToDayOfYearValid : Bool -> MonthOfYear -> DayOfMonth -> Maybe DayOfYear
monthAndDayToDayOfYearValid isLeap month day =
if month < 1 || month > 12
then Nothing
else
let ml = monthLength isLeap month
k = if month < 2 then 0 else if isLeap then -1 else -2
in if day < 1 || day > ml
then Nothing
else Just $ ((367 * month - 362) `div` 12) + cast k + day
findMonthDay : Int -> Vect _ DayOfMonth -> Int -> (Int, Int)
findMonthDay m (n::ns) yd =
if yd > n then findMonthDay (m + 1) ns (yd - n)
else (m, yd)
findMonthDay m [] yd = (m, yd)
||| Convert day of year in the Gregorian or Julian calendars to month and day.
||| First arg is leap year flag.
export dayOfYearToMonthAndDay : Bool -> DayOfYear -> (MonthOfYear, DayOfMonth)
dayOfYearToMonthAndDay isLeap yd =
let yl = the Int $ if isLeap then 366 else 365
in findMonthDay 1
(monthLengths isLeap)
(if yd < 1 then 1 else if yd > yl then yl else yd)
-- --------------------------------------------------------------------------
-- vim: tw=80 sw=2 expandtab : |
# Linear Regression
## Introduction
* What regression is and what linear regression is
* Methods of estimation
<a href = "https://www.kdnuggets.com/2016/11/machine-learning-vs-statistics.html"> Aatash Shah once gave the following definitions in his article:</a>
> * "Machine learning" is a class of algorithms that can learn directly from data without relying on rules-based programming.
> * "Building a statistical model" means representing the relationships between data variables with mathematical equations.
In fact, many of the fashionable, advanced algorithms in machine learning are built on linear regression, so before we can go further into other algorithms we need a good understanding of what linear regression is.
### What is regression
In the real world there are many situations like this: two or more variables are related, but the relationship is not exact (not exact enough for one to strictly determine the other). For example, a person's height $X$ and weight $Y$ are related, usually in the sense that $Y$ tends to increase as $X$ increases, but $X$ does not strictly determine $Y$; the per-acre yield $Y$ of a crop is related to its seeding rate $X_1$, fertilizer amount $X_2$, and so on, but the $X$ series does not strictly determine $Y$.
Therefore, following the examples above, $Y$ is usually called the **dependent variable** or regressand, while $X$ is called the regressor or independent variable.
Why can't $X$ strictly determine $Y$? Simply because there are too many independent variables and, in most cases, we cannot enumerate them all; moreover, if we did include all of them, overfitting would easily occur. In addition, the $X$'s are variables that are related to $Y$, not variables in a strict causal relationship with it; the English terms make the distinction easy to see: correlation vs. causality.
Now suppose a problem has a dependent variable $Y$ and independent variables $X_1, X_2,\dots, X_p$. We can think of the value of $Y$ as consisting of two parts: one part driven by the influence of $X_1, X_2,\dots, X_p$, and one part caused by unknown factors, which we attribute to "error". This gives us the equation:
$$Y = f(X) + \epsilon \ \ \ \ X = (X_1, X_2,\dots, X_p), \ \ \epsilon = \text{error}$$
As a random error term, its expectation is required to be zero: $E(\epsilon) = 0$.
### Simple Linear Regression
Here we only consider the case where the regression function $f(x)$ is linear (including cases that can be transformed into a linear function); this is what we call linear regression. In this section we restrict attention to simple linear regression, i.e. a single independent variable $X$ (the dependent variable is, as always, a single $Y$). A small simulation sketch after the parameter list below makes the model concrete.
$\begin{align*}
Y &= \beta_0 + \beta_1 X + \epsilon \\
E(\epsilon) &= 0, \quad Var(\epsilon) = \sigma^2
\end{align*}$
where:
* $X$: independent variable (regressor);
* $Y$: dependent variable (response);
* $\epsilon$: random error;
* $\beta_0, \beta_1, \sigma^2$: unknown parameters;
* $\sigma^2$: the variance of the random error.
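The following minimal simulation sketch draws data from $Y = \beta_0 + \beta_1 X + \epsilon$; the parameter values and sample size are illustrative assumptions, not taken from the text.
```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)

# hypothetical "true" parameters, chosen only for illustration
beta_0, beta_1, sigma = 2.0, 0.5, 1.0

x = rng.uniform(0, 10, size=100)      # regressor X
eps = rng.normal(0, sigma, size=100)  # random error with E(eps) = 0, Var(eps) = sigma^2
y = beta_0 + beta_1 * x + eps         # response Y generated by the simple linear model

plt.scatter(x, y, s=10)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Data simulated from the simple linear model')
plt.show()
```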
#### Methods of Estimation: Point Estimation
Suppose we make $n$ independent observations of the model variables $X$ and $Y$, obtaining the sample:
$$(X_1, Y_1), (X_2, Y_2), \dots, (X_n, Y_n)$$
For this sample, $y_i = \beta_0 + \beta_1 x_i + \epsilon_i, \quad i = 1,\dots,n$.
Substituting the observations into the model gives $n$ equations, each with its own random error term.
We need to estimate $\beta_0$ and $\beta_1$.
##### Least Squares Estimation
* Observations: $(X_1, Y_1), (X_2, Y_2), \dots, (X_n, Y_n)$
* Model: $y_i = \beta_0 + \beta_1 x_i + \epsilon_i, \quad i = 1,\dots, n$
* $\epsilon_1, \dots, \epsilon_n$ are i.i.d., with $E(\epsilon_i) = 0$ and $Var(\epsilon_i) = \sigma^2$
We want to estimate $\beta_0$, $\beta_1$, and $\sigma^2$; **least squares estimation** gives us a way to do this.
* Residual: $e_i = y_i - \hat{y_i}$;
* Fitted value of $Y$: $\hat{y_i} = \hat{\beta_0} + \hat{\beta_1}x_i$;
* Estimates of $\beta_0$ and $\beta_1$: $\hat{\beta_0}$ and $\hat{\beta_1}$.
Least squares chooses the estimates that minimize the residual sum of squares:
$$\displaystyle Q = \sum_{i=1}^n e_i^2 = \sum_{i=1}^n(y_i - \beta_0 - \beta_1 x_i)^2$$
To minimize $Q$ with respect to $\beta_0$ and $\beta_1$, set the partial derivatives to zero:
$\begin{align*}
&\frac{\partial Q}{\partial \beta_0} = -2\sum_{i=1}^n (y_i - \beta_0 - \beta_1x_i) = 0 \\
&\frac{\partial Q}{\partial \beta_1} = -2\sum_{i=1}^n (y_i -\beta_0 -\beta_1 x_i)x_i = 0 \\
&S_{XX} = \sum_{i=1}^n (x_i - \bar{x})^2 = \sum_{i=1}^n x_i^2 - \frac{1}{n}(\sum_{i=1}^n x_i)^2 \\
&S_{XY} = \sum_{i=1}^n (x_i - \bar{x})(y_i - \bar{y}) = \sum_{i=1}^n x_i y_i - \frac{1}{n}(\sum_{i=1}^n x_i)(\sum_{i=1}^n y_i) \\
\end{align*}$
$\Rightarrow
\begin{equation}
\left\{
\begin{array}{lr}
\hat{\beta_1} = S_{XY} / S_{XX} \\
\hat{\beta_0} = \bar{y} - \hat{\beta_1} \bar{x} \\
\end{array}
\right.
\end{equation}
$
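Before turning to library routines, the closed-form estimates can be computed directly from these formulas. The sketch below is an illustrative implementation; the function name and the toy data are assumptions for demonstration only.
```python
import numpy as np

def least_squares_fit(x, y):
    """Return (beta_0_hat, beta_1_hat) for simple linear regression."""
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    x_bar, y_bar = x.mean(), y.mean()
    s_xx = np.sum((x - x_bar) ** 2)           # S_XX
    s_xy = np.sum((x - x_bar) * (y - y_bar))  # S_XY
    beta_1_hat = s_xy / s_xx                  # slope estimate
    beta_0_hat = y_bar - beta_1_hat * x_bar   # intercept estimate
    return beta_0_hat, beta_1_hat

# toy usage (numbers are made up for illustration)
x = [1, 2, 3, 4, 5]
y = [2.1, 3.9, 6.2, 8.1, 9.8]
print(least_squares_fit(x, y))
```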
In this section (using Python for linear regression) we will use `numpy`, `scipy.stats`, and `matplotlib.pyplot` to carry out the regression analysis. You can also try `scikit-learn` yourself; if you want to know more about `scikit-learn`, **please skim through its official website:** <a href="https://scikit-learn.org/stable/index.html"> <strong>scikit-learn</strong> </a>
```python
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

dataset = pd.read_csv("https://raw.githubusercontent.com/TerenceLiu98/Using-R-Series/master/data_set/student_scores.csv")
dataset.shape      # dataset's shape
dataset.head()     # the first five rows of the dataset
dataset.describe() # basic summary statistics
```
```python
dataset.plot(x='Hours', y='Scores', style='o')
plt.title('Hours vs Percentage')
plt.xlabel('Hours Studied')
plt.ylabel('Percentage Score')
plt.show()
```
```python
b_1, b_0, r_value, p_value, std_err = stats.linregress(dataset.Hours,dataset.Scores)
```
```python
print('Linear regression using stats.linregress')
print('parameters: b_1=%.2f b_0=%.2f \nmodel parameters: r_value =%.2f, p_value =%.2f, std error= %.3f' % (b_1, b_0, r_value, p_value, std_err))
print('\n')
```
Linear regression using stats.linregress
parameters: b_1=9.78 b_0=2.48
model parameters: r_value =0.98, p_value =0.00, std error= 0.453
```python
x = dataset.Hours
y = dataset.Scores
y_new = b_0 + b_1 * x
plt.figure(figsize=(4, 3))
ax = plt.axes()
ax.scatter(x, y, label = "original data")
ax.plot(x, y_new, "r-",label="regression line")
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.legend(loc = 'best')
plt.show()
```
## Multiple Linear Regression
Having dealt with a single independent variable $X$, we now consider $p$ independent variables $X_1, X_2, \dots, X_p$, with the dependent variable still a single $Y$. The model becomes:
$$Y = \beta_0 + \beta_1 X_1 + \beta_2 X_2 + \dots + \beta_p X_p + \epsilon$$
With more independent variables we also have to ask whether they are random; here we treat them as non-random (otherwise the problem becomes much harder). Collecting the independent variables into a matrix and treating $\beta$, $Y$, and $\epsilon$ as vectors, we can write the model in matrix form:
$$Y = X\beta + \epsilon$$
As before, we can set up the least squares problem:
$\begin{align*}
Q &= (X\beta - y)^T(X\beta - y) \\
&\vdots \\
\end{align*}$
Working through the algebra gives $\hat{\beta} = (X^T X)^{-1}X^T Y$.
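As a sanity check on the matrix formula, here is a small numpy sketch that solves the normal equations $(X^T X)\hat{\beta} = X^T Y$ directly; the synthetic data and the function name are assumptions for illustration, not part of the dataset used below.
```python
import numpy as np

def ols_normal_equations(X, y):
    """Estimate beta_hat = (X^T X)^{-1} X^T y, assuming X has a leading column of ones."""
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    # Solving the linear system is numerically preferable to forming an explicit inverse.
    return np.linalg.solve(X.T @ X, X.T @ y)

# illustrative usage with synthetic data
rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), rng.normal(size=(50, 3))])
beta_true = np.array([1.0, 2.0, -1.0, 0.5])
y = X @ beta_true + rng.normal(scale=0.1, size=50)
print(ols_normal_equations(X, y))  # should be close to beta_true
```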
Next, let us work through the code with an example.
The dataset below is used to predict petrol consumption (in millions of gallons) in 48 US states from the petrol tax (in cents), per capita income (in dollars), paved highways (in miles), and the proportion of the population with driver's licenses.
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv("https://raw.githubusercontent.com/TerenceLiu98/Using-R-Series/master/data_set/price.csv")
dataset.shape # dataset's shape
dataset.head() # the first five data of the dataset
```
|   | Index | One | Petrol tax (cents per gallon) | Average income (dollars) | Paved Highways (miles) | Proportion of population with driver's licenses | Consumption of petrol (millions of gallons) |
|---|-------|-----|-------------------------------|--------------------------|------------------------|--------------------------------------------------|---------------------------------------------|
| 0 | 1 | 1 | 9.0 | 3571 | 1976 | 0.525 | 541 |
| 1 | 2 | 1 | 9.0 | 4092 | 1250 | 0.572 | 524 |
| 2 | 3 | 1 | 9.0 | 3865 | 1586 | 0.580 | 561 |
| 3 | 4 | 1 | 7.5 | 4870 | 2351 | 0.529 | 414 |
| 4 | 5 | 1 | 8.0 | 4399 | 431 | 0.544 | 410 |
```python
dataset.describe() # to see some statistical details
```
|       | Index | One | Petrol tax (cents per gallon) | Average income (dollars) | Paved Highways (miles) | Proportion of population with driver's licenses | Consumption of petrol (millions of gallons) |
|-------|-------|-----|-------------------------------|--------------------------|------------------------|--------------------------------------------------|---------------------------------------------|
| count | 48.00 | 48.0 | 48.000000 | 48.000000 | 48.000000 | 48.000000 | 48.000000 |
| mean  | 24.50 | 1.0 | 7.668333 | 4241.833333 | 5565.416667 | 0.570333 | 576.770833 |
| std   | 14.00 | 0.0 | 0.950770 | 573.623768 | 3491.507166 | 0.055470 | 111.885816 |
| min   | 1.00 | 1.0 | 5.000000 | 3063.000000 | 431.000000 | 0.451000 | 344.000000 |
| 25%   | 12.75 | 1.0 | 7.000000 | 3739.000000 | 3110.250000 | 0.529750 | 509.500000 |
| 50%   | 24.50 | 1.0 | 7.500000 | 4298.000000 | 4735.500000 | 0.564500 | 568.500000 |
| 75%   | 36.25 | 1.0 | 8.125000 | 4578.750000 | 7156.000000 | 0.595250 | 632.750000 |
| max   | 48.00 | 1.0 | 10.000000 | 5342.000000 | 17782.000000 | 0.724000 | 968.000000 |
```python
dataset.dtypes.index # get the row's name of the dataset, this can make next step easier
```
Index(['Index', 'One', 'Petrol tax (cents per gallon)',
'Average income (dollars)', 'Paved Highways (miles)',
'Proportion of population with driver's licenses',
'Consumption of petrol (millions of gallons)'],
dtype='object')
```python
X = dataset[['Petrol tax (cents per gallon)',
'Average income (dollars)', 'Paved Highways (miles)',
"Proportion of population with driver's licenses"]]
y = dataset['Consumption of petrol (millions of gallons)']
```
To train and evaluate the model, we first split the data into training and test sets with `train_test_split`:
```python
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
```
As in simple linear regression, in the multiple-regression case the model has to find the optimal coefficients for all of the variables:
```python
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
coeff_df = pd.DataFrame(regressor.coef_, X.columns, columns=['Coefficient'])
coeff_df
```
|                                                  | Coefficient |
|--------------------------------------------------|-------------|
| Petrol tax (cents per gallon)                    | -40.016660  |
| Average income (dollars)                         | -0.065413   |
| Paved Highways (miles)                           | -0.004741   |
| Proportion of population with driver's licenses  | 1341.862121 |
Next, we use the fitted model to make predictions on the test set:
```python
y_pred = regressor.predict(X_test)
```
```python
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df
```
|    | Actual | Predicted  |
|----|--------|------------|
| 29 | 534    | 469.391989 |
| 4  | 410    | 545.645464 |
| 26 | 577    | 589.668394 |
| 30 | 571    | 569.730413 |
| 32 | 577    | 649.774809 |
| 37 | 704    | 646.631164 |
| 34 | 487    | 511.608148 |
| 40 | 587    | 672.475177 |
| 7  | 467    | 502.074782 |
| 10 | 580    | 501.270734 |
```python
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
```
Mean Absolute Error: 56.82224747896468
Mean Squared Error: 4666.344787588359
Root Mean Squared Error: 68.31064915215167
|
[STATEMENT]
lemma midpoint_col:
assumes "M Midpoint A B"
shows "Col M A B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Col M A B
[PROOF STEP]
using assms bet_col col_permutation_4 midpoint_bet
[PROOF STATE]
proof (prove)
using this:
M Midpoint A B
Bet ?A ?B ?C \<Longrightarrow> Col ?A ?B ?C
Col ?A ?B ?C \<Longrightarrow> Col ?B ?A ?C
?B Midpoint ?A ?C \<Longrightarrow> Bet ?A ?B ?C
goal (1 subgoal):
1. Col M A B
[PROOF STEP]
by blast |
[GOAL]
E : Type u_1
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
inst✝¹ : HasContDiffBump E
inst✝ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
x : E
⊢ ContDiffBump.normed f μ (c - x) = ContDiffBump.normed f μ (c + x)
[PROOFSTEP]
simp_rw [f.normed_def, f.sub]
[GOAL]
E : Type u_1
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
inst✝¹ : HasContDiffBump E
inst✝ : MeasurableSpace E
c : E
f✝ : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
f : ContDiffBump 0
x : E
⊢ ContDiffBump.normed f μ (-x) = ContDiffBump.normed f μ x
[PROOFSTEP]
simp_rw [f.normed_def, f.neg]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ 0 < ∫ (x : E), ↑f x ∂μ
[PROOFSTEP]
refine' (integral_pos_iff_support_of_nonneg f.nonneg' f.integrable).mpr _
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ 0 < ↑↑μ (support fun i => ↑f i)
[PROOFSTEP]
rw [f.support_eq]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ 0 < ↑↑μ (ball c f.rOut)
[PROOFSTEP]
exact measure_ball_pos μ c f.rOut_pos
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ ∫ (x : E), ContDiffBump.normed f μ x ∂μ = 1
[PROOFSTEP]
simp_rw [ContDiffBump.normed, div_eq_mul_inv, mul_comm (f _), ← smul_eq_mul, integral_smul]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ (∫ (x : E), ↑f x ∂μ)⁻¹ • ∫ (x : E), ↑f x ∂μ = 1
[PROOFSTEP]
exact inv_mul_cancel f.integral_pos.ne'
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ support (ContDiffBump.normed f μ) = ball c f.rOut
[PROOFSTEP]
unfold ContDiffBump.normed
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ (support fun x => ↑f x / ∫ (x : E), ↑f x ∂μ) = ball c f.rOut
[PROOFSTEP]
rw [support_div, f.support_eq, support_const f.integral_pos.ne', inter_univ]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ tsupport (ContDiffBump.normed f μ) = closedBall c f.rOut
[PROOFSTEP]
rw [tsupport, f.support_normed_eq, closure_ball _ f.rOut_pos.ne']
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ HasCompactSupport (ContDiffBump.normed f μ)
[PROOFSTEP]
simp only [HasCompactSupport, f.tsupport_normed_eq (μ := μ), isCompact_closedBall]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
ι : Type u_2
φ : ι → ContDiffBump c
l : Filter ι
hφ : Tendsto (fun i => (φ i).rOut) l (𝓝 0)
⊢ Tendsto (fun i => support fun x => ContDiffBump.normed (φ i) μ x) l (smallSets (𝓝 c))
[PROOFSTEP]
simp_rw [NormedAddCommGroup.tendsto_nhds_zero, Real.norm_eq_abs, abs_eq_self.mpr (φ _).rOut_pos.le] at hφ
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
ι : Type u_2
φ : ι → ContDiffBump c
l : Filter ι
hφ : ∀ (ε : ℝ), ε > 0 → ∀ᶠ (x : ι) in l, (φ x).rOut < ε
⊢ Tendsto (fun i => support fun x => ContDiffBump.normed (φ i) μ x) l (smallSets (𝓝 c))
[PROOFSTEP]
rw [nhds_basis_ball.smallSets.tendsto_right_iff]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
ι : Type u_2
φ : ι → ContDiffBump c
l : Filter ι
hφ : ∀ (ε : ℝ), ε > 0 → ∀ᶠ (x : ι) in l, (φ x).rOut < ε
⊢ ∀ (i : ℝ), 0 < i → ∀ᶠ (x : ι) in l, (support fun x_1 => ContDiffBump.normed (φ x) μ x_1) ∈ 𝒫 ball c i
[PROOFSTEP]
refine fun ε hε ↦ (hφ ε hε).mono fun i hi ↦ ?_
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
ι : Type u_2
φ : ι → ContDiffBump c
l : Filter ι
hφ : ∀ (ε : ℝ), ε > 0 → ∀ᶠ (x : ι) in l, (φ x).rOut < ε
ε : ℝ
hε : 0 < ε
i : ι
hi : (φ i).rOut < ε
⊢ (support fun x => ContDiffBump.normed (φ i) μ x) ∈ 𝒫 ball c ε
[PROOFSTEP]
rw [(φ i).support_normed_eq]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
ι : Type u_2
φ : ι → ContDiffBump c
l : Filter ι
hφ : ∀ (ε : ℝ), ε > 0 → ∀ᶠ (x : ι) in l, (φ x).rOut < ε
ε : ℝ
hε : 0 < ε
i : ι
hi : (φ i).rOut < ε
⊢ ball c (φ i).rOut ∈ 𝒫 ball c ε
[PROOFSTEP]
exact ball_subset_ball hi.le
[GOAL]
E : Type u_1
inst✝¹⁰ : NormedAddCommGroup E
inst✝⁹ : NormedSpace ℝ E
inst✝⁸ : HasContDiffBump E
inst✝⁷ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝⁶ : BorelSpace E
inst✝⁵ : FiniteDimensional ℝ E
inst✝⁴ : IsLocallyFiniteMeasure μ
inst✝³ : IsOpenPosMeasure μ
X : Type u_2
inst✝² : NormedAddCommGroup X
inst✝¹ : NormedSpace ℝ X
inst✝ : CompleteSpace X
z : X
⊢ ∫ (x : E), ContDiffBump.normed f μ x • z ∂μ = z
[PROOFSTEP]
simp_rw [integral_smul_const, f.integral_normed (μ := μ), one_smul]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ ENNReal.toReal (↑↑μ (closedBall c f.rIn)) ≤ ∫ (x : E), ↑f x ∂μ
[PROOFSTEP]
calc
(μ (closedBall c f.rIn)).toReal = ∫ x in closedBall c f.rIn, 1 ∂μ := by simp
_ = ∫ x in closedBall c f.rIn, f x ∂μ :=
(set_integral_congr (measurableSet_closedBall) (fun x hx ↦ (one_of_mem_closedBall f hx).symm))
_ ≤ ∫ x, f x ∂μ := set_integral_le_integral f.integrable (eventually_of_forall (fun x ↦ f.nonneg))
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ ENNReal.toReal (↑↑μ (closedBall c f.rIn)) = ∫ (x : E) in closedBall c f.rIn, 1 ∂μ
[PROOFSTEP]
simp
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
x : E
⊢ ContDiffBump.normed f μ x ≤ 1 / ENNReal.toReal (↑↑μ (closedBall c f.rIn))
[PROOFSTEP]
rw [normed_def]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
x : E
⊢ ↑f x / ∫ (x : E), ↑f x ∂μ ≤ 1 / ENNReal.toReal (↑↑μ (closedBall c f.rIn))
[PROOFSTEP]
gcongr
[GOAL]
case hd
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
x : E
⊢ 0 < ENNReal.toReal (↑↑μ (closedBall c f.rIn))
[PROOFSTEP]
exact ENNReal.toReal_pos (measure_closedBall_pos _ _ f.rIn_pos).ne' measure_closedBall_lt_top.ne
[GOAL]
case hac
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
x : E
⊢ ↑f x ≤ 1
[PROOFSTEP]
exact f.le_one
[GOAL]
case hbd
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
x : E
⊢ ENNReal.toReal (↑↑μ (closedBall c f.rIn)) ≤ ∫ (x : E), ↑f x ∂μ
[PROOFSTEP]
exact f.measure_closedBall_le_integral μ
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ ∫ (x : E), ↑f x ∂μ ≤ ENNReal.toReal (↑↑μ (closedBall c f.rOut))
[PROOFSTEP]
calc
∫ x, f x ∂μ = ∫ x in closedBall c f.rOut, f x ∂μ :=
by
apply (set_integral_eq_integral_of_forall_compl_eq_zero (fun x hx ↦ ?_)).symm
apply f.zero_of_le_dist (le_of_lt _)
simpa using hx
_ ≤ ∫ x in closedBall c f.rOut, 1 ∂μ :=
by
apply set_integral_mono f.integrable.integrableOn _ (fun x ↦ f.le_one)
simp [measure_closedBall_lt_top]
_ = (μ (closedBall c f.rOut)).toReal := by simp
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ ∫ (x : E), ↑f x ∂μ = ∫ (x : E) in closedBall c f.rOut, ↑f x ∂μ
[PROOFSTEP]
apply (set_integral_eq_integral_of_forall_compl_eq_zero (fun x hx ↦ ?_)).symm
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
x : E
hx : ¬x ∈ closedBall c f.rOut
⊢ ↑f x = 0
[PROOFSTEP]
apply f.zero_of_le_dist (le_of_lt _)
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
x : E
hx : ¬x ∈ closedBall c f.rOut
⊢ f.rOut < dist x c
[PROOFSTEP]
simpa using hx
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ ∫ (x : E) in closedBall c f.rOut, ↑f x ∂μ ≤ ∫ (x : E) in closedBall c f.rOut, 1 ∂μ
[PROOFSTEP]
apply set_integral_mono f.integrable.integrableOn _ (fun x ↦ f.le_one)
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ IntegrableOn (fun x => 1) (closedBall c f.rOut)
[PROOFSTEP]
simp [measure_closedBall_lt_top]
[GOAL]
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace ℝ E
inst✝⁵ : HasContDiffBump E
inst✝⁴ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝³ : BorelSpace E
inst✝² : FiniteDimensional ℝ E
inst✝¹ : IsLocallyFiniteMeasure μ
inst✝ : IsOpenPosMeasure μ
⊢ ∫ (x : E) in closedBall c f.rOut, 1 ∂μ = ENNReal.toReal (↑↑μ (closedBall c f.rOut))
[PROOFSTEP]
simp
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
⊢ ENNReal.toReal (↑↑μ (closedBall c f.rOut)) / K ^ finrank ℝ E ≤ ∫ (x : E), ↑f x ∂μ
[PROOFSTEP]
have K_pos : 0 < K := by simpa [f.rIn_pos, not_lt.2 f.rIn_pos.le] using mul_pos_iff.1 (f.rOut_pos.trans_le h)
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
⊢ 0 < K
[PROOFSTEP]
simpa [f.rIn_pos, not_lt.2 f.rIn_pos.le] using mul_pos_iff.1 (f.rOut_pos.trans_le h)
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
K_pos : 0 < K
⊢ ENNReal.toReal (↑↑μ (closedBall c f.rOut)) / K ^ finrank ℝ E ≤ ∫ (x : E), ↑f x ∂μ
[PROOFSTEP]
apply le_trans _ (f.measure_closedBall_le_integral μ)
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
K_pos : 0 < K
⊢ ENNReal.toReal (↑↑μ (closedBall c f.rOut)) / K ^ finrank ℝ E ≤ ENNReal.toReal (↑↑μ (closedBall c f.rIn))
[PROOFSTEP]
rw [div_le_iff (pow_pos K_pos _), addHaar_closedBall' _ _ f.rIn_pos.le, addHaar_closedBall' _ _ f.rOut_pos.le,
ENNReal.toReal_mul, ENNReal.toReal_mul, ENNReal.toReal_ofReal (pow_nonneg f.rOut_pos.le _),
ENNReal.toReal_ofReal (pow_nonneg f.rIn_pos.le _), mul_assoc, mul_comm _ (K ^ _), ← mul_assoc, ← mul_pow,
mul_comm _ K]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
K_pos : 0 < K
⊢ f.rOut ^ finrank ℝ E * ENNReal.toReal (↑↑μ (closedBall 0 1)) ≤
(K * f.rIn) ^ finrank ℝ E * ENNReal.toReal (↑↑μ (closedBall 0 1))
[PROOFSTEP]
gcongr
[GOAL]
case h.ha
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
K_pos : 0 < K
⊢ 0 ≤ f.rOut
[PROOFSTEP]
exact f.rOut_pos.le
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
⊢ ContDiffBump.normed f μ x ≤ K ^ finrank ℝ E / ENNReal.toReal (↑↑μ (closedBall c f.rOut))
[PROOFSTEP]
have K_pos : 0 < K := by simpa [f.rIn_pos, not_lt.2 f.rIn_pos.le] using mul_pos_iff.1 (f.rOut_pos.trans_le h)
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
⊢ 0 < K
[PROOFSTEP]
simpa [f.rIn_pos, not_lt.2 f.rIn_pos.le] using mul_pos_iff.1 (f.rOut_pos.trans_le h)
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
K_pos : 0 < K
⊢ ContDiffBump.normed f μ x ≤ K ^ finrank ℝ E / ENNReal.toReal (↑↑μ (closedBall c f.rOut))
[PROOFSTEP]
have : f x / ∫ y, f y ∂μ ≤ 1 / ∫ y, f y ∂μ := by
gcongr
· exact f.integral_pos.le
· exact f.le_one
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
K_pos : 0 < K
⊢ ↑f x / ∫ (y : E), ↑f y ∂μ ≤ 1 / ∫ (y : E), ↑f y ∂μ
[PROOFSTEP]
gcongr
[GOAL]
case hc
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
K_pos : 0 < K
⊢ 0 ≤ ∫ (y : E), ↑f y ∂μ
[PROOFSTEP]
exact f.integral_pos.le
[GOAL]
case h
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
K_pos : 0 < K
⊢ ↑f x ≤ 1
[PROOFSTEP]
exact f.le_one
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
K_pos : 0 < K
this : ↑f x / ∫ (y : E), ↑f y ∂μ ≤ 1 / ∫ (y : E), ↑f y ∂μ
⊢ ContDiffBump.normed f μ x ≤ K ^ finrank ℝ E / ENNReal.toReal (↑↑μ (closedBall c f.rOut))
[PROOFSTEP]
apply this.trans
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
K_pos : 0 < K
this : ↑f x / ∫ (y : E), ↑f y ∂μ ≤ 1 / ∫ (y : E), ↑f y ∂μ
⊢ 1 / ∫ (y : E), ↑f y ∂μ ≤ K ^ finrank ℝ E / ENNReal.toReal (↑↑μ (closedBall c f.rOut))
[PROOFSTEP]
rw [div_le_div_iff f.integral_pos, one_mul, ← div_le_iff' (pow_pos K_pos _)]
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
K_pos : 0 < K
this : ↑f x / ∫ (y : E), ↑f y ∂μ ≤ 1 / ∫ (y : E), ↑f y ∂μ
⊢ ENNReal.toReal (↑↑μ (closedBall c f.rOut)) / K ^ finrank ℝ E ≤ ∫ (x : E), ↑f x ∂μ
[PROOFSTEP]
exact f.measure_closedBall_div_le_integral μ K h
[GOAL]
E : Type u_1
inst✝⁸ : NormedAddCommGroup E
inst✝⁷ : NormedSpace ℝ E
inst✝⁶ : HasContDiffBump E
inst✝⁵ : MeasurableSpace E
c : E
f : ContDiffBump c
x✝ : E
n : ℕ∞
μ : Measure E
inst✝⁴ : BorelSpace E
inst✝³ : FiniteDimensional ℝ E
inst✝² : IsLocallyFiniteMeasure μ
inst✝¹ : IsOpenPosMeasure μ
inst✝ : IsAddHaarMeasure μ
K : ℝ
h : f.rOut ≤ K * f.rIn
x : E
K_pos : 0 < K
this : ↑f x / ∫ (y : E), ↑f y ∂μ ≤ 1 / ∫ (y : E), ↑f y ∂μ
⊢ 0 < ENNReal.toReal (↑↑μ (closedBall c f.rOut))
[PROOFSTEP]
exact ENNReal.toReal_pos (measure_closedBall_pos _ _ f.rOut_pos).ne' measure_closedBall_lt_top.ne
|
theory Exe3p4
imports Main
begin
inductive star :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" where
refl: "star r x x" |
step: "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
lemma star_post: "star r x y \<Longrightarrow> r y z \<Longrightarrow> star r x z"
apply(induction rule: star.induct)
apply(auto simp add: refl step)
done
lemma star_trans: "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
apply(induction rule: star.induct)
apply(auto simp add: step)
done
inductive iter :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" where
iter_refl: "iter r 0 x x" |
iter_step: "r x y \<Longrightarrow> iter r n y z \<Longrightarrow> iter r (Suc n) x z"
thm exI exE
lemma "star r x y \<Longrightarrow> \<exists>n. iter r n x y"
proof (induction rule: star.induct)
case (refl r x)
then show ?case
next
case (step r x y z)
then show ?case sorry
qed
|
[STATEMENT]
lemma op_cat_Hom[cat_op_simps]: "Hom (op_cat \<CC>) a b = Hom \<CC> b a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Hom (op_cat \<CC>) a b = Hom \<CC> b a
[PROOF STEP]
unfolding cat_op_simps
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Hom \<CC> b a = Hom \<CC> b a
[PROOF STEP]
by simp |