!!####LIBRARY MODULE: LIB_SPARSKIT_unary
MODULE LIB_SPARSKIT_unary
!!###PURPOSE
!! Provide unary operations on <SPARSKIT> matrices.
!!###MODIFICATIONS
!! {May2006}
!! [waw] | William A. Wieselquist | william.wieselquist AT gmail.com
!! @ conversion to module
!! @ added rperm666/dperm666 to handle the case where a dummy is
!! passed for a because the job flag indicates the values are not
!! needed; the 666 versions take an integer array in place of the
!! real*8 array a.
!!###DESCRIPTION
c----------------------------------------------------------------------c
c S P A R S K I T c
c----------------------------------------------------------------------c
c UNARY SUBROUTINES MODULE c
c----------------------------------------------------------------------c
c contents: c
c---------- c
c submat : extracts a submatrix from a sparse matrix. c
c filter : filters elements from a matrix according to their magnitude.c
c filterm: same as above, but for the MSR format c
c csort : sorts the elements in increasing order of columns c
c clncsr : clean up the CSR format matrix, remove duplicate entry, etc c
c transp : in-place transposition routine (see also csrcsc in formats) c
c copmat : copy of a matrix into another matrix (both stored csr) c
c msrcop : copies a matrix in MSR format into a matrix in MSR format c
c getelm : returns a(i,j) for any (i,j) from a CSR-stored matrix. c
c getdia : extracts a specified diagonal from a matrix. c
c getl : extracts lower triangular part c
c getu : extracts upper triangular part c
c levels : gets the level scheduling structure for lower triangular c
c matrices. c
c amask : extracts C = A mask M c
c rperm : permutes the rows of a matrix (B = P A) c
c cperm : permutes the columns of a matrix (B = A Q) c
c dperm : permutes both the rows and columns of a matrix (B = P A Q ) c
c dperm1 : general extraction routine (extracts arbitrary rows) c
c dperm2 : general submatrix permutation/extraction routine c
c dmperm : symmetric permutation of row and column (B=PAP') in MSR fmt c
c dvperm : permutes a real vector (in-place) c
c ivperm : permutes an integer vector (in-place) c
c retmx : returns the max absolute value in each row of the matrix c
c diapos : returns the positions of the diagonal elements in A. c
c extbdg : extracts the main diagonal blocks of a matrix. c
c getbwd : returns the bandwidth information on a matrix. c
c blkfnd : finds the block-size of a matrix. c
c blkchk : checks whether a given integer is the block size of A. c
c infdia : obtains information on the diagonals of A. c
c amubdg : gets number of nonzeros in each row of A*B (as well as NNZ) c
c aplbdg : gets number of nonzeros in each row of A+B (as well as NNZ) c
c rnrms : computes the norms of the rows of A c
c cnrms : computes the norms of the columns of A c
c addblk : Adds a matrix B into a block of A. c
c get1up : Collects the first elements of each row of the upper c
c triangular portion of the matrix. c
c xtrows : extracts given rows from a matrix in CSR format. c
c csrkvstr: Finds block row partitioning of matrix in CSR format c
c csrkvstc: Finds block column partitioning of matrix in CSR format c
c kvstmerge: Merges block partitionings, for conformal row/col pattern c
c----------------------------------------------------------------------c
!!###MODULE PROCEDURES
CONTAINS
subroutine submat (n,job,i1,i2,j1,j2,a,ja,ia,nr,nc,ao,jao,iao)
integer n,job,i1,i2,j1,j2,nr,nc,ia(*),ja(*),jao(*),iao(*)
real*8 a(*),ao(*)
c-----------------------------------------------------------------------
c extracts the submatrix A(i1:i2,j1:j2) and puts the result in
c matrix ao,iao,jao
c---- In place: ao,jao,iao may be the same as a,ja,ia.
c--------------
c on input
c---------
c n = row dimension of the matrix
c i1,i2 = two integers with i2 .ge. i1 indicating the range of rows to be
c extracted.
c j1,j2 = two integers with j2 .ge. j1 indicating the range of columns
c to be extracted.
c * There is no checking whether the input values for i1, i2, j1,
c j2 are between 1 and n.
c a,
c ja,
c ia = matrix in compressed sparse row format.
c
c job = job indicator: if job .ne. 1 then the real values in a are NOT
c extracted, only the column indices (i.e. data structure) are.
c otherwise values as well as column indices are extracted...
c
c on output
c--------------
c nr = number of rows of submatrix
c nc = number of columns of submatrix
c * if either of nr or nc is nonpositive the code will quit.
c
c ao,
c jao,iao = extracted matrix in general sparse format with jao containing
c the column indices,and iao being the pointer to the beginning
c of the row,in arrays a,ja.
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
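c
c example (illustrative sketch, not part of the original SPARSKIT
c documentation): assuming a, ja, ia hold an n x n matrix in CSR
c format, the 2 x 3 block A(2:3,1:3), pattern and values, could be
c extracted with
c      call submat (n,1,2,3,1,3,a,ja,ia,nr,nc,ao,jao,iao)
c on return nr = 2, nc = 3 and ao, jao, iao hold the block in CSR form.
c-----------------------------------------------------------------------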
nr = i2-i1+1
nc = j2-j1+1
c
if ( nr .le. 0 .or. nc .le. 0) return
c
klen = 0
c
c simple procedure. proceeds row-wise...
c
do 100 i = 1,nr
ii = i1+i-1
k1 = ia(ii)
k2 = ia(ii+1)-1
iao(i) = klen+1
c-----------------------------------------------------------------------
do 60 k=k1,k2
j = ja(k)
if (j .ge. j1 .and. j .le. j2) then
klen = klen+1
if (job .eq. 1) ao(klen) = a(k)
jao(klen) = j - j1+1
endif
60 continue
100 continue
iao(nr+1) = klen+1
return
c------------end-of submat----------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine filter(n,job,drptol,a,ja,ia,b,jb,ib,len,ierr)
real*8 a(*),b(*),drptol
integer ja(*),jb(*),ia(*),ib(*),n,job,len,ierr
c-----------------------------------------------------------------------
c This module removes any elements whose absolute value
c is small from an input matrix A and puts the resulting
c matrix in B. The input parameter job selects a definition
c of small.
c-----------------------------------------------------------------------
c on entry:
c---------
c n = integer. row dimension of matrix
c job = integer. used to determine strategy chosen by caller to
c drop elements from matrix A.
c job = 1
c Elements whose absolute value is less than the
c drop tolerance are removed.
c job = 2
c Elements whose absolute value is less than the
c product of the drop tolerance and the Euclidean
c norm of the row are removed.
c job = 3
c              Elements whose absolute value is less than the
c product of the drop tolerance and the largest
c element in the row are removed.
c
c drptol = real. drop tolerance used for dropping strategy.
c a
c ja
c ia = input matrix in compressed sparse format
c len = integer. the amount of space available in arrays b and jb.
c
c on return:
c----------
c b
c jb
c ib = resulting matrix in compressed sparse format.
c
c ierr = integer. containing error message.
c ierr .eq. 0 indicates normal return
c          ierr .gt. 0   indicates that there isn't enough
c          space in b and jb to store the resulting matrix.
c ierr then contains the row number where filter stopped.
c note:
c------ This routine is in place (b, jb, ib can be the same as
c       a, ja, ia, in which case the result will be overwritten).
c----------------------------------------------------------------------c
c contributed by David Day, Sep 19, 1989. c
c----------------------------------------------------------------------c
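c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to drop every entry of an n x n CSR matrix a, ja, ia
c whose absolute value is below 1.0d-8, writing the result to b, jb, ib
c (len being the declared size of b and jb), one could use
c      call filter (n,1,1.0d-8,a,ja,ia,b,jb,ib,len,ierr)
c and check that ierr is zero on return.
c-----------------------------------------------------------------------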
c local variables
real*8 norm,loctol
integer index,row,k,k1,k2
c
      ierr = 0
      index = 1
do 10 row= 1,n
k1 = ia(row)
k2 = ia(row+1) - 1
ib(row) = index
goto (100,200,300) job
100 norm = 1.0d0
goto 400
200 norm = 0.0d0
do 22 k = k1,k2
norm = norm + a(k) * a(k)
22 continue
norm = sqrt(norm)
goto 400
300 norm = 0.0d0
do 23 k = k1,k2
if( abs(a(k)) .gt. norm) then
norm = abs(a(k))
endif
23 continue
400 loctol = drptol * norm
do 30 k = k1,k2
if( abs(a(k)) .gt. loctol)then
if (index .gt. len) then
ierr = row
return
endif
b(index) = a(k)
jb(index) = ja(k)
index = index + 1
endif
30 continue
10 continue
ib(n+1) = index
return
c--------------------end-of-filter -------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine filterm (n,job,drop,a,ja,b,jb,len,ierr)
real*8 a(*),b(*),drop
integer ja(*),jb(*),n,job,len,ierr
c-----------------------------------------------------------------------
c This subroutine removes any elements whose absolute value
c is small from an input matrix A. Same as filter but
c uses the MSR format.
c-----------------------------------------------------------------------
c on entry:
c---------
c n = integer. row dimension of matrix
c job = integer. used to determine strategy chosen by caller to
c drop elements from matrix A.
c job = 1
c Elements whose absolute value is less than the
c drop tolerance are removed.
c job = 2
c Elements whose absolute value is less than the
c product of the drop tolerance and the Euclidean
c norm of the row are removed.
c job = 3
c              Elements whose absolute value is less than the
c product of the drop tolerance and the largest
c element in the row are removed.
c
c drop = real. drop tolerance used for dropping strategy.
c a
c ja    = input matrix in Modified Sparse Row (MSR) format
c len = integer. the amount of space in arrays b and jb.
c
c on return:
c----------
c
c b, jb = resulting matrix in Modified Sparse Row (MSR) format
c
c ierr = integer. containing error message.
c ierr .eq. 0 indicates normal return
c          ierr .gt. 0   indicates that there isn't enough
c          space in b and jb to store the resulting matrix.
c ierr then contains the row number where filter stopped.
c note:
c------ This routine is in place (b, jb can be the same as
c       a, ja, in which case the result will be overwritten).
c----------------------------------------------------------------------c
c contributed by David Day, Sep 19, 1989. c
c----------------------------------------------------------------------c
c local variables
c
real*8 norm,loctol
integer index,row,k,k1,k2
c
      ierr = 0
      index = n+2
do 10 row= 1,n
k1 = ja(row)
k2 = ja(row+1) - 1
jb(row) = index
goto (100,200,300) job
100 norm = 1.0d0
goto 400
200 norm = a(row)**2
do 22 k = k1,k2
norm = norm + a(k) * a(k)
22 continue
norm = sqrt(norm)
goto 400
300 norm = abs(a(row))
do 23 k = k1,k2
norm = max(abs(a(k)),norm)
23 continue
400 loctol = drop * norm
do 30 k = k1,k2
if( abs(a(k)) .gt. loctol)then
if (index .gt. len) then
ierr = row
return
endif
b(index) = a(k)
jb(index) = ja(k)
index = index + 1
endif
30 continue
10 continue
jb(n+1) = index
return
c--------------------end-of-filterm-------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine csort (n,a,ja,ia,iwork,values)
logical values
integer n, ja(*), ia(n+1), iwork(*)
real*8 a(*)
c-----------------------------------------------------------------------
c This routine sorts the elements of a matrix (stored in Compressed
c Sparse Row Format) in increasing order of their column indices within
c each row. It uses a form of bucket sort with a cost of O(nnz) where
c nnz = number of nonzero elements.
c requires an integer work array of length 2*nnz.
c-----------------------------------------------------------------------
c on entry:
c---------
c n = the row dimension of the matrix
c a = the matrix A in compressed sparse row format.
c ja = the array of column indices of the elements in array a.
c ia = the array of pointers to the rows.
c iwork = integer work array of length max ( n+1, 2*nnz )
c where nnz = (ia(n+1)-ia(1)) ) .
c values= logical indicating whether or not the real values a(*) must
c also be permuted. if (.not. values) then the array a is not
c touched by csort and can be a dummy array.
c
c on return:
c----------
c the matrix stored in the structure a, ja, ia is permuted in such a
c way that the column indices are in increasing order within each row.
c iwork(1:nnz) contains the permutation used to rearrange the elements.
c-----------------------------------------------------------------------
c Y. Saad - Feb. 1, 1991.
c-----------------------------------------------------------------------
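c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to sort the column indices and values of an n x n
c CSR matrix in place, with iwork an integer work array of length at
c least max(n+1, 2*(ia(n+1)-ia(1))), one could use
c      call csort (n,a,ja,ia,iwork,.true.)
c-----------------------------------------------------------------------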
c local variables
integer i, k, j, ifirst, nnz, next
c
c count the number of elements in each column
c
do 1 i=1,n+1
iwork(i) = 0
1 continue
do 3 i=1, n
do 2 k=ia(i), ia(i+1)-1
j = ja(k)+1
iwork(j) = iwork(j)+1
2 continue
3 continue
c
c compute pointers from lengths.
c
iwork(1) = 1
do 4 i=1,n
iwork(i+1) = iwork(i) + iwork(i+1)
4 continue
c
c get the positions of the nonzero elements in order of columns.
c
ifirst = ia(1)
nnz = ia(n+1)-ifirst
do 5 i=1,n
do 51 k=ia(i),ia(i+1)-1
j = ja(k)
next = iwork(j)
iwork(nnz+next) = k
iwork(j) = next+1
51 continue
5 continue
c
c convert to coordinate format
c
do 6 i=1, n
do 61 k=ia(i), ia(i+1)-1
iwork(k) = i
61 continue
6 continue
c
c loop to find permutation: for each element find the correct
c position in (sorted) arrays a, ja. Record this in iwork.
c
do 7 k=1, nnz
ko = iwork(nnz+k)
irow = iwork(ko)
next = ia(irow)
c
c the current element should go in next position in row. iwork
c records this position.
c
iwork(ko) = next
ia(irow) = next+1
7 continue
c
c perform an in-place permutation of the arrays.
c
call ivperm (nnz, ja(ifirst), iwork)
if (values) call dvperm (nnz, a(ifirst), iwork)
c
c reshift the pointers of the original matrix back.
c
do 8 i=n,1,-1
ia(i+1) = ia(i)
8 continue
ia(1) = ifirst
c
return
c---------------end-of-csort--------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine clncsr(job,value2,nrow,a,ja,ia,indu,iwk)
c .. Scalar Arguments ..
integer job, nrow, value2
c ..
c .. Array Arguments ..
integer ia(nrow+1),indu(nrow),iwk(nrow+1),ja(*)
real*8 a(*)
c ..
c
c This routine performs two tasks to clean up a CSR matrix
c -- remove duplicate/zero entries,
c -- perform a partial ordering, new order lower triangular part,
c main diagonal, upper triangular part.
c
c On entry:
c
c job = options
c 0 -- nothing is done
c 1 -- eliminate duplicate entries, zero entries.
c 2 -- eliminate duplicate entries and perform partial ordering.
c 3 -- eliminate duplicate entries, sort the entries in the
c              increasing order of column indices.
c
c value2 -- 0 the matrix is pattern only (a is not touched)
c 1 matrix has values too.
c nrow -- row dimension of the matrix
c a,ja,ia -- input matrix in CSR format
c
c On return:
c a,ja,ia -- cleaned matrix
c indu -- pointers to the beginning of the upper triangular
c portion if job > 1
c
c Work space:
c iwk -- integer work space of size nrow+1
c
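c     Example (illustrative sketch, not part of the original SPARSKIT
c     documentation): to remove duplicate/zero entries of a CSR matrix
c     with values and sort each row by column index, with indu and iwk
c     integer arrays of length nrow and nrow+1, one could use
c          call clncsr (3,1,nrow,a,ja,ia,indu,iwk)
c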
c .. Local Scalars ..
integer i,j,k,ko,ipos,kfirst,klast
real*8 tmp
c ..
c
if (job.le.0) return
c
c .. eliminate duplicate entries --
c array INDU is used as marker for existing indices, it is also the
c location of the entry.
c     IWK is used to store the old IA array.
c matrix is copied to squeeze out the space taken by the duplicated
c entries.
c
do 90 i = 1, nrow
indu(i) = 0
iwk(i) = ia(i)
90 continue
iwk(nrow+1) = ia(nrow+1)
k = 1
do 120 i = 1, nrow
ia(i) = k
ipos = iwk(i)
klast = iwk(i+1)
100 if (ipos.lt.klast) then
j = ja(ipos)
if (indu(j).eq.0) then
c .. new entry ..
if (value2.ne.0) then
if (a(ipos) .ne. 0.0D0) then
indu(j) = k
ja(k) = ja(ipos)
a(k) = a(ipos)
k = k + 1
endif
else
indu(j) = k
ja(k) = ja(ipos)
k = k + 1
endif
else if (value2.ne.0) then
c .. duplicate entry ..
a(indu(j)) = a(indu(j)) + a(ipos)
endif
ipos = ipos + 1
go to 100
endif
c .. remove marks before working on the next row ..
do 110 ipos = ia(i), k - 1
indu(ja(ipos)) = 0
110 continue
120 continue
ia(nrow+1) = k
if (job.le.1) return
c
c .. partial ordering ..
c split the matrix into strict upper/lower triangular
c     parts, INDU points to the beginning of the upper part.
c
do 140 i = 1, nrow
klast = ia(i+1) - 1
kfirst = ia(i)
130 if (klast.gt.kfirst) then
if (ja(klast).lt.i .and. ja(kfirst).ge.i) then
c .. swap klast with kfirst ..
j = ja(klast)
ja(klast) = ja(kfirst)
ja(kfirst) = j
if (value2.ne.0) then
tmp = a(klast)
a(klast) = a(kfirst)
a(kfirst) = tmp
endif
endif
if (ja(klast).ge.i)
& klast = klast - 1
if (ja(kfirst).lt.i)
& kfirst = kfirst + 1
go to 130
endif
c
if (ja(klast).lt.i) then
indu(i) = klast + 1
else
indu(i) = klast
endif
140 continue
if (job.le.2) return
c
c .. order the entries according to column indices
c     bubble sort is used
c
do 190 i = 1, nrow
do 160 ipos = ia(i), indu(i)-1
do 150 j = indu(i)-1, ipos+1, -1
k = j - 1
if (ja(k).gt.ja(j)) then
ko = ja(k)
ja(k) = ja(j)
ja(j) = ko
if (value2.ne.0) then
tmp = a(k)
a(k) = a(j)
a(j) = tmp
endif
endif
150 continue
160 continue
do 180 ipos = indu(i), ia(i+1)-1
do 170 j = ia(i+1)-1, ipos+1, -1
k = j - 1
if (ja(k).gt.ja(j)) then
ko = ja(k)
ja(k) = ja(j)
ja(j) = ko
if (value2.ne.0) then
tmp = a(k)
a(k) = a(j)
a(j) = tmp
endif
endif
170 continue
180 continue
190 continue
return
c---- end of clncsr ----------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine copmat (nrow,a,ja,ia,ao,jao,iao,ipos,job)
real*8 a(*),ao(*)
integer nrow, ia(*),ja(*),jao(*),iao(*), ipos, job
c----------------------------------------------------------------------
c copies the matrix a, ja, ia, into the matrix ao, jao, iao.
c----------------------------------------------------------------------
c on entry:
c---------
c nrow = row dimension of the matrix
c a,
c ja,
c ia = input matrix in compressed sparse row format.
c ipos = integer. indicates the position in the array ao, jao
c where the first element should be copied. Thus
c iao(1) = ipos on return.
c job   = job indicator. if (job .ne. 1) the values are not copied
c (i.e., pattern only is copied in the form of arrays ja, ia).
c
c on return:
c----------
c ao,
c jao,
c iao = output matrix containing the same data as a, ja, ia.
c-----------------------------------------------------------------------
c Y. Saad, March 1990.
c-----------------------------------------------------------------------
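c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to make a plain copy of a, ja, ia (nrow rows,
c values included) into ao, jao, iao starting at position 1, one
c could use
c      call copmat (nrow,a,ja,ia,ao,jao,iao,1,1)
c-----------------------------------------------------------------------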
c local variables
integer kst, i, k
c
kst = ipos -ia(1)
do 100 i = 1, nrow+1
iao(i) = ia(i) + kst
100 continue
c
do 200 k=ia(1), ia(nrow+1)-1
jao(kst+k)= ja(k)
200 continue
c
if (job .ne. 1) return
do 201 k=ia(1), ia(nrow+1)-1
ao(kst+k) = a(k)
201 continue
c
return
c--------end-of-copmat -------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine msrcop (nrow,a,ja,ao,jao,job)
real*8 a(*),ao(*)
integer nrow, ja(*),jao(*), job
c----------------------------------------------------------------------
c copies the MSR matrix a, ja, into the MSR matrix ao, jao
c----------------------------------------------------------------------
c on entry:
c---------
c nrow = row dimension of the matrix
c a,ja = input matrix in Modified compressed sparse row format.
c job = job indicator. Values are not copied if job .ne. 1
c
c on return:
c----------
c ao, jao = output matrix containing the same data as a, ja.
c-----------------------------------------------------------------------
c Y. Saad,
c-----------------------------------------------------------------------
c local variables
integer i, k
c
do 100 i = 1, nrow+1
jao(i) = ja(i)
100 continue
c
do 200 k=ja(1), ja(nrow+1)-1
jao(k)= ja(k)
200 continue
c
if (job .ne. 1) return
do 201 k=ja(1), ja(nrow+1)-1
ao(k) = a(k)
201 continue
do 202 k=1,nrow
ao(k) = a(k)
202 continue
c
return
c--------end-of-msrcop -------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
double precision function getelm (i,j,a,ja,ia,iadd,sorted)
c-----------------------------------------------------------------------
c purpose:
c --------
c this function returns the element a(i,j) of a matrix a,
c for any pair (i,j). the matrix is assumed to be stored
c in compressed sparse row (csr) format. getelm performs a
c binary search in the case where it is known that the elements
c are sorted so that the column indices are in increasing order.
c also returns (in iadd) the address of the element a(i,j) in
c arrays a and ja when the search is successful (zero if not).
c-----
c first contributed by noel nachtigal (mit).
c recoded jan. 20, 1991, by y. saad [in particular
c added handling of the non-sorted case + the iadd output]
c-----------------------------------------------------------------------
c parameters:
c -----------
c on entry:
c----------
c i = the row index of the element sought (input).
c j = the column index of the element sought (input).
c a = the matrix a in compressed sparse row format (input).
c ja = the array of column indices (input).
c ia = the array of pointers to the rows' data (input).
c sorted = logical indicating whether the matrix is known to
c have its column indices sorted in increasing order
c (sorted=.true.) or not (sorted=.false.).
c (input).
c on return:
c-----------
c getelm = value of a(i,j).
c iadd = address of element a(i,j) in arrays a, ja if found,
c zero if not found. (output)
c
c note: the inputs i and j are not checked for validity.
c-----------------------------------------------------------------------
c noel m. nachtigal october 28, 1990 -- youcef saad jan 20, 1991.
c-----------------------------------------------------------------------
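c example (illustrative sketch, not part of the original SPARSKIT
c documentation): assuming the column indices within each row are
c sorted, the entry in row 3 and column 7 could be looked up with
c      elm = getelm (3,7,a,ja,ia,iadd,.true.)
c where elm is a double precision variable; iadd is returned as 0
c if a(3,7) is not stored.
c-----------------------------------------------------------------------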
integer i, ia(*), iadd, j, ja(*)
double precision a(*)
logical sorted
c
c local variables.
c
integer ibeg, iend, imid, k
c
c initialization
c
iadd = 0
getelm = 0.0
ibeg = ia(i)
iend = ia(i+1)-1
c
c case where matrix is not necessarily sorted
c
if (.not. sorted) then
c
c scan the row - exit as soon as a(i,j) is found
c
do 5 k=ibeg, iend
if (ja(k) .eq. j) then
iadd = k
goto 20
endif
5 continue
c
c end unsorted case. begin sorted case
c
else
c
c begin binary search. compute the middle index.
c
10 imid = ( ibeg + iend ) / 2
c
c test if found
c
if (ja(imid).eq.j) then
iadd = imid
goto 20
endif
if (ibeg .ge. iend) goto 20
c
c else update the interval bounds.
c
if (ja(imid).gt.j) then
iend = imid -1
else
ibeg = imid +1
endif
goto 10
c
c end both cases
c
endif
c
20 if (iadd .ne. 0) getelm = a(iadd)
c
return
c--------end-of-getelm--------------------------------------------------
c-----------------------------------------------------------------------
end function
c-----------------------------------------------------------------------
subroutine getdia (nrow,ncol,job,a,ja,ia,len,diag,idiag,ioff)
real*8 diag(*),a(*)
integer nrow, ncol, job, len, ioff, ia(*), ja(*), idiag(*)
c-----------------------------------------------------------------------
c this subroutine extracts a given diagonal from a matrix stored in csr
c format. the output matrix may be transformed with the diagonal removed
c from it if desired (as indicated by job.)
c-----------------------------------------------------------------------
c our definition of a diagonal of a matrix is a vector of length nrow
c (always) which contains the elements in rows 1 to nrow of
c the matrix that are contained in the diagonal offset by ioff
c with respect to the main diagonal. if the diagonal element
c falls outside the matrix then it is defined as a zero entry.
c thus the proper definition of diag(*) with offset ioff is
c
c diag(i) = a(i,ioff+i) i=1,2,...,nrow
c with elements falling outside the matrix being defined as zero.
c
c-----------------------------------------------------------------------
c
c on entry:
c----------
c
c nrow = integer. the row dimension of the matrix a.
c ncol = integer. the column dimension of the matrix a.
c job = integer. job indicator. if job = 0 then
c the matrix a, ja, ia, is not altered on return.
c if job.ne.0 then getdia will remove the entries
c collected in diag from the original matrix.
c this is done in place.
c
c a,ja,
c ia = matrix stored in compressed sparse row a,ja,ia,format
c ioff = integer,containing the offset of the wanted diagonal
c the diagonal extracted is the one corresponding to the
c entries a(i,j) with j-i = ioff.
c thus ioff = 0 means the main diagonal
c
c on return:
c-----------
c len = number of nonzero elements found in diag.
c (len .le. min(nrow,ncol-ioff)-max(1,1-ioff) + 1 )
c
c diag = real*8 array of length nrow containing the wanted diagonal.
c diag contains the diagonal (a(i,j),j-i = ioff ) as defined
c above.
c
c idiag = integer array of length len, containing the positions
c in the original arrays a and ja of the diagonal elements
c collected in diag. a zero entry in idiag(i) means that
c there was no entry found in row i belonging to the diagonal.
c
c a, ja,
c    ia = if job .eq. 0 the matrix is unchanged. otherwise the nonzero
c diagonal entries collected in diag are removed from the
c matrix and therefore the arrays a, ja, ia will change.
c (the matrix a, ja, ia will contain len fewer elements)
c
c----------------------------------------------------------------------c
c Y. Saad, sep. 21 1989 - modified and retested Feb 17, 1996. c
c----------------------------------------------------------------------c
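c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to extract the main diagonal of an nrow x ncol CSR
c matrix without modifying it, with diag and idiag of length nrow,
c one could use
c      call getdia (nrow,ncol,0,a,ja,ia,len,diag,idiag,0)
c-----------------------------------------------------------------------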
c local variables
integer istart, max, iend, i, kold, k, kdiag, ko
c
istart = max(0,-ioff)
iend = min(nrow,ncol-ioff)
len = 0
do 1 i=1,nrow
idiag(i) = 0
diag(i) = 0.0d0
1 continue
c
c extract diagonal elements
c
do 6 i=istart+1, iend
do 51 k= ia(i),ia(i+1) -1
if (ja(k)-i .eq. ioff) then
diag(i)= a(k)
idiag(i) = k
len = len+1
goto 6
endif
51 continue
6 continue
if (job .eq. 0 .or. len .eq.0) return
c
c remove diagonal elements and rewind structure
c
ko = 0
do 7 i=1, nrow
kold = ko
kdiag = idiag(i)
do 71 k= ia(i), ia(i+1)-1
if (k .ne. kdiag) then
ko = ko+1
a(ko) = a(k)
ja(ko) = ja(k)
endif
71 continue
ia(i) = kold+1
7 continue
c
c redefine ia(nrow+1)
c
ia(nrow+1) = ko+1
return
c------------end-of-getdia----------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine transp (nrow,ncol,a,ja,ia,iwk,ierr)
integer nrow, ncol, ia(*), ja(*), iwk(*), ierr
real*8 a(*)
c------------------------------------------------------------------------
c In-place transposition routine.
c------------------------------------------------------------------------
c this subroutine transposes a matrix stored in compressed sparse row
c format. the transposition is done in place in that the arrays a,ja,ia
c of the transpose are overwritten onto the original arrays.
c------------------------------------------------------------------------
c on entry:
c---------
c nrow = integer. The row dimension of A.
c ncol = integer. The column dimension of A.
c a = real array of size nnz (number of nonzero elements in A).
c containing the nonzero elements
c ja = integer array of length nnz containing the column positions
c of the corresponding elements in a.
c ia    = integer array of size n+1, where n = max(nrow,ncol). On entry
c ia(k) contains the position in a,ja of the beginning of
c the k-th row.
c
c iwk = integer work array of same length as ja.
c
c on return:
c----------
c
c ncol = actual row dimension of the transpose of the input matrix.
c Note that this may be .le. the input value for ncol, in
c case some of the last columns of the input matrix are zero
c columns. In the case where the actual number of rows found
c in transp(A) exceeds the input value of ncol, transp will
c return without completing the transposition. see ierr.
c a,
c ja,
c ia = contains the transposed matrix in compressed sparse
c row format. The row dimension of a, ja, ia is now ncol.
c
c ierr = integer. error message. If the number of rows for the
c transposed matrix exceeds the input value of ncol,
c then ierr is set to that number and transp quits.
c Otherwise ierr is set to 0 (normal return).
c
c Note:
c----- 1) If you do not need the transposition to be done in place
c         it is preferable to use the conversion routine csrcsc
c (see conversion routines in formats).
c 2) the entries of the output matrix are not sorted (the column
c         indices in each row are not in increasing order); use csrcsc
c if you want them sorted.
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c modified Oct. 11, 1989. c
c----------------------------------------------------------------------c
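c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to transpose an nrow x ncol CSR matrix in place,
c with iwk an integer work array of the same length as ja, one could
c use
c      call transp (nrow,ncol,a,ja,ia,iwk,ierr)
c ncol is overwritten with the row dimension of the transpose; ierr
c should be checked for 0 on return.
c-----------------------------------------------------------------------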
c local variables
real*8 t, t1
ierr = 0
nnz = ia(nrow+1)-1
c
c determine column dimension
c
jcol = 0
do 1 k=1, nnz
jcol = max(jcol,ja(k))
1 continue
if (jcol .gt. ncol) then
ierr = jcol
return
endif
c
c convert to coordinate format. use iwk for row indices.
c
ncol = jcol
c
do 3 i=1,nrow
do 2 k=ia(i),ia(i+1)-1
iwk(k) = i
2 continue
3 continue
c find pointer array for transpose.
do 35 i=1,ncol+1
ia(i) = 0
35 continue
do 4 k=1,nnz
i = ja(k)
ia(i+1) = ia(i+1)+1
4 continue
ia(1) = 1
c------------------------------------------------------------------------
do 44 i=1,ncol
ia(i+1) = ia(i) + ia(i+1)
44 continue
c
c loop for a cycle in chasing process.
c
init = 1
k = 0
5 t = a(init)
i = ja(init)
j = iwk(init)
iwk(init) = -1
c------------------------------------------------------------------------
6 k = k+1
c current row number is i. determine where to go.
l = ia(i)
c save the chased element.
t1 = a(l)
inext = ja(l)
c then occupy its location.
a(l) = t
ja(l) = j
c update pointer information for next element to be put in row i.
ia(i) = l+1
c determine next element to be chased
if (iwk(l) .lt. 0) goto 65
t = t1
i = inext
j = iwk(l)
iwk(l) = -1
if (k .lt. nnz) goto 6
goto 70
65 init = init+1
if (init .gt. nnz) goto 70
if (iwk(init) .lt. 0) goto 65
c restart chasing --
goto 5
70 continue
do 80 i=ncol,1,-1
ia(i+1) = ia(i)
80 continue
ia(1) = 1
c
return
c------------------end-of-transp ----------------------------------------
c------------------------------------------------------------------------
end subroutine
c------------------------------------------------------------------------
subroutine getl (n,a,ja,ia,ao,jao,iao)
integer n, ia(*), ja(*), iao(*), jao(*)
real*8 a(*), ao(*)
c------------------------------------------------------------------------
c this subroutine extracts the lower triangular part of a matrix
c and writes the result ao, jao, iao. The routine is in place in
c that ao, jao, iao can be the same as a, ja, ia if desired.
c-----------
c on input:
c
c n = dimension of the matrix a.
c a, ja,
c ia = matrix stored in compressed sparse row format.
c On return:
c ao, jao,
c iao = lower triangular matrix (lower part of a)
c stored in a, ja, ia, format
c note: the diagonal element is the last element in each row.
c i.e. in a(ia(i+1)-1 )
c ao, jao, iao may be the same as a, ja, ia on entry -- in which case
c getl will overwrite the result on a, ja, ia.
c
c------------------------------------------------------------------------
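c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to overwrite a, ja, ia (n rows) with its own lower
c triangular part, the in-place form may be used:
c      call getl (n,a,ja,ia,a,ja,ia)
c------------------------------------------------------------------------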
c local variables
real*8 t
integer ko, kold, kdiag, k, i
c
c initialize ko (pointer for output matrix)
c
ko = 0
do 7 i=1, n
kold = ko
kdiag = 0
do 71 k = ia(i), ia(i+1) -1
if (ja(k) .gt. i) goto 71
ko = ko+1
ao(ko) = a(k)
jao(ko) = ja(k)
if (ja(k) .eq. i) kdiag = ko
71 continue
if (kdiag .eq. 0 .or. kdiag .eq. ko) goto 72
c
c exchange
c
t = ao(kdiag)
ao(kdiag) = ao(ko)
ao(ko) = t
c
k = jao(kdiag)
jao(kdiag) = jao(ko)
jao(ko) = k
72 iao(i) = kold+1
7 continue
c redefine iao(n+1)
iao(n+1) = ko+1
return
c----------end-of-getl -------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine getu (n,a,ja,ia,ao,jao,iao)
integer n, ia(*), ja(*), iao(*), jao(*)
real*8 a(*), ao(*)
c------------------------------------------------------------------------
c this subroutine extracts the upper triangular part of a matrix
c and writes the result ao, jao, iao. The routine is in place in
c that ao, jao, iao can be the same as a, ja, ia if desired.
c-----------
c on input:
c
c n = dimension of the matrix a.
c a, ja,
c ia = matrix stored in a, ja, ia, format
c On return:
c ao, jao,
c iao = upper triangular matrix (upper part of a)
c stored in compressed sparse row format
c note: the diagonal element is the last element in each row.
c i.e. in a(ia(i+1)-1 )
c ao, jao, iao may be the same as a, ja, ia on entry -- in which case
c getu will overwrite the result on a, ja, ia.
c
c------------------------------------------------------------------------
c local variables
real*8 t
integer ko, k, i, kdiag, kfirst
ko = 0
do 7 i=1, n
kfirst = ko+1
kdiag = 0
do 71 k = ia(i), ia(i+1) -1
if (ja(k) .lt. i) goto 71
ko = ko+1
ao(ko) = a(k)
jao(ko) = ja(k)
if (ja(k) .eq. i) kdiag = ko
71 continue
if (kdiag .eq. 0 .or. kdiag .eq. kfirst) goto 72
c exchange
t = ao(kdiag)
ao(kdiag) = ao(kfirst)
ao(kfirst) = t
c
k = jao(kdiag)
jao(kdiag) = jao(kfirst)
jao(kfirst) = k
72 iao(i) = kfirst
7 continue
c redefine iao(n+1)
iao(n+1) = ko+1
return
c----------end-of-getu -------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine levels (n, jal, ial, nlev, lev, ilev, levnum)
integer jal(*),ial(*), levnum(*), ilev(*), lev(*)
c-----------------------------------------------------------------------
c levels gets the level structure of a lower triangular matrix
c for level scheduling in the parallel solution of triangular systems
c strict lower triangular matrices (e.g. unit lower triangular) as well
c as matrices with their main diagonal are accepted.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = integer. The row dimension of the matrix
c jal, ial = pattern of the lower triangular matrix in CSR format
c            (column indices and row pointers; values are not needed).
c
c on return:
c-----------
c nlev = integer. number of levels found
c lev = integer array of length n containing the level
c scheduling permutation.
c ilev = integer array. pointer to beginning of levels in lev.
c the numbers lev(i) to lev(i+1)-1 contain the row numbers
c that belong to level number i, in the level scheduling
c ordering. The equations of the same level can be solved
c in parallel, once those of all the previous levels have
c been solved.
c work arrays:
c-------------
c levnum = integer array of length n (containing the level numbers
c of each unknown on return)
c-----------------------------------------------------------------------
do 10 i = 1, n
levnum(i) = 0
10 continue
c
c compute level of each node --
c
nlev = 0
do 20 i = 1, n
levi = 0
do 15 j = ial(i), ial(i+1) - 1
levi = max (levi, levnum(jal(j)))
15 continue
levi = levi+1
levnum(i) = levi
nlev = max(nlev,levi)
20 continue
c-------------set data structure --------------------------------------
do 21 j=1, nlev+1
ilev(j) = 0
21 continue
c------count number of elements in each level -----------------------
do 22 j=1, n
i = levnum(j)+1
ilev(i) = ilev(i)+1
22 continue
c---- set up pointer for each level ----------------------------------
ilev(1) = 1
do 23 j=1, nlev
ilev(j+1) = ilev(j)+ilev(j+1)
23 continue
c-----determine elements of each level --------------------------------
do 30 j=1,n
i = levnum(j)
lev(ilev(i)) = j
ilev(i) = ilev(i)+1
30 continue
c reset pointers backwards
do 35 j=nlev, 1, -1
ilev(j+1) = ilev(j)
35 continue
ilev(1) = 1
return
c----------end-of-levels------------------------------------------------
C-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine amask (nrow,ncol,a,ja,ia,jmask,imask,
* c,jc,ic,iw,nzmax,ierr)
c---------------------------------------------------------------------
real*8 a(*),c(*)
integer ia(nrow+1),ja(*),jc(*),ic(nrow+1),jmask(*),imask(nrow+1)
logical iw(ncol)
c-----------------------------------------------------------------------
c This subroutine builds a sparse matrix from an input matrix by
c extracting only elements in positions defined by the mask jmask, imask
c-----------------------------------------------------------------------
c On entry:
c---------
c nrow = integer. row dimension of input matrix
c ncol = integer. Column dimension of input matrix.
c
c a,
c ja,
c ia = matrix in Compressed Sparse Row format
c
c jmask,
c imask = matrix defining mask (pattern only) stored in compressed
c sparse row format.
c
c nzmax = length of arrays c and jc. see ierr.
c
c On return:
c-----------
c
c a, ja, ia and jmask, imask are unchanged.
c
c c
c jc,
c ic = the output matrix in Compressed Sparse Row format.
c
c ierr  = integer. serving as error message.
c          ierr = 0    means normal return
c          ierr .gt. 0 means that amask stopped when processing
c row number ierr, because there was not enough space in
c c, jc according to the value of nzmax.
c
c work arrays:
c-------------
c iw = logical work array of length ncol.
c
c note:
c------ the algorithm is in place: c, jc, ic can be the same as
c      a, ja, ia in which case the code will overwrite the matrix c
c on a, ja, ia
c
c-----------------------------------------------------------------------
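c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to keep only the entries of a whose positions also
c appear in the pattern jmask, imask, writing the result to c, jc, ic
c of size nzmax (iw a logical work array of length ncol), one could use
c      call amask (nrow,ncol,a,ja,ia,jmask,imask,c,jc,ic,iw,nzmax,ierr)
c-----------------------------------------------------------------------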
ierr = 0
len = 0
do 1 j=1, ncol
iw(j) = .false.
1 continue
c unpack the mask for row ii in iw
do 100 ii=1, nrow
c save pointer in order to be able to do things in place
do 2 k=imask(ii), imask(ii+1)-1
iw(jmask(k)) = .true.
2 continue
c     add unmasked elements of row ii
k1 = ia(ii)
k2 = ia(ii+1)-1
ic(ii) = len+1
do 200 k=k1,k2
j = ja(k)
if (iw(j)) then
len = len+1
if (len .gt. nzmax) then
ierr = ii
return
endif
jc(len) = j
c(len) = a(k)
endif
200 continue
c
do 3 k=imask(ii), imask(ii+1)-1
iw(jmask(k)) = .false.
3 continue
100 continue
ic(nrow+1)=len+1
c
return
c-----end-of-amask -----------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine rperm (nrow,a,ja,ia,ao,jao,iao,perm,job)
integer nrow,ja(*),ia(nrow+1),jao(*),iao(nrow+1),perm(nrow),job
real*8 a(*),ao(*)
c-----------------------------------------------------------------------
c this subroutine permutes the rows of a matrix in CSR format.
c rperm computes B = P A where P is a permutation matrix.
c the permutation P is defined through the array perm: for each j,
c perm(j) represents the destination row number of row number j.
c Youcef Saad -- recoded Jan 28, 1991.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = dimension of the matrix
c a, ja, ia = input matrix in csr format
c perm = integer array of length nrow containing the permutation arrays
c for the rows: perm(i) is the destination of row i in the
c permuted matrix.
c ---> a(i,j) in the original matrix becomes a(perm(i),j)
c in the output matrix.
c
c job = integer indicating the work to be done:
c job = 1 permute a, ja, ia into ao, jao, iao
c (including the copying of real values ao and
c the array iao).
c job .ne. 1 : ignore real values.
c (in which case arrays a and ao are not needed nor
c used).
c
c------------
c on return:
c------------
c ao, jao, iao = output (permuted) matrix in a, ja, ia format
c note :
c if (job.ne.1) then the arrays a and ao are not used.
c----------------------------------------------------------------------c
c Y. Saad, May 2, 1990 c
c----------------------------------------------------------------------c
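c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to reverse the row order of an nrow-row CSR matrix,
c set perm(i) = nrow-i+1 for i = 1, ..., nrow and use
c      call rperm (nrow,a,ja,ia,ao,jao,iao,perm,1)
c-----------------------------------------------------------------------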
logical values
values = (job .eq. 1)
c
c     determine pointers for output matrix.
c
do 50 j=1,nrow
i = perm(j)
iao(i+1) = ia(j+1) - ia(j)
50 continue
c
c get pointers from lengths
c
iao(1) = 1
do 51 j=1,nrow
iao(j+1)=iao(j+1)+iao(j)
51 continue
c
c copying
c
do 100 ii=1,nrow
c
c old row = ii -- new row = perm(ii) -- ko = new pointer
c
ko = iao(perm(ii))
do 60 k=ia(ii), ia(ii+1)-1
jao(ko) = ja(k)
if (values) ao(ko) = a(k)
ko = ko+1
60 continue
100 continue
c
return
c---------end-of-rperm -------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine rperm666 (nrow,a,ja,ia,ao,jao,iao,perm,job)
integer nrow,ja(*),ia(nrow+1),jao(*),iao(nrow+1),perm(nrow),job
integer a(*)
real*8 ao(*)
c-----------------------------------------------------------------------
c this subroutine permutes the rows of a matrix in CSR format.
c rperm computes B = P A where P is a permutation matrix.
c the permutation P is defined through the array perm: for each j,
c perm(j) represents the destination row number of row number j.
c Youcef Saad -- recoded Jan 28, 1991.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = dimension of the matrix
c a, ja, ia = input matrix in csr format
c perm = integer array of length nrow containing the permutation arrays
c for the rows: perm(i) is the destination of row i in the
c permuted matrix.
c ---> a(i,j) in the original matrix becomes a(perm(i),j)
c in the output matrix.
c
c job = integer indicating the work to be done:
c job = 1 permute a, ja, ia into ao, jao, iao
c (including the copying of real values ao and
c the array iao).
c job .ne. 1 : ignore real values.
c (in which case arrays a and ao are not needed nor
c used).
c
c------------
c on return:
c------------
c ao, jao, iao = output (permuted) matrix in a, ja, ia format
c note :
c if (job.ne.1) then the arrays a and ao are not used.
c----------------------------------------------------------------------c
c Y. Saad, May 2, 1990 c
c----------------------------------------------------------------------c
c this is another version of rperm, used to handle the case when a and
c ao are passed as dummy arguments with newer compilers that do not allow
c an integer dummy to be passed when a real*8 is expected
c----------------------------------------------------------------------c
c [email protected], May 2, 2006 c
c----------------------------------------------------------------------c
logical values
values = (job .eq. 1)
c
c     determine pointers for output matrix.
c
do 50 j=1,nrow
i = perm(j)
iao(i+1) = ia(j+1) - ia(j)
50 continue
c
c get pointers from lengths
c
iao(1) = 1
do 51 j=1,nrow
iao(j+1)=iao(j+1)+iao(j)
51 continue
c
c copying
c
do 100 ii=1,nrow
c
c old row = ii -- new row = perm(ii) -- ko = new pointer
c
ko = iao(perm(ii))
do 60 k=ia(ii), ia(ii+1)-1
jao(ko) = ja(k)
if (values) ao(ko) = a(k)
ko = ko+1
60 continue
100 continue
c
return
c---------end-of-rperm666-----------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine cperm (nrow,a,ja,ia,ao,jao,iao,perm,job)
integer nrow,ja(*),ia(nrow+1),jao(*),iao(nrow+1),perm(*), job
real*8 a(*), ao(*)
c-----------------------------------------------------------------------
c this subroutine permutes the columns of a matrix a, ja, ia.
c the result is written in the output matrix ao, jao, iao.
c cperm computes B = A P, where P is a permutation matrix
c that maps column j into column perm(j), i.e., on return
c a(i,j) becomes a(i,perm(j)) in new matrix
c Y. Saad, May 2, 1990 / modified Jan. 28, 1991.
c-----------------------------------------------------------------------
c on entry:
c----------
c nrow = row dimension of the matrix
c
c a, ja, ia = input matrix in csr format.
c
c perm = integer array of length ncol (number of columns of A
c containing the permutation array the columns:
c a(i,j) in the original matrix becomes a(i,perm(j))
c in the output matrix.
c
c job = integer indicating the work to be done:
c job = 1 permute a, ja, ia into ao, jao, iao
c (including the copying of real values ao and
c the array iao).
c job .ne. 1 : ignore real values ao and ignore iao.
c
c------------
c on return:
c------------
c ao, jao, iao = output matrix in a, ja, ia format (ao not needed if job.ne.1)
c
c Notes:
c-------
c 1. if job .ne. 1 then ao and iao are not used.
c 2. This routine is in place: ja, jao can be the same.
c 3. If the matrix is initially sorted (by increasing column number)
c    then ao, jao, iao may not be sorted on return.
c
c----------------------------------------------------------------------c
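c example (illustrative sketch, not part of the original SPARSKIT
c documentation): to relabel the columns so that column j becomes
c column perm(j), copying the values as well, one could use
c      call cperm (nrow,a,ja,ia,ao,jao,iao,perm,1)
c with job = 0 only jao is produced and jao may be the same as ja.
c----------------------------------------------------------------------c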
c local parameters:
integer k, i, nnz
c
nnz = ia(nrow+1)-1
do 100 k=1,nnz
jao(k) = perm(ja(k))
100 continue
c
c done with ja array. return if no need to touch values.
c
if (job .ne. 1) return
c
c else get new pointers -- and copy values too.
c
do 1 i=1, nrow+1
iao(i) = ia(i)
1 continue
c
do 2 k=1, nnz
ao(k) = a(k)
2 continue
c
return
c---------end-of-cperm--------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine dperm666 (nrow,a,ja,ia,ao,jao,iao,perm,qperm,job)
integer nrow,ja(*),ia(nrow+1),jao(*),iao(nrow+1),perm(nrow),
+ qperm(*),job
integer a(*)
real*8 ao(*)
c-----------------------------------------------------------------------
c This routine permutes the rows and columns of a matrix stored in CSR
c format. i.e., it computes P A Q, where P, Q are permutation matrices.
c P maps row i into row perm(i) and Q maps column j into column qperm(j):
c a(i,j) becomes a(perm(i),qperm(j)) in new matrix
c In the particular case where Q is the transpose of P (symmetric
c permutation of A) then qperm is not needed.
c note that qperm should be of length ncol (number of columns) but this
c is not checked.
c-----------------------------------------------------------------------
c Y. Saad, Sep. 21 1989 / recoded Jan. 28 1991.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = dimension of the matrix
c a, ja,
c ia = input matrix in a, ja, ia format
c perm = integer array of length n containing the permutation arrays
c for the rows: perm(i) is the destination of row i in the
c permuted matrix -- also the destination of column i in case
c permutation is symmetric (job .le. 2)
c
c qperm = same thing for the columns. This should be provided only
c if job=3 or job=4, i.e., only in the case of a nonsymmetric
c permutation of rows and columns. Otherwise qperm is a dummy
c
c job = integer indicating the work to be done:
c * job = 1,2 permutation is symmetric Ao :== P * A * transp(P)
c job = 1 permute a, ja, ia into ao, jao, iao
c job = 2 permute matrix ignoring real values.
c * job = 3,4 permutation is non-symmetric Ao :== P * A * Q
c job = 3 permute a, ja, ia into ao, jao, iao
c job = 4 permute matrix ignoring real values.
c
c on return:
c-----------
c ao, jao, iao = output (permuted) matrix in a, ja, ia format
c
c in case job .eq. 2 or job .eq. 4, a and ao are never referred to
c and can be dummy arguments.
c Notes:
c-------
c 1) algorithm is in place
c 2) column indices may not be sorted on return even though they may be
c on entry.
c----------------------------------------------------------------------c
c
c this is another version of dperm, used to handle the case when a and
c ao are passed as dummy arguments with newer compilers that do not allow
c an integer dummy to be passed when a real*8 is expected
c----------------------------------------------------------------------c
c [email protected], May 2, 2006 c
c----------------------------------------------------------------------c
c local variables
integer locjob, mod
c
c locjob indicates whether or not real values must be copied.
c
locjob = mod(job,2)
c
c permute rows first
c
call rperm666 (nrow,a,ja,ia,ao,jao,iao,perm,locjob)
c
c then permute columns
c
locjob = 0
c
if (job .le. 2) then
call cperm (nrow,ao,jao,iao,ao,jao,iao,perm,locjob)
else
call cperm (nrow,ao,jao,iao,ao,jao,iao,qperm,locjob)
endif
c
return
c-------end-of-dperm666-------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
subroutine dperm (nrow,a,ja,ia,ao,jao,iao,perm,qperm,job)
integer nrow,ja(*),ia(nrow+1),jao(*),iao(nrow+1),perm(nrow),
+ qperm(*),job
real*8 a(*),ao(*)
c-----------------------------------------------------------------------
c This routine permutes the rows and columns of a matrix stored in CSR
c format. i.e., it computes P A Q, where P, Q are permutation matrices.
c P maps row i into row perm(i) and Q maps column j into column qperm(j):
c a(i,j) becomes a(perm(i),qperm(j)) in new matrix
c In the particular case where Q is the transpose of P (symmetric
c permutation of A) then qperm is not needed.
c note that qperm should be of length ncol (number of columns) but this
c is not checked.
c-----------------------------------------------------------------------
c Y. Saad, Sep. 21 1989 / recoded Jan. 28 1991.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = dimension of the matrix
c a, ja,
c ia = input matrix in a, ja, ia format
c perm = integer array of length n containing the permutation arrays
c for the rows: perm(i) is the destination of row i in the
c permuted matrix -- also the destination of column i in case
c permutation is symmetric (job .le. 2)
c
c qperm = same thing for the columns. This should be provided only
c if job=3 or job=4, i.e., only in the case of a nonsymmetric
c permutation of rows and columns. Otherwise qperm is a dummy
c
c job = integer indicating the work to be done:
c * job = 1,2 permutation is symmetric Ao :== P * A * transp(P)
c job = 1 permute a, ja, ia into ao, jao, iao
c job = 2 permute matrix ignoring real values.
c * job = 3,4 permutation is non-symmetric Ao :== P * A * Q
c job = 3 permute a, ja, ia into ao, jao, iao
c job = 4 permute matrix ignoring real values.
c
c on return:
c-----------
c ao, jao, iao = output (permuted) matrix in a, ja, ia format
c
c in case job .eq. 2 or job .eq. 4, a and ao are never referred to
c and can be dummy arguments.
c Notes:
c-------
c 1) algorithm is in place
c 2) column indices may not be sorted on return even though they may be
c on entry.
c----------------------------------------------------------------------c
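c example (illustrative sketch, not part of the original SPARSKIT
c documentation): for a symmetric renumbering Ao = P A transp(P)
c defined by perm, values included, qperm is not referenced and perm
c can simply be passed twice:
c      call dperm (nrow,a,ja,ia,ao,jao,iao,perm,perm,1)
c----------------------------------------------------------------------c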
c local variables
integer locjob, mod
c
c locjob indicates whether or not real values must be copied.
c
locjob = mod(job,2)
c
c permute rows first
c
call rperm (nrow,a,ja,ia,ao,jao,iao,perm,locjob)
c
c then permute columns
c
locjob = 0
c
if (job .le. 2) then
call cperm (nrow,ao,jao,iao,ao,jao,iao,perm,locjob)
else
call cperm (nrow,ao,jao,iao,ao,jao,iao,qperm,locjob)
endif
c
return
c-------end-of-dperm----------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine dperm1 (i1,i2,a,ja,ia,b,jb,ib,perm,ipos,job)
integer i1,i2,job,ja(*),ia(*),jb(*),ib(*),perm(*),ipos
real*8 a(*),b(*)
c-----------------------------------------------------------------------
c general submatrix extraction routine.
c-----------------------------------------------------------------------
c extracts rows perm(i1), perm(i1+1), ..., perm(i2) (in this order)
c from a matrix (doing nothing in the column indices.) The resulting
c submatrix is constructed in b, jb, ib. A pointer ipos to the
c beginning of arrays b, jb is also allowed (i.e., nonzero elements
c are accumulated starting in position ipos of b, jb).
c-----------------------------------------------------------------------
c Y. Saad,Sep. 21 1989 / recoded Jan. 28 1991 / modified for PSPARSLIB
c Sept. 1997..
c-----------------------------------------------------------------------
c on entry:
c----------
c n = dimension of the matrix
c a,ja,
c ia = input matrix in CSR format
c perm = integer array of length n containing the indices of the rows
c to be extracted.
c
c job = job indicator. if (job .ne.1) values are not copied (i.e.,
c only pattern is copied).
c
c on return:
c-----------
c b, jb,
c ib = matrix in csr format. b(ipos:ipos+nnz-1),jb(ipos:ipos+nnz-1)
c contain the value and column indices respectively of the nnz
c nonzero elements of the permuted matrix. thus ib(1)=ipos.
c
c Notes:
c-------
c algorithm is NOT in place
c-----------------------------------------------------------------------
c local variables
c
integer ko,irow,k
logical values
c-----------------------------------------------------------------------
values = (job .eq. 1)
ko = ipos
ib(1) = ko
do 900 i=i1,i2
irow = perm(i)
do 800 k=ia(irow),ia(irow+1)-1
if (values) b(ko) = a(k)
jb(ko) = ja(k)
ko=ko+1
800 continue
ib(i-i1+2) = ko
900 continue
return
c--------end-of-dperm1--------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine dperm2 (i1,i2,a,ja,ia,b,jb,ib,cperm,rperm,istart,
* ipos,job)
integer i1,i2,job,istart,ja(*),ia(*),jb(*),ib(*),cperm(*),rperm(*)
integer ipos
real*8 a(*),b(*)
c-----------------------------------------------------------------------
c general submatrix permutation/ extraction routine.
c-----------------------------------------------------------------------
c extracts rows rperm(i1), rperm(i1+1), ..., rperm(i2) and does an
c associated column permutation (using array cperm). The resulting
c submatrix is constructed in b, jb, ib. For added flexibility, the
c extracted elements are put in sequence starting from row 'istart'
c of B. In addition a pointer ipos to the beginning of arrays b,jb,
c is also allowed (i.e., nonzero elements are accumulated starting in
c position ipos of b, jb). In most applications istart and ipos are
c equal to one. However, the generality adds substantial flexibility.
c EXAMPLES: (1) to permute msr to msr (excluding diagonals)
c call dperm2 (1,n,a,ja,ja,b,jb,jb,rperm,rperm,1,n+2)
c (2) To extract rows 1 to 10: define rperm and cperm to be
c identity permutations (rperm(i)=i, i=1,n) and then
c call dperm2 (1,10,a,ja,ia,b,jb,ib,rperm,rperm,1,1)
c (3) to achieve a symmetric permutation as defined by perm:
c call dperm2 (1,10,a,ja,ia,b,jb,ib,perm,perm,1,1)
c (4) to get a symmetric permutation of A and append the
c resulting data structure to A's data structure (useful!)
c call dperm2 (1,10,a,ja,ia,a,ja,ia(n+1),perm,perm,1,ia(n+1))
c-----------------------------------------------------------------------
c Y. Saad,Sep. 21 1989 / recoded Jan. 28 1991.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = dimension of the matrix
c i1,i2 = extract rows rperm(i1) to rperm(i2) of A, with i1<i2.
c
c a,ja,
c ia = input matrix in CSR format
c cperm = integer array of length n containing the permutation arrays
c for the columns: cperm(i) is the destination of column j,
c i.e., any column index ja(k) is transformed into cperm(ja(k))
c
c rperm = permutation array for the rows. rperm(i) = origin (in A) of
c row i in B. This is the reverse permutation relative to the
c ones used in routines cperm, dperm,....
c rows rperm(i1), rperm(i1)+1, ... rperm(i2) are
c extracted from A and stacked into B, starting in row istart
c of B.
c istart= starting row for B where extracted matrix is to be added.
c         this is also a pointer to the beginning address for
c         ib on return.
c ipos = beginning position in arrays b and jb where to start copying
c elements. Thus, ib(istart) = ipos.
c
c job = job indicator. if (job .ne.1) values are not copied (i.e.,
c only pattern is copied).
c
c on return:
c-----------
c b, jb,
c ib = matrix in csr format. positions 1,2,...,istart-1 of ib
c are not touched. b(ipos:ipos+nnz-1),jb(ipos:ipos+nnz-1)
c contain the value and column indices respectively of the nnz
c nonzero elements of the permuted matrix. thus ib(istart)=ipos.
c
c Notes:
c-------
c 1) algorithm is NOT in place
c 2) column indices may not be sorted on return even though they
c may be on entry.
c-----------------------------------------------------------------------
c local variables
c
integer ko,irow,k
logical values
c-----------------------------------------------------------------------
values = (job .eq. 1)
ko = ipos
ib(istart) = ko
do 900 i=i1,i2
irow = rperm(i)
do 800 k=ia(irow),ia(irow+1)-1
if (values) b(ko) = a(k)
jb(ko) = cperm(ja(k))
ko=ko+1
800 continue
ib(istart+i-i1+1) = ko
900 continue
return
c--------end-of-dperm2--------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine dmperm (nrow,a,ja,ao,jao,perm,job)
integer nrow,ja(*),jao(*),perm(nrow),job
real*8 a(*),ao(*)
c-----------------------------------------------------------------------
c This routine performs a symmetric permutation of the rows and
c columns of a matrix stored in MSR format. i.e., it computes
c B = P A transp(P), where P, is a permutation matrix.
c P maps row i into row perm(i) and column j into column perm(j):
c a(i,j) becomes a(perm(i),perm(j)) in new matrix
c (i.e. ao(perm(i),perm(j)) = a(i,j) )
c calls dperm.
c-----------------------------------------------------------------------
c Y. Saad, Nov 15, 1991.
c-----------------------------------------------------------------------
c on entry:
c----------
c nrow = dimension of the matrix
c a, ja = input matrix in MSR format.
c perm = integer array of length n containing the permutation arrays
c for the rows: perm(i) is the destination of row i in the
c permuted matrix -- also the destination of column i in case
c permutation is symmetric (job .le. 2)
c
c job = integer indicating the work to be done:
c job = 1 permute a, ja, ia into ao, jao, iao
c job = 2 permute matrix ignoring real values.
c
c on return:
c-----------
c ao, jao = output matrix in MSR.
c
c in case job .eq. 2 a and ao are never referred to and can be dummy
c arguments.
c
c Notes:
c-------
c 1) algorithm is NOT in place
c 2) column indices may not be sorted on return even though they may be
c on entry.
c----------------------------------------------------------------------c
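c example (editorial usage sketch, not part of the original header):
c to apply a symmetric permutation perm (a valid permutation of
c 1..nrow) to an MSR matrix a, ja:
c call dmperm (nrow,a,ja,ao,jao,perm,1)
c ao, jao must be arrays distinct from a, ja (not in place).
c-----------------------------------------------------------------------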
c local variables
c
integer n1, n2
n1 = nrow+1
n2 = n1+1
c
call dperm (nrow,a,ja,ja,ao(n2),jao(n2),jao,perm,perm,job)
c
jao(1) = n2
do 101 j=1, nrow
ao(perm(j)) = a(j)
jao(j+1) = jao(j+1)+n1
101 continue
c
c done
c
return
c-----------------------------------------------------------------------
c--------end-of-dmperm--------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine dvperm (n, x, perm)
integer n, perm(n)
real*8 x(n)
c-----------------------------------------------------------------------
c this subroutine performs an in-place permutation of a real vector x
c according to the permutation array perm(*), i.e., on return,
c the vector x satisfies,
c
c x(perm(j)) :== x(j), j=1,2,.., n
c
c-----------------------------------------------------------------------
c on entry:
c---------
c n = length of vector x.
c perm = integer array of length n containing the permutation array.
c x = input vector
c
c on return:
c----------
c x = vector x permuted according to x(perm(*)) := x(*)
c
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
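c example (editorial usage sketch): with n = 3, perm = (3,1,2) and
c x = (10,20,30) on entry, the call
c call dvperm (3, x, perm)
c returns x = (20,30,10), since x(perm(j)) := x(j) for each j.
c perm is restored to its original (positive) values on return.
c----------------------------------------------------------------------c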
c local variables
real*8 tmp, tmp1
c
init = 1
tmp = x(init)
ii = perm(init)
perm(init)= -perm(init)
k = 0
c
c loop
c
6 k = k+1
c
c save the chased element --
c
tmp1 = x(ii)
x(ii) = tmp
next = perm(ii)
if (next .lt. 0 ) goto 65
c
c test for end
c
if (k .gt. n) goto 101
tmp = tmp1
perm(ii) = - perm(ii)
ii = next
c
c end loop
c
goto 6
c
c reinitialize cycle --
c
65 init = init+1
if (init .gt. n) goto 101
if (perm(init) .lt. 0) goto 65
tmp = x(init)
ii = perm(init)
perm(init)=-perm(init)
goto 6
c
101 continue
do 200 j=1, n
perm(j) = -perm(j)
200 continue
c
return
c-------------------end-of-dvperm---------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine ivperm (n, ix, perm)
integer n, perm(n), ix(n)
c-----------------------------------------------------------------------
c this subroutine performs an in-place permutation of an integer vector
c ix according to the permutation array perm(*), i.e., on return,
c the vector x satisfies,
c
c ix(perm(j)) :== ix(j), j=1,2,.., n
c
c-----------------------------------------------------------------------
c on entry:
c---------
c n = length of vector x.
c perm = integer array of length n containing the permutation array.
c ix = input vector
c
c on return:
c----------
c ix = vector x permuted according to ix(perm(*)) := ix(*)
c
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
c local variables
integer tmp, tmp1
c
init = 1
tmp = ix(init)
ii = perm(init)
perm(init)= -perm(init)
k = 0
c
c loop
c
6 k = k+1
c
c save the chased element --
c
tmp1 = ix(ii)
ix(ii) = tmp
next = perm(ii)
if (next .lt. 0 ) goto 65
c
c test for end
c
if (k .gt. n) goto 101
tmp = tmp1
perm(ii) = - perm(ii)
ii = next
c
c end loop
c
goto 6
c
c reinitialize cycle --
c
65 init = init+1
if (init .gt. n) goto 101
if (perm(init) .lt. 0) goto 65
tmp = ix(init)
ii = perm(init)
perm(init)=-perm(init)
goto 6
c
101 continue
do 200 j=1, n
perm(j) = -perm(j)
200 continue
c
return
c-------------------end-of-ivperm---------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine retmx (n,a,ja,ia,dd)
real*8 a(*),dd(*)
integer n,ia(*),ja(*)
c-----------------------------------------------------------------------
c returns in dd(*) the max absolute value of elements in row *.
c used for scaling purposes. superseded by rnrms .
c
c on entry:
c n = dimension of A
c a,ja,ia
c = matrix stored in compressed sparse row format
c dd = real*8 array of length n. On output,entry dd(i) contains
c the element of row i that has the largest absolute value.
c Moreover the sign of dd is modified such that it is the
c same as that of the diagonal element in row i.
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
c local variables
integer k2, i, k1, k
real*8 t, t1, t2
c
c initialize
c
k2 = 1
do 11 i=1,n
k1 = k2
k2 = ia(i+1) - 1
t = 0.0d0
do 101 k=k1,k2
t1 = abs(a(k))
if (t1 .gt. t) t = t1
if (ja(k) .eq. i) then
if (a(k) .ge. 0.0) then
t2 = a(k)
else
t2 = - a(k)
endif
endif
101 continue
dd(i) = t2*t
c we do not invert diag
11 continue
return
c---------end of retmx -------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine diapos (n,ja,ia,idiag)
integer ia(n+1), ja(*), idiag(n)
c-----------------------------------------------------------------------
c this subroutine returns the positions of the diagonal elements of a
c sparse matrix a, ja, ia, in the array idiag.
c-----------------------------------------------------------------------
c on entry:
c----------
c
c n = integer. row dimension of the matrix a.
c a,ja,
c ia = matrix stored in compressed sparse row format. a array skipped.
c
c on return:
c-----------
c idiag = integer array of length n. The i-th entry of idiag
c points to the diagonal element a(i,i) in the arrays
c a, ja. (i.e., a(idiag(i)) = element A(i,i) of matrix A)
c if no diagonal element is found the entry is set to 0.
c----------------------------------------------------------------------c
c Y. Saad, March, 1990
c----------------------------------------------------------------------c
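c example (editorial usage sketch): for the 2x2 dense matrix stored
c in csr format with ia = (1,3,5) and ja = (1,2,1,2), the call
c call diapos (2,ja,ia,idiag)
c returns idiag = (1,4), i.e. a(1) and a(4) hold the diagonal entries.
c----------------------------------------------------------------------c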
do 1 i=1, n
idiag(i) = 0
1 continue
c
c sweep through data structure.
c
do 6 i=1,n
do 51 k= ia(i),ia(i+1) -1
if (ja(k) .eq. i) idiag(i) = k
51 continue
6 continue
c----------- -end-of-diapos---------------------------------------------
c-----------------------------------------------------------------------
return
end subroutine
c-----------------------------------------------------------------------
subroutine dscaldg (n,a,ja,ia,diag,job)
real*8 a(*), diag(*),t
integer ia(*),ja(*)
c-----------------------------------------------------------------------
c scales rows by diag where diag is either given (job=0)
c or to be computed:
c job = 1, scale row i by +/- max |a(i,j)| and put inverse of
c scaling factor in diag(i),where +/- is the sign of a(i,i).
c job = 2 scale by 2-norm of each row..
c if diag(i) = 0,then diag(i) is replaced by one
c (no scaling)..
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
goto (12,11,10) job+1
10 do 110 j=1,n
k1= ia(j)
k2 = ia(j+1)-1
t = 0.0d0
do 111 k = k1,k2
111 t = t+a(k)*a(k)
110 diag(j) = sqrt(t)
goto 12
11 continue
call retmx (n,a,ja,ia,diag)
c------
12 do 1 j=1,n
if (diag(j) .ne. 0.0d0) then
diag(j) = 1.0d0/diag(j)
else
diag(j) = 1.0d0
endif
1 continue
do 2 i=1,n
t = diag(i)
do 21 k=ia(i),ia(i+1) -1
a(k) = a(k)*t
21 continue
2 continue
return
c--------end of dscaldg -----------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine extbdg (n,a,ja,ia,bdiag,nblk,ao,jao,iao)
implicit real*8 (a-h,o-z)
real*8 bdiag(*),a(*),ao(*)
integer ia(*),ja(*),jao(*),iao(*)
c-----------------------------------------------------------------------
c this subroutine extracts the main diagonal blocks of a
c matrix stored in compressed sparse row format and puts the result
c into the array bdiag and the remainder in ao,jao,iao.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = integer. The row dimension of the matrix a.
c a,
c ja,
c ia = matrix stored in csr format
c nblk = dimension of each diagonal block. The diagonal blocks are
c stored in compressed format rowwise,i.e.,we store in
c succession the i nonzeros of the i-th row after those of
c row number i-1..
c
c on return:
c----------
c bdiag = real*8 array of size (n x nblk) containing the diagonal
c blocks of A on return
c ao,
c jao,
C iao = remainder of the matrix stored in csr format.
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
m = 1 + (n-1)/nblk
c this version is sequential -- there is a more parallel version
c that goes through the structure twice ....
ltr = ((nblk-1)*nblk)/2
l = m * ltr
do 1 i=1,l
bdiag(i) = 0.0d0
1 continue
ko = 0
kb = 1
iao(1) = 1
c-------------------------
do 11 jj = 1,m
j1 = (jj-1)*nblk+1
j2 = min0 (n,j1+nblk-1)
do 12 j=j1,j2
do 13 i=ia(j),ia(j+1) -1
k = ja(i)
if (k .lt. j1) then
ko = ko+1
ao(ko) = a(i)
jao(ko) = k
else if (k .lt. j) then
c kb = (jj-1)*ltr+((j-j1)*(j-j1-1))/2+k-j1+1
c bdiag(kb) = a(i)
bdiag(kb+k-j1) = a(i)
endif
13 continue
kb = kb + j-j1
iao(j+1) = ko+1
12 continue
11 continue
return
c---------end-of-extbdg-------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine getbwd(n,a,ja,ia,ml,mu)
c-----------------------------------------------------------------------
c gets the bandwidth of lower part and upper part of A.
c does not assume that A is sorted.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = integer = the row dimension of the matrix
c a, ja,
c ia = matrix in compressed sparse row format.
c
c on return:
c-----------
c ml = integer. The bandwidth of the strict lower part of A
c mu = integer. The bandwidth of the strict upper part of A
c
c Notes:
c ===== ml and mu are allowed to be negative on return. This may be
c useful since it will tell us whether a band is confined
c in the strict upper/lower triangular part.
c indeed the definitions of ml and mu are
c
c ml = max ( (i-j) s.t. a(i,j) .ne. 0 )
c mu = max ( (j-i) s.t. a(i,j) .ne. 0 )
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
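c example (editorial usage sketch): for a tridiagonal matrix the call
c call getbwd (n,a,ja,ia,ml,mu)
c returns ml = 1 and mu = 1. For a strictly upper triangular matrix
c ml is negative, showing the band lies entirely above the diagonal.
c----------------------------------------------------------------------c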
real*8 a(*)
integer ja(*),ia(n+1),ml,mu,ldist,i,k
ml = - n
mu = - n
do 3 i=1,n
do 31 k=ia(i),ia(i+1)-1
ldist = i-ja(k)
ml = max(ml,ldist)
mu = max(mu,-ldist)
31 continue
3 continue
return
c---------------end-of-getbwd ------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine blkfnd (nrow,ja,ia,nblk)
c-----------------------------------------------------------------------
c This routine attempts to determine whether or not the input
c matrix has a block structure and finds the block size
c if it does. A block matrix is one which is
c comprised of small square dense blocks. If there are zero
c elements within the square blocks and the original data structure
c takes these zeros into account then blkchk may fail to find the
c correct block size.
c-----------------------------------------------------------------------
c on entry
c---------
c nrow = integer equal to the row dimension of the matrix.
c ja = integer array containing the column indices of the
c nonzero entries of the matrix stored by row.
c ia = integer array of length nrow + 1 containing the pointers
c to the beginning of each row in array ja.
c
c nblk = integer. not used on entry.
c
c on return
c----------
c nblk = integer containing the block size found. nblk = 1 is
c returned when no larger block size is detected.
c
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
integer ia(nrow+1),ja(*)
c-----------------------------------------------------------------------
c first part of code will find candidate block sizes.
c criterion used here is a simple one: scan rows and determine groups
c of rows that have the same length and such that the first column
c number and the last column number are identical.
c-----------------------------------------------------------------------
minlen = ia(2)-ia(1)
irow = 1
do 1 i=2,nrow
len = ia(i+1)-ia(i)
if (len .lt. minlen) then
minlen = len
irow = i
endif
1 continue
c
c ---- candidates are all dividers of minlen
c
nblk = 1
if (minlen .le. 1) return
c
do 99 iblk = minlen, 1, -1
if (mod(minlen,iblk) .ne. 0) goto 99
len = ia(2) - ia(1)
len0 = len
jfirst = ja(1)
jlast = ja(ia(2)-1)
do 10 jrow = irow+1,irow+nblk-1
i1 = ia(jrow)
i2 = ia(jrow+1)-1
len = i2+1-i1
jf = ja(i1)
jl = ja(i2)
if (len .ne. len0 .or. jf .ne. jfirst .or.
* jl .ne. jlast) goto 99
10 continue
c
c check for this candidate ----
c
call blkchk (nrow,ja,ia,iblk,imsg)
if (imsg .eq. 0) then
c
c block size found
c
nblk = iblk
return
endif
99 continue
c--------end-of-blkfnd -------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine blkchk (nrow,ja,ia,nblk,imsg)
c-----------------------------------------------------------------------
c This routine checks whether the input matrix is a block
c matrix with block size of nblk. A block matrix is one which is
c comprised of small square dense blocks. If there are zero
c elements within the square blocks and the data structure
c takes them into account then blkchk may fail to find the
c correct block size.
c-----------------------------------------------------------------------
c on entry
c---------
c nrow = integer equal to the row dimension of the matrix.
c ja = integer array containing the column indices of the
c nonzero entries of the matrix stored by row.
c ia = integer array of length nrow + 1 containing the pointers
c to the beginning of each row in array ja.
c
c nblk = integer containing the value of nblk to be checked.
c
c on return
c----------
c
c imsg = integer containing a message with the following meaning.
c imsg = 0 means that the output value of nblk is a correct
c block size. nblk .lt. 0 means nblk not correct
c block size.
c imsg = -1 : nblk does not divide nrow
c imsg = -2 : a starting element in a row is at wrong position
c (j .ne. mult*nblk +1 )
c imsg = -3 : nblk does not divide the length of a row
c imsg = -4 : an element is isolated outside a block or
c two rows in same group have different lengths
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
integer ia(nrow+1),ja(*)
c----------------------------------------------------------------------
c first part of code will find candidate block sizes.
c this is not guaranteed to work . so a check is done at the end
c the criterion used here is a simple one:
c scan rows and determine groups of rows that have the same length
c and such that the first column number and the last column number
c are identical.
c----------------------------------------------------------------------
imsg = 0
if (nblk .le. 1) return
nr = nrow/nblk
if (nr*nblk .ne. nrow) goto 101
c-- main loop ---------------------------------------------------------
irow = 1
do 20 ii=1, nr
c i1= starting position for group of nblk rows in original matrix
i1 = ia(irow)
j2 = i1
c lena = length of each row in that group in the original matrix
lena = ia(irow+1)-i1
c len = length of each block-row in that group in the output matrix
len = lena/nblk
if (len* nblk .ne. lena) goto 103
c
c for each row
c
do 6 i = 1, nblk
irow = irow + 1
if (ia(irow)-ia(irow-1) .ne. lena ) goto 104
c
c for each block
c
do 7 k=0, len-1
jstart = ja(i1+nblk*k)-1
if ( (jstart/nblk)*nblk .ne. jstart) goto 102
c
c for each column
c
do 5 j=1, nblk
if (jstart+j .ne. ja(j2) ) goto 104
j2 = j2+1
5 continue
7 continue
6 continue
20 continue
c went through all loops successfully:
return
101 imsg = -1
return
102 imsg = -2
return
103 imsg = -3
return
104 imsg = -4
c----------------end of blkchk -----------------------------------------
c-----------------------------------------------------------------------
return
end subroutine
c-----------------------------------------------------------------------
subroutine infdia (n,ja,ia,ind,idiag)
integer ia(*), ind(*), ja(*)
c-----------------------------------------------------------------------
c obtains information on the diagonals of A.
c-----------------------------------------------------------------------
c this subroutine finds the lengths of each of the 2*n-1 diagonals of A
c it also outputs the number of nonzero diagonals found.
c-----------------------------------------------------------------------
c on entry:
c----------
c n = dimension of the matrix a.
c
c a, ..... not needed here.
c ja,
c ia = matrix stored in csr format
c
c on return:
c-----------
c
c idiag = integer. number of nonzero diagonals found.
c
c ind = integer array of length at least 2*n-1. The k-th entry in
c ind contains the number of nonzero elements in the diagonal
c number k, the numbering being from the lowermost diagonal
c (bottom-left). In other words ind(k) = length of diagonal
c whose offset wrt the main diagonal is = - n + k.
c----------------------------------------------------------------------c
c Y. Saad, Sep. 21 1989 c
c----------------------------------------------------------------------c
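c example (editorial usage sketch): for an n x n tridiagonal matrix
c call infdia (n,ja,ia,ind,idiag)
c returns idiag = 3, and only ind(n-1), ind(n) and ind(n+1) are
c nonzero (offsets -1, 0 and +1 with respect to the main diagonal).
c----------------------------------------------------------------------c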
n2= n+n-1
do 1 i=1,n2
ind(i) = 0
1 continue
do 3 i=1, n
do 2 k=ia(i),ia(i+1)-1
j = ja(k)
ind(n+j-i) = ind(n+j-i) +1
2 continue
3 continue
c count the nonzero ones.
idiag = 0
do 41 k=1, n2
if (ind(k) .ne. 0) idiag = idiag+1
41 continue
return
c done
c------end-of-infdia ---------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine amubdg (nrow,ncol,ncolb,ja,ia,jb,ib,ndegr,nnz,iw)
integer ja(*),jb(*),ia(nrow+1),ib(ncol+1),ndegr(nrow),iw(ncolb)
c-----------------------------------------------------------------------
c gets the number of nonzero elements in each row of A*B and the total
c number of nonzero elements in A*B.
c-----------------------------------------------------------------------
c on entry:
c --------
c
c nrow = integer. row dimension of matrix A
c ncol = integer. column dimension of matrix A = row dimension of
c matrix B.
c ncolb = integer. the column dimension of the matrix B.
c
c ja, ia= row structure of input matrix A: ja = column indices of
c the nonzero elements of A stored by rows.
c ia = pointer to beginning of each row in ja.
c
c jb, ib= row structure of input matrix B: jb = column indices of
c the nonzero elements of B stored by rows.
c ib = pointer to beginning of each row in jb.
c
c on return:
c ---------
c ndegr = integer array of length nrow containing the degrees (i.e.,
c the number of nonzeros in each row of the matrix A * B)
c
c nnz = total number of nonzero elements found in A * B
c
c work arrays:
c-------------
c iw = integer work array of length ncolb.
c-----------------------------------------------------------------------
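c example (editorial usage sketch): amubdg is typically called before
c forming a sparse product in order to size the result, e.g.
c call amubdg (nrow,ncol,ncolb,ja,ia,jb,ib,ndegr,nnz,iw)
c after which arrays of length nnz can be provided for the values and
c column indices of A*B (e.g. when calling the amub routine).
c-----------------------------------------------------------------------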
do 1 k=1, ncolb
iw(k) = 0
1 continue
do 2 k=1, nrow
ndegr(k) = 0
2 continue
c
c method used: Transp(A) * A = sum [over i=1, nrow] a(i)^T a(i)
c where a(i) = i-th row of A. We must be careful not to add the
c elements already accounted for.
c
c
do 7 ii=1,nrow
c
c for each row of A
c
ldg = 0
c
c end-of-linked list
c
last = -1
do 6 j = ia(ii),ia(ii+1)-1
c
c row number to be added:
c
jr = ja(j)
do 5 k=ib(jr),ib(jr+1)-1
jc = jb(k)
if (iw(jc) .eq. 0) then
c
c add one element to the linked list
c
ldg = ldg + 1
iw(jc) = last
last = jc
endif
5 continue
6 continue
ndegr(ii) = ldg
c
c reset iw to zero
c
do 61 k=1,ldg
j = iw(last)
iw(last) = 0
last = j
61 continue
c-----------------------------------------------------------------------
7 continue
c
nnz = 0
do 8 ii=1, nrow
nnz = nnz+ndegr(ii)
8 continue
c
return
c---------------end-of-amubdg ------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine aplbdg (nrow,ncol,ja,ia,jb,ib,ndegr,nnz,iw)
integer ja(*),jb(*),ia(nrow+1),ib(nrow+1),iw(ncol),ndegr(nrow)
c-----------------------------------------------------------------------
c gets the number of nonzero elements in each row of A+B and the total
c number of nonzero elements in A+B.
c-----------------------------------------------------------------------
c on entry:
c ---------
c nrow = integer. The row dimension of A and B
c ncol = integer. The column dimension of A and B.
c
c a,
c ja,
c ia = Matrix A in compressed sparse row format.
c
c b,
c jb,
c ib = Matrix B in compressed sparse row format.
c
c on return:
c----------
c ndegr = integer array of length nrow containing the degrees (i.e.,
c the number of nonzeros in each row of the matrix A + B).
c
c nnz = total number of nonzero elements found in A + B
c
c work arrays:
c------------
c iw = integer work array of length equal to ncol.
c
c-----------------------------------------------------------------------
do 1 k=1, ncol
iw(k) = 0
1 continue
c
do 2 k=1, nrow
ndegr(k) = 0
2 continue
c
do 7 ii=1,nrow
ldg = 0
c
c end-of-linked list
c
last = -1
c
c row of A
c
do 5 j = ia(ii),ia(ii+1)-1
jr = ja(j)
c
c add element to the linked list
c
ldg = ldg + 1
iw(jr) = last
last = jr
5 continue
c
c row of B
c
do 6 j=ib(ii),ib(ii+1)-1
jc = jb(j)
if (iw(jc) .eq. 0) then
c
c add one element to the linked list
c
ldg = ldg + 1
iw(jc) = last
last = jc
endif
6 continue
c done with row ii.
ndegr(ii) = ldg
c
c reset iw to zero
c
do 61 k=1,ldg
j = iw(last)
iw(last) = 0
last = j
61 continue
c-----------------------------------------------------------------------
7 continue
c
nnz = 0
do 8 ii=1, nrow
nnz = nnz+ndegr(ii)
8 continue
return
c----------------end-of-aplbdg -----------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine rnrms (nrow, nrm, a, ja, ia, diag)
real*8 a(*), diag(nrow), scal
integer ja(*), ia(nrow+1)
c-----------------------------------------------------------------------
c gets the norms of each row of A. (choice of three norms)
c-----------------------------------------------------------------------
c on entry:
c ---------
c nrow = integer. The row dimension of A
c
c nrm = integer. norm indicator. nrm = 1 means 1-norm, nrm = 2
c means the 2-norm, nrm = 0 means max norm
c
c a,
c ja,
c ia = Matrix A in compressed sparse row format.
c
c on return:
c----------
c
c diag = real vector of length nrow containing the norms
c
c-----------------------------------------------------------------
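c example (editorial usage sketch): to compute the 2-norm of each row,
c call rnrms (nrow, 2, a, ja, ia, diag)
c after which diag(i) holds the Euclidean norm of row i; these values
c can then be used to scale the rows (see dscaldg above).
c-----------------------------------------------------------------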
do 1 ii=1,nrow
c
c compute the norm of each row.
c
scal = 0.0d0
k1 = ia(ii)
k2 = ia(ii+1)-1
if (nrm .eq. 0) then
do 2 k=k1, k2
scal = max(scal,abs(a(k) ) )
2 continue
elseif (nrm .eq. 1) then
do 3 k=k1, k2
scal = scal + abs(a(k) )
3 continue
else
do 4 k=k1, k2
scal = scal+a(k)**2
4 continue
endif
if (nrm .eq. 2) scal = sqrt(scal)
diag(ii) = scal
1 continue
return
c-----------------------------------------------------------------------
c-------------end-of-rnrms----------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine cnrms (nrow, nrm, a, ja, ia, diag)
real*8 a(*), diag(nrow)
integer ja(*), ia(nrow+1)
c-----------------------------------------------------------------------
c gets the norms of each column of A. (choice of three norms)
c-----------------------------------------------------------------------
c on entry:
c ---------
c nrow = integer. The row dimension of A
c
c nrm = integer. norm indicator. nrm = 1 means 1-norm, nrm = 2
c means the 2-norm, nrm = 0 means max norm
c
c a,
c ja,
c ia = Matrix A in compressed sparse row format.
c
c on return:
c----------
c
c diag = real vector of length nrow containing the norms
c
c-----------------------------------------------------------------
do 10 k=1, nrow
diag(k) = 0.0d0
10 continue
do 1 ii=1,nrow
k1 = ia(ii)
k2 = ia(ii+1)-1
do 2 k=k1, k2
j = ja(k)
c update the norm of each column
if (nrm .eq. 0) then
diag(j) = max(diag(j),abs(a(k) ) )
elseif (nrm .eq. 1) then
diag(j) = diag(j) + abs(a(k) )
else
diag(j) = diag(j)+a(k)**2
endif
2 continue
1 continue
if (nrm .ne. 2) return
do 3 k=1, nrow
diag(k) = sqrt(diag(k))
3 continue
return
c-----------------------------------------------------------------------
c------------end-of-cnrms-----------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine addblk(nrowa, ncola, a, ja, ia, ipos, jpos, job,
& nrowb, ncolb, b, jb, ib, nrowc, ncolc, c, jc, ic, nzmx, ierr)
c implicit none
integer nrowa, nrowb, nrowc, ncola, ncolb, ncolc, ipos, jpos
integer nzmx, ierr, job
integer ja(1:*), ia(1:*), jb(1:*), ib(1:*), jc(1:*), ic(1:*)
real*8 a(1:*), b(1:*), c(1:*)
c-----------------------------------------------------------------------
c This subroutine adds a matrix B into a submatrix of A whose
c (1,1) element is located in the starting position (ipos, jpos).
c The resulting matrix is allowed to be larger than A (and B),
c and the resulting dimensions nrowc, ncolc will be redefined
c accordingly upon return.
c The input matrices are assumed to be sorted, i.e. in each row
c the column indices appear in ascending order in the CSR format.
c-----------------------------------------------------------------------
c on entry:
c ---------
c nrowa = number of rows in A.
c ncola = number of columns in A.
c a,ja,ia = Matrix A in compressed sparse row format with entries sorted
c nrowb = number of rows in B.
c ncolb = number of columns in B.
c b,jb,ib = Matrix B in compressed sparse row format with entries sorted
c
c nzmx = integer. The length of the arrays c and jc. addblk will
c stop if the number of nonzero elements in the matrix C
c exceeds nzmx. See ierr.
c
c on return:
c----------
c nrowc = number of rows in C.
c ncolc = number of columns in C.
c c,jc,ic = resulting matrix C in compressed sparse row sparse format
c with entries sorted in ascending order in each row.
c
c ierr = integer. serving as error message.
c ierr = 0 means normal return,
c ierr .gt. 0 means that addblk stopped while computing the
c i-th row of C with i=ierr, because the number
c of elements in C exceeds nzmx.
c
c Notes:
c-------
c this will not work if any of the two input matrices is not sorted
c-----------------------------------------------------------------------
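c example (editorial usage sketch): to append B to the right of A,
c i.e. to place the (1,1) entry of B at position (1,ncola+1), call
c call addblk (nrowa,ncola,a,ja,ia,1,ncola+1,1,
c & nrowb,ncolb,b,jb,ib,nrowc,ncolc,c,jc,ic,nzmx,ierr)
c and check that ierr = 0 on return.
c-----------------------------------------------------------------------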
logical values
integer i,j1,j2,ka,kb,kc,kamax,kbmax
values = (job .ne. 0)
ierr = 0
nrowc = max(nrowa, nrowb+ipos-1)
ncolc = max(ncola, ncolb+jpos-1)
kc = 1
kbmax = 0
ic(1) = kc
c
do 10 i=1, nrowc
if (i.le.nrowa) then
ka = ia(i)
kamax = ia(i+1)-1
else
ka = ia(nrowa+1)
end if
if ((i.ge.ipos).and.((i-ipos).le.nrowb)) then
kb = ib(i-ipos+1)
kbmax = ib(i-ipos+2)-1
else
kb = ib(nrowb+1)
end if
c
c a do-while type loop -- goes through all the elements in a row.
c
20 continue
if (ka .le. kamax) then
j1 = ja(ka)
else
j1 = ncolc+1
endif
if (kb .le. kbmax) then
j2 = jb(kb) + jpos - 1
else
j2 = ncolc+1
endif
c
c if there are more elements to be added.
c
if ((ka .le. kamax .or. kb .le. kbmax) .and.
& (j1 .le. ncolc .or. j2 .le. ncolc)) then
c
c three cases
c
if (j1 .eq. j2) then
if (values) c(kc) = a(ka)+b(kb)
jc(kc) = j1
ka = ka+1
kb = kb+1
kc = kc+1
else if (j1 .lt. j2) then
jc(kc) = j1
if (values) c(kc) = a(ka)
ka = ka+1
kc = kc+1
else if (j1 .gt. j2) then
jc(kc) = j2
if (values) c(kc) = b(kb)
kb = kb+1
kc = kc+1
endif
if (kc .gt. nzmx) goto 999
goto 20
end if
ic(i+1) = kc
10 continue
return
999 ierr = i
return
c---------end-of-addblk-------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine get1up (n,ja,ia,ju)
integer n, ja(*),ia(*),ju(*)
c----------------------------------------------------------------------
c obtains the first element of each row of the upper triangular part
c of a matrix. Assumes that the matrix is already sorted.
c-----------------------------------------------------------------------
c parameters
c input
c -----
c ja = integer array containing the column indices of aij
c ia = pointer array. ia(j) contains the position of the
c beginning of row j in ja
c
c output
c ------
c ju = integer array of length n. ju(i) is the address in ja
c of the first element of the uper triangular part of
c of A (including rthe diagonal. Thus if row i does have
c a nonzero diagonal element then ju(i) will point to it.
c This is a more general version of diapos.
c-----------------------------------------------------------------------
c local variables
integer i, k
c
do 5 i=1, n
ju(i) = 0
k = ia(i)
c
1 continue
if (ja(k) .ge. i) then
ju(i) = k
goto 5
elseif (k .lt. ia(i+1) -1) then
k=k+1
c
c go try next element in row
c
goto 1
endif
5 continue
return
c-----end-of-get1up-----------------------------------------------------
end subroutine
c----------------------------------------------------------------------
subroutine xtrows (i1,i2,a,ja,ia,ao,jao,iao,iperm,job)
integer i1,i2,ja(*),ia(*),jao(*),iao(*),iperm(*),job
real*8 a(*),ao(*)
c-----------------------------------------------------------------------
c this subroutine extracts given rows from a matrix in CSR format.
c Specifically, rows number iperm(i1), iperm(i1+1), ...., iperm(i2)
c are extracted and put in the output matrix ao, jao, iao, in CSR
c format. NOT in place.
c Youcef Saad -- coded Feb 15, 1992.
c-----------------------------------------------------------------------
c on entry:
c----------
c i1,i2 = two integers indicating the rows to be extracted.
c xtrows will extract rows iperm(i1), iperm(i1+1),..,iperm(i2),
c from original matrix and stack them in output matrix
c ao, jao, iao in csr format
c
c a, ja, ia = input matrix in csr format
c
c iperm = integer array of length nrow containing the reverse permutation
c array for the rows. row number iperm(j) in permuted matrix PA
c used to be row number j in unpermuted matrix.
c ---> a(i,j) in the permuted matrix was a(iperm(i),j)
c in the input matrix.
c
c job = integer indicating the work to be done:
c job .ne. 1 : get structure only of output matrix,
c i.e., ignore real values. (in which case arrays a
c and ao are not used nor accessed).
c job = 1 get complete data structure of output matrix.
c (i.e., including arrays ao and iao).
c------------
c on return:
c------------
c ao, jao, iao = extracted matrix stored in csr format
c note :
c if (job.ne.1) then the arrays a and ao are not used.
c----------------------------------------------------------------------c
c Y. Saad, revised May 2, 1990 c
c----------------------------------------------------------------------c
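c example (editorial usage sketch): with iperm the identity
c permutation (iperm(k) = k), the call
c call xtrows (5,10,a,ja,ia,ao,jao,iao,iperm,1)
c copies rows 5 through 10 of A, values included, into ao, jao, iao.
c----------------------------------------------------------------------c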
logical values
values = (job .eq. 1)
c
c copying
c
ko = 1
iao(1) = ko
do 100 j=i1,i2
c
c ii=iperm(j) is the index of old row to be copied.
c
ii = iperm(j)
do 60 k=ia(ii), ia(ii+1)-1
jao(ko) = ja(k)
if (values) ao(ko) = a(k)
ko = ko+1
60 continue
iao(j-i1+2) = ko
100 continue
c
return
c---------end-of-xtrows-------------------------------------------------
c-----------------------------------------------------------------------
end subroutine
c-----------------------------------------------------------------------
subroutine csrkvstr(n, ia, ja, nr, kvstr)
c-----------------------------------------------------------------------
integer n, ia(n+1), ja(*), nr, kvstr(*)
c-----------------------------------------------------------------------
c Finds block row partitioning of matrix in CSR format.
c-----------------------------------------------------------------------
c On entry:
c--------------
c n = number of matrix scalar rows
c ia,ja = input matrix sparsity structure in CSR format
c
c On return:
c---------------
c nr = number of block rows
c kvstr = first row number for each block row
c
c Notes:
c-----------
c Assumes that the matrix is sorted by columns.
c This routine does not need any workspace.
c
c-----------------------------------------------------------------------
c local variables
integer i, j, jdiff
c-----------------------------------------------------------------------
nr = 1
kvstr(1) = 1
c---------------------------------
do i = 2, n
jdiff = ia(i+1)-ia(i)
if (jdiff .eq. ia(i)-ia(i-1)) then
do j = ia(i), ia(i+1)-1
if (ja(j) .ne. ja(j-jdiff)) then
nr = nr + 1
kvstr(nr) = i
goto 299
endif
enddo
299 continue
else
300 nr = nr + 1
kvstr(nr) = i
endif
enddo
kvstr(nr+1) = n+1
c---------------------------------
return
end subroutine
c-----------------------------------------------------------------------
c------------------------end-of-csrkvstr--------------------------------
subroutine csrkvstc(n, ia, ja, nc, kvstc, iwk)
c-----------------------------------------------------------------------
integer n, ia(n+1), ja(*), nc, kvstc(*), iwk(*)
c-----------------------------------------------------------------------
c Finds block column partitioning of matrix in CSR format.
c-----------------------------------------------------------------------
c On entry:
c--------------
c n = number of matrix scalar rows
c ia,ja = input matrix sparsity structure in CSR format
c
c On return:
c---------------
c nc = number of block columns
c kvstc = first column number for each block column
c
c Work space:
c----------------
c iwk(*) of size equal to the number of scalar columns plus one.
c Assumed initialized to 0, and left initialized on return.
c
c Notes:
c-----------
c Assumes that the matrix is sorted by columns.
c
c-----------------------------------------------------------------------
c local variables
integer i, j, k, ncol
c
c-----------------------------------------------------------------------
c-----use ncol to find maximum scalar column number
ncol = 0
c-----mark the beginning position of the blocks in iwk
do i = 1, n
if (ia(i) .lt. ia(i+1)) then
j = ja(ia(i))
iwk(j) = 1
do k = ia(i)+1, ia(i+1)-1
j = ja(k)
if (ja(k-1).ne.j-1) then
iwk(j) = 1
iwk(ja(k-1)+1) = 1
endif
enddo
iwk(j+1) = 1
ncol = max0(ncol, j)
endif
enddo
c---------------------------------
nc = 1
kvstc(1) = 1
do i = 2, ncol+1
if (iwk(i).ne.0) then
nc = nc + 1
kvstc(nc) = i
iwk(i) = 0
endif
enddo
nc = nc - 1
c---------------------------------
return
end subroutine
c-----------------------------------------------------------------------
c------------------------end-of-csrkvstc--------------------------------
c-----------------------------------------------------------------------
subroutine kvstmerge(nr, kvstr, nc, kvstc, n, kvst)
c-----------------------------------------------------------------------
integer nr, kvstr(nr+1), nc, kvstc(nc+1), n, kvst(*)
c-----------------------------------------------------------------------
c Merges block partitionings, for conformal row/col pattern.
c-----------------------------------------------------------------------
c On entry:
c--------------
c nr,nc = matrix block row and block column dimension
c kvstr = first row number for each block row
c kvstc = first column number for each block column
c
c On return:
c---------------
c n = conformal row/col matrix block dimension
c kvst = conformal row/col block partitioning
c
c Notes:
c-----------
c If matrix is not square, this routine returns without warning.
c
c-----------------------------------------------------------------------
c-----local variables
integer i,j
c---------------------------------
if (kvstr(nr+1) .ne. kvstc(nc+1)) return
i = 1
j = 1
n = 1
200 if (i .gt. nr+1) then
kvst(n) = kvstc(j)
j = j + 1
elseif (j .gt. nc+1) then
kvst(n) = kvstr(i)
i = i + 1
elseif (kvstc(j) .eq. kvstr(i)) then
kvst(n) = kvstc(j)
j = j + 1
i = i + 1
elseif (kvstc(j) .lt. kvstr(i)) then
kvst(n) = kvstc(j)
j = j + 1
else
kvst(n) = kvstr(i)
i = i + 1
endif
n = n + 1
if (i.le.nr+1 .or. j.le.nc+1) goto 200
n = n - 2
c---------------------------------
return
c------------------------end-of-kvstmerge-------------------------------
end subroutine
end module
|
mutable struct BasicContMuvParameterNState{N<:Real} <: ParameterNState{Continuous, Multivariate}
value::Matrix{N}
loglikelihood::Vector{N}
logprior::Vector{N}
logtarget::Vector{N}
gradloglikelihood::Matrix{N}
gradlogprior::Matrix{N}
gradlogtarget::Matrix{N}
tensorloglikelihood::Array{N, 3}
tensorlogprior::Array{N, 3}
tensorlogtarget::Array{N, 3}
dtensorloglikelihood::Array{N, 4}
dtensorlogprior::Array{N, 4}
dtensorlogtarget::Array{N, 4}
diagnosticvalues::Matrix
size::Integer
sizesquared::Integer
sizecubed::Integer
monitor::Vector{Bool}
n::Integer
diagnostickeys::Vector{Symbol}
function BasicContMuvParameterNState{N}(
size::Integer,
n::Integer,
monitor::Vector{Bool}=[true; fill(false, 12)],
diagnostickeys::Vector{Symbol}=Symbol[],
::Type{N}=Float64,
diagnosticvalues::Matrix=Array{Any}(length(diagnostickeys), isempty(diagnostickeys) ? 0 : n)
) where N<:Real
instance = new()
fnames = fieldnames(BasicContMuvParameterNState)
for i in 2:4
l = (monitor[i] == false ? zero(Integer) : n)
setfield!(instance, fnames[i], Array{N}(l))
end
for i in (1, 5, 6, 7)
s, l = (monitor[i] == false ? (zero(Integer), zero(Integer)) : (size, n))
setfield!(instance, fnames[i], Array{N}(s, l))
end
for i in 8:10
s, l = (monitor[i] == false ? (zero(Integer), zero(Integer)) : (size, n))
setfield!(instance, fnames[i], Array{N}(s, s, l))
end
for i in 11:13
s, l = (monitor[i] == false ? (zero(Integer), zero(Integer)) : (size, n))
setfield!(instance, fnames[i], Array{N}(s, s, s, l))
end
instance.diagnosticvalues = diagnosticvalues
instance.size = size
instance.sizesquared = instance.size^2
instance.sizecubed = instance.size^3
instance.monitor = monitor
instance.n = n
instance.diagnostickeys = diagnostickeys
instance
end
end
BasicContMuvParameterNState(
size::Integer,
n::Integer,
monitor::Vector{Bool}=[true; fill(false, 12)],
diagnostickeys::Vector{Symbol}=Symbol[],
::Type{N}=Float64,
diagnosticvalues::Matrix=Array{Any}(length(diagnostickeys), isempty(diagnostickeys) ? 0 : n)
) where N<:Real =
BasicContMuvParameterNState{N}(size, n, monitor, diagnostickeys, N, diagnosticvalues)
function BasicContMuvParameterNState(
size::Integer,
n::Integer,
monitor::Vector{Symbol},
diagnostickeys::Vector{Symbol}=Symbol[],
::Type{N}=Float64,
diagnosticvalues::Matrix=Array{Any}(length(diagnostickeys), isempty(diagnostickeys) ? 0 : n)
) where N<:Real
fnames = fieldnames(BasicContMuvParameterNState)
BasicContMuvParameterNState(
size, n, [fnames[i] in monitor ? true : false for i in 1:13], diagnostickeys, N, diagnosticvalues
)
end
const ContMuvMarkovChain = BasicContMuvParameterNState
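# Example usage sketch (editorial; assumes the enclosing package and a
# compatible BasicContMuvParameterState `state` are available):
#
#   nstate = BasicContMuvParameterNState(2, 1000, [:value, :logtarget])
#   # nstate.value is a 2x1000 matrix, nstate.logtarget a 1000-vector;
#   # unmonitored fields are allocated with zero length.
#   # copy!(nstate, state, i) (defined below) then stores the i-th
#   # sampled state in slot i of the chain.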
function copy!(nstate::BasicContMuvParameterNState, state::BasicContMuvParameterState, i::Integer)
fnames = fieldnames(BasicContMuvParameterNState)
for j in 2:4
if nstate.monitor[j]
getfield(nstate, fnames[j])[i] = getfield(state, fnames[j])
end
end
for j in (1, 5, 6, 7)
if nstate.monitor[j]
getfield(nstate, fnames[j])[1+(i-1)*state.size:i*state.size] = getfield(state, fnames[j])
end
end
for j in 8:10
if nstate.monitor[j]
getfield(nstate, fnames[j])[1+(i-1)*nstate.sizesquared:i*nstate.sizesquared] = getfield(state, fnames[j])
end
end
for j in 11:13
if nstate.monitor[j]
getfield(nstate, fnames[j])[1+(i-1)*nstate.sizecubed:i*nstate.sizecubed] = getfield(state, fnames[j])
end
end
if !isempty(nstate.diagnosticvalues)
nstate.diagnosticvalues[:, i] = state.diagnosticvalues
end
end
eltype(::Type{BasicContMuvParameterNState{N}}) where {N<:Real} = N
eltype(::BasicContMuvParameterNState{N}) where {N<:Real} = N
==(z::S, w::S) where {S<:BasicContMuvParameterNState} = reduce(&, [getfield(z, n) == getfield(w, n) for n in fieldnames(S)[1:17]])
isequal(z::S, w::S) where {S<:BasicContMuvParameterNState} =
reduce(&, [isequal(getfield(z, n), getfield(w, n)) for n in fieldnames(S)[1:17]])
function show(io::IO, nstate::BasicContMuvParameterNState{N}) where N<:Real
fnames = fieldnames(BasicContMuvParameterNState)
fbool = map(n -> !isempty(getfield(nstate, n)), fnames[1:13])
indentation = " "
println(io, "BasicContMuvParameterNState:")
println(io, indentation*"eltype: $(eltype(nstate))")
println(io, indentation*"state size = $(nstate.size)")
println(io, indentation*"number of states = $(nstate.n)")
print(io, indentation*"monitored components:")
if !any(fbool)
println(io, " none")
else
print(io, "\n")
for i in 1:13
if fbool[i]
println(io, string(indentation^2, fnames[i]))
end
end
end
print(io, indentation*"diagnostics:")
if isempty(nstate.diagnostickeys)
print(io, " none")
else
for k in nstate.diagnostickeys
print(io, "\n")
print(io, string(indentation^2, k))
end
end
end
|
subroutine dqk21(f,a,b,result,abserr,resabs,resasc)
c***begin prologue dqk21
c***date written 800101 (yymmdd)
c***revision date 830518 (yymmdd)
c***category no. h2a1a2
c***keywords 21-point gauss-kronrod rules
c***author piessens,robert,appl. math. & progr. div. - k.u.leuven
c de doncker,elise,appl. math. & progr. div. - k.u.leuven
c***purpose to compute i = integral of f over (a,b), with error
c estimate
c j = integral of abs(f) over (a,b)
c***description
c
c integration rules
c standard fortran subroutine
c double precision version
c
c parameters
c on entry
c f - double precision
c function subprogram defining the integrand
c function f(x). the actual name for f needs to be
c declared e x t e r n a l in the driver program.
c
c a - double precision
c lower limit of integration
c
c b - double precision
c upper limit of integration
c
c on return
c result - double precision
c approximation to the integral i
c result is computed by applying the 21-point
c kronrod rule (resk) obtained by optimal addition
c of abscissae to the 10-point gauss rule (resg).
c
c abserr - double precision
c estimate of the modulus of the absolute error,
c which should equal or exceed abs(i-result)
c
c resabs - double precision
c approximation to the integral j
c
c resasc - double precision
c approximation to the integral of abs(f-i/(b-a))
c over (a,b)
c
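c example (editorial usage sketch) - to approximate the
c integral of a user-supplied double precision function
c g over (0,1):
c external g
c double precision g,res,err,rab,ras
c call dqk21(g,0.0d+00,1.0d+00,res,err,rab,ras)
c res then holds the 21-point kronrod approximation and
c err the corresponding error estimate.
c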
c***references (none)
c***routines called d1mach
c***end prologue dqk21
c
double precision a,absc,abserr,b,centr,dabs,dhlgth,dmax1,dmin1,
* d1mach,epmach,f,fc,fsum,fval1,fval2,fv1,fv2,hlgth,resabs,resasc,
* resg,resk,reskh,result,uflow,wg,wgk,xgk
integer j,jtw,jtwm1
external f
c
dimension fv1(10),fv2(10),wg(5),wgk(11),xgk(11)
c
c the abscissae and weights are given for the interval (-1,1).
c because of symmetry only the positive abscissae and their
c corresponding weights are given.
c
c xgk - abscissae of the 21-point kronrod rule
c xgk(2), xgk(4), ... abscissae of the 10-point
c gauss rule
c xgk(1), xgk(3), ... abscissae which are optimally
c added to the 10-point gauss rule
c
c wgk - weights of the 21-point kronrod rule
c
c wg - weights of the 10-point gauss rule
c
c
c gauss quadrature weights and kronrod quadrature abscissae and weights
c as evaluated with 80 decimal digit arithmetic by l. w. fullerton,
c bell labs, nov. 1981.
c
data wg ( 1) / 0.0666713443 0868813759 3568809893 332 d0 /
data wg ( 2) / 0.1494513491 5058059314 5776339657 697 d0 /
data wg ( 3) / 0.2190863625 1598204399 5534934228 163 d0 /
data wg ( 4) / 0.2692667193 0999635509 1226921569 469 d0 /
data wg ( 5) / 0.2955242247 1475287017 3892994651 338 d0 /
c
data xgk ( 1) / 0.9956571630 2580808073 5527280689 003 d0 /
data xgk ( 2) / 0.9739065285 1717172007 7964012084 452 d0 /
data xgk ( 3) / 0.9301574913 5570822600 1207180059 508 d0 /
data xgk ( 4) / 0.8650633666 8898451073 2096688423 493 d0 /
data xgk ( 5) / 0.7808177265 8641689706 3717578345 042 d0 /
data xgk ( 6) / 0.6794095682 9902440623 4327365114 874 d0 /
data xgk ( 7) / 0.5627571346 6860468333 9000099272 694 d0 /
data xgk ( 8) / 0.4333953941 2924719079 9265943165 784 d0 /
data xgk ( 9) / 0.2943928627 0146019813 1126603103 866 d0 /
data xgk ( 10) / 0.1488743389 8163121088 4826001129 720 d0 /
data xgk ( 11) / 0.0000000000 0000000000 0000000000 000 d0 /
c
data wgk ( 1) / 0.0116946388 6737187427 8064396062 192 d0 /
data wgk ( 2) / 0.0325581623 0796472747 8818972459 390 d0 /
data wgk ( 3) / 0.0547558965 7435199603 1381300244 580 d0 /
data wgk ( 4) / 0.0750396748 1091995276 7043140916 190 d0 /
data wgk ( 5) / 0.0931254545 8369760553 5065465083 366 d0 /
data wgk ( 6) / 0.1093871588 0229764189 9210590325 805 d0 /
data wgk ( 7) / 0.1234919762 6206585107 7958109831 074 d0 /
data wgk ( 8) / 0.1347092173 1147332592 8054001771 707 d0 /
data wgk ( 9) / 0.1427759385 7706008079 7094273138 717 d0 /
data wgk ( 10) / 0.1477391049 0133849137 4841515972 068 d0 /
data wgk ( 11) / 0.1494455540 0291690566 4936468389 821 d0 /
c
c
c list of major variables
c -----------------------
c
c centr - mid point of the interval
c hlgth - half-length of the interval
c absc - abscissa
c fval* - function value
c resg - result of the 10-point gauss formula
c resk - result of the 21-point kronrod formula
c reskh - approximation to the mean value of f over (a,b),
c i.e. to i/(b-a)
c
c
c machine dependent constants
c ---------------------------
c
c epmach is the largest relative spacing.
c uflow is the smallest positive magnitude.
c
c***first executable statement dqk21
epmach = d1mach(4)
uflow = d1mach(1)
c
centr = 0.5d+00*(a+b)
hlgth = 0.5d+00*(b-a)
dhlgth = dabs(hlgth)
c
c compute the 21-point kronrod approximation to
c the integral, and estimate the absolute error.
c
resg = 0.0d+00
fc = f(centr)
resk = wgk(11)*fc
resabs = dabs(resk)
do 10 j=1,5
jtw = 2*j
absc = hlgth*xgk(jtw)
fval1 = f(centr-absc)
fval2 = f(centr+absc)
fv1(jtw) = fval1
fv2(jtw) = fval2
fsum = fval1+fval2
resg = resg+wg(j)*fsum
resk = resk+wgk(jtw)*fsum
resabs = resabs+wgk(jtw)*(dabs(fval1)+dabs(fval2))
10 continue
do 15 j = 1,5
jtwm1 = 2*j-1
absc = hlgth*xgk(jtwm1)
fval1 = f(centr-absc)
fval2 = f(centr+absc)
fv1(jtwm1) = fval1
fv2(jtwm1) = fval2
fsum = fval1+fval2
resk = resk+wgk(jtwm1)*fsum
resabs = resabs+wgk(jtwm1)*(dabs(fval1)+dabs(fval2))
15 continue
reskh = resk*0.5d+00
resasc = wgk(11)*dabs(fc-reskh)
do 20 j=1,10
resasc = resasc+wgk(j)*(dabs(fv1(j)-reskh)+dabs(fv2(j)-reskh))
20 continue
result = resk*hlgth
resabs = resabs*dhlgth
resasc = resasc*dhlgth
abserr = dabs((resk-resg)*hlgth)
if(resasc.ne.0.0d+00.and.abserr.ne.0.0d+00)
* abserr = resasc*dmin1(0.1d+01,(0.2d+03*abserr/resasc)**1.5d+00)
if(resabs.gt.uflow/(0.5d+02*epmach)) abserr = dmax1
* ((epmach*0.5d+02)*resabs,abserr)
return
end
|
By this time, however, North Korean logistics had been stretched to their limit, and resupply became increasingly difficult. By the beginning of August, the North Korean units operating in the area were receiving little to no food and ammunition, instead relying on captured UN weapons and foraging for what they could find. They were also exhausted from over a month of advancing, though morale remained high among the 766th troops. The 766th Regiment specialized in raiding UN supply lines, and effectively mounted small disruptive attacks against UN targets to equip themselves.
|
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g✝ : E → F
f' f₀' f₁' g'✝ e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
g : F → G
g' : F →L[𝕜] G
L' : Filter F
hg : HasFDerivAtFilter g g' (f x) L'
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L'
⊢ HasFDerivAtFilter (g ∘ f) (ContinuousLinearMap.comp g' f') x L
[PROOFSTEP]
let eq₁ := (g'.isBigO_comp _ _).trans_isLittleO hf
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g✝ : E → F
f' f₀' f₁' g'✝ e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
g : F → G
g' : F →L[𝕜] G
L' : Filter F
hg : HasFDerivAtFilter g g' (f x) L'
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L'
eq₁ : (fun x' => ↑g' (f x' - f x - ↑f' (x' - x))) =o[L] fun x' => x' - x :=
IsBigO.trans_isLittleO (isBigO_comp g' (fun x' => f x' - f x - ↑f' (x' - x)) L) hf
⊢ HasFDerivAtFilter (g ∘ f) (ContinuousLinearMap.comp g' f') x L
[PROOFSTEP]
let eq₂ := (hg.comp_tendsto hL).trans_isBigO hf.isBigO_sub
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g✝ : E → F
f' f₀' f₁' g'✝ e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
g : F → G
g' : F →L[𝕜] G
L' : Filter F
hg : HasFDerivAtFilter g g' (f x) L'
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L'
eq₁ : (fun x' => ↑g' (f x' - f x - ↑f' (x' - x))) =o[L] fun x' => x' - x :=
IsBigO.trans_isLittleO (isBigO_comp g' (fun x' => f x' - f x - ↑f' (x' - x)) L) hf
eq₂ : ((fun x' => g x' - g (f x) - ↑g' (x' - f x)) ∘ f) =o[L] fun x' => x' - x :=
IsLittleO.trans_isBigO (IsLittleO.comp_tendsto hg hL) (isBigO_sub hf)
⊢ HasFDerivAtFilter (g ∘ f) (ContinuousLinearMap.comp g' f') x L
[PROOFSTEP]
refine' eq₂.triangle (eq₁.congr_left fun x' => _)
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g✝ : E → F
f' f₀' f₁' g'✝ e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
g : F → G
g' : F →L[𝕜] G
L' : Filter F
hg : HasFDerivAtFilter g g' (f x) L'
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L'
eq₁ : (fun x' => ↑g' (f x' - f x - ↑f' (x' - x))) =o[L] fun x' => x' - x :=
IsBigO.trans_isLittleO (isBigO_comp g' (fun x' => f x' - f x - ↑f' (x' - x)) L) hf
eq₂ : ((fun x' => g x' - g (f x) - ↑g' (x' - f x)) ∘ f) =o[L] fun x' => x' - x :=
IsLittleO.trans_isBigO (IsLittleO.comp_tendsto hg hL) (isBigO_sub hf)
x' : E
⊢ ↑g' (f x' - f x - ↑f' (x' - x)) = ↑g' (f x' - f x) - ↑(ContinuousLinearMap.comp g' f') (x' - x)
[PROOFSTEP]
simp
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g✝ : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x✝ : E
s✝ t✝ : Set E
L L₁ L₂ : Filter E
g : F → G
f : E → F
x : E
y : F
s : Set E
t : Set F
hg : DifferentiableWithinAt 𝕜 g t y
hf : DifferentiableWithinAt 𝕜 f s x
h : MapsTo f s t
hxs : UniqueDiffWithinAt 𝕜 s x
hy : f x = y
v : E
⊢ ↑(fderivWithin 𝕜 g t y) (↑(fderivWithin 𝕜 f s x) v) = ↑(fderivWithin 𝕜 (g ∘ f) s x) v
[PROOFSTEP]
subst y
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g✝ : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x✝ : E
s✝ t✝ : Set E
L L₁ L₂ : Filter E
g : F → G
f : E → F
x : E
s : Set E
t : Set F
hf : DifferentiableWithinAt 𝕜 f s x
h : MapsTo f s t
hxs : UniqueDiffWithinAt 𝕜 s x
v : E
hg : DifferentiableWithinAt 𝕜 g t (f x)
⊢ ↑(fderivWithin 𝕜 g t (f x)) (↑(fderivWithin 𝕜 f s x) v) = ↑(fderivWithin 𝕜 (g ∘ f) s x) v
[PROOFSTEP]
rw [fderivWithin.comp x hg hf h hxs]
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g✝ : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x✝ : E
s✝ t✝ : Set E
L L₁ L₂ : Filter E
g : F → G
f : E → F
x : E
s : Set E
t : Set F
hf : DifferentiableWithinAt 𝕜 f s x
h : MapsTo f s t
hxs : UniqueDiffWithinAt 𝕜 s x
v : E
hg : DifferentiableWithinAt 𝕜 g t (f x)
⊢ ↑(fderivWithin 𝕜 g t (f x)) (↑(fderivWithin 𝕜 f s x) v) = ↑(comp (fderivWithin 𝕜 g t (f x)) (fderivWithin 𝕜 f s x)) v
[PROOFSTEP]
rfl
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g✝ : E → F
f' f₀' f₁' g'✝ e : E →L[𝕜] F
x : E
s t✝ : Set E
L L₁ L₂ : Filter E
g' : G → G'
g : F → G
t : Set F
u : Set G
y : F
y' : G
hg' : DifferentiableWithinAt 𝕜 g' u y'
hg : DifferentiableWithinAt 𝕜 g t y
hf : DifferentiableWithinAt 𝕜 f s x
h2g : MapsTo g t u
h2f : MapsTo f s t
h3g : g y = y'
h3f : f x = y
hxs : UniqueDiffWithinAt 𝕜 s x
⊢ fderivWithin 𝕜 (g' ∘ g ∘ f) s x =
ContinuousLinearMap.comp (fderivWithin 𝕜 g' u y')
(ContinuousLinearMap.comp (fderivWithin 𝕜 g t y) (fderivWithin 𝕜 f s x))
[PROOFSTEP]
substs h3g h3f
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g✝ : E → F
f' f₀' f₁' g'✝ e : E →L[𝕜] F
x : E
s t✝ : Set E
L L₁ L₂ : Filter E
g' : G → G'
g : F → G
t : Set F
u : Set G
hf : DifferentiableWithinAt 𝕜 f s x
h2g : MapsTo g t u
h2f : MapsTo f s t
hxs : UniqueDiffWithinAt 𝕜 s x
hg : DifferentiableWithinAt 𝕜 g t (f x)
hg' : DifferentiableWithinAt 𝕜 g' u (g (f x))
⊢ fderivWithin 𝕜 (g' ∘ g ∘ f) s x =
ContinuousLinearMap.comp (fderivWithin 𝕜 g' u (g (f x)))
(ContinuousLinearMap.comp (fderivWithin 𝕜 g t (f x)) (fderivWithin 𝕜 f s x))
[PROOFSTEP]
exact
(hg'.hasFDerivWithinAt.comp x (hg.hasFDerivWithinAt.comp x hf.hasFDerivWithinAt h2f) <| h2g.comp h2f).fderivWithin hxs
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g✝ : E → F
f' f₀' f₁' g'✝ e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
g : F → G
g' : F →L[𝕜] G
hg : HasStrictFDerivAt g g' (f x)
hf : HasStrictFDerivAt f f' x
⊢ (fun x =>
↑g' (((fun p => (f p.fst, f p.snd)) x).fst - ((fun p => (f p.fst, f p.snd)) x).snd) -
↑(comp g' f') (x.fst - x.snd)) =o[𝓝 (x, x)]
fun p => p.fst - p.snd
[PROOFSTEP]
simpa only [g'.map_sub, f'.coe_comp'] using (g'.isBigO_comp _ _).trans_isLittleO hf
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L
hx : f x = x
n : ℕ
⊢ HasFDerivAtFilter f^[n] (f' ^ n) x L
[PROOFSTEP]
induction' n with n ihn
[GOAL]
case zero
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L
hx : f x = x
⊢ HasFDerivAtFilter f^[Nat.zero] (f' ^ Nat.zero) x L
[PROOFSTEP]
exact hasFDerivAtFilter_id x L
[GOAL]
case succ
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L
hx : f x = x
n : ℕ
ihn : HasFDerivAtFilter f^[n] (f' ^ n) x L
⊢ HasFDerivAtFilter f^[Nat.succ n] (f' ^ Nat.succ n) x L
[PROOFSTEP]
rw [Function.iterate_succ, pow_succ']
[GOAL]
case succ
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L
hx : f x = x
n : ℕ
ihn : HasFDerivAtFilter f^[n] (f' ^ n) x L
⊢ HasFDerivAtFilter (f^[n] ∘ f) (f' ^ n * f') x L
[PROOFSTEP]
rw [← hx] at ihn
[GOAL]
case succ
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAtFilter f f' x L
hL : Tendsto f L L
hx : f x = x
n : ℕ
ihn : HasFDerivAtFilter f^[n] (f' ^ n) (f x) L
⊢ HasFDerivAtFilter (f^[n] ∘ f) (f' ^ n * f') x L
[PROOFSTEP]
exact ihn.comp x hf hL
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAt f f' x
hx : f x = x
n : ℕ
⊢ HasFDerivAt f^[n] (f' ^ n) x
[PROOFSTEP]
refine' HasFDerivAtFilter.iterate hf _ hx n
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAt f f' x
hx : f x = x
n : ℕ
⊢ Tendsto f (𝓝 x) (𝓝 x)
[PROOFSTEP]
have := hf.continuousAt
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAt f f' x
hx : f x = x
n : ℕ
this : ContinuousAt f x
⊢ Tendsto f (𝓝 x) (𝓝 x)
[PROOFSTEP]
unfold ContinuousAt at this
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAt f f' x
hx : f x = x
n : ℕ
this : Tendsto f (𝓝 x) (𝓝 (f x))
⊢ Tendsto f (𝓝 x) (𝓝 x)
[PROOFSTEP]
convert this
[GOAL]
case h.e'_5.h.e'_3
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivAt f f' x
hx : f x = x
n : ℕ
this : Tendsto f (𝓝 x) (𝓝 (f x))
⊢ x = f x
[PROOFSTEP]
exact hx.symm
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivWithinAt f f' s x
hx : f x = x
hs : MapsTo f s s
n : ℕ
⊢ HasFDerivWithinAt f^[n] (f' ^ n) s x
[PROOFSTEP]
refine' HasFDerivAtFilter.iterate hf _ hx n
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivWithinAt f f' s x
hx : f x = x
hs : MapsTo f s s
n : ℕ
⊢ Tendsto f (𝓝[s] x) (𝓝[s] x)
[PROOFSTEP]
rw [_root_.nhdsWithin]
-- Porting note: Added `rw` to get rid of an error
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivWithinAt f f' s x
hx : f x = x
hs : MapsTo f s s
n : ℕ
⊢ Tendsto f (𝓝 x ⊓ 𝓟 s) (𝓝 x ⊓ 𝓟 s)
[PROOFSTEP]
convert tendsto_inf.2 ⟨hf.continuousWithinAt, _⟩
[GOAL]
case h.e'_5.h.e'_3.h.e'_3
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivWithinAt f f' s x
hx : f x = x
hs : MapsTo f s s
n : ℕ
⊢ x = f x
case convert_2
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasFDerivWithinAt f f' s x
hx : f x = x
hs : MapsTo f s s
n : ℕ
⊢ Tendsto f (𝓝[s] x) (𝓟 s)
[PROOFSTEP]
exacts [hx.symm, (tendsto_principal_principal.2 hs).mono_left inf_le_right]
[GOAL]
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasStrictFDerivAt f f' x
hx : f x = x
n : ℕ
⊢ HasStrictFDerivAt f^[n] (f' ^ n) x
[PROOFSTEP]
induction' n with n ihn
[GOAL]
case zero
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasStrictFDerivAt f f' x
hx : f x = x
⊢ HasStrictFDerivAt f^[Nat.zero] (f' ^ Nat.zero) x
[PROOFSTEP]
exact hasStrictFDerivAt_id x
[GOAL]
case succ
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasStrictFDerivAt f f' x
hx : f x = x
n : ℕ
ihn : HasStrictFDerivAt f^[n] (f' ^ n) x
⊢ HasStrictFDerivAt f^[Nat.succ n] (f' ^ Nat.succ n) x
[PROOFSTEP]
rw [Function.iterate_succ, pow_succ']
[GOAL]
case succ
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasStrictFDerivAt f f' x
hx : f x = x
n : ℕ
ihn : HasStrictFDerivAt f^[n] (f' ^ n) x
⊢ HasStrictFDerivAt (f^[n] ∘ f) (f' ^ n * f') x
[PROOFSTEP]
rw [← hx] at ihn
[GOAL]
case succ
𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type u_4
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type u_5
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f✝ f₀ f₁ g : E → F
f'✝ f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
f : E → E
f' : E →L[𝕜] E
hf : HasStrictFDerivAt f f' x
hx : f x = x
n : ℕ
ihn : HasStrictFDerivAt f^[n] (f' ^ n) (f x)
⊢ HasStrictFDerivAt (f^[n] ∘ f) (f' ^ n * f') x
[PROOFSTEP]
exact ihn.comp x hf
|
lemma holomorphic_on_imp_differentiable_at: "\<lbrakk>f holomorphic_on s; open s; x \<in> s\<rbrakk> \<Longrightarrow> f field_differentiable (at x)" |
(** * Hoare: Hoare Logic, Part I *)
Set Warnings "-notation-overridden,-parsing,-deprecated-hint-without-locality".
From LF Require Import Maps.
From Coq Require Import Bool.Bool.
From Coq Require Import Arith.Arith.
From Coq Require Import Arith.EqNat.
From Coq Require Import Arith.PeanoNat. Import Nat.
From Coq Require Import Lia.
From LF Require Export Imp.
(** In the final chapter of _Logical Foundations_ (_Software
Foundations_, volume 1), we began applying the mathematical tools
developed in the first part of the course to studying the theory
of a small programming language, Imp.
- We defined a type of _abstract syntax trees_ for Imp, together
with an _evaluation relation_ (a partial function on states)
that specifies the _operational semantics_ of programs.
The language we defined, though small, captures some of the key
features of full-blown languages like C, C++, and Java,
including the fundamental notion of mutable state and some
common control structures.
- We proved a number of _metatheoretic properties_ -- "meta" in
the sense that they are properties of the language as a whole,
rather than of particular programs in the language. These
included:
- determinism of evaluation
- equivalence of some different ways of writing down the
definitions (e.g., functional and relational definitions of
arithmetic expression evaluation)
- guaranteed termination of certain classes of programs
- correctness (in the sense of preserving meaning) of a number
of useful program transformations
- behavioral equivalence of programs (in the [Equiv]
chapter). *)
(** If we stopped here, we would already have something useful: a set
of tools for defining and discussing programming languages and
language features that are mathematically precise, flexible, and
easy to work with, applied to a set of key properties. All of
these properties are things that language designers, compiler
writers, and users might care about knowing. Indeed, many of them
are so fundamental to our understanding of the programming
languages we deal with that we might not consciously recognize
them as "theorems." But properties that seem intuitively obvious
can sometimes be quite subtle (sometimes also subtly wrong!).
We'll return to the theme of metatheoretic properties of whole
languages later in this volume when we discuss _types_ and _type
soundness_. In this chapter, though, we turn to a different set
of issues.
*)
(** Our goal is to carry out some simple examples of _program
verification_ -- i.e., to use the precise definition of Imp to
prove formally that particular programs satisfy particular
specifications of their behavior. We'll develop a reasoning
system called _Floyd-Hoare Logic_ -- often shortened to just
_Hoare Logic_ -- in which each of the syntactic constructs of Imp
is equipped with a generic "proof rule" that can be used to reason
compositionally about the correctness of programs involving this
construct.
Hoare Logic originated in the 1960s, and it continues to be the
subject of intensive research right up to the present day. It
lies at the core of a multitude of tools that are being used in
academia and industry to specify and verify real software systems.
Hoare Logic combines two beautiful ideas: a natural way of writing
down _specifications_ of programs, and a _compositional proof
technique_ for proving that programs are correct with respect to
such specifications -- where by "compositional" we mean that the
structure of proofs directly mirrors the structure of the programs
that they are about. *)
(* ################################################################# *)
(** * Assertions *)
(** An _assertion_ is a claim about the current state of memory. We will
use assertions to write program specifications. *)
Definition Assertion := state -> Prop.
(** For example,
- [fun st => st X = 3] holds if the value of [X] according to [st] is [3],
- [fun st => True] always holds, and
- [fun st => False] never holds. *)
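(** For one more example in this low-level style (our own, not taken
    from the chapter), the claim "the sum of [X] and [Y] is [10]" is
    written as an explicit function of the state: *)
Definition assn_sum_example : Assertion :=
  fun st => st X + st Y = 10.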
(** **** Exercise: 1 star, standard, optional (assertions)
Paraphrase the following assertions in English (or your favorite
natural language). *)
Module ExAssertions.
Definition assn1 : Assertion := fun st => st X <= st Y.
Definition assn2 : Assertion :=
fun st => st X = 3 \/ st X <= st Y.
Definition assn3 : Assertion :=
fun st => st Z * st Z <= st X /\
~ (((S (st Z)) * (S (st Z))) <= st X).
Definition assn4 : Assertion :=
fun st => st Z = max (st X) (st Y).
(* FILL IN HERE *)
End ExAssertions.
(** [] *)
(** This way of writing assertions can be a little bit heavy,
for two reasons: (1) every single assertion that we ever write is
going to begin with [fun st => ]; and (2) this state [st] is the
only one that we ever use to look up variables in assertions (we
will never need to talk about two different memory states at the
same time). For discussing examples informally, we'll adopt some
simplifying conventions: we'll drop the initial [fun st =>], and
we'll write just [X] to mean [st X]. Thus, instead of writing
fun st => st X = m
we'll write just
X = m
*)
(** This example also illustrates a convention that we'll use
throughout the Hoare Logic chapters: in informal assertions,
capital letters like [X], [Y], and [Z] are Imp variables, while
lowercase letters like [x], [y], [m], and [n] are ordinary Coq
variables (of type [nat]). This is why, when translating from
informal to formal, we replace [X] with [st X] but leave [m]
alone. *)
(** Given two assertions [P] and [Q], we say that [P] _implies_ [Q],
written [P ->> Q], if, whenever [P] holds in some state [st], [Q]
also holds. *)
Definition assert_implies (P Q : Assertion) : Prop :=
forall st, P st -> Q st.
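(** For a quick concrete instance (our own, not from the chapter):
    the assertion "[X] equals [3]" implies the assertion "[X] is at
    most [3]" in this sense. *)
Example assert_implies_example :
  assert_implies (fun st => st X = 3) (fun st => st X <= 3).
Proof.
  unfold assert_implies. intros st H. lia.
Qed.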
Declare Scope hoare_spec_scope.
Notation "P ->> Q" := (assert_implies P Q)
(at level 80) : hoare_spec_scope.
Open Scope hoare_spec_scope.
(** (The [hoare_spec_scope] annotation here tells Coq that this
notation is not global but is intended to be used in particular
contexts. The [Open Scope] tells Coq that this file is one such
context.) *)
(** We'll also want the "iff" variant of implication between
assertions: *)
Notation "P <<->> Q" :=
(P ->> Q /\ Q ->> P) (at level 80) : hoare_spec_scope.
(* ================================================================= *)
(** ** Notations for Assertions *)
(** The convention described above can be implemented with a little
Coq syntax magic, using coercions and annotation scopes, much as
we did with [%imp] in [Imp], to automatically lift [aexp]s,
numbers, and [Prop]s into [Assertion]s when they appear in the
[%assertion] scope or when Coq knows the type of an expression is
[Assertion].
There is no need to understand the details. *)
Definition Aexp : Type := state -> nat.
Definition assert_of_Prop (P : Prop) : Assertion := fun _ => P.
Definition Aexp_of_nat (n : nat) : Aexp := fun _ => n.
Definition Aexp_of_aexp (a : aexp) : Aexp := fun st => aeval st a.
Coercion assert_of_Prop : Sortclass >-> Assertion.
Coercion Aexp_of_nat : nat >-> Aexp.
Coercion Aexp_of_aexp : aexp >-> Aexp.
Arguments assert_of_Prop /.
Arguments Aexp_of_nat /.
Arguments Aexp_of_aexp /.
Declare Scope assertion_scope.
Bind Scope assertion_scope with Assertion.
Bind Scope assertion_scope with Aexp.
Delimit Scope assertion_scope with assertion.
Notation assert P := (P%assertion : Assertion).
Notation mkAexp a := (a%assertion : Aexp).
Notation "~ P" := (fun st => ~ assert P st) : assertion_scope.
Notation "P /\ Q" := (fun st => assert P st /\ assert Q st) : assertion_scope.
Notation "P \/ Q" := (fun st => assert P st \/ assert Q st) : assertion_scope.
Notation "P -> Q" := (fun st => assert P st -> assert Q st) : assertion_scope.
Notation "P <-> Q" := (fun st => assert P st <-> assert Q st) : assertion_scope.
Notation "a = b" := (fun st => mkAexp a st = mkAexp b st) : assertion_scope.
Notation "a <> b" := (fun st => mkAexp a st <> mkAexp b st) : assertion_scope.
Notation "a <= b" := (fun st => mkAexp a st <= mkAexp b st) : assertion_scope.
Notation "a < b" := (fun st => mkAexp a st < mkAexp b st) : assertion_scope.
Notation "a >= b" := (fun st => mkAexp a st >= mkAexp b st) : assertion_scope.
Notation "a > b" := (fun st => mkAexp a st > mkAexp b st) : assertion_scope.
Notation "a + b" := (fun st => mkAexp a st + mkAexp b st) : assertion_scope.
Notation "a - b" := (fun st => mkAexp a st - mkAexp b st) : assertion_scope.
Notation "a * b" := (fun st => mkAexp a st * mkAexp b st) : assertion_scope.
(** One small limitation of this approach is that we don't have
an automatic way to coerce function applications that appear
within an assertion to make appropriate use of the state.
Instead, we use an explicit [ap] operator to lift the function. *)
Definition ap {X} (f : nat -> X) (x : Aexp) :=
fun st => f (x st).
Definition ap2 {X} (f : nat -> nat -> X) (x : Aexp) (y : Aexp) (st : state) :=
f (x st) (y st).
Module ExPrettyAssertions.
Definition ex1 : Assertion := X = 3.
Definition ex2 : Assertion := True.
Definition ex3 : Assertion := False.
Definition assn1 : Assertion := X <= Y.
Definition assn2 : Assertion := X = 3 \/ X <= Y.
Definition assn3 : Assertion :=
Z * Z <= X /\ ~ (((ap S Z) * (ap S Z)) <= X).
Definition assn4 : Assertion :=
Z = ap2 max X Y.
End ExPrettyAssertions.
(* ################################################################# *)
(** * Hoare Triples, Informally *)
(** A _Hoare triple_ is a claim about the state before and after executing
a command. A standard notation is
{P} c {Q}
meaning:
- If command [c] begins execution in a state satisfying assertion [P],
- and if [c] eventually terminates in some final state,
- then that final state will satisfy the assertion [Q].
Assertion [P] is called the _precondition_ of the triple, and [Q] is
the _postcondition_.
Because single braces are already used for other things in Coq, we'll write
Hoare triples with double braces:
{{P}} c {{Q}}
*)
(** For example,
- [{{X = 0}} X := X + 1 {{X = 1}}] is a valid Hoare triple,
stating that command [X := X + 1] would transform a state in
which [X = 0] to a state in which [X = 1].
- [forall m, {{X = m}} X := X + 1 {{X = m + 1}}] is a
_proposition_ stating that the Hoare triple [{{X = m}} X := X +
1 {{X = m + 1}}] is valid for any choice of [m]. Note that [m]
in the two assertions and the command in the middle is a
reference to the Coq variable [m], which is bound outside the
Hoare triple, not to an Imp variable. *)
(** **** Exercise: 1 star, standard, optional (triples)
Paraphrase the following in English.
1) {{True}} c {{X = 5}}
2) forall m, {{X = m}} c {{X = m + 5}}
3) {{X <= Y}} c {{Y <= X}}
4) {{True}} c {{False}}
5) forall m,
{{X = m}}
c
{{Y = real_fact m}}
6) forall m,
{{X = m}}
c
{{(Z * Z) <= m /\ ~ (((S Z) * (S Z)) <= m)}}
*)
(* FILL IN HERE
[] *)
(** **** Exercise: 1 star, standard, optional (valid_triples)
Which of the following Hoare triples are _valid_ -- i.e., the
claimed relation between [P], [c], and [Q] is true?
1) {{True}} X := 5 {{X = 5}}
2) {{X = 2}} X := X + 1 {{X = 3}}
3) {{True}} X := 5; Y := 0 {{X = 5}}
4) {{X = 2 /\ X = 3}} X := 5 {{X = 0}}
5) {{True}} skip {{False}}
6) {{False}} skip {{True}}
7) {{True}} while true do skip end {{False}}
8) {{X = 0}}
while X = 0 do X := X + 1 end
{{X = 1}}
9) {{X = 1}}
while ~(X = 0) do X := X + 1 end
{{X = 100}}
*)
(* FILL IN HERE
[] *)
(* ################################################################# *)
(** * Hoare Triples, Formally *)
(** We can formalize Hoare triples and their notation in Coq as follows: *)
Definition hoare_triple
(P : Assertion) (c : com) (Q : Assertion) : Prop :=
forall st st',
st =[ c ]=> st' ->
P st ->
Q st'.
Notation "{{ P }} c {{ Q }}" :=
(hoare_triple P c Q) (at level 90, c custom com at level 99)
: hoare_spec_scope.
Check ({{True}} X := 0 {{True}}).
(** **** Exercise: 1 star, standard (hoare_post_true) *)
(** Prove that if [Q] holds in every state, then any triple with [Q]
as its postcondition is valid. *)
Theorem hoare_post_true : forall (P Q : Assertion) c,
(forall st, Q st) ->
{{P}} c {{Q}}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 1 star, standard (hoare_pre_false) *)
(** Prove that if [P] holds in no state, then any triple with [P] as
its precondition is valid. *)
Theorem hoare_pre_false : forall (P Q : Assertion) c,
(forall st, ~ (P st)) ->
{{P}} c {{Q}}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ################################################################# *)
(** * Proof Rules *)
(** The goal of Hoare logic is to provide a _compositional_
method for proving the validity of specific Hoare triples. That
is, we want the structure of a program's correctness proof to
mirror the structure of the program itself. To this end, in the
sections below, we'll introduce a rule for reasoning about each of
the different syntactic forms of commands in Imp -- one for
assignment, one for sequencing, one for conditionals, etc. -- plus
a couple of "structural" rules for gluing things together. We
will then be able to prove programs correct using these proof
rules, without ever unfolding the definition of [hoare_triple]. *)
(* ================================================================= *)
(** ** Assignment *)
(** The rule for assignment is the most fundamental of the Hoare
logic proof rules. Here's how it works.
Consider this incomplete Hoare triple:
{{ ??? }} X := Y {{ X = 1 }}
We want to assign [Y] to [X] and finish in a state where [X] is [1].
What could the precondition be?
One possibility is [Y = 1], because if [Y] is already [1] then
assigning it to [X] causes [X] to be [1]. That leads to a valid
Hoare triple:
{{ Y = 1 }} X := Y {{ X = 1 }}
It may seem as though coming up with that precondition must have
taken some clever thought. But there is a mechanical way we could
have done it: if we take the postcondition [X = 1] and in it
replace [X] with [Y]---that is, replace the left-hand side of the
assignment statement with the right-hand side---we get the
precondition, [Y = 1]. *)
(** That same idea works in more complicated cases. For
example:
{{ ??? }} X := X + Y {{ X = 1 }}
If we replace the [X] in [X = 1] with [X + Y], we get [X + Y = 1].
That again leads to a valid Hoare triple:
{{ X + Y = 1 }} X := X + Y {{ X = 1 }}
Why does this technique work? The postcondition identifies some
property [P] that we want to hold of the variable [X] being
assigned. In this case, [P] is "equals [1]". To complete the
triple and make it valid, we need to identify a precondition that
guarantees that property will hold of [X]. Such a precondition
must ensure that the same property holds of _whatever is being
assigned to_ [X]. So, in the example, we need "equals [1]" to
hold of [X + Y]. That's exactly what the technique guarantees. *)
(** In general, the postcondition could be some arbitrary assertion
[Q], and the right-hand side of the assignment could be some
arbitrary arithmetic expression [a]:
{{ ??? }} X := a {{ Q }}
The precondition would then be [Q], but with any occurrences of
[X] in it replaced by [a].
Let's introduce a notation for this idea of replacing occurrences:
Define [Q [X |-> a]] to mean "[Q] where [a] is substituted in
place of [X]".
That yields the Hoare logic rule for assignment:
{{ Q [X |-> a] }} X := a {{ Q }}
One way of reading that rule is: If you want statement [X := a]
to terminate in a state that satisfies assertion [Q], then it
suffices to start in a state that also satisfies [Q], except
where [a] is substituted for every occurrence of [X].
To many people, this rule seems "backwards" at first, because
it proceeds from the postcondition to the precondition. Actually
it makes good sense to go in this direction: the postcondition is
often what is more important, because it characterizes what we
can assume after running the code.
Nonetheless, it's also possible to formulate a "forward" assignment
rule. We'll do that later in some exercises. *)
(** Here are some valid instances of the assignment rule:
{{ (X <= 5) [X |-> X + 1] }} (that is, X + 1 <= 5)
X := X + 1
{{ X <= 5 }}
{{ (X = 3) [X |-> 3] }} (that is, 3 = 3)
X := 3
{{ X = 3 }}
{{ (0 <= X /\ X <= 5) [X |-> 3] }} (that is, 0 <= 3 /\ 3 <= 5)
X := 3
{{ 0 <= X /\ X <= 5 }}
*)
(** To formalize the rule, we must first formalize the idea of
"substituting an expression for an Imp variable in an assertion",
which we refer to as assertion substitution, or [assn_sub]. That
is, given a proposition [P], a variable [X], and an arithmetic
expression [a], we want to derive another proposition [P'] that is
just the same as [P] except that [P'] should mention [a] wherever
[P] mentions [X]. *)
(** Since [P] is an arbitrary Coq assertion, we can't directly "edit"
its text. However, we can achieve the same effect by evaluating
[P] in an updated state: *)
Definition assn_sub X a (P:Assertion) : Assertion :=
fun (st : state) =>
P (X !-> aeval st a ; st).
Notation "P [ X |-> a ]" := (assn_sub X a P)
(at level 10, X at next level, a custom com).
(** That is, [P [X |-> a]] stands for an assertion -- let's call it [P'] --
that is just like [P] except that, wherever [P] looks up the
variable [X] in the current state, [P'] instead uses the value
of the expression [a]. *)
(** To see how this works, let's calculate what happens with a couple
of examples. First, suppose [P'] is [(X <= 5) [X |-> 3]] -- that
is, more formally, [P'] is the Coq expression
fun st =>
(fun st' => st' X <= 5)
(X !-> aeval st 3 ; st),
which simplifies to
fun st =>
(fun st' => st' X <= 5)
(X !-> 3 ; st)
and further simplifies to
fun st =>
((X !-> 3 ; st) X) <= 5
and finally to
fun st =>
3 <= 5.
That is, [P'] is the assertion that [3] is less than or equal to
[5] (as expected). *)
(** For a more interesting example, suppose [P'] is [(X <= 5) [X |->
X + 1]]. Formally, [P'] is the Coq expression
fun st =>
(fun st' => st' X <= 5)
(X !-> aeval st (X + 1) ; st),
which simplifies to
fun st =>
(X !-> aeval st (X + 1) ; st) X <= 5
and further simplifies to
fun st =>
(aeval st (X + 1)) <= 5.
That is, [P'] is the assertion that [X + 1] is at most [5].
*)
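(** As a small sanity check (our own example, not from the chapter),
    we can confirm the first of these calculations in Coq: the
    substituted assertion [(X <= 5) [X |-> 3]] holds of every state,
    since it amounts to [3 <= 5]. *)
Example assn_sub_example_check : forall st,
  ((X <= 5) [X |-> 3]) st.
Proof.
  intros st. unfold assn_sub, t_update. simpl. lia.
Qed.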
(** Now, using the concept of substitution, we can give the precise
proof rule for assignment:
------------------------------ (hoare_asgn)
{{Q [X |-> a]}} X := a {{Q}}
*)
(** We can prove formally that this rule is indeed valid. *)
Theorem hoare_asgn : forall Q X a,
{{Q [X |-> a]}} X := a {{Q}}.
Proof.
unfold hoare_triple.
intros Q X a st st' HE HQ.
inversion HE. subst.
unfold assn_sub in HQ. assumption. Qed.
(** Here's a first formal proof using this rule. *)
Example assn_sub_example :
{{(X < 5) [X |-> X + 1]}}
X := X + 1
{{X < 5}}.
Proof.
(* WORKED IN CLASS *)
apply hoare_asgn. Qed.
(** (Of course, what we'd probably prefer is to prove this
simpler triple:
{{X < 4}} X := X + 1 {{X < 5}}
We will see how to do so in the next section.) *)
(** Complete these Hoare triples by providing an appropriate
precondition using [exists], then prove then with [apply
hoare_asgn]. If you find that tactic doesn't suffice, double check
that you have completed the triple properly. *)
(** **** Exercise: 2 stars, standard, optional (hoare_asgn_examples1) *)
Example hoare_asgn_examples1 :
exists P,
{{ P }}
X := 2 * X
{{ X <= 10 }}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (hoare_asgn_examples2) *)
Example hoare_asgn_examples2 :
exists P,
{{ P }}
X := 3
{{ 0 <= X /\ X <= 5 }}.
Proof. (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, standard, especially useful (hoare_asgn_wrong)
The assignment rule looks backward to almost everyone the first
time they see it. If it still seems puzzling to you, it may help
to think a little about alternative "forward" rules. Here is a
seemingly natural one:
------------------------------ (hoare_asgn_wrong)
{{ True }} X := a {{ X = a }}
Give a counterexample showing that this rule is incorrect and
argue informally that it is really a counterexample. (Hint:
The rule universally quantifies over the arithmetic expression
[a], and your counterexample needs to exhibit an [a] for which
the rule doesn't work.) *)
(* FILL IN HERE *)
(* Do not modify the following line: *)
Definition manual_grade_for_hoare_asgn_wrong : option (nat*string) := None.
(** [] *)
(** **** Exercise: 3 stars, advanced (hoare_asgn_fwd)
However, by using a _parameter_ [m] (a Coq number) to remember the
original value of [X] we can define a Hoare rule for assignment
that does, intuitively, "work forwards" rather than backwards.
------------------------------------------ (hoare_asgn_fwd)
{{fun st => P st /\ st X = m}}
X := a
{{fun st => P st' /\ st X = aeval st' a }}
(where st' = (X !-> m ; st))
Note that we use the original value of [X] to reconstruct the
state [st'] before the assignment took place. Prove that this rule
is correct. (Also note that this rule is more complicated than
[hoare_asgn].) *)
Theorem hoare_asgn_fwd :
forall m a P,
{{fun st => P st /\ st X = m}}
X := a
{{fun st => P (X !-> m ; st)
/\ st X = aeval (X !-> m ; st) a }}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, advanced, optional (hoare_asgn_fwd_exists)
Another way to define a forward rule for assignment is to
existentially quantify over the previous value of the assigned
variable. Prove that it is correct.
------------------------------------ (hoare_asgn_fwd_exists)
{{fun st => P st}}
X := a
{{fun st => exists m, P (X !-> m ; st) /\
st X = aeval (X !-> m ; st) a }}
*)
Theorem hoare_asgn_fwd_exists :
forall a P,
{{fun st => P st}}
X := a
{{fun st => exists m, P (X !-> m ; st) /\
st X = aeval (X !-> m ; st) a }}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Consequence *)
(** Sometimes the preconditions and postconditions we get from the
Hoare rules won't quite be the ones we want in the particular
situation at hand -- they may be logically equivalent but have a
different syntactic form that fails to unify with the goal we are
trying to prove, or they actually may be logically weaker (for
preconditions) or stronger (for postconditions) than what we need. *)
(** For instance, while
{{(X = 3) [X |-> 3]}} X := 3 {{X = 3}},
follows directly from the assignment rule,
{{True}} X := 3 {{X = 3}}
does not. This triple is valid, but it is not an instance of
[hoare_asgn] because [True] and [(X = 3) [X |-> 3]] are not
syntactically equal assertions. However, they are logically
_equivalent_, so if one triple is valid, then the other must
certainly be as well. We can capture this observation with the
following rule:
{{P'}} c {{Q}}
P <<->> P'
----------------------------- (hoare_consequence_pre_equiv)
{{P}} c {{Q}}
*)
(** Taking this line of thought a bit further, we can see that
strengthening the precondition or weakening the postcondition of a
valid triple always produces another valid triple. This
observation is captured by two _Rules of Consequence_.
{{P'}} c {{Q}}
P ->> P'
----------------------------- (hoare_consequence_pre)
{{P}} c {{Q}}
{{P}} c {{Q'}}
Q' ->> Q
----------------------------- (hoare_consequence_post)
{{P}} c {{Q}}
*)
(** Here are the formal versions: *)
Theorem hoare_consequence_pre : forall (P P' Q : Assertion) c,
{{P'}} c {{Q}} ->
P ->> P' ->
{{P}} c {{Q}}.
Proof.
unfold hoare_triple, "->>".
intros P P' Q c Hhoare Himp st st' Heval Hpre.
apply Hhoare with (st := st).
- assumption.
- apply Himp. assumption.
Qed.
Theorem hoare_consequence_post : forall (P Q Q' : Assertion) c,
{{P}} c {{Q'}} ->
Q' ->> Q ->
{{P}} c {{Q}}.
Proof.
unfold hoare_triple, "->>".
intros P Q Q' c Hhoare Himp st st' Heval Hpre.
apply Himp.
apply Hhoare with (st := st).
- assumption.
- assumption.
Qed.
(** For example, we can use the first consequence rule like this:
{{ True }} ->>
{{ (X = 1) [X |-> 1] }}
X := 1
{{ X = 1 }}
Or, formally... *)
Example hoare_asgn_example1 :
{{True}} X := 1 {{X = 1}}.
Proof.
(* WORKED IN CLASS *)
apply hoare_consequence_pre with (P' := (X = 1) [X |-> 1]).
- apply hoare_asgn.
- unfold "->>", assn_sub, t_update.
intros st _. simpl. reflexivity.
Qed.
(** We can also use it to prove the example mentioned earlier.
{{ X < 4 }} ->>
{{ (X < 5)[X |-> X + 1] }}
X := X + 1
{{ X < 5 }}
Or, formally ... *)
Example assn_sub_example2 :
{{X < 4}}
X := X + 1
{{X < 5}}.
Proof.
(* WORKED IN CLASS *)
apply hoare_consequence_pre with (P' := (X < 5) [X |-> X + 1]).
- apply hoare_asgn.
- unfold "->>", assn_sub, t_update.
intros st H. simpl in *. lia.
Qed.
(** Finally, here is a combined rule of consequence that allows us to
vary both the precondition and the postcondition.
{{P'}} c {{Q'}}
P ->> P'
Q' ->> Q
----------------------------- (hoare_consequence)
{{P}} c {{Q}}
*)
Theorem hoare_consequence : forall (P P' Q Q' : Assertion) c,
{{P'}} c {{Q'}} ->
P ->> P' ->
Q' ->> Q ->
{{P}} c {{Q}}.
Proof.
intros P P' Q Q' c Htriple Hpre Hpost.
apply hoare_consequence_pre with (P' := P').
- apply hoare_consequence_post with (Q' := Q').
+ assumption.
+ assumption.
- assumption.
Qed.
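(** Here is one small use of the combined rule (our own example, not
    from the chapter): we strengthen the precondition [X = 2] to the
    substituted assertion for the intermediate postcondition [X = 3],
    and then weaken [X = 3] to [X > 2]. *)
Example hoare_consequence_example :
  {{ X = 2 }} X := X + 1 {{ X > 2 }}.
Proof.
  apply hoare_consequence
    with (P' := (X = 3) [X |-> X + 1]) (Q' := (X = 3)%assertion).
  - (* {{(X = 3) [X |-> X + 1]}} X := X + 1 {{X = 3}} *)
    apply hoare_asgn.
  - (* X = 2 ->> (X = 3) [X |-> X + 1] *)
    unfold "->>", assn_sub, t_update.
    intros st H. simpl in *. lia.
  - (* X = 3 ->> X > 2 *)
    unfold "->>". intros st H. simpl in *. lia.
Qed.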
(* ================================================================= *)
(** ** Automation *)
(** Many of the proofs we have done so far with Hoare triples can be
streamlined using the automation techniques that we introduced in
the [Auto] chapter of _Logical Foundations_.
Recall that the [auto] tactic can be told to [unfold] definitions
as part of its proof search. Let's give that hint for the
definitions and coercions we're using: *)
Hint Unfold assert_implies hoare_triple assn_sub t_update : core.
Hint Unfold assert_of_Prop Aexp_of_nat Aexp_of_aexp : core.
(** Also recall that [auto] will search for a proof involving [intros]
and [apply]. By default, the theorems that it will apply include
any of the local hypotheses, as well as theorems in a core
database. *)
(** The proof of [hoare_consequence_pre], repeated below, looks
like an opportune place for such automation, because all it does
is [unfold], [intros], and [apply]. It uses [assumption], too,
but that's just application of a hypothesis. *)
Theorem hoare_consequence_pre' : forall (P P' Q : Assertion) c,
{{P'}} c {{Q}} ->
P ->> P' ->
{{P}} c {{Q}}.
Proof.
unfold hoare_triple, "->>".
intros P P' Q c Hhoare Himp st st' Heval Hpre.
apply Hhoare with (st := st).
- assumption.
- apply Himp. assumption.
Qed.
(** Merely using [auto], though, doesn't complete the proof. *)
Theorem hoare_consequence_pre'' : forall (P P' Q : Assertion) c,
{{P'}} c {{Q}} ->
P ->> P' ->
{{P}} c {{Q}}.
Proof.
auto. (* no progress *)
Abort.
(** The problem is the [apply Hhoare with...] part of the proof. Coq
isn't able to figure out how to instantiate [st] without some help
from us. Recall, though, that there are versions of many tactics
that will use _existential variables_ to make progress even when
the regular versions of those tactics would get stuck.
Here, the [eapply] tactic will introduce an existential variable
[?st] as a placeholder for [st], and [eassumption] will
instantiate [?st] with [st] when it discovers [st] in assumption
[Heval]. By using [eapply] we are essentially telling Coq, "Be
patient: The missing part is going to be filled in later in the
proof." *)
Theorem hoare_consequence_pre''' : forall (P P' Q : Assertion) c,
{{P'}} c {{Q}} ->
P ->> P' ->
{{P}} c {{Q}}.
Proof.
unfold hoare_triple, "->>".
intros P P' Q c Hhoare Himp st st' Heval Hpre.
eapply Hhoare.
- eassumption.
- apply Himp. assumption.
Qed.
(** Tactic [eauto] will use [eapply] as part of its proof search.
So, the entire proof can be done in just one line. *)
Theorem hoare_consequence_pre'''' : forall (P P' Q : Assertion) c,
{{P'}} c {{Q}} ->
P ->> P' ->
{{P}} c {{Q}}.
Proof.
eauto.
Qed.
(** Of course, it's hard to predict that [eauto] suffices here
without having gone through the original proof of
[hoare_consequence_pre] to see the tactics it used. But now that
we know [eauto] works, it's a good bet that it will also work for
[hoare_consequence_post]. *)
Theorem hoare_consequence_post' : forall (P Q Q' : Assertion) c,
{{P}} c {{Q'}} ->
Q' ->> Q ->
{{P}} c {{Q}}.
Proof.
eauto.
Qed.
(** We can also use [eapply] to streamline a proof,
[hoare_asgn_example1], that we did earlier as an example of using
the consequence rule: *)
Example hoare_asgn_example1' :
{{True}} X := 1 {{X = 1}}.
Proof.
eapply hoare_consequence_pre. (* no need to state an assertion *)
- apply hoare_asgn.
- unfold "->>", assn_sub, t_update.
intros st _. simpl. reflexivity.
Qed.
(** The final bullet of that proof also looks like a candidate for
automation. *)
Example hoare_asgn_example1'' :
{{True}} X := 1 {{X = 1}}.
Proof.
eapply hoare_consequence_pre.
- apply hoare_asgn.
- auto.
Qed.
(** Now we have quite a nice proof script: it simply identifies the
Hoare rules that need to be used and leaves the remaining
low-level details up to Coq to figure out. *)
(** By now it might be apparent that the _entire_ proof could be
automated if we added [hoare_consequence_pre] and [hoare_asgn] to
the hint database. We won't do that in this chapter, so that we
can get a better understanding of when and how the Hoare rules are
used. In the next chapter, [Hoare2], we'll dive deeper into
automating entire proofs of Hoare triples. *)
(** The other example of using consequence that we did earlier,
[hoare_asgn_example2], requires a little more work to automate.
We can streamline the first line with [eapply], but we can't just use
[auto] for the final bullet, since it needs [lia]. *)
Example assn_sub_example2' :
{{X < 4}}
X := X + 1
{{X < 5}}.
Proof.
eapply hoare_consequence_pre.
- apply hoare_asgn.
- auto. (* no progress *)
unfold "->>", assn_sub, t_update.
intros st H. simpl in *. lia.
Qed.
(** Let's introduce our own tactic to handle both that bullet and the
bullet from example 1: *)
Ltac assn_auto :=
try auto; (* as in example 1, above *)
try (unfold "->>", assn_sub, t_update;
intros; simpl in *; lia). (* as in example 2 *)
Example assn_sub_example2'' :
{{X < 4}}
X := X + 1
{{X < 5}}.
Proof.
eapply hoare_consequence_pre.
- apply hoare_asgn.
- assn_auto.
Qed.
Example hoare_asgn_example1''':
{{True}} X := 1 {{X = 1}}.
Proof.
eapply hoare_consequence_pre.
- apply hoare_asgn.
- assn_auto.
Qed.
(** Again, we have quite a nice proof script. All the low-level
details of proof about assertions have been taken care of
automatically. Of course, [assn_auto] isn't able to prove
everything we could possibly want to know about assertions --
there's no magic here! But it's good enough so far. *)
(** **** Exercise: 2 stars, standard (hoare_asgn_examples_2)
Prove these triples. Try to make your proof scripts as nicely automated
as those above. *)
Example assn_sub_ex1' :
{{ X <= 5 }}
X := 2 * X
{{ X <= 10 }}.
Proof.
(* FILL IN HERE *) Admitted.
Example assn_sub_ex2' :
{{ 0 <= 3 /\ 3 <= 5 }}
X := 3
{{ 0 <= X /\ X <= 5 }}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Skip *)
(** Since [skip] doesn't change the state, it preserves any
assertion [P]:
-------------------- (hoare_skip)
{{ P }} skip {{ P }}
*)
Theorem hoare_skip : forall P,
{{P}} skip {{P}}.
Proof.
intros P st st' H HP. inversion H; subst. assumption.
Qed.
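(** Combined with the consequence rule, [hoare_skip] already lets us
    prove small concrete triples (a toy example of ours, not from the
    chapter): *)
Example hoare_skip_example :
  {{ X = 2 }} skip {{ X <= 2 }}.
Proof.
  eapply hoare_consequence_post.
  - apply hoare_skip.
  - (* X = 2 ->> X <= 2 *)
    unfold "->>". intros st H. simpl in *. lia.
Qed.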
(* ================================================================= *)
(** ** Sequencing *)
(** If command [c1] takes any state where [P] holds to a state where
[Q] holds, and if [c2] takes any state where [Q] holds to one
where [R] holds, then doing [c1] followed by [c2] will take any
state where [P] holds to one where [R] holds:
{{ P }} c1 {{ Q }}
{{ Q }} c2 {{ R }}
---------------------- (hoare_seq)
{{ P }} c1;c2 {{ R }}
*)
Theorem hoare_seq : forall P Q R c1 c2,
{{Q}} c2 {{R}} ->
{{P}} c1 {{Q}} ->
{{P}} c1; c2 {{R}}.
Proof.
unfold hoare_triple.
intros P Q R c1 c2 H1 H2 st st' H12 Pre.
inversion H12; subst.
eauto.
Qed.
(** Note that, in the formal rule [hoare_seq], the premises are
given in backwards order ([c2] before [c1]). This matches the
natural flow of information in many of the situations where we'll
use the rule, since the natural way to construct a Hoare-logic
proof is to begin at the end of the program (with the final
postcondition) and push postconditions backwards through commands
until we reach the beginning. *)
(** Here's an example of a program involving sequencing. Note the use
of [hoare_seq] in conjunction with [hoare_consequence_pre] and the
[eapply] tactic. *)
Example hoare_asgn_example3 : forall (a:aexp) (n:nat),
{{a = n}}
X := a;
skip
{{X = n}}.
Proof.
intros a n. eapply hoare_seq.
- (* right part of seq *)
apply hoare_skip.
- (* left part of seq *)
eapply hoare_consequence_pre.
+ apply hoare_asgn.
+ assn_auto.
Qed.
(** Informally, a nice way of displaying a proof using the sequencing
rule is as a "decorated program" where the intermediate assertion
[Q] is written between [c1] and [c2]:
{{ a = n }}
X := a;
{{ X = n }} <--- decoration for Q
skip
{{ X = n }}
*)
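(** Before turning to the exercises, here is one more small instance
    of this backward style (our own example, not from the chapter).
    Pushing [Y = 1] back through [Y := X] yields [(Y = 1) [Y |-> X]],
    and pushing that through [X := 1] leaves an obligation that
    follows from [True]: *)
Example hoare_seq_example :
  {{ True }}
  X := 1;
  Y := X
  {{ Y = 1 }}.
Proof.
  eapply hoare_seq.
  - (* right part of seq: Y := X *)
    apply hoare_asgn.
  - (* left part of seq: X := 1 *)
    eapply hoare_consequence_pre.
    + apply hoare_asgn.
    + assn_auto.
Qed.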
(** **** Exercise: 2 stars, standard, especially useful (hoare_asgn_example4)
Translate this "decorated program" into a formal proof:
{{ True }} ->>
{{ 1 = 1 }}
X := 1;
{{ X = 1 }} ->>
{{ X = 1 /\ 2 = 2 }}
Y := 2
{{ X = 1 /\ Y = 2 }}
Note the use of "[->>]" decorations, each marking a use of
[hoare_consequence_pre].
We've started you off by providing a use of [hoare_seq] that
explicitly identifies [X = 1] as the intermediate assertion. *)
Example hoare_asgn_example4 :
{{ True }}
X := 1;
Y := 2
{{ X = 1 /\ Y = 2 }}.
Proof.
apply hoare_seq with (Q := (X = 1)%assertion).
(* The annotation [%assertion] is needed here to help Coq parse correctly. *)
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 3 stars, standard (swap_exercise)
Write an Imp program [c] that swaps the values of [X] and [Y] and
show that it satisfies the following specification:
{{X <= Y}} c {{Y <= X}}
Your proof should not need to use [unfold hoare_triple]. (Hint:
Remember that the assignment rule works best when it's applied
"back to front," from the postcondition to the precondition. So
your proof will want to start at the end and work back to the
beginning of your program.) *)
Definition swap_program : com
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Theorem swap_exercise :
{{X <= Y}}
swap_program
{{Y <= X}}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 4 stars, standard (invalid_triple)
Show that
{{ a = n }}
X := 3; Y := a
{{ Y = n }}
is not a valid Hoare triple for some choices of [a] and [n].
Conceptual hint: invent a particular [a] and [n] for which the triple
is invalid, then use those to complete the proof.
Technical hint: hypothesis [H], below, begins [forall a n, ...].
You'll want to instantiate that for the particular [a] and [n]
you've invented. You can do that with [assert] and [apply], but
Coq offers an even easier tactic: [specialize]. If you write
specialize H with (a := your_a) (n := your_n)
the hypothesis will be instantiated on [your_a] and [your_n].
*)
Theorem invalid_triple : ~ forall (a : aexp) (n : nat),
{{ a = n }}
X := 3; Y := a
{{ Y = n }}.
Proof.
unfold hoare_triple.
intros H.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Conditionals *)
(** What sort of rule do we want for reasoning about conditional
commands?
Certainly, if the same assertion [Q] holds after executing
either of the branches, then it holds after the whole conditional.
So we might be tempted to write:
{{P}} c1 {{Q}}
{{P}} c2 {{Q}}
---------------------------------
{{P}} if b then c1 else c2 {{Q}}
*)
(** However, this is rather weak. For example, using this rule,
we cannot show
{{ True }}
if X = 0
then Y := 2
else Y := X + 1
end
{{ X <= Y }}
since the rule tells us nothing about the state in which the
assignments take place in the "then" and "else" branches. *)
(** Fortunately, we can say something more precise. In the
"then" branch, we know that the boolean expression [b] evaluates to
[true], and in the "else" branch, we know it evaluates to [false].
Making this information available in the premises of the rule gives
us more information to work with when reasoning about the behavior
of [c1] and [c2] (i.e., the reasons why they establish the
postcondition [Q]).
{{P /\ b}} c1 {{Q}}
{{P /\ ~ b}} c2 {{Q}}
------------------------------------ (hoare_if)
{{P}} if b then c1 else c2 end {{Q}}
*)
(** To interpret this rule formally, we need to do a little work.
Strictly speaking, the assertion we've written, [P /\ b], is the
conjunction of an assertion and a boolean expression -- i.e., it
doesn't typecheck. To fix this, we need a way of formally
"lifting" any bexp [b] to an assertion. We'll write [bassn b] for
the assertion "the boolean expression [b] evaluates to [true] (in
the given state)." *)
Definition bassn b : Assertion :=
fun st => (beval st b = true).
Coercion bassn : bexp >-> Assertion.
Arguments bassn /.
(** A useful fact about [bassn]: *)
Lemma bexp_eval_false : forall b st,
beval st b = false -> ~ ((bassn b) st).
Proof. congruence. Qed.
Hint Resolve bexp_eval_false : core.
(** We mentioned the [congruence] tactic in passing in [Auto] when
building the [find_rwd] tactic. Like [find_rwd], [congruence] is able to
automatically find that both [beval st b = false] and [beval st b = true]
are being assumed, notice the contradiction, and [discriminate] to complete
the proof. *)
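(** A dual fact (our own small addition, mirroring [bexp_eval_false])
    occasionally helps in the "then" branch: *)
Lemma bexp_eval_true : forall b st,
  beval st b = true -> (bassn b) st.
Proof.
  intros b st Hbe. unfold bassn. assumption.
Qed.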
(** Now we can formalize the Hoare proof rule for conditionals
and prove it correct. *)
Theorem hoare_if : forall P Q (b:bexp) c1 c2,
{{ P /\ b }} c1 {{Q}} ->
{{ P /\ ~ b}} c2 {{Q}} ->
{{P}} if b then c1 else c2 end {{Q}}.
(** That is (unwrapping the notations):
Theorem hoare_if : forall P Q b c1 c2,
{{fun st => P st /\ bassn b st}} c1 {{Q}} ->
{{fun st => P st /\ ~ (bassn b st)}} c2 {{Q}} ->
{{P}} if b then c1 else c2 end {{Q}}.
*)
Proof.
intros P Q b c1 c2 HTrue HFalse st st' HE HP.
inversion HE; subst; eauto.
Qed.
(* ----------------------------------------------------------------- *)
(** *** Example *)
(** Here is a formal proof that the program we used to motivate the
rule satisfies the specification we gave. *)
Example if_example :
{{True}}
if (X = 0)
then Y := 2
else Y := X + 1
end
{{X <= Y}}.
Proof.
apply hoare_if.
- (* Then *)
eapply hoare_consequence_pre.
+ apply hoare_asgn.
+ assn_auto. (* no progress *)
unfold "->>", assn_sub, t_update, bassn.
simpl. intros st [_ H].
apply eqb_eq in H.
rewrite H. lia.
- (* Else *)
eapply hoare_consequence_pre.
+ apply hoare_asgn.
+ assn_auto.
Qed.
(** As we did earlier, it would be nice to eliminate all the low-level
proof script that isn't about the Hoare rules. Unfortunately, the
[assn_auto] tactic we wrote wasn't quite up to the job. Looking
at the proof of [if_example], we can see why. We had to unfold a
definition ([bassn]) and use a theorem ([eqb_eq]) that we didn't
need in earlier proofs. So, let's add those into our tactic,
and clean it up a little in the process. *)
Ltac assn_auto' :=
unfold "->>", assn_sub, t_update, bassn;
intros; simpl in *;
try rewrite -> eqb_eq in *; (* for equalities *)
auto; try lia.
(** Now the proof is quite streamlined. *)
Example if_example'' :
{{True}}
if X = 0
then Y := 2
else Y := X + 1
end
{{X <= Y}}.
Proof.
apply hoare_if.
- eapply hoare_consequence_pre.
+ apply hoare_asgn.
+ assn_auto'.
- eapply hoare_consequence_pre.
+ apply hoare_asgn.
+ assn_auto'.
Qed.
(** We can even shorten it a little bit more. *)
Example if_example''' :
{{True}}
if X = 0
then Y := 2
else Y := X + 1
end
{{X <= Y}}.
Proof.
apply hoare_if; eapply hoare_consequence_pre;
try apply hoare_asgn; try assn_auto'.
Qed.
(** For later proofs, it will help to extend [assn_auto'] to handle
inequalities, too. *)
Ltac assn_auto'' :=
unfold "->>", assn_sub, t_update, bassn;
intros; simpl in *;
try rewrite -> eqb_eq in *;
try rewrite -> leb_le in *; (* for inequalities *)
auto; try lia.
(** **** Exercise: 2 stars, standard (if_minus_plus)
Prove the theorem below using [hoare_if]. Do not use [unfold
hoare_triple]. *)
Theorem if_minus_plus :
{{True}}
if (X <= Y)
then Z := Y - X
else Y := X + Z
end
{{Y = X + Z}}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(* ----------------------------------------------------------------- *)
(** *** Exercise: One-sided conditionals *)
(** In this exercise we consider extending Imp with "one-sided
conditionals" of the form [if1 b then c end]. Here [b] is a boolean
expression, and [c] is a command. If [b] evaluates to [true], then
command [c] is evaluated. If [b] evaluates to [false], then [if1 b
then c end] does nothing.
We recommend that you complete this exercise before attempting the
ones that follow, as it should help solidify your understanding of
the material. *)
(** The first step is to extend the syntax of commands and introduce
the usual notations. (We've done this for you. We use a separate
module to prevent polluting the global name space.) *)
Module If1.
Inductive com : Type :=
| CSkip : com
| CAsgn : string -> aexp -> com
| CSeq : com -> com -> com
| CIf : bexp -> com -> com -> com
| CWhile : bexp -> com -> com
| CIf1 : bexp -> com -> com.
Notation "'if1' x 'then' y 'end'" :=
(CIf1 x y)
(in custom com at level 0, x custom com at level 99).
Notation "'skip'" :=
CSkip (in custom com at level 0).
Notation "x := y" :=
(CAsgn x y)
(in custom com at level 0, x constr at level 0,
y at level 85, no associativity).
Notation "x ; y" :=
(CSeq x y)
(in custom com at level 90, right associativity).
Notation "'if' x 'then' y 'else' z 'end'" :=
(CIf x y z)
(in custom com at level 89, x at level 99,
y at level 99, z at level 99).
Notation "'while' x 'do' y 'end'" :=
(CWhile x y)
(in custom com at level 89, x at level 99, y at level 99).
(** **** Exercise: 2 stars, standard (if1_ceval) *)
(** Add two new evaluation rules to relation [ceval], below, for
[if1]. Let the rules for [if] guide you.*)
Reserved Notation "st '=[' c ']=>' st'"
(at level 40, c custom com at level 99,
st constr, st' constr at next level).
Inductive ceval : com -> state -> state -> Prop :=
| E_Skip : forall st,
st =[ skip ]=> st
| E_Asgn : forall st a1 n x,
aeval st a1 = n ->
st =[ x := a1 ]=> (x !-> n ; st)
| E_Seq : forall c1 c2 st st' st'',
st =[ c1 ]=> st' ->
st' =[ c2 ]=> st'' ->
st =[ c1 ; c2 ]=> st''
| E_IfTrue : forall st st' b c1 c2,
beval st b = true ->
st =[ c1 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_IfFalse : forall st st' b c1 c2,
beval st b = false ->
st =[ c2 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_WhileFalse : forall b st c,
beval st b = false ->
st =[ while b do c end ]=> st
| E_WhileTrue : forall st st' st'' b c,
beval st b = true ->
st =[ c ]=> st' ->
st' =[ while b do c end ]=> st'' ->
st =[ while b do c end ]=> st''
(* FILL IN HERE *)
where "st '=[' c ']=>' st'" := (ceval c st st').
Hint Constructors ceval : core.
(** The following unit tests should be provable simply by [eauto] if
you have defined the rules for [if1] correctly. *)
Example if1true_test :
empty_st =[ if1 X = 0 then X := 1 end ]=> (X !-> 1).
Proof. (* FILL IN HERE *) Admitted.
Example if1false_test :
(X !-> 2) =[ if1 X = 0 then X := 1 end ]=> (X !-> 2).
Proof. (* FILL IN HERE *) Admitted.
(** [] *)
(** Now we have to repeat the definition and notation of Hoare triples,
so that they will use the updated [com] type. *)
Definition hoare_triple
(P : Assertion) (c : com) (Q : Assertion) : Prop :=
forall st st',
st =[ c ]=> st' ->
P st ->
Q st'.
Hint Unfold hoare_triple : core.
Notation "{{ P }} c {{ Q }}" := (hoare_triple P c Q)
(at level 90, c custom com at level 99)
: hoare_spec_scope.
(** **** Exercise: 2 stars, standard (hoare_if1) *)
(** Invent a Hoare logic proof rule for [if1]. State and prove a
theorem named [hoare_if1] that shows the validity of your rule.
Use [hoare_if] as a guide. Try to invent a rule that is
_complete_, meaning it can be used to prove the correctness of as
many one-sided conditionals as possible. Also try to keep your
rule _compositional_, meaning that any Imp command that appears
in a premise should syntactically be a part of the command
in the conclusion.
Hint: if you encounter difficulty getting Coq to parse part of
your rule as an assertion, try manually indicating that it should
be in the assertion scope. For example, if you want [e] to be
parsed as an assertion, write it as [(e)%assertion]. *)
(* FILL IN HERE *)
(** For full credit, prove formally [hoare_if1_good] that your rule is
precise enough to show the following valid Hoare triple:
{{ X + Y = Z }}
if1 ~(Y = 0) then
X := X + Y
end
{{ X = Z }}
*)
(* Do not modify the following line: *)
Definition manual_grade_for_hoare_if1 : option (nat*string) := None.
(** [] *)
(** Before the next exercise, we need to restate the Hoare rules of
consequence (for preconditions) and assignment for the new [com]
type. *)
Theorem hoare_consequence_pre : forall (P P' Q : Assertion) c,
{{P'}} c {{Q}} ->
P ->> P' ->
{{P}} c {{Q}}.
Proof.
eauto.
Qed.
Theorem hoare_asgn : forall Q X a,
{{Q [X |-> a]}} (X := a) {{Q}}.
Proof.
intros Q X a st st' Heval HQ.
inversion Heval; subst.
auto.
Qed.
(** **** Exercise: 2 stars, standard (hoare_if1_good) *)
(** Prove that your [if1] rule is complete enough for the following
valid Hoare triple.
Hint: [assn_auto''] once more will get you most but not all the way
to a completely automated proof. You can finish manually, or
tweak the tactic further. *)
Lemma hoare_if1_good :
{{ X + Y = Z }}
if1 ~(Y = 0) then
X := X + Y
end
{{ X = Z }}.
Proof. (* FILL IN HERE *) Admitted.
(** [] *)
End If1.
(* ================================================================= *)
(** ** While Loops *)
(** The Hoare rule for [while] loops is based on the idea of an
_invariant_: an assertion whose truth is guaranteed before and
after executing a command. An assertion [P] is an invariant of [c] if
{{P}} c {{P}}
holds. Note that in the middle of executing [c], the invariant
might temporarily become false, but by the end of [c], it must be
restored. *)
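(** For example, [X = Y] is an invariant of

X := X + 1; Y := Y + 1

since a state satisfying [X = Y] before the sequence satisfies it
again afterwards -- even though the assertion is temporarily false in
between, right after [X] has been incremented but before [Y] has
caught up. *)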
(** As a first attempt at a [while] rule, we could try:
{{P}} c {{P}}
---------------------------
{{P}} while b do c end {{P}}
That rule is valid: if [P] is an invariant of [c], as the premise
requires, then no matter how many times the loop body executes,
[P] is going to be true when the loop finally finishes.
But the rule also omits two crucial pieces of information. First,
the loop terminates when [b] becomes false. So we can strengthen
the postcondition in the conclusion:
{{P}} c {{P}}
---------------------------------
{{P}} while b do c end {{P /\ ~b}}
Second, the loop body will be executed only if [b] is true. So we
can also strengthen the precondition in the premise:
{{P /\ b}} c {{P}}
--------------------------------- (hoare_while)
{{P}} while b do c end {{P /\ ~b}}
*)
(** That is the Hoare [while] rule. Note how it combines
aspects of [skip] and conditionals:
- If the loop body executes zero times, the rule is like [skip] in
that the precondition survives to become (part of) the
postcondition.
- Like a conditional, we can assume guard [b] holds on entry to
the subcommand.
*)
Theorem hoare_while : forall P (b:bexp) c,
{{P /\ b}} c {{P}} ->
{{P}} while b do c end {{P /\ ~ b}}.
Proof.
intros P b c Hhoare st st' Heval HP.
(* We proceed by induction on [Heval], because, in the "keep looping" case,
its hypotheses talk about the whole loop instead of just [c]. The
[remember] is used to keep the original command in the hypotheses;
otherwise, it would be lost in the [induction]. By using [inversion]
we clear away all the cases except those involving [while]. *)
remember <{while b do c end}> as original_command eqn:Horig.
induction Heval;
try (inversion Horig; subst; clear Horig);
eauto.
Qed.
(** We say that [P] is a _loop invariant_ of [while b do c end] if
{{P /\ b}} c {{P}}
holds, i.e., the loop body reestablishes [P] whenever both [P] and the guard [b] hold beforehand.
If [P] contradicts [b], this holds trivially since the precondition
is false. For instance, [X = 0] is a loop invariant of
while X = 2 do X := 1 end
since we will never enter the loop. *)
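(** To see why, instantiate the premise of [hoare_while] with
[P := X = 0], [b := X = 2] and [c := X := 1]: it asks for

{{ X = 0 /\ X = 2 }} X := 1 {{ X = 0 }}

and since no state satisfies both [X = 0] and [X = 2], the
precondition is unsatisfiable and the triple holds vacuously. *)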
(** The program
while Y > 10 do Y := Y - 1; Z := Z + 1 end
admits an interesting loop invariant:
X = Y + Z
Note that this doesn't contradict the loop guard but neither
is it an invariant of [Y := Y - 1; Z := Z + 1] -- if X = 5,
Y = 0 and Z = 5, running the command will set Y + Z to 6. The
loop guard [Y > 10] guarantees that this will not be the case.
We will see many such loop invariants in the following chapter.
*)
Example while_example :
{{X <= 3}}
while (X <= 2) do
X := X + 1
end
{{X = 3}}.
Proof.
eapply hoare_consequence_post.
- apply hoare_while.
eapply hoare_consequence_pre.
+ apply hoare_asgn.
+ assn_auto''.
- assn_auto''.
Qed.
(** If the loop never terminates, any postcondition will work. *)
Theorem always_loop_hoare : forall Q,
{{True}} while true do skip end {{Q}}.
Proof.
intros Q.
eapply hoare_consequence_post.
- apply hoare_while. apply hoare_post_true. auto.
- simpl. intros st [Hinv Hguard]. congruence.
Qed.
(** Of course, this result is not surprising if we remember that
the definition of [hoare_triple] asserts that the postcondition
must hold _only_ when the command terminates. If the command
doesn't terminate, we can prove anything we like about the
post-condition.
Hoare rules that specify what happens _if_ commands terminate,
without proving that they do, are said to describe a logic of
_partial_ correctness. It is also possible to give Hoare rules
for _total_ correctness, which additionally specify that
commands must terminate. Total correctness is outside the scope of
this textbook. *)
(* ----------------------------------------------------------------- *)
(** *** Exercise: [REPEAT] *)
(** **** Exercise: 4 stars, advanced (hoare_repeat)
In this exercise, we'll add a new command to our language of
commands: [REPEAT] c [until] b [end]. You will write the
evaluation rule for [REPEAT] and add a new Hoare rule to the
language for programs involving it. (You may recall that the
evaluation rule is given in an example in the [Auto] chapter.
Try to figure it out yourself here rather than peeking.) *)
Module RepeatExercise.
Inductive com : Type :=
| CSkip : com
| CAsgn : string -> aexp -> com
| CSeq : com -> com -> com
| CIf : bexp -> com -> com -> com
| CWhile : bexp -> com -> com
| CRepeat : com -> bexp -> com.
(** [REPEAT] behaves like [while], except that the loop guard is
checked _after_ each execution of the body, with the loop
repeating as long as the guard stays _false_. Because of this,
the body will always execute at least once. *)
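(** Concretely, starting from a state with [X = 3], the command

repeat X := X - 1 until X = 0 end

runs its body three times ([X] goes 3, 2, 1, 0) and stops the first
time the guard [X = 0] evaluates to [true] after the body; started
with [X = 0], the body still runs once, leaving [X] at [0]. *)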
Notation "'repeat' e1 'until' b2 'end'" :=
(CRepeat e1 b2)
(in custom com at level 0,
e1 custom com at level 99, b2 custom com at level 99).
Notation "'skip'" :=
CSkip (in custom com at level 0).
Notation "x := y" :=
(CAsgn x y)
(in custom com at level 0, x constr at level 0,
y at level 85, no associativity).
Notation "x ; y" :=
(CSeq x y)
(in custom com at level 90, right associativity).
Notation "'if' x 'then' y 'else' z 'end'" :=
(CIf x y z)
(in custom com at level 89, x at level 99,
y at level 99, z at level 99).
Notation "'while' x 'do' y 'end'" :=
(CWhile x y)
(in custom com at level 89, x at level 99, y at level 99).
(** Add new rules for [REPEAT] to [ceval] below. You can use the rules
for [while] as a guide, but remember that the body of a [REPEAT]
should always execute at least once, and that the loop ends when
the guard becomes true. *)
Inductive ceval : state -> com -> state -> Prop :=
| E_Skip : forall st,
st =[ skip ]=> st
| E_Asgn : forall st a1 n x,
aeval st a1 = n ->
st =[ x := a1 ]=> (x !-> n ; st)
| E_Seq : forall c1 c2 st st' st'',
st =[ c1 ]=> st' ->
st' =[ c2 ]=> st'' ->
st =[ c1 ; c2 ]=> st''
| E_IfTrue : forall st st' b c1 c2,
beval st b = true ->
st =[ c1 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_IfFalse : forall st st' b c1 c2,
beval st b = false ->
st =[ c2 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_WhileFalse : forall b st c,
beval st b = false ->
st =[ while b do c end ]=> st
| E_WhileTrue : forall st st' st'' b c,
beval st b = true ->
st =[ c ]=> st' ->
st' =[ while b do c end ]=> st'' ->
st =[ while b do c end ]=> st''
(* FILL IN HERE *)
where "st '=[' c ']=>' st'" := (ceval st c st').
(** A couple of definitions from above, copied here so they use the
new [ceval]. *)
Definition hoare_triple (P : Assertion) (c : com) (Q : Assertion)
: Prop :=
forall st st', st =[ c ]=> st' -> P st -> Q st'.
Notation "{{ P }} c {{ Q }}" :=
(hoare_triple P c Q) (at level 90, c custom com at level 99).
(** To make sure you've got the evaluation rules for [repeat] right,
prove that [ex1_repeat] evaluates correctly. *)
Definition ex1_repeat :=
<{ repeat
X := 1;
Y := Y + 1
until (X = 1) end }>.
Theorem ex1_repeat_works :
empty_st =[ ex1_repeat ]=> (Y !-> 1 ; X !-> 1).
Proof.
(* FILL IN HERE *) Admitted.
(** Now state and prove a theorem, [hoare_repeat], that expresses an
appropriate proof rule for [repeat] commands. Use [hoare_while]
as a model, and try to make your rule as precise as possible. *)
(* FILL IN HERE *)
(** For full credit, make sure (informally) that your rule can be used
to prove the following valid Hoare triple:
{{ X > 0 }}
repeat
Y := X;
X := X - 1
until X = 0 end
{{ X = 0 /\ Y > 0 }}
*)
End RepeatExercise.
(* Do not modify the following line: *)
Definition manual_grade_for_hoare_repeat : option (nat*string) := None.
(** [] *)
(* ################################################################# *)
(** * Summary *)
(** So far, we've introduced Hoare Logic as a tool for reasoning about
Imp programs.
The rules of Hoare Logic are:
--------------------------- (hoare_asgn)
{{Q [X |-> a]}} X:=a {{Q}}
-------------------- (hoare_skip)
{{ P }} skip {{ P }}
{{ P }} c1 {{ Q }}
{{ Q }} c2 {{ R }}
---------------------- (hoare_seq)
{{ P }} c1;c2 {{ R }}
{{P /\ b}} c1 {{Q}}
{{P /\ ~ b}} c2 {{Q}}
------------------------------------ (hoare_if)
{{P}} if b then c1 else c2 end {{Q}}
{{P /\ b}} c {{P}}
----------------------------------- (hoare_while)
{{P}} while b do c end {{P /\ ~ b}}
{{P'}} c {{Q'}}
P ->> P'
Q' ->> Q
----------------------------- (hoare_consequence)
{{P}} c {{Q}}
In the next chapter, we'll see how these rules are used to prove
that more interesting programs satisfy interesting specifications of
their behavior. *)
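(** As a small worked example of composing these rules, consider

{{ X = 1 }} X := X + 1; Y := X {{ Y = 2 }}

Using [hoare_seq] with intermediate assertion [X = 2], the second
triple {{X = 2}} Y := X {{Y = 2}} is exactly an instance of
[hoare_asgn] (since [(Y = 2) [Y |-> X]] is [X = 2]), and the first
triple {{X = 1}} X := X + 1 {{X = 2}} follows from [hoare_asgn]
together with [hoare_consequence] and the implication
[X = 1 ->> X + 1 = 2]. *)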
(* ################################################################# *)
(** * Additional Exercises *)
(* ================================================================= *)
(** ** Havoc *)
(** In this exercise, we will derive proof rules for a [HAVOC]
command, which is similar to the nondeterministic [any] expression
from the [Imp] chapter.
First, we enclose this work in a separate module, and recall the
syntax and big-step semantics of Himp commands. *)
Module Himp.
Inductive com : Type :=
| CSkip : com
| CAsgn : string -> aexp -> com
| CSeq : com -> com -> com
| CIf : bexp -> com -> com -> com
| CWhile : bexp -> com -> com
| CHavoc : string -> com.
Notation "'havoc' l" := (CHavoc l)
(in custom com at level 60, l constr at level 0).
Notation "'skip'" :=
CSkip (in custom com at level 0).
Notation "x := y" :=
(CAsgn x y)
(in custom com at level 0, x constr at level 0,
y at level 85, no associativity).
Notation "x ; y" :=
(CSeq x y)
(in custom com at level 90, right associativity).
Notation "'if' x 'then' y 'else' z 'end'" :=
(CIf x y z)
(in custom com at level 89, x at level 99,
y at level 99, z at level 99).
Notation "'while' x 'do' y 'end'" :=
(CWhile x y)
(in custom com at level 89, x at level 99, y at level 99).
Inductive ceval : com -> state -> state -> Prop :=
| E_Skip : forall st,
st =[ skip ]=> st
| E_Asgn : forall st a1 n x,
aeval st a1 = n ->
st =[ x := a1 ]=> (x !-> n ; st)
| E_Seq : forall c1 c2 st st' st'',
st =[ c1 ]=> st' ->
st' =[ c2 ]=> st'' ->
st =[ c1 ; c2 ]=> st''
| E_IfTrue : forall st st' b c1 c2,
beval st b = true ->
st =[ c1 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_IfFalse : forall st st' b c1 c2,
beval st b = false ->
st =[ c2 ]=> st' ->
st =[ if b then c1 else c2 end ]=> st'
| E_WhileFalse : forall b st c,
beval st b = false ->
st =[ while b do c end ]=> st
| E_WhileTrue : forall st st' st'' b c,
beval st b = true ->
st =[ c ]=> st' ->
st' =[ while b do c end ]=> st'' ->
st =[ while b do c end ]=> st''
| E_Havoc : forall st X n,
st =[ havoc X ]=> (X !-> n ; st)
where "st '=[' c ']=>' st'" := (ceval c st st').
Hint Constructors ceval : core.
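(** The [E_Havoc] rule places no constraint on [n], so for instance
both [empty_st =[ havoc X ]=> (X !-> 0)] and
[empty_st =[ havoc X ]=> (X !-> 42)] are derivable: the final value
of [X] is chosen nondeterministically. *)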
(** The definition of Hoare triples is exactly as before. *)
Definition hoare_triple (P:Assertion) (c:com) (Q:Assertion) : Prop :=
forall st st', st =[ c ]=> st' -> P st -> Q st'.
Hint Unfold hoare_triple : core.
Notation "{{ P }} c {{ Q }}" := (hoare_triple P c Q)
(at level 90, c custom com at level 99)
: hoare_spec_scope.
(** And the precondition consequence rule is exactly as before. *)
Theorem hoare_consequence_pre : forall (P P' Q : Assertion) c,
{{P'}} c {{Q}} ->
P ->> P' ->
{{P}} c {{Q}}.
Proof. eauto. Qed.
(** **** Exercise: 3 stars, standard (hoare_havoc) *)
(** Complete the Hoare rule for [HAVOC] commands below by defining
[havoc_pre], and prove that the resulting rule is correct. *)
Definition havoc_pre (X : string) (Q : Assertion) (st : total_map nat) : Prop
(* REPLACE THIS LINE WITH ":= _your_definition_ ." *). Admitted.
Theorem hoare_havoc : forall (Q : Assertion) (X : string),
{{ havoc_pre X Q }} havoc X {{ Q }}.
Proof.
(* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 3 stars, standard (havoc_post) *)
(** Complete the following proof without changing any of the provided
commands. If you find that it can't be completed, your definition of
[havoc_pre] is probably too strong. Find a way to relax it so that
[havoc_post] can be proved.
Hint: the [assn_auto] tactics we've built won't help you here.
You need to proceed manually. *)
Theorem havoc_post : forall (P : Assertion) (X : string),
{{ P }} havoc X {{ fun st => exists (n:nat), P [X |-> n] st }}.
Proof.
intros P X. eapply hoare_consequence_pre.
- apply hoare_havoc.
- (* FILL IN HERE *) Admitted.
(** [] *)
End Himp.
(* ================================================================= *)
(** ** Assert and Assume *)
(** **** Exercise: 4 stars, standard, optional (assert_vs_assume) *)
Module HoareAssertAssume.
(** In this exercise, we will extend IMP with two commands,
[assert] and [assume]. Both commands are ways
to indicate that a certain statement should hold any time this part
of the program is reached. However, they differ as follows:
- If an [assert] statement fails, it causes the program to go into
an error state and exit.
- If an [assume] statement fails, the program fails to evaluate
at all. In other words, the program gets stuck and has no
final state.
The new set of commands is: *)
Inductive com : Type :=
| CSkip : com
| CAsgn : string -> aexp -> com
| CSeq : com -> com -> com
| CIf : bexp -> com -> com -> com
| CWhile : bexp -> com -> com
| CAssert : bexp -> com
| CAssume : bexp -> com.
Notation "'assert' l" := (CAssert l)
(in custom com at level 8, l custom com at level 0).
Notation "'assume' l" := (CAssume l)
(in custom com at level 8, l custom com at level 0).
Notation "'skip'" :=
CSkip (in custom com at level 0).
Notation "x := y" :=
(CAsgn x y)
(in custom com at level 0, x constr at level 0,
y at level 85, no associativity).
Notation "x ; y" :=
(CSeq x y)
(in custom com at level 90, right associativity).
Notation "'if' x 'then' y 'else' z 'end'" :=
(CIf x y z)
(in custom com at level 89, x at level 99,
y at level 99, z at level 99).
Notation "'while' x 'do' y 'end'" :=
(CWhile x y)
(in custom com at level 89, x at level 99, y at level 99).
(** To define the behavior of [assert] and [assume], we need to add
notation for an error, which indicates that an assertion has
failed. We modify the [ceval] relation, therefore, so that
it relates a start state to either an end state or to [error].
The [result] type indicates the end value of a program,
either a state or an error: *)
Inductive result : Type :=
| RNormal : state -> result
| RError : result.
(** Now we are ready to give you the ceval relation for the new language. *)
Inductive ceval : com -> state -> result -> Prop :=
(* Old rules, several modified *)
| E_Skip : forall st,
st =[ skip ]=> RNormal st
| E_Asgn : forall st a1 n x,
aeval st a1 = n ->
st =[ x := a1 ]=> RNormal (x !-> n ; st)
| E_SeqNormal : forall c1 c2 st st' r,
st =[ c1 ]=> RNormal st' ->
st' =[ c2 ]=> r ->
st =[ c1 ; c2 ]=> r
| E_SeqError : forall c1 c2 st,
st =[ c1 ]=> RError ->
st =[ c1 ; c2 ]=> RError
| E_IfTrue : forall st r b c1 c2,
beval st b = true ->
st =[ c1 ]=> r ->
st =[ if b then c1 else c2 end ]=> r
| E_IfFalse : forall st r b c1 c2,
beval st b = false ->
st =[ c2 ]=> r ->
st =[ if b then c1 else c2 end ]=> r
| E_WhileFalse : forall b st c,
beval st b = false ->
st =[ while b do c end ]=> RNormal st
| E_WhileTrueNormal : forall st st' r b c,
beval st b = true ->
st =[ c ]=> RNormal st' ->
st' =[ while b do c end ]=> r ->
st =[ while b do c end ]=> r
| E_WhileTrueError : forall st b c,
beval st b = true ->
st =[ c ]=> RError ->
st =[ while b do c end ]=> RError
(* Rules for Assert and Assume *)
| E_AssertTrue : forall st b,
beval st b = true ->
st =[ assert b ]=> RNormal st
| E_AssertFalse : forall st b,
beval st b = false ->
st =[ assert b ]=> RError
| E_Assume : forall st b,
beval st b = true ->
st =[ assume b ]=> RNormal st
where "st '=[' c ']=>' r" := (ceval c st r).
(** We redefine hoare triples: Now, [{{P}} c {{Q}}] means that,
whenever [c] is started in a state satisfying [P], and terminates
with result [r], then [r] is not an error and the state of [r]
satisfies [Q]. *)
Definition hoare_triple
(P : Assertion) (c : com) (Q : Assertion) : Prop :=
forall st r,
st =[ c ]=> r -> P st ->
(exists st', r = RNormal st' /\ Q st').
Notation "{{ P }} c {{ Q }}" :=
(hoare_triple P c Q) (at level 90, c custom com at level 99)
: hoare_spec_scope.
(** To test your understanding of this modification, give an example
precondition and postcondition that are satisfied by the [assume]
statement but not by the [assert] statement. Then prove that any
triple for [assert] also works for [ASSUME]. *)
Theorem assert_assume_differ : exists (P:Assertion) b (Q:Assertion),
({{P}} assume b {{Q}})
/\ ~ ({{P}} assert b {{Q}}).
(* FILL IN HERE *) Admitted.
Theorem assert_implies_assume : forall P b Q,
({{P}} assert b {{Q}})
-> ({{P}} assume b {{Q}}).
Proof.
(* FILL IN HERE *) Admitted.
(** Your task is now to state Hoare rules for [assert] and [assume],
and use them to prove a simple program correct. Name your hoare
rule theorems [hoare_assert] and [hoare_assume].
For your benefit, we provide proofs for the old hoare rules
adapted to the new semantics. *)
Theorem hoare_asgn : forall Q X a,
{{Q [X |-> a]}} X := a {{Q}}.
Proof.
unfold hoare_triple.
intros Q X a st st' HE HQ.
inversion HE. subst.
exists (X !-> aeval st a ; st). split; try reflexivity.
assumption. Qed.
Theorem hoare_consequence_pre : forall (P P' Q : Assertion) c,
{{P'}} c {{Q}} ->
P ->> P' ->
{{P}} c {{Q}}.
Proof.
intros P P' Q c Hhoare Himp.
intros st st' Hc HP. apply (Hhoare st st').
assumption. apply Himp. assumption. Qed.
Theorem hoare_consequence_post : forall (P Q Q' : Assertion) c,
{{P}} c {{Q'}} ->
Q' ->> Q ->
{{P}} c {{Q}}.
Proof.
intros P Q Q' c Hhoare Himp.
intros st r Hc HP.
unfold hoare_triple in Hhoare.
assert (exists st', r = RNormal st' /\ Q' st').
{ apply (Hhoare st); assumption. }
destruct H as [st' [Hr HQ'] ].
exists st'. split; try assumption.
apply Himp. assumption.
Qed.
Theorem hoare_seq : forall P Q R c1 c2,
{{Q}} c2 {{R}} ->
{{P}} c1 {{Q}} ->
{{P}} c1;c2 {{R}}.
Proof.
intros P Q R c1 c2 H1 H2 st r H12 Pre.
inversion H12; subst.
- eapply H1.
+ apply H6.
+ apply H2 in H3. apply H3 in Pre.
destruct Pre as [st'0 [Heq HQ] ].
inversion Heq; subst. assumption.
- (* Find contradictory assumption *)
apply H2 in H5. apply H5 in Pre.
destruct Pre as [st' [C _] ].
inversion C.
Qed.
(** State and prove your hoare rules, [hoare_assert] and
[hoare_assume], below. *)
(* FILL IN HERE *)
(** Here are the other proof rules (sanity check) *)
Theorem hoare_skip : forall P,
{{P}} skip {{P}}.
Proof.
intros P st st' H HP. inversion H. subst.
eexists. split. reflexivity. assumption.
Qed.
Theorem hoare_if : forall P Q (b:bexp) c1 c2,
{{ P /\ b}} c1 {{Q}} ->
{{ P /\ ~ b}} c2 {{Q}} ->
{{P}} if b then c1 else c2 end {{Q}}.
Proof.
intros P Q b c1 c2 HTrue HFalse st st' HE HP.
inversion HE; subst.
- (* b is true *)
apply (HTrue st st').
assumption.
split. assumption. assumption.
- (* b is false *)
apply (HFalse st st').
assumption.
split. assumption.
apply bexp_eval_false. assumption. Qed.
Theorem hoare_while : forall P (b:bexp) c,
{{P /\ b}} c {{P}} ->
{{P}} while b do c end {{ P /\ ~b}}.
Proof.
intros P b c Hhoare st st' He HP.
remember <{while b do c end}> as wcom eqn:Heqwcom.
induction He;
try (inversion Heqwcom); subst; clear Heqwcom.
- (* E_WhileFalse *)
eexists. split. reflexivity. split.
assumption. apply bexp_eval_false. assumption.
- (* E_WhileTrueNormal *)
clear IHHe1.
apply IHHe2. reflexivity.
clear IHHe2 He2 r.
unfold hoare_triple in Hhoare.
apply Hhoare in He1.
+ destruct He1 as [st1 [Heq Hst1] ].
inversion Heq; subst.
assumption.
+ split; assumption.
- (* E_WhileTrueError *)
exfalso. clear IHHe.
unfold hoare_triple in Hhoare.
apply Hhoare in He.
+ destruct He as [st' [C _] ]. inversion C.
+ split; assumption.
Qed.
Example assert_assume_example:
{{True}}
assume (X = 1);
X := X + 1;
assert (X = 2)
{{True}}.
Proof.
(* FILL IN HERE *) Admitted.
End HoareAssertAssume.
(** [] *)
(* 2021-08-11 15:11 *)
|
{-# OPTIONS --without-K --exact-split --safe #-}
module Fragment.Equational.Theory.Base where
open import Function using (_∘_)
open import Fragment.Algebra.Signature
open import Fragment.Algebra.Free
open import Fragment.Algebra.Algebra
open import Fragment.Algebra.Properties
open import Data.Nat using (ℕ)
open import Data.Fin using (Fin)
open import Data.Product using (_×_; map)
Eq : (Σ : Signature) → (n : ℕ) → Set
Eq Σ n = ∥ F Σ n ∥ × ∥ F Σ n ∥
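-- Reading of the definition above (assuming, as in Fragment.Algebra.Free,
-- that F Σ n is the free Σ-algebra over n variables and ∥_∥ its carrier):
-- an equation in n variables is a pair of open terms, a left- and a
-- right-hand side.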
record Theory : Set₁ where
field
Σ : Signature
eqs : ℕ → Set
_⟦_⟧ₑ : ∀ {arity} → eqs arity → Eq Σ arity
open Signature Σ
open Theory public
data ExtendedEq (Θ : Theory) (E : ℕ → Set) : ℕ → Set where
newₑ : ∀ {n} → E n → ExtendedEq Θ E n
oldₑ : ∀ {n} → eqs Θ n → ExtendedEq Θ E n
_⦅_⦆ₒ : (Θ : Theory) → (O : ℕ → Set) → Theory
Θ ⦅ O ⦆ₒ = record { Σ = (Σ Θ) ⦅ O ⦆
; eqs = eqs Θ
; _⟦_⟧ₑ = (map extend extend) ∘ Θ ⟦_⟧ₑ
}
_⦅_/_⦆ : (Θ : Theory)
→ (E : ℕ → Set)
→ (∀ {n} → E n → Eq (Σ Θ) n)
→ Theory
Θ ⦅ E / ⟦_⟧' ⦆ = record { Σ = Σ Θ
; eqs = ExtendedEq Θ E
; _⟦_⟧ₑ = withE
}
where withE : ∀ {n} → ExtendedEq Θ E n → Eq (Σ Θ) n
withE (newₑ eq) = ⟦ eq ⟧'
withE (oldₑ eq) = Θ ⟦ eq ⟧ₑ
_⦅_∣_/_⦆ : (Θ : Theory)
→ (O : ℕ → Set)
→ (E : ℕ → Set)
→ (∀ {n} → E n → Eq ((Σ Θ) ⦅ O ⦆) n)
→ Theory
Θ ⦅ O ∣ E / ⟦_⟧' ⦆ = (Θ ⦅ O ⦆ₒ) ⦅ E / ⟦_⟧' ⦆
|
"""
one_step!
Compute the next xy coordinate: use the fast in-bounds update when the walker
is safely inside the water, otherwise fall back to `boundary_check` to handle a
possible collision or border crossing.
"""
function one_step!(initxy::Array{Float64,1})
theta = 2 * pi * rand(Float64) - pi # -pi to pi
dx = cos(theta)
dy = sin(theta)
if insafebounds(initxy) # fast computation
flowBias = flow_arlett(initxy)
# IMPORTANT: updating the array in place is faster than allocating a new one
initxy[1] += WATER_STEP_SIZE * dx + flowBias
initxy[2] += WATER_STEP_SIZE * dy
return initxy, "no collision"
else # more computation needed
return boundary_check(initxy, dx, dy)
end
end
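# Usage sketch (assumes WATER_STEP_SIZE, the SAFE_*/ESCAPE_* bounds, and the
# flow_arlett / boundary_check helpers are defined elsewhere in this project):
#
#   xy = [0.0, 0.5]              # a point inside the safe region
#   xy, status = one_step!(xy)   # mutates xy in place
#   status == "no collision" || @info "walker hit something" status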
"In water and away from borders and collisions"
function insafebounds(xy::Array{Float64,1})::Bool
return abs(xy[1]) < SAFE_MAX_X && SAFE_MIN_Y < xy[2] < SAFE_MAX_Y
end
"Far ends of the simulation space to end a runaway walk"
function inescapebounds(x::Float64, y::Float64)::Bool
withinx = -1*ESCAPE_X <= x <= ESCAPE_X
withiny = -1 <= y <= ESCAPE_Y
return withinx && withiny
end
inescapebounds(xy::Array{Float64,1}) = inescapebounds(xy[1], xy[2])
|
#=
SEND+MOST=MONEY in Julia + ConstraintSolver.jl
Alphametic problem where we maximize MONEY.
This version does two things:
- find the maximum of MONEY
- and then find all solutions for the maximum value of MONEY.
Problem from the lecture notes:
http://www.ict.kth.se/courses/ID2204/notes/L01.pdf
Model created by Hakan Kjellerstrand, [email protected]
See also my Julia page: http://www.hakank.org/julia/
=#
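# The puzzle spelled out:
#
#      S E N D            9 7 8 2
#    + M O S T          + 1 0 9 4
#    ---------          ---------
#    M O N E Y          1 0 8 7 6
#
# with all letters taking distinct digits and S != 0, M != 0. The assignment
# on the right is the optimum usually reported for this puzzle, so the solver
# below is expected to return MONEY = 10876.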
using ConstraintSolver, JuMP
const CS = ConstraintSolver
include("constraints_utils.jl")
function send_most_money()
model = Model(optimizer_with_attributes(CS.Optimizer,
# "all_solutions"=>true,
"all_optimal_solutions"=>true,
"logging"=>[]))
@variable(model, 0 <= x[1:8] <= 9, Int)
s,e,n,d,m,o,t,y = x
@variable(model, 10000 <= MONEY <= 99999, Int)
@constraint(model, x in CS.AllDifferentSet())
@constraint(model, s != 0)
@constraint(model, m != 0)
@constraint(model, MONEY == 10000*m + 1000*o + 100*n + 10*e + y)
@constraint(model,
1000*s + 100*e + 10*n + d
+ 1000*m + 100*o + 10*s + t
== MONEY
)
@objective(model,Max,MONEY)
# Solve the problem
optimize!(model)
status = JuMP.termination_status(model)
println("status:$status")
if status == MOI.OPTIMAL
num_sols = MOI.get(model, MOI.ResultCount())
println("num_sols:$num_sols\n")
for sol in 1:num_sols
println("solution #$sol")
xx = convert.(Integer,JuMP.value.(x; result=sol))
MONEYx = convert.(Integer,JuMP.value.(MONEY; result=sol))
println("x:$xx MONEY:$MONEYx\n")
end
end
end
send_most_money()
|
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(** Correctness proof for ARM code generation: auxiliary results. *)
Require Import Coqlib.
Require Import Errors.
Require Import Maps.
Require Import AST.
Require Import Integers.
Require Import Floats.
Require Import Values.
Require Import Memory.
Require Import Globalenvs.
Require Import Op.
Require Import Locations.
Require Import Mach.
Require Import Compopts.
Require Import Asm.
Require Import Asmgen.
Require Import Conventions.
Require Import Asmgenproof0.
Local Transparent Archi.ptr64.
(** Useful properties of the R14 register. *)
Lemma ireg_of_not_R14:
forall m r, ireg_of m = OK r -> IR r <> IR IR14.
Proof.
intros. erewrite <- ireg_of_eq; eauto with asmgen.
Qed.
Hint Resolve ireg_of_not_R14: asmgen.
Lemma ireg_of_not_R14':
forall m r, ireg_of m = OK r -> r <> IR14.
Proof.
intros. generalize (ireg_of_not_R14 _ _ H). congruence.
Qed.
Hint Resolve ireg_of_not_R14': asmgen.
(** [undef_flags] and [nextinstr_nf] *)
Lemma nextinstr_nf_pc:
forall rs, (nextinstr_nf rs)#PC = Val.offset_ptr rs#PC Ptrofs.one.
Proof.
intros. reflexivity.
Qed.
Definition if_preg (r: preg) : bool :=
match r with
| IR _ => true
| FR _ => true
| CR _ => false
| PC => false
end.
Lemma data_if_preg: forall r, data_preg r = true -> if_preg r = true.
Proof.
intros. destruct r; reflexivity || discriminate.
Qed.
Lemma if_preg_not_PC: forall r, if_preg r = true -> r <> PC.
Proof.
intros; red; intros; subst; discriminate.
Qed.
Hint Resolve data_if_preg if_preg_not_PC: asmgen.
Lemma nextinstr_nf_inv:
forall r rs, if_preg r = true -> (nextinstr_nf rs)#r = rs#r.
Proof.
intros. destruct r; reflexivity || discriminate.
Qed.
Lemma nextinstr_nf_inv1:
forall r rs, data_preg r = true -> (nextinstr_nf rs)#r = rs#r.
Proof.
intros. destruct r; reflexivity || discriminate.
Qed.
(** Useful simplification tactic *)
Ltac Simplif :=
((rewrite nextinstr_inv by eauto with asmgen)
|| (rewrite nextinstr_inv1 by eauto with asmgen)
|| (rewrite nextinstr_nf_inv by eauto with asmgen)
|| (rewrite Pregmap.gss)
|| (rewrite nextinstr_pc)
|| (rewrite nextinstr_nf_pc)
|| (rewrite Pregmap.gso by eauto with asmgen)); auto with asmgen.
Ltac Simpl := repeat Simplif.
(** * Correctness of ARM constructor functions *)
Section CONSTRUCTORS.
Context `{memory_model_prf: Mem.MemoryModel}.
Variable ge: genv.
Variable fn: function.
(** Decomposition of an integer constant *)
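(** An arbitrary 32-bit constant cannot in general be encoded as a
    single ARM or Thumb immediate operand, so [decompose_int] splits a
    constant [n] into a short list of encodable chunks.  The lemmas
    below show that folding those chunks back together with [Int.or],
    [Int.xor] or [Int.add] reconstructs [n] exactly, which is what the
    constructor functions rely on. *)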
Lemma decompose_int_arm_or:
forall N n p x, List.fold_left Int.or (decompose_int_arm N n p) x = Int.or x n.
Proof.
induction N; intros; simpl.
predSpec Int.eq Int.eq_spec n Int.zero; simpl.
subst n. rewrite Int.or_zero. auto.
auto.
predSpec Int.eq Int.eq_spec (Int.and n (Int.shl (Int.repr 3) p)) Int.zero.
auto.
simpl. rewrite IHN. rewrite Int.or_assoc. decEq. rewrite <- Int.and_or_distrib.
rewrite Int.or_not_self. apply Int.and_mone.
Qed.
Lemma decompose_int_arm_xor:
forall N n p x, List.fold_left Int.xor (decompose_int_arm N n p) x = Int.xor x n.
Proof.
induction N; intros; simpl.
predSpec Int.eq Int.eq_spec n Int.zero; simpl.
subst n. rewrite Int.xor_zero. auto.
auto.
predSpec Int.eq Int.eq_spec (Int.and n (Int.shl (Int.repr 3) p)) Int.zero.
auto.
simpl. rewrite IHN. rewrite Int.xor_assoc. decEq. rewrite <- Int.and_xor_distrib.
rewrite Int.xor_not_self. apply Int.and_mone.
Qed.
Lemma decompose_int_arm_add:
forall N n p x, List.fold_left Int.add (decompose_int_arm N n p) x = Int.add x n.
Proof.
induction N; intros; simpl.
predSpec Int.eq Int.eq_spec n Int.zero; simpl.
subst n. rewrite Int.add_zero. auto.
auto.
predSpec Int.eq Int.eq_spec (Int.and n (Int.shl (Int.repr 3) p)) Int.zero.
auto.
simpl. rewrite IHN. rewrite Int.add_assoc. decEq. rewrite Int.add_and.
rewrite Int.or_not_self. apply Int.and_mone. apply Int.and_not_self.
Qed.
Remark decompose_int_arm_nil:
forall N n p, decompose_int_arm N n p = nil -> n = Int.zero.
Proof.
intros. generalize (decompose_int_arm_or N n p Int.zero). rewrite H. simpl.
rewrite Int.or_commut; rewrite Int.or_zero; auto.
Qed.
Lemma decompose_int_thumb_or:
forall N n p x, List.fold_left Int.or (decompose_int_thumb N n p) x = Int.or x n.
Proof.
induction N; intros; simpl.
predSpec Int.eq Int.eq_spec n Int.zero; simpl.
subst n. rewrite Int.or_zero. auto.
auto.
predSpec Int.eq Int.eq_spec (Int.and n (Int.shl Int.one p)) Int.zero.
auto.
simpl. rewrite IHN. rewrite Int.or_assoc. decEq. rewrite <- Int.and_or_distrib.
rewrite Int.or_not_self. apply Int.and_mone.
Qed.
Lemma decompose_int_thumb_xor:
forall N n p x, List.fold_left Int.xor (decompose_int_thumb N n p) x = Int.xor x n.
Proof.
induction N; intros; simpl.
predSpec Int.eq Int.eq_spec n Int.zero; simpl.
subst n. rewrite Int.xor_zero. auto.
auto.
predSpec Int.eq Int.eq_spec (Int.and n (Int.shl Int.one p)) Int.zero.
auto.
simpl. rewrite IHN. rewrite Int.xor_assoc. decEq. rewrite <- Int.and_xor_distrib.
rewrite Int.xor_not_self. apply Int.and_mone.
Qed.
Lemma decompose_int_thumb_add:
forall N n p x, List.fold_left Int.add (decompose_int_thumb N n p) x = Int.add x n.
Proof.
induction N; intros; simpl.
predSpec Int.eq Int.eq_spec n Int.zero; simpl.
subst n. rewrite Int.add_zero. auto.
auto.
predSpec Int.eq Int.eq_spec (Int.and n (Int.shl Int.one p)) Int.zero.
auto.
simpl. rewrite IHN. rewrite Int.add_assoc. decEq. rewrite Int.add_and.
rewrite Int.or_not_self. apply Int.and_mone. apply Int.and_not_self.
Qed.
Remark decompose_int_thumb_nil:
forall N n p, decompose_int_thumb N n p = nil -> n = Int.zero.
Proof.
intros. generalize (decompose_int_thumb_or N n p Int.zero). rewrite H. simpl.
rewrite Int.or_commut; rewrite Int.or_zero; auto.
Qed.
Lemma decompose_int_general:
forall (f: val -> int -> val) (g: int -> int -> int),
(forall v1 n2 n3, f (f v1 n2) n3 = f v1 (g n2 n3)) ->
(forall n1 n2 n3, g (g n1 n2) n3 = g n1 (g n2 n3)) ->
(forall n, g Int.zero n = n) ->
(forall N n p x, List.fold_left g (decompose_int_arm N n p) x = g x n) ->
(forall N n p x, List.fold_left g (decompose_int_thumb N n p) x = g x n) ->
forall n v,
List.fold_left f (decompose_int n) v = f v n.
Proof.
intros f g DISTR ASSOC ZERO DECOMP1 DECOMP2.
assert (A: forall l x y, g x (fold_left g l y) = fold_left g l (g x y)).
induction l; intros; simpl. auto. rewrite IHl. decEq. rewrite ASSOC; auto.
assert (B: forall l v n, fold_left f l (f v n) = f v (fold_left g l n)).
induction l; intros; simpl.
auto.
rewrite IHl. rewrite DISTR. decEq. decEq. auto.
intros. unfold decompose_int, decompose_int_base.
destruct (thumb tt); [destruct (is_immed_arith_thumb_special n)|].
- reflexivity.
- destruct (decompose_int_thumb 24%nat n Int.zero) eqn:DB.
+ simpl. exploit decompose_int_thumb_nil; eauto. congruence.
+ simpl. rewrite B. decEq.
generalize (DECOMP2 24%nat n Int.zero Int.zero).
rewrite DB; simpl. rewrite ! ZERO. auto.
- destruct (decompose_int_arm 12%nat n Int.zero) eqn:DB.
+ simpl. exploit decompose_int_arm_nil; eauto. congruence.
+ simpl. rewrite B. decEq.
generalize (DECOMP1 12%nat n Int.zero Int.zero).
rewrite DB; simpl. rewrite ! ZERO. auto.
Qed.
Lemma decompose_int_or:
forall n v,
List.fold_left (fun v i => Val.or v (Vint i)) (decompose_int n) v = Val.or v (Vint n).
Proof.
intros. apply decompose_int_general with (f := fun v n => Val.or v (Vint n)) (g := Int.or).
intros. rewrite Val.or_assoc. auto.
apply Int.or_assoc.
intros. rewrite Int.or_commut. apply Int.or_zero.
apply decompose_int_arm_or. apply decompose_int_thumb_or.
Qed.
Lemma decompose_int_bic:
forall n v,
List.fold_left (fun v i => Val.and v (Vint (Int.not i))) (decompose_int n) v = Val.and v (Vint (Int.not n)).
Proof.
intros. apply decompose_int_general with (f := fun v n => Val.and v (Vint (Int.not n))) (g := Int.or).
intros. rewrite Val.and_assoc. simpl. decEq. decEq. rewrite Int.not_or_and_not. auto.
apply Int.or_assoc.
intros. rewrite Int.or_commut. apply Int.or_zero.
apply decompose_int_arm_or. apply decompose_int_thumb_or.
Qed.
Lemma decompose_int_xor:
forall n v,
List.fold_left (fun v i => Val.xor v (Vint i)) (decompose_int n) v = Val.xor v (Vint n).
Proof.
intros. apply decompose_int_general with (f := fun v n => Val.xor v (Vint n)) (g := Int.xor).
intros. rewrite Val.xor_assoc. auto.
apply Int.xor_assoc.
intros. rewrite Int.xor_commut. apply Int.xor_zero.
apply decompose_int_arm_xor. apply decompose_int_thumb_xor.
Qed.
Lemma decompose_int_add:
forall n v,
List.fold_left (fun v i => Val.add v (Vint i)) (decompose_int n) v = Val.add v (Vint n).
Proof.
intros. apply decompose_int_general with (f := fun v n => Val.add v (Vint n)) (g := Int.add).
intros. rewrite Val.add_assoc. auto.
apply Int.add_assoc.
intros. rewrite Int.add_commut. apply Int.add_zero.
apply decompose_int_arm_add. apply decompose_int_thumb_add.
Qed.
Lemma decompose_int_sub:
forall n v,
List.fold_left (fun v i => Val.sub v (Vint i)) (decompose_int n) v = Val.sub v (Vint n).
Proof.
intros. apply decompose_int_general with (f := fun v n => Val.sub v (Vint n)) (g := Int.add).
intros. repeat rewrite Val.sub_add_opp. rewrite Val.add_assoc. decEq. simpl. decEq.
rewrite Int.neg_add_distr; auto.
apply Int.add_assoc.
intros. rewrite Int.add_commut. apply Int.add_zero.
apply decompose_int_arm_add. apply decompose_int_thumb_add.
Qed.
Lemma iterate_op_correct:
forall op1 op2 (f: val -> int -> val) (rs: regset) (r: ireg) m v0 n k,
(forall (rs:regset) n,
exec_instr ge fn (op2 (SOimm n)) rs m =
Next (nextinstr_nf (rs#r <- (f (rs#r) n))) m) ->
(forall n,
exec_instr ge fn (op1 (SOimm n)) rs m =
Next (nextinstr_nf (rs#r <- (f v0 n))) m) ->
exists rs',
exec_straight ge fn (iterate_op op1 op2 (decompose_int n) k) rs m k rs' m
/\ rs'#r = List.fold_left f (decompose_int n) v0
/\ forall r': preg, r' <> r -> if_preg r' = true -> rs'#r' = rs#r'.
Proof.
intros until k; intros SEM2 SEM1.
unfold iterate_op.
destruct (decompose_int n) as [ | i tl] eqn:DI.
unfold decompose_int in DI. destruct (decompose_int_base n); congruence.
revert k. pattern tl. apply List.rev_ind.
(* base case *)
intros; simpl. econstructor.
split. apply exec_straight_one. rewrite SEM1. reflexivity. reflexivity.
intuition Simpl.
(* inductive case *)
intros.
rewrite List.map_app. simpl. rewrite app_ass. simpl.
destruct (H (op2 (SOimm x) :: k)) as [rs' [A [B C]]].
econstructor.
split. eapply exec_straight_trans. eexact A. apply exec_straight_one.
rewrite SEM2. reflexivity. reflexivity.
split. rewrite fold_left_app; simpl. Simpl. rewrite B. auto.
intros; Simpl.
Qed.
(** Loading a constant. *)
Lemma loadimm_correct:
forall r n k rs m,
exists rs',
exec_straight ge fn (loadimm r n k) rs m k rs' m
/\ rs'#r = Vint n
/\ forall r': preg, r' <> r -> if_preg r' = true -> rs'#r' = rs#r'.
Proof.
intros. unfold loadimm.
set (l1 := length (decompose_int n)).
set (l2 := length (decompose_int (Int.not n))).
destruct (Nat.leb l1 1%nat).
{ (* single mov *)
econstructor; split. apply exec_straight_one. simpl; reflexivity. auto.
split; intros; Simpl. }
destruct (Nat.leb l2 1%nat).
{ (* single movn *)
econstructor; split. apply exec_straight_one.
simpl. rewrite Int.not_involutive. reflexivity. auto.
split; intros; Simpl. }
destruct (thumb tt).
{ (* movw / movt *)
unfold loadimm_thumb. destruct (Int.eq (Int.shru n (Int.repr 16)) Int.zero).
econstructor; split.
apply exec_straight_one. simpl; eauto. auto. split; intros; Simpl.
econstructor; split.
eapply exec_straight_two. simpl; reflexivity. simpl; reflexivity. auto. auto.
split; intros; Simpl. simpl. f_equal. rewrite Int.zero_ext_and by omega.
rewrite Int.and_assoc. change 65535 with (two_p 16 - 1). rewrite Int.and_idem.
apply Int.same_bits_eq; intros.
rewrite Int.bits_or, Int.bits_and, Int.bits_shl, Int.testbit_repr by auto.
rewrite Int.Ztestbit_two_p_m1 by omega. change (Int.unsigned (Int.repr 16)) with 16.
destruct (zlt i 16).
rewrite andb_true_r, orb_false_r; auto.
rewrite andb_false_r; simpl. rewrite Int.bits_shru by omega.
change (Int.unsigned (Int.repr 16)) with 16. rewrite zlt_true by omega. f_equal; omega.
}
destruct (Nat.leb l1 l2).
{ (* mov - orr* *)
replace (Vint n) with (List.fold_left (fun v i => Val.or v (Vint i)) (decompose_int n) Vzero).
apply iterate_op_correct.
auto.
intros; simpl. rewrite Int.or_commut; rewrite Int.or_zero; auto.
rewrite decompose_int_or. simpl. rewrite Int.or_commut; rewrite Int.or_zero; auto.
}
{ (* mvn - bic* *)
replace (Vint n) with (List.fold_left (fun v i => Val.and v (Vint (Int.not i))) (decompose_int (Int.not n)) (Vint Int.mone)).
apply iterate_op_correct.
auto.
intros. simpl. rewrite Int.and_commut; rewrite Int.and_mone; auto.
rewrite decompose_int_bic. simpl. rewrite Int.not_involutive. rewrite Int.and_commut. rewrite Int.and_mone; auto.
}
Qed.
(** Add integer immediate. *)
Lemma addimm_correct:
forall r1 r2 n k rs m,
exists rs',
exec_straight ge fn (addimm r1 r2 n k) rs m k rs' m
/\ rs'#r1 = Val.add rs#r2 (Vint n)
/\ forall r': preg, r' <> r1 -> if_preg r' = true -> rs'#r' = rs#r'.
Proof.
intros. unfold addimm.
destruct (Int.ltu (Int.repr (-256)) n).
(* sub *)
econstructor; split. apply exec_straight_one; simpl; auto.
split; intros; Simpl. apply Val.sub_opp_add.
destruct (Nat.leb (length (decompose_int n)) (length (decompose_int (Int.neg n)))).
(* add - add* *)
replace (Val.add (rs r2) (Vint n))
with (List.fold_left (fun v i => Val.add v (Vint i)) (decompose_int n) (rs r2)).
apply iterate_op_correct.
auto.
auto.
apply decompose_int_add.
(* sub - sub* *)
replace (Val.add (rs r2) (Vint n))
with (List.fold_left (fun v i => Val.sub v (Vint i)) (decompose_int (Int.neg n)) (rs r2)).
apply iterate_op_correct.
auto.
auto.
rewrite decompose_int_sub. apply Val.sub_opp_add.
Qed.
(** And integer immediate *)
Lemma andimm_correct:
forall r1 r2 n k rs m,
exists rs',
exec_straight ge fn (andimm r1 r2 n k) rs m k rs' m
/\ rs'#r1 = Val.and rs#r2 (Vint n)
/\ forall r': preg, r' <> r1 -> if_preg r' = true -> rs'#r' = rs#r'.
Proof.
intros. unfold andimm. destruct (is_immed_arith n).
(* andi *)
exists (nextinstr_nf (rs#r1 <- (Val.and rs#r2 (Vint n)))).
split. apply exec_straight_one; auto. split; intros; Simpl.
(* bic - bic* *)
replace (Val.and (rs r2) (Vint n))
with (List.fold_left (fun v i => Val.and v (Vint (Int.not i))) (decompose_int (Int.not n)) (rs r2)).
apply iterate_op_correct.
auto. auto.
rewrite decompose_int_bic. rewrite Int.not_involutive. auto.
Qed.
(** Reverse sub immediate *)
Lemma rsubimm_correct:
forall r1 r2 n k rs m,
exists rs',
exec_straight ge fn (rsubimm r1 r2 n k) rs m k rs' m
/\ rs'#r1 = Val.sub (Vint n) rs#r2
/\ forall r': preg, r' <> r1 -> if_preg r' = true -> rs'#r' = rs#r'.
Proof.
intros. unfold rsubimm.
(* rsb - add* *)
replace (Val.sub (Vint n) (rs r2))
with (List.fold_left (fun v i => Val.add v (Vint i)) (decompose_int n) (Val.neg (rs r2))).
apply iterate_op_correct.
auto.
intros. simpl. destruct (rs r2); auto. simpl. rewrite Int.sub_add_opp.
rewrite Int.add_commut; auto.
rewrite decompose_int_add.
destruct (rs r2); simpl; auto. rewrite Int.sub_add_opp. rewrite Int.add_commut; auto.
Qed.
(** Or immediate *)
Lemma orimm_correct:
forall r1 r2 n k rs m,
exists rs',
exec_straight ge fn (orimm r1 r2 n k) rs m k rs' m
/\ rs'#r1 = Val.or rs#r2 (Vint n)
/\ forall r': preg, r' <> r1 -> if_preg r' = true -> rs'#r' = rs#r'.
Proof.
intros. unfold orimm.
(* ori - ori* *)
replace (Val.or (rs r2) (Vint n))
with (List.fold_left (fun v i => Val.or v (Vint i)) (decompose_int n) (rs r2)).
apply iterate_op_correct.
auto.
auto.
apply decompose_int_or.
Qed.
(** Xor immediate *)
Lemma xorimm_correct:
forall r1 r2 n k rs m,
exists rs',
exec_straight ge fn (xorimm r1 r2 n k) rs m k rs' m
/\ rs'#r1 = Val.xor rs#r2 (Vint n)
/\ forall r': preg, r' <> r1 -> if_preg r' = true -> rs'#r' = rs#r'.
Proof.
intros. unfold xorimm.
(* xori - xori* *)
replace (Val.xor (rs r2) (Vint n))
with (List.fold_left (fun v i => Val.xor v (Vint i)) (decompose_int n) (rs r2)).
apply iterate_op_correct.
auto.
auto.
apply decompose_int_xor.
Qed.
(** Indexed memory loads. *)
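(** [indexed_memory_access] either uses the offset [n] directly as an
    immediate (when [n = mk_immed n]) or first adds the remainder
    [n - mk_immed n] into [IR14] with [addimm].  Accordingly, the lemma
    below only assumes that the chosen base register plus immediate
    denote the same address as [base + n], and that registers other
    than [IR14] are unchanged. *)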
Lemma indexed_memory_access_correct:
forall (P: regset -> Prop) (mk_instr: ireg -> int -> instruction)
(mk_immed: int -> int) (base: ireg) n k (rs: regset) m m',
(forall (r1: ireg) (rs1: regset) n1 k,
Val.add rs1#r1 (Vint n1) = Val.add rs#base (Vint n) ->
(forall (r: preg), if_preg r = true -> r <> IR14 -> rs1 r = rs r) ->
exists rs',
exec_straight ge fn (mk_instr r1 n1 :: k) rs1 m k rs' m' /\ P rs') ->
exists rs',
exec_straight ge fn
(indexed_memory_access mk_instr mk_immed base n k) rs m
k rs' m'
/\ P rs'.
Proof.
intros until m'; intros SEM.
unfold indexed_memory_access.
destruct (Int.eq n (mk_immed n)).
- apply SEM; auto.
- destruct (addimm_correct IR14 base (Int.sub n (mk_immed n)) (mk_instr IR14 (mk_immed n) :: k) rs m)
as (rs1 & A & B & C).
destruct (SEM IR14 rs1 (mk_immed n) k) as (rs2 & D & E).
rewrite B. rewrite Val.add_assoc. f_equal. simpl.
rewrite Int.sub_add_opp. rewrite Int.add_assoc.
rewrite (Int.add_commut (Int.neg (mk_immed n))).
rewrite Int.add_neg_zero. rewrite Int.add_zero. auto.
auto with asmgen.
exists rs2; split; auto. eapply exec_straight_trans; eauto.
Qed.
Lemma loadind_int_correct:
forall (base: ireg) ofs dst (rs: regset) m v k,
Mem.loadv Mint32 m (Val.offset_ptr rs#base ofs) = Some v ->
exists rs',
exec_straight ge fn (loadind_int base ofs dst k) rs m k rs' m
/\ rs'#dst = v
/\ forall r, if_preg r = true -> r <> IR14 -> r <> dst -> rs'#r = rs#r.
Proof.
intros; unfold loadind_int.
assert (Val.offset_ptr (rs base) ofs = Val.add (rs base) (Vint (Ptrofs.to_int ofs))).
{ destruct (rs base); try discriminate. simpl. f_equal; f_equal. symmetry; auto with ptrofs. }
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_load. rewrite H1, <- H0, H. eauto. auto.
split; intros; Simpl.
Qed.
Lemma loadind_correct:
forall (base: ireg) ofs ty dst k c (rs: regset) m v,
loadind base ofs ty dst k = OK c ->
Mem.loadv (chunk_of_type ty) m (Val.offset_ptr rs#base ofs) = Some v ->
exists rs',
exec_straight ge fn c rs m k rs' m
/\ rs'#(preg_of dst) = v
/\ forall r, if_preg r = true -> r <> IR14 -> r <> preg_of dst -> rs'#r = rs#r.
Proof.
unfold loadind; intros.
assert (Val.offset_ptr (rs base) ofs = Val.add (rs base) (Vint (Ptrofs.to_int ofs))).
{ destruct (rs base); try discriminate. simpl. f_equal; f_equal. symmetry; auto with ptrofs. }
destruct ty; destruct (preg_of dst); inv H; simpl in H0.
- (* int *)
apply loadind_int_correct; auto.
- (* float *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_load. rewrite H, <- H1, H0. eauto. auto.
split; intros; Simpl.
- (* single *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_load. rewrite H, <- H1, H0. eauto. auto.
split; intros; Simpl.
- (* any32 *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_load. rewrite H, <- H1, H0. eauto. auto.
split; intros; Simpl.
- (* any64 *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_load. rewrite H, <- H1, H0. eauto. auto.
split; intros; Simpl.
Qed.
(** Indexed memory stores. *)
Lemma storeind_correct:
forall (base: ireg) ofs ty src k c (rs: regset) m m',
storeind src base ofs ty k = OK c ->
Mem.storev (chunk_of_type ty) m (Val.offset_ptr rs#base ofs) (rs#(preg_of src)) = Some m' ->
exists rs',
exec_straight ge fn c rs m k rs' m'
/\ forall r, if_preg r = true -> r <> IR14 -> rs'#r = rs#r.
Proof.
unfold storeind; intros.
assert (DATA: data_preg (preg_of src) = true) by eauto with asmgen.
assert (Val.offset_ptr (rs base) ofs = Val.add (rs base) (Vint (Ptrofs.to_int ofs))).
{ destruct (rs base); try discriminate. simpl. f_equal; f_equal. symmetry; auto with ptrofs. }
destruct ty; destruct (preg_of src); inv H; simpl in H0.
- (* int *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_store. rewrite H, <- H1, H2, H0 by auto with asmgen; eauto. auto.
intros; Simpl.
- (* float *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_store. rewrite H, <- H1, H2, H0 by auto with asmgen; eauto. auto.
intros; Simpl.
- (* single *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_store. rewrite H, <- H1, H2, H0 by auto with asmgen; eauto. auto.
intros; Simpl.
- (* any32 *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_store. rewrite H, <- H1, H2, H0 by auto with asmgen; eauto. auto.
intros; Simpl.
- (* any64 *)
apply indexed_memory_access_correct; intros.
econstructor; split.
apply exec_straight_one. simpl. unfold exec_store. rewrite H, <- H1, H2, H0 by auto with asmgen; eauto. auto.
intros; Simpl.
Qed.
(** Saving the link register *)
Lemma save_lr_correct:
forall ofs k (rs: regset) m m',
Mem.storev Mint32 m (Val.offset_ptr rs#IR13 ofs) (rs#IR14) = Some m' ->
exists rs',
exec_straight ge fn (save_lr ofs k) rs m k rs' m'
/\ (forall r, if_preg r = true -> r <> IR12 -> rs'#r = rs#r)
/\ (save_lr_preserves_R12 ofs = true -> rs'#IR12 = rs#IR12).
Proof.
intros; unfold save_lr, save_lr_preserves_R12.
set (n := Ptrofs.to_int ofs). set (n1 := mk_immed_mem_word n).
assert (EQ: Val.offset_ptr rs#IR13 ofs = Val.add rs#IR13 (Vint n)).
{ destruct rs#IR13; try discriminate. simpl. f_equal; f_equal. unfold n; symmetry; auto with ptrofs. }
destruct (Int.eq n n1).
- econstructor; split. apply exec_straight_one. simpl; unfold exec_store. rewrite <- EQ, H; reflexivity. auto.
split. intros; Simpl. intros; Simpl.
- destruct (addimm_correct IR12 IR13 (Int.sub n n1) (Pstr IR14 IR12 (SOimm n1) :: k) rs m)
as (rs1 & A & B & C).
econstructor; split.
eapply exec_straight_trans. eexact A.
apply exec_straight_one. simpl; unfold exec_store.
rewrite B. rewrite Val.add_assoc. simpl.
rewrite Int.sub_add_opp. rewrite Int.add_assoc.
rewrite (Int.add_commut (Int.neg n1)).
rewrite Int.add_neg_zero. rewrite Int.add_zero.
rewrite <- EQ. rewrite C by eauto with asmgen. rewrite H. reflexivity.
auto.
split. intros; Simpl. congruence.
Qed.
(** Translation of shift immediates *)
Lemma transl_shift_correct:
forall s (r: ireg) (rs: regset),
eval_shift_op (transl_shift s r) rs = eval_shift s (rs#r).
Proof.
intros. destruct s; simpl; auto.
Qed.
(** Translation of conditions *)
Lemma compare_int_spec:
forall rs v1 v2 m,
let rs1 := nextinstr (compare_int rs v1 v2 m) in
rs1#CN = Val.negative (Val.sub v1 v2)
/\ rs1#CZ = Val.cmpu (Mem.valid_pointer m) Ceq v1 v2
/\ rs1#CC = Val.cmpu (Mem.valid_pointer m) Cge v1 v2
/\ rs1#CV = Val.sub_overflow v1 v2.
Proof.
intros. unfold rs1. intuition.
Qed.
Lemma compare_int_inv:
forall rs v1 v2 m,
let rs1 := nextinstr (compare_int rs v1 v2 m) in
forall r', data_preg r' = true -> rs1#r' = rs#r'.
Proof.
intros. unfold rs1, compare_int.
repeat Simplif.
Qed.
Lemma int_signed_eq:
forall x y, Int.eq x y = zeq (Int.signed x) (Int.signed y).
Proof.
intros. unfold Int.eq. unfold proj_sumbool.
destruct (zeq (Int.unsigned x) (Int.unsigned y));
destruct (zeq (Int.signed x) (Int.signed y)); auto.
elim n. unfold Int.signed. rewrite e; auto.
elim n. apply Int.eqm_small_eq; auto with ints.
eapply Int.eqm_trans. apply Int.eqm_sym. apply Int.eqm_signed_unsigned.
rewrite e. apply Int.eqm_signed_unsigned.
Qed.
Lemma int_not_lt:
forall x y, negb (Int.lt y x) = (Int.lt x y || Int.eq x y).
Proof.
intros. unfold Int.lt. rewrite int_signed_eq. unfold proj_sumbool.
destruct (zlt (Int.signed y) (Int.signed x)).
rewrite zlt_false. rewrite zeq_false. auto. omega. omega.
destruct (zeq (Int.signed x) (Int.signed y)).
rewrite zlt_false. auto. omega.
rewrite zlt_true. auto. omega.
Qed.
Lemma int_lt_not:
forall x y, Int.lt y x = negb (Int.lt x y) && negb (Int.eq x y).
Proof.
intros. rewrite <- negb_orb. rewrite <- int_not_lt. rewrite negb_involutive. auto.
Qed.
Lemma int_not_ltu:
forall x y, negb (Int.ltu y x) = (Int.ltu x y || Int.eq x y).
Proof.
intros. unfold Int.ltu, Int.eq.
destruct (zlt (Int.unsigned y) (Int.unsigned x)).
rewrite zlt_false. rewrite zeq_false. auto. omega. omega.
destruct (zeq (Int.unsigned x) (Int.unsigned y)).
rewrite zlt_false. auto. omega.
rewrite zlt_true. auto. omega.
Qed.
Lemma int_ltu_not:
forall x y, Int.ltu y x = negb (Int.ltu x y) && negb (Int.eq x y).
Proof.
intros. rewrite <- negb_orb. rewrite <- int_not_ltu. rewrite negb_involutive. auto.
Qed.
Lemma cond_for_signed_cmp_correct:
forall c v1 v2 rs m b,
Val.cmp_bool c v1 v2 = Some b ->
eval_testcond (cond_for_signed_cmp c)
(nextinstr (compare_int rs v1 v2 m)) = Some b.
Proof.
intros. generalize (compare_int_spec rs v1 v2 m).
set (rs' := nextinstr (compare_int rs v1 v2 m)).
intros [A [B [C D]]].
destruct v1; destruct v2; simpl in H; inv H.
unfold eval_testcond. rewrite A; rewrite B; rewrite C; rewrite D.
simpl. unfold Val.cmp, Val.cmpu.
rewrite Int.lt_sub_overflow.
destruct c; simpl.
destruct (Int.eq i i0); auto.
destruct (Int.eq i i0); auto.
destruct (Int.lt i i0); auto.
rewrite int_not_lt. destruct (Int.lt i i0); simpl; destruct (Int.eq i i0); auto.
rewrite (int_lt_not i i0). destruct (Int.lt i i0); destruct (Int.eq i i0); reflexivity.
destruct (Int.lt i i0); reflexivity.
Qed.
Lemma cond_for_unsigned_cmp_correct:
forall c v1 v2 rs m b,
Val.cmpu_bool (Mem.valid_pointer m) c v1 v2 = Some b ->
eval_testcond (cond_for_unsigned_cmp c)
(nextinstr (compare_int rs v1 v2 m)) = Some b.
Proof.
intros. generalize (compare_int_spec rs v1 v2 m).
set (rs' := nextinstr (compare_int rs v1 v2 m)).
intros [A [B [C D]]].
unfold eval_testcond. rewrite B; rewrite C. unfold Val.cmpu, Val.cmp.
destruct v1; destruct v2; simpl in H; inv H.
(* int int *)
destruct c; simpl; auto.
destruct (Int.eq i i0); reflexivity.
destruct (Int.eq i i0); auto.
destruct (Int.ltu i i0); auto.
rewrite (int_not_ltu i i0). destruct (Int.ltu i i0); destruct (Int.eq i i0); auto.
rewrite (int_ltu_not i i0). destruct (Int.ltu i i0); destruct (Int.eq i i0); reflexivity.
destruct (Int.ltu i i0); reflexivity.
(* int ptr *)
destruct (Int.eq i Int.zero &&
(Mem.valid_pointer m b0 (Ptrofs.unsigned i0) || Mem.valid_pointer m b0 (Ptrofs.unsigned i0 - 1))) eqn:?; try discriminate.
destruct c; simpl in *; inv H1.
rewrite Heqb1; reflexivity.
rewrite Heqb1; reflexivity.
(* ptr int *)
destruct (Int.eq i0 Int.zero &&
(Mem.valid_pointer m b0 (Ptrofs.unsigned i) || Mem.valid_pointer m b0 (Ptrofs.unsigned i - 1))) eqn:?; try discriminate.
destruct c; simpl in *; inv H1.
rewrite Heqb1; reflexivity.
rewrite Heqb1; reflexivity.
(* ptr ptr *)
simpl.
fold (Mem.weak_valid_pointer m b0 (Ptrofs.unsigned i)) in *.
fold (Mem.weak_valid_pointer m b1 (Ptrofs.unsigned i0)) in *.
destruct (eq_block b0 b1).
destruct (Mem.weak_valid_pointer m b0 (Ptrofs.unsigned i) &&
Mem.weak_valid_pointer m b1 (Ptrofs.unsigned i0)); inversion H1.
destruct c; simpl; auto.
destruct (Ptrofs.eq i i0); reflexivity.
destruct (Ptrofs.eq i i0); auto.
destruct (Ptrofs.ltu i i0); auto.
rewrite (Ptrofs.not_ltu i i0). destruct (Ptrofs.ltu i i0); simpl; destruct (Ptrofs.eq i i0); auto.
rewrite (Ptrofs.ltu_not i i0). destruct (Ptrofs.ltu i i0); destruct (Ptrofs.eq i i0); reflexivity.
destruct (Ptrofs.ltu i i0); reflexivity.
destruct (Mem.valid_pointer m b0 (Ptrofs.unsigned i) &&
Mem.valid_pointer m b1 (Ptrofs.unsigned i0)); try discriminate.
destruct c; simpl in *; inv H1; reflexivity.
Qed.
Lemma compare_float_spec:
forall rs f1 f2,
let rs1 := nextinstr (compare_float rs (Vfloat f1) (Vfloat f2)) in
rs1#CN = Val.of_bool (Float.cmp Clt f1 f2)
/\ rs1#CZ = Val.of_bool (Float.cmp Ceq f1 f2)
/\ rs1#CC = Val.of_bool (negb (Float.cmp Clt f1 f2))
/\ rs1#CV = Val.of_bool (negb (Float.cmp Ceq f1 f2 || Float.cmp Clt f1 f2 || Float.cmp Cgt f1 f2)).
Proof.
intros. intuition.
Qed.
Lemma compare_float_inv:
forall rs v1 v2,
let rs1 := nextinstr (compare_float rs v1 v2) in
forall r', data_preg r' = true -> rs1#r' = rs#r'.
Proof.
intros. unfold rs1, compare_float.
assert (nextinstr (rs#CN <- Vundef #CZ <- Vundef #CC <- Vundef #CV <- Vundef) r' = rs r').
{ repeat Simplif. }
destruct v1; destruct v2; auto.
repeat Simplif.
Qed.
Lemma compare_float_nextpc:
forall rs v1 v2,
nextinstr (compare_float rs v1 v2) PC = Val.offset_ptr (rs PC) Ptrofs.one.
Proof.
intros. unfold compare_float. destruct v1; destruct v2; reflexivity.
Qed.
Lemma cond_for_float_cmp_correct:
forall c n1 n2 rs,
eval_testcond (cond_for_float_cmp c)
(nextinstr (compare_float rs (Vfloat n1) (Vfloat n2))) =
Some(Float.cmp c n1 n2).
Proof.
intros.
generalize (compare_float_spec rs n1 n2).
set (rs' := nextinstr (compare_float rs (Vfloat n1) (Vfloat n2))).
intros [A [B [C D]]].
unfold eval_testcond. rewrite A; rewrite B; rewrite C; rewrite D.
destruct c; simpl.
(* eq *)
destruct (Float.cmp Ceq n1 n2); auto.
(* ne *)
rewrite Float.cmp_ne_eq. destruct (Float.cmp Ceq n1 n2); auto.
(* lt *)
destruct (Float.cmp Clt n1 n2); auto.
(* le *)
rewrite Float.cmp_le_lt_eq.
destruct (Float.cmp Clt n1 n2); destruct (Float.cmp Ceq n1 n2); auto.
(* gt *)
destruct (Float.cmp Ceq n1 n2) eqn:EQ;
destruct (Float.cmp Clt n1 n2) eqn:LT;
destruct (Float.cmp Cgt n1 n2) eqn:GT; auto.
exfalso; eapply Float.cmp_lt_gt_false; eauto.
exfalso; eapply Float.cmp_gt_eq_false; eauto.
exfalso; eapply Float.cmp_lt_gt_false; eauto.
(* ge *)
rewrite Float.cmp_ge_gt_eq.
destruct (Float.cmp Ceq n1 n2) eqn:EQ;
destruct (Float.cmp Clt n1 n2) eqn:LT;
destruct (Float.cmp Cgt n1 n2) eqn:GT; auto.
exfalso; eapply Float.cmp_lt_eq_false; eauto.
exfalso; eapply Float.cmp_lt_eq_false; eauto.
exfalso; eapply Float.cmp_lt_gt_false; eauto.
Qed.
Lemma cond_for_float_not_cmp_correct:
forall c n1 n2 rs,
eval_testcond (cond_for_float_not_cmp c)
(nextinstr (compare_float rs (Vfloat n1) (Vfloat n2)))=
Some(negb(Float.cmp c n1 n2)).
Proof.
intros.
generalize (compare_float_spec rs n1 n2).
set (rs' := nextinstr (compare_float rs (Vfloat n1) (Vfloat n2))).
intros [A [B [C D]]].
unfold eval_testcond. rewrite A; rewrite B; rewrite C; rewrite D.
destruct c; simpl.
(* eq *)
destruct (Float.cmp Ceq n1 n2); auto.
(* ne *)
rewrite Float.cmp_ne_eq. destruct (Float.cmp Ceq n1 n2); auto.
(* lt *)
destruct (Float.cmp Clt n1 n2); auto.
(* le *)
rewrite Float.cmp_le_lt_eq.
destruct (Float.cmp Clt n1 n2) eqn:LT; destruct (Float.cmp Ceq n1 n2) eqn:EQ; auto.
(* gt *)
destruct (Float.cmp Ceq n1 n2) eqn:EQ;
destruct (Float.cmp Clt n1 n2) eqn:LT;
destruct (Float.cmp Cgt n1 n2) eqn:GT; auto.
exfalso; eapply Float.cmp_lt_gt_false; eauto.
exfalso; eapply Float.cmp_gt_eq_false; eauto.
exfalso; eapply Float.cmp_lt_gt_false; eauto.
(* ge *)
rewrite Float.cmp_ge_gt_eq.
destruct (Float.cmp Ceq n1 n2) eqn:EQ;
destruct (Float.cmp Clt n1 n2) eqn:LT;
destruct (Float.cmp Cgt n1 n2) eqn:GT; auto.
exfalso; eapply Float.cmp_lt_eq_false; eauto.
exfalso; eapply Float.cmp_lt_eq_false; eauto.
exfalso; eapply Float.cmp_lt_gt_false; eauto.
Qed.
Lemma compare_float32_spec:
forall rs f1 f2,
let rs1 := nextinstr (compare_float32 rs (Vsingle f1) (Vsingle f2)) in
rs1#CN = Val.of_bool (Float32.cmp Clt f1 f2)
/\ rs1#CZ = Val.of_bool (Float32.cmp Ceq f1 f2)
/\ rs1#CC = Val.of_bool (negb (Float32.cmp Clt f1 f2))
/\ rs1#CV = Val.of_bool (negb (Float32.cmp Ceq f1 f2 || Float32.cmp Clt f1 f2 || Float32.cmp Cgt f1 f2)).
Proof.
intros. intuition.
Qed.
Lemma compare_float32_inv:
forall rs v1 v2,
let rs1 := nextinstr (compare_float32 rs v1 v2) in
forall r', data_preg r' = true -> rs1#r' = rs#r'.
Proof.
intros. unfold rs1, compare_float32.
assert (nextinstr (rs#CN <- Vundef #CZ <- Vundef #CC <- Vundef #CV <- Vundef) r' = rs r').
{ repeat Simplif. }
destruct v1; destruct v2; auto.
repeat Simplif.
Qed.
Lemma compare_float32_nextpc:
forall rs v1 v2,
nextinstr (compare_float32 rs v1 v2) PC = Val.offset_ptr (rs PC) Ptrofs.one.
Proof.
intros. unfold compare_float32. destruct v1; destruct v2; reflexivity.
Qed.
Lemma cond_for_float32_cmp_correct:
forall c n1 n2 rs,
eval_testcond (cond_for_float_cmp c)
(nextinstr (compare_float32 rs (Vsingle n1) (Vsingle n2))) =
Some(Float32.cmp c n1 n2).
Proof.
intros.
generalize (compare_float32_spec rs n1 n2).
set (rs' := nextinstr (compare_float32 rs (Vsingle n1) (Vsingle n2))).
intros [A [B [C D]]].
unfold eval_testcond. rewrite A; rewrite B; rewrite C; rewrite D.
destruct c; simpl.
(* eq *)
destruct (Float32.cmp Ceq n1 n2); auto.
(* ne *)
rewrite Float32.cmp_ne_eq. destruct (Float32.cmp Ceq n1 n2); auto.
(* lt *)
destruct (Float32.cmp Clt n1 n2); auto.
(* le *)
rewrite Float32.cmp_le_lt_eq.
destruct (Float32.cmp Clt n1 n2); destruct (Float32.cmp Ceq n1 n2); auto.
(* gt *)
destruct (Float32.cmp Ceq n1 n2) eqn:EQ;
destruct (Float32.cmp Clt n1 n2) eqn:LT;
destruct (Float32.cmp Cgt n1 n2) eqn:GT; auto.
exfalso; eapply Float32.cmp_lt_gt_false; eauto.
exfalso; eapply Float32.cmp_gt_eq_false; eauto.
exfalso; eapply Float32.cmp_lt_gt_false; eauto.
(* ge *)
rewrite Float32.cmp_ge_gt_eq.
destruct (Float32.cmp Ceq n1 n2) eqn:EQ;
destruct (Float32.cmp Clt n1 n2) eqn:LT;
destruct (Float32.cmp Cgt n1 n2) eqn:GT; auto.
exfalso; eapply Float32.cmp_lt_eq_false; eauto.
exfalso; eapply Float32.cmp_lt_eq_false; eauto.
exfalso; eapply Float32.cmp_lt_gt_false; eauto.
Qed.
Lemma cond_for_float32_not_cmp_correct:
forall c n1 n2 rs,
eval_testcond (cond_for_float_not_cmp c)
(nextinstr (compare_float32 rs (Vsingle n1) (Vsingle n2)))=
Some(negb(Float32.cmp c n1 n2)).
Proof.
intros.
generalize (compare_float32_spec rs n1 n2).
set (rs' := nextinstr (compare_float32 rs (Vsingle n1) (Vsingle n2))).
intros [A [B [C D]]].
unfold eval_testcond. rewrite A; rewrite B; rewrite C; rewrite D.
destruct c; simpl.
(* eq *)
destruct (Float32.cmp Ceq n1 n2); auto.
(* ne *)
rewrite Float32.cmp_ne_eq. destruct (Float32.cmp Ceq n1 n2); auto.
(* lt *)
destruct (Float32.cmp Clt n1 n2); auto.
(* le *)
rewrite Float32.cmp_le_lt_eq.
destruct (Float32.cmp Clt n1 n2) eqn:LT; destruct (Float32.cmp Ceq n1 n2) eqn:EQ; auto.
(* gt *)
destruct (Float32.cmp Ceq n1 n2) eqn:EQ;
destruct (Float32.cmp Clt n1 n2) eqn:LT;
destruct (Float32.cmp Cgt n1 n2) eqn:GT; auto.
exfalso; eapply Float32.cmp_lt_gt_false; eauto.
exfalso; eapply Float32.cmp_gt_eq_false; eauto.
exfalso; eapply Float32.cmp_lt_gt_false; eauto.
(* ge *)
rewrite Float32.cmp_ge_gt_eq.
destruct (Float32.cmp Ceq n1 n2) eqn:EQ;
destruct (Float32.cmp Clt n1 n2) eqn:LT;
destruct (Float32.cmp Cgt n1 n2) eqn:GT; auto.
exfalso; eapply Float32.cmp_lt_eq_false; eauto.
exfalso; eapply Float32.cmp_lt_eq_false; eauto.
exfalso; eapply Float32.cmp_lt_gt_false; eauto.
Qed.
Ltac ArgsInv :=
repeat (match goal with
| [ H: Error _ = OK _ |- _ ] => discriminate
| [ H: match ?args with nil => _ | _ :: _ => _ end = OK _ |- _ ] => destruct args
| [ H: bind _ _ = OK _ |- _ ] => monadInv H
| [ H: match _ with left _ => _ | right _ => assertion_failed end = OK _ |- _ ] => monadInv H
| [ H: match _ with true => _ | false => assertion_failed end = OK _ |- _ ] => monadInv H
end);
subst;
repeat (match goal with
| [ H: ireg_of ?x = OK ?y |- _ ] => simpl in *; rewrite (ireg_of_eq _ _ H) in *
| [ H: freg_of ?x = OK ?y |- _ ] => simpl in *; rewrite (freg_of_eq _ _ H) in *
end).
Lemma transl_cond_correct:
forall cond args k rs m c,
transl_cond cond args k = OK c ->
exists rs',
exec_straight ge fn c rs m k rs' m
/\ match eval_condition cond (map rs (map preg_of args)) m with
| Some b => eval_testcond (cond_for_cond cond) rs' = Some b
/\ eval_testcond (cond_for_cond (negate_condition cond)) rs' = Some (negb b)
| None => True
end
/\ forall r, data_preg r = true -> rs'#r = rs r.
Proof.
intros until c; intros TR.
unfold transl_cond in TR; destruct cond; ArgsInv.
- (* Ccomp *)
econstructor.
split. apply exec_straight_one. simpl. eauto. auto.
split. destruct (Val.cmp_bool c0 (rs x) (rs x0)) eqn:CMP; auto.
split; apply cond_for_signed_cmp_correct; auto. rewrite Val.negate_cmp_bool, CMP; auto.
apply compare_int_inv.
- (* Ccompu *)
econstructor.
split. apply exec_straight_one. simpl. eauto. auto.
split. destruct (Val.cmpu_bool (Mem.valid_pointer m) c0 (rs x) (rs x0)) eqn:CMP; auto.
split; apply cond_for_unsigned_cmp_correct; auto. rewrite Val.negate_cmpu_bool, CMP; auto.
apply compare_int_inv.
- (* Ccompshift *)
econstructor.
split. apply exec_straight_one. simpl. eauto. auto.
split. rewrite transl_shift_correct.
destruct (Val.cmp_bool c0 (rs x) (eval_shift s (rs x0))) eqn:CMP; auto.
split; apply cond_for_signed_cmp_correct; auto. rewrite Val.negate_cmp_bool, CMP; auto.
apply compare_int_inv.
- (* Ccompushift *)
econstructor.
split. apply exec_straight_one. simpl. eauto. auto.
split. rewrite transl_shift_correct.
destruct (Val.cmpu_bool (Mem.valid_pointer m) c0 (rs x) (eval_shift s (rs x0))) eqn:CMP; auto.
split; apply cond_for_unsigned_cmp_correct; auto. rewrite Val.negate_cmpu_bool, CMP; auto.
apply compare_int_inv.
- (* Ccompimm *)
destruct (is_immed_arith i).
econstructor.
split. apply exec_straight_one. simpl. eauto. auto.
split. destruct (Val.cmp_bool c0 (rs x) (Vint i)) eqn:CMP; auto.
split; apply cond_for_signed_cmp_correct; auto. rewrite Val.negate_cmp_bool, CMP; auto.
apply compare_int_inv.
exploit (loadimm_correct IR14). intros [rs' [P [Q R]]].
econstructor.
split. eapply exec_straight_trans. eexact P. apply exec_straight_one. simpl.
rewrite Q. rewrite R; eauto with asmgen. auto.
split. rewrite <- R by (eauto with asmgen).
destruct (Val.cmp_bool c0 (rs' x) (Vint i)) eqn:CMP; auto.
split; apply cond_for_signed_cmp_correct; auto. rewrite Val.negate_cmp_bool, CMP; auto.
intros. rewrite compare_int_inv by auto. auto with asmgen.
- (* Ccompuimm *)
destruct (is_immed_arith i).
econstructor.
split. apply exec_straight_one. simpl. eauto. auto.
split. destruct (Val.cmpu_bool (Mem.valid_pointer m) c0 (rs x) (Vint i)) eqn:CMP; auto.
split; apply cond_for_unsigned_cmp_correct; auto. rewrite Val.negate_cmpu_bool, CMP; auto.
apply compare_int_inv.
exploit (loadimm_correct IR14). intros [rs' [P [Q R]]].
econstructor.
split. eapply exec_straight_trans. eexact P. apply exec_straight_one. simpl.
rewrite Q. rewrite R; eauto with asmgen. auto.
split. rewrite <- R by (eauto with asmgen).
destruct (Val.cmpu_bool (Mem.valid_pointer m) c0 (rs' x) (Vint i)) eqn:CMP; auto.
split; apply cond_for_unsigned_cmp_correct; auto. rewrite Val.negate_cmpu_bool, CMP; auto.
intros. rewrite compare_int_inv by auto. auto with asmgen.
- (* Ccompf *)
econstructor.
split. apply exec_straight_one. simpl. eauto. apply compare_float_nextpc.
split. destruct (Val.cmpf_bool c0 (rs x) (rs x0)) eqn:CMP; auto.
destruct (rs x); try discriminate. destruct (rs x0); try discriminate.
simpl in CMP. inv CMP.
split. apply cond_for_float_cmp_correct. apply cond_for_float_not_cmp_correct.
apply compare_float_inv.
- (* Cnotcompf *)
econstructor.
split. apply exec_straight_one. simpl. eauto. apply compare_float_nextpc.
split. destruct (Val.cmpf_bool c0 (rs x) (rs x0)) eqn:CMP; auto.
destruct (rs x); try discriminate. destruct (rs x0); try discriminate.
simpl in CMP. inv CMP.
Local Opaque compare_float. simpl.
split. apply cond_for_float_not_cmp_correct. rewrite negb_involutive. apply cond_for_float_cmp_correct.
exact I.
apply compare_float_inv.
- (* Ccompfzero *)
econstructor.
split. apply exec_straight_one. simpl. eauto. apply compare_float_nextpc.
split. destruct (Val.cmpf_bool c0 (rs x) (Vfloat Float.zero)) eqn:CMP; auto.
destruct (rs x); try discriminate.
simpl in CMP. inv CMP.
split. apply cond_for_float_cmp_correct. apply cond_for_float_not_cmp_correct.
apply compare_float_inv.
- (* Cnotcompfzero *)
econstructor.
split. apply exec_straight_one. simpl. eauto. apply compare_float_nextpc.
split. destruct (Val.cmpf_bool c0 (rs x) (Vfloat Float.zero)) eqn:CMP; auto.
destruct (rs x); try discriminate. simpl in CMP. inv CMP.
Local Opaque compare_float. simpl.
split. apply cond_for_float_not_cmp_correct. rewrite negb_involutive. apply cond_for_float_cmp_correct.
exact I.
apply compare_float_inv.
- (* Ccompfs *)
econstructor.
split. apply exec_straight_one. simpl. eauto. apply compare_float32_nextpc.
split. destruct (Val.cmpfs_bool c0 (rs x) (rs x0)) eqn:CMP; auto.
destruct (rs x); try discriminate. destruct (rs x0); try discriminate.
simpl in CMP. inv CMP.
split. apply cond_for_float32_cmp_correct. apply cond_for_float32_not_cmp_correct.
apply compare_float32_inv.
- (* Cnotcompfs *)
econstructor.
split. apply exec_straight_one. simpl. eauto. apply compare_float32_nextpc.
split. destruct (Val.cmpfs_bool c0 (rs x) (rs x0)) eqn:CMP; auto.
destruct (rs x); try discriminate. destruct (rs x0); try discriminate.
simpl in CMP. inv CMP.
Local Opaque compare_float32. simpl.
split. apply cond_for_float32_not_cmp_correct. rewrite negb_involutive. apply cond_for_float32_cmp_correct.
exact I.
apply compare_float32_inv.
- (* Ccompfszero *)
econstructor.
split. apply exec_straight_one. simpl. eauto. apply compare_float32_nextpc.
split. destruct (Val.cmpfs_bool c0 (rs x) (Vsingle Float32.zero)) eqn:CMP; auto.
destruct (rs x); try discriminate.
simpl in CMP. inv CMP.
split. apply cond_for_float32_cmp_correct. apply cond_for_float32_not_cmp_correct.
apply compare_float32_inv.
- (* Cnotcompfszero *)
econstructor.
split. apply exec_straight_one. simpl. eauto. apply compare_float32_nextpc.
split. destruct (Val.cmpfs_bool c0 (rs x) (Vsingle Float32.zero)) eqn:CMP; auto.
destruct (rs x); try discriminate. simpl in CMP. inv CMP.
simpl. split. apply cond_for_float32_not_cmp_correct. rewrite negb_involutive. apply cond_for_float32_cmp_correct.
exact I.
apply compare_float32_inv.
Qed.
(** Translation of arithmetic operations. *)
Ltac TranslOpSimpl :=
econstructor; split;
[ apply exec_straight_one; [simpl; eauto | reflexivity ]
| split; [try rewrite transl_shift_correct; repeat Simpl | intros; repeat Simpl] ].
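(* The Ocast8signed / Ocast16signed cases below justify implementing sign extension by a
   left shift followed by an arithmetic right shift, using Int.sign_ext_shr_shl:
   sign_ext 8 x = shr (shl x 24) 24 on 32-bit integers (and with shifts by 16 instead of
   24 for Ocast16signed). Worked instance: for x = 255 = 0x000000FF, shl by 24 gives
   0xFF000000, and the arithmetic shr by 24 gives 0xFFFFFFFF = -1, which is indeed
   sign_ext 8 255. *)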
Lemma transl_op_correct_same:
forall op args res k c (rs: regset) m v,
transl_op op args res k = OK c ->
eval_operation ge rs#IR13 op (map rs (map preg_of args)) m = Some v ->
match op with Ocmp _ => False | Oaddrstack _ => False | _ => True end ->
exists rs',
exec_straight ge fn c rs m k rs' m
/\ rs'#(preg_of res) = v
/\ forall r, data_preg r = true -> r <> preg_of res -> preg_notin r (destroyed_by_op op) -> rs'#r = rs#r.
Proof.
intros until v; intros TR EV NOCMP.
unfold transl_op in TR; destruct op; ArgsInv; simpl in EV; inv EV; try (TranslOpSimpl; fail).
(* Omove *)
destruct (preg_of res) eqn:RES; try discriminate;
destruct (preg_of m0) eqn:ARG; inv TR.
econstructor; split. apply exec_straight_one; simpl; eauto. intuition Simpl.
econstructor; split. apply exec_straight_one; simpl; eauto. intuition Simpl.
(* Ointconst *)
generalize (loadimm_correct x i k rs m). intros [rs' [A [B C]]].
exists rs'; auto with asmgen.
(* Oaddrstack *)
contradiction.
(* Ocast8signed *)
destruct (thumb tt).
econstructor; split. apply exec_straight_one; simpl; eauto. intuition Simpl.
destruct (rs x0); auto; simpl. rewrite Int.shru_zero. reflexivity.
set (rs1 := nextinstr_nf (rs#x <- (Val.shl rs#x0 (Vint (Int.repr 24))))).
set (rs2 := nextinstr_nf (rs1#x <- (Val.shr rs1#x (Vint (Int.repr 24))))).
exists rs2.
split. apply exec_straight_two with rs1 m; auto.
split. unfold rs2; Simpl. unfold rs1; Simpl.
unfold Val.shr, Val.shl; destruct (rs x0); auto.
change (Int.ltu (Int.repr 24) Int.iwordsize) with true; simpl.
f_equal. symmetry. apply (Int.sign_ext_shr_shl 8). compute; auto.
intros. unfold rs2, rs1; Simpl.
(* Ocast16signed *)
destruct (thumb tt).
econstructor; split. apply exec_straight_one; simpl; eauto. intuition Simpl.
destruct (rs x0); auto; simpl. rewrite Int.shru_zero. reflexivity.
set (rs1 := nextinstr_nf (rs#x <- (Val.shl rs#x0 (Vint (Int.repr 16))))).
set (rs2 := nextinstr_nf (rs1#x <- (Val.shr rs1#x (Vint (Int.repr 16))))).
exists rs2.
split. apply exec_straight_two with rs1 m; auto.
split. unfold rs2; Simpl. unfold rs1; Simpl.
unfold Val.shr, Val.shl; destruct (rs x0); auto.
change (Int.ltu (Int.repr 16) Int.iwordsize) with true; simpl.
f_equal. symmetry. apply (Int.sign_ext_shr_shl 16). compute; auto.
intros. unfold rs2, rs1; Simpl.
(* Oaddimm *)
generalize (addimm_correct x x0 i k rs m).
intros [rs' [A [B C]]].
exists rs'; auto with asmgen.
(* Orsbimm *)
generalize (rsubimm_correct x x0 i k rs m).
intros [rs' [A [B C]]].
exists rs'; auto with asmgen.
(* divs *)
Local Transparent destroyed_by_op.
econstructor. split. apply exec_straight_one. simpl. rewrite H0. reflexivity. auto.
split. Simpl. simpl; intros. intuition Simpl.
(* divu *)
econstructor. split. apply exec_straight_one. simpl. rewrite H0. reflexivity. auto.
split. Simpl. simpl; intros. intuition Simpl.
(* Oandimm *)
generalize (andimm_correct x x0 i k rs m).
intros [rs' [A [B C]]].
exists rs'; auto with asmgen.
(* Oorimm *)
generalize (orimm_correct x x0 i k rs m).
intros [rs' [A [B C]]].
exists rs'; auto with asmgen.
(* Oxorimm *)
generalize (xorimm_correct x x0 i k rs m).
intros [rs' [A [B C]]].
exists rs'; auto with asmgen.
(* Oshrximm *)
destruct (rs x0) eqn: X0; simpl in H0; try discriminate.
destruct (Int.ltu i (Int.repr 31)) eqn: LTU; inv H0.
revert EQ2. predSpec Int.eq Int.eq_spec i Int.zero; intros EQ2.
(* i = 0 *)
inv EQ2. econstructor.
split. apply exec_straight_one. simpl. reflexivity. auto.
split. Simpl. unfold Int.shrx. rewrite Int.shl_zero. unfold Int.divs.
change (Int.signed Int.one) with 1. rewrite Z.quot_1_r. rewrite Int.repr_signed. auto.
intros. Simpl.
(* i <> 0 *)
inv EQ2.
assert (LTU': Int.ltu (Int.sub Int.iwordsize i) Int.iwordsize = true).
{
generalize (Int.ltu_inv _ _ LTU). intros.
unfold Int.sub, Int.ltu. rewrite Int.unsigned_repr_wordsize.
rewrite Int.unsigned_repr. apply zlt_true.
assert (Int.unsigned i <> 0).
{ red; intros; elim H. rewrite <- (Int.repr_unsigned i). rewrite H1; reflexivity. }
omega.
change (Int.unsigned (Int.repr 31)) with (Int.zwordsize - 1) in H0.
generalize Int.wordsize_max_unsigned; omega.
}
assert (LTU'': Int.ltu i Int.iwordsize = true).
{
generalize (Int.ltu_inv _ _ LTU). intros.
unfold Int.ltu. rewrite Int.unsigned_repr_wordsize. apply zlt_true.
change (Int.unsigned (Int.repr 31)) with (Int.zwordsize - 1) in H0.
omega.
}
set (j := Int.sub Int.iwordsize i) in *.
set (rs1 := nextinstr_nf (rs#IR14 <- (Val.shr (Vint i0) (Vint (Int.repr 31))))).
set (rs2 := nextinstr_nf (rs1#IR14 <- (Val.add (Vint i0) (Val.shru rs1#IR14 (Vint j))))).
set (rs3 := nextinstr_nf (rs2#x <- (Val.shr rs2#IR14 (Vint i)))).
exists rs3; split.
apply exec_straight_three with rs1 m rs2 m.
simpl. rewrite X0; reflexivity.
simpl. f_equal. Simpl. replace (rs1 x0) with (rs x0). rewrite X0; reflexivity.
unfold rs1; Simpl.
reflexivity.
auto. auto. auto.
split. unfold rs3; Simpl. unfold rs2; Simpl. unfold rs1; Simpl.
simpl. change (Int.ltu (Int.repr 31) Int.iwordsize) with true. simpl.
rewrite LTU'; simpl. rewrite LTU''; simpl.
f_equal. symmetry. apply Int.shrx_shr_2. assumption.
intros. unfold rs3; Simpl. unfold rs2; Simpl. unfold rs1; Simpl.
(* intoffloat *)
econstructor; split. apply exec_straight_one; simpl. rewrite H0; simpl. eauto. auto.
Transparent destroyed_by_op.
simpl. intuition Simpl.
(* intuoffloat *)
econstructor; split. apply exec_straight_one; simpl. rewrite H0; simpl. eauto. auto.
simpl. intuition Simpl.
(* floatofint *)
econstructor; split. apply exec_straight_one; simpl. rewrite H0; simpl. eauto. auto.
intuition Simpl.
(* floatofintu *)
econstructor; split. apply exec_straight_one; simpl. rewrite H0; simpl. eauto. auto.
intuition Simpl.
(* intofsingle *)
econstructor; split. apply exec_straight_one; simpl. rewrite H0; simpl. eauto. auto.
simpl. intuition Simpl.
(* intuofsingle *)
econstructor; split. apply exec_straight_one; simpl. rewrite H0; simpl. eauto. auto.
simpl. intuition Simpl.
(* singleofint *)
econstructor; split. apply exec_straight_one; simpl. rewrite H0; simpl. eauto. auto.
intuition Simpl.
(* singleofintu *)
econstructor; split. apply exec_straight_one; simpl. rewrite H0; simpl. eauto. auto.
intuition Simpl.
(* Ocmp *)
contradiction.
Qed.
Lemma transl_op_correct:
forall op args res k c (rs: regset) m v,
transl_op op args res k = OK c ->
eval_operation ge rs#IR13 op (map rs (map preg_of args)) m = Some v ->
exists rs',
exec_straight ge fn c rs m k rs' m
/\ Val.lessdef v rs'#(preg_of res)
/\ forall r, data_preg r = true -> r <> preg_of res -> preg_notin r (destroyed_by_op op) -> rs'#r = rs#r.
Proof.
intros.
assert (SAME:
(exists rs', exec_straight ge fn c rs m k rs' m
/\ rs'#(preg_of res) = v
/\ forall r, data_preg r = true -> r <> preg_of res -> preg_notin r (destroyed_by_op op) -> rs'#r = rs#r) ->
exists rs', exec_straight ge fn c rs m k rs' m
/\ Val.lessdef v rs'#(preg_of res)
/\ forall r, data_preg r = true -> r <> preg_of res -> preg_notin r (destroyed_by_op op) -> rs'#r = rs#r).
{ intros (rs' & A & B & C). subst v; exists rs'; auto. }
destruct op; try (apply SAME; eapply transl_op_correct_same; eauto; fail).
- (* Oaddrstack *)
clear SAME; simpl in *; ArgsInv.
destruct (addimm_correct x IR13 (Ptrofs.to_int i) k rs m) as [rs' [EX [RES OTH]]].
exists rs'; split. auto. split.
rewrite RES; inv H0. destruct (rs IR13); simpl; auto. rewrite Ptrofs.of_int_to_int; auto.
intros; apply OTH; eauto with asmgen.
- (* Ocmp *)
clear SAME. simpl in H. monadInv H. simpl in H0. inv H0.
rewrite (ireg_of_eq _ _ EQ).
exploit transl_cond_correct; eauto. instantiate (1 := rs). instantiate (1 := m). intros [rs1 [A [B C]]].
econstructor; split.
eapply exec_straight_trans. eexact A. apply exec_straight_one. simpl; eauto. auto.
split; intros; Simpl.
destruct (eval_condition c0 rs ## (preg_of ## args) m) as [b|]; simpl; auto.
destruct B as [B1 B2]; rewrite B1. destruct b; auto.
Qed.
(** Translation of loads and stores. *)
Remark val_add_add_zero:
forall v1 v2, Val.add v1 v2 = Val.add (Val.add v1 v2) (Vint Int.zero).
Proof.
intros. destruct v1; destruct v2; simpl; auto.
rewrite Int.add_zero; auto.
rewrite Ptrofs.add_zero; auto.
rewrite Ptrofs.add_zero; auto.
Qed.
Lemma transl_memory_access_correct:
forall (P: regset -> Prop) (mk_instr_imm: ireg -> int -> instruction)
(mk_instr_gen: option (ireg -> shift_op -> instruction))
(mk_immed: int -> int)
addr args k c (rs: regset) a m m',
transl_memory_access mk_instr_imm mk_instr_gen mk_immed addr args k = OK c ->
eval_addressing ge (rs#SP) addr (map rs (map preg_of args)) = Some a ->
match a with Vptr _ _ => True | _ => False end ->
(forall (r1: ireg) (rs1: regset) n k,
Val.add rs1#r1 (Vint n) = a ->
(forall (r: preg), if_preg r = true -> r <> IR14 -> rs1 r = rs r) ->
exists rs',
exec_straight ge fn (mk_instr_imm r1 n :: k) rs1 m k rs' m' /\ P rs') ->
match mk_instr_gen with
| None => True
| Some mk =>
(forall (r1: ireg) (sa: shift_op) k,
Val.add rs#r1 (eval_shift_op sa rs) = a ->
exists rs',
exec_straight ge fn (mk r1 sa :: k) rs m k rs' m' /\ P rs')
end ->
exists rs',
exec_straight ge fn c rs m k rs' m' /\ P rs'.
Proof.
intros until m'; intros TR EA ADDR MK1 MK2.
unfold transl_memory_access in TR; destruct addr; ArgsInv; simpl in EA; inv EA.
(* Aindexed *)
apply indexed_memory_access_correct. exact MK1.
(* Aindexed2 *)
destruct mk_instr_gen as [mk | ]; monadInv TR. apply MK2.
simpl. erewrite ! ireg_of_eq; eauto.
(* Aindexed2shift *)
destruct mk_instr_gen as [mk | ]; monadInv TR. apply MK2.
erewrite ! ireg_of_eq; eauto. rewrite transl_shift_correct. auto.
(* Ainstack *)
inv TR. apply indexed_memory_access_correct. intros. eapply MK1; eauto.
rewrite H. destruct (rs IR13); try contradiction. simpl. f_equal; f_equal. auto with ptrofs.
Qed.
Lemma transl_load_int_correct:
forall mk_instr is_immed dst addr args k c (rs: regset) a chunk m v,
transl_memory_access_int mk_instr is_immed dst addr args k = OK c ->
eval_addressing ge (rs#SP) addr (map rs (map preg_of args)) = Some a ->
Mem.loadv chunk m a = Some v ->
(forall (r1 r2: ireg) (sa: shift_op) (rs1: regset),
exec_instr ge fn (mk_instr r1 r2 sa) rs1 m =
exec_load chunk (Val.add rs1#r2 (eval_shift_op sa rs1)) r1 rs1 m) ->
exists rs',
exec_straight ge fn c rs m k rs' m
/\ rs'#(preg_of dst) = v
/\ forall r, data_preg r = true -> r <> preg_of dst -> rs'#r = rs#r.
Proof.
intros. monadInv H. erewrite ireg_of_eq by eauto.
eapply transl_memory_access_correct; eauto.
destruct a; discriminate || trivial.
intros; simpl. econstructor; split. apply exec_straight_one.
rewrite H2. unfold exec_load. simpl eval_shift_op. rewrite H. rewrite H1. eauto. auto.
split. Simpl. intros; Simpl.
simpl; intros.
econstructor; split. apply exec_straight_one.
rewrite H2. unfold exec_load. rewrite H. rewrite H1. eauto. auto.
split. Simpl. intros; Simpl.
Qed.
Lemma transl_load_float_correct:
forall mk_instr is_immed dst addr args k c (rs: regset) a chunk m v,
transl_memory_access_float mk_instr is_immed dst addr args k = OK c ->
eval_addressing ge (rs#SP) addr (map rs (map preg_of args)) = Some a ->
Mem.loadv chunk m a = Some v ->
(forall (r1: freg) (r2: ireg) (n: int) (rs1: regset),
exec_instr ge fn (mk_instr r1 r2 n) rs1 m =
exec_load chunk (Val.add rs1#r2 (Vint n)) r1 rs1 m) ->
exists rs',
exec_straight ge fn c rs m k rs' m
/\ rs'#(preg_of dst) = v
/\ forall r, data_preg r = true -> r <> preg_of dst -> rs'#r = rs#r.
Proof.
intros. monadInv H. erewrite freg_of_eq by eauto.
eapply transl_memory_access_correct; eauto.
destruct a; discriminate || trivial.
intros; simpl. econstructor; split. apply exec_straight_one.
rewrite H2. unfold exec_load. rewrite H. rewrite H1. eauto. auto.
split. Simpl. intros; Simpl.
simpl; auto.
Qed.
Lemma transl_store_int_correct:
forall mr mk_instr is_immed src addr args k c (rs: regset) a chunk m m',
transl_memory_access_int mk_instr is_immed src addr args k = OK c ->
eval_addressing ge (rs#SP) addr (map rs (map preg_of args)) = Some a ->
Mem.storev chunk m a rs#(preg_of src) = Some m' ->
(forall (r1 r2: ireg) (sa: shift_op) (rs1: regset),
exec_instr ge fn (mk_instr r1 r2 sa) rs1 m =
exec_store chunk (Val.add rs1#r2 (eval_shift_op sa rs1)) r1 rs1 m) ->
exists rs',
exec_straight ge fn c rs m k rs' m'
/\ forall r, data_preg r = true -> preg_notin r mr -> rs'#r = rs#r.
Proof.
intros. assert (DR: data_preg (preg_of src) = true) by eauto with asmgen.
monadInv H. erewrite ireg_of_eq in * by eauto.
eapply transl_memory_access_correct; eauto.
destruct a; discriminate || trivial.
intros; simpl. econstructor; split. apply exec_straight_one.
rewrite H2. unfold exec_store. simpl eval_shift_op. rewrite H. rewrite H3; eauto with asmgen.
rewrite H1. eauto. auto.
intros; Simpl.
simpl; intros.
econstructor; split. apply exec_straight_one.
rewrite H2. unfold exec_store. rewrite H. rewrite H1. eauto. auto.
intros; Simpl.
Qed.
Lemma transl_store_float_correct:
forall mr mk_instr is_immed src addr args k c (rs: regset) a chunk m m',
transl_memory_access_float mk_instr is_immed src addr args k = OK c ->
eval_addressing ge (rs#SP) addr (map rs (map preg_of args)) = Some a ->
Mem.storev chunk m a rs#(preg_of src) = Some m' ->
(forall (r1: freg) (r2: ireg) (n: int) (rs1: regset),
exec_instr ge fn (mk_instr r1 r2 n) rs1 m =
exec_store chunk (Val.add rs1#r2 (Vint n)) r1 rs1 m) ->
exists rs',
exec_straight ge fn c rs m k rs' m'
/\ forall r, data_preg r = true -> preg_notin r mr -> rs'#r = rs#r.
Proof.
intros. assert (DR: data_preg (preg_of src) = true) by eauto with asmgen.
monadInv H. erewrite freg_of_eq in * by eauto.
eapply transl_memory_access_correct; eauto.
destruct a; discriminate || trivial.
intros; simpl. econstructor; split. apply exec_straight_one.
rewrite H2. unfold exec_store. rewrite H. rewrite H3; auto with asmgen. rewrite H1. eauto. auto.
intros; Simpl.
simpl; auto.
Qed.
Lemma transl_load_correct:
forall chunk addr args dst k c (rs: regset) a m v,
transl_load chunk addr args dst k = OK c ->
eval_addressing ge (rs#SP) addr (map rs (map preg_of args)) = Some a ->
Mem.loadv chunk m a = Some v ->
exists rs',
exec_straight ge fn c rs m k rs' m
/\ rs'#(preg_of dst) = v
/\ forall r, data_preg r = true -> r <> preg_of dst -> rs'#r = rs#r.
Proof.
intros. destruct chunk; simpl in H.
eapply transl_load_int_correct; eauto.
eapply transl_load_int_correct; eauto.
eapply transl_load_int_correct; eauto.
eapply transl_load_int_correct; eauto.
eapply transl_load_int_correct; eauto.
discriminate.
eapply transl_load_float_correct; eauto.
eapply transl_load_float_correct; eauto.
discriminate.
discriminate.
Qed.
Lemma transl_store_correct:
forall chunk addr args src k c (rs: regset) a m m',
transl_store chunk addr args src k = OK c ->
eval_addressing ge (rs#SP) addr (map rs (map preg_of args)) = Some a ->
Mem.storev chunk m a rs#(preg_of src) = Some m' ->
exists rs',
exec_straight ge fn c rs m k rs' m'
/\ forall r, data_preg r = true -> preg_notin r (destroyed_by_store chunk addr) -> rs'#r = rs#r.
Proof.
intros. destruct chunk; simpl in H.
- assert (Mem.storev Mint8unsigned m a (rs (preg_of src)) = Some m').
rewrite <- H1. destruct a; simpl; auto. symmetry. apply Mem.store_signed_unsigned_8.
clear H1. eapply transl_store_int_correct; eauto.
- eapply transl_store_int_correct; eauto.
- assert (Mem.storev Mint16unsigned m a (rs (preg_of src)) = Some m').
rewrite <- H1. destruct a; simpl; auto. symmetry. apply Mem.store_signed_unsigned_16.
clear H1. eapply transl_store_int_correct; eauto.
- eapply transl_store_int_correct; eauto.
- eapply transl_store_int_correct; eauto.
- discriminate.
- eapply transl_store_float_correct; eauto.
- eapply transl_store_float_correct; eauto.
- discriminate.
- discriminate.
Qed.
End CONSTRUCTORS.
|
Formal statement is: lemma (in finite_measure) fmeasurable_eq_sets: "fmeasurable M = sets M" Informal statement is: In a finite measure space, the $\sigma$-algebra of measurable sets is the same as the $\sigma$-algebra of sets. |
[STATEMENT]
lemma (in cat_preorder) cat_peo_is_le[cat_order_cs_intros]:
assumes "f : a \<mapsto>\<^bsub>\<CC>\<^esub> b"
shows "a \<le>\<^sub>O\<^bsub>\<CC>\<^esub> b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<le>\<^sub>O\<^bsub>\<CC>\<^esub> b
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
f : a \<mapsto>\<^bsub>\<CC>\<^esub> b
goal (1 subgoal):
1. a \<le>\<^sub>O\<^bsub>\<CC>\<^esub> b
[PROOF STEP]
by (force intro: is_leI) |
If $f$ converges to $l$, then $\lvert f \rvert$ converges to $\lvert l \rvert$. |
From Undecidability.Shared.Libs.PSL Require Import Base FinTypes.
From Undecidability Require Import L.Functions.EqBool.
From Complexity.NP.SAT.CookLevin Require Export CC.
From Complexity.Libs.CookPrelim Require Import MorePrelim FlatFinTypes.
Require Import Lia.
(** * Flat Covering Cards *)
(* A flattened version of Covering Cards that uses natural numbers to represent the elements of finite types *)
(** ** Definition *)
Inductive FlatCC := {
Sigma : nat;
offset : nat;
width : nat;
init : list nat;
cards : list (CCCard nat);
final : list (list nat);
steps : nat
}.
(**validity of cards and final constraints relative to an alphabet *)
(**we have to enforce this as we always consider only a finite subset of N *)
Definition CCCard_ofFlatType (card : CCCard nat) k:= list_ofFlatType k (prem card) /\ list_ofFlatType k (conc card).
Definition isValidFlatCards (l : list (CCCard nat)) k := (forall card, card el l -> CCCard_ofFlatType card k).
Definition isValidFlatFinal (l : list (list nat)) k := (forall s, s el l -> list_ofFlatType k s).
Definition isValidFlatInitial (l : list nat) k := list_ofFlatType k l.
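(* Concrete example (a sketch of the intended reading): over the flat alphabet
   Sigma = 3, i.e. the symbols {0, 1, 2}, the string [0; 2; 1] satisfies
   list_ofFlatType 3, and a card with premise [0; 1] and conclusion [2; 0] satisfies
   CCCard_ofFlatType with respect to 3; a card mentioning the symbol 3 does not,
   since every symbol has to lie strictly below the alphabet size. *)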
(**the well-formedness constraints are the same as for CC *)
Definition FlatCC_wellformed fpr :=
width fpr > 0
/\ offset fpr > 0
/\ (exists k, k > 0 /\ width fpr = k * offset fpr)
/\ length (init fpr) >= width fpr
/\ (forall card, card el cards fpr -> CCCard_of_size card (width fpr))
/\ (exists k, length (init fpr) = k * offset fpr).
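(* For instance (a sketch of the arithmetic side conditions): offset = 2,
   width = 4 = 2 * offset, and an initial string of length 6 = 3 * offset are
   admissible: the width is a positive multiple of the offset, and the initial string
   is at least width symbols long and itself a multiple of the offset. *)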
(**additionally, we require that instances only use the given finite subset of N *)
Definition isValidFlattening fpr :=
list_ofFlatType (Sigma fpr) (init fpr)
/\ (forall s, s el final fpr -> list_ofFlatType (Sigma fpr) s)
/\ (forall card, card el cards fpr -> CCCard_ofFlatType card (Sigma fpr)).
(**the definitions of validity and satisfaction of final constraints can be re-used *)
(**we can now define the language of valid FlatCC instances *)
Definition FlatCCLang (C : FlatCC) := FlatCC_wellformed C /\ isValidFlattening C
/\ exists (sf : list nat), list_ofFlatType (Sigma C) sf
/\ relpower (valid (offset C) (width C) (cards C)) (steps C) (init C) sf
/\ satFinal (offset C) (length (init C)) (final C) sf.
Section fixInstance.
Variable (fpr : FlatCC).
Notation Sigma := (Sigma fpr).
Notation offset := (offset fpr).
Notation width := (width fpr).
Notation init := (init fpr).
Notation cards := (cards fpr).
Notation final := (final fpr).
Notation steps := (steps fpr).
Notation "a ⇝ b" := (valid offset width cards a b) (at level 40).
Context (A : FlatCC_wellformed fpr).
Context (B : isValidFlattening fpr).
(** we want to prove that, for well-formed instances that are valid flattenings, a rewrite step can only lead to strings that are again strings over the finite alphabet *)
(** we first prove a more general statement for abstract predicates that distribute over concatenation *)
Lemma p_invariant (p : list nat -> Prop) (a b : list nat) :
p a
-> (forall x y, p (x ++ y) <-> p x /\ p y)
-> |a| >= width
-> (forall x y u v card, coversHead card (u ++ x) (v ++ y) -> card el cards -> |u| = offset -> |v| = offset -> p v)
-> (forall card, card el cards -> p (conc card))
-> a ⇝ b
-> p b.
Proof.
intros H H0 H1 G1 G2 H2.
(*we switch to the direct characterisation *)
assert (a ⇝ b /\ |a| >= width) as H3%validDirect_valid by tauto. 2-4: apply A.
clear H2 H1. induction H3 as [a b card H1 H2 H4 H5 | a b u v card H1 IH H2 H4 H5 H6].
- clear G1.
destruct A as (_ & _ & (k & _ & A2) & _ & A6 & _).
destruct B as ( _ & _ & A5).
specialize (A5 _ H4) as (A5 & A7).
specialize H5 as ((rest' & H6') & (rest & H6)). (*show rest = [] *)
specialize (A6 _ H4) as (A6 & A6').
subst. rewrite app_length in H2.
(*we need some structural assumptions *)
assert (rest = []) as ->.
{
destruct rest'; cbn in H2; [ | lia]. rewrite !app_length in H1.
destruct rest; [ easy | cbn in H1; lia].
}
rewrite app_nil_r. now apply G2.
- rewrite H0. apply H0 in H. split; [ | apply IH; easy]. now eapply G1.
Qed.
(**the statement is now a straightforward corollary *)
Corollary valid_list_ofFlatType_invariant a b :
list_ofFlatType Sigma a -> |a| >= width -> a ⇝ b -> list_ofFlatType Sigma b.
Proof.
intros H H0 H1. eapply (@p_invariant (list_ofFlatType Sigma)).
- apply H.
- intros. apply list_ofFlatType_app.
- apply H0.
- intros. destruct H2 as (_ & (c & H2)).
destruct A as (_ & _ & A3 & _ & A1 & _ ). destruct B as ( _ & _ & A2).
specialize (A1 _ H3) as (_ & A1). specialize (A2 _ H3) as (_ & A2).
apply app_length_split in H2 as (u' & H2).
* rewrite H2 in A2. now apply list_ofFlatType_app in A2.
* destruct A3 as (ak & A3 & A4). nia.
- intros. destruct B as (_ & _ & A1).
apply A1 in H2 as (_ & H2). apply H2.
- apply H1.
Qed.
(**of course, this transfers to an arbitrary number of rewrite steps *)
Lemma relpower_valid_list_ofFlatType_invariant steps a b:
list_ofFlatType Sigma a
-> |a| >= width
-> relpower (valid offset width cards) steps a b
-> list_ofFlatType Sigma b.
Proof.
intros. induction H1 as [ a | ? ? ? ? H1 H2 IH]; [easy | ].
apply IH. eapply valid_list_ofFlatType_invariant, H1; [ apply H | ].
- apply H0.
- apply valid_length_inv in H1. nia.
Qed.
End fixInstance.
(**deciders for isValidFlattening and well-formedness *)
Definition CCCard_ofFlatType_dec (Sigma : nat) (card : CCCard nat) :=
list_ofFlatType_dec Sigma (prem card) && list_ofFlatType_dec Sigma (conc card).
Lemma CCCard_ofFlatType_dec_correct card Sigma :
CCCard_ofFlatType_dec Sigma card = true <-> CCCard_ofFlatType card Sigma.
Proof.
unfold CCCard_ofFlatType_dec, CCCard_ofFlatType. rewrite andb_true_iff.
rewrite !list_ofFlatType_dec_correct. easy.
Qed.
Definition FlatCC_wf_dec (fpr : FlatCC) :=
leb 1 (width fpr)
&& leb 1 (offset fpr)
&& Nat.eqb (Nat.modulo (width fpr) (offset fpr)) 0
&& leb (width fpr) (|init fpr|)
&& forallb (CCCard_of_size_dec (width fpr)) (cards fpr)
&& Nat.eqb (Nat.modulo (|init fpr|) (offset fpr)) 0.
Definition isValidFlattening_dec (fpr : FlatCC) :=
list_ofFlatType_dec (Sigma fpr) (init fpr)
&& forallb (list_ofFlatType_dec (Sigma fpr)) (final fpr)
&& forallb (CCCard_ofFlatType_dec (Sigma fpr)) (cards fpr).
Lemma FlatCC_wf_dec_correct (fpr : FlatCC) : FlatCC_wf_dec fpr = true <-> FlatCC_wellformed fpr.
Proof.
unfold FlatCC_wf_dec, FlatCC_wellformed. rewrite !andb_true_iff, !and_assoc.
rewrite !leb_iff. rewrite <- !(reflect_iff _ _ (Nat.eqb_spec _ _ )).
rewrite !forallb_forall.
setoid_rewrite CCCard_of_size_dec_correct.
split; intros; repeat match goal with [H : _ /\ _ |- _] => destruct H end;
repeat match goal with [ |- _ /\ _ ] => split end; try easy.
- apply Nat.mod_divide in H1 as (k & H1); [ | lia].
exists k; split; [ | apply H1 ]. destruct k; cbn in *; lia.
- apply Nat.mod_divide in H4 as (k & H4); [ | lia]. exists k; apply H4.
- apply Nat.mod_divide; [ lia | ]. destruct H1 as (k & _ & H1). exists k; apply H1.
- apply Nat.mod_divide; [ lia | ]. apply H4.
Qed.
Lemma isValidFlattening_dec_correct (fpr : FlatCC) : isValidFlattening_dec fpr = true <-> isValidFlattening fpr.
Proof.
unfold isValidFlattening_dec, isValidFlattening.
rewrite !andb_true_iff, !and_assoc.
rewrite !forallb_forall.
setoid_rewrite CCCard_ofFlatType_dec_correct.
setoid_rewrite list_ofFlatType_dec_correct.
split; intros; repeat match goal with [H : _ /\ _ |- _] => destruct H end;
repeat match goal with [ |- _ /\ _ ] => split end; try easy.
Qed.
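(* A small sanity check for both deciders (a sketch: the toy instance and the names
   below are illustrative only, and we assume the standard list notations exported by
   the prelude are in scope). The instance uses the alphabet {0, 1}, offset 1, width 2,
   a single card [0; 0] / [1; 1], the final constraint [1; 1], and 3 steps. *)
Definition FlatCC_toy_instance : FlatCC :=
  Build_FlatCC 2 1 2 [0; 1] [Build_CCCard [0; 0] [1; 1]] [[1; 1]] 3.
Example FlatCC_toy_wf : FlatCC_wf_dec FlatCC_toy_instance = true.
Proof. reflexivity. Qed.
Example FlatCC_toy_flattening : isValidFlattening_dec FlatCC_toy_instance = true.
Proof. reflexivity. Qed.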
(** ** Relation to CC instances *)
Inductive isFlatCCCardOf (X : finType) (flatcard : CCCard nat) (card : CCCard X) :=
mkIsFlatCCCardO (Hprem : isFlatListOf (prem flatcard) (prem card))
(Hconc : isFlatListOf (conc flatcard) (conc card))
: isFlatCCCardOf flatcard card.
Inductive isFlatCardsOf (X : finType) (flatcards : list (CCCard nat)) (cards : list (CCCard X)) :=
mkIsFlatCardsOf (Hsound : forall flatcard, flatcard el flatcards -> exists card, card el cards /\ isFlatCCCardOf flatcard card)
(Hcomplete : forall card, card el cards -> exists flatcard, flatcard el flatcards /\ isFlatCCCardOf flatcard card)
: isFlatCardsOf flatcards cards.
Inductive isFlatFinalOf (X : finType) (flatfinal : list (list nat)) (final : list (list X)) :=
mkIsFlatFinalOf (Hsound : forall fin, fin el flatfinal -> exists fin', fin' el final /\ isFlatListOf fin fin')
(Hcomplete : forall fin, fin el final -> exists fin', fin' el flatfinal /\ isFlatListOf fin' fin)
: isFlatFinalOf flatfinal final.
Inductive isFlatCCOf (fpr : FlatCC) (pr : CC) :=
mkIsFlatCCOf (HSigma : finRepr (CC.Sigma pr) (Sigma fpr))
(HOffset : offset fpr = CC.offset pr)
(HWidth : width fpr = CC.width pr)
(HInit : isFlatListOf (init fpr) (CC.init pr))
(HCards : isFlatCardsOf (cards fpr) (CC.cards pr))
(HFinal : isFlatFinalOf (final fpr) (CC.final pr))
(HSteps : steps fpr = CC.steps pr)
: isFlatCCOf fpr pr.
Lemma isFlatCCOf_isValidFlattening (fpr : FlatCC) (pr : CC) : isFlatCCOf fpr pr -> isValidFlattening fpr.
Proof.
intros. destruct H.
repeat split.
- rewrite HSigma. now eapply isFlatListOf_list_ofFlatType.
- intros s H0%HFinal. rewrite HSigma. destruct H0 as (s' & F1 & ->).
eapply isFlatListOf_list_ofFlatType. unfold isFlatListOf. reflexivity.
- apply HCards in H. destruct H as (card' & F1 & F2).
destruct F2 as (F2 & F3). rewrite HSigma.
eapply isFlatListOf_list_ofFlatType, F2.
- apply HCards in H. destruct H as (card' & F1 & F2).
destruct F2 as (F2 & F3). rewrite HSigma.
eapply isFlatListOf_list_ofFlatType, F3.
Qed.
(** we show that FlatCC instances "behave in the same way" as CC instances *)
Lemma coversHead_flat_agree (X : finType) (cardsFin : list (CCCard X)) cardsFlat finStr finStr' flatStr flatStr' :
isFlatListOf flatStr finStr
-> isFlatListOf flatStr' finStr'
-> isFlatCardsOf cardsFlat cardsFin
-> (exists card, card el cardsFin /\ coversHead card finStr finStr') <-> (exists card, card el cardsFlat /\ coversHead card flatStr flatStr').
Proof.
intros. split; intros (card & H2 & H3).
- apply H1 in H2 as (card' & F1 & F2). exists card'. split; [apply F1 | ].
unfold coversHead, prefix in *. destruct H3 as ((b1 & ->) & (b2 & ->)).
unfold isFlatListOf in H, H0.
rewrite map_app in H, H0. split.
+ exists (map index b1). rewrite H. enough (map index (prem card) = prem card') as H2.
{ now setoid_rewrite H2. }
destruct card; cbn.
destruct F2. cbn in *. now rewrite Hprem.
+ exists (map index b2). rewrite H0. enough (map index (conc card) = conc card') as H2.
{ now setoid_rewrite H2. }
destruct card; cbn.
destruct F2. cbn in *. now rewrite Hconc.
- apply H1 in H2 as (card' & F1 & F2). exists card'. split; [ apply F1 | ].
unfold coversHead, prefix in *. destruct H3 as ((b1 & ->) & (b2 & ->)).
unfold isFlatListOf in H, H0. split.
+ symmetry in H. apply map_eq_app in H as (finStr1 & finStr2 & -> & ? & ?).
exists finStr2.
enough (finStr1 = prem card') as H3 by (now setoid_rewrite H3).
destruct F2. rewrite Hprem in H.
apply map_injective in H; [easy | unfold injective; apply injective_index].
+ symmetry in H0. apply map_eq_app in H0 as (finStr1 & finStr2 & -> & ? & ?).
exists finStr2.
enough (finStr1 = conc card') as H3 by (now setoid_rewrite H3).
destruct F2. rewrite Hconc in H0.
apply map_injective in H0; [easy | unfold injective; apply injective_index].
Qed.
Section fixFCCInstance.
(*for the proof, we fix an instance *)
Variable (fpr : FlatCC).
Notation Sigma := (Sigma fpr).
Notation offset := (offset fpr).
Notation width := (width fpr).
Notation init := (init fpr).
Notation cards := (cards fpr).
Notation final := (final fpr).
Notation steps := (steps fpr).
Context (A : FlatCC_wellformed fpr).
Context (B : isValidFlattening fpr).
Lemma valid_flat_agree (X : finType) (fcards : list (CCCard X)) s1 s2 fs1 fs2:
isFlatListOf fs1 s1
-> isFlatListOf fs2 s2
-> isFlatCardsOf cards fcards
-> valid offset width cards fs1 fs2 <-> valid offset width fcards s1 s2.
Proof.
intros H H1 H2.
split.
- intros H3. revert s1 s2 H H1. induction H3; intros.
+ destruct s1; [ | now unfold isFlatListOf in H].
destruct s2; [ | now unfold isFlatListOf in H1].
constructor.
+ unfold isFlatListOf in H4, H5.
symmetry in H4. apply map_eq_app in H4 as (ls1 & ls2 & -> & -> & ->).
symmetry in H5. apply map_eq_app in H5 as (rs1 & rs2 & -> & -> & ->).
constructor 2.
2-4: now rewrite map_length in *.
apply IHvalid; easy.
+ unfold isFlatListOf in H5, H6.
symmetry in H5. apply map_eq_app in H5 as (ls1 & ls2 & -> & -> & ->).
symmetry in H6. apply map_eq_app in H6 as (rs1 & rs2 & -> & -> & ->).
assert (exists w, w el cards /\ coversHead w (map index ls1 ++ map index ls2) (map index rs1 ++ map index rs2)) as H5 by eauto.
eapply coversHead_flat_agree in H5 as (fincard & H5 & H6).
* econstructor 3. 2-3: now rewrite map_length in *.
-- eapply IHvalid; easy.
-- apply H5.
-- apply H6.
* clear H5 H6. apply isFlatListOf_app; easy.
* clear H5 H6. apply isFlatListOf_app; easy.
* clear H5 H6. apply H2.
- intros H3. revert fs1 fs2 H H1 H2. induction H3; intros.
+ rewrite H, H1; cbn; constructor 1.
+ rewrite H2, H4. rewrite !map_app. constructor. 2-4: rewrite map_length; auto.
now eapply IHvalid.
+ rewrite H4, H5. rewrite !map_app.
assert (exists w, w el fcards /\ coversHead w (u ++ a) (v++b)) as H7 by eauto.
eapply coversHead_flat_agree in H7 as (fincard & H7 & H8). 2-4: eauto.
econstructor 3. 2-3: rewrite map_length; auto.
* now eapply IHvalid.
* apply H7.
* rewrite H4, H5 in H8. now rewrite !map_app in H8.
Qed.
(** we re-use the lemma proven above which asserts that list_ofFlatType is invariant *)
(** this requires more assumptions than strictly necessary, but they are all available in our setting *)
Lemma valid_flat_isFlatListOf_invariant (X : finType) (s1 : list X) fs1 fs2:
finRepr X Sigma
-> isFlatListOf fs1 s1
-> |fs1| >= width
-> valid offset width cards fs1 fs2
-> exists (s2 : list X), isFlatListOf fs2 s2.
Proof.
intros.
apply isFlatListOf_list_ofFlatType in H0. rewrite <- H in H0.
specialize (@valid_list_ofFlatType_invariant fpr A B fs1 fs2 H0 H1 H2) as H4.
apply (finRepr_exists_list H) in H4. destruct H4 as (s2 & H4); easy.
Qed.
Lemma relpower_valid_flat_agree (X : finType) (fincards : list (CCCard X)) s1 s2 fs1 fs2 n:
finRepr X Sigma
-> |fs1| >= width
-> isFlatListOf fs1 s1
-> isFlatListOf fs2 s2
-> isFlatCardsOf cards fincards
-> relpower (valid offset width cards) n fs1 fs2 <-> relpower (valid offset width fincards) n s1 s2.
Proof.
intros H0 H1 H2 H3 H4. split.
- intros H5. revert s1 s2 H2 H3. induction H5; intros.
+ specialize (isFlatListOf_functional H2 H3) as ->. eauto.
+ specialize (valid_flat_isFlatListOf_invariant H0 H2 H1 H) as (s3 & H6).
specialize (valid_length_inv H) as H7. rewrite H7 in H1.
specialize (IHrelpower H1 _ _ H6 H3).
econstructor.
* apply (valid_flat_agree H2 H6 H4) in H. apply H.
* apply IHrelpower.
- intros H5. clear H1.
revert fs1 fs2 H2 H3. induction H5; intros.
+ specialize (isFlatListOf_injective H2 H3) as ->. constructor.
+ assert (isFlatListOf (map index b) b) as H1 by (unfold isFlatListOf; easy).
specialize (IHrelpower _ _ H1 H3).
apply (valid_flat_agree H2 H1 H4) in H. eauto.
Qed.
Lemma final_flat_agree (X : finType) (F : list (list X)) (f : list (list nat)) l:
isFlatFinalOf f F -> forall s fs, isFlatListOf fs s -> satFinal offset l f fs <-> satFinal offset l F s.
Proof.
intros. split.
- intros (subs & k & H1 & H2 & H3). apply H in H1 as (subs' & H4 & H5).
exists subs', k. split; [ apply H4 | split; [ apply H2 | ]].
unfold isFlatListOf in H0. rewrite H0 in H3. rewrite H5 in H3.
destruct H3 as (b & H3). rewrite <- map_skipn in H3.
apply map_eq_app in H3 as (ls1 & ls2 & H3 & H6 & H7).
rewrite H3.
apply map_injective in H6; [ | apply injective_index].
rewrite H6. now exists ls2.
- intros (subs & k & H1 & H2 & H3). apply H in H1 as (subs' & H4 &H5).
exists subs', k. split; [ apply H4 | split; [ apply H2 | ]].
rewrite H5, H0. destruct H3 as (b & H3).
exists (map index b). rewrite <- map_skipn. rewrite H3.
now rewrite !map_app.
Qed.
End fixFCCInstance.
(* well-formedness transfers along isFlatCCOf in both directions *)
Lemma isFlatCCOf_wf_equivalent (pr : CC) (fpr : FlatCC) :
isFlatCCOf fpr pr -> (FlatCC_wellformed fpr <-> CC_wellformed pr).
Proof.
intros [H1 H2 H3 H4]. split; intros; unfold FlatCC_wellformed, CC_wellformed in *.
- destruct H as (F1 & F2 & F3 & F4 & F5 & F6). repeat split.
+ easy.
+ easy.
+ destruct F3 as (k & F3 & F3'). exists k. nia.
+ rewrite -> H4 in F4. rewrite map_length in F4. lia.
+ apply HCards in H as (flatcard & H & H5). apply F5 in H.
destruct H5. destruct H as (H & _). rewrite Hprem in H. rewrite map_length in H. lia.
+ apply HCards in H as (flatcard & H & H5). apply F5 in H.
destruct H5. destruct H as (_ & H). rewrite Hconc in H. rewrite map_length in H. lia.
+ destruct F6 as (k & F6). rewrite H4 in F6. rewrite map_length in F6. exists k; nia.
- destruct H as (F1 & F2 & F3 & F4 & F5 & F6). repeat split.
+ easy.
+ easy.
+ destruct F3 as (k & F3 & F3'). exists k. nia.
+ rewrite H4, map_length. lia.
+ apply HCards in H as (fincard & H & H5). apply F5 in H.
destruct H5. destruct H as (H & _). rewrite Hprem, map_length. lia.
+ apply HCards in H as (fincard & H & H5). apply F5 in H.
destruct H5. destruct H as (_ & H). rewrite Hconc, map_length. lia.
+ destruct F6 as (k & F6). rewrite H4, map_length. exists k. nia.
Qed.
(** now we can derive equivalence of instances related via isFlatCCOf *)
Lemma isFlatCCOf_equivalence (pr : CC) (fpr : FlatCC) :
isFlatCCOf fpr pr -> (FlatCCLang fpr <-> CCLang pr).
Proof.
intros. split.
- intros (H1 & H2 & H3). split; [ now eapply isFlatCCOf_wf_equivalent | ].
destruct H as [F1 F2 F3 F4 F5].
destruct H3 as (sf & H3 & H4 & H5).
apply (finRepr_exists_list F1) in H3 as (SF & H3).
exists SF. split.
+ rewrite <- F2, <- F3, <- HSteps. eapply relpower_valid_flat_agree; eauto. apply H1.
+ rewrite <- F2. rewrite F4, map_length in H5. eapply final_flat_agree; eauto.
- intros (H1 & H2). split; [ now eapply isFlatCCOf_wf_equivalent | ].
split; [now eapply isFlatCCOf_isValidFlattening | ].
destruct H as [F1 F2 F3 F4 F5].
destruct H2 as (sf & H3 & H4).
exists (map index sf). repeat split.
+ unfold list_ofFlatType, ofFlatType.
setoid_rewrite in_map_iff. intros a (x & <- & H). rewrite F1. apply index_le.
+ eapply relpower_valid_flat_agree; eauto.
* now rewrite isFlatCCOf_wf_equivalent.
* now eapply isFlatCCOf_isValidFlattening with (pr := pr).
* rewrite F4, map_length, F3. apply H1.
* unfold isFlatListOf. reflexivity.
* rewrite F2, F3, HSteps. apply H3.
+ eapply final_flat_agree; eauto.
* unfold isFlatListOf; reflexivity.
* rewrite F2, F4, map_length. apply H4.
Qed.
(** ** Unflattening *)
(** given a flat instance that is a valid flattening, we can derive a "canonical" (up to bijections of the finite type) CC instance *)
(** we use Fin.t as the canonical finite type *)
Import Coq.Init.Specif.
Lemma unflattenString (f : list nat) k : list_ofFlatType k f -> {f' : list (finType_CS (Fin.t k)) & isFlatListOf f f'}.
Proof.
intros H.
eapply finRepr_exists_list with (X := finType_CS (Fin.t k)) in H as (a' & H).
2: { unfold finRepr. specialize (Fin_cardinality k). easy. }
eauto.
Qed.
Lemma unflattenCarddow (w : CCCard nat) k : CCCard_ofFlatType w k -> {w' : CCCard (finType_CS (Fin.t k)) & isFlatCCCardOf w w'}.
Proof.
intros. destruct w. destruct H as (H1 & H2). cbn in *.
apply unflattenString in H1 as (prem' & H1).
apply unflattenString in H2 as (conc' & H2).
exists (Build_CCCard prem' conc'). split; easy.
Qed.
Lemma unflattenCards (l : list (CCCard nat)) k : isValidFlatCards l k -> {l' : list (CCCard (finType_CS (Fin.t k))) & isFlatCardsOf l l'}.
Proof.
intros. unfold isValidFlatCards in H. induction l.
- exists []. easy.
- edestruct IHl as (l' & IH);[ easy | ]. specialize (H a (or_introl eq_refl)).
apply unflattenCarddow in H as (a' & H). exists (a' :: l'). split; intros.
+ destruct H0 as [-> | H0]; [easy | ]. apply IH in H0 as (card & ? & ?); eauto.
+ destruct H0 as [-> | H0]; [ easy | ]. apply IH in H0 as (card' & ? & ?); eauto.
Qed.
Lemma unflattenFinal (f : list (list nat)) k : isValidFlatFinal f k -> {f' : list (list (finType_CS (Fin.t k))) & isFlatFinalOf f f'}.
Proof.
intros. unfold isValidFlatFinal in H. induction f; intros.
- exists []; easy.
- edestruct IHf as (f' & IH); [easy | ]. specialize (H a (or_introl eq_refl)).
apply unflattenString in H as (a' &H).
exists (a'::f'). split; intros.
+ destruct H0 as [-> | H0]; [easy | ]. apply IH in H0 as (? & ? & ?); eauto.
+ destruct H0 as [-> | H0]; [easy | ]. apply IH in H0 as (? & ? & ?); eauto.
Qed.
Lemma unflattenCC (f : FlatCC) : isValidFlattening f -> {f' : CC & isFlatCCOf f f'}.
Proof.
intros (H1 & H2 & H3).
apply unflattenCards in H3 as (w' & H3).
apply unflattenFinal in H2 as (f' & H2).
apply unflattenString in H1 as (i' & H1).
exists (Build_CC (offset f) (width f) i' w' f' (steps f)).
constructor; eauto.
cbn. unfold finRepr. specialize (Fin_cardinality (Sigma f)). easy.
Qed.
(** ** Extraction *)
From Undecidability.L.Tactics Require Import LTactics GenEncode.
From Undecidability.L.Datatypes Require Import LProd LOptions.
Section fix_X.
Variable (X:Type).
Context `{X_encodable: encodable X}.
MetaCoq Run (tmGenEncode "CCCard_enc" (CCCard X)).
Hint Resolve CCCard_enc_correct : Lrewrite.
Global Instance term_Build_CCCard : computableTime' (@Build_CCCard X) (fun _ _ => (1, fun _ _ => (1, tt))).
Proof.
extract constructor. solverec.
Qed.
Definition cnst_prem := 5.
Global Instance term_prem : computableTime' (@prem X) (fun _ _ => (cnst_prem, tt)).
Proof.
extract. unfold cnst_prem. solverec.
Qed.
Definition cnst_conc := 5.
Global Instance term_conc : computableTime' (@conc X) (fun _ _ => (cnst_conc, tt)).
Proof.
extract. unfold cnst_conc. solverec.
Qed.
Definition c__sizeCCCard := 4.
Lemma CCCard_enc_size (card : CCCard X) : size (enc card) = size (enc (prem card)) + size (enc (conc card)) + c__sizeCCCard.
Proof.
destruct card. cbn. unfold enc at 1, c__sizeCCCard. cbn. nia.
Qed.
End fix_X.
#[export]
Hint Resolve CCCard_enc_correct : Lrewrite.
MetaCoq Run (tmGenEncode "FlatCC_enc" (FlatCC)).
#[export]
Hint Resolve FlatCC_enc_correct : Lrewrite.
From Complexity.Libs.CookPrelim Require Import PolyBounds.
#[export]
Instance term_Build_FlatCC : computableTime' Build_FlatCC (fun _ _ => (1, fun _ _ => (1, fun _ _ => (1, fun _ _ => (1, fun _ _ => (1, fun _ _ => (1, fun _ _ => (1, tt)))))))).
Proof.
extract constructor. solverec.
Qed.
Definition c__Sigma := 10.
#[export]
Instance term_FlatCC_Sigma : computableTime' Sigma (fun _ _ => (c__Sigma, tt)).
Proof.
extract. unfold c__Sigma. solverec.
Qed.
Definition c__offset := 10.
#[export]
Instance term_FlatCC_offset : computableTime' offset (fun _ _ => (c__offset, tt)).
Proof.
extract. unfold c__offset. solverec.
Qed.
Definition c__width := 10.
#[export]
Instance term_FlatCC_width : computableTime' width (fun _ _ => (c__width, tt)).
Proof.
extract. unfold c__width. solverec.
Qed.
Definition c__init := 10.
#[export]
Instance term_FlatCC_init : computableTime' init (fun _ _ => (c__init, tt)).
Proof.
extract. unfold c__init. solverec.
Qed.
Definition c__cards := 10.
#[export]
Instance term_FlatCC_cards : computableTime' cards (fun _ _ => (c__cards, tt)).
Proof.
extract. unfold c__cards. solverec.
Qed.
Definition c__final := 10.
#[export]
Instance term_FlatCC_final : computableTime' final (fun _ _ => (c__final, tt)).
Proof.
extract. unfold c__final. solverec.
Qed.
Definition c__steps := 10.
#[export]
Instance term_FlatCC_steps : computableTime' steps (fun _ _ => (c__steps, tt)).
Proof.
extract. unfold c__steps. solverec.
Qed.
Lemma FlatCC_enc_size (fpr : FlatCC) : size (enc fpr) = size (enc (Sigma fpr)) + size(enc (offset fpr)) + size (enc (width fpr)) + size (enc (init fpr)) + size (enc (cards fpr)) + size (enc (final fpr)) + size (enc (steps fpr)) + 9.
Proof.
destruct fpr. cbn.
unfold enc at 1. cbn. nia.
Qed.
(** extraction of CCCard_of_size_dec *)
Section CCCard_of_size.
Variable ( X : Type).
Context `{X_encodable: encodable X}.
Definition c__prcardOfSizeDec := 2 * (cnst_prem + 2 * 5 + cnst_conc + 6 + c__length).
Definition CCCard_of_size_dec_time (width : nat) (card : CCCard X) := c__prcardOfSizeDec * (1 + |prem card| + |conc card|) + eqbTime (X := nat) (size (enc (|prem card|))) (size (enc width)) + eqbTime (X := nat) (size (enc (|conc card|))) (size (enc width)).
Global Instance term_CCCard_of_size_dec : computableTime' (@CCCard_of_size_dec X) (fun width _ => (1, fun card _ => (CCCard_of_size_dec_time width card, tt))).
Proof.
extract. solverec. unfold CCCard_of_size_dec_time, c__prcardOfSizeDec. nia.
Qed.
Definition c__prcardOfSizeDecBound := c__prcardOfSizeDec + c__eqbComp nat.
Lemma CCCard_of_size_dec_time_bound width (card : CCCard X) : CCCard_of_size_dec_time width card <= (size(enc card) + 1) * c__prcardOfSizeDecBound.
Proof.
unfold CCCard_of_size_dec_time. rewrite !eqbTime_le_l. rewrite !list_size_enc_length, !list_size_length.
rewrite CCCard_enc_size. unfold c__prcardOfSizeDecBound, c__sizeCCCard. nia.
Qed.
End CCCard_of_size.
(** extraction of FlatCC_wf_dec *)
Definition c__FlatCCWfDec := 3 * c__leb2 + 4 * c__width + 3 * c__offset + 2 * 5 + 2 * c__init + 2 * c__length + c__cards + 32 + 4 * c__leb + 2 * c__eqbComp nat * size (enc 0).
Definition FlatCC_wf_dec_time x := 2 * c__length * (|init x|) + leb_time (width x) (|init x|) + forallb_time (@CCCard_of_size_dec_time nat (width x)) (cards x) + modulo_time (|init x|) (offset x) + modulo_time (width x) (offset x) + c__FlatCCWfDec.
#[export]
Instance term_FlatCC_wf_dec : computableTime' FlatCC_wf_dec (fun fpr _ => (FlatCC_wf_dec_time fpr, tt)).
Proof.
extract. solverec. unfold FlatCC_wf_dec_time, c__FlatCCWfDec, leb_time. rewrite !eqbTime_le_r.
(*ring_simplify.*)
rewrite !Nat.le_min_l with (n:=1).
simp_comp_arith. ring_simplify. reflexivity.
Qed.
Definition c__FlatCCWfDecBound := 2*(2 * c__length + c__leb + c__prcardOfSizeDecBound + c__forallb + 2 * c__moduloBound + c__FlatCCWfDec).
Definition poly__FlatCCWfDec n := (n*n + 2* n + 1) * c__FlatCCWfDecBound.
Lemma FlatCC_wf_dec_time_bound fpr :
FlatCC_wf_dec_time fpr <= poly__FlatCCWfDec (size (enc fpr)).
Proof.
unfold FlatCC_wf_dec_time. rewrite leb_time_bound_l.
rewrite !modulo_time_bound. rewrite list_size_enc_length.
rewrite list_size_length.
erewrite forallb_time_bound_env.
2: {
split; [intros | ].
- specialize (CCCard_of_size_dec_time_bound (X := nat) y a) as H1.
instantiate (2:= fun n => (n + 1) * c__prcardOfSizeDecBound). simp_comp_arith. nia.
- smpl_inO.
}
rewrite list_size_length.
replace_le (size(enc (cards fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia).
replace_le (size(enc (init fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia).
replace_le (size(enc (width fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia).
replace_le (size(enc(offset fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia).
unfold poly__FlatCCWfDec, c__FlatCCWfDecBound. nia.
Qed.
Lemma FlatCC_wf_dec_poly : monotonic poly__FlatCCWfDec /\ inOPoly poly__FlatCCWfDec.
Proof.
unfold poly__FlatCCWfDec; split; smpl_inO.
Qed.
(** CCCard_ofFlatType_dec *)
Definition c__CCCardOfFlatTypeDec := cnst_prem + cnst_conc +8.
Definition CCCard_ofFlatType_dec_time (sig : nat) (w : CCCard nat):= list_ofFlatType_dec_time sig (prem w) + list_ofFlatType_dec_time sig (conc w) + c__CCCardOfFlatTypeDec.
#[export]
Instance term_CCCard_ofFlatType_dec : computableTime' CCCard_ofFlatType_dec (fun sig _ => (1, fun w _ => (CCCard_ofFlatType_dec_time sig w, tt))).
Proof.
extract. solverec. unfold CCCard_ofFlatType_dec_time, c__CCCardOfFlatTypeDec. nia.
Qed.
Definition c__CCCardOfFlatTypeDecBound := 2 * (c__CCCardOfFlatTypeDec + 1).
Definition poly__CCCardOfFlatTypeDec n := (poly__listOfFlatTypeDec n + 1) * c__CCCardOfFlatTypeDecBound.
Lemma CCCard_ofFlatType_dec_time_bound sig w : CCCard_ofFlatType_dec_time sig w <= poly__CCCardOfFlatTypeDec (size (enc sig) + size (enc w)).
Proof.
unfold CCCard_ofFlatType_dec_time. rewrite !list_ofFlatType_dec_time_bound.
unfold poly__CCCardOfFlatTypeDec.
poly_mono list_ofFlatType_dec_poly at 2.
2: (replace_le (size (enc (conc w))) with (size (enc w)) by (rewrite CCCard_enc_size; lia)); reflexivity.
poly_mono list_ofFlatType_dec_poly at 1.
2: (replace_le (size (enc (prem w))) with (size (enc w)) by (rewrite CCCard_enc_size; lia)); reflexivity.
unfold c__CCCardOfFlatTypeDecBound. nia.
Qed.
Lemma CCCard_ofFlatType_dec_poly : monotonic poly__CCCardOfFlatTypeDec /\ inOPoly poly__CCCardOfFlatTypeDec.
Proof.
split; unfold poly__CCCardOfFlatTypeDec; smpl_inO; apply list_ofFlatType_dec_poly.
Qed.
(** isValidFlattening_dec *)
Definition c__isValidFlatteningDec := 3 * c__Sigma + c__init + c__final + c__cards + 16.
Definition isValidFlattening_dec_time x := list_ofFlatType_dec_time (Sigma x) (init x) + forallb_time (list_ofFlatType_dec_time (Sigma x)) (final x)+ forallb_time (CCCard_ofFlatType_dec_time (Sigma x)) (cards x) + c__isValidFlatteningDec.
#[export]
Instance term_isValidFlattening_dec : computableTime' isValidFlattening_dec (fun fpr _ => (isValidFlattening_dec_time fpr, tt)).
Proof.
extract. solverec. unfold isValidFlattening_dec_time, c__isValidFlatteningDec.
repeat change (fun x => ?h x) with h. solverec.
Qed.
Definition c__isValidFlatteningDecBound := 2 * c__forallb + c__isValidFlatteningDec.
Definition poly__isValidFlatteningDec n :=(n + 2) * poly__listOfFlatTypeDec n + (n + 1) * poly__CCCardOfFlatTypeDec n + (n + 1) * c__isValidFlatteningDecBound.
Lemma isValidFlattening_dec_time_bound fpr : isValidFlattening_dec_time fpr <= poly__isValidFlatteningDec (size (enc fpr)).
Proof.
unfold isValidFlattening_dec_time.
rewrite list_ofFlatType_dec_time_bound.
erewrite forallb_time_bound_env.
2: {
split; [intros | ].
- rewrite list_ofFlatType_dec_time_bound, Nat.add_comm; reflexivity.
- apply list_ofFlatType_dec_poly.
}
erewrite forallb_time_bound_env.
2 : {
split; [intros | ].
- rewrite CCCard_ofFlatType_dec_time_bound, Nat.add_comm; reflexivity.
- apply CCCard_ofFlatType_dec_poly.
}
rewrite !list_size_length.
poly_mono list_ofFlatType_dec_poly at 1.
2: (replace_le (size (enc (Sigma fpr)) + size (enc (init fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia)); reflexivity.
poly_mono list_ofFlatType_dec_poly at 2.
2: (replace_le (size (enc (final fpr)) + size (enc (Sigma fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia)); reflexivity.
replace_le (size (enc (final fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia) at 1.
replace_le (size (enc (cards fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia) at 1.
poly_mono CCCard_ofFlatType_dec_poly at 1.
2: (replace_le (size (enc (cards fpr)) + size (enc (Sigma fpr))) with (size (enc fpr)) by (rewrite FlatCC_enc_size; lia)); reflexivity.
unfold poly__isValidFlatteningDec, c__isValidFlatteningDecBound. nia.
Qed.
Lemma isValidFlatteningDec_poly : monotonic poly__isValidFlatteningDec /\ inOPoly poly__isValidFlatteningDec.
Proof.
split; (unfold poly__isValidFlatteningDec; smpl_inO; [apply list_ofFlatType_dec_poly |apply CCCard_ofFlatType_dec_poly ]).
Qed.
|
function chrpak_test ( )
%*****************************************************************************80
%
%% CHRPAK_TEST tests the CHRPAK library.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 15 January 2013
%
% Author:
%
% John Burkardt
%
timestamp ( );
fprintf ( 1, '\n' );
fprintf ( 1, 'CHRPAK_TEST:\n' );
fprintf ( 1, ' MATLAB version\n' );
fprintf ( 1, ' Test the CHRPAK library.\n' );
chrpak_test001 ( );
chrpak_test005 ( );
chrpak_test006 ( );
chrpak_test011 ( );
chrpak_test016 ( );
chrpak_test021 ( );
chrpak_test022 ( );
chrpak_test026 ( );
chrpak_test029 ( );
chrpak_test046 ( );
chrpak_test051 ( );
chrpak_test054 ( );
chrpak_test056 ( );
chrpak_test057 ( );
chrpak_test065 ( );
chrpak_test066 ( );
chrpak_test067 ( );
chrpak_test083 ( );
chrpak_test090 ( );
chrpak_test091 ( );
chrpak_test1015 ( );
chrpak_test102 ( );
chrpak_test105 ( );
chrpak_test115 ( );
chrpak_test1155 ( );
chrpak_test1225 ( );
chrpak_test1227 ( );
chrpak_test1255 ( );
chrpak_test129 ( );
chrpak_test137 ( );
chrpak_test138 ( );
%
% Terminate.
%
fprintf ( 1, '\n' );
fprintf ( 1, 'CHRPAK_TEST:\n' );
fprintf ( 1, ' Normal end of execution.\n' );
fprintf ( 1, '\n' );
timestamp ( );
return
end
|
Require Import Crypto.Specific.Framework.RawCurveParameters.
Require Import Crypto.Util.LetIn.
(***
Modulus : 2^206 - 5
Base: 64
***)
Definition curve : CurveParameters :=
{|
sz := 4%nat;
base := 64;
bitwidth := 64;
s := 2^206;
c := [(1, 5)];
carry_chains := None;
a24 := None;
coef_div_modulus := None;
goldilocks := None;
karatsuba := None;
montgomery := true;
freeze := Some false;
ladderstep := false;
mul_code := None;
square_code := None;
upper_bound_of_exponent_loose := None;
upper_bound_of_exponent_tight := None;
allowable_bit_widths := None;
freeze_extra_allowable_bit_widths := None;
modinv_fuel := None
|}.
Ltac extra_prove_mul_eq _ := idtac.
Ltac extra_prove_square_eq _ := idtac.
|
import Numeric.LinearAlgebra
import ActivationFunction
import Mnist
import SampleWeight
batchSize = 100
predict :: SampleWeight -> Vector R -> Vector R
predict ([w1,w2,w3],[b1,b2,b3]) x =
softMax' . (\x'' -> sumInput x'' w3 b3) . sigmoid . (\x' -> sumInput x' w2 b2) . sigmoid $ sumInput x w1 b1
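-- predict runs the input through a three-layer network: an affine layer (w1, b1)
-- followed by a sigmoid, a second affine layer (w2, b2) followed by a sigmoid, and
-- an output affine layer (w3, b3) followed by a softmax.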
sumInput :: Vector R -> Weight -> Bias -> Vector R
sumInput x w b = (x <# w) + b
maxIndexPredict :: SampleWeight -> Vector R -> Double
maxIndexPredict sw x = fromIntegral . maxIndex $ predict sw x
take' :: Indexable c t => Int -> Int -> c -> [t]
take' n1 n2 x
| n1 >= n2 = []
| otherwise = (x ! n1) : take' (n1+1) n2 x
increment :: [Double] -> [Double] -> Double
increment ps l = fromIntegral . length . filter id $ zipWith (==) ps l
countAccuracy :: Double -> Int -> SampleWeight -> DataSet -> Double
countAccuracy a n sw ds@(i,l)
| n <= 0 = a
| otherwise =
if maxIndexPredict sw (i ! (n-1)) == l ! (n-1)
then countAccuracy (a+1) (n-1) sw ds
else countAccuracy a (n-1) sw ds
-- For batch
countAccuracy' :: Double -> Int -> SampleWeight -> DataSet -> Double
countAccuracy' a n sw ds@(i,l)
| n <= 0 = a
| otherwise = countAccuracy' (a+cnt) (n-batchSize) sw ds
where ps = maxIndexPredict sw <$> take' (n-batchSize) n i
ls = take' (n-batchSize) n l
cnt = increment ps ls
main :: IO ()
main = do
[_, ds] <- loadMnist True
sw <- loadSW
let r = rows $ fst ds
cnt = countAccuracy' 0 r sw ds
putStrLn $ "Accuracy: " ++ show (cnt / fromIntegral r)
|
Require Import Nat Arith.
Inductive Nat : Type := succ : Nat -> Nat | zero : Nat.
Inductive Lst : Type := cons : Nat -> Lst -> Lst | nil : Lst.
Inductive Tree : Type := node : Nat -> Tree -> Tree -> Tree | leaf : Tree.
Inductive Pair : Type := mkpair : Nat -> Nat -> Pair
with ZLst : Type := zcons : Pair -> ZLst -> ZLst | znil : ZLst.
Fixpoint plus (plus_arg0 : Nat) (plus_arg1 : Nat) : Nat
:= match plus_arg0, plus_arg1 with
| zero, n => n
| succ n, m => succ (plus n m)
end.
Fixpoint half (half_arg0 : Nat) : Nat
:= match half_arg0 with
| zero => zero
| succ zero => zero
| succ (succ n) => succ (half n)
end.
Lemma lem: forall m n, plus (succ m) n = plus m (succ n).
Proof.
intros. simpl. induction m.
- simpl. rewrite IHm. reflexivity.
- reflexivity.
Qed.
Theorem theorem0 : forall (x : Nat), eq (half (plus x x)) x.
Proof.
induction x.
- simpl. destruct x.
* simpl. rewrite <- lem. rewrite IHx. reflexivity.
* reflexivity.
- reflexivity.
Qed.
|
/-
Copyright (c) 2020 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
-/
import data.polynomial.degree.trailing_degree
import data.polynomial.erase_lead
import data.polynomial.eval
/-!
# Reverse of a univariate polynomial
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
The main definition is `reverse`. Applying `reverse` to a polynomial `f : R[X]` produces
the polynomial with a reversed list of coefficients, equivalent to `X^f.nat_degree * f(1/X)`.
The main result is that `reverse (f * g) = reverse f * reverse g`, provided the leading
coefficients of `f` and `g` do not multiply to zero.
-/
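/- For illustration: with `f = 1 + 2 * X + 3 * X ^ 2` we have `f.nat_degree = 2` and
`reverse f = 3 + 2 * X + X ^ 2`; the coefficient list `[1, 2, 3]` is read backwards,
which is exactly `X ^ 2 * f (1 / X)` expanded as a polynomial. -/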
namespace polynomial
open polynomial finsupp finset
open_locale classical polynomial
section semiring
variables {R : Type*} [semiring R] {f : R[X]}
/-- If `i ≤ N`, then `rev_at_fun N i` returns `N - i`, otherwise it returns `i`.
This is the map used by the embedding `rev_at`.
-/
def rev_at_fun (N i : ℕ) : ℕ := ite (i ≤ N) (N-i) i
lemma rev_at_fun_invol {N i : ℕ} : rev_at_fun N (rev_at_fun N i) = i :=
begin
unfold rev_at_fun,
split_ifs with h j,
{ exact tsub_tsub_cancel_of_le h, },
{ exfalso,
apply j,
exact nat.sub_le N i, },
{ refl, },
end
lemma rev_at_fun_inj {N : ℕ} : function.injective (rev_at_fun N) :=
begin
intros a b hab,
rw [← @rev_at_fun_invol N a, hab, rev_at_fun_invol],
end
/-- If `i ≤ N`, then `rev_at N i` returns `N - i`, otherwise it returns `i`.
Essentially, this embedding is only used for `i ≤ N`.
The advantage of `rev_at N i` over `N - i` is that `rev_at` is an involution.
-/
def rev_at (N : ℕ) : function.embedding ℕ ℕ :=
{ to_fun := λ i , (ite (i ≤ N) (N-i) i),
inj' := rev_at_fun_inj }
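/- For example, `rev_at 5 2 = 3` and `rev_at 5 0 = 5`, while `rev_at 5 7 = 7`:
indices larger than `N` are left untouched. -/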
/-- We prefer to use the bundled `rev_at` over unbundled `rev_at_fun`. -/
@[simp] lemma rev_at_fun_eq (N i : ℕ) : rev_at_fun N i = rev_at N i := rfl
@[simp] lemma rev_at_invol {N i : ℕ} : (rev_at N) (rev_at N i) = i :=
rev_at_fun_invol
@[simp] lemma rev_at_le {N i : ℕ} (H : i ≤ N) : rev_at N i = N - i :=
if_pos H
lemma rev_at_add {N O n o : ℕ} (hn : n ≤ N) (ho : o ≤ O) :
rev_at (N + O) (n + o) = rev_at N n + rev_at O o :=
begin
rcases nat.le.dest hn with ⟨n', rfl⟩,
rcases nat.le.dest ho with ⟨o', rfl⟩,
repeat { rw rev_at_le (le_add_right rfl.le) },
rw [add_assoc, add_left_comm n' o, ← add_assoc, rev_at_le (le_add_right rfl.le)],
repeat {rw add_tsub_cancel_left},
end
@[simp] lemma rev_at_zero (N : ℕ) : rev_at N 0 = N :=
by simp [rev_at]
/-- `reflect N f` is the polynomial such that `(reflect N f).coeff i = f.coeff (rev_at N i)`.
In other words, the terms with exponent `[0, ..., N]` now have exponent `[N, ..., 0]`.
In practice, `reflect` is only used when `N` is at least as large as the degree of `f`.
Eventually, it will be used with `N` exactly equal to the degree of `f`. -/
noncomputable def reflect (N : ℕ) : R[X] → R[X]
| ⟨f⟩ := ⟨finsupp.emb_domain (rev_at N) f⟩
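/- Concretely, with `f = C 1 + C 2 * X` and `N = 3`, we get
`reflect 3 f = C 2 * X ^ 2 + C 1 * X ^ 3`: the coefficients at exponents `0` and `1`
move to exponents `3` and `2`, and all other coefficients remain `0`. -/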
lemma reflect_support (N : ℕ) (f : R[X]) :
(reflect N f).support = finset.image (rev_at N) f.support :=
begin
rcases f,
ext1,
simp only [reflect, support_of_finsupp, support_emb_domain, finset.mem_map, finset.mem_image],
end
@[simp] lemma coeff_reflect (N : ℕ) (f : R[X]) (i : ℕ) :
coeff (reflect N f) i = f.coeff (rev_at N i) :=
begin
rcases f,
simp only [reflect, coeff],
calc finsupp.emb_domain (rev_at N) f i
= finsupp.emb_domain (rev_at N) f (rev_at N (rev_at N i)) : by rw rev_at_invol
... = f (rev_at N i) : finsupp.emb_domain_apply _ _ _
end
@[simp] lemma reflect_zero {N : ℕ} : reflect N (0 : R[X]) = 0 := rfl
@[simp] lemma reflect_eq_zero_iff {N : ℕ} {f : R[X]} :
reflect N (f : R[X]) = 0 ↔ f = 0 :=
by { rcases f, simp [reflect] }
@[simp] lemma reflect_add (f g : R[X]) (N : ℕ) :
reflect N (f + g) = reflect N f + reflect N g :=
by { ext, simp only [coeff_add, coeff_reflect], }
@[simp] lemma reflect_C_mul (f : R[X]) (r : R) (N : ℕ) :
reflect N (C r * f) = C r * (reflect N f) :=
by { ext, simp only [coeff_reflect, coeff_C_mul], }
@[simp] lemma reflect_C_mul_X_pow (N n : ℕ) {c : R} :
reflect N (C c * X ^ n) = C c * X ^ (rev_at N n) :=
begin
ext,
rw [reflect_C_mul, coeff_C_mul, coeff_C_mul, coeff_X_pow, coeff_reflect],
split_ifs with h j,
{ rw [h, rev_at_invol, coeff_X_pow_self], },
{ rw [not_mem_support_iff.mp],
intro a,
rw [← one_mul (X ^ n), ← C_1] at a,
apply h,
rw [← (mem_support_C_mul_X_pow a), rev_at_invol], },
end
@[simp] lemma reflect_C (r : R) (N : ℕ) : reflect N (C r) = C r * X ^ N :=
by conv_lhs { rw [← mul_one (C r), ← pow_zero X, reflect_C_mul_X_pow, rev_at_zero] }
@[simp] lemma reflect_monomial (N n : ℕ) : reflect N ((X : R[X]) ^ n) = X ^ (rev_at N n) :=
by rw [← one_mul (X ^ n), ← one_mul (X ^ (rev_at N n)), ← C_1, reflect_C_mul_X_pow]
lemma reflect_mul_induction (cf cg : ℕ) :
∀ N O : ℕ, ∀ f g : R[X],
f.support.card ≤ cf.succ → g.support.card ≤ cg.succ → f.nat_degree ≤ N → g.nat_degree ≤ O →
(reflect (N + O) (f * g)) = (reflect N f) * (reflect O g) :=
begin
induction cf with cf hcf,
--first induction (left): base case
{ induction cg with cg hcg,
-- second induction (right): base case
{ intros N O f g Cf Cg Nf Og,
rw [← C_mul_X_pow_eq_self Cf, ← C_mul_X_pow_eq_self Cg],
simp_rw [mul_assoc, X_pow_mul, mul_assoc, ← pow_add (X : R[X]), reflect_C_mul,
reflect_monomial, add_comm, rev_at_add Nf Og, mul_assoc, X_pow_mul, mul_assoc,
← pow_add (X : R[X]), add_comm], },
-- second induction (right): induction step
{ intros N O f g Cf Cg Nf Og,
by_cases g0 : g = 0,
{ rw [g0, reflect_zero, mul_zero, mul_zero, reflect_zero], },
rw [← erase_lead_add_C_mul_X_pow g, mul_add, reflect_add, reflect_add, mul_add, hcg, hcg];
try { assumption },
{ exact le_add_left card_support_C_mul_X_pow_le_one },
{ exact (le_trans (nat_degree_C_mul_X_pow_le g.leading_coeff g.nat_degree) Og) },
{ exact nat.lt_succ_iff.mp (gt_of_ge_of_gt Cg (erase_lead_support_card_lt g0)) },
{ exact le_trans erase_lead_nat_degree_le_aux Og } } },
--first induction (left): induction step
{ intros N O f g Cf Cg Nf Og,
by_cases f0 : f = 0,
{ rw [f0, reflect_zero, zero_mul, zero_mul, reflect_zero], },
rw [← erase_lead_add_C_mul_X_pow f, add_mul, reflect_add, reflect_add, add_mul, hcf, hcf];
try { assumption },
{ exact le_add_left card_support_C_mul_X_pow_le_one },
{ exact (le_trans (nat_degree_C_mul_X_pow_le f.leading_coeff f.nat_degree) Nf) },
{ exact nat.lt_succ_iff.mp (gt_of_ge_of_gt Cf (erase_lead_support_card_lt f0)) },
{ exact (le_trans erase_lead_nat_degree_le_aux Nf) } }
end
@[simp] theorem reflect_mul
(f g : R[X]) {F G : ℕ} (Ff : f.nat_degree ≤ F) (Gg : g.nat_degree ≤ G) :
reflect (F + G) (f * g) = reflect F f * reflect G g :=
reflect_mul_induction _ _ F G f g f.support.card.le_succ g.support.card.le_succ Ff Gg
section eval₂
variables {S : Type*} [comm_semiring S]
lemma eval₂_reflect_mul_pow (i : R →+* S) (x : S) [invertible x] (N : ℕ) (f : R[X])
(hf : f.nat_degree ≤ N) : eval₂ i (⅟x) (reflect N f) * x ^ N = eval₂ i x f :=
begin
refine induction_with_nat_degree_le (λ f, eval₂ i (⅟x) (reflect N f) * x ^ N = eval₂ i x f)
_ _ _ _ f hf,
{ simp },
{ intros n r hr0 hnN,
simp only [rev_at_le hnN, reflect_C_mul_X_pow, eval₂_X_pow, eval₂_C, eval₂_mul],
conv in (x ^ N) { rw [← nat.sub_add_cancel hnN] },
rw [pow_add, ← mul_assoc, mul_assoc (i r), ← mul_pow, inv_of_mul_self, one_pow, mul_one] },
{ intros,
simp [*, add_mul] }
end
lemma eval₂_reflect_eq_zero_iff (i : R →+* S) (x : S) [invertible x] (N : ℕ) (f : R[X])
(hf : f.nat_degree ≤ N) : eval₂ i (⅟x) (reflect N f) = 0 ↔ eval₂ i x f = 0 :=
begin
conv_rhs { rw [← eval₂_reflect_mul_pow i x N f hf] },
split,
{ intro h, rw [h, zero_mul] },
{ intro h, rw [← mul_one (eval₂ i (⅟x) _), ← one_pow N, ← mul_inv_of_self x,
mul_pow, ← mul_assoc, h, zero_mul] }
end
end eval₂
/-- The reverse of a polynomial f is the polynomial obtained by "reading f backwards".
Even though this is not the actual definition, reverse f = f (1/X) * X ^ f.nat_degree. -/
noncomputable def reverse (f : R[X]) : R[X] := reflect f.nat_degree f
lemma coeff_reverse (f : R[X]) (n : ℕ) :
f.reverse.coeff n = f.coeff (rev_at f.nat_degree n) :=
by rw [reverse, coeff_reflect]
@[simp] lemma coeff_zero_reverse (f : R[X]) : coeff (reverse f) 0 = leading_coeff f :=
by rw [coeff_reverse, rev_at_le (zero_le f.nat_degree), tsub_zero, leading_coeff]
@[simp] lemma reverse_zero : reverse (0 : R[X]) = 0 := rfl
@[simp] lemma reverse_eq_zero : f.reverse = 0 ↔ f = 0 :=
by simp [reverse]
lemma reverse_nat_degree_le (f : R[X]) : f.reverse.nat_degree ≤ f.nat_degree :=
begin
rw [nat_degree_le_iff_degree_le, degree_le_iff_coeff_zero],
intros n hn,
rw with_bot.coe_lt_coe at hn,
rw [coeff_reverse, rev_at, function.embedding.coe_fn_mk,
if_neg (not_le_of_gt hn), coeff_eq_zero_of_nat_degree_lt hn],
end
lemma nat_degree_eq_reverse_nat_degree_add_nat_trailing_degree (f : R[X]) :
f.nat_degree = f.reverse.nat_degree + f.nat_trailing_degree :=
begin
by_cases hf : f = 0,
{ rw [hf, reverse_zero, nat_degree_zero, nat_trailing_degree_zero] },
apply le_antisymm,
{ refine tsub_le_iff_right.mp _,
apply le_nat_degree_of_ne_zero,
rw [reverse, coeff_reflect, ←rev_at_le f.nat_trailing_degree_le_nat_degree, rev_at_invol],
exact trailing_coeff_nonzero_iff_nonzero.mpr hf },
{ rw ← le_tsub_iff_left f.reverse_nat_degree_le,
apply nat_trailing_degree_le_of_ne_zero,
have key := mt leading_coeff_eq_zero.mp (mt reverse_eq_zero.mp hf),
rwa [leading_coeff, coeff_reverse, rev_at_le f.reverse_nat_degree_le] at key },
end
lemma reverse_nat_degree (f : R[X]) :
f.reverse.nat_degree = f.nat_degree - f.nat_trailing_degree :=
by rw [f.nat_degree_eq_reverse_nat_degree_add_nat_trailing_degree, add_tsub_cancel_right]
lemma reverse_leading_coeff (f : R[X]) : f.reverse.leading_coeff = f.trailing_coeff :=
by rw [leading_coeff, reverse_nat_degree, ←rev_at_le f.nat_trailing_degree_le_nat_degree,
coeff_reverse, rev_at_invol, trailing_coeff]
lemma reverse_nat_trailing_degree (f : R[X]) : f.reverse.nat_trailing_degree = 0 :=
begin
by_cases hf : f = 0,
{ rw [hf, reverse_zero, nat_trailing_degree_zero] },
{ rw ← le_zero_iff,
apply nat_trailing_degree_le_of_ne_zero,
rw [coeff_zero_reverse],
exact mt leading_coeff_eq_zero.mp hf },
end
theorem reverse_mul {f g : R[X]} (fg : f.leading_coeff * g.leading_coeff ≠ 0) :
reverse (f * g) = reverse f * reverse g :=
begin
unfold reverse,
rw [nat_degree_mul' fg, reflect_mul f g rfl.le rfl.le],
end
@[simp] lemma reverse_mul_of_domain {R : Type*} [ring R] [no_zero_divisors R] (f g : R[X]) :
reverse (f * g) = reverse f * reverse g :=
begin
by_cases f0 : f=0,
{ simp only [f0, zero_mul, reverse_zero], },
by_cases g0 : g=0,
{ rw [g0, mul_zero, reverse_zero, mul_zero], },
simp [reverse_mul, *],
end
lemma trailing_coeff_mul {R : Type*} [ring R] [no_zero_divisors R] (p q : R[X]) :
(p * q).trailing_coeff = p.trailing_coeff * q.trailing_coeff :=
by rw [←reverse_leading_coeff, reverse_mul_of_domain, leading_coeff_mul,
reverse_leading_coeff, reverse_leading_coeff]
@[simp] lemma coeff_one_reverse (f : R[X]) : coeff (reverse f) 1 = next_coeff f :=
begin
rw [coeff_reverse, next_coeff],
split_ifs with hf,
{ have : coeff f 1 = 0 := coeff_eq_zero_of_nat_degree_lt (by simp only [hf, zero_lt_one]),
simp [*, rev_at] },
{ rw rev_at_le,
exact nat.succ_le_iff.2 (pos_iff_ne_zero.2 hf) }
end
section eval₂
variables {S : Type*} [comm_semiring S]
lemma eval₂_reverse_mul_pow (i : R →+* S) (x : S) [invertible x] (f : R[X]) :
eval₂ i (⅟x) (reverse f) * x ^ f.nat_degree = eval₂ i x f :=
eval₂_reflect_mul_pow i _ _ f le_rfl
@[simp] lemma eval₂_reverse_eq_zero_iff (i : R →+* S) (x : S) [invertible x] (f : R[X]) :
eval₂ i (⅟x) (reverse f) = 0 ↔ eval₂ i x f = 0 :=
eval₂_reflect_eq_zero_iff i x _ _ le_rfl
end eval₂
end semiring
section ring
variables {R : Type*} [ring R]
@[simp] lemma reflect_neg (f : R[X]) (N : ℕ) :
reflect N (- f) = - reflect N f :=
by rw [neg_eq_neg_one_mul, ←C_1, ←C_neg, reflect_C_mul, C_neg, C_1, ←neg_eq_neg_one_mul]
@[simp] lemma reflect_sub (f g : R[X]) (N : ℕ) :
reflect N (f - g) = reflect N f - reflect N g :=
by rw [sub_eq_add_neg, sub_eq_add_neg, reflect_add, reflect_neg]
@[simp] lemma reverse_neg (f : R[X]) :
reverse (- f) = - reverse f :=
by rw [reverse, reverse, reflect_neg, nat_degree_neg]
end ring
end polynomial
|
module MyInterpolations
include("lin_interp.jl")
export MyLinInterp
end # module
|
import base64
from collections import deque, namedtuple
from typing import Iterable
from numpy import transpose
from set1.break_single_byte_xor import break_single_byte_xor
from util.hamming_distance import hamming_dist_bit
MIN_KEYSIZE = 2
MAX_KEYSIZE = 40
MAX_KEYSIZE_CANDIDATES = 3
def break_xor(bs: bytes,
min_keysize: int = MIN_KEYSIZE,
max_keysize: int = MAX_KEYSIZE):
Candidate = namedtuple('Candidate', 'hamming_dist, keysize')
keysize_candidates = deque(maxlen=MAX_KEYSIZE_CANDIDATES)
for keysize in range(min_keysize, max_keysize + 1):
first = bs[:keysize]
second = bs[keysize:(keysize*2)]
hamming = hamming_dist_bit(first, second)
normalized_hamming = hamming / keysize
if (len(keysize_candidates) < MAX_KEYSIZE_CANDIDATES
or normalized_hamming
< max(map(lambda c: c.hamming_dist, keysize_candidates))):
keysize_candidates.append(Candidate(normalized_hamming, keysize))
for keysize in sorted(map(lambda c: c.keysize, keysize_candidates)):
break_xor_with_keysize(bs, keysize)
def break_xor_with_keysize(bs: bytes, keysize: int) -> str:
key = []
for transposed in transpose(to_chunks(bs, keysize)):
r = break_single_byte_xor(transposed)
print(r)
return ''.join(key)
def to_chunks(bs: bytes, keysize: int) -> Iterable[bytes]:
return [bs[i:i+keysize] for i in range(0, len(bs), keysize)]
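# Rough usage sketch: break_xor ranks candidate key sizes by the normalized Hamming
# distance between the first two keysize-long blocks of ciphertext and keeps a few of
# the most promising sizes; break_xor_with_keysize then transposes the ciphertext into
# one column per key byte and attacks each column as single-byte XOR.
# The __main__ block below assumes a base64-encoded ciphertext file named '6.txt'.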
if __name__ == '__main__':
assert hamming_dist_bit(bytearray('this is a test', 'utf-8'),
bytearray('wokka wokka!!!', 'utf-8')) == 37
filename = '6.txt'
with open(filename) as f:
byte_str = base64.b64decode(f.read())
break_xor(byte_str)
|
If $r$ is the remainder of the division of a polynomial $p$ by the square $q^2$ of a polynomial $q$, then the remainder of the division of $p$ by $q$ is the remainder of the division of $r$ by $q$.
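A quick worked check: with $p = x^3$ and $q = x - 1$, dividing $p$ by $q^2 = x^2 - 2x + 1$ leaves the remainder $r = 3x - 2$, and both $r$ and $p$ leave remainder $1$ upon division by $q$.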
(* IsaGeoCoq2_R1
Tarski_Archimedes.thy
Version 2.2.0 IsaGeoCoq2_R1, Port part of GeoCoq 3.4.0
[X] equivalence Grad (function) \<longleftrightarrow> GradI (induction)
[X] local: smt \<longrightarrow> metis/meson
[x] angle_archimedes.v
Version 2.1.0 IsaGeoCoq2_R1, Port part of GeoCoq 3.4.0
Copyright (C) 2021-2023 Roland Coghetto roland.coghetto ( a t ) cafr-msa2p.be
License: LGPL
History
Version 1.0.0: IsaGeoCoq
Port part of GeoCoq 3.4.0 (https://geocoq.github.io/GeoCoq/) in Isabelle/HOL (Isabelle2021)
Copyright (C) 2021 Roland Coghetto roland_coghetto (at) hotmail.com
License: LGPL
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*)
(*>*)
theory Tarski_Archimedes
imports
Tarski_Neutral
begin
(*>*)
context Tarski_neutral_dimensionless
begin
subsection "Graduation"
subsubsection "Définitions"
definition PreGrad :: "TPoint \<Rightarrow> TPoint \<Rightarrow> TPoint \<Rightarrow> TPoint \<Rightarrow> bool" where
"PreGrad A B C D \<equiv> (A \<noteq> B \<and> Bet A B C \<and> Bet A C D \<and> Cong A B C D)"
fun Sym :: "TPoint \<Rightarrow> TPoint \<Rightarrow> TPoint \<Rightarrow> TPoint" where
"Sym A B C = (if (A \<noteq> B \<and> Bet A B C) then
(SOME x::TPoint. PreGrad A B C x)
else
A)"
fun Gradn :: "[TPoint,TPoint] \<Rightarrow> nat \<Rightarrow>TPoint"where
"Gradn A B n = (if (A = B) then
A
else
(if (n = 0) then
A
else
(if (n = 1) then
B
else
(Sym A B (Gradn A B (n-1))))))"
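(* Informally, Gradn A B n is the n-th graduation point on the ray from A through B:
   Gradn A B 0 = A, Gradn A B 1 = B, and each further step extends the segment by one
   more copy of A B, so for A \<noteq> B the point Gradn A B 2 is the C with B Midpoint A C. *)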
definition Grad :: "[TPoint,TPoint,TPoint] \<Rightarrow> bool" where
"Grad A B C \<equiv> \<exists> n. (n \<noteq> 0) \<and> (C = Gradn A B n)"
definition Reach :: "[TPoint,TPoint,TPoint,TPoint] \<Rightarrow> bool" where
"Reach A B C D \<equiv> \<exists> B'. Grad A B B' \<and> C D Le A B'"
definition Grad2 :: "[TPoint,TPoint,TPoint,TPoint,TPoint,TPoint] \<Rightarrow> bool" where
"Grad2 A B C D E F \<equiv> \<exists> n. (n \<noteq> 0) \<and> (C = Gradn A B n) \<and> (F = Gradn D E n)"
fun SymR :: "TPoint \<Rightarrow> TPoint \<Rightarrow> TPoint" where
"SymR A B = (SOME x::TPoint. B Midpoint A x)"
fun GradExpn :: "TPoint \<Rightarrow> TPoint \<Rightarrow> nat \<Rightarrow> TPoint" where
"(GradExpn A B n) = (if (A = B) then
A
else
(if (n = 0) then
A
else
(if (n = 1) then
B
else
(SymR A (GradExpn A B (n-1))))))"
definition GradExp :: "TPoint \<Rightarrow> TPoint \<Rightarrow> TPoint \<Rightarrow> bool" where
"GradExp A B C \<equiv> \<exists> n. (n \<noteq> 0) \<and> C = GradExpn A B n"
definition GradExp2 :: "[TPoint,TPoint,TPoint,TPoint,TPoint,TPoint] \<Rightarrow> bool" where
"GradExp2 A B C D E F \<equiv> \<exists> n. (n \<noteq> 0) \<and> (C = GradExpn A B n) \<and> (F = GradExpn D E n)"
fun MidR :: "TPoint \<Rightarrow> TPoint \<Rightarrow> TPoint" where
"MidR A B = (SOME x. x Midpoint A B)"
(* This could be reduced further: the branch
(if (n = 1) then (MidR A B)
else (MidR A (GradExpInvn A B (n-1))))
could be replaced by
(MidR A (GradExpInvn A B (n-1)))
since for n - 1 = 0 the latter already equals MidR A B.
*)
fun GradExpInvn :: "TPoint \<Rightarrow> TPoint \<Rightarrow> nat \<Rightarrow> TPoint" where
"(GradExpInvn A B n) = (if (A = B) then
A
else
(if (n = 0) then
B
else
(if (n = 1) then
(MidR A B)
else
(MidR A (GradExpInvn A B (n-1))))))"
definition GradExpInv :: "TPoint \<Rightarrow> TPoint \<Rightarrow> TPoint \<Rightarrow> bool" where
"GradExpInv A B C \<equiv> \<exists> n. B = GradExpInvn A C n"
subsubsection "Continuity Axioms"
definition archimedes_axiom ::
"bool"
("ArchimedesAxiom") where
"archimedes_axiom \<equiv> \<forall> A B C D::TPoint.
A \<noteq> B \<longrightarrow> Reach A B C D"
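(* In words: Reach A B C D says that C D is at most as long as A B' for some graduation
   point B' of the segment A B, so the Archimedean axiom asserts that, for A \<noteq> B,
   every segment C D is at most as long as some multiple of A B. *)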
definition greenberg_s_axiom ::
"bool"
("GreenBergsAxiom")
where
"greenberg_s_axiom \<equiv> \<forall> P Q R A B C.
\<not> Col A B C \<and> Acute A B C \<and> Q \<noteq> R \<and> Per P Q R \<longrightarrow>
(\<exists> S. P S Q LtA A B C \<and> Q Out S R)"
definition aristotle_s_axiom ::
"bool"
("AristotleAxiom") where
"aristotle_s_axiom \<equiv> \<forall> P Q A B C.
\<not> Col A B C \<and> Acute A B C \<longrightarrow>
(\<exists> X Y. B Out A X \<and> B Out C Y \<and> Per B X Y \<and> P Q Lt X Y)"
definition Axiom1:: "bool" where "Axiom1 \<equiv> \<forall> A B C D.
(\<exists> I. Col I A B \<and> Col I C D) \<or> \<not> (\<exists> I. Col I A B \<and> Col I C D)"
subsubsection "Propositions"
lemma PreGrad_lem1:
assumes "A \<noteq> B" and
"Bet A B C"
shows "\<exists> x. PreGrad A B C x"
by (meson PreGrad_def assms(1) assms(2) not_cong_3412 segment_construction)
lemma PreGrad_uniq:
assumes "PreGrad A B C x" and
"PreGrad A B C y"
shows "x = y"
by (metis (no_types, lifting) PreGrad_def assms(1) assms(2)
bet_neq12__neq between_cong_3 cong_inner_transitivity)
lemma Diff_Mid__PreGrad:
assumes "A \<noteq> B" and
"B Midpoint A C"
shows "PreGrad A B B C"
by (simp add: PreGrad_def assms(1) assms(2) between_trivial midpoint_bet midpoint_cong)
lemma Diff_Mid_Mid_PreGrad:
assumes "A \<noteq> B" and
"B Midpoint A C" and
"C Midpoint B D"
shows "PreGrad A B C D"
proof -
have "Bet A B C"
using Midpoint_def assms(2) by presburger
moreover have "Bet A C D"
using Midpoint_def assms(3) calculation is_midpoint_id outer_transitivity_between2 by blast
moreover have "Cong A B C D"
using assms(2) assms(3) cong_transitivity midpoint_cong by blast
ultimately show ?thesis
by (simp add: assms(1) PreGrad_def)
qed
lemma Sym_Diff__Diff:
assumes "Sym A B C = D" and
"A \<noteq> D"
shows "A \<noteq> B"
using assms(1) assms(2) by force
lemma Sym_Refl:
"Sym A A A = A"
by simp
lemma Diff_Mid__Sym:
assumes "A \<noteq> B" and
"B Midpoint A C"
shows "Sym A B B = C"
using someI_ex by (metis Diff_Mid__PreGrad Sym.elims PreGrad_uniq
assms(1) assms(2) between_trivial)
lemma Mid_Mid__Sym:
assumes "A \<noteq> B" and
"B Midpoint A C" and
"C Midpoint B D"
shows "Sym A B C = D"
proof -
have "PreGrad A B C D"
by (simp add: Diff_Mid_Mid_PreGrad assms(1) assms(2) assms(3))
thus ?thesis
using someI_ex assms(1)
by (metis PreGrad_uniq Sym.elims assms(2) midpoint_bet)
qed
lemma Sym_Bet__Bet_Bet:
assumes "Sym A B C = D" and
"A \<noteq> B" and
"Bet A B C"
shows "Bet A B D \<and> Bet A C D"
proof -
have "(SOME x::TPoint. PreGrad A B C x) = D"
using assms(1) assms(2) assms(3) by auto
hence "PreGrad A B C D"
by (metis PreGrad_lem1 assms(2) assms(3) someI2)
thus ?thesis
by (meson PreGrad_def between_exchange4)
qed
lemma Sym_Bet__Cong:
assumes "Sym A B C = D" and
"A \<noteq> B" and
"Bet A B C"
shows "Cong A B C D"
proof -
have "(SOME x::TPoint. PreGrad A B C x) = D"
using assms(1) assms(2) assms(3) by auto
hence "PreGrad A B C D"
by (metis PreGrad_lem1 assms(2) assms(3) someI2)
thus ?thesis
by (meson PreGrad_def between_exchange4)
qed
lemma LemSym_aux:
assumes "A \<noteq> B" and
"Bet A B C" and
"Bet A C D" and
"Cong A B C D"
shows "Sym A B C = D"
proof -
have "PreGrad A B C D"
using PreGrad_def assms(1) assms(2) assms(3) assms(4) by blast
thus ?thesis
by (metis PreGrad_def PreGrad_uniq Sym_Bet__Bet_Bet Sym_Bet__Cong)
qed
lemma Lem_Gradn_id_n:
"Gradn A A n = A"
by simp
lemma Lem_Gradn_0:
"Gradn A B 0 = A"
by simp
lemma Lem_Gradn_1:
"Gradn A B 1 = B"
by simp
lemma Diff__Gradn_Sym:
assumes "A \<noteq> B" and
"n > 1"
shows "Gradn A B n = Sym A B (Gradn A B (n-1))"
proof -
have "\<not> (n = 0 \<and> n = 1)"
by auto
thus ?thesis
using assms(1) assms(2) by simp
qed
lemma Diff__Bet_Gradn_Suc:
assumes "A \<noteq> B"
shows "Bet A B (Gradn A B (Suc n))"
proof (induction n)
case 0
hence "Gradn A B (Suc 0) = B"
using assms(1) by simp
thus ?case
by (simp add: between_trivial)
next
case (Suc n)
{
assume 1: "Bet A B (Gradn A B (Suc n))"
have "Gradn A B (Suc (Suc n)) = Sym A B (Gradn A B (Suc n))"
by simp
hence "Bet A B (Gradn A B (Suc (Suc n))) \<and>
Bet A (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
using 1 Sym_Bet__Bet_Bet assms by presburger
hence "Bet A B (Gradn A B (Suc (Suc n)))"
by simp
}
thus ?case
using Suc.IH by blast
qed
lemma Diff_Le_Gradn_Suc:
assumes "A \<noteq> B"
shows "A B Le A (Gradn A B (Suc n))"
by (meson Diff__Bet_Gradn_Suc assms bet__le1213)
lemma Diff__Bet_Gradn:
assumes "A \<noteq> B" and
"n \<noteq> 0"
shows "Bet A B (Gradn A B n)"
using assms(1) assms(2) Diff__Bet_Gradn_Suc not0_implies_Suc by blast
lemma Diff_Le_Gradn_n:
assumes "A \<noteq> B" and
"n \<noteq> 0"
shows "A B Le A (Gradn A B n)"
by (meson Diff__Bet_Gradn assms(1) assms(2) l5_12_a)
lemma Diff_Bet_Gradn_Suc_Gradn_Suc2:
assumes "A \<noteq> B"
shows "Bet A (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
proof (induction n)
case 0
hence 1: "Gradn A B (Suc 0) = B"
using assms(1) by simp
from assms(1)
have "(Gradn A B (Suc (Suc 0))) = (Sym A B (Gradn A B (Suc 0)))"
by simp
thus ?case
by (metis "1" Diff__Bet_Gradn_Suc assms)
next
case (Suc n)
{
assume 1: "Bet A (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
have "Gradn A B (Suc (Suc n)) = Sym A B (Gradn A B (Suc n))"
by simp
hence "Bet A B (Gradn A B (Suc (Suc n)))"
using Diff__Bet_Gradn_Suc assms by blast
have "Gradn A B (Suc(Suc (Suc n))) = Sym A B (Gradn A B (Suc(Suc n)))"
by simp
hence "PreGrad A B (Gradn A B (Suc(Suc n))) (Gradn A B (Suc(Suc (Suc n))))"
using PreGrad_def Sym_Bet__Bet_Bet Sym_Bet__Cong
\<open>Bet A B (Gradn A B (Suc (Suc n)))\<close> assms by presburger
hence "Bet A B (Gradn A B (Suc(Suc (Suc n)))) \<and>
Bet A (Gradn A B (Suc(Suc n))) (Gradn A B (Suc(Suc (Suc n))))"
by (metis Diff__Bet_Gradn_Suc Sym_Bet__Bet_Bet
\<open>Gradn A B (Suc (Suc (Suc n))) = Sym A B (Gradn A B (Suc (Suc n)))\<close> assms)
hence "Bet A (Gradn A B (Suc(Suc n))) (Gradn A B (Suc(Suc (Suc n))))"
by blast
}
thus ?case
using Suc.IH by blast
qed
lemma Diff__Bet_Gradn_Gradn_SucA:
assumes "A \<noteq> B"
shows "A (Gradn A B (Suc n)) Le A (Gradn A B (Suc (Suc n)))"
by (meson Diff_Bet_Gradn_Suc_Gradn_Suc2 assms bet__le1213)
lemma Diff__Bet_Gradn_Gradn_Suc:
assumes "A \<noteq> B"
shows "Bet A (Gradn A B n) (Gradn A B (Suc n))"
proof (induction n)
case 0
hence "Gradn A B 0 = A" by simp
thus ?case
using between_trivial2 by presburger
next
case (Suc n)
{
assume 1: "Bet A (Gradn A B n) (Gradn A B (Suc n))"
have "Gradn A B (Suc (Suc n)) = Sym A B (Gradn A B (Suc n))"
by simp
hence "Bet A B (Gradn A B (Suc (Suc n))) \<and>
Bet A (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
using 1 Sym_Bet__Bet_Bet assms Diff__Bet_Gradn_Suc by presburger
hence "Bet A (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
by blast
}
thus ?case
using Suc.IH by simp
qed
lemma Bet_Gradn_Gradn_Suc:
shows "Bet A (Gradn A B n) (Gradn A B (Suc n))"
by (metis Lem_Gradn_id_n Diff__Bet_Gradn_Gradn_Suc not_bet_distincts)
lemma Gradn_Le_Gradn_Suc:
shows "A (Gradn A B n) Le A (Gradn A B (Suc n))"
using Bet_Gradn_Gradn_Suc bet__le1213 by blast
lemma Bet_Gradn_Suc_Gradn_Suc2:
shows "Bet B (Gradn A B (Suc n)) (Gradn A B (Suc(Suc n)))"
by (metis Bet_Gradn_Gradn_Suc Diff__Bet_Gradn_Suc between_exchange3)
lemma Gradn_Suc_Le_Gradn_Suc2:
shows "B (Gradn A B (Suc n)) Le B (Gradn A B (Suc(Suc n)))"
using Bet_Gradn_Suc_Gradn_Suc2 bet__le1213 by blast
lemma Diff_Le__Bet_Gradn_Plus:
assumes "A \<noteq> B" and
"n \<le> m"
shows "Bet A (Gradn A B n) (Gradn A B (k + n))"
proof (induction k)
case 0
thus ?case
using between_trivial by auto
next
case (Suc k)
{
assume "Bet A (Gradn A B n) (Gradn A B (k + n))"
have "Bet A (Gradn A B (k + n)) (Gradn A B (Suc (k + n)))"
using Diff__Bet_Gradn_Gradn_Suc assms(1) by presburger
hence "Bet A (Gradn A B n) (Gradn A B ((Suc k) + n))"
by (metis \<open>Bet A (Gradn A B n) (Gradn A B (k + n))\<close>
add_Suc between_exchange4)
}
thus ?case
using Suc.IH by blast
qed
lemma Diff_Le_Gradn_Plus:
assumes "A \<noteq> B" and
"n \<le> m"
shows "A (Gradn A B n) Le A (Gradn A B (k + n))"
by (meson Diff_Le__Bet_Gradn_Plus assms(1) assms(2) l5_12_a)
lemma Diff_Le_Bet__Gradn_Gradn:
assumes "A \<noteq> B" and
"n \<le> m"
shows "Bet A (Gradn A B n) (Gradn A B m)"
proof (cases "n = 0")
case True
thus ?thesis
using Lem_Gradn_0 between_trivial2 by presburger
next
case False
hence 1: "n \<noteq> 0"
by auto
show "Bet A (Gradn A B n) (Gradn A B m)"
proof (cases "n = m")
case True
thus ?thesis
using between_trivial by presburger
next
case False
hence "n < m"
using assms(2) le_neq_implies_less by blast
then obtain k where "m = k + n"
using add.commute assms(2) le_Suc_ex by blast
have "Bet A (Gradn A B n) (Gradn A B (k + n))"
using Diff_Le__Bet_Gradn_Plus assms(1) by blast
thus ?thesis
using \<open>m = k + n\<close> by blast
qed
qed
lemma Diff_Le_Gradn:
assumes "A \<noteq> B" and
"n \<le> m"
shows "A (Gradn A B n) Le A (Gradn A B m)"
by (metis Diff_Le_Bet__Gradn_Gradn bet__le1213 assms(1) assms(2))
lemma Diff__Cong_Gradn_Suc_Gradn_Suc2:
assumes "A \<noteq> B"
shows "Cong A B (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
proof (induction n)
case 0
hence 1: "Gradn A B (Suc 0) = B"
using assms(1) by simp
from assms(1)
have "(Gradn A B (Suc (Suc 0))) = (Sym A B (Gradn A B (Suc 0)))" by simp
hence "(Gradn A B (Suc (Suc 0))) = (Sym A B B)"
using "1" by presburger
obtain C where "B Midpoint A C"
using symmetric_point_construction by blast
hence "C = Sym A B B"
using Diff_Mid__Sym assms by blast
hence "(Gradn A B (Suc (Suc 0))) = C"
using "1" \<open>Gradn A B (Suc (Suc 0)) = Sym A B (Gradn A B (Suc 0))\<close> by presburger
have "Cong A B (Gradn A B (Suc 0)) (Gradn A B (Suc (Suc 0)))"
using "1" \<open>B Midpoint A C\<close> \<open>Gradn A B (Suc (Suc 0)) = C\<close> midpoint_cong
by presburger
thus ?case
by blast
next
case (Suc n)
{
assume "Cong A B (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
have 1: "Gradn A B (Suc(Suc (Suc n))) = Sym A B (Gradn A B (Suc(Suc n)))"
by simp
have "Bet A B (Gradn A B (Suc (Suc n)))"
using Diff__Bet_Gradn_Suc assms by blast
hence "PreGrad A B (Gradn A B (Suc(Suc n))) (Gradn A B (Suc(Suc (Suc n))))"
using 1 assms
by (metis PreGrad_def Sym_Bet__Cong Diff_Bet_Gradn_Suc_Gradn_Suc2)
hence "Cong A B (Gradn A B (Suc (Suc n))) (Gradn A B (Suc(Suc (Suc n))))"
using "1" Sym_Bet__Cong \<open>Bet A B (Gradn A B (Suc (Suc n)))\<close> assms
by presburger
}
thus ?case
using Suc.IH by blast
qed
lemma Cong_Gradn_Suc_Gradn_Suc2:
shows "Cong A B (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
using Diff__Cong_Gradn_Suc_Gradn_Suc2 cong_reflexivity by auto
lemma Cong_Gradn_Gradn_Suc:
shows "Cong a b (Gradn a b n) (Gradn a b (Suc n))"
proof (cases "a = b")
case True
thus ?thesis
by (simp add: cong_trivial_identity)
next
case False
have 1: "Gradn a b 0 = a"
by auto
have 2: "(Gradn a b (Suc 0)) = b"
by auto
{
assume 3: "n = 0"
hence "Cong a b a b \<and> (Suc n = 1)"
by (simp add: cong_reflexivity)
hence "Cong a b (Gradn a b n) (Gradn a b (Suc n))"
using 1 2 3 by presburger
}
moreover
{
assume "n \<noteq> 0"
hence "Cong a b (Gradn a b n) (Gradn a b (Suc n))"
by (metis False Diff__Cong_Gradn_Suc_Gradn_Suc2 old.nat.exhaust)
}
ultimately
show ?thesis
by blast
qed
lemma Diff_Bet_Bet_Cong_Gradn_Suc:
assumes "A \<noteq> B" and
"Bet A B C" and
"Bet A (Gradn A B n) C" and
"Cong A B (Gradn A B n) C"
shows "C = (Gradn A B (Suc n))"
proof (cases "n = 0")
case True
thus ?thesis
by (metis Lem_Gradn_0 Diff__Bet_Gradn_Suc Cong_Gradn_Gradn_Suc
assms(1) assms(2) assms(4) between_cong)
next
case False
hence "Bet A B (Gradn A B n)"
using Diff__Bet_Gradn assms(1) by blast
thus ?thesis
by (metis LemSym_aux Diff__Bet_Gradn_Gradn_Suc Cong_Gradn_Gradn_Suc
assms(1) assms(3) assms(4))
qed
lemma grad_rec_0_1:
shows "Cong a b (Gradn a b 0) (Gradn a b 1)"
by (simp add: cong_reflexivity)
lemma grad_rec_1_2:
shows "Cong a b (Gradn a b 1) (Gradn a b 2)"
by (metis Cong_Gradn_Gradn_Suc Suc_1)
lemma grad_rec_2_3:
shows "Cong a b (Gradn a b 2) (Gradn a b 3)"
proof (cases "a = b")
case True
thus ?thesis
using Lem_Gradn_id_n cong_reflexivity by presburger
next
case False
thus ?thesis
using Diff__Cong_Gradn_Suc_Gradn_Suc2 numeral_2_eq_2 numeral_3_eq_3
by presburger
qed
lemma grad_rec_a_a:
shows "(Gradn a a n) = a"
by simp
lemma Gradn_uniq_aux_1:
assumes "A \<noteq> B"
shows "Gradn A B n \<noteq> Gradn A B (Suc n)"
proof -
have "Gradn A B 0 \<noteq> Gradn A B (Suc 0)"
by (simp add: assms)
moreover
have "n > 0 \<longrightarrow> (Gradn A B n \<noteq> Gradn A B (Suc n))"
by (metis Diff__Cong_Gradn_Suc_Gradn_Suc2 assms cong_diff_2 gr0_implies_Suc)
ultimately
show ?thesis
by blast
qed
lemma Gradn_uniq_aux_1_aa:
assumes "A \<noteq> B"
shows "Gradn A B (k + n) \<noteq> Gradn A B (k + (Suc n))"
proof (induction k)
case 0
show "Gradn A B (0 + n) \<noteq> Gradn A B (0 + (Suc n))"
using Gradn_uniq_aux_1 assms plus_nat.add_0 by presburger
next
case (Suc k)
show "Gradn A B (Suc k + n) \<noteq> Gradn A B (Suc k + (Suc n))"
using Gradn_uniq_aux_1 add_Suc_right assms by presburger
qed
lemma Gradn_uniq_aux_1_bb:
assumes "A \<noteq> B"
shows "Gradn A B (k + n) \<noteq> Gradn A B (k + (Suc (Suc n)))"
proof (induction k)
case 0
show "Gradn A B (0 + n) \<noteq> Gradn A B (0 + (Suc(Suc n)))"
by (metis Gradn_uniq_aux_1 Diff__Bet_Gradn_Gradn_Suc add.left_neutral
assms between_equality_2)
next
case (Suc k)
show "Gradn A B ((Suc k) + n) \<noteq> Gradn A B ((Suc k) + (Suc(Suc n)))"
by (metis Gradn_uniq_aux_1_aa Diff_Bet_Gradn_Suc_Gradn_Suc2 add_Suc
add_Suc_shift assms between_equality_2)
qed
lemma Gradn_aux_1_0:
assumes "A \<noteq> B"
shows "Gradn A B (Suc n) \<noteq> A"
by (metis Diff__Bet_Gradn_Suc assms bet_neq32__neq)
lemma Gradn_aux_1_1:
assumes "A \<noteq> B" and
"n \<noteq> 0"
shows "Gradn A B (Suc n) \<noteq> B"
proof -
obtain m where "n = Suc m"
using assms(2) not0_implies_Suc by blast
have "Gradn A B (Suc(Suc m)) \<noteq> B"
proof (induction m)
show "Gradn A B (Suc(Suc 0)) \<noteq> B"
by (metis Gradn_uniq_aux_1 Diff__Bet_Gradn_Suc
Diff_Bet_Gradn_Suc_Gradn_Suc2 assms(1) between_equality_2)
next
fix m
assume "Gradn A B (Suc(Suc m)) \<noteq> B"
thus "Gradn A B (Suc(Suc(Suc m))) \<noteq> B"
by (metis Diff__Bet_Gradn_Suc Diff_Bet_Gradn_Suc_Gradn_Suc2
assms(1) between_equality_2)
qed
thus ?thesis
by (simp add: \<open>n = Suc m\<close>)
qed
lemma Gradn_aux_1_1_bis:
assumes "A \<noteq> B" and
"n \<noteq> 1"
shows "Gradn A B n \<noteq> B"
proof (cases "n = 0")
case True
thus ?thesis
using Lem_Gradn_0 assms(1) by presburger
next
case False
then obtain m where "n = Suc m"
using not0_implies_Suc by presburger
hence "m \<noteq> 0"
using assms(2) by force
thus ?thesis
using Gradn_aux_1_1 assms(1) \<open>n = Suc m\<close> by blast
qed
lemma Gradn_aux_1_2:
assumes "A \<noteq> B" and
"Gradn A B n = A"
shows "n = 0"
proof -
{
assume "n \<noteq> 0"
then obtain m where "n = Suc m"
using not0_implies_Suc by presburger
hence "Gradn A B n \<noteq> A"
using Gradn_aux_1_0 assms(1) by blast
hence "False"
using Gradn_aux_1_0 assms(1) assms(2) by blast
}
thus ?thesis by blast
qed
lemma Gradn_aux_1_3:
assumes "A \<noteq> B" and
"Gradn A B n = B"
shows "n = 1"
using Gradn_aux_1_1_bis assms(1) assms(2) by blast
lemma Gradn_uniq_aux_2_a:
assumes "A \<noteq> B" and
"n \<noteq> 0"
shows "Gradn A B 0 \<noteq> Gradn A B n"
by (metis Gradn_aux_1_2 Lem_Gradn_0 assms(1) assms(2))
lemma Gradn_uniq_aux_2:
assumes "A \<noteq> B" and
"n < m"
shows "Gradn A B n \<noteq> Gradn A B m"
proof -
obtain k where "m = (Suc k) + n"
by (metis Suc_diff_Suc add.commute add_diff_cancel_left'
assms(2) less_imp_add_positive)
have "Gradn A B n \<noteq> Gradn A B ((Suc k) + n)"
proof (induction k)
case 0
have "0 + n = n"
by simp
thus "Gradn A B n \<noteq> Gradn A B ((Suc 0) + n)"
using Gradn_uniq_aux_1_aa assms(1) by (metis add.commute)
next
case (Suc k)
hence "Gradn A B n \<noteq> Gradn A B ((Suc k) + n)"
by blast
have "Suc ((Suc k) + n) = Suc(Suc(k)) + n"
by simp
{
assume "Gradn A B n = Gradn A B ((Suc (Suc k)) + n)"
have "Gradn A B n = Gradn A B ((Suc k) + n)"
proof (cases "n = 0")
case True
thus ?thesis
by (metis Gradn_aux_1_2 Lem_Gradn_0
\<open>Gradn A B n = Gradn A B (Suc (Suc k) + n)\<close> add_cancel_left_right
assms(1) nat_neq_iff zero_less_Suc)
next
case False
have "(Suc k) + n = Suc(k + n)"
by simp
hence "Bet A B (Gradn A B ((Suc k)+n))"
using assms(1) Diff__Bet_Gradn_Suc by presburger
hence "Bet A B (Gradn A B n)"
using assms(1) Diff__Bet_Gradn_Suc
\<open>Gradn A B n = Gradn A B (Suc (Suc k) + n)\<close> add_Suc by presburger
have "Bet A (Gradn A B ((Suc k)+n)) (Gradn A B (Suc((Suc k) +n)))"
using Diff__Bet_Gradn_Gradn_Suc assms(1) by blast
hence "Bet A (Gradn A B ((Suc k)+n)) (Gradn A B n)"
using \<open>Gradn A B n = Gradn A B ((Suc (Suc k)) + n)\<close>
\<open>Suc ((Suc k) + n) = Suc(Suc(k)) + n\<close> by simp
moreover
have "Bet A (Gradn A B n) (Gradn A B ((Suc k) + n))"
using Diff_Le__Bet_Gradn_Plus assms(1) by blast
ultimately
show ?thesis
using between_equality_2 by blast
qed
hence False
using Suc.IH by blast
}
thus "Gradn A B n \<noteq> Gradn A B ((Suc (Suc k)) + n)"
by blast
qed
thus ?thesis
using \<open>m = Suc k + n\<close> by blast
qed
lemma Gradn_uniq:
assumes "A \<noteq> B" and
"Gradn A B n = Gradn A B m"
shows "n = m"
proof -
{
assume "n \<noteq> m"
{
assume "n < m"
hence "False"
using Gradn_uniq_aux_2 assms(1) assms(2) by blast
}
moreover
{
assume "m < n"
hence "False"
by (metis Gradn_uniq_aux_2 assms(1) assms(2))
}
ultimately
have "False"
using \<open>n \<noteq> m\<close> nat_neq_iff by blast
}
thus ?thesis by blast
qed
lemma Gradn_le_suc_1:
shows "A (Gradn A B n) Le A (Gradn A B (Suc n))"
using Bet_Gradn_Gradn_Suc l5_12_a by presburger
lemma Gradn_le_1:
assumes "m \<le> n"
shows "A (Gradn A B m) Le A (Gradn A B (Suc n))"
by (metis Bet_Gradn_Gradn_Suc Lem_Gradn_id_n Diff_Le_Bet__Gradn_Gradn
assms bet__le1213 le_Suc_eq)
lemma Gradn_le_suc_2:
shows "B (Gradn A B (Suc n)) Le B (Gradn A B (Suc(Suc n)))"
by (metis Bet_Gradn_Gradn_Suc Diff__Bet_Gradn_Suc bet__le1213
between_exchange3)
lemma grad_equiv_coq_1:
shows "Grad A B B"
proof -
have "(Gradn A B (Suc 0)) = B"
by auto
thus ?thesis
by (metis Grad_def n_not_Suc_n)
qed
lemma grad_aab__ab:
assumes "Grad A A B"
shows "A = B"
proof -
obtain n where "B = Gradn A A n"
using Grad_def assms by blast
thus ?thesis
by simp
qed
lemma grad_stab:
assumes "Grad A B C" and
"Bet A C C'" and
"Cong A B C C'"
shows "Grad A B C'"
proof (cases "A = B")
case True
thus ?thesis
using assms(1) assms(3) cong_reverse_identity by blast
next
case False
obtain n where "n \<noteq> 0 \<and> C = Gradn A B n"
using Grad_def assms(1) by presburger
hence "Bet A B (Gradn A B n)"
using False Diff__Bet_Gradn by blast
hence "Bet A B C"
using \<open>n \<noteq> 0 \<and> C = Gradn A B n\<close> by blast
hence "C' = Gradn A B (Suc n)"
using False Diff_Bet_Bet_Cong_Gradn_Suc \<open>n \<noteq> 0 \<and> C = Gradn A B n\<close>
assms(2) assms(3) between_exchange4 by blast
thus ?thesis
using Grad_def by blast
qed
lemma grad__bet:
assumes "Grad A B C"
shows "Bet A B C"
proof (cases "A = B")
case True
thus ?thesis
by (simp add: between_trivial2)
next
case False
obtain n where "n \<noteq> 0 \<and> C = Gradn A B n"
using Grad_def assms(1) by presburger
hence "Bet A B (Gradn A B n)"
using False Diff__Bet_Gradn by blast
thus ?thesis
using \<open>n \<noteq> 0 \<and> C = Gradn A B n\<close> by blast
qed
lemma grad__col:
assumes "Grad A B C"
shows "Col A B C"
by (simp add: assms bet_col grad__bet)
lemma grad_neq__neq13:
assumes "Grad A B C" and
"A \<noteq> B"
shows "A \<noteq> C"
using assms(1) assms(2) between_identity grad__bet by blast
lemma grad_neq__neq12:
assumes "Grad A B C" and
"A \<noteq> C"
shows "A \<noteq> B"
using Grad_def assms(1) assms(2) grad_rec_a_a by force
lemma grad112__eq:
assumes "Grad A A B"
shows "A = B"
by (meson assms grad_neq__neq12)
lemma grad121__eq:
assumes "Grad A B A"
shows "A = B"
using assms grad_neq__neq13 by blast
lemma grad__le:
assumes "Grad A B C"
shows "A B Le A C"
using assms bet__le1213 grad__bet by blast
lemma grad2_init:
shows "Grad2 A B B C D D"
proof -
have "(B = Gradn A B (Suc 0)) \<and> (D = Gradn C D (Suc 0))"
using One_nat_def Lem_Gradn_1 by presburger
thus ?thesis
using Grad2_def by blast
qed
lemma Grad2_stab:
assumes "Grad2 A B C D E F" and
"Bet A C C'" and
"Cong A B C C'" and
"Bet D F F'" and
"Cong D E F F'"
shows "Grad2 A B C' D E F'"
proof -
obtain n where "(n \<noteq> 0) \<and> (C = Gradn A B n) \<and> (F = Gradn D E n)"
using Grad2_def assms(1) by presburger
have "C' = Gradn A B (Suc n)"
by (metis Diff_Bet_Bet_Cong_Gradn_Suc Lem_Gradn_id_n Diff__Bet_Gradn
\<open>n \<noteq> 0 \<and> C = Gradn A B n \<and> F = Gradn D E n\<close> assms(2) assms(3)
between_exchange4 cong_reverse_identity)
moreover
have "F' = Gradn D E (Suc n)"
by (metis Diff_Bet_Bet_Cong_Gradn_Suc Lem_Gradn_id_n Diff__Bet_Gradn
\<open>n \<noteq> 0 \<and> C = Gradn A B n \<and> F = Gradn D E n\<close> assms(4) assms(5)
between_exchange4 cong_reverse_identity)
ultimately
show ?thesis
using Grad2_def by blast
qed
lemma bet_cong2_grad__grad2_aux_1:
assumes "C = (Gradn A B 0)" and
"Bet D E F" and
"Cong A B D E" and
"Cong B C E F"
shows "F = Gradn D E 2"
proof -
have "(Gradn A B 0) = A"
using Lem_Gradn_0 by blast
hence "A = C"
using assms(1) by auto
hence "Cong D E E F"
using assms(3) assms(4) cong_transitivity not_cong_4312 by blast
thus ?thesis
by (metis Diff_Bet_Bet_Cong_Gradn_Suc Lem_Gradn_1 Suc_1
assms(2) cong_diff_3 grad_rec_1_2)
qed
lemma bet_cong2_grad__grad2_aux_2:
assumes "Bet D E F" and
"Cong A B D E" and
"Cong B (Gradn A B (Suc n)) E F"
shows "F = Gradn D E (Suc n)"
proof -
have "\<forall> A B D E F. (Bet D E F \<and> Cong A B D E \<and>
Cong B (Gradn A B (Suc n)) E F \<longrightarrow> F = Gradn D E (Suc n))"
proof (induction n)
case 0
{
fix A B C D E F
assume 1: "Bet D E F \<and> Cong A B D E \<and> Cong B (Gradn A B (Suc 0)) E F"
hence "Gradn A B (Suc 0) = B"
using One_nat_def Lem_Gradn_1 by presburger
hence "E = F"
by (metis "1" cong_diff_4)
hence "F = Gradn D E (Suc 0)"
by simp
}
thus ?case
by blast
next
case (Suc n)
{
assume 2: "\<forall> A B D E F. (Bet D E F \<and> Cong A B D E \<and>
Cong B (Gradn A B (Suc n)) E F) \<longrightarrow> F = Gradn D E (Suc n)"
{
fix A B D E F
assume "Bet D E F" and
"Cong A B D E" and
"Cong B (Gradn A B (Suc (Suc n))) E F"
have "Cong A B (Gradn A B (Suc n)) (Gradn A B (Suc(Suc n)))"
using Cong_Gradn_Suc_Gradn_Suc2 by auto
have "Bet A (Gradn A B (Suc n)) (Gradn A B (Suc(Suc n)))"
using Bet_Gradn_Gradn_Suc by auto
have "F = Gradn D E (Suc (Suc n))"
proof (cases "A = B")
case True
thus ?thesis
by (metis \<open>Cong A B D E\<close> \<open>Cong B (Gradn A B (Suc (Suc n))) E F\<close>
cong_reverse_identity grad_rec_a_a)
next
case False
have "D \<noteq> E"
using False \<open>Cong A B D E\<close> cong_diff by blast
obtain F' where "Bet D E F'" and "Cong E F' B (Gradn A B (Suc n))"
using segment_construction by fastforce
have "Cong B (Gradn A B (Suc n)) E F'"
using \<open>Cong E F' B (Gradn A B (Suc n))\<close> not_cong_3412 by blast
hence "F' = Gradn D E (Suc n)"
using \<open>Bet D E F'\<close> \<open>Cong A B D E\<close> "2" by blast
thus ?thesis
proof (cases "E = F'")
case True
thus ?thesis
by (metis Diff_Bet_Bet_Cong_Gradn_Suc
\<open>Cong A B (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))\<close> \<open>D \<noteq> E\<close>
\<open>F' = Gradn D E (Suc n)\<close> cong_inner_transitivity
cong_reverse_identity \<open>Bet D E F\<close> \<open>Cong A B D E\<close>
\<open>Cong B (Gradn A B (Suc (Suc n))) E F\<close>
\<open>Cong E F' B (Gradn A B (Suc n))\<close>)
next
case False
have "F = Sym D E (Gradn D E ((Suc(Suc n))-1))"
proof -
have "Bet D E (Gradn D E ((Suc(Suc n))-1))"
using \<open>Bet D E F'\<close> \<open>F' = Gradn D E (Suc n)\<close> by auto
moreover
have "PreGrad D E (Gradn D E ((Suc(Suc n))-1)) F"
proof -
have "Bet D E (Gradn D E (Suc n))"
using Diff__Bet_Gradn_Suc \<open>D \<noteq> E\<close> by blast
hence "Bet D E (Gradn D E ((Suc(Suc n))-1))"
by fastforce
moreover
have "Bet D (Gradn D E (Suc n)) F"
proof -
have "Bet B (Gradn A B (Suc n)) (Gradn A B (Suc(Suc n)))"
by (metis Diff__Bet_Gradn_Suc
\<open>Bet A (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))\<close>
between_exchange3)
moreover
have "Cong B (Gradn A B (Suc n)) E (Gradn D E (Suc n))"
using \<open>Cong B (Gradn A B (Suc n)) E F'\<close>
\<open>F' = Gradn D E (Suc n)\<close> by blast
moreover
have "Cong B (Gradn A B (Suc(Suc n))) E (Gradn D E (Suc(Suc n)))"
proof -
have "Bet A B (Gradn A B (Suc n))"
using Diff__Bet_Gradn_Suc not_bet_distincts by blast
moreover
have "Cong D E (Gradn D E (Suc n)) (Gradn D E (Suc (Suc n)))"
using Cong_Gradn_Suc_Gradn_Suc2 by auto
moreover
have "Bet A (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
using Bet_Gradn_Gradn_Suc by blast
moreover
have "Cong D E (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))"
using \<open>Cong A B (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))\<close>
\<open>Cong A B D E\<close> cong_inner_transitivity by blast
moreover
have "Bet D (Gradn D E (Suc n)) (Gradn D E (Suc (Suc n)))"
using Bet_Gradn_Gradn_Suc by blast
thus ?thesis
using l2_11_b between_exchange3 calculation(2) cong_inner_transitivity
by (meson \<open>Bet B (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))\<close>
\<open>Cong B (Gradn A B (Suc n)) E (Gradn D E (Suc n))\<close>
\<open>Bet D E (Gradn D E (Suc n))\<close>
calculation(3) calculation(4))
qed
moreover
have "E Out (Gradn D E (Suc(Suc n))) (Gradn D E (Suc n))"
by (metis False Diff__Bet_Gradn_Gradn_Suc
\<open>Bet D E (Gradn D E (Suc n))\<close>
\<open>D \<noteq> E\<close> \<open>F' = Gradn D E (Suc n)\<close>
bet_out between_exchange3 l6_6)
ultimately
show ?thesis
using Bet_Gradn_Gradn_Suc Diff__Bet_Gradn_Suc \<open>D \<noteq> E\<close>
construction_uniqueness not_cong_3412
by (metis \<open>Bet D E F\<close> \<open>Cong B (Gradn A B (Suc (Suc n))) E F\<close>)
qed
hence "Bet D (Gradn D E ((Suc(Suc n))-1)) F"
by simp
moreover
have "Cong (Gradn A B (Suc n)) (Gradn A B(Suc(Suc n))) (Gradn D E (Suc n)) F"
proof -
let ?A = "A"
let ?B = "Gradn A B (Suc n)"
let ?C = "Gradn A B (Suc (Suc n))"
let ?A' = "D"
let ?B' = "Gradn D E (Suc n)"
let ?C' = "F"
have "Bet ?A ?B ?C"
using \<open>Bet A (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))\<close> by auto
moreover have "Bet ?A' ?B' ?C'"
using \<open>Bet D (Gradn D E (Suc n)) F\<close> by auto
moreover have "Cong ?A ?B ?A' ?B'"
by (metis Diff__Bet_Gradn_Suc cong_reverse_identity
\<open>Cong A B D E\<close> \<open>Cong E F' B (Gradn A B (Suc n))\<close>
\<open>F' = Gradn D E (Suc n)\<close> l2_11_b not_cong_3412)
moreover have "Cong ?A ?C ?A' ?C'"
by (metis Diff__Bet_Gradn_Suc cong_reverse_identity
\<open>Bet D E F\<close> \<open>Cong A B D E\<close>
\<open>Cong B (Gradn A B (Suc (Suc n))) E F\<close> l2_11_b)
ultimately show ?thesis
using l4_3_1 by blast
qed
hence "Cong A B (Gradn D E (Suc n)) F"
using \<open>Cong A B (Gradn A B (Suc n)) (Gradn A B (Suc (Suc n)))\<close>
cong_transitivity by blast
hence "Cong D E (Gradn D E (Suc n)) F"
using \<open>Cong A B D E\<close> cong_inner_transitivity by blast
hence "Cong D E (Gradn D E ((Suc(Suc n))-1)) F"
by simp
ultimately
show ?thesis
using \<open>D \<noteq> E\<close> PreGrad_def by blast
qed
ultimately
show ?thesis
by (metis PreGrad_def LemSym_aux)
qed
hence "F = Gradn D E (Suc (Suc n))"
by simp
thus ?thesis by blast
qed
qed
}
hence "\<forall> A B D E F. (Bet D E F \<and> Cong A B D E \<and>
Cong B (Gradn A B (Suc(Suc n))) E F) \<longrightarrow> F = Gradn D E (Suc(Suc n))"
by blast
}
thus ?case
using Suc.IH by fastforce
qed
thus ?thesis
using assms(1) assms(2) assms(3) by blast
qed
lemma bet_cong2_grad__grad2_aux:
assumes "n \<noteq> 0" and
"C = (Gradn A B n)" and
"Bet D E F" and
"Cong A B D E" and
"Cong B C E F"
shows "F = Gradn D E n"
proof -
obtain k where "n = Suc k"
using assms(1) not0_implies_Suc by blast
thus ?thesis
using assms(5) assms(2) assms (3) assms(4) bet_cong2_grad__grad2_aux_2 by blast
qed
lemma bet_cong2_grad__grad2:
assumes "Grad A B C" and
"Bet D E F" and
"Cong A B D E" and
"Cong B C E F"
shows "Grad2 A B C D E F"
proof (cases "A = B")
case True
thus ?thesis
by (metis assms(1) assms(4) cong_diff_4 grad2_init grad_neq__neq12)
next
case False
hence "D \<noteq> E"
using assms(3) cong_diff by blast
obtain n where "n \<noteq> 0 \<and> C = (Gradn A B n)"
using Grad_def assms(1) by presburger
have "F = Gradn D E n"
using bet_cong2_grad__grad2_aux \<open>n \<noteq> 0 \<and> C = Gradn A B n\<close>
assms(2) assms(3) assms(4) by blast
thus ?thesis
using \<open>n \<noteq> 0 \<and> C = (Gradn A B n)\<close> Grad2_def by blast
qed
lemma grad2__grad123:
assumes "Grad2 A B C D E F"
shows "Grad A B C"
proof -
obtain n where "(n \<noteq> 0) \<and> (C = Gradn A B n) \<and> (F = Gradn D E n)"
using Grad2_def assms by presburger
thus ?thesis
using Grad_def by blast
qed
lemma grad2__grad456:
assumes "Grad2 A B C D E F"
shows "Grad D E F"
proof -
obtain n where "(n \<noteq> 0) \<and> (C = Gradn A B n) \<and> (F = Gradn D E n)"
using Grad2_def assms by presburger
thus ?thesis
using Grad_def by blast
qed
lemma grad_sum_aux_R1:
assumes
"A C Le A D" and
"Cong A D A E" and
"Cong A C A E'" and
"A Out E E'"
shows "Bet A E' E"
by (meson Out_cases l5_6 assms(2) assms(3) assms(4) assms(1) l6_13_1)
lemma grad_sum_aux_0:
assumes "A \<noteq> B" and
"D = Gradn A B (Suc(Suc n))" and
"Cong A D A E" and
"C = Gradn A B (Suc n)" and
"Cong A C A E'" and
"A Out E E'"
shows "Bet A E' E"
proof -
{
assume "Bet A E E'"
have False
by (metis Gradn_aux_1_0 Gradn_uniq_aux_1
Diff__Bet_Gradn_Gradn_Suc \<open>Bet A E E'\<close>
assms(1) assms(2) assms(3) assms(4) assms(5) assms(6)
between_cong_2 between_equality_2 cong_preserves_bet
cong_transitivity l5_1 l6_6 not_cong_3412)
}
thus ?thesis
using Out_def assms(6) by auto
qed
lemma grad_sum_aux_1:
assumes "A \<noteq> B" and
"D = Gradn A B (Suc(Suc n))" and
"Bet A C E" and
"Cong A D C E" and
"F = Gradn A B (Suc n)" and
"Bet A C E'" and
"Cong A F C E'" and
"C \<noteq> A"
shows "Bet A E' E"
proof -
have "A \<noteq> B \<and> D = Gradn A B (Suc(Suc n)) \<and>
Bet A C E \<and> Cong A D C E \<and>
F = Gradn A B (Suc n) \<and>
Bet A C E' \<and> Cong A F C E' \<and> A \<noteq> C \<longrightarrow>
Bet A E' E"
proof (induction n)
case 0
{
fix A B C D E F
assume "A \<noteq> B" and
"D = Gradn A B (Suc(Suc 0))" and
"Bet A C E" and
"Cong A D C E" and
"F = Gradn A B (Suc 0)" and
"Bet A C E'" and
"Cong A F C E'" and
"A \<noteq> C"
have "F = B"
by (simp add: \<open>F = Gradn A B (Suc 0)\<close>)
have "Bet C E' E"
proof -
have "C Out E' E"
proof -
{
assume "C = E'"
hence "A = F"
using \<open>Cong A F C E'\<close> cong_identity by blast
hence False
using \<open>A \<noteq> B\<close> \<open>F = B\<close> by fastforce
}
hence "C \<noteq> E'"
by blast
moreover
{
assume "C = E"
hence "A = D"
using \<open>Cong A D C E\<close> cong_identity by blast
hence False
by (metis Gradn_aux_1_0 \<open>A \<noteq> B\<close> \<open>D = Gradn A B (Suc (Suc 0))\<close>)
}
hence "C \<noteq> E"
by blast
moreover
have "Bet E' C A"
using \<open>Bet A C E'\<close> Bet_cases by blast
moreover
have "Bet E C A"
using Bet_cases \<open>Bet A C E\<close> by blast
ultimately
show ?thesis
using \<open>A \<noteq> C\<close> l6_2 by metis
qed
moreover
have "A F Le A D"
using \<open>F = Gradn A B (Suc 0)\<close> \<open>D = Gradn A B (Suc(Suc 0))\<close>
Gradn_le_suc_1 by blast
hence "C E' Le C E"
using l5_6 \<open>Cong A D C E\<close> \<open>Cong A F C E'\<close> by blast
ultimately
show ?thesis
using l6_13_1 by blast
qed
hence "Bet A E' E"
by (meson \<open>Bet A C E\<close> between_exchange2)
}
thus ?case
by force
next
case (Suc n)
{
assume 1: "A \<noteq> B \<and> D = Gradn A B (Suc(Suc n)) \<and>
Bet A C E \<and> Cong A D C E \<and>
F = Gradn A B (Suc n) \<and>
Bet A C E' \<and> Cong A F C E' \<and> A \<noteq> C \<longrightarrow>
Bet A E' E"
{
assume "A \<noteq> B" and
"D = Gradn A B (Suc(Suc(Suc n)))" and
"Bet A C E" and
"Cong A D C E" and
"F = Gradn A B (Suc(Suc n))" and
"Bet A C E'"
"Cong A F C E'" and
"A \<noteq> C"
obtain F' where "F' = Gradn A B (Suc n)"
by auto
obtain E'' where "Bet A C E'' \<and> Cong A F' C E''"
by (meson Cong_cases segment_construction)
have "Bet A E'' E'"
proof -
have "A \<noteq> B"
by (simp add: assms(1))
moreover
have "F = Gradn A B (Suc(Suc n))"
by (simp add: \<open>F = Gradn A B (Suc (Suc n))\<close>)
moreover
have "Bet A C E'"
by (simp add: assms(6))
moreover
have "Cong A F C E'"
by (simp add: assms(7))
moreover
have "F' = Gradn A B (Suc n)"
by (simp add: \<open>F' = Gradn A B (Suc n)\<close>)
moreover
have "Bet A C E''"
by (simp add: \<open>Bet A C E'' \<and> Cong A F' C E''\<close>)
moreover
have "Cong A F' C E''"
by (simp add: \<open>Bet A C E'' \<and> Cong A F' C E''\<close>)
moreover
have "A \<noteq> C"
using assms(8) by auto
ultimately
show ?thesis
proof -
{
assume "Bet C E'' E'"
hence ?thesis
using assms(6) between_exchange2 by blast
}
moreover
{
assume "Bet C E' E''"
hence ?thesis
by (metis Diff__Bet_Gradn_Gradn_Suc Gradn_aux_1_0
bet2__out cong_identity_inv cong_preserves_bet
\<open>Bet A C E'' \<and> Cong A F' C E''\<close> \<open>F = Gradn A B (Suc (Suc n))\<close>
\<open>F' = Gradn A B (Suc n)\<close> assms(1) assms(7)
calculation not_bet_distincts)
}
ultimately show ?thesis
by (metis \<open>Bet A C E''\<close> assms(6) assms(8) l5_2)
qed
qed
have "C E'' Le C E'"
using \<open>Bet A C E'' \<and> Cong A F' C E''\<close> \<open>Bet A E'' E'\<close>
bet__le1213 between_exchange3 by blast
have "A (Gradn A B (Suc (Suc n))) Le A (Gradn A B (Suc (Suc (Suc n))))"
using Gradn_Le_Gradn_Suc by blast
hence "A F Le A D"
using \<open>F = Gradn A B (Suc (Suc n))\<close>
\<open>D = Gradn A B (Suc(Suc(Suc n)))\<close> by blast
hence "C E' Le C E"
using assms(4) assms(7) l5_6 by blast
have "Bet A E' E"
proof -
{
assume "Bet A E E'"
hence "A E Le A E'"
using bet__le1213 by auto
hence "C E Le C E'"
using \<open>Bet A E E'\<close> assms(3) bet__le1213 between_exchange3 by blast
hence "Cong C E C E'"
by (simp add: \<open>C E' Le C E\<close> le_anti_symmetry)
hence "Cong A D A F"
by (meson assms(4) assms(7) cong_transitivity not_cong_3412)
hence False
by (metis Gradn_uniq_aux_1 Diff__Bet_Gradn_Gradn_Suc
\<open>D = Gradn A B (Suc (Suc (Suc n)))\<close>
\<open>F = Gradn A B (Suc (Suc n))\<close>
assms(1) between_cong not_cong_3412)
}
thus ?thesis
by (metis assms(3) assms(6) assms(8) l5_1)
qed
}
}
thus ?case
using Suc.IH by fastforce
qed
thus ?thesis
using assms(1) assms(2) assms(3) assms(4) assms(5) assms(6)
assms(7) assms(8) by blast
qed
lemma Grad_sum_aux_1A: (* TODO: rename this lemma and move it earlier *)
assumes "A \<noteq> B" and
"C = Gradn A B (Suc (Suc 0))"
shows "B Midpoint A C"
proof -
have "C = (Sym A B (Gradn A B (Suc 0)))"
by (simp add: assms(2))
have "... = (Sym A B B)"
by (simp add: assms(1))
hence "(SOME x::TPoint. PreGrad A B B x) = C"
using assms(1) by (metis Gradn_aux_1_0 Sym.simps
\<open>C = Sym A B (Gradn A B (Suc 0))\<close> assms(2))
hence "PreGrad A B B C"
by (metis PreGrad_def Sym_Bet__Bet_Bet Sym_Bet__Cong
Sym.elims assms(1) not_bet_distincts)
thus ?thesis
by (simp add: Midpoint_def PreGrad_def)
qed
lemma grad_sum_aux_2:
assumes "A \<noteq> B" and
"D = Gradn A B (Suc(Suc n))" and
"Bet A C E" and
"Cong A D C E" and
"F = Gradn A B (Suc n)" and
"Bet A C E'" and
"Cong A F C E'" and
"C \<noteq> A"
shows "Cong A B E' E"
proof -
have "A \<noteq> B \<and> D = Gradn A B (Suc(Suc n)) \<and> Bet A C E \<and> Cong A D C E \<and>
F = Gradn A B (Suc n) \<and> Bet A C E' \<and> Cong A F C E' \<and> A \<noteq> C \<longrightarrow>
Cong A B E' E"
proof (induction n)
case 0
{
fix A B C D E F
assume "A \<noteq> B" and
"D = Gradn A B (Suc(Suc 0))" and
"Bet A C E" and
"Cong A D C E" and
"F = Gradn A B (Suc 0)" and
"Bet A C E'" and
"Cong A F C E'" and
"A \<noteq> C"
have "B Midpoint A D"
using Grad_sum_aux_1A \<open>A \<noteq> B\<close> \<open>D = Gradn A B (Suc (Suc 0))\<close> by blast
hence "Bet A B D \<and> Cong A B B D"
using Midpoint_def by blast
have "B = F"
using One_nat_def Lem_Gradn_1 \<open>F = Gradn A B (Suc 0)\<close> by presburger
hence "Cong A B C E'"
by (simp add: \<open>Cong A F C E'\<close>)
have "Cong B D C E'"
using \<open>B = F\<close> \<open>Bet A B D \<and> Cong A B B D\<close> \<open>Cong A B C E'\<close>
cong_inner_transitivity by blast
have "Cong B D E' E"
using cong_commutativity cong_diff_2 Tarski_neutral_dimensionless_axioms
\<open>A \<noteq> B\<close> \<open>A \<noteq> C\<close> \<open>B = F\<close> \<open>Bet A B D \<and> Cong A B B D\<close> \<open>Bet A C E'\<close>
\<open>Bet A C E\<close> \<open>Cong A D C E\<close> \<open>Cong A F C E'\<close> bet_out between_symmetry
l6_2 out_cong_cong by metis
hence "Cong C E' E' E"
using \<open>Cong B D C E'\<close> cong_inner_transitivity by blast
hence "Cong A B E' E"
using \<open>B = F\<close> \<open>Cong A F C E'\<close> cong_transitivity by blast
}
thus ?case
by blast
next
case (Suc n)
have "Cong A B E' E"
proof -
have "Bet A F D"
using Diff__Bet_Gradn_Gradn_Suc assms(1) assms(2) assms(5) by blast
have "Cong A B F D"
using Diff__Cong_Gradn_Suc_Gradn_Suc2 assms(1) assms(2)
assms(5) by blast
have "Bet A E' E"
using grad_sum_aux_1 assms(1) assms(2) assms(3) assms(4)
assms(5) assms(6) assms(7) assms(8) by blast
{
assume "Bet C E E'"
hence "Cong A B E' E"
by (metis \<open>Bet A E' E\<close> \<open>Bet A F D\<close> \<open>Cong A B F D\<close>
assms(1) assms(4) assms(6) assms(7) between_cong between_equality_2
between_exchange3 cong_diff cong_transitivity not_cong_3412)
}
moreover
{
assume "Bet C E' E"
hence "Cong A B E' E"
using \<open>Bet A F D\<close> \<open>Cong A B F D\<close> assms(4) assms(7)
cong_transitivity l4_3_1 by blast
}
ultimately show ?thesis
by (metis assms(3) assms(6) assms(8) l5_2)
qed
thus ?case
by blast
qed
thus ?thesis
using assms(1) assms(2) assms(3) assms(4) assms(5) assms(6) assms(7) assms(8) by blast
qed
lemma grad_sum_aux:
assumes "A \<noteq> B" and
"C = Gradn A B (Suc n)" and
"D = Gradn A B (Suc m)" and
"Bet A C E" and
"Cong A D C E"
shows "E = Gradn A B ((Suc n) + (Suc m))"
proof -
have "\<forall> A B C D E. (A \<noteq> B \<and> C = Gradn A B (Suc n) \<and>
D = Gradn A B (Suc m) \<and> Bet A C E \<and> Cong A D C E) \<longrightarrow>
E = Gradn A B ((Suc n) + (Suc m))"
proof (induction m)
case 0
{
fix A B C D E
assume "A \<noteq> B" and
"C = Gradn A B (Suc n)" and
"D = Gradn A B (Suc 0)" and
"Bet A C E" and
"Cong A D C E"
have "E = Gradn A B ((Suc n) + (Suc 0))"
by (metis Nat.add_0_right Diff_Bet_Bet_Cong_Gradn_Suc
Lem_Gradn_0 Diff__Bet_Gradn_Suc Cong_Gradn_Gradn_Suc
cong_transitivity \<open>A \<noteq> B\<close> \<open>Bet A C E\<close> \<open>C = Gradn A B (Suc n)\<close>
\<open>Cong A D C E\<close> \<open>D = Gradn A B (Suc 0)\<close> add_Suc_shift between_exchange4)
}
thus ?case
by force
next
case (Suc m)
{
assume 1: "\<forall> A B C D E. (A \<noteq> B \<and> C = Gradn A B (Suc n) \<and>
D = Gradn A B (Suc m) \<and> Bet A C E \<and> Cong A D C E) \<longrightarrow>
E = Gradn A B ((Suc n) + (Suc m))"
{
fix A B C D E
assume "A \<noteq> B" and
"C = Gradn A B (Suc n)" and
"D = Gradn A B (Suc(Suc m))" and
"Bet A C E" and
"Cong A D C E"
obtain F where 2: "F = Gradn A B (Suc m)"
by simp
obtain E' where 3: "Bet A C E' \<and> Cong A F C E'"
by (meson cong_4312 segment_construction)
hence 4: "E' = Gradn A B ((Suc n) + (Suc m))"
using 1 2 \<open>A \<noteq> B\<close> \<open>C = Gradn A B (Suc n)\<close> by blast
have 5: "Bet A B E"
proof -
have "Grad A B C"
using Grad_def \<open>C = Gradn A B (Suc n)\<close> by blast
hence "Bet A B C"
using grad__bet by blast
thus ?thesis
using \<open>Bet A C E\<close> between_exchange4 by blast
qed
have 6: "Bet A E' E"
using grad_sum_aux_1 \<open>A \<noteq> B\<close> \<open>D = Gradn A B (Suc(Suc m))\<close>
\<open>Bet A C E\<close> \<open>Cong A D C E\<close> 2 3 Gradn_aux_1_0
\<open>C = Gradn A B (Suc n)\<close> by blast
have "Cong A B E' E"
using grad_sum_aux_2 "2" "3" Gradn_aux_1_0 \<open>A \<noteq> B\<close>
\<open>Bet A C E\<close> \<open>C = Gradn A B (Suc n)\<close> \<open>Cong A D C E\<close>
\<open>D = Gradn A B (Suc (Suc m))\<close> by blast
hence "E = Gradn A B (Suc(Suc n) + (Suc m))"
using Diff_Bet_Bet_Cong_Gradn_Suc 3 4 5 6
by (metis \<open>A \<noteq> B\<close> add_Suc_right add_Suc_shift)
hence "E = Gradn A B ((Suc n) + (Suc(Suc m)))"
by simp
}
hence "\<forall> A B C D E. (A \<noteq> B \<and> C = Gradn A B (Suc n) \<and>
D = Gradn A B (Suc(Suc m)) \<and> Bet A C E \<and> Cong A D C E) \<longrightarrow>
E = Gradn A B ((Suc n) + (Suc(Suc m)))" by blast
}
thus ?case
using Suc.IH by fastforce
qed
thus ?thesis
using assms(1) assms(2) assms(3) assms(4) assms(5) by blast
qed
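(** Graduations are closed under addition: if C and D are graduations of A B
and E extends A C by a segment congruent to A D, then E is again a graduation of A B *)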
lemma grad_sum:
assumes "Grad A B C" and
"Grad A B D" and
"Bet A C E" and
"Cong A D C E"
shows "Grad A B E"
proof (cases "A = B")
case True
thus ?thesis
using assms(1) assms(2) assms(3) assms(4) grad112__eq grad_stab by blast
next
case False
obtain n where 1: "(n \<noteq> 0) \<and> (C = Gradn A B n)"
using Grad_def assms(1) by presburger
obtain m where 2: "(m \<noteq> 0) \<and> (D = Gradn A B m)"
using Grad_def assms(2) by presburger
obtain k l where "n = Suc k \<and> m = Suc l"
using 1 2 not0_implies_Suc by presburger
hence "E = Gradn A B (n + m)"
using False 1 2 grad_sum_aux assms(3) assms(4) by blast
thus ?thesis
by (meson Grad_def \<open>n \<noteq> 0 \<and> C = Gradn A B n\<close> add_is_0)
qed
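(** SymR A B is the reflection of A about B, i.e. the point C such that B is the midpoint of A and C *)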
lemma SymR_ex:
assumes "B Midpoint A C"
shows "C = SymR A B"
proof -
have "\<exists> x. B Midpoint A x"
using assms(1) by blast
thus ?thesis
using assms(1) someI_ex
by (metis SymR_uniq_aux SymR.elims)
qed
lemma SymR__midp:
assumes "C = SymR A B"
shows "B Midpoint A C"
using SymR_ex assms symmetric_point_construction by blast
lemma SymR_uniq:
assumes "C = SymR A B" and
"D = SymR A B"
shows "C = D"
by (simp add: assms(1) assms(2))
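(** Basic properties of GradExpn: each step reflects A about the previous point,
so the segment measured from A doubles at every step *)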
lemma GradExpn_1:
shows "GradExpn A A n = A"
by simp
lemma GradExpn_2:
shows "Bet A B (GradExpn A B (Suc n))"
proof (cases "A = B")
case True
thus ?thesis
using between_trivial2 by blast
next
case False
show "Bet A B (GradExpn A B (Suc n))"
proof(induction n)
case 0
have "(GradExpn A B 1) = B"
using False by simp
thus "Bet A B (GradExpn A B (Suc 0))"
by (metis One_nat_def not_bet_distincts)
next
case (Suc n)
{
assume "Bet A B (GradExpn A B (Suc n))"
have "(Suc (Suc n)) \<noteq> 0 \<and> (Suc (Suc n)) \<noteq> 1"
by presburger
hence "GradExpn A B (Suc(Suc n)) = SymR A (GradExpn A B (Suc n))"
using False by simp
obtain C where "(GradExpn A B (Suc n)) Midpoint A C"
using symmetric_point_construction by blast
hence "Bet A (GradExpn A B (Suc n)) C"
using Midpoint_def by blast
have "C = GradExpn A B (Suc(Suc n))"
using SymR_ex
\<open>GradExpn A B (Suc (Suc n)) = SymR A (GradExpn A B (Suc n))\<close>
\<open>GradExpn A B (Suc n) Midpoint A C\<close> by presburger
hence "Bet A B (GradExpn A B (Suc(Suc n)))"
using \<open>Bet A (GradExpn A B (Suc n)) C\<close>
\<open>Bet A B (GradExpn A B (Suc n))\<close> between_exchange4 by blast
}
thus ?case
using Suc.IH by blast
qed
qed
lemma GradExpn_3:
shows "Bet A (GradExpn A B (Suc n)) (GradExpn A B (Suc(Suc n)))"
proof (cases "A = B")
case True
thus ?thesis
using between_trivial2 by force
next
case False
show "Bet A (GradExpn A B (Suc n)) (GradExpn A B (Suc(Suc n)))"
proof(induction n)
case 0
have "(GradExpn A B 1) = B"
using False by simp
thus "Bet A (GradExpn A B (Suc 0)) (GradExpn A B (Suc(Suc 0)))"
using GradExpn_2 One_nat_def by presburger
next
case (Suc n)
{
assume "Bet A (GradExpn A B (Suc n)) (GradExpn A B (Suc(Suc n)))"
have "(Suc(Suc (Suc n))) \<noteq> 0 \<and> (Suc(Suc (Suc n))) \<noteq> 1"
by presburger
hence "GradExpn A B (Suc(Suc(Suc n))) = SymR A (GradExpn A B (Suc(Suc n)))"
using False by simp
obtain C where "(GradExpn A B (Suc(Suc n))) Midpoint A C"
using symmetric_point_construction by blast
hence "Bet A (GradExpn A B (Suc(Suc n))) C"
using Midpoint_def by blast
have "C = GradExpn A B (Suc(Suc(Suc n)))"
using SymR_ex
\<open>GradExpn A B (Suc(Suc (Suc n))) = SymR A (GradExpn A B (Suc(Suc n)))\<close>
\<open>GradExpn A B (Suc(Suc n)) Midpoint A C\<close> by presburger
hence "Bet A (GradExpn A B (Suc (Suc(n)))) (GradExpn A B (Suc(Suc(Suc n))))"
using \<open>Bet A (GradExpn A B (Suc (Suc n))) C\<close> by blast
}
thus ?case
using Suc.IH by blast
qed
qed
lemma GradExpn_4:
shows "Cong A (GradExpn A B (Suc n)) (GradExpn A B (Suc n)) (GradExpn A B (Suc(Suc n)))"
proof (cases "A = B")
case True
thus ?thesis
using GradExpn_1 cong_trivial_identity by presburger
next
case False
show "Cong A (GradExpn A B (Suc n)) (GradExpn A B (Suc n)) (GradExpn A B (Suc(Suc n)))"
proof (induction n)
case 0
have "GradExpn A B (Suc 0) = B"
using False by simp
show "Cong A (GradExpn A B (Suc 0)) (GradExpn A B (Suc 0)) (GradExpn A B (Suc(Suc 0)))"
proof -
have "(Suc(Suc 0)) \<noteq> 0 \<and> (Suc(Suc 0)) \<noteq> 1"
by presburger
hence "GradExpn A B (Suc(Suc 0)) = SymR A (GradExpn A B (Suc 0))"
using False by simp
obtain C where "(GradExpn A B (Suc 0)) Midpoint A C"
using symmetric_point_construction by blast
hence "Cong A (GradExpn A B (Suc 0)) (GradExpn A B (Suc 0)) C"
using Midpoint_def by blast
have "C = GradExpn A B (Suc(Suc 0))"
using SymR_ex \<open>GradExpn A B (Suc (Suc 0)) = SymR A (GradExpn A B (Suc 0))\<close>
\<open>GradExpn A B (Suc 0) Midpoint A C\<close> by presburger
thus ?thesis
using \<open>Cong A (GradExpn A B (Suc 0)) (GradExpn A B (Suc 0)) C\<close> by blast
qed
next
case (Suc n)
{
assume "Cong A (GradExpn A B (Suc n)) (GradExpn A B (Suc n)) (GradExpn A B (Suc(Suc n)))"
have "(Suc(Suc(Suc n))) \<noteq> 0 \<and> (Suc(Suc(Suc n))) \<noteq> 1"
by presburger
hence "GradExpn A B (Suc(Suc(Suc n))) = SymR A (GradExpn A B (Suc(Suc n)))"
using False by simp
obtain C where "(GradExpn A B (Suc(Suc n))) Midpoint A C"
using symmetric_point_construction by blast
hence "Cong A (GradExpn A B (Suc(Suc n))) (GradExpn A B (Suc(Suc n))) C"
using Midpoint_def by blast
have "C = GradExpn A B (Suc(Suc(Suc n)))"
using SymR_ex
\<open>GradExpn A B (Suc(Suc (Suc n))) = SymR A (GradExpn A B (Suc(Suc n)))\<close>
\<open>GradExpn A B (Suc(Suc n)) Midpoint A C\<close> by presburger
hence "Cong A (GradExpn A B (Suc(Suc n)))
(GradExpn A B (Suc(Suc n))) (GradExpn A B (Suc(Suc(Suc n))))"
using \<open>Cong A (GradExpn A B (Suc (Suc n))) (GradExpn A B (Suc (Suc n))) C\<close>
by fastforce
}
thus ?case
using Suc.IH by blast
qed
qed
lemma gradexp_init:
shows "GradExp A B B"
proof -
have "B = GradExpn A B (Suc 0)"
by auto
thus ?thesis
using GradExp_def by blast
qed
lemma gradexp_stab_aux_0:
assumes "C = GradExpn A B 0" and
"Bet A C C'" and
"Cong A C C C'"
shows "C' = GradExpn A B 0"
proof -
obtain m where "m = Suc 0"
by simp
show ?thesis
proof (cases "A = B")
case True
thus ?thesis
by (metis GradExpn_1 assms(1) assms(3) cong_reverse_identity)
next
case False
hence "A \<noteq> B"
by auto
show ?thesis
proof -
have "C = A"
using assms(1) by simp
hence "C = C'"
using assms(3) cong_reverse_identity by blast
thus ?thesis
using assms(1) by fastforce
qed
qed
qed
lemma gradexp_stab_aux_n:
assumes "C = GradExpn A B (Suc n)" and
"Bet A C C'" and
"Cong A C C C'"
shows "C' = GradExpn A B (Suc(Suc n))"
proof -
obtain m where "m = Suc(Suc n)"
by simp
hence "m - 1 = Suc n"
by simp
show ?thesis
proof (cases "A = B")
case True
thus ?thesis
by (metis GradExpn_1 assms(1) assms(3) cong_reverse_identity)
next
case False
hence "A \<noteq> B"
by auto
show ?thesis
proof -
have "C Midpoint A C'"
using assms(2) assms(3) midpoint_def by auto
hence "C' = SymR A C"
using SymR_ex by blast
hence "C' = SymR A (GradExpn A B (m - 1))"
using \<open>m - 1 = Suc n\<close> assms(1) by presburger
hence "C' = GradExpn A B m"
by (simp add: False \<open>m = Suc (Suc n)\<close>)
thus ?thesis
using \<open>m = Suc (Suc n)\<close> by blast
qed
qed
qed
lemma gradexp_stab:
assumes "GradExp A B C" and
"Bet A C C'" and
"Cong A C C C'"
shows "GradExp A B C'"
proof -
obtain n where "C = GradExpn A B n"
using GradExp_def assms(1) by blast
show ?thesis
proof (cases "n = 0")
case True
thus ?thesis
using \<open>C = GradExpn A B n\<close> assms(1) assms(2) assms(3)
gradexp_stab_aux_0 by blast
next
case False
then obtain m where "Suc m = n"
using not0_implies_Suc by auto
hence "C = GradExpn A B (Suc m)"
by (simp add: \<open>C = GradExpn A B n\<close>)
hence "C' = GradExpn A B (Suc(Suc m))"
using assms(2) assms(3) gradexp_stab_aux_n by blast
moreover
have "(Suc (Suc m)) \<noteq> 0"
by simp
ultimately
show ?thesis
using GradExp_def by blast
qed
qed
lemma gradexp__grad_aux_1:
shows "\<forall> C. (C = (GradExpn A A (Suc n)) \<longrightarrow> Grad A A C)"
by (simp add: grad_equiv_coq_1)
lemma gradexp__grad_aux:
assumes "A \<noteq> B"
shows "\<forall> C. (C = (GradExpn A B (Suc n)) \<longrightarrow> Grad A B C)"
proof (induction n)
case 0
{
fix C
assume "C = (GradExpn A B (Suc 0))"
hence "C = B"
by force
hence "Grad A B C"
using grad_equiv_coq_1 by auto
}
hence "(\<forall> C. (C = (GradExpn A B (Suc 0))) \<longrightarrow> Grad A B C)"
by blast
thus ?case
by blast
next
case (Suc n)
{
assume 1: "\<forall> C. (C = (GradExpn A B (Suc n)) \<longrightarrow> Grad A B C)"
{
fix C'
assume "C' = (GradExpn A B ((Suc n) + 1))"
have "A \<noteq> B \<and> ((Suc n) + 1) \<noteq> 0 \<and> ((Suc n)+1) \<noteq> 1"
using assms by auto
then
have "C' = (SymR A (GradExpn A B (Suc n)))"
by (simp add: \<open>C' = GradExpn A B ((Suc n)+1)\<close>)
hence "Grad A B (GradExpn A B (Suc n))"
using "1" by blast
have "Grad A B C'"
proof -
have "Grad A B (GradExpn A B (Suc n))"
using \<open>Grad A B (GradExpn A B (Suc n))\<close> by blast
moreover
have "Bet A (GradExpn A B (Suc n)) (GradExpn A B (Suc(Suc n)))"
using GradExpn_3 by blast
moreover
have "Cong A (GradExpn A B (Suc n)) (GradExpn A B (Suc n)) C'"
using GradExpn_4 \<open>C' = GradExpn A B (Suc n + 1)\<close> by force
ultimately
show ?thesis
using grad_sum by (metis Suc_eq_plus1 \<open>C' = GradExpn A B (Suc n + 1)\<close>)
qed
}
hence "\<forall> C. (C = (GradExpn A B ((Suc n) + 1)) \<longrightarrow> Grad A B C)"
by blast
}
thus ?case
using Suc.IH Suc_eq_plus1 by metis
qed
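(** Every point obtained by repeated doubling (GradExp) is in particular a graduation (Grad) *)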
lemma gradexp__grad:
assumes "GradExp A B C"
shows "Grad A B C"
proof -
obtain n where "n \<noteq> 0 \<and> C = GradExpn A B n"
using GradExp_def assms by blast
hence "n \<noteq> 0"
by blast
then obtain m where "n = Suc m"
using not0_implies_Suc by presburger
thus ?thesis
by (simp add: \<open>n \<noteq> 0 \<and> C = GradExpn A B n\<close> gradexp__grad_aux
gradexp__grad_aux_1)
qed
lemma gradexp_le__reach:
assumes "GradExp A B B'" and
"C D Le A B'"
shows "Reach A B C D"
using Reach_def assms(1) assms(2) gradexp__grad by blast
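(** Conversely, every graduation of A B is bounded above by some GradExp point,
so reachability can always be witnessed by repeated doubling *)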
lemma grad__ex_gradexp_le_aux_0:
assumes "A = B"
shows "\<exists> D. GradExp A B D \<and> A (Gradn A B n) Le A D"
proof (induction n)
case 0
have "Gradn A B 0 = A"
by auto
moreover
have "GradExp A B B"
by (simp add: gradexp_init)
moreover
have "A A Le A B"
by (simp add: le_trivial)
ultimately
show "\<exists> D. GradExp A B D \<and> A (Gradn A B 0) Le A D"
by auto
next
case (Suc n)
{
assume "\<exists> D. GradExp A B D \<and> A (Gradn A B n) Le A D"
then obtain D where "GradExp A B D \<and> A (Gradn A B n) Le A D"
by blast
let ?n = "Gradn A B n"
let ?sucn = "Gradn A B (Suc n)"
have "?n = ?sucn"
using assms by force
hence "\<exists> D. GradExp A B D \<and> A (Gradn A B (Suc n)) Le A D"
using \<open>\<exists>D. GradExp A B D \<and> A (Gradn A B n) Le A D\<close> by presburger
}
thus ?case
using Suc.IH by blast
qed
lemma grad__ex_gradexp_le_aux_1:
assumes "A \<noteq> B"
shows "\<exists> D. GradExp A B D \<and> A (Gradn A B n) Le A D"
proof (induction n)
case 0
have "Gradn A B 0 = A"
by auto
moreover
have "GradExp A B B"
by (simp add: gradexp_init)
moreover
have "A A Le A B"
by (simp add: le_trivial)
ultimately
show "\<exists> D. GradExp A B D \<and> A (Gradn A B 0) Le A D"
by auto
next
case (Suc n)
{
assume "\<exists> D. GradExp A B D \<and> A (Gradn A B n) Le A D"
then obtain D where "GradExp A B D \<and> A (Gradn A B n) Le A D"
by blast
obtain D' where "Bet A D D' \<and> Cong D D' A D"
using segment_construction by blast
have "(Gradn A B n) (Gradn A B (Suc n)) Le D D'"
by (meson Cong_Gradn_Gradn_Suc \<open>Bet A D D' \<and> Cong D D' A D\<close>
\<open>GradExp A B D \<and> A (Gradn A B n) Le A D\<close> grad__le gradexp__grad
l5_6 le_right_comm not_cong_4312)
hence "GradExp A B D' \<and> A (Gradn A B (Suc n)) Le A D'"
by (meson Diff__Bet_Gradn_Gradn_Suc \<open>Bet A D D' \<and> Cong D D' A D\<close>
\<open>GradExp A B D \<and> A (Gradn A B n) Le A D\<close> assms bet2_le2__le1346
gradexp_stab not_cong_3412)
hence "\<exists> D. GradExp A B D \<and> A (Gradn A B (Suc n)) Le A D"
by blast
}
thus ?case
using Suc.IH by blast
qed
lemma grad__ex_gradexp_le_aux:
shows "\<exists> D. GradExp A B D \<and> A (Gradn A B n) Le A D"
using grad__ex_gradexp_le_aux_0 grad__ex_gradexp_le_aux_1 by blast
lemma grad__ex_gradexp_le:
assumes "Grad A B C"
shows "\<exists> D. GradExp A B D \<and> A C Le A D"
using grad__ex_gradexp_le_aux Grad_def assms by auto
lemma reach__ex_gradexp_le:
assumes "Reach A B C D"
shows "\<exists> B'. GradExp A B B' \<and> C D Le A B'"
by (meson Reach_def le_transitivity assms grad__ex_gradexp_le)
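(** GradExp2 A B C D E F performs the same number of doubling steps on the two segments A B and D E simultaneously *)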
lemma gradexp2_init:
shows "GradExp2 A B B C D D"
proof -
have "B = GradExpn A B (Suc 0)"
by auto
moreover
have "D = GradExpn C D (Suc 0)"
by auto
ultimately
show ?thesis
using GradExp2_def by blast
qed
lemma GradExp2_stab:
assumes "GradExp2 A B C D E F" and
"Bet A C C'" and
"Cong A C C C'" and
"Bet D F F'" and
"Cong D F F F'"
shows "GradExp2 A B C' D E F'"
proof -
obtain n where "(n \<noteq> 0) \<and> (C = GradExpn A B n) \<and> (F = GradExpn D E n)"
using GradExp2_def assms(1) by presburger
hence "n \<noteq> 0"
by blast
then obtain m where "Suc m = n"
using not0_implies_Suc by auto
hence "C = GradExpn A B (Suc m)"
using \<open>n \<noteq> 0 \<and> C = GradExpn A B n \<and> F = GradExpn D E n\<close> by blast
hence "C' = GradExpn A B (Suc (Suc m))"
using assms(2) assms(3) gradexp_stab_aux_n by blast
moreover
have "F = GradExpn D E (Suc m)"
using \<open>Suc m = n\<close> \<open>n \<noteq> 0 \<and> C = GradExpn A B n \<and> F = GradExpn D E n\<close> by blast
hence "F' = GradExpn D E (Suc (Suc m))"
using assms(4) assms(5) gradexp_stab_aux_n by blast
moreover
have "Suc (Suc m) \<noteq> 0"
by simp
ultimately
show ?thesis
using GradExp2_def by blast
qed
lemma gradexp2__gradexp123:
assumes "GradExp2 A B C D E F"
shows "GradExp A B C"
proof -
obtain n where "n \<noteq> 0 \<and> (C = GradExpn A B n) \<and> (F = GradExpn D E n)"
using assms(1) GradExp2_def by blast
thus ?thesis using GradExp_def by blast
qed
lemma gradexp2__gradexp456:
assumes "GradExp2 A B C D E F"
shows "GradExp D E F"
proof -
obtain n where "n \<noteq> 0 \<and> (C = GradExpn A B n) \<and> (F = GradExpn D E n)"
using assms(1) GradExp2_def by blast
thus ?thesis using GradExp_def by blast
qed
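(** MidR A B is the midpoint of A and B obtained via Hilbert's choice operator;
the next lemmas relate it to the Midpoint predicate *)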
lemma MidR_uniq:
assumes "C = MidR A B" and
"D = MidR A B"
shows "C = D"
by (simp add: assms(1) assms(2))
lemma MidR_ex_0:
shows "(MidR A B) Midpoint A B"
proof -
have "MidR A B = (SOME x. x Midpoint A B)"
by simp
thus ?thesis
using someI_ex by (metis midpoint_existence)
qed
lemma MidR_ex_1:
assumes "C = (MidR A B)"
shows "C Midpoint A B"
using assms MidR_ex_0 by blast
lemma MidR_ex_aux:
assumes "C Midpoint A B"
shows "C = (SOME x. x Midpoint A B)"
by (metis assms MidR_uniq_aux someI)
lemma MidR_ex:
assumes "C Midpoint A B"
shows "C = (MidR A B)"
using MidR_ex_0 MidR_uniq_aux assms by blast
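(** GradExpInvn goes the other way: GradExpInvn A C n takes n successive midpoints
from C towards A, recovering the B with C = GradExpn A B (Suc n)
(see gradexp__gradexpinv_aux_1 below) *)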
lemma gradexpinv_init_aux:
shows "B = GradExpInvn A B 0"
by simp
lemma gradexpinv_init:
shows "GradExpInv A B B"
using gradexpinv_init_aux using GradExpInv_def by blast
lemma gradexpinv_stab_aux_0:
assumes "B = GradExpInvn A C 0" and
"Bet A B' B" and
"Cong A B' B' B"
shows "B' = GradExpInvn A C (Suc 0)"
proof -
have "B = C"
using assms(1) by simp
have "B' Midpoint A B"
by (simp add: assms(2) assms(3) midpoint_def)
hence "B' = MidR A B"
using MidR_ex by blast
show ?thesis
proof (cases "A = C")
case True
hence "B' = B"
using \<open>B = C\<close> assms(2) bet_neq12__neq by blast
moreover
have "A = GradExpInvn A C (Suc 0)"
using True by simp
ultimately
show ?thesis
using True \<open>B = C\<close> by force
next
case False
hence "MidR A C = GradExpInvn A C (Suc 0)"
by simp
thus ?thesis
using \<open>B = C\<close> \<open>B' = MidR A B\<close> by blast
qed
qed
lemma gradexpinv_stab_aux_n:
assumes "Bet A B' B" and
"Cong A B' B' B" and
"B = GradExpInvn A C (Suc n)"
shows "B' = GradExpInvn A C (Suc (Suc n))"
proof -
have "Suc (Suc n) \<noteq> 0" by simp
moreover
have "Suc (Suc n) \<noteq> 1" by simp
moreover
have "B' Midpoint A B"
using Midpoint_def assms(1) assms(2) by blast
hence "B' = MidR A (GradExpInvn A C (Suc n))"
using MidR_ex assms(3) by blast
ultimately
show ?thesis
by (metis GradExpInvn.simps One_nat_def assms(1) assms(3)
diff_Suc_Suc l8_20_1_R1 l8_6 minus_nat.diff_0)
qed
lemma gradexpinv_stab:
assumes "Bet A B' B" and
"Cong A B' B' B" and
"GradExpInv A B C"
shows "GradExpInv A B' C"
proof -
have "GradExpInv A B C"
using GradExpInv_def assms(3) by auto
then obtain n where "B = GradExpInvn A C n"
using GradExpInv_def by blast
have "B' = GradExpInvn A C (Suc n)"
proof (cases "n = 0")
case True
show ?thesis
using True \<open>B = GradExpInvn A C n\<close> assms(1) assms(2)
gradexpinv_stab_aux_0 by blast
next
case False
then obtain k where "Suc k = n"
using not0_implies_Suc by auto
hence "B = GradExpInvn A C (Suc k)"
using \<open>B = GradExpInvn A C n\<close> by blast
hence "B' = GradExpInvn A C (Suc (Suc k))"
using gradexpinv_stab_aux_n assms(1) assms(2) by blast
show ?thesis
using \<open>B' = GradExpInvn A C (Suc (Suc k))\<close> \<open>Suc k = n\<close> by blast
qed
moreover
have "Suc n \<noteq> 0"
by simp
ultimately
show ?thesis
using GradExpInv_def by blast
qed
lemma gradexp__gradexpinv_aux_1_0:
assumes "C = GradExpn A B (Suc 0)"
shows "B = GradExpInvn A C 0"
proof (cases "A = B")
case True
hence "C = A"
by (simp add: assms)
thus ?thesis
by (simp add: True)
next
case False
hence "C = B"
using assms by simp
thus ?thesis
using False by simp
qed
(* TODO: move earlier in the development *)
lemma SymR_MidR:
assumes "A = SymR B C"
shows "C = MidR A B"
proof -
have "C Midpoint B A"
using assms SymR_ex symmetric_point_construction by blast
thus ?thesis
using MidR_ex_0 l7_17_bis by auto
qed
lemma MidR_comm:
assumes "C = MidR A B"
shows "C = MidR B A"
proof -
have "C Midpoint B A"
using MidR_ex_0 assms l7_2 by blast
thus ?thesis
by (meson MidR_ex \<open>C Midpoint B A\<close>)
qed
lemma MidR_SymR:
assumes "C = MidR A B"
shows "A = SymR B C"
proof -
have "C Midpoint B A"
using MidR_ex_0 assms l7_2 by blast
thus ?thesis
using SymR_ex by auto
qed
lemma MidR_AA:
shows "A = MidR A A"
using l7_3_2 MidR_ex by blast
lemma MidR_AB:
assumes "A = MidR A B"
shows "A = B"
by (metis MidR_AA MidR_SymR MidR_comm assms)
lemma gradexp__gradexpinv_aux_1_n_aa:
shows "GradExpInvn A' (MidR A' C') n = GradExpInvn A' C' (Suc n)"
proof (cases "A' = C'")
case True
hence "A' = MidR A' C'"
using l7_3_2 MidR_ex by blast
hence "GradExpInvn A' (MidR A' C') n = A'"
by simp
moreover
have "GradExpInvn A' C' (Suc n) = A'"
using True by simp
ultimately
show ?thesis
by simp
next
case False
have "GradExpInvn A' (MidR A' C') n = GradExpInvn A' C' (Suc n)"
proof (induction n)
case 0
hence "A' \<noteq> MidR A' C'"
using False MidR_AB by blast
hence "GradExpInvn A' (MidR A' C') 0 = (MidR A' C')"
by simp
moreover
have "GradExpInvn A' C' 0 = C'"
using gradexpinv_init_aux by presburger
hence "GradExpInvn A' C' (Suc 0) = (MidR A' C')"
using False by simp
ultimately
have "GradExpInvn A' (MidR A' C') 0 = GradExpInvn A' C' (Suc 0)"
by simp
thus ?case
by blast
next
case (Suc n)
{
assume "GradExpInvn A' (MidR A' C') n = GradExpInvn A' C' (Suc n)"
have "GradExpInvn A' (MidR A' C') (Suc n)
= (MidR A' (GradExpInvn A' (MidR A' C') ((Suc n)-1)))"
by (metis GradExpInvn.simps MidR_AB Suc_neq_Zero diff_self_eq_0)
have "\<dots> = MidR A' (GradExpInvn A' (MidR A' C') n)"
by simp
have "\<dots> = MidR A' (GradExpInvn A' C' (Suc n))"
using Suc.IH by presburger
have "\<dots> = MidR A' (GradExpInvn A' C' ((Suc(Suc n))-1))"
using diff_Suc_1 by presburger
have "\<dots> = GradExpInvn A' C' (Suc(Suc n))"
using False by auto
hence "GradExpInvn A' (MidR A' C') (Suc n) = GradExpInvn A' C' (Suc(Suc n))"
using \<open>GradExpInvn A' (MidR A' C') (Suc n)
= MidR A' (GradExpInvn A' (MidR A' C') (Suc n - 1))\<close>
\<open>MidR A' (GradExpInvn A' (MidR A' C') (Suc n - 1))
= MidR A' (GradExpInvn A' (MidR A' C') n)\<close>
\<open>MidR A' (GradExpInvn A' (MidR A' C') n)
= MidR A' (GradExpInvn A' C' (Suc n))\<close>
\<open>MidR A' (GradExpInvn A' C' (Suc n))
= MidR A' (GradExpInvn A' C' (Suc (Suc n) - 1))\<close>
by presburger
}
thus ?case
using Suc.IH by blast
qed
thus ?thesis by blast
qed
lemma gradexp__gradexpinv_aux_1_n_a:
assumes "\<forall> A B C. (C = GradExpn A B (Suc n)) \<longrightarrow> B = GradExpInvn A C n"
shows "\<forall> A' B' C'. (C' = GradExpn A' B' (Suc(Suc n))) \<longrightarrow> B' = GradExpInvn A' C' (Suc n)"
proof -
{
fix A' B' C'
assume "C' = GradExpn A' B' (Suc(Suc n))"
have "B' = GradExpInvn A' C' (Suc n)"
proof (cases "A' = B'")
case True
{
hence "C' = A'"
using True by (simp add: \<open>C' = GradExpn A' B' (Suc (Suc n))\<close>)
hence "A' = GradExpInvn A' C' (Suc n)"
by simp
hence "B' = GradExpInvn A' C' (Suc n)"
using True by simp
}
thus ?thesis
using \<open>C' = GradExpn A' B' (Suc (Suc n))\<close> by blast
next
case False
{
have "Suc (Suc n) \<noteq> 0"
by simp
moreover
have "Suc (Suc n) \<noteq> 1"
by simp
moreover
have "C' = SymR A' (GradExpn A' B' ((Suc (Suc n))-1))"
by (simp add: False \<open>C' = GradExpn A' B' (Suc (Suc n))\<close>)
have "(Suc (Suc n))-1 = Suc n"
by simp
let ?ssn1 = "(Suc (Suc n))-1"
let ?B1 = "GradExpn A' B' ?ssn1"
have "?B1 = (GradExpn A' B' (Suc n))"
by simp
hence "B' = (GradExpInvn A' ?B1 n)"
using assms by blast
have "C' = SymR A' ?B1"
using \<open>C' = SymR A' (GradExpn A' B' (Suc (Suc n) - 1))\<close> by blast
{
assume "n = 0"
hence "B' = ?B1"
by force
}
{
assume "n = 1"
hence "B' = MidR A' ?B1"
by (metis False GradExpInvn.simps One_nat_def
\<open>B' = GradExpInvn A' (GradExpn A' B' (Suc (Suc n) - 1)) n\<close>
\<open>GradExpn A' B' (Suc (Suc n) - 1) = GradExpn A' B' (Suc n)\<close>
n_not_Suc_n)
}
{
assume "n \<noteq> 0 \<and> n \<noteq> 1"
hence "B' = MidR A' (GradExpInvn A' ?B1 (n-1))"
by (metis False GradExpInvn.simps
\<open>B' = GradExpInvn A' (GradExpn A' B' (Suc (Suc n) - 1)) n\<close>)
}
{
assume "Suc n = 1"
hence "n = 0"
by simp
have "C' = SymR A' ?B1"
using \<open>C' = SymR A' (GradExpn A' B' (Suc (Suc n) - 1))\<close> by blast
hence "?B1 = MidR C' A'"
using SymR_MidR by blast
hence "?B1 = MidR A' C'"
using MidR_comm by blast
hence "B' = MidR A' C'"
using \<open>n = 0 \<Longrightarrow> B' = GradExpn A' B' (Suc (Suc n) - 1)\<close> \<open>n = 0\<close>
by force
}
moreover
{
assume "Suc n \<noteq> 1"
hence "B' = MidR A' (GradExpInvn A' ?B1 (n-1))"
by (metis One_nat_def
\<open>n = 1 \<Longrightarrow> B' = MidR A' (GradExpn A' B' (Suc (Suc n) - 1))\<close>
\<open>n \<noteq> 0 \<and> n \<noteq> 1 \<Longrightarrow> B'
= MidR A' (GradExpInvn A' (GradExpn A' B' (Suc (Suc n) - 1)) (n - 1))\<close>
diff_Suc_1 gradexpinv_init_aux)
obtain k where "Suc k = n"
using \<open>Suc n \<noteq> 1\<close> not0_implies_Suc by auto
have "C' = SymR A' ?B1"
using \<open>C' = SymR A' (GradExpn A' B' (Suc (Suc n) - 1))\<close> by blast
hence "?B1 = MidR A' C'"
using MidR_comm SymR_MidR by presburger
hence "GradExpInvn A' ?B1 k = (GradExpInvn A' C' (Suc k))"
using gradexp__gradexpinv_aux_1_n_aa by presburger
hence "GradExpInvn A' ?B1 (n-1) = (GradExpInvn A' C' n)"
using \<open>Suc k = n\<close> by force
hence "B' = MidR A' (GradExpInvn A' C' n)"
using \<open>B' = MidR A' (GradExpInvn A' (GradExpn A' B' (Suc (Suc n) - 1)) (n - 1))\<close>
by presburger
hence "B' = MidR A' (GradExpInvn A' C' ((Suc n)-1))"
using diff_Suc_1 by presburger
}
ultimately
have "B' = GradExpInvn A' C' (Suc n)"
by (metis GradExpInvn.elims GradExpn_2 Suc_neq_Zero
\<open>C' = GradExpn A' B' (Suc (Suc n))\<close> bet_neq12__neq)
}
hence "(C' = GradExpn A' B' (Suc(Suc n))) \<longrightarrow> B' = GradExpInvn A' C' (Suc n)" by blast
thus ?thesis
using \<open>C' = GradExpn A' B' (Suc (Suc n))\<close> by blast
qed
}
thus ?thesis by blast
qed
lemma gradexp__gradexpinv_aux_1_b:
shows "\<forall> A B C. C = GradExpn A B (Suc n) \<longrightarrow> B = GradExpInvn A C n"
proof (induction n)
case 0
have "\<forall> A B C. C = GradExpn A B (Suc 0) \<longrightarrow> B = GradExpInvn A C 0"
by (meson gradexp__gradexpinv_aux_1_0)
thus ?case by blast
next
case (Suc n)
{
assume "\<forall> A B C. C = GradExpn A B (Suc n) \<longrightarrow> B = GradExpInvn A C n"
{
fix A B C
assume "C = GradExpn A B (Suc(Suc n))"
have "B = GradExpInvn A C (Suc n)"
using \<open>C = GradExpn A B (Suc (Suc n))\<close>
\<open>\<forall>A B C. C = GradExpn A B (Suc n) \<longrightarrow> B = GradExpInvn A C n\<close>
gradexp__gradexpinv_aux_1_n_a by presburger
}
hence "\<forall> A B C. C = GradExpn A B (Suc(Suc n)) \<longrightarrow> B = GradExpInvn A C (Suc n)"
by blast
}
thus ?case
using Suc.IH by fastforce
qed
lemma gradexp__gradexpinv_aux_1:
assumes "C = GradExpn A B (Suc n)"
shows "B = GradExpInvn A C n"
using assms gradexp__gradexpinv_aux_1_b by blast
lemma gradexp__gradexpinv_aux:
assumes "GradExp A B C"
shows "GradExpInv A B C"
proof -
obtain n where "n \<noteq> 0 \<and> C = GradExpn A B n"
using assms GradExp_def by blast
hence "n \<noteq> 0"
by blast
then obtain m where "n = Suc m"
using not0_implies_Suc by presburger
hence "B = GradExpInvn A C m"
using gradexp__gradexpinv_aux_1
using \<open>n \<noteq> 0 \<and> C = GradExpn A B n\<close> by blast
thus ?thesis
using GradExpInv_def \<open>n \<noteq> 0 \<and> C = GradExpn A B n\<close> by blast
qed
lemma gradexpinv__gradexp_aux_1_a_0: (* can we go down from Suc 0 to 0? *)
assumes "B' = MidR A' (GradExpInvn A' C' (Suc 0))"
shows "C' = SymR A' (GradExpn A' B' (Suc(Suc 0)))"
proof (cases "A' = C'")
case True
hence "B' = MidR A' C'"
by (simp add: assms)
thus ?thesis
by (metis GradExpn_1 MidR_AA MidR_SymR True)
next
case False
thus ?thesis
by (metis GradExpn_2 GradExpn_4 Mid_cases SymR_MidR
assms gradexp__gradexpinv_aux_1_0 gradexp__gradexpinv_aux_1_n_aa
gradexpinv_init_aux gradexpinv_stab_aux_0 l7_9
midpoint_def midpoint_existence)
qed
lemma sym_mid:
shows "SymR A (MidR A B) = B"
by (metis MidR_SymR MidR_comm)
lemma gradexpn_suc_suc:
shows "GradExpn A B (Suc n) = GradExpn A (MidR A B) (Suc(Suc n))"
proof (cases "A = B")
case True
thus ?thesis
using GradExpn_1 MidR_AA by presburger
next
case False
hence "A \<noteq> B"
by simp
have "GradExpn A B (Suc n) = GradExpn A (MidR A B) (Suc(Suc n))"
proof (induction n)
case 0
have "GradExpn A (MidR A B) (Suc(Suc 0))
= SymR A (GradExpn A (MidR A B) ((Suc(Suc 0))-1))"
using False MidR_AB
by (metis diff_Suc_1 gradexp__gradexpinv_aux_1_b
gradexp__gradexpinv_aux_1_n_aa gradexpinv_init_aux sym_mid)
hence "\<dots> = SymR A (GradExpn A (MidR A B) (Suc 0))"
using diff_Suc_1 by presburger
hence "\<dots> = SymR A (MidR A B)"
by simp
hence "\<dots> = B"
using sym_mid by blast
hence "GradExpn A B (Suc 0) = GradExpn A (MidR A B) (Suc (Suc 0))"
by (metis gradexp__gradexpinv_aux_1 gradexp__gradexpinv_aux_1_n_aa
gradexpinv__gradexp_aux_1_a_0 gradexpinv_init_aux)
thus ?case
by blast
next
case (Suc n)
{
assume "\<forall> A' B'. GradExpn A' B' (Suc n) = GradExpn A' (MidR A' B') (Suc(Suc n))"
{
fix A B
have "GradExpn A (MidR A B) (Suc(Suc(Suc n)))
= SymR A (GradExpn A (MidR A B) (Suc(Suc(Suc n))-1))"
using MidR_AA MidR_SymR by fastforce
hence "\<dots> = GradExpn A B (Suc(Suc n))"
by (metis GradExpn_3 GradExpn_4
\<open>\<forall>A' B'. GradExpn A' B' (Suc n) = GradExpn A' (MidR A' B') (Suc (Suc n))\<close>
gradexp_stab_aux_n)
hence "GradExpn A B (Suc(Suc n)) = GradExpn A (MidR A B) (Suc(Suc(Suc n)))"
using \<open>GradExpn A (MidR A B) (Suc (Suc (Suc n)))
= SymR A (GradExpn A (MidR A B) (Suc (Suc (Suc n)) - 1))\<close>
by presburger
}
hence "\<forall> A B. GradExpn A B (Suc(Suc n)) = GradExpn A (MidR A B) (Suc(Suc(Suc n)))"
by blast
}
thus ?case
using GradExpn_3 GradExpn_4 Suc.IH gradexp_stab_aux_n by blast
qed
thus ?thesis by blast
qed
lemma gradexpinv__gradexp_aux_1_a_n: (* can we go down from Suc n to n? *)
assumes "\<forall> A B C. (B = MidR A (GradExpInvn A C (Suc n))) \<longrightarrow>
C = SymR A (GradExpn A B (Suc(Suc n)))"
shows "B' = MidR A' (GradExpInvn A' C' (Suc(Suc n))) \<longrightarrow>
C' = SymR A' (GradExpn A' B' (Suc(Suc(Suc n))))"
proof (cases "A' = C'")
case True
thus ?thesis
by (metis GradExpn_1 MidR_AA MidR_SymR gradexp__gradexpinv_aux_1)
next
case False
{
assume "B' = MidR A' (GradExpInvn A' C' (Suc(Suc n)))"
hence "\<dots> = MidR A' (MidR A' (GradExpInvn A' C' (Suc(Suc n)-1)))"
using False by force
hence "\<dots> = MidR A' (MidR A' (GradExpInvn A' C' (Suc n)))"
by simp
let ?B1 = "(MidR A' (GradExpInvn A' C' (Suc n)))"
have "C' = SymR A' (GradExpn A' ?B1 (Suc(Suc n)))"
using assms by blast
have "GradExpn A' ?B1 (Suc(Suc n)) = GradExpn A' (MidR A' ?B1) (Suc(Suc(Suc n)))"
using gradexpn_suc_suc by blast
hence "C' = SymR A' (GradExpn A' B' (Suc(Suc(Suc n))))"
by (metis MidR_comm SymR_MidR \<open>B' = MidR A' (GradExpInvn A' C' (Suc (Suc n)))\<close>
\<open>C' = SymR A' (GradExpn A' (MidR A' (GradExpInvn A' C' (Suc n))) (Suc (Suc n)))\<close>
gradexp__gradexpinv_aux_1 gradexp__gradexpinv_aux_1_n_aa)
}
thus ?thesis
by blast
qed
lemma gradexpinv__gradexp_aux_1_a:
shows "\<forall> A B C. B = MidR A (GradExpInvn A C (Suc n)) \<longrightarrow>
C = SymR A (GradExpn A B (Suc(Suc n)))"
proof (induction n)
case 0
thus ?case
using gradexpinv__gradexp_aux_1_a_0 by blast
next
case (Suc n)
thus ?case
using gradexpinv__gradexp_aux_1_a_n by blast
qed
lemma gradexpinv__gradexp_aux_1_n:
assumes "B = GradExpInvn A C n \<longrightarrow> C = GradExpn A B (Suc n)"
shows "B' = GradExpInvn A' C' (Suc n)\<longrightarrow>C' = GradExpn A' B' (Suc(Suc n))"
proof (cases "A' = C'")
case True
thus ?thesis
by force
next
case False
hence "A' \<noteq> C'"
by blast
{
assume "B' = GradExpInvn A' C' (Suc n)"
have "C' = GradExpn A' B' (Suc(Suc n))"
proof (cases "Suc n = 1")
case True
hence "B' = GradExpInvn A' C' 1"
using \<open>B' = GradExpInvn A' C' (Suc n)\<close> by simp
hence "B' = MidR A' C'"
using \<open>A' \<noteq> C'\<close> by simp
hence "C' = SymR A' B'"
by (metis MidR_SymR MidR_comm)
have "A' \<noteq> B'"
using False MidR_AB \<open>B' = MidR A' C'\<close> by blast
have "Suc(Suc n) \<noteq> 0 \<and> Suc (Suc n) \<noteq> 1"
by simp
hence "GradExpn A' B' (Suc(Suc n)) = SymR A' (GradExpn A' B' (Suc(Suc n)-1))"
by (simp add: \<open>A' \<noteq> B'\<close>)
hence "\<dots> = SymR A' B'"
by (simp add: True)
thus ?thesis
using \<open>C' = SymR A' B'\<close>
\<open>GradExpn A' B' (Suc (Suc n)) = SymR A' (GradExpn A' B' (Suc (Suc n) - 1))\<close>
by presburger
next
case False
hence "B' = (MidR A' (GradExpInvn A' C' ((Suc n)-1)))"
using \<open>B' = GradExpInvn A' C' (Suc n)\<close>
by (simp add: \<open>A' \<noteq> C'\<close>)
hence "C' = SymR A' (GradExpn A' B' ((Suc(Suc n)-1)))"
using gradexpinv__gradexp_aux_1_a
by (metis GradExpn.simps One_nat_def \<open>A' \<noteq> C'\<close>
\<open>B' = GradExpInvn A' C' (Suc n)\<close> gradexp__gradexpinv_aux_1
nat.distinct(1) nat.inject)
hence "\<dots> = GradExpn A' B' (Suc(Suc n))"
by (metis GradExpn.simps GradExpn_1 MidR_AB MidR_SymR
One_nat_def \<open>B' = MidR A' (GradExpInvn A' C' (Suc n - 1))\<close>
nat.distinct(1) nat.inject)
thus ?thesis
using \<open>C' = SymR A' (GradExpn A' B' (Suc (Suc n) - 1))\<close> by blast
qed
}
thus ?thesis
by blast
qed
lemma gradexpinv__gradexp_aux_1:
shows "B = GradExpInvn A C n \<longrightarrow> C = GradExpn A B (Suc n)"
proof (induction n)
case 0
thus ?case by force
next
case (Suc n)
thus ?case
using gradexpinv__gradexp_aux_1_n by blast
qed
lemma gradexpinv__gradexp_aux:
assumes "GradExpInv A B C"
shows "GradExp A B C"
proof -
obtain n where "B = GradExpInvn A C n"
using GradExpInv_def assms by blast
hence "C = GradExpn A B (Suc n)"
using gradexpinv__gradexp_aux_1 by blast
thus ?thesis
using GradExp_def by blast
qed
lemma gradexp__gradexpinv:
shows "GradExp A B C \<longleftrightarrow> GradExpInv A B C"
using gradexp__gradexpinv_aux gradexpinv__gradexp_aux by blast
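(** Archimedean-style halving (reach__ex_gradexp_lt below): if A B is reachable
from P Q, then A B can be bisected finitely many times until the initial piece
is strictly shorter than P Q *)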
lemma reach__ex_gradexp_lt_aux:
shows "\<forall> A B C P Q R. ((A \<noteq> B \<and> A B Le P R \<and> R = GradExpn P Q (Suc n)) \<longrightarrow>
(\<exists> C. GradExp A C B \<and> A C Lt P Q))"
proof (induction n)
case 0
{
fix A B C P Q R
assume 1: "A \<noteq> B" and 2:"A B Le P R" and 3: "R = GradExpn P Q (Suc 0)"
obtain C where "Bet A C B \<and> Cong A C C B"
by (meson midpoint_bet midpoint_cong midpoint_existence)
have "P \<noteq> Q"
using "1" "2" "3" le_zero by force
have "R = Q"
by (simp add: "3")
have "GradExpInv A C B"
proof -
have "GradExpInv A B B"
using gradexpinv_init by blast
thus ?thesis
using \<open>Bet A C B \<and> Cong A C C B\<close> gradexpinv_stab by blast
qed
hence "GradExp A C B"
by (simp add: gradexpinv__gradexp_aux)
moreover
have "A C Lt P Q"
proof -
have "A C Lt A B"
by (simp add: "1" \<open>Bet A C B \<and> Cong A C C B\<close> mid__lt midpoint_def)
moreover
have "A B Le P Q"
using "2" \<open>R = Q\<close> by auto
ultimately
show ?thesis
using le3456_lt__lt by blast
qed
ultimately
have "\<exists> C. GradExp A C B \<and> A C Lt P Q"
by blast
}
thus ?case by blast
next
case (Suc n)
{
assume H1: "\<forall> A' B' C' P' Q' R'.
(A' \<noteq> B' \<and> A' B' Le P' R' \<and> R' = GradExpn P' Q' (Suc n)
\<longrightarrow> (\<exists> C'. GradExp A' C' B' \<and> A' C' Lt P' Q'))"
{
fix A B C P Q R
assume 1: "A \<noteq> B" and
2:"A B Le P R" and
3: "R = GradExpn P Q (Suc (Suc n))"
obtain M where "M Midpoint A B"
using midpoint_existence by blast
have "P \<noteq> R"
using "1" "2" le_diff by blast
have "M \<noteq> A"
using "1" \<open>M Midpoint A B\<close> bet_cong_eq between_trivial2
midpoint_cong by blast
have "M \<noteq> B"
using "1" \<open>M Midpoint A B\<close> cong_identity midpoint_cong by blast
have "A M Le P R"
using "1" "2" \<open>M Midpoint A B\<close> le3456_lt__lt lt__le mid__lt by blast
have "R = SymR P (GradExpn P Q ((Suc(Suc n))-1))"
using "3" \<open>P \<noteq> R\<close> by auto
hence "\<dots> = SymR P (GradExpn P Q (Suc n))"
using diff_Suc_1 by presburger
let ?R' = "GradExpn P Q (Suc n)"
have "\<exists> C. (GradExp A C M \<and> A C Lt P Q)"
proof -
have "A M Le P ?R'"
using "2" "3" GradExpn_3 GradExpn_4 \<open>M Midpoint A B\<close>
le_mid2__le12 midpoint_def by blast
moreover
have "M \<noteq> A"
using \<open>M \<noteq> A\<close> by force
moreover
have "?R' = GradExpn P Q (Suc n)"
by simp
ultimately
show ?thesis
using H1 by force
qed
then obtain C where "GradExp A C M \<and> A C Lt P Q"
by auto
hence "\<exists> C. GradExp A C B \<and> A C Lt P Q"
using \<open>M Midpoint A B\<close> gradexp_stab midpoint_bet
midpoint_cong by blast
}
hence "\<forall> A B C P Q R.
A \<noteq> B \<and> A B Le P R \<and> R = GradExpn P Q (Suc (Suc n))
\<longrightarrow> (\<exists> C. GradExp A C B \<and> A C Lt P Q)"
by blast
}
thus ?case
using Suc.IH by presburger
qed
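(** Auxiliary lemmas for reach__grad_min: locating C between two consecutive graduations of A B *)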
lemma reach__grad_min_1:
assumes "A \<noteq> B" and
"Bet A B C" and
"A C Le A (Gradn A B (Suc 0))"
shows "\<exists> D E. (Bet A D C \<and> Grad A B D \<and> E \<noteq> C \<and> Bet A C E \<and> Bet A D E \<and> Cong A B D E)"
proof (cases "C = (Gradn A B (Suc 0))")
case True
let ?f = "Gradn A B (Suc (Suc 0))"
have "Grad A B B"
by (simp add: grad_equiv_coq_1)
moreover
have "?f \<noteq> C"
by (metis Gradn_uniq_aux_1 True assms(1))
moreover
have "Bet A C ?f"
using Bet_Gradn_Gradn_Suc True by blast
moreover
have "Bet A B ?f"
by (meson Diff__Bet_Gradn_Suc assms(1))
moreover
have "Cong A B B ?f"
by (metis Lem_Gradn_0 Cong_Gradn_Gradn_Suc True assms(2) between_cong)
ultimately
show ?thesis
using assms(2) by blast
next
case False
let ?e = "Gradn A B (Suc 0)"
have "Grad A B B"
by (simp add: grad_equiv_coq_1)
moreover
have "?e \<noteq> C"
using False by auto
moreover
have "Bet A C ?e"
by (meson Gradn_aux_1_0 Diff__Bet_Gradn_Suc assms(1)
assms(2) assms(3) bet_out l5_1 l6_13_1 l6_6)
moreover
have "Bet A B ?e"
using Diff__Bet_Gradn_Suc not_bet_distincts by blast
moreover
have "Cong A B B ?e"
by (metis (full_types) False Lem_Gradn_0 Cong_Gradn_Gradn_Suc
assms(2) bet_neq23__neq between_cong_2 between_exchange3
calculation(3) calculation(4) l5_1)
ultimately
show ?thesis
using assms(2) by blast
qed
lemma reach__grad_min_n:
assumes "A \<noteq> B" and
"Bet A B C" and
"A C Le A (Gradn A B (Suc n)) \<longrightarrow>
(\<exists> D E. (Bet A D C \<and> Grad A B D \<and> E \<noteq> C \<and> Bet A C E \<and> Bet A D E \<and> Cong A B D E))"
shows "A C Le A (Gradn A B (Suc(Suc n))) \<longrightarrow>
(\<exists> D E. (Bet A D C \<and> Grad A B D \<and> E \<noteq> C \<and> Bet A C E \<and> Bet A D E \<and> Cong A B D E))"
proof -
{
assume
H1: "A C Le A (Gradn A B (Suc(Suc n)))"
have "\<exists> D E. (Bet A D C \<and> Grad A B D \<and> E \<noteq> C \<and> Bet A C E \<and> Bet A D E \<and> Cong A B D E)"
proof (cases "A C Le A (Gradn A B (Suc n))")
case True
hence "A C Le A (Gradn A B (Suc n))"
by blast
then obtain D E where
"Bet A D C \<and> Grad A B D \<and> E \<noteq> C \<and> Bet A C E \<and> Bet A D E \<and> Cong A B D E"
using assms(1) assms(3) by blast
thus ?thesis
by blast
next
case False
hence "A (Gradn A B (Suc n)) Lt A C"
by (meson nlt__le)
let ?f = "Gradn A B (Suc (Suc n))"
show ?thesis
proof (cases "C = ?f")
case True
let ?e = "Gradn A B (Suc (Suc (Suc n)))"
show ?thesis
proof -
have "Bet A C C"
using not_bet_distincts by auto
moreover
have "C = Gradn A B (Suc (Suc n))"
using True by auto
hence "Grad A B C"
using Grad_def by blast
moreover
have "?e \<noteq> C"
by (metis Gradn_uniq_aux_1 True assms(1))
moreover
have "Bet A C ?e"
using Bet_Gradn_Gradn_Suc True by blast
moreover
have "Bet A B ?e"
using Diff__Bet_Gradn_Suc not_bet_distincts by blast
moreover
have "Cong A B C ?e"
using Cong_Gradn_Gradn_Suc True by blast
ultimately
show ?thesis
using assms(2) by blast
qed
next
case False
hence " A C Le A ?f"
using H1 by blast
show ?thesis
proof -
let ?d = "Gradn A B (Suc n)"
let ?e = "Gradn A B (Suc(Suc n))"
have "Bet A ?d C"
by (meson Diff__Bet_Gradn_Suc
\<open>A (Gradn A B (Suc n)) Lt A C\<close> assms(1) assms(2)
l5_1 l5_12_a lt__nle)
moreover
have "Grad A B ?d"
using Grad_def by blast
moreover
have "?e \<noteq> C"
using False by auto
moreover
have "Bet A C ?e"
by (metis H1 Bet_Gradn_Gradn_Suc assms(1) bet__lt1213
calculation(1) calculation(2) grad_neq__neq13 l5_1 lt__nle)
moreover
have "Bet A B ?e"
using Diff__Bet_Gradn_Suc not_bet_distincts by blast
moreover
have "Cong A B ?d ?e"
using Cong_Gradn_Gradn_Suc by blast
ultimately
show ?thesis
using between_exchange4 by blast
qed
qed
qed
}
thus ?thesis
by blast
qed
lemma reach__grad_min_aux:
assumes "A \<noteq> B" and
"Bet A B C"
shows "(Grad A B (Gradn A B (Suc n)) \<and> A C Le A (Gradn A B (Suc n)))
\<longrightarrow> (\<exists> D E. (Bet A D C \<and> Grad A B D \<and> E \<noteq> C \<and> Bet A C E \<and> Bet A D E \<and> Cong A B D E))"
proof (induction n)
case 0
show ?case
using assms(1) assms(2) reach__grad_min_1 by blast
next
case (Suc n)
have "Grad A B (Gradn A B (Suc n)) \<and> Grad A B (Gradn A B (Suc (Suc n)))"
using Grad_def by blast
thus ?case
using assms(1) assms(2) reach__grad_min_n Suc.IH by blast
qed
(** D is the last graduation of AB before or equal to C, and E the first graduation after C *)
lemma reach__grad_min:
assumes "A \<noteq> B" and
"Bet A B C" and
"Reach A B A C"
shows "\<exists> D E. (Bet A D C \<and> Grad A B D \<and> E \<noteq> C \<and> Bet A C E \<and> Bet A D E \<and> Cong A B D E)"
proof -
obtain D where "Grad A B D \<and> A C Le A D"
by (meson assms(3) gradexp__grad reach__ex_gradexp_le)
hence "A C Le A D"
by blast
have "A Out C D"
proof -
have "A Out C B"
using assms(1) assms(2) bet_out l6_6 by presburger
moreover
have "Grad A B D"
by (simp add: \<open>Grad A B D \<and> A C Le A D\<close>)
hence "Bet A B D"
by (simp add: grad__bet)
hence "A Out B D"
using assms(1) bet_out by force
ultimately
show ?thesis
using l6_7 by blast
qed
hence "Bet A C D"
using l6_13_1 by (simp add: \<open>A C Le A D\<close>)
have "Grad A B D"
by (simp add: \<open>Grad A B D \<and> A C Le A D\<close>)
then obtain n where "(n \<noteq> 0) \<and> (D = Gradn A B n)"
using Grad_def by blast
hence "n \<noteq> 0"
by simp
then obtain m where "Suc m = n"
by (metis not0_implies_Suc)
hence "D = Gradn A B (Suc m)"
by (simp add: \<open>n \<noteq> 0 \<and> D = Gradn A B n\<close>)
show ?thesis
by (metis \<open>Grad A B D \<and> A C Le A D\<close>
\<open>Suc m = n\<close> \<open>n \<noteq> 0 \<and> D = Gradn A B n\<close>
assms(1) assms(2) reach__grad_min_aux)
qed
lemma reach__ex_gradexp_lt:
assumes "A \<noteq> B" and
"Reach P Q A B"
shows "\<exists> C. GradExp A C B \<and> A C Lt P Q"
proof -
obtain R where "GradExp P Q R \<and> A B Le P R"
using assms(2) reach__ex_gradexp_le by blast
then obtain n where "n \<noteq> 0 \<and> R = GradExpn P Q n"
using GradExp_def by blast
hence "n \<noteq> 0"
by blast
then obtain m where "Suc m = n"
using not0_implies_Suc by blast
hence "R = GradExpn P Q (Suc m)"
by (simp add: \<open>n \<noteq> 0 \<and> R = GradExpn P Q n\<close>)
{
fix B
assume "A \<noteq> B" and "A B Le P R"
hence "\<exists> C. (GradExp A C B \<and> A C Lt P Q)"
using \<open>R = GradExpn P Q (Suc m)\<close> reach__ex_gradexp_lt_aux by force
}
thus ?thesis
by (simp add: \<open>GradExp P Q R \<and> A B Le P R\<close> assms(1))
qed
(** This development is inspired by The Foundations of Geometry and
the Non-Euclidean Plane, by George E. Martin, chapter 22 *)
lemma t22_18_aux_0:
assumes "Bet A0 D1 A1" and
"Cong E0 E1 A1 D1" and
"D = Gradn A0 D1 (Suc 0)"
shows "\<exists> A E. (Grad2 A0 A1 A E0 E1 E \<and> Cong E0 E A D \<and> Bet A0 D A)"
proof -
have "D = D1"
by (simp add: assms(3))
thus ?thesis
using assms(1) assms(2) grad2_init by blast
qed
lemma t22_18_aux_n:
assumes "\<forall> A0 D1 A1 E0 E1 D.
(Bet A0 D1 A1 \<and> Cong E0 E1 A1 D1 \<and> D = Gradn A0 D1 (Suc n)) \<longrightarrow>
(\<exists> A E. (Grad2 A0 A1 A E0 E1 E \<and> Cong E0 E A D \<and> Bet A0 D A))"
shows "\<forall> A0 D1 A1 E0 E1 D.
(Bet A0 D1 A1 \<and> Cong E0 E1 A1 D1 \<and> D = Gradn A0 D1 (Suc(Suc n))) \<longrightarrow>
(\<exists> A E. (Grad2 A0 A1 A E0 E1 E \<and> Cong E0 E A D \<and> Bet A0 D A))"
proof -
have "\<forall> A0 D1 A1 E0 E1 D. (Bet A0 D1 A1 \<and> Cong E0 E1 A1 D1 \<and>
D = Gradn A0 D1 (Suc n)) \<longrightarrow>
(\<exists> A E. (Grad2 A0 A1 A E0 E1 E \<and> Cong E0 E A D \<and> Bet A0 D A))"
using assms by blast
{
fix A0 D1 A1 E0 E1 D
assume 1: "Bet A0 D1 A1" and
2: "Cong E0 E1 A1 D1" and
"D = Gradn A0 D1 (Suc(Suc n))"
let ?C = "Gradn A0 D1 (Suc n)"
obtain A E where "Grad2 A0 A1 A E0 E1 E" and "Cong E0 E A ?C" and "Bet A0 ?C A"
using 1 2 assms by blast
obtain A' where "Bet A0 A A'" and "Cong A A' A0 A1"
using segment_construction by blast
obtain E' where "Bet E0 E E'" and "Cong E E' E0 E1"
using segment_construction by blast
have "Grad2 A0 A1 A' E0 E1 E'"
using Grad2_stab \<open>Bet A0 A A'\<close> \<open>Bet E0 E E'\<close> \<open>Cong A A' A0 A1\<close>
\<open>Cong E E' E0 E1\<close> \<open>Grad2 A0 A1 A E0 E1 E\<close> cong_symmetry by blast
moreover
have "E0 E1 Le A A'"
by (meson "1" "2" \<open>Cong A A' A0 A1\<close> bet__le2313 l5_6
le_left_comm not_cong_3412)
obtain D' where "Bet A D' A'" and "Cong E0 E1 A D'"
using Le_def \<open>E0 E1 Le A A'\<close> by blast
have "Cong E0 E' A' D"
proof -
have "Cong E0 E' ?C D'"
proof -
have "Bet ?C A D'"
by (metis between_exchange3 \<open>Bet A D' A'\<close>
\<open>Bet A0 (Gradn A0 D1 (Suc n)) A\<close> \<open>Bet A0 A A'\<close>
between_inner_transitivity)
moreover
have "Cong E0 E ?C A"
using \<open>Cong E0 E A (Gradn A0 D1 (Suc n))\<close> not_cong_1243 by blast
moreover
have "Cong E E' A D'"
by (metis cong_transitivity \<open>Cong E E' E0 E1\<close> \<open>Cong E0 E1 A D'\<close>)
ultimately
show ?thesis
using \<open>Bet E0 E E'\<close> l2_11 by blast
qed
moreover
have "Cong ?C D' A' D"
proof -
have "Bet ?C D' A'"
by (metis between_exchange3 \<open>Bet A D' A'\<close>
\<open>Bet A0 (Gradn A0 D1 (Suc n)) A\<close> \<open>Bet A0 A A'\<close> between_exchange2)
moreover
have "Bet A' D ?C"
proof -
have "Grad A0 A1 A"
using \<open>Grad2 A0 A1 A E0 E1 E\<close> grad2__grad123 by blast
hence "Bet A0 A1 A"
by (simp add: grad__bet)
have "Bet A0 D1 ?C"
using Diff__Bet_Gradn_Suc not_bet_distincts by blast
show ?thesis
proof (cases "A0 = ?C")
case True
thus ?thesis
by (metis Gradn_aux_1_0 Lem_Gradn_id_n
\<open>D = Gradn A0 D1 (Suc (Suc n))\<close> not_bet_distincts)
next
case False
hence "A0 \<noteq> ?C"
by blast
show ?thesis
proof (cases "D = ?C")
case True
thus ?thesis
using not_bet_distincts by blast
next
case False
hence "D \<noteq> ?C"
by blast
show ?thesis
proof (cases "A' = ?C")
case True
thus ?thesis
by (metis "1" Lem_Gradn_id_n cong_diff_3
\<open>Bet A0 (Gradn A0 D1 (Suc n)) A\<close> \<open>Bet A0 A A'\<close>
\<open>Cong A A' A0 A1\<close> \<open>D = Gradn A0 D1 (Suc (Suc n))\<close>
between_equality_2 between_identity)
next
case False
hence "A' \<noteq> ?C"
by blast
have "?C Out D A'"
proof -
have "D \<noteq> ?C"
using \<open>D \<noteq> Gradn A0 D1 (Suc n)\<close> by auto
moreover
have "A' \<noteq> ?C"
using False by blast
moreover
have "A0 \<noteq> ?C"
using \<open>A0 \<noteq> Gradn A0 D1 (Suc n)\<close> by auto
moreover
have "Bet D ?C A0"
using Bet_Gradn_Gradn_Suc
\<open>D = Gradn A0 D1 (Suc (Suc n))\<close> between_symmetry by blast
ultimately
show ?thesis
by (meson \<open>Bet A0 (Gradn A0 D1 (Suc n)) A\<close> \<open>Bet A0 A A'\<close>
between_exchange4 between_symmetry l6_3_2)
qed
moreover
have "?C D Le ?C A'"
proof -
have "?C D Le A A'"
by (metis "1" Cong_Gradn_Gradn_Suc cong_symmetry
\<open>Cong A A' A0 A1\<close> \<open>D = Gradn A0 D1 (Suc (Suc n))\<close>
bet__le1213 l5_6)
moreover
have "A A' Le ?C A'"
by (metis between_exchange3
\<open>Bet A0 (Gradn A0 D1 (Suc n)) A\<close>
\<open>Bet A0 A A'\<close> bet__le2313)
ultimately
show ?thesis
using le_transitivity by blast
qed
ultimately
show ?thesis
using between_symmetry l6_13_1 by blast
qed
qed
qed
qed
moreover
have "Cong ?C A' A' ?C"
by (simp add: cong_pseudo_reflexivity)
moreover
have "Cong D' A' D ?C"
proof -
have "Cong A0 D1 (Gradn A0 D1 (Suc n)) (Gradn A0 D1 (Suc (Suc n)))"
using Cong_Gradn_Gradn_Suc by blast
hence "Cong A0 D1 ?C D"
using \<open>D = Gradn A0 D1 (Suc (Suc n))\<close> by blast
hence "Cong A0 D1 D ?C"
using not_cong_1243 by blast
have "Bet A1 D1 A0"
by (simp add: "1" between_symmetry)
have "Cong A1 D1 A D'"
using "2" \<open>Cong E0 E1 A D'\<close> cong_inner_transitivity
by blast
have "Cong A0 A1 A A'"
using \<open>Cong A A' A0 A1\<close> cong_symmetry by presburger
hence "Cong A0 D1 D' A'"
using \<open>Bet A D' A'\<close> \<open>Bet A1 D1 A0\<close> \<open>Cong A1 D1 A D'\<close>
l4_3_1 not_cong_2134 by blast
thus ?thesis
using \<open>Cong A0 D1 D (Gradn A0 D1 (Suc n))\<close>
cong_inner_transitivity by blast
qed
ultimately
show ?thesis
using l4_3 by blast
qed
ultimately
show ?thesis
using Cong_cases cong_inner_transitivity by blast
qed
moreover
have "Bet A0 D A'"
proof -
have "Bet A0 ?C A'"
using \<open>Bet A0 (Gradn A0 D1 (Suc n)) A\<close> \<open>Bet A0 A A'\<close>
between_exchange4 by blast
have "Bet ?C D A'"
proof -
have "Grad A0 A1 A"
using \<open>Grad2 A0 A1 A E0 E1 E\<close> grad2__grad123 by blast
hence "Bet A0 A1 A"
by (simp add: grad__bet)
have "Bet A0 D1 ?C"
using Diff__Bet_Gradn_Suc not_bet_distincts by blast
show ?thesis
proof (cases "A0 = ?C")
case True
thus ?thesis
by (metis Gradn_aux_1_0 Lem_Gradn_id_n
\<open>Bet A0 (Gradn A0 D1 (Suc n)) A'\<close>
\<open>D = Gradn A0 D1 (Suc (Suc n))\<close>)
next
case False
hence "A0 \<noteq> ?C"
by blast
show ?thesis
proof (cases "D = ?C")
case True
thus ?thesis
using between_trivial2 by auto
next
case False
hence "D \<noteq> ?C"
by blast
show ?thesis
proof (cases "A' = ?C")
case True
thus ?thesis
by (metis "1" Lem_Gradn_id_n cong_diff_3
\<open>Bet A0 (Gradn A0 D1 (Suc n)) A\<close> \<open>Bet A0 A A'\<close>
\<open>Cong A A' A0 A1\<close> \<open>D = Gradn A0 D1 (Suc (Suc n))\<close>
between_equality_2 between_identity)
next
case False
hence "A' \<noteq> ?C"
by blast
have "?C Out D A'"
proof -
have "D \<noteq> ?C"
using \<open>D \<noteq> Gradn A0 D1 (Suc n)\<close> by auto
moreover
have "A' \<noteq> ?C"
using False by blast
moreover
have "A0 \<noteq> ?C"
using \<open>A0 \<noteq> Gradn A0 D1 (Suc n)\<close> by auto
moreover
have "Bet D ?C A0"
using Bet_Gradn_Gradn_Suc
\<open>D = Gradn A0 D1 (Suc (Suc n))\<close>
between_symmetry by blast
ultimately
show ?thesis
by (meson \<open>Bet A0 (Gradn A0 D1 (Suc n)) A'\<close>
between_symmetry l6_3_2)
qed
moreover
have "?C D Le ?C A'"
proof -
have "?C D Le A A'"
by (metis "1" Cong_Gradn_Gradn_Suc cong_symmetry
\<open>Cong A A' A0 A1\<close> \<open>D = Gradn A0 D1 (Suc (Suc n))\<close>
bet__le1213 l5_6)
moreover
have "A A' Le ?C A'"
by (metis between_exchange3 \<open>Bet A0 (Gradn A0 D1 (Suc n)) A\<close>
\<open>Bet A0 A A'\<close> bet__le2313)
ultimately
show ?thesis
using le_transitivity by blast
qed
ultimately
show ?thesis
using l6_13_1 by blast
qed
qed
qed
qed
thus ?thesis
using \<open>Bet A0 (Gradn A0 D1 (Suc n)) A'\<close> between_exchange2 by blast
qed
ultimately
have "\<exists> A E. (Grad2 A0 A1 A E0 E1 E \<and> Cong E0 E A D \<and> Bet A0 D A)"
by blast
}
thus ?thesis by blast
qed
lemma t22_18_aux0:
shows "\<forall> A0 D1 A1 E0 E1 D.
(Bet A0 D1 A1 \<and> Cong E0 E1 A1 D1 \<and> D = Gradn A0 D1 (Suc n)) \<longrightarrow>
(\<exists> A E. (Grad2 A0 A1 A E0 E1 E \<and> Cong E0 E A D \<and> Bet A0 D A))"
proof (induction n)
case 0
show ?case
using t22_18_aux_0 by auto
next
case (Suc n)
show ?case
using t22_18_aux_n Suc.IH by blast
qed
(** For every m, there exists n such that A0Dm = A0An - E0En = n(A0A1 - E0E1) (m=n) *)
lemma t22_18_aux1:
assumes "Bet A0 D1 A1" and
"Cong E0 E1 A1 D1" and
"Grad A0 D1 D"
shows "\<exists> A E. (Grad2 A0 A1 A E0 E1 E \<and> Cong E0 E A D \<and> Bet A0 D A)"
proof -
obtain n where "n \<noteq> 0" and "D = Gradn A0 D1 n"
using Grad_def assms(3) by blast
hence "n \<noteq> 0"
by blast
then obtain m where "n = Suc m"
using not0_implies_Suc by presburger
hence "D = Gradn A0 D1 (Suc m)"
by (simp add: \<open>D = Gradn A0 D1 n\<close>)
thus ?thesis
using assms(1) assms(2) t22_18_aux0 by blast
qed
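(** Over the n-th graduation A of the base A0 A1, the side B0 B of the Saccheri
quadrilateral A0 B0 B A is bounded by the n-th graduation of the summit B0 B1 *)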
lemma t22_18_aux2_0:
assumes "Saccheri A0 B0 B1 A1" and
"A = Gradn A0 A1 (Suc 0)" and
"E = Gradn B0 B1 (Suc 0)" and
"Saccheri A0 B0 B A"
shows "B0 B Le B0 E"
proof -
have "Per B0 A0 A1"
using Saccheri_def assms(1) by blast
have "Per A0 A1 B1"
using Saccheri_def assms(1) by blast
have "Cong A0 B0 B1 A1"
using Saccheri_def assms(1) by blast
have "A0 A1 OS B0 B1"
using Saccheri_def assms(1) by blast
have "Per B0 A0 A"
using Saccheri_def assms(4) by blast
have "Per A0 A B"
using Saccheri_def assms(4) by blast
have "A0 A OS B0 B"
using Saccheri_def assms(4) by blast
have "Cong A0 B0 B A"
using Saccheri_def assms(4) by blast
have "A = A1"
by (simp add: assms(2))
have "E = B1"
by (simp add: assms(3))
have "Saccheri A0 B0 B A1"
using \<open>A = A1\<close> assms(4) by auto
hence "Cong A0 B0 B A1"
using Saccheri_def by blast
hence "B = B1"
proof-
have "A0 \<noteq> A1"
using \<open>A0 A1 OS B0 B1\<close> os_distincts by blast
have "A1 Out B B1"
proof -
have "Col A1 B B1"
proof -
have "Coplanar A0 B B1 A1"
proof -
have "\<not> Col B0 A0 A1"
by (meson \<open>A0 A1 OS B0 B1\<close> col123__nos not_col_permutation_2)
moreover
have "Coplanar B0 A0 A1 B"
using \<open>A = A1\<close> assms(4) coplanar_perm_7 sac__coplanar by blast
moreover
have "Coplanar B0 A0 A1 B1"
by (meson \<open>A0 A1 OS B0 B1\<close> ncoplanar_perm_8 os__coplanar)
ultimately
show ?thesis
by (meson coplanar_pseudo_trans ncop_distincts)
qed
moreover
have "A0 \<noteq> A1"
using \<open>A0 \<noteq> A1\<close> by auto
moreover
have "Per B A1 A0"
using \<open>A = A1\<close> \<open>Per A0 A B\<close> l8_2 by blast
moreover
have "Per B1 A1 A0"
using Per_cases \<open>Per A0 A1 B1\<close> by blast
ultimately
show ?thesis
using col_permutation_2 cop_per2__col by blast
qed
moreover
have "A1 A0 OS B B1"
proof -
have "A1 A0 OS B B0"
using Saccheri_def \<open>A = A1\<close> assms(4) invert_one_side
one_side_symmetry by blast
moreover
have "A1 A0 OS B0 B1"
by (simp add: \<open>A0 A1 OS B0 B1\<close> invert_one_side)
ultimately
show ?thesis
using one_side_transitivity by blast
qed
ultimately
show ?thesis
using col_one_side_out by auto
qed
moreover
have "Cong A1 B A0 B0"
using Cong_cases \<open>Cong A0 B0 B A1\<close> by blast
moreover
have "A1 Out B1 B1"
using calculation(1) out_diff2 out_trivial by auto
moreover
have "Cong A1 B1 A0 B0"
using \<open>Cong A0 B0 B1 A1\<close> not_cong_3421 by blast
ultimately
show ?thesis
using l6_11_uniqueness by blast
qed
thus ?thesis
using \<open>E = B1\<close> local.le_cases by blast
qed
lemma t22_18_aux2_Sucn:
assumes "\<forall> A A0 A1 B B0 B1 E.
Saccheri A0 B0 B1 A1 \<and> A = Gradn A0 A1 (Suc n) \<and>
E = Gradn B0 B1 (Suc n) \<and> Saccheri A0 B0 B A \<longrightarrow> B0 B Le B0 E"
shows "\<forall> A A0 A1 B B0 B1 E.
Saccheri A0 B0 B1 A1 \<and> A = Gradn A0 A1 (Suc(Suc n)) \<and>
E = Gradn B0 B1 (Suc(Suc n)) \<and> Saccheri A0 B0 B A \<longrightarrow> B0 B Le B0 E"
proof -
{
fix A A0 A1 B B0 B1 E
assume "Saccheri A0 B0 B1 A1" and
"A = Gradn A0 A1 (Suc(Suc n))" and
"E = Gradn B0 B1 (Suc(Suc n))" and
"Saccheri A0 B0 B A"
have "A0 \<noteq> B0"
using \<open>Saccheri A0 B0 B1 A1\<close> sac_distincts by blast
let ?A = "Gradn A0 A1 (Suc n)"
let ?E = "Gradn B0 B1 (Suc n)"
have "Saccheri A0 B0 B ?A \<longrightarrow> B0 B Le B0 ?E"
using \<open>Saccheri A0 B0 B1 A1\<close> assms by blast
(* Variable correspondence used below: A \<rightarrow> ?A, E \<rightarrow> ?E, B' \<rightarrow> B, A' \<rightarrow> A, E' \<rightarrow> E *)
{
assume "A0 = ?A"
have "Bet A0 A1 ?A"
using Diff__Bet_Gradn_Suc not_bet_distincts by blast
hence "A1 = A0"
by (metis \<open>A0 = Gradn A0 A1 (Suc n)\<close> bet_neq32__neq)
hence False
using sac_distincts \<open>Saccheri A0 B0 B1 A1\<close> by blast
}
hence "A0 \<noteq> ?A"
by blast
have "\<exists> B. Saccheri A0 B0 B ?A"
proof -
have "Grad A0 A1 ?A"
using Grad_def by blast
hence "Bet A0 A1 ?A"
by (simp add: grad__bet)
have "\<exists> P. A0 A1 Perp P ?A \<and> A0 A1 OS B0 P"
proof -
have "Grad A0 A1 ?A"
using Grad_def by blast
hence "Bet A0 A1 ?A"
by (simp add: grad__bet)
hence "Col A0 A1 ?A"
using Col_def by blast
moreover
have "\<not> Col A0 A1 B0"
by (meson \<open>Saccheri A0 B0 B1 A1\<close> Col_cases col_trivial_3
l8_16_1 sac__perp1214)
ultimately
show ?thesis
using l10_15 by blast
qed
then obtain P where "A0 A1 Perp P ?A" and "A0 A1 OS B0 P"
by blast
hence "P \<noteq> ?A"
using perp_not_eq_2 by blast
hence "\<exists> B. ?A Out B P \<and> Cong ?A B A0 B0"
using l6_11_existence \<open>A0 \<noteq> B0\<close> by simp
then obtain B' where "?A Out B' P" and "Cong ?A B' A0 B0"
by blast
have "Saccheri A0 B0 B' ?A"
proof -
have "Per B0 A0 ?A"
proof -
have "Per B0 A0 A1"
by (meson perp_per_2 \<open>Saccheri A0 B0 B1 A1\<close> sac__perp1214)
moreover
have "Col A0 A1 ?A"
using Col_def \<open>Bet A0 A1 (Gradn A0 A1 (Suc n))\<close> by blast
ultimately
show ?thesis
using \<open>Saccheri A0 B0 B1 A1\<close> per_col sac_distincts by blast
qed
moreover
have "Per A0 ?A B'"
proof -
have "?A A0 Perp ?A P"
using Perp_cases \<open>A0 A1 Perp P (Gradn A0 A1 (Suc n))\<close>
\<open>A0 \<noteq> Gradn A0 A1 (Suc n)\<close> \<open>Bet A0 A1 (Gradn A0 A1 (Suc n))\<close>
bet_col perp_col1 by blast
moreover
have "Col ?A P B'"
using \<open>Gradn A0 A1 (Suc n) Out B' P\<close> col_permutation_5 out_col by blast
ultimately
show ?thesis
by (meson Perp_cases col_trivial_3 l8_16_1)
qed
moreover
have "Col A0 A1 ?A"
using Col_def \<open>Bet A0 A1 (Gradn A0 A1 (Suc n))\<close> by blast
hence "A0 ?A OS B0 P"
using \<open>A0 A1 OS B0 P\<close> \<open>A0 \<noteq> Gradn A0 A1 (Suc n)\<close>
col_one_side by blast
hence "?A A0 OS B0 P"
using invert_one_side by blast
hence "?A A0 OS B0 B'"
using \<open>Gradn A0 A1 (Suc n) Out B' P\<close> l6_6
out_out_one_side by blast
hence "A0 ?A OS B0 B'"
using invert_one_side by blast
ultimately
show ?thesis
using Saccheri_def \<open>Cong (Gradn A0 A1 (Suc n)) B' A0 B0\<close>
not_cong_4312 by blast
qed
thus ?thesis
by blast
qed
then obtain B' where "Saccheri A0 B0 B' ?A"
by blast
hence "B0 B' Le B0 ?E"
using \<open>Saccheri A0 B0 B1 A1\<close> assms by blast
have "Per B0 A0 ?A"
using \<open>Saccheri A0 B0 B' ?A\<close> Saccheri_def by blast
have "Per A0 ?A B'"
using \<open>Saccheri A0 B0 B' ?A\<close> Saccheri_def by blast
have "Cong A0 B0 B' ?A"
using \<open>Saccheri A0 B0 B' ?A\<close> Saccheri_def by blast
have "A0 ?A OS B0 B'"
using \<open>Saccheri A0 B0 B' ?A\<close> Saccheri_def by blast
obtain C where "Bet B0 B' C" and "Cong B' C ?E E"
using segment_construction by blast
have "Cong B0 B1 B' B"
proof -
have "Saccheri A0 B0 B1 A1"
by (simp add: \<open>Saccheri A0 B0 B1 A1\<close>)
moreover
have "Saccheri ?A B' B A"
proof -
have "Saccheri A0 B0 B' ?A"
using \<open>Saccheri A0 B0 B' (Gradn A0 A1 (Suc n))\<close> by auto
moreover
have "?A \<noteq> A"
by (metis Gradn_uniq_aux_1 \<open>A = Gradn A0 A1 (Suc (Suc n))\<close>
\<open>A0 \<noteq> Gradn A0 A1 (Suc n)\<close> grad_rec_a_a)
moreover
have "Coplanar A0 B0 ?A A"
by (metis Bet_Gradn_Gradn_Suc ncop__ncols
\<open>A = Gradn A0 A1 (Suc (Suc n))\<close> bet_col)
ultimately
show ?thesis
by (meson cop_sac2__sac \<open>Saccheri A0 B0 B A\<close>)
qed
moreover
have "Cong A0 B0 ?A B'"
using \<open>Cong A0 B0 B' (Gradn A0 A1 (Suc n))\<close>
cong_right_commutativity by blast
moreover
have "Cong A0 A1 ?A A"
using Cong_Gradn_Gradn_Suc \<open>A = Gradn A0 A1 (Suc (Suc n))\<close> by blast
ultimately
show ?thesis
using cong2_sac2__cong by blast
qed
have "Cong B B' B' C"
proof -
have "Cong B B' ?E E"
using Cong_Gradn_Gradn_Suc
\<open>Cong B0 B1 B' B\<close> \<open>E = Gradn B0 B1 (Suc (Suc n))\<close>
cong_transitivity not_cong_3421 by blast
moreover
have "Cong ?E E B' C"
using \<open>Cong B' C (Gradn B0 B1 (Suc n)) E\<close>
cong_inner_transitivity cong_reflexivity by blast
ultimately
show ?thesis
by (meson cong_transitivity)
qed
hence "B0 B Le B0 C"
by (meson \<open>Bet B0 B' C\<close> Cong_cases cong_reflexivity
triangle_inequality_2)
moreover
have "B0 C Le B0 E"
proof -
have "Bet B0 B' C"
by (simp add: \<open>Bet B0 B' C\<close>)
moreover
have "Bet B0 ?E E"
using Bet_Gradn_Gradn_Suc \<open>E = Gradn B0 B1 (Suc (Suc n))\<close> by blast
moreover
have "B0 B' Le B0 ?E"
using \<open>B0 B' Le B0 (Gradn B0 B1 (Suc n))\<close> by auto
moreover
have "B' C Le ?E E"
using \<open>Cong B' C (Gradn B0 B1 (Suc n)) E\<close> cong__le by blast
ultimately
show ?thesis
using \<open>Cong B' C (Gradn B0 B1 (Suc n)) E\<close>
bet2_le2__le1346 cong__le by blast
qed
ultimately
have "B0 B Le B0 E"
using le_transitivity by blast
}
thus ?thesis
by blast
qed
lemma t22_18_aux2_n:
shows "\<forall> A A0 A1 B B0 B1 E.
Saccheri A0 B0 B1 A1 \<and> A = Gradn A0 A1 (Suc n) \<and>
E = Gradn B0 B1 (Suc n) \<and> Saccheri A0 B0 B A
\<longrightarrow> B0 B Le B0 E"
proof (induction n)
case 0
show ?case
using t22_18_aux2_0 by blast
next
case (Suc n)
show ?case
using t22_18_aux2_Sucn Suc.IH by blast
qed
(** For every n, B0Bn is less than or equal to n times B0B1 *)
lemma t22_18_aux2:
assumes "Saccheri A0 B0 B1 A1" and
"Grad2 A0 A1 A B0 B1 E" and
"Saccheri A0 B0 B A"
shows "B0 B Le B0 E"
proof -
obtain n where "n \<noteq> 0" and "A = Gradn A0 A1 n" and
"E = Gradn B0 B1 n"
using Grad2_def assms(2) by blast
obtain m where "n = Suc m"
using \<open>n \<noteq> 0\<close> not0_implies_Suc by blast
hence "A = Gradn A0 A1 (Suc m) \<and> E = Gradn B0 B1 (Suc m)"
using \<open>A = Gradn A0 A1 n\<close> \<open>E = Gradn B0 B1 n\<close> by blast
show ?thesis
using t22_18_aux2_n
\<open>A = Gradn A0 A1 (Suc m) \<and> E = Gradn B0 B1 (Suc m)\<close>
assms(1) assms(3) by blast
qed
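(** Under Archimedes' axiom, the summit B0B1 of a Saccheri quadrilateral A0 B0 B1 A1 is never strictly shorter than its base A1A0 *)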
lemma t22_18:
assumes "archimedes_axiom"
shows "\<forall> A0 B0 B1 A1. Saccheri A0 B0 B1 A1 \<longrightarrow> \<not> (B0 B1 Lt A1 A0)"
proof -
{
fix A0 B0 B1 A1
assume "Saccheri A0 B0 B1 A1"
assume "B0 B1 Lt A1 A0"
hence "B0 B1 Le A1 A0"
using cong__nlt lt__le by blast
then obtain D1 where "Bet A1 D1 A0" and "Cong B0 B1 A1 D1"
using Le_def by blast
obtain C0 where "Bet A0 D1 C0" and "Cong D1 C0 A0 B0"
using segment_construction by fastforce
obtain C where "Bet A0 C0 C" and "Cong C0 C A0 B0"
using segment_construction by fastforce
{
fix D
assume "Grad A0 D1 D"
have "A0 D Lt A0 C"
proof (cases "A0 = D1")
case True
thus ?thesis
using \<open>B0 B1 Lt A1 A0\<close> \<open>Cong B0 B1 A1 D1\<close> cong__nlt by blast
next
case False
obtain A E where "Grad2 A0 A1 A B0 B1 E \<and> Cong B0 E A D \<and> Bet A0 D A"
using t22_18_aux1 Bet_cases \<open>Bet A1 D1 A0\<close>
\<open>Cong B0 B1 A1 D1\<close> \<open>Grad A0 D1 D\<close> by blast
have "Grad2 A0 A1 A B0 B1 E"
using \<open>Grad2 A0 A1 A B0 B1 E \<and> Cong B0 E A D \<and> Bet A0 D A\<close> by blast
hence "Grad A0 A1 A"
using grad2__grad123 by blast
hence "Bet A0 A1 A"
by (simp add: grad__bet)
have "Cong B0 E A D"
using \<open>Grad2 A0 A1 A B0 B1 E \<and> Cong B0 E A D \<and> Bet A0 D A\<close> by blast
have "Bet A0 D A"
using \<open>Grad2 A0 A1 A B0 B1 E \<and> Cong B0 E A D \<and> Bet A0 D A\<close> by blast
have "\<exists> P. A0 A1 Perp A P \<and> A0 A1 OS B0 P"
proof -
have "Col A0 A1 A"
by (simp add: Col_def \<open>Bet A0 A1 A\<close>)
moreover
have "A0 A1 ParStrict B0 B1"
by (simp add: \<open>Saccheri A0 B0 B1 A1\<close> sac__pars1423)
hence "\<not> Col A0 A1 B0"
using par_strict_not_col_1 by blast
ultimately
show ?thesis
using Perp_cases l10_15 by blast
qed
then obtain P where "A0 A1 Perp A P" and "A0 A1 OS B0 P"
by blast
have "P \<noteq> A"
using \<open>A0 A1 Perp A P\<close> perp_not_eq_2 by blast
have "A0 \<noteq> B0"
using \<open>A0 A1 OS B0 P\<close> os_distincts by blast
then obtain B where "A Out B P" and "Cong A B A0 B0"
using l6_11_existence \<open>P \<noteq> A\<close> by blast
have "Saccheri A0 B0 B A"
proof -
have "Per B0 A0 A"
proof -
have "A0 \<noteq> A1"
using \<open>A0 A1 OS B0 P\<close> os_distincts by blast
moreover
have "Per B0 A0 A1"
by (meson perp_per_2 \<open>Saccheri A0 B0 B1 A1\<close> sac__perp1214)
moreover
have "Col A0 A1 A"
by (simp add: Col_def \<open>Bet A0 A1 A\<close>)
ultimately
show ?thesis
using per_col by blast
qed
moreover
have "Per A0 A B"
proof -
have "A \<noteq> B"
using \<open>A Out B P\<close> out_distinct by blast
moreover
have "A A0 Perp A P"
proof -
have "A0 \<noteq> A"
using False \<open>Bet A0 D A\<close> \<open>Grad A0 D1 D\<close>
bet_neq32__neq grad_neq__neq13 by blast
moreover
have "A0 A1 Perp P A"
using Perp_cases \<open>A0 A1 Perp A P\<close> by blast
moreover
have "Col A0 A1 A"
by (simp add: Col_def \<open>Bet A0 A1 A\<close>)
ultimately
show ?thesis
using \<open>A0 A1 Perp A P\<close> col_trivial_3 perp_col2
by presburger
qed
moreover
have "Col A P B"
by (simp add: \<open>A Out B P\<close> l6_6 out_col)
ultimately
show ?thesis
by (meson perp_per_2 perp_col1)
qed
moreover
have "Cong A0 B0 B A"
using \<open>Cong A B A0 B0\<close> not_cong_4312 by blast
moreover
have "A A0 OS B0 B"
proof -
have "A A0 OS B0 P"
using \<open>A0 A1 OS B0 P\<close> \<open>Bet A0 A1 A\<close> bet_col
bet_neq32__neq col_one_side invert_one_side by blast
moreover
have "A Out P B"
using Out_cases \<open>A Out B P\<close> by blast
ultimately
show ?thesis
using out_out_one_side by blast
qed
hence "A0 A OS B0 B"
using invert_one_side by blast
ultimately
show ?thesis
using Saccheri_def by blast
qed
have "B0 B Le B0 E"
using \<open>Saccheri A0 B0 B1 A1\<close> \<open>Grad2 A0 A1 A B0 B1 E\<close>
\<open>Saccheri A0 B0 B A\<close> t22_18_aux2 by blast
have "B0 E Le A A0"
using Le_def \<open>Bet A0 D A\<close> \<open>Cong B0 E A D\<close>
between_symmetry by blast
hence "B0 B Le A A0"
using le_transitivity \<open>B0 B Le B0 E\<close> by blast
then obtain Q where "Bet A Q A0" and "Cong B0 B A Q"
using Le_def by blast
have "A0 D Le A0 Q"
proof -
have "Bet A0 Q A"
using Bet_cases \<open>Bet A Q A0\<close> by blast
moreover
have "Q A Le D A"
proof -
have "Cong B0 B Q A"
using Cong_cases \<open>Cong B0 B A Q\<close> by blast
moreover
have "Cong B0 E D A"
using Cong_cases \<open>Cong B0 E A D\<close> by blast
ultimately
show ?thesis
using \<open>B0 B Le B0 E\<close> l5_6 by blast
qed
moreover
have "A0 A Le A0 A"
using between_trivial2 l5_12_a by auto
ultimately
show ?thesis
using \<open>Bet A0 D A\<close> bet2_le2__le1245 by blast
qed
moreover
have "A \<noteq> A0"
using False \<open>Bet A0 D A\<close> \<open>Grad A0 D1 D\<close> between_identity
grad_neq__neq13 by blast
then obtain B0' where "A0 Out B0' A \<and> Cong A0 B0' A0 B0"
using \<open>A0 \<noteq> B0\<close> l6_11_existence by presburger
have "A0 Out B0' A"
using \<open>A0 Out B0' A \<and> Cong A0 B0' A0 B0\<close> by blast
have "Cong A0 B0' A0 B0"
using \<open>A0 Out B0' A \<and> Cong A0 B0' A0 B0\<close> by blast
obtain B' where "Bet A0 B0' B'" and "Cong B0' B' B0 B"
using segment_construction by blast
obtain A' where "Bet B0' B' A'" and "Cong B' A' B A"
using segment_construction by blast
have "A0 A Le A0 A'"
proof -
obtain B'' where "Bet A0 B0' B''" and "Cong B0' B'' B0 A"
using segment_construction by blast
have "A0 A Le A0 B''"
by (meson \<open>Bet A0 B0' B''\<close> \<open>Cong A0 B0' A0 B0\<close>
\<open>Cong B0' B'' B0 A\<close> cong__le3412 l2_11_b lt__le not_cong_3412
triangle_strict_inequality_2)
moreover
have "A0 B'' Le A0 A'"
proof -
have "B0' \<noteq> B'"
using sac_distincts
using \<open>Cong B0' B' B0 B\<close> \<open>Saccheri A0 B0 B A\<close>
cong_reverse_identity by blast
hence "Bet A0 B0' A'"
using \<open>Bet B0' B' A'\<close> \<open>Bet A0 B0' B'\<close> outer_transitivity_between
by blast
moreover
have "A0 B0' Le A0 B0'"
using local.le_cases by auto
moreover
have "B0' B'' Le B0' A'"
proof -
have "B0 A Le B0' A'"
proof -
have "Cong B0 B B0' B'"
using \<open>Cong B0' B' B0 B\<close> not_cong_3412 by blast
moreover
have "Cong B A B' A'"
using \<open>Cong B' A' B A\<close> cong_symmetry by presburger
ultimately
show ?thesis
using \<open>Bet B0' B' A'\<close> triangle_inequality_2 by blast
qed
moreover
have "Cong B0 A B0' B''"
using \<open>Cong B0' B'' B0 A\<close> cong_symmetry by blast
moreover
have "Cong B0' A' B0' A'"
by (simp add: cong_reflexivity)
ultimately
show ?thesis
using l5_6 by blast
qed
ultimately
show ?thesis
using \<open>Bet A0 B0' B''\<close> bet2_le2__le1346 by blast
qed
ultimately
show ?thesis
using le_transitivity by blast
qed
have "B0 B Le A' B0'"
proof -
have "B0' B' Le B0' A'"
by (simp add: \<open>Bet B0' B' A'\<close> l5_12_a)
moreover
have "Cong B0' A' A' B0'"
by (simp add: cong_pseudo_reflexivity)
ultimately
show ?thesis
using \<open>Cong B0' B' B0 B\<close> l5_6 by blast
qed
obtain Q' where "Bet A' Q' B0'" and "Cong B0 B A' Q'"
using Le_def \<open>B0 B Le A' B0'\<close> by blast
have "B0' \<noteq> B'"
using sac_distincts \<open>Cong B0' B' B0 B\<close> \<open>Saccheri A0 B0 B A\<close>
cong_reverse_identity by blast
hence "Bet A0 B0' A'"
using \<open>Bet A0 B0' B'\<close> \<open>Bet B0' B' A'\<close>
outer_transitivity_between by blast
have "A0 Q Lt A0 C"
proof -
have "A0 Q Le A0 Q'"
proof -
have "Bet A0 Q' A'"
using \<open>Bet A' Q' B0'\<close> \<open>Bet A0 B0' A'\<close> bet3__bet
between_trivial by blast
moreover
have "Bet A0 Q A"
using Bet_cases \<open>Bet A Q A0\<close> by auto
moreover
have "Q' A' Le Q A"
by (meson le_reflexivity \<open>Cong B0 B A Q\<close>
\<open>Cong B0 B A' Q'\<close> l5_6 le_comm)
ultimately
show ?thesis
using \<open>A0 A Le A0 A'\<close> bet2_le2__le1245 by blast
qed
moreover
have "A0 Q' Lt A0 C"
proof -
have "Bet A0 D1 C"
using \<open>Bet A0 C0 C\<close> \<open>Bet A0 D1 C0\<close> between_exchange4 by blast
hence "D1 C Lt A0 C"
by (simp add: False bet__lt2313)
moreover
have "Cong D1 C A0 Q'"
proof -
have "Bet D1 C0 C"
using \<open>Bet A0 C0 C\<close> \<open>Bet A0 D1 C0\<close> between_exchange3 by blast
moreover
have "Bet A0 B0' Q'"
using \<open>Bet A' Q' B0'\<close> \<open>Bet A0 B0' A'\<close> between_exchange3
between_symmetry by blast
moreover
have "Cong D1 C0 A0 B0'"
by (metis Cong_cases \<open>Cong A0 B0' A0 B0\<close>
\<open>Cong D1 C0 A0 B0\<close> cong_inner_transitivity)
moreover
have "Cong B0' Q' A B"
proof -
have "Cong B0' B' A' Q'"
using \<open>Cong B0 B A' Q'\<close> \<open>Cong B0' B' B0 B\<close>
cong_transitivity by blast
have "Cong B0' Q' A' B'"
proof (cases "Bet B0' Q' B'")
case True
hence "Bet A' B' Q'"
using Bet_perm \<open>Bet B0' B' A'\<close> between_exchange3 by blast
moreover
have "Cong Q' B' B' Q'"
by (simp add: cong_pseudo_reflexivity)
ultimately
show ?thesis
using \<open>Cong B0' B' A' Q'\<close> True l4_3 by blast
next
case False
hence "Q' \<noteq> B0'"
using between_trivial2 by blast
hence "A' \<noteq> B0'"
using \<open>Bet A' Q' B0'\<close> between_identity by blast
have "B0' Out B' Q'"
proof -
have "B0' Out B' A'"
using \<open>B0' \<noteq> B'\<close> \<open>Bet B0' B' A'\<close> bet_out by force
moreover
have "B0' Out A' Q'"
by (simp add: \<open>Bet A' Q' B0'\<close> \<open>Q' \<noteq> B0'\<close> bet_out_1 l6_6)
ultimately
show ?thesis
using l6_7 by blast
qed
hence "Bet B0' B' Q'"
using False Out_def by blast
moreover
have "Bet A' Q' B'"
using \<open>Bet A' Q' B0'\<close> between_exchange3
between_symmetry calculation by blast
moreover
have "Cong B' Q' Q' B'"
by (simp add: cong_pseudo_reflexivity)
ultimately
show ?thesis
using \<open>Cong B0' B' A' Q'\<close> l2_11_b by blast
qed
moreover
have "Cong A' B' A B"
using \<open>Cong B' A' B A\<close> not_cong_2143 by blast
ultimately
show ?thesis
using cong_transitivity by blast
qed
hence "Cong B0' Q' A0 B0"
using \<open>Cong A B A0 B0\<close> cong_transitivity by blast
hence "Cong C0 C B0' Q'"
using cong_inner_transitivity \<open>Cong C0 C A0 B0\<close> cong_symmetry by blast
ultimately
show ?thesis
using l2_11_b by blast
qed
moreover
have "Cong A0 C A0 C"
by (simp add: cong_reflexivity)
ultimately
show ?thesis
using cong2_lt__lt by blast
qed
ultimately
show ?thesis
by (meson le1234_lt__lt)
qed
ultimately
show ?thesis
using le1234_lt__lt by blast
qed
}
hence "\<forall> D. Grad A0 D1 D \<longrightarrow> A0 D Lt A0 C"
by blast
have "\<not> Cong B0 B1 A1 A0"
using \<open>B0 B1 Lt A1 A0\<close> cong__nlt lt__le by blast
have"A0 \<noteq> D1 \<longrightarrow> Reach A0 D1 A0 C"
using archimedes_axiom_def assms by blast
then obtain D where "Grad A0 D1 D \<and> A0 C Le A0 D"
using Reach_def \<open>Cong B0 B1 A1 D1\<close> \<open>\<not> Cong B0 B1 A1 A0\<close> by blast
hence "A0 D Lt A0 C"
by (simp add: \<open>\<forall>D. Grad A0 D1 D \<longrightarrow> A0 D Lt A0 C\<close>)
moreover
have "A0 C Lt A0 C"
using \<open>Grad A0 D1 D \<and> A0 C Le A0 D\<close> calculation lt__nle by blast
ultimately
have False
using nlt by auto
}
thus ?thesis
by blast
qed
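(** Under Archimedes' axiom, no Saccheri quadrilateral has an obtuse summit angle *)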
lemma t22_19:
assumes "archimedes_axiom"
shows "\<forall> A B C D. Saccheri A B C D \<longrightarrow> \<not> Obtuse A B C"
proof -
{
fix A B C D
assume "Saccheri A B C D"
hence "\<not> C B Lt A D"
using assms lt_comm t22_18 by blast
moreover
assume "Obtuse A B C"
hence "C B Lt A D"
using \<open>Saccheri A B C D\<close> lt_left_comm lt_sac__obtuse_aux2 by blast
ultimately
have False
by blast
}
thus ?thesis
by blast
qed
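(** Hence Archimedes' axiom refutes the hypothesis of obtuse Saccheri quadrilaterals *)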
lemma archi__obtuse_case_elimination:
assumes "archimedes_axiom"
shows "\<not> hypothesis_of_obtuse_saccheri_quadrilaterals"
proof -
have "\<not> (\<forall> A B C D. Saccheri A B C D \<longrightarrow> Obtuse A B C)"
using assms ex_saccheri t22_19 by blast
thus ?thesis
using hypothesis_of_obtuse_saccheri_quadrilaterals_def by blast
qed
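(** In a right triangle A B C (right angle at C), with M the midpoint of AB, N the foot on line AC of the perpendicular through M, and L the point symmetric to N about M: N lies between A and C, N L B C is a Lambert quadrilateral, and BL is congruent to AN *)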
lemma t22_23_aux:
assumes "\<not> Col A M N" and
"Per B C A" and
"A \<noteq> C" and
"M Midpoint A B" and
"Per M N A" and
"Col A C N" and
"M Midpoint N L"
shows "Bet A N C \<and> Lambert N L B C \<and> Cong B L A N"
proof -
have "A \<noteq> M"
using assms(1) col_trivial_1 by blast
have "N \<noteq> M"
using assms(1) col_trivial_2 by blast
have "A \<noteq> N"
using assms(1) col_trivial_3 by blast
have "L \<noteq> N"
using \<open>N \<noteq> M\<close> assms(7) l7_3 by blast
have "A \<noteq> B"
using \<open>A \<noteq> M\<close> assms(4) l7_3 by blast
hence "B \<noteq> M"
using assms(4) is_midpoint_id_2 by blast
have "Bet A N C"
proof -
have "Bet A M B"
by (simp add: assms(4) midpoint_bet)
moreover
have "Col A N C"
using assms(6) not_col_permutation_5 by blast
moreover
have "Per A N M"
by (simp add: assms(5) l8_2)
moreover
have "Per A C B"
by (simp add: assms(2) l8_2)
ultimately
show ?thesis
by (metis per23_preserves_bet \<open>A \<noteq> N\<close> assms(3))
qed
moreover
have "A M N CongA B M L"
using \<open>A \<noteq> M\<close> \<open>N \<noteq> M\<close> assms(4) assms(7) l7_3_2
symmetry_preserves_conga by blast
hence "Cong A N B L \<and> M A N CongA M B L \<and> M N A CongA M L B"
using per23_preserves_bet Cong_cases l11_49 \<open>A \<noteq> N\<close> assms(4)
assms(7) midpoint_cong by meson
have "Lambert N L B C"
proof -
{
assume "C = N"
have "M = B"
proof -
have "\<not> Col A M N"
by (simp add: assms(1))
moreover
have "N \<noteq> M"
by (simp add: \<open>N \<noteq> M\<close>)
moreover
have "Col A M M"
using not_col_distincts by blast
moreover
have "Bet A M B"
by (simp add: assms(4) midpoint_bet)
hence "Col A M B"
by (simp add: Col_def)
moreover
have "Col N M M"
by (simp add: col_trivial_2)
moreover
have "Coplanar A B M N"
by (meson \<open>A \<noteq> M\<close> calculation(3) calculation(4)
col__coplanar col_transitivity_1)
hence "Col N M B"
using \<open>C = N\<close> \<open>A \<noteq> N\<close> assms(2) assms(5) col_permutation_3
cop_per2__col by blast
moreover
show ?thesis
using calculation(1) calculation(4) calculation(6) l6_16_1 by blast
qed
hence False
using \<open>B \<noteq> M\<close> by auto
}
hence "C \<noteq> N"
by blast
moreover
have "L \<noteq> B"
using \<open>A \<noteq> N\<close> \<open>Cong A N B L \<and> M A N CongA M B L \<and> M N A CongA M L B\<close>
cong_diff by blast
moreover
have "B \<noteq> C"
using \<open>A \<noteq> B\<close> assms(1) assms(4) assms(6) col_permutation_1
l6_16_1 midpoint_col by blast
moreover
have "N \<noteq> L"
using \<open>L \<noteq> N\<close> by auto
moreover
have "Per L N C"
by (metis per_col \<open>A \<noteq> N\<close> \<open>N \<noteq> M\<close> assms(5) assms(6) assms(7)
col_per2__per l8_20_1_R1 midpoint_col not_col_permutation_1)
moreover
have "Per N C B"
by (metis per_col assms(2) assms(3) assms(6) col_permutation_4 l8_2)
moreover
have "Per N L B"
proof -
have "Per M N A"
by (simp add: assms(5))
moreover
have "M N A CongA N L B"
proof -
have "M N A CongA M L B"
by (simp add: \<open>Cong A N B L \<and> M A N CongA M B L \<and> M N A CongA M L B\<close>)
moreover
have "N Out M M"
using \<open>N \<noteq> M\<close> out_trivial by auto
moreover
have "N Out A A"
using \<open>A \<noteq> N\<close> \<open>Bet A N C\<close> \<open>C \<noteq> N\<close> l6_3_2 by blast
moreover
have "M \<noteq> L"
using \<open>N \<noteq> L\<close> assms(7) is_midpoint_id_2 by blast
have "Bet L M N"
using assms(7) Bet_perm Midpoint_def by blast
hence "L Out N M"
by (simp add: Out_def \<open>M \<noteq> L\<close> \<open>N \<noteq> L\<close>)
moreover
have "L Out B B"
using \<open>L \<noteq> B\<close> out_trivial by auto
ultimately
show ?thesis
using l11_10 by blast
qed
ultimately
show ?thesis
using l11_17 by blast
qed
moreover
have "Coplanar N L B C"
proof -
have "Coplanar B C N M"
proof -
have "Coplanar C N A B"
by (simp add: \<open>Bet A N C\<close> bet__coplanar between_symmetry)
moreover
have "Bet A M B"
by (simp add: assms(4) midpoint_bet)
hence "Col B A M"
using Col_def by auto
ultimately
show ?thesis
using \<open>A \<noteq> B\<close> col_cop__cop coplanar_perm_14
coplanar_perm_7 by blast
qed
moreover
have "Bet N M L"
using assms(7) midpoint_bet by blast
hence "Col N M L"
by (simp add: Col_def)
ultimately
show ?thesis
using \<open>N \<noteq> M\<close> col_cop__cop ncoplanar_perm_16 by blast
qed
ultimately
show ?thesis
using Lambert_def by blast
qed
moreover
have "Cong A N B L"
using \<open>Cong A N B L \<and> M A N CongA M B L \<and> M N A CongA M L B\<close> by blast
hence "Cong B L A N"
by (simp add: cong_symmetry)
ultimately
show ?thesis
by simp
qed
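(** If no Saccheri quadrilateral is obtuse, then in the configuration above one moreover has NC Le AN and LN Le BC *)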
lemma t22_23:
assumes "\<not> hypothesis_of_obtuse_saccheri_quadrilaterals"
shows "\<forall> A B C M N L.
\<not> Col A M N \<and> Per B C A \<and> A \<noteq> C \<and> M Midpoint A B \<and>
Per M N A \<and> Col A C N \<and> M Midpoint N L \<longrightarrow>
(Bet A N C \<and> N C Le A N \<and> L N Le B C)"
proof -
{
fix A B C M N L
assume "\<not> Col A M N" and
"Per B C A" and
"A \<noteq> C" and
"M Midpoint A B" and
"Per M N A" and
"Col A C N" and
"M Midpoint N L"
hence "Bet A N C \<and> Lambert N L B C \<and> Cong B L A N"
using t22_23_aux by blast
have "Bet A N C"
using \<open>Bet A N C \<and> Lambert N L B C \<and> Cong B L A N\<close> by blast
moreover
have "Lambert N L B C"
using \<open>Bet A N C \<and> Lambert N L B C \<and> Cong B L A N\<close> by blast
have "Cong B L A N"
using \<open>Bet A N C \<and> Lambert N L B C \<and> Cong B L A N\<close> by blast
have "\<not> Obtuse L B C"
using \<open>Bet A N C \<and> Lambert N L B C \<and> Cong B L A N\<close> assms
lam_obtuse__oah_1 by blast
have "N L OS B C"
using \<open>Lambert N L B C\<close> by (simp add: lam__os)
have "Lambert N C B L"
by (simp add: lam_perm \<open>Lambert N L B C\<close>)
hence "N C OS B L"
by (simp add: lam__os)
have "N C Le A N \<and> L N Le B C"
proof (cases "Acute L B C")
case True
have "N C Lt A N"
proof -
have "N C Lt B L"
by (simp add: acute_lam__lt True
\<open>Bet A N C \<and> Lambert N L B C \<and> Cong B L A N\<close> lt_right_comm)
moreover
have "Cong N C N C"
by (simp add: cong_reflexivity)
moreover
have "Cong B L A N"
by (simp add: \<open>Cong B L A N\<close>)
ultimately
show ?thesis
using cong2_lt__lt by blast
qed
moreover
have "N L Lt B C"
proof -
have "Per L N C"
by (metis Col_def \<open>Bet A N C\<close> \<open>M Midpoint N L\<close>
\<open>Per M N A\<close> \<open>\<not> Col A M N\<close> between_trivial col_per2__per
l8_2 l8_5 midpoint_col)
moreover
have "Per N C B"
by (metis \<open>A \<noteq> C\<close> \<open>Col A C N\<close> \<open>Per B C A\<close>
col_permutation_4 l8_2 per_col)
moreover
have "N C OS L B"
by (simp add: \<open>N C OS B L\<close> one_side_symmetry)
moreover
have "L B C LtA N L B"
proof -
have "Acute L B C"
by (simp add: True)
moreover
have "Per N L B"
using \<open>Lambert N L B C\<close> Lambert_def by blast
ultimately
show ?thesis
by (metis \<open>Bet A N C \<and> Lambert N L B C \<and> Cong B L A N\<close>
acute_per__lta lam__os os_distincts)
qed
ultimately
show ?thesis
by (simp add: lta_os_per2__lt)
qed
hence "L N Lt B C"
by (meson lt_left_comm)
ultimately
show ?thesis
by (simp add: lt__le)
next
case False
hence "Per L B C"
by (metis \<open>N C OS B L\<close> \<open>N L OS B C\<close> \<open>\<not> Obtuse L B C\<close>
angle_partition os_distincts)
have "Cong N C B L"
using \<open>Lambert N L B C\<close> \<open>Per L B C\<close> cong_right_commutativity
lam_per__cong by blast
hence "Cong N C A N"
using \<open>Cong B L A N\<close> cong_transitivity by blast
moreover
have "Cong L N B C"
using \<open>Lambert N C B L\<close> \<open>Per L B C\<close> cong_commutativity
l8_2 lam_per__cong by blast
ultimately
show ?thesis
by (simp add: cong__le)
qed
ultimately
have "Bet A N C \<and> N C Le A N \<and> L N Le B C"
by blast
}
thus ?thesis
by blast
qed
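(** Base case (n = 0) of the doubling argument: here B coincides with B0 and E with C0, and uniqueness of the perpendicular foot gives C = C0, so B0E Le BC holds by reflexivity of Le *)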
lemma t22_24_aux_0_a:
assumes (*"\<not> hypothesis_of_obtuse_saccheri_quadrilaterals" and*)
"\<not> Col A B0 C0" and
"A C0 Perp B0 C0" and
"B = GradExpn A B0 (Suc 0)" and
"E = GradExpn B0 C0 (Suc 0)" and
"A C0 Perp B C" and
"Col A C0 C"
shows "B0 E Le B C"
proof -
have "A \<noteq> B0"
using assms(1) col_trivial_1 by auto
hence "B = B0"
using assms(3) by simp
have "B0 \<noteq> C0"
using assms(1) col_trivial_2 by fastforce
hence "C0 = E"
using assms(4) by simp
thus ?thesis
using \<open>B = B0\<close> assms(2) assms(6) assms(5) col_trivial_2
l8_18_uniqueness le_reflexivity by blast
qed
lemma t22_24_aux_0:
(* assumes "\<not> hypothesis_of_obtuse_saccheri_quadrilaterals"*)
shows "\<forall> A B0 C0 B C E.
\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and>
(B = GradExpn A B0 (Suc 0)) \<and> (E = GradExpn B0 C0 (Suc 0)) \<and>
A C0 Perp B C \<and> Col A C0 C \<longrightarrow>
B0 E Le B C"
using t22_24_aux_0_a by blast
lemma t22_24_aux_suc:
assumes "\<not> hypothesis_of_obtuse_saccheri_quadrilaterals" and
"\<forall> A B0 C0 B C E.
\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and>
(B = GradExpn A B0 (Suc n)) \<and> (E = GradExpn B0 C0 (Suc n)) \<and>
A C0 Perp B C \<and> Col A C0 C \<longrightarrow>
B0 E Le B C"
shows
"\<forall> A B0 C0 B C E.
\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and>
(B = GradExpn A B0 (Suc(Suc n))) \<and> (E = GradExpn B0 C0 (Suc(Suc n))) \<and>
A C0 Perp B C \<and> Col A C0 C \<longrightarrow>
B0 E Le B C"
proof -
{
fix A B0 C0 B' C' E'
assume "\<not> Col A B0 C0" and
"A C0 Perp B0 C0" and
"B' = GradExpn A B0 (Suc(Suc n))" and
"E' = GradExpn B0 C0 (Suc(Suc n))" and
"A C0 Perp B' C'" and
"Col A C0 C'"
let ?B = "GradExpn A B0 (Suc n)"
have "Cong A ?B ?B B'"
using GradExpn_4 \<open>B' = GradExpn A B0 (Suc (Suc n))\<close> by presburger
have "Bet A ?B B'"
using GradExpn_3 \<open>B' = GradExpn A B0 (Suc (Suc n))\<close> by blast
have "Bet A B0 ?B"
using GradExpn_2 by auto
let ?E = "GradExpn B0 C0 (Suc n)"
have "Cong B0 ?E ?E E'"
using GradExpn_4 \<open>E' = GradExpn B0 C0 (Suc (Suc n))\<close> by presburger
have "Bet B0 ?E E'"
using GradExpn_3 \<open>E' = GradExpn B0 C0 (Suc (Suc n))\<close> by blast
have "\<not> Col A C0 ?B"
by (metis \<open>Bet A B0 (GradExpn A B0 (Suc n))\<close> \<open>\<not> Col A B0 C0\<close>
bet_col bet_neq12__neq colx not_col_distincts not_col_permutation_5)
then obtain C where "Col A C0 C" and "A C0 Perp ?B C"
using l8_18_existence by blast
have "B0 ?E Le ?B C"
using \<open>A C0 Perp B0 C0\<close> \<open>A C0 Perp (GradExpn A B0 (Suc n)) C\<close>
\<open>Col A C0 C\<close> \<open>\<not> Col A B0 C0\<close> assms(2) by blast
obtain D where "?B Midpoint C D"
using symmetric_point_construction by blast
have "B0 E' Le D C"
proof -
have "Bet B0 ?E E'"
using \<open>Bet B0 (GradExpn B0 C0 (Suc n)) E'\<close> by blast
moreover
have "Bet D ?B C"
by (metis midpoint_bet \<open>GradExpn A B0 (Suc n) Midpoint C D\<close>
between_symmetry)
moreover
have "B0 ?E Le D ?B"
by (meson \<open>B0 (GradExpn B0 C0 (Suc n)) Le (GradExpn A B0 (Suc n)) C\<close>
\<open>GradExpn A B0 (Suc n) Midpoint C D\<close> cong_pseudo_reflexivity
l5_6 le_right_comm midpoint_cong)
moreover
have "?E E' Le ?B C"
by (meson \<open>Cong B0 (GradExpn B0 C0 (Suc n)) (GradExpn B0 C0 (Suc n)) E'\<close>
\<open>GradExpn A B0 (Suc n) Midpoint C D\<close> calculation(3)
l5_6 l7_2 midpoint_cong)
ultimately
show ?thesis
using bet2_le2__le1346 by blast
qed
moreover
{
assume "A = C"
have "A = C0"
proof -
have "\<not> Col A C0 B0"
using \<open>\<not> Col A B0 C0\<close> col_permutation_5 by blast
moreover
have "Col A C0 A"
using col_trivial_3 by blast
moreover
have "A C0 Perp B0 A"
proof -
have "A C0 Perp A ?B"
using \<open>A = C\<close> \<open>A C0 Perp (GradExpn A B0 (Suc n)) C\<close>
perp_right_comm by blast
moreover
have "Col A ?B B0"
using \<open>Bet A B0 (GradExpn A B0 (Suc n))\<close> bet_col
not_col_permutation_5 by blast
ultimately
show ?thesis
using \<open>\<not> Col A C0 B0\<close> not_col_distincts perp_col2_bis by blast
qed
moreover
have "Col A C0 C0"
by (simp add: col_trivial_2)
ultimately
show ?thesis
using \<open>A C0 Perp B0 C0\<close> l8_18_uniqueness by blast
qed
hence False
using \<open>\<not> Col A B0 C0\<close> col_trivial_3 by blast
}
hence "A \<noteq> C"
by auto
{
assume "A = C'"
have "A = C0"
proof -
have "\<not> Col A C0 B0"
using \<open>\<not> Col A B0 C0\<close> col_permutation_5 by blast
moreover
have "Col A C0 A"
using col_trivial_3 by blast
moreover
have "A C0 Perp B0 A"
proof -
have "A C0 Perp A B'"
using \<open>A = C'\<close> \<open>A C0 Perp B' C'\<close> perp_right_comm by blast
moreover
have "Bet A B0 B'"
using \<open>B' = GradExpn A B0 (Suc(Suc n))\<close> GradExpn_2 by blast
hence "Col A B' B0"
by (simp add: bet_col col_permutation_5)
ultimately
show ?thesis
using \<open>\<not> Col A C0 B0\<close> not_col_distincts perp_col2_bis by blast
qed
moreover
have "Col A C0 C0"
by (simp add: col_trivial_2)
ultimately
show ?thesis
using \<open>A C0 Perp B0 C0\<close> l8_18_uniqueness by blast
qed
hence False
using \<open>\<not> Col A B0 C0\<close> col_trivial_3 by blast
}
hence "A \<noteq> C'"
by blast
have "Per A C ?B"
using \<open>Col A C0 C\<close> \<open>A C0 Perp (GradExpn A B0 (Suc n)) C\<close>
col_trivial_3 l8_16_1 l8_2 by blast
have "D C Le B' C'"
proof -
have "\<not> Col A ?B C"
using NCol_perm \<open>A \<noteq> C\<close> \<open>Col A C0 C\<close> \<open>\<not> Col A C0 (GradExpn A B0 (Suc n))\<close>
col_transitivity_1 by blast
moreover
have "C' B' Perp A C0"
using Perp_perm \<open>A C0 Perp B' C'\<close> by blast
hence "C' B' Perp A C'"
using \<open>A \<noteq> C'\<close> \<open>Col A C0 C'\<close> perp_col1 by blast
hence "B' C' Perp C' A"
using Perp_cases by blast
hence "Per B' C' A"
using \<open>C' B' Perp A C'\<close> perp_per_1 by blast
moreover
have "?B Midpoint A B'"
using \<open>Bet A (GradExpn A B0 (Suc n)) B'\<close>
\<open>Cong A (GradExpn A B0 (Suc n)) (GradExpn A B0 (Suc n)) B'\<close>
midpoint_def by blast
moreover
have "Col A C' C"
by (metis \<open>Col A C0 C'\<close> \<open>Col A C0 C\<close> \<open>\<not> Col A B0 C0\<close>
col_transitivity_1 not_col_distincts)
ultimately
show ?thesis using t22_23
using \<open>A \<noteq> C'\<close> \<open>GradExpn A B0 (Suc n) Midpoint C D\<close>
\<open>Per A C (GradExpn A B0 (Suc n))\<close> assms(1) l8_2 by blast
qed
ultimately
have "B0 E' Le B' C'"
using le_transitivity by blast
}
thus ?thesis
by blast
qed
lemma t22_24_aux_n:
assumes "\<not> hypothesis_of_obtuse_saccheri_quadrilaterals"
shows "\<forall> A B0 C0 B C E.
(\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and>
(B = GradExpn A B0 (Suc n)) \<and> (E = GradExpn B0 C0 (Suc n)) \<and>
A C0 Perp B C \<and> Col A C0 C) \<longrightarrow>
B0 E Le B C"
proof (induction n)
case 0
thus ?case
using t22_24_aux_0 by fastforce
next
case (Suc n)
thus ?case
using assms t22_24_aux_suc by presburger
qed
(** For every n, 2^n times B0C0 is less than or equal to BnCn *)
(** B0 is introduced twice for the induction tactic to work properly *)
lemma t22_24_aux:
assumes "\<not> hypothesis_of_obtuse_saccheri_quadrilaterals"
shows "\<forall> A B0 B00 C0 B C E.
\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and> B0 = B00 \<and>
GradExp2 A B0 B B00 C0 E \<and> A C0 Perp B C \<and> Col A C0 C \<longrightarrow>
B0 E Le B C"
proof -
{
fix A B0 B00 C0 B C E
assume " \<not> Col A B0 C0" and
"A C0 Perp B0 C0" and
"B0 = B00" and
"GradExp2 A B0 B B00 C0 E" and
"A C0 Perp B C" and
"Col A C0 C"
obtain n where "n \<noteq> 0 \<and> (B = GradExpn A B0 n) \<and> (E = GradExpn B00 C0 n)"
using GradExp2_def \<open>GradExp2 A B0 B B00 C0 E\<close> by presburger
hence "n \<noteq> 0"
by blast
then obtain m where "n = Suc m"
using not0_implies_Suc by blast
have "B = GradExpn A B0 (Suc m)"
by (simp add: \<open>n = Suc m\<close> \<open>n \<noteq> 0 \<and> B = GradExpn A B0 n \<and> E = GradExpn B00 C0 n\<close>)
moreover
have "E = GradExpn B0 C0 (Suc m)"
by (simp add: \<open>B0 = B00\<close> \<open>n = Suc m\<close>
\<open>n \<noteq> 0 \<and> B = GradExpn A B0 n \<and> E = GradExpn B00 C0 n\<close>)
ultimately
have "B0 E Le B C"
using \<open>A C0 Perp B C\<close> \<open>A C0 Perp B0 C0\<close> \<open>Col A C0 C\<close>
\<open>\<not> Col A B0 C0\<close> assms t22_24_aux_n by blast
}
thus ?thesis by blast
qed
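(** Base case for the construction of Bn and Cn: for n = 0 one can take B = B0 and C = C0 *)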
lemma t22_24_aux1_0:
shows"\<forall> A B0 C0 E.
(\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and>
E = GradExpn B0 C0 (Suc 0)) \<longrightarrow>
(\<exists> B C. GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C)"
proof -
{
fix A B0 C0 E
assume "\<not> Col A B0 C0" and
"A C0 Perp B0 C0" and
"E = GradExpn B0 C0 (Suc 0)"
have "B0 \<noteq> C0"
using \<open>\<not> Col A B0 C0\<close> not_col_distincts by blast
hence "E = C0"
using \<open>E = GradExpn B0 C0 (Suc 0)\<close> by simp
have "A \<noteq> B0"
using \<open>\<not> Col A B0 C0\<close> col_trivial_1 by blast
hence "B0 = GradExpn A B0 (Suc 0)"
by simp
have "GradExp2 A B0 B0 B0 C0 E"
using \<open>E = C0\<close> gradexp2_init by auto
moreover
have "Col A C0 C0"
by (simp add: col_trivial_2)
ultimately
have "\<exists> B C. GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C"
using \<open>A C0 Perp B0 C0\<close> by blast
}
thus ?thesis
by blast
qed
lemma t22_24_aux1_suc:
assumes "\<forall> A B0 C0 E.
(\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and>
E = GradExpn B0 C0 (Suc(n))) \<longrightarrow>
(\<exists> B C. GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C)"
shows "\<forall> A B0 C0 E.
(\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and>
E = GradExpn B0 C0 (Suc(Suc(n)))) \<longrightarrow>
(\<exists> B C. GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C)"
proof -
{
fix A B0 C0 E'
assume "\<not> Col A B0 C0" and
"A C0 Perp B0 C0" and
"E' = GradExpn B0 C0 (Suc(Suc(n)))"
let ?E = "GradExpn B0 C0 (Suc n)"
have "Bet B0 ?E E'"
using GradExpn_3 \<open>E' = GradExpn B0 C0 (Suc (Suc n))\<close> by blast
have "Cong B0 ?E ?E E'"
using GradExpn_4 \<open>E' = GradExpn B0 C0 (Suc (Suc n))\<close> by blast
obtain B C where "GradExp2 A B0 B B0 C0 ?E \<and> A C0 Perp B C \<and> Col A C0 C"
using \<open>A C0 Perp B0 C0\<close> \<open>\<not> Col A B0 C0\<close> assms by presburger
have "GradExp2 A B0 B B0 C0 ?E"
using \<open>GradExp2 A B0 B B0 C0 ?E \<and> A C0 Perp B C \<and> Col A C0 C\<close> by blast
hence "GradExp A B0 B"
by (simp add: gradexp2__gradexp123)
hence "Grad A B0 B"
by (simp add: gradexp__grad)
hence "Bet A B0 B"
by (simp add: grad__bet)
have "A C0 Perp B C"
using \<open>GradExp2 A B0 B B0 C0 ?E \<and> A C0 Perp B C \<and> Col A C0 C\<close> by blast
have "Col A C0 C"
using \<open>GradExp2 A B0 B B0 C0 ?E \<and> A C0 Perp B C \<and> Col A C0 C\<close> by blast
obtain B' where "Bet A B B' \<and> Cong B B' A B"
using segment_construction by presburger
have "Bet A B B'"
using \<open>Bet A B B' \<and> Cong B B' A B\<close> by blast
have "Cong B B' A B"
using \<open>Bet A B B' \<and> Cong B B' A B\<close> by blast
have "\<not> Col A C0 B'"
by (metis \<open>Bet A B0 B\<close> \<open>Bet A B B'\<close> \<open>\<not> Col A B0 C0\<close>
bet_col bet_neq12__neq col_trivial_3 colx not_col_permutation_5)
then obtain C' where "Col A C0 C' \<and> A C0 Perp B' C'"
using l8_18_existence by blast
moreover
have "GradExp2 A B0 B' B0 C0 E'"
proof -
have "GradExp2 A B0 B B0 C0 ?E"
using \<open>GradExp2 A B0 B B0 C0 (GradExpn B0 C0 (Suc n))\<close> by blast
moreover
have "Bet A B B'"
by (simp add: \<open>Bet A B B' \<and> Cong B B' A B\<close>)
moreover
have "Cong A B B B'"
using Cong_cases \<open>Cong B B' A B\<close> by blast
moreover
have "Bet B0 ?E E'"
using \<open>Bet B0 (GradExpn B0 C0 (Suc n)) E'\<close> by auto
moreover
have "Cong B0 ?E ?E E'"
using \<open>Cong B0 (GradExpn B0 C0 (Suc n)) (GradExpn B0 C0 (Suc n)) E'\<close>
by auto
ultimately
show ?thesis
using GradExp2_stab by blast
qed
ultimately
have "\<exists> B C. GradExp2 A B0 B B0 C0 E' \<and> A C0 Perp B C \<and> Col A C0 C"
by blast
}
thus ?thesis
by blast
qed
lemma t22_24_aux1_n:
shows "\<forall> A B0 C0 E.
(\<not> Col A B0 C0 \<and> A C0 Perp B0 C0 \<and>
E = GradExpn B0 C0 (Suc(n))) \<longrightarrow>
(\<exists> B C. GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C)"
proof (induction n)
case 0
thus ?case using t22_24_aux1_0 by blast
next
case (Suc n)
thus ?case using t22_24_aux1_suc by presburger
qed
(** For every n, the points Bn and Cn can be constructed *)
lemma t22_24_aux1:
assumes "\<not> Col A B0 C0" and
"A C0 Perp B0 C0" and
"GradExp B0 C0 E"
shows "\<exists> B C. GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C"
proof -
obtain n where "n \<noteq> 0 \<and> E = GradExpn B0 C0 n"
using GradExp_def assms(3) by presburger
hence "n \<noteq> 0"
by simp
then obtain m where "n = Suc m"
using not0_implies_Suc by presburger
thus ?thesis
using t22_24_aux1_n assms(1) assms(2) \<open>n \<noteq> 0 \<and> E = GradExpn B0 C0 n\<close> by blast
qed
subsubsection "Archimedes' and Aristotle's Axioms"
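(** Archimedes' axiom implies Aristotle's axiom *)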
lemma t22_24:
assumes "archimedes_axiom"
shows "aristotle_s_axiom"
proof -
{
fix P Q D A B0
assume "\<not> Col D A B0" and
"Acute D A B0"
obtain C0 where "Col A D C0" and "A D Perp B0 C0"
using Col_cases \<open>\<not> Col D A B0\<close> l8_18_existence by blast
hence "A \<noteq> C0"
using \<open>Acute D A B0\<close> acute_col_perp__out acute_sym
col_trivial_3 l6_3_1 by blast
{
assume "Col A B0 C0"
hence "Col C0 A B0"
using Col_cases by blast
hence "Col D A B0"
by (metis \<open>A D Perp B0 C0\<close> \<open>Acute D A B0\<close>
acute_col_perp__out acute_sym col_trivial_3 l6_3_1
not_col_permutation_1 perp_col1)
hence False
using \<open>\<not> Col D A B0\<close> by blast
}
hence "\<not> Col A B0 C0"
by blast
have "A C0 Perp B0 C0"
using \<open>Col A D C0\<close> \<open>A \<noteq> C0\<close> \<open>A D Perp B0 C0\<close> perp_col by blast
have "\<exists> X Y. A Out D X \<and> A Out B0 Y \<and> Per A X Y \<and> P Q Lt X Y"
proof (cases "P = Q")
case True
have "Acute B0 A D"
by (simp add: \<open>Acute D A B0\<close> acute_sym)
hence "A Out D C0"
using \<open>Col A D C0\<close> \<open>A D Perp B0 C0\<close>
acute_col_perp__out l6_6 by blast
moreover
have "A Out B0 B0"
by (metis \<open>\<not> Col D A B0\<close> col_trivial_2 out_trivial)
moreover
have "Per A C0 B0"
by (simp add: \<open>A C0 Perp B0 C0\<close> perp_comm perp_per_2)
moreover
have "\<not> Cong P P C0 B0"
using \<open>A C0 Perp B0 C0\<close> cong_reverse_identity
perp_not_eq_2 by blast
hence "P P Lt C0 B0"
by (metis cong_trivial_identity lt1123)
ultimately
show ?thesis
using True by blast
next
case False
obtain Q' where "Bet P Q Q'" and "Cong Q Q' P Q"
using segment_construction by blast
have "B0 \<noteq> C0"
using \<open>\<not> Col A B0 C0\<close> col_trivial_2 by blast
hence "Reach B0 C0 P Q'"
using archimedes_axiom_def assms by blast
then obtain E where "GradExp B0 C0 E" and "P Q' Le B0 E"
using reach__ex_gradexp_le by blast
obtain B C where "GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C"
using \<open>A C0 Perp B0 C0\<close> \<open>GradExp B0 C0 E\<close> \<open>\<not> Col A B0 C0\<close>
t22_24_aux1 by blast
have "GradExp2 A B0 B B0 C0 E"
using \<open>GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C\<close> by blast
hence "Grad A B0 B"
using gradexp2__gradexp123 gradexp__grad by blast
hence "Bet A B0 B"
by (simp add: grad__bet)
have "A C0 Perp B C"
using \<open>GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C\<close> by blast
have "Col A C0 C"
using \<open>GradExp2 A B0 B B0 C0 E \<and> A C0 Perp B C \<and> Col A C0 C\<close> by blast
have "A Out B0 B"
by (metis \<open>Bet A B0 B\<close> \<open>\<not> Col D A B0\<close> bet_out col_trivial_2)
have "Acute D A B"
by (metis \<open>Acute D A B0\<close> \<open>Bet A B0 B\<close> \<open>\<not> Col D A B0\<close>
acute_col__out acute_out2__acute acute_trivial bet2__out
bet_neq21__neq l5_1 not_col_distincts)
{
assume "A = C"
have "Per D A B"
proof -
have "A C0 Perp B A"
using \<open>A = C\<close> \<open>A C0 Perp B C\<close> by auto
moreover
have "Col A C0 D"
using Col_cases \<open>Col A D C0\<close> by blast
ultimately
show ?thesis
by (meson col_trivial_3 l8_16_1 l8_2)
qed
hence False
using \<open>Acute D A B\<close> acute_not_per by auto
}
hence "A \<noteq> C"
by blast
have "A Out D C"
proof -
have "Acute B A D"
by (simp add: \<open>Acute D A B\<close> acute_sym)
moreover
have "Col A D C"
using \<open>A \<noteq> C0\<close> \<open>Col A C0 C\<close> \<open>Col A D C0\<close> col_trivial_3
colx by blast
moreover
have "A D Perp B C"
by (metis \<open>A C0 Perp B C\<close> \<open>Col A D C0\<close> \<open>\<not> Col D A B0\<close>
col_trivial_3 not_col_permutation_5 perp_col)
ultimately
show ?thesis
using acute_col_perp__out l6_6 by blast
qed
moreover
have "A Out B0 B"
by (simp add: \<open>A Out B0 B\<close>)
moreover
have "Per A C B"
using \<open>A C0 Perp B C\<close> \<open>Col A C0 C\<close> col_trivial_3
l8_16_1 l8_2 by blast
moreover
have "P Q Lt C B"
proof -
have "P Q Lt P Q'"
by (metis False bet__lt1213 cong_diff_3
\<open>Bet P Q Q'\<close> \<open>Cong Q Q' P Q\<close>)
moreover
have "B0 E Le B C"
proof -
have "\<not> hypothesis_of_obtuse_saccheri_quadrilaterals"
by (simp add: archi__obtuse_case_elimination assms)
moreover
have "\<not> Col A B0 C0"
by (simp add: \<open>\<not> Col A B0 C0\<close>)
moreover
have "A C0 Perp B0 C0"
by (simp add: \<open>A C0 Perp B0 C0\<close>)
moreover
have "GradExp2 A B0 B B0 C0 E"
by (simp add: \<open>GradExp2 A B0 B B0 C0 E\<close>)
moreover
have "A C0 Perp B C"
by (simp add: \<open>A C0 Perp B C\<close>)
moreover
have "Col A C0 C"
using \<open>Col A C0 C\<close> by auto
ultimately
show ?thesis
using t22_24_aux by blast
qed
hence "B0 E Le C B"
by (simp add: le_right_comm)
hence "P Q' Le C B"
using \<open>P Q' Le B0 E\<close> le_transitivity by blast
ultimately
show ?thesis
using le3456_lt__lt by blast
qed
ultimately
show ?thesis
by blast
qed
}
thus ?thesis
by (simp add: aristotle_s_axiom_def)
qed
subsection "Equivalence of Grad and GradI (inductive)"
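(** GradI is an inductive counterpart of Grad: starting from B, each step extends the current point C to C' with Bet A C C' and Cong A B C C'; the equivalence with Grad is proved below as theorem Grad_GradI *)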
inductive GradI :: "[TPoint,TPoint,TPoint] \<Rightarrow> bool" for A B
where
gradi_init : "GradI A B B"
| gradi_stab : "GradI A B C'" if
"GradI A B C"
and "Bet A C C'"
and "Cong A B C C'"
lemma Grad1__GradI:
shows "GradI A B (Gradn A B 1)"
proof -
have "B = Gradn A B 1"
by force
thus ?thesis
using gradi_init by auto
qed
lemma Gradn__GradI:
shows "GradI A B (Gradn A B (Suc n))"
proof (induction n)
show "GradI A B (Gradn A B (Suc 0))"
by (simp add: gradi_init)
{
fix n
assume "GradI A B (Gradn A B (Suc n))"
have "GradI A B (Gradn A B (Suc (Suc n)))"
proof(rule GradI.induct [where ?A ="A" and ?B="B"])
show "GradI A B (Gradn A B (Suc (Suc n)))"
using Bet_Gradn_Gradn_Suc Cong_Gradn_Gradn_Suc GradI.simps
\<open>GradI A B (Gradn A B (Suc n))\<close> by blast
show "GradI A B B"
by (simp add: gradi_init)
show "\<And>C C'. GradI A B C \<Longrightarrow> GradI A B C \<Longrightarrow> Bet A C C' \<Longrightarrow> Cong A B C C' \<Longrightarrow> GradI A B C'"
using gradi_stab by blast
qed
}
thus "\<And>n. GradI A B (Gradn A B (Suc n)) \<Longrightarrow> GradI A B (Gradn A B (Suc (Suc n)))"
by blast
qed
lemma Grad__GradI:
assumes "Grad A B C"
shows "GradI A B C"
proof (cases "B = C")
case True
thus ?thesis
by (simp add: gradi_init)
next
case False
obtain n where "n \<noteq> 0" and "C = Gradn A B n"
using Grad_def assms by blast
then obtain m where "n = Suc m"
using not0_implies_Suc by blast
hence "C = Gradn A B (Suc m)"
using \<open>C = Gradn A B n\<close> by blast
thus ?thesis
using Gradn__GradI by blast
qed
lemma GradIAAB__not:
assumes "GradI A A B"
shows "A = B"
proof (rule GradI.induct [OF assms])
show "A = A"
by blast
show "\<And>C C'. GradI A A C \<Longrightarrow> A = C \<Longrightarrow> Bet A C C' \<Longrightarrow> Cong A A C C' \<Longrightarrow> A = C'"
using between_cong by presburger
qed
lemma GradI__Grad:
assumes "GradI A B C"
shows "Grad A B C"
proof (cases "A = B")
case True
hence "A = C"
using GradIAAB__not assms by blast
thus ?thesis
using True grad_equiv_coq_1 by blast
next
case False
show ?thesis
proof (rule GradI.induct [where ?A="A" and ?B="B"])
show "GradI A B C"
by (simp add: assms)
show "Grad A B B"
by (simp add: grad_equiv_coq_1)
show "\<And>C C'. GradI A B C \<Longrightarrow> Grad A B C \<Longrightarrow> Bet A C C' \<Longrightarrow> Cong A B C C' \<Longrightarrow> Grad A B C'"
using grad_stab by blast
qed
qed
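(** The recursive predicate Grad and the inductive predicate GradI coincide *)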
theorem Grad_GradI:
shows "Grad A B C \<longleftrightarrow> GradI A B C"
using GradI__Grad Grad__GradI by blast
subsection "GradA"
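(** GradA A B C D E F expresses that the angle D E F is a multiple of the angle A B C: it is built by repeatedly adding A B C with SumA, each step guarded by the SAMS side condition *)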
inductive GradA :: "[TPoint,TPoint,TPoint,TPoint,TPoint,TPoint] \<Rightarrow> bool" for A B C
where
grada_init : "GradA A B C D E F" if
"A B C CongA D E F"
| grada_stab : "GradA A B C G H I" if
"GradA A B C D E F"
and "SAMS D E F A B C"
and "D E F A B C SumA G H I"
inductive GradAExp :: "[TPoint,TPoint,TPoint,TPoint,TPoint,TPoint] \<Rightarrow> bool" for A B C
where
gradaexp_init : "GradAExp A B C D E F" if
"A B C CongA D E F"
| gradaexp_stab : "GradAExp A B C G H I" if
"GradAExp A B C D E F"
and "SAMS D E F D E F"
and "D E F D E F SumA G H I"
lemma grada_distincts:
assumes "GradA A B C D E F"
shows "A \<noteq> B \<and> C \<noteq> B \<and> D \<noteq> E \<and> F \<noteq> E"
proof(induction rule: GradA.induct [OF assms(1)])
show "\<And>D E F. A B C CongA D E F \<Longrightarrow> A \<noteq> B \<and> C \<noteq> B \<and> D \<noteq> E \<and> F \<noteq> E"
by (simp add: CongA_def)
show "\<And>D E F G H I.
GradA A B C D E F \<Longrightarrow>
A \<noteq> B \<and> C \<noteq> B \<and> D \<noteq> E \<and> F \<noteq> E \<Longrightarrow>
SAMS D E F A B C \<Longrightarrow>
D E F A B C SumA G H I \<Longrightarrow> A \<noteq> B \<and> C \<noteq> B \<and> G \<noteq> H \<and> I \<noteq> H"
using suma_distincts by blast
qed
lemma grada_ABC:
assumes "A \<noteq> B" and
"B \<noteq> C"
shows "GradA A B C A B C"
proof -
have "A B C CongA A B C"
using assms(1) assms(2) conga_refl by force
thus ?thesis
using grada_init
by force
qed
lemma gradaexp_distincts:
assumes "GradAExp A B C D E F"
shows "A \<noteq> B \<and> C \<noteq> B \<and> D \<noteq> E \<and> F \<noteq> E"
proof(induction rule: GradAExp.induct [OF assms(1)])
show "\<And>D E F. A B C CongA D E F \<Longrightarrow> A \<noteq> B \<and> C \<noteq> B \<and> D \<noteq> E \<and> F \<noteq> E"
by (simp add: CongA_def)
show "\<And>D E F G H I.
GradAExp A B C D E F \<Longrightarrow>
A \<noteq> B \<and> C \<noteq> B \<and> D \<noteq> E \<and> F \<noteq> E \<Longrightarrow>
SAMS D E F D E F \<Longrightarrow> D E F D E F SumA G H I \<Longrightarrow> A \<noteq> B \<and> C \<noteq> B \<and> G \<noteq> H \<and> I \<noteq> H"
using suma_distincts by blast
qed
lemma gradaexp_ABC:
assumes "A \<noteq> B" and
"B \<noteq> C"
shows "GradAExp A B C A B C"
proof -
have "A B C CongA A B C"
using assms(1) assms(2) conga_refl by force
thus ?thesis
using gradaexp_init
by force
qed
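(** GradA is compatible with angle congruence in both of its angle arguments (the two directions are combined below in conga2_grada__grada) *)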
lemma conga2_grada__grada_aux1:
assumes "GradA A B C D E F" and
"A B C CongA A' B' C'"
shows "GradA A' B' C' D E F"
proof (induction rule: GradA.cases [OF assms(1)])
{
assume "A B C CongA D E F"
hence "GradA A' B' C' D E F"
using grada_init
by (meson assms(2) not_conga not_conga_sym)
}
thus "\<And>Da Ea Fa.
D = Da \<Longrightarrow>
E = Ea \<Longrightarrow> F = Fa \<Longrightarrow> A B C CongA Da Ea Fa \<Longrightarrow> GradA A' B' C' D E F"
by blast
{
fix Da Ea Fa
assume 1: "GradA A B C Da Ea Fa" and
2: "SAMS Da Ea Fa A B C" and
3: "Da Ea Fa A B C SumA D E F"
have "GradA A' B' C' D E F"
proof (rule grada_stab)
let ?D = "Da"
let ?E = "Ea"
let ?F = "Fa"
show "GradA A' B' C' ?D ?E ?F"
proof (rule GradA.inducts)
show "GradA A B C Da Ea Fa"
by (simp add: "1")
show "\<And>D E F. A B C CongA D E F \<Longrightarrow> GradA A' B' C' D E F"
by (meson conga_trans not_conga_sym assms(2) grada_init)
{
fix D E F G H I
assume "GradA A B C D E F" and
A: "GradA A' B' C' D E F" and
B: "SAMS D E F A B C" and
C: "D E F A B C SumA G H I"
have "GradA A' B' C' G H I"
proof (rule grada_stab)
show "GradA A' B' C' D E F"
by (simp add: A)
show "SAMS D E F A' B' C'" using B
by (meson C conga2_sams__sams assms(2) sams2_suma2__conga123)
show "D E F A' B' C' SumA G H I" using C
by (meson B conga3_suma__suma sams2_suma2__conga123 suma2__conga assms(2))
qed
}
thus "\<And>D E F G H I.
GradA A B C D E F \<Longrightarrow>
GradA A' B' C' D E F \<Longrightarrow>
SAMS D E F A B C \<Longrightarrow>
D E F A B C SumA G H I \<Longrightarrow> GradA A' B' C' G H I"
by blast
qed
show "SAMS ?D ?E ?F A' B' C'" using 2
by (meson "3" sams2_suma2__conga123 assms(2) conga2_sams__sams)
show "?D ?E ?F A' B' C' SumA D E F" using 3
by (meson "2" sams2_suma2__conga123 assms(2) conga3_suma__suma suma2__conga)
qed
}
thus "\<And>Da Ea Fa G H I.
D = G \<Longrightarrow>
E = H \<Longrightarrow>
F = I \<Longrightarrow>
GradA A B C Da Ea Fa \<Longrightarrow>
SAMS Da Ea Fa A B C \<Longrightarrow>
Da Ea Fa A B C SumA G H I \<Longrightarrow> GradA A' B' C' D E F "
by blast
qed
lemma conga2_grada__grada_aux2:
assumes "GradA A B C D E F" and
"D E F CongA D' E' F'"
shows "GradA A B C D' E' F'"
proof (induction rule: GradA.cases [OF assms(1)])
{
assume "A B C CongA D E F"
hence "A B C CongA D' E' F'"
by (metis conga_trans assms(2))
hence "GradA A B C D' E' F'"
by (simp add: grada_init)
}
thus "\<And>Da Ea Fa.
D = Da \<Longrightarrow>
E = Ea \<Longrightarrow> F = Fa \<Longrightarrow> A B C CongA Da Ea Fa \<Longrightarrow> GradA A B C D' E' F'"
by blast
{
fix Da Ea Fa
assume 1: "GradA A B C Da Ea Fa" and
2: "SAMS Da Ea Fa A B C" and
3: "Da Ea Fa A B C SumA D E F"
have "GradA A B C D' E' F'"
proof (rule grada_stab)
show "GradA A B C Da Ea Fa"
by (simp add: "1")
show "SAMS Da Ea Fa A B C"
by (simp add: "2")
show "Da Ea Fa A B C SumA D' E' F'"
by (meson "2" "3" sams2_suma2__conga123 assms(2)
conga3_suma__suma sams2_suma2__conga456)
qed
}
thus "\<And>Da Ea Fa G H I.
D = G \<Longrightarrow>
E = H \<Longrightarrow>
F = I \<Longrightarrow>
GradA A B C Da Ea Fa \<Longrightarrow>
SAMS Da Ea Fa A B C \<Longrightarrow>
Da Ea Fa A B C SumA G H I \<Longrightarrow> GradA A B C D' E' F'"
by blast
qed
lemma conga2_grada__grada:
assumes "GradA A B C D E F" and
"A B C CongA A' B' C'" and
"D E F CongA D' E' F'"
shows "GradA A' B' C' D' E' F'"
using assms(1) assms(2) assms(3)
conga2_grada__grada_aux1 conga2_grada__grada_aux2 by blast
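(** Every GradA-multiple of A B C is at least A B C for the LeA order *)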
lemma grada__lea:
assumes "GradA A B C D E F"
shows "A B C LeA D E F"
proof (induction rule: GradA.cases [OF assms(1)])
case (1 D E F)
then show ?case
by (simp add: conga__lea)
next
case (2 D E F G H I)
thus ?case
by (metis sams_suma__lea456789)
qed
lemma grada_out__out:
assumes "E Out D F" and
"GradA A B C D E F"
shows "B Out A C"
proof (induction rule: GradA.cases [OF assms(2)])
case (1 D E F)
then show ?case
by (metis not_conga_sym assms(1) l11_21_a)
next
case (2 D E F G H I)
then show ?case
by (metis sams_suma__lea456789 assms(1) out_lea__out)
qed
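(** The sum of two GradA-multiples of A B C is again a GradA-multiple of A B C, provided the SAMS condition holds for the two summands *)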
lemma grada2_sams_suma__grada_aux:
shows "\<forall> A B C D E F G H I K L M.
GradA A B C D E F \<and> GradA A B C G H I \<and>
SAMS D E F G H I \<and> D E F G H I SumA K L M \<longrightarrow> GradA A B C K L M"
proof -
{
fix A B C D E F G' H' I' K L M
assume K1: "GradA A B C D E F" and
K2: "GradA A B C G' H' I'"
have "\<forall> K L M. (SAMS D E F G' H' I' \<and> D E F G' H' I' SumA K L M) \<longrightarrow> GradA A B C K L M"
proof(induction rule: GradA.induct [OF K2])
{
fix Da Ea Fa
assume "A B C CongA Da Ea Fa"
{
fix K L M
assume "SAMS D E F Da Ea Fa" and "D E F Da Ea Fa SumA K L M"
have "SAMS D E F A B C"
by (meson conga2_sams__sams not_conga_sym sams2_suma2__conga123
\<open>A B C CongA Da Ea Fa\<close> \<open>D E F Da Ea Fa SumA K L M\<close> \<open>SAMS D E F Da Ea Fa\<close>)
moreover have "D E F A B C SumA K L M"
by (meson conga3_suma__suma not_conga_sym sams2_suma2__conga123
\<open>A B C CongA Da Ea Fa\<close> \<open>D E F Da Ea Fa SumA K L M\<close> \<open>SAMS D E F Da Ea Fa\<close>
suma2__conga)
ultimately have "GradA A B C K L M"
using K1 grada_stab by blast
}
hence "\<forall>K L M. (SAMS D E F Da Ea Fa \<and> D E F Da Ea Fa SumA K L M) \<longrightarrow> GradA A B C K L M"
by blast
}
thus "\<And>Da Ea Fa.
A B C CongA Da Ea Fa \<Longrightarrow>
\<forall>K L M. (SAMS D E F Da Ea Fa \<and> D E F Da Ea Fa SumA K L M) \<longrightarrow> GradA A B C K L M"
by blast
{
fix G H I
{
fix Da Ea Fa
assume "GradA A B C Da Ea Fa" and
"SAMS Da Ea Fa A B C" and
"Da Ea Fa A B C SumA G H I" and
P1: "\<forall>K L M. SAMS D E F Da Ea Fa \<and> D E F Da Ea Fa SumA K L M \<longrightarrow> GradA A B C K L M"
{
fix K0 L0 M0
assume "SAMS D E F G H I" and
"D E F G H I SumA K0 L0 M0"
have "Da \<noteq> Ea"
using \<open>SAMS Da Ea Fa A B C\<close> sams_distincts by auto
have "Fa \<noteq> Ea"
using \<open>GradA A B C Da Ea Fa\<close> grada_distincts by blast
have "D \<noteq> E"
using \<open>SAMS D E F G H I\<close> sams_distincts by blast
have "E \<noteq> F"
using \<open>SAMS D E F G H I\<close> sams_distincts by blast
obtain K L M where "D E F Da Ea Fa SumA K L M"
using ex_suma \<open>D \<noteq> E\<close> \<open>Da \<noteq> Ea\<close> \<open>E \<noteq> F\<close> \<open>Fa \<noteq> Ea\<close> by presburger
have "SAMS D E F Da Ea Fa"
proof (rule sams_lea2__sams [where ?A'="D" and ?B'="E" and ?C'="F" and
?D'="G" and ?E'="H" and ?F'="I"])
show "SAMS D E F G H I"
using \<open>SAMS D E F G H I\<close> by blast
show "D E F LeA D E F"
using \<open>D \<noteq> E\<close> \<open>E \<noteq> F\<close> lea_refl by force
show "Da Ea Fa LeA G H I"
proof (rule sams_suma__lea123789 [where ?D="A" and ?E="B" and ?F="C"])
show "Da Ea Fa A B C SumA G H I"
using \<open>Da Ea Fa A B C SumA G H I\<close> by blast
show "SAMS Da Ea Fa A B C"
by (simp add: \<open>SAMS Da Ea Fa A B C\<close>)
qed
qed
have "GradA A B C K0 L0 M0"
proof (rule grada_stab [where ?D = "K" and ?E = "L" and ?F = "M"])
show "GradA A B C K L M"
using P1 \<open>D E F Da Ea Fa SumA K L M\<close> \<open>SAMS D E F Da Ea Fa\<close> by blast
show "SAMS K L M A B C"
using sams_assoc_2 [where ?A="D" and ?B="E" and ?C="F" and
?D="Da" and ?E="Ea" and ?F="Fa" and ?D'="G" and ?E'="H" and ?F'="I"]
using \<open>D E F Da Ea Fa SumA K L M\<close> \<open>Da Ea Fa A B C SumA G H I\<close>
\<open>SAMS D E F Da Ea Fa\<close> \<open>SAMS D E F G H I\<close> \<open>SAMS Da Ea Fa A B C\<close> by blast
show "K L M A B C SumA K0 L0 M0"
by (meson suma_assoc_2 \<open>D E F Da Ea Fa SumA K L M\<close>
\<open>D E F G H I SumA K0 L0 M0\<close> \<open>Da Ea Fa A B C SumA G H I\<close> \<open>SAMS D E F Da Ea Fa\<close>
\<open>SAMS Da Ea Fa A B C\<close>)
qed
}
hence "\<forall>K L M. SAMS D E F G H I \<and> D E F G H I SumA K L M \<longrightarrow> GradA A B C K L M"
by blast
}
hence "\<And> Da Ea Fa. GradA A B C Da Ea Fa \<and> SAMS Da Ea Fa A B C \<and>
Da Ea Fa A B C SumA G H I \<and>
(\<forall>K L M. SAMS D E F Da Ea Fa \<and> D E F Da Ea Fa SumA K L M \<longrightarrow> GradA A B C K L M) \<longrightarrow>
(\<forall>K L M. SAMS D E F G H I \<and> D E F G H I SumA K L M \<longrightarrow> GradA A B C K L M)"
by blast
}
(* hence "\<And>G H I. (\<And> Da Ea Fa.
GradA A B C Da Ea Fa \<Longrightarrow>
(\<forall>K L M. SAMS D E F Da Ea Fa \<and> D E F Da Ea Fa SumA K L M \<longrightarrow> GradA A B C K L M) \<Longrightarrow>
SAMS Da Ea Fa A B C \<Longrightarrow>
Da Ea Fa A B C SumA G H I \<Longrightarrow>
(\<forall>K L M. SAMS D E F G H I \<and> D E F G H I SumA K L M \<longrightarrow> GradA A B C K L M))"
by blast*)
(* hence "\<And>G H I Da Ea Fa.
GradA A B C Da Ea Fa \<Longrightarrow>
(\<forall>K L M. SAMS D E F Da Ea Fa \<and> D E F Da Ea Fa SumA K L M \<longrightarrow> GradA A B C K L M) \<Longrightarrow>
SAMS Da Ea Fa A B C \<Longrightarrow>
Da Ea Fa A B C SumA G H I \<Longrightarrow>
(\<forall>K L M. SAMS D E F G H I \<and> D E F G H I SumA K L M \<longrightarrow> GradA A B C K L M)"
by blast*)
thus "\<And>Da Ea Fa G H I.
GradA A B C Da Ea Fa \<Longrightarrow>
(\<forall>K L M. SAMS D E F Da Ea Fa \<and> D E F Da Ea Fa SumA K L M \<longrightarrow> GradA A B C K L M) \<Longrightarrow>
SAMS Da Ea Fa A B C \<Longrightarrow>
Da Ea Fa A B C SumA G H I \<Longrightarrow>
(\<forall>K L M. SAMS D E F G H I \<and> D E F G H I SumA K L M \<longrightarrow> GradA A B C K L M)"
by blast
qed
}
thus ?thesis by blast
qed
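(** The sum of two gradations of the angle A B C is again a gradation of A B C,
    provided the two angles can be added without exceeding a straight angle (SAMS). *)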
lemma grada2_sams_suma__grada:
assumes "GradA A B C D E F" and
"GradA A B C G H I" and
"SAMS D E F G H I" and
"D E F G H I SumA K L M"
shows "GradA A B C K L M"
using assms(1) assms(2) assms(3) assms(4) grada2_sams_suma__grada_aux by blast
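(** Every angle reached by repeated doubling of A B C (GradAExp) is in particular
    a gradation (GradA) of A B C. *)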
lemma gradaexp__grada:
assumes "GradAExp A B C D E F"
shows "GradA A B C D E F"
proof (rule GradAExp.induct [OF assms])
show "\<And>D E F. A B C CongA D E F \<Longrightarrow> GradA A B C D E F"
by (simp add: grada_init)
show " \<And>D E F G H I.
GradAExp A B C D E F \<Longrightarrow>
GradA A B C D E F \<Longrightarrow> SAMS D E F D E F \<Longrightarrow> D E F D E F SumA G H I \<Longrightarrow> GradA A B C G H I"
using grada2_sams_suma__grada_aux by blast
qed
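(** Metric step: with a right angle at A and the points A, C, D, E, B in this order
    on the leg, congruent angles at PO over C D and over D E force C D to be
    strictly shorter than D E. *)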
lemma acute_archi_aux:
assumes "Per PO A B" and
"PO \<noteq> A" and
"B \<noteq> A" and
"C \<noteq> D" and
"D \<noteq> E" and
"Bet A C D" and
"Bet C D E" and
"Bet D E B" and
"C PO D CongA D PO E"
shows "C D Lt D E"
proof -
have "D \<noteq> A"
using assms(4) assms(6) between_identity by blast
have "C \<noteq> PO"
using assms(9) conga_diff1 by auto
have "D \<noteq> PO"
using assms(9) conga_diff45 by blast
have "\<not> Col PO A B"
by (metis assms(1) assms(2) assms(3) l8_8 not_col_permutation_2 per_col)
hence "\<not> Col PO A D"
by (metis Col_def \<open>D \<noteq> A\<close> assms(4) assms(5) assms(6) assms(7) assms(8) l6_16_1)
have "\<not> Col PO D E"
by (metis Col_def \<open>\<not> Col PO A D\<close> assms(4) assms(5) assms(6) assms(7) l6_16_1)
then obtain P where "A D PO CongA PO D P" and "PO D OS P E"
using \<open>\<not> Col PO A D\<close> angle_construction_1 not_col_permutation_1 by blast
have "Acute A D PO"
by (metis \<open>D \<noteq> A\<close> assms(1) assms(2) assms(3) assms(4) assms(5) assms(6) assms(7)
assms(8) bet_col1 between_trivial l11_43 outer_transitivity_between per_col)
hence "Acute A D PO \<longleftrightarrow> A D PO LtA E D PO"
by (metis acute_chara_1 assms(4) assms(5) assms(6) assms(7) outer_transitivity_between2)
have "A D PO LeA E D PO"
by (metis lta__lea outer_transitivity_between2 \<open>Acute A D PO\<close> acute_chara_1
assms(4) assms(5) assms(6) assms(7))
hence "PO D P LeA PO D E"
by (meson lea_right_comm lea_trans \<open>A D PO CongA PO D P\<close> conga__lea456123)
hence "P InAngle PO D E"
using \<open>PO D OS P E\<close> lea_in_angle one_side_symmetry by presburger
have "P \<noteq> D"
using \<open>PO D OS P E\<close> os_distincts by blast
obtain F where "Bet PO F E" and "D Out F P"
using InAngle_def \<open>P InAngle PO D E\<close> \<open>\<not> Col PO D E\<close> bet_col by auto
have "A D PO CongA PO D F"
by (metis out2__conga \<open>A D PO CongA PO D P\<close> \<open>D Out F P\<close> \<open>D \<noteq> PO\<close>
bet_out_1 conga_trans not_bet_distincts)
have "D Out A C"
by (simp add: assms(4) assms(6) bet_out_1 l6_6)
have "D Out PO PO"
using \<open>D \<noteq> PO\<close> out_trivial by auto
have "PO D C CongA A D PO"
by (simp add: out2__conga \<open>D Out A C\<close> \<open>D Out PO PO\<close> conga_left_comm)
have "\<not> Col PO D F"
using \<open>D Out F P\<close> \<open>D Out PO PO\<close> \<open>PO D OS P E\<close> col_out2_col col_trivial_3 l9_19 by blast
have "PO \<noteq> F"
using \<open>\<not> Col PO D F\<close> col_trivial_3 by auto
have "\<not> Col PO D C"
using \<open>\<not> Col PO A D\<close> assms(4) assms(6) bet_col col2__eq col_permutation_5 by blast
have "PO Out D D"
by (simp add: \<open>D \<noteq> PO\<close> out_trivial)
have "PO Out C C"
by (simp add: \<open>C \<noteq> PO\<close> out_trivial)
have "PO Out F E"
using \<open>Bet PO F E\<close> \<open>PO \<noteq> F\<close> bet_out by auto
hence "D PO C CongA D PO F"
using l11_10 \<open>PO Out C C\<close> \<open>PO Out D D\<close> assms(9) conga_left_comm by blast
have "PO D C CongA PO D F"
by (meson conga_trans \<open>A D PO CongA PO D F\<close> \<open>PO D C CongA A D PO\<close>)
have "Cong PO D PO D"
by (simp add: cong_reflexivity)
have "Cong PO C PO F \<and> Cong D C D F \<and> PO C D CongA PO F D"
using l11_50_1 \<open>Cong PO D PO D\<close> \<open>D PO C CongA D PO F\<close> \<open>PO D C CongA PO D F\<close>
\<open>\<not> Col PO D C\<close> by blast
hence "Cong D F C D"
using not_cong_4312 by blast
moreover
{
assume "Col E D F"
{
assume "E = F"
have "D \<noteq> C"
using assms(4) by auto
moreover have "Per PO D C"
using \<open>E = F\<close> \<open>PO D C CongA PO D F\<close> assms(7) l11_18_2 by auto
moreover have "Col D C A"
using \<open>D Out A C\<close> not_col_permutation_5 out_col by blast
ultimately have "Per A D PO"
by (meson l11_17 \<open>PO D C CongA A D PO\<close>)
hence False
using acute_not_per \<open>Acute A D PO\<close> by blast
}
moreover
{
assume "E \<noteq> F"
hence False
by (metis col_permutation_1 \<open>Bet PO F E\<close> \<open>Col E D F\<close> \<open>\<not> Col PO D F\<close> bet_col col2__eq)
}
ultimately have False
by blast
}
hence "\<not> Col E D F"
by blast
have "E \<noteq> F"
using \<open>\<not> Col E D F\<close> not_col_distincts by blast
have "D E F LtA D F E"
proof (rule lta_trans [where ?A1.0 = "F" and ?B1.0 = "D" and ?C1.0 = "PO"])
have "D PO E LtA PO D C \<and> D E PO LtA PO D C"
by (metis l11_41 not_col_permutation_1 one_side_not_col124
\<open>\<And>thesis. (\<And>P. \<lbrakk>A D PO CongA PO D P; PO D OS P E\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis\<close>
assms(4) assms(7) between_symmetry)
show "D E F LtA F D PO"
proof -
have "D E PO CongA D E F"
by (metis bet_out_1 out2__conga \<open>Bet PO F E\<close> \<open>E \<noteq> F\<close> assms(5) out_trivial)
moreover have "PO D C CongA F D PO"
by (simp add: \<open>PO D C CongA PO D F\<close> conga_right_comm)
ultimately show ?thesis
by (simp add: conga_preserves_lta \<open>D PO E LtA PO D C \<and> D E PO LtA PO D C\<close>)
qed
show "F D PO LtA D F E"
by (metis bet_col col_lta__bet l11_41_aux not_col_permutation_2
\<open>Bet PO F E\<close> \<open>D PO E LtA PO D C \<and> D E PO LtA PO D C\<close> \<open>E \<noteq> F\<close> \<open>PO D C CongA A D PO\<close>
\<open>PO D C CongA PO D F\<close>
\<open>\<And>thesis. (\<And>P. \<lbrakk>A D PO CongA PO D P; PO D OS P E\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis\<close>
bet_conga__bet col123__nos ncol_conga_ncol)
qed
hence "D F Lt D E"
using l11_44_2_b by blast
ultimately show ?thesis
using cong2_lt__lt
using cong_reflexivity by blast
qed
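(** A gradation P Q R of the angle A0 PO A1 is either at least A0 PO B, or it is
    realised as A0 PO A for some point A lying on A0 B beyond A1. *)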
lemma acute_archi_aux1:
assumes "Per PO A0 B" and
"B \<noteq> A0" and
"Bet A0 A1 B" and
"GradA A0 PO A1 P Q R" and
"A0 \<noteq> A1"
shows "A0 PO B LeA P Q R \<or> (\<exists> A. Bet A0 A1 A \<and> Bet A0 A B \<and> P Q R CongA A0 PO A)"
proof -
have "A0 \<noteq> PO"
using assms(4) grada_distincts by auto
have "A1 \<noteq> PO"
using assms(4) grada_distincts by auto
have "P \<noteq> Q"
using assms(4) grada_distincts by auto
have "R \<noteq> Q"
using assms(4) grada_distincts by auto
have "\<not> Col PO A0 B"
by (metis per_not_col \<open>A0 \<noteq> PO\<close> assms(1) assms(2))
have "PO \<noteq> B"
using assms(1) assms(2) per_distinct_1 by blast
{
assume "P Q R LeA A0 PO B"
{
assume "Col P Q R"
{
assume "Q Out P R"
hence "PO Out A0 A1"
using grada_out__out [where ?D = "P" and ?E = "Q" and ?F = "R"] assms(4) by blast
hence False
using \<open>\<not> Col PO A0 B\<close> assms(3) assms(4) assms(5) bet_col col_trivial_2
colx out_col by blast
}
hence "\<not> Q Out P R"
by blast
hence "Bet P Q R"
using \<open>Col P Q R\<close> not_out_bet by blast
hence "Bet A0 PO B"
using \<open>P Q R LeA A0 PO B\<close> bet_lea__bet [where ?A = "P" and ?B = "Q" and ?C = "R"]
by blast
hence False
using \<open>\<not> Col PO A0 B\<close> bet_col not_col_permutation_4 by blast
}
hence "\<not> Col P Q R"
by blast
then obtain C where "P Q R CongA A0 PO C" and "A0 PO OS C B"
by (metis NCol_cases \<open>\<not> Col PO A0 B\<close> angle_construction_1)
have "C InAngle A0 PO B"
proof (rule lea_in_angle)
have "A0 PO B CongA A0 PO B"
using \<open>A0 \<noteq> PO\<close> \<open>PO \<noteq> B\<close> conga_refl by auto
thus "A0 PO C LeA A0 PO B"
using l11_30 [where ?A="P" and ?B="Q" and ?C="R" and ?D="A0" and ?E="PO" and ?F="B"]
\<open>P Q R LeA A0 PO B\<close> \<open>P Q R CongA A0 PO C\<close> by blast
show "A0 PO OS B C"
by (simp add: \<open>A0 PO OS C B\<close> one_side_symmetry)
qed
have "C \<noteq> PO"
using \<open>A0 PO OS C B\<close> os_distincts by blast
obtain A where "Bet A0 A B" and "A = PO \<or> PO Out A C"
using InAngle_def \<open>C InAngle A0 PO B\<close> by blast
hence "PO Out A C"
using Bet_cases Col_def \<open>\<not> Col PO A0 B\<close> by blast
have "P Q R CongA A0 PO A"
proof (rule l11_10 [where ?A ="P" and ?C="R" and ?D="A0" and ?F="C"],
insert \<open>P Q R CongA A0 PO C\<close> \<open>PO Out A C\<close>)
show "Q Out P P"
by (simp add: \<open>P \<noteq> Q\<close> out_trivial)
show "Q Out R R"
using \<open>R \<noteq> Q\<close> out_trivial by auto
show "PO Out A0 A0"
by (simp add: \<open>A0 \<noteq> PO\<close> out_trivial)
qed
have "Bet A0 A1 A"
proof (cases "A0 = A1")
case True
thus ?thesis
using assms(5) by auto
next
case False
hence "\<not> Col A0 PO A"
by (metis Col_perm \<open>A0 PO OS C B\<close> \<open>PO Out A C\<close> l6_16_1
one_side_not_col123 out_col out_distinct)
have "A1 InAngle A0 PO A"
proof (rule lea_in_angle)
show "A0 PO A1 LeA A0 PO A"
proof (rule l11_30 [where ?A="A0" and ?B="PO" and ?C="A1" and ?D="P" and ?E="Q" and ?F="R"])
show "A0 PO A1 LeA P Q R"
by (simp add: assms(4) grada__lea)
show "A0 PO A1 CongA A0 PO A1"
by (simp add: \<open>A0 \<noteq> PO\<close> \<open>A1 \<noteq> PO\<close> conga_refl)
show "P Q R CongA A0 PO A"
by (simp add: \<open>P Q R CongA A0 PO A\<close>)
qed
show "A0 PO OS A A1"
using False \<open>Bet A0 A B\<close> \<open>\<not> Col A0 PO A\<close> assms(3) bet2__out
not_col_distincts out_one_side by presburger
qed
obtain X where "Bet A0 X A" and "X = PO \<or> PO Out X A1"
using InAngle_def \<open>A1 InAngle A0 PO A\<close> by blast
hence "PO Out X A1"
using \<open>\<not> Col A0 PO A\<close> bet_col by blast
have "X = A1 \<longrightarrow> Bet A0 A1 A"
using \<open>Bet A0 X A\<close> by blast
moreover
have "X \<noteq> A1 \<longrightarrow> Bet A0 A1 A"
by (meson \<open>Bet A0 A B\<close> \<open>Bet A0 X A\<close> \<open>PO Out X A1\<close> \<open>\<not> Col A0 PO A\<close>
assms(3) bet_col bet_col1 col_permutation_2 colx out_col)
ultimately
show ?thesis
by blast
qed
moreover have "Bet A0 A B"
by (simp add: \<open>Bet A0 A B\<close>)
ultimately have "A0 PO B LeA P Q R \<or> (\<exists> A. Bet A0 A1 A \<and> Bet A0 A B \<and> P Q R CongA A0 PO A)"
using \<open>P Q R CongA A0 PO A\<close> by blast
}
thus ?thesis
by (metis \<open>A0 \<noteq> PO\<close> \<open>P \<noteq> Q\<close> \<open>PO \<noteq> B\<close> \<open>R \<noteq> Q\<close> lea_total)
qed
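(** Base case of acute_archi_aux2: the angle A0 PO A1 itself witnesses the claim
    when the graduation point is A1. *)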
lemma acute_archi_aux2_1_a:
assumes "Per PO A0 B" and
"PO \<noteq> A0" and
"B \<noteq> A0" and
"Bet A0 A1 B" and
"A0 \<noteq> A1" and "\<not> Col PO A0 B" and "\<not> Col A0 PO A1" and "PO \<noteq> A1" and "PO \<noteq> B"
shows "\<exists> P Q R. (GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 A1 Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A0 PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
proof -
let ?P = "A0"
let ?Q = "PO"
let ?R = "A1"
have "(GradA A0 PO A1 ?P ?Q ?R \<and> (A0 PO B LeA ?P ?Q ?R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> ?P ?Q ?R CongA A0 PO A' \<and> A0 A1 Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A0 PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
by (metis assms(2) assms(4) assms(8) bet__le1213 conga_refl grada_init not_bet_distincts)
thus ?thesis by blast
qed
lemma acute_archi_aux2_1:
assumes "Per PO A B" and
"PO \<noteq> A" and
"B \<noteq> A" and
"Bet A B0 B" and
"A \<noteq> B0" and "\<not> Col PO A B" and "\<not> Col A PO B0" and "PO \<noteq> B0" and "PO \<noteq> B"
shows "\<exists> P Q R. (GradA A PO B0 P Q R \<and> (A PO B LeA P Q R \<or>
(\<exists> A'. Bet A B0 A' \<and> Bet A A' B \<and> P Q R CongA A PO A' \<and> A B0 Le A A' \<and>
(\<exists> A0. Bet A A0 A' \<and> A0 PO A' CongA A PO B0 \<and> A B0 Le A0 A'))))"
proof -
let ?A0 = "A"
let ?A1 = "B0"
have "\<exists> P Q R. (GradA ?A0 PO ?A1 P Q R \<and> (?A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet ?A0 ?A1 A' \<and> Bet ?A0 A' B \<and> P Q R CongA ?A0 PO A' \<and>
?A0 ?A1 Le ?A0 A' \<and>
(\<exists> A. Bet ?A0 A A' \<and> ?A0 PO A' CongA ?A0 PO ?A1 \<and> ?A0 ?A1 Le A A'))))"
using acute_archi_aux2_1_a assms(1) assms(2) assms(3) assms(4) assms(5) assms(6) assms(7)
assms(8) assms(9) by blast
thus ?thesis
by (metis not_bet_distincts)
qed
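(** Induction step of acute_archi_aux2: if the claim holds for a graduation point C,
    it still holds for C', obtained from C by laying off one more copy of A0 A1. *)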
lemma acute_archi_aux2_2:
assumes "Per PO A0 B" and
"PO \<noteq> A0" and
"B \<noteq> A0" and
"Bet A0 A1 B" and
"A0 \<noteq> A1" and
"Grad A0 A1 C" and
"Bet A0 C C'" and
"Cong A0 A1 C C'" and
"\<not> Col PO A0 B" and
"\<not> Col A0 PO A1" and
"PO \<noteq> A1" and
"PO \<noteq> B" and
"Per PO A0 B \<and> PO \<noteq> A0 \<and>
B \<noteq> A0 \<and> Bet A0 A1 B \<and>
A0 \<noteq> A1 \<and>
\<not> Col PO A0 B \<and>
\<not> Col A0 PO A1 \<and>
PO \<noteq> A1 \<longrightarrow> (\<exists> P Q R.
(GradA A0 PO A1 P Q R \<and>
(A0 PO B LeA P Q R \<or>
(\<exists> A'.
Bet A0 A1 A' \<and>
Bet A0 A' B \<and>
P Q R CongA A0 PO A' \<and>
A0 C Le A0 A' \<and> (\<exists> A. ( Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))))"
shows "\<exists> P Q R. (GradA A0 PO A1 P Q R \<and>
(A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 C' Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
proof -
have "\<exists> P Q R.
(GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and>
A0 C Le A0 A' \<and> (\<exists> A. ( Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A')))))"
using assms(13) assms(1) assms(10) assms(11) assms(2) assms(3) assms(4)
assms(5) assms(9) by blast
then obtain P Q R where "GradA A0 PO A1 P Q R" and
P2: "A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 C Le A0 A' \<and>
(\<exists> A. ( Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A')))"
by blast
{
assume "A0 PO B LeA P Q R"
hence "\<exists> P Q R. (GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 C' Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
using \<open>GradA A0 PO A1 P Q R\<close> by blast
}
moreover
{
assume "\<not> A0 PO B LeA P Q R"
assume "\<exists> A'.
Bet A0 A1 A' \<and>
Bet A0 A' B \<and>
P Q R CongA A0 PO A' \<and>
A0 C Le A0 A' \<and> (\<exists> A. ( Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))"
then obtain A' where "Bet A0 A1 A'" and
"Bet A0 A' B" and
"P Q R CongA A0 PO A'" and
"A0 C Le A0 A'" and
P3: "(\<exists> A. ( Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))"
by blast
then obtain A where "Bet A0 A A'" and "A PO A' CongA A0 PO A1" and "A0 A1 Le A A'"
by blast
have "SAMS P Q R A0 PO A1"
proof (rule sams_lea2__sams [where ?A'="A0" and ?B'="PO" and ?C'="B" and
?D'="A0" and ?E'="PO" and ?F'="B"])
show "SAMS A0 PO B A0 PO B"
by (metis acute_chara_1 lea_right_comm lta__lea assms(1) assms(2) assms(3) l11_43
point_construction_different sams_chara)
show "P Q R LeA A0 PO B"
proof (rule l11_30 [where ?A="A0" and ?B="PO" and ?C="A'" and
?D="A0" and ?E="PO" and ?F="B"])
have "A' InAngle A0 PO B"
by (metis InAngle_def Out_def \<open>A PO A' CongA A0 PO A1\<close>
\<open>Bet A0 A' B\<close> assms(12) assms(2) between_trivial conga_diff2)
thus "A0 PO A' LeA A0 PO B"
using inangle__lea by force
show "A0 PO A' CongA P Q R"
using \<open>P Q R CongA A0 PO A'\<close> conga_sym_equiv by auto
show "A0 PO B CongA A0 PO B"
using assms(12) assms(2) conga_refl by force
qed
have "A1 InAngle A0 PO B"
using InAngle_def assms(11) assms(12) assms(2) assms(4) out_trivial by auto
thus "A0 PO A1 LeA A0 PO B"
by (simp add: inangle__lea)
qed
have "A0 \<noteq> A'"
using \<open>Bet A0 A1 A'\<close> assms(5) between_identity by blast
have "A \<noteq> A'"
using Le_def \<open>A0 A1 Le A A'\<close> assms(5) between_identity cong_identity_inv by blast
have "PO \<noteq> A"
using \<open>A PO A' CongA A0 PO A1\<close> conga_diff1 by blast
have "P \<noteq> Q"
using \<open>P Q R CongA A0 PO A'\<close> conga_diff1 by blast
have "PO \<noteq> A'"
using P3 conga_diff2 by blast
have "Q \<noteq> R"
using \<open>P Q R CongA A0 PO A'\<close> conga_diff2 by blast
then obtain P' Q' R' where "P Q R A0 PO A1 SumA P' Q' R'"
using ex_suma \<open>P \<noteq> Q\<close> assms(11) assms(2) by fastforce
have "GradA A0 PO A1 P' Q' R'"
using grada_stab [where ?D="P" and ?E="Q" and ?F="R"]
\<open>GradA A0 PO A1 P Q R\<close> \<open>SAMS P Q R A0 PO A1\<close> \<open>P Q R A0 PO A1 SumA P' Q' R'\<close> by blast
moreover
have "A0 PO B LeA P' Q' R' \<or> (\<exists> A. Bet A0 A1 A \<and> Bet A0 A B \<and> P' Q' R' CongA A0 PO A)"
using acute_archi_aux1 assms(1) assms(3) assms(4) assms(5) calculation by blast
moreover
{
assume "A0 PO B LeA P' Q' R'"
hence "GradA A0 PO A1 P' Q' R'"
using calculation by auto
hence "\<exists> P Q R. (GradA A0 PO A1 P Q R \<and>
(A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 C' Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
using \<open>A0 PO B LeA P' Q' R'\<close> by blast
}
moreover
{
assume "\<exists> A. Bet A0 A1 A \<and> Bet A0 A B \<and> P' Q' R' CongA A0 PO A"
then obtain A'' where "Bet A0 A1 A''" and "Bet A0 A'' B" and "P' Q' R' CongA A0 PO A''"
by blast
have "\<not> Col A PO A'"
by (meson \<open>A \<noteq> A'\<close> \<open>Bet A0 A A'\<close> \<open>Bet A0 A' B\<close> \<open>Bet A0 A1 A'\<close> assms(10) assms(4)
bet_col1 colx not_col_permutation_5)
have "\<not> Col A0 PO A'"
by (meson \<open>A0 \<noteq> A'\<close> \<open>Bet A0 A' B\<close> assms(10) assms(4) bet_col1 col_trivial_3 colx)
{
assume "Col A' PO A''"
have "A' \<noteq> A'' \<longrightarrow> False"
by (meson \<open>Bet A0 A1 A''\<close> \<open>Bet A0 A1 A'\<close> \<open>Col A' PO A''\<close> assms(10)
bet_col col_permutation_5 colx)
moreover
{
assume "A' = A''"
have "A0 PO A' A0 PO A1 SumA A0 PO A'"
using conga3_suma__suma [where ?A="P" and ?B="Q" and ?C="R" and
?D="A0" and ?E="PO" and ?F="A1" and ?G="P'" and ?H="Q'" and ?I="R'"]
using \<open>A' = A''\<close> \<open>P Q R A0 PO A1 SumA P' Q' R'\<close> \<open>P Q R CongA A0 PO A'\<close>
\<open>P' Q' R' CongA A0 PO A''\<close> assms(11) assms(2) conga_refl by force
have "\<not> Col A0 PO A''"
using \<open>A' = A''\<close> \<open>\<not> Col A0 PO A'\<close> by auto
have "Bet A0 A' A''"
by (simp add: \<open>A' = A''\<close> between_trivial)
have "SAMS A0 PO A' A0 PO A1"
using conga2_sams__sams [where ?A="P" and ?B="Q" and ?C="R" and
?D="A0" and ?E="PO" and ?F="A1"]
using \<open>P Q R CongA A0 PO A'\<close> \<open>SAMS P Q R A0 PO A1\<close> assms(11) assms(2)
conga_refl by presburger
hence "Col A0 PO A1"
using sams_suma__out546
by (meson not_col_permutation_4 \<open>A0 PO A' A0 PO A1 SumA A0 PO A'\<close> out_col)
hence False
using assms(10) by blast
}
ultimately have False
by blast
}
hence "\<not> Col A' PO A''"
by blast
have "\<not> Col A0 PO A''"
by (metis \<open>Bet A0 A'' B\<close> \<open>Bet A0 A1 A''\<close> assms(10) assms(4) bet_col1
between_identity colx not_col_distincts)
have "Bet A0 A' A''"
proof (rule col_two_sides_bet [where ?B="PO"])
show "Col A' A0 A''"
using \<open>Bet A0 A' B\<close> \<open>Bet A0 A'' B\<close> bet_col1 not_col_permutation_1 by blast
have "A' InAngle A0 PO A''"
proof (rule lea_in_angle)
show "A0 PO A' LeA A0 PO A''"
using l11_30 [where ?A="P" and ?B="Q" and ?C="R" and
?D="P'" and ?E="Q'" and ?F="R'"]
by (meson sams_suma__lea123789 \<open>P Q R A0 PO A1 SumA P' Q' R'\<close>
\<open>P Q R CongA A0 PO A'\<close> \<open>P' Q' R' CongA A0 PO A''\<close> \<open>SAMS P Q R A0 PO A1\<close>)
show "A0 PO OS A'' A'"
by (metis \<open>A0 \<noteq> A'\<close> \<open>Bet A0 A' B\<close> \<open>Bet A0 A'' B\<close> \<open>Bet A0 A1 A''\<close>
\<open>\<not> Col A0 PO A'\<close> assms(5) bet2__out between_identity out_one_side)
qed
thus "A' PO TS A0 A''"
by (simp add: \<open>\<not> Col A' PO A''\<close> \<open>\<not> Col A0 PO A'\<close> in_angle_two_sides
not_col_permutation_1 not_col_permutation_4)
qed
have "A PO A' CongA A' PO A''"
proof (rule conga_trans [where ?A'="A0" and ?B'="PO" and ?C'="A1"])
have "PO \<noteq> A''"
using \<open>\<not> Col A' PO A''\<close> not_col_distincts by blast
have "A' \<noteq> A''"
using \<open>\<not> Col A' PO A''\<close> not_col_distincts by blast
have "\<not> PO A' OS A0 A''"
using \<open>Bet A0 A' A''\<close> col_trivial_3 one_side_chara by force
show "A PO A' CongA A0 PO A1"
by (simp add: \<open>A PO A' CongA A0 PO A1\<close>)
show "A0 PO A1 CongA A' PO A''"
proof (rule sams2_suma2__conga456 [where ?A="P" and ?B="Q" and ?C="R"
and ?G="P'" and ?H="Q'" and ?I="R'"])
show "SAMS P Q R A0 PO A1"
using \<open>SAMS P Q R A0 PO A1\<close> by auto
show "SAMS P Q R A' PO A''"
proof (rule conga2_sams__sams [where ?A="A0" and ?B="PO" and ?C="A'" and
?D="A'" and ?E="PO" and ?F="A''"])
show "A0 PO A' CongA P Q R"
using \<open>P Q R CongA A0 PO A'\<close> conga_sym_equiv by blast
show "A' PO A'' CongA A' PO A''"
using \<open>PO \<noteq> A''\<close> \<open>PO \<noteq> A'\<close> conga_refl by auto
show "SAMS A0 PO A' A' PO A''"
by (metis Col_cases bet_out bet_out_1 os2__sams out_one_side
\<open>A' \<noteq> A''\<close> \<open>A0 \<noteq> A'\<close> \<open>Bet A0 A' A''\<close> \<open>\<not> Col A0 PO A''\<close>)
qed
show "P Q R A0 PO A1 SumA P' Q' R'"
by (simp add: \<open>P Q R A0 PO A1 SumA P' Q' R'\<close>)
show "P Q R A' PO A'' SumA P' Q' R'"
proof (rule conga3_suma__suma [where ?A="A0" and ?B="PO" and ?C="A'" and
?D="A'" and ?E="PO" and ?F="A''" and ?G="A0" and ?H="PO" and ?I="A''"])
show "A0 PO A' A' PO A'' SumA A0 PO A''"
proof -
have "A' PO A'' CongA A' PO A''"
using \<open>PO \<noteq> A''\<close> \<open>PO \<noteq> A'\<close> conga_refl by auto
moreover have "\<not> PO A' OS A0 A''"
using \<open>\<not> PO A' OS A0 A''\<close> by auto
moreover have "Coplanar A0 PO A' A''"
using \<open>Bet A0 A' A''\<close> bet_col ncop__ncols by blast
moreover have "A0 PO A'' CongA A0 PO A''"
using \<open>PO \<noteq> A''\<close> assms(2) conga_refl by auto
ultimately show ?thesis
using SumA_def by blast
qed
show "A0 PO A' CongA P Q R"
using \<open>P Q R CongA A0 PO A'\<close> conga_sym_equiv by auto
show "A' PO A'' CongA A' PO A''"
using \<open>PO \<noteq> A''\<close> \<open>PO \<noteq> A'\<close> conga_refl by auto
show "A0 PO A'' CongA P' Q' R'"
using \<open>P' Q' R' CongA A0 PO A''\<close> conga_sym_equiv by auto
qed
qed
qed
have "A A' Lt A' A''"
using acute_archi_aux [where ?PO="PO" and ?A="A0" and ?B="B"]
by (metis ncol_conga_ncol not_col_distincts not_conga_sym
\<open>A PO A' CongA A' PO A''\<close> \<open>A PO A' CongA A0 PO A1\<close> \<open>Bet A0 A A'\<close>
\<open>Bet A0 A' A''\<close> \<open>Bet A0 A'' B\<close> assms(1) assms(10) assms(3) between_exchange3)
hence "A A' Le A' A''"
using Lt_def by blast
hence "A0 A1 Le A' A''"
by (meson le_transitivity \<open>A0 A1 Le A A'\<close>)
have "Bet A0 A1 A''"
by (simp add: \<open>Bet A0 A1 A''\<close>)
moreover have "Bet A0 A'' B"
by (simp add: \<open>Bet A0 A'' B\<close>)
moreover have "P' Q' R' CongA A0 PO A''"
by (simp add: \<open>P' Q' R' CongA A0 PO A''\<close>)
moreover have "A0 C' Le A0 A''"
by (meson bet2_le2__le1346 \<open>A0 A1 Le A' A''\<close> \<open>A0 C Le A0 A'\<close>
\<open>Bet A0 A' A''\<close> assms(7) assms(8) cong_reflexivity l5_6)
moreover
have "\<exists> A. Bet A0 A A'' \<and> A PO A'' CongA A0 PO A1 \<and> A0 A1 Le A A''"
proof -
have "Bet A0 A' A''"
using \<open>Bet A0 A' A''\<close> by fastforce
moreover have "A' PO A'' CongA A0 PO A1"
by (meson not_conga not_conga_sym \<open>A PO A' CongA A' PO A''\<close>
\<open>A PO A' CongA A0 PO A1\<close>)
moreover have "A0 A1 Le A' A''"
using \<open>A0 A1 Le A' A''\<close> by auto
ultimately show ?thesis by blast
qed
ultimately
have "\<exists> P Q R. (GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 C' Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
using \<open>GradA A0 PO A1 P' Q' R'\<close> by blast
}
ultimately
have "\<exists> P Q R. (GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 C' Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
using \<open>GradA A0 PO A1 P Q R\<close> by blast
}
ultimately
show ?thesis
using P2 by blast
qed
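(** For every graduation C of the segment A0 A1, some gradation of the angle
    A0 PO A1 either exceeds A0 PO B or is realised at a point A' of A0 B with
    A0 C Le A0 A'. Proved by induction on the graduation of C. *)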
lemma acute_archi_aux2:
assumes "Per PO A0 B" and
"PO \<noteq> A0" and
"B \<noteq> A0" and
"Bet A0 A1 B" and
"A0 \<noteq> A1" and
"Grad A0 A1 C"
shows "\<exists> P Q R. (GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 C Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
proof -
have "\<not> Col PO A0 B"
by (metis assms(1) assms(2) assms(3) col_permutation_1 l8_8 per_col)
have "\<not> Col A0 PO A1"
by (metis \<open>\<not> Col PO A0 B\<close> assms(4) assms(5) bet_col col_trivial_3 colx not_col_permutation_2)
have "PO \<noteq> A1"
using \<open>\<not> Col A0 PO A1\<close> assms(4) bet_col1 by blast
have "PO \<noteq> B"
using assms(1) assms(3) per_distinct_1 by auto
have "GradI A0 A1 C"
by (simp add: Grad__GradI assms(6))
let ?th = "\<exists> P Q R. (GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists> A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and> A0 C Le A0 A' \<and>
(\<exists> A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
have ?th
proof (rule GradI.induct [where ?A="A0" and ?B="A1" and ?x="C"])
show "GradI A0 A1 C"
using \<open>GradI A0 A1 C\<close> by blast
show "\<exists>P Q R.
GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists>A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and>
A0 A1 Le A0 A' \<and> (\<exists>A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A')))"
by (metis bet_col assms(1) assms(2) assms(3) assms(4) assms(5) cong2_per2__cong_conga2
cong_reflexivity grada_init le_reflexivity not_bet_distincts
not_col_permutation_5 per_col)
{
fix C0 C'
assume H1: "GradI A0 A1 C0" and
H2: "\<exists>P Q R. GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists>A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and>
A0 C0 Le A0 A' \<and> (\<exists>A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A')))" and
H3: "Bet A0 C0 C'" and
H4: "Cong A0 A1 C0 C'"
have "\<exists>P Q R. GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists>A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and>
A0 C' Le A0 A' \<and> (\<exists>A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A')))"
proof (rule acute_archi_aux2_2 [where ?C="C0"])
show "Per PO A0 B"
using assms(1) by auto
show "PO \<noteq> A0"
by (simp add: assms(2))
show "B \<noteq> A0"
by (simp add: assms(3))
show "Bet A0 A1 B"
by (simp add: assms(4))
show "A0 \<noteq> A1"
by (simp add: assms(5))
show "Grad A0 A1 C0"
by (simp add: Grad_GradI H1)
show "Bet A0 C0 C'"
using H3 by auto
show "Cong A0 A1 C0 C'"
using H4 by auto
show "\<not> Col PO A0 B"
using \<open>\<not> Col PO A0 B\<close> by auto
show "\<not> Col A0 PO A1"
by (simp add: \<open>\<not> Col A0 PO A1\<close>)
show "PO \<noteq> A1"
by (simp add: \<open>PO \<noteq> A1\<close>)
show "PO \<noteq> B"
using \<open>PO \<noteq> B\<close> by auto
show "Per PO A0 B \<and> PO \<noteq> A0 \<and> B \<noteq> A0 \<and> Bet A0 A1 B \<and> A0 \<noteq> A1 \<and>
\<not> Col PO A0 B \<and> \<not> Col A0 PO A1 \<and> PO \<noteq> A1 \<longrightarrow>
(\<exists>P Q R. GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists>A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and>
A0 C0 Le A0 A' \<and> (\<exists>A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))))"
using H2 by blast
qed
}
thus "\<And>C C'.
GradI A0 A1 C \<Longrightarrow>
\<exists>P Q R. GradA A0 PO A1 P Q R \<and> (A0 PO B LeA P Q R \<or>
(\<exists>A'. Bet A0 A1 A' \<and> Bet A0 A' B \<and> P Q R CongA A0 PO A' \<and>
A0 C Le A0 A' \<and> (\<exists>A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A'))) \<Longrightarrow>
Bet A0 C C' \<Longrightarrow>
Cong A0 A1 C C' \<Longrightarrow>
\<exists>P Q R.
GradA A0 PO A1 P Q R \<and>
(A0 PO B LeA P Q R \<or>
(\<exists>A'. Bet A0 A1 A' \<and>
Bet A0 A' B \<and>
P Q R CongA A0 PO A' \<and>
A0 C' Le A0 A' \<and> (\<exists>A. Bet A0 A A' \<and> A PO A' CongA A0 PO A1 \<and> A0 A1 Le A A')))"
by blast
qed
thus ?thesis by blast
qed
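(** Under Archimedes' axiom, every acute angle D E F is exceeded by some gradation
    of any nondegenerate angle A B C. *)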
lemma archi_in_acute_angles:
assumes "archimedes_axiom"
shows "\<forall> A B C D E F. \<not> Col A B C \<and> Acute D E F
\<longrightarrow> (\<exists> P Q R. GradA A B C P Q R \<and> D E F LeA P Q R)"
proof -
{
fix A B C D E F
assume "\<not> Col A B C" and
"Acute D E F"
have "A \<noteq> B"
using \<open>\<not> Col A B C\<close> col_trivial_1 by fastforce
have "C \<noteq> B"
using \<open>\<not> Col A B C\<close> col_trivial_2 by force
have "E \<noteq> D"
using \<open>Acute D E F\<close> acute_distincts by blast
have "E \<noteq> F"
using \<open>Acute D E F\<close> acute_distincts by blast
have "\<exists> P Q R. GradA A B C P Q R \<and> D E F LeA P Q R"
proof (cases "Col D E F")
case True
thus ?thesis
by (metis Col_def Out_cases bet_out bet_out_1 l11_31_1 \<open>A \<noteq> B\<close>
\<open>Acute D E F\<close> \<open>C \<noteq> B\<close> \<open>E \<noteq> D\<close> \<open>E \<noteq> F\<close> acute_not_bet grada_ABC)
next
case False
{
assume "D E F LeA A B C"
hence "\<exists> P Q R. GradA A B C P Q R \<and> D E F LeA P Q R"
by (metis \<open>A \<noteq> B\<close> \<open>C \<noteq> B\<close> grada_ABC)
}
moreover
{
assume "A B C LeA D E F"
obtain D0 where "Col D E D0" and "D E Perp F D0"
using False l8_18_existence by blast
have "E Out D0 D"
proof (rule acute_col_perp__out [where ?A="F"])
show "Acute F E D"
using \<open>Acute D E F\<close> acute_sym by blast
show "Col E D D0"
using \<open>Col D E D0\<close> not_col_permutation_4 by blast
show "E D Perp F D0"
using \<open>D E Perp F D0\<close> perp_left_comm by blast
qed
have "D0 \<noteq> F"
using \<open>D E Perp F D0\<close> perp_not_eq_2 by blast
have "D0 \<noteq> E"
using \<open>E Out D0 D\<close> out_diff1 by auto
have "D E F CongA D0 E F"
by (metis acute_col_perp__out acute_sym out2__conga out_trivial
perp_left_comm \<open>Acute D E F\<close> \<open>Col D E D0\<close> \<open>D E Perp F D0\<close> \<open>E \<noteq> F\<close>
not_col_permutation_4)
have "Acute D0 E F"
by (meson acute_conga__acute \<open>Acute D E F\<close> \<open>D E F CongA D0 E F\<close>)
have "A B C LeA D0 E F"
by (meson conga__lea lea_trans \<open>A B C LeA D E F\<close> \<open>D E F CongA D0 E F\<close>)
have "\<not> Col D E F"
by (simp add: False)
have "Per E D0 F"
by (meson Per_cases l8_16_1 \<open>Col D E D0\<close> \<open>D E Perp F D0\<close> col_trivial_2)
obtain D1' where "A B C CongA D0 E D1'" and "D0 E OS D1' F"
by (metis Col_cases False angle_construction_1 \<open>Col D E D0\<close>
\<open>D0 \<noteq> E\<close> \<open>\<not> Col A B C\<close> col2__eq)
have "D0 E F CongA D0 E F"
using \<open>D0 \<noteq> E\<close> \<open>E \<noteq> F\<close> conga_refl by auto
hence "D0 E D1' LeA D0 E F"
using l11_30 [where ?A="A" and ?B="B" and ?C="C" and ?D="D0" and ?E="E" and ?F="F"]
\<open>A B C LeA D0 E F\<close> \<open>A B C CongA D0 E D1'\<close> by blast
have "D0 E OS F D1'"
by (simp add: \<open>D0 E OS D1' F\<close> one_side_symmetry)
hence "D1' InAngle D0 E F"
using lea_in_angle \<open>D0 E D1' LeA D0 E F\<close> by blast
then obtain D1 where "Bet D0 D1 F" and "D1 = E \<or> E Out D1 D1'"
using InAngle_def by force
have "D1 = E \<longrightarrow> (\<exists> P Q R. GradA A B C P Q R \<and> D E F LeA P Q R)"
using \<open>Acute D0 E F\<close> \<open>Bet D0 D1 F\<close> acute_not_bet by blast
moreover
{
assume "E Out D1 D1'"
have "A B C CongA D0 E D1"
proof (rule l11_10 [where ?A="A" and ?C="C" and ?D="D0" and ?F="D1'"],
insert \<open>A B C CongA D0 E D1'\<close> \<open>E Out D1 D1'\<close>)
show "B Out A A"
using \<open>A \<noteq> B\<close> out_trivial by auto
show "B Out C C"
by (simp add: \<open>C \<noteq> B\<close> out_trivial)
show "E Out D0 D0"
by (simp add: \<open>D0 \<noteq> E\<close> out_trivial)
qed
have "\<not> Col D0 E D1'"
using \<open>D0 E OS D1' F\<close> col123__nos by force
have "D0 \<noteq> D1"
using Col_cases \<open>E Out D1 D1'\<close> \<open>\<not> Col D0 E D1'\<close> out_col by blast
obtain F' where "Bet D0 F F'" and "Cong F F' D0 F"
using segment_construction by blast
obtain G where "Grad D0 D1 G" and "D0 F' Le D0 G"
using Reach_def \<open>D0 \<noteq> D1\<close> archimedes_axiom_def assms by blast
have "GradI D0 D1 G"
by (simp add: Grad__GradI \<open>Grad D0 D1 G\<close>)
have "\<exists> P Q R. (GradA D0 E D1 P Q R \<and> (D0 E F LeA P Q R \<or>
(\<exists> A'. Bet D0 D1 A' \<and> Bet D0 A' F \<and> P Q R CongA D0 E A' \<and>
D0 G Le D0 A' \<and> (\<exists> A. Bet D0 A A' \<and> A E A' CongA D0 E D1 \<and> D0 D1 Le A A'))))"
using acute_archi_aux2 \<open>Bet D0 D1 F\<close> \<open>D0 \<noteq> D1\<close> \<open>D0 \<noteq> E\<close> \<open>D0 \<noteq> F\<close>
\<open>Grad D0 D1 G\<close> \<open>Per E D0 F\<close> by blast
then obtain P Q R where
"GradA D0 E D1 P Q R" and
"D0 E F LeA P Q R \<or> (\<exists> A'. Bet D0 D1 A' \<and> Bet D0 A' F \<and>
P Q R CongA D0 E A' \<and> D0 G Le D0 A' \<and>
(\<exists> A. Bet D0 A A' \<and> A E A' CongA D0 E D1 \<and> D0 D1 Le A A'))" by blast
have "D0 \<noteq> E"
using grada_distincts [where ?A="D0" and ?B="E" and ?C="D1" and
?D="P" and ?E="Q" and ?F="R"] \<open>GradA D0 E D1 P Q R\<close> by blast
have "D1 \<noteq> E"
using grada_distincts [where ?A="D0" and ?B="E" and ?C="D1" and
?D="P" and ?E="Q" and ?F="R"] \<open>GradA D0 E D1 P Q R\<close> by blast
have "P \<noteq> Q"
using grada_distincts [where ?A="D0" and ?B="E" and ?C="D1" and
?D="P" and ?E="Q" and ?F="R"] \<open>GradA D0 E D1 P Q R\<close> by blast
have "R \<noteq> Q"
using grada_distincts [where ?A="D0" and ?B="E" and ?C="D1" and
?D="P" and ?E="Q" and ?F="R"] \<open>GradA D0 E D1 P Q R\<close> by blast
have "GradA A B C P Q R"
proof (rule conga2_grada__grada [where ?A="D0" and ?B="E" and ?C="D1"
and ?D="P" and ?E="Q" and ?F="R"], insert \<open>GradA D0 E D1 P Q R\<close>)
show "D0 E D1 CongA A B C"
using \<open>A B C CongA D0 E D1\<close> conga_sym_equiv by auto
show "P Q R CongA P Q R"
by (simp add: \<open>P \<noteq> Q\<close> \<open>R \<noteq> Q\<close> conga_refl)
qed
moreover
have "D E F LeA P Q R"
proof -
{
assume "D0 E F LeA P Q R"
have "D E F LeA P Q R"
proof (rule l11_30 [where ?A="D0" and ?B="E" and ?C="F" and
?D="P" and ?E="Q" and ?F="R"])
show "D0 E F LeA P Q R"
by (simp add: \<open>D0 E F LeA P Q R\<close>)
show "D0 E F CongA D E F"
using \<open>D E F CongA D0 E F\<close> conga_sym_equiv by auto
show "P Q R CongA P Q R"
using \<open>P \<noteq> Q\<close> \<open>R \<noteq> Q\<close> conga_refl by auto
qed
}
moreover
{
assume "\<exists> A'. Bet D0 D1 A' \<and> Bet D0 A' F \<and> P Q R CongA D0 E A' \<and>
D0 G Le D0 A' \<and> (\<exists> A. Bet D0 A A' \<and> A E A' CongA D0 E D1 \<and> D0 D1 Le A A')"
then obtain A' where "Bet D0 D1 A'" and "Bet D0 A' F" and
"P Q R CongA D0 E A'" and "D0 G Le D0 A'" and
"\<exists> A. Bet D0 A A' \<and> A E A' CongA D0 E D1 \<and> D0 D1 Le A A'"
by blast
have "D0 A' Le D0 F"
by (simp add: \<open>Bet D0 A' F\<close> bet__le1213)
hence "D0 G Le D0 F"
using \<open>D0 G Le D0 A'\<close> le_transitivity by blast
moreover have "D0 F Lt D0 F'"
using \<open>Bet D0 F F'\<close> \<open>Cong F F' D0 F\<close> \<open>D0 \<noteq> F\<close>
bet__lt1213 cong_diff_3 by presburger
ultimately have "D0 G Lt D0 F'"
using le1234_lt__lt by blast
hence False
using \<open>D0 F' Le D0 G\<close> le__nlt by auto
hence "D E F LeA P Q R"
by blast
}
ultimately show ?thesis
using \<open>D0 E F LeA P Q R \<or> (\<exists>A'. Bet D0 D1 A' \<and> Bet D0 A' F \<and>
P Q R CongA D0 E A' \<and> D0 G Le D0 A' \<and>
(\<exists>A. Bet D0 A A' \<and> A E A' CongA D0 E D1 \<and> D0 D1 Le A A'))\<close> by blast
qed
ultimately have "\<exists> P Q R. GradA A B C P Q R \<and> D E F LeA P Q R"
by blast
}
ultimately have "\<exists> P Q R. GradA A B C P Q R \<and> D E F LeA P Q R"
using \<open>D1 = E \<or> E Out D1 D1'\<close> by blast
}
ultimately show ?thesis using lea_total
by (metis \<open>A \<noteq> B\<close> \<open>C \<noteq> B\<close> \<open>E \<noteq> D\<close> \<open>E \<noteq> F\<close>)
qed
}
thus ?thesis
by blast
qed
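(** If two gradations of A B C cannot be added without exceeding a straight angle,
    then some gradation of A B C cannot even be extended by one more copy of A B C. *)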
lemma angles_archi_aux:
assumes "GradA A B C D E F" and
"GradA A B C G H I" and
"\<not> SAMS D E F G H I"
shows "\<exists> P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C"
proof -
have "\<not> SAMS D E F G H I \<longrightarrow> (\<exists> P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C)"
proof (rule GradA.induct [OF assms(2)])
show "\<And>Da Ea Fa.
A B C CongA Da Ea Fa \<Longrightarrow> \<not> SAMS D E F Da Ea Fa \<longrightarrow>
(\<exists>P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C)"
by (metis Tarski_neutral_dimensionless.conga_refl Tarski_neutral_dimensionless_axioms
assms(1) conga2_sams__sams grada_distincts)
{
fix Da Ea Fa G H I
assume "GradA A B C Da Ea Fa" and
"\<not> SAMS D E F Da Ea Fa \<longrightarrow> (\<exists> P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C)" and
"SAMS Da Ea Fa A B C" and
"Da Ea Fa A B C SumA G H I"
{
assume "\<not> SAMS D E F G H I"
{
assume "SAMS D E F Da Ea Fa"
have "E \<noteq> D"
using \<open>SAMS D E F Da Ea Fa\<close> sams_distincts by blast
have "E \<noteq> F"
using \<open>SAMS D E F Da Ea Fa\<close> sams_distincts by blast
have "Ea \<noteq> Da"
using \<open>SAMS D E F Da Ea Fa\<close> sams_distincts by blast
have "Ea \<noteq> Fa"
using \<open>SAMS D E F Da Ea Fa\<close> sams_distincts by blast
obtain P Q R where "D E F Da Ea Fa SumA P Q R"
using ex_suma \<open>E \<noteq> D\<close> \<open>E \<noteq> F\<close> \<open>Ea \<noteq> Da\<close> \<open>Ea \<noteq> Fa\<close> by presburger
have "GradA A B C P Q R"
using grada2_sams_suma__grada [where ?D="D" and ?E="E" and ?F="F" and
?G="Da" and ?H="Ea" and ?I="Fa"]
assms(1) \<open>GradA A B C Da Ea Fa\<close> \<open>SAMS D E F Da Ea Fa\<close> \<open>D E F Da Ea Fa SumA P Q R\<close>
by blast
moreover
{
assume "SAMS P Q R A B C"
have "SAMS D E F G H I"
using \<open>SAMS D E F Da Ea Fa\<close> \<open>SAMS Da Ea Fa A B C\<close> \<open>D E F Da Ea Fa SumA P Q R\<close>
\<open>Da Ea Fa A B C SumA G H I\<close> \<open>SAMS P Q R A B C\<close> sams_assoc_1 by blast
hence False
using \<open>\<not> SAMS D E F G H I\<close> by blast
}
ultimately have "\<exists>P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C"
by blast
}
hence "\<exists>P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C"
using \<open>\<not> SAMS D E F Da Ea Fa \<longrightarrow> (\<exists>P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C)\<close>
by blast
}
hence "\<not> SAMS D E F G H I \<longrightarrow> (\<exists>P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C)"
by blast
}
thus "\<And>Da Ea Fa G H I.
GradA A B C Da Ea Fa \<Longrightarrow>
\<not> SAMS D E F Da Ea Fa \<longrightarrow> (\<exists>P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C) \<Longrightarrow>
SAMS Da Ea Fa A B C \<Longrightarrow> Da Ea Fa A B C SumA G H I \<Longrightarrow>
\<not> SAMS D E F G H I \<longrightarrow> (\<exists>P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C)"
by blast
qed
thus ?thesis
using assms(3) by blast
qed
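(** Under Archimedes' axiom, for a nondegenerate angle A B C and a non-straight
    angle D E F, some gradation of A B C either exceeds D E F or can no longer be
    extended by A B C; the proof bisects D E F and applies archi_in_acute_angles
    to the acute half. *)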
lemma angles_archi_aux1:
assumes "archimedes_axiom"
shows "\<forall> A B C D E F.
\<not> Col A B C \<and> \<not> Bet D E F \<longrightarrow>
(\<exists> P Q R. GradA A B C P Q R \<and> (D E F LeA P Q R \<or> \<not> SAMS P Q R A B C))"
proof -
{
fix A B C D E F
assume "\<not> Col A B C" and
"\<not> Bet D E F"
have "D \<noteq> E"
using \<open>\<not> Bet D E F\<close> between_trivial2 by auto
have "F \<noteq> E"
using \<open>\<not> Bet D E F\<close> not_bet_distincts by blast
obtain F1 where "F1 InAngle D E F" and "F1 E D CongA F1 E F"
using angle_bisector \<open>D \<noteq> E\<close> \<open>F \<noteq> E\<close> by blast
have "F1 \<noteq> E"
using \<open>F1 E D CongA F1 E F\<close> conga_distinct by auto
have "\<not> E F1 OS D F"
proof (cases "Col D E F1")
case True
thus ?thesis
using NCol_perm col123__nos by blast
next
case False
thus ?thesis
by (meson \<open>F1 InAngle D E F\<close> col124__nos col_permutation_4 col_permutation_5
in_angle_two_sides invert_one_side l9_9)
qed
have "D E F1 D E F1 SumA D E F"
by (meson conga_refl \<open>D \<noteq> E\<close> \<open>F \<noteq> E\<close> \<open>F1 E D CongA F1 E F\<close>
\<open>F1 InAngle D E F\<close> \<open>F1 \<noteq> E\<close> conga3_suma__suma
conga_left_comm inangle__suma not_conga_sym)
have "SAMS D E F1 D E F1"
proof -
{
assume "Bet D E F1"
hence False
using bet_in_angle_bet \<open>F1 InAngle D E F\<close> \<open>\<not> Bet D E F\<close> by blast
}
hence "E Out D F1 \<or> \<not> Bet D E F1"
by blast
moreover
have "\<exists> J. F1 E J CongA D E F1 \<and> \<not> E F1 OS D J \<and> \<not> D E TS F1 J \<and> Coplanar D E F1 J"
proof -
have "F1 E F CongA D E F1"
using \<open>F1 E D CongA F1 E F\<close> conga_left_comm conga_sym_equiv by blast
moreover have "\<not> E F1 OS D F"
by (simp add: \<open>\<not> E F1 OS D F\<close>)
moreover have "\<not> D E TS F1 F"
proof (cases "Col D E F1")
case True
thus ?thesis
using TS_def col_permutation_2 by blast
next
case False
thus ?thesis
by (metis Col_cases TS_def \<open>F1 InAngle D E F\<close> in_angle_one_side l9_9)
qed
moreover have "Coplanar D E F1 F"
by (meson inangle__coplanar \<open>F1 InAngle D E F\<close> coplanar_perm_8)
ultimately show ?thesis
by blast
qed
ultimately show ?thesis
using SAMS_def \<open>D \<noteq> E\<close> by auto
qed
hence "Acute D E F1"
by (metis nbet_sams_suma__acute \<open>D E F1 D E F1 SumA D E F\<close> \<open>\<not> Bet D E F\<close>)
then obtain P1 Q1 R1 where "GradA A B C P1 Q1 R1" and "D E F1 LeA P1 Q1 R1"
using archi_in_acute_angles \<open>\<not> Col A B C\<close> assms by blast
have "P1 \<noteq> Q1"
using \<open>GradA A B C P1 Q1 R1\<close> grada_distincts by blast
have "Q1 \<noteq> R1"
using \<open>GradA A B C P1 Q1 R1\<close> grada_distincts by blast
{
assume "SAMS P1 Q1 R1 P1 Q1 R1"
obtain P Q R where "P1 Q1 R1 P1 Q1 R1 SumA P Q R"
using ex_suma \<open>P1 \<noteq> Q1\<close> \<open>Q1 \<noteq> R1\<close> by presburger
have "GradA A B C P Q R"
using grada2_sams_suma__grada [where ?D="P1" and ?E="Q1" and ?F="R1" and
?G="P1" and ?H="Q1" and ?I="R1"] \<open>GradA A B C P1 Q1 R1\<close> \<open>GradA A B C P1 Q1 R1\<close>
\<open>SAMS P1 Q1 R1 P1 Q1 R1\<close> \<open>P1 Q1 R1 P1 Q1 R1 SumA P Q R\<close> by blast
moreover have "D E F LeA P Q R"
using sams_lea2_suma2__lea [where ?A="D" and ?B="E" and ?C="F1" and
?D="D" and ?E="E" and ?F="F1" and
?A'="P1" and ?B'="Q1" and ?C'="R1" and ?D'="P1" and ?E'="Q1" and ?F'="R1"]
\<open>D E F1 LeA P1 Q1 R1\<close> \<open>SAMS P1 Q1 R1 P1 Q1 R1\<close> \<open>D E F1 D E F1 SumA D E F\<close>
\<open>P1 Q1 R1 P1 Q1 R1 SumA P Q R\<close> by blast
ultimately have "\<exists> P Q R. GradA A B C P Q R \<and> (D E F LeA P Q R \<or> \<not> SAMS P Q R A B C)"
by blast
}
moreover
{
assume "\<not> SAMS P1 Q1 R1 P1 Q1 R1"
hence "\<exists> P Q R. GradA A B C P Q R \<and> (D E F LeA P Q R \<or> \<not> SAMS P Q R A B C)"
using angles_archi_aux \<open>GradA A B C P1 Q1 R1\<close> calculation by blast
}
ultimately have "\<exists> P Q R. GradA A B C P Q R \<and> (D E F LeA P Q R \<or> \<not> SAMS P Q R A B C)"
by blast
}
thus ?thesis
by blast
qed
(** Inspired by Hartshorne's proof of Lemma 35.1 in Geometry: Euclid and Beyond *)
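(** Under Archimedes' axiom the previous statement extends to an arbitrary angle
    D E F, straight angles included. *)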
lemma archi_in_angles:
assumes "archimedes_axiom"
shows "\<forall> A B C. \<forall> D ::TPoint. \<forall> E ::TPoint. \<forall> F ::TPoint. (\<not> Col A B C \<and> D \<noteq> E \<and> F \<noteq> E) \<longrightarrow>
(\<exists> P Q R. GradA A B C P Q R \<and> (D E F LeA P Q R \<or> \<not> SAMS P Q R A B C))"
proof -
{
fix A B C
fix D::TPoint
fix E::TPoint
fix F::TPoint
assume "\<not> Col A B C" and
"D \<noteq> E" and
"F \<noteq> E"
have "\<exists> P Q R. (GradA A B C P Q R \<and> (D E F LeA P Q R \<or> \<not> SAMS P Q R A B C))"
proof (cases "Bet D E F")
case True
obtain A0 where "Bet A B A0" and "Cong B A0 A B"
using segment_construction by blast
have "A \<noteq> B"
using \<open>\<not> Col A B C\<close> not_col_distincts by blast
have "C \<noteq> B"
using \<open>\<not> Col A B C\<close> col_trivial_2 by auto
have "B \<noteq> A0"
using \<open>A \<noteq> B\<close> \<open>Cong B A0 A B\<close> cong_reverse_identity by blast
have "\<not> Col A0 B C"
by (meson \<open>B \<noteq> A0\<close> \<open>Bet A B A0\<close> \<open>\<not> Col A B C\<close> bet_col col2__eq col_permutation_3)
obtain P1 Q1 R1 where "GradA A B C P1 Q1 R1" and
"C B A0 LeA P1 Q1 R1 \<or> \<not> SAMS P1 Q1 R1 A B C"
using angles_archi_aux1
by (metis Col_def \<open>\<not> Col A B C\<close> \<open>\<not> Col A0 B C\<close> assms between_symmetry)
{
assume "SAMS P1 Q1 R1 A B C"
hence "C B A0 LeA P1 Q1 R1"
using \<open>C B A0 LeA P1 Q1 R1 \<or> \<not> SAMS P1 Q1 R1 A B C\<close> by auto
have "P1 \<noteq> Q1"
using \<open>SAMS P1 Q1 R1 A B C\<close>
by (simp add: sams_distincts)
have "R1 \<noteq> Q1"
using \<open>C B A0 LeA P1 Q1 R1\<close> lea_distincts by blast
obtain P Q R where "P1 Q1 R1 A B C SumA P Q R"
using ex_suma \<open>A \<noteq> B\<close> \<open>C \<noteq> B\<close> \<open>P1 \<noteq> Q1\<close> \<open>R1 \<noteq> Q1\<close> by presburger
have "GradA A B C P Q R"
using grada_stab [where ?D="P1" and ?E="Q1" and ?F="R1"]
\<open>GradA A B C P1 Q1 R1\<close> \<open>SAMS P1 Q1 R1 A B C\<close> \<open>P1 Q1 R1 A B C SumA P Q R\<close> by auto
moreover
have "P \<noteq> Q"
using calculation grada_distincts by blast
have "R \<noteq> Q"
using calculation grada_distincts by blast
have "A B A0 LeA P Q R"
proof (rule sams_lea2_suma2__lea [where
?A="A0" and ?B="B" and ?C="C" and ?D="A" and ?E="B" and ?F="C"
and ?A'="P1" and ?B'="Q1" and ?C'="R1" and ?D'="A" and ?E'="B" and ?F'="C"])
show "A0 B C LeA P1 Q1 R1"
using \<open>C B A0 LeA P1 Q1 R1\<close> lea_left_comm by blast
show "A B C LeA A B C"
using \<open>A \<noteq> B\<close> \<open>C \<noteq> B\<close> lea_refl by force
show "SAMS P1 Q1 R1 A B C"
using \<open>SAMS P1 Q1 R1 A B C\<close> by auto
show "A0 B C A B C SumA A B A0"
by (metis Bet_cases \<open>A \<noteq> B\<close> \<open>B \<noteq> A0\<close> \<open>Bet A B A0\<close> \<open>C \<noteq> B\<close>
bet__suma suma_middle_comm suma_right_comm)
show "P1 Q1 R1 A B C SumA P Q R"
using \<open>P1 Q1 R1 A B C SumA P Q R\<close> by auto
qed
hence "Bet P Q R"
using \<open>Bet A B A0\<close> bet_lea__bet by blast
hence "D E F LeA P Q R"
using l11_31_2 \<open>D \<noteq> E\<close> \<open>F \<noteq> E\<close> \<open>P \<noteq> Q\<close> \<open>R \<noteq> Q\<close> by blast
ultimately have "\<exists> P Q R. (GradA A B C P Q R \<and> (D E F LeA P Q R \<or> \<not> SAMS P Q R A B C))"
by blast
}
moreover
{
assume "\<not> SAMS P1 Q1 R1 A B C"
hence "\<exists> P Q R. (GradA A B C P Q R \<and> (D E F LeA P Q R \<or> \<not> SAMS P Q R A B C))"
using \<open>GradA A B C P1 Q1 R1\<close> by blast
}
ultimately show ?thesis
by blast
next
case False
thus ?thesis
using \<open>\<not> Col A B C\<close> angles_archi_aux1 assms by blast
qed
}
thus ?thesis
by blast
qed
(** If Archimedes' postulate holds, every nondegenerate angle can be
repeated until exceeding 180\<degree> *)
lemma archi__grada_destruction:
assumes "archimedes_axiom"
shows "\<forall> A B C. \<not> Col A B C \<longrightarrow>
(\<exists> P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C)"
proof -
{
fix A B C
assume "\<not> Col A B C"
obtain A0 where "Bet A B A0" and "Cong B A0 A B"
using segment_construction by blast
have "A \<noteq> B"
using \<open>\<not> Col A B C\<close> col_trivial_1 by blast
have "B \<noteq> A0"
using \<open>A \<noteq> B\<close> \<open>Cong B A0 A B\<close> cong_reverse_identity by blast
obtain P Q R where "GradA A B C P Q R" and "A B A0 LeA P Q R \<or> \<not> SAMS P Q R A B C"
using archi_in_angles \<open>A \<noteq> B\<close> \<open>B \<noteq> A0\<close> \<open>\<not> Col A B C\<close> assms by metis
{
assume "A B A0 LeA P Q R"
assume "SAMS P Q R A B C"
hence "B Out A C \<or> \<not> Bet P Q R"
using SAMS_def by blast
have "B Out A C \<longrightarrow> False"
using Col_cases \<open>\<not> Col A B C\<close> out_col by blast
moreover
{
assume "\<not> Bet P Q R"
have "Bet A B A0"
by (simp add: \<open>Bet A B A0\<close>)
hence False
using bet_lea__bet \<open>A B A0 LeA P Q R\<close> \<open>B Out A C \<or> \<not> Bet P Q R\<close> calculation by blast
}
hence "\<not> Bet P Q R \<longrightarrow> False"
by blast
ultimately have False
using \<open>B Out A C \<or> \<not> Bet P Q R\<close> by fastforce
}
hence "A B A0 LeA P Q R \<longrightarrow> \<not> SAMS P Q R A B C"
by blast
hence "\<not> SAMS P Q R A B C"
using \<open>A B A0 LeA P Q R \<or> \<not> SAMS P Q R A B C\<close> by blast
hence "\<exists> P Q R. GradA A B C P Q R \<and> \<not> SAMS P Q R A B C"
using \<open>GradA A B C P Q R\<close> by blast
}
thus ?thesis
by blast
qed
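(** Every gradation of A B C is dominated by some doubling (GradAExp) of A B C,
    unless that doubling is already obtuse. *)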
lemma gradaexp_destruction_aux:
assumes "GradA A B C P Q R"
shows "\<exists> S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> P Q R LeA S T U)"
proof (rule GradA.induct [OF assms(1)])
show "\<And>D E F. A B C CongA D E F \<Longrightarrow>
\<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> D E F LeA S T U)"
by (metis conga__lea456123 conga_diff1 conga_diff2 gradaexp_ABC)
{
fix D E F G H I
assume "GradA A B C D E F" and
"\<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> D E F LeA S T U)" and
"SAMS D E F A B C" and
"D E F A B C SumA G H I"
then obtain P Q R where "GradAExp A B C P Q R" and "Obtuse P Q R \<or> D E F LeA P Q R"
by blast
have "P \<noteq> Q"
using \<open>GradAExp A B C P Q R\<close> gradaexp_distincts by blast
have "R \<noteq> Q"
using \<open>GradAExp A B C P Q R\<close> gradaexp_distincts by blast
{
assume "SAMS P Q R P Q R"
{
assume "Obtuse P Q R"
hence "\<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> G H I LeA S T U)"
using \<open>GradAExp A B C P Q R\<close> by blast
}
moreover
{
assume "D E F LeA P Q R"
obtain S T U where "P Q R P Q R SumA S T U"
using ex_suma \<open>P \<noteq> Q\<close> \<open>R \<noteq> Q\<close> by presburger
have "GradAExp A B C S T U"
proof (rule gradaexp_stab [where ?D="P" and ?E="Q" and ?F="R"])
show "GradAExp A B C P Q R"
by (simp add: \<open>GradAExp A B C P Q R\<close>)
show "SAMS P Q R P Q R"
by (simp add: \<open>SAMS P Q R P Q R\<close>)
show "P Q R P Q R SumA S T U"
using \<open>P Q R P Q R SumA S T U\<close> by auto
qed
moreover
have "GradA A B C P Q R"
using \<open>GradAExp A B C P Q R\<close> gradaexp__grada by auto
hence "G H I LeA S T U"
by (meson \<open>D E F A B C SumA G H I\<close> \<open>D E F LeA P Q R\<close> \<open>P Q R P Q R SumA S T U\<close>
\<open>SAMS P Q R P Q R\<close> grada__lea sams_lea2_suma2__lea)
ultimately have "\<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> G H I LeA S T U)"
by blast
}
ultimately have "\<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> G H I LeA S T U)"
using \<open>Obtuse P Q R \<or> D E F LeA P Q R\<close> by blast
}
moreover
{
assume "\<not> SAMS P Q R P Q R"
hence "Obtuse P Q R"
using \<open>P \<noteq> Q\<close> \<open>R \<noteq> Q\<close> nsams__obtuse by auto
hence "\<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> G H I LeA S T U)"
using \<open>GradAExp A B C P Q R\<close> by blast
}
ultimately have "\<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> G H I LeA S T U)"
by blast
}
thus "\<And>D E F G H I.
GradA A B C D E F \<Longrightarrow>
\<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> D E F LeA S T U) \<Longrightarrow>
SAMS D E F A B C \<Longrightarrow>
D E F A B C SumA G H I \<Longrightarrow> \<exists>S T U. GradAExp A B C S T U \<and> (Obtuse S T U \<or> G H I LeA S T U)"
by blast
qed
(** If Archimedes' postulate holds, every nondegenerate angle can be doubled until exceeding 90\<degree> *)
lemma archi__gradaexp_destruction:
assumes "archimedes_axiom"
shows "\<forall> A B C. \<not> Col A B C \<longrightarrow> (\<exists> P Q R. GradAExp A B C P Q R \<and> Obtuse P Q R)"
proof -
{
fix A B C
assume "\<not> Col A B C"
obtain D E F where "GradA A B C D E F" and "\<not> SAMS D E F A B C"
using archi__grada_destruction \<open>\<not> Col A B C\<close> assms by blast
obtain P Q R where "GradAExp A B C P Q R" and "Obtuse P Q R \<or> D E F LeA P Q R"
using \<open>GradA A B C D E F\<close> gradaexp_destruction_aux by blast
have "P \<noteq> Q"
using \<open>GradAExp A B C P Q R\<close> gradaexp_distincts by blast
have "R \<noteq> Q"
using \<open>GradAExp A B C P Q R\<close> gradaexp_distincts by blast
{
assume "D E F LeA P Q R"
{
assume "SAMS P Q R P Q R"
have "A B C LeA P Q R"
using \<open>GradAExp A B C P Q R\<close> grada__lea gradaexp__grada by blast
hence "SAMS D E F A B C"
using sams_lea2__sams [where ?A'="P" and ?B'="Q" and ?C'="R" and
?D'="P" and ?E'="Q" and ?F'="R"]
\<open>SAMS P Q R P Q R\<close> \<open>D E F LeA P Q R\<close> by blast
hence False
using \<open>\<not> SAMS D E F A B C\<close> by blast
}
hence "\<not> SAMS P Q R P Q R"
by blast
hence "Obtuse P Q R"
using \<open>P \<noteq> Q\<close> \<open>R \<noteq> Q\<close> nsams__obtuse by auto
}
hence "\<exists> P Q R. GradAExp A B C P Q R \<and> Obtuse P Q R"
using \<open>GradAExp A B C P Q R\<close> \<open>Obtuse P Q R \<or> D E F LeA P Q R\<close> by blast
}
thus ?thesis
by blast
qed
end
end
|
'''
Synopsis: Script to capture video frames from a webcam, optionally using a background capture process.
Author: Nikhil Venkatesh
Contact: mailto:[email protected]
'''
#Opencv Imports
import cv2
import numpy as np
#Python Imports
import multiprocessing
cores_available = multiprocessing.cpu_count()
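# Worker run in a separate process: keeps grabbing frames from the shared camera
# and sends back the most recent one whenever the parent requests it via the pipe.
# Receiving -1 on the pipe is the shutdown signal.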
def image_capture_background(imgcap_connection):
global cap, latest_image
if imgcap_connection is None:
print ("image_capture failed because pipe is uninitialised")
return
latest_image = None
while True:
success_flag, image = cap.read()
if success_flag:
latest_image = image
if imgcap_connection.poll():
recv_obj = imgcap_connection.recv()
if recv_obj == -1:
break
imgcap_connection.send(latest_image)
cap.release()
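# Opens the default camera at 640x480. When enough CPU cores are available,
# frame grabbing is handed off to a daemon background process connected by a pipe.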
def startCamera():
global cap, parent_conn, imgcap_conn, is_backgroundCap
is_backgroundCap=False
cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    if not cap.isOpened():
        print("Cannot open camera")
        exit(1)
if(cores_available > 3):
print("BG process")
parent_conn, imgcap_conn = multiprocessing.Pipe()
proc = multiprocessing.Process(target=image_capture_background, args=(imgcap_conn,))
proc.daemon = True
proc.start()
is_backgroundCap = True
print("Camera is opened")
def get_frame():
    global cap, is_backgroundCap, parent_conn, img_counter
    img_counter = 1
    if is_backgroundCap:
        if parent_conn is None:
            return None
        # Any value other than the -1 shutdown sentinel works as a frame request.
        parent_conn.send(img_counter)
        img_counter = img_counter + 1
        img = parent_conn.recv()
    else:
        success_flag, img = cap.read()
        if not success_flag:
            img = None
    if img is None:
        return None
    return cv2.resize(img, (200, 150))
def cap_end():
    global cap, is_backgroundCap, parent_conn
    print("Releasing camera")
    if is_backgroundCap and parent_conn is not None:
        # Tell the background process to stop and release its capture handle.
        parent_conn.send(-1)
    cap.release()
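# A minimal usage sketch (not part of the original script): grab a handful of
# frames with get_frame() and write them to disk with cv2.imwrite. The frame
# count and file names below are illustrative only.
def save_frames(count=10, prefix="frame"):
    startCamera()
    for k in range(count):
        frame = get_frame()
        if frame is not None:
            cv2.imwrite("%s_%03d.png" % (prefix, k), frame)
    cap_end()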
if __name__ == "__main__":
    startCamera()
    i = 1
    while True:
        img = get_frame()
        print("Got image " + str(i))
        if i == 200:
            break
        i += 1
    cap_end()
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Mario Carneiro
-/
import algebra.group.pi
import algebra.group_power
/-!
# The group of permutations (self-equivalences) of a type `α`
This file defines the `group` structure on `equiv.perm α`.
-/
universes u v
namespace equiv
variables {α : Type u}
namespace perm
instance perm_group : group (perm α) :=
{ mul := λ f g, equiv.trans g f,
one := equiv.refl α,
inv := equiv.symm,
mul_assoc := λ f g h, (trans_assoc _ _ _).symm,
one_mul := trans_refl,
mul_one := refl_trans,
mul_left_inv := trans_symm }
theorem mul_apply (f g : perm α) (x) : (f * g) x = f (g x) :=
equiv.trans_apply _ _ _
theorem one_apply (x) : (1 : perm α) x = x := rfl
@[simp] lemma inv_apply_self (f : perm α) (x) : f⁻¹ (f x) = x := f.symm_apply_apply x
@[simp] lemma apply_inv_self (f : perm α) (x) : f (f⁻¹ x) = x := f.apply_symm_apply x
lemma one_def : (1 : perm α) = equiv.refl α := rfl
lemma mul_def (f g : perm α) : f * g = g.trans f := rfl
lemma inv_def (f : perm α) : f⁻¹ = f.symm := rfl
@[simp] lemma coe_mul (f g : perm α) : ⇑(f * g) = f ∘ g := rfl
@[simp] lemma coe_one : ⇑(1 : perm α) = id := rfl
lemma eq_inv_iff_eq {f : perm α} {x y : α} : x = f⁻¹ y ↔ f x = y := f.eq_symm_apply
lemma inv_eq_iff_eq {f : perm α} {x y : α} : f⁻¹ x = y ↔ x = f y := f.symm_apply_eq
lemma gpow_apply_comm {α : Type*} (σ : equiv.perm α) (m n : ℤ) {x : α} :
(σ ^ m) ((σ ^ n) x) = (σ ^ n) ((σ ^ m) x) :=
by rw [←equiv.perm.mul_apply, ←equiv.perm.mul_apply, gpow_mul_comm]
/-! Lemmas about mixing `perm` with `equiv`. Because we have multiple ways to express
`equiv.refl`, `equiv.symm`, and `equiv.trans`, we want simp lemmas for every combination.
The assumption made here is that if you're using the group structure, you want to preserve it after
simp. -/
@[simp] lemma trans_one {α : Sort*} {β : Type*} (e : α ≃ β) : e.trans (1 : perm β) = e :=
equiv.trans_refl e
@[simp] lemma mul_refl (e : perm α) : e * equiv.refl α = e := equiv.trans_refl e
@[simp] lemma one_symm : (1 : perm α).symm = 1 := equiv.refl_symm
@[simp] lemma refl_inv : (equiv.refl α : perm α)⁻¹ = 1 := equiv.refl_symm
@[simp] lemma one_trans {α : Type*} {β : Sort*} (e : α ≃ β) : (1 : perm α).trans e = e :=
equiv.refl_trans e
@[simp] lemma refl_mul (e : perm α) : equiv.refl α * e = e := equiv.refl_trans e
@[simp] lemma inv_trans (e : perm α) : e⁻¹.trans e = 1 := equiv.symm_trans e
@[simp] lemma mul_symm (e : perm α) : e * e.symm = 1 := equiv.symm_trans e
@[simp] lemma trans_inv (e : perm α) : e.trans e⁻¹ = 1 := equiv.trans_symm e
@[simp] lemma symm_mul (e : perm α) : e.symm * e = 1 := equiv.trans_symm e
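/- A quick sanity check (not part of the original file): the simp lemmas above let
`simp` close goals that mix the group spelling with the bare `equiv` operations. -/
example (e : perm α) : e * e.symm = 1 := by simp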
/-! Lemmas about `equiv.perm.sum_congr` re-expressed via the group structure. -/
@[simp] lemma sum_congr_mul {α β : Type*} (e : perm α) (f : perm β) (g : perm α) (h : perm β) :
sum_congr e f * sum_congr g h = sum_congr (e * g) (f * h) :=
sum_congr_trans g h e f
@[simp] lemma sum_congr_inv {α β : Type*} (e : perm α) (f : perm β) :
(sum_congr e f)⁻¹ = sum_congr e⁻¹ f⁻¹ :=
sum_congr_symm e f
@[simp] lemma sum_congr_one {α β : Type*} :
sum_congr (1 : perm α) (1 : perm β) = 1 :=
sum_congr_refl
/-- `equiv.perm.sum_congr` as a `monoid_hom`, with its two arguments bundled into a single `prod`.
This is particularly useful for its `monoid_hom.range` projection, which is the subgroup of
permutations which do not exchange elements between `α` and `β`. -/
@[simps]
def sum_congr_hom (α β : Type*) :
perm α × perm β →* perm (α ⊕ β) :=
{ to_fun := λ a, sum_congr a.1 a.2,
map_one' := sum_congr_one,
map_mul' := λ a b, (sum_congr_mul _ _ _ _).symm}
lemma sum_congr_hom_injective {α β : Type*} :
function.injective (sum_congr_hom α β) :=
begin
rintros ⟨⟩ ⟨⟩ h,
rw prod.mk.inj_iff,
split; ext i,
{ simpa using equiv.congr_fun h (sum.inl i), },
{ simpa using equiv.congr_fun h (sum.inr i), },
end
@[simp] lemma sum_congr_swap_one {α β : Type*} [decidable_eq α] [decidable_eq β] (i j : α) :
sum_congr (equiv.swap i j) (1 : perm β) = equiv.swap (sum.inl i) (sum.inl j) :=
sum_congr_swap_refl i j
@[simp] lemma sum_congr_one_swap {α β : Type*} [decidable_eq α] [decidable_eq β] (i j : β) :
sum_congr (1 : perm α) (equiv.swap i j) = equiv.swap (sum.inr i) (sum.inr j) :=
sum_congr_refl_swap i j
/-! Lemmas about `equiv.perm.sigma_congr_right` re-expressed via the group structure. -/
@[simp] lemma sigma_congr_right_mul {α : Type*} {β : α → Type*}
(F : Π a, perm (β a)) (G : Π a, perm (β a)) :
sigma_congr_right F * sigma_congr_right G = sigma_congr_right (F * G) :=
sigma_congr_right_trans G F
@[simp] lemma sigma_congr_right_inv {α : Type*} {β : α → Type*} (F : Π a, perm (β a)) :
(sigma_congr_right F)⁻¹ = sigma_congr_right (λ a, (F a)⁻¹) :=
sigma_congr_right_symm F
@[simp] lemma sigma_congr_right_one {α : Type*} {β : α → Type*} :
(sigma_congr_right (1 : Π a, equiv.perm $ β a)) = 1 :=
sigma_congr_right_refl
/-- `equiv.perm.sigma_congr_right` as a `monoid_hom`.
This is particularly useful for its `monoid_hom.range` projection, which is the subgroup of
permutations which do not exchange elements between fibers. -/
@[simps]
def sigma_congr_right_hom {α : Type*} (β : α → Type*) :
(Π a, perm (β a)) →* perm (Σ a, β a) :=
{ to_fun := sigma_congr_right,
map_one' := sigma_congr_right_one,
map_mul' := λ a b, (sigma_congr_right_mul _ _).symm }
lemma sigma_congr_right_hom_injective {α : Type*} {β : α → Type*} :
function.injective (sigma_congr_right_hom β) :=
begin
intros x y h,
ext a b,
simpa using equiv.congr_fun h ⟨a, b⟩,
end
/-- `equiv.perm.subtype_congr` as a `monoid_hom`. -/
@[simps] def subtype_congr_hom (p : α → Prop) [decidable_pred p] :
(perm {a // p a}) × (perm {a // ¬ p a}) →* perm α :=
{ to_fun := λ pair, perm.subtype_congr pair.fst pair.snd,
map_one' := perm.subtype_congr.refl,
map_mul' := λ _ _, (perm.subtype_congr.trans _ _ _ _).symm }
lemma subtype_congr_hom_injective (p : α → Prop) [decidable_pred p] :
function.injective (subtype_congr_hom p) :=
begin
rintros ⟨⟩ ⟨⟩ h,
rw prod.mk.inj_iff,
split;
ext i;
simpa using equiv.congr_fun h i
end
/-- If `e` is also a permutation, we can write `perm_congr`
completely in terms of the group structure. -/
@[simp] lemma perm_congr_eq_mul (e p : perm α) :
e.perm_congr p = e * p * e⁻¹ := rfl
section extend_domain
/-! Lemmas about `equiv.perm.extend_domain` re-expressed via the group structure. -/
variables {β : Type*} (e : perm α) {p : β → Prop} [decidable_pred p] (f : α ≃ subtype p)
@[simp] lemma extend_domain_one : extend_domain 1 f = 1 :=
extend_domain_refl f
@[simp] lemma extend_domain_inv : (e.extend_domain f)⁻¹ = e⁻¹.extend_domain f := rfl
@[simp] lemma extend_domain_mul (e e' : perm α) :
(e.extend_domain f) * (e'.extend_domain f) = (e * e').extend_domain f :=
extend_domain_trans _ _ _
end extend_domain
/-- If the permutation `f` fixes the subtype `{x // p x}`, then this returns the permutation
on `{x // p x}` induced by `f`. -/
def subtype_perm (f : perm α) {p : α → Prop} (h : ∀ x, p x ↔ p (f x)) : perm {x // p x} :=
⟨λ x, ⟨f x, (h _).1 x.2⟩, λ x, ⟨f⁻¹ x, (h (f⁻¹ x)).2 $ by simpa using x.2⟩,
λ _, by simp only [perm.inv_apply_self, subtype.coe_eta, subtype.coe_mk],
λ _, by simp only [perm.apply_inv_self, subtype.coe_eta, subtype.coe_mk]⟩
@[simp] lemma subtype_perm_apply (f : perm α) {p : α → Prop} (h : ∀ x, p x ↔ p (f x))
(x : {x // p x}) : subtype_perm f h x = ⟨f x, (h _).1 x.2⟩ := rfl
@[simp] lemma subtype_perm_one (p : α → Prop) (h : ∀ x, p x ↔ p ((1 : perm α) x)) :
@subtype_perm α 1 p h = 1 :=
equiv.ext $ λ ⟨_, _⟩, rfl
/-- The inclusion map of permutations on a subtype of `α` into permutations of `α`,
fixing the other points. -/
def of_subtype {p : α → Prop} [decidable_pred p] : perm (subtype p) →* perm α :=
{ to_fun := λ f,
⟨λ x, if h : p x then f ⟨x, h⟩ else x, λ x, if h : p x then f⁻¹ ⟨x, h⟩ else x,
λ x, have h : ∀ h : p x, p (f ⟨x, h⟩), from λ h, (f ⟨x, h⟩).2,
by { simp only [], split_ifs at *;
simp only [perm.inv_apply_self, subtype.coe_eta, subtype.coe_mk, not_true, *] at * },
λ x, have h : ∀ h : p x, p (f⁻¹ ⟨x, h⟩), from λ h, (f⁻¹ ⟨x, h⟩).2,
by { simp only [], split_ifs at *;
simp only [perm.apply_inv_self, subtype.coe_eta, subtype.coe_mk, not_true, *] at * }⟩,
map_one' := begin ext, dsimp, split_ifs; refl, end,
map_mul' := λ f g, equiv.ext $ λ x, begin
by_cases h : p x,
{ have h₁ : p (f (g ⟨x, h⟩)), from (f (g ⟨x, h⟩)).2,
have h₂ : p (g ⟨x, h⟩), from (g ⟨x, h⟩).2,
simp only [h, h₂, coe_fn_mk, perm.mul_apply, dif_pos, subtype.coe_eta] },
{ simp only [h, coe_fn_mk, perm.mul_apply, dif_neg, not_false_iff] }
end }
lemma of_subtype_subtype_perm {f : perm α} {p : α → Prop} [decidable_pred p]
(h₁ : ∀ x, p x ↔ p (f x)) (h₂ : ∀ x, f x ≠ x → p x) :
of_subtype (subtype_perm f h₁) = f :=
equiv.ext $ λ x, begin
rw [of_subtype, subtype_perm],
by_cases hx : p x,
{ simp only [hx, coe_fn_mk, dif_pos, monoid_hom.coe_mk, subtype.coe_mk]},
{ haveI := classical.prop_decidable,
simp only [hx, not_not.mp (mt (h₂ x) hx), coe_fn_mk, dif_neg, not_false_iff,
monoid_hom.coe_mk] }
end
lemma of_subtype_apply_of_not_mem {p : α → Prop} [decidable_pred p]
(f : perm (subtype p)) {x : α} (hx : ¬ p x) :
of_subtype f x = x :=
dif_neg hx
lemma mem_iff_of_subtype_apply_mem {p : α → Prop} [decidable_pred p]
(f : perm (subtype p)) (x : α) :
p x ↔ p ((of_subtype f : α → α) x) :=
if h : p x then by simpa only [of_subtype, h, coe_fn_mk, dif_pos, true_iff, monoid_hom.coe_mk]
using (f ⟨x, h⟩).2
else by simp [h, of_subtype_apply_of_not_mem f h]
@[simp] lemma subtype_perm_of_subtype {p : α → Prop} [decidable_pred p] (f : perm (subtype p)) :
subtype_perm (of_subtype f) (mem_iff_of_subtype_apply_mem f) = f :=
equiv.ext $ λ ⟨x, hx⟩, by { dsimp [subtype_perm, of_subtype],
simp only [show p x, from hx, dif_pos, subtype.coe_eta] }
instance perm_unique {n : Type*} [unique n] : unique (equiv.perm n) :=
{ default := 1,
uniq := λ σ, equiv.ext (λ i, subsingleton.elim _ _) }
@[simp] lemma default_perm {n : Type*} : default (equiv.perm n) = 1 := rfl
end perm
section swap
variables [decidable_eq α]
@[simp] lemma swap_inv (x y : α) : (swap x y)⁻¹ = swap x y := rfl
@[simp] lemma swap_mul_self (i j : α) : swap i j * swap i j = 1 := swap_swap i j
lemma swap_mul_eq_mul_swap (f : perm α) (x y : α) : swap x y * f = f * swap (f⁻¹ x) (f⁻¹ y) :=
equiv.ext $ λ z, begin
simp only [perm.mul_apply, swap_apply_def],
split_ifs;
simp only [perm.apply_inv_self, *, perm.eq_inv_iff_eq, eq_self_iff_true, not_true] at *
end
lemma mul_swap_eq_swap_mul (f : perm α) (x y : α) : f * swap x y = swap (f x) (f y) * f :=
by rw [swap_mul_eq_mul_swap, perm.inv_apply_self, perm.inv_apply_self]
lemma swap_apply_apply (f : perm α) (x y : α) : swap (f x) (f y) = f * swap x y * f⁻¹ :=
by rw [mul_swap_eq_swap_mul, mul_inv_cancel_right]
/-- Left-multiplying a permutation with `swap i j` twice gives the original permutation.
This specialization of `swap_mul_self` is useful when using cosets of permutations.
-/
@[simp]
lemma swap_mul_self_mul (i j : α) (σ : perm α) : equiv.swap i j * (equiv.swap i j * σ) = σ :=
by rw [←mul_assoc, swap_mul_self, one_mul]
/-- Right-multiplying a permutation with `swap i j` twice gives the original permutation.
This specialization of `swap_mul_self` is useful when using cosets of permutations.
-/
@[simp]
lemma mul_swap_mul_self (i j : α) (σ : perm α) : (σ * equiv.swap i j) * equiv.swap i j = σ :=
by rw [mul_assoc, swap_mul_self, mul_one]
/-- A stronger version of `mul_right_injective` -/
@[simp]
lemma swap_mul_involutive (i j : α) : function.involutive ((*) (equiv.swap i j)) :=
swap_mul_self_mul i j
/-- A stronger version of `mul_left_injective` -/
@[simp]
lemma mul_swap_involutive (i j : α) : function.involutive (* (equiv.swap i j)) :=
mul_swap_mul_self i j
@[simp] lemma swap_eq_one_iff {i j : α} : swap i j = (1 : perm α) ↔ i = j :=
swap_eq_refl_iff
lemma swap_mul_eq_iff {i j : α} {σ : perm α} : swap i j * σ = σ ↔ i = j :=
⟨(assume h, have swap_id : swap i j = 1 := mul_right_cancel (trans h (one_mul σ).symm),
by {rw [←swap_apply_right i j, swap_id], refl}),
(assume h, by erw [h, swap_self, one_mul])⟩
lemma mul_swap_eq_iff {i j : α} {σ : perm α} : σ * swap i j = σ ↔ i = j :=
⟨(assume h, have swap_id : swap i j = 1 := mul_left_cancel (trans h (one_mul σ).symm),
by {rw [←swap_apply_right i j, swap_id], refl}),
(assume h, by erw [h, swap_self, mul_one])⟩
lemma swap_mul_swap_mul_swap {x y z : α} (hwz: x ≠ y) (hxz : x ≠ z) :
swap y z * swap x y * swap y z = swap z x :=
equiv.ext $ λ n, by { simp only [swap_apply_def, perm.mul_apply], split_ifs; cc }
end swap
end equiv
|
I do not understand [Finkelstein's] charge of plagiarism against Alan Dershowitz. There is no claim that Dershowitz used the words of others without attribution. When he uses the words of others, he quotes them properly and generally cites them to the original sources (Mark Twain, Palestine Royal Commission, etc.). [Finkelstein's] complaint is that instead he should have cited them to the secondary source, in which Dershowitz may have come upon them. But as The Chicago Manual of Style emphasizes: 'Importance of attribution. With all reuse of others' materials, it is important to identify the original as the source. This not only bolsters the claims of fair use, it also helps avoid any accusation of plagiarism.' This is precisely what Dershowitz did.
|
(* Hendra : The formalization of translation phases from JVM to DEX *)
Require Export LoadBicolano.
Require Export EquivDec.
Require Export Annotated.
Import JVM_Dom.JVM_Prog DEX_Dom.DEX_Prog.
Module MapPC <: MAP with Definition key := JVM_PC := BinNatMap.
Module Type DX_TRANSLATOR_TYPE.
Parameter Block : Type.
Parameter SBMap : Type.
Parameter BlockMap : Type.
Parameter TSMap : Type.
Parameter BPMap : Type.
(* Parameter bm : JVM_BytecodeMethod.
Parameter insnList : list (JVM_PC * (option JVM_PC*JVM_Instruction)). *)
(* Parameter translate : JVM_Program -> DEX_Program. *)
(* We zoom in on bytecode translation *)
(*
Parameter bytecode_translate : list (JVM_PC * (option JVM_PC*JVM_Instruction)) -> DEX_BytecodeMethod.
*)
Parameter start_block : list (JVM_PC * (option JVM_PC*JVM_Instruction)) -> (BlockMap * SBMap).
Parameter trace_parent_child : list (JVM_PC * (option JVM_PC*JVM_Instruction)) -> (BlockMap * SBMap)
-> (BlockMap * (BPMap * (TSMap * Block))).
Parameter translate_instructions : list (JVM_PC * (option JVM_PC*JVM_Instruction))
-> (BlockMap * (BPMap * (TSMap * Block)))
-> ((BlockMap * Block) * (MapAddress.t TypeRegisters * MapN.t (list (N*N)))).
Parameter pick_order : (BlockMap * Block) -> (list JVM_PC * (BlockMap * Block)).
Parameter consolidate_blocks : (list JVM_PC * (BlockMap * Block)) -> list (DEX_Instruction*(DEX_PC*DEX_PC)).
Parameter construct_bytecodemethod : list (DEX_Instruction*(DEX_PC*DEX_PC))
-> MapPC.t (DEX_Instruction*(option DEX_PC * list DEX_ClassName))
-> DEX_BytecodeMethod.
End DX_TRANSLATOR_TYPE.
Module DX_TRANSLATOR <: DX_TRANSLATOR_TYPE.
Module BLOCK.
Record t : Type := mkBlock {
jvm_instructions : list JVM_Instruction;
parents : list JVM_PC;
succs : list JVM_PC;
pSucc : option JVM_PC;
order : option nat;
dex_instructions : list DEX_Instruction;
dex_label : option DEX_PC
}.
Definition empty : t :=
mkBlock (nil) (nil) (nil) (None) (None) (nil) (None).
Definition append_source_instructions (source:t) (l:list JVM_Instruction) : t :=
mkBlock (l++source.(jvm_instructions)) (source.(parents)) (source.(succs))
(source.(pSucc)) (source.(order)) (source.(dex_instructions)) (source.(dex_label)).
Definition append_dex_instructions (source:t) (l:list DEX_Instruction) : t :=
mkBlock (source.(jvm_instructions)) (source.(parents)) (source.(succs))
(source.(pSucc)) (source.(order)) (l++source.(dex_instructions)) (source.(dex_label)).
Lemma PC_eq_dec : forall x y : JVM_PC, {x=y} + {x<>y}.
Proof.
repeat decide equality.
Qed.
Fixpoint append_no_duplicate (l source:list JVM_PC) : list JVM_PC :=
match l with
| nil => (source)
| h :: t => if in_dec PC_eq_dec (h) (source) then
append_no_duplicate (t) (source)
else
append_no_duplicate (t) (h :: source)
end.
Definition append_parents (source:t) (l:list JVM_PC) : t :=
let newParents := append_no_duplicate (l) (source.(parents)) in
mkBlock (source.(jvm_instructions)) (newParents) (source.(succs))
(source.(pSucc)) (source.(order)) (source.(dex_instructions)) (source.(dex_label)).
Definition append_succs (source:t) (l:list JVM_PC) : t :=
let newSuccs := append_no_duplicate (l) (source.(succs)) in
mkBlock (source.(jvm_instructions)) (source.(parents)) (newSuccs)
(source.(pSucc)) (source.(order)) (source.(dex_instructions)) (source.(dex_label)).
Definition update_pSucc (source:t) (pSucc:JVM_PC) : t :=
mkBlock (source.(jvm_instructions)) (source.(parents)) (source.(succs))
(Some pSucc) (source.(order)) (source.(dex_instructions)) (source.(dex_label)).
Definition update_order (source:t) (newOrder:nat) : t :=
mkBlock (source.(jvm_instructions)) (source.(parents)) (source.(succs))
(source.(pSucc)) (Some newOrder) (source.(dex_instructions)) (source.(dex_label)).
Definition update_dex_label (source:t) (newLabel:DEX_PC) : t :=
mkBlock (source.(jvm_instructions)) (source.(parents)) (source.(succs))
(source.(pSucc)) (source.(order)) (source.(dex_instructions)) (Some newLabel).
(* 2's complement of -2 *)
Definition retLabel := Npos (xI (xO (xI (xI (xI (xI (xI (xI
(xI (xI (xI (xI (xI (xI (xI (xI (xI (xI (xI (xI (xI (xI (xI (xI
(xI (xI (xI (xI (xI (xI (xI xH)
)))))))))))))))))))))))))))))).
(* Definition ex := Npos (xI (xO (xO (xO (xO (xO (xO (xO
(xO (xO (xO (xO (xO (xO (xO (xO xH)))))))))))))))). *)
End BLOCK.
Definition Block := BLOCK.t.
Section BytecodeMethod_Translator.
Parameter bm : JVM_BytecodeMethod.
Parameter JVM_S : JVM_PC -> TypeStack.
Definition max_locals := JVM_BYTECODEMETHOD.max_locals bm.
Parameter sgn : JVM_sign.
(*
Fixpoint create_insnList_rec (bm:JVM_BytecodeMethod)
(ls:list (JVM_PC*(JVM_Instruction*(option JVM_PC*list JVM_ClassName))))
(l:list (JVM_PC*(option JVM_PC*JVM_Instruction)))
: list (JVM_PC*(option JVM_PC*JVM_Instruction)) :=
match ls with
| nil => l
| (pc, (ins,(pc',_))) :: ts => create_insnList_rec (bm) (ts) ((pc,(pc',ins))::l)
end.
Definition create_insnList (bm:JVM_BytecodeMethod) : list (JVM_PC * (option JVM_PC*JVM_Instruction)) :=
let pc := JVM_BYTECODEMETHOD.firstAddress bm in
create_insnList_rec (bm)
(MapPC.elements _ (JVM_BYTECODEMETHOD.instr bm)) (nil).
*)
Variable insnList : list (JVM_PC * (option JVM_PC*JVM_Instruction)).
(* Definition insnList := create_insnList (bm). *)
Definition SBMap := MapPC.t bool.
Definition BlockMap := MapPC.t Block.
Definition TSMap := MapPC.t nat.
Definition BPMap := MapPC.t JVM_PC.
Definition start_block_true (pc:option JVM_PC) (maps:(BlockMap*SBMap)) : (BlockMap*SBMap) :=
match pc with
| None => maps
| Some pc' =>
let newM := BinNatMap.update _ (fst (maps)) (pc') BLOCK.empty in
let newSb := BinNatMap.update _ (snd (maps)) (pc') (true) in
(newM, newSb)
end.
Definition start_block_false (pc:option JVM_PC) (maps:(BlockMap*SBMap)) : (BlockMap*SBMap) :=
match pc with
| None => maps
| Some pc' =>
let newSb := BinNatMap.update _ (snd (maps)) (pc') (false) in
(fst (maps), newSb)
end.
Fixpoint start_block_true_offset_list (l:list JVM_OFFSET.t) (pc:JVM_PC) (maps:(BlockMap*SBMap)) : (BlockMap*SBMap) :=
match l with
| nil => maps
| o :: t =>
let newM := BinNatMap.update _ (fst (maps)) (JVM_OFFSET.jump pc o) BLOCK.empty in
let newSb := BinNatMap.update _ (snd (maps)) (JVM_OFFSET.jump pc o) (true) in
start_block_true_offset_list (t) (pc) (newM, newSb)
end.
(* Assumption: there is no dead code, so the instruction immediately after a
   goto or a return is always pointed to by some other instruction. *)
Fixpoint start_block_rec (l:list (JVM_PC*(option JVM_PC*JVM_Instruction))) (maps:(BlockMap * SBMap))
: (BlockMap * SBMap) :=
match l with
| nil => maps
| (pc, (pc',ins)) :: t =>
match ins with
(*
| JVM_Aconst_null => start_block_rec (t) (maps)
| JVM_Arraylength => start_block_rec (t) (update_start_block (pc') (maps))
*)
| JVM_Const _ _ => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Dup => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Dup_x1 => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Dup_x2 => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Dup2 => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Dup2_x1 => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Dup2_x2 => start_block_rec (t) (start_block_false (pc') (maps))
(*
| JVM_Getfield _ => start_block_rec (t) (start_block_true (pc') (maps))
*)
| JVM_Goto o => start_block_rec (t) (start_block_true (Some (JVM_OFFSET.jump pc o)) (maps))
| JVM_I2b => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_I2s => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Ibinop _ => start_block_rec (t) (start_block_false (pc') (maps))
(*
| JVM_If_acmp _ o => start_block_rec (t)
(update_start_block (Some (JVM_OFFSET.jump pc o))
(update_start_block (pc') (maps)) )
*)
| JVM_If_icmp _ o => start_block_rec (t)
(start_block_true (Some (JVM_OFFSET.jump pc o))
(start_block_true (pc') (maps)) )
| JVM_If0 _ o => start_block_rec (t)
(start_block_true (Some (JVM_OFFSET.jump pc o))
(start_block_true (pc') (maps)) )
(*
| JVM_Ifnull _ o => start_block_rec (t)
(update_start_block (Some (JVM_OFFSET.jump pc o))
(update_start_block (pc') (maps)) )
*)
| JVM_Iinc _ _ => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Ineg => start_block_rec (t) (start_block_false (pc') (maps))
(*
| JVM_Instanceof _ => start_block_rec (t) (maps)
| JVM_Invokeinterface _ => start_block_rec (t) (update_start_block (pc') (maps))
| JVM_Invokespecial _ => start_block_rec (t) (update_start_block (pc') (maps))
| JVM_Invokestatic _ => start_block_rec (t) (update_start_block (pc') (maps))
| JVM_Invokevirtual _ => start_block_rec (t) (update_start_block (pc') (maps))
*)
| JVM_Lookupswitch d l => start_block_rec (t)
(start_block_true (Some (JVM_OFFSET.jump pc d))
(start_block_true_offset_list
(map (fun a => snd a) l) (pc) (maps)) )
(*
| JVM_New _ => start_block_rec (t) (update_start_block (pc') (maps))
| JVM_Newarray _ => start_block_rec (t) (update_start_block (pc') (maps))
*)
| JVM_Nop => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Pop => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Pop2 => start_block_rec (t) (start_block_false (pc') (maps))
(*
| JVM_Putfield _ => start_block_rec (t) (update_start_block (pc') (maps))
*)
| JVM_Return => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Swap => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Tableswitch d _ _ l => start_block_rec (t)
(start_block_true (Some (JVM_OFFSET.jump pc d))
(start_block_true_offset_list (l) (pc) (maps)))
(*
| JVM_Vaload _ => start_block_rec (t) (update_start_block (pc') (maps))
| JVM_Vastore _ => start_block_rec (t) (update_start_block (pc') (maps))
*)
| JVM_Vload _ _ => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Vreturn _ => start_block_rec (t) (start_block_false (pc') (maps))
| JVM_Vstore _ _ => start_block_rec (t) (start_block_false (pc') (maps))
(*
| _ => start_block_rec (t) (start_block_false (pc') (maps)) (* Default for
not yet implemented instructions *)
*)
end
end.
Definition start_block : (BlockMap * SBMap) :=
start_block_rec (insnList) (MapPC.empty Block, MapPC.empty bool).
Definition opval (A: Type) (v:option A) (default:A) : A :=
match v with
| None => default
| Some val => val
end.
Implicit Arguments opval.
(* It is implicitly assumed that a one-step instruction always has a successor. *)
Definition one_step_instructions (pc:JVM_PC) (pc':option JVM_PC) (sb:SBMap) (m:BlockMap) (bp:BPMap) (ts:TSMap)
(ret:Block) (tsValue:nat) :=
let succPC := opval (pc') (pc) in
let blockIndex := if opval (MapPC.get _ sb pc) (false) then
pc else opval (MapPC.get _ bp pc) (pc) in
let newTS := MapPC.update _ ts succPC tsValue in
if opval (MapPC.get _ sb pc) (false) then
let cb := opval (MapPC.get _ m pc) (BLOCK.empty) in
let cb' := BLOCK.update_pSucc (cb) (succPC) in
let cb'' := BLOCK.append_succs (cb') (succPC::nil) in
let succb := opval (MapPC.get _ m succPC) (BLOCK.empty) in
let succb' := BLOCK.append_parents (succb) (blockIndex::nil) in
let newBP := MapPC.update _ bp succPC succPC in
let newM := MapPC.update _ (MapPC.update _ m blockIndex
cb'') succPC succb' in
(newM, (newBP, (newTS, ret)))
else
let newBP := MapPC.update _ bp succPC blockIndex in
(m, (newBP, (newTS, ret)))
.
Definition jump_instructions (pc:JVM_PC) (l:list JVM_PC) (sb:SBMap)
(m:BlockMap) (bp:BPMap) (ts:TSMap) (ret:Block) (tsValue:nat) :=
match l with
| nil => (m, (bp, (ts, ret)))
| pc' :: t =>
let blockIndex := if opval (MapPC.get _ sb pc) (false) then
pc else opval (MapPC.get _ bp pc) (pc) in
let cb := opval (MapPC.get _ m pc) (BLOCK.empty) in
let cb' := BLOCK.update_pSucc (cb) (pc') in
let cb'' := BLOCK.append_succs (cb') (l) in
let succb := opval (MapPC.get _ m pc') (BLOCK.empty) in
let succb' := BLOCK.append_parents (succb) (blockIndex::nil) in
let newBP := MapPC.update _ bp pc' pc' in
let newM := MapPC.update _ (MapPC.update _ m blockIndex
cb'') pc' succb' in
(newM, (newBP, (ts, ret)))
end
.
Definition return_instructions (pc:JVM_PC) (sb:SBMap) (m:BlockMap) (bp:BPMap) (ts:TSMap) (ret:Block) :=
let blockIndex := if opval (MapPC.get _ sb pc) (false) then
pc else opval (MapPC.get _ bp pc) (pc) in
let cb := opval (MapPC.get _ m pc) (BLOCK.empty) in
let cb' := BLOCK.update_pSucc (cb) (BLOCK.retLabel) in
let cb'' := BLOCK.append_succs (cb') (BLOCK.retLabel::nil) in
let newRet := BLOCK.append_parents (ret) (blockIndex::nil) in
let newM := MapPC.update _ m blockIndex cb'' in
(newM, (bp, (ts, newRet)))
.
Definition get_tsValue (val:option nat) (ins:JVM_Instruction) : nat :=
match val with
| None => (0)%nat (* the mapping should never return None *)
| Some n =>
match ins with
| JVM_Const _ _ => (n + 1)%nat
| JVM_Dup => (n+1)%nat
| JVM_Dup_x1 => (n+1)%nat
| JVM_Dup_x2 => (n+1)%nat
| JVM_Dup2 => (n+2)%nat
| JVM_Dup2_x1 => (n+2)%nat
| JVM_Dup2_x2 => (n+2)%nat
| JVM_Goto _ => (n)%nat
| JVM_I2b => (n)%nat
| JVM_I2s => (n)%nat
| JVM_Ibinop _ => (n-1)%nat
| JVM_If_icmp _ _ => (n-2)%nat
| JVM_If0 _ _ => (n-1)%nat
| JVM_Iinc _ _ => (n)%nat
| JVM_Ineg => (n)%nat
| JVM_Lookupswitch _ _ => (n-1)%nat
| JVM_Nop => (n)%nat
| JVM_Pop => (n-1)%nat
| JVM_Pop2 => (n-2)%nat
| JVM_Return => (n)%nat
| JVM_Swap => (n)%nat
| JVM_Tableswitch _ _ _ _ => (n-1)%nat
| JVM_Vload _ _ => (n+1)%nat
| JVM_Vreturn _ => (n-1)%nat
| JVM_Vstore _ _ => (n-1)%nat
(* | _ => (n)%nat *)
end
end.
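(* A small sanity check, not part of the original development: with three
   operands on the simulated operand stack, a JVM_Dup leaves four. *)
Lemma get_tsValue_dup : get_tsValue (Some 3%nat) JVM_Dup = 4%nat.
Proof. reflexivity. Qed.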
(* adding a default value may break proofs, have to check *)
Fixpoint parse_insn_rec (l:list (JVM_PC*(option JVM_PC*JVM_Instruction)))
(sb:SBMap)
(maps:BlockMap * (BPMap * (TSMap * Block))) :=
match l with
| nil => maps
| (pc, (pc',ins)) :: t =>
let m := (fst maps) in
let bp := (fst (snd maps)) in
let ts := (fst (snd (snd maps))) in
let ret := (snd (snd (snd (maps)))) in
let tsValue := get_tsValue (MapPC.get _ ts pc) (ins) in
match ins with
| JVM_Const _ _ => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Dup => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Dup_x1 => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Dup_x2 => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Dup2 => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Dup2_x1 => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Dup2_x2 => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Goto o => parse_insn_rec (t) (sb) (jump_instructions (pc) (JVM_OFFSET.jump pc o::nil)
(sb) (m) (bp) (ts) (ret) (tsValue))
| JVM_I2b => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_I2s => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Ibinop _ => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_If_icmp _ o => parse_insn_rec (t) (sb)
(jump_instructions (pc) (cons_option (pc') (JVM_OFFSET.jump pc o::nil)) (sb) (m) (bp) (ts) (ret) (tsValue))
| JVM_If0 _ o => parse_insn_rec (t) (sb)
(jump_instructions (pc) (cons_option (pc') (JVM_OFFSET.jump pc o::nil)) (sb) (m) (bp) (ts) (ret) (tsValue))
| JVM_Iinc _ _ => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Ineg => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Lookupswitch d l => parse_insn_rec (t) (sb)
(jump_instructions (pc) (JVM_OFFSET.jump pc d::(map (fun a => JVM_OFFSET.jump pc (snd a)) l)) (sb) (m) (bp) (ts) (ret) (tsValue))
| JVM_Nop => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Pop => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Pop2 => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Return => parse_insn_rec (t) (sb) (return_instructions (pc) (sb)
(m) (bp) (ts) (ret))
| JVM_Swap => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Tableswitch d _ _ l => parse_insn_rec (t) (sb)
(jump_instructions (pc) (JVM_OFFSET.jump pc d::(map (fun o => JVM_OFFSET.jump pc o) l)) (sb) (m) (bp) (ts) (ret) (tsValue))
| JVM_Vload _ _ => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
| JVM_Vreturn _ => parse_insn_rec (t) (sb) (return_instructions (pc) (sb)
(m) (bp) (ts) (ret))
| JVM_Vstore _ _ => parse_insn_rec (t) (sb) (one_step_instructions (pc) (pc') (sb)
(m) (bp) (ts) (ret) (tsValue))
(* | _ => parse_insn_rec (t) (sb) (maps) (* Default for
not yet implemented instructions *) *)
end
end.
Definition translate_valKind (k:JVM_ValKind) : DEX_ValKind :=
match k with
| JVM_Aval => DEX_Aval
| JVM_Ival => DEX_Ival
end.
Fixpoint create_retBlock (l:list (JVM_PC*(option JVM_PC*JVM_Instruction))) : Block :=
match l with
| nil => BLOCK.empty (* impossible case *)
| (pc, (pc',ins)) :: t =>
match ins with
| JVM_Return => BLOCK.mkBlock (nil) (nil) (nil) (None) (None) (DEX_Return::nil) (None)
| JVM_Vreturn k => BLOCK.mkBlock (nil) (nil) (nil) (None) (None)
(DEX_VReturn (translate_valKind (k)) (0)%N ::nil) (None)
| _ => create_retBlock t
end
end.
Definition trace_parent_child (mSb:BlockMap * SBMap)
: BlockMap * (BPMap * (TSMap * Block))
:= let firstPC := JVM_BYTECODEMETHOD.firstAddress bm in
let initialTS := MapPC.update _ (MapPC.empty nat) (firstPC) (0)%nat in
let initialBP := MapPC.update _ (MapPC.empty JVM_PC) (firstPC) (firstPC) in
let retBlock := create_retBlock insnList in
parse_insn_rec (insnList) (snd (mSb)) (fst (mSb), (initialBP, (initialTS, retBlock))).
Definition translate_const_type (t0:JVM_primitiveType) : DEX_ValKind :=
DEX_Ival.
Definition translate_move_type (t0:JVM_ValKind) : DEX_ValKind :=
match t0 with JVM_Ival => DEX_Ival | JVM_Aval => DEX_Aval end.
Definition translate_binop_op (op:JVM_BinopInt) : DEX_BinopInt :=
match op with
| JVM_AddInt => DEX_AddInt
| JVM_AndInt => DEX_AndInt
| JVM_DivInt => DEX_DivInt
| JVM_MulInt => DEX_MulInt
| JVM_OrInt => DEX_OrInt
| JVM_RemInt => DEX_RemInt
| JVM_ShlInt => DEX_ShlInt
| JVM_ShrInt => DEX_ShrInt
| JVM_SubInt => DEX_SubInt
| JVM_UshrInt => DEX_UshrInt
| JVM_XorInt => DEX_XorInt
end.
Definition translate_comp (cmp:JVM_CompInt) : DEX_CompInt :=
match cmp with
| JVM_EqInt => DEX_EqInt
| JVM_NeInt => DEX_NeInt
| JVM_LtInt => DEX_LtInt
| JVM_LeInt => DEX_LeInt
| JVM_GtInt => DEX_GtInt
| JVM_GeInt => DEX_GeInt
end.
Fixpoint translate_instructions_rec (l:list (JVM_PC*(option JVM_PC*JVM_Instruction)))
(m:BlockMap) (bp:BPMap) (ts:TSMap) (ret:Block)
(RT:MapAddress.t TypeRegisters) (pcMapping:MapN.t (list (N*N)))
: ((BlockMap * Block) * (MapAddress.t TypeRegisters * MapN.t (list (N*N))))
:= match l with
| nil => ((m, ret), (RT, pcMapping))
| (pc, (pc',ins)) :: t =>
let blockIndex := opval (MapPC.get _ bp pc) (0)%N in
let cb := opval (MapPC.get _ m blockIndex) (BLOCK.empty) in
let tsValue := get_tsValue (MapPC.get _ ts pc) (ins) in
match ins with
| JVM_Const t0 z =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Const (translate_const_type (t0)) (N_toReg (tsValue)) z)::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Dup =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (DEX_Ival) (N_toReg (tsValue)) (N_toReg (tsValue-1)))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Dup_x1 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (DEX_Ival) (N_toReg (tsValue)) (N_toReg (tsValue-1)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-1)) (N_toReg (tsValue-2)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-2)) (N_toReg (tsValue)))::nil) in
let tsm1 := opval (MapN.get _ rt (N_toReg (tsValue-1))) (L.Simple L.bot) in
let rt1 := MapN.update _ rt (N_toReg (tsValue)) (tsm1) in
let RT1 := MapAddress.update _ (RT') (pc, (j+1)%N) (rt1) in
let tsm2 := opval (MapN.get _ rt1 (N_toReg (tsValue-2))) (L.Simple L.bot) in
let rt2 := MapN.update _ rt1 (N_toReg (tsValue-1)) (tsm2) in
let RT2 := MapAddress.update _ (RT1) (pc, (j+2)%N) (rt2) in
(*
let ts0 := opval (MapN.get _ rt2 (N_toReg (tsValue))) (L.Simple L.bot) in
let rt3 := MapN.update _ rt2 (N_toReg (tsValue-2)) (ts0) in
let RT3 := MapAddress.update _ (RT2) (pc, (j+3)%N) (rt3) in
*)
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc)
((pc, j)::(pc,(j+1)%N)::(pc,(j+2)%N)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT2) (pcMapping')
| JVM_Dup_x2 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (DEX_Ival) (N_toReg (tsValue)) (N_toReg (tsValue-1)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-1)) (N_toReg (tsValue-2)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-2)) (N_toReg (tsValue-3)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-3)) (N_toReg (tsValue)))::nil) in
let tsm1 := opval (MapN.get _ rt (N_toReg (tsValue-1))) (L.Simple L.bot) in
let rt1 := MapN.update _ rt (N_toReg (tsValue)) (tsm1) in
let RT1 := MapAddress.update _ (RT') (pc, (j+1)%N) (rt1) in
let tsm2 := opval (MapN.get _ rt1 (N_toReg (tsValue-2))) (L.Simple L.bot) in
let rt2 := MapN.update _ rt1 (N_toReg (tsValue-1)) (tsm2) in
let RT2 := MapAddress.update _ (RT1) (pc, (j+2)%N) (rt2) in
let tsm3 := opval (MapN.get _ rt2 (N_toReg (tsValue-3))) (L.Simple L.bot) in
let rt3 := MapN.update _ rt2 (N_toReg (tsValue-2)) (tsm3) in
let RT3 := MapAddress.update _ (RT2) (pc, (j+3)%N) (rt3) in
(*
let ts0 := opval (MapN.get _ rt2 (N_toReg (tsValue))) (L.Simple L.bot) in
let rt4 := MapN.update _ rt3 (N_toReg (tsValue-3)) (ts0) in
let RT4 := MapAddress.update _ (RT3) (pc, (j+4)%N) (rt4) in
*)
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc)
((pc, j)::(pc,(j+1)%N)::(pc,(j+2)%N)::(pc,(j+3)%N)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT3) (pcMapping')
| JVM_Dup2 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (DEX_Ival) (N_toReg (tsValue+1)) (N_toReg (tsValue-1)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue)) (N_toReg (tsValue-2)))::nil) in
let tsm1 := opval (MapN.get _ rt (N_toReg (tsValue-1))) (L.Simple L.bot) in
let rt1 := MapN.update _ rt (N_toReg (tsValue+1)) (tsm1) in
let RT1 := MapAddress.update _ (RT') (pc, (j+1)%N) (rt1) in
(*
let tsm2 := opval (MapN.get _ rt1 (N_toReg (tsValue-2))) (L.Simple L.bot) in
let rt2 := MapN.update _ rt1 (N_toReg (tsValue)) (tsm2) in
let RT2 := MapAddress.update _ (RT1) (pc, (j+2)%N) (rt2) in
*)
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::(pc,(j+1)%N)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT1) (pcMapping')
| JVM_Dup2_x1 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (DEX_Ival) (N_toReg (tsValue+1)) (N_toReg (tsValue-1)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue)) (N_toReg (tsValue-2)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-1)) (N_toReg (tsValue-3)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-2)) (N_toReg (tsValue+1)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-3)) (N_toReg (tsValue)))::nil) in
let tsm1 := opval (MapN.get _ rt (N_toReg (tsValue-1))) (L.Simple L.bot) in
let rt1 := MapN.update _ rt (N_toReg (tsValue+1)) (tsm1) in
let RT1 := MapAddress.update _ (RT') (pc, (j+1)%N) (rt1) in
let tsm2 := opval (MapN.get _ rt1 (N_toReg (tsValue-2))) (L.Simple L.bot) in
let rt2 := MapN.update _ rt1 (N_toReg (tsValue)) (tsm2) in
let RT2 := MapAddress.update _ (RT1) (pc, (j+2)%N) (rt2) in
let tsm3 := opval (MapN.get _ rt2 (N_toReg (tsValue-3))) (L.Simple L.bot) in
let rt3 := MapN.update _ rt2 (N_toReg (tsValue-1)) (tsm3) in
let RT3 := MapAddress.update _ (RT2) (pc, (j+3)%N) (rt3) in
let tsp1 := opval (MapN.get _ rt2 (N_toReg (tsValue+1))) (L.Simple L.bot) in
let rt4 := MapN.update _ rt3 (N_toReg (tsValue-2)) (tsp1) in
let RT4 := MapAddress.update _ (RT3) (pc, (j+4)%N) (rt4) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc)
((pc, j)::(pc,(j+1)%N)::(pc,(j+2)%N)::(pc,(j+3)%N)::(pc,(j+4)%N)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT4) (pcMapping')
| JVM_Dup2_x2 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (DEX_Ival) (N_toReg (tsValue+1)) (N_toReg (tsValue-1)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue)) (N_toReg (tsValue-2)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-1)) (N_toReg (tsValue-3)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-2)) (N_toReg (tsValue-4)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-3)) (N_toReg (tsValue+1)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-4)) (N_toReg (tsValue)))::nil) in
let tsm1 := opval (MapN.get _ rt (N_toReg (tsValue-1))) (L.Simple L.bot) in
let rt1 := MapN.update _ rt (N_toReg (tsValue+1)) (tsm1) in
let RT1 := MapAddress.update _ (RT') (pc, (j+1)%N) (rt1) in
let tsm2 := opval (MapN.get _ rt1 (N_toReg (tsValue-2))) (L.Simple L.bot) in
let rt2 := MapN.update _ rt1 (N_toReg (tsValue)) (tsm2) in
let RT2 := MapAddress.update _ (RT1) (pc, (j+2)%N) (rt2) in
let tsm3 := opval (MapN.get _ rt2 (N_toReg (tsValue-3))) (L.Simple L.bot) in
let rt3 := MapN.update _ rt2 (N_toReg (tsValue-1)) (tsm3) in
let RT3 := MapAddress.update _ (RT2) (pc, (j+3)%N) (rt3) in
let tsm4 := opval (MapN.get _ rt2 (N_toReg (tsValue-4))) (L.Simple L.bot) in
let rt4 := MapN.update _ rt3 (N_toReg (tsValue-2)) (tsm4) in
let RT4 := MapAddress.update _ (RT3) (pc, (j+4)%N) (rt4) in
let tsp1 := opval (MapN.get _ rt2 (N_toReg (tsValue+1))) (L.Simple L.bot) in
let rt5 := MapN.update _ rt3 (N_toReg (tsValue-3)) (tsp1) in
let RT5 := MapAddress.update _ (RT3) (pc, (j+5)%N) (rt5) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc)
((pc, j)::(pc,(j+1)%N)::(pc,(j+2)%N)::(pc,(j+3)%N)::(pc,(j+4)%N)::(pc,(j+5)%N)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT5) (pcMapping')
| JVM_Goto o => translate_instructions_rec (t) (m) (bp) (ts) (ret) (RT) (pcMapping)
| JVM_I2b =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_I2b (N_toReg (tsValue)) (N_toReg (tsValue)))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_I2s =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_I2s (N_toReg (tsValue)) (N_toReg (tsValue)))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Ibinop op =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Ibinop (translate_binop_op (op)) (N_toReg (tsValue)) (N_toReg (tsValue)) (N_toReg (tsValue-1)))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_If_icmp cmp o =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Ifcmp (translate_comp (cmp)) (N_toReg (tsValue-1)) (N_toReg (tsValue-2))
(Z_of_N (DEX_OFFSET.jump pc o)))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_If0 cmp o =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Ifz (translate_comp (cmp)) (N_toReg (tsValue-1))
(Z_of_N (DEX_OFFSET.jump pc o)))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Iinc l0 z =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_IbinopConst (DEX_AddInt) (l0) (l0) z)::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Ineg =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Ineg (N_toReg (tsValue)) (N_toReg (tsValue)))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Lookupswitch d l0 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_SparseSwitch (N_toReg (tsValue-1)) (length l0)
(map (fun e => ((fst e), Z_of_N (DEX_OFFSET.jump pc (snd e)))) l0))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Nop =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Nop)::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Pop => translate_instructions_rec (t) (m) (bp) (ts) (ret) (RT) (pcMapping)
| JVM_Pop2 => translate_instructions_rec (t) (m) (bp) (ts) (ret) (RT) (pcMapping)
| JVM_Return => translate_instructions_rec (t) (m) (bp) (ts) (ret) (RT) (pcMapping)
| JVM_Swap =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (DEX_Ival) (N_toReg (tsValue+1)) (N_toReg (tsValue-1)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue)) (N_toReg (tsValue-2)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-1)) (N_toReg (tsValue)))::
(DEX_Move (DEX_Ival) (N_toReg (tsValue-2)) (N_toReg (tsValue+1)))::nil) in
let tsm1 := opval (MapN.get _ rt (N_toReg (tsValue-1))) (L.Simple L.bot) in
let rt1 := MapN.update _ rt (N_toReg (tsValue+1)) (tsm1) in
let RT1 := MapAddress.update _ (RT') (pc, (j+1)%N) (rt1) in
let tsm2 := opval (MapN.get _ rt1 (N_toReg (tsValue-2))) (L.Simple L.bot) in
let rt2 := MapN.update _ rt1 (N_toReg (tsValue)) (tsm2) in
let RT2 := MapAddress.update _ (RT1) (pc, (j+2)%N) (rt2) in
let ts0 := opval (MapN.get _ rt2 (N_toReg (tsValue))) (L.Simple L.bot) in
let rt3 := MapN.update _ rt2 (N_toReg (tsValue-1)) (ts0) in
let RT3 := MapAddress.update _ (RT2) (pc, (j+3)%N) (rt3) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc)
((pc, j)::(pc,(j+1)%N)::(pc,(j+2)%N)::(pc,(j+3)%N)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT3) (pcMapping')
| JVM_Tableswitch d low high l0 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_PackedSwitch (N_toReg (tsValue-1)) (low) (length l0)
(map (fun o => Z_of_N (DEX_OFFSET.jump pc o)) l0))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Vload k l0 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (translate_move_type (k)) (N_toReg (tsValue)) (l0))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Vreturn k =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_VReturn (translate_valKind (k)) (0)%N)::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
| JVM_Vstore k l0 =>
let j := N.of_nat (length (BLOCK.dex_instructions cb)) in
let rt := translate_st_rt (JVM_S pc) (max_locals) (sgn.(JVM_lvt)) in
let RT' := MapAddress.update _ (RT) (pc, j) (rt) in
let newBlock := BLOCK.append_dex_instructions (cb)
((DEX_Move (translate_move_type (k)) (l0) (N_toReg (tsValue-1)))::nil) in
let newM := MapPC.update _ m blockIndex newBlock in
let pcMapping' := MapN.update _ pcMapping (pc) ((pc, j)::nil) in
translate_instructions_rec (t) (newM) (bp) (ts) (ret) (RT') (pcMapping')
(*
| _ => translate_instructions_rec (t) (m) (bp) (ts) (ret) (* Default for
not yet implemented instructions *) *)
end
end.
Definition translate_instructions (arg:BlockMap * (BPMap * (TSMap * Block)))
: ((BlockMap * Block) * (MapAddress.t TypeRegisters * MapN.t (list (N*N))))
:= let m := fst (arg) in
let bp := fst (snd arg) in
let ts := fst (snd (snd arg)) in
let ret := snd (snd (snd arg)) in
translate_instructions_rec (insnList) (m) (bp) (ts) (ret)
(MapAddress.empty TypeRegisters) (MapN.empty (list (N*N))).
Lemma Label_eq_dec : forall x y : JVM_PC, {x=y} + {x<>y}.
Proof.
repeat decide equality.
Qed.
(* Orders are only ever compared against None, so comparing constructors suffices. *)
Definition beq_order (x y:option nat) : bool :=
  match x, y with
  | None, None => true
  | Some _, Some _ => true
  | _, _ => false
  end.
(* Boolean equality on optional program points, deciding label equality with Label_eq_dec. *)
Definition beq_pc (x y:option JVM_PC) : bool :=
  match x, y with
  | None, None => true
  | Some v, Some v' => if Label_eq_dec v v' then true else false
  | _, _ => false
  end.
Fixpoint find_parent (x:JVM_PC) (parents:list JVM_PC) (loop:list JVM_PC)
(m:BlockMap) : (JVM_PC * bool) :=
match parents with
| nil => (x, false)
| h :: t =>
let parentBlock := MapPC.get _ m h in
if in_dec (Label_eq_dec) (h) (loop) then
(x, false)
else
if (beq_order (BLOCK.order (opval parentBlock BLOCK.empty)) None) &&
(beq_pc (BLOCK.pSucc (opval parentBlock BLOCK.empty)) (Some x))
then
(h, true)
else
find_parent (x) (t) (loop) (m)
end.
Fixpoint pick_starting_point (x:JVM_PC) (loop:list JVM_PC) (m:BlockMap) (bound:nat)
{struct bound} : JVM_PC :=
match bound with
| O => x
| S n =>
let cb := MapPC.get _ m x in
let (y, b) := find_parent (x) (BLOCK.parents (opval cb BLOCK.empty)) (loop) (m) in
if b then
pick_starting_point (y) (y::loop) (m) (n)
else
x
end.
Fixpoint find_available_successor (x:JVM_PC) (succs:list JVM_PC) (m:BlockMap) (ret:Block)
: (JVM_PC * bool) :=
match succs with
| nil => (x, false)
| h :: t =>
let isSuccRet := beq_pc (Some h) (Some BLOCK.retLabel) in
let succ := if isSuccRet then ret else (opval (MapPC.get _ m h) BLOCK.empty) in
if beq_order (BLOCK.order succ) (None) then
(h, true)
else
find_available_successor (x) (t) (m) (ret)
end.
Definition find_successor (x:JVM_PC) (xb:Block) (succs:list JVM_PC) (m:BlockMap) (ret:Block) : (JVM_PC * bool) :=
let pSuccLabel := (BLOCK.pSucc xb) in
if beq_pc (pSuccLabel) (None) then
find_available_successor (x) (succs) (m) (ret)
else
let pSuccBlock := if beq_pc pSuccLabel (Some BLOCK.retLabel) then
ret else opval (MapPC.get _ m (opval pSuccLabel (0)%N)) (BLOCK.empty) in
if beq_order (BLOCK.order pSuccBlock) None then
((opval pSuccLabel (0)%N), true)
else
find_available_successor (x) (succs) (m) (ret).
Fixpoint trace_successors (x:JVM_PC) (order:nat) (m:BlockMap) (ret:Block)
(sortedPC:list JVM_PC) (bound:nat) : (nat * (list JVM_PC * (BlockMap * Block))) :=
match bound with
| O => (order, (sortedPC, (m, ret)))
| S n =>
let isReturn := beq_pc (Some x) (Some BLOCK.retLabel) in
let b := if isReturn then ret else opval (MapPC.get _ m x) (BLOCK.empty) in
let newM := if isReturn then m else MapPC.update _ m x (BLOCK.update_order b order) in
let newRet := if isReturn then BLOCK.update_order ret order else ret in
let newSortedPC := x :: sortedPC in
let (lbl, found) := find_successor (x) (b) (BLOCK.succs b) (m) (ret) in
if found then
trace_successors (x) (S order) (newM) (newRet) (newSortedPC) (n)
else (S order, (newSortedPC, (newM, newRet)))
end.
Fixpoint pick_order_rec (l:list JVM_PC) (order:nat) (arg:BlockMap * Block)
(sortedPC:list JVM_PC) : (list JVM_PC * (BlockMap * Block)):=
match l with
| nil => (sortedPC, arg)
| h :: t =>
let source := pick_starting_point (h) (h::nil) (fst arg) (length l) in
let (newOrder, newSnd) := trace_successors (source) (order) (fst arg) (snd arg) (sortedPC) (length l) in
let (newSortedPC, newSnd') := newSnd in
let (newM, newRet) := newSnd' in
pick_order_rec (t) (newOrder) (newM, newRet) (newSortedPC)
end.
(* The correct behaviour requires the list to be sorted; at the moment it is not
   clear how sortedness affects the proof. The crucial requirement that the
   traversal starts with block 0 is assumed by all the previous steps, and is
   further enforced by adding the first address to the head of the list. *)
Definition pick_order (arg:BlockMap * Block) : (list JVM_PC * (BlockMap * Block))
:=
(*let block0 := MapPC.get _ (fst arg) (BYTECODEMETHOD.firstAddress bm) in
let newM := MapPC.update _ (fst arg) (BYTECODEMETHOD.firstAddress bm)
(Block.updateOrder (block0) (Some 0)) in*)
pick_order_rec ((JVM_BYTECODEMETHOD.firstAddress bm)::MapPC.dom _ (fst arg)) (0%nat) (arg) (nil).
Definition opposite_cmp (cmp:DEX_CompInt) : DEX_CompInt :=
match cmp with
| DEX_EqInt => DEX_NeInt
| DEX_NeInt => DEX_EqInt
| DEX_LtInt => DEX_GeInt
| DEX_LeInt => DEX_GtInt
| DEX_GtInt => DEX_LeInt
| DEX_GeInt => DEX_LtInt
end.
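(* A small sanity check, not part of the original development: negating a
   comparison twice yields the original comparison. *)
Lemma opposite_cmp_ge : opposite_cmp (opposite_cmp DEX_GeInt) = DEX_GeInt.
Proof. reflexivity. Qed.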
Fixpoint add_instructions (lst:list DEX_Instruction) (succs:list DEX_PC) (pSucc:DEX_PC)
(dex_label:DEX_PC) (needsGoto : bool) (output : list (DEX_PC * DEX_Instruction))
: list (DEX_PC * DEX_Instruction) :=
match lst with
| nil =>
let gotoIns := if needsGoto then nil else
(dex_label,
DEX_Goto (Z_of_N (pSucc)))::nil in
output ++ gotoIns
| h :: nil =>
if needsGoto then
match h with
| DEX_Ifcmp cmp ra rb o =>
match succs with
| s :: pSucc :: t =>
output ++ (dex_label, DEX_Ifcmp (opposite_cmp cmp) ra rb o)::nil
| _ => let lastIns := (dex_label, h)::
((dex_label + 1)%N,
DEX_Goto (Z_of_N (pSucc)))::nil in
output ++ lastIns
end
| DEX_Ifz cmp r o =>
match succs with
| s :: pSucc :: t =>
output ++ (dex_label, DEX_Ifz (opposite_cmp cmp) r o)::nil
| _ => let lastIns := (dex_label, h)::
((dex_label + 1)%N,
DEX_Goto (Z_of_N (pSucc)))::nil in
output ++ lastIns
end
| _ => (output ++ (dex_label, h) :: nil)
end
else (output ++ (dex_label, h) :: nil)
| h :: t => add_instructions (t) (succs) (pSucc) (dex_label + 1)%N (needsGoto) (output ++ (dex_label, h) :: nil)
end.
Fixpoint output_blocks (lst : list JVM_PC) (m:BlockMap) (ret:Block) (dex_label:DEX_PC)
(output:list (DEX_PC*DEX_Instruction))
: (list (DEX_PC*DEX_Instruction) * (BlockMap * Block)) :=
match lst with
| nil => (output, (m, ret))
| h :: t =>
let isReturn := beq_pc (Some h) (Some BLOCK.retLabel) in
let cb := if isReturn then ret else
opval (MapPC.get _ m h) BLOCK.empty in
let needsGoto := match (BLOCK.pSucc cb) with
| None => false
| Some x =>
match t with
| x :: t' => false
| _ => true
end
end in
let currentContent := add_instructions (BLOCK.dex_instructions cb)
(BLOCK.succs cb) (opval (BLOCK.pSucc cb) (0)%N)
(dex_label) (needsGoto) (nil) in
let newOutput := output ++ currentContent in
let newBlock := BLOCK.update_dex_label cb dex_label in
let newRet := if isReturn then newBlock else ret in
let newM := if isReturn then m else
MapPC.update _ m h (newBlock) in
output_blocks (t) (newM) (newRet)
(Nplus (dex_label) (N_of_nat (length currentContent)))
(newOutput)
end.
(* TODO fix into pair *)
Fixpoint fix_target (lst : list (DEX_PC*DEX_Instruction))
(m:BlockMap) (ret:Block) (result:list (DEX_Instruction*(DEX_PC*DEX_PC)))
: (list (DEX_Instruction*(DEX_PC*DEX_PC))) :=
match lst with
| nil => result
| (pc,ins) :: t =>
match ins with
| DEX_Goto o =>
let isReturn := beq_pc (Some pc) (Some BLOCK.retLabel) in
let succBlock := if isReturn then ret else
(opval (MapPC.get _ m (N_of_Z o)) BLOCK.empty) in
let succLabel := opval (BLOCK.dex_label succBlock) (0)%N in
let newIns := DEX_Goto (Z_of_N (succLabel) - Z_of_N (pc))%Z in
fix_target (t) (m) (ret) ((newIns,(pc,succLabel))::result)
| DEX_Ifcmp cmp ra rb o =>
let isReturn := beq_pc (Some pc) (Some BLOCK.retLabel) in
let succBlock := if isReturn then ret else
(opval (MapPC.get _ m (N_of_Z o)) BLOCK.empty) in
let succLabel := opval (BLOCK.dex_label succBlock) (0)%N in
let newIns := DEX_Ifcmp cmp ra rb (Z_of_N (succLabel) - Z_of_N (pc))%Z in
fix_target (t) (m) (ret) ((newIns,(pc,(pc+1)%N))::result)
| DEX_Ifz cmp r o =>
let isReturn := beq_pc (Some pc) (Some BLOCK.retLabel) in
let succBlock := if isReturn then ret else
(opval (MapPC.get _ m (N_of_Z o)) BLOCK.empty) in
let succLabel := opval (BLOCK.dex_label succBlock) (0)%N in
let newIns := DEX_Ifz cmp r (Z_of_N (succLabel) - Z_of_N (pc))%Z in
fix_target (t) (m) (ret) ((newIns,(pc,(pc+1)%N))::result)
| _ => fix_target (t) (m) (ret) ((ins,(pc, (pc+1)%N))::result)
end
end.
Definition consolidate_blocks (arg:list JVM_PC * (BlockMap * Block))
: list (DEX_Instruction*(DEX_PC*DEX_PC)) :=
let lst := fst arg in
let m := fst (snd arg) in
let ret := snd (snd arg) in
let (insnList, sndRet) := output_blocks (lst) (m) (ret) (0)%N (nil) in
let (newM, newRet) := sndRet in
fix_target (insnList) (newM) (newRet) (nil).
Fixpoint construct_bytecodemethod (lst:list (DEX_Instruction*(DEX_PC*DEX_PC)) )
(bc:MapPC.t (DEX_Instruction*(option DEX_PC * list DEX_ClassName)))
: DEX_BytecodeMethod :=
match lst with
| nil => DEX_BYTECODEMETHOD.Build_t (1)%N (bc) (1) (1) (1)
| (ins, (pc, pc')) :: t =>
construct_bytecodemethod (t) (DEX_bc_cons (pc) ins (pc') bc)
end.
Definition bytecode_translate : DEX_BytecodeMethod :=
let (codes, types) := translate_instructions
(trace_parent_child (start_block)) in
construct_bytecodemethod (consolidate_blocks (pick_order (codes)))
(DEX_bc_empty).
End BytecodeMethod_Translator.
(*
Section Translate_se.
Parameter Translate_PC : JVM_PC -> DEX_PC.
End Translate_se.
*)
End DX_TRANSLATOR. |
{-
Practical Relational Algebra
Toon Nolten
based on
The Power Of Pi
-}
module relational-algebra where
open import Data.Empty
open import Data.Unit hiding (_≤_)
open import Data.Bool
open import Data.Nat
open import Data.Integer hiding (show)
open import Data.List
open import Data.Char hiding (_==_) renaming (show to charToString)
open import Data.Vec hiding (_++_; lookup; map; foldr; _>>=_)
open import Data.String using (String; toVec; _==_; strictTotalOrder)
renaming (_++_ to _∥_)
open import Data.Product using (_×_; _,_; proj₁)
open import Coinduction
open import IO
open import Relation.Binary
open StrictTotalOrder Data.String.strictTotalOrder renaming (compare to str_cmp)
data Order : Set where
LT EQ GT : Order
module InsertionSort where
insert : {A : Set} → (A → A → Order) → A → List A → List A
insert _ e [] = e ∷ []
insert cmp e (l ∷ ls) with cmp e l
... | GT = l ∷ insert cmp e ls
... | _ = e ∷ l ∷ ls
sort : {A : Set} → (A → A → Order) → List A → List A
sort cmp = foldr (insert cmp) []
open InsertionSort using (insert; sort)
-- Universe U consists of the type U and el : U → Set
data U : Set where
CHAR NAT BOOL : U
VEC : U → ℕ → U
el : U → Set
el CHAR = Char
el NAT = ℕ
el (VEC u n) = Vec (el u) n
el BOOL = Bool
parens : String → String
parens str = "(" ∥ str ∥ ")"
show : {u : U} → el u → String
show {CHAR } c = charToString c
show {NAT } zero = "Zero"
show {NAT } (suc k) = "Succ " ∥ parens (show k)
show {VEC u zero } Nil = "Nil"
show {VEC u (suc k)} (x ∷ xs) = parens (show x) ∥ " ∷ " ∥ parens (show xs)
show {BOOL } true = "True"
show {BOOL } false = "False"
_=ᴺ_ : ℕ → ℕ → Bool
zero =ᴺ zero = true
suc m =ᴺ suc n = (m =ᴺ n)
_ =ᴺ _ = false
_≤ᴺ_ : ℕ → ℕ → Order
zero ≤ᴺ zero = EQ
zero ≤ᴺ _ = LT
_ ≤ᴺ zero = GT
suc a ≤ᴺ suc b = a ≤ᴺ b
_=ᵁ_ : U → U → Bool
CHAR =ᵁ CHAR = true
NAT =ᵁ NAT = true
BOOL =ᵁ BOOL = true
VEC u x =ᵁ VEC u' x' = (u =ᵁ u') ∧ (x =ᴺ x')
_ =ᵁ _ = false
_≤ᵁ_ : U → U → Order
CHAR ≤ᵁ CHAR = EQ
CHAR ≤ᵁ _ = LT
_ ≤ᵁ CHAR = GT
NAT ≤ᵁ NAT = EQ
NAT ≤ᵁ _ = LT
_ ≤ᵁ NAT = GT
BOOL ≤ᵁ BOOL = EQ
BOOL ≤ᵁ _ = LT
_ ≤ᵁ BOOL = GT
VEC a x ≤ᵁ VEC b y with a ≤ᵁ b
... | LT = LT
... | EQ = x ≤ᴺ y
... | GT = GT
So : Bool → Set
So true = ⊤
So false = ⊥
data SqlValue : Set where
SqlString : String → SqlValue
SqlChar : Char → SqlValue
SqlBool : Bool → SqlValue
SqlInteger : ℤ → SqlValue
--{-# COMPILED_DATA SqlValue SqlValue SqlString SqlChar SqlBool SqlInteger #-}
module OrderedSchema where
SchemaDescription = List (List SqlValue)
Attribute : Set
Attribute = String × U
-- Compare on type if names are equal.
-- SQL DB's probably don't allow columns with the same name
-- but nothing prevents us from writing a Schema that does;
-- this is necessary to make our sort return a unique answer.
attr_cmp : Attribute → Attribute → Order
attr_cmp (nm₁ , U₁) (nm₂ , U₂) with str_cmp nm₁ nm₂ | U₁ ≤ᵁ U₂
... | tri< _ _ _ | _ = LT
... | tri≈ _ _ _ | U₁≤U₂ = U₁≤U₂
... | tri> _ _ _ | _ = GT
data Schema : Set where
sorted : List Attribute → Schema
mkSchema : List Attribute → Schema
mkSchema xs = sorted (sort attr_cmp xs)
expandSchema : Attribute → Schema → Schema
expandSchema x (sorted xs) = sorted (insert attr_cmp x xs)
schemify : SchemaDescription → Schema
schemify sdesc = {!!}
disjoint : Schema → Schema → Bool
disjoint (sorted [] ) (_ ) = true
disjoint (_ ) (sorted [] ) = true
disjoint (sorted (x ∷ xs)) (sorted (y ∷ ys)) with attr_cmp x y
... | LT = disjoint (sorted xs ) (sorted (y ∷ ys))
... | EQ = false
... | GT = disjoint (sorted (x ∷ xs)) (sorted ys )
sub : Schema → Schema → Bool
sub (sorted [] ) (_ ) = true
sub (sorted (x ∷ _) ) (sorted [] ) = false
sub (sorted (x ∷ xs)) (sorted (X ∷ Xs)) with attr_cmp x X
... | LT = false
... | EQ = sub (sorted xs ) (sorted Xs)
... | GT = sub (sorted (x ∷ xs)) (sorted Xs)
same' : List Attribute → List Attribute → Bool
same' ([] ) ([] ) = true
same' ((nm₁ , ty₁) ∷ xs) ((nm₂ , ty₂) ∷ ys) =
(nm₁ == nm₂) ∧ (ty₁ =ᵁ ty₂) ∧ same' xs ys
same' (_ ) (_ ) = false
same : Schema → Schema → Bool
same (sorted xs) (sorted ys) = same' xs ys
occurs : String → Schema → Bool
occurs nm (sorted s) = any (_==_ nm) (map (proj₁) s)
lookup' : (nm : String) → (s : List Attribute)
→ So (occurs nm (sorted s)) → U
lookup' _ [] ()
lookup' nm ((name , type) ∷ s') p with nm == name
... | true = type
... | false = lookup' nm s' p
lookup : (nm : String) → (s : Schema) → So (occurs nm s) → U
lookup nm (sorted s) = lookup' nm s
append : (s s' : Schema) → Schema
append (sorted s) (sorted s') = mkSchema (s ++ s')
open OrderedSchema using (Schema; mkSchema; expandSchema; schemify;
disjoint; sub; same; occurs; lookup;
append)
data Row : Schema → Set where
EmptyRow : Row (mkSchema [])
ConsRow : ∀ {name u s} → el u → Row s → Row (expandSchema (name , u) s)
Table : Schema → Set
Table s = List (Row s)
DatabasePath = String
TableName = String
postulate
Connection : Set
connectSqlite3 : DatabasePath → IO Connection
describe_table : TableName → Connection → IO (List (List SqlValue))
-- {-# COMPILED_TYPE Connection Connection #-}
-- {-# COMPILED connectSqlite3 connectSqlite3 #-}
-- {-# COMPILED describe_table describe_table #-}
data Handle : Schema → Set where
conn : Connection → (s : Schema) → Handle s
-- Connect currently ignores differences between
-- the expected schema and the actual schema.
-- According to tpop this should result in
-- "a *runtime exception* in the *IO* monad."
-- Agda does not have exceptions(?)
-- -> postulate error with a compiled pragma?
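-- One possible shape for that idea (a hypothetical, untested sketch; the
-- pragma is an assumption about the FFI, not verified code):
-- postulate sqlError : {A : Set} → String → A
-- {-# COMPILED sqlError (\_ s -> error s) #-}
-- The mismatch branch of connect could then call
-- sqlError "schema mismatch" instead of silently continuing.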
connect : DatabasePath → TableName → (s : Schema) → IO (Handle s)
connect DB table schema_expect =
♯ (connectSqlite3 DB) >>=
(λ sqlite_conn →
♯ (♯ (describe_table table sqlite_conn) >>=
(λ description →
♯ (♯ (return (schemify description)) >>=
(λ schema_actual →
♯ (♯ (return (same schema_expect schema_actual)) >>=
(λ { true → ♯ (return (conn sqlite_conn schema_expect));
false → ♯ (return (conn sqlite_conn schema_expect)) })))))))
data Expr : Schema → U → Set where
equal : ∀ {u s} → Expr s u → Expr s u → Expr s BOOL
lessThan : ∀ {u s} → Expr s u → Expr s u → Expr s BOOL
_!_ : (s : Schema) → (nm : String) → {p : So (occurs nm s)}
→ Expr s (lookup nm s p)
data RA : Schema → Set where
Read : ∀ {s} → Handle s → RA s
Union : ∀ {s} → RA s → RA s → RA s
Diff : ∀ {s} → RA s → RA s → RA s
Product : ∀ {s s'} → {_ : So (disjoint s s')} → RA s → RA s'
→ RA (append s s')
Project : ∀ {s} → (s' : Schema) → {_ : So (sub s' s)} → RA s → RA s'
Select : ∀ {s} → Expr s BOOL → RA s → RA s
-- ...
{-
As we mentioned previously, we have taken a very minimal set of relational
algebra operators. It should be fairly straightforward to add operators
for the many other operators in relational algebra, such as the
natural join, θ-join, equijoin, renaming, or division,
using the same techniques. Alternatively, you can define many of these
operations in terms of the operations we have implemented in the RA data type.
-}
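-- As a small illustration of that last point (a sketch, not from the paper):
-- a θ-join is just a selection over a product, reusing the disjointness proof.
ThetaJoin : ∀ {s s'} {p : So (disjoint s s')}
          → Expr (append s s') BOOL → RA s → RA s' → RA (append s s')
ThetaJoin {s} {s'} {p} θ q q' = Select θ (Product {s} {s'} {p} q q')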
-- We could:
postulate
toSQL : ∀ {s} → RA s → String
-- We can do much better:
postulate
query : {s : Schema} → RA s → IO (List (Row s))
{-
The *query* function uses *toSQL* to produce a query, and passes this to the
database server. When the server replies, however, we know exactly how to
parse the response: we know the schema of the table resulting from our query,
and can use this to parse the database server's response in a type-safe
manner. The type checker can then statically check that the program uses the
returned list in a way consistent with its type.
-}
Cars : Schema
Cars = mkSchema (("Model" , VEC CHAR 20) ∷ ("Time" , VEC CHAR 6)
∷ ("Wet" , BOOL) ∷ [])
zonda : Row Cars
zonda = ConsRow (toVec "Pagani Zonda C12 F ")
(ConsRow (toVec "1:18.4")
(ConsRow false EmptyRow))
Models : Schema
Models = mkSchema (("Model" , VEC CHAR 20) ∷ [])
models : Handle Cars → RA Models
models h = Project Models (Read h)
wet : Handle Cars → RA Models
wet h = Project Models (Select (Cars ! "Wet") (Read h))
{- Discussion
==========
There are many, many aspects of this proposal that can be improved. Some
attributes of a schema contain *NULL*-values; we should close our universe
under *Maybe* accordingly. Some database servers silently truncate strings
longer than 255 characters. We would do well to ensure statically that this
never happens. Our goal, however, was not to provide a complete model of all
of SQL's quirks and idiosyncrasies: we want to show how a language with
dependent types can shine where Haskell struggles.
Our choice of *Schema* data type suffers from the usual disadvantages of
using a list to represent a set: our *Schema* data type may contain
duplicates and the order of the elements matters. The first problem is easy
to solve. Using an implicit proof argument in the *Cons* case, we can define
a data type for lists that do not contain duplicates. The type of *Cons* then
becomes:
Cons : (nm : String) → (u : U) → (s : Schema) → {_ : So (not (elem nm s))}
→ Schema
The second point is a bit trickier. The real solution would involve quotient
types to make the order of the elements unobservable. As Agda does not
support quotient types, however, the best we can do is parameterise our
constructors by an additional proof argument, when necessary. For example,
the *Union* constructor could be defined as follows:
Union : ∀ {s s'} → {_ : So (permute s s')} → RA s → RA s' → RA s
Instead of requiring that both arguments of *Union* are indexed by the same
schema, we should only require that the two schemas are equal up to a
permutation of the elements. Alternatively, we could represent the *Schema*
using a data structure that fixes the order in which its constituent
elements occur, such as a trie or sorted list.
Finally, we would like to return to our example table. We chose to model
the lap time as a fixed-length string - clearly, a triple of integers would
be a better representation. Unfortunately, most database servers only
support a handful of built-in types, such as strings, numbers, bits. There
is no way to extend these primitive types. This problem is sometimes
referred to as the *object-relational impedance mismatch*. We believe the
generic programming techniques and views from the previous sections can be
used to marshall data between a low-level representation in the database
and the high-level representation in our programming language.
-}
|
module Bautzen.ZoC
import Bautzen.GameUnit
import Bautzen.Terrain
import public Data.List
import public Data.List.Elem
import public Decidable.Equality
import Data.Nat
import Data.Vect
-- section 3
-- zones of control
public export
data ZoC : Type where
InZoC : (side : Side) -> ZoC
Free : ZoC
public export
Eq ZoC where
Free == Free = True
(InZoC s) == (InZoC s') = s == s'
_ == _ = False
||| Test if the given position, for the given `side`, is in the ZoC of the unit.
public export
inZocOf : (pos : Pos) -> (side : Side) -> (GameUnit, Pos) -> Bool
inZocOf pos curSide (unit, location) =
if curSide == side (nation unit)
then False
else case pos `isElem` neighbours location of
(Yes prf) => True
(No contra) => False
||| Is the given `Pos`ition in an enemy ZoC?
||| This assumes the current `side` is playing and checking ZoCs
public export
inZoC : Side -> List (GameUnit, Pos) -> Pos -> ZoC
inZoC curSide units pos =
case find (inZocOf pos curSide) units of
Nothing => Free
(Just (unit, _)) => InZoC (side $ nation unit)
-- ZoC tests
-- inZoCTrue : inZocOf (hex 3 3) Axis (Bautzen.GameUnit.p13_5dp, hex 3 4) = True
-- inZoCTrue = Refl
-- inZoCTrue2 : inZocOf (hex 4 3) Axis (Bautzen.GameUnit.p13_5dp, hex 3 4) = False
-- inZoCTrue2 = Refl
inZoCFalsePolish : inZocOf (hex 3 3) Allies (Bautzen.GameUnit.p13_5dp, hex 3 4) = False
inZoCFalsePolish = Refl
|
REBOL [
System: "REBOL [R3] Language Interpreter and Run-time Environment"
Title: "REBOL 3 Mezzanine: Function Helpers"
Rights: {
Copyright 2012 REBOL Technologies
REBOL is a trademark of REBOL Technologies
}
License: {
Licensed under the Apache License, Version 2.0
See: http://www.apache.org/licenses/LICENSE-2.0
}
]
clos: func [
{Defines a closure function.}
spec [block!] {Help string (opt) followed by arg words (and opt type and string)}
body [block!] {The body block of the function}
][
make closure! copy/deep reduce [spec body]
]
closure: func [
{Defines a closure function with all set-words as locals.}
spec [block!] {Help string (opt) followed by arg words (and opt type and string)}
body [block!] {The body block of the function}
/with {Define or use a persistent object (self)}
object [object! block! map!] {The object or spec}
/extern words [block!] {These words are not local}
][
; Copy the spec and add /local to the end if not found
unless find spec: copy/deep spec /local [append spec [
/local ; In a block so the generated source gets the newlines
]]
; Make a full copy of the body, to allow reuse of the original
body: copy/deep body
; Collect all set-words in the body as words to be used as locals, and add
; them to the spec. Don't include the words already in the spec or object.
append spec collect-words/deep/set/ignore body either with [
; Make our own local object if a premade one is not provided
unless object? object [object: make object! object]
bind body object ; Bind any object words found in the body
; Ignore the words in the spec and those in the object. The spec needs
; to be copied since the object words shouldn't be added to the locals.
append append append copy spec 'self words-of object words ; ignore 'self too
][
; Don't include the words in the spec, or any extern words.
either extern [append copy spec words] [spec]
]
make closure! reduce [spec body]
]
has: func [
{A shortcut to define a function that has local variables but no arguments.}
vars [block!] {List of words that are local to the function}
body [block!] {The body block of the function}
][
make function! reduce [head insert copy/deep vars /local copy/deep body]
]
context: func [
{Defines a unique object.}
blk [block!] {Object words and values (modified)}
][
make object! blk
]
map: func [
{Make a map value (hashed associative block).}
val
][
make map! :val
]
task: func [
{Creates a task.}
spec [block!] {Name or spec block}
body [block!] {The body block of the task}
][
make task! copy/deep reduce [spec body]
]
|
import numpy as np
import pandas as pd
import tensorflow as tf
# Load the UCI forest fires dataset; the last column ('area') is the target.
dataset = pd.read_csv('forestfires.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# Encode the categorical columns as integers. In the UCI layout these are
# 'month' (index 2) and 'day' (index 3); leaving 'day' as text would make
# StandardScaler fail below.
from sklearn.preprocessing import LabelEncoder
le_month = LabelEncoder()
X[:, 2] = le_month.fit_transform(X[:, 2])
le_day = LabelEncoder()
X[:, 3] = le_day.fit_transform(X[:, 3])

# Hold out 20% of the rows for testing.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Standardize the features (fit the scaler on the training set only).
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# A small fully connected regression network: two hidden ReLU layers and a
# single linear output unit for the predicted burned area.
ann = tf.keras.models.Sequential()
ann.add(tf.keras.layers.Dense(units=6, activation='relu'))
ann.add(tf.keras.layers.Dense(units=6, activation='relu'))
ann.add(tf.keras.layers.Dense(units=1))
ann.compile(optimizer='adam', loss='mean_squared_error')
ann.fit(X_train, y_train, batch_size=64, epochs=1000)

# Show predictions next to the true values.
y_pred = ann.predict(X_test)
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1))
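
# Optional check (not in the original script): a single error metric makes the
# side-by-side comparison above easier to judge.
from sklearn.metrics import mean_absolute_error
print('Test MAE:', mean_absolute_error(y_test, y_pred))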
|
||| Magic/Virtual files
module System.File.Virtual
import System.File.Support
import public System.File.Types
%default total
%foreign support "idris2_stdin"
"node:lambda:x=>({fd:0, buffer: Buffer.alloc(0), name:'<stdin>', eof: false})"
prim__stdin : FilePtr
%foreign support "idris2_stdout"
"node:lambda:x=>({fd:1, buffer: Buffer.alloc(0), name:'<stdout>', eof: false})"
prim__stdout : FilePtr
%foreign support "idris2_stderr"
"node:lambda:x=>({fd:2, buffer: Buffer.alloc(0), name:'<stderr>', eof: false})"
prim__stderr : FilePtr
||| The standard input.
export
stdin : File
stdin = FHandle prim__stdin
||| The standard output.
export
stdout : File
stdout = FHandle prim__stdout
||| The standard error.
export
stderr : File
stderr = FHandle prim__stderr
|
\subsection{Data Preprocessing}
\label{sec:data-preprocessing}
\subsubsection{Questionnaire}
\label{sec:data-prep-questionnaire}
In total, we received 67 completed questionnaires, 45 of which included a GitHub profile. The data file was further cleaned and reformatted with a Python\footnote{Python Website: \url{https://www.python.org/}} script. Both the original and the formatted \code{csv} files can be found in the project source under the names ,,\code{questionnaire.csv}'' and ,,\code{cleaned\_data.csv}'', together with the Python script used for the data formatting, which can be found in the ,,\code{py\_scripts}'' directory under the name ,,\code{questionnaire\_adjuster.py}''. To run the script, one must have Python 3.9 or newer installed along with the ,,Numpy''\footnote{Numpy PyPI Page - \url{https://pypi.org/project/numpy/}}, ,,Pandas''\footnote{Pandas PyPI Page - \url{https://pypi.org/project/pandas/}} and ,,Requests''\footnote{Requests PyPI Page - \url{https://pypi.org/project/requests/}} packages.
Delving into the details, starting with the most important one: 7 answers were manually modified to standardize the values into a single datatype (\code{int}). These answers had been collected as strings, because we did not want to constrain respondents to a few preselected numerical values and - unfortunately - there was no other way to provide an input field that accepts only \code{UnlimitedNatural} numbers. The majority of these answers were given as ranges (e.g. $10 - 20$, \emph{a dozen}, $30-40$); these were replaced with the average of the given range. The ,,self-doubting'' and ,,not exactly sure'' answers, such as $60?$, $4/5$ and $20+$, were changed to the lowest value in the suggested range.
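To make this transformation concrete, a simplified sketch of the range normalization is shown below \textit{[ref: \ref{lst:range-normalization-sketch}]}; the helper is purely illustrative and is not part of ,,\code{questionnaire\_adjuster.py}''.
\begin{lstlisting}[language=PythonOwn, label={lst:range-normalization-sketch}, caption={"Illustrative sketch of the range-answer normalization"}]
def normalize_answer(answer: str) -> int:
    # Illustrative sketch only; the real script also handles textual
    # answers such as "a dozen" that this helper would reject.
    answer = answer.strip().replace(" ", "")
    if "-" in answer:                # e.g. "10-20" -> average -> 15
        low, high = map(int, answer.split("-"))
        return (low + high) // 2
    if answer.endswith(("?", "+")):  # e.g. "60?", "20+" -> lowest value
        return int(answer[:-1])
    if "/" in answer:                # e.g. "4/5" -> lowest value
        return int(answer.split("/")[0])
    return int(answer)
\end{lstlisting}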
Besides that, all available checkbox answers (\emph{Soft Skills} \& \emph{Pre-work Experience}) were transformed so that each option has its own column with a \code{Boolean} value indicating whether it was checked or not.
The remaining changes are technical and linguistic, so that the data set can be reused and understood without knowledge of the Polish language:
\begin{itemize}
\item Unicode characters (like \emph{emojis}) were removed from the column names, and the column names themselves are now English words
\item Every column had its data adjusted so that it can be interpreted with a suitable data type
\item Similar columns from different sections of the questionnaire, resulting from answers to similar but rephrased questions, were merged
\item Answers given as strings, like ,,\code{3,5 year}'', were transformed into \code{integers} expressed in months
\end{itemize}
\subsubsection{Repositories}
\label{sec:data-prep-repositories}
One of the most difficult tasks is to properly obtain information from the repositories of a given user. For research purposes, we decided to scan them for potential errors and warnings which can be identified via linters. A linter is a static code analysis tool used to flag bugs, errors, stylistic warnings, suspicious constructs, redundant code, and more, depending on the language and/or tool. Of course, a user could have repositories with code written in virtually any language, which is a real problem; we mitigated it with a linter-aggregating tool - \emph{Mega Linter}\footnote{Mega Linter GitHub Page - \url{https://github.com/nvuillam/mega-linter}}. Mega Linter is an open-source tool that detects the languages used in a given project and then runs all applicable linters over it. After the scan, it prints a summary table with the number of files that were detected and scanned by each linter, the number of files fixed automatically at run time, and the number of errors that could not be automatically fixed. A second table, printed in the first half of the output log, contains information about duplicate lines and tokens detected in the project. To obtain these data, we redirected the output stream into a file and then parsed it with the Python script ,,\code{scrape.py}'', which can be found in the \emph{,,py\_scripts''} folder.
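For illustration, the listing below \textit{[ref: \ref{lst:linter-summary-parse-sketch}]} sketches the kind of table-row parsing that ,,\code{scrape.py}'' performs; the exact column layout of the Mega Linter summary table may differ between versions, so the regular expression and the example row are assumptions rather than the actual implementation.
\begin{lstlisting}[language=PythonOwn, label={lst:linter-summary-parse-sketch}, caption={"Sketch of parsing one linter summary row (illustrative, not the actual \code{scrape.py})"}]
import re

# Assumed row shape: "| PYTHON | pylint | 12 | 0 | 3 |"
ROW = re.compile(r"\|\s*(?P<language>[\w-]+)\s*\|\s*(?P<linter>[\w-]+)\s*\|"
                 r"\s*(?P<files>\d+)\s*\|\s*(?P<fixed>\d+)\s*\|"
                 r"\s*(?P<errors>\d+)\s*\|")

def parse_row(line):
    match = ROW.search(line)
    if match is None:
        return None
    row = match.groupdict()
    return {
        "language": row["language"],
        "linter": row["linter"],
        "files": int(row["files"]),
        "fixed": int(row["fixed"]),
        "errors": int(row["errors"]),
    }
\end{lstlisting}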
The usage of the Mega Linter and the scrape scripts is described in more detail in the main \code{README.md} file; in short, one must have Docker\footnote{Docker Website - \url{https://www.docker.com/}} and Python installed. Then, simply navigate to the repository which you would like to lint and run the command \textit{[ref: \ref{lst:shell-command-run-linter}]}, which will generate an \code{output.txt} file.
\begin{lstlisting}[language=BashOwn, label={lst:shell-command-run-linter}, caption={"Running
\emph{Mega Linter}"}]
npx mega-linter-runner --flavor all
-e 'ENABLE=,DOCKERFILE,MARKDOWN,YAML'
-e 'SHOW_ELAPSED_TIME=true'
> output.txt
\end{lstlisting}
Copy the file and paste it into the \emph{scrape.py} directory. Finally, run the scrape command \textit{[ref: \ref{lst:shell-command-scrape-py}]}, which will generate an \code{output.json} file with a list of dictionaries structured as in the output example given below \textit{[ref: \ref{lst:scrape-py-output-example}]}.
\begin{lstlisting}[language=BashOwn, label={lst:shell-command-scrape-py}, caption={"Launching \code{scraper.py}"}]
python scraper.py -f output.txt
\end{lstlisting}
\begin{lstlisting}[language=PythonOwn, label={lst:scrape-py-output-example}, caption={"Parsed Linter Output in \code{.json} format"}]
{
"language": str, # detected language
"linter": str, # checked via linter (name)
"files": int | str, # detected files in given language
"fixed": int, # fixed errors automatically
"errors": int # errors that could not be fixed
},
# or
{
"language": str, # detected language
"files": int, # in a given language
"lines": int, # in a given language
"tokens": int, # ("chars") in a given language
"clones": int,
"duplicate_lines_num": int,
"duplicate_lines_percent": float,
"duplicate_tokens_num": int,
"duplicate_tokens_percent": float
},
\end{lstlisting}
|
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Johannes Hölzl, Reid Barton, Sean Leather
Bundled type and structure.
-/
import category_theory.functor
import category_theory.types
universes u v
namespace category_theory
variables {c d : Type u → Type v} {α : Type u}
/--
`concrete_category @hom` collects the evidence that a type constructor `c` and a
morphism predicate `hom` can be thought of as a concrete category.
In a typical example, `c` is the type class `topological_space` and `hom` is
`continuous`.
-/
structure concrete_category (hom : out_param $ ∀ {α β}, c α → c β → (α → β) → Prop) :=
(hom_id : ∀ {α} (ia : c α), hom ia ia id)
(hom_comp : ∀ {α β γ} (ia : c α) (ib : c β) (ic : c γ) {f g}, hom ia ib f → hom ib ic g → hom ia ic (g ∘ f))
attribute [class] concrete_category
/-- `bundled` is a type bundled with a type class instance for that type. Only
the type class is exposed as a parameter. -/
structure bundled (c : Type u → Type v) : Type (max (u+1) v) :=
(α : Type u)
(str : c α)
def mk_ob {c : Type u → Type v} (α : Type u) [str : c α] : bundled c := ⟨α, str⟩
namespace bundled
instance : has_coe_to_sort (bundled c) :=
{ S := Type u, coe := bundled.α }
/-- Map over the bundled structure -/
def map (f : ∀ {α}, c α → d α) (b : bundled c) : bundled d :=
⟨b.α, f b.str⟩
section concrete_category
variables (hom : ∀ {α β : Type u}, c α → c β → (α → β) → Prop)
variables [h : concrete_category @hom]
include h
instance : category (bundled c) :=
{ hom := λ a b, subtype (hom a.2 b.2),
id := λ a, ⟨@id a.1, h.hom_id a.2⟩,
comp := λ a b c f g, ⟨g.1 ∘ f.1, h.hom_comp a.2 b.2 c.2 f.2 g.2⟩ }
variables {X Y Z : bundled c}
@[simp] lemma concrete_category_id (X : bundled c) : subtype.val (𝟙 X) = id :=
rfl
@[simp] lemma concrete_category_comp (f : X ⟶ Y) (g : Y ⟶ Z) :
subtype.val (f ≫ g) = g.val ∘ f.val :=
rfl
instance : has_coe_to_fun (X ⟶ Y) :=
{ F := λ f, X → Y,
coe := λ f, f.1 }
@[simp] lemma bundled_hom_coe {X Y : bundled c} (val : X → Y) (prop) (x : X) :
(⟨val, prop⟩ : X ⟶ Y) x = val x := rfl
end concrete_category
end bundled
def concrete_functor
{C : Type u → Type v} {hC : ∀{α β}, C α → C β → (α → β) → Prop} [concrete_category @hC]
{D : Type u → Type v} {hD : ∀{α β}, D α → D β → (α → β) → Prop} [concrete_category @hD]
(m : ∀{α}, C α → D α) (h : ∀{α β} {ia : C α} {ib : C β} {f}, hC ia ib f → hD (m ia) (m ib) f) :
bundled C ⥤ bundled D :=
{ obj := bundled.map @m,
map := λ X Y f, ⟨ f, h f.2 ⟩}
section forget
variables {C : Type u → Type v} {hom : ∀α β, C α → C β → (α → β) → Prop} [i : concrete_category hom]
include i
/-- The forgetful functor from a bundled category to `Type`. -/
def forget : bundled C ⥤ Type u := { obj := bundled.α, map := λa b h, h.1 }
instance forget.faithful : faithful (forget : bundled C ⥤ Type u) := {}
end forget
end category_theory
|
import tactic
universes u
variables {α : Sort u} (r : α → α → Prop)
local infix ` ≺ `:50 := r
def infinite_descending_chain (c : ℕ → α) : Prop := ∀ i, c (i + 1) ≺ c i
variables {r}
lemma non_acc_iff {x : α} : ¬acc (≺) x ↔ ∃ y ≺ x, ¬acc (≺) y :=
⟨by { contrapose, simp, exact acc.intro x }, by { contrapose, simp, rintros ⟨h⟩, assumption }⟩
noncomputable def descending_chain (z : α) : ℕ → α
| 0 := z
| (i + 1) := @classical.epsilon α ⟨z⟩ (λ y, y ≺ descending_chain i ∧ ¬acc (≺) y)
@[simp] lemma descending_chain_zero (z : α) : descending_chain (≺) z 0 = z := rfl
lemma infinite_descending_chain_of_non_acc (z : α) (hz : ¬acc (≺) z) : infinite_descending_chain (≺) (descending_chain (≺) z) :=
begin
haveI : nonempty α, from ⟨z⟩,
have : ∀ n, (n ≠ 0 → descending_chain (≺) z n ≺ descending_chain (≺) z n.pred) ∧ ¬acc (≺) (descending_chain (≺) z n),
{ intros n, induction n with n IH,
{ simp, exact hz },
{ simp[descending_chain],
have : ∃ y, y ≺ (descending_chain r z n) ∧ ¬acc (≺) y,
{ rcases (non_acc_iff (≺)).mp IH.2 with ⟨y, hy, hay⟩, exact ⟨y, hy, hay⟩ },
exact classical.epsilon_spec_aux ⟨z⟩ _ this } },
intros i, simpa using (this (i + 1)).1,
end
theorem has_infinite_descending_chain_of_non_well_founded (h : ¬well_founded (≺)) : ∃ c, infinite_descending_chain (≺) c :=
begin
have : ∃ z, ¬acc (≺) z,
{ suffices : ¬∀ z, acc (≺) z, exact not_forall.mp this,
intros h, have : well_founded r, from ⟨h⟩, contradiction },
rcases this with ⟨z, hz⟩,
refine ⟨descending_chain (≺) z, infinite_descending_chain_of_non_acc r z hz⟩
end |
using DiffEqDevTools
using Test
# write your own tests here
@time @testset "Benchmark Tests" begin include("benchmark_tests.jl") end
@time @testset "ODE AppxTrue Tests" begin include("ode_appxtrue_tests.jl") end
@time @testset "Analyticless Convergence Tests" begin include("analyticless_convergence_tests.jl") end
@time @testset "ODE Tableau Convergence Tests" begin include("ode_tableau_convergence_tests.jl") end ## Windows 32-bit fails on Butcher62 convergence test
@time @testset "Analyticless Stochastic WP" begin include("analyticless_stochastic_wp.jl") end
@time @testset "Stability Region Tests" begin include("stability_region_test.jl") end
|
import numpy as np
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from keras.models import Model, Input
from sklearn.model_selection import train_test_split
class BiLSTMNER(object):
def __init__(self, n_words: int, max_len: int, n_tags: int, batch_size: int = 512, nbepochs: int = 5):
self.model = None
self.n_words = n_words
self.n_tags = n_tags
self.max_len = max_len
self.nbepochs = nbepochs
self.batch_size = batch_size
self.build_model()
    def build_model(self):
        # Token indices -> embeddings -> bidirectional LSTM -> per-timestep
        # softmax over the tag set.
        inputs = Input(shape=(self.max_len,))
        model = Embedding(input_dim=self.n_words, output_dim=50, input_length=self.max_len)(inputs)
        model = Dropout(0.1)(model)
        model = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)
        out = TimeDistributed(Dense(self.n_tags, activation="softmax"))(model)  # softmax output layer
        self.model = Model(inputs, out)
def train(self, X, y):
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.1)
self.model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
history = self.model.fit(np.array(X_tr), np.array(y_tr), batch_size=self.batch_size, epochs=self.nbepochs,
validation_data=(np.array(X_te), np.array(y_te)), verbose=1)
return history
def predict(self, x):
return self.model.predict(x)
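
# Hedged usage sketch (not part of the original module): exercise the API with
# random token indices and one-hot tag labels; a real NER corpus would supply
# word- and tag-indexed sequences instead.
if __name__ == "__main__":
    n_words, n_tags, max_len = 100, 5, 20
    X_demo = np.random.randint(0, n_words, size=(64, max_len))
    y_demo = np.eye(n_tags)[np.random.randint(0, n_tags, size=(64, max_len))]
    ner = BiLSTMNER(n_words=n_words, max_len=max_len, n_tags=n_tags,
                    batch_size=16, nbepochs=1)
    ner.train(X_demo, y_demo)
    print(ner.predict(X_demo[:1]).shape)  # expected: (1, max_len, n_tags)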
|
module Util
import public Effects
import public Effect.Random
import public Data.Vect
import public Data.Matrix
import public Data.Matrix.Numeric
%access public export
infixl 5 <+ -- add a vector to each row of a matrix
||| Maximum size of the integer distribution
RAND_MAX : Integer
RAND_MAX = 32767
||| Generate a random Double in the range of [0.0, 1.0]
rndUnitDouble : Eff Double [RND]
rndUnitDouble =
pure $ (fromInteger !(rndInt 0 RAND_MAX)) / fromInteger RAND_MAX
||| Generate a random Double in the range of [min, max]
rndDouble : (min : Double) -> (max : Double) -> Eff Double [RND]
rndDouble min max =
pure $ (min + (max - min) * !rndUnitDouble)
||| Generate a random vector of length n of Doubles in the range of [min, max]
rndVect : (n : Nat) ->
(min: Double) ->
(max: Double) ->
Eff (Vect n Double) [RND]
rndVect Z _ _ = pure []
rndVect (S k) min max = pure (!(rndDouble min max) :: !(rndVect k min max))
||| Generate an n x m matrix of random Doubles in the range of [min, max]
rndMat : (n : Nat) ->
(m : Nat) ->
(min : Double) ->
(max: Double) ->
Eff (Matrix n m Double) [RND]
rndMat Z _ _ _ = pure []
rndMat (S k) m min max = pure (!(rndVect m min max) :: !(rndMat k m min max))
||| Generate a vector of length n of Doubles initialized to 0.0
zeroVect : (n : Nat) -> Vect n Double
zeroVect Z = []
zeroVect (S k) = 0.0 :: zeroVect k
||| Generate an n x m matrix of Doubles initialized to 0.0
zeroMat : (n : Nat) -> (m : Nat) -> Matrix n m Double
zeroMat Z _ = []
zeroMat (S k) m = zeroVect m :: zeroMat k m
||| Add a vector to each row of a matrix
(<+) : Matrix n m Double -> Vect m Double -> Matrix n m Double
(<+) [] _ = []
(<+) (x :: xs) v = (x + v) :: (xs <+ v)
|
module Ix where
_-:>_ : {I : Set} -> (I -> Set) -> (I -> Set) -> (I -> Set)
(S -:> T) i = S i -> T i
infixr 4 _-:>_
[_] : {I : Set} -> (I -> Set) -> Set
[ P ] = forall i -> P i
Algebra : {I : Set}(F : (I -> Set) -> (I -> Set)) -> (I -> Set) -> Set
Algebra F X = [ F X -:> X ]
|
{-# OPTIONS --type-in-type #-}
Ty : Set
Ty =
(Ty : Set)
(nat top bot : Ty)
(arr prod sum : Ty → Ty → Ty)
→ Ty
nat : Ty; nat = λ _ nat _ _ _ _ _ → nat
top : Ty; top = λ _ _ top _ _ _ _ → top
bot : Ty; bot = λ _ _ _ bot _ _ _ → bot
arr : Ty → Ty → Ty; arr
= λ A B Ty nat top bot arr prod sum →
arr (A Ty nat top bot arr prod sum) (B Ty nat top bot arr prod sum)
prod : Ty → Ty → Ty; prod
= λ A B Ty nat top bot arr prod sum →
prod (A Ty nat top bot arr prod sum) (B Ty nat top bot arr prod sum)
sum : Ty → Ty → Ty; sum
= λ A B Ty nat top bot arr prod sum →
sum (A Ty nat top bot arr prod sum) (B Ty nat top bot arr prod sum)
Con : Set; Con
= (Con : Set)
(nil : Con)
(snoc : Con → Ty → Con)
→ Con
nil : Con; nil
= λ Con nil snoc → nil
snoc : Con → Ty → Con; snoc
= λ Γ A Con nil snoc → snoc (Γ Con nil snoc) A
Var : Con → Ty → Set; Var
= λ Γ A →
(Var : Con → Ty → Set)
(vz : ∀{Γ A} → Var (snoc Γ A) A)
(vs : ∀{Γ B A} → Var Γ A → Var (snoc Γ B) A)
→ Var Γ A
vz : ∀{Γ A} → Var (snoc Γ A) A; vz
= λ Var vz vs → vz
vs : ∀{Γ B A} → Var Γ A → Var (snoc Γ B) A; vs
= λ x Var vz vs → vs (x Var vz vs)
Tm : Con → Ty → Set; Tm
= λ Γ A →
(Tm : Con → Ty → Set)
(var : ∀{Γ A} → Var Γ A → Tm Γ A)
(lam : ∀{Γ A B} → Tm (snoc Γ A) B → Tm Γ (arr A B))
(app : ∀{Γ A B} → Tm Γ (arr A B) → Tm Γ A → Tm Γ B)
(tt : ∀{Γ} → Tm Γ top)
(pair : ∀{Γ A B} → Tm Γ A → Tm Γ B → Tm Γ (prod A B))
(fst : ∀{Γ A B} → Tm Γ (prod A B) → Tm Γ A)
(snd : ∀{Γ A B} → Tm Γ (prod A B) → Tm Γ B)
(left : ∀{Γ A B} → Tm Γ A → Tm Γ (sum A B))
(right : ∀{Γ A B} → Tm Γ B → Tm Γ (sum A B))
(case : ∀{Γ A B C} → Tm Γ (sum A B) → Tm Γ (arr A C) → Tm Γ (arr B C) → Tm Γ C)
(zero : ∀{Γ} → Tm Γ nat)
(suc : ∀{Γ} → Tm Γ nat → Tm Γ nat)
(rec : ∀{Γ A} → Tm Γ nat → Tm Γ (arr nat (arr A A)) → Tm Γ A → Tm Γ A)
→ Tm Γ A
var : ∀{Γ A} → Var Γ A → Tm Γ A; var
= λ x Tm var lam app tt pair fst snd left right case zero suc rec →
var x
lam : ∀{Γ A B} → Tm (snoc Γ A) B → Tm Γ (arr A B); lam
= λ t Tm var lam app tt pair fst snd left right case zero suc rec →
lam (t Tm var lam app tt pair fst snd left right case zero suc rec)
app : ∀{Γ A B} → Tm Γ (arr A B) → Tm Γ A → Tm Γ B; app
= λ t u Tm var lam app tt pair fst snd left right case zero suc rec →
app (t Tm var lam app tt pair fst snd left right case zero suc rec)
(u Tm var lam app tt pair fst snd left right case zero suc rec)
tt : ∀{Γ} → Tm Γ top; tt
= λ Tm var lam app tt pair fst snd left right case zero suc rec → tt
pair : ∀{Γ A B} → Tm Γ A → Tm Γ B → Tm Γ (prod A B); pair
= λ t u Tm var lam app tt pair fst snd left right case zero suc rec →
pair (t Tm var lam app tt pair fst snd left right case zero suc rec)
(u Tm var lam app tt pair fst snd left right case zero suc rec)
fst : ∀{Γ A B} → Tm Γ (prod A B) → Tm Γ A; fst
= λ t Tm var lam app tt pair fst snd left right case zero suc rec →
fst (t Tm var lam app tt pair fst snd left right case zero suc rec)
snd : ∀{Γ A B} → Tm Γ (prod A B) → Tm Γ B; snd
= λ t Tm var lam app tt pair fst snd left right case zero suc rec →
snd (t Tm var lam app tt pair fst snd left right case zero suc rec)
left : ∀{Γ A B} → Tm Γ A → Tm Γ (sum A B); left
= λ t Tm var lam app tt pair fst snd left right case zero suc rec →
left (t Tm var lam app tt pair fst snd left right case zero suc rec)
right : ∀{Γ A B} → Tm Γ B → Tm Γ (sum A B); right
= λ t Tm var lam app tt pair fst snd left right case zero suc rec →
right (t Tm var lam app tt pair fst snd left right case zero suc rec)
case : ∀{Γ A B C} → Tm Γ (sum A B) → Tm Γ (arr A C) → Tm Γ (arr B C) → Tm Γ C; case
= λ t u v Tm var lam app tt pair fst snd left right case zero suc rec →
case (t Tm var lam app tt pair fst snd left right case zero suc rec)
(u Tm var lam app tt pair fst snd left right case zero suc rec)
(v Tm var lam app tt pair fst snd left right case zero suc rec)
zero : ∀{Γ} → Tm Γ nat; zero
= λ Tm var lam app tt pair fst snd left right case zero suc rec → zero
suc : ∀{Γ} → Tm Γ nat → Tm Γ nat; suc
= λ t Tm var lam app tt pair fst snd left right case zero suc rec →
suc (t Tm var lam app tt pair fst snd left right case zero suc rec)
rec : ∀{Γ A} → Tm Γ nat → Tm Γ (arr nat (arr A A)) → Tm Γ A → Tm Γ A; rec
= λ t u v Tm var lam app tt pair fst snd left right case zero suc rec →
rec (t Tm var lam app tt pair fst snd left right case zero suc rec)
(u Tm var lam app tt pair fst snd left right case zero suc rec)
(v Tm var lam app tt pair fst snd left right case zero suc rec)
v0 : ∀{Γ A} → Tm (snoc Γ A) A; v0
= var vz
v1 : ∀{Γ A B} → Tm (snoc (snoc Γ A) B) A; v1
= var (vs vz)
v2 : ∀{Γ A B C} → Tm (snoc (snoc (snoc Γ A) B) C) A; v2
= var (vs (vs vz))
v3 : ∀{Γ A B C D} → Tm (snoc (snoc (snoc (snoc Γ A) B) C) D) A; v3
= var (vs (vs (vs vz)))
tbool : Ty; tbool
= sum top top
true : ∀{Γ} → Tm Γ tbool; true
= left tt
tfalse : ∀{Γ} → Tm Γ tbool; tfalse
= right tt
ifthenelse : ∀{Γ A} → Tm Γ (arr tbool (arr A (arr A A))); ifthenelse
= lam (lam (lam (case v2 (lam v2) (lam v1))))
times4 : ∀{Γ A} → Tm Γ (arr (arr A A) (arr A A)); times4
= lam (lam (app v1 (app v1 (app v1 (app v1 v0)))))
add : ∀{Γ} → Tm Γ (arr nat (arr nat nat)); add
= lam (rec v0
(lam (lam (lam (suc (app v1 v0)))))
(lam v0))
mul : ∀{Γ} → Tm Γ (arr nat (arr nat nat)); mul
= lam (rec v0
(lam (lam (lam (app (app add (app v1 v0)) v0))))
(lam zero))
fact : ∀{Γ} → Tm Γ (arr nat nat); fact
= lam (rec v0 (lam (lam (app (app mul (suc v1)) v0)))
(suc zero))
{-# OPTIONS --type-in-type #-}
Ty1 : Set
Ty1 =
(Ty1 : Set)
(nat top bot : Ty1)
(arr prod sum : Ty1 → Ty1 → Ty1)
→ Ty1
nat1 : Ty1; nat1 = λ _ nat1 _ _ _ _ _ → nat1
top1 : Ty1; top1 = λ _ _ top1 _ _ _ _ → top1
bot1 : Ty1; bot1 = λ _ _ _ bot1 _ _ _ → bot1
arr1 : Ty1 → Ty1 → Ty1; arr1
= λ A B Ty1 nat1 top1 bot1 arr1 prod sum →
arr1 (A Ty1 nat1 top1 bot1 arr1 prod sum) (B Ty1 nat1 top1 bot1 arr1 prod sum)
prod1 : Ty1 → Ty1 → Ty1; prod1
= λ A B Ty1 nat1 top1 bot1 arr1 prod1 sum →
prod1 (A Ty1 nat1 top1 bot1 arr1 prod1 sum) (B Ty1 nat1 top1 bot1 arr1 prod1 sum)
sum1 : Ty1 → Ty1 → Ty1; sum1
= λ A B Ty1 nat1 top1 bot1 arr1 prod1 sum1 →
sum1 (A Ty1 nat1 top1 bot1 arr1 prod1 sum1) (B Ty1 nat1 top1 bot1 arr1 prod1 sum1)
Con1 : Set; Con1
= (Con1 : Set)
(nil : Con1)
(snoc : Con1 → Ty1 → Con1)
→ Con1
nil1 : Con1; nil1
= λ Con1 nil1 snoc → nil1
snoc1 : Con1 → Ty1 → Con1; snoc1
= λ Γ A Con1 nil1 snoc1 → snoc1 (Γ Con1 nil1 snoc1) A
Var1 : Con1 → Ty1 → Set; Var1
= λ Γ A →
(Var1 : Con1 → Ty1 → Set)
(vz : ∀{Γ A} → Var1 (snoc1 Γ A) A)
(vs : ∀{Γ B A} → Var1 Γ A → Var1 (snoc1 Γ B) A)
→ Var1 Γ A
vz1 : ∀{Γ A} → Var1 (snoc1 Γ A) A; vz1
= λ Var1 vz1 vs → vz1
vs1 : ∀{Γ B A} → Var1 Γ A → Var1 (snoc1 Γ B) A; vs1
= λ x Var1 vz1 vs1 → vs1 (x Var1 vz1 vs1)
Tm1 : Con1 → Ty1 → Set; Tm1
= λ Γ A →
(Tm1 : Con1 → Ty1 → Set)
(var : ∀{Γ A} → Var1 Γ A → Tm1 Γ A)
(lam : ∀{Γ A B} → Tm1 (snoc1 Γ A) B → Tm1 Γ (arr1 A B))
(app : ∀{Γ A B} → Tm1 Γ (arr1 A B) → Tm1 Γ A → Tm1 Γ B)
(tt : ∀{Γ} → Tm1 Γ top1)
(pair : ∀{Γ A B} → Tm1 Γ A → Tm1 Γ B → Tm1 Γ (prod1 A B))
(fst : ∀{Γ A B} → Tm1 Γ (prod1 A B) → Tm1 Γ A)
(snd : ∀{Γ A B} → Tm1 Γ (prod1 A B) → Tm1 Γ B)
(left : ∀{Γ A B} → Tm1 Γ A → Tm1 Γ (sum1 A B))
(right : ∀{Γ A B} → Tm1 Γ B → Tm1 Γ (sum1 A B))
(case : ∀{Γ A B C} → Tm1 Γ (sum1 A B) → Tm1 Γ (arr1 A C) → Tm1 Γ (arr1 B C) → Tm1 Γ C)
(zero : ∀{Γ} → Tm1 Γ nat1)
(suc : ∀{Γ} → Tm1 Γ nat1 → Tm1 Γ nat1)
(rec : ∀{Γ A} → Tm1 Γ nat1 → Tm1 Γ (arr1 nat1 (arr1 A A)) → Tm1 Γ A → Tm1 Γ A)
→ Tm1 Γ A
var1 : ∀{Γ A} → Var1 Γ A → Tm1 Γ A; var1
= λ x Tm1 var1 lam app tt pair fst snd left right case zero suc rec →
var1 x
lam1 : ∀{Γ A B} → Tm1 (snoc1 Γ A) B → Tm1 Γ (arr1 A B); lam1
= λ t Tm1 var1 lam1 app tt pair fst snd left right case zero suc rec →
lam1 (t Tm1 var1 lam1 app tt pair fst snd left right case zero suc rec)
app1 : ∀{Γ A B} → Tm1 Γ (arr1 A B) → Tm1 Γ A → Tm1 Γ B; app1
= λ t u Tm1 var1 lam1 app1 tt pair fst snd left right case zero suc rec →
app1 (t Tm1 var1 lam1 app1 tt pair fst snd left right case zero suc rec)
(u Tm1 var1 lam1 app1 tt pair fst snd left right case zero suc rec)
tt1 : ∀{Γ} → Tm1 Γ top1; tt1
= λ Tm1 var1 lam1 app1 tt1 pair fst snd left right case zero suc rec → tt1
pair1 : ∀{Γ A B} → Tm1 Γ A → Tm1 Γ B → Tm1 Γ (prod1 A B); pair1
= λ t u Tm1 var1 lam1 app1 tt1 pair1 fst snd left right case zero suc rec →
pair1 (t Tm1 var1 lam1 app1 tt1 pair1 fst snd left right case zero suc rec)
(u Tm1 var1 lam1 app1 tt1 pair1 fst snd left right case zero suc rec)
fst1 : ∀{Γ A B} → Tm1 Γ (prod1 A B) → Tm1 Γ A; fst1
= λ t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd left right case zero suc rec →
fst1 (t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd left right case zero suc rec)
snd1 : ∀{Γ A B} → Tm1 Γ (prod1 A B) → Tm1 Γ B; snd1
= λ t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left right case zero suc rec →
snd1 (t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left right case zero suc rec)
left1 : ∀{Γ A B} → Tm1 Γ A → Tm1 Γ (sum1 A B); left1
= λ t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right case zero suc rec →
left1 (t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right case zero suc rec)
right1 : ∀{Γ A B} → Tm1 Γ B → Tm1 Γ (sum1 A B); right1
= λ t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case zero suc rec →
right1 (t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case zero suc rec)
case1 : ∀{Γ A B C} → Tm1 Γ (sum1 A B) → Tm1 Γ (arr1 A C) → Tm1 Γ (arr1 B C) → Tm1 Γ C; case1
= λ t u v Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero suc rec →
case1 (t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero suc rec)
(u Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero suc rec)
(v Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero suc rec)
zero1 : ∀{Γ} → Tm1 Γ nat1; zero1
= λ Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero1 suc rec → zero1
suc1 : ∀{Γ} → Tm1 Γ nat1 → Tm1 Γ nat1; suc1
= λ t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero1 suc1 rec →
suc1 (t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero1 suc1 rec)
rec1 : ∀{Γ A} → Tm1 Γ nat1 → Tm1 Γ (arr1 nat1 (arr1 A A)) → Tm1 Γ A → Tm1 Γ A; rec1
= λ t u v Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero1 suc1 rec1 →
rec1 (t Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero1 suc1 rec1)
(u Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero1 suc1 rec1)
(v Tm1 var1 lam1 app1 tt1 pair1 fst1 snd1 left1 right1 case1 zero1 suc1 rec1)
v01 : ∀{Γ A} → Tm1 (snoc1 Γ A) A; v01
= var1 vz1
v11 : ∀{Γ A B} → Tm1 (snoc1 (snoc1 Γ A) B) A; v11
= var1 (vs1 vz1)
v21 : ∀{Γ A B C} → Tm1 (snoc1 (snoc1 (snoc1 Γ A) B) C) A; v21
= var1 (vs1 (vs1 vz1))
v31 : ∀{Γ A B C D} → Tm1 (snoc1 (snoc1 (snoc1 (snoc1 Γ A) B) C) D) A; v31
= var1 (vs1 (vs1 (vs1 vz1)))
tbool1 : Ty1; tbool1
= sum1 top1 top1
true1 : ∀{Γ} → Tm1 Γ tbool1; true1
= left1 tt1
tfalse1 : ∀{Γ} → Tm1 Γ tbool1; tfalse1
= right1 tt1
ifthenelse1 : ∀{Γ A} → Tm1 Γ (arr1 tbool1 (arr1 A (arr1 A A))); ifthenelse1
= lam1 (lam1 (lam1 (case1 v21 (lam1 v21) (lam1 v11))))
times41 : ∀{Γ A} → Tm1 Γ (arr1 (arr1 A A) (arr1 A A)); times41
= lam1 (lam1 (app1 v11 (app1 v11 (app1 v11 (app1 v11 v01)))))
add1 : ∀{Γ} → Tm1 Γ (arr1 nat1 (arr1 nat1 nat1)); add1
= lam1 (rec1 v01
(lam1 (lam1 (lam1 (suc1 (app1 v11 v01)))))
(lam1 v01))
mul1 : ∀{Γ} → Tm1 Γ (arr1 nat1 (arr1 nat1 nat1)); mul1
= lam1 (rec1 v01
(lam1 (lam1 (lam1 (app1 (app1 add1 (app1 v11 v01)) v01))))
(lam1 zero1))
fact1 : ∀{Γ} → Tm1 Γ (arr1 nat1 nat1); fact1
= lam1 (rec1 v01 (lam1 (lam1 (app1 (app1 mul1 (suc1 v11)) v01)))
(suc1 zero1))
{-# OPTIONS --type-in-type #-}
Ty2 : Set
Ty2 =
(Ty2 : Set)
(nat top bot : Ty2)
(arr prod sum : Ty2 → Ty2 → Ty2)
→ Ty2
nat2 : Ty2; nat2 = λ _ nat2 _ _ _ _ _ → nat2
top2 : Ty2; top2 = λ _ _ top2 _ _ _ _ → top2
bot2 : Ty2; bot2 = λ _ _ _ bot2 _ _ _ → bot2
arr2 : Ty2 → Ty2 → Ty2; arr2
= λ A B Ty2 nat2 top2 bot2 arr2 prod sum →
arr2 (A Ty2 nat2 top2 bot2 arr2 prod sum) (B Ty2 nat2 top2 bot2 arr2 prod sum)
prod2 : Ty2 → Ty2 → Ty2; prod2
= λ A B Ty2 nat2 top2 bot2 arr2 prod2 sum →
prod2 (A Ty2 nat2 top2 bot2 arr2 prod2 sum) (B Ty2 nat2 top2 bot2 arr2 prod2 sum)
sum2 : Ty2 → Ty2 → Ty2; sum2
= λ A B Ty2 nat2 top2 bot2 arr2 prod2 sum2 →
sum2 (A Ty2 nat2 top2 bot2 arr2 prod2 sum2) (B Ty2 nat2 top2 bot2 arr2 prod2 sum2)
Con2 : Set; Con2
= (Con2 : Set)
(nil : Con2)
(snoc : Con2 → Ty2 → Con2)
→ Con2
nil2 : Con2; nil2
= λ Con2 nil2 snoc → nil2
snoc2 : Con2 → Ty2 → Con2; snoc2
= λ Γ A Con2 nil2 snoc2 → snoc2 (Γ Con2 nil2 snoc2) A
Var2 : Con2 → Ty2 → Set; Var2
= λ Γ A →
(Var2 : Con2 → Ty2 → Set)
(vz : ∀{Γ A} → Var2 (snoc2 Γ A) A)
(vs : ∀{Γ B A} → Var2 Γ A → Var2 (snoc2 Γ B) A)
→ Var2 Γ A
vz2 : ∀{Γ A} → Var2 (snoc2 Γ A) A; vz2
= λ Var2 vz2 vs → vz2
vs2 : ∀{Γ B A} → Var2 Γ A → Var2 (snoc2 Γ B) A; vs2
= λ x Var2 vz2 vs2 → vs2 (x Var2 vz2 vs2)
Tm2 : Con2 → Ty2 → Set; Tm2
= λ Γ A →
(Tm2 : Con2 → Ty2 → Set)
(var : ∀{Γ A} → Var2 Γ A → Tm2 Γ A)
(lam : ∀{Γ A B} → Tm2 (snoc2 Γ A) B → Tm2 Γ (arr2 A B))
(app : ∀{Γ A B} → Tm2 Γ (arr2 A B) → Tm2 Γ A → Tm2 Γ B)
(tt : ∀{Γ} → Tm2 Γ top2)
(pair : ∀{Γ A B} → Tm2 Γ A → Tm2 Γ B → Tm2 Γ (prod2 A B))
(fst : ∀{Γ A B} → Tm2 Γ (prod2 A B) → Tm2 Γ A)
(snd : ∀{Γ A B} → Tm2 Γ (prod2 A B) → Tm2 Γ B)
(left : ∀{Γ A B} → Tm2 Γ A → Tm2 Γ (sum2 A B))
(right : ∀{Γ A B} → Tm2 Γ B → Tm2 Γ (sum2 A B))
(case : ∀{Γ A B C} → Tm2 Γ (sum2 A B) → Tm2 Γ (arr2 A C) → Tm2 Γ (arr2 B C) → Tm2 Γ C)
(zero : ∀{Γ} → Tm2 Γ nat2)
(suc : ∀{Γ} → Tm2 Γ nat2 → Tm2 Γ nat2)
(rec : ∀{Γ A} → Tm2 Γ nat2 → Tm2 Γ (arr2 nat2 (arr2 A A)) → Tm2 Γ A → Tm2 Γ A)
→ Tm2 Γ A
var2 : ∀{Γ A} → Var2 Γ A → Tm2 Γ A; var2
= λ x Tm2 var2 lam app tt pair fst snd left right case zero suc rec →
var2 x
lam2 : ∀{Γ A B} → Tm2 (snoc2 Γ A) B → Tm2 Γ (arr2 A B); lam2
= λ t Tm2 var2 lam2 app tt pair fst snd left right case zero suc rec →
lam2 (t Tm2 var2 lam2 app tt pair fst snd left right case zero suc rec)
app2 : ∀{Γ A B} → Tm2 Γ (arr2 A B) → Tm2 Γ A → Tm2 Γ B; app2
= λ t u Tm2 var2 lam2 app2 tt pair fst snd left right case zero suc rec →
app2 (t Tm2 var2 lam2 app2 tt pair fst snd left right case zero suc rec)
(u Tm2 var2 lam2 app2 tt pair fst snd left right case zero suc rec)
tt2 : ∀{Γ} → Tm2 Γ top2; tt2
= λ Tm2 var2 lam2 app2 tt2 pair fst snd left right case zero suc rec → tt2
pair2 : ∀{Γ A B} → Tm2 Γ A → Tm2 Γ B → Tm2 Γ (prod2 A B); pair2
= λ t u Tm2 var2 lam2 app2 tt2 pair2 fst snd left right case zero suc rec →
pair2 (t Tm2 var2 lam2 app2 tt2 pair2 fst snd left right case zero suc rec)
(u Tm2 var2 lam2 app2 tt2 pair2 fst snd left right case zero suc rec)
fst2 : ∀{Γ A B} → Tm2 Γ (prod2 A B) → Tm2 Γ A; fst2
= λ t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd left right case zero suc rec →
fst2 (t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd left right case zero suc rec)
snd2 : ∀{Γ A B} → Tm2 Γ (prod2 A B) → Tm2 Γ B; snd2
= λ t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left right case zero suc rec →
snd2 (t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left right case zero suc rec)
left2 : ∀{Γ A B} → Tm2 Γ A → Tm2 Γ (sum2 A B); left2
= λ t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right case zero suc rec →
left2 (t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right case zero suc rec)
right2 : ∀{Γ A B} → Tm2 Γ B → Tm2 Γ (sum2 A B); right2
= λ t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case zero suc rec →
right2 (t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case zero suc rec)
case2 : ∀{Γ A B C} → Tm2 Γ (sum2 A B) → Tm2 Γ (arr2 A C) → Tm2 Γ (arr2 B C) → Tm2 Γ C; case2
= λ t u v Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero suc rec →
case2 (t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero suc rec)
(u Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero suc rec)
(v Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero suc rec)
zero2 : ∀{Γ} → Tm2 Γ nat2; zero2
= λ Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero2 suc rec → zero2
suc2 : ∀{Γ} → Tm2 Γ nat2 → Tm2 Γ nat2; suc2
= λ t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero2 suc2 rec →
suc2 (t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero2 suc2 rec)
rec2 : ∀{Γ A} → Tm2 Γ nat2 → Tm2 Γ (arr2 nat2 (arr2 A A)) → Tm2 Γ A → Tm2 Γ A; rec2
= λ t u v Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero2 suc2 rec2 →
rec2 (t Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero2 suc2 rec2)
(u Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero2 suc2 rec2)
(v Tm2 var2 lam2 app2 tt2 pair2 fst2 snd2 left2 right2 case2 zero2 suc2 rec2)
v02 : ∀{Γ A} → Tm2 (snoc2 Γ A) A; v02
= var2 vz2
v12 : ∀{Γ A B} → Tm2 (snoc2 (snoc2 Γ A) B) A; v12
= var2 (vs2 vz2)
v22 : ∀{Γ A B C} → Tm2 (snoc2 (snoc2 (snoc2 Γ A) B) C) A; v22
= var2 (vs2 (vs2 vz2))
v32 : ∀{Γ A B C D} → Tm2 (snoc2 (snoc2 (snoc2 (snoc2 Γ A) B) C) D) A; v32
= var2 (vs2 (vs2 (vs2 vz2)))
tbool2 : Ty2; tbool2
= sum2 top2 top2
true2 : ∀{Γ} → Tm2 Γ tbool2; true2
= left2 tt2
tfalse2 : ∀{Γ} → Tm2 Γ tbool2; tfalse2
= right2 tt2
ifthenelse2 : ∀{Γ A} → Tm2 Γ (arr2 tbool2 (arr2 A (arr2 A A))); ifthenelse2
= lam2 (lam2 (lam2 (case2 v22 (lam2 v22) (lam2 v12))))
times42 : ∀{Γ A} → Tm2 Γ (arr2 (arr2 A A) (arr2 A A)); times42
= lam2 (lam2 (app2 v12 (app2 v12 (app2 v12 (app2 v12 v02)))))
add2 : ∀{Γ} → Tm2 Γ (arr2 nat2 (arr2 nat2 nat2)); add2
= lam2 (rec2 v02
(lam2 (lam2 (lam2 (suc2 (app2 v12 v02)))))
(lam2 v02))
mul2 : ∀{Γ} → Tm2 Γ (arr2 nat2 (arr2 nat2 nat2)); mul2
= lam2 (rec2 v02
(lam2 (lam2 (lam2 (app2 (app2 add2 (app2 v12 v02)) v02))))
(lam2 zero2))
fact2 : ∀{Γ} → Tm2 Γ (arr2 nat2 nat2); fact2
= lam2 (rec2 v02 (lam2 (lam2 (app2 (app2 mul2 (suc2 v12)) v02)))
(suc2 zero2))
{-# OPTIONS --type-in-type #-}
Ty3 : Set
Ty3 =
(Ty3 : Set)
(nat top bot : Ty3)
(arr prod sum : Ty3 → Ty3 → Ty3)
→ Ty3
nat3 : Ty3; nat3 = λ _ nat3 _ _ _ _ _ → nat3
top3 : Ty3; top3 = λ _ _ top3 _ _ _ _ → top3
bot3 : Ty3; bot3 = λ _ _ _ bot3 _ _ _ → bot3
arr3 : Ty3 → Ty3 → Ty3; arr3
= λ A B Ty3 nat3 top3 bot3 arr3 prod sum →
arr3 (A Ty3 nat3 top3 bot3 arr3 prod sum) (B Ty3 nat3 top3 bot3 arr3 prod sum)
prod3 : Ty3 → Ty3 → Ty3; prod3
= λ A B Ty3 nat3 top3 bot3 arr3 prod3 sum →
prod3 (A Ty3 nat3 top3 bot3 arr3 prod3 sum) (B Ty3 nat3 top3 bot3 arr3 prod3 sum)
sum3 : Ty3 → Ty3 → Ty3; sum3
= λ A B Ty3 nat3 top3 bot3 arr3 prod3 sum3 →
sum3 (A Ty3 nat3 top3 bot3 arr3 prod3 sum3) (B Ty3 nat3 top3 bot3 arr3 prod3 sum3)
Con3 : Set; Con3
= (Con3 : Set)
(nil : Con3)
(snoc : Con3 → Ty3 → Con3)
→ Con3
nil3 : Con3; nil3
= λ Con3 nil3 snoc → nil3
snoc3 : Con3 → Ty3 → Con3; snoc3
= λ Γ A Con3 nil3 snoc3 → snoc3 (Γ Con3 nil3 snoc3) A
Var3 : Con3 → Ty3 → Set; Var3
= λ Γ A →
(Var3 : Con3 → Ty3 → Set)
(vz : ∀{Γ A} → Var3 (snoc3 Γ A) A)
(vs : ∀{Γ B A} → Var3 Γ A → Var3 (snoc3 Γ B) A)
→ Var3 Γ A
vz3 : ∀{Γ A} → Var3 (snoc3 Γ A) A; vz3
= λ Var3 vz3 vs → vz3
vs3 : ∀{Γ B A} → Var3 Γ A → Var3 (snoc3 Γ B) A; vs3
= λ x Var3 vz3 vs3 → vs3 (x Var3 vz3 vs3)
Tm3 : Con3 → Ty3 → Set; Tm3
= λ Γ A →
(Tm3 : Con3 → Ty3 → Set)
(var : ∀{Γ A} → Var3 Γ A → Tm3 Γ A)
(lam : ∀{Γ A B} → Tm3 (snoc3 Γ A) B → Tm3 Γ (arr3 A B))
(app : ∀{Γ A B} → Tm3 Γ (arr3 A B) → Tm3 Γ A → Tm3 Γ B)
(tt : ∀{Γ} → Tm3 Γ top3)
(pair : ∀{Γ A B} → Tm3 Γ A → Tm3 Γ B → Tm3 Γ (prod3 A B))
(fst : ∀{Γ A B} → Tm3 Γ (prod3 A B) → Tm3 Γ A)
(snd : ∀{Γ A B} → Tm3 Γ (prod3 A B) → Tm3 Γ B)
(left : ∀{Γ A B} → Tm3 Γ A → Tm3 Γ (sum3 A B))
(right : ∀{Γ A B} → Tm3 Γ B → Tm3 Γ (sum3 A B))
(case : ∀{Γ A B C} → Tm3 Γ (sum3 A B) → Tm3 Γ (arr3 A C) → Tm3 Γ (arr3 B C) → Tm3 Γ C)
(zero : ∀{Γ} → Tm3 Γ nat3)
(suc : ∀{Γ} → Tm3 Γ nat3 → Tm3 Γ nat3)
(rec : ∀{Γ A} → Tm3 Γ nat3 → Tm3 Γ (arr3 nat3 (arr3 A A)) → Tm3 Γ A → Tm3 Γ A)
→ Tm3 Γ A
var3 : ∀{Γ A} → Var3 Γ A → Tm3 Γ A; var3
= λ x Tm3 var3 lam app tt pair fst snd left right case zero suc rec →
var3 x
lam3 : ∀{Γ A B} → Tm3 (snoc3 Γ A) B → Tm3 Γ (arr3 A B); lam3
= λ t Tm3 var3 lam3 app tt pair fst snd left right case zero suc rec →
lam3 (t Tm3 var3 lam3 app tt pair fst snd left right case zero suc rec)
app3 : ∀{Γ A B} → Tm3 Γ (arr3 A B) → Tm3 Γ A → Tm3 Γ B; app3
= λ t u Tm3 var3 lam3 app3 tt pair fst snd left right case zero suc rec →
app3 (t Tm3 var3 lam3 app3 tt pair fst snd left right case zero suc rec)
(u Tm3 var3 lam3 app3 tt pair fst snd left right case zero suc rec)
tt3 : ∀{Γ} → Tm3 Γ top3; tt3
= λ Tm3 var3 lam3 app3 tt3 pair fst snd left right case zero suc rec → tt3
pair3 : ∀{Γ A B} → Tm3 Γ A → Tm3 Γ B → Tm3 Γ (prod3 A B); pair3
= λ t u Tm3 var3 lam3 app3 tt3 pair3 fst snd left right case zero suc rec →
pair3 (t Tm3 var3 lam3 app3 tt3 pair3 fst snd left right case zero suc rec)
(u Tm3 var3 lam3 app3 tt3 pair3 fst snd left right case zero suc rec)
fst3 : ∀{Γ A B} → Tm3 Γ (prod3 A B) → Tm3 Γ A; fst3
= λ t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd left right case zero suc rec →
fst3 (t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd left right case zero suc rec)
snd3 : ∀{Γ A B} → Tm3 Γ (prod3 A B) → Tm3 Γ B; snd3
= λ t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left right case zero suc rec →
snd3 (t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left right case zero suc rec)
left3 : ∀{Γ A B} → Tm3 Γ A → Tm3 Γ (sum3 A B); left3
= λ t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right case zero suc rec →
left3 (t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right case zero suc rec)
right3 : ∀{Γ A B} → Tm3 Γ B → Tm3 Γ (sum3 A B); right3
= λ t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case zero suc rec →
right3 (t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case zero suc rec)
case3 : ∀{Γ A B C} → Tm3 Γ (sum3 A B) → Tm3 Γ (arr3 A C) → Tm3 Γ (arr3 B C) → Tm3 Γ C; case3
= λ t u v Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero suc rec →
case3 (t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero suc rec)
(u Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero suc rec)
(v Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero suc rec)
zero3 : ∀{Γ} → Tm3 Γ nat3; zero3
= λ Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero3 suc rec → zero3
suc3 : ∀{Γ} → Tm3 Γ nat3 → Tm3 Γ nat3; suc3
= λ t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero3 suc3 rec →
suc3 (t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero3 suc3 rec)
rec3 : ∀{Γ A} → Tm3 Γ nat3 → Tm3 Γ (arr3 nat3 (arr3 A A)) → Tm3 Γ A → Tm3 Γ A; rec3
= λ t u v Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero3 suc3 rec3 →
rec3 (t Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero3 suc3 rec3)
(u Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero3 suc3 rec3)
(v Tm3 var3 lam3 app3 tt3 pair3 fst3 snd3 left3 right3 case3 zero3 suc3 rec3)
v03 : ∀{Γ A} → Tm3 (snoc3 Γ A) A; v03
= var3 vz3
v13 : ∀{Γ A B} → Tm3 (snoc3 (snoc3 Γ A) B) A; v13
= var3 (vs3 vz3)
v23 : ∀{Γ A B C} → Tm3 (snoc3 (snoc3 (snoc3 Γ A) B) C) A; v23
= var3 (vs3 (vs3 vz3))
v33 : ∀{Γ A B C D} → Tm3 (snoc3 (snoc3 (snoc3 (snoc3 Γ A) B) C) D) A; v33
= var3 (vs3 (vs3 (vs3 vz3)))
tbool3 : Ty3; tbool3
= sum3 top3 top3
true3 : ∀{Γ} → Tm3 Γ tbool3; true3
= left3 tt3
tfalse3 : ∀{Γ} → Tm3 Γ tbool3; tfalse3
= right3 tt3
ifthenelse3 : ∀{Γ A} → Tm3 Γ (arr3 tbool3 (arr3 A (arr3 A A))); ifthenelse3
= lam3 (lam3 (lam3 (case3 v23 (lam3 v23) (lam3 v13))))
times43 : ∀{Γ A} → Tm3 Γ (arr3 (arr3 A A) (arr3 A A)); times43
= lam3 (lam3 (app3 v13 (app3 v13 (app3 v13 (app3 v13 v03)))))
add3 : ∀{Γ} → Tm3 Γ (arr3 nat3 (arr3 nat3 nat3)); add3
= lam3 (rec3 v03
(lam3 (lam3 (lam3 (suc3 (app3 v13 v03)))))
(lam3 v03))
mul3 : ∀{Γ} → Tm3 Γ (arr3 nat3 (arr3 nat3 nat3)); mul3
= lam3 (rec3 v03
(lam3 (lam3 (lam3 (app3 (app3 add3 (app3 v13 v03)) v03))))
(lam3 zero3))
fact3 : ∀{Γ} → Tm3 Γ (arr3 nat3 nat3); fact3
= lam3 (rec3 v03 (lam3 (lam3 (app3 (app3 mul3 (suc3 v13)) v03)))
(suc3 zero3))
{-# OPTIONS --type-in-type #-}
Ty4 : Set
Ty4 =
(Ty4 : Set)
(nat top bot : Ty4)
(arr prod sum : Ty4 → Ty4 → Ty4)
→ Ty4
nat4 : Ty4; nat4 = λ _ nat4 _ _ _ _ _ → nat4
top4 : Ty4; top4 = λ _ _ top4 _ _ _ _ → top4
bot4 : Ty4; bot4 = λ _ _ _ bot4 _ _ _ → bot4
arr4 : Ty4 → Ty4 → Ty4; arr4
= λ A B Ty4 nat4 top4 bot4 arr4 prod sum →
arr4 (A Ty4 nat4 top4 bot4 arr4 prod sum) (B Ty4 nat4 top4 bot4 arr4 prod sum)
prod4 : Ty4 → Ty4 → Ty4; prod4
= λ A B Ty4 nat4 top4 bot4 arr4 prod4 sum →
prod4 (A Ty4 nat4 top4 bot4 arr4 prod4 sum) (B Ty4 nat4 top4 bot4 arr4 prod4 sum)
sum4 : Ty4 → Ty4 → Ty4; sum4
= λ A B Ty4 nat4 top4 bot4 arr4 prod4 sum4 →
sum4 (A Ty4 nat4 top4 bot4 arr4 prod4 sum4) (B Ty4 nat4 top4 bot4 arr4 prod4 sum4)
Con4 : Set; Con4
= (Con4 : Set)
(nil : Con4)
(snoc : Con4 → Ty4 → Con4)
→ Con4
nil4 : Con4; nil4
= λ Con4 nil4 snoc → nil4
snoc4 : Con4 → Ty4 → Con4; snoc4
= λ Γ A Con4 nil4 snoc4 → snoc4 (Γ Con4 nil4 snoc4) A
Var4 : Con4 → Ty4 → Set; Var4
= λ Γ A →
(Var4 : Con4 → Ty4 → Set)
(vz : ∀{Γ A} → Var4 (snoc4 Γ A) A)
(vs : ∀{Γ B A} → Var4 Γ A → Var4 (snoc4 Γ B) A)
→ Var4 Γ A
vz4 : ∀{Γ A} → Var4 (snoc4 Γ A) A; vz4
= λ Var4 vz4 vs → vz4
vs4 : ∀{Γ B A} → Var4 Γ A → Var4 (snoc4 Γ B) A; vs4
= λ x Var4 vz4 vs4 → vs4 (x Var4 vz4 vs4)
Tm4 : Con4 → Ty4 → Set; Tm4
= λ Γ A →
(Tm4 : Con4 → Ty4 → Set)
(var : ∀{Γ A} → Var4 Γ A → Tm4 Γ A)
(lam : ∀{Γ A B} → Tm4 (snoc4 Γ A) B → Tm4 Γ (arr4 A B))
(app : ∀{Γ A B} → Tm4 Γ (arr4 A B) → Tm4 Γ A → Tm4 Γ B)
(tt : ∀{Γ} → Tm4 Γ top4)
(pair : ∀{Γ A B} → Tm4 Γ A → Tm4 Γ B → Tm4 Γ (prod4 A B))
(fst : ∀{Γ A B} → Tm4 Γ (prod4 A B) → Tm4 Γ A)
(snd : ∀{Γ A B} → Tm4 Γ (prod4 A B) → Tm4 Γ B)
(left : ∀{Γ A B} → Tm4 Γ A → Tm4 Γ (sum4 A B))
(right : ∀{Γ A B} → Tm4 Γ B → Tm4 Γ (sum4 A B))
(case : ∀{Γ A B C} → Tm4 Γ (sum4 A B) → Tm4 Γ (arr4 A C) → Tm4 Γ (arr4 B C) → Tm4 Γ C)
(zero : ∀{Γ} → Tm4 Γ nat4)
(suc : ∀{Γ} → Tm4 Γ nat4 → Tm4 Γ nat4)
(rec : ∀{Γ A} → Tm4 Γ nat4 → Tm4 Γ (arr4 nat4 (arr4 A A)) → Tm4 Γ A → Tm4 Γ A)
→ Tm4 Γ A
var4 : ∀{Γ A} → Var4 Γ A → Tm4 Γ A; var4
= λ x Tm4 var4 lam app tt pair fst snd left right case zero suc rec →
var4 x
lam4 : ∀{Γ A B} → Tm4 (snoc4 Γ A) B → Tm4 Γ (arr4 A B); lam4
= λ t Tm4 var4 lam4 app tt pair fst snd left right case zero suc rec →
lam4 (t Tm4 var4 lam4 app tt pair fst snd left right case zero suc rec)
app4 : ∀{Γ A B} → Tm4 Γ (arr4 A B) → Tm4 Γ A → Tm4 Γ B; app4
= λ t u Tm4 var4 lam4 app4 tt pair fst snd left right case zero suc rec →
app4 (t Tm4 var4 lam4 app4 tt pair fst snd left right case zero suc rec)
(u Tm4 var4 lam4 app4 tt pair fst snd left right case zero suc rec)
tt4 : ∀{Γ} → Tm4 Γ top4; tt4
= λ Tm4 var4 lam4 app4 tt4 pair fst snd left right case zero suc rec → tt4
pair4 : ∀{Γ A B} → Tm4 Γ A → Tm4 Γ B → Tm4 Γ (prod4 A B); pair4
= λ t u Tm4 var4 lam4 app4 tt4 pair4 fst snd left right case zero suc rec →
pair4 (t Tm4 var4 lam4 app4 tt4 pair4 fst snd left right case zero suc rec)
(u Tm4 var4 lam4 app4 tt4 pair4 fst snd left right case zero suc rec)
fst4 : ∀{Γ A B} → Tm4 Γ (prod4 A B) → Tm4 Γ A; fst4
= λ t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd left right case zero suc rec →
fst4 (t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd left right case zero suc rec)
snd4 : ∀{Γ A B} → Tm4 Γ (prod4 A B) → Tm4 Γ B; snd4
= λ t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left right case zero suc rec →
snd4 (t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left right case zero suc rec)
left4 : ∀{Γ A B} → Tm4 Γ A → Tm4 Γ (sum4 A B); left4
= λ t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right case zero suc rec →
left4 (t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right case zero suc rec)
right4 : ∀{Γ A B} → Tm4 Γ B → Tm4 Γ (sum4 A B); right4
= λ t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case zero suc rec →
right4 (t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case zero suc rec)
case4 : ∀{Γ A B C} → Tm4 Γ (sum4 A B) → Tm4 Γ (arr4 A C) → Tm4 Γ (arr4 B C) → Tm4 Γ C; case4
= λ t u v Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero suc rec →
case4 (t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero suc rec)
(u Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero suc rec)
(v Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero suc rec)
zero4 : ∀{Γ} → Tm4 Γ nat4; zero4
= λ Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero4 suc rec → zero4
suc4 : ∀{Γ} → Tm4 Γ nat4 → Tm4 Γ nat4; suc4
= λ t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero4 suc4 rec →
suc4 (t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero4 suc4 rec)
rec4 : ∀{Γ A} → Tm4 Γ nat4 → Tm4 Γ (arr4 nat4 (arr4 A A)) → Tm4 Γ A → Tm4 Γ A; rec4
= λ t u v Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero4 suc4 rec4 →
rec4 (t Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero4 suc4 rec4)
(u Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero4 suc4 rec4)
(v Tm4 var4 lam4 app4 tt4 pair4 fst4 snd4 left4 right4 case4 zero4 suc4 rec4)
v04 : ∀{Γ A} → Tm4 (snoc4 Γ A) A; v04
= var4 vz4
v14 : ∀{Γ A B} → Tm4 (snoc4 (snoc4 Γ A) B) A; v14
= var4 (vs4 vz4)
v24 : ∀{Γ A B C} → Tm4 (snoc4 (snoc4 (snoc4 Γ A) B) C) A; v24
= var4 (vs4 (vs4 vz4))
v34 : ∀{Γ A B C D} → Tm4 (snoc4 (snoc4 (snoc4 (snoc4 Γ A) B) C) D) A; v34
= var4 (vs4 (vs4 (vs4 vz4)))
tbool4 : Ty4; tbool4
= sum4 top4 top4
true4 : ∀{Γ} → Tm4 Γ tbool4; true4
= left4 tt4
tfalse4 : ∀{Γ} → Tm4 Γ tbool4; tfalse4
= right4 tt4
ifthenelse4 : ∀{Γ A} → Tm4 Γ (arr4 tbool4 (arr4 A (arr4 A A))); ifthenelse4
= lam4 (lam4 (lam4 (case4 v24 (lam4 v24) (lam4 v14))))
times44 : ∀{Γ A} → Tm4 Γ (arr4 (arr4 A A) (arr4 A A)); times44
= lam4 (lam4 (app4 v14 (app4 v14 (app4 v14 (app4 v14 v04)))))
add4 : ∀{Γ} → Tm4 Γ (arr4 nat4 (arr4 nat4 nat4)); add4
= lam4 (rec4 v04
(lam4 (lam4 (lam4 (suc4 (app4 v14 v04)))))
(lam4 v04))
mul4 : ∀{Γ} → Tm4 Γ (arr4 nat4 (arr4 nat4 nat4)); mul4
= lam4 (rec4 v04
(lam4 (lam4 (lam4 (app4 (app4 add4 (app4 v14 v04)) v04))))
(lam4 zero4))
fact4 : ∀{Γ} → Tm4 Γ (arr4 nat4 nat4); fact4
= lam4 (rec4 v04 (lam4 (lam4 (app4 (app4 mul4 (suc4 v14)) v04)))
(suc4 zero4))
{-# OPTIONS --type-in-type #-}
Ty5 : Set
Ty5 =
(Ty5 : Set)
(nat top bot : Ty5)
(arr prod sum : Ty5 → Ty5 → Ty5)
→ Ty5
nat5 : Ty5; nat5 = λ _ nat5 _ _ _ _ _ → nat5
top5 : Ty5; top5 = λ _ _ top5 _ _ _ _ → top5
bot5 : Ty5; bot5 = λ _ _ _ bot5 _ _ _ → bot5
arr5 : Ty5 → Ty5 → Ty5; arr5
= λ A B Ty5 nat5 top5 bot5 arr5 prod sum →
arr5 (A Ty5 nat5 top5 bot5 arr5 prod sum) (B Ty5 nat5 top5 bot5 arr5 prod sum)
prod5 : Ty5 → Ty5 → Ty5; prod5
= λ A B Ty5 nat5 top5 bot5 arr5 prod5 sum →
prod5 (A Ty5 nat5 top5 bot5 arr5 prod5 sum) (B Ty5 nat5 top5 bot5 arr5 prod5 sum)
sum5 : Ty5 → Ty5 → Ty5; sum5
= λ A B Ty5 nat5 top5 bot5 arr5 prod5 sum5 →
sum5 (A Ty5 nat5 top5 bot5 arr5 prod5 sum5) (B Ty5 nat5 top5 bot5 arr5 prod5 sum5)
Con5 : Set; Con5
= (Con5 : Set)
(nil : Con5)
(snoc : Con5 → Ty5 → Con5)
→ Con5
nil5 : Con5; nil5
= λ Con5 nil5 snoc → nil5
snoc5 : Con5 → Ty5 → Con5; snoc5
= λ Γ A Con5 nil5 snoc5 → snoc5 (Γ Con5 nil5 snoc5) A
Var5 : Con5 → Ty5 → Set; Var5
= λ Γ A →
(Var5 : Con5 → Ty5 → Set)
(vz : ∀{Γ A} → Var5 (snoc5 Γ A) A)
(vs : ∀{Γ B A} → Var5 Γ A → Var5 (snoc5 Γ B) A)
→ Var5 Γ A
vz5 : ∀{Γ A} → Var5 (snoc5 Γ A) A; vz5
= λ Var5 vz5 vs → vz5
vs5 : ∀{Γ B A} → Var5 Γ A → Var5 (snoc5 Γ B) A; vs5
= λ x Var5 vz5 vs5 → vs5 (x Var5 vz5 vs5)
Tm5 : Con5 → Ty5 → Set; Tm5
= λ Γ A →
(Tm5 : Con5 → Ty5 → Set)
(var : ∀{Γ A} → Var5 Γ A → Tm5 Γ A)
(lam : ∀{Γ A B} → Tm5 (snoc5 Γ A) B → Tm5 Γ (arr5 A B))
(app : ∀{Γ A B} → Tm5 Γ (arr5 A B) → Tm5 Γ A → Tm5 Γ B)
(tt : ∀{Γ} → Tm5 Γ top5)
(pair : ∀{Γ A B} → Tm5 Γ A → Tm5 Γ B → Tm5 Γ (prod5 A B))
(fst : ∀{Γ A B} → Tm5 Γ (prod5 A B) → Tm5 Γ A)
(snd : ∀{Γ A B} → Tm5 Γ (prod5 A B) → Tm5 Γ B)
(left : ∀{Γ A B} → Tm5 Γ A → Tm5 Γ (sum5 A B))
(right : ∀{Γ A B} → Tm5 Γ B → Tm5 Γ (sum5 A B))
(case : ∀{Γ A B C} → Tm5 Γ (sum5 A B) → Tm5 Γ (arr5 A C) → Tm5 Γ (arr5 B C) → Tm5 Γ C)
(zero : ∀{Γ} → Tm5 Γ nat5)
(suc : ∀{Γ} → Tm5 Γ nat5 → Tm5 Γ nat5)
(rec : ∀{Γ A} → Tm5 Γ nat5 → Tm5 Γ (arr5 nat5 (arr5 A A)) → Tm5 Γ A → Tm5 Γ A)
→ Tm5 Γ A
var5 : ∀{Γ A} → Var5 Γ A → Tm5 Γ A; var5
= λ x Tm5 var5 lam app tt pair fst snd left right case zero suc rec →
var5 x
lam5 : ∀{Γ A B} → Tm5 (snoc5 Γ A) B → Tm5 Γ (arr5 A B); lam5
= λ t Tm5 var5 lam5 app tt pair fst snd left right case zero suc rec →
lam5 (t Tm5 var5 lam5 app tt pair fst snd left right case zero suc rec)
app5 : ∀{Γ A B} → Tm5 Γ (arr5 A B) → Tm5 Γ A → Tm5 Γ B; app5
= λ t u Tm5 var5 lam5 app5 tt pair fst snd left right case zero suc rec →
app5 (t Tm5 var5 lam5 app5 tt pair fst snd left right case zero suc rec)
(u Tm5 var5 lam5 app5 tt pair fst snd left right case zero suc rec)
tt5 : ∀{Γ} → Tm5 Γ top5; tt5
= λ Tm5 var5 lam5 app5 tt5 pair fst snd left right case zero suc rec → tt5
pair5 : ∀{Γ A B} → Tm5 Γ A → Tm5 Γ B → Tm5 Γ (prod5 A B); pair5
= λ t u Tm5 var5 lam5 app5 tt5 pair5 fst snd left right case zero suc rec →
pair5 (t Tm5 var5 lam5 app5 tt5 pair5 fst snd left right case zero suc rec)
(u Tm5 var5 lam5 app5 tt5 pair5 fst snd left right case zero suc rec)
fst5 : ∀{Γ A B} → Tm5 Γ (prod5 A B) → Tm5 Γ A; fst5
= λ t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd left right case zero suc rec →
fst5 (t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd left right case zero suc rec)
snd5 : ∀{Γ A B} → Tm5 Γ (prod5 A B) → Tm5 Γ B; snd5
= λ t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left right case zero suc rec →
snd5 (t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left right case zero suc rec)
left5 : ∀{Γ A B} → Tm5 Γ A → Tm5 Γ (sum5 A B); left5
= λ t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right case zero suc rec →
left5 (t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right case zero suc rec)
right5 : ∀{Γ A B} → Tm5 Γ B → Tm5 Γ (sum5 A B); right5
= λ t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case zero suc rec →
right5 (t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case zero suc rec)
case5 : ∀{Γ A B C} → Tm5 Γ (sum5 A B) → Tm5 Γ (arr5 A C) → Tm5 Γ (arr5 B C) → Tm5 Γ C; case5
= λ t u v Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero suc rec →
case5 (t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero suc rec)
(u Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero suc rec)
(v Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero suc rec)
zero5 : ∀{Γ} → Tm5 Γ nat5; zero5
= λ Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero5 suc rec → zero5
suc5 : ∀{Γ} → Tm5 Γ nat5 → Tm5 Γ nat5; suc5
= λ t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero5 suc5 rec →
suc5 (t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero5 suc5 rec)
rec5 : ∀{Γ A} → Tm5 Γ nat5 → Tm5 Γ (arr5 nat5 (arr5 A A)) → Tm5 Γ A → Tm5 Γ A; rec5
= λ t u v Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero5 suc5 rec5 →
rec5 (t Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero5 suc5 rec5)
(u Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero5 suc5 rec5)
(v Tm5 var5 lam5 app5 tt5 pair5 fst5 snd5 left5 right5 case5 zero5 suc5 rec5)
v05 : ∀{Γ A} → Tm5 (snoc5 Γ A) A; v05
= var5 vz5
v15 : ∀{Γ A B} → Tm5 (snoc5 (snoc5 Γ A) B) A; v15
= var5 (vs5 vz5)
v25 : ∀{Γ A B C} → Tm5 (snoc5 (snoc5 (snoc5 Γ A) B) C) A; v25
= var5 (vs5 (vs5 vz5))
v35 : ∀{Γ A B C D} → Tm5 (snoc5 (snoc5 (snoc5 (snoc5 Γ A) B) C) D) A; v35
= var5 (vs5 (vs5 (vs5 vz5)))
tbool5 : Ty5; tbool5
= sum5 top5 top5
true5 : ∀{Γ} → Tm5 Γ tbool5; true5
= left5 tt5
tfalse5 : ∀{Γ} → Tm5 Γ tbool5; tfalse5
= right5 tt5
ifthenelse5 : ∀{Γ A} → Tm5 Γ (arr5 tbool5 (arr5 A (arr5 A A))); ifthenelse5
= lam5 (lam5 (lam5 (case5 v25 (lam5 v25) (lam5 v15))))
times45 : ∀{Γ A} → Tm5 Γ (arr5 (arr5 A A) (arr5 A A)); times45
= lam5 (lam5 (app5 v15 (app5 v15 (app5 v15 (app5 v15 v05)))))
add5 : ∀{Γ} → Tm5 Γ (arr5 nat5 (arr5 nat5 nat5)); add5
= lam5 (rec5 v05
(lam5 (lam5 (lam5 (suc5 (app5 v15 v05)))))
(lam5 v05))
mul5 : ∀{Γ} → Tm5 Γ (arr5 nat5 (arr5 nat5 nat5)); mul5
= lam5 (rec5 v05
(lam5 (lam5 (lam5 (app5 (app5 add5 (app5 v15 v05)) v05))))
(lam5 zero5))
fact5 : ∀{Γ} → Tm5 Γ (arr5 nat5 nat5); fact5
= lam5 (rec5 v05 (lam5 (lam5 (app5 (app5 mul5 (suc5 v15)) v05)))
(suc5 zero5))
{-# OPTIONS --type-in-type #-}
Ty6 : Set
Ty6 =
(Ty6 : Set)
(nat top bot : Ty6)
(arr prod sum : Ty6 → Ty6 → Ty6)
→ Ty6
nat6 : Ty6; nat6 = λ _ nat6 _ _ _ _ _ → nat6
top6 : Ty6; top6 = λ _ _ top6 _ _ _ _ → top6
bot6 : Ty6; bot6 = λ _ _ _ bot6 _ _ _ → bot6
arr6 : Ty6 → Ty6 → Ty6; arr6
= λ A B Ty6 nat6 top6 bot6 arr6 prod sum →
arr6 (A Ty6 nat6 top6 bot6 arr6 prod sum) (B Ty6 nat6 top6 bot6 arr6 prod sum)
prod6 : Ty6 → Ty6 → Ty6; prod6
= λ A B Ty6 nat6 top6 bot6 arr6 prod6 sum →
prod6 (A Ty6 nat6 top6 bot6 arr6 prod6 sum) (B Ty6 nat6 top6 bot6 arr6 prod6 sum)
sum6 : Ty6 → Ty6 → Ty6; sum6
= λ A B Ty6 nat6 top6 bot6 arr6 prod6 sum6 →
sum6 (A Ty6 nat6 top6 bot6 arr6 prod6 sum6) (B Ty6 nat6 top6 bot6 arr6 prod6 sum6)
Con6 : Set; Con6
= (Con6 : Set)
(nil : Con6)
(snoc : Con6 → Ty6 → Con6)
→ Con6
nil6 : Con6; nil6
= λ Con6 nil6 snoc → nil6
snoc6 : Con6 → Ty6 → Con6; snoc6
= λ Γ A Con6 nil6 snoc6 → snoc6 (Γ Con6 nil6 snoc6) A
Var6 : Con6 → Ty6 → Set; Var6
= λ Γ A →
(Var6 : Con6 → Ty6 → Set)
(vz : ∀{Γ A} → Var6 (snoc6 Γ A) A)
(vs : ∀{Γ B A} → Var6 Γ A → Var6 (snoc6 Γ B) A)
→ Var6 Γ A
vz6 : ∀{Γ A} → Var6 (snoc6 Γ A) A; vz6
= λ Var6 vz6 vs → vz6
vs6 : ∀{Γ B A} → Var6 Γ A → Var6 (snoc6 Γ B) A; vs6
= λ x Var6 vz6 vs6 → vs6 (x Var6 vz6 vs6)
Tm6 : Con6 → Ty6 → Set; Tm6
= λ Γ A →
(Tm6 : Con6 → Ty6 → Set)
(var : ∀{Γ A} → Var6 Γ A → Tm6 Γ A)
(lam : ∀{Γ A B} → Tm6 (snoc6 Γ A) B → Tm6 Γ (arr6 A B))
(app : ∀{Γ A B} → Tm6 Γ (arr6 A B) → Tm6 Γ A → Tm6 Γ B)
(tt : ∀{Γ} → Tm6 Γ top6)
(pair : ∀{Γ A B} → Tm6 Γ A → Tm6 Γ B → Tm6 Γ (prod6 A B))
(fst : ∀{Γ A B} → Tm6 Γ (prod6 A B) → Tm6 Γ A)
(snd : ∀{Γ A B} → Tm6 Γ (prod6 A B) → Tm6 Γ B)
(left : ∀{Γ A B} → Tm6 Γ A → Tm6 Γ (sum6 A B))
(right : ∀{Γ A B} → Tm6 Γ B → Tm6 Γ (sum6 A B))
(case : ∀{Γ A B C} → Tm6 Γ (sum6 A B) → Tm6 Γ (arr6 A C) → Tm6 Γ (arr6 B C) → Tm6 Γ C)
(zero : ∀{Γ} → Tm6 Γ nat6)
(suc : ∀{Γ} → Tm6 Γ nat6 → Tm6 Γ nat6)
(rec : ∀{Γ A} → Tm6 Γ nat6 → Tm6 Γ (arr6 nat6 (arr6 A A)) → Tm6 Γ A → Tm6 Γ A)
→ Tm6 Γ A
var6 : ∀{Γ A} → Var6 Γ A → Tm6 Γ A; var6
= λ x Tm6 var6 lam app tt pair fst snd left right case zero suc rec →
var6 x
lam6 : ∀{Γ A B} → Tm6 (snoc6 Γ A) B → Tm6 Γ (arr6 A B); lam6
= λ t Tm6 var6 lam6 app tt pair fst snd left right case zero suc rec →
lam6 (t Tm6 var6 lam6 app tt pair fst snd left right case zero suc rec)
app6 : ∀{Γ A B} → Tm6 Γ (arr6 A B) → Tm6 Γ A → Tm6 Γ B; app6
= λ t u Tm6 var6 lam6 app6 tt pair fst snd left right case zero suc rec →
app6 (t Tm6 var6 lam6 app6 tt pair fst snd left right case zero suc rec)
(u Tm6 var6 lam6 app6 tt pair fst snd left right case zero suc rec)
tt6 : ∀{Γ} → Tm6 Γ top6; tt6
= λ Tm6 var6 lam6 app6 tt6 pair fst snd left right case zero suc rec → tt6
pair6 : ∀{Γ A B} → Tm6 Γ A → Tm6 Γ B → Tm6 Γ (prod6 A B); pair6
= λ t u Tm6 var6 lam6 app6 tt6 pair6 fst snd left right case zero suc rec →
pair6 (t Tm6 var6 lam6 app6 tt6 pair6 fst snd left right case zero suc rec)
(u Tm6 var6 lam6 app6 tt6 pair6 fst snd left right case zero suc rec)
fst6 : ∀{Γ A B} → Tm6 Γ (prod6 A B) → Tm6 Γ A; fst6
= λ t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd left right case zero suc rec →
fst6 (t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd left right case zero suc rec)
snd6 : ∀{Γ A B} → Tm6 Γ (prod6 A B) → Tm6 Γ B; snd6
= λ t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left right case zero suc rec →
snd6 (t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left right case zero suc rec)
left6 : ∀{Γ A B} → Tm6 Γ A → Tm6 Γ (sum6 A B); left6
= λ t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right case zero suc rec →
left6 (t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right case zero suc rec)
right6 : ∀{Γ A B} → Tm6 Γ B → Tm6 Γ (sum6 A B); right6
= λ t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case zero suc rec →
right6 (t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case zero suc rec)
case6 : ∀{Γ A B C} → Tm6 Γ (sum6 A B) → Tm6 Γ (arr6 A C) → Tm6 Γ (arr6 B C) → Tm6 Γ C; case6
= λ t u v Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero suc rec →
case6 (t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero suc rec)
(u Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero suc rec)
(v Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero suc rec)
zero6 : ∀{Γ} → Tm6 Γ nat6; zero6
= λ Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero6 suc rec → zero6
suc6 : ∀{Γ} → Tm6 Γ nat6 → Tm6 Γ nat6; suc6
= λ t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero6 suc6 rec →
suc6 (t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero6 suc6 rec)
rec6 : ∀{Γ A} → Tm6 Γ nat6 → Tm6 Γ (arr6 nat6 (arr6 A A)) → Tm6 Γ A → Tm6 Γ A; rec6
= λ t u v Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero6 suc6 rec6 →
rec6 (t Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero6 suc6 rec6)
(u Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero6 suc6 rec6)
(v Tm6 var6 lam6 app6 tt6 pair6 fst6 snd6 left6 right6 case6 zero6 suc6 rec6)
v06 : ∀{Γ A} → Tm6 (snoc6 Γ A) A; v06
= var6 vz6
v16 : ∀{Γ A B} → Tm6 (snoc6 (snoc6 Γ A) B) A; v16
= var6 (vs6 vz6)
v26 : ∀{Γ A B C} → Tm6 (snoc6 (snoc6 (snoc6 Γ A) B) C) A; v26
= var6 (vs6 (vs6 vz6))
v36 : ∀{Γ A B C D} → Tm6 (snoc6 (snoc6 (snoc6 (snoc6 Γ A) B) C) D) A; v36
= var6 (vs6 (vs6 (vs6 vz6)))
tbool6 : Ty6; tbool6
= sum6 top6 top6
true6 : ∀{Γ} → Tm6 Γ tbool6; true6
= left6 tt6
tfalse6 : ∀{Γ} → Tm6 Γ tbool6; tfalse6
= right6 tt6
ifthenelse6 : ∀{Γ A} → Tm6 Γ (arr6 tbool6 (arr6 A (arr6 A A))); ifthenelse6
= lam6 (lam6 (lam6 (case6 v26 (lam6 v26) (lam6 v16))))
times46 : ∀{Γ A} → Tm6 Γ (arr6 (arr6 A A) (arr6 A A)); times46
= lam6 (lam6 (app6 v16 (app6 v16 (app6 v16 (app6 v16 v06)))))
add6 : ∀{Γ} → Tm6 Γ (arr6 nat6 (arr6 nat6 nat6)); add6
= lam6 (rec6 v06
(lam6 (lam6 (lam6 (suc6 (app6 v16 v06)))))
(lam6 v06))
mul6 : ∀{Γ} → Tm6 Γ (arr6 nat6 (arr6 nat6 nat6)); mul6
= lam6 (rec6 v06
(lam6 (lam6 (lam6 (app6 (app6 add6 (app6 v16 v06)) v06))))
(lam6 zero6))
fact6 : ∀{Γ} → Tm6 Γ (arr6 nat6 nat6); fact6
= lam6 (rec6 v06 (lam6 (lam6 (app6 (app6 mul6 (suc6 v16)) v06)))
(suc6 zero6))
{-# OPTIONS --type-in-type #-}
Ty7 : Set
Ty7 =
(Ty7 : Set)
(nat top bot : Ty7)
(arr prod sum : Ty7 → Ty7 → Ty7)
→ Ty7
nat7 : Ty7; nat7 = λ _ nat7 _ _ _ _ _ → nat7
top7 : Ty7; top7 = λ _ _ top7 _ _ _ _ → top7
bot7 : Ty7; bot7 = λ _ _ _ bot7 _ _ _ → bot7
arr7 : Ty7 → Ty7 → Ty7; arr7
= λ A B Ty7 nat7 top7 bot7 arr7 prod sum →
arr7 (A Ty7 nat7 top7 bot7 arr7 prod sum) (B Ty7 nat7 top7 bot7 arr7 prod sum)
prod7 : Ty7 → Ty7 → Ty7; prod7
= λ A B Ty7 nat7 top7 bot7 arr7 prod7 sum →
prod7 (A Ty7 nat7 top7 bot7 arr7 prod7 sum) (B Ty7 nat7 top7 bot7 arr7 prod7 sum)
sum7 : Ty7 → Ty7 → Ty7; sum7
= λ A B Ty7 nat7 top7 bot7 arr7 prod7 sum7 →
sum7 (A Ty7 nat7 top7 bot7 arr7 prod7 sum7) (B Ty7 nat7 top7 bot7 arr7 prod7 sum7)
Con7 : Set; Con7
= (Con7 : Set)
(nil : Con7)
(snoc : Con7 → Ty7 → Con7)
→ Con7
nil7 : Con7; nil7
= λ Con7 nil7 snoc → nil7
snoc7 : Con7 → Ty7 → Con7; snoc7
= λ Γ A Con7 nil7 snoc7 → snoc7 (Γ Con7 nil7 snoc7) A
Var7 : Con7 → Ty7 → Set; Var7
= λ Γ A →
(Var7 : Con7 → Ty7 → Set)
(vz : ∀{Γ A} → Var7 (snoc7 Γ A) A)
(vs : ∀{Γ B A} → Var7 Γ A → Var7 (snoc7 Γ B) A)
→ Var7 Γ A
vz7 : ∀{Γ A} → Var7 (snoc7 Γ A) A; vz7
= λ Var7 vz7 vs → vz7
vs7 : ∀{Γ B A} → Var7 Γ A → Var7 (snoc7 Γ B) A; vs7
= λ x Var7 vz7 vs7 → vs7 (x Var7 vz7 vs7)
Tm7 : Con7 → Ty7 → Set; Tm7
= λ Γ A →
(Tm7 : Con7 → Ty7 → Set)
(var : ∀{Γ A} → Var7 Γ A → Tm7 Γ A)
(lam : ∀{Γ A B} → Tm7 (snoc7 Γ A) B → Tm7 Γ (arr7 A B))
(app : ∀{Γ A B} → Tm7 Γ (arr7 A B) → Tm7 Γ A → Tm7 Γ B)
(tt : ∀{Γ} → Tm7 Γ top7)
(pair : ∀{Γ A B} → Tm7 Γ A → Tm7 Γ B → Tm7 Γ (prod7 A B))
(fst : ∀{Γ A B} → Tm7 Γ (prod7 A B) → Tm7 Γ A)
(snd : ∀{Γ A B} → Tm7 Γ (prod7 A B) → Tm7 Γ B)
(left : ∀{Γ A B} → Tm7 Γ A → Tm7 Γ (sum7 A B))
(right : ∀{Γ A B} → Tm7 Γ B → Tm7 Γ (sum7 A B))
(case : ∀{Γ A B C} → Tm7 Γ (sum7 A B) → Tm7 Γ (arr7 A C) → Tm7 Γ (arr7 B C) → Tm7 Γ C)
(zero : ∀{Γ} → Tm7 Γ nat7)
(suc : ∀{Γ} → Tm7 Γ nat7 → Tm7 Γ nat7)
(rec : ∀{Γ A} → Tm7 Γ nat7 → Tm7 Γ (arr7 nat7 (arr7 A A)) → Tm7 Γ A → Tm7 Γ A)
→ Tm7 Γ A
var7 : ∀{Γ A} → Var7 Γ A → Tm7 Γ A; var7
= λ x Tm7 var7 lam app tt pair fst snd left right case zero suc rec →
var7 x
lam7 : ∀{Γ A B} → Tm7 (snoc7 Γ A) B → Tm7 Γ (arr7 A B); lam7
= λ t Tm7 var7 lam7 app tt pair fst snd left right case zero suc rec →
lam7 (t Tm7 var7 lam7 app tt pair fst snd left right case zero suc rec)
app7 : ∀{Γ A B} → Tm7 Γ (arr7 A B) → Tm7 Γ A → Tm7 Γ B; app7
= λ t u Tm7 var7 lam7 app7 tt pair fst snd left right case zero suc rec →
app7 (t Tm7 var7 lam7 app7 tt pair fst snd left right case zero suc rec)
(u Tm7 var7 lam7 app7 tt pair fst snd left right case zero suc rec)
tt7 : ∀{Γ} → Tm7 Γ top7; tt7
= λ Tm7 var7 lam7 app7 tt7 pair fst snd left right case zero suc rec → tt7
pair7 : ∀{Γ A B} → Tm7 Γ A → Tm7 Γ B → Tm7 Γ (prod7 A B); pair7
= λ t u Tm7 var7 lam7 app7 tt7 pair7 fst snd left right case zero suc rec →
pair7 (t Tm7 var7 lam7 app7 tt7 pair7 fst snd left right case zero suc rec)
(u Tm7 var7 lam7 app7 tt7 pair7 fst snd left right case zero suc rec)
fst7 : ∀{Γ A B} → Tm7 Γ (prod7 A B) → Tm7 Γ A; fst7
= λ t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd left right case zero suc rec →
fst7 (t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd left right case zero suc rec)
snd7 : ∀{Γ A B} → Tm7 Γ (prod7 A B) → Tm7 Γ B; snd7
= λ t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left right case zero suc rec →
snd7 (t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left right case zero suc rec)
left7 : ∀{Γ A B} → Tm7 Γ A → Tm7 Γ (sum7 A B); left7
= λ t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right case zero suc rec →
left7 (t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right case zero suc rec)
right7 : ∀{Γ A B} → Tm7 Γ B → Tm7 Γ (sum7 A B); right7
= λ t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case zero suc rec →
right7 (t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case zero suc rec)
case7 : ∀{Γ A B C} → Tm7 Γ (sum7 A B) → Tm7 Γ (arr7 A C) → Tm7 Γ (arr7 B C) → Tm7 Γ C; case7
= λ t u v Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero suc rec →
case7 (t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero suc rec)
(u Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero suc rec)
(v Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero suc rec)
zero7 : ∀{Γ} → Tm7 Γ nat7; zero7
= λ Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero7 suc rec → zero7
suc7 : ∀{Γ} → Tm7 Γ nat7 → Tm7 Γ nat7; suc7
= λ t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero7 suc7 rec →
suc7 (t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero7 suc7 rec)
rec7 : ∀{Γ A} → Tm7 Γ nat7 → Tm7 Γ (arr7 nat7 (arr7 A A)) → Tm7 Γ A → Tm7 Γ A; rec7
= λ t u v Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero7 suc7 rec7 →
rec7 (t Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero7 suc7 rec7)
(u Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero7 suc7 rec7)
(v Tm7 var7 lam7 app7 tt7 pair7 fst7 snd7 left7 right7 case7 zero7 suc7 rec7)
v07 : ∀{Γ A} → Tm7 (snoc7 Γ A) A; v07
= var7 vz7
v17 : ∀{Γ A B} → Tm7 (snoc7 (snoc7 Γ A) B) A; v17
= var7 (vs7 vz7)
v27 : ∀{Γ A B C} → Tm7 (snoc7 (snoc7 (snoc7 Γ A) B) C) A; v27
= var7 (vs7 (vs7 vz7))
v37 : ∀{Γ A B C D} → Tm7 (snoc7 (snoc7 (snoc7 (snoc7 Γ A) B) C) D) A; v37
= var7 (vs7 (vs7 (vs7 vz7)))
tbool7 : Ty7; tbool7
= sum7 top7 top7
true7 : ∀{Γ} → Tm7 Γ tbool7; true7
= left7 tt7
tfalse7 : ∀{Γ} → Tm7 Γ tbool7; tfalse7
= right7 tt7
ifthenelse7 : ∀{Γ A} → Tm7 Γ (arr7 tbool7 (arr7 A (arr7 A A))); ifthenelse7
= lam7 (lam7 (lam7 (case7 v27 (lam7 v27) (lam7 v17))))
times47 : ∀{Γ A} → Tm7 Γ (arr7 (arr7 A A) (arr7 A A)); times47
= lam7 (lam7 (app7 v17 (app7 v17 (app7 v17 (app7 v17 v07)))))
add7 : ∀{Γ} → Tm7 Γ (arr7 nat7 (arr7 nat7 nat7)); add7
= lam7 (rec7 v07
(lam7 (lam7 (lam7 (suc7 (app7 v17 v07)))))
(lam7 v07))
mul7 : ∀{Γ} → Tm7 Γ (arr7 nat7 (arr7 nat7 nat7)); mul7
= lam7 (rec7 v07
(lam7 (lam7 (lam7 (app7 (app7 add7 (app7 v17 v07)) v07))))
(lam7 zero7))
fact7 : ∀{Γ} → Tm7 Γ (arr7 nat7 nat7); fact7
= lam7 (rec7 v07 (lam7 (lam7 (app7 (app7 mul7 (suc7 v17)) v07)))
(suc7 zero7))
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj18eqsynthconj4 : forall (lv0 : natural) (lv1 : natural), (@eq natural (mult lv0 lv1) (mult lv1 (plus Zero lv0))).
Admitted.
QuickChick conj18eqsynthconj4.
|
module Test.Int64
import Data.Prim.Int64
import Data.SOP
import Hedgehog
import Test.RingLaws
allInt64 : Gen Int64
allInt64 = int64 (linear (-0x8000000000000000) 0x7fffffffffffffff)
prop_ltMax : Property
prop_ltMax = property $ do
b8 <- forAll allInt64
(b8 <= MaxInt64) === True
prop_ltMin : Property
prop_ltMin = property $ do
b8 <- forAll allInt64
(b8 >= MinInt64) === True
prop_comp : Property
prop_comp = property $ do
[m,n] <- forAll $ np [allInt64, allInt64]
toOrdering (comp m n) === compare m n
export
props : Group
props = MkGroup "Int64" $
[ ("prop_ltMax", prop_ltMax)
, ("prop_ltMin", prop_ltMin)
, ("prop_comp", prop_comp)
] ++ ringProps allInt64
|
{-# OPTIONS --safe --experimental-lossy-unification #-}
module Cubical.Algebra.CommAlgebra.Properties where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Equiv.HalfAdjoint
open import Cubical.Foundations.SIP
open import Cubical.Data.Sigma
open import Cubical.Reflection.StrictEquiv
open import Cubical.Structures.Axioms
open import Cubical.Algebra.Semigroup
open import Cubical.Algebra.Monoid
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.Ring
open import Cubical.Algebra.Algebra
open import Cubical.Algebra.CommAlgebra.Base
private
variable
ℓ ℓ′ : Level
-- An R-algebra is the same as a CommRing A with a CommRingHom φ : R → A
module CommAlgChar (R : CommRing ℓ) where
open Iso
open IsRingHom
open CommRingTheory
CommRingWithHom : Type (ℓ-suc ℓ)
CommRingWithHom = Σ[ A ∈ CommRing ℓ ] CommRingHom R A
toCommAlg : CommRingWithHom → CommAlgebra R ℓ
toCommAlg (A , φ , φIsHom) = ⟨ A ⟩ , ACommAlgStr
where
open CommRingStr (snd A)
ACommAlgStr : CommAlgebraStr R ⟨ A ⟩
CommAlgebraStr.0a ACommAlgStr = 0r
CommAlgebraStr.1a ACommAlgStr = 1r
CommAlgebraStr._+_ ACommAlgStr = _+_
CommAlgebraStr._·_ ACommAlgStr = _·_
CommAlgebraStr.-_ ACommAlgStr = -_
CommAlgebraStr._⋆_ ACommAlgStr r a = (φ r) · a
CommAlgebraStr.isCommAlgebra ACommAlgStr = makeIsCommAlgebra
is-set +Assoc +Rid +Rinv +Comm ·Assoc ·Lid ·Ldist+ ·Comm
(λ _ _ x → cong (λ y → y · x) (pres· φIsHom _ _) ∙ sym (·Assoc _ _ _))
(λ _ _ x → cong (λ y → y · x) (pres+ φIsHom _ _) ∙ ·Ldist+ _ _ _)
(λ _ _ _ → ·Rdist+ _ _ _)
(λ x → cong (λ y → y · x) (pres1 φIsHom) ∙ ·Lid x)
(λ _ _ _ → sym (·Assoc _ _ _))
fromCommAlg : CommAlgebra R ℓ → CommRingWithHom
fromCommAlg A = (CommAlgebra→CommRing A) , φ , φIsHom
where
open CommRingStr (snd R) renaming (_·_ to _·r_) hiding (·Lid)
open CommAlgebraStr (snd A)
open AlgebraTheory (CommRing→Ring R) (CommAlgebra→Algebra A)
φ : ⟨ R ⟩ → ⟨ A ⟩
φ r = r ⋆ 1a
φIsHom : IsRingHom (CommRing→Ring R .snd) φ (CommRing→Ring (CommAlgebra→CommRing A) .snd)
φIsHom = makeIsRingHom (⋆-lid _) (λ _ _ → ⋆-ldist _ _ _)
λ x y → cong (λ a → (x ·r y) ⋆ a) (sym (·Lid _)) ∙ ⋆Dist· _ _ _ _
CommRingWithHomRoundTrip : (Aφ : CommRingWithHom) → fromCommAlg (toCommAlg Aφ) ≡ Aφ
CommRingWithHomRoundTrip (A , φ) = ΣPathP (APath , φPathP)
where
open CommRingStr
-- note that the proofs of the axioms might differ!
APath : fst (fromCommAlg (toCommAlg (A , φ))) ≡ A
fst (APath i) = ⟨ A ⟩
0r (snd (APath i)) = 0r (snd A)
1r (snd (APath i)) = 1r (snd A)
_+_ (snd (APath i)) = _+_ (snd A)
_·_ (snd (APath i)) = _·_ (snd A)
-_ (snd (APath i)) = -_ (snd A)
isCommRing (snd (APath i)) = isProp→PathP (λ i → isPropIsCommRing _ _ _ _ _ )
(isCommRing (snd (fst (fromCommAlg (toCommAlg (A , φ)))))) (isCommRing (snd A)) i
-- this only works because fst (APath i) = fst A definitionally!
φPathP : PathP (λ i → CommRingHom R (APath i)) (snd (fromCommAlg (toCommAlg (A , φ)))) φ
φPathP = RingHomPathP _ _ _ _ _ _ λ i x → ·Rid (snd A) (fst φ x) i
CommAlgRoundTrip : (A : CommAlgebra R ℓ) → toCommAlg (fromCommAlg A) ≡ A
CommAlgRoundTrip A = ΣPathP (refl , AlgStrPathP)
where
open CommAlgebraStr ⦃...⦄
instance
_ = snd A
AlgStrPathP : PathP (λ i → CommAlgebraStr R ⟨ A ⟩) (snd (toCommAlg (fromCommAlg A))) (snd A)
CommAlgebraStr.0a (AlgStrPathP i) = 0a
CommAlgebraStr.1a (AlgStrPathP i) = 1a
CommAlgebraStr._+_ (AlgStrPathP i) = _+_
CommAlgebraStr._·_ (AlgStrPathP i) = _·_
CommAlgebraStr.-_ (AlgStrPathP i) = -_
CommAlgebraStr._⋆_ (AlgStrPathP i) r x = (⋆-lassoc r 1a x ∙ cong (r ⋆_) (·Lid x)) i
CommAlgebraStr.isCommAlgebra (AlgStrPathP i) = isProp→PathP
(λ i → isPropIsCommAlgebra _ _ _ _ _ _ (CommAlgebraStr._⋆_ (AlgStrPathP i)))
(CommAlgebraStr.isCommAlgebra (snd (toCommAlg (fromCommAlg A)))) isCommAlgebra i
CommAlgIso : Iso (CommAlgebra R ℓ) CommRingWithHom
fun CommAlgIso = fromCommAlg
inv CommAlgIso = toCommAlg
rightInv CommAlgIso = CommRingWithHomRoundTrip
leftInv CommAlgIso = CommAlgRoundTrip
|
#include <gsl/span>
namespace engine
{
template <class ElementType, std::ptrdiff_t Extent = gsl::dynamic_extent>
using span = gsl::span<ElementType, Extent>;
}
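// A minimal usage sketch (editor addition, not part of the original header).
// The helper name `sum_ints` is hypothetical; it relies only on the alias
// declared above and on gsl::span's standard begin()/end() interface.
#include <numeric>

namespace engine
{
inline int sum_ints(span<const int> values)
{
    // engine::span views a contiguous range without copying it.
    return std::accumulate(values.begin(), values.end(), 0);
}
}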
|
lemma convex_linear_image_eq [simp]: fixes f :: "'a::real_vector \<Rightarrow> 'b::real_vector" shows "\<lbrakk>linear f; inj f\<rbrakk> \<Longrightarrow> convex (f ` s) \<longleftrightarrow> convex s" |
/-
Copyright (c) 2016 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura, Mario Carneiro, Johannes Hölzl
-/
import algebra.abs
import algebra.order.group.order_iso
import order.min_max
/-!
# Absolute values in ordered groups.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
variables {α : Type*}
open function
section covariant_add_le
section has_neg
/-- `abs a` is the absolute value of `a`. -/
@[to_additive "`abs a` is the absolute value of `a`",
priority 100] -- see Note [lower instance priority]
instance has_inv.to_has_abs [has_inv α] [has_sup α] : has_abs α := ⟨λ a, a ⊔ a⁻¹⟩
@[to_additive] lemma abs_eq_sup_inv [has_inv α] [has_sup α] (a : α) : |a| = a ⊔ a⁻¹ := rfl
variables [has_neg α] [linear_order α] {a b : α}
lemma abs_eq_max_neg : abs a = max a (-a) :=
rfl
lemma abs_choice (x : α) : |x| = x ∨ |x| = -x := max_choice _ _
lemma abs_le' : |a| ≤ b ↔ a ≤ b ∧ -a ≤ b := max_le_iff
lemma le_abs : a ≤ |b| ↔ a ≤ b ∨ a ≤ -b := le_max_iff
lemma le_abs_self (a : α) : a ≤ |a| := le_max_left _ _
lemma neg_le_abs_self (a : α) : -a ≤ |a| := le_max_right _ _
lemma lt_abs : a < |b| ↔ a < b ∨ a < -b := lt_max_iff
theorem abs_le_abs (h₀ : a ≤ b) (h₁ : -a ≤ b) : |a| ≤ |b| :=
(abs_le'.2 ⟨h₀, h₁⟩).trans (le_abs_self b)
lemma abs_by_cases (P : α → Prop) {a : α} (h1 : P a) (h2 : P (-a)) : P (|a|) :=
sup_ind _ _ h1 h2
end has_neg
section add_group
variables [add_group α] [linear_order α]
@[simp] lemma abs_neg (a : α) : | -a| = |a| :=
begin
rw [abs_eq_max_neg, max_comm, neg_neg, abs_eq_max_neg]
end
lemma eq_or_eq_neg_of_abs_eq {a b : α} (h : |a| = b) : a = b ∨ a = -b :=
by simpa only [← h, eq_comm, neg_eq_iff_eq_neg] using abs_choice a
lemma abs_eq_abs {a b : α} : |a| = |b| ↔ a = b ∨ a = -b :=
begin
refine ⟨λ h, _, λ h, _⟩,
{ obtain rfl | rfl := eq_or_eq_neg_of_abs_eq h;
simpa only [neg_eq_iff_eq_neg, neg_inj, or.comm] using abs_choice b },
{ cases h; simp only [h, abs_neg] },
end
lemma abs_sub_comm (a b : α) : |a - b| = |b - a| :=
calc |a - b| = | - (b - a)| : congr_arg _ (neg_sub b a).symm
... = |b - a| : abs_neg (b - a)
variables [covariant_class α α (+) (≤)] {a b c : α}
lemma abs_of_nonneg (h : 0 ≤ a) : |a| = a :=
max_eq_left $ (neg_nonpos.2 h).trans h
lemma abs_of_pos (h : 0 < a) : |a| = a :=
abs_of_nonneg h.le
lemma abs_of_nonpos (h : a ≤ 0) : |a| = -a :=
max_eq_right $ h.trans (neg_nonneg.2 h)
lemma abs_of_neg (h : a < 0) : |a| = -a :=
abs_of_nonpos h.le
lemma abs_le_abs_of_nonneg (ha : 0 ≤ a) (hab : a ≤ b) : |a| ≤ |b| :=
by rwa [abs_of_nonneg ha, abs_of_nonneg (ha.trans hab)]
@[simp] lemma abs_zero : |0| = (0:α) :=
abs_of_nonneg le_rfl
@[simp] lemma abs_pos : 0 < |a| ↔ a ≠ 0 :=
begin
rcases lt_trichotomy a 0 with (ha|rfl|ha),
{ simp [abs_of_neg ha, neg_pos, ha.ne, ha] },
{ simp },
{ simp [abs_of_pos ha, ha, ha.ne.symm] }
end
lemma abs_pos_of_pos (h : 0 < a) : 0 < |a| := abs_pos.2 h.ne.symm
lemma abs_pos_of_neg (h : a < 0) : 0 < |a| := abs_pos.2 h.ne
lemma neg_abs_le_self (a : α) : -|a| ≤ a :=
begin
cases le_total 0 a with h h,
{ calc -|a| = - a : congr_arg (has_neg.neg) (abs_of_nonneg h)
... ≤ 0 : neg_nonpos.mpr h
... ≤ a : h },
{ calc -|a| = - - a : congr_arg (has_neg.neg) (abs_of_nonpos h)
... ≤ a : (neg_neg a).le }
end
lemma add_abs_nonneg (a : α) : 0 ≤ a + |a| :=
begin
rw ←add_right_neg a,
apply add_le_add_left,
exact (neg_le_abs_self a),
end
lemma neg_abs_le_neg (a : α) : -|a| ≤ -a :=
by simpa using neg_abs_le_self (-a)
@[simp] lemma abs_nonneg (a : α) : 0 ≤ |a| :=
(le_total 0 a).elim (λ h, h.trans (le_abs_self a)) (λ h, (neg_nonneg.2 h).trans $ neg_le_abs_self a)
@[simp] lemma abs_abs (a : α) : | |a| | = |a| :=
abs_of_nonneg $ abs_nonneg a
@[simp] lemma abs_eq_zero : |a| = 0 ↔ a = 0 :=
decidable.not_iff_not.1 $ ne_comm.trans $ (abs_nonneg a).lt_iff_ne.symm.trans abs_pos
@[simp] lemma abs_nonpos_iff {a : α} : |a| ≤ 0 ↔ a = 0 :=
(abs_nonneg a).le_iff_eq.trans abs_eq_zero
variable [covariant_class α α (swap (+)) (≤)]
lemma abs_le_abs_of_nonpos (ha : a ≤ 0) (hab : b ≤ a) : |a| ≤ |b| :=
by { rw [abs_of_nonpos ha, abs_of_nonpos (hab.trans ha)], exact neg_le_neg_iff.mpr hab }
lemma abs_lt : |a| < b ↔ - b < a ∧ a < b :=
max_lt_iff.trans $ and.comm.trans $ by rw [neg_lt]
lemma neg_lt_of_abs_lt (h : |a| < b) : -b < a := (abs_lt.mp h).1
lemma lt_of_abs_lt (h : |a| < b) : a < b := (abs_lt.mp h).2
lemma max_sub_min_eq_abs' (a b : α) : max a b - min a b = |a - b| :=
begin
cases le_total a b with ab ba,
{ rw [max_eq_right ab, min_eq_left ab, abs_of_nonpos, neg_sub], rwa sub_nonpos },
{ rw [max_eq_left ba, min_eq_right ba, abs_of_nonneg], rwa sub_nonneg }
end
lemma max_sub_min_eq_abs (a b : α) : max a b - min a b = |b - a| :=
by { rw abs_sub_comm, exact max_sub_min_eq_abs' _ _ }
end add_group
end covariant_add_le
section linear_ordered_add_comm_group
variables [linear_ordered_add_comm_group α] {a b c d : α}
lemma abs_le : |a| ≤ b ↔ - b ≤ a ∧ a ≤ b := by rw [abs_le', and.comm, neg_le]
lemma le_abs' : a ≤ |b| ↔ b ≤ -a ∨ a ≤ b := by rw [le_abs, or.comm, le_neg]
lemma neg_le_of_abs_le (h : |a| ≤ b) : -b ≤ a := (abs_le.mp h).1
lemma le_of_abs_le (h : |a| ≤ b) : a ≤ b := (abs_le.mp h).2
@[to_additive] lemma apply_abs_le_mul_of_one_le' {β : Type*} [mul_one_class β] [preorder β]
[covariant_class β β (*) (≤)] [covariant_class β β (swap (*)) (≤)] {f : α → β} {a : α}
(h₁ : 1 ≤ f a) (h₂ : 1 ≤ f (-a)) :
f (|a|) ≤ f a * f (-a) :=
(le_total a 0).by_cases (λ ha, (abs_of_nonpos ha).symm ▸ le_mul_of_one_le_left' h₁)
(λ ha, (abs_of_nonneg ha).symm ▸ le_mul_of_one_le_right' h₂)
@[to_additive] lemma apply_abs_le_mul_of_one_le {β : Type*} [mul_one_class β] [preorder β]
[covariant_class β β (*) (≤)] [covariant_class β β (swap (*)) (≤)] {f : α → β}
(h : ∀ x, 1 ≤ f x) (a : α) :
f (|a|) ≤ f a * f (-a) :=
apply_abs_le_mul_of_one_le' (h _) (h _)
/--
The **triangle inequality** in `linear_ordered_add_comm_group`s.
-/
lemma abs_add (a b : α) : |a + b| ≤ |a| + |b| :=
abs_le.2 ⟨(neg_add (|a|) (|b|)).symm ▸
add_le_add (neg_le.2 $ neg_le_abs_self _) (neg_le.2 $ neg_le_abs_self _),
add_le_add (le_abs_self _) (le_abs_self _)⟩
lemma abs_add' (a b : α) : |a| ≤ |b| + |b + a| :=
by simpa using abs_add (-b) (b + a)
theorem abs_sub (a b : α) :
|a - b| ≤ |a| + |b| :=
by { rw [sub_eq_add_neg, ←abs_neg b], exact abs_add a _ }
lemma abs_sub_le_iff : |a - b| ≤ c ↔ a - b ≤ c ∧ b - a ≤ c :=
by rw [abs_le, neg_le_sub_iff_le_add, sub_le_iff_le_add', and_comm, sub_le_iff_le_add']
lemma abs_sub_lt_iff : |a - b| < c ↔ a - b < c ∧ b - a < c :=
by rw [abs_lt, neg_lt_sub_iff_lt_add', sub_lt_iff_lt_add', and_comm, sub_lt_iff_lt_add']
lemma sub_le_of_abs_sub_le_left (h : |a - b| ≤ c) : b - c ≤ a :=
sub_le_comm.1 $ (abs_sub_le_iff.1 h).2
lemma sub_le_of_abs_sub_le_right (h : |a - b| ≤ c) : a - c ≤ b :=
sub_le_of_abs_sub_le_left (abs_sub_comm a b ▸ h)
lemma sub_lt_of_abs_sub_lt_left (h : |a - b| < c) : b - c < a :=
sub_lt_comm.1 $ (abs_sub_lt_iff.1 h).2
lemma sub_lt_of_abs_sub_lt_right (h : |a - b| < c) : a - c < b :=
sub_lt_of_abs_sub_lt_left (abs_sub_comm a b ▸ h)
lemma abs_sub_abs_le_abs_sub (a b : α) : |a| - |b| ≤ |a - b| :=
sub_le_iff_le_add.2 $
calc |a| = |a - b + b| : by rw [sub_add_cancel]
... ≤ |a - b| + |b| : abs_add _ _
lemma abs_abs_sub_abs_le_abs_sub (a b : α) : | |a| - |b| | ≤ |a - b| :=
abs_sub_le_iff.2 ⟨abs_sub_abs_le_abs_sub _ _, by rw abs_sub_comm; apply abs_sub_abs_le_abs_sub⟩
lemma abs_eq (hb : 0 ≤ b) : |a| = b ↔ a = b ∨ a = -b :=
begin
refine ⟨eq_or_eq_neg_of_abs_eq, _⟩,
rintro (rfl|rfl); simp only [abs_neg, abs_of_nonneg hb]
end
lemma abs_le_max_abs_abs (hab : a ≤ b) (hbc : b ≤ c) : |b| ≤ max (|a|) (|c|) :=
abs_le'.2
⟨by simp [hbc.trans (le_abs_self c)],
by simp [(neg_le_neg_iff.mpr hab).trans (neg_le_abs_self a)]⟩
lemma min_abs_abs_le_abs_max : min (|a|) (|b|) ≤ |max a b| :=
(le_total a b).elim
(λ h, (min_le_right _ _).trans_eq $ congr_arg _ (max_eq_right h).symm)
(λ h, (min_le_left _ _).trans_eq $ congr_arg _ (max_eq_left h).symm)
lemma min_abs_abs_le_abs_min : min (|a|) (|b|) ≤ |min a b| :=
(le_total a b).elim
(λ h, (min_le_left _ _).trans_eq $ congr_arg _ (min_eq_left h).symm)
(λ h, (min_le_right _ _).trans_eq $ congr_arg _ (min_eq_right h).symm)
lemma abs_max_le_max_abs_abs : |max a b| ≤ max (|a|) (|b|) :=
(le_total a b).elim
(λ h, (congr_arg _ $ max_eq_right h).trans_le $ le_max_right _ _)
(λ h, (congr_arg _ $ max_eq_left h).trans_le $ le_max_left _ _)
lemma abs_min_le_max_abs_abs : |min a b| ≤ max (|a|) (|b|) :=
(le_total a b).elim
(λ h, (congr_arg _ $ min_eq_left h).trans_le $ le_max_left _ _)
(λ h, (congr_arg _ $ min_eq_right h).trans_le $ le_max_right _ _)
lemma eq_of_abs_sub_eq_zero {a b : α} (h : |a - b| = 0) : a = b :=
sub_eq_zero.1 $ abs_eq_zero.1 h
lemma abs_sub_le (a b c : α) : |a - c| ≤ |a - b| + |b - c| :=
calc
|a - c| = |a - b + (b - c)| : by rw [sub_add_sub_cancel]
... ≤ |a - b| + |b - c| : abs_add _ _
lemma abs_add_three (a b c : α) : |a + b + c| ≤ |a| + |b| + |c| :=
(abs_add _ _).trans (add_le_add_right (abs_add _ _) _)
lemma dist_bdd_within_interval {a b lb ub : α} (hal : lb ≤ a) (hau : a ≤ ub)
(hbl : lb ≤ b) (hbu : b ≤ ub) : |a - b| ≤ ub - lb :=
abs_sub_le_iff.2 ⟨sub_le_sub hau hbl, sub_le_sub hbu hal⟩
lemma eq_of_abs_sub_nonpos (h : |a - b| ≤ 0) : a = b :=
eq_of_abs_sub_eq_zero (le_antisymm h (abs_nonneg (a - b)))
end linear_ordered_add_comm_group
|
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{c}ᶜ] c, DifferentiableAt ℂ f z
hc : ContinuousAt f c
⊢ AnalyticAt ℂ f c
[PROOFSTEP]
rcases(nhdsWithin_hasBasis nhds_basis_closedBall _).mem_iff.1 hd with ⟨R, hR0, hRs⟩
[GOAL]
case intro.intro
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{c}ᶜ] c, DifferentiableAt ℂ f z
hc : ContinuousAt f c
R : ℝ
hR0 : 0 < R
hRs : closedBall c R ∩ {c}ᶜ ⊆ {x | (fun z => DifferentiableAt ℂ f z) x}
⊢ AnalyticAt ℂ f c
[PROOFSTEP]
lift R to ℝ≥0 using hR0.le
[GOAL]
case intro.intro.intro
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{c}ᶜ] c, DifferentiableAt ℂ f z
hc : ContinuousAt f c
R : ℝ≥0
hR0 : 0 < ↑R
hRs : closedBall c ↑R ∩ {c}ᶜ ⊆ {x | (fun z => DifferentiableAt ℂ f z) x}
⊢ AnalyticAt ℂ f c
[PROOFSTEP]
replace hc : ContinuousOn f (closedBall c R)
[GOAL]
case hc
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{c}ᶜ] c, DifferentiableAt ℂ f z
hc : ContinuousAt f c
R : ℝ≥0
hR0 : 0 < ↑R
hRs : closedBall c ↑R ∩ {c}ᶜ ⊆ {x | (fun z => DifferentiableAt ℂ f z) x}
⊢ ContinuousOn f (closedBall c ↑R)
[PROOFSTEP]
refine' fun z hz => ContinuousAt.continuousWithinAt _
[GOAL]
case hc
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{c}ᶜ] c, DifferentiableAt ℂ f z
hc : ContinuousAt f c
R : ℝ≥0
hR0 : 0 < ↑R
hRs : closedBall c ↑R ∩ {c}ᶜ ⊆ {x | (fun z => DifferentiableAt ℂ f z) x}
z : ℂ
hz : z ∈ closedBall c ↑R
⊢ ContinuousAt f z
[PROOFSTEP]
rcases eq_or_ne z c with (rfl | hne)
[GOAL]
case hc.inl
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
R : ℝ≥0
hR0 : 0 < ↑R
z : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{z}ᶜ] z, DifferentiableAt ℂ f z
hc : ContinuousAt f z
hRs : closedBall z ↑R ∩ {z}ᶜ ⊆ {x | (fun z => DifferentiableAt ℂ f z) x}
hz : z ∈ closedBall z ↑R
⊢ ContinuousAt f z
case hc.inr
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{c}ᶜ] c, DifferentiableAt ℂ f z
hc : ContinuousAt f c
R : ℝ≥0
hR0 : 0 < ↑R
hRs : closedBall c ↑R ∩ {c}ᶜ ⊆ {x | (fun z => DifferentiableAt ℂ f z) x}
z : ℂ
hz : z ∈ closedBall c ↑R
hne : z ≠ c
⊢ ContinuousAt f z
[PROOFSTEP]
exacts [hc, (hRs ⟨hz, hne⟩).continuousAt]
[GOAL]
case intro.intro.intro
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{c}ᶜ] c, DifferentiableAt ℂ f z
R : ℝ≥0
hR0 : 0 < ↑R
hRs : closedBall c ↑R ∩ {c}ᶜ ⊆ {x | (fun z => DifferentiableAt ℂ f z) x}
hc : ContinuousOn f (closedBall c ↑R)
⊢ AnalyticAt ℂ f c
[PROOFSTEP]
exact
(hasFPowerSeriesOnBall_of_differentiable_off_countable (countable_singleton c) hc
(fun z hz => hRs (diff_subset_diff_left ball_subset_closedBall hz)) hR0).analyticAt
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hs : s ∈ 𝓝 c
⊢ DifferentiableOn ℂ f (s \ {c}) ∧ ContinuousAt f c ↔ DifferentiableOn ℂ f s
[PROOFSTEP]
refine' ⟨_, fun hd => ⟨hd.mono (diff_subset _ _), (hd.differentiableAt hs).continuousAt⟩⟩
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hs : s ∈ 𝓝 c
⊢ DifferentiableOn ℂ f (s \ {c}) ∧ ContinuousAt f c → DifferentiableOn ℂ f s
[PROOFSTEP]
rintro ⟨hd, hc⟩ x hx
[GOAL]
case intro
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hs : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
hc : ContinuousAt f c
x : ℂ
hx : x ∈ s
⊢ DifferentiableWithinAt ℂ f s x
[PROOFSTEP]
rcases eq_or_ne x c with (rfl | hne)
[GOAL]
case intro.inl
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
x : ℂ
hx : x ∈ s
hs : s ∈ 𝓝 x
hd : DifferentiableOn ℂ f (s \ {x})
hc : ContinuousAt f x
⊢ DifferentiableWithinAt ℂ f s x
[PROOFSTEP]
refine' (analyticAt_of_differentiable_on_punctured_nhds_of_continuousAt _ hc).differentiableAt.differentiableWithinAt
[GOAL]
case intro.inl
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
x : ℂ
hx : x ∈ s
hs : s ∈ 𝓝 x
hd : DifferentiableOn ℂ f (s \ {x})
hc : ContinuousAt f x
⊢ ∀ᶠ (z : ℂ) in 𝓝[{x}ᶜ] x, DifferentiableAt ℂ f z
[PROOFSTEP]
refine' eventually_nhdsWithin_iff.2 ((eventually_mem_nhds.2 hs).mono fun z hz hzx => _)
[GOAL]
case intro.inl
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
x : ℂ
hx : x ∈ s
hs : s ∈ 𝓝 x
hd : DifferentiableOn ℂ f (s \ {x})
hc : ContinuousAt f x
z : ℂ
hz : s ∈ 𝓝 z
hzx : z ∈ {x}ᶜ
⊢ DifferentiableAt ℂ f z
[PROOFSTEP]
exact hd.differentiableAt (inter_mem hz (isOpen_ne.mem_nhds hzx))
[GOAL]
case intro.inr
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hs : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
hc : ContinuousAt f c
x : ℂ
hx : x ∈ s
hne : x ≠ c
⊢ DifferentiableWithinAt ℂ f s x
[PROOFSTEP]
simpa only [DifferentiableWithinAt, HasFDerivWithinAt, hne.nhdsWithin_diff_singleton] using hd x ⟨hx, hne⟩
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
⊢ DifferentiableOn ℂ (update f c (limUnder (𝓝[{c}ᶜ] c) f)) s
[PROOFSTEP]
set F : ℂ → E := fun z => (z - c) • f z
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
⊢ DifferentiableOn ℂ (update f c (limUnder (𝓝[{c}ᶜ] c) f)) s
[PROOFSTEP]
suffices DifferentiableOn ℂ F (s \ { c }) ∧ ContinuousAt F c
by
rw [differentiableOn_compl_singleton_and_continuousAt_iff hc, ← differentiableOn_dslope hc, dslope_sub_smul] at this
have hc : Tendsto f (𝓝[≠] c) (𝓝 (deriv F c)) := continuousAt_update_same.mp (this.continuousOn.continuousAt hc)
rwa [hc.limUnder_eq]
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
this : DifferentiableOn ℂ F (s \ {c}) ∧ ContinuousAt F c
⊢ DifferentiableOn ℂ (update f c (limUnder (𝓝[{c}ᶜ] c) f)) s
[PROOFSTEP]
rw [differentiableOn_compl_singleton_and_continuousAt_iff hc, ← differentiableOn_dslope hc, dslope_sub_smul] at this
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
this✝ : DifferentiableOn ℂ (dslope F c) s
this : DifferentiableOn ℂ (update (fun z => f z) c (deriv (fun x => (x - c) • f x) c)) s
⊢ DifferentiableOn ℂ (update f c (limUnder (𝓝[{c}ᶜ] c) f)) s
[PROOFSTEP]
have hc : Tendsto f (𝓝[≠] c) (𝓝 (deriv F c)) := continuousAt_update_same.mp (this.continuousOn.continuousAt hc)
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc✝ : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
this✝ : DifferentiableOn ℂ (dslope F c) s
this : DifferentiableOn ℂ (update (fun z => f z) c (deriv (fun x => (x - c) • f x) c)) s
hc : Tendsto f (𝓝[{c}ᶜ] c) (𝓝 (deriv F c))
⊢ DifferentiableOn ℂ (update f c (limUnder (𝓝[{c}ᶜ] c) f)) s
[PROOFSTEP]
rwa [hc.limUnder_eq]
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
⊢ DifferentiableOn ℂ F (s \ {c}) ∧ ContinuousAt F c
[PROOFSTEP]
refine' ⟨(differentiableOn_id.sub_const _).smul hd, _⟩
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
⊢ ContinuousAt F c
[PROOFSTEP]
rw [← continuousWithinAt_compl_self]
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
⊢ ContinuousWithinAt F {c}ᶜ c
[PROOFSTEP]
have H := ho.tendsto_inv_smul_nhds_zero
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
H : Tendsto (fun x => (x - c)⁻¹⁻¹ • (f x - f c)) (𝓝[{c}ᶜ] c) (𝓝 0)
⊢ ContinuousWithinAt F {c}ᶜ c
[PROOFSTEP]
have H' : Tendsto (fun z => (z - c) • f c) (𝓝[≠] c) (𝓝 (F c)) :=
(continuousWithinAt_id.tendsto.sub tendsto_const_nhds).smul tendsto_const_nhds
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
s : Set ℂ
c : ℂ
hc : s ∈ 𝓝 c
hd : DifferentiableOn ℂ f (s \ {c})
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
F : ℂ → E := fun z => (z - c) • f z
H : Tendsto (fun x => (x - c)⁻¹⁻¹ • (f x - f c)) (𝓝[{c}ᶜ] c) (𝓝 0)
H' : Tendsto (fun z => (z - c) • f c) (𝓝[{c}ᶜ] c) (𝓝 (F c))
⊢ ContinuousWithinAt F {c}ᶜ c
[PROOFSTEP]
simpa [← smul_add, ContinuousWithinAt] using H.add H'
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (z : ℂ) in 𝓝[{c}ᶜ] c, DifferentiableAt ℂ f z
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
⊢ Tendsto f (𝓝[{c}ᶜ] c) (𝓝 (limUnder (𝓝[{c}ᶜ] c) f))
[PROOFSTEP]
rw [eventually_nhdsWithin_iff] at hd
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (x : ℂ) in 𝓝 c, x ∈ {c}ᶜ → DifferentiableAt ℂ f x
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
⊢ Tendsto f (𝓝[{c}ᶜ] c) (𝓝 (limUnder (𝓝[{c}ᶜ] c) f))
[PROOFSTEP]
have : DifferentiableOn ℂ f ({z | z ≠ c → DifferentiableAt ℂ f z} \ { c }) := fun z hz =>
(hz.1 hz.2).differentiableWithinAt
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (x : ℂ) in 𝓝 c, x ∈ {c}ᶜ → DifferentiableAt ℂ f x
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
this : DifferentiableOn ℂ f ({z | z ≠ c → DifferentiableAt ℂ f z} \ {c})
⊢ Tendsto f (𝓝[{c}ᶜ] c) (𝓝 (limUnder (𝓝[{c}ᶜ] c) f))
[PROOFSTEP]
have H := differentiableOn_update_limUnder_of_isLittleO hd this ho
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
f : ℂ → E
c : ℂ
hd : ∀ᶠ (x : ℂ) in 𝓝 c, x ∈ {c}ᶜ → DifferentiableAt ℂ f x
ho : (fun z => f z - f c) =o[𝓝[{c}ᶜ] c] fun z => (z - c)⁻¹
this : DifferentiableOn ℂ f ({z | z ≠ c → DifferentiableAt ℂ f z} \ {c})
H : DifferentiableOn ℂ (update f c (limUnder (𝓝[{c}ᶜ] c) f)) {x | (fun x => x ∈ {c}ᶜ → DifferentiableAt ℂ f x) x}
⊢ Tendsto f (𝓝[{c}ᶜ] c) (𝓝 (limUnder (𝓝[{c}ᶜ] c) f))
[PROOFSTEP]
exact continuousAt_update_same.1 (H.differentiableAt hd).continuousAt
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
⊢ ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = deriv f w₀
[PROOFSTEP]
have hf' : DifferentiableOn ℂ (dslope f w₀) U :=
(differentiableOn_dslope (hU.mem_nhds ((ball_subset_closedBall.trans hc) hw₀))).mpr hf
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
⊢ ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = deriv f w₀
[PROOFSTEP]
have h0 := (hf'.diffContOnCl_ball hc).two_pi_i_inv_smul_circleIntegral_sub_inv_smul hw₀
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
⊢ ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = deriv f w₀
[PROOFSTEP]
rw [← dslope_same, ← h0]
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
⊢ ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) =
(2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z
[PROOFSTEP]
congr 1
[GOAL]
case e_a
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
⊢ (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z
[PROOFSTEP]
trans ∮ z in C(c, R), ((z - w₀) ^ 2)⁻¹ • (f z - f w₀)
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
⊢ (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • (f z - f w₀)
[PROOFSTEP]
have h1 : ContinuousOn (fun z : ℂ => ((z - w₀) ^ 2)⁻¹) (sphere c R) :=
by
refine' ((continuous_id'.sub continuous_const).pow 2).continuousOn.inv₀ fun w hw h => _
exact sphere_disjoint_ball.ne_of_mem hw hw₀ (sub_eq_zero.mp (sq_eq_zero_iff.mp h))
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
⊢ ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
[PROOFSTEP]
refine' ((continuous_id'.sub continuous_const).pow 2).continuousOn.inv₀ fun w hw h => _
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
w : ℂ
hw : w ∈ sphere c R
h : (w - w₀) ^ 2 = 0
⊢ False
[PROOFSTEP]
exact sphere_disjoint_ball.ne_of_mem hw hw₀ (sub_eq_zero.mp (sq_eq_zero_iff.mp h))
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
h1 : ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
⊢ (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • (f z - f w₀)
[PROOFSTEP]
have h2 : CircleIntegrable (fun z : ℂ => ((z - w₀) ^ 2)⁻¹ • f z) c R :=
by
refine' ContinuousOn.circleIntegrable (pos_of_mem_ball hw₀).le _
exact h1.smul (hf.continuousOn.mono (sphere_subset_closedBall.trans hc))
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
h1 : ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
⊢ CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f z) c R
[PROOFSTEP]
refine' ContinuousOn.circleIntegrable (pos_of_mem_ball hw₀).le _
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
h1 : ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
⊢ ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹ • f z) (sphere c R)
[PROOFSTEP]
exact h1.smul (hf.continuousOn.mono (sphere_subset_closedBall.trans hc))
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
h1 : ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
h2 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f z) c R
⊢ (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • (f z - f w₀)
[PROOFSTEP]
have h3 : CircleIntegrable (fun z : ℂ => ((z - w₀) ^ 2)⁻¹ • f w₀) c R :=
ContinuousOn.circleIntegrable (pos_of_mem_ball hw₀).le (h1.smul continuousOn_const)
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
h1 : ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
h2 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f z) c R
h3 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f w₀) c R
⊢ (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • (f z - f w₀)
[PROOFSTEP]
have h4 : (∮ z : ℂ in C(c, R), ((z - w₀) ^ 2)⁻¹) = 0 := by
simpa using circleIntegral.integral_sub_zpow_of_ne (by decide : (-2 : ℤ) ≠ -1) c w₀ R
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
h1 : ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
h2 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f z) c R
h3 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f w₀) c R
⊢ (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹) = 0
[PROOFSTEP]
simpa using circleIntegral.integral_sub_zpow_of_ne (by decide : (-2 : ℤ) ≠ -1) c w₀ R
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
h1 : ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
h2 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f z) c R
h3 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f w₀) c R
⊢ -2 ≠ -1
[PROOFSTEP]
decide
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
h1 : ContinuousOn (fun z => ((z - w₀) ^ 2)⁻¹) (sphere c R)
h2 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f z) c R
h3 : CircleIntegrable (fun z => ((z - w₀) ^ 2)⁻¹ • f w₀) c R
h4 : (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹) = 0
⊢ (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • f z) = ∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • (f z - f w₀)
[PROOFSTEP]
simp only [smul_sub, circleIntegral.integral_sub h2 h3, h4, circleIntegral.integral_smul_const, zero_smul, sub_zero]
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
⊢ (∮ (z : ℂ) in C(c, R), ((z - w₀) ^ 2)⁻¹ • (f z - f w₀)) = ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z
[PROOFSTEP]
refine' circleIntegral.integral_congr (pos_of_mem_ball hw₀).le fun z hz => _
[GOAL]
E : Type u
inst✝² : NormedAddCommGroup E
inst✝¹ : NormedSpace ℂ E
inst✝ : CompleteSpace E
U : Set ℂ
hU : IsOpen U
c w₀ : ℂ
R : ℝ
f : ℂ → E
hc : closedBall c R ⊆ U
hf : DifferentiableOn ℂ f U
hw₀ : w₀ ∈ ball c R
hf' : DifferentiableOn ℂ (dslope f w₀) U
h0 : ((2 * ↑π * I)⁻¹ • ∮ (z : ℂ) in C(c, R), (z - w₀)⁻¹ • dslope f w₀ z) = dslope f w₀ w₀
z : ℂ
hz : z ∈ sphere c R
⊢ ((z - w₀) ^ 2)⁻¹ • (f z - f w₀) = (z - w₀)⁻¹ • dslope f w₀ z
[PROOFSTEP]
simp only [dslope_of_ne, Metric.sphere_disjoint_ball.ne_of_mem hz hw₀, slope, ← smul_assoc, sq, mul_inv, Ne.def,
not_false_iff, vsub_eq_sub, Algebra.id.smul_eq_mul]
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: mgga_exc *)
(* prefix:
mgga_c_bc95_params *params;
assert(p->params != NULL);
params = (mgga_c_bc95_params * )(p->params);
*)
$define lda_c_pw_params
$define lda_c_pw_modified_params
$include "lda_c_pw.mpl"
(* The B97 function g *)
bc95_gpar := (xs, ts) -> ts*Fermi_D(xs, ts)/(K_FACTOR_C*(1 + params_a_css*xs^2)^2):
bc95_gperp := (xs0, xs1) -> 1/(1 + params_a_copp*(xs0^2 + xs1^2)):
(* The parallel and perpendicular components of the energy *)
bc95_fpar := (rs, z, xs0, xs1, ts0, ts1) ->
+ lda_stoll_par(f_pw, rs, z, 1) * bc95_gpar(xs0, ts0)
+ lda_stoll_par(f_pw, rs, -z, -1) * bc95_gpar(xs1, ts1):
bc95_fperp := (rs, z, xs0, xs1) ->
lda_stoll_perp(f_pw, rs, z) * bc95_gperp(xs0, xs1):
f_bc95 := (rs, z, xs0, xs1, ts0, ts1) ->
+ bc95_fpar (rs, z, xs0, xs1, ts0, ts1)
+ bc95_fperp(rs, z, xs0, xs1):
f := (rs, z, xt, xs0, xs1, us0, us1, ts0, ts1) ->
f_bc95(rs, z, xs0, xs1, ts0, ts1):
|
[STATEMENT]
lemma is_sentence_cons: "is_sentence (x#xs) = (is_symbol x \<and> is_sentence xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_sentence (x # xs) = (is_symbol x \<and> is_sentence xs)
[PROOF STEP]
by (auto simp add: is_sentence_def) |
If $S$ is an open, starlike set, $f$ is a continuous function on $S$ that is differentiable on $S$ except for a finite set $k$, $g$ is a closed path in $S$, and $f$ is holomorphic on $S - k$, then $\int_g f(z) dz = 0$. |
import for_mathlib.category_theory.localization.predicate
import category_theory.adjunction.limits
import category_theory.is_connected
import for_mathlib.category_theory.localization.products
import for_mathlib.category_theory.localization.opposite
noncomputable theory
open category_theory category_theory.category
namespace category_theory
namespace limits
def is_terminal.of_equivalence {C D : Type*} [category C] [category D] (e : C ≌ D) {X : C}
(hX : is_terminal X) : is_terminal (e.functor.obj X) :=
begin
change is_limit _,
let e' : functor.empty C ⋙ e.functor ≅ functor.empty D := functor.empty_ext _ _,
equiv_rw (is_limit.postcompose_inv_equiv e' _).symm,
exact is_limit.of_iso_limit (is_limit_of_preserves e.functor hX)
(cones.ext (iso.refl _) (by rintro ⟨⟨⟩⟩)),
end
end limits
@[simps]
instance localization.lifting.of_comp {C D E : Type*} [category C] [category D] [category E]
(L : C ⥤ D) (W : morphism_property C) [L.is_localization W] (F : D ⥤ E) :
localization.lifting L W (L ⋙ F) F := ⟨iso.refl _⟩
section
variables (C : Type*) [category C]
inductive obj_rel : C → C → Prop
| of_hom ⦃X Y : C⦄ (f : X ⟶ Y) : obj_rel X Y
def connected_components := quot (obj_rel C)
variable {C}
def to_connected_components (X : C) : connected_components C :=
quot.mk _ X
variable (C)
class is_preconnected' : Prop :=
(subsingleton_connected_components : subsingleton (connected_components C))
attribute [instance] is_preconnected'.subsingleton_connected_components
class is_connected' extends is_preconnected' C : Prop :=
[is_nonempty : nonempty C]
lemma connected_components.nat_trans_from_eq {D : Type*} [category D]
(X Y : D) (α : (functor.const C).obj X ⟶ (functor.const C).obj Y)
(j j' : C) (h : to_connected_components j = to_connected_components j') :
α.app j = (α.app j' : X ⟶ Y) :=
begin
let β : C → (X ⟶ Y) := λ j, α.app j,
let l : connected_components C → (X ⟶ Y),
{ refine quot.lift β _,
rintro x y ⟨f⟩,
dsimp [β],
have eq := α.naturality f,
dsimp at eq,
rw [id_comp, comp_id] at eq,
rw eq, },
have hl : ∀ (j : C), α.app j = l (to_connected_components j) := λ j, rfl,
simp only [hl, h],
end
lemma nat_trans_from_is_preconnected' {D : Type*} [category D]
[is_preconnected' C] (X Y : D) (α : (functor.const C).obj X ⟶ (functor.const C).obj Y)
(j j' : C) : α.app j = (α.app j' : X ⟶ Y) :=
begin
apply connected_components.nat_trans_from_eq,
apply subsingleton.elim,
end
@[simps]
def connected_components.op_equiv :
connected_components C ≃ connected_components Cᵒᵖ :=
{ to_fun := quot.lift (λ X, to_connected_components (opposite.op X))
(by { rintros X Y ⟨f⟩, symmetry, exact quot.sound ⟨f.op⟩, }),
inv_fun := quot.lift (λ X, to_connected_components (opposite.unop X))
(by { rintros X Y ⟨f⟩, symmetry, exact quot.sound ⟨f.unop⟩, }),
left_inv := by { rintro ⟨X⟩, refl, },
right_inv := by { rintro ⟨X⟩, refl, }, }
variables {C} {D E : Type*} [category D] [category E]
def connected_components.map (F : C ⥤ D) :
connected_components C → connected_components D :=
quot.lift (λ X, to_connected_components (F.obj X))
(by { rintros X Y ⟨f⟩, exact quot.sound ⟨F.map f⟩, })
@[simp]
lemma connected_components.map_id (C : Type*) [category C] :
connected_components.map (𝟭 C) = id := by tidy
@[simp]
lemma connected_components.map_id_apply (x : connected_components C) :
connected_components.map (𝟭 C) x = x :=
by simp only [connected_components.map_id, id.def]
@[simp]
lemma connected_components.map_comp (F : C ⥤ D) (G : D ⥤ E) :
connected_components.map (F ⋙ G) =
connected_components.map G ∘ connected_components.map F := by tidy
@[simp]
lemma connected_components.map_comp_apply (F : C ⥤ D) (G : D ⥤ E) (x : connected_components C) :
connected_components.map (F ⋙ G) x =
connected_components.map G (connected_components.map F x) :=
by simp only [connected_components.map_comp]
lemma connected_components.map_eq_of_nat_trans {F G : C ⥤ D} (τ : F ⟶ G) :
connected_components.map F = connected_components.map G :=
by { ext ⟨X⟩, exact quot.sound ⟨τ.app X⟩, }
lemma connected_components.map_eq_of_nat_trans_apply {F G : C ⥤ D} (τ : F ⟶ G)
(x : connected_components C):
connected_components.map F x = connected_components.map G x :=
by rw connected_components.map_eq_of_nat_trans τ
@[simps]
def connected_components.equiv_of_equivalence (e : C ≌ D) :
connected_components C ≃ connected_components D :=
{ to_fun := connected_components.map e.functor,
inv_fun := connected_components.map e.inverse,
left_inv := λ x, by simpa only [connected_components.map_comp_apply,
connected_components.map_id_apply]
using connected_components.map_eq_of_nat_trans_apply e.unit_iso.inv x,
right_inv := λ x, by simpa only [connected_components.map_comp_apply,
connected_components.map_id_apply]
using connected_components.map_eq_of_nat_trans_apply e.counit_iso.hom x, }
lemma is_preconnected'.of_equivalence (e : C ≌ D) (h : is_preconnected' C) :
is_preconnected' D :=
⟨⟨λ X Y, (connected_components.equiv_of_equivalence e).symm.injective (subsingleton.elim _ _)⟩⟩
lemma is_connected'.of_equivalence (e : C ≌ D) (h : is_connected' C) :
is_connected' D :=
begin
haveI : nonempty D := ⟨e.functor.obj h.is_nonempty.some⟩,
haveI : is_preconnected' D := is_preconnected'.of_equivalence e h.1,
constructor,
end
lemma is_preconnected'.op (h : is_preconnected' C) : is_preconnected' Cᵒᵖ :=
⟨⟨λ X Y, (connected_components.op_equiv C).symm.injective (subsingleton.elim _ _)⟩⟩
lemma is_preconnected'.unop (h : is_preconnected' Cᵒᵖ) : is_preconnected' C :=
⟨⟨λ X Y, (connected_components.op_equiv C).injective (subsingleton.elim _ _)⟩⟩
lemma is_connected'.op (h : is_connected' C) : is_connected' Cᵒᵖ :=
begin
haveI : nonempty Cᵒᵖ := ⟨opposite.op h.is_nonempty.some⟩,
haveI : is_preconnected' Cᵒᵖ := is_preconnected'.op infer_instance,
constructor,
end
lemma is_connected'.unop (h : is_connected' Cᵒᵖ) : is_connected' C :=
begin
haveI : nonempty C := ⟨opposite.unop h.is_nonempty.some⟩,
haveI : is_preconnected' C := is_preconnected'.unop infer_instance,
constructor,
end
end
namespace morphism_property
variables {C : Type*} [category C] (W : morphism_property C)
class multiplicative : Prop :=
(contains_identities [] : W.contains_identities)
(comp [] : W.stable_under_composition)
section
variable [multiplicative W]
@[priority 100]
instance contains_identities_of_multiplicative : W.contains_identities :=
multiplicative.contains_identities _
instance : multiplicative W.op :=
{ contains_identities := (multiplicative.contains_identities W).op,
comp := (multiplicative.comp W).op, }
include W
@[protected, nolint unused_arguments]
structure category :=
(obj : C)
variable {W}
@[ext]
structure category.hom (X Y : W.category) :=
(f : X.obj ⟶ Y.obj)
(hf : W f)
@[simps]
instance : category W.category :=
{ hom := category.hom,
id := λ X,
{ f := 𝟙 _,
hf := contains_identities.id _ _, },
comp := λ X Y Z φ φ',
{ f := φ.f ≫ φ'.f,
hf := multiplicative.comp W φ.f φ'.f φ.hf φ'.hf, }, }
@[simps]
def category.mk_iso {X Y : W.category} (e : X.obj ≅ Y.obj) (h₁ : W e.hom) (h₂ : W e.inv) :
X ≅ Y :=
{ hom := ⟨e.hom, h₁⟩,
inv := ⟨e.inv, h₂⟩, }
end
end morphism_property
namespace functor
variables (C₁ C₂ C₃ : Type*) [category C₁] [category C₂] [category C₃]
(F : C₁ ⥤ C₂) (G : C₂ ⥤ C₃)
@[simps]
def whiskering_left_id : (whiskering_left C₁ C₁ C₃).obj (𝟭 C₁) ≅ 𝟭 _ :=
nat_iso.of_components functor.left_unitor (by tidy)
@[simps]
def whiskering_right_id : (whiskering_right C₁ C₃ C₃).obj (𝟭 C₃) ≅ 𝟭 _ :=
nat_iso.of_components functor.right_unitor (by tidy)
variables {C₁ C₂}
@[simps]
def equivalence_whiskering_left (e : C₁ ≌ C₂) : (C₂ ⥤ C₃) ≌ C₁ ⥤ C₃ :=
{ functor := (whiskering_left _ _ _).obj e.functor,
inverse := (whiskering_left _ _ _).obj e.inverse,
unit_iso := (whiskering_left_id _ _).symm ≪≫ (whiskering_left _ _ C₃).map_iso e.counit_iso.symm,
counit_iso := (whiskering_left _ _ C₃).map_iso e.unit_iso.symm ≪≫ whiskering_left_id _ _,
functor_unit_iso_comp' := λ F, begin
ext X,
dsimp,
simp only [id_comp, comp_id, ← F.map_comp, equivalence.counit_inv_functor_comp, F.map_id],
end, }
instance is_equivalence_whiskering_left [is_equivalence F] :
is_equivalence ((whiskering_left _ _ C₃).obj F) :=
is_equivalence.of_equivalence (equivalence_whiskering_left C₃ (as_equivalence F))
variables {C₂ C₃} (C₁)
@[simps]
def equivalence_whiskering_right (e : C₂ ≌ C₃) : (C₁ ⥤ C₂) ≌ C₁ ⥤ C₃ :=
{ functor := (whiskering_right _ _ _).obj e.functor,
inverse := (whiskering_right _ _ _).obj e.inverse,
unit_iso := (whiskering_right_id C₁ C₂).symm ≪≫ (whiskering_right C₁ _ _).map_iso e.unit_iso,
counit_iso := (whiskering_right C₁ _ _).map_iso e.counit_iso ≪≫ whiskering_right_id C₁ C₃,
functor_unit_iso_comp' := λ F, begin
ext X,
dsimp,
simp,
end, }
instance is_equivalence_whiskering_right [is_equivalence G] :
is_equivalence ((whiskering_right C₁ _ _).obj G) :=
is_equivalence.of_equivalence (equivalence_whiskering_right C₁ (as_equivalence G))
end functor
namespace structured_arrow
variables {C₀ C₁ C₂ C₃ : Type*} [category C₀] [category C₁] [category C₂] [category C₃]
(X₃ X₃' : C₃) (f : X₃' ⟶ X₃) (F : C₁ ⥤ C₂) (eF : C₁ ≌ C₂)
(G G' G'' : C₂ ⥤ C₃) (τ τ' : G ⟶ G') (τ'' : G' ⟶ G'') (eG : G ≅ G')
@[simps]
def whiskering_left : structured_arrow X₃ (F ⋙ G) ⥤ structured_arrow X₃ G :=
{ obj := λ X₂, mk X₂.hom,
map := λ X₂ X₂' φ, hom_mk (F.map φ.right) (w φ), }
variables {X₃ X₃'}
@[simps]
def precomp : structured_arrow X₃ G ⥤ structured_arrow X₃' G :=
{ obj := λ X₂, mk (f ≫ X₂.hom),
map := λ X₂ X₂' φ, hom_mk φ.right (by tidy), }
variables {G G'} (X₃)
@[simps]
def postcomp : structured_arrow X₃ G ⥤ structured_arrow X₃ G' :=
{ obj := λ X₂, mk (X₂.hom ≫ τ.app X₂.right),
map := λ X₂ X₂' φ, hom_mk φ.right begin
dsimp,
simp only [assoc, ← τ.naturality, w_assoc φ],
end, }
variable (G)
@[simps]
def postcomp_id : postcomp X₃ (𝟙 G) ≅ 𝟭 _ :=
nat_iso.of_components (λ X, structured_arrow.iso_mk (iso.refl _) (by tidy)) (by tidy)
variable {G}
@[simps]
def postcomp_comp : postcomp X₃ τ ⋙ postcomp X₃ τ'' ≅
postcomp X₃ (τ ≫ τ'') :=
nat_iso.of_components (λ X, structured_arrow.iso_mk (iso.refl _) (by tidy)) (by tidy)
@[simps]
def postcomp_iso_of_eq (h : τ = τ') : postcomp X₃ τ ≅ postcomp X₃ τ' :=
nat_iso.of_components (λ X, structured_arrow.iso_mk (iso.refl _)
(by { dsimp, rw [G'.map_id, comp_id, h], })) (by tidy)
@[simps]
def postcomp_iso : equivalence (structured_arrow X₃ G) (structured_arrow X₃ G') :=
{ functor := postcomp X₃ eG.hom,
inverse := postcomp X₃ eG.inv,
unit_iso := (postcomp_id X₃ G).symm ≪≫ postcomp_iso_of_eq X₃ _ _ eG.hom_inv_id.symm ≪≫
(postcomp_comp _ _ _ _).symm,
counit_iso := postcomp_comp _ _ _ _ ≪≫ postcomp_iso_of_eq X₃ _ _ eG.inv_hom_id ≪≫
(postcomp_id X₃ G'), }
instance [is_iso τ] : is_equivalence (postcomp X₃ τ) :=
is_equivalence.of_equivalence (postcomp_iso X₃ (as_iso τ))
variable (G)
@[simps]
def whiskering_left_equivalence :
equivalence (structured_arrow X₃ (eF.functor ⋙ G)) (structured_arrow X₃ G) :=
{ functor := whiskering_left X₃ eF.functor G,
inverse := (postcomp_iso X₃ ((functor.left_unitor _).symm ≪≫
iso_whisker_right eF.counit_iso.symm _≪≫ functor.associator _ _ _)).functor ⋙
whiskering_left X₃ eF.inverse (eF.functor ⋙ G),
unit_iso := nat_iso.of_components
(λ Y, structured_arrow.iso_mk (eF.unit_iso.app _) begin
dsimp,
simp only [comp_id, id_comp],
congr' 2,
simpa only [← cancel_mono (eF.counit_iso.hom.app (eF.functor.obj Y.right)),
equivalence.functor_unit_comp, iso.inv_hom_id_app],
end) (by tidy),
counit_iso := nat_iso.of_components
(λ X, structured_arrow.iso_mk (eF.counit_iso.app _) begin
dsimp,
simp only [id_comp, assoc, ← G.map_comp, iso.inv_hom_id_app],
dsimp,
simp only [functor.map_id, comp_id],
end) (by tidy), }
instance [is_equivalence F] : is_equivalence (whiskering_left X₃ F G) :=
is_equivalence.of_equivalence (whiskering_left_equivalence X₃ (functor.as_equivalence F) G)
end structured_arrow
namespace functor
variables {C D H : Type*} [category C] [category D] [category H]
{F : C ⥤ D} (RF RF' : H ⥤ D) (e : RF ≅ RF') {L : C ⥤ H} (α : F ⟶ L ⋙ RF) (α' : F ⟶ L ⋙ RF')
(W : morphism_property C) [L.is_localization W]
class is_right_derived_functor : Prop :=
(is_initial [] : nonempty (limits.is_initial (structured_arrow.mk α :
structured_arrow F ((whiskering_left C H D).obj L))))
variables {RF RF'}
lemma is_right_derived_functor.of_iso [RF.is_right_derived_functor α]
(eq : α' = α ≫ whisker_left L e.hom) : RF'.is_right_derived_functor α' :=
⟨⟨limits.is_initial.of_iso (is_right_derived_functor.is_initial α).some
(structured_arrow.iso_mk e eq.symm)⟩⟩
variables (RF RF')
def is_right_derived_functor_to [RF.is_right_derived_functor α] (G : H ⥤ D) (β : F ⟶ L ⋙ G) :
RF ⟶ G :=
(structured_arrow.proj _ _).map
((functor.is_right_derived_functor.is_initial α).some.to (structured_arrow.mk β))
@[simp, reassoc]
lemma is_right_derived_functor_to_comm [RF.is_right_derived_functor α] (G : H ⥤ D)
(β : F ⟶ L ⋙ G) :
α ≫ whisker_left L (RF.is_right_derived_functor_to α G β) = β :=
structured_arrow.w ((functor.is_right_derived_functor.is_initial α).some.to
(structured_arrow.mk β))
@[simp, reassoc]
lemma is_right_derived_functor_to_comm_app [RF.is_right_derived_functor α] (G : H ⥤ D)
(β : F ⟶ L ⋙ G) (X : C) :
α.app X ≫ (RF.is_right_derived_functor_to α G β).app (L.obj X) = β.app X :=
congr_app (RF.is_right_derived_functor_to_comm α G β) X
lemma is_right_derived_functor_to_ext [RF.is_right_derived_functor α] {G : H ⥤ D}
(γ₁ γ₂ : RF ⟶ G) (hγ : α ≫ whisker_left L γ₁ = α ≫ whisker_left L γ₂) : γ₁ = γ₂ :=
begin
let F' : structured_arrow F ((whiskering_left C H D).obj L) :=
structured_arrow.mk α,
let δ₁ : F' ⟶ structured_arrow.mk (α ≫ whisker_left L γ₁) := structured_arrow.hom_mk γ₁ rfl,
let δ₂ : F' ⟶ structured_arrow.mk (α ≫ whisker_left L γ₁) := structured_arrow.hom_mk γ₂ hγ.symm,
exact (structured_arrow.proj _ _).congr_map
((functor.is_right_derived_functor.is_initial α).some.hom_ext δ₁ δ₂),
end
end functor
namespace nat_trans
variables {C D H : Type*} [category C] [category D] [category H]
{F G G' : C ⥤ D} (τ : F ⟶ G) (τ' : G ⟶ G') {RF RG RG' : H ⥤ D} {L : C ⥤ H}
(α : F ⟶ L ⋙ RF) (β : G ⟶ L ⋙ RG) (γ : G' ⟶ L ⋙ RG')
def right_derived [RF.is_right_derived_functor α] : RF ⟶ RG :=
RF.is_right_derived_functor_to α RG (τ ≫ β)
@[simp]
lemma right_derived_comp [RF.is_right_derived_functor α]
[RG.is_right_derived_functor β] :
nat_trans.right_derived τ α β ≫ nat_trans.right_derived τ' β γ =
nat_trans.right_derived (τ ≫ τ') α γ :=
begin
dsimp only [right_derived],
apply RF.is_right_derived_functor_to_ext α,
simp only [whisker_left_comp, functor.is_right_derived_functor_to_comm_assoc, assoc,
functor.is_right_derived_functor_to_comm],
end
@[simp]
lemma right_derived_id [RF.is_right_derived_functor α] :
nat_trans.right_derived (𝟙 F) α α = 𝟙 RF :=
begin
dsimp only [right_derived],
apply RF.is_right_derived_functor_to_ext α,
simp only [id_comp, functor.is_right_derived_functor_to_comm, whisker_left_id', comp_id],
end
@[simp, reassoc]
lemma right_derived_app [RF.is_right_derived_functor α] (X : C) :
α.app X ≫ (right_derived τ α β).app (L.obj X) = τ.app X ≫ β.app X :=
begin
dsimp only [right_derived],
simp only [functor.is_right_derived_functor_to_comm_app, comp_app],
end
end nat_trans
namespace nat_iso
variables {C D H : Type*} [category C] [category D] [category H]
{F G : C ⥤ D} (e : F ≅ G) {RF RG : H ⥤ D} {L : C ⥤ H}
(α : F ⟶ L ⋙ RF) (β : G ⟶ L ⋙ RG)
@[simps]
def right_derived [RF.is_right_derived_functor α] [RG.is_right_derived_functor β] :
RF ≅ RG :=
{ hom := nat_trans.right_derived e.hom α β,
inv := nat_trans.right_derived e.inv β α, }
instance [RF.is_right_derived_functor α] [RG.is_right_derived_functor β] (τ : F ⟶ G)
[is_iso τ] : is_iso (nat_trans.right_derived τ α β) :=
is_iso.of_iso (nat_iso.right_derived (as_iso τ) α β)
end nat_iso
namespace functor
variables {C D H : Type*} [category C] [category D] [category H]
(F : C ⥤ D) (RF : H ⥤ D) {L : C ⥤ H} (α : F ⟶ L ⋙ RF)
(W : morphism_property C) [L.is_localization W]
class has_right_derived_functor : Prop :=
(has_initial' : limits.has_initial (structured_arrow F ((whiskering_left C _ D).obj W.Q)))
variable (L)
lemma has_right_derived_functor_iff :
has_right_derived_functor F W ↔
limits.has_initial (structured_arrow F ((whiskering_left C H D).obj L)) :=
begin
let Φ := structured_arrow.whiskering_left F ((whiskering_left _ _ _).obj
(localization.equivalence_from_model L W).functor)
((whiskering_left C _ D).obj W.Q),
let Φ' := structured_arrow.postcomp F ((whiskering_left _ _ D).map_iso
(localization.Q_comp_equivalence_from_model_functor_iso L W)).inv,
split,
{ intro h,
haveI := h.has_initial',
exact adjunction.has_colimits_of_shape_of_equivalence (Φ' ⋙ Φ), },
{ introI,
exact ⟨adjunction.has_colimits_of_shape_of_equivalence (inv (Φ' ⋙ Φ))⟩, },
end
lemma is_right_derived_functor.has_right_derived_functor [RF.is_right_derived_functor α] :
F.has_right_derived_functor W :=
begin
rw F.has_right_derived_functor_iff L W,
exact limits.is_initial.has_initial (is_right_derived_functor.is_initial α).some,
end
lemma has_right_derived_functor.has_initial [has_right_derived_functor F W] :
limits.has_initial (structured_arrow F ((whiskering_left C H D).obj L)) :=
(has_right_derived_functor_iff F L W).1 infer_instance
def has_right_derived_functor.initial [has_right_derived_functor F W] :
(structured_arrow F ((whiskering_left C H D).obj L)) :=
begin
haveI := has_right_derived_functor.has_initial F L W,
exact limits.initial _,
end
def right_derived_functor [has_right_derived_functor F W] : H ⥤ D :=
(has_right_derived_functor.initial F L W).right
def right_derived_functor_α [has_right_derived_functor F W] :
F ⟶ L ⋙ F.right_derived_functor L W :=
(has_right_derived_functor.initial F L W).hom
instance right_derived_functor_is_right_derived_functor [has_right_derived_functor F W] :
(F.right_derived_functor L W).is_right_derived_functor (F.right_derived_functor_α L W) :=
⟨⟨begin
haveI := has_right_derived_functor.has_initial F L W,
exact limits.is_initial.of_iso limits.initial_is_initial
(structured_arrow.iso_mk (iso.refl _) (by tidy)),
end⟩⟩
end functor
section
variables {C D : Type*} [category C] [category D]
(W : morphism_property C) (W' : morphism_property D)
structure localizor_morphism :=
(functor : C ⥤ D)
(mapW : ∀ ⦃X Y : C⦄ (f : X ⟶ Y) (hf : W f), W' (functor.map f))
namespace localizor_morphism
variables {W W'} (Φ : localizor_morphism W W')
section
@[simps]
def op : localizor_morphism W.op W'.op :=
{ functor := Φ.functor.op,
mapW := λ X Y f hf, Φ.mapW _ hf, }
variables [morphism_property.multiplicative W] [morphism_property.multiplicative W']
@[simps]
def induced_functor : W.category ⥤ W'.category :=
{ obj := λ X, ⟨Φ.functor.obj X.obj⟩,
map := λ X Y φ,
{ f := Φ.functor.map φ.f,
hf := Φ.mapW φ.f φ.hf, }, }
@[derive category]
def right_resolution (Y : D) := structured_arrow (⟨Y⟩ : W'.category) Φ.induced_functor
@[simps]
def right_resolution.mk {Y : D} (X : C) (f : Y ⟶ Φ.functor.obj X) (hf : W' f) :
Φ.right_resolution Y :=
structured_arrow.mk (⟨f, hf⟩ : (⟨Y⟩ : W'.category) ⟶ Φ.induced_functor.obj ⟨X⟩)
@[derive category]
def left_resolution (Y : D) := costructured_arrow Φ.induced_functor (⟨Y⟩ : W'.category)
@[simps]
def left_resolution.mk {Y : D} (X : C) (f : Φ.functor.obj X ⟶ Y) (hf : W' f) :
Φ.left_resolution Y :=
costructured_arrow.mk (⟨f, hf⟩ : Φ.induced_functor.obj ⟨X⟩ ⟶ (⟨Y⟩ : W'.category))
variable {Φ}
@[simps]
def left_resolution.op {Y : D} (X : Φ.left_resolution Y) :
Φ.op.right_resolution (opposite.op Y) :=
right_resolution.mk Φ.op (opposite.op X.left.1) X.hom.1.op X.hom.2
@[simps]
def right_resolution.unop {Y : D} (X : Φ.op.right_resolution (opposite.op Y)) :
Φ.left_resolution Y :=
left_resolution.mk Φ (opposite.unop X.right.1) X.hom.1.unop X.hom.2
@[simps]
def left_resolution.unop_op {Y : D} (X : Φ.left_resolution Y) :
X.op.unop ≅ X :=
costructured_arrow.iso_mk
(morphism_property.category.mk_iso (iso.refl _)
(morphism_property.contains_identities.id _ _)
(morphism_property.contains_identities.id _ _))
(by { ext, dsimp, simp, })
@[simps]
def right_resolution.op_unop {Y : D} (X : Φ.op.right_resolution (opposite.op Y)) :
X.unop.op ≅ X :=
structured_arrow.iso_mk
(morphism_property.category.mk_iso (iso.refl _)
(morphism_property.contains_identities.id _ _)
(morphism_property.contains_identities.id _ _))
(by { ext, dsimp, simp, })
variable (Φ)
@[simps]
def left_resolution.op_functor (Y : D) :
Φ.left_resolution Y ⥤ (Φ.op.right_resolution (opposite.op Y))ᵒᵖ :=
{ obj := λ X, opposite.op (left_resolution.op X),
map := λ X₁ X₂ f, quiver.hom.op (structured_arrow.hom_mk ⟨f.left.1.op, f.left.2⟩
(by { ext, dsimp, simpa only [← costructured_arrow.w f], })), }
@[simps]
def right_resolution.unop_functor (Y : D) :
(Φ.op.right_resolution (opposite.op Y))ᵒᵖ ⥤ Φ.left_resolution Y :=
{ obj := λ X, (opposite.unop X).unop,
map := λ X₁ X₂ f, costructured_arrow.hom_mk ⟨f.unop.right.1.unop, f.unop.right.2⟩
(by { ext, dsimp, simpa only [← structured_arrow.w f.unop], }), }
@[simps]
def left_resolution.op_equivalence (Y : D) :
Φ.left_resolution Y ≌ (Φ.op.right_resolution (opposite.op Y))ᵒᵖ :=
{ functor := left_resolution.op_functor _ _,
inverse := right_resolution.unop_functor _ _,
unit_iso := nat_iso.of_components (λ X, X.unop_op.symm) (by tidy),
counit_iso := nat_iso.of_components (λ X, ((opposite.unop X).op_unop).symm.op)
(λ X Y f, quiver.hom.unop_inj (by tidy)),
functor_unit_iso_comp' := λ X, quiver.hom.unop_inj (by tidy), }
end
variables {C' D' : Type*} [category C'] [category D'] (L₁ : C ⥤ C') (L₂ : D ⥤ D')
[L₁.is_localization W] [L₂.is_localization W']
abbreviation lift_functor : C' ⥤ D' :=
localization.lift (Φ.functor ⋙ L₂)
(λ X Y f (hf : W f),
by { dsimp, exact localization.inverts L₂ W' _ (Φ.mapW f hf), }) L₁
def fac_functor : L₁ ⋙ Φ.lift_functor L₁ L₂ ≅ Φ.functor ⋙ L₂ :=
localization.fac _ _ _
class is_localization_equivalence : Prop :=
(nonempty_is_equivalence : nonempty (is_equivalence (Φ.lift_functor W.Q W'.Q)))
namespace is_localization_equivalence
lemma iff_aux (C'' D'' : Type*) [category C''] [category D''] (L₁' : C ⥤ C'') (L₂' : D ⥤ D'')
[L₁'.is_localization W] [L₂'.is_localization W']
(h : is_equivalence (Φ.lift_functor L₁ L₂)) : is_equivalence (Φ.lift_functor L₁' L₂') :=
begin
let F₁ : C' ⥤ C'' := localization.lift L₁' (localization.inverts L₁' W) L₁,
let F₂ : D' ⥤ D'' := localization.lift L₂' (localization.inverts L₂' W') L₂,
have e : Φ.lift_functor L₁ L₂ ⋙ F₂ ≅ F₁ ⋙ Φ.lift_functor L₁' L₂' :=
localization.lift_nat_iso L₁ W (L₁ ⋙ Φ.lift_functor L₁ L₂ ⋙ F₂)
(L₁ ⋙ F₁ ⋙ Φ.lift_functor L₁' L₂') _ _ begin
refine (functor.associator _ _ _).symm ≪≫ iso_whisker_right (Φ.fac_functor L₁ L₂) _ ≪≫
functor.associator _ _ _ ≪≫ iso_whisker_left _ (localization.fac _ _ _) ≪≫
(Φ.fac_functor L₁' L₂').symm ≪≫
iso_whisker_right (localization.fac L₁' (localization.inverts L₁' W) L₁).symm _ ≪≫
functor.associator _ _ _,
end,
exact is_equivalence.cancel_comp_left F₁ _ infer_instance
(is_equivalence.of_iso e infer_instance),
end
lemma iff (F : C' ⥤ D') (e : L₁ ⋙ F ≅ Φ.functor ⋙ L₂) :
Φ.is_localization_equivalence ↔ nonempty (is_equivalence F) :=
begin
have h : nonempty (is_equivalence F) ↔ nonempty (is_equivalence (Φ.lift_functor L₁ L₂)),
{ letI : localization.lifting L₁ W (Φ.functor ⋙ L₂) F := ⟨e⟩,
let e' : F ≅ Φ.lift_functor L₁ L₂ :=
localization.lift_nat_iso L₁ W (Φ.functor ⋙ L₂) (Φ.functor ⋙ L₂) _ _ (iso.refl _),
exact ⟨λ h₁, ⟨is_equivalence.of_iso e' h₁.some⟩,
λ h₂, ⟨is_equivalence.of_iso e'.symm h₂.some⟩⟩, },
rw h, clear h,
split,
{ intro h,
exact ⟨(iff_aux Φ _ _ _ _ _ _ h.nonempty_is_equivalence.some)⟩, },
{ intro h,
exact ⟨⟨(iff_aux Φ _ _ _ _ _ _ h.some)⟩⟩, },
end
lemma iff_is_equivalence_lift_functor :
Φ.is_localization_equivalence ↔ nonempty (is_equivalence (Φ.lift_functor L₁ L₂)) :=
is_localization_equivalence.iff Φ L₁ L₂ (Φ.lift_functor L₁ L₂) (Φ.fac_functor L₁ L₂)
lemma iff_is_localization :
Φ.is_localization_equivalence ↔ (Φ.functor ⋙ L₂).is_localization W :=
begin
split,
{ intro h,
rw iff_is_equivalence_lift_functor Φ W.Q L₂ at h,
letI := h.some,
exact functor.is_localization.of_equivalence W.Q W (Φ.functor ⋙ L₂)
(functor.as_equivalence (Φ.lift_functor W.Q L₂)) (Φ.fac_functor _ _), },
{ introI,
rw iff_is_equivalence_lift_functor Φ (Φ.functor ⋙ L₂) L₂,
exact ⟨is_equivalence.of_iso (localization.lifting.uniq (Φ.functor ⋙ L₂) W
(Φ.functor ⋙ L₂) (𝟭 _) _) infer_instance⟩, },
end
instance [hΦ : Φ.is_localization_equivalence] :
is_equivalence (Φ.lift_functor L₁ L₂) :=
((iff_is_equivalence_lift_functor Φ L₁ L₂).mp hΦ).some
instance is_localization_of_is_localization_equivalence [hΦ : Φ.is_localization_equivalence] :
(Φ.functor ⋙ L₂).is_localization W :=
by simpa only [← iff_is_localization Φ L₂] using hΦ
instance op_is_localization_equivalence [hΦ : Φ.is_localization_equivalence] :
Φ.op.is_localization_equivalence :=
begin
rw iff_is_localization Φ W'.Q at hΦ,
rw iff_is_localization Φ.op W'.Q.op,
haveI := hΦ,
change (Φ.functor ⋙ W'.Q).op.is_localization W.op,
apply_instance,
end
end is_localization_equivalence
end localizor_morphism
end
namespace right_derivability_structure
variables {C₀ C H : Type*} [category C] [category C₀] [category H]
{W₀ : morphism_property C₀}
{W : morphism_property C} (Φ : localizor_morphism W₀ W)
[localizor_morphism.is_localization_equivalence Φ]
[morphism_property.multiplicative W₀] [morphism_property.multiplicative W]
structure basic :=
(right_resolution_connected : ∀ (Y : C), is_connected' (Φ.right_resolution Y))
(nonempty_arrow_right_resolution :
∀ ⦃Y₁ Y₂ : C⦄ (f : Y₁ ⟶ Y₂), ∃ (X₁ : Φ.right_resolution Y₁) (X₂ : Φ.right_resolution Y₂)
(f' : X₁.right.obj ⟶ X₂.right.obj), X₁.hom.1 ≫ Φ.functor.map f' = f ≫ X₂.hom.1)
namespace basic
variables {Φ} (β : basic Φ) (L : C ⥤ H) [L.is_localization W]
{D : Type*} [category D]
def some_right_resolution (Y : C) : Φ.right_resolution Y :=
(β.right_resolution_connected Y).is_nonempty.some
variables {F : C ⥤ D} (hF : W₀.is_inverted_by (Φ.functor ⋙ F))
namespace existence_derived_functor
include β hF
def RF : H ⥤ D :=
localization.lift (Φ.functor ⋙ F) hF (Φ.functor ⋙ L)
def ε : (Φ.functor ⋙ L) ⋙ RF β L hF ≅ Φ.functor ⋙ F :=
begin
letI : localization.lifting (Φ.functor ⋙ L) W₀ (Φ.functor ⋙ F) (RF β L hF) :=
localization.lifting_lift _ _ _,
refine localization.lifting.iso (Φ.functor ⋙ L) W₀ _ _,
end
def α' (X : C) : (functor.const (Φ.right_resolution X)).obj (F.obj X) ⟶
(functor.const (Φ.right_resolution X)).obj ((RF β L hF).obj (L.obj X)) :=
{ app := λ X₀, F.map X₀.hom.1 ≫ (ε β L hF).inv.app _ ≫
(RF β L hF).map (localization.iso_of_hom L W _ X₀.hom.2).inv,
naturality' := λ X₀ X₀' φ, begin
dsimp,
simp only [functor.map_inv, id_comp, comp_id],
have eq₁ := φ.w,
have eq₂ := (ε β L hF).inv.naturality φ.right.1,
dsimp at eq₁ eq₂,
rw id_comp at eq₁,
rw eq₁,
dsimp,
rw [functor.map_comp, assoc, reassoc_of eq₂],
congr' 2,
simp only [functor.map_comp],
erw is_iso.comp_inv_eq, -- should be tidied
rw is_iso.inv_hom_id_assoc,
end, }
def α_app (X : C) : F.obj X ⟶ (RF β L hF).obj (L.obj X) :=
(α' β L hF X).app (β.some_right_resolution X)
lemma α_app_eq {X : C} (X₀ : Φ.right_resolution X) :
(α_app β L hF) X = (α' β L hF X).app X₀ :=
begin
haveI := β.right_resolution_connected X,
apply nat_trans_from_is_preconnected',
end
@[simps]
def α : F ⟶ L ⋙ RF β L hF :=
{ app := λ X, (α_app β L hF) X,
naturality' := λ Y₁ Y₂ f, begin
obtain ⟨X₁, X₂, f', fac⟩ := β.nonempty_arrow_right_resolution f,
rw [α_app_eq β L hF X₁, α_app_eq β L hF X₂],
dsimp [α'],
have eq₁ := F.congr_map fac,
have eq₂ := (ε β L hF).inv.naturality f',
simp only [functor.map_comp] at eq₁,
dsimp at eq₂,
simp only [assoc, ← reassoc_of eq₁, reassoc_of eq₂, ← functor.map_comp],
congr' 3,
rw [is_iso.eq_inv_comp, ← L.map_comp_assoc, fac, L.map_comp, assoc,
is_iso.hom_inv_id, comp_id],
end, }
instance (X₀ : C₀) : is_iso ((α β L hF).app (Φ.functor.obj X₀)) :=
begin
let X₀' := localizor_morphism.right_resolution.mk Φ X₀ (𝟙 _)
(morphism_property.contains_identities.id W _),
dsimp [α],
rw α_app_eq β L hF (localizor_morphism.right_resolution.mk Φ X₀ (𝟙 _)
(morphism_property.contains_identities.id W _)),
dsimp [α'],
simp only [functor.map_id, is_iso.inv_id, comp_id, id_comp],
apply_instance,
end
@[simps]
def RF' : structured_arrow F ((whiskering_left C H D).obj L) :=
structured_arrow.mk (α β L hF)
instance is_iso_RF'_hom_app (X₀ : C₀) :
is_iso ((RF' β L hF).hom.app (Φ.functor.obj X₀)) :=
(infer_instance : is_iso ((α β L hF).app (Φ.functor.obj X₀)))
instance (G : structured_arrow F ((whiskering_left C H D).obj L)) :
subsingleton (RF' β L hF ⟶ G) :=
⟨λ φ₁ φ₂, begin
apply structured_arrow.ext,
apply localization.nat_trans_ext (Φ.functor ⋙ L) W₀,
intro X₀,
have eq₁ := congr_app φ₁.w (Φ.functor.obj X₀),
have eq₂ := congr_app φ₂.w (Φ.functor.obj X₀),
dsimp at eq₁ eq₂ ⊢,
rw [id_comp] at eq₁ eq₂,
rw [← cancel_epi ((α β L hF).app (Φ.functor.obj X₀))],
dsimp,
rw [← eq₁, eq₂],
end⟩
def RF_τ' (G : structured_arrow F ((whiskering_left C H D).obj L)) :
RF β L hF ⟶ G.right :=
localization.lift_nat_trans (Φ.functor ⋙ L) W₀ _ _ _ _
((ε β L hF).hom ≫ whisker_left _ G.hom ≫ (functor.associator _ _ _).inv)
@[simp]
lemma RF_τ'_app_eq (G : structured_arrow F ((whiskering_left C H D).obj L)) (X₀ : C₀) :
(RF_τ' β L hF G).app (L.obj (Φ.functor.obj X₀)) =
(ε β L hF).hom.app X₀ ≫ G.hom.app (Φ.functor.obj X₀) :=
begin
dsimp [RF_τ'],
erw localization.lift_nat_trans_app,
simp only [localization.lifting.of_comp_iso, iso.refl_hom, nat_trans.id_app,
nat_trans.comp_app, whisker_left_app, functor.associator_inv_app,
comp_id, iso.refl_inv, assoc],
erw id_comp,
end
def RF_τ (G : structured_arrow F ((whiskering_left C H D).obj L)) :
RF' β L hF ⟶ G :=
begin
refine structured_arrow.hom_mk (RF_τ' β L hF G) _,
ext X,
let X₀ := β.some_right_resolution X,
have eq := (RF_τ' β L hF G).naturality (L.map X₀.hom.f),
haveI : is_iso (L.map X₀.hom.f) := localization.inverts L W _ X₀.hom.hf,
dsimp at ⊢ eq,
simp only [← cancel_mono (G.right.map (L.map X₀.hom.f)), assoc, ← eq, RF_τ'_app_eq,
α_app_eq β L hF X₀, α', ← functor.map_comp_assoc],
erw [is_iso.inv_hom_id, functor.map_id, id_comp, iso.inv_hom_id_app_assoc, G.hom.naturality],
refl,
end
instance (G : structured_arrow F ((whiskering_left C H D).obj L)) :
unique (RF' β L hF ⟶ G) :=
unique_of_subsingleton (RF_τ β L hF G)
lemma is_initial_RF' : limits.is_initial (RF' β L hF) := limits.is_initial.of_unique _
instance RF_is_right_derived_functor :
(RF β L hF).is_right_derived_functor (α _ _ _) :=
⟨⟨is_initial_RF' β L hF⟩⟩
end existence_derived_functor
variable (F)
open existence_derived_functor
/-
The following lemma is a consequence of Lemma 6.5 of
_Structures de dérivabilité_ by Bruno Kahn, Georges Maltsiniotis,
Advances in Mathematics 218 (2008).
-/
lemma existence_derived_functor : F.has_right_derived_functor W :=
functor.is_right_derived_functor.has_right_derived_functor F (RF β W.Q hF) W.Q (α _ _ _) W
include β hF
lemma is_iso_app (F' : H ⥤ D) (α' : F ⟶ L ⋙ F') [F'.is_right_derived_functor α'] (X₀ : C₀) :
is_iso (α'.app (Φ.functor.obj X₀)) :=
begin
have h := nat_trans.right_derived_app (𝟙 F) (α β L hF) α' (Φ.functor.obj X₀),
rw [nat_trans.id_app, id_comp] at h,
rw ← h,
apply_instance,
end
end basic
end right_derivability_structure
namespace functor
variables {C D H : Type*} [category C] [category D] [category H]
{F : C ⥤ D} (LF LF' : H ⥤ D) (e : LF ≅ LF') {L : C ⥤ H} (α : L ⋙ LF ⟶ F) (α' : L ⋙ LF' ⟶ F)
(W : morphism_property C) [L.is_localization W]
class is_left_derived_functor : Prop :=
(is_terminal [] : nonempty (limits.is_terminal (costructured_arrow.mk α :
costructured_arrow ((whiskering_left C H D).obj L) F)))
variables {LF LF'}
lemma is_left_derived_functor.of_iso [LF.is_left_derived_functor α]
(eq : α' = whisker_left L e.inv ≫ α) : LF'.is_left_derived_functor α' :=
⟨⟨limits.is_terminal.of_iso (is_left_derived_functor.is_terminal α).some
(costructured_arrow.iso_mk e (by { dsimp, rw eq, ext, simp only [nat_trans.comp_app,
whisker_left_app, iso.hom_inv_id_app_assoc], }))⟩⟩
variables (LF LF')
def is_left_derived_functor_from [LF.is_left_derived_functor α] (G : H ⥤ D) (β : L ⋙ G ⟶ F) :
G ⟶ LF :=
(costructured_arrow.proj _ _).map
((functor.is_left_derived_functor.is_terminal α).some.from (costructured_arrow.mk β))
@[simp, reassoc]
lemma is_left_derived_functor_from_comm [LF.is_left_derived_functor α] (G : H ⥤ D)
(β : L ⋙ G ⟶ F) :
whisker_left L (LF.is_left_derived_functor_from α G β) ≫ α = β :=
costructured_arrow.w ((functor.is_left_derived_functor.is_terminal α).some.from
(costructured_arrow.mk β))
@[simp, reassoc]
lemma is_left_derived_functor_from_comm_app [LF.is_left_derived_functor α] (G : H ⥤ D)
(β : L ⋙ G ⟶ F) (X : C) :
(LF.is_left_derived_functor_from α G β).app (L.obj X) ≫ α.app X = β.app X :=
congr_app (LF.is_left_derived_functor_from_comm α G β) X
lemma is_left_derived_functor_from_ext [LF.is_left_derived_functor α] {G : H ⥤ D}
(γ₁ γ₂ : G ⟶ LF) (hγ : whisker_left L γ₁ ≫ α = whisker_left L γ₂ ≫ α) : γ₁ = γ₂ :=
begin
let F' : costructured_arrow ((whiskering_left C H D).obj L) F :=
costructured_arrow.mk α,
let G' : costructured_arrow ((whiskering_left C H D).obj L) F :=
@costructured_arrow.mk _ _ _ _ F G ((whiskering_left C H D).obj L) (whisker_left L γ₁ ≫ α),
let δ₁ : G' ⟶ F' := costructured_arrow.hom_mk γ₁ rfl,
let δ₂ : G' ⟶ F' := costructured_arrow.hom_mk γ₂ hγ.symm,
exact (costructured_arrow.proj _ _).congr_map
((functor.is_left_derived_functor.is_terminal α).some.hom_ext δ₁ δ₂),
end
end functor
namespace nat_trans
variables {C D H : Type*} [category C] [category D] [category H]
{F G G' : C ⥤ D} (τ : F ⟶ G) (τ' : G ⟶ G') {LF LG LG' : H ⥤ D} {L : C ⥤ H}
(α : L ⋙ LF ⟶ F) (β : L ⋙ LG ⟶ G) (γ : L ⋙ LG' ⟶ G')
def left_derived [LG.is_left_derived_functor β] : LF ⟶ LG :=
LG.is_left_derived_functor_from β LF (α ≫ τ)
@[simp]
lemma left_derived_comp [LG.is_left_derived_functor β]
[LG'.is_left_derived_functor γ] :
nat_trans.left_derived τ α β ≫ nat_trans.left_derived τ' β γ =
nat_trans.left_derived (τ ≫ τ') α γ :=
begin
dsimp only [left_derived],
apply LG'.is_left_derived_functor_from_ext γ,
simp only [whisker_left_comp, assoc, functor.is_left_derived_functor_from_comm,
functor.is_left_derived_functor_from_comm_assoc],
end
@[simp]
lemma left_derived_id [LF.is_left_derived_functor α] :
nat_trans.left_derived (𝟙 F) α α = 𝟙 LF :=
begin
dsimp only [left_derived],
apply LF.is_left_derived_functor_from_ext α,
simp only [comp_id, functor.is_left_derived_functor_from_comm, whisker_left_id', id_comp],
end
@[simp, reassoc]
lemma left_derived_app [LG.is_left_derived_functor β] (X : C) :
(left_derived τ α β).app (L.obj X) ≫ β.app X = α.app X ≫ τ.app X :=
begin
dsimp only [left_derived],
simp only [functor.is_left_derived_functor_from_comm_app, comp_app],
end
end nat_trans
namespace nat_iso
variables {C D H : Type*} [category C] [category D] [category H]
{F G : C ⥤ D} (e : F ≅ G) {LF LG : H ⥤ D} {L : C ⥤ H}
(α : L ⋙ LF ⟶ F) (β : L ⋙ LG ⟶ G)
@[simps]
def left_derived [LF.is_left_derived_functor α] [LG.is_left_derived_functor β] :
LF ≅ LG :=
{ hom := nat_trans.left_derived e.hom α β,
inv := nat_trans.left_derived e.inv β α, }
instance [LF.is_left_derived_functor α] [LG.is_left_derived_functor β] (τ : F ⟶ G)
[is_iso τ] : is_iso (nat_trans.left_derived τ α β) :=
is_iso.of_iso (nat_iso.left_derived (as_iso τ) α β)
end nat_iso
namespace functor
variables {C D H : Type*} [category C] [category D] [category H]
(F : C ⥤ D) (LF : H ⥤ D) (L : C ⥤ H) (α : L ⋙ LF ⟶ F)
(W : morphism_property C) [L.is_localization W]
class has_left_derived_functor : Prop :=
(has_terminal' : limits.has_terminal (costructured_arrow ((whiskering_left C _ D).obj W.Q) F))
namespace costructured_arrow_equivalence_op
@[simps]
def functor : (costructured_arrow ((whiskering_left C H D).obj L) F) ⥤
(structured_arrow F.op ((whiskering_left Cᵒᵖ Hᵒᵖ Dᵒᵖ).obj L.op))ᵒᵖ :=
{ obj := λ X, opposite.op (structured_arrow.mk
(show F.op ⟶ ((whiskering_left Cᵒᵖ Hᵒᵖ Dᵒᵖ).obj L.op).obj X.left.op,
by exact (functor.op_hom _ _).map X.hom.op)),
map := λ X₁ X₂ f, quiver.hom.op
(structured_arrow.hom_mk ((functor.op_hom H D).map (quiver.hom.op f.left))
(by { rw ← costructured_arrow.w f, refl, })), }
@[simps]
def inverse : (structured_arrow F.op ((whiskering_left Cᵒᵖ Hᵒᵖ Dᵒᵖ).obj L.op))ᵒᵖ ⥤
(costructured_arrow ((whiskering_left C H D).obj L) F) :=
{ obj := λ X, costructured_arrow.mk
(show ((whiskering_left C H D).obj L).obj X.unop.right.unop ⟶ F,
by exact ((functor.op_inv C D).map X.unop.hom).unop ≫ F.op_unop_iso.hom),
map := λ X₁ X₂ f, costructured_arrow.hom_mk (((functor.op_inv _ _).map f.unop.right).unop)
(by { rw ← structured_arrow.w f.unop, dsimp, ext, tidy, }), }
@[simps]
def unit_iso : 𝟭 _ ≅ functor F L ⋙ inverse F L :=
nat_iso.of_components (λ X, costructured_arrow.iso_mk (functor.op_unop_iso X.left).symm
(by { ext, dsimp, tidy, })) (by tidy)
@[simps]
def counit_iso : inverse F L ⋙ functor F L ≅ 𝟭 _ :=
nat_iso.of_components (λ X, begin
change opposite.op (opposite.unop _) ≅ opposite.op (opposite.unop _),
apply iso.op,
refine structured_arrow.iso_mk (functor.unop_op_iso _).symm _,
ext, dsimp, tidy,
end) (λ X Y f, quiver.hom.unop_inj (by { dsimp, tidy, }))
end costructured_arrow_equivalence_op
def costructured_arrow_equivalence_op :
(costructured_arrow ((whiskering_left C H D).obj L) F) ≌
(structured_arrow F.op ((whiskering_left Cᵒᵖ Hᵒᵖ Dᵒᵖ).obj L.op))ᵒᵖ :=
{ functor := costructured_arrow_equivalence_op.functor _ _,
inverse := costructured_arrow_equivalence_op.inverse _ _,
unit_iso := costructured_arrow_equivalence_op.unit_iso _ _,
counit_iso := costructured_arrow_equivalence_op.counit_iso _ _,
functor_unit_iso_comp' := λ X, quiver.hom.unop_inj begin
dsimp [structured_arrow.iso_mk, structured_arrow.hom_mk, comma.iso_mk],
tidy,
end, }
variable (L)
lemma has_left_derived_functor_iff_op :
has_left_derived_functor F W ↔ has_right_derived_functor F.op W.op :=
begin
have h : F.has_left_derived_functor W ↔
limits.has_terminal (costructured_arrow ((whiskering_left C _ D).obj W.Q) F) :=
⟨λ h, h.1, λ h, ⟨h⟩⟩,
rw [h, has_right_derived_functor_iff F.op W.Q.op W.op],
have e := costructured_arrow_equivalence_op F W.Q,
split,
{ introI,
haveI : limits.has_terminal
(structured_arrow F.op ((whiskering_left Cᵒᵖ (W.localization)ᵒᵖ Dᵒᵖ).obj W.Q.op))ᵒᵖ :=
adjunction.has_limits_of_shape_of_equivalence e.inverse,
exact limits.has_initial_of_has_terminal_op, },
{ introI,
exact adjunction.has_limits_of_shape_of_equivalence e.functor, },
end
lemma has_left_derived_functor_iff :
has_left_derived_functor F W ↔
limits.has_terminal (costructured_arrow ((whiskering_left C H D).obj L) F) :=
begin
rw [has_left_derived_functor_iff_op, has_right_derived_functor_iff F.op L.op W.op],
have e := costructured_arrow_equivalence_op F L,
split,
{ introI,
exact adjunction.has_limits_of_shape_of_equivalence e.functor, },
{ introI,
haveI : limits.has_terminal
(structured_arrow F.op ((whiskering_left Cᵒᵖ Hᵒᵖ Dᵒᵖ).obj L.op))ᵒᵖ :=
adjunction.has_limits_of_shape_of_equivalence e.inverse,
exact limits.has_initial_of_has_terminal_op, },
end
lemma is_left_derived_functor.has_left_derived_functor [LF.is_left_derived_functor α] :
F.has_left_derived_functor W :=
begin
rw F.has_left_derived_functor_iff L W,
exact limits.is_terminal.has_terminal (is_left_derived_functor.is_terminal α).some,
end
variables {F L LF F α}
lemma is_left_derived_functor.op (hα : LF.is_left_derived_functor α) :
@is_right_derived_functor _ _ _ _ _ _ F.op LF.op L.op ((functor.op_hom _ _).map α.op) :=
is_right_derived_functor.mk
(nonempty.intro (limits.initial_unop_of_terminal
(limits.is_terminal.of_equivalence (costructured_arrow_equivalence_op F L)
hα.is_terminal.some)))
variables (F L LF F α)
lemma has_left_derived_functor.has_terminal [has_left_derived_functor F W] :
limits.has_terminal (costructured_arrow ((whiskering_left C H D).obj L) F) :=
(has_left_derived_functor_iff F L W).1 infer_instance
def has_left_derived_functor.initial [has_left_derived_functor F W] :
(costructured_arrow ((whiskering_left C H D).obj L) F) :=
begin
haveI := has_left_derived_functor.has_terminal F L W,
exact limits.terminal _,
end
def left_derived_functor [has_left_derived_functor F W] : H ⥤ D :=
(has_left_derived_functor.initial F L W).left
def left_derived_functor_α [has_left_derived_functor F W] :
L ⋙ F.left_derived_functor L W ⟶ F :=
(has_left_derived_functor.initial F L W).hom
instance left_derived_functor_is_left_derived_functor [has_left_derived_functor F W] :
(F.left_derived_functor L W).is_left_derived_functor (F.left_derived_functor_α L W) :=
⟨⟨begin
haveI := has_left_derived_functor.has_terminal F L W,
exact limits.is_terminal.of_iso limits.terminal_is_terminal
(costructured_arrow.iso_mk (iso.refl _) (by tidy)),
end⟩⟩
end functor
namespace left_derivability_structure
variables {C₀ C H : Type*} [category C] [category C₀] [category H]
{W₀ : morphism_property C₀}
{W : morphism_property C} (L : C ⥤ H) [L.is_localization W] (Φ : localizor_morphism W₀ W)
[localizor_morphism.is_localization_equivalence Φ]
[morphism_property.multiplicative W₀] [morphism_property.multiplicative W]
structure basic :=
(left_resolution_connected : ∀ (Y : C), is_connected' (Φ.left_resolution Y))
(nonempty_arrow_left_resolution :
∀ ⦃Y₁ Y₂ : C⦄ (f : Y₁ ⟶ Y₂), ∃ (X₁ : Φ.left_resolution Y₁) (X₂ : Φ.left_resolution Y₂)
(f' : X₁.left.obj ⟶ X₂.left.obj), Φ.functor.map f' ≫ X₂.hom.1 = X₁.hom.1 ≫ f)
namespace basic
variables {L Φ}
def op (β : basic Φ) : right_derivability_structure.basic Φ.op :=
{ right_resolution_connected := λ Y, (is_connected'.of_equivalence
(localizor_morphism.left_resolution.op_equivalence Φ (opposite.unop Y))
(β.left_resolution_connected (opposite.unop Y))).unop,
nonempty_arrow_right_resolution := λ Y₁ Y₂ f, begin
obtain ⟨X₁, X₂, f', fac⟩ := β.nonempty_arrow_left_resolution f.unop,
exact ⟨X₂.op, X₁.op, f'.op, quiver.hom.unop_inj fac⟩,
end, }
variables (β : basic Φ) {D : Type*} [category D] (F : C ⥤ D)
(hF : W₀.is_inverted_by (Φ.functor ⋙ F))
include β hF
lemma existence_derived_functor : F.has_left_derived_functor W :=
by simpa only [functor.has_left_derived_functor_iff_op]
using β.op.existence_derived_functor F.op hF.op
lemma is_iso_app (F' : H ⥤ D) (α' : L ⋙ F' ⟶ F) [hα' : F'.is_left_derived_functor α'] (X₀ : C₀) :
is_iso (α'.app (Φ.functor.obj X₀)) :=
begin
suffices : is_iso (α'.app (Φ.functor.obj X₀)).op,
{ haveI := this,
exact is_iso.of_iso ((as_iso (α'.app (Φ.functor.obj X₀)).op).unop), },
let α'' : F.op ⟶ L.op ⋙ F'.op := (functor.op_hom C D).map α'.op,
haveI : F'.op.is_right_derived_functor α'' := hα'.op,
exact β.op.is_iso_app L.op F.op hF.op F'.op α'' (opposite.op X₀),
end
end basic
end left_derivability_structure
end category_theory
|
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright 2011-2012 Vicente J. Botet Escriba
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/thread for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_THREAD_DETAIL_MEMORY_HPP
#define BOOST_THREAD_DETAIL_MEMORY_HPP
#include <boost/container/allocator_traits.hpp>
#include <boost/container/scoped_allocator.hpp>
#include <boost/config.hpp>
namespace boost
{
namespace thread_detail
{
template <class _Alloc>
class allocator_destructor
{
typedef container::allocator_traits<_Alloc> alloc_traits;
public:
typedef typename alloc_traits::pointer pointer;
typedef typename alloc_traits::size_type size_type;
private:
_Alloc& alloc_;
size_type s_;
public:
allocator_destructor(_Alloc& a, size_type s)BOOST_NOEXCEPT
: alloc_(a), s_(s)
{}
void operator()(pointer p)BOOST_NOEXCEPT
{
alloc_traits::deallocate(alloc_, p, s_);
}
};
} //namespace thread_detail
typedef container::allocator_arg_t allocator_arg_t;
BOOST_CONSTEXPR allocator_arg_t allocator_arg = {};
template <class T, class Alloc>
struct uses_allocator: public container::uses_allocator<T, Alloc>
{
};
} // namespace boost
#endif // BOOST_THREAD_DETAIL_MEMORY_HPP
|
State Before: 𝓕 : Type u_1
𝕜 : Type ?u.369462
α : Type ?u.369465
ι : Type ?u.369468
κ : Type ?u.369471
E : Type u_2
F : Type u_3
G : Type ?u.369480
inst✝³ : SeminormedGroup E
inst✝² : SeminormedGroup F
inst✝¹ : SeminormedGroup G
s : Set E
a a₁ a₂ b b₁ b₂ : E
r r₁ r₂ : ℝ
inst✝ : MonoidHomClass 𝓕 E F
f : 𝓕
K : ℝ≥0
h : ∀ (x : E), ‖x‖ ≤ ↑K * ‖↑f x‖
x y : E
⊢ dist x y ≤ ↑K * dist (↑f x) (↑f y)
State After: no goals
Tactic: simpa only [dist_eq_norm_div, map_div] using h (x / y) |
(*
Copyright (C) 2019 Susi Lehtola
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
(* prefix:
gga_c_chachiyo_params *params;
assert(p->params != NULL);
params = (gga_c_chachiyo_params * )(p->params);
*)
(* Functional is based on Chachiyo correlation with modified spin scaling *)
$include "lda_c_chachiyo_mod.mpl"
(* Reduced gradient parameter *)
cha_t := (rs, xt) -> (Pi/3)^(1/6) / 4 * n_total(rs)^(1/6) * xt:
(* The full functional that agrees with the given reference values is *)
f_chachiyo_gga := (rs, z, xt, xs0, xs1) -> f_chachiyo(rs, z) * (1 + cha_t(rs, xt)^2)^(params_a_h / f_chachiyo(rs, z)):
f := (rs, z, xt, xs0, xs1) -> f_chachiyo_gga(rs, z, xt, xs0, xs1):
|
using BinaryBuilder, Pkg
name = "QuantumEspresso"
version = v"7.0.0"
sources = [
ArchiveSource("https://gitlab.com/QEF/q-e/-/archive/qe-7.0/q-e-qe-7.0.tar.gz",
"85beceb1aaa1678a49e774c085866d4612d9d64108e0ac49b23152c8622880ee"),
DirectorySource("bundled"),
]
# Bash recipe for building across all platforms
script = raw"""
cd q-e-qe-*
atomic_patch -p1 ../patches/0000-pass-host-to-configure.patch
export BLAS_LIBS="-L${libdir} -lopenblas"
export LAPACK_LIBS="-L${libdir} -lopenblas"
export FFTW_INCLUDE=${includedir}
export FFT_LIBS="-L${libdir} -lfftw3"
export FC=mpif90
export CC=mpicc
export LD=
flags=(--enable-parallel=yes)
if [ "${nbits}" == 64 ]; then
# Enable Libxc support only on 64-bit platforms
atomic_patch -p1 ../patches/0001-libxc-prefix.patch
flags+=(--with-libxc=yes --with-libxc-prefix=${prefix})
fi
if [[ "${target}" == powerpc64le-linux-* \
|| "${bb_full_target}" == armv6l-linux-* \
|| "${target}" == aarch64-apple-darwin* ]]; then
# No scalapack binary available on these platforms
flags+=(--with-scalapack=no)
else
export SCALAPACK_LIBS="-L${libdir} -lscalapack"
flags+=(--with-scalapack=yes)
fi
./configure --prefix=${prefix} --build=${MACHTYPE} --host=${target} ${flags[@]}
make all "${make_args[@]}" -j $nproc
make install
# Manually make all binary executables...executable. Sigh
chmod +x "${bindir}"/*
"""
# These are the platforms we will build for by default, unless further
# platforms are passed in on the command line
platforms = expand_gfortran_versions(supported_platforms())
filter!(!Sys.iswindows, platforms)
# On aarch64-apple-darwin we get
# f951: internal compiler error: in doloop_contained_procedure_code, at fortran/frontend-passes.c:2464
filter!(p -> !(Sys.isapple(p) && arch(p) == "aarch64"), platforms)
# The products that we will ensure are always built
products = [
ExecutableProduct("pw.x", :pwscf),
ExecutableProduct("bands.x", :bands),
ExecutableProduct("plotband.x", :plotband),
ExecutableProduct("plotrho.x", :plotrho),
ExecutableProduct("dos.x", :density_of_states),
ExecutableProduct("ibrav2cell.x", :ibrav_to_cell),
ExecutableProduct("kpoints.x", :kpoints),
ExecutableProduct("cp.x", :carparinello),
ExecutableProduct("ph.x", :phonon),
ExecutableProduct("q2r.x", :reciprocal_to_real),
ExecutableProduct("matdyn.x", :dynamical_matrix_generic),
ExecutableProduct("dynmat.x", :dynamical_matrix_gamma),
ExecutableProduct("hp.x", :hubbardparams),
ExecutableProduct("neb.x", :nudged_elastic_band),
]
# Dependencies that must be installed before this package can be built
dependencies = [
Dependency(PackageSpec(name="CompilerSupportLibraries_jll", uuid="e66e0078-7015-5450-92f7-15fbd957f2ae")),
Dependency("FFTW_jll"),
Dependency("Libxc_jll"),
Dependency("MPICH_jll"),
Dependency(PackageSpec(name="OpenBLAS32_jll", uuid="656ef2d0-ae68-5445-9ca0-591084a874a2")),
Dependency("SCALAPACK_jll"),
]
# Build the tarballs, and possibly a `build.jl` as well
build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies;
preferred_gcc_version=v"6", julia_compat="1.6")
|
Although now all in a ruinous state and not retaining their original appearance, at the time of construction the Medway Megaliths would have been some of the largest and most visually imposing Early Neolithic funerary monuments in Britain. Grouped along the River Medway as it cuts through the North Downs, they constitute the most south-easterly group of megalithic monuments in the British Isles, and the only megalithic group in eastern England. Archaeologists Brian Philp and Mike <unk> deemed the Medway Megaliths to be "some of the most interesting and well known" archaeological sites in Kent, while archaeologist Paul Ashbee described them as "the most grandiose and impressive structures of their kind in southern England".
|
open import FRP.JS.String using ( String ; _≟_ ; _<_ )
open import FRP.JS.Bool using ( Bool ; true ; false ; _∧_ )
open import FRP.JS.True using ( True )
open import FRP.JS.Maybe using ( Maybe ; just ; nothing )
module FRP.JS.Keys where
infixr 4 _∷_
data IKeys : Set where
[] : IKeys
_∷_ : (k : String) → (ks : IKeys) → IKeys
{-# COMPILED_JS IKeys function(x,v) {
if ((x.array.length) <= (x.offset)) { return v["[]"](); }
else { return v["_∷_"](x.key(),x.tail()); }
} #-}
{-# COMPILED_JS [] require("agda.keys").iempty #-}
{-# COMPILED_JS _∷_ function(k) { return function(ks) { return ks.cons(k); }; } #-}
head : IKeys → Maybe String
head [] = nothing
head (k ∷ ks) = just k
{-# COMPILED_JS head function(ks) { return require("agda.box").box(ks.key()); } #-}
_<?_ : String → Maybe String → Bool
k <? nothing = true
k <? just l = k < l
sorted : IKeys → Bool
sorted [] = true
sorted (k ∷ ks) = (k <? head ks) ∧ (sorted ks)
record Keys : Set where
constructor keys
field
ikeys : IKeys
{ikeys✓} : True (sorted ikeys)
open Keys public
{-# COMPILED_JS Keys function(x,v) { return v.keys(require("agda.keys").iarray(x),null); } #-}
{-# COMPILED_JS keys function(ks) { return function() { return ks.keys(); }; } #-}
{-# COMPILED_JS ikeys function(ks) { return require("agda.keys").iarray(ks); } #-}
{-# COMPILED_JS ikeys✓ function(ks) { return null; } #-}
_∈i_ : String → IKeys → Bool
l ∈i [] = false
l ∈i (k ∷ ks)
with k ≟ l
... | true = true
... | false
with k < l
... | true = l ∈i ks
... | false = false
_∈_ : String → Keys → Bool
l ∈ keys ks = l ∈i ks
|
## Performance Indicator
Measuring performance is fundamental for any algorithm. In a multi-objective scenario, we cannot simply calculate the distance to the true global optimum but must assess a whole set of solutions. Moreover, sometimes the optimum is not even known, and other techniques must be used.
First, let us consider a scenario where the Pareto-front is known:
```python
import numpy as np
from pymoo.factory import get_problem
from pymoo.visualization.scatter import Scatter
# The pareto front of a scaled zdt1 problem
pf = get_problem("zdt1").pareto_front()
# The result found by an algorithm
A = pf[::10] * 1.1
# plot the result
Scatter(legend=True).add(pf, label="Pareto-front").add(A, label="Result").show()
```
### Generational Distance (GD)
The GD performance indicator <cite data-cite="gd"></cite> measures the distance from the solution set to the Pareto-front. Let us assume the points found by our algorithm are the objective vector set $A=\{a_1, a_2, \ldots, a_{|A|}\}$ and the reference point set (Pareto-front) is $Z=\{z_1, z_2, \ldots, z_{|Z|}\}$. Then,
\begin{align}
\begin{split}
\text{GD}(A) & = & \; \frac{1}{|A|} \; \bigg( \sum_{i=1}^{|A|} d_i^p \bigg)^{1/p}\\[2mm]
\end{split}
\end{align}
where $d_i$ represents the Euclidean distance ($p=2$) from $a_i$ to its nearest reference point in $Z$. Basically, this is the average distance from each point in $A$ to the closest point in the Pareto-front.
```python
from pymoo.factory import get_performance_indicator
gd = get_performance_indicator("gd", pf)
print("GD", gd.calc(A))
```
GD 0.05497689467314528
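To make the formula concrete, here is a minimal NumPy sketch that computes GD by hand. The helper `gd_by_hand` is purely illustrative (it is not part of pymoo, and the name is chosen so it does not clash with the `gd` indicator object above); with $p=1$ the formula reduces to the plain average of the nearest-neighbour distances. `A` and `pf` are the arrays defined at the top of this page.
```python
import numpy as np

def gd_by_hand(A, Z, p=1):
    # pairwise Euclidean distances between the solutions A and the reference points Z
    D = np.linalg.norm(A[:, None, :] - Z[None, :, :], axis=-1)
    d = D.min(axis=1)  # d_i: distance from each a_i to its nearest point in Z
    return (d ** p).sum() ** (1 / p) / len(A)

print("GD (by hand)", gd_by_hand(A, pf))
```
Whether this matches the pymoo output digit for digit depends on the $p$ used internally, so treat it as a sanity check rather than as a reference implementation.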
### Generational Distance Plus (GD+)
Ishibuchi et al. proposed GD+ in <cite data-cite="igd_plus"></cite>:
\begin{align}
\begin{split}
\text{GD}^+(A) & = & \; \frac{1}{|A|} \; \bigg( \sum_{i=1}^{|A|} {d_i^{+}}^2 \bigg)^{1/2}\\[2mm]
\end{split}
\end{align}
where, for minimization, $d_i^{+} = \max\{ a_i - z_i, 0\}$ represents the modified distance from $a_i$ to its nearest reference point in $Z$ with the corresponding value $z_i$.
```python
from pymoo.factory import get_performance_indicator
gd_plus = get_performance_indicator("gd+", pf)
print("GD+", gd_plus.calc(A))
```
GD+ 0.05497689467314528
### Inverted Generational Distance (IGD)
The IGD performance indicator <cite data-cite="igd"></cite> inverts the generational distance and measures the distance from any point in $Z$ to the closest point in $A$.
\begin{align}
\begin{split}
\text{IGD}(A) & = & \; \frac{1}{|Z|} \; \bigg( \sum_{i=1}^{|Z|} \hat{d_i}^p \bigg)^{1/p}\\[2mm]
\end{split}
\end{align}
where $\hat{d_i}$ represents the Euclidean distance ($p=2$) from $z_i$ to its nearest solution in $A$.
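Since IGD only swaps the roles of the solution set and the reference set, it can be expressed in terms of the `gd` helper sketched earlier (again, an illustration rather than the library code):
```python
def igd(A, Z, p=2):
    # IGD averages, over the reference set Z, the distance to the nearest
    # point in A -- i.e. GD with the two arguments exchanged
    return gd(Z, A, p)

# print("IGD (from scratch)", igd(A, pf))
```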
```python
from pymoo.factory import get_performance_indicator
igd = get_performance_indicator("igd", pf)
print("IGD", igd.calc(A))
```
IGD 0.06690908300327662
### Inverted Generational Distance Plus (IGD+)
In <cite data-cite="igd_plus"></cite>, Ishibuchi et al. proposed IGD+, which is weakly Pareto compliant, whereas the original IGD is not.
\begin{align}
\begin{split}
\text{IGD}^{+}(A) & = & \; \frac{1}{|Z|} \; \bigg( \sum_{i=1}^{|Z|} {d_i^{+}}^2 \bigg)^{1/2}\\[2mm]
\end{split}
\end{align}
where for minimization $d_i^{+} = \max \{ a_i - z_i, 0\}$ represents the modified distance from $z_i$ to the closest solution in $A$ with the corresponding value $a_i$.
```python
from pymoo.factory import get_performance_indicator
igd_plus = get_performance_indicator("igd+", pf)
print("IGD+", igd_plus.calc(A))
```
IGD+ 0.06466828842775944
### Hypervolume
For all performance indicators shown so far, a target set needs to be known. For the Hypervolume, only a reference point needs to be provided. First, I would like to mention that we are using the Hypervolume implementation from [DEAP](https://deap.readthedocs.io/en/master/). It calculates the area/volume that is dominated by the provided set of solutions with respect to a reference point.
The corresponding figure in <cite data-cite="hv"></cite> illustrates a two-objective example where the area dominated by a set of points is shown in grey.
Whereas for the other metrics the goal was to minimize the distance to the Pareto-front, here we desire to maximize the performance metric.
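For the two-objective case the hypervolume reduces to a sum of rectangle areas, which the following sketch illustrates. It assumes minimization and a mutually non-dominated point set, and it is not the DEAP implementation used by pymoo; `A` is the result set from above.
```python
def hypervolume_2d(A, ref):
    # Keep only points that strictly dominate the reference point
    A = A[np.all(A < ref, axis=1)]
    # Sort by the first objective; for a non-dominated set the second
    # objective is then non-increasing
    A = A[np.argsort(A[:, 0])]
    hv, right = 0.0, ref[0]
    # Sweep from the largest f1 to the smallest, adding one rectangle per point
    for f1, f2 in A[::-1]:
        hv += (right - f1) * (ref[1] - f2)
        right = f1
    return hv

# print("hv (from scratch)", hypervolume_2d(A, np.array([1.2, 1.2])))
```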
```python
from pymoo.factory import get_performance_indicator
hv = get_performance_indicator("hv", ref_point=np.array([1.2, 1.2]))
print("hv", hv.calc(A))
```
hv 0.9631646448182305
|
[STATEMENT]
lemma \<omega>consistentStd1_implies_consistent:
assumes "\<omega>consistentStd1"
shows "consistent"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. consistent
[PROOF STEP]
unfolding consistent_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> prv fls
[PROOF STEP]
proof safe
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. prv fls \<Longrightarrow> False
[PROOF STEP]
assume pf: "prv fls"
[PROOF STATE]
proof (state)
this:
prv fls
goal (1 subgoal):
1. prv fls \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
prv fls
[PROOF STEP]
obtain x where x: "x \<in> var" "x \<notin> Fvars fls"
[PROOF STATE]
proof (prove)
using this:
prv fls
goal (1 subgoal):
1. (\<And>x. \<lbrakk>x \<in> var; x \<notin> Fvars fls\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using finite_Fvars getFresh
[PROOF STATE]
proof (prove)
using this:
prv fls
?\<phi> \<in> fmla \<Longrightarrow> finite (Fvars ?\<phi>)
finite ?V \<Longrightarrow> getFresh ?V \<in> var \<and> getFresh ?V \<notin> ?V
goal (1 subgoal):
1. (\<And>x. \<lbrakk>x \<in> var; x \<notin> Fvars fls\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> var
x \<notin> Fvars fls
goal (1 subgoal):
1. prv fls \<Longrightarrow> False
[PROOF STEP]
let ?fls = "cnj (fls) (eql (Var x) (Var x))"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. prv fls \<Longrightarrow> False
[PROOF STEP]
have 0: "\<forall> n \<in> num. prv (neg (subst ?fls n x))" and 1: "prv (exi x fls)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>n\<in>num. prv (neg (subst (cnj fls (eql (Var x) (Var x))) n x)) &&& prv (exi x fls)
[PROOF STEP]
using x fls
[PROOF STATE]
proof (prove)
using this:
x \<in> var
x \<notin> Fvars fls
fls \<in> fmla
goal (1 subgoal):
1. \<forall>n\<in>num. prv (neg (subst (cnj fls (eql (Var x) (Var x))) n x)) &&& prv (exi x fls)
[PROOF STEP]
by (auto simp: pf prv_expl)
[PROOF STATE]
proof (state)
this:
\<forall>n\<in>num. prv (neg (subst (cnj fls (eql (Var x) (Var x))) n x))
prv (exi x fls)
goal (1 subgoal):
1. prv fls \<Longrightarrow> False
[PROOF STEP]
have 2: "\<not> prv (exi x ?fls)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> prv (exi x (cnj fls (eql (Var x) (Var x))))
[PROOF STEP]
using 0 fls x assms
[PROOF STATE]
proof (prove)
using this:
\<forall>n\<in>num. prv (neg (subst (cnj fls (eql (Var x) (Var x))) n x))
fls \<in> fmla
x \<in> var
x \<notin> Fvars fls
\<omega>consistentStd1
goal (1 subgoal):
1. \<not> prv (exi x (cnj fls (eql (Var x) (Var x))))
[PROOF STEP]
unfolding \<omega>consistentStd1_def
[PROOF STATE]
proof (prove)
using this:
\<forall>n\<in>num. prv (neg (subst (cnj fls (eql (Var x) (Var x))) n x))
fls \<in> fmla
x \<in> var
x \<notin> Fvars fls
\<forall>\<phi>\<in>fmla. \<forall>x\<in>var. Fvars \<phi> = {x} \<longrightarrow> (\<forall>n\<in>num. prv (neg (subst \<phi> n x))) \<longrightarrow> \<not> prv (exi x \<phi>)
goal (1 subgoal):
1. \<not> prv (exi x (cnj fls (eql (Var x) (Var x))))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<not> prv (exi x (cnj fls (eql (Var x) (Var x))))
goal (1 subgoal):
1. prv fls \<Longrightarrow> False
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
using 1 2 consistent_def consistent_def2 pf x(1)
[PROOF STATE]
proof (prove)
using this:
prv (exi x fls)
\<not> prv (exi x (cnj fls (eql (Var x) (Var x))))
consistent \<equiv> \<not> prv fls
consistent = (\<exists>\<phi>\<in>fmla. \<not> prv \<phi>)
prv fls
x \<in> var
goal (1 subgoal):
1. False
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed |
Formal statement is: lemma holomorphic_on_scaleR [holomorphic_intros]: "f holomorphic_on A \<Longrightarrow> (\<lambda>x. c *\<^sub>R f x) holomorphic_on A" Informal statement is: If $f$ is holomorphic on a set $A$, then $c \cdot f$ is holomorphic on $A$ for any real scalar $c$. |
\section{Computing the Time-Step Using Higher-Order DG Schemes}
When making the jump to higher-order DG schemes, we can simply do the same as in the first-order scheme, except that we compute the quantities at all of the nodal points instead of using a cell-average. This is valid because the cell-average is a convex combination of the nodal values...\sd{Need to expand on this}. The proof starts with the discretized equation, valid at each quadrature point $q$:
\begin{equation}
\bU_{q}^{n+1}=\bU_{q}^{n}+\Delta t\,\cL_{q}^{n},
\end{equation}
where $\cL_{q}^{n}$ is a general form of the RHS at time $t^{n}$. If we define a vector $\ol{\bU}\equiv\left(\bU_{1},\cdots,\bU_{q},\cdots,\bU_{Q}\right)^T$, where $Q$ is the total number of quadrature points, and $\ol{\bW}\equiv\left(\bW_{1},\cdots,\bW_{q},\cdots,\bW_{Q}\right)^T$ as a vector of quadrature weights, then we can write the cell-average of $\bU$ as:
\begin{equation}
\bU_{K}\equiv\ol{\bW}^T\ol{\bU}.
\end{equation}
If we then compute the cell-average of the above equation, we get:
\begin{equation}
\bU_{K}^{n+1}=\bU_{K}^{n}+\Delta t\,\ol{\bW}^{T}\,\ol{\cL}^{n}=\ol{\bW}^{T}\left(\ol{\bU}^{n}+\Delta t\,\ol{\cL}^{n}\right)
\end{equation}
\subsection{High-Order Time-Step Restriction for DG}
\blue{NOTE:} This closely follows Jesse's document CFLCondition.pdf.
Consider the one-dimensional system of hyperbolic balance equations:
\begin{equation}\label{Eq:HypBalEqns}
\pd{\left(\sqrtgm\,\bU\right)}{t}+\pd{\left(\sqrtgm\,\bF^{1}\left(\bU\right)\right)}{1}=\sqrtgm\,\bQ,
\end{equation}
where $\bU$ is a vector of conserved variables, $\bF^{1}\left(\bU\right)$ are the fluxes of those conserved variables in the $x^{1}$-direction, $\bQ$ is a source term, and $\sqrtgm$ is the square-root of the determinant of the spatial three-metric.
We define our reference element by:
\begin{equation}
I_{j}\equiv\left\{x^{1}:x^{1}\in\left(x^{1}_{L},x^{1}_{H}\right)=\left(x^{1}_{\jmh},x^{1}_{\jph}\right)\right\}.
\end{equation}
We proceed by multiplying \eqref{Eq:HypBalEqns} by $v$, where $v=v\left(x^{1}\right)$ is a test function in the DG scheme, and integrating over the $\jth$ element:
\begin{equation}
\int_{I_{j}}\pd{\left(\sqrtgm\,\bU\right)}{t}\,v\,dx^{1}+\int_{I_{j}}\pd{\left(\sqrtgm\,\bF^{1}\left(\bU\right)\right)}{1}\,v\,dx^{1}=\int_{I_{j}}\sqrtgm\,\bQ\,v\,dx^{1}.
\end{equation}
We now move the flux term to the RHS and perform integration-by-parts on it, yielding:
\begin{equation}\label{Eq:IntByParts}
\int_{I_{j}}\pd{\left(\sqrtgm\,\bU\right)}{t}\,v\,dx^{1}=-\left[\sqrtgm\,\hat{\bF^{1}}\,v\Big|_{x^{1}_{H}}-\sqrtgm\,\hat{\bF^{1}}\,v\Big|_{x^{1}_{L}}\right]+\int_{I_{j}}\sqrtgm\,\bF^{1}\,\pd{v}{1}\,dx^{1}+\int_{I_{j}}\sqrtgm\,\bQ\,v\,dx^{1},
\end{equation}
where $\hat{\bF^{1}}$ is a numerical flux.
\blue{NOTE:} $v=1$ is in the space of test functions for the DG method, \textit{and} $v=1$ yields the cell-average when substituted into \eqref{Eq:IntByParts}; therefore, the DG method evolves the cell-average.
Substituting $v=1$ into \eqref{Eq:IntByParts} yields:
\begin{equation}\label{Eq:CellAverageDG}
\int_{I_{j}}\pd{\left(\sqrtgm\,\bU\right)}{t}\,dx^{1}=-\left[\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{H}}-\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{L}}\right]+\int_{I_{j}}\sqrtgm\,\bQ\,dx^{1}.
\end{equation}
Note that the volume-term has dropped out because the derivative of a constant is equal to zero.
We define the cell-average of a quantity, $\bX=\bX\left(x^{1},t\right)$, as:
\begin{equation}
\ol{\bX}\equiv\f{1}{\Delta V_{j}}\int_{I_{j}}\bX\,\sqrtgm\,dx^{1}.
\end{equation}
\red{NEW ASSUMPTION:} We assume that the spatial three-metric is explicitly independent of time. This allows us to pull the metric determinant out of the first integral, yielding for \eqref{Eq:CellAverageDG}:
\begin{equation}
\f{d\,\ol{\bU}}{dt}=-\f{1}{\Delta V_{j}}\left[\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{H}}-\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{L}}\right]+\ol{\bQ}.
\end{equation}
\red{NEW ASSUMPTION:} We now specialize this to using the forward-Euler time-stepping algorithm, yielding:
\begin{equation}
\ol{\bU}^{n+1}=\ol{\bU}^{n}-\f{\Delta t^{n}_{j}}{\Delta V_{j}}\left[\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{H}}-\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{L}}\right]^{n}+\Delta t^{n}_{j}\,\ol{\bQ}^{n}
\end{equation}
\blue{NOTE:} Since the spatial three-metric is explicitly independent of time, we don't need to specify the time level at which the volume is computed (i.e., we don't have to write $\Delta V^{n}_{j}$).
Now we define a parameter $\ve\in\left(0,1\right)$ a la \citet{ZS2011b} and re-write the above equation as:
\begin{align}
\ol{\bU}^{n+1}&=\ve\left\{\ol{\bU}^{n}-\f{\Delta t^{n}_{j}}{\ve\,\Delta V_{j}}\left[\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{H}}-\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{L}}\right]^{n}\right\}+\left(1-\ve\right)\left\{\ol{\bU}^{n}+\f{\Delta t^{n}_{j}}{1-\ve}\,\ol{\bQ}^{n}\right\}\\
&=\ve\,\ol{\bH}_{1}+\left(1-\ve\right)\ol{\bH}_{2},
\end{align}
where
\begin{equation}\label{Eq:H1}
\ol{\bH}_{1}\equiv\ol{\bU}^{n}-\f{\Delta t^{n}_{j}}{\ve\,\Delta V_{j}}\left[\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{H}}-\sqrtgm\,\hat{\bF^{1}}\Big|_{x^{1}_{L}}\right]^{n},
\end{equation}
and
\begin{equation}
\ol{\bH}_{2}\equiv\ol{\bU}^{n}+\f{\Delta t^{n}_{j}}{1-\ve}\,\ol{\bQ}^{n}.
\end{equation}
\red{NEW ASSUMPTION:} We assume that $\ol{\bU}^{n}\in\cG$, as defined in \citet{Mignone2005}.
Assuming that $\ol{\bU}^{n}\in\cG$, we now seek to derive the conditions that guarantee $\ol{\bU}^{n+1}\in\cG$.
\subsubsection{The numerical flux term: $\ol{\bH}_{1}$}
We start by numerically computing the cell-average using quadrature with the Gauss-Lobatto quadrature rule. We assume that the DG approximation polynomial for the conserved variables is of order $k$, and that the integrand $\bU\,\sqrtgm$ is of order $k+d$, where $d$ is an integer that depends on the metric determinant. For the case of Cartesian coordinates, $d=0$ (because $\sqrtgm\sim x^{0}$), and for spherical-polar coordinates in spherical symmetry, $d=2$ (because $\sqrtgm\sim r^{2}$). Gauss-Lobatto integration will give an exact result if we choose a sufficiently high number, $M$, of quadrature points:
\begin{equation}
2\,M-3\geq k+d\implies M\geq\f{k+d+3}{2}.
\end{equation}
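For example, for a third-order scheme ($k=2$) in spherical symmetry ($d=2$), this gives $M\geq\left(2+2+3\right)/2=3.5$; i.e., at least $M=4$ Gauss-Lobatto points are needed for exact integration.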
\blue{NOTE:} We do not use this restriction in the code. We simply take $M$ equal to the number of quadrature points (which is the same as the number of interpolation points). The difference will be accounted for by the presence of the CFL number.
\blue{NOTE:} We now drop the superscript $n$ for the rest of this subsection.
Assuming that we choose a sufficient number of points, we can write the cell-average as:
\begin{align}
\ol{\bU}=\f{1}{\Delta V_{j}}\sum\limits_{q=1}^{M}w_{q}\,\bU_{q}\,\sqrtgm_{q}\,\Delta x_{j}&=\f{\Delta x_{j}}{\Delta V_{j}}\sum\limits_{q=2}^{M-1}w_{q}\,\bU_{q}\,\sqrtgm_{q}+\f{\Delta x_{j}}{\Delta V_{j}}\,w_{1}\,\bU_{1}\,\sqrtgm_{1}+\f{\Delta x_{j}}{\Delta V_{j}}\,w_{M}\,\bU_{M}\,\sqrtgm_{M}\\
&=\f{\Delta x_{j}}{\Delta V_{j}}\sum\limits_{q=2}^{M-1}w_{q}\,\bU_{q}\,\sqrtgm_{q}+\f{\Delta x_{j}}{\Delta V_{j}}\,w_{1}\,\bU^{+}_{L}\,\sqrtgm_{L}+\f{\Delta x_{j}}{\Delta V_{j}}\,w_{M}\,\bU^{-}_{H}\,\sqrtgm_{H},\label{Eq:CellAverageGL}
\end{align}
where $w_{q}$ are the Gauss-Lobatto quadrature weights and $\bU_{q}=\bU\left(x_{q}\right)$, and $\sqrtgm_{q}=\sqrt{\gamma\left(x_{q}\right)}$. The quantity $\bU^{+}_{L}$ refers to the vector of conserved variables evaluated at the lower interface, but on the higher side, so that it is evaluated \textit{in} the $\jth$ cell. Similarly for $\bU^{-}_{H}$.
Our approach is to use the end-points to balance the troublesome terms in the numerical fluxes.
\red{NEW ASSUMPTION:} We now specialize to the local Lax-Friedrichs flux:
\begin{align}
\hat{\bF}^{1}\Big|_{x^{1}_{L}}&=\hat{\bF}^{1}\left(\bU^{+}_{L},\bU^{-}_{L}\right)=\f{1}{2}\left[\bF^{1}\left(\bU^{+}_{L}\right)+\bF^{1}\left(\bU^{-}_{L}\right)-\alpha_{L}\left(\bU_{L}^{+}-\bU_{L}^{-}\right)\right]\\
&=\f{1}{2}\left\{-\alpha_{L}\left[\bU^{+}_{L}-\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{+}_{L}\right)\right]+\alpha_{L}\left[\bU^{-}_{L}+\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{-}_{L}\right)\right]\right\},
\end{align}
and
\begin{align}
\hat{\bF}^{1}\Big|_{x^{1}_{H}}&=\hat{\bF}^{1}\left(\bU^{+}_{H},\bU^{-}_{H}\right)=\f{1}{2}\left[\bF^{1}\left(\bU^{+}_{H}\right)+\bF^{1}\left(\bU^{-}_{H}\right)-\alpha_{H}\left(\bU_{H}^{+}-\bU_{H}^{-}\right)\right]\\
&=\f{1}{2}\left\{-\alpha_{H}\left[\bU^{+}_{H}-\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{+}_{H}\right)\right]+\alpha_{H}\left[\bU^{-}_{H}+\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{-}_{H}\right)\right]\right\},
\end{align}
where $\alpha_{H}=\text{max}\left(\alpha_{j},\alpha_{j+1}\right)$ and $\alpha_{L}=\text{max}\left(\alpha_{j-1},\alpha_{j}\right)$ are the largest (in magnitude) wavespeeds as given by the flux-Jacobian.
Now we substitute these expressions along with \eqref{Eq:CellAverageGL} into \eqref{Eq:H1}:
\begin{align}
\ol{\bH}_{1}=&\f{\Delta x_{j}}{\Delta V_{j}}\sum\limits_{q=2}^{M-1}w_{q}\,\bU_{q}\,\sqrtgm_{q}+\f{\Delta x_{j}}{\Delta V_{j}}\,w_{1}\,\bU^{+}_{L}\,\sqrtgm_{L}+\f{\Delta x_{j}}{\Delta V_{j}}\,w_{M}\,\bU^{-}_{H}\,\sqrtgm_{H}\\
&-\f{\Delta t_{j}}{\ve\,\Delta V_{j}}\left[\sqrtgm_{H}\f{1}{2}\left\{-\alpha_{H}\left[\bU^{+}_{H}-\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{+}_{H}\right)\right]+\alpha_{H}\left[\bU^{-}_{H}+\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{-}_{H}\right)\right]\right\}\right]\\
&+\f{\Delta t_{j}}{\ve\,\Delta V_{j}}\left[\sqrtgm_{L}\f{1}{2}\left\{-\alpha_{L}\left[\bU^{+}_{L}-\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{+}_{L}\right)\right]+\alpha_{L}\left[\bU^{-}_{L}+\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{-}_{L}\right)\right]\right\}\right].
\end{align}
Now we combine terms with common factors of the metric determinant:
\begin{align}
\ol{\bH}_{1}=&\f{\Delta x_{j}}{\Delta V_{j}}\sum\limits_{q=2}^{M-1}w_{q}\,\bU_{q}\,\sqrtgm_{q}\\
&+\f{\sqrtgm_{L}}{\Delta V_{j}}\left\{\Delta x_{j}\,w_{1}\,\bU^{+}_{L}+\f{\Delta t_{j}\,\alpha_{L}}{2\,\ve}\left(-\left[\bU^{+}_{L}-\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{+}_{L}\right)\right]+\left[\bU^{-}_{L}+\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{-}_{L}\right)\right]\right)\right\}\\
&+\f{\sqrtgm_{H}}{\Delta V_{j}}\left\{\Delta x_{j}\,w_{M}\,\bU^{-}_{H}-\f{\Delta t_{j}\,\alpha_{H}}{2\,\ve}\left(-\left[\bU^{+}_{H}-\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{+}_{H}\right)\right]+\left[\bU^{-}_{H}+\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{-}_{H}\right)\right]\right)\right\}.
\end{align}
Next we factor out $\Delta x_{j}$ and the quadrature weights, yielding:
\begin{align}
\ol{\bH}_{1}=&\f{\Delta x_{j}}{\Delta V_{j}}\sum\limits_{q=2}^{M-1}w_{q}\,\bU_{q}\,\sqrtgm_{q}\\
&+\f{\sqrtgm_{L}\,\Delta x_{j}\,w_{1}}{\Delta V_{j}}\left\{\bU^{+}_{L}+\f{\Delta t_{j}\,\alpha_{L}}{2\,\ve\,\Delta x_{j}\,w_{1}}\left(-\left[\bU^{+}_{L}-\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{+}_{L}\right)\right]+\left[\bU^{-}_{L}+\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{-}_{L}\right)\right]\right)\right\}\\
&+\f{\sqrtgm_{H}\,\Delta x_{j}\,w_{M}}{\Delta V_{j}}\left\{\bU^{-}_{H}-\f{\Delta t_{j}\,\alpha_{H}}{2\,\ve\,\Delta x_{j}\,w_{M}}\left(-\left[\bU^{+}_{H}-\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{+}_{H}\right)\right]+\left[\bU^{-}_{H}+\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{-}_{H}\right)\right]\right)\right\}.
\end{align}
Next we re-write the $\bU^{+}_{L}$ and $\bU^{-}_{H}$ that appear with the flux terms:
\begin{align}
\bU^{+}_{L}&=2\,\bU^{+}_{L}-\bU^{+}_{L}\\
\bU^{-}_{H}&=2\,\bU^{-}_{H}-\bU^{-}_{H}.
\end{align}
This gives:
\begin{align}
\ol{\bH}_{1}=&\f{\Delta x_{j}}{\Delta V_{j}}\sum\limits_{q=2}^{M-1}w_{q}\,\bU_{q}\,\sqrtgm_{q}\\
&+\f{\sqrtgm_{L}\,\Delta x_{j}\,w_{1}}{\Delta V_{j}}\left\{\bU^{+}_{L}+\f{\Delta t_{j}\,\alpha_{L}}{2\,\ve\,\Delta x_{j}\,w_{1}}\left(-\left[2\,\bU^{+}_{L}-\bU^{+}_{L}-\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{+}_{L}\right)\right]+\left[\bU^{-}_{L}+\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{-}_{L}\right)\right]\right)\right\}\\
&+\f{\sqrtgm_{H}\,\Delta x_{j}\,w_{M}}{\Delta V_{j}}\left\{\bU^{-}_{H}-\f{\Delta t_{j}\,\alpha_{H}}{2\,\ve\,\Delta x_{j}\,w_{M}}\left(-\left[\bU^{+}_{H}-\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{+}_{H}\right)\right]+\left[2\,\bU^{-}_{H}-\bU^{-}_{H}+\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{-}_{H}\right)\right]\right)\right\}.
\end{align}
This allows us to write the expression with factors similar to those in \citet{Qin2016}. We find:
\begin{align}
\ol{\bH}_{1}=&\f{\Delta x_{j}}{\Delta V_{j}}\sum\limits_{q=2}^{M-1}w_{q}\,\bU_{q}\,\sqrtgm_{q}\\
&+\f{\sqrtgm_{L}\,\Delta x_{j}\,w_{1}}{\Delta V_{j}}\left\{\bU^{+}_{L}\left(1-\f{\Delta t_{j}\,\alpha_{L}}{\ve\,\Delta x_{j}\,w_{1}}\right)+\f{\Delta t_{j}\,\alpha_{L}}{2\,\ve\,\Delta x_{j}\,w_{1}}\left(\left[\bU^{+}_{L}+\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{+}_{L}\right)\right]+\left[\bU^{-}_{L}+\f{1}{\alpha_{L}}\,\bF^{1}\left(\bU^{-}_{L}\right)\right]\right)\right\}\\
&+\f{\sqrtgm_{H}\,\Delta x_{j}\,w_{M}}{\Delta V_{j}}\left\{\bU^{-}_{H}\left(1-\f{\Delta t_{j}\,\alpha_{H}}{\ve\,\Delta x_{j}\,w_{M}}\right)+\f{\Delta t_{j}\,\alpha_{H}}{2\,\ve\,\Delta x_{j}\,w_{M}}\left(\left[\bU^{+}_{H}-\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{+}_{H}\right)\right]+\left[\bU^{-}_{H}-\f{1}{\alpha_{H}}\,\bF^{1}\left(\bU^{-}_{H}\right)\right]\right)\right\}.
\end{align}
All of the terms in the square brackets are similar to the $\bH$ quantities in \citet{Qin2016} and therefore belong to the set of admissible states, provided that:
\begin{equation}
\alpha_{L/H}=\alpha^{*}\geq\f{\left|v^{1}\right|\left(h+1-2\,h\,\tau\right)\,W^{2}+\sqrt{\tau^{4}\left(h-1\right)^{2}+\tau^{2}\left(h-1\right)\left(h+1-2\,h\,\tau\right)}}{W^{2}\left(h+1-2\,h\,\tau\right)+\tau^{2}\left(h-1\right)},
\end{equation}
where $h$ is the relativistic specific enthalpy and
\begin{equation}
\tau\equiv\f{\Gamma-1}{\Gamma},
\end{equation}
where $\Gamma$ is the adiabatic index.
We see that the expressions in the curly brackets are convex combinations (given a restriction on $\Delta t_{j}$), because the coefficients sum to unity. Since the quadrature weights are symmetric (so $w_{1}=w_{M}\equiv w_{GL}$), we find that the condition for $\ol{\bH}_{1}\in\cG$ is a time-step restriction:
\begin{equation}
\Delta t_{j}<\ve\,\Delta x_{j}\,w_{GL}\,\text{min}\left(\f{1}{\alpha_{L}},\f{1}{\alpha_{H}},\f{1}{\alpha^{*}}\right)
\end{equation}
Since we want a time-step that is constant for all elements, we choose:
\begin{equation}
\Delta t<\ve\,w_{GL}\,\text{min}_{j}\left(\f{\Delta x_{j}}{\text{max}\left(\alpha_{L},\alpha_{H},\alpha^{*}\right)}\right).
\end{equation}
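As a concrete illustration, if the quadrature weights are normalized to sum to unity (consistent with the cell-average in \eqref{Eq:CellAverageGL}), the three-point Gauss-Lobatto rule has end-point weight $w_{GL}=1/6$, so the restriction becomes $\Delta t<\ve\,\Delta x_{j}/\left(6\,\text{max}\left(\alpha_{L},\alpha_{H},\alpha^{*}\right)\right)$.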
\red{NEW ASSUMPTION:} In order for this to work, we also demand that all of the $\bU_{q}$ are within physical bounds.
\blue{NOTE:} We see the effect of the high-order approximation in the presence of the quadrature end-point weight $w_{GL}$. As the order increases, $w_{GL}$ decreases, thus making a tighter restriction on the time-step.
\blue{NOTE:} It is worth noting that this result is \textit{not} independent of the metric, because the metric enters through the wave-speeds in the numerical flux.
|
The infnorm of a sum of two vectors is less than or equal to the sum of the infnorms of the vectors. |
FUNCTION:NAME
:BEGIN
__vfs_write:entry
vmlinux`__vfs_write
vmlinux`ksys_write+{ptr}
vmlinux`__arm64_sys_write+{ptr}
-- @@stderr --
dtrace: script 'test/unittest/stack/tst.stack3_fbt.d' matched 2 probes
dtrace: allowing destructive actions
|
module SmallStepImp
import Expr
import Imp
import Maps
import SmallStepExpr
%access public export
%default total
data CStep : (Com, State) -> (Com, State) -> Type where
CS_AssStep : AStep st a a' -> CStep (i ::= a, st) (i ::= a', st)
CS_Ass : CStep (i ::= ANum n, st) (SKIP, t_update i n st)
CS_SeqStep : CStep (c1, st) (c1', st') ->
CStep ((do c1; c2), st) ((do c1'; c2), st')
CS_SeqFinish : CStep ((do SKIP; c2), st) (c2, st)
CS_IfStep : BStep st b b' -> CStep (CIf b c1 c2, st) (CIf b' c1 c2, st)
CS_IfTrue : CStep (CIf BTrue c1 c2, st) (c1, st)
CS_IfFalse : CStep (CIf BFalse c1 c2, st) (c2, st)
CS_While : CStep (WHILE b c, st) (CIf b (do c; WHILE b c) SKIP, st)
syntax [t] "/" [st] "-+>" [t'] "/" [st'] = CStep (t, st) (t', st')
|
function test_fltype()
Tf = Float16
Tc = Complex{Tf}
Tf == fltype(Tf) && Tf == fltype(Tc) && Tf == fltype((Tc, Tf)) && Tf == fltype((Tf, Tc))
end
function test_cxtype()
Tf = Float16
Tc = Complex{Tf}
Tc == cxtype(Tf) && Tc == cxtype(Tc)
end
function test_innereltype(T=Complex{Float32})
a = [zeros(T, 3), zeros(T, 2, 5)]
T == innereltype(a)
end
function test_superzeros()
T = Float64
dims1 = (2, 3)
dims2a = (7,)
dims2b = dims1
dims2 = (dims2a, dims2b)
a1 = rand(T, dims1)
a2 = [(1+im)*rand(T, dims2a), (2+im)*rand(T, dims2b)]
a1a = zeros(T, dims1)
a2a = [zeros(Complex{T}, dims2a), zeros(Complex{T}, dims2b)]
a1z = superzeros(a1)
a2z = superzeros(a2)
a1d = superzeros(dims1)
a2d = superzeros(Complex{T}, dims2)
@superzeros innereltype(a1) a1 a1ma dum
@superzeros innereltype(a2) a2 a2ma dum
@superzeros innereltype(a1) dims1 a1md dum
@superzeros innereltype(a2) dims2 a2md dum
( a1a == a1z && a1a == a1d && a1a == a1ma && a1a == a1md &&
a2a == a2z && a2a == a2d && a2a == a2ma && a2a == a2md )
end
function test_supertuplezeros(; T1=Float64, T2=Complex{Float64}, dims1=(1,), dims2=(3, 3))
T = (T1, T2)
dims = (dims1, dims2)
a = superzeros(T, dims)
( eltype(a[1]) == T1 && eltype(a[2]) == T2 &&
size(a[1]) == dims1 && size(a[2]) == dims2 )
end
# This test could use some further work.
function test_radialspectrum(dev::Device, n, ahkl, ahρ; debug=false, atol=0.1, rfft=false)
g = TwoDGrid(dev, n, 2π)
if rfft==true
ah = @. ahkl(g.kr, g.l)
else
ah = @. ahkl(g.k, g.l)
end
ah[1, 1] = 0.0
ρ, ahρ_estimate = FourierFlows.radialspectrum(ah, g; refinement=16)
if debug
println(sum(ahρ.(ρ) - ahρ_estimate))
return ah, ahρ_estimate, ahρ.(ρ)
else
normalizeddiff = sum(abs.(ahρ.(ρ) - ahρ_estimate)) / length(ρ)
return isapprox(normalizeddiff, 0.0, atol=atol)
end
end
function integralsquare(func, grid::OneDGrid)
sum(abs2.(func))*grid.dx
end
function integralsquare(func, grid::TwoDGrid)
sum(abs2.(func))*grid.dx*grid.dy
end
function test_parsevalsum(func, grid; realvalued=true)
# Compute integral in physical space
integral = integralsquare(func, grid)
# Compute integral in wavenumber space using Parseval's theorem
if realvalued==true; funch = rfft(func)
elseif realvalued==false; funch = fft(func)
end
parsevalsum = FourierFlows.parsevalsum(abs2.(funch), grid)
isapprox(integral, parsevalsum; rtol=rtol_utils)
end
function test_parsevalsum2(func, grid; realvalued=true)
# Compute integral in physical space
integral = integralsquare(func, grid)
# Compute integral in wavenumber space using Parseval's theorem
if realvalued==true; funch = rfft(func)
elseif realvalued==false; funch = fft(func)
end
parsevalsum2 = FourierFlows.parsevalsum2(funch, grid)
isapprox(integral, parsevalsum2; rtol=rtol_utils)
end
"""
Compute the jacobian J(a, b) and compare the result with `analytic`. Use `atol` for the comparison
to ensure validity when `analytic=0`.
"""
test_jacobian(a, b, analytic, grid) = isapprox(FourierFlows.jacobian(a, b, grid), analytic;
atol=grid.nx*grid.ny*10*eps())
"""
Test the zeros macro.
"""
function test_zeros(T=Float64, dims=(13, 45))
a1, b1 = zeros(T, dims), zeros(T, dims)
@zeros T dims a2 b2
a1 == a2 && b1 == b2
end
abstract type TestVars <: AbstractVars end
physicalvars = [:a, :b]
fouriervars = [:ah, :bh]
varspecs = cat(FourierFlows.getfieldspecs(physicalvars, :(Array{T,2})),
FourierFlows.getfieldspecs(fouriervars, :(Array{Complex{T},2})), dims=1)
eval(FourierFlows.varsexpression(:VarsFields, physicalvars, fouriervars))
eval(FourierFlows.varsexpression(:VarsFieldsParent, physicalvars, fouriervars; parent=:TestVars))
eval(FourierFlows.varsexpression(:VarsSpecs, varspecs; typeparams=:T))
eval(FourierFlows.varsexpression(:VarsSpecsParent, varspecs; parent=:TestVars, typeparams=:T))
function test_varsexpression_fields(g::AbstractGrid{T}) where T
@zeros T (g.nx, g.ny) a b
@zeros Complex{T} (g.nkr, g.nl) ah bh
v1 = VarsFields(a, b, ah, bh)
(
typeof(v1.a) == Array{T,2} &&
typeof(v1.ah) == Array{Complex{T},2} &&
size(v1.a) == size(v1.b) &&
size(v1.ah) == size(v1.bh) &&
size(v1.a) == (g.nx, g.ny) &&
size(v1.ah) == (g.nkr, g.nl)
)
end
function test_varsexpression_fields_parent(g::AbstractGrid{T}) where T
@zeros T (g.nx, g.ny) a b
@zeros Complex{T} (g.nkr, g.nl) ah bh
v2 = VarsFieldsParent(a, b, ah, bh)
(
typeof(v2.a) == Array{T,2} &&
typeof(v2.ah) == Array{Complex{T},2} &&
size(v2.a) == size(v2.b) &&
size(v2.ah) == size(v2.bh) &&
size(v2.a) == (g.nx, g.ny) &&
size(v2.ah) == (g.nkr, g.nl)
)
end
function test_varsexpression_specs(g::AbstractGrid{T}) where T
@zeros T (g.nx, g.ny) a b
@zeros Complex{T} (g.nkr, g.nl) ah bh
v1 = VarsSpecs(a, b, ah, bh)
(
typeof(v1.a) == Array{T,2} &&
typeof(v1.ah) == Array{Complex{T},2} &&
size(v1.a) == size(v1.b) &&
size(v1.ah) == size(v1.bh) &&
size(v1.a) == (g.nx, g.ny) &&
size(v1.ah) == (g.nkr, g.nl)
)
end
function test_varsexpression_specs_parent(g::AbstractGrid{T}) where T
@zeros T (g.nx, g.ny) a b
@zeros Complex{T} (g.nkr, g.nl) ah bh
v2 = VarsSpecsParent(a, b, ah, bh)
(
typeof(v2.a) == Array{T,2} &&
typeof(v2.ah) == Array{Complex{T},2} &&
size(v2.a) == size(v2.b) &&
size(v2.ah) == size(v2.bh) &&
size(v2.a) == (g.nx, g.ny) &&
size(v2.ah) == (g.nkr, g.nl)
)
end
function test_supersize()
a = rand(16, 16)
dimsa = size(a)
b = [rand(1), rand(3, 34)]
dimsb = ((1,), (3, 34))
dimsa == supersize(a) && dimsb == supersize(b)
end
function test_arraytype(dev::Device)
dev==CPU() ? ArrayType(dev)==Array : ArrayType(dev)==CuArray
end
function test_arraytypeTdim(dev::Device, T=Float64, dim=1)
dev==CPU() ? ArrayType(dev, T, dim)<:Array{T, dim} : ArrayType(dev, T, dim)<:CuArray{T, dim}
end
|
{- MathHmatrix.hs
- Mapping of Linear algebra routines via the hMatrix library.
- hMatrix uses GSL and a BLAS implementation such as ATLAS.
-
- Timothy A. Chagnon
- CS 636 - Spring 2009
-}
module MathHmatrix where
import Numeric.LinearAlgebra
deg2rad :: RealT -> RealT
deg2rad d = d * pi / 180
type RealT = Double
type Vec3f = Vector Double
vec3f :: Double -> Double -> Double -> Vector Double
vec3f x y z = (3 |>) [x,y,z]
zeroVec3f :: Vec3f
zeroVec3f = vec3f 0 0 0
svMul :: Double -> Vec3f -> Vec3f
svMul = scale
dot :: Vec3f -> Vec3f -> Double
dot = Numeric.LinearAlgebra.dot
cross :: Vec3f -> Vec3f -> Vec3f
cross v1 v2 =
let [a, b, c] = toList v1 in
let [d, e, f] = toList v2 in
vec3f (b*f-c*e) (c*d-a*f) (a*e-b*d)
mag :: Vec3f -> Double
mag v = sqrt (magSq v)
magSq :: Vec3f -> Double
magSq v = v `Numeric.LinearAlgebra.dot` v
norm :: Vec3f -> Vec3f
norm v = (1/(mag v)) `svMul` v
type Mat4f = Matrix Double
mat4f :: Vec4f -> Vec4f -> Vec4f -> Vec4f -> Mat4f
mat4f a b c d = fromRows [a,b,c,d]
type Vec4f = Vector Double
vec4f :: Double -> Double -> Double -> Double -> Vector Double
vec4f x y z w = (4 |>) [x,y,z,w]
mvMul :: Mat4f -> Vec4f -> Vec4f
mvMul = (<>)
mmMul :: Mat4f -> Mat4f -> Mat4f
mmMul = (<>)
id4f :: Mat4f
id4f = ident 4
point4f :: Vec3f -> Vec4f
point4f v = 4 |> ((toList v) ++ [1.0])
point3f :: Vec4f -> Vec3f
point3f v =
let [x, y, z, w] = toList v in
if w == 0
then error "point3f divide by zero"
else vec3f (x/w) (y/w) (z/w)
direction4f :: Vec3f -> Vec4f
direction4f v = 4 |> ((toList v) ++ [0.0])
direction3f :: Vec4f -> Vec3f
direction3f = subVector 0 3
vec3fElts :: Vec3f -> (RealT, RealT, RealT)
vec3fElts v =
let [x,y,z] = toList v in
(x,y,z)
type ColMat3f = Matrix Double
colMat3f :: Vec3f -> Vec3f -> Vec3f -> ColMat3f
colMat3f a b c = fromColumns [a,b,c]
detMat3f :: ColMat3f -> RealT
detMat3f = det
|
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁵ : UniformSpace X
inst✝⁴ : UniformSpace Y
inst✝³ : SMul M X
inst✝² : SMul Mᵐᵒᵖ X
inst✝¹ : IsCentralScalar M X
inst✝ : UniformContinuousConstSMul M X
c : M
⊢ UniformContinuous ((fun x x_1 => x • x_1) (MulOpposite.op c))
[PROOFSTEP]
dsimp only
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁵ : UniformSpace X
inst✝⁴ : UniformSpace Y
inst✝³ : SMul M X
inst✝² : SMul Mᵐᵒᵖ X
inst✝¹ : IsCentralScalar M X
inst✝ : UniformContinuousConstSMul M X
c : M
⊢ UniformContinuous fun x => MulOpposite.op c • x
[PROOFSTEP]
simp_rw [op_smul_eq_smul]
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁵ : UniformSpace X
inst✝⁴ : UniformSpace Y
inst✝³ : SMul M X
inst✝² : SMul Mᵐᵒᵖ X
inst✝¹ : IsCentralScalar M X
inst✝ : UniformContinuousConstSMul M X
c : M
⊢ UniformContinuous fun x => c • x
[PROOFSTEP]
exact uniformContinuous_const_smul c
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁷ : UniformSpace X
inst✝⁶ : UniformSpace Y
inst✝⁵ : SMul M X
inst✝⁴ : SMul N X
inst✝³ : SMul M N
inst✝² : UniformContinuousConstSMul M X
inst✝¹ : UniformContinuousConstSMul N X
inst✝ : IsScalarTower M N X
m : M
n : N
x : Completion X
⊢ (m • n) • x = m • n • x
[PROOFSTEP]
have : _ = (_ : Completion X → Completion X) :=
map_comp (uniformContinuous_const_smul m) (uniformContinuous_const_smul n)
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁷ : UniformSpace X
inst✝⁶ : UniformSpace Y
inst✝⁵ : SMul M X
inst✝⁴ : SMul N X
inst✝³ : SMul M N
inst✝² : UniformContinuousConstSMul M X
inst✝¹ : UniformContinuousConstSMul N X
inst✝ : IsScalarTower M N X
m : M
n : N
x : Completion X
this :
Completion.map ((fun x x_1 => x • x_1) m) ∘ Completion.map ((fun x x_1 => x • x_1) n) =
Completion.map ((fun x x_1 => x • x_1) m ∘ (fun x x_1 => x • x_1) n)
⊢ (m • n) • x = m • n • x
[PROOFSTEP]
refine' Eq.trans _ (congr_fun this.symm x)
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁷ : UniformSpace X
inst✝⁶ : UniformSpace Y
inst✝⁵ : SMul M X
inst✝⁴ : SMul N X
inst✝³ : SMul M N
inst✝² : UniformContinuousConstSMul M X
inst✝¹ : UniformContinuousConstSMul N X
inst✝ : IsScalarTower M N X
m : M
n : N
x : Completion X
this :
Completion.map ((fun x x_1 => x • x_1) m) ∘ Completion.map ((fun x x_1 => x • x_1) n) =
Completion.map ((fun x x_1 => x • x_1) m ∘ (fun x x_1 => x • x_1) n)
⊢ (m • n) • x = Completion.map ((fun x x_1 => x • x_1) m ∘ (fun x x_1 => x • x_1) n) x
[PROOFSTEP]
exact congr_arg (fun f => Completion.map f x) (funext (smul_assoc _ _))
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
⊢ m • n • x = n • m • x
[PROOFSTEP]
have hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x := rfl
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
⊢ m • n • x = n • m • x
[PROOFSTEP]
have hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x := rfl
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ m • n • x = n • m • x
[PROOFSTEP]
rw [hmn, hnm, map_comp, map_comp]
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ Completion.map (SMul.smul m ∘ SMul.smul n) x = Completion.map (SMul.smul n ∘ SMul.smul m) x
case hg
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul n)
case hf
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul m)
case hg
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul m)
case hf
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul n)
[PROOFSTEP]
exact congr_arg (fun f => Completion.map f x) (funext (smul_comm _ _))
[GOAL]
case hg
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul n)
case hf
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul m)
case hg
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul m)
case hf
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul n)
[PROOFSTEP]
repeat' exact uniformContinuous_const_smul _
[GOAL]
case hg
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul n)
[PROOFSTEP]
exact uniformContinuous_const_smul _
[GOAL]
case hf
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul m)
[PROOFSTEP]
exact uniformContinuous_const_smul _
[GOAL]
case hg
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul m)
[PROOFSTEP]
exact uniformContinuous_const_smul _
[GOAL]
case hf
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁶ : UniformSpace X
inst✝⁵ : UniformSpace Y
inst✝⁴ : SMul M X
inst✝³ : SMul N X
inst✝² : SMulCommClass M N X
inst✝¹ : UniformContinuousConstSMul M X
inst✝ : UniformContinuousConstSMul N X
m : M
n : N
x : Completion X
hmn : m • n • x = (Completion.map (SMul.smul m) ∘ Completion.map (SMul.smul n)) x
hnm : n • m • x = (Completion.map (SMul.smul n) ∘ Completion.map (SMul.smul m)) x
⊢ UniformContinuous (SMul.smul n)
[PROOFSTEP]
exact uniformContinuous_const_smul _
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁴ : UniformSpace X
inst✝³ : UniformSpace Y
inst✝² : Monoid M
inst✝¹ : MulAction M X
inst✝ : UniformContinuousConstSMul M X
a : X
⊢ 1 • ↑X a = ↑X a
[PROOFSTEP]
rw [← coe_smul, one_smul]
[GOAL]
R : Type u
M : Type v
N : Type w
X : Type x
Y : Type y
inst✝⁴ : UniformSpace X
inst✝³ : UniformSpace Y
inst✝² : Monoid M
inst✝¹ : MulAction M X
inst✝ : UniformContinuousConstSMul M X
x y : M
a : X
⊢ (x * y) • ↑X a = x • y • ↑X a
[PROOFSTEP]
simp only [← coe_smul, mul_smul]
|
function S=streakbar(X,Y,U,V,unit)
% S=streakbar(X,Y,U,V,unit) creates a colorbar for (but not exclusively)
% the function streakarrow.
% The arrays X and Y defines the coordinates for U and V.
% U and V are the same arrays used for streakarrow.
% The string variable unit is the unit of the vector magnitude
% Example:
% streakbar(X,Y,U,V,'m/s')
% Colorbar limits are taken from the range of the vector magnitude
Vmag=sqrt(U.^2+V.^2);
Vmin=min(Vmag(:)); Vmax=max(Vmag(:));
% Create a thin axes to the right of the current plot to host the colorbar
P=get(gca,'position');
%axes('position',[P(1)+P(3)+.02 P(2)+0.01 .01 P(4)-0.02]')
axes('position',[P(1)+P(3)+.02 P(2) .01 P(4)]')
% Draw a 2-by-64 color strip spanning [Vmin, Vmax] with pcolor
[X,Y]=meshgrid( [0 1], linspace(Vmin,Vmax,64));
Q= [1:64; 1:64];
S=pcolor(X', Y',Q); shading flat; set(gca,'XTickLabel',[], 'Yaxislocation', 'right')
title(unit) |
Set Implicit Arguments.
Require Import Bedrock.Platform.Cito.ADT.
Module Make (Import E : ADT).
Require Import Bedrock.Platform.Cito.Semantics.
Module Import SemanticsMake := Semantics.Make E.
Section TopSection.
Require Import Bedrock.Platform.Cito.SemanticsExpr.
Local Infix ";;" := Syntax.Seq (right associativity, at level 95).
Local Notation skip := Syntax.Skip.
Hint Constructors Semantics.RunsTo.
Hint Unfold Safe RunsTo.
Require Import Bedrock.Platform.AutoSep.
Ltac invert :=
match goal with
| [ H : Safe _ _ _ |- _ ] => inversion_clear H; []
| [ H : RunsTo _ _ _ _ |- _ ] => inversion_clear H; []
| [ H : Semantics.Safe _ _ _ |- _ ] => inversion_clear H; []
| [ H : Semantics.RunsTo _ _ _ _ |- _ ] => inversion_clear H; []
end; intuition (subst; unfold vals in *; try congruence).
Ltac t := intros; repeat invert; repeat (eauto; econstructor).
Lemma Safe_Seq_Skip : forall fs k v, Safe fs (skip ;; k) v -> Safe fs k v.
t.
Qed.
Lemma RunsTo_Seq_Skip : forall fs k v v', RunsTo fs k v v' -> RunsTo fs (skip ;; k) v v'.
t.
Qed.
Lemma Safe_Seq_assoc : forall fs a b c v, Safe fs ((a ;; b) ;; c) v -> Safe fs (a ;; b;; c) v.
t.
Qed.
Lemma RunsTo_Seq_assoc : forall fs a b c v v', RunsTo fs (a ;; b ;; c) v v' -> RunsTo fs ((a ;; b) ;; c) v v'.
t.
Qed.
Lemma Safe_Seq_If_true : forall fs e t f k v, Safe fs (Syntax.If e t f ;; k) v -> wneb (eval (fst v) e) $0 = true -> Safe fs (t ;; k) v.
t.
Qed.
Lemma RunsTo_Seq_If_true : forall fs e t f k v v', RunsTo fs (t ;; k) v v' -> wneb (eval (fst v) e) $0 = true -> RunsTo fs (Syntax.If e t f ;; k) v v'.
t.
Qed.
Lemma Safe_Seq_If_false : forall fs e t f k v, Safe fs (Syntax.If e t f ;; k) v -> wneb (eval (fst v) e) $0 = false -> Safe fs (f ;; k) v.
t.
Qed.
Lemma RunsTo_Seq_If_false : forall fs e t f k v v', RunsTo fs (f ;; k) v v' -> wneb (eval (fst v) e) $0 = false -> RunsTo fs (Syntax.If e t f ;; k) v v'.
t.
Qed.
Lemma Safe_Seq_While_false : forall fs e s k v, Safe fs (Syntax.While e s ;; k) v -> wneb (eval (fst v) e) $0 = false -> Safe fs k v.
t.
Qed.
Lemma RunsTo_Seq_While_false : forall fs e s k v v', RunsTo fs k v v' -> wneb (eval (fst v) e) $0 = false -> RunsTo fs (Syntax.While e s ;; k) v v'.
t.
Qed.
Lemma RunsTo_Seq_While_true : forall fs e s k v v', RunsTo fs (s ;; Syntax.While e s ;; k) v v' -> wneb (eval (fst v) e) $0 = true -> RunsTo fs (Syntax.While e s ;; k) v v'.
t.
Qed.
Lemma Safe_Seq_While_true : forall fs e s k v, Safe fs (Syntax.While e s ;; k) v -> wneb (eval (fst v) e) $0 = true -> Safe fs (s ;; Syntax.While e s ;; k) v.
intros.
invert.
inversion H1; clear H1; intros.
subst loop0 loop1; subst.
econstructor; eauto.
intros.
econstructor; eauto.
rewrite H5 in H0; intuition.
Qed.
End TopSection.
End Make.
|
[STATEMENT]
lemma not_in_sub : "var\<notin>(vars (p::real mpoly)) \<Longrightarrow> var\<notin>(vars (q::real mpoly)) \<Longrightarrow> var\<notin>(vars (p-q))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>var \<notin> vars p; var \<notin> vars q\<rbrakk> \<Longrightarrow> var \<notin> vars (p - q)
[PROOF STEP]
using not_in_add not_in_neg
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?var \<notin> vars ?p; ?var \<notin> vars ?q\<rbrakk> \<Longrightarrow> ?var \<notin> vars (?p + ?q)
(?var \<notin> vars ?p) = (?var \<notin> vars (- ?p))
goal (1 subgoal):
1. \<lbrakk>var \<notin> vars p; var \<notin> vars q\<rbrakk> \<Longrightarrow> var \<notin> vars (p - q)
[PROOF STEP]
by fastforce |
[STATEMENT]
lemma meeting_paths_produce_cycle:
assumes xs: "path (v # xs)" "xs \<noteq> Nil"
and ys: "path (v # ys)" "ys \<noteq> Nil"
and meet: "last xs = last ys"
and diverge: "hd xs \<noteq> hd ys"
shows "\<exists>zs. cycle zs \<and> hd zs = v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
have "set xs \<inter> set ys \<noteq> {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set xs \<inter> set ys \<noteq> {}
[PROOF STEP]
using meet xs(2) ys(2) last_in_set
[PROOF STATE]
proof (prove)
using this:
last xs = last ys
xs \<noteq> []
ys \<noteq> []
?as \<noteq> [] \<Longrightarrow> last ?as \<in> set ?as
goal (1 subgoal):
1. set xs \<inter> set ys \<noteq> {}
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
set xs \<inter> set ys \<noteq> {}
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
set xs \<inter> set ys \<noteq> {}
[PROOF STEP]
obtain xs' x xs'' where xs': "xs = xs' @ x # xs''" "set xs' \<inter> set ys = {}" "x \<in> set ys"
[PROOF STATE]
proof (prove)
using this:
set xs \<inter> set ys \<noteq> {}
goal (1 subgoal):
1. (\<And>xs' x xs''. \<lbrakk>xs = xs' @ x # xs''; set xs' \<inter> set ys = {}; x \<in> set ys\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using split_list_first_prop[of xs "\<lambda>x. x \<in> set ys"]
[PROOF STATE]
proof (prove)
using this:
set xs \<inter> set ys \<noteq> {}
\<exists>x\<in>set xs. x \<in> set ys \<Longrightarrow> \<exists>ysa x zs. xs = ysa @ x # zs \<and> x \<in> set ys \<and> (\<forall>y\<in>set ysa. y \<notin> set ys)
goal (1 subgoal):
1. (\<And>xs' x xs''. \<lbrakk>xs = xs' @ x # xs''; set xs' \<inter> set ys = {}; x \<in> set ys\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis disjoint_iff_not_equal)
[PROOF STATE]
proof (state)
this:
xs = xs' @ x # xs''
set xs' \<inter> set ys = {}
x \<in> set ys
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
xs = xs' @ x # xs''
set xs' \<inter> set ys = {}
x \<in> set ys
[PROOF STEP]
obtain ys' ys'' where ys': "ys = ys' @ x # ys''" "x \<notin> set ys'"
[PROOF STATE]
proof (prove)
using this:
xs = xs' @ x # xs''
set xs' \<inter> set ys = {}
x \<in> set ys
goal (1 subgoal):
1. (\<And>ys' ys''. \<lbrakk>ys = ys' @ x # ys''; x \<notin> set ys'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using split_list_first_prop[of ys "\<lambda>y. y = x"]
[PROOF STATE]
proof (prove)
using this:
xs = xs' @ x # xs''
set xs' \<inter> set ys = {}
x \<in> set ys
\<exists>xa\<in>set ys. xa = x \<Longrightarrow> \<exists>ysa xa zs. ys = ysa @ xa # zs \<and> xa = x \<and> (\<forall>y\<in>set ysa. y \<noteq> x)
goal (1 subgoal):
1. (\<And>ys' ys''. \<lbrakk>ys = ys' @ x # ys''; x \<notin> set ys'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
ys = ys' @ x # ys''
x \<notin> set ys'
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
let ?zs = "v # xs' @ x # (rev ys')"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
have "last ?zs\<rightarrow>hd ?zs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')
[PROOF STEP]
using undirected walk_first_edge walk_first_edge' ys'(1) ys(1)
[PROOF STATE]
proof (prove)
using this:
?v \<rightarrow> ?w = ?w \<rightarrow> ?v
walk (?v # ?w # ?xs) \<Longrightarrow> ?v \<rightarrow> ?w
\<lbrakk>walk (?v # ?xs); ?xs \<noteq> []\<rbrakk> \<Longrightarrow> ?v \<rightarrow> hd ?xs
ys = ys' @ x # ys''
path (v # ys)
goal (1 subgoal):
1. last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')
[PROOF STEP]
by (fastforce simp: last_rev)
[PROOF STATE]
proof (state)
this:
last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
have "path ?zs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. path (v # xs' @ x # rev ys')
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. walk (v # xs' @ x # rev ys')
2. distinct (v # xs' @ x # rev ys')
[PROOF STEP]
have "walk (x # rev ys')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. walk (x # rev ys')
[PROOF STEP]
proof(cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ?P \<Longrightarrow> walk (x # rev ys')
2. \<not> ?P \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
assume "ys' = Nil"
[PROOF STATE]
proof (state)
this:
ys' = []
goal (2 subgoals):
1. ?P \<Longrightarrow> walk (x # rev ys')
2. \<not> ?P \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
ys' = []
goal (1 subgoal):
1. walk (x # rev ys')
[PROOF STEP]
using \<open>last ?zs\<rightarrow>hd ?zs\<close> edges_are_in_V(1)
[PROOF STATE]
proof (prove)
using this:
ys' = []
last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')
?v \<rightarrow> ?w \<Longrightarrow> ?v \<in> V
goal (1 subgoal):
1. walk (x # rev ys')
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
walk (x # rev ys')
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
assume "ys' \<noteq> Nil"
[PROOF STATE]
proof (state)
this:
ys' \<noteq> []
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
ys' \<noteq> []
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
hence "last ys'\<rightarrow>x"
[PROOF STATE]
proof (prove)
using this:
ys' \<noteq> []
goal (1 subgoal):
1. last ys' \<rightarrow> x
[PROOF STEP]
using walk_last_edge walk_tl ys'(1) ys(1)
[PROOF STATE]
proof (prove)
using this:
ys' \<noteq> []
\<lbrakk>walk (?xs @ ?ys); ?xs \<noteq> []; ?ys \<noteq> []\<rbrakk> \<Longrightarrow> last ?xs \<rightarrow> hd ?ys
walk ?xs \<Longrightarrow> walk (tl ?xs)
ys = ys' @ x # ys''
path (v # ys)
goal (1 subgoal):
1. last ys' \<rightarrow> x
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
last ys' \<rightarrow> x
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
last ys' \<rightarrow> x
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
have "hd (rev ys') = last ys'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hd (rev ys') = last ys'
[PROOF STEP]
by (simp add: \<open>ys' \<noteq> []\<close> hd_rev)
[PROOF STATE]
proof (state)
this:
hd (rev ys') = last ys'
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
hd (rev ys') = last ys'
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
have "walk (rev ys')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. walk (rev ys')
[PROOF STEP]
by (metis list.sel(3) walk_decomp(1) walk_rev walk_tl ys'(1) ys(1))
[PROOF STATE]
proof (state)
this:
walk (rev ys')
goal (1 subgoal):
1. ys' \<noteq> [] \<Longrightarrow> walk (x # rev ys')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
ys' \<noteq> []
last ys' \<rightarrow> x
hd (rev ys') = last ys'
walk (rev ys')
[PROOF STEP]
show "walk (x # rev ys')"
[PROOF STATE]
proof (prove)
using this:
ys' \<noteq> []
last ys' \<rightarrow> x
hd (rev ys') = last ys'
walk (rev ys')
goal (1 subgoal):
1. walk (x # rev ys')
[PROOF STEP]
using path_cons undirected ys'(1) ys(1)
[PROOF STATE]
proof (prove)
using this:
ys' \<noteq> []
last ys' \<rightarrow> x
hd (rev ys') = last ys'
walk (rev ys')
\<lbrakk>path ?xs; ?xs \<noteq> []; ?v \<rightarrow> hd ?xs; ?v \<notin> set ?xs\<rbrakk> \<Longrightarrow> path (?v # ?xs)
?v \<rightarrow> ?w = ?w \<rightarrow> ?v
ys = ys' @ x # ys''
path (v # ys)
goal (1 subgoal):
1. walk (x # rev ys')
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
walk (x # rev ys')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
walk (x # rev ys')
goal (2 subgoals):
1. walk (v # xs' @ x # rev ys')
2. distinct (v # xs' @ x # rev ys')
[PROOF STEP]
thus "walk (v # xs' @ x # rev ys')"
[PROOF STATE]
proof (prove)
using this:
walk (x # rev ys')
goal (1 subgoal):
1. walk (v # xs' @ x # rev ys')
[PROOF STEP]
using xs'(1) xs(1)
[PROOF STATE]
proof (prove)
using this:
walk (x # rev ys')
xs = xs' @ x # xs''
path (v # xs)
goal (1 subgoal):
1. walk (v # xs' @ x # rev ys')
[PROOF STEP]
by (metis append_Cons list.sel(1) list.simps(3) walk_comp walk_decomp(1) walk_last_edge)
[PROOF STATE]
proof (state)
this:
walk (v # xs' @ x # rev ys')
goal (1 subgoal):
1. distinct (v # xs' @ x # rev ys')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. distinct (v # xs' @ x # rev ys')
[PROOF STEP]
show "distinct (v # xs' @ x # rev ys')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. distinct (v # xs' @ x # rev ys')
[PROOF STEP]
unfolding distinct_append distinct.simps(2) set_append
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v \<notin> set xs' \<union> set (x # rev ys') \<and> distinct xs' \<and> (x \<notin> set (rev ys') \<and> distinct (rev ys')) \<and> set xs' \<inter> set (x # rev ys') = {}
[PROOF STEP]
using xs'(1,2) xs(1) ys'(1) ys(1)
[PROOF STATE]
proof (prove)
using this:
xs = xs' @ x # xs''
set xs' \<inter> set ys = {}
path (v # xs)
ys = ys' @ x # ys''
path (v # ys)
goal (1 subgoal):
1. v \<notin> set xs' \<union> set (x # rev ys') \<and> distinct xs' \<and> (x \<notin> set (rev ys') \<and> distinct (rev ys')) \<and> set xs' \<inter> set (x # rev ys') = {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
distinct (v # xs' @ x # rev ys')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
path (v # xs' @ x # rev ys')
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
path (v # xs' @ x # rev ys')
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
have "length ?zs \<noteq> 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (v # xs' @ x # rev ys') \<noteq> 2
[PROOF STEP]
using diverge xs'(1) ys'(1)
[PROOF STATE]
proof (prove)
using this:
hd xs \<noteq> hd ys
xs = xs' @ x # xs''
ys = ys' @ x # ys''
goal (1 subgoal):
1. length (v # xs' @ x # rev ys') \<noteq> 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
length (v # xs' @ x # rev ys') \<noteq> 2
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')
path (v # xs' @ x # rev ys')
length (v # xs' @ x # rev ys') \<noteq> 2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')
path (v # xs' @ x # rev ys')
length (v # xs' @ x # rev ys') \<noteq> 2
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
using cycleI[of ?zs]
[PROOF STATE]
proof (prove)
using this:
last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')
path (v # xs' @ x # rev ys')
length (v # xs' @ x # rev ys') \<noteq> 2
\<lbrakk>path (v # xs' @ x # rev ys'); 2 < length (v # xs' @ x # rev ys'); last (v # xs' @ x # rev ys') \<rightarrow> hd (v # xs' @ x # rev ys')\<rbrakk> \<Longrightarrow> cycle (v # xs' @ x # rev ys')
goal (1 subgoal):
1. \<exists>zs. cycle zs \<and> hd zs = v
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>zs. cycle zs \<and> hd zs = v
goal:
No subgoals!
[PROOF STEP]
qed |
Require Import List.
Import ListNotations.
Require Import Psatz.
Require Import compcert.lib.Coqlib.
Require Import compcert.lib.Maps.
Require Import compcert.lib.Integers.
Require Import compcert.common.AST.
Require Import compcert.common.Values.
Require Import compcert.common.Globalenvs.
Require Import compcert.common.Memory.
Require Import compcert.common.Events.
Require Import compcert.common.Errors.
Require Import compcert.common.Switch.
Require Import compcert.backend.Cminor.
Require Import StructTact.StructTactics.
Require Import StructTact.Util.
Require Import oeuf.EricTact.
Require Import oeuf.StuartTact.
Require Import oeuf.ListLemmas.
Require Import oeuf.HighValues.
Require Import oeuf.OpaqueTypes.
Require Import oeuf.Monads.
Require Import oeuf.FullSemantics.
Lemma pos_lt_neq :
forall p q,
(p < q)%positive ->
p <> q.
Proof.
intros.
unfold Pos.lt in H.
intro. rewrite <- Pos.compare_eq_iff in H0.
congruence.
Qed.
Lemma load_lt_nextblock :
forall c m b ofs v,
Mem.load c m b ofs = Some v ->
(b < Mem.nextblock m)%positive.
Proof.
intros.
remember (Mem.nextblock_noaccess m) as H2.
clear HeqH2.
destruct (plt b (Mem.nextblock m)). assumption.
app Mem.load_valid_access Mem.load.
unfold Mem.valid_access in *.
break_and. unfold Mem.range_perm in *.
specialize (H ofs).
assert (ofs <= ofs < ofs + size_chunk c).
destruct c; simpl; omega.
specialize (H H3).
unfold Mem.perm in *.
unfold Mem.perm_order' in H.
rewrite H2 in H; eauto. inversion H.
Qed.
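(* mem_locked' m m' b: every load from a block strictly below b that succeeds in m
   succeeds in m' with the same value.  mem_locked m m' specializes this to
   b = Mem.nextblock m, i.e. all blocks already allocated in m are preserved. *)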
Definition mem_locked' (m m' : mem) (b : block) : Prop :=
forall b',
(b' < b)%positive ->
forall ofs c v,
Mem.load c m b' ofs = Some v ->
Mem.load c m' b' ofs = Some v.
Definition mem_locked (m m' : mem) : Prop :=
mem_locked' m m' (Mem.nextblock m).
Lemma alloc_mem_locked :
forall m lo hi m' b,
Mem.alloc m lo hi = (m',b) ->
mem_locked m m'.
Proof.
unfold mem_locked.
unfold mem_locked'.
intros.
app Mem.alloc_result Mem.alloc. subst b.
app load_lt_nextblock Mem.load.
erewrite Mem.load_alloc_unchanged; eauto.
Qed.
Lemma load_all_mem_locked :
forall m m',
mem_locked m m' ->
forall b,
(b < Mem.nextblock m)%positive ->
forall l ofs l',
load_all (arg_addrs b ofs l) m = Some l' ->
load_all (arg_addrs b ofs l) m' = Some l'.
Proof.
induction l; intros.
simpl in H1. inv H1. simpl. reflexivity.
simpl in H1. repeat break_match_hyp; try congruence.
invc H1.
eapply IHl in Heqo0.
simpl. rewrite Heqo0.
unfold mem_locked in H.
unfold mem_locked' in H.
apply H in Heqo; auto. find_rewrite. reflexivity.
Qed.
Lemma load_all_mem_inj_id :
forall m m',
Mem.mem_inj inject_id m m' ->
forall b l ofs l',
load_all (arg_addrs b ofs l) m = Some l' ->
exists l'',
load_all (arg_addrs b ofs l) m' = Some l'' /\
Forall2 (fun a b => Val.lessdef (snd a) (snd b)) l' l''.
Proof.
induction l; intros0 Hload.
{ simpl in *. inject_some. eexists. split; eauto. }
simpl in Hload.
do 2 (break_match; try discriminate). inject_some.
fwd eapply Mem.load_inj as HH; eauto.
{ reflexivity. }
destruct HH as (v' & ? & ?).
rewrite Z.add_0_r in *.
fwd eapply IHl as HH; eauto. destruct HH as (? & ? & ?).
simpl.
on _, fun H => rewrite H.
on _, fun H => rewrite H.
eexists. split; eauto.
econstructor; eauto.
rewrite <- val_inject_id. eauto.
Qed.
Lemma lessdef_def_eq : forall v v',
Val.lessdef v v' ->
v <> Vundef ->
v' = v.
destruct v; intros0 Hld Hundef; try congruence.
all: invc Hld; reflexivity.
Qed.
Lemma value_inject_lessdef : forall A B (ge : Genv.t A B) m hv cv cv',
value_inject ge m hv cv ->
Val.lessdef cv cv' ->
value_inject ge m hv cv'.
induction hv; intros0 Hvi Hld.
- on >@value_inject, invc. on >Val.lessdef, invc.
econstructor; eauto.
- on >@value_inject, invc. on >Val.lessdef, invc.
econstructor; eauto.
- on >@value_inject, invc. econstructor.
fwd eapply lessdef_def_eq; eauto.
{ eapply opaque_type_inject_defined; eauto. }
fix_existT. subst. auto.
Qed.
Lemma value_inject_defined : forall A B (ge : Genv.t A B) m hv cv,
value_inject ge m hv cv ->
cv <> Vundef.
intros0 Hvi. invc Hvi; try discriminate.
- eapply opaque_type_inject_defined; eauto.
Qed.
Lemma load_all_arg_addrs_zip : forall b ofs args m l,
load_all (arg_addrs b ofs args) m = Some l ->
exists cvs,
l = zip args cvs /\
length cvs = length args.
first_induction args; intros0 Hload; simpl in Hload.
{ inject_some. exists []. split; reflexivity. }
do 2 (break_match; try discriminate). inject_some.
fwd eapply IHargs as HH; eauto. destruct HH as (? & ? & ?).
subst.
eexists (_ :: _). simpl. eauto.
Qed.
Lemma Forall2_eq : forall A (xs ys : list A),
Forall2 (fun x y => x = y) xs ys ->
xs = ys.
induction xs; destruct ys; intros0 HH; inversion HH; eauto.
- subst. erewrite IHxs; eauto.
Qed.
Lemma Forall2_eq' : forall A (xs ys : list A),
xs = ys ->
Forall2 (fun x y => x = y) xs ys.
induction xs; destruct ys; intros0 HH; inversion HH; eauto.
- subst. econstructor; eauto.
Qed.
Lemma zip_Forall2_eq_l : forall A B (xs : list A) (ys : list B),
length xs = length ys ->
Forall2 (fun x p => x = fst p) xs (zip xs ys).
induction xs; destruct ys; intros; try discriminate; constructor; eauto.
Qed.
Lemma zip_Forall2_eq_r : forall A B (xs : list A) (ys : list B),
length xs = length ys ->
Forall2 (fun y p => y = snd p) ys (zip xs ys).
induction xs; destruct ys; intros; try discriminate; constructor; eauto.
Qed.
Lemma mem_inj_id_value_inject_transport : forall A B (ge : Genv.t A B) m1 m2,
forall b ofs head vals l',
forall (P : Prop),
Mem.mem_inj inject_id m1 m2 ->
head <> Vundef ->
(forall cvs,
Forall2 (value_inject ge m1) vals cvs ->
Forall2 (value_inject ge m2) vals cvs) ->
(Mem.loadv Mint32 m2 (Vptr b ofs) = Some head ->
load_all (arg_addrs b (Int.add ofs (Int.repr 4)) vals) m2 = Some l' ->
(forall a b, In (a, b) l' -> value_inject ge m2 a b) ->
P) ->
(Mem.loadv Mint32 m1 (Vptr b ofs) = Some head ->
load_all (arg_addrs b (Int.add ofs (Int.repr 4)) vals) m1 = Some l' ->
(forall a b, In (a, b) l' -> value_inject ge m1 a b) ->
P).
intros0 Hmi Hhdef Hvis HP Hhead Hla Hvi.
fwd eapply load_all_mem_inj_id as HH; eauto.
destruct HH as (lv' & ? & ?).
fwd eapply load_all_arg_addrs_zip with (l := l') as HH; eauto.
destruct HH as (cvs1 & ? & ?). subst l'.
fwd eapply load_all_arg_addrs_zip with (l := lv') as HH; eauto.
destruct HH as (cvs2 & ? & ?). subst lv'.
fwd eapply zip_Forall2_eq_l with (xs := vals) (ys := cvs1); eauto.
fwd eapply zip_Forall2_eq_l with (xs := vals) (ys := cvs2); eauto.
fwd eapply zip_Forall2_eq_r with (xs := vals) (ys := cvs1); eauto.
fwd eapply zip_Forall2_eq_r with (xs := vals) (ys := cvs2); eauto.
remember (zip vals cvs1) as ps1.
remember (zip vals cvs2) as ps2.
assert (Forall (fun p => value_inject ge m1 (fst p) (snd p)) ps1).
{ rewrite Forall_forall. destruct x. eauto. }
assert (cvs1 = cvs2).
{ eapply Forall2_eq.
list_magic_on (vals, (cvs1, (cvs2, (ps1, (ps2, tt))))).
subst. symmetry.
eapply lessdef_def_eq; eauto.
eapply value_inject_defined; eauto. }
subst cvs2.
replace ps2 with ps1 in * by congruence. clear dependent ps2.
eapply HP; eauto.
- unfold Mem.loadv in *.
fwd eapply Mem.load_inj as HH; eauto. { reflexivity. } destruct HH as (v' & ? & ?).
rewrite Z.add_0_r in *.
replace head with v'; cycle 1.
{ eapply lessdef_def_eq; eauto. rewrite <- val_inject_id. auto. }
auto.
- cut (Forall (fun p => value_inject ge m2 (fst p) (snd p)) ps1).
{ intros HH. intros. rewrite Forall_forall in HH.
on _, eapply_lem HH. simpl in *. assumption. }
specialize (Hvis cvs1). spec_assert Hvis.
{ list_magic_on (vals, (cvs1, (ps1, tt))). subst. auto. }
list_magic_on (vals, (cvs1, (ps1, tt))).
subst. auto.
Qed.
Lemma mem_inj_id_value_inject :
forall m1 m2,
Mem.mem_inj inject_id m1 m2 ->
forall {A B} (ge : Genv.t A B) hv cv,
value_inject ge m1 hv cv ->
value_inject ge m2 hv cv.
intros0 Hmi. intros ? ? ge.
induction hv using value_rect_mut with
(Pl := fun hvs => forall cvs,
Forall2 (value_inject ge m1) hvs cvs ->
Forall2 (value_inject ge m2) hvs cvs);
intros0 Hvi; simpl in *.
- invc Hvi.
eapply mem_inj_id_value_inject_transport; eauto.
{ discriminate. }
clear H1 H2 H4. intros.
econstructor; eauto.
- invc Hvi.
eapply mem_inj_id_value_inject_transport; eauto.
{ discriminate. }
clear H1 H4 H6. intros.
econstructor; eauto.
- invc Hvi.
fix_existT. subst.
econstructor; eauto.
eapply opaque_type_value_val_inject; eauto.
+ eapply val_inject_id, Val.lessdef_refl.
+ unfold MemInjProps.same_offsets. intros0 HH. invc HH. reflexivity.
- invc Hvi. constructor.
- invc Hvi. econstructor; eauto.
Qed.
Lemma alloc_store :
forall m lo hi m' b,
Mem.alloc m lo hi = (m',b) ->
forall v c,
hi - lo > size_chunk c ->
(align_chunk c | lo) ->
{ m'' : mem | Mem.store c m' b lo v = Some m''}.
Proof.
intros.
app Mem.valid_access_alloc_same Mem.alloc; try omega.
app Mem.valid_access_implies Mem.valid_access.
2: instantiate (1 := Writable); econstructor; eauto.
eapply Mem.valid_access_store; eauto.
Qed.
Definition writable (m : mem) (b : block) (lo hi : Z) : Prop :=
forall ofs k,
lo <= ofs < hi ->
Mem.perm m b ofs k Freeable.
Lemma alloc_writable :
forall m lo hi m' b,
Mem.alloc m lo hi = (m',b) ->
writable m' b lo hi.
Proof.
intros.
unfold writable.
intros.
eapply Mem.perm_alloc_2; eauto.
Qed.
Lemma mem_locked_store_nextblock :
forall m m',
mem_locked m m' ->
forall c ofs v m'',
Mem.store c m' (Mem.nextblock m) ofs v = Some m'' ->
mem_locked m m''.
Proof.
intros.
unfold mem_locked in *.
unfold mem_locked' in *.
intros.
app Mem.load_store_other Mem.store.
rewrite H0.
eapply H; eauto.
left.
eapply pos_lt_neq; eauto.
Qed.
Lemma writable_storeable :
forall m b lo hi,
writable m b lo hi ->
forall c v ofs,
lo <= ofs < hi ->
(align_chunk c | ofs) ->
hi >= ofs + size_chunk c ->
{m' : mem | Mem.store c m b ofs v = Some m' /\ writable m' b lo hi }.
Proof.
intros.
assert (Mem.valid_access m c b ofs Writable).
unfold Mem.valid_access. split; auto.
unfold Mem.range_perm. intros.
unfold writable in H.
eapply Mem.perm_implies; try apply H; eauto; try solve [econstructor].
omega.
app Mem.valid_access_store Mem.valid_access.
destruct H3.
exists x. split. apply e.
unfold writable. intros.
eapply Mem.perm_store_1; eauto.
Qed.
Lemma writable_storevable :
forall m b lo hi,
writable m b lo hi ->
forall c v ofs,
lo <= Int.unsigned ofs < hi ->
(align_chunk c | Int.unsigned ofs) ->
hi >= (Int.unsigned ofs) + size_chunk c ->
{m' : mem | Mem.storev c m (Vptr b ofs) v = Some m' /\ writable m' b lo hi }.
Proof.
intros.
app writable_storeable writable.
Qed.
Lemma mem_locked_load :
forall m m',
mem_locked m m' ->
forall c b ofs v,
Mem.load c m b ofs = Some v ->
Mem.load c m' b ofs = Some v.
Proof.
intros.
unfold mem_locked in *.
unfold mem_locked' in *.
eapply H; eauto.
eapply load_lt_nextblock; eauto.
Qed.
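(* Store the values vs at consecutive offsets ofs, ofs + size_chunk chunk, ...
   of block b, failing if any individual store fails. *)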
Fixpoint store_multi chunk m b ofs vs : option mem :=
match vs with
| [] => Some m
| v :: vs =>
match Mem.store chunk m b ofs v with
| Some m' => store_multi chunk m' b (ofs + size_chunk chunk) vs
| None => None
end
end.
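(* Load n consecutive values of the given chunk from block b, starting at ofs. *)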
Fixpoint load_multi chunk m b ofs n : option (list val) :=
match n with
| O => Some []
| S n =>
match Mem.load chunk m b ofs with
| Some v =>
match load_multi chunk m b (ofs + size_chunk chunk) n with
| Some vs => Some (v :: vs)
| None => None
end
| None => None
end
end.
Lemma shrink_range_perm : forall m b lo1 hi1 lo2 hi2 k p,
Mem.range_perm m b lo1 hi1 k p ->
lo1 <= lo2 ->
hi2 <= hi1 ->
Mem.range_perm m b lo2 hi2 k p.
intros0 Hrp Hlo Hhi. unfold Mem.range_perm in *. intros.
eapply Hrp. lia.
Qed.
Lemma perm_store : forall chunk m1 b ofs v m2,
Mem.store chunk m1 b ofs v = Some m2 ->
forall b' ofs' k p,
Mem.perm m1 b' ofs' k p <-> Mem.perm m2 b' ofs' k p.
intros. split.
- eapply Mem.perm_store_1; eauto.
- eapply Mem.perm_store_2; eauto.
Qed.
Lemma range_perm_store : forall chunk m1 b ofs v m2,
Mem.store chunk m1 b ofs v = Some m2 ->
forall b' lo hi k p,
Mem.range_perm m1 b' lo hi k p <-> Mem.range_perm m2 b' lo hi k p.
intros. unfold Mem.range_perm. split; intros.
- rewrite <- perm_store; eauto.
- rewrite -> perm_store; eauto.
Qed.
Lemma load_multi_spec : forall chunk m b ofs n vs i v,
load_multi chunk m b ofs n = Some vs ->
nth_error vs i = Some v ->
Mem.load chunk m b (ofs + size_chunk chunk * Z.of_nat i) = Some v.
first_induction n; intros0 Hload Hnth; simpl in Hload.
{ inject_some. destruct i; simpl in Hnth. all: discriminate. }
do 2 (break_match; try discriminate). inject_some.
destruct i.
- simpl in Hnth. inject_some.
rewrite Nat2Z.inj_0. replace (ofs + _) with ofs by ring. auto.
- simpl in Hnth.
rewrite Nat2Z.inj_succ. unfold Z.succ.
replace (_ + _) with ((ofs + size_chunk chunk) + (size_chunk chunk * Z.of_nat i)) by ring.
eapply IHn; eauto.
Qed.
Lemma valid_access_store_multi : forall chunk m b ofs vs,
Mem.range_perm m b ofs (ofs + size_chunk chunk * Zlength vs) Cur Writable ->
(align_chunk chunk | ofs) ->
{ m' : mem | store_multi chunk m b ofs vs = Some m' }.
first_induction vs; intros; simpl in *.
{ eauto. }
rename a into v.
fwd eapply Mem.valid_access_store with (m1 := m) (v := v) as HH.
{ econstructor; eauto. eapply shrink_range_perm; eauto.
- lia.
- rewrite Zlength_cons. rewrite <- Zmult_succ_r_reverse.
assert (0 <= size_chunk chunk * Zlength vs).
{ eapply Z.mul_nonneg_nonneg.
- destruct chunk; simpl; lia.
- rewrite Zlength_correct. eapply Zle_0_nat. }
lia.
}
destruct HH as [m' ?].
rewrite range_perm_store in * by eauto.
fwd eapply IHvs with (m := m') (ofs := ofs + size_chunk chunk)
(chunk := chunk) as HH; eauto.
{ eapply shrink_range_perm; eauto.
- assert (0 <= size_chunk chunk) by (destruct chunk; simpl; lia).
lia.
- rewrite Zlength_cons. rewrite <- Zmult_succ_r_reverse.
assert (0 <= size_chunk chunk) by (destruct chunk; simpl; lia).
lia.
}
{ eapply Z.divide_add_r; eauto.
destruct chunk; simpl; eapply Zmod_divide; eauto. all: lia. }
destruct HH as [m'' ?].
exists m''.
on _, fun H => rewrite H. eauto.
Qed.
Lemma Zlength_nonneg : forall A (xs : list A),
0 <= Zlength xs.
intros. rewrite Zlength_correct.
eapply Zle_0_nat.
Qed.
Lemma alloc_range_perm : forall m lo hi m' b,
Mem.alloc m lo hi = (m', b) ->
Mem.range_perm m' b lo hi Cur Freeable.
intros0 Halloc.
unfold Mem.range_perm. intros. break_and.
fwd eapply Mem.valid_access_alloc_same with
(m1 := m) (lo := lo) (hi := hi) (m2 := m') (b := b)
(chunk := Mint8unsigned) (ofs := ofs) as HH; simpl in *; eauto.
{ lia. }
{ eapply Zmod_divide. lia. eapply Zmod_1_r. }
unfold Mem.valid_access, Mem.range_perm in HH.
destruct HH as [HH ?].
fwd eapply (HH ofs).
{ simpl. lia. }
{ auto. }
Qed.
Lemma load_store_multi_other : forall chunk m1 b ofs vs m2,
store_multi chunk m1 b ofs vs = Some m2 ->
forall chunk' b' ofs',
b' <> b \/
ofs' + size_chunk chunk' <= ofs \/
ofs + size_chunk chunk * Zlength vs <= ofs' ->
Mem.load chunk' m2 b' ofs' = Mem.load chunk' m1 b' ofs'.
first_induction vs; intros0 Hstore; intros0 Hnc.
{ simpl in Hstore. inject_some. eauto. }
simpl in Hstore. break_match; try discriminate. rename m2 into m3, m into m2.
fwd eapply Mem.load_store_other with (chunk' := chunk'); eauto.
{ break_or; [|break_or].
- left. eauto.
- right. left. eauto.
- right. right.
rewrite Zlength_cons in *. rewrite <- Zmult_succ_r_reverse in *.
assert (0 <= size_chunk chunk * Zlength vs).
{ eapply Z.mul_nonneg_nonneg.
- destruct chunk; simpl; lia.
- eapply Zlength_nonneg. }
lia.
}
fwd eapply IHvs with (ofs' := ofs') (chunk' := chunk'); eauto.
{ break_or; [|break_or].
- left. eauto.
- right. left.
assert (0 <= size_chunk chunk) by (destruct chunk; simpl; lia).
lia.
- right. right.
rewrite Zlength_cons in *. rewrite <- Zmult_succ_r_reverse in *.
lia.
}
congruence.
Qed.
Lemma int_modulus_big : forall x,
x < 256 ->
x < Int.modulus.
intros. unfold Int.modulus.
replace 256 with (two_power_nat 8) in * by reflexivity.
rewrite two_power_nat_equiv in *.
fwd eapply Z.pow_le_mono_r with (a := 2) (b := 8) (c := Z.of_nat Int.wordsize).
{ lia. }
{ unfold Int.wordsize. simpl. lia. }
lia.
Qed.
Lemma int_unsigned_big : forall x,
x < 256 ->
x <= Int.max_unsigned.
intros.
fwd eapply int_modulus_big with (x := x); eauto.
unfold Int.max_unsigned. lia.
Qed.
Lemma store_multi_load_all_args : forall m1 b args ofs argvs m2,
length args = length argvs ->
0 <= ofs ->
ofs + Zlength args * 4 <= Int.max_unsigned ->
store_multi Mint32 m1 b ofs argvs = Some m2 ->
Forall (fun v => v = Val.load_result Mint32 v) argvs ->
load_all (arg_addrs b (Int.repr ofs) args) m2 = Some (zip args argvs).
first_induction args; destruct argvs; intros0 Hlen Hofs1 Hofs2 Hstore Hi32;
try discriminate.
{ reflexivity. }
simpl in Hstore. simpl.
break_match_hyp; try discriminate. rename m2 into m3, m into m2.
fwd eapply Zlength_nonneg with (xs := a :: args).
rewrite Int.unsigned_repr by lia.
erewrite load_store_multi_other; eauto; cycle 1.
{ right. left. simpl. lia. }
erewrite Mem.load_store_same by eauto.
rewrite Int.add_unsigned.
rewrite Int.unsigned_repr by lia.
rewrite Int.unsigned_repr; cycle 1.
{ split; [lia|]. eapply int_unsigned_big. lia. }
invc Hi32.
erewrite IHargs; eauto.
- congruence.
- lia.
- rewrite Zlength_cons in Hofs2. unfold Z.succ in Hofs2. lia.
Qed.
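(* Largest number of 4-byte argument slots that, together with the 4-byte word at
   offset 0, still fits below Int.max_unsigned; see max_arg_count_ok below. *)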
Definition max_arg_count := Int.max_unsigned / 4 - 1.
Lemma max_arg_count_ok :
4 + max_arg_count * 4 <= Int.max_unsigned.
unfold max_arg_count.
rewrite Z.mul_sub_distr_r.
remember (_ / 4 * 4) as x. replace (4 + (x - 1 * 4)) with x by lia. subst x.
remember Int.max_unsigned as x.
cut (0 <= x - x / 4 * 4). { intro. lia. }
rewrite <- Zmod_eq by lia.
fwd eapply (Z_mod_lt x 4) as HH. { lia. } break_and. auto.
Qed.
Lemma max_arg_count_value_size_ok : forall x,
x <= max_arg_count ->
4 + x * 4 <= Int.max_unsigned.
intros.
eapply Z.le_trans with (m := 4 + max_arg_count * 4).
2: eapply max_arg_count_ok.
eapply Zplus_le_compat_l.
eapply Zmult_le_compat_r; eauto.
lia.
Qed.
Lemma max_arg_count_big : forall x,
x < 256 ->
x <= max_arg_count.
intros.
unfold max_arg_count.
cut (x + 1 <= Int.max_unsigned / 4). { intro. lia. }
eapply Z.div_le_lower_bound. { lia. }
unfold Int.max_unsigned.
cut (4 * (x + 1) + 1 <= Int.modulus). { intro. lia. }
cut (2048 <= Int.modulus). { intro. lia. }
change 2048 with (2 ^ 11). unfold Int.modulus. rewrite two_power_nat_equiv.
eapply Z.pow_le_mono_r.
- lia.
- unfold Int.wordsize. simpl. lia.
Qed.
Lemma value_inject_32bit : forall A B (ge : Genv.t A B) m hv cv,
value_inject ge m hv cv ->
Val.load_result Mint32 cv = cv.
intros0 Hval. invc Hval.
- reflexivity.
- reflexivity.
- eapply opaque_type_value_32bit; eauto.
Qed.
Lemma alloc_mem_inj_id : forall m1 lo hi m2 b,
Mem.alloc m1 lo hi = (m2, b) ->
Mem.mem_inj inject_id m1 m2.
intros.
eapply Mem.alloc_right_inj.
- eapply Mem.mext_inj. eapply Mem.extends_refl.
- eassumption.
Qed.
Definition range_undef m b lo hi :=
forall chunk ofs v,
lo <= ofs < hi ->
Mem.load chunk m b ofs = Some v -> v = Vundef.
(* mem_inj can be carried through a store to a previously nonexistent block *)
Lemma store_new_block_mem_inj_id : forall m1 chunk m2 b ofs v m3,
Mem.mem_inj inject_id m1 m2 ->
(Mem.mem_contents m1) !! b = ZMap.init Undef ->
Mem.store chunk m2 b ofs v = Some m3 ->
Mem.mem_inj inject_id m1 m3.
intros.
eapply Mem.mk_mem_inj.
- intros. unfold inject_id in *. inject_some.
unfold Mem.perm.
replace (Mem.mem_access m3) with (Mem.mem_access m2); cycle 1.
{ symmetry. eapply Mem.store_access; eauto. }
eapply Mem.mi_perm; eauto.
- intros. unfold inject_id in *. inject_some.
destruct chunk0; simpl; eapply Zmod_divide; lia || eapply Zmod_0_l.
- intros. unfold inject_id in *. inject_some.
fwd eapply Mem.store_mem_contents as HH; eauto. rewrite HH. clear HH.
rewrite PMap.gsspec. break_match.
+ (* values inside the modified block *)
replace (ofs0 + 0) with ofs0 by lia.
subst b2.
on (_ = ZMap.init Undef), fun H => rewrite H.
rewrite ZMap.gi. constructor.
+ (* values inside other blocks *)
eapply Mem.mi_memval; eauto.
Qed.
Lemma store_multi_new_block_mem_inj_id : forall m1 chunk m2 b ofs vs m3,
Mem.mem_inj inject_id m1 m2 ->
(Mem.mem_contents m1) !! b = ZMap.init Undef ->
store_multi chunk m2 b ofs vs = Some m3 ->
Mem.mem_inj inject_id m1 m3.
first_induction vs; intros0 Hinj Hnew Hstore; simpl in Hstore.
{ inject_some. eauto. }
break_match; try discriminate. rename m3 into m4, m into m3.
eapply IHvs with (m2 := m3); eauto.
eapply store_new_block_mem_inj_id; eauto.
Qed.
Lemma load_all_load_multi' : forall b ofs args m l,
load_all (arg_addrs b ofs args) m = Some l ->
0 <= Int.unsigned ofs ->
Int.unsigned ofs + 4 * Zlength args <= Int.max_unsigned ->
exists vs,
load_multi Mint32 m b (Int.unsigned ofs) (length args) = Some vs /\
l = zip args vs.
first_induction args; intros0 Hla Hmin Hmax; simpl in Hla.
{ inject_some. simpl. eauto. }
do 2 (break_match; try discriminate). inject_some.
assert (Hzlen : 4 * Zlength (a :: args) = 4 + 4 * Zlength args).
{ rewrite Zlength_cons. unfold Z.succ. ring. }
assert (Hi4 : Int.unsigned (Int.repr 4) = 4).
{ eapply Int.unsigned_repr. split.
- lia.
- eapply int_unsigned_big. lia. }
assert (Hofs4 : Int.unsigned (Int.add ofs (Int.repr 4)) = Int.unsigned ofs + 4).
{ rewrite Int.add_unsigned.
rewrite Int.unsigned_repr, Hi4; [ reflexivity | split ]; rewrite Hi4.
- lia.
- rewrite Hzlen in Hmax.
assert (0 <= 4 * Zlength args).
{ eapply Z.mul_nonneg_nonneg.
- lia.
- eapply Zlength_nonneg. }
lia. }
fwd eapply IHargs as HH; eauto. { lia. } { lia. }
destruct HH as (vs & ? & ?). subst.
eexists. simpl.
on _, fun H => rewrite H.
rewrite <- Hofs4. on _, fun H => rewrite H.
eauto.
Qed.
Lemma load_all_load_multi_4 : forall b args m l,
load_all (arg_addrs b (Int.repr 4) args) m = Some l ->
Zlength args <= max_arg_count ->
exists vs,
load_multi Mint32 m b 4 (length args) = Some vs /\
l = zip args vs.
intros.
assert (Hi4 : Int.unsigned (Int.repr 4) = 4).
{ eapply Int.unsigned_repr. split; [lia|]. eapply int_unsigned_big. lia. }
rewrite <- Hi4.
eapply load_all_load_multi'; eauto; rewrite Hi4.
- lia.
- rewrite Z.mul_comm. eapply max_arg_count_value_size_ok. eauto.
Qed.
Lemma load_multi_load_all' : forall m b ofs n vs args,
load_multi Mint32 m b ofs n = Some vs ->
length args = n ->
0 <= ofs ->
ofs + 4 * Zlength args <= Int.max_unsigned ->
load_all (arg_addrs b (Int.repr ofs) args) m = Some (zip args vs).
first_induction n; intros0 Hload Hlen Hmin Hmax; simpl in Hload.
{ inject_some. destruct args; try discriminate. simpl. reflexivity. }
do 2 (break_match; try discriminate). inject_some. destruct args; try discriminate.
assert (4 * Zlength (v0 :: args) = 4 + 4 * Zlength args).
{ rewrite Zlength_cons. unfold Z.succ. ring. }
assert (0 <= Zlength args) by eapply Zlength_nonneg.
fwd eapply IHn; eauto. { lia. } { lia. }
simpl.
replace (Int.unsigned (Int.repr ofs)) with ofs; cycle 1.
{ symmetry. eapply Int.unsigned_repr. lia. }
replace (Int.add _ _) with (Int.repr (ofs + 4)); cycle 1.
{ rewrite Int.add_unsigned. rewrite 2 Int.unsigned_repr; eauto.
- split; [lia|]. eapply int_unsigned_big. lia.
- lia. }
on _, fun H => rewrite H.
on _, fun H => rewrite H.
eauto.
Qed.
Lemma load_multi_load_all_4 : forall m b n vs args,
load_multi Mint32 m b 4 n = Some vs ->
length args = n ->
Zlength args <= max_arg_count ->
load_all (arg_addrs b (Int.repr 4) args) m = Some (zip args vs).
intros.
eapply load_multi_load_all'; eauto.
- lia.
- rewrite Z.mul_comm. eapply max_arg_count_value_size_ok. eauto.
Qed.
Lemma load_all_inj_id : forall m1 m2 lp lv lp',
Mem.mem_inj inject_id m1 m2 ->
load_all lp m1 = Some lv ->
Forall2 (fun a b => Val.inject inject_id (snd a) (snd b)) lp lp' ->
exists lv',
load_all lp' m2 = Some lv' /\
Forall2 (fun a b => Val.inject inject_id (snd a) (snd b)) lv lv'.
first_induction lp; intros0 Hmi Hload Hvi; simpl in Hload.
{ inject_some. on >Forall2, invc. exists []. eauto. }
break_match. do 2 (break_match; try discriminate). inject_some. on >Forall2, invc.
simpl in * |-.
unfold Mem.loadv in * |-. break_match; try discriminate.
on >Val.inject, invc.
destruct y. simpl in * |-. subst.
unfold inject_id in *. inject_some.
fwd eapply Mem.load_inj as HH; eauto. destruct HH as (v2 & ? & ?).
rewrite Int.add_zero. rewrite Z.add_0_r in *.
fwd eapply IHlp as HH; eauto. destruct HH as (lv' & ? & ?).
simpl.
do 2 on _, fun H => rewrite H.
eexists. split; [ reflexivity | ].
econstructor; eauto.
Qed.
Lemma inject_id_compose_self :
compose_meminj inject_id inject_id = inject_id.
unfold compose_meminj, inject_id. rewrite Z.add_0_r in *. reflexivity.
Qed.
Section MEM_SIM.
Local Open Scope positive_scope.
Definition closure_sig_higher v :=
match v with
| HigherValue.Close fname free => Some (fname, length free)
| _ => None
end.
Definition Plt_dec : forall a b, ({ a < b } + { a >= b })%positive.
intros. destruct (a ?= b)%positive eqn:?.
- right. rewrite Pos.compare_eq_iff in *. lia.
- left. rewrite Pos.compare_lt_iff in *. lia.
- right. rewrite Pos.compare_gt_iff in *. lia.
Defined.
Definition pos_range_dec : forall min max x,
({ x >= min /\ x < max } + { x < min \/ x >= max })%positive.
intros.
destruct (Plt_dec x min), (Plt_dec x max).
- right. left. auto.
- right. left. auto.
- left. split; auto.
- right. right. auto.
Defined.
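(* mem_sim mi mi' m1 m1' m2 m2' relates an old injection mi to an extended
   injection mi' after both memories have grown by allocation; the conjuncts
   below spell out how mi' must treat old and new blocks. *)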
Definition mem_sim (mi mi' : block -> option (block * Z)) m1 m1' m2 m2' :=
(* mi' maps new blocks on the left to new blocks on the right. *)
(forall b,
b >= Mem.nextblock m1 ->
b < Mem.nextblock m1' ->
exists b',
mi' b = Some (b', 0%Z) /\
b' >= Mem.nextblock m2 /\
b' < Mem.nextblock m2') /\
(* mi' behaves like mi on old blocks on the left. *)
(forall b,
b < Mem.nextblock m1 \/ b >= Mem.nextblock m1' ->
mi' b = mi b) /\
(* The new mappings introduced by mi' are injective. *)
(forall b1 b2 b' delta1 delta2,
b1 >= Mem.nextblock m1 ->
b1 < Mem.nextblock m1' ->
b2 >= Mem.nextblock m1 ->
b2 < Mem.nextblock m1' ->
mi' b1 = Some (b', delta1) ->
mi' b2 = Some (b', delta2) ->
b1 = b2) /\
Mem.nextblock m1 <= Mem.nextblock m1' /\
Mem.nextblock m2 <= Mem.nextblock m2'.
Lemma mem_sim_refl : forall mi m1 m1' m2 m2',
Mem.nextblock m1 = Mem.nextblock m1' ->
Mem.nextblock m2 = Mem.nextblock m2' ->
mem_sim mi mi m1 m1' m2 m2'.
intros0 Hnext1 Hnext2. repeat apply conj; intros.
- exfalso. rewrite <- Hnext1 in *. lia.
- reflexivity.
- exfalso. rewrite <- Hnext1 in *. lia.
- rewrite Hnext1. lia.
- rewrite Hnext2. lia.
Qed.
(* Compose memory simulation "vertically", by adding more steps. *)
Lemma mem_sim_compose : forall mi mi' mi'' m1 m1' m1'' m2 m2' m2'',
mem_sim mi mi' m1 m1' m2 m2' ->
mem_sim mi' mi'' m1' m1'' m2' m2'' ->
mem_sim mi mi'' m1 m1'' m2 m2''.
unfold mem_sim. intros0 Hsim Hsim'.
destruct Hsim as (Hnew & Hold & Hinj & Hext1 & Hext2).
destruct Hsim' as (Hnew' & Hold' & Hinj' & Hext1' & Hext2').
repeat apply conj; intros.
- assert (HH : b >= Mem.nextblock m1' \/ b < Mem.nextblock m1'). { lia. } destruct HH.
+ destruct (Hnew' ?? ** ** ) as (b' & ? & ? & ?).
exists b'. repeat apply conj; eauto. lia.
+ destruct (Hnew ?? ** ** ) as (b' & ? & ? & ?).
fwd eapply Hold' as HH; eauto.
exists b'. repeat apply conj; eauto.
* congruence.
* lia.
- eapply eq_trans.
+ eapply Hold'. break_or; [left; lia | right; eauto].
+ eapply Hold. break_or; [left; eauto | right; lia].
- destruct (Plt_dec b1 (Mem.nextblock m1')), (Plt_dec b2 (Mem.nextblock m1')).
+ rewrite Hold' in *; eauto.
+ exfalso.
(* impossible. b1 is old, b2 is new, so they can't both map to b'. *)
rewrite (Hold' b1) in *; eauto.
fwd eapply (Hnew b1) as HH; eauto. destruct HH as (b1' & ? & ? & ?).
fwd eapply (Hnew' b2) as HH; eauto. destruct HH as (b2' & ? & ? & ?).
assert (b1' = b2') by congruence.
assert (b1' < b2') by lia.
subst b1'. lia.
+ exfalso.
(* impossible. b1 is new, b2 is old, so they can't both map to b'. *)
rewrite (Hold' b2) in *; eauto.
fwd eapply (Hnew' b1) as HH; eauto. destruct HH as (b1' & ? & ? & ?).
fwd eapply (Hnew b2) as HH; eauto. destruct HH as (b2' & ? & ? & ?).
assert (b1' = b2') by congruence.
assert (b1' > b2') by lia.
subst b1'. lia.
+ eauto.
- lia.
- lia.
Qed.
Lemma alloc_mem_sim : forall m1 m2 lo hi m1' b1 mi,
Mem.alloc m1 lo hi = (m1', b1) ->
Mem.inject mi m1 m2 ->
exists mi' m2' b2,
Mem.alloc m2 lo hi = (m2', b2) /\
Mem.inject mi' m1' m2' /\
mem_sim mi mi' m1 m1' m2 m2' /\
mi' b1 = Some (b2, 0%Z).
intros0 Halloc Hinj.
fwd eapply Mem.alloc_parallel_inject with (lo2 := lo) (hi2 := hi) as HH; eauto.
{ lia. } { lia. }
destruct HH as (mi' & m2' & b2 & ? & ? & ? & ? & ?).
fwd eapply Mem.nextblock_alloc with (m1 := m1); eauto.
fwd eapply Mem.alloc_result with (m1 := m1); eauto.
fwd eapply Mem.nextblock_alloc with (m1 := m2); eauto.
fwd eapply Mem.alloc_result with (m1 := m2); eauto.
rewrite <- Pos.add_1_l in *.
exists mi', m2', b2. repeat apply conj; eauto.
unfold mem_sim. repeat apply conj; eauto.
- intros.
assert (b = b1). { subst b1. lia. }
subst b.
exists b2. split; eauto. subst. split; lia.
- intros.
assert (b <> b1). { subst b1. lia. }
eauto.
- intros b1' b2'. intros.
assert (b1' = Mem.nextblock m1) by (zify; lia).
assert (b2' = Mem.nextblock m1) by (zify; lia).
congruence.
- lia.
- lia.
Qed.
End MEM_SIM.
Lemma build_constr_inject' : forall A B (ge : Genv.t A B) m0 m1 m2 m3 m4 b tag args argvs,
Forall2 (value_inject ge m0) args argvs ->
Zlength args <= max_arg_count ->
Mem.alloc m0 (-4) ((1 + Zlength args) * 4) = (m1, b) ->
Mem.store Mint32 m1 b (-4) (Vint (Int.repr ((1 + Zlength args) * 4))) = Some m2 ->
Mem.store Mint32 m2 b 0 (Vint tag) = Some m3 ->
store_multi Mint32 m3 b 4 argvs = Some m4 ->
value_inject ge m4 (Constr tag args) (Vptr b Int.zero).
intros0 Hargs Hmax Hm1 Hm2 Hm3 Hm4.
assert ((Mem.mem_contents m1) !! b = ZMap.init Undef).
{ erewrite Mem.contents_alloc; eauto.
erewrite <- Mem.alloc_result; eauto.
erewrite PMap.gss. reflexivity. }
assert (Mem.mem_inj inject_id m0 m4).
{ rewrite <- inject_id_compose_self. eapply Mem.mem_inj_compose with (m2 := m1).
- eapply alloc_mem_inj_id; eauto.
- eapply store_multi_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply Mem.mext_inj, Mem.extends_refl. }
econstructor.
- simpl.
rewrite Int.unsigned_zero.
erewrite load_store_multi_other; eauto; cycle 1.
{ right. left. simpl. lia. }
fwd eapply Mem.load_store_same as HH; eauto.
- eapply store_multi_load_all_args; eauto.
+ eapply Forall2_length; eauto.
+ rewrite Int.unsigned_zero, Int.unsigned_repr; cycle 1.
{ split; [lia|]. eapply int_unsigned_big. lia. }
lia.
+ rewrite Int.unsigned_zero, Int.unsigned_repr; cycle 1.
{ split; [lia|]. eapply int_unsigned_big. lia. }
rewrite Z.add_0_l. eapply max_arg_count_value_size_ok. eauto.
+ list_magic_on (args, (argvs, tt)).
symmetry. eapply value_inject_32bit. eassumption.
- intros0 Hin.
eapply In_nth_error in Hin. destruct Hin as [n ?].
on _, eapply_lem zip_nth_error. break_and.
fwd eapply Forall2_nth_error; eauto.
eapply mem_inj_id_value_inject; eauto.
Qed.
Lemma build_constr_ok' : forall A B (ge : Genv.t A B) m0 tag args argvs,
Forall2 (value_inject ge m0) args argvs ->
Zlength args <= max_arg_count ->
exists m1 m2 m3 m4 b,
Mem.alloc m0 (-4) ((1 + Zlength args) * 4) = (m1, b) /\
Mem.store Mint32 m1 b (-4) (Vint (Int.repr ((1 + Zlength args) * 4))) = Some m2 /\
Mem.store Mint32 m2 b 0 (Vint tag) = Some m3 /\
store_multi Mint32 m3 b 4 argvs = Some m4 /\
value_inject ge m4 (Constr tag args) (Vptr b Int.zero).
intros.
destruct (Mem.alloc m0 (-4) ((1 + Zlength args) * 4)) as [m1 b] eqn:?.
fwd eapply Mem.valid_access_store with
(m1 := m1) (b := b) (ofs := -4) (chunk := Mint32)
(v := Vint (Int.repr ((1 + Zlength args) * 4))) as HH.
{ eapply Mem.valid_access_implies with (p1 := Freeable); cycle 1.
{ constructor. }
eapply Mem.valid_access_alloc_same; eauto.
- lia.
- unfold size_chunk. rewrite Zlength_correct.
fwd eapply Zlength_nonneg with (xs := args). lia.
- simpl. eapply Zmod_divide; eauto; lia.
}
destruct HH as [m2 ?].
fwd eapply Mem.valid_access_store
with (m1 := m2) (b := b) (ofs := 0) (chunk := Mint32) (v := Vint tag) as HH.
{ eapply Mem.valid_access_implies with (p1 := Freeable); cycle 1.
{ constructor. }
eapply Mem.store_valid_access_1; eauto.
eapply Mem.valid_access_alloc_same; eauto.
- clear. lia.
- unfold size_chunk. rewrite Zlength_correct.
fwd eapply Zlength_nonneg with (xs := args). lia.
- simpl. eapply Zmod_divide; eauto; lia.
}
destruct HH as [m3 ?].
fwd eapply (valid_access_store_multi Mint32 m3 b 4 argvs) as HH; eauto.
{ eapply Mem.range_perm_implies with (p1 := Freeable); [ | constructor ].
eapply shrink_range_perm with (lo1 := -4).
- erewrite <- 2 range_perm_store by eauto. eapply alloc_range_perm. eauto.
- clear. lia.
- unfold size_chunk. fwd eapply Forall2_length as HH; eauto. clear -HH.
replace ((1 + Zlength args) * 4) with (4 + 4 * Zlength args) by ring.
rewrite 2 Zlength_correct. rewrite HH. lia.
}
{ simpl. clear. eapply Zmod_divide; eauto. lia. }
destruct HH as [m4 ?].
exists m1, m2, m3, m4, b.
split; eauto.
split; eauto.
split; eauto.
split; eauto.
eapply build_constr_inject'; eauto.
Qed.
Definition val_defined_dec a : { a <> Vundef } + { ~ a <> Vundef }.
destruct a; left + right; congruence.
Defined.
Definition require {A B} : { A } + { B } -> option A.
destruct 1; left + right; solve [eauto].
Defined.
Lemma require_decidable : forall A,
forall (dec : { A } + { ~ A }),
A ->
exists pf, require dec = Some pf.
destruct dec; intro; try contradiction.
eexists. reflexivity.
Qed.
Local Open Scope option_monad.
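(* Build the in-memory representation of a constructor value: allocate a block,
   store its size word ((1 + |args|) * 4) at offset -4, the tag at offset 0, and
   the (required-to-be-defined) argument values at offsets 4, 8, ... *)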
Definition build_constr m tag args :=
let '(m, b) := Mem.alloc m (-4) ((1 + Zlength args) * 4) in
require (Forall_dec _ val_defined_dec args) >>= fun Hargdef =>
Mem.store Mint32 m b (-4) (Vint (Int.repr ((1 + Zlength args) * 4))) >>= fun m =>
Mem.store Mint32 m b 0 (Vint tag) >>= fun m =>
store_multi Mint32 m b 4 args >>= fun m =>
Some (m, Vptr b Int.zero).
Lemma build_constr_inject : forall A B (ge : Genv.t A B) m1 m2 tag args hargs v,
build_constr m1 tag args = Some (m2, v) ->
Forall2 (value_inject ge m1) hargs args ->
Zlength args <= max_arg_count ->
value_inject ge m2 (Constr tag hargs) v.
intros0 Hbuild Hvi Hlen.
unfold build_constr in Hbuild. break_match. break_bind_option. inject_some.
assert (Hlen_eq : length hargs = length args) by eauto using Forall2_length.
eapply build_constr_inject'; eauto.
all: rewrite Zlength_correct in *.
all: rewrite Hlen_eq in *.
all: eauto.
Qed.
Lemma require_bind_eq : forall (A : Prop) B (k : A -> option B) rhs,
forall (dec : { A } + { ~ A }),
A ->
(forall pf, k pf = rhs) ->
require dec >>= k = rhs.
intros0 HA Hk.
destruct dec; [ | contradiction ].
simpl. eauto.
Qed.
Lemma build_constr_ok : forall A B (ge : Genv.t A B) m1 tag args hargs,
Forall2 (value_inject ge m1) hargs args ->
Zlength args <= max_arg_count ->
exists v m2,
build_constr m1 tag args = Some (m2, v) /\
value_inject ge m2 (Constr tag hargs) v.
intros.
assert (Hlen_eq : length hargs = length args) by eauto using Forall2_length.
rewrite Zlength_correct, <- Hlen_eq, <- Zlength_correct in *.
fwd eapply build_constr_ok' as HH; eauto.
rewrite Zlength_correct, Hlen_eq, <- Zlength_correct in *.
destruct HH as (? & ? & ? & m' & b & ? & ? & ? & ? & ?).
eexists _, _.
split; eauto.
unfold build_constr.
on _, fun H => (rewrite H; clear H).
eapply require_bind_eq.
{ list_magic_on (hargs, (args, tt)). eauto using value_inject_defined. }
intro.
on _, fun H => (rewrite H; clear H; simpl).
on _, fun H => (rewrite H; clear H; simpl).
on _, fun H => (rewrite H; clear H; simpl).
reflexivity.
Qed.
Lemma build_constr_mem_inj_id : forall m1 tag args v m2,
build_constr m1 tag args = Some (m2, v) ->
Mem.mem_inj inject_id m1 m2.
intros0 Hbuild.
unfold build_constr in Hbuild. break_match. break_bind_option. inject_some.
rename m2 into m4, m3 into m3, m0 into m2, m1 into m0, m into m1.
assert ((Mem.mem_contents m1) !! b = ZMap.init Undef).
{ erewrite Mem.contents_alloc; eauto.
erewrite <- Mem.alloc_result; eauto.
erewrite PMap.gss. reflexivity. }
rewrite <- inject_id_compose_self. eapply Mem.mem_inj_compose with (m2 := m1).
- eapply alloc_mem_inj_id; eauto.
- eapply store_multi_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply Mem.mext_inj, Mem.extends_refl.
Qed.
Check store_multi.
Lemma store_multi_mapped_inject : forall f chunk m1 b1 ofs vs1 m1' m2 b2 vs2,
Mem.inject f m1 m2 ->
store_multi chunk m1 b1 ofs vs1 = Some m1' ->
f b1 = Some (b2, 0%Z) ->
Forall2 (Val.inject f) vs1 vs2 ->
exists m2',
store_multi chunk m2 b2 ofs vs2 = Some m2' /\
Mem.inject f m1' m2'.
first_induction vs1; intros0 Hmi Hstore Hf Hvi.
all: invc Hvi.
{ simpl in *. inject_some. exists m2. split; eauto. }
simpl in *. break_match_hyp; try discriminate.
rename m1' into m1'', m into m1'.
fwd eapply Mem.store_mapped_inject with (m1 := m1) as HH; eauto.
destruct HH as (m2' & ? & ?).
fwd eapply IHvs1 with (m1 := m1') as HH; eauto.
destruct HH as (m2'' & ? & ?).
exists m2''.
rewrite Z.add_0_r in *. find_rewrite. find_rewrite. eauto.
Qed.
Check Mem.nextblock_store.
Lemma nextblock_store_multi : forall chunk m1 b ofs vs m2,
store_multi chunk m1 b ofs vs = Some m2 ->
Mem.nextblock m2 = Mem.nextblock m1.
first_induction vs; intros0 Hstore; simpl in *.
{ inject_some. reflexivity. }
break_match; try discriminate.
erewrite IHvs by eauto. eapply Mem.nextblock_store; eauto.
Qed.
Definition valid_ptr m v :=
match v with
| Vptr b _ => Mem.valid_block m b
| _ => True
end.
Lemma mem_sim_valid_val_inject : forall v1 v2 mi mi' m1 m1' m2 m2',
Val.inject mi v1 v2 ->
Mem.inject mi m1 m2 ->
mem_sim mi mi' m1 m1' m2 m2' ->
valid_ptr m1 v1 ->
Val.inject mi' v1 v2.
intros0 Hvi Hmi Hsim Hvalid.
invc Hvi; try solve [econstructor; eauto].
simpl in Hvalid.
econstructor; eauto.
destruct Hsim as (Hnew & Hold & Hinj & Hext1 & Hext2).
rewrite Hold; eauto.
Qed.
Lemma mem_sim_valid_val_inject_list : forall vs1 vs2 mi mi' m1 m1' m2 m2',
Forall2 (Val.inject mi) vs1 vs2 ->
Mem.inject mi m1 m2 ->
mem_sim mi mi' m1 m1' m2 m2' ->
Forall (valid_ptr m1) vs1 ->
Forall2 (Val.inject mi') vs1 vs2.
induction vs1; intros0 Hvi Hmi Hsim Hvalid; invc Hvi; invc Hvalid;
econstructor; eauto using mem_sim_valid_val_inject.
Qed.
Lemma build_constr_mem_inject : forall m1 tag args1 m1' v1,
forall mi m2 args2,
build_constr m1 tag args1 = Some (m1', v1) ->
Mem.inject mi m1 m2 ->
MemInjProps.same_offsets mi ->
Forall2 (Val.inject mi) args1 args2 ->
Forall (valid_ptr m1) args1 ->
exists mi' m2' v2,
build_constr m2 tag args2 = Some (m2', v2) /\
Mem.inject mi' m1' m2' /\
Val.inject mi' v1 v2 /\
mem_sim mi mi' m1 m1' m2 m2'.
intros0 Hbuild Hmi Hoff Hvi Hvalid.
unfold build_constr in * |-. break_match_hyp.
rewrite Z.add_comm in *. break_bind_option. inject_some. rewrite Z.add_comm in *.
rename m1' into m1'''', m into m1', m0 into m1'', m3 into m1'''.
rename b into b1.
fwd eapply alloc_mem_sim as HH; eauto.
destruct HH as (mi' & m2' & b2 & ? & ? & ? & ?).
fwd eapply Mem.store_mapped_inject with (m1 := m1') as HH; eauto.
destruct HH as (m2'' & ? & ?).
fwd eapply Mem.store_mapped_inject with (m1 := m1'') as HH; eauto.
destruct HH as (m2''' & ? & ?).
fwd eapply store_multi_mapped_inject with (m1 := m1''') as HH; eauto.
{ eapply mem_sim_valid_val_inject_list; eauto. }
destruct HH as (m2'''' & ? & ?).
eexists mi', m2'''', _.
split; cycle 1.
{ split; [|split]; eauto.
eapply mem_sim_compose; cycle 1.
{ eapply mem_sim_refl; symmetry; eapply nextblock_store_multi; eauto. }
eapply mem_sim_compose; cycle 1.
{ eapply mem_sim_refl; symmetry; eapply Mem.nextblock_store; eauto. }
eapply mem_sim_compose; cycle 1.
{ eapply mem_sim_refl; symmetry; eapply Mem.nextblock_store; eauto. }
eauto. }
assert (Hzlen : Zlength args1 = Zlength args2).
{ rewrite 2 Zlength_correct. f_equal. eauto using Forall2_length. }
unfold build_constr.
rewrite <- Hzlen. on (Mem.alloc _ _ _ = _), fun H => rewrite H.
eapply require_bind_eq.
{ list_magic_on (args1, (args2, tt)).
assert (args1_i <> Vundef).
{ eapply Forall_nth_error with (P := fun v => v <> Vundef); eauto. }
on >Val.inject, invc; eauto. discriminate. }
intro.
rewrite Z.add_0_r in *. on (Mem.store _ m2' _ _ _ = _), fun H => rewrite H. simpl.
on (Mem.store _ m2'' _ _ _ = _), fun H => rewrite H. simpl.
on (store_multi _ m2''' _ _ _ = _), fun H => rewrite H. simpl.
reflexivity.
Qed.
Definition cm_func f := Econst (Oaddrsymbol f Int.zero).
Definition cm_malloc_sig := ef_sig EF_malloc.
Definition cm_int i := Econst (Ointconst (Int.repr i)).
Section BUILD_CONSTR_CMINOR.
Local Notation "A + B" := (Ebinop Oadd A B) : expr_scope.
Local Notation "A <-call ( B , C , D )" := (Scall (Some A) B C D) (at level 70).
Local Notation "A <- B" := (Sassign A B) (at level 70).
Local Notation "A ;; B" := (Sseq A B) (at level 50).
Delimit Scope expr_scope with expr.
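(* Cminor statements that store the expressions args with Mint32 stores at
   base + off, base + off + 4, ... *)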
Fixpoint store_args_cminor base args off :=
match args with
| [] => Sskip
| arg :: args =>
Sstore Mint32 (base + cm_int off)%expr arg ;;
store_args_cminor base args (off + 4)
end.
Lemma valid_pointer_mem_inj_id : forall m m' b ofs,
Mem.valid_pointer m b ofs = true ->
Mem.mem_inj inject_id m m' ->
Mem.valid_pointer m' b ofs = true.
intros0 Hvalid Hmem.
unfold Mem.valid_pointer in *.
destruct (Mem.perm_dec m _ _ _ _); try discriminate.
fwd eapply Mem.mi_perm; try eassumption. { reflexivity. }
destruct (Mem.perm_dec m' _ _ _ _); try reflexivity.
exfalso.
rewrite Z.add_0_r in *. eauto.
Qed.
Lemma weak_valid_pointer_mem_inj_id : forall m m' b ofs,
(Mem.valid_pointer m b ofs || Mem.valid_pointer m b (ofs - 1)) = true ->
Mem.mem_inj inject_id m m' ->
(Mem.valid_pointer m' b ofs || Mem.valid_pointer m' b (ofs - 1)) = true.
intros.
rewrite orb_true_iff in *.
break_or; [left | right]; eapply valid_pointer_mem_inj_id; eauto.
Qed.
Lemma cmpu_bool_mem_inj_id : forall m m' cmp a b r,
Val.cmpu_bool (Mem.valid_pointer m) cmp a b = Some r ->
Mem.mem_inj inject_id m m' ->
Val.cmpu_bool (Mem.valid_pointer m') cmp a b = Some r.
intros0 Hcmpu Hmem.
destruct a, b; try discriminate; simpl in *.
- eauto.
- break_match_hyp; try discriminate.
rewrite andb_true_iff in *. break_and. find_rewrite. simpl.
erewrite weak_valid_pointer_mem_inj_id; eauto.
- break_match_hyp; try discriminate.
rewrite andb_true_iff in *. break_and. find_rewrite. simpl.
erewrite weak_valid_pointer_mem_inj_id; eauto.
- break_if.
+ break_match_hyp; try discriminate.
rewrite andb_true_iff in *. break_and.
do 2 erewrite weak_valid_pointer_mem_inj_id by eauto.
simpl. eauto.
+ break_match_hyp; try discriminate.
rewrite andb_true_iff in *. break_and.
do 2 erewrite valid_pointer_mem_inj_id by eauto.
simpl. eauto.
Qed.
Lemma eval_binop_mem_inj_id : forall op a b r m m',
eval_binop op a b m = Some r ->
r <> Vundef ->
Mem.mem_inj inject_id m m' ->
eval_binop op a b m' = Some r.
destruct op; intros0 Heval Hdef Hmem; simpl; eauto.
- (* Ocmpu *)
unfold eval_binop, Val.cmpu, Val.of_optbool in *.
inject_some. f_equal.
break_match_hyp; try (exfalso; congruence).
erewrite cmpu_bool_mem_inj_id; eauto.
Qed.
Lemma eval_unop_undef : forall op v v',
eval_unop op v = Some v' ->
v = Vundef ->
v' = Vundef.
destruct op; intros0 Heval Hundef; subst v; simpl in *;
discriminate || inject_some; eauto.
Qed.
Lemma eval_binop_undef1 : forall op v1 v2 m v',
eval_binop op v1 v2 m = Some v' ->
v1 = Vundef ->
v' = Vundef.
destruct op; intros0 Heval Hundef; subst v1; simpl in *;
discriminate || inject_some; eauto.
Qed.
Lemma eval_binop_undef2 : forall op v1 v2 m v',
eval_binop op v1 v2 m = Some v' ->
v2 = Vundef ->
v' = Vundef.
destruct op; intros0 Heval Hundef; subst v2; simpl in *.
all: destruct v1; try discriminate.
all: invc Heval; reflexivity.
Qed.
Lemma eval_unop_defined : forall op v v',
eval_unop op v = Some v' ->
v' <> Vundef ->
v <> Vundef.
intros0 Heval Hdef. contradict Hdef. eauto using eval_unop_undef.
Qed.
Lemma eval_binop_defined1 : forall op v1 v2 m v',
eval_binop op v1 v2 m = Some v' ->
v' <> Vundef ->
v1 <> Vundef.
intros0 Heval Hdef. contradict Hdef. eauto using eval_binop_undef1.
Qed.
Lemma eval_binop_defined2 : forall op v1 v2 m v',
eval_binop op v1 v2 m = Some v' ->
v' <> Vundef ->
v2 <> Vundef.
intros0 Heval Hdef. contradict Hdef. eauto using eval_binop_undef2.
Qed.
Lemma eval_expr_mem_inj_id : forall m m' ge sp e a b,
eval_expr ge sp e m a b ->
b <> Vundef ->
Mem.mem_inj inject_id m m' ->
eval_expr ge sp e m' a b.
induction 1; intros0 Hdef Hmem; try solve [econstructor; eauto].
- econstructor; eauto.
eapply IHeval_expr; eauto using eval_unop_defined.
- econstructor; eauto using eval_binop_defined1, eval_binop_defined2.
eapply eval_binop_mem_inj_id; eauto.
- destruct vaddr; try discriminate.
econstructor; eauto.
+ eapply IHeval_expr; eauto. discriminate.
+ simpl.
fwd eapply Mem.load_inj as HH; try eassumption. { reflexivity. }
destruct HH as (v' & ? & ?).
rewrite val_inject_id in *.
fwd eapply lessdef_def_eq; eauto. subst v'.
rewrite Z.add_0_r in *. eauto.
Qed.
Lemma eval_exprlist_mem_inj_id : forall m m' ge sp e es vs,
eval_exprlist ge sp e m es vs ->
Forall (fun v => v <> Vundef) vs ->
Mem.mem_inj inject_id m m' ->
eval_exprlist ge sp e m' es vs.
induction 1; intros0 Hdef Hmem; invc Hdef;
econstructor; eauto using eval_expr_mem_inj_id.
Qed.
Lemma store_args_cminor_effect : forall m0 ge sp e m base b ofs delta es vs m' f k,
store_multi Mint32 m b (ofs + delta) vs = Some m' ->
eval_expr ge sp e m0 base (Vptr b (Int.repr ofs)) ->
eval_exprlist ge sp e m0 es vs ->
Forall (fun v => v <> Vundef) vs ->
Mem.mem_inj inject_id m0 m ->
(Mem.mem_contents m0) !! b = ZMap.init Undef ->
0 <= ofs ->
0 <= delta ->
ofs + delta + 4 * Zlength es <= Int.max_unsigned ->
star Cminor.step ge
(State f (store_args_cminor base es delta) k sp e m)
E0 (State f Sskip k sp e m').
first_induction es; intros0 Hstore Hbase Heval Hdef Hnewblock Hmem Hmin1 Hmin2 Hmax.
all: on >eval_exprlist, invc.
{ simpl in *. inject_some. eapply star_refl. }
simpl in Hstore. break_match; try discriminate.
fwd eapply Zlength_nonneg with (xs := a :: es).
invc Hdef.
fwd eapply eval_expr_mem_inj_id with (a := a); eauto.
fwd eapply eval_expr_mem_inj_id with (a := base); eauto. { discriminate. }
fwd eapply eval_exprlist_mem_inj_id; eauto.
eapply star_left with (t1 := E0) (t2 := E0); eauto.
{ simpl. econstructor. }
eapply star_left with (t1 := E0) (t2 := E0); eauto.
{ econstructor.
- econstructor; eauto.
+ econstructor. simpl. reflexivity.
+ simpl. reflexivity.
- eauto.
- simpl. rewrite Int.add_unsigned.
rewrite Int.unsigned_repr with (z := ofs) by lia.
rewrite Int.unsigned_repr with (z := delta) by lia.
rewrite Int.unsigned_repr by lia.
eauto.
}
eapply star_left with (t1 := E0) (t2 := E0); eauto.
{ econstructor. }
eapply (IHes m0); try eassumption.
- rewrite Z.add_assoc. eauto.
- eapply store_new_block_mem_inj_id; eauto.
- lia.
- rewrite Zlength_cons in *. unfold Z.succ in *. rewrite Z.mul_add_distr_l in *.
lia.
Qed.
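(* Cminor code mirroring build_constr: call malloc for 4 * (1 + |args|) bytes,
   store the tag at the returned pointer, then store the arguments from offset 4. *)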
Definition build_constr_cminor malloc_id id tag args :=
let sz := 4 * (1 + Zlength args) in
id <-call (cm_malloc_sig, cm_func malloc_id, [cm_int sz]) ;;
Sstore Mint32 (Evar id) (Econst (Ointconst tag)) ;;
store_args_cminor (Evar id) args 4.
Fixpoint expr_no_access id e :=
match e with
| Evar id' => id <> id'
| Econst _ => True
| Eunop _ a => expr_no_access id a
| Ebinop _ a b => expr_no_access id a /\ expr_no_access id b
| Eload _ a => expr_no_access id a
end.
Definition eval_expr_no_access : forall ge sp e m a b id v,
eval_expr ge sp e m a b ->
expr_no_access id a ->
eval_expr ge sp (PTree.set id v e) m a b.
induction 1; intros0 Hacc; econstructor; eauto.
- rewrite PTree.gso; eauto.
- invc Hacc. eauto.
- invc Hacc. eauto.
Qed.
Definition eval_exprlist_no_access : forall ge sp e m a b id v,
eval_exprlist ge sp e m a b ->
Forall (expr_no_access id) a ->
eval_exprlist ge sp (PTree.set id v e) m a b.
induction 1; intros0 Hacc; invc Hacc; econstructor; eauto using eval_expr_no_access.
Qed.
Lemma E0_E0_E0 : E0 = Eapp E0 E0.
reflexivity.
Qed.
Lemma build_constr_cminor_effect : forall malloc_id m tag args argvs v m',
forall ge f id k sp e fp,
build_constr m tag argvs = Some (m', v) ->
eval_exprlist ge sp e m args argvs ->
Forall (expr_no_access id) args ->
Zlength args <= max_arg_count ->
Genv.find_symbol ge malloc_id = Some fp ->
Genv.find_funct ge (Vptr fp Int.zero) = Some (External EF_malloc) ->
plus Cminor.step ge
(State f (build_constr_cminor malloc_id id tag args) k sp e m)
E0 (State f Sskip k sp (PTree.set id v e) m').
intros0 Hbuild Heval Hacc Hargc Hmsym Hmfun.
unfold build_constr in Hbuild. break_match. break_bind_option. inject_some.
assert (Hzlen : Zlength args = Zlength argvs).
{ do 2 rewrite Zlength_correct. f_equal.
clear -Heval. induction Heval; simpl; f_equal; eauto. }
assert ((Mem.mem_contents m0) !! b = ZMap.init Undef).
{ erewrite Mem.contents_alloc; eauto.
erewrite <- Mem.alloc_result; eauto.
erewrite PMap.gss. reflexivity. }
eapply plus_left. 3: eapply E0_E0_E0. { econstructor. }
eapply star_left. 3: eapply E0_E0_E0. { econstructor. }
eapply star_left. 3: eapply E0_E0_E0. {
econstructor.
- econstructor. simpl. rewrite Hmsym. reflexivity.
- repeat econstructor.
- rewrite Hmfun. reflexivity.
- reflexivity.
}
eapply star_left. 3: eapply E0_E0_E0. {
econstructor. econstructor.
- rewrite Int.unsigned_repr; cycle 1.
{ replace (4 * _) with (4 + Zlength args * 4) by ring.
split.
- fwd eapply Zlength_nonneg with (xs := args). lia.
- eapply max_arg_count_value_size_ok. eauto. }
rewrite Z.mul_comm, Hzlen. eauto.
- rewrite Z.mul_comm, Hzlen. eauto.
}
eapply star_left. 3: eapply E0_E0_E0. { econstructor. }
eapply star_left. 3: eapply E0_E0_E0. { econstructor. }
eapply star_left. 3: eapply E0_E0_E0. {
econstructor.
- econstructor. simpl. rewrite PTree.gss. reflexivity.
- econstructor. simpl. reflexivity.
- simpl. rewrite Int.unsigned_zero. eauto.
}
eapply star_left. 3: eapply E0_E0_E0. { econstructor. }
eapply store_args_cminor_effect with (ofs := 0) (m0 := m0).
- simpl. eauto.
- econstructor. rewrite PTree.gss. reflexivity.
- eapply eval_exprlist_mem_inj_id; cycle 1.
+ eauto.
+ eapply alloc_mem_inj_id; eauto.
+ eapply eval_exprlist_no_access; eauto.
- eauto.
- eapply store_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply Mem.mext_inj, Mem.extends_refl.
- eauto.
- lia.
- lia.
- rewrite Z.add_0_l. rewrite Z.mul_comm.
eapply max_arg_count_value_size_ok. eauto.
Qed.
End BUILD_CONSTR_CMINOR.
Lemma build_close_inject' : forall A B (ge : Genv.t A B) m0 m1 m2 m3 m4 b fname free freev,
forall bcode fp,
Genv.find_symbol ge fname = Some bcode ->
Genv.find_funct_ptr ge bcode = Some fp ->
Forall2 (value_inject ge m0) free freev ->
Zlength free <= max_arg_count ->
Mem.alloc m0 (-4) ((1 + Zlength free) * 4) = (m1, b) ->
Mem.store Mint32 m1 b (-4) (Vint (Int.repr ((1 + Zlength free) * 4))) = Some m2 ->
Mem.store Mint32 m2 b 0 (Vptr bcode Int.zero) = Some m3 ->
store_multi Mint32 m3 b 4 freev = Some m4 ->
value_inject ge m4 (Close fname free) (Vptr b Int.zero).
intros0 Hsym Hfp Hfree Hmax Hm1 Hm2 Hm3 H4.
assert ((Mem.mem_contents m1) !! b = ZMap.init Undef).
{ erewrite Mem.contents_alloc; eauto.
erewrite <- Mem.alloc_result; eauto.
erewrite PMap.gss. reflexivity. }
assert (Mem.mem_inj inject_id m0 m4).
{ rewrite <- inject_id_compose_self. eapply Mem.mem_inj_compose with (m2 := m1).
- eapply alloc_mem_inj_id; eauto.
- eapply store_multi_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply Mem.mext_inj, Mem.extends_refl. }
econstructor.
- simpl.
rewrite Int.unsigned_zero.
erewrite load_store_multi_other; eauto; cycle 1.
{ right. left. simpl. lia. }
fwd eapply Mem.load_store_same as HH; eauto.
- eauto.
- eauto.
- eapply store_multi_load_all_args; eauto.
+ eapply Forall2_length; eauto.
+ rewrite Int.unsigned_zero, Int.unsigned_repr; cycle 1.
{ split; [lia|]. eapply int_unsigned_big. lia. }
lia.
+ rewrite Int.unsigned_zero, Int.unsigned_repr; cycle 1.
{ split; [lia|]. eapply int_unsigned_big. lia. }
rewrite Z.add_0_l. eapply max_arg_count_value_size_ok. eauto.
+ list_magic_on (free, (freev, tt)).
symmetry. eapply value_inject_32bit. eassumption.
- intros0 Hin.
eapply In_nth_error in Hin. destruct Hin as [n ?].
on _, eapply_lem zip_nth_error. break_and.
fwd eapply Forall2_nth_error; eauto.
eapply mem_inj_id_value_inject; eauto.
Qed.
Lemma build_close_ok' : forall A B (ge : Genv.t A B) m0 fname free freev,
forall bcode fp,
Genv.find_symbol ge fname = Some bcode ->
Genv.find_funct_ptr ge bcode = Some fp ->
Forall2 (value_inject ge m0) free freev ->
Zlength free <= max_arg_count ->
exists m1 m2 m3 m4 b,
Mem.alloc m0 (-4) ((1 + Zlength free) * 4) = (m1, b) /\
Mem.store Mint32 m1 b (-4) (Vint (Int.repr ((1 + Zlength free) * 4))) = Some m2 /\
Mem.store Mint32 m2 b 0 (Vptr bcode Int.zero) = Some m3 /\
store_multi Mint32 m3 b 4 freev = Some m4 /\
value_inject ge m4 (Close fname free) (Vptr b Int.zero).
intros.
destruct (Mem.alloc m0 (-4) ((1 + Zlength free) * 4)) as [m1 b] eqn:?.
fwd eapply Mem.valid_access_store with
(m1 := m1) (b := b) (ofs := -4) (chunk := Mint32)
(v := Vint (Int.repr ((1 + Zlength free) * 4))) as HH.
{ eapply Mem.valid_access_implies with (p1 := Freeable); cycle 1.
{ constructor. }
eapply Mem.valid_access_alloc_same; eauto.
- lia.
- unfold size_chunk. rewrite Zlength_correct.
fwd eapply Zlength_nonneg with (xs := free). lia.
- simpl. eapply Zmod_divide; eauto; lia.
}
destruct HH as [m2 ?].
fwd eapply Mem.valid_access_store with
(m1 := m2) (b := b) (ofs := 0) (chunk := Mint32) (v := Vptr bcode Int.zero) as HH.
{ eapply Mem.valid_access_implies with (p1 := Freeable); cycle 1.
{ constructor. }
eapply Mem.store_valid_access_1; eauto.
eapply Mem.valid_access_alloc_same; eauto.
- clear. lia.
- unfold size_chunk. rewrite Zlength_correct.
fwd eapply Zlength_nonneg with (xs := free). lia.
- simpl. eapply Zmod_divide; eauto; lia.
}
destruct HH as [m3 ?].
fwd eapply (valid_access_store_multi Mint32 m3 b 4 freev) as HH; eauto.
{ eapply Mem.range_perm_implies with (p1 := Freeable); [ | constructor ].
eapply shrink_range_perm with (lo1 := -4).
- erewrite <- 2 range_perm_store by eauto. eapply alloc_range_perm. eauto.
- clear. lia.
- unfold size_chunk. fwd eapply Forall2_length as HH; eauto. clear -HH.
replace ((1 + Zlength free) * 4) with (4 + 4 * Zlength free) by ring.
rewrite 2 Zlength_correct. rewrite HH. lia.
}
{ simpl. clear. eapply Zmod_divide; eauto. lia. }
destruct HH as [m4 ?].
exists m1, m2, m3, m4, b.
split; eauto.
split; eauto.
split; eauto.
split; eauto.
eapply build_close_inject'; eauto.
Qed.
Local Open Scope option_monad.
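(* Like build_constr, but for closures: the word at offset 0 is a pointer to the
   code block resolved from fname, and the free variables are stored from offset 4. *)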
Definition build_close {A B} (ge : Genv.t A B) m fname free :=
Genv.find_symbol ge fname >>= fun bcode =>
Genv.find_funct_ptr ge bcode >>= fun fp =>
let '(m, b) := Mem.alloc m (-4) ((1 + Zlength free) * 4) in
Mem.store Mint32 m b (-4) (Vint (Int.repr ((1 + Zlength free) * 4))) >>= fun m =>
Mem.store Mint32 m b 0 (Vptr bcode Int.zero) >>= fun m =>
store_multi Mint32 m b 4 free >>= fun m =>
Some (m, Vptr b Int.zero).
Lemma build_close_inject : forall A B (ge : Genv.t A B) m1 m2 fname free hfree v,
build_close ge m1 fname free = Some (m2, v) ->
Forall2 (value_inject ge m1) hfree free ->
Zlength free <= max_arg_count ->
value_inject ge m2 (Close fname hfree) v.
intros0 Hbuild Hvi Hlen.
unfold build_close in Hbuild. break_match. break_bind_option. inject_some.
assert (Hlen_eq : length hfree = length free) by eauto using Forall2_length.
eapply build_close_inject'; eauto.
all: rewrite Zlength_correct in *.
all: rewrite Hlen_eq in *.
all: eauto.
Qed.
Lemma build_close_ok : forall A B (ge : Genv.t A B) m1 fname free hfree,
forall bcode fp,
Genv.find_symbol ge fname = Some bcode ->
Genv.find_funct_ptr ge bcode = Some fp ->
Forall2 (value_inject ge m1) hfree free ->
Zlength free <= max_arg_count ->
exists v m2,
build_close ge m1 fname free = Some (m2, v) /\
value_inject ge m2 (Close fname hfree) v.
intros.
assert (Hlen_eq : length hfree = length free) by eauto using Forall2_length.
rewrite Zlength_correct, <- Hlen_eq, <- Zlength_correct in *.
fwd eapply build_close_ok' as HH; eauto.
rewrite Zlength_correct, Hlen_eq, <- Zlength_correct in *.
destruct HH as (? & ? & ? & m' & b & ? & ? & ? & ? & ?).
eexists _, _.
split; eauto.
unfold build_close.
on _, fun H => (rewrite H; clear H).
on _, fun H => (rewrite H; clear H; simpl).
on _, fun H => (rewrite H; clear H; simpl).
on _, fun H => (rewrite H; clear H; simpl).
on _, fun H => (rewrite H; clear H; simpl).
on _, fun H => (rewrite H; clear H; simpl).
reflexivity.
Qed.
Lemma build_close_mem_inj_id : forall A B (ge : Genv.t A B) m1 fname free v m2,
build_close ge m1 fname free = Some (m2, v) ->
Mem.mem_inj inject_id m1 m2.
intros0 Hbuild.
unfold build_close in Hbuild. break_match. break_bind_option. inject_some.
rename m2 into m4, m3 into m3, m0 into m2, m1 into m0, m into m1.
assert ((Mem.mem_contents m1) !! b = ZMap.init Undef).
{ erewrite Mem.contents_alloc; eauto.
erewrite <- Mem.alloc_result; eauto.
erewrite PMap.gss. reflexivity. }
rewrite <- inject_id_compose_self. eapply Mem.mem_inj_compose with (m2 := m1).
- eapply alloc_mem_inj_id; eauto.
- eapply store_multi_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply store_new_block_mem_inj_id; eauto.
eapply Mem.mext_inj, Mem.extends_refl.
Qed.
|
open import Logic
open import Type
module Structure.Sets.Quantifiers {ℓₑ ℓₛ ℓₗ}{E : Type{ℓₑ}}{S : Type{ℓₛ}} (_∈_ : E → S → Stmt{ℓₗ}) where
import Lvl
open import Logic.Propositional
open import Logic.Predicate
open import Syntax.Function
private variable ℓ : Lvl.Level
-- Set restricted existential quantifier.
∃ₛ : S → (E → Stmt{ℓ}) → Stmt
∃ₛ(A) P = ∃(x ↦ (x ∈ A) ∧ P(x))
-- Set restricted universal quantifier.
∀ₛ : S → (E → Stmt{ℓ}) → Stmt
∀ₛ(A) P = ∀ₗ(x ↦ ((x ∈ A) → P(x)))
|
If $f$ converges to $l$ and $l$ is not an integer, then the ceiling of $f$ converges to the ceiling of $l$. |
!/ ------------------------------------------------------------------- /
MODULE W3CONSTANTS
!/
!/ +-----------------------------------+
!/ | WAVEWATCH III NOAA/NCEP |
!/ | H. L. Tolman |
!/ | FORTRAN 90 |
!/ | Last update : 29-May-2009 |
!/ +-----------------------------------+
!/
!/ 11-Nov-1999 : Fortran 90 version. ( version 2.00 )
!/ 29-May-2009 : Preparing distribution version. ( version 3.14 )
!/
!/ Copyright 2009 National Weather Service (NWS),
!/ National Oceanic and Atmospheric Administration. All rights
!/ reserved. WAVEWATCH III is a trademark of the NWS.
!/ No unauthorized use without permission.
!/
! 1. Purpose :
!
! Define some much-used constants for global use (all defined
! as PARAMETER).
!
! 2. Variables and types :
!
! Name Type Scope Description
! ----------------------------------------------------------------
! GRAV Real Global Acc. of gravity (m/s2)
! DWAT Real Global Density of water (kg/m3)
! DAIR Real Global Density of air (kg/m3)
! PI Real Global pi.
! TPI Real Global 2pi.
! HPI Real Global 0.5pi.
! TPIINV Real Global 1/2pi.
! HPIINV Real Global 2/pi.
! RADE Real Global Conv. factor from radians to degrees.
! DERA Real Global Conv. factor from degrees to radians.
! RADIUS Real Global Radius of the earth. (m)
! ----------------------------------------------------------------
!
!/ ------------------------------------------------------------------- /
!/
REAL, PARAMETER :: GRAV = 9.806
REAL, PARAMETER :: DWAT = 1000.
REAL, PARAMETER :: DAIR = 1.225
!
REAL, PARAMETER :: PI = 3.1415927
REAL, PARAMETER :: TPI = 2.0 * PI
REAL, PARAMETER :: HPI = 0.5 * PI
REAL, PARAMETER :: TPIINV = 1. / TPI
REAL, PARAMETER :: HPIINV = 1. / HPI
REAL, PARAMETER :: RADE = 180. / PI
REAL, PARAMETER :: DERA = PI / 180.
!
REAL, PARAMETER :: RADIUS = 4.E7 * TPIINV
!
REAL, PARAMETER :: G2PI3I = 1. / ( GRAV**2 * TPI**3 )
REAL, PARAMETER :: G1PI1I = 1. / ( GRAV * TPI )
!/
!/ End of module CONSTANTS ------------------------------------------- /
!/
END MODULE W3CONSTANTS
|
# Imports and preamble
```python
# !pip install pulp
# !apt-get install glpk-utils
# !apt-get install coinor-cbc
```
Collecting pulp
  Downloading https://files.pythonhosted.org/packages/fb/34/ff5915ff6bae91cfb7c4cc22c3c369a6aea0b2127045dd5f308a91c260ac/PuLP-2.0-py3-none-any.whl (39.2MB)
    100% |████████████████████████████████| 39.2MB 1.1MB/s
Requirement already satisfied: pyparsing>=2.0.1 in /home/jefehern/anaconda3/lib/python3.7/site-packages (from pulp) (2.3.1)
Installing collected packages: pulp
Successfully installed pulp-2.0
E: Could not open lock file /var/lib/dpkg/lock-frontend - open (13: Permission denied)
E: Unable to acquire the dpkg frontend lock (/var/lib/dpkg/lock-frontend), are you root?
```python
import numpy as np
import pulp
```
## Kestrel solver
```python
import time
import xmlrpc.client
import sys
import os
import lxml.etree
import lxml.builder
from pulp.solvers import LpSolver_CMD
from pulp.solvers import PulpSolverError
from pulp.constants import *
```
```python
class Kestrel(LpSolver_CMD):
"""
API Wrapper for Neos Solver XML-RPC API. Compatible with pulp linear programming api.
Only Xpress solver works; in the future more solvers will be added
    :param ptype: problem type, for example 'milp' (mixed integer linear programming);
        only tested with 'milp' and 'lp'
    :param maximize: truthy to maximize, otherwise the problem is minimized
    :param email: add your email
    :param priority: default 'long'; if 'short' is specified the job will be killed after 5 min
    :param keepFiles = 0, mip = 1, msg = 0, options = []: necessary to make LpSolver_CMD work
"""
def __init__(self, ptype, maximize=None, email=None, priority=None, keepFiles=0, mip=1, msg=0, options=[]):
self.ptype = ptype
if maximize:
self.maximize = 'yes'
else:
self.maximize = "no"
if email:
self.email=email
else:
raise ValueError('Missing email')
if priority:
self.priority = priority
else:
self.priority = 'long'
LpSolver_CMD.__init__(self, './', keepFiles, mip, msg, options)
def copy(self):
"""Make a copy of self"""
aCopy = LpSolver_CMD.copy(self)
aCopy.cuts = self.cuts
aCopy.presolve = self.presolve
aCopy.dual = self.dual
aCopy.strong = self.strong
return aCopy
def actualSolve(self, lp, **kwargs):
"""Solve a well formulated lp problem"""
return self.solve_Kestrel(lp, **kwargs)
def available(self):
"""True if the solver is available"""
neos = xmlrpc.client.ServerProxy("https://neos-server.org:3333")
alive = neos.ping()
if alive!="NeosServer is alive\n":
print("Could not make connection to NEOS Server")
available = False
else:
available = True
return available
def solve_Kestrel(self, lp):
vs = lp.writeMPS("problem.mps", rename = 0)
file = open('problem.mps', "r")
E = lxml.builder.ElementMaker()
root = E.document
field1 = E.category
field2 = E.solver
field3 = E.inputMethod
field4 = E.MPS
field5 = E.maximize
field6 = E.nosol
field7 = E.email
field8 = E.priority
xmldoc = root(
field1(self.ptype),
field2("FICO-Xpress"),
field3("MPS"),
field4(lxml.etree.CDATA(file.read())),
field5(self.maximize),
field6("no"),
field7(self.email),
field8(self.priority))
xml = lxml.etree.tostring(xmldoc).decode()
file.close()
try:
os.remove('problem.mps')
except:
pass
neos = xmlrpc.client.ServerProxy("https://neos-server.org:3333")
alive = neos.ping()
if alive != "NeosServer is alive\n":
raise RuntimeError("Could not make connection to NEOS Server")
else:
(jobNumber, password) = neos.submitJob(xml)
print("Job number = {} Job password = {}".format(jobNumber, password))
if jobNumber == 0:
raise RuntimeError("NEOS Server error: {}".format(password))
else:
status=""
while status!="Done":
time.sleep(5)
status= neos.getJobStatus(jobNumber, password)
print('Solving... Problem status: ', status)
msg=neos.getFinalResults(jobNumber, password)
#print(msg.data.decode())
tmpSol=open('tmpSol.txt', 'w')
tmpSol.write(msg.data.decode())
tmpSol.close()
values= self.readsol_MPS(self.path+'tmpSol.txt')
try:
os.remove('tmpSol.txt')
except:
pass
kestrelStatus = {"Done": LpStatusOptimal}
if status not in kestrelStatus:
raise PulpSolverError("Unknown status returned by Kestrel: " + statusString)
lp.status = kestrelStatus[status]
lp.assignVarsVals(values)
return lp.status
def readsol_MPS(self, filename):
with open(filename,'r') as f:
values = {}
while 1:
l = f.readline()
if l == "": break
line = l.split()
if len(line) and line[0] == 'C':
name = line[2]
value = float(line[4])
values[name] = value
return values
```
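A minimal usage sketch of the wrapper (the e-mail address is a placeholder and the tiny LP is purely illustrative; the same call pattern appears, commented out, after the `job_shop` function below):
```python
# Toy LP solved remotely through the Kestrel/NEOS wrapper defined above
lp = pulp.LpProblem("toy", pulp.LpMaximize)
a = pulp.LpVariable("a", 0, 10)
b = pulp.LpVariable("b", 0, 10)
lp += a + 2 * b          # objective
lp += a + b <= 12        # constraint
lp.solve(Kestrel(ptype='lp', maximize=True, email='[email protected]'))
print(pulp.LpStatus[lp.status], a.varValue, b.varValue)
```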
# Job shop Scheduling
Scheduling is the allocation of resources over time. In poorly scheduled job shops, it is not at all unusual for jobs to wait for 95 percent of their total production cycle. This results in a long workflow cycle. If we add inventory time and the time to collect accounts receivable, we end up with a long cash-flow cycle. Therefore, workflow equals cash flow, and workflow is driven by the schedule. A schedule is a timetable for performing activities, using resources, or assigning facilities.
In a scheduling problem we have to carry out a set of jobs on several processors that use other resources under certain constraints, such as constraints on job completion times, precedences between jobs (a job cannot start until another has finished), and so on. The goal is to optimize some criterion, for example minimizing the total processing time, which is the completion time of the last job (assuming the first job starts at time $0$), or maximizing the number of jobs processed.
Below we formulate a very general scheduling problem that subsumes, as special cases, a large number of scheduling problems studied in the literature.
## Formulation of the job shop scheduling problem
We are given $n$ jobs to be processed on $m$ processors (machines). Let $M_j \subseteq \mathcal{P} = \{ 1, \cdots, m\}$ be the subset of processors that can carry out job $j \in \mathcal{J} = \{ 1, \cdots, n\}$. Each job $j$ is characterized by the following parameters:
* $w_j$: weight of the job
* $r_j, d_j$: release and due dates (the job must be processed during the time interval $[r_j, d_j]$)
* $p_{jk}$: processing time on machine $k$
Precedence relations between jobs are given by an acyclic digraph $G = (\mathcal{J}, E)$ defined on the set $\mathcal{J}$: for each arc $(j_1, j_2) \in E$, job $j_2$ cannot start until job $j_1$ has finished.
Job shop scheduling involves deciding when to start each step of each job on its processor. Therefore, the start-time decision variables are now indexed by both the job and the processor:
\begin{equation}
x_{jk} = \text{start time of job } j \text{ on processor } k
\end{equation}
The steps of the various jobs scheduled in the job shop must take place in the indicated sequence. That is, the start times are subject to precedence constraints.
The precedence constraint that job $j$ must be completed on processor $k$ before activity on $k'$ can begin is expressed as
\begin{equation}
x_{jk} + p_{jk} \leq x_{jk'}
\end{equation}
where $x_{jk}$ denotes the start time of job $j$ on processor $k$, $p_{jk}$ is the processing time of $j$ on $k$, and $x_{jk'}$ is the start time of job $j$ on processor $k'$.
Conflicts can be modeled by introducing discrete decision variables
\begin{equation}
y_{jj'k}=
\begin{cases}
1 & \text{if job } j \text{ is scheduled before job } j' \text{ on processor } k \\
0 & \text{otherwise}
\end{cases}
\end{equation}
together with the constraints
\begin{equation}
x_{jk} + p_{jk} \leq x_{j'k} + M(1- y_{jj'k}) \\
x_{j'k} + p_{j'k} \leq x_{jk} + My_{jj'k}
\end{equation}
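As a small illustration (a sketch only, with hypothetical processing times and the same big-M idea used in the full `job_shop` function further below), the disjunctive pair above can be written in PuLP for two jobs on a single machine as:
```python
import pulp

# Hypothetical data: two jobs on one machine (illustration only)
p = {0: 3, 1: 5}   # processing times p_jk on this machine
M = 9999           # big-M constant; must exceed any feasible start time

x = {j: pulp.LpVariable("x_{}".format(j), lowBound=0) for j in p}  # start times
y = pulp.LpVariable("y_01", cat=pulp.LpBinary)  # 1 if job 0 precedes job 1

prob = pulp.LpProblem("two_job_conflict", pulp.LpMinimize)
# Exactly one of the two inequalities is binding, depending on y
prob += x[0] + p[0] <= x[1] + M * (1 - y)   # active when y = 1: job 0 before job 1
prob += x[1] + p[1] <= x[0] + M * y         # active when y = 0: job 1 before job 0
```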
One of the intriguing features of job shop scheduling models is the wide variety of objective functions that may be appropriate.
Objective function | Equation
--- | -----
Maximum completion time | $\max_j \{ x_{j\hat{k}} + p_{j\hat{k}}\}$
Mean completion time | $\frac{1}{n} \sum_j ( x_{j\hat{k}} + p_{j\hat{k}} )$
Maximum flow time | $\max_j \{ x_{j\hat{k}} + p_{j\hat{k}} - r_j\}$
Mean flow time | $\frac{1}{n} \sum_j ( x_{j\hat{k}} + p_{j\hat{k}} - r_j )$
Maximum lateness | $\max_j \{ x_{j\hat{k}} + p_{j\hat{k}} - d_j\}$
Mean lateness | $\frac{1}{n} \sum_j ( x_{j\hat{k}} + p_{j\hat{k}} - d_j )$
Maximum tardiness | $\max_j \{\max\{0, x_{j\hat{k}} + p_{j\hat{k}} - d_j\}\}$
Mean tardiness | $\frac{1}{n} \sum_j \max\{0, x_{j\hat{k}} + p_{j\hat{k}} - d_j\}$
where $\hat{k}$ is the last machine on which job $j$ must be processed
## Formulation in PuLP
```python
# rows = jobs
# columns = machines
procesing_times = np.array(
[
[29,78,9,36,49,11,62,56,44,21],
[43,90,75,11,69,28,46,46,72,30],
[91,85,39,74,90,10,12,89,45,33],
[81,95,71,99,9,52,85,98,22,43],
[14,6,22,61,26,69,21,49,72,53],
[84,2,52,95,48,72,47,65,6,25],
[46,37,61,13,32,21,32,89,30,55],
[31,86,46,74,32,88,19,48,36,79],
[76,69,76,51,85,11,40,89,26,74],
[85,13,61,7,64,76,47,52,90,45]
]
)
```
```python
# rows = jobs
# columns = machine index
order = np.array(
[
[0,1,2,3,4,5,6,7,8,9],
[0,2,4,9,3,1,6,5,7,8],
[1,0,3,2,8,5,7,6,9,4],
[1,2,0,4,6,8,7,3,9,5],
[2,0,1,5,3,4,8,7,9,6],
[2,1,5,3,8,9,0,6,4,7],
[1,0,3,2,6,5,9,8,7,4],
[2,0,1,5,4,6,8,9,7,3],
[0,1,3,5,2,9,6,7,4,8],
[1,0,2,6,8,9,5,3,4,7]
]
)
```
```python
def job_shop(procesing_times, order):
n_jobs, n_maq = procesing_times.shape
x = {}
for job in range(n_jobs):
for maq in range(n_maq):
x[(job, maq)] = pulp.LpVariable('x_{}_{}'.format(job, maq), 0, None, pulp.LpContinuous)
y = {}
for maq in range(n_maq):
for job in range(n_jobs):
for job_ in range(n_jobs):
if job < job_:
y[(job, job_, maq)] = pulp.LpVariable('y_{}_{}_{}'.format(job, job_, maq), 0, 1, pulp.LpInteger)
z = pulp.LpVariable('z', 0, None, pulp.LpContinuous)
prob = pulp.LpProblem("Job_shop", pulp.LpMinimize)
# Set objective
prob += z
    # create precedence constraints
for job in range(n_jobs):
for maq in range(1, n_maq):
prob += x[(job, order[job, maq])] >= x[(job, order[job, maq-1])] + procesing_times[job ,order[job, maq-1]]
    # create conflict constraints
for maq in range(n_maq):
for job in range(n_jobs):
for job_ in range(n_jobs):
if job < job_:
prob += x[(job_, maq)] >= x[(job, maq)] + procesing_times[job, maq] - 9999*y[(job, job_, maq)]
prob += x[(job, maq)] >= x[(job_, maq)] + procesing_times[job_, maq] - 9999*(1 - y[(job, job_, maq)])
    # makespan definition: z must be at least each job's completion time on its last machine
for job in range(n_jobs):
prob += z >= x[(job, order[job, -1])] + procesing_times[job, order[job, -1]]
return prob
# Solving
# prob = job_shop(procesing_times, order)
# prob.writeLP("job_shop.lp")
# prob.solve(Kestrel(ptype='milp', email='[email protected]'))
```
```python
# NOTE: this cell assumes the (commented-out) solve above has been run, so that `prob` holds a solved model
n_jobs, n_maq = procesing_times.shape
x_sol = np.zeros((n_jobs, n_maq))
for v in prob.variables():
if v.name != 'z':
line = v.name.split(sep='_')
if line[0] == 'x':
x_sol[int(line[1]), int(line[2])] = v.varValue
else:
z_sol = v.varValue
```
```python
x_sol = np.array(
[[ 83, 138, 228, 260, 321, 422, 433, 526, 902, 951],
[188, 480, 283, 469, 370, 644, 583, 672, 727, 439],
[428, 309, 593, 519, 882, 677, 807, 718, 632, 849],
[231, 43, 157, 593, 312, 920, 321, 428, 406, 806],
[ 22, 37, 0, 112, 173, 43, 412, 271, 199, 320],
[568, 225, 22, 374, 834, 275, 691, 907, 469, 543],
[ 37, 0, 96, 83, 439, 189, 157, 339, 271, 210],
[312, 394, 237, 898, 640, 552, 672, 807, 691, 727],
[112, 227, 358, 296, 672, 347, 543, 583, 946, 469],
[343, 296, 434, 763, 770, 687, 495, 855, 542, 642]]
)
z_sol = 972
```
```python
import random
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
```
```python
n_jobs, n_maq = x_sol.shape  # dimensions of the (hard-coded) solution above
plt.rcParams["font.size"] = "20"
cmap = plt.get_cmap('Paired')
colors = cmap(np.linspace(0, 1, n_maq))
fig, ax = plt.subplots(1, 1, figsize=(30,10))
ax.set_title('Job shop')
height = 0.9
for job in range(n_jobs):
for maq in range(n_maq):
ax.add_patch(patches.Rectangle((x_sol[job, maq], job-0.5),
procesing_times[job, maq],
height,
color=colors[maq],
alpha=1))
ax.set_ylim(-0.5, n_jobs)
ax.set_xlim(0, (x_sol + procesing_times).max())
ax.set_yticks([job for job in range(n_jobs)])
ax.set_yticklabels(['Job {}'.format(job) for job in range(n_jobs)])
plt.legend(handles=[mpatches.Patch(color=colors[maq], label='Maq {}'.format(maq)) for maq in range(n_maq)],
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
```
## Shifting bottleneck heuristic
The shifting bottleneck heuristic is a procedure intended to minimize the time it takes to complete the work, specifically the makespan, in a job shop. The makespan is defined as the amount of time, from start to finish, required to complete a set of jobs on several machines, where the machine order is fixed for each job. Assuming the jobs actually compete for the same resources (machines), there will always be one or more resources acting as a processing "bottleneck". This heuristic procedure, or "rule of thumb", minimizes the effect of the bottleneck.
```python
import networkx as nx
```
```python
class Job(object):
def __init__(self, Id, r, p):
self.Id = Id
self.r = r # route
self.p = p # processing times
```
```python
class Jobshop(nx.DiGraph):
def __init__(self): # , jobs):
super().__init__()
self.machines = {}
self.add_node("U", p=0)
self.add_node("V", p=0)
def handleJobRoutings(self, jobs):
for j in jobs.values():
self.add_edge("U", (j.r[0], j.Id))
for m, n in zip(j.r[:-1], j.r[1:]):
self.add_edge((m, j.Id), (n, j.Id))
self.add_edge((j.r[-1], j.Id), "V")
def handleJobProcessingTimes(self, jobs):
for j in jobs.values():
for m, p in zip(j.r, j.p):
self.add_node((m, j.Id), p=p)
def makeMachineSubgraphs(self):
machineIds = set(ij[0] for ij in self if ij[0] not in ("U", "V"))
for m in machineIds:
            # the generator below already excludes the artificial "U" and "V" nodes;
            # keeping the subgraph as a view lets node attributes stay shared with the parent graph
            self.machines[m] = self.subgraph(ij for ij in self if ij[0] == m)
def addJobs(self, jobs):
self.handleJobRoutings(jobs)
self.handleJobProcessingTimes(jobs)
self.makeMachineSubgraphs()
def output(self):
for m in sorted(self.machines):
for j in sorted(self.machines[m]):
print("{}: {}".format(j, self.node[j]['C']))
```
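The classes above only build the disjunctive graph; the bottleneck-selection loop itself is not shown. As a minimal usage sketch (assuming networkx 2.x; the three-job instance is hypothetical, and this is only the conjunctive-arc relaxation rather than the full shifting bottleneck iteration), a longest-path pass over the routing graph yields a makespan lower bound and fills in the completion times `C` that `output()` prints:
```python
# Hypothetical instance: routes r are machine ids, p are the matching processing times
jobs = {
    1: Job(1, r=[1, 2, 3], p=[10, 8, 4]),
    2: Job(2, r=[2, 1, 3], p=[8, 3, 6]),
    3: Job(3, r=[1, 2],    p=[4, 7]),
}

js = Jobshop()
js.addJobs(jobs)

# Earliest completion time of every operation = longest path from the source "U",
# processed in topological order (the routing graph is acyclic by construction).
for n in nx.topological_sort(js):
    earliest_start = max((js.nodes[pred]['C'] for pred in js.predecessors(n)), default=0)
    js.nodes[n]['C'] = earliest_start + js.nodes[n]['p']

print("Makespan lower bound:", js.nodes["V"]['C'])
js.output()
```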
|
Require Import Crypto.Specific.Framework.RawCurveParameters.
Require Import Crypto.Util.LetIn.
(***
Modulus : 2^130 - 5
Base: 32.5
***)
Definition curve : CurveParameters :=
{|
sz := 4%nat;
base := 32 + 1/2;
bitwidth := 64;
s := 2^130;
c := [(1, 5)];
carry_chains := Some [seq 0 (pred 4); [0; 1]]%nat;
a24 := None;
coef_div_modulus := Some 2%nat;
goldilocks := None;
karatsuba := None;
montgomery := false;
freeze := Some true;
ladderstep := false;
mul_code := None;
square_code := None;
upper_bound_of_exponent_loose := None;
upper_bound_of_exponent_tight := None;
allowable_bit_widths := None;
freeze_extra_allowable_bit_widths := None;
modinv_fuel := None
|}.
Ltac extra_prove_mul_eq _ := idtac.
Ltac extra_prove_square_eq _ := idtac.
|
# On Kepler 452
Kepler 452 is a solar-like star in the Kepler field that was [recently announced to possess a planet](http://iopscience.iop.org/1538-3881/150/2/56/article) with an orbit of 385 Earth days. Based on a stellar evolution model analysis of the host star, the planet is found to have a radius of approximately $1.63 \pm 0.23 R_{\oplus}$. [Standard Dartmouth stellar models](http://adsabs.harvard.edu/abs/2008ApJS..178...89D) were used to draw this conclusion, with added support from a similar analysis performed with [YREC models](http://dx.doi.org/10.1086/424966). While I do not doubt the overall validity of the stellar models, it is still a worthwhile exercise to explore how various modeling assumptions may affect the results, given that only three observable properties were used to constrain the model parameters: $\log(g)$, $T_{\rm eff}$, and [Fe/H].
Begin by initializing matplotlib and numpy (eventually I'll add this to the default config)
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
```
## Solar Abundance Distribution
```python
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
# configure axes
ax.set_xlabel('Effective Temperature (K)', fontsize=22.)
ax.set_xlim(6000., 5000.)
ax.set_ylabel('$\log_{10}(g)$', fontsize=22.)
ax.set_ylim(4.6, 4.1)
ax.tick_params(axis='both', which='major', length=20., labelsize=20.)
# approximate mass range from measured temperature
masses = np.arange(0.90, 1.3, 0.05)
# directories for metallicity of +0.2 dex
f15_directory = '../../evolve/dmestar/trk/gas07/p020/a0/amlt2202'
d08_directory = '../../evolve/dsep08/trk/fehp02afep0'
# plot mass tracks for GS98 and GAS07 composition
for mass in masses:
f15_file = '{:s}/m{:04.0f}_GAS07_p020_p0_y27_mlt2.202.trk'.format(f15_directory, mass*1000.)
d08_file = '{:s}/m{:03.0f}fehp02afep0.jc2mass'.format(d08_directory, mass*100.)
try:
f15_trk = np.genfromtxt(f15_file)
d08_trk = np.genfromtxt(d08_file)
except IOError:
continue
ax.plot(10**d08_trk[:, 1], d08_trk[:, 2], '--', lw=2, color='#444444')
ax.plot(10**f15_trk[:, 1], f15_trk[:, 2], '-', lw=2, color='#333333')
# add Kepler 452 point
ax.errorbar([5757.], [4.32], xerr=85., yerr=0.09, fmt='-o', lw=3, markersize=14., color='#4682B4')
```
Note that there is no $0.95 M_{\odot}$ mass track plotted. The right-most track for both model sets is a $0.90 M_{\odot}$ track. Assuming the star is burning hydrogen in the core and is _not_ on the pre-main-sequence, we can estimate a mass of approximately $1.03\pm0.03 M_{\odot}$ from the Dartmouth 2008 models (dashed lines). If we instead look at the Dartmouth 2015 models, we find the mass is approximately $1.08\pm0.03 M_{\odot}$, consistent with the first estimate within $2\sigma$. Once one builds in the metallicity uncertainty, the errors increase further, providing a greater consistency between the two measurements. Jenkins et al. quote a mass of $1.04\pm0.05 M_{\odot}$, in agreement with the aforementioned values.
One small factor that was not accounted for is that the observed metallicity provides the _present day_ metallicity, which is not necessarily equivalent to the quoted metallicity for model mass tracks. Due to gravitational settling and multiple diffusive processes, one may need to use models with a higher proto-stellar (re: initial) surface metal abundance to achieve a present day value of [Fe/H] $= +0.2$ dex. At most, we might expect a 0.1 dex reduction in the surface abundance of heavy elements over time. Although this is very rough, it should provide an upper limit to the uncertainty one expects heavy element diffusion to inflict on model properties.
```python
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
# configure axes
ax.set_xlabel('Effective Temperature (K)', fontsize=22.)
ax.set_xlim(6000., 5000.)
ax.set_ylabel('$\log_{10}(g)$', fontsize=22.)
ax.set_ylim(4.6, 4.1)
ax.tick_params(axis='both', which='major', length=20., labelsize=20.)
# directories for metallicity of +0.2, +0.3 dex
d08_directory_03 = '../../evolve/dsep08/trk/fehp03afep0'
# plot mass tracks for GS98 and GAS07 composition
for mass in masses:
d08_file = '{:s}/m{:03.0f}fehp02afep0.jc2mass'.format(d08_directory, mass*100.)
d08_03_file = '{:s}/m{:03.0f}fehp03afep0.jc2mass'.format(d08_directory_03, mass*100.)
try:
d08_trk = np.genfromtxt(d08_file)
d08_03_trk = np.genfromtxt(d08_03_file)
except IOError:
continue
ax.plot(10**d08_trk[:, 1], d08_trk[:, 2], '--', lw=2, color='#444444')
ax.plot(10**d08_03_trk[:, 1], d08_03_trk[:, 2], '-', lw=2, color='#333333')
# add Kepler 452 point
ax.errorbar([5757.], [4.32], xerr=85., yerr=0.09, fmt='-o', lw=3, markersize=14., color='#4682B4')
```
Dartmouth 2008 tracks with [Fe/H] $= +0.30$ dex are shown as solid lines, with dashed lines showing models computed with the present day metallicity of [Fe/H] = $+0.20$ dex. As was the case with the Dartmouth 2015 models, adopting a higher metallicity pushes the inferred stellar mass up to approximately $1.10 \pm 0.04 M_{\odot}$.
```python
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.set_title('Evolution of Surface Metallicity', fontsize=26., family='serif')
ax.set_xlabel('Age (Gyr)', fontsize=22., family='serif')
ax.set_xlim(1.0, 10.0)
ax.set_ylabel('[M/H] (dex)', fontsize=22., family='serif')
ax.tick_params(axis='both', which='major', length=16., labelsize=20.)
f15_trk = np.genfromtxt('{:s}/m1100_GAS07_p020_p0_y27_mlt2.202.trk'.format(f15_directory))
d08_trk = np.genfromtxt('../../evolve/models/tmp/m1100_GS98_p020_p0_y29_mlt1.884.trk')
# solar Z/X = 0.0165 for GAS07 solar abundance distribution, 0.0231 for GS98
ax.plot(f15_trk[:,0]/1.0e9, np.log10(f15_trk[:,7]/0.0165), '-', lw=3, color="#333333")
ax.plot(d08_trk[:,0]/1.0e9, np.log10(d08_trk[:,7]/0.0231), '--', lw=3, color="#333333")
```
Clearly an initial metallicity 0.1 dex above the quoted value is not a reasonable estimate: a track started that high would never dip as low as 0.2 dex, since the surface depletion shown above is well under 0.1 dex. An initial metallicity somewhat in excess of 0.2 dex would be needed to derive precisely 0.2 dex as the present day metallicity for Kepler 452, but this is well within the uncertainty limits of ±0.08 dex derived from spectroscopy. While there may be a slight preference among all models for a higher mass than $1.04 M_{\odot}$, it remains a valid estimate.
How does a 5% change in mass affect the radius and luminosity evolution of the star? If we take the gravity to be constant regardless of the derived mass, we can compute the fractional change in radius introduced by a change in mass,
\begin{equation}
\frac{R_{\star}}{R_{\star,0}} = \left(\frac{M_{\star}}{M_{\star,0}}\right)^{1/2}.
\end{equation}
Therefore, a 5% change in stellar mass introduces a 3% increase in the stellar radius. However, since the planet radius is proportional to the projected surface area of the host star, the planet radius increases by approximately 5%, as well. The planet would therefore have a radius of $R_p = 1.71 R_{\oplus}$, which is entirely consistent with the Jenkins et al. estimate. This would further confirm the analysis of Jenkins et al. that led to the conclusion that _it is unlikely that Kepler 452b has an Earth-like composition_.
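As a quick numerical check of the radius scaling above (a sketch only; the 5% figure is the nominal offset between the mass estimates discussed earlier):
```python
# Fractional change in stellar radius at fixed surface gravity, R/R0 = (M/M0)^(1/2)
mass_ratio = 1.05
radius_ratio = mass_ratio ** 0.5
print("stellar radius change: {:.1f}%".format((radius_ratio - 1.0) * 100.0))  # ~2.5%, in line with the ~3% quoted above
```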
The luminosity evolution would also be impacted by the increased radius, for at constant temperature the luminosity of the star will increase by a factor proportional to $R_{\star}^2$. A 5% increase of incident flux is extremely minor in the grand scheme of things.
Indeed, even if the star were young, the mass estimate would increase by approximately 10% - 15%. This would increase the estimated planet radius by an equivalent amount, leading to a final planet radius $R_p = 2.0 R_{\oplus}$, which would still place the object in the super-Earth category, although it would mean that the planet is even less likely to be rocky.
```python
```
|
#include <boost/lambda/lambda.hpp>
#include <boost/lambda/bind.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
struct Test {
Test(int i) : integer_(i) {}
int integer_;
};
void print(const Test& test) {
std::cout << test.integer_ << std::endl;
}
int main() {
using namespace boost::lambda;
std::vector<Test> container;
for (int i = 0; i < 10; ++i) {
container.push_back(i);
}
std::for_each(container.begin(), container.end(), bind(print, _1));
}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Jellyfish Example Courtesy of Alexander P. Hoover, PhD
%
% Converted from IBAMR: 1/16/2018 by NAB.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function Make_Jelly_Geometry()
close all;
clear all;
L = 8; % height of computational domain (m) for keeping desired resolution
Lh = 10; % actual height of computational domain (m) (MATCHES INPUT2D)
Lw = 3; % width of computational domain (m) (MATCHES INPUT2D)
N = 96; % number of Cartesian grid meshwidths at the finest level of the AMR grid
dx = L/N; % Cartesian mesh width (m)
ds = dx/2;
a=.5; % bell radius (semi-minor axis, horizontal axis, note width=2a)
b=.75; % bell semi-major axis
d=-0.25;
factor_a=.8;
F=1e5; %5e0
theta=zeros(1000,1);
theta_lim=asin(d/b);
theta_test=pi/2;
x_points=zeros(1000,1);
z_points=zeros(1000,1);
id_points=zeros(1000,1);
offset = 0;
kappa_spring = 1e7; %1e5 % spring constant (Newton)
kappa_beam = 2.5e5; %1e5 %5e3 % beam stiffness constant (Newton m^2)
%kappa_beam_flexible = kappa_beam/5; % beam stiffness constant (Newton m^2)
kappa_target = kappa_spring; % target point penalty spring constant (Newton)
c=0;
while(theta_test<(pi-theta_lim))
c=c+1;
theta(c)=theta_test;
x_points(c)=a*cos(theta(c));
z_points(c)=b*sin(theta(c));
id_points(c)=c-1;
theta_test=ds/((a*sin(theta(c)))^(2)+(b*cos(theta(c)))^(2))^(.5)+theta(c);
end
c_stiff=c;
npts=2*c-1;
npts_wing=floor(npts/2);
npts_musc=floor(npts_wing/4);
for j=(c+1):(npts)
x_points(j)=-1*x_points(j-c+1);
z_points(j)=z_points(j-c+1);
id_points(j)=j-1;
end
mesh_name = 'jelly';
xShift = 1.5;
yShift = 2;
x_points=x_points(1:npts)+xShift;
z_points=z_points(1:npts)+yShift;
it_points=id_points(1:npts);
plot(x_points(:),z_points(:),'*'); hold on;
axis([0 8 0 8])
% Lag Pts to Mess up Flow At Edge
xBlock = ds:4*ds:Lw-ds;
yBlock = (Lh-5*ds)*ones(1,length(xBlock))+ds;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Print .vertex information
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
vertex_fid = fopen([mesh_name num2str(N) '.vertex'], 'w');
fprintf(vertex_fid, '%d\n', npts + npts_musc*2 + length(xBlock));
lag_ct = 0;
%
% bell
%
for j=1:npts
fprintf(vertex_fid, '%1.16e %1.16e\n', x_points(j), z_points(j));
lag_ct = lag_ct + 1;
end
%
% muscles
%
for s = 1:npts_musc
fprintf(vertex_fid, '%1.16e %1.16e\n', x_points(npts_wing+1-npts_musc+s), z_points(npts_wing+1-npts_musc+s));
plot(x_points(npts_wing+1-npts_musc+s),z_points(npts_wing+1-npts_musc+s),'r*'); hold on;
lag_ct = lag_ct + 1;
end
for s = 1:npts_musc
fprintf(vertex_fid, '%1.16e %1.16e\n', x_points(npts-npts_musc+s), z_points(npts-npts_musc+s));
plot(x_points(npts-npts_musc+s),z_points(npts-npts_musc+s),'r*'); hold on;
lag_ct = lag_ct + 1;
end
for ii=1:length(xBlock)
fprintf(vertex_fid, '%1.16e %1.16e\n', xBlock(ii), yBlock(ii));
end
fclose(vertex_fid);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Print .spring information
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
spring_fid = fopen([mesh_name num2str(N) '.spring'], 'w');
npts_spring_type1=npts-1;
fprintf(spring_fid, '%d\n', npts-1 + npts_musc);
fprintf('\nNumber of springs before muscles: %d \n\n',npts-1)
factor = 1;%ds^2/ds;
%
% bell
%
for s = 1:c-1
resting=sqrt((x_points(s)-x_points(s+1))^(2)+(z_points(s)-z_points(s+1))^(2));
fprintf(spring_fid, '%d %d %1.16e %1.16e %d\n', id_points(s)+1, id_points(s+1)+1, kappa_spring*ds/(ds^2)*factor, resting, 1);
end
for s = c+1:npts-1
resting=sqrt((x_points(s)-x_points(s+1))^(2)+(z_points(s)-z_points(s+1))^(2));
fprintf(spring_fid, '%d %d %1.16e %1.16e %d\n', id_points(s)+1, id_points(s+1)+1, kappa_spring*ds/(ds^2)*factor, resting, 1);
end
resting=sqrt((x_points(1)-x_points(c+1))^(2)+(z_points(1)-z_points(c+1))^(2));
fprintf(spring_fid, '%d %d %1.16e %1.16e %d\n', id_points(1)+1, id_points(c+1)+1, kappa_spring*ds/(ds^2)*factor, resting, 1);
%
% muscles
%
for s = 1:npts_musc
fprintf(spring_fid, '%d %d %1.16e %1.16e %d\n',npts+s-1+1, npts+s+npts_musc-1+1, F, 0, 1);
end
fclose(spring_fid);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Print .nonInv_beam information
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
beam_fid = fopen([mesh_name num2str(N) '.nonInv_beam'], 'w');
fprintf(beam_fid, '%d\n', npts-2);
factor=1;% = (ds^4)/ds;
for s = 2:c-1
C1 = x_points(s-1)+x_points(s+1)-2*x_points(s);
C2 = z_points(s-1)+z_points(s+1)-2*z_points(s);
fprintf(beam_fid, '%d %d %d %1.16e %1.16e %1.16e\n', id_points(s-1)+1, id_points(s)+1, id_points(s+1)+1, kappa_beam*ds/(ds^4)*factor, C1, C2);
end
for s = c+2:npts-1
C1 = x_points(s-1)+x_points(s+1)-2*x_points(s);
C2 = z_points(s-1)+z_points(s+1)-2*z_points(s);
fprintf(beam_fid, '%d %d %d %1.16e %1.16e %1.16e\n', id_points(s-1)+1, id_points(s)+1, id_points(s+1)+1, kappa_beam*ds/(ds^4)*factor, C1, C2);
end
C1 = x_points(c+2)+x_points(1)-2*x_points(c+1);
C2 = z_points(c+2)+z_points(1)-2*z_points(c+1);
fprintf(beam_fid, '%d %d %d %1.16e %1.16e %1.16e\n', id_points(c+2)+1, id_points(c+1)+1, id_points(1)+1, kappa_beam*ds/(ds^4)*factor, C1, C2);
C1 = x_points(c+1)+x_points(2)-2*x_points(1);
C2 = z_points(c+1)+z_points(2)-2*z_points(1);
fprintf(beam_fid, '%d %d %d %1.16e %1.16e %1.16e\n', id_points(c+1)+1, id_points(1)+1, id_points(2)+1, kappa_beam*ds/(ds^4)*factor, C1, C2);
fclose(beam_fid);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PRINT TARGET POINTS!!!
%
% print target points (flow blocker along edge)
k_Target = 2.5e6;
nBefore = lag_ct; % Counts pts in jellyfish for bookkeeping for .target file
struct_name = ['jelly' num2str(N)];
print_Lagrangian_Target_Pts(xBlock,k_Target,struct_name,nBefore)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FUNCTION: prints TARGET points to a file called 'struct_name'.target
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function print_Lagrangian_Target_Pts(xLag,k_Target,struct_name,nBefore)
N = length(xLag);
Nstart = nBefore+1;
Nend = nBefore+N;
target_fid = fopen([struct_name '.target'], 'w');
fprintf(target_fid, '%d\n', N );
%Loops over all Lagrangian Pts.
for s = Nstart:Nend
fprintf(target_fid, '%d %1.16e\n', s, k_Target);
end
fclose(target_fid); |
[STATEMENT]
lemma "!!a::int. [| a+b+c+d <= i+j+k+l; a<=b; b<=c; c<=d; i<=j; j<=k; k<=l |]
==> a+a+a+a+a+a <= l+l+l+l+i+l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. \<lbrakk>a + b + c + d \<le> i + j + k + l; a \<le> b; b \<le> c; c \<le> d; i \<le> j; j \<le> k; k \<le> l\<rbrakk> \<Longrightarrow> a + a + a + a + a + a \<le> l + l + l + l + i + l
[PROOF STEP]
by arith |
In this example, an equation for a contour of an axially symmetric
beam propagating in free space is checked.
Suppose there is an axially symmetric charged particle beam propagating
in free space.
Under a certain set of approximations, the trajectory of a particle on the edge
of the beam can be described by the following expressions:
<!-- -->
\begin{align}
& r = r(0) e^{u^2}
\\
& z =
\left( \dfrac{m v^3}{q I} \right)^{1/2}
r(0)
\int_{0}^{u} e^{u^2} du
\end{align}
where $u$ is a parameter, $v$ is the speed of particles along the $z$ axis,
$r(0)$ is the initial radius and $I$ is the full current of the beam.
A derivation of these relations can be found in the supplementary notebook [link].
These equations define the contour of the beam, $r(z)$. It is possible to perform
a numerical simulation of a beam propagating in free space and
plot the particle coordinates in the X-Z or Y-Z plane.
This allows the numerical profile to be compared
with the analytical expression.
The beam becomes $e = 2.71$ times wider than its initial radius at $u = 1$.
For this value of the parameter, the integral in the $z$ equation
[approximately equals](https://www.wolframalpha.com/input/?i=integrate%28+e^u^2,+u,+0,+1+%29) $1.46$.
Since the speed in the $z$-direction is assumed constant, it is possible to estimate the time needed for
particles to travel this distance as
<!-- -->
\begin{align}
t \approx
1.5 \left( \dfrac{m v}{q I} \right)^{1/2}
r(0)
\end{align}
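As a quick check of the quoted value of the integral, $\int_0^1 e^{u^2}\,du \approx 1.46$ (a sketch; any quadrature routine would do):
```python
import numpy as np
from scipy.integrate import quad

value, _ = quad(lambda t: np.exp(t * t), 0.0, 1.0)
print("integral of exp(u^2) from 0 to 1 = {:.3f}".format(value))  # ~1.463
```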
Let's substitute some numerical values (see the cell below). Suppose there is an electron beam ($q = 4.8 \cdot 10^{-10} ~ [\mbox{cgs}], ~ m = 9.1 \cdot 10^{-28} ~ [\mbox{g}]$) with full current $I = 0.1 ~ [\mbox{A}] = 2.998 \cdot 10^{8} ~ [\mbox{cgs}]$ and initial radius $r(0) = 0.5 ~ [\mbox{cm}]$.
The energy of its particles is 1 keV, so the speed is $v = 1.808 \cdot 10^{9} ~ [\mbox{cm/s}]$.
For this beam to get $2.71$ times wider (from $0.5 ~ [\mbox{cm}]$ to $1.36 ~ [\mbox{cm}]$), it takes $t = 2.63 \cdot 10^{-9} ~ [\mbox{s}]$.
This happens over a distance $z = 4.632 ~ [\mbox{cm}]$.
```python
from math import *
m = 9.8e-28
q = 4.8e-10
print( "q = {:.3e} [cgs]".format( q ) )
print( "m = {:.3e} [g]".format( m ) )
r_0 = 0.5
print( "beam_radius = {:.3e} [cm]".format( r_0 ) )
ampere_to_cgs = 2997924536.8431
I = 0.1 * ampere_to_cgs
print( "I = {:.3e} [A] = {:.3e} [cgs]".format( I / ampere_to_cgs, I ) )
ev_to_cgs = 1.60218e-12
E = 1000 * ev_to_cgs
v = sqrt( 2 * E / m )
print( "E = {:.3e} [eV] = {:.3e} [erg]".format( E / ev_to_cgs, E ) )
print( "v = {:.3e} [cm/s]".format( v ) )
r0_e_times_wider = e * r_0
z_e_times_wider = 1.46 * sqrt( m * v**3 / q / I ) * r_0
t_e_times_wider = 1.5 * sqrt( m * v / q / I ) * r_0
print( "r0_e_times_wider = {:.3e} [cm]".format( r0_e_times_wider ) )
print( "t_e_times_wider = {:.3e} [s]".format( t_e_times_wider ) )
print( "z_e_times_wider = {:.3e} [cm]".format( z_e_times_wider ) )
```
q = 4.800e-10 [cgs]
m = 9.800e-28 [g]
beam_radius = 5.000e-01 [cm]
I = 1.000e-01 [A] = 2.998e+08 [cgs]
E = 1.000e+03 [eV] = 1.602e-09 [erg]
v = 1.808e+09 [cm/s]
r0_e_times_wider = 1.359e+00 [cm]
t_e_times_wider = 2.632e-09 [s]
z_e_times_wider = 4.632e+00 [cm]
These estimates can be used as a basis for config file parameters.
Full simulation time is set to 3.0e-9 [s]. There are 100 time steps, with each 10th step written to file.
```python
from ef.config.components import *
sim_time = 3.0e-9
n_of_steps = 100
dt = sim_time / n_of_steps
save_each_step = 10
dt_save = dt * save_each_step
time_grid = TimeGridConf(total=sim_time, step=dt, save_step=dt_save)
print(time_grid)
```
### TimeGridConf:
total = 3e-09
save_step = 3e-10
step = 3e-11
The simulation domain is defined as
```python
mesh = SpatialMeshConf(size=(5, 5, 10), step=(.1, .1, .1))
print(mesh)
```
### SpatialMeshConf:
size = array([ 5., 5., 10.])
step = array([0.1, 0.1, 0.1])
with z size approximately two times the estimated value, with 100 nodes in that direction. X and y sizes are 10 times the radius with 50 nodes in each direction.
The source is centered along the x and y axes and is close to the origin along z. For a time step dt = 3.00e-11 [s], to provide a current I = 0.1 [A] the source has to generate $n = I ~ dt ~/~ q = 1.87 \cdot 10^7$ particles each time step. This is not computationally feasible, so instead let's fix the number of generated particles at 5000 per step. The charge of the macroparticles should then be $Q = I ~ dt ~/~ n = 1.799 \cdot 10^{-6} ~ [\mbox{cgs}]$. To preserve the charge-to-mass ratio, the mass of the macroparticles should be set to $M = (Q / q_e) ~ m_e = 3.672 \cdot 10^{-24} ~ [\mbox{g}]$. To have the same initial velocity as the electrons, the mean momentum should be set to $p = M ~ v = 6.641 \cdot 10^{-15} ~ [\mbox{g * cm / s}]$.
```python
num_of_real_particles = I * dt / q
print( "num_of_real_particles = {:.3e}".format( num_of_real_particles ) )
num_of_macro_particles = 5000
macro_q = I * dt / num_of_macro_particles
macro_m = macro_q / q * m
macro_mean_momentum = macro_m * v
print( "num_of_macro_particles = {:d}".format( num_of_macro_particles ) )
print( "macro_q = {:.3e} [cgs]".format( macro_q ) )
print( "macro_m = {:.3e} [g]".format( macro_m ) )
print( "macro_mean_momentum = {:.3e} [g * cm / s]".format( macro_mean_momentum ) )
source = ParticleSourceConf(
name = "cathode_emitter",
shape = Cylinder(start=(mesh.size[0]/2., mesh.size[1]/2., 0.52),
end=(mesh.size[0]/2., mesh.size[1]/2., 0.51),
radius=0.5),
initial_particles = num_of_macro_particles,
particles_to_generate_each_step = num_of_macro_particles,
momentum = (0, 0, macro_mean_momentum),
temperature = 0.0,
charge = -macro_q,
mass = macro_m
)
```
```python
from ef.config.config import Config
from ef.config.visualizer import Visualizer3d
axially_symmetric_beam_conf = Config(time_grid, mesh, sources=[source])
vis = Visualizer3d()
axially_symmetric_beam_conf.visualize_all(vis)
```
<Figure size 640x480 with 1 Axes>
Potential is zero on each boundary of the domain (by default).
Other config parameters:
```python
output_file = OutputFileConf(prefix = "contour_jup_", suffix = ".h5")
particle_interaction_model = ParticleInteractionModelConf(model = "PIC")
```
```python
axially_symmetric_beam_conf = Config(time_grid, mesh, sources=[source],
particle_interaction_model=particle_interaction_model,
output_file=output_file)
print(axially_symmetric_beam_conf.export_to_string())
```
[TimeGrid]
total_time = 3e-09
time_save_step = 3e-10
time_step_size = 3e-11
[SpatialMesh]
grid_x_size = 5.0
grid_x_step = 0.1
grid_y_size = 5.0
grid_y_step = 0.1
grid_z_size = 10.0
grid_z_step = 0.1
[ParticleSourceCylinder.cathode_emitter]
cylinder_axis_start_x = 2.5
cylinder_axis_start_y = 2.5
cylinder_axis_start_z = 0.51
cylinder_axis_end_x = 2.5
cylinder_axis_end_y = 2.5
cylinder_axis_end_z = 0.52
cylinder_radius = 0.5
initial_number_of_particles = 5000
particles_to_generate_each_step = 5000
mean_momentum_x = 0.0
mean_momentum_y = 0.0
mean_momentum_z = 6.640708217582056e-15
temperature = 0.0
charge = -1.7987547221058602e-06
mass = 3.672457557632798e-24
[OutputFilename]
output_filename_prefix = contour_jup_
output_filename_suffix = .h5
[BoundaryConditions]
boundary_phi_right = 0.0
boundary_phi_left = 0.0
boundary_phi_bottom = 0.0
boundary_phi_top = 0.0
boundary_phi_near = 0.0
boundary_phi_far = 0.0
[ParticleInteractionModel]
particle_interaction_model = PIC
```python
axially_symmetric_beam_conf.make().start_pic_simulation()
```
```python
import h5py
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
filename = 'contour_jup_0000100.h5'
h5file = h5py.File( filename, mode = "r" )
def get_source_current( h5file ):
time_step = h5file["/TimeGrid"].attrs["time_step_size"]
charge = h5file["/ParticleSources/cathode_emitter"].attrs["charge"]
particles_per_step = h5file[
"/ParticleSources/cathode_emitter"].attrs["particles_to_generate_each_step"]
current = particles_per_step * charge / time_step
return current
def get_source_geometry( h5file ):
axis_start_x = \
h5file["/ParticleSources/cathode_emitter"].attrs["cylinder_axis_start_x"]
axis_start_z = \
h5file["/ParticleSources/cathode_emitter"].attrs["cylinder_axis_start_z"]
radius = h5file["/ParticleSources/cathode_emitter"].attrs["cylinder_radius"]
return ( axis_start_x, axis_start_z, radius )
def get_source_particle_parameters( h5file ):
mass = h5file["/ParticleSources/cathode_emitter"].attrs["mass"]
charge = h5file["/ParticleSources/cathode_emitter"].attrs["charge"]
momentum_z = h5file["/ParticleSources/cathode_emitter"].attrs["mean_momentum_z"]
return ( mass, charge, momentum_z )
def beam_radius( u, r_0 ):
return r_0 * np.exp( u ** 2 )
def beam_z( u, m, v, q, I, r_0 ):
coeff = np.sqrt( m * v**3 / q / I ) * r_0
subint = lambda t: np.exp( t * t )
low_lim = 0
up_lim = u
integral_value = scipy.integrate.quad( subint, low_lim, up_lim )[0]
return coeff * integral_value
beam_axis_x_pos, emitter_z_pos, r_0 = get_source_geometry( h5file )
I = get_source_current( h5file )
m, q, p = get_source_particle_parameters( h5file )
v = p / m
u_min = 0; u_max = 2; num_u_points = 100 # for u = 1, r = r(0) * 2.71828
u = np.linspace( u_min, u_max, num_u_points )
r_an = [ beam_radius( x, r_0 ) for x in u ]
r_an_upper = r_an + beam_axis_x_pos
r_an_lower = beam_axis_x_pos - r_an
z_an = [ beam_z( x, m = m, v = v, q = q, I = I, r_0 = r_0 ) for x in u ]
z_an = z_an + emitter_z_pos
r_num = h5file["/ParticleSources/cathode_emitter/position_x"]
z_num = h5file["/ParticleSources/cathode_emitter/position_z"]
z_volume_size = h5file["/SpatialMesh"].attrs["z_volume_size"]
x_volume_size = h5file["/SpatialMesh"].attrs["x_volume_size"]
plt.xlabel( "Z [cm]" )
plt.ylabel( "X [cm]" )
plt.ylim( 0, x_volume_size )
plt.xlim( 0, z_volume_size )
plt.plot( z_num, r_num, '.', label = "num" )
plt.plot( z_an, r_an_upper, label = "theory", color = "g" )
plt.plot( z_an, r_an_lower, color = "g" )
plt.legend()
h5file.close()
plt.savefig( "beam_contour_jup.png" )
plt.show()
```
```python
```
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj25eqsynthconj3 : forall (lv0 : natural) (lv1 : natural), (@eq natural (plus lv0 lv1) (plus lv1 (plus lv0 Zero))).
Admitted.
QuickChick conj25eqsynthconj3.
|
[STATEMENT]
lemma zeroth_ac:
assumes "x \<in> carrier Q\<^sub>p"
shows "ac 0 x = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ac 0 x = 0
[PROOF STEP]
apply(cases "x = \<zero> ")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. x = \<zero> \<Longrightarrow> ac 0 x = 0
2. x \<noteq> \<zero> \<Longrightarrow> ac 0 x = 0
[PROOF STEP]
unfolding ac_def
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. x = \<zero> \<Longrightarrow> (if x = \<zero> then 0 else angular_component x 0) = 0
2. x \<noteq> \<zero> \<Longrightarrow> (if x = \<zero> then 0 else angular_component x 0) = 0
[PROOF STEP]
apply presburger
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<noteq> \<zero> \<Longrightarrow> (if x = \<zero> then 0 else angular_component x 0) = 0
[PROOF STEP]
using assms angular_component_closed[of x] Zp_car_zero_res
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier Q\<^sub>p
x \<in> nonzero Q\<^sub>p \<Longrightarrow> angular_component x \<in> carrier Z\<^sub>p
?x \<in> carrier Z\<^sub>p \<Longrightarrow> ?x 0 = 0
goal (1 subgoal):
1. x \<noteq> \<zero> \<Longrightarrow> (if x = \<zero> then 0 else angular_component x 0) = 0
[PROOF STEP]
unfolding nonzero_def mem_Collect_eq
[PROOF STATE]
proof (prove)
using this:
x \<in> carrier Q\<^sub>p
x \<in> carrier Q\<^sub>p \<and> x \<noteq> \<zero> \<Longrightarrow> angular_component x \<in> carrier Z\<^sub>p
?x \<in> carrier Z\<^sub>p \<Longrightarrow> ?x 0 = 0
goal (1 subgoal):
1. x \<noteq> \<zero> \<Longrightarrow> (if x = \<zero> then 0 else angular_component x 0) = 0
[PROOF STEP]
by presburger |
(* Title: HOL/Auth/n_flash_nodata_cub_lemma_on_inv__104.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash_nodata_cub Protocol Case Study*}
theory n_flash_nodata_cub_lemma_on_inv__104 imports n_flash_nodata_cub_base
begin
section{*All lemmas on causal relation between inv__104 and some rule r*}
lemma n_PI_Remote_GetVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Remote_GetXVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_NakVsinv__104:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__0Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__1Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Field (Field (Ident ''Sta'') ''Dir'') ''ShrSet'') p__Inv4)) (Const true)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_HeadVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_PutVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_DirtyVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_NakVsinv__104:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_PutVsinv__104:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__0Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__1Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__2Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__0Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__1Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_1Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8Vsinv__104:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__104:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10Vsinv__104:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_11Vsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_NakVsinv__104:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutXVsinv__104:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutVsinv__104:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutXVsinv__104:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_ReplaceVsinv__104:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Replace src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Replace src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "((formEval (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''ShrVld'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''ShrVld'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''ShrVld'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''ShrVld'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__104:
assumes a1: "(r=n_PI_Local_GetX_PutX_HeadVld__part__0 N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__104:
assumes a1: "(r=n_PI_Local_GetX_PutX_HeadVld__part__1 N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_ShWbVsinv__104:
assumes a1: "(r=n_NI_ShWb N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__104 p__Inv4" apply fastforce done
have "((formEval (andForm (eqn (Const (index p__Inv4)) (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Proc''))) (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''HomeProc'')) (Const false))) s))\<or>((formEval (eqn (IVar (Para (Field (Field (Ident ''Sta'') ''Dir'') ''ShrSet'') p__Inv4)) (Const true)) s))\<or>((formEval (andForm (neg (eqn (Const (index p__Inv4)) (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Proc'')))) (neg (eqn (IVar (Para (Field (Field (Ident ''Sta'') ''Dir'') ''ShrSet'') p__Inv4)) (Const true)))) s))\<or>((formEval (andForm (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''HomeProc'')) (Const false))) (neg (eqn (IVar (Para (Field (Field (Ident ''Sta'') ''Dir'') ''ShrSet'') p__Inv4)) (Const true)))) s))" by auto
moreover {
assume c1: "((formEval (andForm (eqn (Const (index p__Inv4)) (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Proc''))) (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''HomeProc'')) (Const false))) s))"
have "?P3 s"
apply (cut_tac a1 a2 c1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_Get)) (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''HomeProc'')) (Const false))) (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Cmd'')) (Const SHWB_ShWb))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (eqn (IVar (Para (Field (Field (Ident ''Sta'') ''Dir'') ''ShrSet'') p__Inv4)) (Const true)) s))"
have "?P3 s"
apply (cut_tac a1 a2 c1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Cmd'')) (Const SHWB_ShWb)) (eqn (IVar (Para (Field (Field (Ident ''Sta'') ''Dir'') ''ShrSet'') p__Inv4)) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (neg (eqn (Const (index p__Inv4)) (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Proc'')))) (neg (eqn (IVar (Para (Field (Field (Ident ''Sta'') ''Dir'') ''ShrSet'') p__Inv4)) (Const true)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (andForm (neg (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''HomeProc'')) (Const false))) (neg (eqn (IVar (Para (Field (Field (Ident ''Sta'') ''Dir'') ''ShrSet'') p__Inv4)) (Const true)))) s))"
have "?P1 s"
proof(cut_tac a1 a2 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutX_HomeVsinv__104:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__0Vsinv__104:
assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__104:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__104:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_1Vsinv__104:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__1Vsinv__104:
assumes a1: "r=n_PI_Local_GetX_GetX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__0Vsinv__104:
assumes a1: "r=n_PI_Local_GetX_GetX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__104:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_ReplaceVsinv__104:
assumes a1: "r=n_PI_Local_Replace " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__104:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__104:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Put_HomeVsinv__104:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__104:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_PutXVsinv__104:
assumes a1: "r=n_PI_Local_PutX " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_PutVsinv__104:
assumes a1: "r=n_PI_Local_Get_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_GetX_Nak_HomeVsinv__104:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Local_PutXAcksDoneVsinv__104:
assumes a1: "r=n_NI_Local_PutXAcksDone " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__1Vsinv__104:
assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Nak_HomeVsinv__104:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__104:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__104:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Local_PutVsinv__104:
assumes a1: "r=n_NI_Local_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_ClearVsinv__104:
assumes a1: "r=n_NI_Nak_Clear " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_GetVsinv__104:
assumes a1: "r=n_PI_Local_Get_Get " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_HomeVsinv__104:
assumes a1: "r=n_NI_Nak_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__104:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_FAckVsinv__104:
assumes a1: "r=n_NI_FAck " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__104 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
Require Export decidability.
Require back.
(** Duality **)
Module swap (M': ProjectivePlane') <: ProjectivePlane'.
Definition Point := M'.Line.
Definition Line := M'.Point.
Definition Incid := fun (x:Point) (y:Line) => M'.Incid y x.
Definition incid_dec : forall (A : Point) (l : Line), {Incid A l} + {~ Incid A l}.
Proof.
unfold Incid.
intros.
apply M'.incid_dec.
Qed.
Definition a1_exist := M'.a2_exist.
Definition a2_exist := M'.a1_exist.
Lemma uniqueness : forall A B :Point, forall l m : Line,
Incid A l -> Incid B l -> Incid A m -> Incid B m -> A=B\/l=m.
intros.
unfold Point, Line, Incid in *.
assert ((l=m)\/(A=B)).
apply M'.uniqueness; assumption.
intuition.
Qed.
Export back.
Module M := back.back M'.
Module ProjectivePlaneFacts_m := decidability.decidability M.
Module uniq := uniqueness_axioms M'.
Ltac Apply_unicity := match goal with
H1: ?A <> ?B, H2: ?Incid ?A ?l, H3: ?Incid ?B ?l, H4 : ?Incid ?A ?m, H5: ?Incid ?B ?m |- _ =>
let id:= fresh in assert (id: l=m); try apply (uniq.a1_unique A B l m H1 H2 H3 H4 H5);subst l
| H1: ?l <> ?m, H2: ?Incid ?A ?l, H3: ?Incid ?A ?m, H4 : ?Incid ?B ?l, H5: ?Incid ?B ?m |- _ =>
let id:= fresh in assert (id: A=B); try apply (uniq.a2_unique l m A B H1 H2 H3 H4 H5);subst A
end.
Ltac Collapse := repeat (Apply_unicity; CleanDuplicatedHyps).
Ltac line_through l A B Ha Hb := elim (M'.a1_exist A B); intros l [Ha Hb].
Lemma a3_1_aux :
forall (P: Point) (l:Line),
~ Incid P l -> {A:Point & {B:Point & {C:Point |
(dist3 A B C/\Incid A l /\Incid B l /\ Incid C l)}}}.
Proof.
intro l1.
intro P.
intro Hincid.
elim (M'.a3_1 l1).
intros A HA; elim HA.
intros B HB; elim HB.
intros C (Hz1,(Hz2,(Hz3,Hz4))); clear HA HB.
line_through m1 P A Hm1A Hm1B.
line_through m2 P B Hm2A Hm2B.
line_through m3 P C Hm3A Hm3B.
exists m1.
exists m2.
exists m3.
split.
2:intuition.
unfold dist3.
split.
intros Hm1m2.
subst m1.
assert (A=B).
assert (m2<>l1).
intro Hm2l1.
subst m2.
intuition.
apply (uniq.a2_unique m2 l1 A B H) ; intuition.
subst A.
unfold dist3 in Hz1; intuition.
split.
intros Hm1m2.
subst m1.
assert (A=C).
assert (m3<>l1).
intro Hm3l1.
subst.
intuition.
apply (uniq.a2_unique m3 l1 A C H) ; intuition.
subst.
unfold dist3 in Hz1; intuition.
intros Hm1m2.
subst.
assert (B=C).
assert (m3<>l1).
intro Hm3l1.
subst.
intuition.
apply (uniq.a2_unique m3 l1 B C H) ; intuition.
subst.
unfold dist3 in Hz1; intuition.
Qed.
Definition a3_1:
forall l:Line,{A:Point & {B:Point & {C:Point |
(dist3 A B C/\Incid A l /\Incid B l /\ Incid C l)}}}.
(** Using a3_2, we build two lines l1 and l2.
Then we perform case distinction on Incid P l1 and Incid P l2.
If P does not belong to l1 or l2, we use the previous lemma.
Otherwise P belongs to both l1 and l2, and we use the outsider lemma.
*)
(*
unfold Line, Point, Incid.
*)
elim M'.a3_2 ; intros l1 Hl1.
elim Hl1; intros l2 Hl2; clear Hl1.
intros P.
elim (M'.incid_dec P l1);intros HMl1.
elim (M'.incid_dec P l2);intros HMl2.
(** Case P is at the intersection of l1 and l2 **)
exists l1.
exists l2.
elim (ProjectivePlaneFacts_m.outsider l1 l2).
intros K (HK1, HK2).
line_through m P K Hm1 Hm2.
exists m.
unfold dist3.
unfold ProjectivePlaneFacts_m.Incid in *.
unfold M.Incid in *.
intuition;subst;intuition.
(** Case P is not on l2 *)
apply (a3_1_aux) with (P:=l2);assumption.
(** Case P is not on l1 *)
apply (a3_1_aux) with (P:=l1);assumption.
Qed.
Definition a3_2 : {l1:Line & {l2:Line | l1 <> l2}}.
generalize M'.a3_2 M'.a3_1.
intros H1 H2.
elim H1.
intro l.
intro Hl2.
elim (H2 l).
intros P H';elim H'.
intro Q.
intros.
elim p.
intros x (H1', (H2', (H3', H4'))).
exists P.
exists Q.
unfold dist3 in H1'; tauto.
Qed.
End swap.
Module example (M': ProjectivePlane').
Module Swaped := swap M'.
Export M'.
Lemma dual_example :
forall P : Point,
{l1 : Line &
{l2 : Line &
{l3 : Line |
dist3 l1 l2 l3 /\
Incid P l1 /\ Incid P l2 /\ Incid P l3}}}.
Proof.
apply Swaped.a3_1.
Qed.
End example.
|
\section{Project Development}
\label{sec:projDev}
%---------------------------------------------------------------------
% ANTICIPATED FEATURES
%---------------------------------------------------------------------
\subsection{Anticipated Feature: Android App}
An Android app to view the livestream would greatly increase the accessibility of the Home Surveillance System. This feature was tentatively announced during the Proposal Phase, but time constraints limited the resources available for Android development. As a result, the Android app is currently on hold until the next major iteration of the system.
\begin{figure}[H]
\centering
\makebox[\textwidth][c]{\includegraphics[width=0.75\textwidth]{pics/android_app.jpg}}
\caption{Concept UI for the Android app.}
\label{fig:droidconcept}
\end{figure}
\subsection{Anticipated Feature: Servos}
User-controlled pan-tilt movement is a planned feature for the next major iteration of the Home Surveillance System. The tentative plan is to allow users to control the system through the website livestream as well as through the planned Android app. |
\documentclass[paper=a4, fontsize=11pt]{scrartcl} % A4 paper and 11pt font size
\usepackage[T1]{fontenc} % Use 8-bit encoding that has 256 glyphs
\usepackage{fourier} % Use the Adobe Utopia font for the document - comment this line to return to the LaTeX default
\usepackage[english]{babel} % English language/hyphenation
\usepackage{amsmath,amsfonts,amsthm,amssymb} % Math packages
\usepackage{algorithm, algorithmic}
\renewcommand{\algorithmicrequire}{\textbf{Input:}} %Use Input in the format of Algorithm
\renewcommand{\algorithmicensure}{\textbf{Output:}} %UseOutput in the format of Algorithm
\usepackage{graphicx}
\usepackage{blindtext}
\usepackage{enumerate}
\usepackage{ulem}
\usepackage{pdfpages}
\usepackage{multirow}
\usepackage{listings}
\lstset{language=Matlab}
\usepackage{lipsum} % Used for inserting dummy 'Lorem ipsum' text into the template
\usepackage{sectsty} % Allows customizing section commands
\allsectionsfont{\centering \normalfont\scshape} % Make all sections centered, the default font and small caps
\usepackage{fancyhdr} % Custom headers and footers
\pagestyle{fancyplain} % Makes all pages in the document conform to the custom headers and footers
\fancyhead{} % No page header - if you want one, create it in the same way as the footers below
\fancyfoot[L]{} % Empty left footer
\fancyfoot[C]{} % Empty center footer
\fancyfoot[R]{\thepage} % Page numbering for right footer
\renewcommand{\headrulewidth}{0pt} % Remove header underlines
\renewcommand{\footrulewidth}{0pt} % Remove footer underlines
\setlength{\headheight}{13.6pt} % Customize the height of the header
\numberwithin{equation}{section} % Number equations within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{figure}{section} % Number figures within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{table}{section} % Number tables within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\setlength\parindent{0pt} % Removes all indentation from paragraphs - comment this line for an assignment with lots of text
\newcommand{\horrule}[1]{\rule{\linewidth}{#1}} % Create horizontal rule command with 1 argument of height
\newcommand*{\dif}{\mathop{}\!\mathrm{d}}
\title{
\normalfont \normalsize
\textsc{Shanghai Jiao Tong University, UM-SJTU JOINT INSTITUTE} \\ [25pt] % Your university, school and/or department name(s)
\horrule{0.5pt} \\[0.4cm] % Thin top horizontal rule
\huge Technical Communication\\ HW6 \\ % The assignment title
\horrule{2pt} \\[0.5cm] % Thick bottom horizontal rule
}
\author{Yu Cang \quad 018370210001} % Your name
\date{\normalsize \today} % Today's date or a custom date
\begin{document}
\maketitle % Print the title
\section{Slides in \LaTeX}
Please refer to ``../ex1/slide.pdf'' and ``../ex1/slide.tex''.
\section{Writing}
\subsection*{First version}
Turbulence combustion consists of complex flow and combustion phenomenon. Based on the model of flamelet, progress variable-flamelet model was developed for simulation of turbulence combustion. The progress variable was introduced to describe the complicated combustion phenomenon, such as local extinction and re-ignition. In my project, In order to exam the LES based on progress variable-flamelet model, I have been trying to apply this model to the simulation of diffusion jet flame, especially investigating the accuracy and availability of this model under the conditions of different Reynolds number, inflow profiles, and ignition approaches.
\subsection*{Modified version}
The research focuses on developing the progress variable flamelet model for simulating the turbulence combustion. There are some common phenomenon in turbulence combustion, e.g. local extinction and re-ignition, bringing great trouble to the simulation. And progress variable is very suitable to describe them. Large-eddy simulation(LES) is one of the most popular simulation methods in combustion field, and it is combined with variable-flamelet model for the simulation of diffusion jet flame in his project. The details include investigating the performance of this method under different combustion conditions.
\subsection*{Final version}
Focusing on developing the progress variable flamelet model for the simulation of turbulent combustion, the research is combining Large-eddy simulation(LES) with variable flamelet model, for diffusion jet flame simulation. Some complicated phenomenon in turbulence combustion, e.g. local extinction and re-ignition, bring great trouble to the simulation. And progress variable is developed to describe them. On the other hand, Large-eddy simulation is one of
the most popular simulation methods in combustion field. To exam the availability of this combined model, a lot of numerical experiments under different conditions are proposed.
\section{Group Exercise}
Please refer to Xuqing Zhou's submission, whose student ID is 118370910023.
\section{Leonardo da Vinci}
This letter is excellent in terms of both content and expression. Overall, it is full of passion and confidence, which helps to create a positive atmosphere. \newline
The introduction is split into 10 parts, each focusing on a specific topic. This makes the overall structure much clearer. \newline
In each part, active verbs and the simple tense are used at the beginning to make the writing read as powerful and reliable. Then, the effects are vividly described. This is exactly what the selling strategy ``Sell the hole'' does.\newline
Besides, rhetorical expressions are used appropriately, which makes the letter sound polite and suitable. All aspects, such as the target readers, contents, timing, and conditions, are well considered. I think it is a good job.
\section{IDEA survey}
\begin{figure}[h]
\centering
\includegraphics[width=\linewidth]{IDEA.jpg}
\end{figure}
\end{document}
|
Mission:
To provide quality, collective theater that celebrates storytelling to the greater Sacramento area.
Description:
Common House Productions was formed out of the fervent desire to provide an outlet for collective and truly democratic theater in the greater Sacramento area. Common House produces works from both established playwrights and new artists that focus on the power of storytelling. Our aim as theater practitioners is to transform an ordinary or unconventional performance space into a common house: a sacred space that fosters art and transformation. We recognize that the transformative acts of theater impact the individual, as well as the community that takes on the responsibility of telling a story, and we hope to be the catalyst for such transformations in the communities in which we perform. Together we can create a common space that provokes extraordinary theater.
Stay updated for the website launch, the first season announcement, and opportunities to get involved!
UPCOMING PRODUCTIONS:
Come join us for The Woman in Black, a spine-chilling ghost story perfect for the Halloween season! Bring a folding chair and a blanket to wrap up warm and tight. Try to remain calm as the Common House players thrill you with a campfire rendition of a tale about the vengeful ghost of a scorned woman and the town haunted by her past.
October 31–November 4 & November 8–11
May be unsuitable for children under 10
|
function [expr morder] = sim_ex_Schelter_2009_3_1(varargin)
% Simulation: Schelter 2009 Eq 3.1
%
% Description:
%
% 5-variate VAR[3] system of coupled oscillators.
% This system was first described in [1].
%
% The directed graph for this model is:
% x1 -> x3
% x1 -> x4
% x2 -> x1
% x2 -> x3
% x4 -> x5
% x5 -> x4
%
% The dependency structure of this model can
% be viewed by executing the following command:
%
% >>hlp_viewGraphicsResource('sim/Schelter_2009_3_1.jpg');
%
% Author Credits:
%
% Tim Mullen, 2011
%
% References and Code:
%
% [1] (Ex 3.1, Eq. 11-15) Schelter B, Timmer J, Eichler M (2009) Assessing the strength of directed influences among neural signals using renormalized partial directed coherence. Journal of neuroscience methods 179:121-30
%
% ------------------------------------------------------------------------
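% Example invocation (a minimal sketch, not part of the original SIFT file; the
% name-value form follows the arg_define specification below, but any surrounding
% simulation/model-fitting pipeline is assumed rather than defined here):
%
%   [expr, morder] = sim_ex_Schelter_2009_3_1('ModelOrder', 3);
%   % expr   -- cell array holding the five VAR[3] equation strings
%   % morder -- the model order (3), needed by downstream model fitting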
% specify the default system of equations
expr_def = { ...
'x1(t) = 0.9*x1(t-1) + 0.3*x2(t-2) + e1(t)' ...
'x2(t) = 1.3*x2(t-1) + -0.8*x2(t-2) + e2(t)' ...
'x3(t) = 0.3*x1(t-2) + 0.6*x2(t-1) + e3(t)' ...
'x4(t) = -0.7*x4(t-3) + -0.7*x1(t-3) + 0.3*x5(t-3) + e4(t)' ...
'x5(t) = 1*x5(t-1) + -0.4*x5(t-2) + 0.3*x4(t-2) + e5(t)' ...
};
% set up argument definitions
arg_define(varargin, ...
arg({'expr','DynamicalEquations'},expr_def,[],'System of equations'), ...
arg({'morder','ModelOrder'},3,[1 Inf],'Model order. This is mandatory'));
if isempty(morder)
error('SIFT:sim_examples:badParam','ModelOrder must be specified');
end |
function x=v_rsfft(y,n)
%V_RSFFT fft of a real symmetric spectrum X=V_RSFFT(Y,N)
% Y is the "first half" of a symmetric real input signal and X is the
% "first half" of the symmetric real fourier transform.
% If the length, N, of the full signal is even, then the "first half"
% contains 1+N/2 elements (the first and last are excluded from the reflection).
% If N is odd, the "first half" contains 0.5+N/2 elements and only the first
% is excluded from the reflection.
% If N is specified explicitly, then Y will be truncated or zero-padded accordingly.
% If N is omitted it will be taken to be 2*(length(Y)-1) and is always even.
%
% If Y is a matrix, the transform is performed along each column
%
% The inverse function is y=v_rsfft(x,n)/n
% Could be made faster for even n by using symmetry
% Copyright (C) Mike Brookes 1998
% Version: $Id: v_rsfft.m 10865 2018-09-21 17:22:45Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if ~isreal(y) error('RSFFT: Input must be real'); end
fl=size(y,1)==1;
if fl y=y(:); end
[m,k]=size(y);
if nargin<2 n=2*m-2;
else
mm=1+fix(n/2);
if mm>m y=[y; zeros(mm-m,k)];
elseif mm<m y(mm+1:m,:)=[];
end
m=mm;
end
x=real(fft([y;y(n-m+1:-1:2,:)]));
x(m+1:end,:)=[];
if fl x=x.'; end
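% Example round trip (a minimal sketch, not part of the original VOICEBOX file;
% it simply illustrates the forward/inverse relation documented above):
%   y  = [1 2 3 4 5];      % "first half" of a symmetric real signal, so n = 2*(5-1) = 8
%   x  = v_rsfft(y);       % real transform of the implied length-8 symmetric signal
%   y2 = v_rsfft(x)/8;     % inverse as documented: y2 equals y up to rounding error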
|
lemma nonneg_incseq_Bseq_subseq_iff: fixes f :: "nat \<Rightarrow> real" and g :: "nat \<Rightarrow> nat" assumes "\<And>x. f x \<ge> 0" "incseq f" "strict_mono g" shows "Bseq (\<lambda>x. f (g x)) \<longleftrightarrow> Bseq f" |
module Control.Monad.Logic
import public Control.Monad.Identity
import public Control.Monad.Logic.Interface
import public Control.Monad.Logic.Logic
|
Require Import List.
Require Import Omega.
Require Import LibUtils.
Require Import ListAdd.
Section Vector.
Definition Vector (T:Type) (n : nat) := {n':nat | n' < n}%nat -> T.
Definition Matrix (T:Type) (n m : nat) :=
{n':nat | n' < n}%nat -> {m':nat | m' < m}%nat -> T.
Definition ConstVector {T} (n:nat) (c:T) : (Vector T n) :=
fun (n': {n':nat | n' < n}%nat) => c.
Definition ConstMatrix {T} (n m : nat) (c:T) : (Matrix T n m) :=
fun (n': {n':nat | n' < n}%nat) (m':{m':nat | m' < m}%nat) => c.
(* Definition vector_fold_right1_bounded_dep {A:nat->Type} {B}
(f:forall n,B->A n->A (S n)) (init:A 0%nat) (singleton:B->A 1%nat) {m:nat}
(v:Vector B m) (n:nat) (pf:(n<=m)%nat) {struct n}
: A n.
Proof.
destruct n.
- exact init.
- specialize (vector_fold_right1_bounded_dep A B f init singleton m v n (le_Sn_le _ _ pf)).
destruct n; intros.
+ exact (singleton (v (exist _ 0 pf)%nat)).
+ apply f.
* exact (v (exist _ n (le_Sn_le _ _ pf))).
* exact vector_fold_right1_bounded_dep.
Defined.
*)
Fixpoint vector_fold_right1_bounded_dep {A:nat->Type} {B}
(f:forall n,B->A n->A (S n)) (init:A 0%nat) (singleton:B->A 1%nat) {m:nat}
(v:Vector B m) (bound:nat) {struct bound}
: (bound <= m)%nat -> A bound :=
match bound as bound' return (bound' <= m -> A bound') with
| 0 => fun _ =>
init
| S bound1 =>
fun pf0 : S bound1 <= m =>
let an := vector_fold_right1_bounded_dep f init singleton v bound1 (le_Sn_le bound1 m pf0) in
match bound1 as bound1' return (A bound1' -> S bound1' <= m -> A (S bound1')) with
| 0 => fun (_ : A 0) (pf1 : 1 <= m) =>
singleton (v (exist _ 0 pf1))
| S bound2 => fun (an' : A (S bound2)) (pf1 : S (S bound2) <= m) =>
f (S bound2) (v (exist _ bound1 pf0)) an'
end an pf0
end.
Definition vector_fold_right_bounded_dep {A:nat->Type} {B}
(f:forall n,B->A n->A (S n)) (init:A 0%nat) {m:nat} (v:Vector B m) (n:nat)
(pf:(n<=m)%nat)
: A n.
Proof.
induction n.
- exact init.
- apply f.
+ exact (v (exist _ n pf)).
+ apply IHn.
exact (le_Sn_le _ _ pf).
Defined.
Definition vnil {T} : Vector T 0.
Proof.
intros [i pf].
omega.
Defined.
Definition vcons {T} {n} (x:T) (v:Vector T n) : (Vector T (S n)).
Proof.
intros [i pf].
destruct (Nat.eq_dec i n).
+ exact x.
+ apply v.
exists i.
apply NPeano.Nat.le_neq.
split; trivial.
now apply le_S_n in pf.
Defined.
Definition vhd {T} {n} (v:Vector T (S n)) : T := v (exist _ (0%nat) (Nat.lt_0_succ n)).
Definition vlast {T} {n} (v:Vector T (S n)) : T := v (exist _ (n%nat) (Nat.lt_succ_diag_r n)).
Definition vdrop_last {T} {n} (v:Vector T (S n)) : Vector T n.
Proof.
intros [i pf]; apply v.
exists i.
apply NPeano.Nat.lt_lt_succ_r; trivial.
Defined.
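(* Editor's illustration (not part of the original development): vcons places
   the new element at the highest index, so on a singleton vector vhd (and
   likewise vlast) recovers it by computation. *)
Example vhd_vcons_singleton : vhd (vcons 1%nat vnil) = 1%nat.
Proof.
  reflexivity.
Qed.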
Lemma vector_fold_right1_bounded_dep_as_vector_fold_right_bounded_dep {A:nat->Type} {B}
(f:forall n,B->A n->A (S n)) (init:A 0%nat) (singleton:B->A 1%nat) {m:nat}
(v:Vector B m) (bound:nat) pf
:
(forall x, singleton x = f _ x init) ->
vector_fold_right1_bounded_dep f init singleton v bound pf =
vector_fold_right_bounded_dep f init v bound pf.
Proof.
intros feq.
unfold vector_fold_right_bounded_dep.
induction bound; simpl; trivial.
rewrite IHbound.
destruct bound; simpl; auto.
Qed.
Lemma vector_fold_right_bounded_dep_as_vector_fold_right1_bounded_dep {A:nat->Type} {B}
(f:forall n,B->A n->A (S n)) (init:A 0%nat) {m:nat}
(v:Vector B m) (bound:nat) pf
:
vector_fold_right_bounded_dep f init v bound pf =
vector_fold_right1_bounded_dep f init (fun x => f 0 x init) v bound pf.
Proof.
unfold vector_fold_right_bounded_dep.
induction bound; simpl; trivial.
rewrite IHbound.
destruct bound; simpl; auto.
Qed.
Definition vector_fold_right1_dep {A:nat->Type} {B} (f:forall n, B->A n->A (S n))
(init:A 0%nat) (singleton:B->A 1%nat) {m:nat} (v:Vector B m) : A m
:= vector_fold_right1_bounded_dep f init singleton v m (le_refl _).
Definition vector_fold_right_dep {A:nat->Type} {B} (f:forall n, B->A n->A (S n))
(init:A 0%nat) {m:nat} (v:Vector B m) : A m
:= vector_fold_right_bounded_dep f init v m (le_refl _).
Definition vector_fold_right1 {A B:Type} (f:B->A->A) (init:A) (singleton:B->A) {m:nat} (v:Vector B m)
:= vector_fold_right1_dep (A:=fun _ => A) (fun _ => f) init singleton v.
Definition vector_fold_right {A B:Type} (f:B->A->A) (init:A) {m:nat} (v:Vector B m)
:= vector_fold_right_dep (fun _ => f) init v.
Lemma vector_fold_right1_dep_as_vector_fold_right_dep {A:nat->Type} {B}
(f:forall n,B->A n->A (S n)) (init:A 0%nat) (singleton:B->A 1%nat) {m:nat}
(v:Vector B m)
:
(forall x, singleton x = f _ x init) ->
vector_fold_right1_dep f init singleton v =
vector_fold_right_dep f init v.
Proof.
apply vector_fold_right1_bounded_dep_as_vector_fold_right_bounded_dep.
Qed.
Lemma vector_fold_right_dep_as_vector_fold_right1_dep {A:nat->Type} {B}
(f:forall n,B->A n->A (S n)) (init:A 0%nat) {m:nat}
(v:Vector B m)
:
vector_fold_right_dep f init v =
vector_fold_right1_dep f init (fun x => f 0 x init) v.
Proof.
apply vector_fold_right_bounded_dep_as_vector_fold_right1_bounded_dep.
Qed.
Lemma vector_fold_right1_as_vector_fold_right {A:Type} {B}
(f:B->A->A) (init:A) (singleton:B->A) {m:nat}
(v:Vector B m)
:
(forall x, singleton x = f x init) ->
vector_fold_right1 f init singleton v =
vector_fold_right f init v.
Proof.
apply (vector_fold_right1_dep_as_vector_fold_right_dep (fun _ => f)).
Qed.
Lemma vector_fold_right_as_vector_fold_right1 {A:Type} {B}
(f:B->A->A) (init:A) {m:nat}
(v:Vector B m)
:
vector_fold_right f init v =
vector_fold_right1 f init (fun x => f x init) v.
Proof.
apply (vector_fold_right_dep_as_vector_fold_right1_dep (fun _ => f)).
Qed.
Definition vectoro_to_ovector {T} {n} (v:Vector (option T) n) : option (Vector T n)
:= vector_fold_right_dep (fun n => lift2 (@vcons _ n)) (Some vnil) v.
Definition matrixo_to_omatrix {T} {m n} (v:Matrix (option T) m n) : option (Matrix T m n)
:= vectoro_to_ovector (fun i => vectoro_to_ovector (v i)).
Definition vmap {A B} {n} (f:A->B) (v:Vector A n) : Vector B n
:= vector_fold_right_dep (fun n x y => vcons (n:=n) (f x) y) vnil v.
Definition mmap {A B} {m n} (f:A->B) (mat:Matrix A m n) : Matrix B m n
:= vmap (fun mrow => vmap f mrow) mat.
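(* Editor's illustration (not part of the original development): vmap applies
   its function pointwise, e.g. mapping S over the singleton vector holding 1
   yields 2 at index 0. *)
Example vmap_S_singleton : vmap S (vcons 1%nat vnil) (exist _ 0%nat Nat.lt_0_1) = 2%nat.
Proof.
  reflexivity.
Qed.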
Definition list_fold_right1_bounded_dep {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) (singleton:B->A 1%nat) (l:list B) (n:nat) (pf:(n<=length l)%nat)
: A n.
Proof.
revert l pf.
induction n; intros l pf.
- exact init.
- destruct n.
+ assert (pf2:(0 < length l)%nat) by omega.
destruct l.
* simpl in pf; omega.
* exact (singleton b).
+ destruct l; simpl in *; try omega.
apply f.
* apply b.
* apply (IHn l).
omega.
Defined.
Definition list_fold_right1_dep {A:nat->Type} {B} (f:forall n, B->A n->A (S n))
(init:A 0%nat) (singleton:B->A 1%nat) (l:list B) : A (length l)
:= list_fold_right1_bounded_dep f init singleton l (length l) (le_refl _).
Definition list_fold_right_dep {A:nat->Type} {B} (f:forall n, B->A n->A (S n))
(init:A 0%nat) (l:list B) : A (length l)
:= list_fold_right1_dep f init (fun a => f _ a init) l.
Definition list_to_vector {A} (l:list A) : Vector A (length l)
:= list_fold_right_dep (@vcons _) vnil l.
Definition vector_to_list {A} {n} (v:Vector A n) : list A
:= vector_fold_right cons nil v.
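(* Editor's illustration (not part of the original development): the fold
   emits the highest index first, so the most recently consed element (vlast)
   heads the resulting list. *)
Example vector_to_list_cons_ex :
  vector_to_list (vcons 1%nat (vcons 2%nat vnil)) = 1%nat :: 2%nat :: nil.
Proof.
  reflexivity.
Qed.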
Definition matrix_to_list_list {T} {m n} (v:Matrix T m n) : (list (list T))
:= vector_to_list (fun i => vector_to_list (v i)).
Definition matrix_to_list {T} {m n} (v:Matrix T m n) : (list T)
:= concat (matrix_to_list_list v).
Definition vseq start len : Vector nat len
:= eq_rect _ _ (list_to_vector (seq start len)) _ (seq_length _ _).
Definition vector_zip {A B} {m:nat} (v1:Vector A m) (v2:Vector B m) : Vector (A*B) m
:= fun i => (v1 i, v2 i).
Definition matrix_zip {A B} {m n:nat} (mat1:Matrix A m n) (mat2:Matrix B m n) : Matrix (A*B) m n
:= let mat12:Vector (Vector A n*Vector B n) m := vector_zip mat1 mat2 in
vmap (fun '(a,b) => vector_zip a b) mat12.
Definition vector_split {A B} {m:nat} (v:Vector (A*B) m) : Vector A m * Vector B m
:= (fun i => fst (v i), fun i => snd (v i)).
Program Definition vtake {A} {m:nat} (v:Vector (A) m) (n:nat) (pf:(n<=m)%nat) : Vector A n
:= fun i => v i.
Next Obligation.
omega.
Defined.
Program Definition vskip {A} {m:nat} (v:Vector (A) m) (n:nat) (pf:(n<=m)%nat) : Vector A (m-n)
:= fun i => v (i+n).
Next Obligation.
omega.
Defined.
Definition transpose {A} {n m:nat} (mat:Matrix A n m) :=
fun i j => mat j i.
Definition vec_eq {A} {m:nat} (x y:Vector A m) := forall i, x i = y i.
Notation "x =v= y" := (vec_eq x y) (at level 70).
(* If we are willing to assume an axiom *)
Lemma vec_eq_eq {A} {m:nat} (x y:Vector A m) : vec_eq x y -> x = y.
Proof.
intros.
apply FunctionalExtensionality.functional_extensionality.
apply H.
Qed.
Lemma index_pf_irrel n m pf1 pf2 :
exist (fun n' : nat => (n' < n)%nat) m pf1 =
exist (fun n' : nat => (n' < n)%nat) m pf2.
f_equal.
apply digit_pf_irrel.
Qed.
Ltac index_prover := erewrite index_pf_irrel; reflexivity.
Lemma vector_Sn_split {T} {n} (v:Vector T (S n)) :
v =v= vcons (vlast v) (vdrop_last v).
Proof.
intros [i pf].
unfold vcons, vlast, vdrop_last.
destruct (Nat.eq_dec i n)
; subst
; f_equal
; apply index_pf_irrel.
Qed.
Lemma vector_split_zip {A B} {m:nat} (v:Vector (A*B) m) :
let '(va,vb):=vector_split v in vector_zip va vb =v= v.
Proof.
simpl.
intros i.
vm_compute.
now destruct (v i).
Qed.
Lemma split_vector_zip {A B} {m:nat} (va:Vector A m) (vb:Vector B m) :
vector_split (vector_zip va vb) = (va,vb).
Proof.
vm_compute.
f_equal.
Qed.
Definition vlconcat {A n} (v:Vector (list A) n) : list A
:= concat (vector_to_list v).
Definition vlconcat_map {A B n} (f:A->list B) (v:Vector A n) : list B
:= vlconcat (vmap f v).
Definition vin {A n} (x:A) (v:Vector A n) : Prop
:= exists i, v i = x.
(*
Lemma nth_In :
forall (n:nat) (l:list A) (d:A), n < length l -> In (nth n l d) l.
Lemma In_nth l x d : In x l ->
exists n, n < length l /\ nth n l d = x.
*)
Notation "x =v= y" := (vec_eq x y) (at level 70).
Lemma vector_fold_right_dep_bounded_pf_ext {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) {m:nat} (v:Vector B m) bound pf1 pf2 :
vector_fold_right_bounded_dep f init v bound pf1 = vector_fold_right_bounded_dep f init v bound pf2.
Proof.
revert pf1 pf2.
induction bound; trivial; intros.
simpl.
f_equal.
f_equal.
apply index_pf_irrel.
trivial.
Qed.
Lemma vector_fold_right_dep_bounded_ext {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) {m:nat} (x y:Vector B m) bound pf :
x =v= y -> vector_fold_right_bounded_dep f init x bound pf = vector_fold_right_bounded_dep f init y bound pf.
Proof.
intros eqq.
induction bound; simpl; congruence.
Qed.
Lemma vector_fold_right_dep_bounded_cut_down {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) {m:nat} (x:Vector B (S m)) bound pf1 pf2 :
vector_fold_right_bounded_dep f init x bound pf1 = vector_fold_right_bounded_dep f init (vdrop_last x) bound pf2.
Proof.
induction bound; simpl; trivial.
f_equal.
- f_equal.
apply index_pf_irrel.
- apply IHbound.
Qed.
Lemma vector_fold_right_dep_ext {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) {m:nat} {x y:Vector B m} :
x =v= y -> vector_fold_right_dep f init x = vector_fold_right_dep f init y.
Proof.
apply vector_fold_right_dep_bounded_ext.
Qed.
Lemma vector_fold_right_ext {A:Type} {B} (f:B->A->A) (init:A) {m:nat} {x y:Vector B m} :
x =v= y -> vector_fold_right f init x = vector_fold_right f init y.
Proof.
apply (@vector_fold_right_dep_ext (fun _ => A)).
Qed.
Lemma veq_refl {T} {n} (x:Vector T n) : x =v= x.
Proof.
intros i; reflexivity.
Qed.
Lemma veq_sym {T} {n} {x y:Vector T n} : x =v= y -> y =v= x.
Proof.
intros eqq i; symmetry; trivial.
Qed.
Lemma veq_trans {T} {n} {x y z:Vector T n} : x =v= y -> y =v= z -> x =v= z.
Proof.
intros eqq1 eqq2 i; etransitivity; eauto.
Qed.
Lemma vcons_proper {T} {n} a b (x y:Vector T n) : a = b -> x =v= y -> vcons a x =v= vcons b y.
Proof.
intros; subst.
intros [i pf].
unfold vcons.
destruct (Nat.eq_dec i n); simpl; trivial.
Qed.
Lemma vdrop_last_proper {T} {n} (x y:Vector T (S n)) : x =v= y -> vdrop_last x =v= vdrop_last y.
Proof.
intros eqq [i pf].
apply eqq.
Qed.
Lemma vlast_vcons {T} {n} x (d:Vector T n) : vlast (vcons x d) = x.
Proof.
unfold vlast, vcons.
match_destr; congruence.
Qed.
Lemma vdrop_last_vcons {T} {n} x (d:Vector T n) : vdrop_last (vcons x d) = d.
Proof.
unfold vdrop_last, vcons.
apply vec_eq_eq; intros [i pf].
match_destr; [omega | ].
erewrite index_pf_irrel; eauto.
Qed.
Lemma vector_fold_right_dep_0 {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) (v:Vector B 0) :
vector_fold_right_dep f init v = init.
Proof.
reflexivity.
Qed.
Lemma vector_fold_right_dep_Sn {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) {m:nat} (v:Vector B (S m)) :
vector_fold_right_dep f init v = f m (vlast v) (vector_fold_right_dep f init (vdrop_last v)).
Proof.
rewrite (vector_fold_right_dep_ext _ _ (vector_Sn_split v)).
unfold vector_fold_right_dep.
simpl.
destruct (Nat.eq_dec m m) ; [ | congruence].
f_equal.
erewrite vector_fold_right_dep_bounded_pf_ext.
erewrite vector_fold_right_dep_bounded_cut_down.
apply vector_fold_right_dep_bounded_ext.
apply vdrop_last_proper.
apply veq_sym.
apply vector_Sn_split.
Unshelve.
omega.
Qed.
Lemma vector_fold_right_Sn {A:Type} {B} (f:B->A->A) (init:A%nat) {m:nat} (v:Vector B (S m)) :
vector_fold_right f init v = f (vlast v) (vector_fold_right f init (vdrop_last v)).
Proof.
unfold vector_fold_right.
apply (@vector_fold_right_dep_Sn (fun _ => A)).
Qed.
Lemma vector_fold_right_dep_vcons {A:nat->Type} {B}
(f:forall n,B->A n->A (S n)) (init:A 0%nat) {m:nat}
x (v:Vector B m) :
vector_fold_right_dep f init (vcons x v) = f m x (vector_fold_right_dep f init v).
Proof.
now rewrite vector_fold_right_dep_Sn, vlast_vcons, vdrop_last_vcons.
Qed.
Lemma vector_fold_right_vcons {A:Type} {B}
(f:B->A->A) (init:A) {m:nat}
x (v:Vector B m) :
vector_fold_right f init (vcons x v) = f x (vector_fold_right f init v).
Proof.
apply (vector_fold_right_dep_vcons (fun _ => f)).
Qed.
Lemma vector_fold_right1_dep_bounded_ext {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m:nat} (x y:Vector B m) bound pf :
x =v= y -> vector_fold_right1_bounded_dep f init sing x bound pf = vector_fold_right1_bounded_dep f init sing y bound pf.
Proof.
intros eqq.
induction bound; simpl; trivial.
destruct bound; trivial.
- congruence.
- f_equal.
+ erewrite index_pf_irrel; eauto.
+ apply IHbound.
Unshelve.
omega.
Qed.
Lemma vector_fold_right1_dep_ext {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m:nat} {x y:Vector B m} :
x =v= y -> vector_fold_right1_dep f init sing x = vector_fold_right1_dep f init sing y.
Proof.
apply vector_fold_right1_dep_bounded_ext.
Qed.
Lemma vector_fold_right1_ext {A:Type} {B} (f:B->A->A) (init:A) sing {m:nat} {x y:Vector B m} :
x =v= y -> vector_fold_right1 f init sing x = vector_fold_right1 f init sing y.
Proof.
apply (@vector_fold_right1_dep_ext (fun _ => A)).
Qed.
Lemma vector_fold_right1_dep_bounded_f_ext {A:nat->Type} {B} (f1 f2:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m:nat} (v:Vector B m) bound pf :
(forall n pf a, f1 n (v (exist _ n pf)) a = f2 n (v (exist _ n pf)) a) -> vector_fold_right1_bounded_dep f1 init sing v bound pf = vector_fold_right1_bounded_dep f2 init sing v bound pf.
Proof.
intros eqq.
induction bound; simpl; trivial.
destruct bound; trivial.
f_equal.
eauto.
Qed.
Lemma vector_fold_right1_dep_f_ext {A:nat->Type} {B} (f1 f2:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m:nat} {v:Vector B m} :
(forall n pf a, f1 n (v (exist _ n pf)) a = f2 n (v (exist _ n pf)) a) -> vector_fold_right1_dep f1 init sing v = vector_fold_right1_dep f2 init sing v.
Proof.
apply vector_fold_right1_dep_bounded_f_ext.
Qed.
Lemma vector_fold_right1_f_ext {A:Type} {B} (f1 f2:B->A->A) (init:A) sing {m:nat} {v:Vector B m} :
(forall i a, f1 (v i) a = f2 (v i) a) -> vector_fold_right1 f1 init sing v = vector_fold_right1 f2 init sing v.
Proof.
intros.
apply (@vector_fold_right1_dep_f_ext (fun _ => A)); eauto.
Qed.
Lemma vector_fold_right1_dep_0 {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing (v:Vector B 0) :
vector_fold_right1_dep f init sing v = init.
Proof.
reflexivity.
Qed.
Lemma vector_fold_right1_dep_1 {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing (v:Vector B 1) :
vector_fold_right1_dep f init sing v = sing (v (exist _ 0 Nat.lt_0_1)).
Proof.
unfold vector_fold_right1_dep.
simpl.
f_equal.
erewrite index_pf_irrel; eauto.
Qed.
Lemma vector_fold_right1_bounded_dep_pf_ext {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m:nat} (v:Vector B m) bound pf1 pf2 :
vector_fold_right1_bounded_dep f init sing v bound pf1 = vector_fold_right1_bounded_dep f init sing v bound pf2.
Proof.
revert pf1 pf2.
induction bound; trivial; intros.
simpl.
destruct bound; simpl.
- f_equal; index_prover.
- f_equal; try index_prover.
apply IHbound.
Qed.
Lemma vector_fold_right1_bounded_dep_SSn {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m:nat} (v:Vector B (S m)) bound pf1 pf2 pf4:
vector_fold_right1_bounded_dep f init sing v (S (S bound)) pf1 = f _ (v (exist _ (S bound) pf4)) (vector_fold_right1_bounded_dep f init sing v (S bound) pf2).
Proof.
revert pf4.
induction bound; simpl; trivial; intros.
f_equal; try index_prover.
simpl in *.
f_equal; try index_prover.
destruct bound.
- f_equal; try index_prover.
- destruct bound.
apply IHbound.
f_equal; try index_prover.
f_equal; try index_prover.
apply vector_fold_right1_bounded_dep_pf_ext.
Qed.
Lemma vector_fold_right1_bounded_dep_relevant {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m1:nat} (v1:Vector B m1) {m2:nat} (v2:Vector B m2) bound pf1 pf2:
(forall i, i <= bound -> forall pf1 pf2, v1 (exist _ i pf1) = v2 (exist _ i pf2)) ->
vector_fold_right1_bounded_dep f init sing v1 bound pf1 =
vector_fold_right1_bounded_dep f init sing v2 bound pf2.
Proof.
intros eqq.
induction bound; simpl; trivial.
destruct bound; simpl.
- f_equal; try index_prover.
apply eqq.
omega.
- f_equal.
+ apply eqq.
omega.
+ apply IHbound; auto.
Qed.
Lemma vector_fold_right1_bounded_dep_droplast {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m:nat} (v:Vector B (S m)) bound pf1 pf2:
bound < S m ->
vector_fold_right1_bounded_dep f init sing (vdrop_last v) bound pf1 =
vector_fold_right1_bounded_dep f init sing v bound pf2.
Proof.
intros.
apply vector_fold_right1_bounded_dep_relevant; intros.
unfold vdrop_last.
index_prover.
Qed.
Lemma vector_fold_right1_dep_SSn {A:nat->Type} {B} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing {m:nat} (v:Vector B (S (S m))) :
vector_fold_right1_dep f init sing v = f (S m) (vlast v) (vector_fold_right1_dep f init sing (vdrop_last v)).
Proof.
unfold vector_fold_right1_dep.
unfold vlast.
erewrite vector_fold_right1_bounded_dep_SSn.
f_equal.
erewrite vector_fold_right1_bounded_dep_droplast; trivial.
omega.
Unshelve.
omega.
Qed.
Lemma vector_fold_right_bounded_dep_ind {A:nat->Type} {B} {P:forall m, A m -> Prop} (f:forall n,B->A n->A (S n))
(init:A 0%nat)
(finit : P 0 init)
(ff: forall n b v, P n v -> P (S n) (f n b v)) :
forall {m:nat} (v:Vector B m) bound pf, P bound (vector_fold_right_bounded_dep f init v bound pf).
Proof.
intros m v bound pf.
revert m pf v.
induction bound; simpl; trivial; intros.
apply ff.
apply IHbound.
Qed.
Lemma vector_fold_right_dep_ind {A:nat->Type} {B} {P:forall m, A m -> Prop} (f:forall n,B->A n->A (S n))
(init:A 0%nat)
(finit : P 0 init)
(ff: forall n b v, P n v -> P (S n) (f n b v)) :
forall {m:nat} (v:Vector B m), P m (vector_fold_right_dep f init v).
Proof.
intros.
apply vector_fold_right_bounded_dep_ind; trivial.
Qed.
Lemma vector_fold_right_ind {A:Type} {B} {P:A -> Prop} (f:B->A ->A)
(init:A)
(finit : P init)
(ff: forall b v, P v -> P (f b v)) :
forall {m:nat} (v:Vector B m), P (vector_fold_right f init v).
Proof.
intros.
apply (vector_fold_right_dep_ind (P:=fun _ => P)); trivial.
Qed.
Lemma vector_fold_right1_bounded_dep_ind {A:nat->Type} {B} {P:forall m, A m -> Prop} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing
(finit : P 0 init)
(fsing: forall b, P 1 (sing b))
(ff: forall n b v, P n v -> P (S n) (f n b v)) :
forall {m:nat} (v:Vector B m) bound pf, P bound (vector_fold_right1_bounded_dep f init sing v bound pf).
Proof.
intros m v bound pf.
revert m pf v.
induction bound; simpl; trivial; intros.
destruct bound; simpl; trivial.
apply ff.
apply IHbound.
Qed.
Lemma vector_fold_right1_dep_ind {A:nat->Type} {B} {P:forall m, A m -> Prop} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing
(finit : P 0 init)
(fsing: forall b, P 1 (sing b))
(ff: forall n b v, P n v -> P (S n) (f n b v)) :
forall {m:nat} (v:Vector B m), P m (vector_fold_right1_dep f init sing v).
Proof.
intros.
apply vector_fold_right1_bounded_dep_ind; trivial.
Qed.
Lemma vector_fold_right1_ind {A:Type} {B} {P:A -> Prop} (f:B->A ->A)
(init:A) sing
(finit : P init)
(fsing: forall b, P (sing b))
(ff: forall b v, P v -> P (f b v)) :
forall {m:nat} (v:Vector B m), P (vector_fold_right1 f init sing v).
Proof.
intros.
apply (vector_fold_right1_dep_ind (P:=fun _ => P)); trivial.
Qed.
Lemma vector_to_list_In {A} (x:A) {n} (v:Vector A n) :
vin x v -> In x (vector_to_list v).
Proof.
induction n.
- intros [[i pf] eqqi].
omega.
- intros [[i pf] eqqi].
unfold vector_to_list in *.
rewrite vector_fold_right_Sn; simpl.
destruct (Nat.eq_dec i n).
+ left.
unfold vlast.
subst.
erewrite index_pf_irrel; eauto.
+ right.
apply IHn.
eexists (exist _ i _).
simpl.
erewrite index_pf_irrel; eauto.
Unshelve.
simpl; omega.
Qed.
Lemma vin_cons {A} x (a:A) {n} {v:Vector A n} : vin x (vcons a v) <-> (x = a \/ vin x v).
Proof.
unfold vcons.
split.
- intros [[i pf] eqq].
destruct (Nat.eq_dec i n).
+ subst; eauto.
+ right.
eexists (exist _ i _).
erewrite index_pf_irrel; eauto.
Unshelve.
simpl; omega.
- intros [eqq | inn].
+ red.
eexists (exist _ n _).
destruct (Nat.eq_dec n n); congruence.
+ destruct inn as [[i pf] eqq].
eexists (exist _ i _).
destruct (Nat.eq_dec i n); [omega | ].
erewrite index_pf_irrel; eauto.
Unshelve.
simpl; omega.
simpl; omega.
Qed.
Lemma vin_proper {A} (x:A) {n} {v1 v2:Vector A n} : v1 =v= v2 -> vin x v1 <-> vin x v2.
Proof.
revert v1 v2.
cut (forall (v1 v2:Vector A n), v1 =v= v2 -> vin x v1 -> vin x v2).
{ intros; split; [eauto| ].
apply veq_sym in H0; eauto.
}
intros v1 v2 eqq1 [i eqq2].
exists i.
rewrite <- eqq1; trivial.
Qed.
Lemma vector_to_list_vin {A} (x:A) {n} (v:Vector A n) :
In x (vector_to_list v) -> vin x v.
Proof.
unfold vector_to_list.
revert v.
induction n; [simpl; tauto|].
intros v inn.
rewrite vector_fold_right_Sn in inn.
destruct inn as [eqq | inn].
- eexists.
apply eqq.
- apply (@vin_proper A _ (S n) _ _ (vector_Sn_split v)).
apply vin_cons.
eauto.
Qed.
Lemma vdrop_last_i {A} {n} (v:Vector A (S n)) i pf1 pf2 :
vdrop_last v (exist (fun n' : nat => (n' < n)%nat) i pf1) =
v (exist (fun n' : nat => (n' < S n)%nat) i pf2).
Proof.
unfold vdrop_last.
erewrite index_pf_irrel; eauto.
Qed.
Lemma vin_vlast {A n} (v:Vector A (S n)) : vin (vlast v) v.
Proof.
unfold vin, vlast.
eauto.
Qed.
Lemma vin_vdrop_last {A n} x (v:Vector A (S n)) : vin x (vdrop_last v) -> vin x v.
Proof.
unfold vin, vdrop_last.
intros [[??]?].
eauto.
Qed.
Lemma vmap_nth {A B : Type} (f : A -> B) {n} (v : Vector A n) i :
vmap f v i = f (v i).
Proof.
revert v i.
unfold vmap.
induction n; intros v [i pf].
- omega.
- rewrite vector_fold_right_dep_Sn.
simpl.
destruct (Nat.eq_dec i n).
+ subst.
unfold vlast.
erewrite index_pf_irrel; eauto.
+ specialize (IHn (vdrop_last v)).
unfold vdrop_last.
erewrite index_pf_irrel; rewrite IHn.
erewrite vdrop_last_i; eauto.
Unshelve.
omega.
Qed.
Lemma mmap_nth {A B : Type} (f : A -> B) {m n} (mat : Matrix A m n) i j :
mmap f mat i j = f (mat i j).
Proof.
unfold mmap.
now repeat rewrite vmap_nth.
Qed.
Lemma vmap_ext {A B n} (f1 f2:A->B) (df:Vector A n) :
(forall x, vin x df -> f1 x = f2 x) ->
vmap f1 df = vmap f2 df.
Proof.
unfold vmap.
induction n.
- reflexivity.
- intros.
repeat rewrite vector_fold_right_dep_Sn.
f_equal.
+ eapply H.
eapply vin_vlast.
+ rewrite IHn; trivial.
intros.
eapply H.
now apply vin_vdrop_last.
Qed.
Lemma vnil0 {A} (v:Vector A 0) : v = vnil.
Proof.
apply FunctionalExtensionality.functional_extensionality.
intros [i pf].
omega.
Qed.
Lemma vmap_id {A n} (df:Vector A n) :
vmap (fun x => x) df = df.
Proof.
unfold vmap.
induction n; simpl.
- now rewrite vnil0, vector_fold_right_dep_0.
- rewrite vector_fold_right_dep_Sn, IHn.
apply vec_eq_eq.
apply veq_sym.
apply vector_Sn_split.
Qed.
Definition bounded_seq_bounded (start len : nat) : forall bound, bound<=len -> list {n':nat | n' < start+len}%nat.
Proof.
refine (fix F bound :=
match bound as bound_ return bound_ <= len -> list {n':nat | n' < start+len}%nat with
| 0 => fun _ => nil
| S bound' => fun _ => exist _ (start + len-(S bound')) _ :: F bound' _
end); omega.
Defined.
Lemma bounded_seq_bounded_ext (start len : nat) (bound:nat) (pf1 pf2:bound<=len) :
bounded_seq_bounded start len bound pf1 = bounded_seq_bounded start len bound pf2.
Proof.
induction bound; simpl; trivial.
f_equal.
- eapply index_pf_irrel; eauto.
- eapply IHbound.
Qed.
Definition bounded_seq (start len : nat) : list {n':nat | n' < start+len}%nat
:= bounded_seq_bounded start len len (le_refl _).
Definition bounded_seq0 len : list {n':nat | n' < len}%nat := bounded_seq 0 len.
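(* Editor's illustration (not part of the original development): bounded_seq0
   enumerates the indices 0..len-1 as dependent pairs; projecting out the
   underlying values recovers the usual seq, here for len = 3. *)
Example bounded_seq0_proj :
  map (@proj1_sig _ _) (bounded_seq0 3%nat) = 0%nat :: 1%nat :: 2%nat :: nil.
Proof.
  reflexivity.
Qed.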
Lemma bounded_seq_bounded_domain start len (bound:nat) (pf:bound<=len) : map (@proj1_sig _ _) (bounded_seq_bounded start len bound pf) = seq (start+(len-bound)) bound.
Proof.
revert start.
induction bound; simpl; intros start; trivial.
rewrite IHbound.
f_equal.
- omega.
- f_equal.
omega.
Qed.
Lemma bounded_seq_domain start len : map (@proj1_sig _ _) (bounded_seq start len) = seq start len.
Proof.
unfold bounded_seq.
rewrite bounded_seq_bounded_domain.
f_equal.
omega.
Qed.
Lemma bounded_seq_strongly_sorted start len:
StronglySorted (fun x y => proj1_sig x < proj1_sig y) (bounded_seq start len).
Proof.
apply StronglySorted_compose.
rewrite bounded_seq_domain.
apply StronglySorted_seq.
Qed.
Lemma bounded_seq_break_at start len x :
proj1_sig x >= start ->
exists b c, bounded_seq start len = b++x::c /\ Forall (fun y => (proj1_sig y) < (proj1_sig x)) b /\ Forall (fun y => (proj1_sig x) < (proj1_sig y)) c.
Proof.
intros xge.
apply (StronglySorted_break (fun y x => (proj1_sig y) < (proj1_sig x)) (bounded_seq start len)).
- apply bounded_seq_strongly_sorted.
- destruct x as [x pf]; simpl in *.
assert (inn:In x (map (@proj1_sig _ _) (bounded_seq start len))).
+ rewrite bounded_seq_domain.
apply in_seq.
omega.
+ apply in_map_iff in inn.
destruct inn as [[??] [??]].
subst.
erewrite index_pf_irrel; eauto.
Qed.
Definition vforall {A n} (P:A->Prop) (v:Vector A n) :=
vector_fold_right (fun x p => P x /\ p) True v.
Lemma vforall_forall {A n} (P:A->Prop) (v:Vector A n) :
vforall P v <-> forall i, P (v i).
Proof.
unfold vforall.
split.
- induction n.
+ intros ? [??].
omega.
+ rewrite vector_fold_right_Sn.
intros [Plast Pdrop].
intros [i pf].
destruct (Nat.eq_dec i n).
* unfold vlast in Plast.
subst.
erewrite index_pf_irrel; eauto.
* assert (pf2:(i < n)%nat) by omega.
specialize (IHn _ Pdrop (exist _ i pf2)).
erewrite index_pf_irrel; eauto.
- induction n.
+ vm_compute; trivial.
+ rewrite vector_fold_right_Sn.
intros.
split.
* eauto.
* eapply IHn.
intros [i pf].
assert (pf2 : (i < S n)%nat) by omega.
specialize (H (exist _ i pf2)).
simpl in *.
erewrite index_pf_irrel; eauto.
Qed.
Lemma vectoro_to_ovector_forall_some_f {A n} {vo:Vector (option A) n} {v:Vector A n} :
vectoro_to_ovector vo = Some v ->
(forall i, vo i = Some (v i)).
Proof.
unfold vectoro_to_ovector.
induction n; simpl.
- intros ? [??]; omega.
- rewrite vector_fold_right_dep_Sn.
intros eqq.
apply some_lift2 in eqq.
destruct eqq as [x [y eqq1 [eqq2 eqq3]]].
subst.
intros [i pf].
rewrite vector_Sn_split.
specialize (IHn _ _ eqq2).
rewrite eqq1.
unfold vcons.
destruct (Nat.eq_dec i n); trivial.
Qed.
Lemma vectoro_to_ovector_forall_some_b {A n} (vo:Vector (option A) n) (v:Vector A n) :
(forall i, vo i = Some (v i)) ->
exists v', vectoro_to_ovector vo = Some v' /\ v =v= v'.
Proof.
unfold vectoro_to_ovector.
induction n; simpl.
- intros eqq.
unfold vector_fold_right_dep.
simpl.
exists vnil; split; trivial.
intros [??]; omega.
- rewrite vector_fold_right_dep_Sn.
intros eqq.
specialize (IHn (vdrop_last vo) (vdrop_last v)).
destruct IHn as [v' [eqq2 eqq3]].
+ intros [i pf].
simpl; eauto.
+ rewrite eqq2.
unfold vlast.
rewrite eqq.
simpl.
eexists; split; [reflexivity | ].
eapply veq_trans; [eapply (vector_Sn_split v) | ].
apply vcons_proper; simpl; trivial.
Qed.
Lemma vectoro_to_ovector_forall_some_b_strong {A n} (vo:Vector (option A) n) (v:Vector A n) :
(forall i, vo i = Some (v i)) ->
vectoro_to_ovector vo = Some v.
Proof.
intros.
destruct (vectoro_to_ovector_forall_some_b _ _ H) as [? [??]].
rewrite H0.
f_equal.
apply FunctionalExtensionality.functional_extensionality.
intros.
symmetry.
apply H1.
Qed.
Lemma vectoro_to_ovector_not_none {A n} (vo : Vector (option A) n) :
(forall i, vo i <> None) -> vectoro_to_ovector vo <> None.
Proof.
unfold vectoro_to_ovector.
induction n; simpl.
- intros eqq.
unfold vector_fold_right_dep.
simpl.
congruence.
- rewrite vector_fold_right_dep_Sn.
intros eqq.
specialize (IHn (vdrop_last vo)).
unfold lift2 in *.
repeat match_option.
+ elim IHn; trivial.
unfold vdrop_last.
now intros [i pf].
+ unfold vlast in eqq0.
elim (eqq _ eqq0).
Qed.
Lemma vectoro_to_ovector_exists_None {A n} {vo:Vector (option A) n} :
vectoro_to_ovector vo = None ->
{i | vo i = None}.
Proof.
unfold vectoro_to_ovector.
induction n; simpl.
- unfold vector_fold_right_dep; simpl.
discriminate.
- rewrite vector_fold_right_dep_Sn.
intros eqq.
specialize (IHn (vdrop_last vo)).
unfold lift2 in *.
repeat match_option_in eqq.
+ destruct (IHn eqq1) as [[i pf] ?].
eauto.
+ eauto.
Qed.
Lemma vectoro_to_ovector_None_None {A n} {vo:Vector (option A) n} i :
vo i = None ->
vectoro_to_ovector vo = None.
Proof.
destruct i as [i pf].
unfold vectoro_to_ovector.
induction n; simpl.
- omega.
- intros eqq.
rewrite vector_fold_right_dep_Sn.
unfold vlast.
destruct (Nat.eq_dec i n).
+ subst.
erewrite index_pf_irrel.
rewrite eqq; simpl; trivial.
+ unfold lift2.
erewrite IHn; simpl.
* match_destr.
* erewrite index_pf_irrel; eauto.
Unshelve.
omega.
Qed.
Definition vfirstn {T} {n} (v:Vector T n) m (pf:(m<=n)%nat): Vector T m.
Proof.
intros [i pf2].
apply v.
exists i.
eapply NPeano.Nat.lt_le_trans; eassumption.
Defined.
Lemma vfirstn0 {T} {n} (v:Vector T n) pf : vfirstn v 0 pf = vnil.
Proof.
apply vec_eq_eq; intros [??]; simpl.
omega.
Qed.
Definition vfirstn_eq {T} {n} (v:Vector T n) pf : vfirstn v n pf = v.
Proof.
unfold vfirstn.
apply FunctionalExtensionality.functional_extensionality; intros [??].
erewrite index_pf_irrel; eauto.
Qed.
Lemma vfirstn_vdrop_last {T} {n} (v:Vector T n) bound pf pf2 :
vdrop_last (vfirstn v (S bound) pf) = vfirstn v bound pf2.
Proof.
apply FunctionalExtensionality.functional_extensionality; intros [??]; simpl.
erewrite index_pf_irrel; eauto.
Qed.
Lemma vlast_vfirstn {T} {n} (d:Vector T n) bound pf pf2 :
(vlast (vfirstn d (S bound) pf)) = d ((exist _ bound pf2)).
Proof.
unfold vfirstn, vlast.
erewrite index_pf_irrel; eauto.
Qed.
Lemma vector_fold_right1_bounded_dep_gen_ind {A:nat->Type} {B} {P:forall m, Vector B m -> A m -> Prop} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing
(finit : P 0%nat vnil init)
(fsing: forall b, P 1%nat (vcons b vnil) (sing b))
(ff: forall n b v r, P n v r -> P (S n) (vcons b v) (f n b r)) :
forall {m:nat} (v:Vector B m) bound pf, P bound (vfirstn v _ pf) (vector_fold_right1_bounded_dep f init sing v bound pf).
Proof.
intros m v bound pf.
revert m pf v.
induction bound; simpl; trivial; intros.
- generalize (vfirstn v 0 pf); intros.
rewrite (vnil0 v0); trivial.
- destruct m; [omega | ].
destruct bound; simpl; trivial.
+ replace (vfirstn v 1 pf) with (vcons (v (exist (fun n' : nat => (n' < S m)%nat) 0%nat pf)) vnil)
; trivial.
apply FunctionalExtensionality.functional_extensionality; intros [??]; simpl.
destruct x; [ | omega].
simpl.
erewrite index_pf_irrel; eauto.
+ assert (pf2:(S bound <= S m)%nat) by omega.
replace (vfirstn v (S (S bound)) pf)
with (vcons (v (exist (fun n' : nat => (n' < S m)%nat) (S bound) pf)) (vfirstn v (S bound) pf2)).
* apply ff.
replace (match bound as bound1' return (A bound1' -> (S bound1' <= S m)%nat -> A (S bound1')) with
| 0%nat =>
fun (_ : A 0%nat) (pf1 : (1 <= S m)%nat) =>
sing (v (exist (fun n' : nat => (n' < S m)%nat) 0%nat pf1))
| S bound2 =>
fun (an' : A (S bound2)) (_ : (S (S bound2) <= S m)%nat) =>
f (S bound2) (v (exist (fun n' : nat => (n' < S m)%nat) bound (le_Sn_le (S bound) (S m) pf))) an'
end
(vector_fold_right1_bounded_dep f init sing v bound
(le_Sn_le bound (S m) (le_Sn_le (S bound) (S m) pf))) (le_Sn_le (S bound) (S m) pf)) with (vector_fold_right1_bounded_dep f init sing v (S bound) pf2); try eapply IHbound.
clear.
destruct bound; simpl.
-- erewrite index_pf_irrel; eauto.
-- f_equal.
++ erewrite index_pf_irrel; eauto.
++ destruct bound.
** erewrite index_pf_irrel; eauto.
** { f_equal.
-- erewrite index_pf_irrel; eauto.
-- apply vector_fold_right1_bounded_dep_pf_ext.
}
* generalize (vector_Sn_split (vfirstn v (S (S bound)) pf)); intros eqq.
apply vec_eq_eq in eqq.
rewrite eqq.
f_equal.
-- unfold vlast; simpl.
erewrite index_pf_irrel; eauto.
-- erewrite vfirstn_vdrop_last; eauto.
Qed.
Lemma vector_fold_right1_dep_gen_ind {A:nat->Type} {B} {P:forall m, Vector B m -> A m -> Prop} (f:forall n,B->A n->A (S n))
(init:A 0%nat) sing
(finit : P 0%nat vnil init)
(fsing: forall b, P 1%nat (vcons b vnil) (sing b))
(ff: forall n b v r, P n v r -> P (S n) (vcons b v) (f n b r)) :
forall {m:nat} (v:Vector B m), P m v (vector_fold_right1_dep f init sing v).
Proof.
intros.
rewrite <- (vfirstn_eq v (le_refl m)) at 1.
apply vector_fold_right1_bounded_dep_gen_ind; trivial.
Qed.
Program Definition vapp {A} {m n} (v1:Vector A m) (v2:Vector A n) : Vector A (m+n)
:= fun i => if lt_dec i m then v1 i else v2 (i-m).
Next Obligation.
omega.
Defined.
Lemma vtake_skip_app_eq_pf n m (pf:(n<=m)%nat) : n + (m - n) = m.
Proof.
rewrite Nat.add_sub_assoc by trivial.
now rewrite minus_plus.
Defined.
Lemma vtake_skip_app_lt_pf {m n i} (pf:(n<=m)%nat) (p2f:i < m) : i < n + (m - n).
Proof.
now rewrite vtake_skip_app_eq_pf.
Defined.
Lemma vtake_skip_app {A} {m:nat} (v:Vector (A) m) (n:nat) (pf:(n<=m)%nat) :
forall i, v i = vapp (vtake v n pf) (vskip v n pf) (exist _ (proj1_sig i) (vtake_skip_app_lt_pf pf (proj2_sig i))).
Proof.
intros.
unfold vapp, vtake, vskip.
destruct i; simpl.
match_destr.
- now erewrite index_pf_irrel.
-
match goal with
[|- _ = v (exist _ _ ?pff)] => generalize pff
end.
assert (HH:x - n + n = x) by omega.
rewrite HH.
intros.
now erewrite index_pf_irrel.
Qed.
Lemma vmap_vdrop_last {A B} {n} (f:A->B) (v:Vector A (S n)) : vmap f (vdrop_last v) = vdrop_last (vmap f v).
Proof.
unfold vdrop_last.
apply vec_eq_eq; intros [??]; simpl.
now repeat rewrite vmap_nth.
Qed.
Lemma map_vector_to_list_vmap {A B} {n} (f:A->B) (v:Vector A n) :
map f (vector_to_list v) = vector_to_list (vmap f v).
Proof.
unfold vector_to_list, vector_fold_right.
induction n.
- rewrite vector_fold_right_dep_0; trivial.
- repeat rewrite vector_fold_right_dep_Sn.
simpl.
rewrite IHn, vmap_vdrop_last.
unfold vlast.
now rewrite vmap_nth.
Qed.
Lemma vector_to_list_ext {A} {n} (x y:Vector A n) :
vec_eq x y -> vector_to_list x = vector_to_list y.
Proof.
apply vector_fold_right_ext.
Qed.
Lemma vector_to_list_length (n : nat) (A : Type) (v : Vector A n) :
length (vector_to_list v) = n.
Proof.
unfold vector_to_list, vector_fold_right.
revert v.
induction n; intros.
- simpl; trivial.
- rewrite vector_fold_right_dep_Sn.
simpl.
now rewrite IHn.
Qed.
End Vector.
|
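# Editor's note (not in the original file): appends `txt` to `file` verbatim;
# no newline is added, so include one in `txt` if needed, e.g.
#     _append("results.csv", "1,2,3\n")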
function _append(file, txt)
open(file, "a") do io
print(io, txt)
end
end |
# This file is a part of Julia. License is MIT: https://julialang.org/license
# tests the output of the embedding example is correct
using Test
if Sys.iswindows()
# libjulia needs to be in the same directory as the embedding executable or in path
ENV["PATH"] = string(Sys.BINDIR, ";", ENV["PATH"])
end
@test length(ARGS) == 1
@testset "embedding example" begin
out = Pipe()
err = Pipe()
embedded_cmd_path = abspath(ARGS[1])
p = cd(@__DIR__) do
run(pipeline(Cmd([embedded_cmd_path]), stdin=devnull, stdout=out, stderr=err), wait=false)
end
close(out.in)
close(err.in)
out_task = @async readlines(out)
err = read(err, String)
@test err == "MethodError: no method matching this_function_has_no_methods()\n"
@test success(p)
lines = fetch(out_task)
@test length(lines) == 10
@test parse(Float64, lines[1]) ≈ sqrt(2)
@test lines[8] == "called bar"
@test lines[9] == "calling new bar"
@test lines[10] == " From worker 2:\tTaking over the world..."
end
|
module Client.Util
import Data.Buffer
import Extra.Buffer
import Fmt
import Inigo.Async.Base
import Inigo.Async.Promise
import System
import System.File
import System.Path
rejectStatus : String -> Int -> Buffer -> Promise a
rejectStatus url status buf =
do
contents <- liftIO $ readAll buf
let extra = if status == 500 then " -- You may need to login again with `inigo login`" else ""
reject (fmt "HTTP Failure (%d) from %s: \"%s\"%s" status url contents extra)
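||| (editor's note, not in the original file) Unwrap an HTTP (status, body)
||| result, rejecting the promise with a descriptive error unless the status is 200.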
export
assertOk : String -> Promise (Int, Buffer) -> Promise Buffer
assertOk url result =
do
(200, buf) <- result
| (status, res) => rejectStatus url status res
pure buf
||| TODO: Improve home dir
export
getHomeDir : IO (Maybe String)
getHomeDir =
do
Nothing <- getEnv "INIGO_HOME"
| Just home => pure $ Just home
Nothing <- getEnv "HOME"
| Just home => pure $ Just home
pure Nothing
export
inigoSessionFile : IO (Maybe String)
inigoSessionFile =
do
Nothing <- getEnv "INIGO_SESSION"
| Just sessionFile => pure (Just sessionFile)
Just homeDir <- getHomeDir
| Nothing => pure Nothing
pure $ Just (homeDir </> ".inigo_session")
export
readSessionFile : IO (Maybe String)
readSessionFile =
do
Just sessionFile <- inigoSessionFile
| Nothing => pure Nothing
Right session <- readFile sessionFile
| Left err => pure Nothing
pure (Just session)
export
auth : String -> (String, String)
auth session =
("Authorization", "Basic " ++ session)
|