#This example fits a GP regression model to the Mauna Loa CO2 data set. This data set is regularly updated and can be found at co2now.org/images/stories/data/co2-mlo-monthly-noaa-esrl.xls
#
#This example follows from Chapter 5 of Gaussian Processes for Machine Learning, Rasmussen and Williams (2006)
############################################################################################
using Gadfly, GaussianProcesses
data = readcsv("CO2_data.csv")
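#Note: readcsv (and mean, used below) is pre-1.0 Julia syntax; on Julia >= 1.0,
#use DelimitedFiles.readdlm("CO2_data.csv", ',') and Statistics.mean instead.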
year = data[:,1]; co2 = data[:,2];
x = year[year.<2004]; y = co2[year.<2004];
xpred = year[year.>=2004]; ypred = co2[year.>=2004];
mConst = MeanConst(mean(y)) #Fit the constant mean function
#Kernel is represented as a sum of kernels
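#Following Rasmussen & Williams Ch. 5, the four terms roughly model: a long-term
#smooth trend, the quasi-periodic seasonal component, medium-term irregularities,
#and short-term variations, respectively.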
kernel = SE(4.0,4.0) + Periodic(0.0,1.0,0.0)*SE(4.0,0.0) + RQ(0.0,0.0,-1.0) + SE(-2.0,-2.0)
gp = GP(x,y,mConst,kernel,-2.0)   #Fit the GP (last argument is the log standard deviation of the observation noise)
plot(gp,clim=(2004.0,2024.0)) #Gadfly can take a while to load
|
C***********************************************************************
C Module: aoutput.f
C
C Copyright (C) 2002 Mark Drela, Harold Youngren
C
C This program is free software; you can redistribute it and/or modify
C it under the terms of the GNU General Public License as published by
C the Free Software Foundation; either version 2 of the License, or
C (at your option) any later version.
C
C This program is distributed in the hope that it will be useful,
C but WITHOUT ANY WARRANTY; without even the implied warranty of
C MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
C GNU General Public License for more details.
C
C You should have received a copy of the GNU General Public License
C along with this program; if not, write to the Free Software
C Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
C***********************************************************************
SUBROUTINE OUTTOT(LUN)
C
C...PURPOSE To print out results of the vortex lattice calculation
C for the input configuration.
C
C...INPUT Configuration data for case in labeled commons
C
C...OUTPUT Printed output on logical unit LUN
C
INCLUDE 'AVL.INC'
CHARACTER*50 SATYPE
REAL CMSAX(3), ROTSAX(3)
C
1000 FORMAT (A)
C
IF (LUN.EQ.0) RETURN
C
C
CALL GETSA(LNASA_SA,SATYPE,DIR)
C
CA = COS(ALFA)
SA = SIN(ALFA)
C
C---- set rates in stability axes
ROTSAX(1) = WROT(1)*CA + WROT(3)*SA
ROTSAX(2) = WROT(2)
ROTSAX(3) = WROT(3)*CA - WROT(1)*SA
C
C---- set moments in stability axes
CMSAX(1) = CMT(1)*CA + CMT(3)*SA
CMSAX(2) = CMT(2)
CMSAX(3) = CMT(3)*CA - CMT(1)*SA
C
C---- inviscid near-field forces
CDTI = CA*CFTI(1) + SA*CFTI(3)
CYTI = CFTI(2)
CLTI = CA*CFTI(3) - SA*CFTI(1)
C
C---- jet near-field forces
CDTJ = CA*CFTJ(1) + SA*CFTJ(3)
CYTJ = CFTJ(2)
CLTJ = CA*CFTJ(3) - SA*CFTJ(1)
C
C---- viscous near-field forces
CDTV = CA*CFTV(1) + SA*CFTV(3)
C
C---- dump it
WRITE(LUN,200) ! Albert add
WRITE(LUN,'(F10.5)') CJT/SREF ! Albert add
WRITE(LUN,'(F10.5)') DIR*CFT(1)/SREF ! Albert add
WRITE(LUN,'(F10.5)') CFT(2)/SREF ! Albert add
WRITE(LUN,'(F10.5)') DIR*CFT(3)/SREF ! Albert add
WRITE(LUN,'(F10.5)') CLT/SREF ! Albert add
WRITE(LUN,'(F10.5)') CDT/SREF ! Albert add
WRITE(LUN,'(F10.5)') CLTI/SREF ! Albert add
WRITE(LUN,'(F10.5)') CLTJ/SREF ! Albert add
WRITE(LUN,'(F10.5)') CDTI/SREF ! Albert add
WRITE(LUN,'(F10.5)') CDTJ/SREF ! Albert add
WRITE(LUN,'(F10.5)') CDTV/SREF ! Albert add
WRITE(LUN,200)
WRITE(LUN,201)
WRITE(LUN,202) TITLE(1:60),NSURF,NSTRIP,NVOR
IF(IYSYM.GT.0) WRITE(LUN,2034) YSYM ! Albert mod
IF(IYSYM.LT.0) WRITE(LUN,2035) YSYM ! Albert mod
IF(IZSYM.GT.0) WRITE(LUN,2036) ZSYM ! Albert mod
IF(IZSYM.LT.0) WRITE(LUN,2037) ZSYM ! Albert mod
WRITE(LUN,204) SREF,CREF,BREF,
& XYZREF(1), XYZREF(2), XYZREF(3)
C
WRITE(LUN,205) SATYPE
WRITE(LUN,218) RTITLE(IRUN)
WRITE(LUN,220)
& ALFA/DTR, DIR*WROT(1)*BREF/2.0, DIR*ROTSAX(1)*BREF/2.0,
& BETA/DTR, WROT(2)*CREF/2.0,
& AMACH , DIR*WROT(3)*BREF/2.0, DIR*ROTSAX(3)*BREF/2.0
IF(NVARJET .GT. 0) WRITE(LUN,222) CJT/SREF, CQT/SREF
C
WRITE (LUN,226)
& DIR*CFT(1)/SREF, DIR*CMT(1)/(SREF*BREF),DIR*CMSAX(1)/(SREF*BREF),
& CFT(2)/SREF, CMT(2)/(SREF*CREF),
& DIR*CFT(3)/SREF, DIR*CMT(3)/(SREF*BREF),DIR*CMSAX(3)/(SREF*BREF),
& CLT/SREF, CLFF/SREF, CYFF/SREF,
& CDT/SREF, CDFF/SREF, SPANEF,
& CLTI/SREF, CLFFI/SREF,
& CLTJ/SREF, CLFFJ/SREF,
& CDTI/SREF, CDFFI/SREF, SPANEV,
& CDTJ/SREF, CDFFJ/SREF,
& CDTV/SREF, CDFFV/SREF
226 FORMAT (
& /2X,'CXtot =',F10.5,5X,'Cltot =',F10.5,5X,'Cl''tot =',F10.5
& /2X,'CYtot =',F10.5,5X,'Cmtot =',F10.5
& /2X,'CZtot =',F10.5,5X,'Cntot =',F10.5,5X,'Cn''tot =',F10.5
&//2X,'CLtot =',F10.5,5X,'CLff =',F10.5,4X,'CYff =',F10.5,
& /2X,'CDtot =',F10.5,5X,'CDff =',F10.5,4X,' e =',F10.4,
&//2X,'CLcir =',F10.5,5X,'CLffc =',F10.5,4X,
& /2X,'CLjet =',F10.5,5X,'CLffj =',F10.5,4X,
&//2X,'CDind =',F10.5,5X,'CDffi =',F10.5,4X,'e_vec =',F10.4,
& /2X,'CDjet =',F10.5,5X,'CDffj =',F10.5,4X,
& /2X,'CDvis =',F10.5,5X,'CDffv =',F10.5,4X )
C
WRITE(LUN,*)
DO K = 1, NCONTROL
WRITE(LUN,231) DNAME(K), DELCON(K)
ENDDO
IF(NDESIGN.GT.0) WRITE(LUN,*)
DO K = 1, NDESIGN
WRITE(LUN,231) GNAME(K), DELDES(K)
ENDDO
C
IF(NVARJET.GT.0) WRITE(LUN,*)
DO K = 1, NVARJET
WRITE(LUN,231) JNAME(K), DELJET(K)
ENDDO
C
WRITE(LUN,200)
200 FORMAT(1X,
&'---------------------------------------------------------------')
201 FORMAT(' Vortex Lattice Output -- Total Forces')
202 FORMAT(/' Configuration: ',A
& /5X,'# Surfaces =',I4
& /5X,'# Strips =',I4
& /5X,'# Vortices =',I4)
C
2034 FORMAT(/' Y Symmetry: Wall plane at Ysym =',F10.4) ! Albert mod
2035 FORMAT(/' Y Symmetry: Free surface at Ysym =',F10.4) ! Albert mod
2036 FORMAT(/' Z Symmetry: Ground plane at Zsym =',F10.4) ! Albert mod
2037 FORMAT(/' Z Symmetry: Free surface at Zsym =',F10.4) ! Albert mod
C
204 FORMAT(/2X, 'Sref =',G12.5,3X,'Cref =',G12.5,3X,'Bref =',G12.5
& /2X, 'Xref =',G12.5,3X,'Yref =',G12.5,3X,'Zref =',G12.5 )
205 FORMAT(/1X, A)
218 FORMAT(/' Run case: ', A)
220 FORMAT(
& /2X,'Alpha =',F10.5,5X,'pb/2V =',F10.5,5X,'p''b/2V =',F10.5
& /2X,'Beta =',F10.5,5X,'qc/2V =',F10.5
& /2X,'Mach =',F10.3,5X,'rb/2V =',F10.5,5X,'r''b/2V =',F10.5)
222 FORMAT(
& /2X,'CJtot =',F10.5,5X,'CQtot =',F10.5)
C
231 FORMAT(3X,A,'=',F10.5)
C
240 FORMAT (/)
C
RETURN
END ! OUTTOT
SUBROUTINE OUTSURF(LUN)
C
C...PURPOSE     To print out the force breakdown by surface
C               from the vortex lattice calculation.
C
C...INPUT Configuration data for case in labeled commons
C
C...OUTPUT Printed output on logical unit LUN
C
INCLUDE 'AVL.INC'
CHARACTER*50 SATYPE
REAL CFN(3), CMN(3), R(3)
C
INTEGER ICRS(3), JCRS(3)
DATA ICRS / 2, 3, 1 / , JCRS / 3, 1, 2 /
C
CALL GETSA(LNASA_SA,SATYPE,DIR)
C
SA = SIN(ALFA)
CA = COS(ALFA)
C
C========================================================================
C---- Force components from each surface
WRITE(LUN,200)
200 FORMAT(1X,
&'---------------------------------------------------------------')
WRITE (LUN,210) SATYPE,
& SREF,CREF,BREF,
& XYZREF(1), XYZREF(2), XYZREF(3)
DO IS = 1, NSURF
DO K = 1, 3
CFN(K) = CFNI(K,IS) + CFNJ(K,IS) + CFNV(K,IS)
CMN(K) = CMNI(K,IS) + CMNJ(K,IS) + CMNV(K,IS)
ENDDO
C
CDN = CA*CFN(1) + SA*CFN(3)
CYN = CFN(2)
CLN = CA*CFN(3) - SA*CFN(1)
C
CDNI = CA*CFNI(1,IS) + SA*CFNI(3,IS)
CDNV = CA*CFNV(1,IS) + SA*CFNV(3,IS)
C
CALL STRIP(STITLE(IS),NT)
WRITE (LUN,211) IS,SSURF(IS),
& CLN / SREF,
& CDN / SREF,
& CMN(2) / (SREF*CREF),
& CYN / SREF,
& DIR*CMN(3) / (SREF*BREF),
& DIR*CMN(1) / (SREF*BREF),
& CDNI / SREF,
& CDNV / SREF,
& STITLE(IS)(1:NT)
END DO
cc WRITE(LUN,212)
210 FORMAT ( ' Surface Forces (referred to Sref,Cref,Bref',
& ' about Xref,Yref,Zref)',
& /' ',A //
& 5X,'Sref =',G12.4, 3X,'Cref =',F10.4,3X,'Bref =',F10.4/
& 5X,'Xref =',2X,F10.4,3X,'Yref =',F10.4,3X,'Zref =',F10.4//
& ' n',6X,'Area',6X,'CL',6X,'CD',6X,'Cm',
& 6X,'CY',6X,'Cn',6X,'Cl',5X,'CDi',5X,'CDv')
211 FORMAT (I2,1X,F9.3,8F8.4,3X,A)
212 FORMAT (/)
C
C
C========================================================================
C--- Surface forces normalized by local reference quantities
WRITE (LUN,220)
DO IS = 1, NSURF
C------ set total surface force and moment
DO K = 1, 3
CFN(K) = CFNI(K,IS) + CFNJ(K,IS) + CFNV(K,IS)
CMN(K) = CMNI(K,IS) + CMNJ(K,IS) + CMNV(K,IS)
ENDDO
C
C------ rotate forces into stability axes
CDN = CA*CFN(1) + SA*CFN(3)
CYN = CFN(2)
CLN = CA*CFN(3) - SA*CFN(1)
C
CDNI = CA*CFNI(1,IS) + SA*CFNI(3,IS)
CDNV = CA*CFNV(1,IS) + SA*CFNV(3,IS)
C
C------ reference point for surface LE (hinge) moments
C defined by surface hinge vector direction thru first strip LE point
IF(IMAGS(IS).GE.0) THEN
R(1) = -RLE1(1,JFRST(IS))
R(2) = -RLE1(2,JFRST(IS))
R(3) = -RLE1(3,JFRST(IS))
ELSE
R(1) = -RLE2(1,JFRST(IS))
R(2) = -RLE2(2,JFRST(IS))
R(3) = -RLE2(3,JFRST(IS))
ENDIF
C
C------ set moment about point R(.)
DO K = 1, 3
IC = ICRS(K)
JC = JCRS(K)
CMN(K) = CMN(K) + R(IC)*CFN(JC) - R(JC)*CFN(IC)
ENDDO
C
C------ Surface hinge moment defined about hinge vector
CMLE = DOT(CMN,ESS(1,IS))
C
CALL STRIP(STITLE(IS),NT)
WRITE (LUN,221) IS,
& SSURF(IS),CAVESURF(IS),
& CLN / SSURF(IS),
& CDN / SSURF(IS),
& CDNV / SSURF(IS),
& CMLE / (SSURF(IS)*CAVESURF(IS)),
& STITLE(IS)(1:NT)
END DO
WRITE(LUN,200)
C
220 FORMAT (/' Surface Forces (referred to Ssurf, Cave ',
& 'about root LE on hinge axis)'//
& 2X,' n',5X,'Ssurf',6X,'Cave',
& 7X,'cl',7X,'cd',6X,'cdv',4x,'cm_LE')
221 FORMAT (2X,I2,F10.3,F10.3,4(1X,F8.4),2X,A)
C
RETURN
END ! OUTSURF
SUBROUTINE OUTSTRP(LUN)
C
C...PURPOSE To print out results of the vortex lattice calculation
C for the input configuration strip and surface forces.
C
C...INPUT Configuration data for case in labeled commons
C
C...OUTPUT Printed output on logical unit LUN
C
INCLUDE 'AVL.INC'
CHARACTER*50 SATYPE
REAL CFN(3), CMN(3),
& CFS(3), CMS(3),
& R(3)
C
INTEGER ICRS(3), JCRS(3)
DATA ICRS / 2, 3, 1 / , JCRS / 3, 1, 2 /
C
CALL GETSA(LNASA_SA,SATYPE,DIR)
C
IF (LUN.EQ.0) RETURN
C
CA = COS(ALFA)
SA = SIN(ALFA)
C
C...Print out the results -> Forces by surface and strip
WRITE(LUN,200)
WRITE(LUN,210)
WRITE(LUN,211) SATYPE
200 FORMAT(1X,
&'---------------------------------------------------------------')
210 FORMAT (' Surface and Strip Forces by surface')
211 FORMAT (/' Forces referred to Sref, Cref, Bref ',
& 'about Xref, Yref, Zref'/
& ' ',A)
C
DO IS = 1, NSURF
DO K = 1, 3
CFN(K) = CFNI(K,IS) + CFNJ(K,IS) + CFNV(K,IS)
CMN(K) = CMNI(K,IS) + CMNJ(K,IS) + CMNV(K,IS)
ENDDO
C
CDN = CA*CFN(1) + SA*CFN(3)
CYN = CFN(2)
CLN = CA*CFN(3) - SA*CFN(1)
C
CDNI = CA*CFNI(1,IS) + SA*CFNI(3,IS)
CDNV = CA*CFNV(1,IS) + SA*CFNV(3,IS)
C
C
NS = NJ(IS)
NV = NK(IS)
J1 = JFRST(IS)
C
WRITE (LUN,212) IS,STITLE(IS),NV,NS,J1,SSURF(IS),CAVESURF(IS)
WRITE (LUN,213)
& CLN/SREF, DIR*CMN(1)/(SREF*BREF),
& CYN/SREF, CMN(2)/(SREF*CREF),
& CDN/SREF, DIR*CMN(3)/(SREF*BREF),
& CDNI/SREF,
& CDNV/SREF
C
212 FORMAT (/2X,'Surface #',I2,5X,A/
& 5X,'# Chordwise =',I3,3X,'# Spanwise =',I3,
& 5X,'First strip =',I3/
& 5X,'Surface area =',F12.6,5X,' Ave. chord =',F12.6)
213 FORMAT ( 5X,'CLsurf =',F10.5,5X,'Clsurf =',F10.5,
& /5X,'CYsurf =',F10.5,5X,'Cmsurf =',F10.5,
& /5X,'CDsurf =',F10.5,5X,'Cnsurf =',F10.5,
& /5X,'CDisurf =',F10.5,5x,'CDvsurf =',F10.5)
C
C
WRITE (LUN,214) CLN/SREF,CDN/SREF
WRITE (LUN,216)
DO JJ = 1, NS
J = J1 + JJ-1
ASTRP = WSTRIP(J)*CHORD(J)
C
CALL FSTRIP(J,
& CAXIAL,CNORML,
& CL_STRP,CD_STRP,
& CLJ_STRP,CDJ_STRP,
& CLT_STRP,CLA_STRP,
& CMC4_STRP,CMLE_STRP,
& CNC_STRP )
C
XCP = 999.
IF(CL_STRP.NE.0.0) THEN
XCP = 0.25 - CMC4_STRP/CL_STRP
XCP = MIN( 99.0, MAX( -99.0 , XCP ) )
ELSE
XCP = 0.
ENDIF
WRITE (LUN,217)
& J,RLE(2,J),CHORD(J),ASTRP,CNCI,DWWAKE(J),
& CLT_STRP, CLA_STRP,CD_STRP,CDV_STRP(J),
& CMC4_STRP,CMLE_STRP,XCP
END DO
END DO
WRITE(LUN,200)
C
214 FORMAT (/' Forces referred to Ssurf, Cave ',
& 'about hinge axis thru LE'/
& 5X,'CLsurf =',F10.5,5X,'CDsurf =',F10.5/
& 5X,'Deflect =',F10.5,5X,'CmLEsurf=',F10.5)
C
216 FORMAT (/' Strip Forces referred to Strip Area, Chord'/
& 2X,' j ',5X,'Yle',4X,'Chord',5X,'Area',
& 5X,'c cl',6X,'ai',6X,'cl_norm',2X,'cl',7X,'cd',7X,
& 'cdv',4x,'cm_c/4',4x,'cm_LE',2x,'C.P.x/c')
217 FORMAT (2X,I4,11(1X,F8.4),1X,F8.3)
C
RETURN
END ! OUTSTRP
SUBROUTINE OUTELE(LUN)
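C
C...PURPOSE     To print out the vortex strengths and element forces
C               for each surface, strip, and vortex element.
C
C...INPUT       Configuration data for case in labeled commons
C
C...OUTPUT      Printed output on logical unit LUN
C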
INCLUDE 'AVL.INC'
CHARACTER*50 SATYPE
REAL CFN(3), CMN(3),
& CFS(3), CMS(3),
& R(3)
C
INTEGER ICRS(3), JCRS(3)
DATA ICRS / 2, 3, 1 / , JCRS / 3, 1, 2 /
C
CALL GETSA(LNASA_SA,SATYPE,DIR)
C
CA = COS(ALFA)
SA = SIN(ALFA)
C
C...Forces on each strip and element (long output, and slow to printout)
WRITE(LUN,200)
WRITE(LUN,202)
WRITE(LUN,205) SATYPE
C
200 FORMAT(1X,
&'---------------------------------------------------------------')
202 FORMAT (' Vortex Strengths (by surface, by strip)')
205 FORMAT (/' Forces referred to Sref, Cref, Bref ',
& 'about Xref, Yref, Zref'/
& ' ',A)
C
DO IS = 1, NSURF
DO K = 1, 3
CFN(K) = CFNI(K,IS) + CFNJ(K,IS) + CFNV(K,IS)
CMN(K) = CMNI(K,IS) + CMNJ(K,IS) + CMNV(K,IS)
ENDDO
C
CDN = CA*CFN(1) + SA*CFN(3)
CYN = CFN(2)
CLN = CA*CFN(3) - SA*CFN(1)
C
CDNI = CA*CFNI(1,IS) + SA*CFNI(3,IS)
CDNV = CA*CFNV(1,IS) + SA*CFNV(3,IS)
C
C
NS = NJ(IS)
NV = NK(IS)
J1 = JFRST(IS)
C
WRITE (LUN,212) IS,STITLE(IS),NV,NS,J1,
& SSURF(IS),CAVESURF(IS)
212 FORMAT (/1X,78('*')/2X,'Surface #',I2,5X,A/
& 5X,'# Chordwise =',I3,3X,'# Spanwise =',I3,
& 3X,'First strip =',I3/
& 5X,'Surface area =',F12.6,5X,' Ave. chord =',F12.6)
C
WRITE (LUN,213)
& CLN/SREF, DIR*CMN(1)/(SREF*BREF),
& CYN/SREF, CMN(2)/(SREF*CREF),
& CDN/SREF, DIR*CMN(3)/(SREF*BREF),
& CDNI/SREF,
& CDNV/SREF
213 FORMAT ( 5X,'CLsurf =',F10.5,5X,'Clsurf =',F10.5,
& /5X,'CYsurf =',F10.5,5X,'Cmsurf =',F10.5,
& /5X,'CDsurf =',F10.5,5X,'Cnsurf =',F10.5,
& /5X,'CDisurf =',F10.5,5x,'CDvsurf =',F10.5)
C
WRITE (LUN,214) CLN/SREF, CDN/SREF
214 FORMAT (/' Forces referred to Ssurf, Cave ',
& 'about hinge axis thru LE'/
& 5X,'CLsurf =',F10.5,5X,'CDsurf =',F10.5/
& 1X,78('*'))
C
DO JJ = 1, NS
J = J1 + JJ-1
I1 = IJFRST(J)
ASTRP = WSTRIP(J)*CHORD(J)
DIHED = -ATAN2(ENSY(J),ENSZ(J))/DTR
WRITE (LUN,232) J,NV,I1,
& RLE(1,J),CHORD(J),AINC(J)/DTR,
& RLE(2,J),WSTRIP(J),ASTRP,
& RLE(3,J),DIHED
C
CALL FSTRIP(J,
& CAXIAL,CNORML,
& CL_STRP,CD_STRP,
& CLJ_STRP,CDJ_STRP,
& CLT_STRP,CLA_STRP,
& CMC4_STRP,CMLE_STRP,
& CNC_STRP )
WRITE (LUN,233) CL_STRP, CD_STRP, CDV_STRP(J),
& CNORML , CAXIAL,
& CNCI, DWWAKE(J),
& CMLE_STRP, CMC4_STRP
DO II = 1, NV
I = I1 + (II-1)
XM = 0.5*(RV1(1,I)+RV2(1,I))
YM = 0.5*(RV1(2,I)+RV2(2,I))
ZM = 0.5*(RV1(3,I)+RV2(3,I))
WRITE (LUN,234) I,XM,YM,ZM,DXV(I),SLOPEC(I),DCP(I)
END DO
END DO
END DO
WRITE(LUN,200)
C
RETURN
C
232 FORMAT (/1X,'Strip #',I3,5X,'# Chordwise =',I3,
& 3X,'First Vortex =',I3/
& 4X,'Xle =',F10.5,4X,'Ave. Chord =',F10.4,
& 3X,'Incidence =',F10.4,' deg'/
& 4X,'Yle =',F10.5,4X,'Strip Width =',F10.5,
& 3X,'Strip Area =',F12.6/
& 4X,'Zle =',F10.5,4X,'Strip Dihed. =',F10.4)
233 FORMAT (/4X,'cl =',F10.5,4X,' cd =',F10.5,4X,' cdv =',F10.5,
& /4X,'cn =',F10.5,4X,' ca =',F10.5,
& 4X,' cnc =',F10.5,4X,'wake dnwsh =',F10.5,
& /4X,'cmLE=',F10.5,4X,'cm c/4 =',F10.5,
& //4X,'I',8X,'X ',8X,'Y ',8X,'Z ',8X,'DX ',
& 6X,'Slope',8X,'dCp')
234 FORMAT (2X,I3,6(2X,F10.5))
C
END ! OUTELE
SUBROUTINE OUTHINGE(LUN)
C
C...PURPOSE     To print out the control surface hinge moments
C               from the vortex lattice calculation.
C
C...INPUT Configuration data for case in labeled commons
C
C...OUTPUT Printed output on logical unit LUN
C
INCLUDE 'AVL.INC'
C
C...Hinge moments for each CONTROL
WRITE(LUN,200)
200 FORMAT(1X,
&'---------------------------------------------------------------')
C
WRITE (LUN,210) SREF,CREF
210 FORMAT (
& ' Control Hinge Moments' /
& ' (referred to Sref =',G12.4, 3X,'Cref =',F10.4,')' )
C
WRITE (LUN,212)
212 FORMAT(/' Control Chinge'
& /' ---------------- -----------')
C
DO N = 1, NCONTROL
WRITE (LUN,220) DNAME(N),CHINGE(N)
220 FORMAT (1X,A16,G12.4)
END DO
C
WRITE(LUN,200)
C
RETURN
END ! OUTHINGE
SUBROUTINE OUTCNC
C
C...PURPOSE To write out a CNC loading file
C for the input configuration strips
C
C...INPUT Configuration data for case in labeled commons
C
C...OUTPUT      Strip loading data written to a user-specified file
C
INCLUDE 'AVL.INC'
CHARACTER*1 ANS
CHARACTER*80 FNAM
SAVE FNAM
DATA FNAM /' '/
C
1000 FORMAT (A)
C
CALL STRIP(FNAM,NFN)
10 WRITE(*,2080) FNAM(1:NFN)
2080 FORMAT('Enter forces output file: ', A)
READ (*,1000) FNAM
C
IF(FNAM.NE.' ') THEN
OPEN(18,FILE=FNAM,STATUS='UNKNOWN',ERR=10)
REWIND(18)
ELSE
C-------- just a <return> was entered...
RETURN
ENDIF
C
WRITE(*,2090)
2090 FORMAT('Output file Simple(y,cnc,cl) or Full(x,y,z,cnc,cl,c)? ',$)
READ (*,1000) ANS
CALL TOUPER(ANS)
C
C...Print out the results -> strip loadings
c WRITE (18,210)
DO J=1, NSTRIP
CALL FSTRIP(J,
& CAXIAL,CNORML,
& CL_STRP,CD_STRP,
& CLJ_STRP,CDJ_STRP,
& CLT_STRP,CLA_STRP,
& CMC4_STRP,CMLE_STRP,
& CNC_STRP )
I = IJFRST(J)
XM = 0.5*(RV1(1,I)+RV2(1,I))
YM = 0.5*(RV1(2,I)+RV2(2,I))
ZM = 0.5*(RV1(3,I)+RV2(3,I))
CNCM = CNC_STRP
CLM = CL_STRP
CHM = CHORD(J)
DYM = WSTRIP(J)
ASM = DYM*CHM
IF(ANS.EQ.'S') THEN
WRITE (18,213) YM,CNCM,CLM,CHM
ELSE
WRITE (18,212) XM,YM,ZM,CNCM,CLM,CHM,DYM,ASM
ENDIF
END DO
CLOSE(18)
C
210 FORMAT (//' *** Strip Loadings')
212 FORMAT (3(F8.3,1X),2(F10.4,1X),2(F8.4,1X),F9.4)
213 FORMAT (F8.3,1X,3(F10.4,1X))
C
RETURN
END ! OUTCNC
SUBROUTINE DERMATM(LU)
C---------------------------------------------------------
C Calculates and outputs stability derivative matrix
C for current ALFA, BETA.
C---------------------------------------------------------
INCLUDE 'AVL.INC'
CHARACTER*50 SATYPE
REAL WROT_RX(3), WROT_RZ(3), WROT_A(3)
C
CALL GETSA(LNASA_SA,SATYPE,DIR)
C
C---- set freestream velocity components from alpha, beta
CALL VINFAB
C
C---- calculate forces and sensitivities
CALL AERO
C
C---- set stability-axes rates (RX,RY,RZ) in terms of body-axes rates
CA = COS(ALFA)
SA = SIN(ALFA)
RX = (WROT(1)*CA + WROT(3)*SA) * DIR
RY = WROT(2)
RZ = (WROT(3)*CA - WROT(1)*SA) * DIR
C
C---- now vice-versa, and set sensitivities (which is what's really needed)
cc WROT(1) = RX*CA - RZ*SA
cc WROT(2) = RY
cc WROT(3) = RZ*CA + RX*SA
C
WROT_RX(1) = CA * DIR
WROT_RX(2) = 0.
WROT_RX(3) = SA * DIR
C
WROT_RZ(1) = -SA * DIR
WROT_RZ(2) = 0.
WROT_RZ(3) = CA * DIR
C
WROT_A(1) = -RX*SA - RZ*CA !!! = -WROT(3)
WROT_A(2) = 0.
WROT_A(3) = -RZ*SA + RX*CA !!! = WROT(1)
C
C
C---- set force derivatives in stability axes
CL_AL = CLT_U(1)*VINF_A(1) + CLT_U(4)*WROT_A(1)
& + CLT_U(2)*VINF_A(2) + CLT_U(5)*WROT_A(2)
& + CLT_U(3)*VINF_A(3) + CLT_U(6)*WROT_A(3) + CLT_A
CL_BE = CLT_U(1)*VINF_B(1)
& + CLT_U(2)*VINF_B(2)
& + CLT_U(3)*VINF_B(3)
CL_RX = CLT_U(4)*WROT_RX(1) + CLT_U(6)*WROT_RX(3)
CL_RY = CLT_U(5)
CL_RZ = CLT_U(6)*WROT_RZ(3) + CLT_U(4)*WROT_RZ(1)
C
CY_AL = CYT_U(1)*VINF_A(1) + CYT_U(4)*WROT_A(1)
& + CYT_U(2)*VINF_A(2) + CYT_U(5)*WROT_A(2)
& + CYT_U(3)*VINF_A(3) + CYT_U(6)*WROT_A(3)
CY_BE = CYT_U(1)*VINF_B(1)
& + CYT_U(2)*VINF_B(2)
& + CYT_U(3)*VINF_B(3)
CY_RX = CYT_U(4)*WROT_RX(1) + CYT_U(6)*WROT_RX(3)
CY_RY = CYT_U(5)
CY_RZ = CYT_U(6)*WROT_RZ(3) + CYT_U(4)*WROT_RZ(1)
C
CR_AL = CMT_U(1,1)*VINF_A(1) + CMT_U(1,4)*WROT_A(1)
& + CMT_U(1,2)*VINF_A(2) + CMT_U(1,5)*WROT_A(2)
& + CMT_U(1,3)*VINF_A(3) + CMT_U(1,6)*WROT_A(3)
CR_BE = CMT_U(1,1)*VINF_B(1)
& + CMT_U(1,2)*VINF_B(2)
& + CMT_U(1,3)*VINF_B(3)
CR_RX = CMT_U(1,4)*WROT_RX(1) + CMT_U(1,6)*WROT_RX(3)
CR_RY = CMT_U(1,5)
CR_RZ = CMT_U(1,6)*WROT_RZ(3) + CMT_U(1,4)*WROT_RZ(1)
C
CM_AL = CMT_U(2,1)*VINF_A(1) + CMT_U(2,4)*WROT_A(1)
& + CMT_U(2,2)*VINF_A(2) + CMT_U(2,5)*WROT_A(2)
& + CMT_U(2,3)*VINF_A(3) + CMT_U(2,6)*WROT_A(3)
CM_BE = CMT_U(2,1)*VINF_B(1)
& + CMT_U(2,2)*VINF_B(2)
& + CMT_U(2,3)*VINF_B(3)
CM_RX = CMT_U(2,4)*WROT_RX(1) + CMT_U(2,6)*WROT_RX(3)
CM_RY = CMT_U(2,5)
CM_RZ = CMT_U(2,6)*WROT_RZ(3) + CMT_U(2,4)*WROT_RZ(1)
C
CN_AL = CMT_U(3,1)*VINF_A(1) + CMT_U(3,4)*WROT_A(1)
& + CMT_U(3,2)*VINF_A(2) + CMT_U(3,5)*WROT_A(2)
& + CMT_U(3,3)*VINF_A(3) + CMT_U(3,6)*WROT_A(3)
CN_BE = CMT_U(3,1)*VINF_B(1)
& + CMT_U(3,2)*VINF_B(2)
& + CMT_U(3,3)*VINF_B(3)
CN_RX = CMT_U(3,4)*WROT_RX(1) + CMT_U(3,6)*WROT_RX(3)
CN_RY = CMT_U(3,5)
CN_RZ = CMT_U(3,6)*WROT_RZ(3) + CMT_U(3,4)*WROT_RZ(1)
C
C
CALL OUTTOT(LU)
C
WRITE(LU,7004)
7004 FORMAT(/' Derivatives...')
C
WRITE(LU,7006)
7006 FORMAT(14X, 4X,' alpha',
& 4X,' beta'
& /14X, 4X,'----------------',
& 4X,'----------------')
C
WRITE(LU,7010) CL_AL, CL_BE
7010 FORMAT(' z force |',' CLa =',F11.6,' CLb =',F11.6)
C
WRITE(LU,7020) CY_AL, CY_BE
7020 FORMAT(' y force |',' CYa =',F11.6,' CYb =',F11.6)
C
WRITE(LU,7040) DIR*CR_AL, DIR*CR_BE
7040 FORMAT(' roll x mom.|',' Cla =',F11.6,' Clb =',F11.6)
C
WRITE(LU,7050) CM_AL, CM_BE
7050 FORMAT(' pitch y mom.|',' Cma =',F11.6,' Cmb =',F11.6)
C
WRITE(LU,7060) DIR*CN_AL, DIR*CN_BE
7060 FORMAT(' yaw z mom.|',' Cna =',F11.6,' Cnb =',F11.6)
C
C
WRITE(LU,7106)
7106 FORMAT(/14X, 4X,' roll rate p',
& 4X,' pitch rate q',
& 4X,' yaw rate r'
& /14X, 4X,'----------------',
& 4X,'----------------',
& 4X,'----------------' )
C
WRITE(LU,7110) CL_RX*2.0/BREF,
& CL_RY*2.0/CREF,
& CL_RZ*2.0/BREF
7110 FORMAT(' z force |',' CLp =',F11.6,
& ' CLq =',F11.6,
& ' CLr =',F11.6 )
C
WRITE(LU,7120) CY_RX*2.0/BREF,
& CY_RY*2.0/CREF,
& CY_RZ*2.0/BREF
7120 FORMAT(' y force |',' CYp =',F11.6,
& ' CYq =',F11.6,
& ' CYr =',F11.6 )
C
WRITE(LU,7140) DIR*CR_RX*2.0/BREF,
& DIR*CR_RY*2.0/CREF,
& DIR*CR_RZ*2.0/BREF
7140 FORMAT(' roll x mom.|',' Clp =',F11.6,
& ' Clq =',F11.6,
& ' Clr =',F11.6 )
C
WRITE(LU,7150) CM_RX*2.0/BREF,
& CM_RY*2.0/CREF,
& CM_RZ*2.0/BREF
7150 FORMAT(' pitch y mom.|',' Cmp =',F11.6,
& ' Cmq =',F11.6,
& ' Cmr =',F11.6 )
C
WRITE(LU,7160) DIR*CN_RX*2.0/BREF,
& DIR*CN_RY*2.0/CREF,
& DIR*CN_RZ*2.0/BREF
7160 FORMAT(' yaw z mom.|',' Cnp =',F11.6,
& ' Cnq =',F11.6,
& ' Cnr =',F11.6 )
C
IF(NCONTROL.GT.0) THEN
C
WRITE(LU,8106) (DNAME(K), K, K=1, NCONTROL)
8106 FORMAT(/14X,20(4X,A12, ' d',I1,' '))
WRITE(LU,8107) (' ',K=1, NCONTROL)
8107 FORMAT( 14X,20(3X,A,'----------------'))
C
WRITE(LU,8110) (' ',K,CLT_D(K)/SREF, K=1, NCONTROL)
8110 FORMAT(' z force |',20(A,' CLd',I1,' =',F11.6))
C
WRITE(LU,8120) (' ',K,CYT_D(K)/SREF, K=1, NCONTROL)
8120 FORMAT(' y force |',20(A,' CYd',I1,' =',F11.6))
C
WRITE(LU,8140) (' ',K,DIR*CMT_D(1,K)/(SREF*BREF), K=1, NCONTROL)
8140 FORMAT(' roll x mom.|',20(A,' Cld',I1,' =',F11.6))
C
WRITE(LU,8150) (' ',K, CMT_D(2,K)/(SREF*CREF), K=1, NCONTROL)
8150 FORMAT(' pitch y mom.|',20(A,' Cmd',I1,' =',F11.6))
C
WRITE(LU,8160) (' ',K,DIR*CMT_D(3,K)/(SREF*BREF), K=1, NCONTROL)
8160 FORMAT(' yaw z mom.|',20(A,' Cnd',I1,' =',F11.6))
C
WRITE(LU,8170) (' ',K, CDFF_D(K)/SREF, K=1, NCONTROL)
8170 FORMAT(' Trefftz drag|',20(A,'CDffd',I1,' =',F11.6))
C
WRITE(LU,8180) (' ',K, SPANEF_D(K), K=1, NCONTROL)
8180 FORMAT(' span eff. |',20(A,' ed',I1,' =',F11.6))
C
WRITE(LU,*)
WRITE(LU,*)
C
ENDIF
C
IF(NDESIGN.GT.0) THEN
C
WRITE(LU,8206) (GNAME(K), K, K=1, NDESIGN)
8206 FORMAT(/14X,20(4X,A12, ' g',I1,' '))
WRITE(LU,8207) (' ',K=1, NDESIGN)
8207 FORMAT( 14X,20(3X,A,'----------------'))
C
WRITE(LU,8210) (' ',K,CLT_G(K)/SREF, K=1, NDESIGN)
8210 FORMAT(' z force |',20(A,' CLg',I1,' =',F11.6))
C
WRITE(LU,8220) (' ',K,CYT_G(K)/SREF, K=1, NDESIGN)
8220 FORMAT(' y force |',20(A,' CYg',I1,' =',F11.6))
C
WRITE(LU,8230) (' ',K,DIR*CMT_G(1,K)/(SREF*BREF), K=1, NDESIGN)
8230 FORMAT(' roll x mom.|',20(A,' Clg',I1,' =',F11.6))
C
WRITE(LU,8240) (' ',K, CMT_G(2,K)/(SREF*CREF), K=1, NDESIGN)
8240 FORMAT(' pitch y mom.|',20(A,' Cmg',I1,' =',F11.6))
C
WRITE(LU,8250) (' ',K,DIR*CMT_G(3,K)/(SREF*BREF), K=1, NDESIGN)
8250 FORMAT(' yaw z mom.|',20(A,' Cng',I1,' =',F11.6))
C
WRITE(LU,8260) (' ',K, CDFF_G(K)/SREF, K=1, NDESIGN)
8260 FORMAT(' Trefftz drag|',20(A,'CDffg',I1,' =',F11.6))
C
WRITE(LU,8270) (' ',K, SPANEF_G(K), K=1, NDESIGN)
8270 FORMAT(' span eff. |',20(A,' eg',I1,' =',F11.6))
C
WRITE(LU,*)
WRITE(LU,*)
C
ENDIF
C
IF(CL_AL .NE. 0.0) THEN
XNP = XYZREF(1) - CM_AL/CL_AL
WRITE(LU,8401) XNP
8401 FORMAT(/' Neutral point Xnp =', F11.6)
ENDIF
C
IF(ABS(CR_RZ*CN_BE*(2.0/BREF)/(SREF*BREF)**2) .GT. 0.0001) THEN
BB = CR_BE*CN_RZ / (CR_RZ*CN_BE)
WRITE(LU,8402) BB
8402 FORMAT(/' Clb Cnr / Clr Cnb =', F11.6,
& ' ( > 1 if spirally stable )')
ENDIF
C
RETURN
END ! DERMATM
SUBROUTINE DERMATS(LU)
C---------------------------------------------------------
C Calculates and outputs stability derivative matrix
C for current ALFA, BETA.
C---------------------------------------------------------
INCLUDE 'AVL.INC'
CHARACTER*50 SATYPE
REAL WROT_RX(3), WROT_RZ(3), WROT_A(3)
REAL CMSAX(3),
& CMSAX_A(3),
& CMSAX_U(3,NUMAX),
& CMSAX_D(3,NDMAX),
& CMSAX_G(3,NGMAX)
C
CALL GETSA(LNASA_SA,SATYPE,DIR)
C
C---- set freestream velocity components from alpha, beta
CALL VINFAB
C
C---- calculate forces and sensitivities
CALL AERO
C
C---- set stability-axes rates (RX,RY,RZ) in terms of body-axes rates
CA = COS(ALFA)
SA = SIN(ALFA)
C
RX = (WROT(1)*CA + WROT(3)*SA) * DIR
RY = WROT(2)
RZ = (WROT(3)*CA - WROT(1)*SA) * DIR
C
C---- now vice-versa, and set sensitivities (which is what's really needed)
cc WROT(1) = RX*CA - RZ*SA
cc WROT(2) = RY
cc WROT(3) = RZ*CA + RX*SA
C
WROT_RX(1) = CA * DIR
WROT_RX(2) = 0.
WROT_RX(3) = SA * DIR
C
WROT_RZ(1) = -SA * DIR
WROT_RZ(2) = 0.
WROT_RZ(3) = CA * DIR
C
WROT_A(1) = -RX*SA - RZ*CA !!! = -WROT(3)
WROT_A(2) = 0.
WROT_A(3) = -RZ*SA + RX*CA !!! = WROT(1)
C
C
CMSAX(1) = CMT(1)*CA + CMT(3)*SA
CMSAX(2) = CMT(2)
CMSAX(3) = CMT(3)*CA - CMT(1)*SA
CMSAX_A(1) = -CMT(1)*SA + CMT(3)*CA
CMSAX_A(2) = 0.
CMSAX_A(3) = -CMT(3)*SA - CMT(1)*CA
C
DO K = 1, 6
CMSAX_U(1,K) = CMT_U(1,K)*CA + CMT_U(3,K)*SA
CMSAX_U(2,K) = CMT_U(2,K)
CMSAX_U(3,K) = CMT_U(3,K)*CA - CMT_U(1,K)*SA
ENDDO
DO K = 1, NCONTROL
CMSAX_D(1,K) = CMT_D(1,K)*CA + CMT_D(3,K)*SA
CMSAX_D(2,K) = CMT_D(2,K)
CMSAX_D(3,K) = CMT_D(3,K)*CA - CMT_D(1,K)*SA
ENDDO
DO K = 1, NDESIGN
CMSAX_G(1,K) = CMT_G(1,K)*CA + CMT_G(3,K)*SA
CMSAX_G(2,K) = CMT_G(2,K)
CMSAX_G(3,K) = CMT_G(3,K)*CA - CMT_G(1,K)*SA
ENDDO
C
C---- set force derivatives in stability axes
CL_AL = CLT_U(1)*VINF_A(1) + CLT_U(4)*WROT_A(1)
& + CLT_U(2)*VINF_A(2) + CLT_U(5)*WROT_A(2)
& + CLT_U(3)*VINF_A(3) + CLT_U(6)*WROT_A(3) + CLT_A
CL_BE = CLT_U(1)*VINF_B(1)
& + CLT_U(2)*VINF_B(2)
& + CLT_U(3)*VINF_B(3)
CL_RX = CLT_U(4)*WROT_RX(1) + CLT_U(6)*WROT_RX(3)
CL_RY = CLT_U(5)
CL_RZ = CLT_U(6)*WROT_RZ(3) + CLT_U(4)*WROT_RZ(1)
C
CY_AL = CYT_U(1)*VINF_A(1) + CYT_U(4)*WROT_A(1)
& + CYT_U(2)*VINF_A(2) + CYT_U(5)*WROT_A(2)
& + CYT_U(3)*VINF_A(3) + CYT_U(6)*WROT_A(3)
CY_BE = CYT_U(1)*VINF_B(1)
& + CYT_U(2)*VINF_B(2)
& + CYT_U(3)*VINF_B(3)
CY_RX = CYT_U(4)*WROT_RX(1) + CYT_U(6)*WROT_RX(3)
CY_RY = CYT_U(5)
CY_RZ = CYT_U(6)*WROT_RZ(3) + CYT_U(4)*WROT_RZ(1)
C
CR_AL = CMSAX_U(1,1)*VINF_A(1) + CMSAX_U(1,4)*WROT_A(1)
& + CMSAX_U(1,2)*VINF_A(2) + CMSAX_U(1,5)*WROT_A(2)
& + CMSAX_U(1,3)*VINF_A(3) + CMSAX_U(1,6)*WROT_A(3)
& + CMSAX_A(1)
CR_BE = CMSAX_U(1,1)*VINF_B(1)
& + CMSAX_U(1,2)*VINF_B(2)
& + CMSAX_U(1,3)*VINF_B(3)
CR_RX = CMSAX_U(1,4)*WROT_RX(1) + CMSAX_U(1,6)*WROT_RX(3)
CR_RY = CMSAX_U(1,5)
CR_RZ = CMSAX_U(1,6)*WROT_RZ(3) + CMSAX_U(1,4)*WROT_RZ(1)
C
CM_AL = CMSAX_U(2,1)*VINF_A(1) + CMSAX_U(2,4)*WROT_A(1)
& + CMSAX_U(2,2)*VINF_A(2) + CMSAX_U(2,5)*WROT_A(2)
& + CMSAX_U(2,3)*VINF_A(3) + CMSAX_U(2,6)*WROT_A(3)
& + CMSAX_A(2)
CM_BE = CMSAX_U(2,1)*VINF_B(1)
& + CMSAX_U(2,2)*VINF_B(2)
& + CMSAX_U(2,3)*VINF_B(3)
CM_RX = CMSAX_U(2,4)*WROT_RX(1) + CMSAX_U(2,6)*WROT_RX(3)
CM_RY = CMSAX_U(2,5)
CM_RZ = CMSAX_U(2,6)*WROT_RZ(3) + CMSAX_U(2,4)*WROT_RZ(1)
C
CN_AL = CMSAX_U(3,1)*VINF_A(1) + CMSAX_U(3,4)*WROT_A(1)
& + CMSAX_U(3,2)*VINF_A(2) + CMSAX_U(3,5)*WROT_A(2)
& + CMSAX_U(3,3)*VINF_A(3) + CMSAX_U(3,6)*WROT_A(3)
& + CMSAX_A(3)
CN_BE = CMSAX_U(3,1)*VINF_B(1)
& + CMSAX_U(3,2)*VINF_B(2)
& + CMSAX_U(3,3)*VINF_B(3)
CN_RX = CMSAX_U(3,4)*WROT_RX(1) + CMSAX_U(3,6)*WROT_RX(3)
CN_RY = CMSAX_U(3,5)
CN_RZ = CMSAX_U(3,6)*WROT_RZ(3) + CMSAX_U(3,4)*WROT_RZ(1)
C
C
CALL OUTTOT(LU)
C
WRITE(LU,7004)
7004 FORMAT(/' Stability-axis derivatives...')
C
WRITE(LU,7006)
7006 FORMAT(/14X, 4X,' alpha',
& 4X,' beta'
& /14X, 4X,'----------------',
& 4X,'----------------')
C
WRITE(LU,7010) CL_AL/SREF, CL_BE/SREF
7010 FORMAT(' z'' force CL |' ,' CLa =',F11.6,' CLb =',F11.6)
C
WRITE(LU,7020) CY_AL/SREF, CY_BE/SREF
7020 FORMAT(' y force CY |' ,' CYa =',F11.6,' CYb =',F11.6)
C
WRITE(LU,7040) DIR*CR_AL/(SREF*BREF), DIR*CR_BE/(SREF*BREF)
7040 FORMAT(' x'' mom. Cl''|',' Cla =',F11.6,' Clb =',F11.6)
C
WRITE(LU,7050) CM_AL/(SREF*CREF), CM_BE/(SREF*CREF)
7050 FORMAT(' y mom. Cm |' ,' Cma =',F11.6,' Cmb =',F11.6)
C
WRITE(LU,7060) DIR*CN_AL/(SREF*BREF), DIR*CN_BE/(SREF*BREF)
7060 FORMAT(' z'' mom. Cn''|',' Cna =',F11.6,' Cnb =',F11.6)
C
C
WRITE(LU,7106)
7106 FORMAT(/14X, 4X,' roll rate p''',
& 4X,' pitch rate q''',
& 4X,' yaw rate r'''
& /14X, 4X,'----------------',
& 4X,'----------------',
& 4X,'----------------' )
C
WRITE(LU,7110) CL_RX*(2.0/BREF)/SREF,
& CL_RY*(2.0/CREF)/SREF,
& CL_RZ*(2.0/BREF)/SREF
7110 FORMAT(' z'' force CL |',' CLp =',F11.6,
& ' CLq =',F11.6,
& ' CLr =',F11.6 )
C
WRITE(LU,7120) CY_RX*(2.0/BREF)/SREF,
& CY_RY*(2.0/CREF)/SREF,
& CY_RZ*(2.0/BREF)/SREF
7120 FORMAT(' y force CY |',' CYp =',F11.6,
& ' CYq =',F11.6,
& ' CYr =',F11.6 )
C
WRITE(LU,7140) DIR*CR_RX*(2.0/BREF)/(SREF*BREF),
& DIR*CR_RY*(2.0/CREF)/(SREF*BREF),
& DIR*CR_RZ*(2.0/BREF)/(SREF*BREF)
7140 FORMAT(' x'' mom. Cl''|',' Clp =',F11.6,
& ' Clq =',F11.6,
& ' Clr =',F11.6 )
C
WRITE(LU,7150) CM_RX*(2.0/BREF)/(SREF*CREF),
& CM_RY*(2.0/CREF)/(SREF*CREF),
& CM_RZ*(2.0/BREF)/(SREF*CREF)
7150 FORMAT(' y mom. Cm |',' Cmp =',F11.6,
& ' Cmq =',F11.6,
& ' Cmr =',F11.6 )
C
WRITE(LU,7160) DIR*CN_RX*(2.0/BREF)/(SREF*BREF),
& DIR*CN_RY*(2.0/CREF)/(SREF*BREF),
& DIR*CN_RZ*(2.0/BREF)/(SREF*BREF)
7160 FORMAT(' z'' mom. Cn''|',' Cnp =',F11.6,
& ' Cnq =',F11.6,
& ' Cnr =',F11.6 )
C
IF(NCONTROL.GT.0) THEN
C
WRITE(LU,8106) (DNAME(K), K, K=1, NCONTROL)
8106 FORMAT(/14X,20(4X,A12, ' d',I1,' '))
WRITE(LU,8107) (' ',K=1, NCONTROL)
8107 FORMAT( 14X,20(3X,A,'----------------'))
C
WRITE(LU,8110) (' ',K,CLT_D(K)/SREF, K=1, NCONTROL)
8110 FORMAT(' z'' force CL |' ,20(A,' CLd',I1,' =',F11.6))
C
WRITE(LU,8120) (' ',K,CYT_D(K)/SREF, K=1, NCONTROL)
8120 FORMAT(' y force CY |' ,20(A,' CYd',I1,' =',F11.6))
C
WRITE(LU,8140) (' ',K,DIR*CMT_D(1,K)/(SREF*BREF), K=1, NCONTROL)
8140 FORMAT(' x'' mom. Cl''|',20(A,' Cld',I1,' =',F11.6))
C
WRITE(LU,8150) (' ',K, CMT_D(2,K)/(SREF*CREF), K=1, NCONTROL)
8150 FORMAT(' y mom. Cm |' ,20(A,' Cmd',I1,' =',F11.6))
C
WRITE(LU,8160) (' ',K,DIR*CMT_D(3,K)/(SREF*BREF), K=1, NCONTROL)
8160 FORMAT(' z'' mom. Cn''|',20(A,' Cnd',I1,' =',F11.6))
C
WRITE(LU,8170) (' ',K, CDFF_D(K)/SREF, K=1, NCONTROL)
8170 FORMAT(' Trefftz drag|' ,20(A,'CDffd',I1,' =',F11.6))
C
WRITE(LU,8180) (' ',K, SPANEF_D(K), K=1, NCONTROL)
8180 FORMAT(' span eff. |' ,20(A,' ed',I1,' =',F11.6))
C
WRITE(LU,*)
WRITE(LU,*)
C
ENDIF
C
IF(NDESIGN.GT.0) THEN
C
WRITE(LU,8206) (GNAME(K), K, K=1, NDESIGN)
8206 FORMAT(/14X,20(4X,A12, ' g',I1,' '))
WRITE(LU,8207) (' ',K=1, NDESIGN)
8207 FORMAT( 14X,20(3X,A,'----------------'))
C
WRITE(LU,8210) (' ',K,CLT_G(K)/SREF, K=1, NDESIGN)
8210 FORMAT(' z'' force CL |' ,20(A,' CLg',I1,' =',F11.6))
C
WRITE(LU,8220) (' ',K,CYT_G(K)/SREF, K=1, NDESIGN)
8220 FORMAT(' y force CY |' ,20(A,' CYg',I1,' =',F11.6))
C
WRITE(LU,8230) (' ',K,DIR*CMT_G(1,K)/(SREF*BREF), K=1, NDESIGN)
8230 FORMAT(' x'' mom. Cl''|' ,20(A,' Clg',I1,' =',F11.6))
C
WRITE(LU,8240) (' ',K, CMT_G(2,K)/(SREF*CREF), K=1, NDESIGN)
8240 FORMAT(' y mom. Cm |' ,20(A,' Cmg',I1,' =',F11.6))
C
WRITE(LU,8250) (' ',K,DIR*CMT_G(3,K)/(SREF*BREF), K=1, NDESIGN)
8250 FORMAT(' z'' mom. Cn''|',20(A,' Cng',I1,' =',F11.6))
C
WRITE(LU,8260) (' ',K, CDFF_G(K)/SREF, K=1, NDESIGN)
8260 FORMAT(' Trefftz drag|',20(A,'CDffg',I1,' =',F11.6))
C
WRITE(LU,8270) (' ',K, SPANEF_G(K), K=1, NDESIGN)
8270 FORMAT(' span eff. |',20(A,' eg',I1,' =',F11.6))
C
WRITE(LU,*)
WRITE(LU,*)
C
ENDIF
C
IF(CL_AL .NE. 0.0) THEN
XNP = XYZREF(1) - CM_AL/CL_AL
WRITE(LU,8401) XNP
8401 FORMAT(/' Neutral point Xnp =', F11.6)
ENDIF
C
IF(ABS(CR_RZ*CN_BE*(2.0/BREF)/(SREF*BREF)**2) .GT. 0.0001) THEN
BB = CR_BE*CN_RZ / (CR_RZ*CN_BE)
WRITE(LU,8402) BB
8402 FORMAT(/' Clb Cnr / Clr Cnb =', F11.6,
& ' ( > 1 if spirally stable )')
ENDIF
C
RETURN
END ! DERMATS
SUBROUTINE DERMATB(LU)
C---------------------------------------------------------
C Calculates and outputs stability derivative matrix
C in body axes
C---------------------------------------------------------
INCLUDE 'AVL.INC'
CHARACTER*50 SATYPE
REAL WROT_RX(3), WROT_RZ(3), WROT_A(3)
C
CALL GETSA(LNASA_SA,SATYPE,DIR)
C
C---- set freestream velocity components from alpha, beta
CALL VINFAB
C
C---- calculate forces and sensitivities
CALL AERO
C
CALL OUTTOT(LU)
C
WRITE(LU,7004)
7004 FORMAT(/' Geometry-axis derivatives...')
C
C
WRITE(LU,7006)
7006 FORMAT(/14X, 4X,' axial vel. u',
& 4X,' sideslip vel. v',
& 4X,' normal vel. w'
& /14X, 4X,'----------------',
& 4X,'----------------',
& 4X,'----------------' )
C
WRITE(LU,7010) - CFT_U(1,1)/SREF,
& -DIR*CFT_U(1,2)/SREF,
& - CFT_U(1,3)/SREF
7010 FORMAT(' x force CX |',' CXu =',F11.6,
& ' CXv =',F11.6,
& ' CXw =',F11.6 )
C
WRITE(LU,7020) -DIR*CFT_U(2,1)/SREF,
& - CFT_U(2,2)/SREF,
& -DIR*CFT_U(2,3)/SREF
7020 FORMAT(' y force CY |',' CYu =',F11.6,
& ' CYv =',F11.6,
& ' CYw =',F11.6 )
C
WRITE(LU,7030) - CFT_U(3,1)/SREF,
& -DIR*CFT_U(3,2)/SREF,
& - CFT_U(3,3)/SREF
7030 FORMAT(' z force CZ |',' CZu =',F11.6,
& ' CZv =',F11.6,
& ' CZw =',F11.6 )
C
WRITE(LU,7040) - CMT_U(1,1)/(SREF*BREF),
& -DIR*CMT_U(1,2)/(SREF*BREF),
& - CMT_U(1,3)/(SREF*BREF)
7040 FORMAT(' x mom. Cl |',' Clu =',F11.6,
& ' Clv =',F11.6,
& ' Clw =',F11.6 )
C
WRITE(LU,7050) -DIR*CMT_U(2,1)/(SREF*CREF),
& - CMT_U(2,2)/(SREF*CREF),
& -DIR*CMT_U(2,3)/(SREF*CREF)
7050 FORMAT(' y mom. Cm |',' Cmu =',F11.6,
& ' Cmv =',F11.6,
& ' Cmw =',F11.6 )
C
WRITE(LU,7060) - CMT_U(3,1)/(SREF*BREF),
& -DIR*CMT_U(3,2)/(SREF*BREF),
& - CMT_U(3,3)/(SREF*BREF)
7060 FORMAT(' z mom. Cn |',' Cnu =',F11.6,
& ' Cnv =',F11.6,
& ' Cnw =',F11.6 )
C
C
WRITE(LU,7106)
7106 FORMAT(/14X, 4X,' roll rate p',
& 4X,' pitch rate q',
& 4X,' yaw rate r'
& /14X, 4X,'----------------',
& 4X,'----------------',
& 4X,'----------------' )
C
WRITE(LU,7110) CFT_U(1,4)*(2.0/BREF)/SREF,
& DIR*CFT_U(1,5)*(2.0/CREF)/SREF,
& CFT_U(1,6)*(2.0/BREF)/SREF
7110 FORMAT(' x force CX |',' CXp =',F11.6,
& ' CXq =',F11.6,
& ' CXr =',F11.6 )
C
WRITE(LU,7120) DIR*CFT_U(2,4)*(2.0/BREF)/SREF,
& CFT_U(2,5)*(2.0/CREF)/SREF,
& DIR*CFT_U(2,6)*(2.0/BREF)/SREF
7120 FORMAT(' y force CY |',' CYp =',F11.6,
& ' CYq =',F11.6,
& ' CYr =',F11.6 )
C
WRITE(LU,7130) CFT_U(3,4)*(2.0/BREF)/SREF,
& DIR*CFT_U(3,5)*(2.0/CREF)/SREF,
& CFT_U(3,6)*(2.0/BREF)/SREF
7130 FORMAT(' z force CZ |',' CZp =',F11.6,
& ' CZq =',F11.6,
& ' CZr =',F11.6 )
C
WRITE(LU,7140) CMT_U(1,4)*(2.0/BREF)/(SREF*BREF),
& DIR*CMT_U(1,5)*(2.0/CREF)/(SREF*BREF),
& CMT_U(1,6)*(2.0/BREF)/(SREF*BREF)
7140 FORMAT(' x mom. Cl |',' Clp =',F11.6,
& ' Clq =',F11.6,
& ' Clr =',F11.6 )
C
WRITE(LU,7150) DIR*CMT_U(2,4)*(2.0/BREF)/(SREF*CREF),
& CMT_U(2,5)*(2.0/CREF)/(SREF*CREF),
& DIR*CMT_U(2,6)*(2.0/BREF)/(SREF*CREF)
7150 FORMAT(' y mom. Cm |',' Cmp =',F11.6,
& ' Cmq =',F11.6,
& ' Cmr =',F11.6 )
C
WRITE(LU,7160) CMT_U(3,4)*(2.0/BREF)/(SREF*BREF),
& DIR*CMT_U(3,5)*(2.0/CREF)/(SREF*BREF),
& CMT_U(3,6)*(2.0/BREF)/(SREF*BREF)
7160 FORMAT(' z mom. Cn |',' Cnp =',F11.6,
& ' Cnq =',F11.6,
& ' Cnr =',F11.6 )
C
IF(NCONTROL.GT.0) THEN
C
WRITE(LU,8106) (DNAME(K), K, K=1, NCONTROL)
8106 FORMAT(/14X,20(4X,A12, ' d',I1,' '))
WRITE(LU,8107) (' ',K=1, NCONTROL)
8107 FORMAT( 14X,20(3X,A,'----------------'))
C
WRITE(LU,8110) (' ',K,DIR*CFT_D(1,K)/SREF, K=1, NCONTROL)
8110 FORMAT(' x force CX |',20(A,' CXd',I1,' =',F11.6))
C
WRITE(LU,8120) (' ',K, CFT_D(2,K)/SREF, K=1, NCONTROL)
8120 FORMAT(' y force CY |',20(A,' CYd',I1,' =',F11.6))
C
WRITE(LU,8130) (' ',K,DIR*CFT_D(3,K)/SREF, K=1, NCONTROL)
8130 FORMAT(' z force CZ |',20(A,' CZd',I1,' =',F11.6))
C
WRITE(LU,8140) (' ',K,DIR*CMT_D(1,K)/(SREF*BREF), K=1, NCONTROL)
8140 FORMAT(' x mom. Cl |',20(A,' Cld',I1,' =',F11.6))
C
WRITE(LU,8150) (' ',K, CMT_D(2,K)/(SREF*CREF), K=1, NCONTROL)
8150 FORMAT(' y mom. Cm |',20(A,' Cmd',I1,' =',F11.6))
C
WRITE(LU,8160) (' ',K,DIR*CMT_D(3,K)/(SREF*BREF), K=1, NCONTROL)
8160 FORMAT(' z mom. Cn |',20(A,' Cnd',I1,' =',F11.6))
C
WRITE(LU,*)
WRITE(LU,*)
C
ENDIF
C
IF(NDESIGN.GT.0) THEN
C
WRITE(LU,8206) (GNAME(K), K, K=1, NDESIGN)
8206 FORMAT(/14X,20(4X,A12, ' g',I1,' '))
WRITE(LU,8207) (' ',K=1, NDESIGN)
8207 FORMAT( 14X,20(3X,A,'----------------'))
C
WRITE(LU,8210) (' ',K,DIR*CFT_G(1,K)/SREF, K=1, NDESIGN)
8210 FORMAT(' x force CX |',20(A,' CXg',I1,' =',F11.6))
C
WRITE(LU,8220) (' ',K, CFT_G(2,K)/SREF, K=1, NDESIGN)
8220 FORMAT(' y force CY |',20(A,' CYg',I1,' =',F11.6))
C
WRITE(LU,8230) (' ',K,DIR*CFT_G(3,K)/SREF, K=1, NDESIGN)
8230 FORMAT(' z force CZ |',20(A,' CZg',I1,' =',F11.6))
C
WRITE(LU,8240) (' ',K,DIR*CMT_G(1,K)/(SREF*BREF), K=1, NDESIGN)
8240 FORMAT(' x mom. Cl |',20(A,' Clg',I1,' =',F11.6))
C
WRITE(LU,8250) (' ',K, CMT_G(2,K)/(SREF*CREF), K=1, NDESIGN)
8250 FORMAT(' y mom. Cm |',20(A,' Cmg',I1,' =',F11.6))
C
WRITE(LU,8260) (' ',K,DIR*CMT_G(3,K)/(SREF*BREF), K=1, NDESIGN)
8260 FORMAT(' z mom. Cn |',20(A,' Cng',I1,' =',F11.6))
C
WRITE(LU,*)
WRITE(LU,*)
C
ENDIF
C
RETURN
END ! DERMATB
SUBROUTINE DUMPIT(LU,NF,VINF,ALPHA, BETA,
& OMEGAX, OMEGAY, OMEGAZ,
& CFX, CFY, CFZ, CMX, CMY, CMZ)
C
C--- Writes flow condition header to logical unit
C
REAL VINF(NF),ALPHA(NF), BETA(NF),
& OMEGAX(NF), OMEGAY(NF), OMEGAZ(NF),
& CFX(NF), CFY(NF), CFZ(NF), CMX(NF), CMY(NF), CMZ(NF)
C
DO IF=1, NF
WRITE(LU,2050) IF, VINF(IF)
WRITE(LU,2060) ALPHA(IF), BETA(IF),
& OMEGAX(IF), OMEGAY(IF), OMEGAZ(IF)
WRITE(LU,2070) CFX(IF), CFY(IF), CFZ(IF),
& CMX(IF), CMY(IF), CMZ(IF)
END DO
C
2050 FORMAT(/1X,'Flow condition', I3, ' Vinf =', F8.3)
2060 FORMAT(/1X,6X,'Alpha' ,7X,'Beta',
& 5X,'Omegax',5X,'Omegay',5X,'Omegaz' /1X,5F11.6)
2070 FORMAT(/1X,8X,'CFx',8X,'CFy',8X,'CFz',
& 8X,'CMx',8X,'CMy',8X,'CMz' /1X,6F11.6 / )
C
RETURN
END
SUBROUTINE GETSA(LSA,SATYPE,DIR)
LOGICAL LSA
CHARACTER*(*) SATYPE
C
IF(LSA) THEN
SATYPE = 'Standard axis orientation, X fwd, Z down'
DIR = -1.0
ELSE
SATYPE = 'Geometric axis orientation, X aft, Z up '
DIR = 1.0
ENDIF
C
RETURN
END ! GETSA
|
Formal statement is: lemma convex_cone: "convex s \<and> cone s \<longleftrightarrow> (\<forall>x\<in>s. \<forall>y\<in>s. (x + y) \<in> s) \<and> (\<forall>x\<in>s. \<forall>c\<ge>0. (c *\<^sub>R x) \<in> s)" (is "?lhs = ?rhs") Informal statement is: A set $S$ is a convex cone if and only if for all $x, y \in S$, $x + y \in S$ and for all $x \in S$ and $c \geq 0$, $cx \in S$. |
module SecretSanta
import Combinatorics
import Dates
import GLPK
import JSON
import JuMP
import MathOptInterface
import Random
import SMTPClient
struct SecretSantaModel
model::JuMP.Model
data::Dict{String, Any}
constraints::Dict{Symbol, Any} # JuMP constraint references.
variables::Dict{Symbol, Any} # JuMP variable references.
solution::Dict{String, Any} # Solution reference.
end
function SecretSantaModel(data::Dict{String, Any})
# Create the set of the participants.
P = data["participants"]
# Create the node set.
N = [x["email"] for x in P]
# Create arcs for a complete bipartite graph.
exclude = vcat([[[x["email"], y] for y in x["exclude"]] for x in P]...)
A = collect(Combinatorics.combinations(N, 2)) # Collect arcs from i to j.
A = vcat([reverse(a) for a in A], A) # Concatenate arcs from j to i.
A = filter(x -> !(x in exclude), A) # Remove excluded arcs.
A = [(a[1], a[2]) for a in A] # Convert to array of tuples.
# Shuffle the array of arcs to induce random solutions.
A = Random.shuffle(A)
# Create the JuMP model.
model = JuMP.Model(GLPK.Optimizer)
variables = Dict{Symbol, Any}(:x => nothing)
constraints = Dict{Symbol, Any}(:out_flow => nothing, :in_flow => nothing)
constraints[:out_flow] = Dict{String, JuMP.ConstraintRef}()
constraints[:in_flow] = Dict{String, JuMP.ConstraintRef}()
# Create variables corresponding to arc selection.
variables[:x] = JuMP.@variable(model, [a in A], binary=true, base_name="x")
for i in N
out_arcs = collect(filter(x -> (x[1] == i), A))
out_vars = Array{JuMP.VariableRef}([variables[:x][a] for a in out_arcs])
constraints[:out_flow][i] = JuMP.@constraint(model, sum(out_vars) == 1)
in_arcs = collect(filter(x -> (x[2] == i), A))
in_vars = Array{JuMP.VariableRef}([variables[:x][a] for a in in_arcs])
constraints[:in_flow][i] = JuMP.@constraint(model, sum(in_vars) == 1)
end
solution = Dict{String, Any}()
ssm = SecretSantaModel(model, data, constraints, variables, solution)
return ssm # Return the SecretSantaModel instance.
end
function build_model(input_path::String)
data = JSON.parsefile(input_path)
return SecretSantaModel(data)
end
function solve_model(ssm::SecretSantaModel)
JuMP.optimize!(ssm.model)
if JuMP.termination_status(ssm.model) == MathOptInterface.OPTIMAL
A = ssm.variables[:x].axes[1]
return filter(a -> isapprox(JuMP.value(ssm.variables[:x][a]), 1.0), A)
else
error("Secret Santa assignment is not possible. Adjust participants.")
end
end
function send_email(ssm::SecretSantaModel, sender::Dict{String,Any}, recipient::Dict{String,Any}, test::Bool=true)
# Prepare the subject of the email.
subject = ssm.data["email"]["subject"]
recipient_name = recipient["name"]
subject = replace(subject, "{recipient}" => recipient_name)
# Prepare the body of the email.
message = ssm.data["email"]["message"]
sender_name = sender["name"]
sender_email = sender["email"]
recipient_email = recipient["email"]
message = replace(message, "{sender}" => sender_name)
message = replace(message, "{recipient}" => recipient_name)
message = replace(message, "{recipient_email}" => recipient_email)
time_now = Dates.now(Dates.UTC)
datetime = Dates.format(time_now, "e, dd u yyyy HH:MM:SS")
body = "Date: $(datetime) +0000\n" *
"From: Santa Claus <$(ssm.data["email"]["username"])>\n" *
"To: $(sender_email)\n" * "Subject: $(subject)\n" * "\n" *
message * "\n"
body_io = IOBuffer(body)
# Prepare email sending options.
opt = SMTPClient.SendOptions(isSSL=true,
username=ssm.data["email"]["username"],
passwd=ssm.data["email"]["password"])
# Prepare the email.
server = ssm.data["email"]["smtp_server"]
port = string(ssm.data["email"]["smtp_port"])
url = "smtps://$(server):$(port)"
rcpt = ["$(sender_email)"]
from = "$(ssm.data["email"]["username"])"
if !test
# Send the email.
resp = SMTPClient.send(url, rcpt, from, body_io, opt)
else
println("------------------------------------------------------------")
println("Message to $(sender_name) ($(sender_email))")
println("Subject: $(subject)")
println("$(message)")
println("------------------------------------------------------------")
end
end
function send_matchings(ssm::SecretSantaModel, solution::Array{Tuple{String, String}, 1}, test::Bool=true)
participants = ssm.data["participants"]
for matching in solution
sender = findfirst(x -> x["email"] == matching[1], participants)
recipient = findfirst(x -> x["email"] == matching[2], participants)
send_email(ssm, participants[sender], participants[recipient], test)
end
end
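# Convenience entry point: build the model from a JSON input file, solve for a
# feasible assignment, and send (or, when `test` is true, print) the emails.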
function run(input_path::String; test::Bool=true)
ssm = build_model(input_path)
solution = solve_model(ssm)
send_matchings(ssm, solution, test)
end
end
|
[STATEMENT]
lemma Seed_holds_faces_distinct: "faces_distinct (Seed p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. faces_distinct Seed\<^bsub>p\<^esub>
[PROOF STEP]
apply(simp add: Seed_def graph_def
faces_distinct_def normFaces_def facesAt_def normFace_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. verticesFrom (Face [0..<p + 3] Final) (minVertex (Face [0..<p + 3] Final)) \<noteq> verticesFrom (Face (rev [0..<p + 3]) Nonfinal) (minVertex (Face (rev [0..<p + 3]) Nonfinal))
[PROOF STEP]
apply(simp add: eval_nat_numeral minVertex_zero1 minVertex_zero2 verticesFrom_Def
fst_splitAt_upt snd_splitAt_upt fst_splitAt_rev snd_splitAt_rev del:upt_Suc)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [Suc 0..<Suc (Suc (Suc p))] \<noteq> rev [Suc 0..<Suc (Suc (Suc p))]
[PROOF STEP]
apply(simp add:upt_conv_Cons del:upt_Suc)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Suc 0 # Suc (Suc 0) # [Suc (Suc (Suc 0))..<Suc (Suc (Suc p))] \<noteq> rev [Suc (Suc (Suc 0))..<Suc (Suc (Suc p))] @ [Suc (Suc 0), Suc 0]
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
# Realization of Recursive Filters
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).*
## Introduction
Computing the output $y[k] = \mathcal{H} \{ x[k] \}$ of a [linear time-invariant](https://en.wikipedia.org/wiki/LTI_system_theory) (LTI) system is of central importance in digital signal processing. This is often referred to as [*filtering*](https://en.wikipedia.org/wiki/Digital_filter) of the input signal $x[k]$. We have already discussed the realization of [non-recursive filters](../nonrecursive_filters/introduction.ipynb). This section focuses on the realization of recursive filters.
### Recursive Filters
Linear difference equations with constant coefficients represent linear time-invariant (LTI) systems
\begin{equation}
\sum_{n=0}^{N} a_n \; y[k-n] = \sum_{m=0}^{M} b_m \; x[k-m]
\end{equation}
where $y[k] = \mathcal{H} \{ x[k] \}$ denotes the response of the system to the input signal $x[k]$, $N$ denotes the order of the system, and $a_n$ and $b_m$ denote constant coefficients. The above equation can be rearranged with respect to the output signal $y[k]$ by extracting the first element ($n=0$) of the left-hand sum
\begin{equation}
y[k] = \frac{1}{a_0} \left( \sum_{m=0}^{M} b_m \; x[k-m] - \sum_{n=1}^{N} a_n \; y[k-n] \right)
\end{equation}
It is evident that the output signal $y[k]$ at time instant $k$ is given as a linear combination of past output samples $y[k-n]$ superimposed on a linear combination of the current input sample $x[k]$ and past input samples $x[k-m]$. Hence, the current output $y[k]$ is composed of two contributions
1. a [non-recursive part](../nonrecursive_filters/introduction.ipynb#Non-Recursive-Filters), and
2. a recursive part, where a linear combination of past output samples is fed back (a direct implementation of this recursion is sketched below).
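A minimal sketch of this recursion, using arbitrary illustrative coefficients `b`, `a` and a random input `x`, and checked against `scipy.signal.lfilter`:
```python
import numpy as np
import scipy.signal as sig

b = [0.1, 0.2, 0.1]      # illustrative non-recursive coefficients b_m
a = [1.0, -0.5, 0.25]    # illustrative recursive coefficients a_n (a_0 = 1)
x = np.random.randn(50)  # arbitrary input signal

y = np.zeros(len(x))
for k in range(len(x)):
    # non-recursive part: linear combination of current and past inputs
    acc = sum(b[m] * x[k - m] for m in range(len(b)) if k - m >= 0)
    # recursive part: linear combination of past outputs fed back
    acc -= sum(a[n] * y[k - n] for n in range(1, len(a)) if k - n >= 0)
    y[k] = acc / a[0]

# the direct recursion coincides with the library implementation
assert np.allclose(y, sig.lfilter(b, a, x))
```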
The impulse response of the system is given as the response of the system to a Dirac impulse at the input, $h[k] = \mathcal{H} \{ \delta[k] \}$. Using the above result and the properties of the discrete Dirac impulse, we get
\begin{equation}
h[k] = \frac{1}{a_0} \left( b_k - \sum_{n=1}^{N} a_n \; h[k-n] \right)
\end{equation}
Due to the feedback, the impulse response is in general of infinite length. Such an impulse response is termed an [infinite impulse response](https://en.wikipedia.org/wiki/Infinite_impulse_response) (IIR), and the system is referred to as a recursive system/filter.
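A minimal sketch of the impulse-response recursion with the same illustrative coefficients as above, taking $b_k = 0$ for $k > M$ and comparing against filtering a discrete Dirac impulse with `scipy.signal.lfilter`:
```python
import numpy as np
import scipy.signal as sig

b = [0.1, 0.2, 0.1]   # illustrative coefficients, as above
a = [1.0, -0.5, 0.25]
L = 60                # number of impulse response samples to compute

h = np.zeros(L)
for k in range(L):
    bk = b[k] if k < len(b) else 0.0  # b_k vanishes for k > M
    h[k] = (bk - sum(a[n] * h[k - n] for n in range(1, len(a)) if k - n >= 0)) / a[0]

# identical to filtering a discrete Dirac impulse
delta = np.zeros(L)
delta[0] = 1.0
assert np.allclose(h, sig.lfilter(b, a, delta))

# the response keeps shrinking geometrically but does not become zero
print(np.abs(h[-1]))
```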
### Transfer Function
Applying a $z$-transform to the left- and right-hand side of the difference equation and rearranging terms yields the transfer function $H(z)$ of the system
\begin{equation}
H(z) = \frac{Y(z)}{X(z)} = \frac{\sum_{m=0}^{M} b_m \; z^{-m}}{\sum_{n=0}^{N} a_n \; z^{-n}}
\end{equation}
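A quick numerical sketch: the rational form of $H(z)$ can be evaluated on the unit circle $z = e^{\mathrm{j} \Omega}$ and compared with the result of `scipy.signal.freqz`; the Butterworth coefficients used here anticipate the example further below.
```python
import numpy as np
import scipy.signal as sig

b, a = sig.butter(5, 0.2, 'low')  # Butterworth lowpass, as in the example below
Om, H = sig.freqz(b, a)           # frequency response on the unit circle

z = np.exp(1j * Om)
num = sum(bm * z**(-m) for m, bm in enumerate(b))
den = sum(an * z**(-n) for n, an in enumerate(a))

# evaluating the rational function on the unit circle reproduces freqz
assert np.allclose(H, num / den)
```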
The transfer function is thus given as a [rational function](https://en.wikipedia.org/wiki/Rational_function) in $z$. The polynomials of the numerator and denominator can alternatively be expressed in terms of their roots as
\begin{equation}
H(z) = \frac{b_M}{a_N} \cdot \frac{\prod_{\mu=1}^{P} (z - z_{0\mu})^{m_\mu}}{\prod_{\nu=1}^{Q} (z - z_{\infty\nu})^{n_\nu}}
\end{equation}
where $z_{0\mu}$ and $z_{\infty\nu}$ denote the $\mu$-th zero and $\nu$-th pole of degree $m_\mu$ and $n_\nu$ of $H(z)$, respectively. The total number of zeros and poles is denoted by $P$ and $Q$. Due to the symmetries of the $z$-transform, the transfer function of a real-valued system $h[k] \in \mathbb{R}$ exhibits complex conjugate symmetry
\begin{equation}
H(z) = H^*(z^*)
\end{equation}
For real-valued systems ($b_m\in\mathbb{R}$, $a_n\in\mathbb{R}$), poles and zeros are either real-valued or occur in complex conjugate pairs. For the poles of a causal and stable system $H(z)$, the following condition has to hold
\begin{equation}
\max_{\nu} | z_{\infty\nu} | < 1
\end{equation}
Hence, all poles have to be located inside the unit circle $|z| = 1$. Among other things, this implies that $M \leq N$.
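A short sketch of this check, using `scipy.signal.tf2zpk` to extract the zeros, poles, and gain of the Butterworth lowpass from the example below and verifying that all poles lie inside the unit circle:
```python
import numpy as np
import scipy.signal as sig

b, a = sig.butter(5, 0.2, 'low')
z0, zinf, k = sig.tf2zpk(b, a)  # zeros, poles and gain of H(z)

print('largest pole magnitude:', np.max(np.abs(zinf)))
assert np.max(np.abs(zinf)) < 1  # causal and stable filter
```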
### Example
The following example shows the pole/zero diagram, the magnitude and phase response, and the impulse response of a recursive filter with a so-called [Butterworth](https://en.wikipedia.org/wiki/Butterworth_filter) lowpass characteristic.
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.markers import MarkerStyle
from matplotlib.patches import Circle
import scipy.signal as sig
N = 5 # order of recursive filter
L = 128 # number of computed samples
def zplane(z, p, title='Poles and Zeros'):
"Plots zero and pole locations in the complex z-plane"
ax = plt.gca()
ax.plot(np.real(z), np.imag(z), 'bo', fillstyle='none', ms = 10)
ax.plot(np.real(p), np.imag(p), 'rx', fillstyle='none', ms = 10)
unit_circle = Circle((0,0), radius=1, fill=False,
color='black', ls='solid', alpha=0.9)
ax.add_patch(unit_circle)
ax.axvline(0, color='0.7')
ax.axhline(0, color='0.7')
plt.title(title)
plt.xlabel(r'Re{$z$}')
plt.ylabel(r'Im{$z$}')
plt.axis('equal')
plt.xlim((-2, 2))
plt.ylim((-2, 2))
plt.grid()
# compute coefficients of recursive filter
b, a = sig.butter(N, 0.2, 'low')
# compute transfer function
Om, H = sig.freqz(b, a)
# compute impulse response
k = np.arange(L)
x = np.where(k==0, 1.0, 0)
h = sig.lfilter(b, a, x)
# plot pole/zero-diagram
plt.figure(figsize=(5, 5))
zplane(np.roots(b), np.roots(a))
# plot magnitude response
plt.figure(figsize=(10, 3))
plt.plot(Om, 20 * np.log10(abs(H)))
plt.xlabel(r'$\Omega$')
plt.ylabel(r'$|H(e^{j \Omega})|$ in dB')
plt.grid()
plt.title('Magnitude response')
# plot phase response
plt.figure(figsize=(10, 3))
plt.plot(Om, np.unwrap(np.angle(H)))
plt.xlabel(r'$\Omega$')
plt.ylabel(r'$\varphi (\Omega)$ in rad')
plt.grid()
plt.title('Phase response')
# plot impulse response (magnitude)
plt.figure(figsize=(10, 3))
plt.stem(20*np.log10(np.abs(np.squeeze(h))))
plt.xlabel(r'$k$')
plt.ylabel(r'$|h[k]|$ in dB')
plt.grid()
plt.title('Impulse response (magnitude)');
```
**Exercise**
* Does the system have an IIR?
* What happens if you increase the order `N` of the filter?
Solution: It can be concluded from the last illustration, showing the magnitude of the impulse response $|h[k]|$ on a logarithmic scale, that the magnitude of the impulse response decays continuously for increasing $k$ but never becomes exactly zero. Hence, the system indeed has an infinite impulse response. This behavior persists for larger $k$, as can be observed by increasing the number `L` of computed samples in the example above. The magnitude response $|H(e^{j \Omega})|$ of the filter decays faster with increasing filter order `N`.
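A brief sketch illustrating the second point: plotting the magnitude response for several filter orders shows the increasingly steep decay with growing `N`.
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig

plt.figure(figsize=(10, 3))
for order in (2, 5, 10):
    b, a = sig.butter(order, 0.2, 'low')
    Om, H = sig.freqz(b, a)
    plt.plot(Om, 20 * np.log10(np.abs(H)), label=r'$N = %d$' % order)
plt.xlabel(r'$\Omega$')
plt.ylabel(r'$|H(e^{j \Omega})|$ in dB')
plt.legend()
plt.grid()
```
The cutoff frequency stays at $\Omega = 0.2 \pi$ while the transition becomes steeper with growing order.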
**Copyright**
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2018*.
|
function a = fourier_cosine ( n )
%*****************************************************************************80
%
%% FOURIER_COSINE returns the FOURIER_COSINE matrix.
%
% Discussion:
%
% FOURIER_COSINE is the discrete Fourier Cosine Transform matrix.
%
% Example:
%
% N = 5
%
% 0.447214 0.447214 0.447214 0.447214 0.447214
% 0.601501 0.371748 0.000000 -0.371748 -0.601501
% 0.511667 -0.195440 -0.632456 -0.195439 0.511667
% 0.371748 -0.601501 0.000000 0.601501 -0.371748
% 0.195439 -0.511667 0.632456 -0.511668 0.195439
%
% Properties:
%
% A * A' = I.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 08 October 2007
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer N, the order of A.
%
% Output, real A(N,N), the matrix.
%
a = zeros ( n, n );
a(1,1:n) = 1.0 / sqrt ( n );
for i = 2 : n
for j = 1 : n
angle = ( i - 1 ) * ( 2 * j - 1 ) * pi / ( 2 * n );
a(i,j) = sqrt ( 2.0 ) * cos ( angle ) / sqrt ( n );
end
end
return
end
|
module GUIgeneric.Prelude where
open import Data.Sum public hiding (map)
open import Data.List public
open import Data.Bool public
open import Data.String hiding (decSetoid) renaming (_++_ to _++Str_; _==_ to _==Str_; _≟_ to _≟Str_) public
open import Data.Unit hiding (_≟_; decSetoid; setoid) public
open import Data.Empty public
open import Data.Product hiding (map; zip) public
open import Data.Sum hiding (map) public
open import Data.Nat hiding (_≟_; _≤_; _≤?_) public
open import Data.Maybe.Base hiding (map) public
open import Function public
open import Relation.Binary.PropositionalEquality.Core public
open import Relation.Binary.PropositionalEquality hiding (setoid ; preorder ; decSetoid; [_]) public
open import Size public
open import Level renaming (_⊔_ to _⊔Level_; suc to sucLevel; zero to zeroLevel) public
open import Relation.Binary.Core using (Decidable) public
open import Relation.Nullary using (Dec) public
open import Relation.Nullary.Decidable using (⌊_⌋) public
--
-- ooAgda Imports
--
open import NativeIO public
open import StateSizedIO.Base hiding (IOInterfaceˢ; IOˢ; IOˢ'; IOˢ+; delayˢ; doˢ; fmapˢ; fmapˢ'; returnˢ) public
open import StateSizedIO.GUI.WxGraphicsLibLevel3 public renaming (createFrame to createWxFrame)
open import StateSizedIO.GUI.WxBindingsFFI renaming (Frame to FFIFrame; Button to FFIButton; TextCtrl to FFITextCtrl) public
open import SizedIO.Base public
open import StateSizedIO.GUI.BaseStateDependent public
open import StateSizedIO.GUI.VariableList renaming (addVar to addVar'; addVar' to addVar) public
|
State Before: α : Type u
β : Type v
γ : Type w
a✝ : α
as as1 as2 as3 : List α
inst✝¹ : Inhabited α
inst✝ : Inhabited β
a : α
h : a ∈ []
⊢ ∃ n, a = get n [] State After: no goals Tactic: cases h State Before: α : Type u
β : Type v
γ : Type w
a✝ : α
as✝ as1 as2 as3 : List α
inst✝¹ : Inhabited α
inst✝ : Inhabited β
a b : α
as : List α
h : a ∈ b :: as
⊢ ∃ n, a = get n (b :: as) State After: α : Type u
β : Type v
γ : Type w
a✝ : α
as✝ as1 as2 as3 : List α
inst✝¹ : Inhabited α
inst✝ : Inhabited β
a b : α
as : List α
h : a = b ∨ a ∈ as
⊢ ∃ n, a = get n (b :: as) Tactic: rw [mem_cons] at h State Before: α : Type u
β : Type v
γ : Type w
a✝ : α
as✝ as1 as2 as3 : List α
inst✝¹ : Inhabited α
inst✝ : Inhabited β
a b : α
as : List α
h : a = b ∨ a ∈ as
⊢ ∃ n, a = get n (b :: as) State After: no goals Tactic: cases h with
| inl h => exact ⟨0, h⟩
| inr h =>
rcases eq_get_of_mem h with ⟨n, h⟩
exact ⟨n + 1, h⟩ State Before: case inl
α : Type u
β : Type v
γ : Type w
a✝ : α
as✝ as1 as2 as3 : List α
inst✝¹ : Inhabited α
inst✝ : Inhabited β
a b : α
as : List α
h : a = b
⊢ ∃ n, a = get n (b :: as) State After: no goals Tactic: exact ⟨0, h⟩ State Before: case inr
α : Type u
β : Type v
γ : Type w
a✝ : α
as✝ as1 as2 as3 : List α
inst✝¹ : Inhabited α
inst✝ : Inhabited β
a b : α
as : List α
h : a ∈ as
⊢ ∃ n, a = get n (b :: as) State After: case inr.intro
α : Type u
β : Type v
γ : Type w
a✝ : α
as✝ as1 as2 as3 : List α
inst✝¹ : Inhabited α
inst✝ : Inhabited β
a b : α
as : List α
h✝ : a ∈ as
n : ℕ
h : a = get n as
⊢ ∃ n, a = get n (b :: as) Tactic: rcases eq_get_of_mem h with ⟨n, h⟩ State Before: case inr.intro
α : Type u
β : Type v
γ : Type w
a✝ : α
as✝ as1 as2 as3 : List α
inst✝¹ : Inhabited α
inst✝ : Inhabited β
a b : α
as : List α
h✝ : a ∈ as
n : ℕ
h : a = get n as
⊢ ∃ n, a = get n (b :: as) State After: no goals Tactic: exact ⟨n + 1, h⟩ |
open import SOAS.Metatheory.Syntax
-- Initial (⅀, 𝔛)-meta-algebra 𝕋 𝔛 is the free ⅀-monoid on 𝔛
module SOAS.Metatheory.FreeMonoid {T : Set} (Syn : Syntax {T}) where
open Syntax Syn
open import SOAS.Common
open import SOAS.Families.Core {T}
open import SOAS.Context {T}
open import SOAS.Variable {T}
open import SOAS.Construction.Structure as Structure
open import SOAS.Abstract.Hom {T}
import SOAS.Abstract.Coalgebra {T} as →□ ; open →□.Sorted
import SOAS.Abstract.Box {T} as □ ; open □.Sorted
open import Categories.Monad
open import SOAS.Abstract.Monoid
open import SOAS.Coalgebraic.Map
open import SOAS.Coalgebraic.Monoid
open import SOAS.Coalgebraic.Strength
open import SOAS.Metatheory Syn
private
variable
α β : T
Γ Δ : Ctx
module _ (𝔛 : Familyₛ) where
open Theory 𝔛
-- 𝕋 is a Σ-monoid
Σ𝕋ᵐ : ΣMon 𝕋
Σ𝕋ᵐ = record
{ ᵐ = 𝕋ᵐ
; 𝑎𝑙𝑔 = 𝕒𝕝𝕘
; μ⟨𝑎𝑙𝑔⟩ = λ{ {σ = σ} t → begin
𝕤𝕦𝕓 (𝕒𝕝𝕘 t) σ
≡⟨ Substitution.𝕥⟨𝕒⟩ ⟩
𝕒𝕝𝕘 (str 𝕋ᴮ 𝕋 (⅀₁ 𝕤𝕦𝕓 t) σ)
≡⟨ cong 𝕒𝕝𝕘 (CoalgMon.str-eq 𝕋ᴹ 𝕋 ⅀:Str (⅀₁ 𝕤𝕦𝕓 t) σ) ⟩
𝕒𝕝𝕘 (str (Mon.ᴮ 𝕋ᵐ) 𝕋 (⅀₁ 𝕤𝕦𝕓 t) σ)
∎ }
} where open ≡-Reasoning
-- Given a ⅀-monoid ℳ and interpretation ω : 𝔛 ⇾̣ ℳ,
-- there is a unique homomorphic extension 𝕋 𝔛 ⇾̣ ℳ
module FΣM {ℳ : Familyₛ}(Σℳᵐ : ΣMon ℳ) (ω : 𝔛 ⇾̣ ℳ) where
open ΣMon Σℳᵐ renaming (𝑎𝑙𝑔 to ℳ𝑎𝑙𝑔 ; ᴮ to ℳᴮ ; ᵐ to ℳᵐ) public
private module ℳ = ΣMon Σℳᵐ
-- Metavariable operator of ℳ using ω and monoid multiplication, making
-- ℳ into a meta-algebra
χ : 𝔛 ⇾̣ 〖 ℳ , ℳ 〗
χ 𝔪 ε = μ (ω 𝔪) ε
ℳᵃ : MetaAlg ℳ
ℳᵃ = record { 𝑎𝑙𝑔 = ℳ.𝑎𝑙𝑔 ; 𝑣𝑎𝑟 = η ; 𝑚𝑣𝑎𝑟 = χ }
open Semantics ℳᵃ public renaming (𝕤𝕖𝕞 to 𝕖𝕩𝕥)
open MetaAlg ℳᵃ
open Coalgebraic μᶜ
-- Extension is pointed coalgebra hommorphism
𝕖𝕩𝕥ᵇ⇒ : Coalg⇒ 𝕋ᵇ ℳ.ᵇ 𝕖𝕩𝕥
𝕖𝕩𝕥ᵇ⇒ = 𝕤𝕖𝕞ᵇ⇒ ℳ.ᵇ ℳᵃ record
{ ⟨𝑎𝑙𝑔⟩ = λ{ {t = t} → dext (λ ρ → begin
μ (𝑎𝑙𝑔 t) (η ∘ ρ)
≡⟨ μ⟨𝑎𝑙𝑔⟩ t ⟩
𝑎𝑙𝑔 (str ℳ.ᴮ ℳ (⅀₁ μ t) (η ∘ ρ))
≡⟨ cong 𝑎𝑙𝑔 (str-nat₁ (ηᴮ⇒ ℳᴮ) (⅀₁ ℳ.μ t) ρ) ⟩
𝑎𝑙𝑔 (str ℐᴮ ℳ (⅀.F₁ (λ { h ς → h (λ v → η (ς v)) }) (⅀₁ ℳ.μ t)) ρ)
≡˘⟨ congr ⅀.homomorphism (λ - → 𝑎𝑙𝑔 (str ℐᴮ ℳ - ρ)) ⟩
𝑎𝑙𝑔 (str ℐᴮ ℳ (⅀.F₁ (λ{ t ρ → μ t (η ∘ ρ)}) t) ρ)
∎) }
; ⟨𝑣𝑎𝑟⟩ = dext′ ℳ.lunit
; ⟨𝑚𝑣𝑎𝑟⟩ = dext′ ℳ.assoc
} where open ≡-Reasoning
𝕖𝕩𝕥ᴮ⇒ : Coalgₚ⇒ 𝕋ᴮ ℳ.ᴮ 𝕖𝕩𝕥
𝕖𝕩𝕥ᴮ⇒ = record { ᵇ⇒ = 𝕖𝕩𝕥ᵇ⇒ ; ⟨η⟩ = ⟨𝕧⟩ }
  -- Extension is a monoid homomorphism
μ∘𝕖𝕩𝕥 : MapEq₁ 𝕋ᴮ ℳ.𝑎𝑙𝑔 (λ t σ → 𝕖𝕩𝕥 (𝕤𝕦𝕓 t σ))
(λ t σ → μ (𝕖𝕩𝕥 t) (𝕖𝕩𝕥 ∘ σ))
μ∘𝕖𝕩𝕥 = record
{ φ = 𝕖𝕩𝕥
; χ = χ
; f⟨𝑣⟩ = cong 𝕖𝕩𝕥 Substitution.𝕥⟨𝕧⟩
; f⟨𝑚⟩ = trans (cong 𝕖𝕩𝕥 Substitution.𝕥⟨𝕞⟩) ⟨𝕞⟩
; f⟨𝑎⟩ = λ{ {σ = σ}{t} → begin
𝕖𝕩𝕥 (𝕤𝕦𝕓 (𝕒𝕝𝕘 t) σ)
≡⟨ cong 𝕖𝕩𝕥 Substitution.𝕥⟨𝕒⟩ ⟩
𝕖𝕩𝕥 (𝕒𝕝𝕘 (str 𝕋ᴮ 𝕋 (⅀₁ 𝕤𝕦𝕓 t) σ))
≡⟨ ⟨𝕒⟩ ⟩
𝑎𝑙𝑔 (⅀₁ 𝕖𝕩𝕥 (str 𝕋ᴮ 𝕋 (⅀₁ 𝕤𝕦𝕓 t) σ))
≡˘⟨ cong 𝑎𝑙𝑔 (str-nat₂ 𝕖𝕩𝕥 (⅀₁ 𝕤𝕦𝕓 t) σ) ⟩
𝑎𝑙𝑔 (str 𝕋ᴮ ℳ (⅀.F₁ (λ { h ς → 𝕖𝕩𝕥 (h ς) }) (⅀₁ 𝕤𝕦𝕓 t)) σ)
≡˘⟨ congr ⅀.homomorphism (λ - → 𝑎𝑙𝑔 (str 𝕋ᴮ ℳ - σ)) ⟩
𝑎𝑙𝑔 (str 𝕋ᴮ ℳ (⅀₁ (λ{ t σ → 𝕖𝕩𝕥 (𝕤𝕦𝕓 t σ)}) t) σ)
∎ }
; g⟨𝑣⟩ = trans (μ≈₁ ⟨𝕧⟩) (Mon.lunit ℳ.ᵐ)
; g⟨𝑚⟩ = trans (μ≈₁ ⟨𝕞⟩) (Mon.assoc ℳ.ᵐ)
; g⟨𝑎⟩ = λ{ {σ = σ}{t} → begin
μ (𝕖𝕩𝕥 (𝕒𝕝𝕘 t)) (𝕖𝕩𝕥 ∘ σ)
≡⟨ μ≈₁ ⟨𝕒⟩ ⟩
μ (𝑎𝑙𝑔 (⅀₁ 𝕖𝕩𝕥 t)) (𝕖𝕩𝕥 ∘ σ)
≡⟨ μ⟨𝑎𝑙𝑔⟩ _ ⟩
𝑎𝑙𝑔 (str ℳᴮ ℳ (⅀₁ μ (⅀₁ 𝕖𝕩𝕥 t)) (𝕖𝕩𝕥 ∘ σ))
≡˘⟨ congr ⅀.homomorphism (λ - → 𝑎𝑙𝑔 (str ℳᴮ ℳ - (𝕖𝕩𝕥 ∘ σ))) ⟩
𝑎𝑙𝑔 (str ℳᴮ ℳ (⅀₁ (μ ∘ 𝕖𝕩𝕥) t) (𝕖𝕩𝕥 ∘ σ))
≡⟨ cong 𝑎𝑙𝑔 (str-nat₁ 𝕖𝕩𝕥ᴮ⇒ ((⅀₁ (μ ∘ 𝕖𝕩𝕥) t)) σ) ⟩
𝑎𝑙𝑔 (str 𝕋ᴮ ℳ (⅀.F₁ (λ { h′ ς → h′ (𝕖𝕩𝕥 ∘ ς) }) (⅀₁ (μ ∘ 𝕖𝕩𝕥) t)) σ)
≡˘⟨ congr ⅀.homomorphism (λ - → ℳ𝑎𝑙𝑔 (str 𝕋ᴮ ℳ - σ)) ⟩
𝑎𝑙𝑔 (str 𝕋ᴮ ℳ (⅀₁ (λ{ t σ → μ (𝕖𝕩𝕥 t) (𝕖𝕩𝕥 ∘ σ)}) t) σ)
∎ }
} where open ≡-Reasoning
𝕖𝕩𝕥ᵐ⇒ : ΣMon⇒ Σ𝕋ᵐ Σℳᵐ 𝕖𝕩𝕥
𝕖𝕩𝕥ᵐ⇒ = record { ᵐ⇒ = record
{ ⟨η⟩ = ⟨𝕧⟩
; ⟨μ⟩ = λ{ {t = t} → MapEq₁.≈ μ∘𝕖𝕩𝕥 t } }
; ⟨𝑎𝑙𝑔⟩ = ⟨𝕒⟩ }
module 𝕖𝕩𝕥ᵐ⇒ = ΣMon⇒ 𝕖𝕩𝕥ᵐ⇒
-- Interpretation map is equal to any homomorphism that factors through 𝔛 ⇾ ℳ
module _ {g : 𝕋 ⇾̣ ℳ}
(gᵐ⇒ : ΣMon⇒ Σ𝕋ᵐ Σℳᵐ g)
(p : ∀{α Π}{𝔪 : 𝔛 α Π} → g (𝕞𝕧𝕒𝕣 𝔪 𝕧𝕒𝕣) ≡ ω 𝔪) where
open ΣMon⇒ gᵐ⇒ renaming (⟨𝑎𝑙𝑔⟩ to g⟨𝑎𝑙𝑔⟩)
gᵃ⇒ : MetaAlg⇒ 𝕋ᵃ ℳᵃ g
gᵃ⇒ = record
{ ⟨𝑎𝑙𝑔⟩ = g⟨𝑎𝑙𝑔⟩
; ⟨𝑣𝑎𝑟⟩ = ⟨η⟩
; ⟨𝑚𝑣𝑎𝑟⟩ = λ{ {𝔪 = 𝔪}{ε} → begin
g (𝕞𝕧𝕒𝕣 𝔪 ε) ≡˘⟨ cong g (cong (𝕞𝕧𝕒𝕣 𝔪) (dext′ Substitution.𝕥⟨𝕧⟩)) ⟩
g (𝕞𝕧𝕒𝕣 𝔪 (λ v → 𝕤𝕦𝕓 (𝕧𝕒𝕣 v) ε)) ≡˘⟨ cong g Substitution.𝕥⟨𝕞⟩ ⟩
g (𝕤𝕦𝕓 (𝕞𝕧𝕒𝕣 𝔪 𝕧𝕒𝕣) ε) ≡⟨ ⟨μ⟩ ⟩
μ (g (𝕞𝕧𝕒𝕣 𝔪 𝕧𝕒𝕣)) (g ∘ ε) ≡⟨ μ≈₁ p ⟩
μ (ω 𝔪) (λ x → g (ε x)) ∎ }
} where open ≡-Reasoning
𝕖𝕩𝕥ᵐ! : {α : T}{Γ : Ctx}(t : 𝕋 α Γ) → 𝕖𝕩𝕥 t ≡ g t
𝕖𝕩𝕥ᵐ! = 𝕤𝕖𝕞! gᵃ⇒
-- Free Σ-monoid functor
Famₛ→ΣMon : Familyₛ → ΣMonoid
Famₛ→ΣMon 𝔛 = Theory.𝕋 𝔛 ⋉ (Σ𝕋ᵐ 𝔛)
open ΣMonoidStructure.Free
Free-ΣMon-Mapping : FreeΣMonoid.FreeMapping Famₛ→ΣMon
Free-ΣMon-Mapping = record
{ embed = λ {𝔛} 𝔪 → let open Theory 𝔛 in 𝕞𝕧𝕒𝕣 𝔪 𝕧𝕒𝕣
; univ = λ{ 𝔛 (ℳ ⋉ Σℳᵐ) ω → let open FΣM 𝔛 Σℳᵐ ω in record
{ extend = 𝕖𝕩𝕥 ⋉ 𝕖𝕩𝕥ᵐ⇒
; factor = trans ⟨𝕞⟩ (trans (μ≈₂ ⟨𝕧⟩) runit)
; unique = λ{ (g ⋉ gᵐ⇒) p {x = t} → sym (𝕖𝕩𝕥ᵐ! gᵐ⇒ p t) } }}
}
Free:𝔽amₛ⟶Σ𝕄on : Functor 𝔽amiliesₛ Σ𝕄onoids
Free:𝔽amₛ⟶Σ𝕄on = FreeΣMonoid.FreeMapping.Free Free-ΣMon-Mapping
-- Σ-monoid monad on families
ΣMon:Monad : Monad 𝔽amiliesₛ
ΣMon:Monad = FreeΣMonoid.FreeMapping.FreeMonad Free-ΣMon-Mapping
𝕋F : Functor 𝔽amiliesₛ 𝔽amiliesₛ
𝕋F = Monad.F ΣMon:Monad
open Theory
open Monad ΣMon:Monad
-- Functorial action of 𝕋
𝕋₁ : {𝔛 𝔜 : Familyₛ} → (𝔛 ⇾̣ 𝔜) → 𝕋 𝔛 ⇾̣ 𝕋 𝔜
𝕋₁ f t = Functor.₁ 𝕋F f t
-- Functorial action preserves variables
𝕋₁∘𝕧𝕒𝕣 : {𝔛 𝔜 : Familyₛ}(f : 𝔛 ⇾̣ 𝔜)(v : ℐ α Γ)
→ 𝕋₁ f (𝕧𝕒𝕣 𝔛 v) ≡ 𝕧𝕒𝕣 𝔜 v
𝕋₁∘𝕧𝕒𝕣 {𝔛 = 𝔛}{𝔜} f v = FΣM.⟨𝕧⟩ 𝔛 (Σ𝕋ᵐ 𝔜) (λ 𝔪 → 𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕧𝕒𝕣 𝔜))
-- Functorial action preserves metavariables
𝕋₁∘𝕞𝕧𝕒𝕣 : {𝔛 𝔜 : Familyₛ}(f : 𝔛 ⇾̣ 𝔜)(𝔪 : 𝔛 α Γ)(ε : Γ ~[ 𝕋 𝔛 ]↝ Δ)
→ 𝕋₁ f (𝕞𝕧𝕒𝕣 𝔛 𝔪 ε) ≡ 𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕋₁ f ∘ ε)
𝕋₁∘𝕞𝕧𝕒𝕣 {𝔛 = 𝔛}{𝔜} f 𝔪 ε = begin
𝕋₁ f (𝕞𝕧𝕒𝕣 𝔛 𝔪 ε)
≡⟨⟩
FΣM.𝕖𝕩𝕥 𝔛 (Σ𝕋ᵐ 𝔜) (λ 𝔪 → 𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕧𝕒𝕣 𝔜)) (𝕞𝕧𝕒𝕣 𝔛 𝔪 ε)
≡⟨ FΣM.⟨𝕞⟩ 𝔛 (Σ𝕋ᵐ 𝔜) (λ 𝔪 → 𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕧𝕒𝕣 𝔜)) ⟩
𝕤𝕦𝕓 𝔜 (𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕧𝕒𝕣 𝔜)) (𝕋₁ f ∘ ε)
≡⟨ Substitution.𝕥⟨𝕞⟩ 𝔜 ⟩
𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (λ 𝔫 → 𝕤𝕦𝕓 𝔜 (𝕧𝕒𝕣 𝔜 𝔫) (𝕋₁ f ∘ ε))
≡⟨ cong (𝕞𝕧𝕒𝕣 𝔜 (f 𝔪)) (dext (λ 𝔫 → lunit 𝔜)) ⟩
𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕋₁ f ∘ ε)
∎ where open ≡-Reasoning
-- Corollary of the above two
𝕋₁∘𝕞𝕧𝕒𝕣[𝕧𝕒𝕣] : {𝔛 𝔜 : Familyₛ}(f : 𝔛 ⇾̣ 𝔜)(𝔪 : 𝔛 α Γ)(ρ : Γ ↝ Δ)
→ 𝕋₁ f (𝕞𝕧𝕒𝕣 𝔛 𝔪 (𝕧𝕒𝕣 𝔛 ∘ ρ)) ≡ 𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕧𝕒𝕣 𝔜 ∘ ρ)
𝕋₁∘𝕞𝕧𝕒𝕣[𝕧𝕒𝕣] {𝔛 = 𝔛}{𝔜} f 𝔪 ρ = begin
𝕋₁ f (𝕞𝕧𝕒𝕣 𝔛 𝔪 (𝕧𝕒𝕣 𝔛 ∘ ρ))
≡⟨ 𝕋₁∘𝕞𝕧𝕒𝕣 f 𝔪 (𝕧𝕒𝕣 𝔛 ∘ ρ) ⟩
𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕋₁ f ∘ 𝕧𝕒𝕣 𝔛 ∘ ρ)
≡⟨ cong (𝕞𝕧𝕒𝕣 𝔜 (f 𝔪)) (dext λ v → 𝕋₁∘𝕧𝕒𝕣 f (ρ v)) ⟩
𝕞𝕧𝕒𝕣 𝔜 (f 𝔪) (𝕧𝕒𝕣 𝔜 ∘ ρ)
∎ where open ≡-Reasoning
|
Formal statement is: lemma bounded_uniformly_continuous_image: fixes f :: "'a :: heine_borel \<Rightarrow> 'b :: heine_borel" assumes "uniformly_continuous_on S f" "bounded S" shows "bounded(f ` S)" Informal statement is: If $f$ is uniformly continuous on a bounded set $S$, then $f(S)$ is bounded. |
import numpy as np
import cv2
cv2.ocl.setUseOpenCL(False)
class LazyFrames(object):
"""
From OpenAI Baseline.
https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
This class provides a solution to optimize the use of memory when
concatenating different frames, e.g. Atari frames in DQN. The frames are
individually stored in a list and, when numpy arrays containing them are
created, the reference to each frame is used instead of a copy.
"""
def __init__(self, frames, history_length):
self._frames = frames
assert len(self._frames) == history_length
def __array__(self, dtype=None):
out = np.array(self._frames)
if dtype is not None:
out = out.astype(dtype)
return out
def copy(self):
return self
@property
def shape(self):
return (len(self._frames),) + self._frames[0].shape
def preprocess_frame(obs, img_size):
"""
Convert a frame from rgb to grayscale and resize it.
Args:
obs (np.ndarray): array representing an rgb frame;
img_size (tuple): target size for images.
Returns:
The transformed frame as 8 bit integer array.
"""
image = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
image = cv2.resize(image, img_size, interpolation=cv2.INTER_LINEAR)
return np.array(image, dtype=np.uint8)
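
# A minimal usage sketch (not part of the original module): `history_length`,
# `img_size` and the dummy frame shape below are illustrative assumptions,
# showing how `preprocess_frame` and `LazyFrames` are meant to be combined
# into a memory-friendly stacked observation.
if __name__ == "__main__":
    from collections import deque

    history_length = 4
    img_size = (84, 84)

    # Keep only the most recent `history_length` preprocessed frames.
    frame_buffer = deque(maxlen=history_length)
    dummy_rgb = np.zeros((210, 160, 3), dtype=np.uint8)  # Atari-sized RGB frame
    for _ in range(history_length):
        frame_buffer.append(preprocess_frame(dummy_rgb, img_size))

    # LazyFrames stores references to the individual frames; the stacked array
    # is only materialized when __array__ is invoked (e.g. by np.asarray).
    state = LazyFrames(list(frame_buffer), history_length)
    assert state.shape == (history_length, 84, 84)
    assert np.asarray(state, dtype=np.float32).shape == state.shape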
|
import PencilFFTs
import Oceananigans.Solvers: poisson_eigenvalues, solve_poisson_equation!
struct DistributedFFTBasedPoissonSolver{P, F, L, λ, S}
plan :: P
full_grid :: F
my_grid :: L
eigenvalues :: λ
storage :: S
end
function DistributedFFTBasedPoissonSolver(arch, full_grid, local_grid)
topo = (TX, TY, TZ) = topology(full_grid)
λx = poisson_eigenvalues(full_grid.Nx, full_grid.Lx, 1, TX())
λy = poisson_eigenvalues(full_grid.Ny, full_grid.Ly, 2, TY())
λz = poisson_eigenvalues(full_grid.Nz, full_grid.Lz, 3, TZ())
I, J, K = arch.local_index
λx = λx[(J-1)*local_grid.Ny+1:J*local_grid.Ny, :, :]
eigenvalues = (; λx, λy, λz)
transform = PencilFFTs.Transforms.FFT!()
proc_dims = (arch.ranks[2], arch.ranks[3])
plan = PencilFFTs.PencilFFTPlan(size(full_grid), transform, proc_dims, MPI.COMM_WORLD)
storage = PencilFFTs.allocate_input(plan)
return DistributedFFTBasedPoissonSolver(plan, full_grid, local_grid, eigenvalues, storage)
end
function solve_poisson_equation!(solver::DistributedFFTBasedPoissonSolver)
λx, λy, λz = solver.eigenvalues
# Apply forward transforms.
solver.plan * solver.storage
# Solve the discrete Poisson equation.
RHS = ϕ = solver.storage[2]
@. ϕ = - RHS / (λx + λy + λz)
    # Set the DC component of the solution (the mean) to zero. This is also
    # necessary because the source term of the Poisson equation has zero mean,
    # so the corresponding eigenvalue sum vanishes and the division above would
    # make the DC component ∞.
if MPI.Comm_rank(MPI.COMM_WORLD) == 0
ϕ[1, 1, 1] = 0
end
# Apply backward transforms.
solver.plan \ solver.storage
return nothing
end
|
[STATEMENT]
lemma "(\<forall>x. P x \<longrightarrow> (\<forall>x. Q x)) \<and>
((\<forall>x. Q x \<or> R x) \<longrightarrow> (\<exists>x. Q x \<and> S x)) \<and>
((\<exists>x. S x) \<longrightarrow> (\<forall>x. L x \<longrightarrow> M x))
\<longrightarrow> (\<forall>x. P x \<and> L x \<longrightarrow> M x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>x. P x \<longrightarrow> (\<forall>x. Q x)) \<and> ((\<forall>x. Q x \<or> R x) \<longrightarrow> (\<exists>x. Q x \<and> S x)) \<and> ((\<exists>x. S x) \<longrightarrow> (\<forall>x. L x \<longrightarrow> M x)) \<longrightarrow> (\<forall>x. P x \<and> L x \<longrightarrow> M x)
[PROOF STEP]
by blast |
[STATEMENT]
lemma eq_\<I>_gpv_inline1:
includes lifting_syntax
assumes "S s1 s2" "eq_\<I>_gpv A \<I> gpv1 gpv2"
shows "rel_spmf (rel_sum (rel_prod A S)
(\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q'' \<in> outs_\<I> \<I>.
(\<forall>r \<in> responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and>
(\<forall>r' \<in> responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r')))))
(inline1 callee1 gpv1 s1) (inline1 callee2 gpv2 s2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (inline1 callee1 gpv1 s1) (inline1 callee2 gpv2 s2)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
S s1 s2
eq_\<I>_gpv A \<I> gpv1 gpv2
goal (1 subgoal):
1. rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (inline1 callee1 gpv1 s1) (inline1 callee2 gpv2 s2)
[PROOF STEP]
proof(induction arbitrary: gpv1 gpv2 s1 s2 rule: parallel_fixp_induct_2_2[OF partial_function_definitions_spmf partial_function_definitions_spmf inline1.mono inline1.mono inline1_def inline1_def, unfolded lub_spmf_empty, case_names adm bottom step])
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. ccpo.admissible (prod_lub spmf.lub_fun spmf.lub_fun) (rel_prod spmf.le_fun spmf.le_fun) (\<lambda>x. \<forall>xa xb xc xd. S xc xd \<longrightarrow> eq_\<I>_gpv A \<I> xa xb \<longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (curry (fst x) xa xc) (curry (snd x) xb xd))
2. \<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (return_pmf None) (return_pmf None)
3. \<And>f g gpv1 gpv2 s1 s2. \<lbrakk>\<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (f gpv1 s1) (g gpv2 s2); S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (the_gpv gpv1 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s1))) (\<lambda>out rpv. the_gpv (callee1 s1 out) \<bind> case_generat (\<lambda>(x, y). f (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv))))) (the_gpv gpv2 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s2))) (\<lambda>out rpv. the_gpv (callee2 s2 out) \<bind> case_generat (\<lambda>(x, y). g (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv)))))
[PROOF STEP]
case adm
[PROOF STATE]
proof (state)
this:
goal (3 subgoals):
1. ccpo.admissible (prod_lub spmf.lub_fun spmf.lub_fun) (rel_prod spmf.le_fun spmf.le_fun) (\<lambda>x. \<forall>xa xb xc xd. S xc xd \<longrightarrow> eq_\<I>_gpv A \<I> xa xb \<longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (curry (fst x) xa xc) (curry (snd x) xb xd))
2. \<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (return_pmf None) (return_pmf None)
3. \<And>f g gpv1 gpv2 s1 s2. \<lbrakk>\<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (f gpv1 s1) (g gpv2 s2); S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (the_gpv gpv1 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s1))) (\<lambda>out rpv. the_gpv (callee1 s1 out) \<bind> case_generat (\<lambda>(x, y). f (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv))))) (the_gpv gpv2 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s2))) (\<lambda>out rpv. the_gpv (callee2 s2 out) \<bind> case_generat (\<lambda>(x, y). g (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv)))))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ccpo.admissible (prod_lub spmf.lub_fun spmf.lub_fun) (rel_prod spmf.le_fun spmf.le_fun) (\<lambda>x. \<forall>xa xb xc xd. S xc xd \<longrightarrow> eq_\<I>_gpv A \<I> xa xb \<longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>a. case a of (q, a) \<Rightarrow> case a of (rpv1, rpv2) \<Rightarrow> \<lambda>a. case a of (q', rpv1', rpv2') \<Rightarrow> q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (curry (fst x) xa xc) (curry (snd x) xb xd))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ccpo.admissible (prod_lub spmf.lub_fun spmf.lub_fun) (rel_prod spmf.le_fun spmf.le_fun) (\<lambda>x. \<forall>xa xb xc xd. S xc xd \<longrightarrow> eq_\<I>_gpv A \<I> xa xb \<longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>a. case a of (q, a) \<Rightarrow> case a of (rpv1, rpv2) \<Rightarrow> \<lambda>a. case a of (q', rpv1', rpv2') \<Rightarrow> q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (curry (fst x) xa xc) (curry (snd x) xb xd))
goal (2 subgoals):
1. \<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (return_pmf None) (return_pmf None)
2. \<And>f g gpv1 gpv2 s1 s2. \<lbrakk>\<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (f gpv1 s1) (g gpv2 s2); S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (the_gpv gpv1 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s1))) (\<lambda>out rpv. the_gpv (callee1 s1 out) \<bind> case_generat (\<lambda>(x, y). f (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv))))) (the_gpv gpv2 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s2))) (\<lambda>out rpv. the_gpv (callee2 s2 out) \<bind> case_generat (\<lambda>(x, y). g (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv)))))
[PROOF STEP]
case bottom
[PROOF STATE]
proof (state)
this:
S s1 s2
eq_\<I>_gpv A \<I> gpv1 gpv2
goal (2 subgoals):
1. \<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (return_pmf None) (return_pmf None)
2. \<And>f g gpv1 gpv2 s1 s2. \<lbrakk>\<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (f gpv1 s1) (g gpv2 s2); S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (the_gpv gpv1 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s1))) (\<lambda>out rpv. the_gpv (callee1 s1 out) \<bind> case_generat (\<lambda>(x, y). f (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv))))) (the_gpv gpv2 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s2))) (\<lambda>out rpv. the_gpv (callee2 s2 out) \<bind> case_generat (\<lambda>(x, y). g (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv)))))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rel_spmf (rel_sum (rel_prod A S) (\<lambda>a. case a of (q, a) \<Rightarrow> case a of (rpv1, rpv2) \<Rightarrow> \<lambda>a. case a of (q', rpv1', rpv2') \<Rightarrow> q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (return_pmf None) (return_pmf None)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
rel_spmf (rel_sum (rel_prod A S) (\<lambda>a. case a of (q, a) \<Rightarrow> case a of (rpv1, rpv2) \<Rightarrow> \<lambda>a. case a of (q', rpv1', rpv2') \<Rightarrow> q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (return_pmf None) (return_pmf None)
goal (1 subgoal):
1. \<And>f g gpv1 gpv2 s1 s2. \<lbrakk>\<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (f gpv1 s1) (g gpv2 s2); S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (the_gpv gpv1 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s1))) (\<lambda>out rpv. the_gpv (callee1 s1 out) \<bind> case_generat (\<lambda>(x, y). f (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv))))) (the_gpv gpv2 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s2))) (\<lambda>out rpv. the_gpv (callee2 s2 out) \<bind> case_generat (\<lambda>(x, y). g (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv)))))
[PROOF STEP]
case (step inline1' inline1'')
[PROOF STATE]
proof (state)
this:
\<lbrakk>S ?s1.0 ?s2.0; eq_\<I>_gpv A \<I> ?gpv1.0 ?gpv2.0\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>a. case a of (q, a) \<Rightarrow> case a of (rpv1, rpv2) \<Rightarrow> \<lambda>a. case a of (q', rpv1', rpv2') \<Rightarrow> q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (inline1' ?gpv1.0 ?s1.0) (inline1'' ?gpv2.0 ?s2.0)
S s1 s2
eq_\<I>_gpv A \<I> gpv1 gpv2
goal (1 subgoal):
1. \<And>f g gpv1 gpv2 s1 s2. \<lbrakk>\<And>gpv1 gpv2 s1 s2. \<lbrakk>S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (f gpv1 s1) (g gpv2 s2); S s1 s2; eq_\<I>_gpv A \<I> gpv1 gpv2\<rbrakk> \<Longrightarrow> rel_spmf (rel_sum (rel_prod A S) (\<lambda>(q, rpv1, rpv2) (q', rpv1', rpv2'). q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (the_gpv gpv1 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s1))) (\<lambda>out rpv. the_gpv (callee1 s1 out) \<bind> case_generat (\<lambda>(x, y). f (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv))))) (the_gpv gpv2 \<bind> case_generat (\<lambda>x. return_spmf (Inl (x, s2))) (\<lambda>out rpv. the_gpv (callee2 s2 out) \<bind> case_generat (\<lambda>(x, y). g (rpv x) y) (\<lambda>out rpv'. return_spmf (Inr (out, rpv', rpv)))))
[PROOF STEP]
from step.prems
[PROOF STATE]
proof (chain)
picking this:
S s1 s2
eq_\<I>_gpv A \<I> gpv1 gpv2
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
S s1 s2
eq_\<I>_gpv A \<I> gpv1 gpv2
goal (1 subgoal):
1. rel_spmf (rel_sum (rel_prod A S) (\<lambda>a. case a of (q, a) \<Rightarrow> case a of (rpv1, rpv2) \<Rightarrow> \<lambda>a. case a of (q', rpv1', rpv2') \<Rightarrow> q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (the_gpv gpv1 \<bind> (\<lambda>a. case a of Pure x \<Rightarrow> return_spmf (Inl (x, s1)) | IO out rpv \<Rightarrow> the_gpv (callee1 s1 out) \<bind> (\<lambda>a. case a of Pure (x, y) \<Rightarrow> inline1' (rpv x) y | IO out rpv' \<Rightarrow> return_spmf (Inr (out, rpv', rpv))))) (the_gpv gpv2 \<bind> (\<lambda>a. case a of Pure x \<Rightarrow> return_spmf (Inl (x, s2)) | IO out rpv \<Rightarrow> the_gpv (callee2 s2 out) \<bind> (\<lambda>a. case a of Pure (x, y) \<Rightarrow> inline1'' (rpv x) y | IO out rpv' \<Rightarrow> return_spmf (Inr (out, rpv', rpv)))))
[PROOF STEP]
by - (erule eq_\<I>_gpvD[THEN rel_spmf_bindI]
, clarsimp split!: generat.split
, erule eq_\<I>_gpvD[OF callee(1), THEN rel_spmf_bindI]
, auto simp add: eq_onp_def intro: step.IH[THEN rel_spmf_mono] elim: eq_\<I>_gpvD[OF callee(1), THEN rel_spmf_bindI] split!: generat.split)
[PROOF STATE]
proof (state)
this:
rel_spmf (rel_sum (rel_prod A S) (\<lambda>a. case a of (q, a) \<Rightarrow> case a of (rpv1, rpv2) \<Rightarrow> \<lambda>a. case a of (q', rpv1', rpv2') \<Rightarrow> q = q' \<and> q' \<in> outs_\<I> \<I>' \<and> (\<exists>q''\<in>outs_\<I> \<I>. (\<forall>r\<in>responses_\<I> \<I>' q'. eq_\<I>_gpv (rel_prod (eq_onp (\<lambda>r'. r' \<in> responses_\<I> \<I> q'')) S) \<I>' (rpv1 r) (rpv1' r)) \<and> (\<forall>r'\<in>responses_\<I> \<I> q''. eq_\<I>_gpv A \<I> (rpv2 r') (rpv2' r'))))) (the_gpv gpv1 \<bind> (\<lambda>a. case a of Pure x \<Rightarrow> return_spmf (Inl (x, s1)) | IO out rpv \<Rightarrow> the_gpv (callee1 s1 out) \<bind> (\<lambda>a. case a of Pure (x, y) \<Rightarrow> inline1' (rpv x) y | IO out rpv' \<Rightarrow> return_spmf (Inr (out, rpv', rpv))))) (the_gpv gpv2 \<bind> (\<lambda>a. case a of Pure x \<Rightarrow> return_spmf (Inl (x, s2)) | IO out rpv \<Rightarrow> the_gpv (callee2 s2 out) \<bind> (\<lambda>a. case a of Pure (x, y) \<Rightarrow> inline1'' (rpv x) y | IO out rpv' \<Rightarrow> return_spmf (Inr (out, rpv', rpv)))))
goal:
No subgoals!
[PROOF STEP]
qed |
```python
import sympy
sympy.init_printing()
## constants
stages = 3
```
```python
s = sympy.Symbol('s')
x = sympy.Symbol('x')
cf1, rf1, cc1, rc1 = sympy.symbols(r"C_f1, R_f1, C_c1, R_c1")
cf2, rf2, cc2, rc2 = sympy.symbols(r"C_f2, R_f2, C_c2, R_c2")
cf3, rf3, cc3, rc3 = sympy.symbols(r"C_f3, R_f3, C_c3, R_c3")
CauerVector = sympy.Matrix([[cc1, rc1],[cc2, rc2],[cc3, rc3]])
## enter FosterVector value manually.
FosterVector = sympy.ones(stages,2);FosterVector[1,1]=2;FosterVector[2,1]=3
ZfVector = sympy.zeros(stages,1)
Zfall = 0
```
```python
CauerVector
```
```python
FosterVector
```
```python
for i in range(3):
ZfVector[i] = 1/(s*FosterVector[i,0] + 1/FosterVector[i,1])
Zfall += ZfVector[i]
ZfVector, Zfall
```
```python
Zfall=Zfall.cancel()
```
```python
(pf, qf) = sympy.fraction(Zfall)
pf = sympy.Poly(pf,s)
qf = sympy.Poly(qf,s)
```
```python
cc3 = qf.nth(3)/pf.nth(2)
```
```python
Yfall = (1/Zfall - cc3*s).cancel()
(qf, pf) = sympy.fraction(Yfall)
qf = sympy.Poly(qf, s)
pf = sympy.Poly(pf, s)
```
```python
rc3 = pf.nth(2)/qf.nth(2)
```
```python
Zfall = (1/Yfall - rc3).cancel()
(pf, qf) = sympy.fraction(Zfall)
qf = sympy.Poly(qf, s)
pf = sympy.Poly(pf, s)
```
```python
cc2 = qf.nth(2)/pf.nth(1)
```
```python
Yfall = (1/Zfall - cc2 * s).cancel()
(qf, pf) = sympy.fraction(Yfall)
qf = sympy.Poly(qf, s)
pf = sympy.Poly(pf, s)
```
```python
rc2 = pf.nth(1)/qf.nth(1)
```
```python
Zfall = (1/Yfall - rc2).cancel()
(pf, qf) = sympy.fraction(Zfall)
qf = sympy.Poly(qf, s)
pf = sympy.Poly(pf, s)
```
```python
cc1 = qf.nth(1)/pf.nth(0)
```
```python
Yfall = (1/Zfall - cc1 * s).cancel()
(qf, pf) = sympy.fraction(Yfall)
qf = sympy.Poly(qf, s)
pf = sympy.Poly(pf, s)
```
```python
rc1 = pf.nth(0)/qf.nth(0)
```
```python
rc3+rc2+rc1
```
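```python
# A compact restatement of the manual cells above (a sketch, not part of the
# original notebook): each Cauer stage is peeled off by one continued-fraction
# step -- the leading capacitor term is removed from the admittance and the
# constant resistor term from the impedance. Stages come out in the same order
# as above (C_c3/R_c3 first, C_c1/R_c1 last).
def cauer_from_foster(Z, s, stages):
    caps, ress = [], []
    for _ in range(stages):
        p, q = sympy.fraction(Z.cancel())              # numerator, denominator of Z
        p, q = sympy.Poly(p, s), sympy.Poly(q, s)
        c = q.nth(q.degree()) / p.nth(p.degree())      # capacitor of this stage
        Y = (1/Z - c*s).cancel()
        q2, p2 = sympy.fraction(Y)                     # numerator, denominator of Y
        q2, p2 = sympy.Poly(q2, s), sympy.Poly(p2, s)
        r = p2.nth(p2.degree()) / q2.nth(q2.degree())  # resistor of this stage
        Z = (1/Y - r).cancel()
        caps.append(sympy.simplify(c))
        ress.append(sympy.simplify(r))
    return caps, ress

Zf0 = sum(1/(s*FosterVector[i, 0] + 1/FosterVector[i, 1]) for i in range(stages))
cauer_caps, cauer_ress = cauer_from_foster(Zf0, s, stages)
cauer_caps, cauer_ress
```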
```python
```
|
REAL*4 FUNCTION LPOWER
. (NTR,XBAR,SX,SLOPES,ALPHA,LAMBDA,SIGMA,N,D_LAMBDA,M,D_GAMMA)
USE MSIMSL
IMPLICIT NONE
C
C-Description-----------------------------------------------------------
C
C Function:
C Main routine of program to compute power calculations
C involving Linear regression.
C
C Input:
C NTR (Integer)
C Number of treatments (1 or 2)
C XBAR(0:1) (Real)
C SX(0:1) (Real)
C SLOPES (Integer) 1=comparing slopes
C 2=comparing intercepts
C
C Output:
C POWER
C
C Notes:
C . This routine was written by Dale Plummer
C
C-Declarations----------------------------------------------------------
C
C Arguments
C
INTEGER NTR,SLOPES
REAL XBAR(0:1),SX(0:1)
REAL ALPHA,LAMBDA,SIGMA,N,D_LAMBDA,M,D_GAMMA
C
C Functions
C
REAL TCUM,TCRVALUE
C
C Locals
C
REAL DRN,TCRV,NU
REAL DELTA,SIGR2
C
C-Code------------------------------------------------------------------
C
C Prompt for input.
C
IF (NTR.EQ.1) THEN
C
C Number of treatments is 1.
C
1 CONTINUE
C . ' Enter ALPHA, LAMBDA, SIGMA, and N: '
DELTA=SX(0)*LAMBDA/SIGMA
NU=N-2.
IF (NU.LT.1.) THEN
LPOWER=-999
RETURN
END IF
C TCRV=TCRVALUE(ALPHA/2.,NU)
TCRV=TIN((1.-ALPHA/2.),NU)
DRN=DELTA*SQRT(N)
LPOWER=TCUM(DRN-TCRV,NU)+TCUM(-DRN-TCRV,NU)
ELSE IF (NTR.EQ.2) THEN
C
C Number of treatments is 2
C
IF (SLOPES.EQ.1) THEN
C
C Comparing slopes...
C
C . ' Enter ALPHA, D_LAMBDA, SIGMA, N, and M: '
SIGR2=SIGMA**2*(1./(M*SX(0)**2)+1./SX(1)**2)
DELTA=D_LAMBDA/SQRT(SIGR2)
NU=N*(1.+M)-4.
IF (NU.LT.1.) THEN
LPOWER=-999
RETURN
END IF
C TCRV=TCRVALUE(ALPHA/2.,NU)
TCRV=TIN((1.-ALPHA/2.),NU)
DRN=DELTA*SQRT(N)
LPOWER=TCUM(DRN-TCRV,NU)+TCUM(-DRN-TCRV,NU)
ELSE IF (SLOPES.EQ.2) THEN
C
C Comparing intercepts...
C
C . ' Enter ALPHA, D_GAMMA, SIGMA, N, and M: '
SIGR2=(SIGMA**2/M)*
. (1.+XBAR(0)**2/SX(0)**2 + M*(1.+XBAR(1)**2/SX(1)**2))
DELTA=D_GAMMA/SQRT(SIGR2)
NU=N*(1.+M)-4.
IF (NU.LT.1.) THEN
LPOWER=-999
RETURN
END IF
C TCRV=TCRVALUE(ALPHA/2.,NU)
TCRV=TIN((1.-ALPHA/2.),NU)
DRN=DELTA*SQRT(N)
LPOWER=TCUM(DRN-TCRV,NU)+TCUM(-DRN-TCRV,NU)
END IF
END IF
C
RETURN
END
|
r=0.73
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7hp45/media/images/d7hp45-019/svc:tesseract/full/full/0.73/default.jpg Accept:application/hocr+xml
|
FUNCTION SPEVAL(N, U, X, Y, B, C, D) ABSH1004
C***********************************************************************ABSH1005
C*****SPEVAL EVALUATES THE DERIVATIVE OF THE CUBIC SPLINE FUNCTION.    *ABSH1006
C*****REFERENCES: *ABSH1007
C*****G.E.FORSYTHE,M.A.MALCOLM,C.B.MOLER, COMPUTER METHODS FOR MATHE- *ABSH1008
C***** MATICAL COMPUTATIONS, PRENTICE-HALL, 1977, P.76. *ABSH1009
C*****LAST REVISION: 6/81 R.M.WIELAND AND W.A.HOULBERG ORNL. *ABSH1010
C*****INPUT PARAMETERS: *ABSH1011
C*****N-NUMBER OF DATA POINTS. *ABSH1012
C*****U-ABSCISSA AT WHICH THE SPLINE IS TO BE EVALUATED. *ABSH1013
C*****X-ARRAY CONTAINING THE DATA ABSCISSAS.                           *ABSH1014
C*****Y-ARRAY CONTAINING THE DATA ORDINATES. *ABSH1015
C*****B,C,D-ARRAYS OF SPLINE COEFFICIENTS COMPUTED BY SPLINE. *ABSH1016
C*****OTHER COMMENTS: *ABSH1017
C*****SPEVAL=B(I)+2*C(I)*(U-X(I))+3*D(I)*(U-X(I))**2 *ABSH1018
C*****WHERE X(I).LT.U.LT.X(I+1), USING HORNER'S RULE. *ABSH1019
C*****IF U.LT.X(1) THEN I=1 IS USED. *ABSH1020
C*****IF U.GE.X(N) THEN I=N IS USED. *ABSH1021
C*****IF U IS NOT IN THE SAME INTERVAL AS THE PREVIOUS CALL, THEN A *ABSH1022
C*****BINARY SEARCH IS PERFORMED TO DETERMINE THE PROPER INTERVAL. *ABSH1023
C***********************************************************************ABSH1024
DIMENSION X(N), Y(N), B(N), C(N), D(N) ABSH1025
DATA I /1/ ABSH1026
IF (I.GE.N) I = 1 ABSH1027
IF (U.LT.X(I)) GO TO 10 ABSH1028
IF (U.LE.X(I+1)) GO TO 30 ABSH1029
C*****BINARY SEARCH. ABSH1030
10 I = 1 ABSH1031
J = N + 1 ABSH1032
20 K = (I+J)/2 ABSH1033
IF (U.LT.X(K)) J = K ABSH1034
IF (U.GE.X(K)) I = K ABSH1035
IF (J.GT.I+1) GO TO 20 ABSH1036
C*****EVALUATE SPLINE. ABSH1037
30 DX = U - X(I) ABSH1038
SPEVAL = B(I) + DX*(2.0*C(I)+3.0*DX*D(I)) ABSH1039
RETURN ABSH1040
END ABSH1041
|
"""
getraster(T::Type, layers::Union{Tuple,Int,Symbol}; kw...)
Download raster layers `layers` from the data source `T`,
returning a `String` for a single layer, or a `NamedTuple`
for a `Tuple` of layers. `layer` values are usually of type
`Symbol`, but can also be `Int` for `BioClim` datasets.
Keyword arguments depend on the specific data source.
They may modify the return value, following this pattern:
- `month` keywords of `AbstractArray` will return a `Vector{String}`
or `Vector{<:NamedTuple}`.
- `date` keywords of `AbstractArray` will return a `Vector{String}` or
`Vector{<:NamedTuple}`.
- `date` keywords of `Tuple{start,end}` will take all the dates between the
start and end dates, and also return `Vector{String}` or `Vector{<:NamedTuple}`.
Where `date` and `month` keywords coexist, `Vector{Vector{String}}` or
`Vector{Vector{NamedTuple}}` is the result. `date` ranges are always
the outer `Vector`, `month` the inner `Vector` with `layer` tuples as
the inner `NamedTuple`. No other keywords can be `Vector`.
This schema may be added to in future for datasets with additional axes,
but should not change for the existing `RasterDataSource` types.
"""
function getraster end
# Vector layers are allowed, but converted to `Tuple` immediately.
function getraster(T::Type, layers::AbstractArray; kw...)
getraster(T, (layers...,); kw...)
end
# Without a layers argument, all layers are downloaded
getraster(T::Type; kw...) = getraster(T, layers(T); kw...)
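
# Illustrative calls (a sketch, not from the original source; the data source,
# layer names and keyword values below are assumptions used only to show the
# return-value pattern described in the docstring above):
#
#   getraster(WorldClim{Climate}, :prec; month=1)              # -> String
#   getraster(WorldClim{Climate}, (:tmin, :tmax); month=1:3)   # -> Vector{<:NamedTuple}
#   getraster(SomeSource, :layer; date=(Date(2001, 1, 1), Date(2001, 3, 1)))
#                                                              # -> Vector{String}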
# Default assumption for `layerkeys` is that the layer
# is the same as the layer key. This is not the case for
# e.g. BioClim, where layers can be specified with Int.
layerkeys(T::Type) = layers(T)
layerkeys(T::Type, layers) = layers
has_matching_layer_size(T) = true
has_constant_dims(T) = true
has_constant_metadata(T) = true
date_sequence(T::Type, dates) = date_sequence(date_step(T), dates)
date_sequence(step, date) = _date_sequence(step, date)
_date_sequence(step, dates::AbstractArray) = dates
_date_sequence(step, dates::NTuple{2}) = first(dates):step:last(dates)
_date_sequence(step, date) = date:step:date
function _maybe_download(uri::URI, filepath)
if !isfile(filepath)
mkpath(dirname(filepath))
@info "Starting download for $uri"
try
HTTP.download(string(uri), filepath)
catch e
# Remove anything that was downloaded before the error
isfile(filepath) && rm(filepath)
throw(e)
end
end
filepath
end
function rasterpath()
if haskey(ENV, "RASTERDATASOURCES_PATH") && isdir(ENV["RASTERDATASOURCES_PATH"])
ENV["RASTERDATASOURCES_PATH"]
else
error("You must set `ENV[\"RASTERDATASOURCES_PATH\"]` to a path in your system")
end
end
function delete_rasters()
# May need an "are you sure"? - this could be a lot of GB of data to lose
ispath(rasterpath()) && rm(rasterpath())
end
function delete_rasters(T::Type)
ispath(rasterpath(T)) && rm(rasterpath(T))
end
_check_res(T, res) =
res in resolutions(T) || throw(ArgumentError("Resolution $res not in $(resolutions(T))"))
_check_layer(T, layer) =
layer in layers(T) || throw(ArgumentError("Layer $layer not in $(layers(T))"))
_date2string(t, date) = Dates.format(date, _dateformat(t))
_string2date(t, d::AbstractString) = Date(d, _dateformat(t))
# Inner map over layers Tuple - month/date maps earlier
# so we get Vectors of NamedTuples of filenames
function _map_layers(T, layers, args...; kw...)
filenames = map(layers) do l
_getraster(T, l, args...; kw...)
end
keys = layerkeys(T, layers)
return NamedTuple{keys}(filenames)
end
|
{--- -}
fun : Int -> Bool
fun x = (x /= 0)
{-
-}
main : IO ()
main = print (fun 5)
-- EXPLANATION:
-- this is about parsing the opening `{---`
-- which used to be parsed as two separate tokens `{-` and `--`
-- making the above program fail
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Morphisms between algebraic structures
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Algebra.Morphism where
import Algebra.Morphism.Definitions as MorphismDefinitions
open import Relation.Binary
open import Algebra
import Algebra.Properties.Group as GroupP
open import Function
open import Level
import Relation.Binary.Reasoning.Setoid as EqR
private
variable
a b ℓ₁ ℓ₂ : Level
A : Set a
B : Set b
------------------------------------------------------------------------
--
module Definitions {a b ℓ₁} (A : Set a) (B : Set b) (_≈_ : Rel B ℓ₁) where
open MorphismDefinitions A B _≈_ public
open import Algebra.Morphism.Structures public
------------------------------------------------------------------------
-- Bundle homomorphisms
module _ {c₁ ℓ₁ c₂ ℓ₂}
(From : Semigroup c₁ ℓ₁)
(To : Semigroup c₂ ℓ₂) where
private
module F = Semigroup From
module T = Semigroup To
open Definitions F.Carrier T.Carrier T._≈_
record IsSemigroupMorphism (⟦_⟧ : Morphism) :
Set (c₁ ⊔ ℓ₁ ⊔ c₂ ⊔ ℓ₂) where
field
⟦⟧-cong : ⟦_⟧ Preserves F._≈_ ⟶ T._≈_
∙-homo : Homomorphic₂ ⟦_⟧ F._∙_ T._∙_
IsSemigroupMorphism-syntax = IsSemigroupMorphism
syntax IsSemigroupMorphism-syntax From To F = F Is From -Semigroup⟶ To
module _ {c₁ ℓ₁ c₂ ℓ₂}
(From : Monoid c₁ ℓ₁)
(To : Monoid c₂ ℓ₂) where
private
module F = Monoid From
module T = Monoid To
open Definitions F.Carrier T.Carrier T._≈_
record IsMonoidMorphism (⟦_⟧ : Morphism) :
Set (c₁ ⊔ ℓ₁ ⊔ c₂ ⊔ ℓ₂) where
field
sm-homo : IsSemigroupMorphism F.semigroup T.semigroup ⟦_⟧
ε-homo : Homomorphic₀ ⟦_⟧ F.ε T.ε
open IsSemigroupMorphism sm-homo public
IsMonoidMorphism-syntax = IsMonoidMorphism
syntax IsMonoidMorphism-syntax From To F = F Is From -Monoid⟶ To
module _ {c₁ ℓ₁ c₂ ℓ₂}
(From : CommutativeMonoid c₁ ℓ₁)
(To : CommutativeMonoid c₂ ℓ₂) where
private
module F = CommutativeMonoid From
module T = CommutativeMonoid To
open Definitions F.Carrier T.Carrier T._≈_
record IsCommutativeMonoidMorphism (⟦_⟧ : Morphism) :
Set (c₁ ⊔ ℓ₁ ⊔ c₂ ⊔ ℓ₂) where
field
mn-homo : IsMonoidMorphism F.monoid T.monoid ⟦_⟧
open IsMonoidMorphism mn-homo public
IsCommutativeMonoidMorphism-syntax = IsCommutativeMonoidMorphism
syntax IsCommutativeMonoidMorphism-syntax From To F = F Is From -CommutativeMonoid⟶ To
module _ {c₁ ℓ₁ c₂ ℓ₂}
(From : IdempotentCommutativeMonoid c₁ ℓ₁)
(To : IdempotentCommutativeMonoid c₂ ℓ₂) where
private
module F = IdempotentCommutativeMonoid From
module T = IdempotentCommutativeMonoid To
open Definitions F.Carrier T.Carrier T._≈_
record IsIdempotentCommutativeMonoidMorphism (⟦_⟧ : Morphism) :
Set (c₁ ⊔ ℓ₁ ⊔ c₂ ⊔ ℓ₂) where
field
mn-homo : IsMonoidMorphism F.monoid T.monoid ⟦_⟧
open IsMonoidMorphism mn-homo public
isCommutativeMonoidMorphism :
IsCommutativeMonoidMorphism F.commutativeMonoid T.commutativeMonoid ⟦_⟧
isCommutativeMonoidMorphism = record { mn-homo = mn-homo }
IsIdempotentCommutativeMonoidMorphism-syntax = IsIdempotentCommutativeMonoidMorphism
syntax IsIdempotentCommutativeMonoidMorphism-syntax From To F = F Is From -IdempotentCommutativeMonoid⟶ To
module _ {c₁ ℓ₁ c₂ ℓ₂}
(From : Group c₁ ℓ₁)
(To : Group c₂ ℓ₂) where
private
module F = Group From
module T = Group To
open Definitions F.Carrier T.Carrier T._≈_
record IsGroupMorphism (⟦_⟧ : Morphism) :
Set (c₁ ⊔ ℓ₁ ⊔ c₂ ⊔ ℓ₂) where
field
mn-homo : IsMonoidMorphism F.monoid T.monoid ⟦_⟧
open IsMonoidMorphism mn-homo public
⁻¹-homo : Homomorphic₁ ⟦_⟧ F._⁻¹ T._⁻¹
⁻¹-homo x = let open EqR T.setoid in T.uniqueˡ-⁻¹ ⟦ x F.⁻¹ ⟧ ⟦ x ⟧ $ begin
⟦ x F.⁻¹ ⟧ T.∙ ⟦ x ⟧ ≈⟨ T.sym (∙-homo (x F.⁻¹) x) ⟩
⟦ x F.⁻¹ F.∙ x ⟧ ≈⟨ ⟦⟧-cong (F.inverseˡ x) ⟩
⟦ F.ε ⟧ ≈⟨ ε-homo ⟩
T.ε ∎
IsGroupMorphism-syntax = IsGroupMorphism
syntax IsGroupMorphism-syntax From To F = F Is From -Group⟶ To
module _ {c₁ ℓ₁ c₂ ℓ₂}
(From : AbelianGroup c₁ ℓ₁)
(To : AbelianGroup c₂ ℓ₂) where
private
module F = AbelianGroup From
module T = AbelianGroup To
open Definitions F.Carrier T.Carrier T._≈_
record IsAbelianGroupMorphism (⟦_⟧ : Morphism) :
Set (c₁ ⊔ ℓ₁ ⊔ c₂ ⊔ ℓ₂) where
field
gp-homo : IsGroupMorphism F.group T.group ⟦_⟧
open IsGroupMorphism gp-homo public
IsAbelianGroupMorphism-syntax = IsAbelianGroupMorphism
syntax IsAbelianGroupMorphism-syntax From To F = F Is From -AbelianGroup⟶ To
module _ {c₁ ℓ₁ c₂ ℓ₂}
(From : Ring c₁ ℓ₁)
(To : Ring c₂ ℓ₂) where
private
module F = Ring From
module T = Ring To
open Definitions F.Carrier T.Carrier T._≈_
record IsRingMorphism (⟦_⟧ : Morphism) :
Set (c₁ ⊔ ℓ₁ ⊔ c₂ ⊔ ℓ₂) where
field
+-abgp-homo : ⟦_⟧ Is F.+-abelianGroup -AbelianGroup⟶ T.+-abelianGroup
*-mn-homo : ⟦_⟧ Is F.*-monoid -Monoid⟶ T.*-monoid
IsRingMorphism-syntax = IsRingMorphism
syntax IsRingMorphism-syntax From To F = F Is From -Ring⟶ To
|
\usepackage[utf8]{inputenc}
\usepackage{geometry}
\geometry{
bindingoffset=0.1in,
left=0.6in,
right=0.6in,
paperheight=8.5in,
paperwidth=5.5in,
heightrounded,
}
\usepackage[nottoc]{tocbibind}
\usepackage{float}
\usepackage{amssymb}
\renewcommand{\labelitemi}{$\square$}
\usepackage{setspace}
\usepackage{enumitem}
\usepackage{lipsum}
\usepackage{etoolbox}
\AtBeginEnvironment{quote}{\singlespace\vspace{-\topsep}\small}
\AtEndEnvironment{quote}{\vspace{-\topsep}\endsinglespace}
\usepackage{attrib}
\usepackage{wrapfig}
\usepackage{multicol}
\usepackage[T1]{fontenc}
\usepackage{fancyhdr}
\renewcommand{\chaptermark}[1]{\markboth{#1}{}}
\renewcommand{\sectionmark}[1]{\markright{#1}}
\pagestyle{fancy}
\fancyhf{}
\fancyhead[LE,RO]{\thepage}
\fancyhead[LO]{\itshape\nouppercase{\rightmark}}
\fancyhead[RE]{\itshape\nouppercase{\leftmark}}
\renewcommand{\headrulewidth}{0pt}
\newenvironment{bottompar}{\par\vspace*{\fill}}{\clearpage}
% layout
\textheight=480pt
\righthyphenmin=3
\lefthyphenmin=3
% Insert a blank page after current page
% https://tex.stackexchange.com/a/36881/18760
\usepackage{afterpage}
\newcommand\blankpage{%
\null
\thispagestyle{empty}%
\addtocounter{page}{-1}%
\newpage}
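% Usage sketch (not in the original preamble): combined with the afterpage
% package loaded above, this inserts one unnumbered blank page right after
% the current page, e.g.
%   \afterpage{\blankpage}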
% How to disable links completely using hyperref package?
% https://tex.stackexchange.com/a/53316/18760
\usepackage{nohyperref}
\usepackage{url}
\hypersetup{
colorlinks,
citecolor=black,
filecolor=black,
linkcolor=black,
urlcolor=black
}
\DeclareUnicodeCharacter{00A0}{ }
% List in footnote in list
% https://tex.stackexchange.com/a/354719/18760
\makeatletter
\let\oldfootnote=\footnote
\renewcommand{\footnote}[2][\empty]% #1 = number, optional, #2 = text
{\ifx\empty#1\relax \oldfootnote{\@listdepth=0 \@enumdepth=0 #2}%
\else \oldfootnote[#1]{\@listdepth=0 \@enumdepth=0 #2}%
\fi}
% How to add an extra level of sections with headings below \subsubsection
% https://tex.stackexchange.com/a/60218/18760
\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}%
{-2.5ex\@plus -1ex \@minus -.25ex}%
{1.25ex \@plus .25ex}%
{\normalfont\normalsize\bfseries}}
% Avoiding page breaks shortly after section/subsection headings
% https://tex.stackexchange.com/a/2358/18760
\patchcmd{\@afterheading}%
{\clubpenalty \@M}{\clubpenalties 3 \@M \@M 0}{}{}
\patchcmd{\@afterheading}%
{\clubpenalty \@clubpenalty}{\clubpenalties 2 \@clubpenalty 0}{}{}
\makeatother
|
section \<open>Postdomination\<close>
theory Postdomination imports CFGExit begin
text \<open>For static interprocedural slicing, we only consider standard control
dependence, hence we only need standard postdomination.\<close>
locale Postdomination = CFGExit sourcenode targetnode kind valid_edge Entry
get_proc get_return_edges procs Main Exit
for sourcenode :: "'edge \<Rightarrow> 'node" and targetnode :: "'edge \<Rightarrow> 'node"
and kind :: "'edge \<Rightarrow> ('var,'val,'ret,'pname) edge_kind"
and valid_edge :: "'edge \<Rightarrow> bool"
and Entry :: "'node" ("'('_Entry'_')") and get_proc :: "'node \<Rightarrow> 'pname"
and get_return_edges :: "'edge \<Rightarrow> 'edge set"
and procs :: "('pname \<times> 'var list \<times> 'var list) list" and Main :: "'pname"
and Exit::"'node" ("'('_Exit'_')") +
assumes Entry_path:"valid_node n \<Longrightarrow> \<exists>as. (_Entry_) -as\<rightarrow>\<^sub>\<surd>* n"
and Exit_path:"valid_node n \<Longrightarrow> \<exists>as. n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)"
and method_exit_unique:
"\<lbrakk>method_exit n; method_exit n'; get_proc n = get_proc n'\<rbrakk> \<Longrightarrow> n = n'"
begin
lemma get_return_edges_unique:
assumes "valid_edge a" and "a' \<in> get_return_edges a" and "a'' \<in> get_return_edges a"
shows "a' = a''"
proof -
from \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close>
obtain Q r p fs where "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs"
by(fastforce dest!:only_call_get_return_edges)
with \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close> obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'"
by(fastforce dest!:call_return_edges)
from \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close> have "valid_edge a'"
by(rule get_return_edges_valid)
from this \<open>kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'\<close> have "get_proc (sourcenode a') = p"
by(rule get_proc_return)
from \<open>valid_edge a'\<close> \<open>kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'\<close> have "method_exit (sourcenode a')"
by(fastforce simp:method_exit_def)
from \<open>valid_edge a\<close> \<open>a'' \<in> get_return_edges a\<close> \<open>kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs\<close>
obtain Q'' f'' where "kind a'' = Q''\<hookleftarrow>\<^bsub>p\<^esub>f''" by(fastforce dest!:call_return_edges)
from \<open>valid_edge a\<close> \<open>a'' \<in> get_return_edges a\<close> have "valid_edge a''"
by(rule get_return_edges_valid)
from this \<open>kind a'' = Q''\<hookleftarrow>\<^bsub>p\<^esub>f''\<close> have "get_proc (sourcenode a'') = p"
by(rule get_proc_return)
from \<open>valid_edge a''\<close> \<open>kind a'' = Q''\<hookleftarrow>\<^bsub>p\<^esub>f''\<close> have "method_exit (sourcenode a'')"
by(fastforce simp:method_exit_def)
with \<open>method_exit (sourcenode a')\<close> \<open>get_proc (sourcenode a') = p\<close>
\<open>get_proc (sourcenode a'') = p\<close> have "sourcenode a' = sourcenode a''"
by(fastforce elim!:method_exit_unique)
from \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close>
obtain ax' where "valid_edge ax'" and "sourcenode ax' = sourcenode a"
and "targetnode ax' = targetnode a'" and "intra_kind(kind ax')"
by -(drule call_return_node_edge,auto simp:intra_kind_def)
from \<open>valid_edge a\<close> \<open>a'' \<in> get_return_edges a\<close>
obtain ax'' where "valid_edge ax''" and "sourcenode ax'' = sourcenode a"
and "targetnode ax'' = targetnode a''" and "intra_kind(kind ax'')"
by -(drule call_return_node_edge,auto simp:intra_kind_def)
from \<open>valid_edge a\<close> \<open>kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs\<close> \<open>valid_edge ax'\<close>
\<open>sourcenode ax' = sourcenode a\<close> \<open>intra_kind(kind ax')\<close>
\<open>valid_edge ax''\<close> \<open>sourcenode ax'' = sourcenode a\<close> \<open>intra_kind(kind ax'')\<close>
have "ax' = ax''" by -(drule call_only_one_intra_edge,auto)
with \<open>targetnode ax' = targetnode a'\<close> \<open>targetnode ax'' = targetnode a''\<close>
have "targetnode a' = targetnode a''" by simp
with \<open>valid_edge a'\<close> \<open>valid_edge a''\<close> \<open>sourcenode a' = sourcenode a''\<close>
show ?thesis by(rule edge_det)
qed
definition postdominate :: "'node \<Rightarrow> 'node \<Rightarrow> bool" ("_ postdominates _" [51,0])
where postdominate_def:"n' postdominates n \<equiv>
(valid_node n \<and> valid_node n' \<and>
(\<forall>as pex. (n -as\<rightarrow>\<^sub>\<iota>* pex \<and> method_exit pex) \<longrightarrow> n' \<in> set (sourcenodes as)))"
lemma postdominate_implies_inner_path:
assumes "n' postdominates n"
obtains as where "n -as\<rightarrow>\<^sub>\<iota>* n'" and "n' \<notin> set (sourcenodes as)"
proof(atomize_elim)
from \<open>n' postdominates n\<close> have "valid_node n"
and all:"\<forall>as pex. (n -as\<rightarrow>\<^sub>\<iota>* pex \<and> method_exit pex) \<longrightarrow> n' \<in> set (sourcenodes as)"
by(auto simp:postdominate_def)
from \<open>valid_node n\<close> obtain asx where "n -asx\<rightarrow>\<^sub>\<surd>* (_Exit_)" by(auto dest:Exit_path)
then obtain as where "n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)"
and "\<forall>a \<in> set as. intra_kind(kind a) \<or> (\<exists>Q f p. kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f)"
by -(erule valid_Exit_path_descending_path)
show "\<exists>as. n -as\<rightarrow>\<^sub>\<iota>* n' \<and> n' \<notin> set (sourcenodes as)"
proof(cases "\<exists>a \<in> set as. \<exists>Q f p. kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f")
case True
then obtain asx ax asx' where [simp]:"as = asx@ax#asx'"
and "\<exists>Q f p. kind ax = Q\<hookleftarrow>\<^bsub>p\<^esub>f" and "\<forall>a \<in> set asx. \<forall>Q f p. kind a \<noteq> Q\<hookleftarrow>\<^bsub>p\<^esub>f"
by -(erule split_list_first_propE,simp)
with \<open>\<forall>a \<in> set as. intra_kind(kind a) \<or> (\<exists>Q f p. kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f)\<close>
have "\<forall>a \<in> set asx. intra_kind(kind a)" by auto
from \<open>n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)\<close> have "n -asx\<rightarrow>\<^sub>\<surd>* sourcenode ax"
and "valid_edge ax" by(auto dest:vp_split)
from \<open>n -asx\<rightarrow>\<^sub>\<surd>* sourcenode ax\<close> \<open>\<forall>a \<in> set asx. intra_kind(kind a)\<close>
have "n -asx\<rightarrow>\<^sub>\<iota>* sourcenode ax" by(simp add:vp_def intra_path_def)
from \<open>valid_edge ax\<close> \<open>\<exists>Q f p. kind ax = Q\<hookleftarrow>\<^bsub>p\<^esub>f\<close>
have "method_exit (sourcenode ax)" by(fastforce simp:method_exit_def)
with \<open>n -asx\<rightarrow>\<^sub>\<iota>* sourcenode ax\<close> all have "n' \<in> set (sourcenodes asx)" by fastforce
then obtain xs ys where "sourcenodes asx = xs@n'#ys" and "n' \<notin> set xs"
by(fastforce dest:split_list_first)
then obtain as' a as'' where "xs = sourcenodes as'"
and [simp]:"asx = as'@a#as''" and "sourcenode a = n'"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
from \<open>n -asx\<rightarrow>\<^sub>\<iota>* sourcenode ax\<close> have "n -as'\<rightarrow>\<^sub>\<iota>* sourcenode a"
by(fastforce dest:path_split simp:intra_path_def)
with \<open>sourcenode a = n'\<close> \<open>n' \<notin> set xs\<close> \<open>xs = sourcenodes as'\<close>
show ?thesis by fastforce
next
case False
with \<open>\<forall>a \<in> set as. intra_kind(kind a) \<or> (\<exists>Q f p. kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f)\<close>
have "\<forall>a \<in> set as. intra_kind(kind a)" by fastforce
with \<open>n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)\<close> all have "n' \<in> set (sourcenodes as)"
by(auto simp:vp_def intra_path_def simp:method_exit_def)
then obtain xs ys where "sourcenodes as = xs@n'#ys" and "n' \<notin> set xs"
by(fastforce dest:split_list_first)
then obtain as' a as'' where "xs = sourcenodes as'"
and [simp]:"as = as'@a#as''" and "sourcenode a = n'"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
from \<open>n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)\<close> \<open>\<forall>a \<in> set as. intra_kind(kind a)\<close> \<open>as = as'@a#as''\<close>
have "n -as'\<rightarrow>\<^sub>\<iota>* sourcenode a"
by(fastforce dest:path_split simp:vp_def intra_path_def)
with \<open>sourcenode a = n'\<close> \<open>n' \<notin> set xs\<close> \<open>xs = sourcenodes as'\<close>
show ?thesis by fastforce
qed
qed
lemma postdominate_variant:
assumes "n' postdominates n"
shows "\<forall>as. n -as\<rightarrow>\<^sub>\<surd>* (_Exit_) \<longrightarrow> n' \<in> set (sourcenodes as)"
proof -
from \<open>n' postdominates n\<close>
have all:"\<forall>as pex. (n -as\<rightarrow>\<^sub>\<iota>* pex \<and> method_exit pex) \<longrightarrow> n' \<in> set (sourcenodes as)"
by(simp add:postdominate_def)
{ fix as assume "n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)"
then obtain as' pex where "n -as'\<rightarrow>\<^sub>\<iota>* pex" and "method_exit pex"
and "set(sourcenodes as') \<subseteq> set(sourcenodes as)"
by(erule valid_Exit_path_intra_path)
from \<open>n -as'\<rightarrow>\<^sub>\<iota>* pex\<close> \<open>method_exit pex\<close> \<open>n' postdominates n\<close>
have "n' \<in> set (sourcenodes as')" by(fastforce simp:postdominate_def)
with \<open>set(sourcenodes as') \<subseteq> set(sourcenodes as)\<close>
have "n' \<in> set (sourcenodes as)" by fastforce }
thus ?thesis by simp
qed
lemma postdominate_refl:
assumes "valid_node n" and "\<not> method_exit n" shows "n postdominates n"
using \<open>valid_node n\<close>
proof(induct rule:valid_node_cases)
case Entry
{ fix as pex assume "(_Entry_) -as\<rightarrow>\<^sub>\<iota>* pex" and "method_exit pex"
from \<open>method_exit pex\<close> have "(_Entry_) \<in> set (sourcenodes as)"
proof(rule method_exit_cases)
assume "pex = (_Exit_)"
with \<open>(_Entry_) -as\<rightarrow>\<^sub>\<iota>* pex\<close> have "as \<noteq> []"
apply(clarsimp simp:intra_path_def) apply(erule path.cases)
by (drule sym,simp,drule Exit_noteq_Entry,auto)
with \<open>(_Entry_) -as\<rightarrow>\<^sub>\<iota>* pex\<close> have "hd (sourcenodes as) = (_Entry_)"
by(fastforce intro:path_sourcenode simp:intra_path_def)
with \<open>as \<noteq> []\<close>show ?thesis by(fastforce intro:hd_in_set simp:sourcenodes_def)
next
fix a Q p f assume "pex = sourcenode a" and "valid_edge a" and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
from \<open>(_Entry_) -as\<rightarrow>\<^sub>\<iota>* pex\<close> have "get_proc (_Entry_) = get_proc pex"
by(rule intra_path_get_procs)
hence "get_proc pex = Main" by(simp add:get_proc_Entry)
from \<open>valid_edge a\<close> \<open>kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f\<close> have "get_proc (sourcenode a) = p"
by(rule get_proc_return)
with \<open>pex = sourcenode a\<close> \<open>get_proc pex = Main\<close> have "p = Main" by simp
with \<open>valid_edge a\<close> \<open>kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f\<close> have False
by simp (rule Main_no_return_source)
thus ?thesis by simp
qed }
with Entry show ?thesis
by(fastforce intro:empty_path simp:postdominate_def intra_path_def)
next
case Exit
with \<open>\<not> method_exit n\<close> have False by(simp add:method_exit_def)
thus ?thesis by simp
next
case inner
show ?thesis
proof(cases "\<exists>as. n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)")
case True
{ fix as pex assume "n -as\<rightarrow>\<^sub>\<iota>* pex" and "method_exit pex"
with \<open>\<not> method_exit n\<close> have "as \<noteq> []"
by(fastforce elim:path.cases simp:intra_path_def)
with \<open>n -as\<rightarrow>\<^sub>\<iota>* pex\<close> inner have "hd (sourcenodes as) = n"
by(fastforce intro:path_sourcenode simp:intra_path_def)
from \<open>as \<noteq> []\<close> have "sourcenodes as \<noteq> []" by(simp add:sourcenodes_def)
with \<open>hd (sourcenodes as) = n\<close>[THEN sym]
have "n \<in> set (sourcenodes as)" by simp }
hence "\<forall>as pex. (n -as\<rightarrow>\<^sub>\<iota>* pex \<and> method_exit pex) \<longrightarrow> n \<in> set (sourcenodes as)"
by fastforce
with True inner show ?thesis
by(fastforce intro:empty_path
simp:postdominate_def inner_is_valid intra_path_def)
next
case False
with inner show ?thesis by(fastforce dest:inner_is_valid Exit_path)
qed
qed
lemma postdominate_trans:
assumes "n'' postdominates n" and "n' postdominates n''"
shows "n' postdominates n"
proof -
from \<open>n'' postdominates n\<close> \<open>n' postdominates n''\<close>
have "valid_node n" and "valid_node n'" by(simp_all add:postdominate_def)
{ fix as pex assume "n -as\<rightarrow>\<^sub>\<iota>* pex" and "method_exit pex"
with \<open>n'' postdominates n\<close> have "n'' \<in> set (sourcenodes as)"
by(fastforce simp:postdominate_def)
then obtain ns' ns'' where "sourcenodes as = ns'@n''#ns''"
by(auto dest:split_list)
then obtain as' as'' a where "sourcenodes as'' = ns''" and [simp]:"as=as'@a#as''"
and [simp]:"sourcenode a = n''"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
from \<open>n -as\<rightarrow>\<^sub>\<iota>* pex\<close> have "n -as'@a#as''\<rightarrow>\<^sub>\<iota>* pex" by simp
hence "n'' -a#as''\<rightarrow>\<^sub>\<iota>* pex"
by(fastforce dest:path_split_second simp:intra_path_def)
with \<open>n' postdominates n''\<close> \<open>method_exit pex\<close>
have "n' \<in> set(sourcenodes (a#as''))" by(fastforce simp:postdominate_def)
hence "n' \<in> set (sourcenodes as)" by(fastforce simp:sourcenodes_def) }
with \<open>valid_node n\<close> \<open>valid_node n'\<close>
show ?thesis by(fastforce simp:postdominate_def)
qed
lemma postdominate_antisym:
assumes "n' postdominates n" and "n postdominates n'"
shows "n = n'"
proof -
from \<open>n' postdominates n\<close> have "valid_node n" and "valid_node n'"
by(auto simp:postdominate_def)
from \<open>valid_node n\<close> obtain asx where "n -asx\<rightarrow>\<^sub>\<surd>* (_Exit_)" by(auto dest:Exit_path)
then obtain as' pex where "n -as'\<rightarrow>\<^sub>\<iota>* pex" and "method_exit pex"
by -(erule valid_Exit_path_intra_path)
with \<open>n' postdominates n\<close> have "\<exists>nx \<in> set(sourcenodes as'). nx = n'"
by(fastforce simp:postdominate_def)
then obtain ns ns' where "sourcenodes as' = ns@n'#ns'"
and "\<forall>nx \<in> set ns'. nx \<noteq> n'"
by(fastforce elim!:split_list_last_propE)
from \<open>sourcenodes as' = ns@n'#ns'\<close> obtain asx a asx'
where [simp]:"ns' = sourcenodes asx'" "as' = asx@a#asx'" "sourcenode a = n'"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
from \<open>n -as'\<rightarrow>\<^sub>\<iota>* pex\<close> have "n' -a#asx'\<rightarrow>\<^sub>\<iota>* pex"
by(fastforce dest:path_split_second simp:intra_path_def)
with \<open>n postdominates n'\<close> \<open>method_exit pex\<close> have "n \<in> set(sourcenodes (a#asx'))"
by(fastforce simp:postdominate_def)
hence "n = n' \<or> n \<in> set(sourcenodes asx')" by(simp add:sourcenodes_def)
thus ?thesis
proof
assume "n = n'" thus ?thesis .
next
assume "n \<in> set(sourcenodes asx')"
then obtain nsx' nsx'' where "sourcenodes asx' = nsx'@n#nsx''"
by(auto dest:split_list)
then obtain asi asi' a' where [simp]:"asx' = asi@a'#asi'" "sourcenode a' = n"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
with \<open>n -as'\<rightarrow>\<^sub>\<iota>* pex\<close> have "n -(asx@a#asi)@a'#asi'\<rightarrow>\<^sub>\<iota>* pex" by simp
hence "n -(asx@a#asi)@a'#asi'\<rightarrow>* pex"
and "\<forall>a \<in> set ((asx@a#asi)@a'#asi'). intra_kind (kind a)"
by(simp_all add:intra_path_def)
from \<open>n -(asx@a#asi)@a'#asi'\<rightarrow>* pex\<close>
have "n -a'#asi'\<rightarrow>* pex" by(fastforce dest:path_split_second)
with \<open>\<forall>a \<in> set ((asx@a#asi)@a'#asi'). intra_kind (kind a)\<close>
have "n -a'#asi'\<rightarrow>\<^sub>\<iota>* pex" by(simp add:intra_path_def)
with \<open>n' postdominates n\<close> \<open>method_exit pex\<close>
have "n' \<in> set(sourcenodes (a'#asi'))" by(fastforce simp:postdominate_def)
hence "n' = n \<or> n' \<in> set(sourcenodes asi')"
by(simp add:sourcenodes_def)
thus ?thesis
proof
assume "n' = n" thus ?thesis by(rule sym)
next
assume "n' \<in> set(sourcenodes asi')"
with \<open>\<forall>nx \<in> set ns'. nx \<noteq> n'\<close> have False by(fastforce simp:sourcenodes_def)
thus ?thesis by simp
qed
qed
qed
lemma postdominate_path_branch:
assumes "n -as\<rightarrow>* n''" and "n' postdominates n''" and "\<not> n' postdominates n"
obtains a as' as'' where "as = as'@a#as''" and "valid_edge a"
and "\<not> n' postdominates (sourcenode a)" and "n' postdominates (targetnode a)"
proof(atomize_elim)
from assms
show "\<exists>as' a as''. as = as'@a#as'' \<and> valid_edge a \<and>
\<not> n' postdominates (sourcenode a) \<and> n' postdominates (targetnode a)"
proof(induct rule:path.induct)
case (Cons_path n'' as nx a n)
note IH = \<open>\<lbrakk>n' postdominates nx; \<not> n' postdominates n''\<rbrakk>
\<Longrightarrow> \<exists>as' a as''. as = as'@a#as'' \<and> valid_edge a \<and>
\<not> n' postdominates sourcenode a \<and> n' postdominates targetnode a\<close>
show ?case
proof(cases "n' postdominates n''")
case True
with \<open>\<not> n' postdominates n\<close> \<open>sourcenode a = n\<close> \<open>targetnode a = n''\<close>
\<open>valid_edge a\<close> show ?thesis by blast
next
case False
from IH[OF \<open>n' postdominates nx\<close> this] show ?thesis
by clarsimp(rule_tac x="a#as'" in exI,clarsimp)
qed
qed simp
qed
lemma Exit_no_postdominator:
assumes "(_Exit_) postdominates n" shows False
proof -
from \<open>(_Exit_) postdominates n\<close> have "valid_node n" by(simp add:postdominate_def)
from \<open>valid_node n\<close> obtain asx where "n -asx\<rightarrow>\<^sub>\<surd>* (_Exit_)" by(auto dest:Exit_path)
then obtain as' pex where "n -as'\<rightarrow>\<^sub>\<iota>* pex" and "method_exit pex"
by -(erule valid_Exit_path_intra_path)
with \<open>(_Exit_) postdominates n\<close> have "(_Exit_) \<in> set (sourcenodes as')"
by(fastforce simp:postdominate_def)
with \<open>n -as'\<rightarrow>\<^sub>\<iota>* pex\<close> show False by(fastforce simp:intra_path_def)
qed
lemma postdominate_inner_path_targetnode:
assumes "n' postdominates n" and "n -as\<rightarrow>\<^sub>\<iota>* n''" and "n' \<notin> set(sourcenodes as)"
shows "n' postdominates n''"
proof -
from \<open>n' postdominates n\<close> obtain asx
where "valid_node n" and "valid_node n'"
and all:"\<forall>as pex. (n -as\<rightarrow>\<^sub>\<iota>* pex \<and> method_exit pex) \<longrightarrow> n' \<in> set (sourcenodes as)"
by(auto simp:postdominate_def)
from \<open>n -as\<rightarrow>\<^sub>\<iota>* n''\<close> have "valid_node n''"
by(fastforce dest:path_valid_node simp:intra_path_def)
have "\<forall>as' pex'. (n'' -as'\<rightarrow>\<^sub>\<iota>* pex' \<and> method_exit pex') \<longrightarrow>
n' \<in> set (sourcenodes as')"
proof(rule ccontr)
assume "\<not> (\<forall>as' pex'. (n'' -as'\<rightarrow>\<^sub>\<iota>* pex' \<and> method_exit pex') \<longrightarrow>
n' \<in> set (sourcenodes as'))"
then obtain as' pex' where "n'' -as'\<rightarrow>\<^sub>\<iota>* pex'" and "method_exit pex'"
and "n' \<notin> set (sourcenodes as')" by blast
from \<open>n -as\<rightarrow>\<^sub>\<iota>* n''\<close> \<open>n'' -as'\<rightarrow>\<^sub>\<iota>* pex'\<close> have "n -as@as'\<rightarrow>\<^sub>\<iota>* pex'"
by(fastforce intro:path_Append simp:intra_path_def)
from \<open>n' \<notin> set(sourcenodes as)\<close> \<open>n' \<notin> set (sourcenodes as')\<close>
have "n' \<notin> set (sourcenodes (as@as'))"
by(simp add:sourcenodes_def)
with \<open>n -as@as'\<rightarrow>\<^sub>\<iota>* pex'\<close> \<open>method_exit pex'\<close> \<open>n' postdominates n\<close>
show False by(fastforce simp:postdominate_def)
qed
with \<open>valid_node n'\<close> \<open>valid_node n''\<close>
show ?thesis by(auto simp:postdominate_def)
qed
lemma not_postdominate_source_not_postdominate_target:
assumes "\<not> n postdominates (sourcenode a)"
and "valid_node n" and "valid_edge a" and "intra_kind (kind a)"
obtains ax where "sourcenode a = sourcenode ax" and "valid_edge ax"
and "\<not> n postdominates targetnode ax"
proof(atomize_elim)
show "\<exists>ax. sourcenode a = sourcenode ax \<and> valid_edge ax \<and>
\<not> n postdominates targetnode ax"
proof -
from assms obtain asx pex
where "sourcenode a -asx\<rightarrow>\<^sub>\<iota>* pex" and "method_exit pex"
and "n \<notin> set(sourcenodes asx)" by(fastforce simp:postdominate_def)
show ?thesis
proof(cases asx)
case Nil
with \<open>sourcenode a -asx\<rightarrow>\<^sub>\<iota>* pex\<close> have "pex = sourcenode a"
by(fastforce simp:intra_path_def)
with \<open>method_exit pex\<close> have "method_exit (sourcenode a)" by simp
thus ?thesis
proof(rule method_exit_cases)
assume "sourcenode a = (_Exit_)"
with \<open>valid_edge a\<close> have False by(rule Exit_source)
thus ?thesis by simp
next
fix a' Q f p assume "sourcenode a = sourcenode a'"
and "valid_edge a'" and "kind a' = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
hence False using \<open>intra_kind (kind a)\<close> \<open>valid_edge a\<close>
by(fastforce dest:return_edges_only simp:intra_kind_def)
thus ?thesis by simp
qed
next
case (Cons ax asx')
with \<open>sourcenode a -asx\<rightarrow>\<^sub>\<iota>* pex\<close>
have "sourcenode a -[]@ax#asx'\<rightarrow>* pex"
and "\<forall>a \<in> set (ax#asx'). intra_kind (kind a)" by(simp_all add:intra_path_def)
from \<open>sourcenode a -[]@ax#asx'\<rightarrow>* pex\<close>
have "sourcenode a = sourcenode ax" and "valid_edge ax"
and "targetnode ax -asx'\<rightarrow>* pex" by(fastforce dest:path_split)+
with \<open>\<forall>a \<in> set (ax#asx'). intra_kind (kind a)\<close>
have "targetnode ax -asx'\<rightarrow>\<^sub>\<iota>* pex" by(simp add:intra_path_def)
with \<open>n \<notin> set(sourcenodes asx)\<close> Cons \<open>method_exit pex\<close>
have "\<not> n postdominates targetnode ax"
by(fastforce simp:postdominate_def sourcenodes_def)
with \<open>sourcenode a = sourcenode ax\<close> \<open>valid_edge ax\<close> show ?thesis by blast
qed
qed
qed
lemma inner_node_Exit_edge:
assumes "inner_node n"
obtains a where "valid_edge a" and "intra_kind (kind a)"
and "inner_node (sourcenode a)" and "targetnode a = (_Exit_)"
proof(atomize_elim)
from \<open>inner_node n\<close> have "valid_node n" by(rule inner_is_valid)
then obtain as where "n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)" by(fastforce dest:Exit_path)
show "\<exists>a. valid_edge a \<and> intra_kind (kind a) \<and> inner_node (sourcenode a) \<and>
targetnode a = (_Exit_)"
proof(cases "as = []")
case True
with \<open>inner_node n\<close> \<open>n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)\<close> have False by(fastforce simp:vp_def)
thus ?thesis by simp
next
case False
with \<open>n -as\<rightarrow>\<^sub>\<surd>* (_Exit_)\<close> obtain a' as' where "as = as'@[a']"
and "n -as'\<rightarrow>\<^sub>\<surd>* sourcenode a'" and "valid_edge a'"
and "(_Exit_) = targetnode a'" by -(erule vp_split_snoc)
from \<open>valid_edge a'\<close> have "valid_node (sourcenode a')" by simp
thus ?thesis
proof(cases "sourcenode a'" rule:valid_node_cases)
case Entry
with \<open>n -as'\<rightarrow>\<^sub>\<surd>* sourcenode a'\<close> have "n -as'\<rightarrow>* (_Entry_)" by(simp add:vp_def)
with \<open>inner_node n\<close>
have False by -(drule path_Entry_target,auto simp:inner_node_def)
thus ?thesis by simp
next
case Exit
from \<open>valid_edge a'\<close> this have False by(rule Exit_source)
thus ?thesis by simp
next
case inner
have "intra_kind (kind a')"
proof(cases "kind a'" rule:edge_kind_cases)
case Intra thus ?thesis by simp
next
case (Call Q r p fs)
with \<open>valid_edge a'\<close> have "get_proc(targetnode a') = p" by(rule get_proc_call)
with \<open>(_Exit_) = targetnode a'\<close> get_proc_Exit have "p = Main" by simp
with \<open>kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs\<close> have "kind a' = Q:r\<hookrightarrow>\<^bsub>Main\<^esub>fs" by simp
with \<open>valid_edge a'\<close> have False by(rule Main_no_call_target)
thus ?thesis by simp
next
case (Return Q p f)
from \<open>valid_edge a'\<close> \<open>kind a' = Q\<hookleftarrow>\<^bsub>p\<^esub>f\<close> \<open>(_Exit_) = targetnode a'\<close>[THEN sym]
have False by(rule Exit_no_return_target)
thus ?thesis by simp
qed
with \<open>valid_edge a'\<close> \<open>(_Exit_) = targetnode a'\<close> \<open>inner_node (sourcenode a')\<close>
show ?thesis by simp blast
qed
qed
qed
lemma inner_node_Entry_edge:
assumes "inner_node n"
obtains a where "valid_edge a" and "intra_kind (kind a)"
and "inner_node (targetnode a)" and "sourcenode a = (_Entry_)"
proof(atomize_elim)
from \<open>inner_node n\<close> have "valid_node n" by(rule inner_is_valid)
then obtain as where "(_Entry_) -as\<rightarrow>\<^sub>\<surd>* n" by(fastforce dest:Entry_path)
show "\<exists>a. valid_edge a \<and> intra_kind (kind a) \<and> inner_node (targetnode a) \<and>
sourcenode a = (_Entry_)"
proof(cases "as = []")
case True
with \<open>inner_node n\<close> \<open>(_Entry_) -as\<rightarrow>\<^sub>\<surd>* n\<close> have False
by(fastforce simp:inner_node_def vp_def)
thus ?thesis by simp
next
case False
with \<open>(_Entry_) -as\<rightarrow>\<^sub>\<surd>* n\<close> obtain a' as' where "as = a'#as'"
and "targetnode a' -as'\<rightarrow>\<^sub>\<surd>* n" and "valid_edge a'"
and "(_Entry_) = sourcenode a'" by -(erule vp_split_Cons)
from \<open>valid_edge a'\<close> have "valid_node (targetnode a')" by simp
thus ?thesis
proof(cases "targetnode a'" rule:valid_node_cases)
case Entry
from \<open>valid_edge a'\<close> this have False by(rule Entry_target)
thus ?thesis by simp
next
case Exit
with \<open>targetnode a' -as'\<rightarrow>\<^sub>\<surd>* n\<close> have "(_Exit_) -as'\<rightarrow>* n" by(simp add:vp_def)
with \<open>inner_node n\<close>
have False by -(drule path_Exit_source,auto simp:inner_node_def)
thus ?thesis by simp
next
case inner
have "intra_kind (kind a')"
proof(cases "kind a'" rule:edge_kind_cases)
case Intra thus ?thesis by simp
next
case (Call Q r p fs)
from \<open>valid_edge a'\<close> \<open>kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs\<close>
\<open>(_Entry_) = sourcenode a'\<close>[THEN sym]
have False by(rule Entry_no_call_source)
thus ?thesis by simp
next
case (Return Q p f)
with \<open>valid_edge a'\<close> have "get_proc(sourcenode a') = p"
by(rule get_proc_return)
with \<open>(_Entry_) = sourcenode a'\<close> get_proc_Entry have "p = Main" by simp
with \<open>kind a' = Q\<hookleftarrow>\<^bsub>p\<^esub>f\<close> have "kind a' = Q\<hookleftarrow>\<^bsub>Main\<^esub>f" by simp
with \<open>valid_edge a'\<close> have False by(rule Main_no_return_source)
thus ?thesis by simp
qed
with \<open>valid_edge a'\<close> \<open>(_Entry_) = sourcenode a'\<close> \<open>inner_node (targetnode a')\<close>
show ?thesis by simp blast
qed
qed
qed
lemma intra_path_to_matching_method_exit:
assumes "method_exit n'" and "get_proc n = get_proc n'" and "valid_node n"
obtains as where "n -as\<rightarrow>\<^sub>\<iota>* n'"
proof(atomize_elim)
from \<open>valid_node n\<close> obtain as' where "n -as'\<rightarrow>\<^sub>\<surd>* (_Exit_)"
by(fastforce dest:Exit_path)
then obtain as mex where "n -as\<rightarrow>\<^sub>\<iota>* mex" and "method_exit mex"
by(fastforce elim:valid_Exit_path_intra_path)
from \<open>n -as\<rightarrow>\<^sub>\<iota>* mex\<close> have "get_proc n = get_proc mex"
by(rule intra_path_get_procs)
with \<open>method_exit n'\<close> \<open>get_proc n = get_proc n'\<close> \<open>method_exit mex\<close>
have "mex = n'" by(fastforce intro:method_exit_unique)
with \<open>n -as\<rightarrow>\<^sub>\<iota>* mex\<close> show "\<exists>as. n -as\<rightarrow>\<^sub>\<iota>* n'" by fastforce
qed
end
end
|
The democratization of data is a real phenomenon, but building a sustainable data democracy means truly giving power to the people. The alternative is just a shift of power from traditional data analysts within IT departments to a new generation of data scientists and app developers. And this seems a lot more like a dictatorship than a democracy — a benevolent dictatorship, but a dictatorship nonetheless.
These individuals and companies aren’t entirely bad, of course, and they’re actually necessary. Apps that help predict what we want to read, where we’ll want to go next or what songs we’ll like are certainly cool and even beneficial in their ability to automate and optimize certain aspects of our lives and jobs. In the corporate world, there will always be data experts who are smarter and trained in advanced techniques and who should be called upon to answer the toughest questions or tackle the thorniest problems.
Last week, for example, Salesforce.com introduced a new feature of its Chatter intra-company social network that categorizes a variety of data sources so employees can easily find the people, documents and other information relevant to topics they’re interested in. As with similarly devised services — LinkedIn’s People You May Know, the graph search movement, or any type of service using an interest graph — the new feature’s beauty and utility lie in its abstraction of the underlying semantic algorithms and data processing.
The problem, however, comes when we’re forced to rely on these people, features and applications to decide how data can affect our lives or jobs, or what questions we can answer using the troves of data now available to us. In a true data democracy, citizens must be empowered to make use of their own data as they see fit, relying on apps and experts only by choice or when the task truly requires an expert hand. At the very least, citizens must be informed enough to have a meaningful voice in bigger decisions about data.
The good news is that there’s a whole new breed of startups trying to empower the data citizenry, whatever their role. Companies such as 0xdata, Precog and BigML are trying to make data science more accessible to everyday business users. There are next-generation business intelligence startups such as SiSense, Platfora and ClearStory rethinking how business analytics are done in an era of HTML5 and big data. And then there are companies such as Statwing, Infogram and Datahero (which will be in beta mode soon, by the way) trying to bring data analysis to the unwashed non-data-savvy masses.
Combined with a growing number of publicly available data sets and data marketplaces, and more ways of collecting every possible kind of data — personal fitness, web analytics, energy consumption, you name it — these self-service tools can provide an invaluable service. In January, I highlighted how a number of them can work by using my own dietary and activity data, as well as publicly available gun-ownership data and even web-page text. But as I explained then, they’re still not always easy for laypeople to use, much less perfect.
Statwing spells out statistics for laypeople.
Can Tableau be data’s George Washington?
This is why I’m so excited about Tableau’s forthcoming IPO. Few companies have done more to spur the democratization of data over the past few years than Tableau. It has become the face of next-generation business intelligence software thanks to its ease of use and focus on appealing visualization, and its free public software has found avid users even among relative data novices like myself. Tableau’s success and vision have no doubt inspired a number of the companies I’ve already referenced.
Assuming it begins its publicly traded life flush with capital, Tableau will not just be financially sound — it will also be in a position to help the burgeoning data democracy evolve into something that can last. More money means more features that Tableau can use to bolster sales (and further empower business users with data analysis), which should mean the company can also afford to continually improve its free service and perhaps put premium versions in the hands of more types of non-corporate professionals for free.
Tableau is already easy (I made this) — but not easy enough.
The bottom-up approach has already proven very effective in the worlds of cloud computing, software as a service and open-source software, and I have to assume it’s a win-win in analytics, too. Today’s free users will be tomorrow’s paying users once they get skilled enough to want to move on to bigger data sets and better features. But the base products have to be easy enough and useful enough to get started with, or companies will end up with a lot of registrations and downloads but very few avid users.
And if Tableau steps up its game around data democratization, I have to assume it will up the ante for its fellow large analytics vendors and even for startups. A race to empower those lower on the data ladder would be in stark contrast to the historical strategy of building ever-bigger, ever-more-advanced products targeting only the already-powerful data elite. That’s the kind of revolution I think we can all get behind.
Feature image courtesy of Shutterstock user Tiago Jorge da Silva Estima.
Great article Derrick – I appreciate your work on the topic here on GigaOm.
We’re seeing wider availability of reasonably priced BI and visualization tools that show harnessing all this data is possible – and I think even consumers are beginning to understand the value of the data and the ability to make meaning from it. One part of the puzzle that’s missing, from what I can see, is education – knowledge transfer about how individuals can use the tools, what good data science methods are, and how data citizens can actively contribute to the larger data analysis community. I see movements like the Open Data/Open Gov folks, and events like the NYC Big Apps hackathon, as part of the solution – but as individuals, where do we go to take part? What is the role of an informed, curious citizen in this? There are more venues for an individual to learn some of the ‘how’ of making sense of big data, such as taking a course online, but I’m not seeing anyone articulate a vision for how to connect all of the dots. To make sense of data, we need the tools, the practitioners and the analysis of the problems, but we also need a vision of how all of these will work together. If anyone has ideas about who’s got that vision, I’d love to hear it.
I feel one of the biggest impediments to the democratization of data is access. Most people know what they would like to answer, and how the data needs to be shaped to achieve that, but actually getting hold of the data to analyze can be one of the most difficult parts.
This is a bit of a plug, but we’re working on making data access easily attainable for everyone. Our platform http://www.quandl.com is a “search engine for data” that fetches time series data from a disparate set of sources and provides it in a simple, searchable form that lets users extract, validate, format, merge, graph, and share it however they want.
By providing the underlying data for analysis tools like Tableau, Statwing, and many others, we feel we can help to create the tool stack that empowers people to create a sustainable DIY data culture.
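To make that extract/validate/format/merge/graph workflow concrete, here is a minimal Python/pandas sketch of the kind of DIY pipeline a data service like this could feed. It is illustrative only: the CSV URLs and column names are hypothetical placeholders rather than Quandl’s actual API, and each step uses only a couple of well-known library calls before the result is handed off to a tool like Tableau or Statwing.
import pandas as pd
import matplotlib.pyplot as plt

# Extract: pull a (hypothetical) CSV export of a time series into a DataFrame
rates = pd.read_csv("https://example.com/data/unemployment_rate.csv",
                    parse_dates=["date"], index_col="date")

# Validate and format: drop missing rows and sort chronologically
rates = rates.dropna().sort_index()

# Merge: join a second (hypothetical) series on the shared date index
gdp = pd.read_csv("https://example.com/data/gdp_growth.csv",
                  parse_dates=["date"], index_col="date")
combined = rates.join(gdp, how="inner")

# Graph and share: a quick plot, then a CSV export for Tableau, Statwing, etc.
combined.plot(title="Unemployment vs. GDP growth")
plt.show()
combined.to_csv("combined_series.csv")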
In every company I’ve worked at, I’ve seen a major divide between IT analysts and business users. Part of it was cultural, but a major reason was, as you point out, “a historical strategy of building ever-bigger, ever-more-advanced products targeting only the already-powerful data elite”. The business user was typically left to use Excel to prepare and analyze data.
It took 15+ years, but thanks to new players like Tableau, Spotfire and Qlikview, which were sold primarily to business users and focused on ease of use, the data democratization process has shifted power to the business side. Some IT departments have now come around and are trying to accommodate these “shadow IT” projects by providing IT support and giving Tableau users limited access to enterprise data stores.
As for upping the ante for the traditional players, it has already happened. Over the last two years, the larger vendors have responded with products like Visual Insight (MicroStrategy), Visual Intelligence (SAP), PowerPivot (Microsoft), JMP (SAS), etc., taking aim at this segment of the market. The big data market is still new, but the trend of building user-friendly (or at the very least, SQL-aware) tools on top of Hadoop is also hitting its stride.
One good thing coming out of this data democratization is the realization that it has to be supported by a data governance effort. Otherwise we’ll see the unfortunate return of a major problem with data democracy: data chaos. Previously that meant comparing and reconciling two Excel spreadsheets; now we may end up reconciling the findings from two Tableau workbooks.
Thanks for the comment, and for making a really good point about data governance. Obviously, that’s not too big a concern for personal data use, but competing findings from lots of disparate data sets would be problematic. |
State Before: α : Type u_1
β : Type ?u.83670
inst✝ : LinearOrder α
a a₁ a₂ b b₁ b₂ c d : α
h : c ≤ max a b
⊢ Ico a b ∪ Ici c = Ici (min a c) State After: case inl
α : Type u_1
β : Type ?u.83670
inst✝ : LinearOrder α
a a₁ a₂ b b₁ b₂ c d : α
hab : a ≤ b
h : c ≤ b
⊢ Ico a b ∪ Ici c = Ici (min a c)
case inr
α : Type u_1
β : Type ?u.83670
inst✝ : LinearOrder α
a a₁ a₂ b b₁ b₂ c d : α
hab : b ≤ a
h : c ≤ a
⊢ Ico a b ∪ Ici c = Ici (min a c) Tactic: cases' le_total a b with hab hab <;> simp [hab] at h State Before: case inl
α : Type u_1
β : Type ?u.83670
inst✝ : LinearOrder α
a a₁ a₂ b b₁ b₂ c d : α
hab : a ≤ b
h : c ≤ b
⊢ Ico a b ∪ Ici c = Ici (min a c) State After: no goals Tactic: exact Ico_union_Ici' h State Before: case inr
α : Type u_1
β : Type ?u.83670
inst✝ : LinearOrder α
a a₁ a₂ b b₁ b₂ c d : α
hab : b ≤ a
h : c ≤ a
⊢ Ico a b ∪ Ici c = Ici (min a c) State After: no goals Tactic: simp [*] |
Require Import Coqlib.
Require Import Integers.
Require Import Values.
Require Import Memory.
Require Import Events.
Require Import Globalenvs.
Require Import Op.
(*Generalizations of some lemmas from Op.v.*)
Section EVAL_INJECT.
Variable F V: Type.
Variable genv: Genv.t F V.
Variable f: meminj.
Hypothesis globals: meminj_preserves_globals genv f.
Variable sp1: block.
Variable sp2: block.
Variable delta: Z.
Hypothesis sp_inj: f sp1 = Some(sp2, delta).
Lemma eval_addressing_inject':
forall addr vl1 vl2 v1 ofs,
val_list_inject f vl1 vl2 ->
eval_addressing genv (Vptr sp1 ofs) addr vl1 = Some v1 ->
exists v2,
eval_addressing genv (Vptr sp2 ofs) (shift_stack_addressing (Int.repr delta) addr) vl2 = Some v2
/\ val_inject f v1 v2.
Proof.
intros.
rewrite eval_shift_stack_addressing. simpl.
eapply eval_addressing_inj with (sp1 := Vptr sp1 ofs); eauto.
eapply symbol_address_inject; trivial.
Qed.
Lemma eval_operation_inject':
forall op vl1 vl2 v1 m1 m2 ofs,
val_list_inject f vl1 vl2 ->
Mem.inject f m1 m2 ->
eval_operation genv (Vptr sp1 ofs) op vl1 m1 = Some v1 ->
exists v2,
eval_operation genv (Vptr sp2 ofs) (shift_stack_operation (Int.repr delta) op) vl2 m2 = Some v2
/\ val_inject f v1 v2.
Proof.
intros.
rewrite eval_shift_stack_operation. simpl.
eapply eval_operation_inj with (sp1 := Vptr sp1 ofs) (m1 := m1); eauto.
eapply symbol_address_inject; trivial.
intros; eapply Mem.valid_pointer_inject_val; eauto.
intros; eapply Mem.weak_valid_pointer_inject_val; eauto.
intros; eapply Mem.weak_valid_pointer_inject_no_overflow; eauto.
intros; eapply Mem.different_pointers_inject; eauto.
Qed.
End EVAL_INJECT.
Section EVAL_INJECT2.
Variable F V: Type.
Variable genv: Genv.t F V.
Variable f: meminj.
Variable sp1: val.
Variable sp2: val.
Hypothesis sp_inj: val_inject f sp1 sp2 .
Hypothesis PG: meminj_preserves_globals genv f.
Variable vl1: list val.
Variable vl2: list val.
Hypothesis VL: val_list_inject f vl1 vl2.
Lemma eval_operation_inject'':
forall op v1 m1 m2,
Mem.inject f m1 m2 ->
eval_operation genv sp1 op vl1 m1 = Some v1 ->
exists v2,
eval_operation genv sp2 op vl2 m2 = Some v2
/\ val_inject f v1 v2.
Proof.
intros.
eapply eval_operation_inj.
eapply symbol_address_inject; trivial.
intros; eapply Mem.valid_pointer_inject_val; eauto.
intros; eapply Mem.weak_valid_pointer_inject_val; eauto.
intros; eapply Mem.weak_valid_pointer_inject_no_overflow; eauto.
intros; eapply Mem.different_pointers_inject; eauto.
eassumption. eassumption. eassumption.
Qed.
Hypothesis SPVundef: sp1<>Vundef.
Hypothesis SPPtr: forall b ofs, sp1<>Vptr b ofs.
Lemma eval_addressing_sp_scalar:
forall addr v1,
eval_addressing genv sp1 addr vl1 = Some v1 ->
exists v2, eval_addressing genv sp2 addr vl2 = Some v2
/\ val_inject f v1 v2.
Proof.
intros.
destruct addr; destruct vl1; simpl in H; try inv H; simpl; trivial.
destruct l; inv H1. inv VL. inv H3.
eexists; split. reflexivity.
eapply val_add_inject; eauto.
destruct l; inv H1. destruct l; inv H0. inv VL. inv H3. inv H5.
eexists; split. reflexivity.
eapply val_add_inject; eauto.
eapply val_add_inject; eauto.
destruct l; inv H1. inv VL. inv H3.
eexists; split. reflexivity.
eapply val_add_inject; eauto.
inv H1; try econstructor.
destruct l; inv H1. destruct l; inv H0. inv VL. inv H3. inv H5.
eexists; split. reflexivity.
eapply val_add_inject; eauto.
eapply val_add_inject; eauto.
inv H2; try econstructor.
inv VL.
eexists; split. reflexivity.
eapply symbol_address_inject; trivial.
destruct l; inv H1. inv VL. inv H3.
eexists; split. reflexivity.
eapply val_add_inject; eauto.
eapply symbol_address_inject; trivial.
destruct l; inv H1. inv VL. inv H3.
eexists; split. reflexivity.
eapply val_add_inject; eauto.
eapply symbol_address_inject; trivial.
inv H1; try econstructor.
inv VL.
eexists; split. reflexivity.
eapply val_add_inject; eauto.
Qed.
(*
Hypothesis globals: meminj_preserves_globals genv f.
Lemma eval_operation_inject'':
forall op vl1 vl2 v1 m1 m2,
val_list_inject f vl1 vl2 ->
Mem.inject f m1 m2 ->
eval_operation genv sp1 op vl1 m1 = Some v1 ->
exists v2,
eval_operation genv sp2 op vl2 m2 = Some v2
/\ val_inject f v1 v2.
Proof.
intros.
eapply eval_operation_inj.
eapply symbol_address_inject; trivial.
intros; eapply Mem.valid_pointer_inject_val; eauto.
intros; eapply Mem.weak_valid_pointer_inject_val; eauto.
intros; eapply Mem.weak_valid_pointer_inject_no_overflow; eauto.
intros; eapply Mem.different_pointers_inject; eauto.
eassumption. eassumption. eassumption.
Qed.
*)
End EVAL_INJECT2. |
/-
Copyright (c) 2019 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes
-/
import ring_theory.finiteness
import linear_algebra.dimension
/-!
# A module over a division ring is noetherian if and only if it is finite.
-/
universes u v
open_locale classical cardinal
open cardinal submodule module function
namespace is_noetherian
variables {K : Type u} {V : Type v} [division_ring K] [add_comm_group V] [module K V]
/--
A module over a division ring is noetherian if and only if
its dimension (as a cardinal) is strictly less than the first infinite cardinal `ℵ₀`.
-/
lemma iff_dim_lt_aleph_0 : is_noetherian K V ↔ module.rank K V < ℵ₀ :=
begin
let b := basis.of_vector_space K V,
rw [← b.mk_eq_dim'', lt_aleph_0_iff_finite],
split,
{ introI,
exact finite_of_linear_independent (basis.of_vector_space_index.linear_independent K V) },
{ assume hbfinite,
refine @is_noetherian_of_linear_equiv K (⊤ : submodule K V) V _
_ _ _ _ (linear_equiv.of_top _ rfl) (id _),
refine is_noetherian_of_fg_of_noetherian _ ⟨set.finite.to_finset hbfinite, _⟩,
rw [set.finite.coe_to_finset, ← b.span_eq, basis.coe_of_vector_space, subtype.range_coe] }
end
variables (K V)
/-- The dimension of a noetherian module over a division ring, as a cardinal,
is strictly less than the first infinite cardinal `ℵ₀`. -/
lemma dim_lt_aleph_0 : ∀ [is_noetherian K V], module.rank K V < ℵ₀ :=
is_noetherian.iff_dim_lt_aleph_0.1
variables {K V}
/-- In a noetherian module over a division ring, all bases are indexed by a finite type. -/
noncomputable def fintype_basis_index {ι : Type*} [is_noetherian K V] (b : basis ι K V) :
fintype ι :=
b.fintype_index_of_dim_lt_aleph_0 (dim_lt_aleph_0 K V)
/-- In a noetherian module over a division ring,
`basis.of_vector_space` is indexed by a finite type. -/
noncomputable instance [is_noetherian K V] : fintype (basis.of_vector_space_index K V) :=
fintype_basis_index (basis.of_vector_space K V)
/-- In a noetherian module over a division ring,
if a basis is indexed by a set, that set is finite. -/
lemma finite_basis_index {ι : Type*} {s : set ι} [is_noetherian K V] (b : basis s K V) :
s.finite :=
b.finite_index_of_dim_lt_aleph_0 (dim_lt_aleph_0 K V)
variables (K V)
/-- In a noetherian module over a division ring,
there exists a finite basis. This is the indexing `finset`. -/
noncomputable def finset_basis_index [is_noetherian K V] :
finset V :=
(finite_basis_index (basis.of_vector_space K V)).to_finset
@[simp] lemma coe_finset_basis_index [is_noetherian K V] :
(↑(finset_basis_index K V) : set V) = basis.of_vector_space_index K V :=
set.finite.coe_to_finset _
@[simp] lemma coe_sort_finset_basis_index [is_noetherian K V] :
((finset_basis_index K V) : Type*) = basis.of_vector_space_index K V :=
set.finite.coe_sort_to_finset _
/--
In a noetherian module over a division ring, there exists a finite basis.
This is indexed by the `finset` `finite_dimensional.finset_basis_index`.
This is in contrast to the result `finite_basis_index (basis.of_vector_space K V)`,
which provides a set and a `set.finite`.
-/
noncomputable def finset_basis [is_noetherian K V] :
basis (finset_basis_index K V) K V :=
(basis.of_vector_space K V).reindex (by simp)
@[simp] lemma range_finset_basis [is_noetherian K V] :
set.range (finset_basis K V) = basis.of_vector_space_index K V :=
by rw [finset_basis, basis.range_reindex, basis.range_of_vector_space]
variables {K V}
/-- A module over a division ring is noetherian if and only if it is finitely generated. -/
lemma iff_fg :
is_noetherian K V ↔ module.finite K V :=
begin
split,
{ introI h,
exact ⟨⟨finset_basis_index K V, by { convert (finset_basis K V).span_eq, simp }⟩⟩ },
{ rintros ⟨s, hs⟩,
rw [is_noetherian.iff_dim_lt_aleph_0, ← dim_top, ← hs],
exact lt_of_le_of_lt (dim_span_le _) (lt_aleph_0_iff_finite.2 (set.finite_mem_finset s)) }
end
end is_noetherian
|
[STATEMENT]
lemma co_vector_zero_vector_one_vector:
"co_vector x \<longleftrightarrow> zero_vector x \<and> one_vector x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. co_vector x = (zero_vector x \<and> one_vector x)
[PROOF STEP]
using co_vector_def one_vector_def zero_vector_def
[PROOF STATE]
proof (prove)
using this:
co_vector ?x \<equiv> ?x * bot = ?x
one_vector ?x \<equiv> ?x * bot \<le> ?x
zero_vector ?x \<equiv> ?x \<le> ?x * bot
goal (1 subgoal):
1. co_vector x = (zero_vector x \<and> one_vector x)
[PROOF STEP]
by auto |
State Before: α : Type u
m n o : ℕ
m' : Type ?u.22403
n' : Type ?u.22406
o' : Type ?u.22409
x : α
u : Fin (Nat.succ m) → α
⊢ vecCons x u 1 = vecHead u State After: α : Type u
m n o : ℕ
m' : Type ?u.22403
n' : Type ?u.22406
o' : Type ?u.22409
x : α
u : Fin (Nat.succ m) → α
⊢ u 0 = vecHead u Tactic: rw [← Fin.succ_zero_eq_one, cons_val_succ] State Before: α : Type u
m n o : ℕ
m' : Type ?u.22403
n' : Type ?u.22406
o' : Type ?u.22409
x : α
u : Fin (Nat.succ m) → α
⊢ u 0 = vecHead u State After: no goals Tactic: rfl |
function a = lehmer ( m, n )
%*****************************************************************************80
%
%% LEHMER returns the LEHMER matrix.
%
% Discussion:
%
% This matrix is also known as the "Westlake" matrix.
%
% Formula:
%
% A(I,J) = min ( I, J ) / max ( I, J )
%
% Example:
%
% N = 5
%
% 1/1 1/2 1/3 1/4 1/5
% 1/2 2/2 2/3 2/4 2/5
% 1/3 2/3 3/3 3/4 3/5
% 1/4 2/4 3/4 4/4 4/5
% 1/5 2/5 3/5 4/5 5/5
%
% Properties:
%
% A is symmetric: A' = A.
%
% Because A is symmetric, it is normal.
%
% Because A is normal, it is diagonalizable.
%
% A is positive definite.
%
% A is totally nonnegative.
%
% The inverse of A is tridiagonal.
%
% The condition number of A lies between N and 4*N*N.
%
% The family of matrices is nested as a function of N.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 14 October 2007
%
% Author:
%
% John Burkardt
%
% Reference:
%
% Morris Newman, John Todd,
% The evaluation of matrix inversion programs,
% Journal of the Society for Industrial and Applied Mathematics,
% Volume 6, Number 4, 1958, pages 466-476.
%
% Solutions to problem E710, proposed by DH Lehmer: The inverse of
% a matrix.
% American Mathematical Monthly,
% Volume 53, Number 9, November 1946, pages 534-535.
%
% John Todd,
% Basic Numerical Mathematics, Volume 2: Numerical Algebra,
% Academic Press, 1977, page 154.
%
% Parameters:
%
% Input, integer M, N, the number of rows and columns of A.
%
% Output, real A(M,N), the matrix.
%
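%  Usage sketch (illustrative only, not part of the original routine;
%  it assumes this file is on the MATLAB path):
%
%    A = lehmer ( 5, 5 );   % reproduces the 5x5 example shown above
%    B = inv ( A );         % B should be tridiagonal, per the properties list
%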
a = zeros ( m, n );
for i = 1 : m
for j = 1 : n
a(i,j) = ( min ( i, j ) ) / ( max ( i, j ) );
end
end
return
end
|
import System
import Data.String
main : IO ()
main = do systime <- time
-- sanity checks on time value
if systime > 1630268000 && systime < 10000000000
then putStrLn "Retrieved unix timestamp from time function."
else putStrLn "Failed to retrieve a unix timestamp from time function."
|
//
// Copyright (c) 2019 Vinnie Falco ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/CPPAlliance/url
//
#ifndef BOOST_URL_IMPL_ERROR_IPP
#define BOOST_URL_IMPL_ERROR_IPP
#include <boost/url/error.hpp>
namespace boost {
namespace urls {
parse_error::
parse_error()
: std::invalid_argument(
"parse error")
{
}
void
parse_error::
raise()
{
BOOST_THROW_EXCEPTION(
parse_error());
}
//---
invalid_part::
invalid_part()
: std::invalid_argument(
"bad url argument")
{
}
void
invalid_part::
raise()
{
BOOST_THROW_EXCEPTION(
invalid_part());
}
//---
too_large::
too_large()
: std::length_error(
"too large")
{
}
void
too_large::
raise()
{
BOOST_THROW_EXCEPTION(
too_large());
}
//---
out_of_range::
out_of_range()
: std::out_of_range(
"out of range")
{
}
void
out_of_range::
raise()
{
BOOST_THROW_EXCEPTION(
out_of_range());
}
//----------------------------------------------------------
error_code
make_error_code(error e)
{
struct codes : error_category
{
const char*
name() const noexcept override
{
return "boost.url";
}
std::string
message(int ev) const override
{
switch(static_cast<error>(ev))
{
default:
case error::no_match: return "no match for element";
case error::syntax: return "syntax";
case error::invalid: return "invalid";
case error::missing_scheme: return "missing scheme";
case error::bad_scheme_start_char: return "bad scheme start char";
case error::bad_scheme_char: return "bad scheme char";
case error::bad_username_char: return "bad user char";
case error::bad_userinfo_char: return "bad userinfo char";
case error::bad_port_char: return "bad port char";
case error::port_overflow: return "port overflow";
case error::missing_hostname: return "missing hostname";
case error::missing_port: return "missing port";
case error::bad_pct_encoding_digit: return "bad pct-encoding digit";
case error::incomplete_pct_encoding: return "incomplete pct-encoding";
case error::illegal_reserved_char: return "illegal reserved char";
}
}
error_condition
default_error_condition(
int ev) const noexcept override
{
switch(static_cast<error>(ev))
{
default:
return {ev, *this};
case error::no_match:
case error::syntax:
case error::invalid:
case error::missing_scheme:
case error::bad_scheme_start_char:
case error::bad_scheme_char:
case error::bad_username_char:
case error::bad_userinfo_char:
case error::bad_port_char:
case error::port_overflow:
case error::missing_hostname:
case error::missing_port:
case error::bad_pct_encoding_digit:
case error::incomplete_pct_encoding:
case error::illegal_reserved_char:
return condition::parse_error;
}
}
};
static codes const cat{};
return error_code{static_cast<
std::underlying_type<error>::type>(e), cat};
}
error_condition
make_error_condition(condition c)
{
struct codes : error_category
{
const char*
name() const noexcept override
{
return "boost.url";
}
std::string
message(int cv) const override
{
switch(static_cast<condition>(cv))
{
default:
case condition::parse_error:
return "parsing error";
}
}
};
static codes const cat{};
return error_condition{static_cast<
std::underlying_type<condition>::type>(c), cat};
}
} // urls
} // boost
#endif
|
(* Author: Tobias Nipkow *)
subsection "Hoare Logic for Total Correctness"
theory Hoare_Total
imports Hoare_Examples
begin
subsubsection "Hoare Logic for Total Correctness --- Separate Termination Relation"
text{* Note that this definition of total validity @{text"\<Turnstile>\<^sub>t"} only
works if execution is deterministic (which it is in our case). *}
definition hoare_tvalid :: "assn \<Rightarrow> com \<Rightarrow> assn \<Rightarrow> bool"
("\<Turnstile>\<^sub>t {(1_)}/ (_)/ {(1_)}" 50) where
"\<Turnstile>\<^sub>t {P}c{Q} \<longleftrightarrow> (\<forall>s. P s \<longrightarrow> (\<exists>t. (c,s) \<Rightarrow> t \<and> Q t))"
text{* Provability of Hoare triples in the proof system for total
correctness is written @{text"\<turnstile>\<^sub>t {P}c{Q}"} and defined
inductively. The rules for @{text"\<turnstile>\<^sub>t"} differ from those for
@{text"\<turnstile>"} only in the one place where nontermination can arise: the
@{term While}-rule. *}
inductive
hoaret :: "assn \<Rightarrow> com \<Rightarrow> assn \<Rightarrow> bool" ("\<turnstile>\<^sub>t ({(1_)}/ (_)/ {(1_)})" 50)
where
Skip: "\<turnstile>\<^sub>t {P} SKIP {P}" |
Assign: "\<turnstile>\<^sub>t {\<lambda>s. P(s[a/x])} x::=a {P}" |
Seq: "\<lbrakk> \<turnstile>\<^sub>t {P\<^sub>1} c\<^sub>1 {P\<^sub>2}; \<turnstile>\<^sub>t {P\<^sub>2} c\<^sub>2 {P\<^sub>3} \<rbrakk> \<Longrightarrow> \<turnstile>\<^sub>t {P\<^sub>1} c\<^sub>1;;c\<^sub>2 {P\<^sub>3}" |
If: "\<lbrakk> \<turnstile>\<^sub>t {\<lambda>s. P s \<and> bval b s} c\<^sub>1 {Q}; \<turnstile>\<^sub>t {\<lambda>s. P s \<and> \<not> bval b s} c\<^sub>2 {Q} \<rbrakk>
\<Longrightarrow> \<turnstile>\<^sub>t {P} IF b THEN c\<^sub>1 ELSE c\<^sub>2 {Q}" |
While:
"(\<And>n::nat.
\<turnstile>\<^sub>t {\<lambda>s. P s \<and> bval b s \<and> T s n} c {\<lambda>s. P s \<and> (\<exists>n'<n. T s n')})
\<Longrightarrow> \<turnstile>\<^sub>t {\<lambda>s. P s \<and> (\<exists>n. T s n)} WHILE b DO c {\<lambda>s. P s \<and> \<not>bval b s}" |
conseq: "\<lbrakk> \<forall>s. P' s \<longrightarrow> P s; \<turnstile>\<^sub>t {P}c{Q}; \<forall>s. Q s \<longrightarrow> Q' s \<rbrakk> \<Longrightarrow>
\<turnstile>\<^sub>t {P'}c{Q'}"
text{* The @{term While}-rule is like the one for partial correctness but it
requires additionally that with every execution of the loop body some measure
relation @{term[source]"T :: state \<Rightarrow> nat \<Rightarrow> bool"} decreases.
The following functional version is more intuitive: *}
lemma While_fun:
"\<lbrakk> \<And>n::nat. \<turnstile>\<^sub>t {\<lambda>s. P s \<and> bval b s \<and> n = f s} c {\<lambda>s. P s \<and> f s < n}\<rbrakk>
\<Longrightarrow> \<turnstile>\<^sub>t {P} WHILE b DO c {\<lambda>s. P s \<and> \<not>bval b s}"
by (rule While [where T="\<lambda>s n. n = f s", simplified])
text{* Building in the consequence rule: *}
lemma Assign': "\<forall>s. P s \<longrightarrow> Q(s[a/x]) \<Longrightarrow> \<turnstile>\<^sub>t {P} x ::= a {Q}"
by (simp add: strengthen_pre[OF _ Assign])
lemma While_fun':
assumes "\<And>n::nat. \<turnstile>\<^sub>t {\<lambda>s. P s \<and> bval b s \<and> n = f s} c {\<lambda>s. P s \<and> f s < n}"
and "\<forall>s. P s \<and> \<not> bval b s \<longrightarrow> Q s"
shows "\<turnstile>\<^sub>t {P} WHILE b DO c {Q}"
by(blast intro: assms(1) weaken_post[OF While_fun assms(2)])
text{* Our standard example: *}
lemma "\<turnstile>\<^sub>t {\<lambda>s. s ''x'' = i} ''y'' ::= N 0;; wsum {\<lambda>s. s ''y'' = sum i}"
apply(rule Seq)
prefer 2
apply(rule While_fun' [where P = "\<lambda>s. (s ''y'' = sum i - sum(s ''x''))"
and f = "\<lambda>s. nat(s ''x'')"])
apply(rule Seq)
prefer 2
apply(rule Assign)
apply(rule Assign')
apply simp
apply(simp)
apply(rule Assign')
apply simp
done
text{* The soundness theorem: *}
theorem hoaret_sound: "\<turnstile>\<^sub>t {P}c{Q} \<Longrightarrow> \<Turnstile>\<^sub>t {P}c{Q}"
proof(unfold hoare_tvalid_def, induction rule: hoaret.induct)
case (While P b T c)
{
fix s n
have "\<lbrakk> P s; T s n \<rbrakk> \<Longrightarrow> \<exists>t. (WHILE b DO c, s) \<Rightarrow> t \<and> P t \<and> \<not> bval b t"
proof(induction "n" arbitrary: s rule: less_induct)
case (less n)
thus ?case by (metis While.IH WhileFalse WhileTrue)
qed
}
thus ?case by auto
next
case If thus ?case by auto blast
qed fastforce+
text{*
The completeness proof proceeds along the same lines as the one for partial
correctness. First we have to strengthen our notion of weakest precondition
to take termination into account: *}
definition wpt :: "com \<Rightarrow> assn \<Rightarrow> assn" ("wp\<^sub>t") where
"wp\<^sub>t c Q = (\<lambda>s. \<exists>t. (c,s) \<Rightarrow> t \<and> Q t)"
lemma [simp]: "wp\<^sub>t (x ::= e) Q = (\<lambda>s. Q(s(x := aval e s)))"
by(auto intro!: ext simp: wpt_def)
lemma [simp]: "wp\<^sub>t (c\<^sub>1;;c\<^sub>2) Q = wp\<^sub>t c\<^sub>1 (wp\<^sub>t c\<^sub>2 Q)"
unfolding wpt_def
apply(rule ext)
apply auto
done
lemma [simp]:
"wp\<^sub>t (IF b THEN c\<^sub>1 ELSE c\<^sub>2) Q = (\<lambda>s. wp\<^sub>t (if bval b s then c\<^sub>1 else c\<^sub>2) Q s)"
apply(unfold wpt_def)
apply(rule ext)
apply auto
done
text{* Now we define the number of iterations @{term "WHILE b DO c"} needs to
terminate when started in state @{text s}. Because this is a truly partial
function, we define it as an (inductive) relation first: *}
inductive Its :: "bexp \<Rightarrow> com \<Rightarrow> state \<Rightarrow> nat \<Rightarrow> bool" where
Its_0: "\<not> bval b s \<Longrightarrow> Its b c s 0" |
Its_Suc: "\<lbrakk> bval b s; (c,s) \<Rightarrow> s'; Its b c s' n \<rbrakk> \<Longrightarrow> Its b c s (Suc n)"
text{* The relation is in fact a function: *}
lemma Its_fun: "Its b c s n \<Longrightarrow> Its b c s n' \<Longrightarrow> n=n'"
proof(induction arbitrary: n' rule:Its.induct)
case Its_0 thus ?case by(metis Its.cases)
next
case Its_Suc thus ?case by(metis Its.cases big_step_determ)
qed
text{* For all terminating loops, @{const Its} yields a result: *}
lemma WHILE_Its: "(WHILE b DO c,s) \<Rightarrow> t \<Longrightarrow> \<exists>n. Its b c s n"
proof(induction "WHILE b DO c" s t rule: big_step_induct)
case WhileFalse thus ?case by (metis Its_0)
next
case WhileTrue thus ?case by (metis Its_Suc)
qed
lemma wpt_is_pre: "\<turnstile>\<^sub>t {wp\<^sub>t c Q} c {Q}"
proof (induction c arbitrary: Q)
case SKIP show ?case by (auto intro:hoaret.Skip)
next
case Assign show ?case by (auto intro:hoaret.Assign)
next
case Seq thus ?case by (auto intro:hoaret.Seq)
next
case If thus ?case by (auto intro:hoaret.If hoaret.conseq)
next
case (While b c)
let ?w = "WHILE b DO c"
let ?T = "Its b c"
have "\<forall>s. wp\<^sub>t ?w Q s \<longrightarrow> wp\<^sub>t ?w Q s \<and> (\<exists>n. Its b c s n)"
unfolding wpt_def by (metis WHILE_Its)
moreover
{ fix n
let ?R = "\<lambda>s'. wp\<^sub>t ?w Q s' \<and> (\<exists>n'<n. ?T s' n')"
{ fix s t assume "bval b s" and "?T s n" and "(?w, s) \<Rightarrow> t" and "Q t"
from `bval b s` and `(?w, s) \<Rightarrow> t` obtain s' where
"(c,s) \<Rightarrow> s'" "(?w,s') \<Rightarrow> t" by auto
from `(?w, s') \<Rightarrow> t` obtain n' where "?T s' n'"
by (blast dest: WHILE_Its)
with `bval b s` and `(c, s) \<Rightarrow> s'` have "?T s (Suc n')" by (rule Its_Suc)
with `?T s n` have "n = Suc n'" by (rule Its_fun)
with `(c,s) \<Rightarrow> s'` and `(?w,s') \<Rightarrow> t` and `Q t` and `?T s' n'`
have "wp\<^sub>t c ?R s" by (auto simp: wpt_def)
}
hence "\<forall>s. wp\<^sub>t ?w Q s \<and> bval b s \<and> ?T s n \<longrightarrow> wp\<^sub>t c ?R s"
unfolding wpt_def by auto
(* by (metis WhileE Its_Suc Its_fun WHILE_Its lessI) *)
note strengthen_pre[OF this While.IH]
} note hoaret.While[OF this]
moreover have "\<forall>s. wp\<^sub>t ?w Q s \<and> \<not> bval b s \<longrightarrow> Q s"
by (auto simp add:wpt_def)
ultimately show ?case by (rule conseq)
qed
text{*\noindent In the @{term While}-case, @{const Its} provides the obvious
termination argument.
The actual completeness theorem follows directly, in the same manner
as for partial correctness: *}
theorem hoaret_complete: "\<Turnstile>\<^sub>t {P}c{Q} \<Longrightarrow> \<turnstile>\<^sub>t {P}c{Q}"
apply(rule strengthen_pre[OF _ wpt_is_pre])
apply(auto simp: hoare_tvalid_def wpt_def)
done
corollary hoaret_sound_complete: "\<turnstile>\<^sub>t {P}c{Q} \<longleftrightarrow> \<Turnstile>\<^sub>t {P}c{Q}"
by (metis hoaret_sound hoaret_complete)
end
|
/-
Copyright (c) 2019 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn, Yury Kudryashov, Sébastien Gouëzel, Chris Hughes
-/
import data.fin.basic
import data.pi.lex
import data.set.intervals.basic
/-!
# Operation on tuples
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
We interpret maps `Π i : fin n, α i` as `n`-tuples of elements of possibly varying type `α i`,
`(α 0, …, α (n-1))`. A particular case is `fin n → α` of elements with all the same type.
In this case when `α i` is a constant map, then tuples are isomorphic (but not definitionally equal)
to `vector`s.
We define the following operations:
* `fin.tail` : the tail of an `n+1` tuple, i.e., its last `n` entries;
* `fin.cons` : adding an element at the beginning of an `n`-tuple, to get an `n+1`-tuple;
* `fin.init` : the beginning of an `n+1` tuple, i.e., its first `n` entries;
* `fin.snoc` : adding an element at the end of an `n`-tuple, to get an `n+1`-tuple. The name `snoc`
comes from `cons` (i.e., adding an element to the left of a tuple) read in reverse order.
* `fin.insert_nth` : insert an element to a tuple at a given position.
* `fin.find p` : returns the first index `n` where `p n` is satisfied, and `none` if it is never
satisfied.
* `fin.append a b` : append two tuples.
* `fin.repeat n a` : repeat a tuple `n` times.
-/
universes u v
namespace fin
variables {m n : ℕ}
open function
section tuple
/-- There is exactly one tuple of size zero. -/
example (α : fin 0 → Sort u) : unique (Π i : fin 0, α i) :=
by apply_instance
@[simp] lemma tuple0_le {α : Π i : fin 0, Type*} [Π i, preorder (α i)] (f g : Π i, α i) : f ≤ g :=
fin_zero_elim
variables {α : fin (n+1) → Type u} (x : α 0) (q : Πi, α i) (p : Π(i : fin n), α (i.succ))
(i : fin n) (y : α i.succ) (z : α 0)
/-- The tail of an `n+1` tuple, i.e., its last `n` entries. -/
def tail (q : Πi, α i) : (Π(i : fin n), α (i.succ)) := λ i, q i.succ
lemma tail_def {n : ℕ} {α : fin (n+1) → Type*} {q : Π i, α i} :
tail (λ k : fin (n+1), q k) = (λ k : fin n, q k.succ) := rfl
/-- Adding an element at the beginning of an `n`-tuple, to get an `n+1`-tuple. -/
def cons (x : α 0) (p : Π(i : fin n), α (i.succ)) : Πi, α i :=
λ j, fin.cases x p j
@[simp] lemma tail_cons : tail (cons x p) = p :=
by simp [tail, cons]
@[simp] lemma cons_succ : cons x p i.succ = p i :=
by simp [cons]
@[simp] lemma cons_zero : cons x p 0 = x :=
by simp [cons]
/-- Updating a tuple and adding an element at the beginning commute. -/
@[simp] lemma cons_update : cons x (update p i y) = update (cons x p) i.succ y :=
begin
ext j,
by_cases h : j = 0,
{ rw h, simp [ne.symm (succ_ne_zero i)] },
{ let j' := pred j h,
have : j'.succ = j := succ_pred j h,
rw [← this, cons_succ],
by_cases h' : j' = i,
{ rw h', simp },
{ have : j'.succ ≠ i.succ, by rwa [ne.def, succ_inj],
rw [update_noteq h', update_noteq this, cons_succ] } }
end
/-- As a binary function, `fin.cons` is injective. -/
lemma cons_injective2 : function.injective2 (@cons n α) :=
λ x₀ y₀ x y h, ⟨congr_fun h 0, funext $ λ i, by simpa using congr_fun h (fin.succ i)⟩
@[simp] lemma cons_eq_cons {x₀ y₀ : α 0} {x y : Π i : fin n, α (i.succ)} :
cons x₀ x = cons y₀ y ↔ x₀ = y₀ ∧ x = y :=
cons_injective2.eq_iff
lemma cons_left_injective (x : Π i : fin n, α (i.succ)) : function.injective (λ x₀, cons x₀ x) :=
cons_injective2.left _
lemma cons_right_injective (x₀ : α 0) : function.injective (cons x₀) :=
cons_injective2.right _
/-- Adding an element at the beginning of a tuple and then updating it amounts to adding it
directly. -/
lemma update_cons_zero : update (cons x p) 0 z = cons z p :=
begin
ext j,
by_cases h : j = 0,
{ rw h, simp },
{ simp only [h, update_noteq, ne.def, not_false_iff],
let j' := pred j h,
have : j'.succ = j := succ_pred j h,
rw [← this, cons_succ, cons_succ] }
end
/-- Concatenating the first element of a tuple with its tail gives back the original tuple -/
@[simp] lemma cons_self_tail : cons (q 0) (tail q) = q :=
begin
ext j,
by_cases h : j = 0,
{ rw h, simp },
{ let j' := pred j h,
have : j'.succ = j := succ_pred j h,
rw [← this, tail, cons_succ] }
end
/-- Recurse on an `n+1`-tuple by splitting it into a single element and an `n`-tuple. -/
@[elab_as_eliminator]
def cons_cases {P : (Π i : fin n.succ, α i) → Sort v}
(h : ∀ x₀ x, P (fin.cons x₀ x)) (x : (Π i : fin n.succ, α i)) : P x :=
_root_.cast (by rw cons_self_tail) $ h (x 0) (tail x)
@[simp] lemma cons_cases_cons {P : (Π i : fin n.succ, α i) → Sort v}
(h : Π x₀ x, P (fin.cons x₀ x)) (x₀ : α 0) (x : Π i : fin n, α i.succ) :
@cons_cases _ _ _ h (cons x₀ x) = h x₀ x :=
begin
rw [cons_cases, cast_eq],
congr',
exact tail_cons _ _
end
/-- Recurse on an tuple by splitting into `fin.elim0` and `fin.cons`. -/
@[elab_as_eliminator]
def cons_induction {α : Type*} {P : Π {n : ℕ}, (fin n → α) → Sort v}
(h0 : P fin.elim0)
(h : ∀ {n} x₀ (x : fin n → α), P x → P (fin.cons x₀ x)) : Π {n : ℕ} (x : fin n → α), P x
| 0 x := by convert h0
| (n + 1) x := cons_cases (λ x₀ x, h _ _ $ cons_induction _) x
lemma cons_injective_of_injective {α} {x₀ : α} {x : fin n → α} (hx₀ : x₀ ∉ set.range x)
(hx : function.injective x) :
function.injective (cons x₀ x : fin n.succ → α) :=
begin
refine fin.cases _ _,
{ refine fin.cases _ _,
{ intro _,
refl },
{ intros j h,
rw [cons_zero, cons_succ] at h,
exact hx₀.elim ⟨_, h.symm⟩ } },
{ intro i,
refine fin.cases _ _,
{ intro h,
rw [cons_zero, cons_succ] at h,
exact hx₀.elim ⟨_, h⟩ },
{ intros j h,
rw [cons_succ, cons_succ] at h,
exact congr_arg _ (hx h), } },
end
lemma cons_injective_iff {α} {x₀ : α} {x : fin n → α} :
function.injective (cons x₀ x : fin n.succ → α) ↔ x₀ ∉ set.range x ∧ function.injective x :=
begin
refine ⟨λ h, ⟨_, _⟩, λ h, cons_injective_of_injective h.1 h.2⟩,
{ rintros ⟨i, hi⟩,
replace h := @h i.succ 0,
simpa [hi, succ_ne_zero] using h, },
{ simpa [function.comp] using h.comp (fin.succ_injective _) },
end
@[simp] lemma forall_fin_zero_pi {α : fin 0 → Sort*} {P : (Π i, α i) → Prop} :
(∀ x, P x) ↔ P fin_zero_elim :=
⟨λ h, h _, λ h x, subsingleton.elim fin_zero_elim x ▸ h⟩
@[simp] lemma exists_fin_zero_pi {α : fin 0 → Sort*} {P : (Π i, α i) → Prop} :
(∃ x, P x) ↔ P fin_zero_elim :=
⟨λ ⟨x, h⟩, subsingleton.elim x fin_zero_elim ▸ h, λ h, ⟨_, h⟩⟩
lemma forall_fin_succ_pi {P : (Π i, α i) → Prop} :
(∀ x, P x) ↔ (∀ a v, P (fin.cons a v)) :=
⟨λ h a v, h (fin.cons a v), cons_cases⟩
lemma exists_fin_succ_pi {P : (Π i, α i) → Prop} :
(∃ x, P x) ↔ (∃ a v, P (fin.cons a v)) :=
⟨λ ⟨x, h⟩, ⟨x 0, tail x, (cons_self_tail x).symm ▸ h⟩, λ ⟨a, v, h⟩, ⟨_, h⟩⟩
/-- Updating the first element of a tuple does not change the tail. -/
@[simp] lemma tail_update_zero : tail (update q 0 z) = tail q :=
by { ext j, simp [tail, fin.succ_ne_zero] }
/-- Updating a nonzero element and taking the tail commute. -/
@[simp] lemma tail_update_succ :
tail (update q i.succ y) = update (tail q) i y :=
begin
ext j,
by_cases h : j = i,
{ rw h, simp [tail] },
{ simp [tail, (fin.succ_injective n).ne h, h] }
end
lemma comp_cons {α : Type*} {β : Type*} (g : α → β) (y : α) (q : fin n → α) :
g ∘ (cons y q) = cons (g y) (g ∘ q) :=
begin
ext j,
by_cases h : j = 0,
{ rw h, refl },
{ let j' := pred j h,
have : j'.succ = j := succ_pred j h,
rw [← this, cons_succ, comp_app, cons_succ] }
end
lemma comp_tail {α : Type*} {β : Type*} (g : α → β) (q : fin n.succ → α) :
g ∘ (tail q) = tail (g ∘ q) :=
by { ext j, simp [tail] }
lemma le_cons [Π i, preorder (α i)] {x : α 0} {q : Π i, α i} {p : Π i : fin n, α i.succ} :
q ≤ cons x p ↔ q 0 ≤ x ∧ tail q ≤ p :=
forall_fin_succ.trans $ and_congr iff.rfl $ forall_congr $ λ j, by simp [tail]
lemma cons_le [Π i, preorder (α i)] {x : α 0} {q : Π i, α i} {p : Π i : fin n, α i.succ} :
cons x p ≤ q ↔ x ≤ q 0 ∧ p ≤ tail q :=
@le_cons _ (λ i, (α i)ᵒᵈ) _ x q p
lemma cons_le_cons [Π i, preorder (α i)] {x₀ y₀ : α 0} {x y : Π i : fin n, α (i.succ)} :
cons x₀ x ≤ cons y₀ y ↔ x₀ ≤ y₀ ∧ x ≤ y :=
forall_fin_succ.trans $ and_congr_right' $ by simp only [cons_succ, pi.le_def]
lemma pi_lex_lt_cons_cons {x₀ y₀ : α 0} {x y : Π i : fin n, α (i.succ)}
(s : Π {i : fin n.succ}, α i → α i → Prop) :
pi.lex (<) @s (fin.cons x₀ x) (fin.cons y₀ y) ↔
s x₀ y₀ ∨ x₀ = y₀ ∧ pi.lex (<) (λ i : fin n, @s i.succ) x y :=
begin
simp_rw [pi.lex, fin.exists_fin_succ, fin.cons_succ, fin.cons_zero, fin.forall_fin_succ],
simp [and_assoc, exists_and_distrib_left],
end
lemma range_fin_succ {α} (f : fin (n + 1) → α) :
set.range f = insert (f 0) (set.range (fin.tail f)) :=
set.ext $ λ y, exists_fin_succ.trans $ eq_comm.or iff.rfl
@[simp] lemma range_cons {α : Type*} {n : ℕ} (x : α) (b : fin n → α) :
set.range (fin.cons x b : fin n.succ → α) = insert x (set.range b) :=
by rw [range_fin_succ, cons_zero, tail_cons]
section append
/-- Concatenate a tuple of length `m` with a tuple of length `n` to get a tuple of length `m + n`.
This is a non-dependent version of `fin.add_cases`. -/
def append {α : Type*} (a : fin m → α) (b : fin n → α) : fin (m + n) → α :=
@fin.add_cases _ _ (λ _, α) a b
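-- An informal illustration (not checked here): for `u = ![1, 2] : fin 2 → ℕ` and
-- `v = ![3] : fin 1 → ℕ`, `append u v` behaves like the tuple `![1, 2, 3] : fin (2 + 1) → ℕ`,
-- and `append_left`/`append_right` below recover the entries of `u` and `v` from it.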
@[simp] lemma append_left {α : Type*} (u : fin m → α) (v : fin n → α) (i : fin m) :
append u v (fin.cast_add n i) = u i :=
add_cases_left _ _ _
@[simp] lemma append_right {α : Type*} (u : fin m → α) (v : fin n → α) (i : fin n) :
append u v (nat_add m i) = v i :=
add_cases_right _ _ _
lemma append_right_nil {α : Type*} (u : fin m → α) (v : fin n → α) (hv : n = 0) :
append u v = u ∘ fin.cast (by rw [hv, add_zero]) :=
begin
refine funext (fin.add_cases (λ l, _) (λ r, _)),
{ rw [append_left, function.comp_apply],
refine congr_arg u (fin.ext _),
simp },
{ exact (fin.cast hv r).elim0' }
end
@[simp] lemma append_elim0' {α : Type*} (u : fin m → α) :
append u fin.elim0' = u ∘ fin.cast (add_zero _) :=
append_right_nil _ _ rfl
lemma append_left_nil {α : Type*} (u : fin m → α) (v : fin n → α) (hu : m = 0) :
append u v = v ∘ fin.cast (by rw [hu, zero_add]) :=
begin
refine funext (fin.add_cases (λ l, _) (λ r, _)),
{ exact (fin.cast hu l).elim0' },
{ rw [append_right, function.comp_apply],
refine congr_arg v (fin.ext _),
simp [hu] },
end
@[simp] lemma elim0'_append {α : Type*} (v : fin n → α) :
append fin.elim0' v = v ∘ fin.cast (zero_add _) :=
append_left_nil _ _ rfl
lemma append_assoc {p : ℕ} {α : Type*} (a : fin m → α) (b : fin n → α) (c : fin p → α) :
append (append a b) c = append a (append b c) ∘ fin.cast (add_assoc _ _ _) :=
begin
ext i,
rw function.comp_apply,
refine fin.add_cases (λ l, _) (λ r, _) i,
{ rw append_left,
refine fin.add_cases (λ ll, _) (λ lr, _) l,
{ rw append_left,
simp [cast_add_cast_add] },
{ rw append_right,
simp [cast_add_nat_add], }, },
{ rw append_right,
simp [←nat_add_nat_add] },
end
/-- Appending a one-tuple to the left is the same as `fin.cons`. -/
lemma append_left_eq_cons {α : Type*} {n : ℕ} (x₀ : fin 1 → α) (x : fin n → α):
fin.append x₀ x = fin.cons (x₀ 0) x ∘ fin.cast (add_comm _ _) :=
begin
ext i,
refine fin.add_cases _ _ i; clear i,
{ intro i,
rw [subsingleton.elim i 0, fin.append_left, function.comp_apply, eq_comm],
exact fin.cons_zero _ _, },
{ intro i,
rw [fin.append_right, function.comp_apply, fin.cast_nat_add, eq_comm, fin.add_nat_one],
exact fin.cons_succ _ _ _ },
end
end append
section repeat
/-- Repeat `a` `m` times. For example `fin.repeat 2 ![0, 3, 7] = ![0, 3, 7, 0, 3, 7]`. -/
@[simp] def repeat {α : Type*} (m : ℕ) (a : fin n → α) : fin (m * n) → α
| i := a i.mod_nat
@[simp] lemma repeat_zero {α : Type*} (a : fin n → α) :
repeat 0 a = fin.elim0' ∘ cast (zero_mul _) :=
funext $ λ x, (cast (zero_mul _) x).elim0'
@[simp] lemma repeat_one {α : Type*} (a : fin n → α) :
repeat 1 a = a ∘ cast (one_mul _) :=
begin
generalize_proofs h,
apply funext,
rw (fin.cast h.symm).surjective.forall,
intro i,
simp [mod_nat, nat.mod_eq_of_lt i.is_lt],
end
lemma repeat_succ {α : Type*} (a : fin n → α) (m : ℕ) :
repeat m.succ a = append a (repeat m a) ∘ cast ((nat.succ_mul _ _).trans (add_comm _ _)) :=
begin
generalize_proofs h,
apply funext,
rw (fin.cast h.symm).surjective.forall,
refine fin.add_cases (λ l, _) (λ r, _),
{ simp [mod_nat, nat.mod_eq_of_lt l.is_lt], },
{ simp [mod_nat] }
end
@[simp] lemma repeat_add {α : Type*} (a : fin n → α) (m₁ m₂ : ℕ) :
repeat (m₁ + m₂) a = append (repeat m₁ a) (repeat m₂ a) ∘ cast (add_mul _ _ _) :=
begin
generalize_proofs h,
apply funext,
rw (fin.cast h.symm).surjective.forall,
refine fin.add_cases (λ l, _) (λ r, _),
{ simp [mod_nat, nat.mod_eq_of_lt l.is_lt], },
{ simp [mod_nat, nat.add_mod] }
end
end repeat
end tuple
section tuple_right
/-! In the previous section, we have discussed inserting or removing elements on the left of a
tuple. In this section, we do the same on the right. A difference is that `fin (n+1)` is constructed
inductively from `fin n` starting from the left, not from the right. This implies that Lean needs
more help to realize that elements belong to the right types, i.e., we need to insert casts at
several places. -/
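-- Informally, for a non-dependent tuple `q` with entries `a, b, c` one has `init q = (a, b)`
-- and `snoc (a, b) c = (a, b, c)`; the dependent statements below need `cast_succ`/`last`
-- casts to make the types line up, as explained above.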
variables {α : fin (n+1) → Type u} (x : α (last n)) (q : Πi, α i) (p : Π(i : fin n), α i.cast_succ)
(i : fin n) (y : α i.cast_succ) (z : α (last n))
/-- The beginning of an `n+1` tuple, i.e., its first `n` entries -/
def init (q : Πi, α i) (i : fin n) : α i.cast_succ :=
q i.cast_succ
lemma init_def {n : ℕ} {α : fin (n+1) → Type*} {q : Π i, α i} :
init (λ k : fin (n+1), q k) = (λ k : fin n, q k.cast_succ) := rfl
/-- Adding an element at the end of an `n`-tuple, to get an `n+1`-tuple. The name `snoc` comes from
`cons` (i.e., adding an element to the left of a tuple) read in reverse order. -/
def snoc (p : Π(i : fin n), α i.cast_succ) (x : α (last n)) (i : fin (n+1)) : α i :=
if h : i.val < n
then _root_.cast (by rw fin.cast_succ_cast_lt i h) (p (cast_lt i h))
else _root_.cast (by rw eq_last_of_not_lt h) x
@[simp] lemma init_snoc : init (snoc p x) = p :=
begin
ext i,
have h' := fin.cast_lt_cast_succ i i.is_lt,
simp [init, snoc, i.is_lt, h'],
convert cast_eq rfl (p i)
end
@[simp] lemma snoc_cast_succ : snoc p x i.cast_succ = p i :=
begin
have : i.cast_succ.val < n := i.is_lt,
have h' := fin.cast_lt_cast_succ i i.is_lt,
simp [snoc, this, h'],
convert cast_eq rfl (p i)
end
@[simp] lemma snoc_comp_cast_succ {n : ℕ} {α : Sort*} {a : α} {f : fin n → α} :
(snoc f a : fin (n + 1) → α) ∘ cast_succ = f :=
funext (λ i, by rw [function.comp_app, snoc_cast_succ])
@[simp] lemma snoc_last : snoc p x (last n) = x :=
by { simp [snoc] }
@[simp] lemma snoc_comp_nat_add {n m : ℕ} {α : Sort*} (f : fin (m + n) → α) (a : α) :
(snoc f a : fin _ → α) ∘ (nat_add m : fin (n + 1) → fin (m + n + 1)) = snoc (f ∘ nat_add m) a :=
begin
ext i,
refine fin.last_cases _ (λ i, _) i,
{ simp only [function.comp_app],
rw [snoc_last, nat_add_last, snoc_last] },
{ simp only [function.comp_app],
rw [snoc_cast_succ, nat_add_cast_succ, snoc_cast_succ] }
end
@[simp] lemma snoc_cast_add {α : fin (n + m + 1) → Type*}
(f : Π i : fin (n + m), α (cast_succ i)) (a : α (last (n + m)))
(i : fin n) :
(snoc f a) (cast_add (m + 1) i) = f (cast_add m i) :=
dif_pos _
@[simp] lemma snoc_comp_cast_add {n m : ℕ} {α : Sort*} (f : fin (n + m) → α) (a : α) :
(snoc f a : fin _ → α) ∘ cast_add (m + 1) = f ∘ cast_add m :=
funext (snoc_cast_add f a)
/-- Updating a tuple and adding an element at the end commute. -/
@[simp] lemma snoc_update : snoc (update p i y) x = update (snoc p x) i.cast_succ y :=
begin
ext j,
by_cases h : j.val < n,
{ simp only [snoc, h, dif_pos],
by_cases h' : j = cast_succ i,
{ have C1 : α i.cast_succ = α j, by rw h',
have E1 : update (snoc p x) i.cast_succ y j = _root_.cast C1 y,
{ have : update (snoc p x) j (_root_.cast C1 y) j = _root_.cast C1 y, by simp,
convert this,
{ exact h'.symm },
{ exact heq_of_cast_eq (congr_arg α (eq.symm h')) rfl } },
have C2 : α i.cast_succ = α (cast_succ (cast_lt j h)),
by rw [cast_succ_cast_lt, h'],
have E2 : update p i y (cast_lt j h) = _root_.cast C2 y,
{ have : update p (cast_lt j h) (_root_.cast C2 y) (cast_lt j h) = _root_.cast C2 y,
by simp,
convert this,
{ simp [h, h'] },
{ exact heq_of_cast_eq C2 rfl } },
rw [E1, E2],
exact eq_rec_compose _ _ _ },
{ have : ¬(cast_lt j h = i),
by { assume E, apply h', rw [← E, cast_succ_cast_lt] },
simp [h', this, snoc, h] } },
{ rw eq_last_of_not_lt h,
simp [ne.symm (ne_of_lt (cast_succ_lt_last i))] }
end
/-- Adding an element at the end of a tuple and then updating it amounts to adding it
directly. -/
lemma update_snoc_last : update (snoc p x) (last n) z = snoc p z :=
begin
ext j,
by_cases h : j.val < n,
{ have : j ≠ last n := ne_of_lt h,
simp [h, update_noteq, this, snoc] },
{ rw eq_last_of_not_lt h,
simp }
end
/-- Concatenating the beginning of a tuple with its last element gives back the original tuple -/
@[simp] lemma snoc_init_self : snoc (init q) (q (last n)) = q :=
begin
ext j,
by_cases h : j.val < n,
{ have : j ≠ last n := ne_of_lt h,
simp [h, update_noteq, this, snoc, init, cast_succ_cast_lt],
have A : cast_succ (cast_lt j h) = j := cast_succ_cast_lt _ _,
rw ← cast_eq rfl (q j),
congr' 1; rw A },
{ rw eq_last_of_not_lt h,
simp }
end
/-- Updating the last element of a tuple does not change the beginning. -/
@[simp] lemma init_update_last : init (update q (last n) z) = init q :=
by { ext j, simp [init, ne_of_lt, cast_succ_lt_last] }
/-- Updating an element and taking the beginning commute. -/
@[simp] lemma init_update_cast_succ :
init (update q i.cast_succ y) = update (init q) i y :=
begin
ext j,
by_cases h : j = i,
{ rw h, simp [init] },
{ simp [init, h] }
end
/-- `tail` and `init` commute. We state this lemma in a non-dependent setting, as otherwise it
would involve a cast to convince Lean that the two types are equal, making it harder to use. -/
lemma tail_init_eq_init_tail {β : Type*} (q : fin (n+2) → β) :
tail (init q) = init (tail q) :=
by { ext i, simp [tail, init, cast_succ_fin_succ] }
/-- `cons` and `snoc` commute. We state this lemma in a non-dependent setting, as otherwise it
would involve a cast to convince Lean that the two types are equal, making it harder to use. -/
lemma cons_snoc_eq_snoc_cons {β : Type*} (a : β) (q : fin n → β) (b : β) :
@cons n.succ (λ i, β) a (snoc q b) = snoc (cons a q) b :=
begin
ext i,
by_cases h : i = 0,
{ rw h, refl },
set j := pred i h with ji,
have : i = j.succ, by rw [ji, succ_pred],
rw [this, cons_succ],
by_cases h' : j.val < n,
{ set k := cast_lt j h' with jk,
have : j = k.cast_succ, by rw [jk, cast_succ_cast_lt],
rw [this, ← cast_succ_fin_succ],
simp },
rw [eq_last_of_not_lt h', succ_last],
simp
end
lemma comp_snoc {α : Type*} {β : Type*} (g : α → β) (q : fin n → α) (y : α) :
g ∘ (snoc q y) = snoc (g ∘ q) (g y) :=
begin
ext j,
by_cases h : j.val < n,
{ have : j ≠ last n := ne_of_lt h,
simp [h, this, snoc, cast_succ_cast_lt] },
{ rw eq_last_of_not_lt h,
simp }
end
/-- Appending a one-tuple to the right is the same as `fin.snoc`. -/
lemma append_right_eq_snoc {α : Type*} {n : ℕ} (x : fin n → α) (x₀ : fin 1 → α) :
fin.append x x₀ = fin.snoc x (x₀ 0) :=
begin
ext i,
refine fin.add_cases _ _ i; clear i,
{ intro i,
rw [fin.append_left],
exact (@snoc_cast_succ _ (λ _, α) _ _ i).symm, },
{ intro i,
rw [subsingleton.elim i 0, fin.append_right],
exact (@snoc_last _ (λ _, α) _ _).symm, },
end
lemma comp_init {α : Type*} {β : Type*} (g : α → β) (q : fin n.succ → α) :
g ∘ (init q) = init (g ∘ q) :=
by { ext j, simp [init] }
end tuple_right
section insert_nth
variables {α : fin (n+1) → Type u} {β : Type v}
/-- Define a function on `fin (n + 1)` from a value on `i : fin (n + 1)` and values on each
`fin.succ_above i j`, `j : fin n`. This version is elaborated as an eliminator and works for
propositions; see also `fin.insert_nth` for a version without an `@[elab_as_eliminator]`
attribute. -/
@[elab_as_eliminator]
def succ_above_cases {α : fin (n + 1) → Sort u} (i : fin (n + 1)) (x : α i)
(p : Π j : fin n, α (i.succ_above j)) (j : fin (n + 1)) : α j :=
if hj : j = i then eq.rec x hj.symm
else if hlt : j < i then eq.rec_on (succ_above_cast_lt hlt) (p _)
else eq.rec_on (succ_above_pred $ (ne.lt_or_lt hj).resolve_left hlt) (p _)
lemma forall_iff_succ_above {p : fin (n + 1) → Prop} (i : fin (n + 1)) :
(∀ j, p j) ↔ p i ∧ ∀ j, p (i.succ_above j) :=
⟨λ h, ⟨h _, λ j, h _⟩, λ h, succ_above_cases i h.1 h.2⟩
/-- Insert an element into a tuple at a given position. For `i = 0` see `fin.cons`,
for `i = fin.last n` see `fin.snoc`. See also `fin.succ_above_cases` for a version elaborated
as an eliminator. -/
def insert_nth (i : fin (n + 1)) (x : α i) (p : Π j : fin n, α (i.succ_above j)) (j : fin (n + 1)) :
α j :=
succ_above_cases i x p j
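-- Informally, `insert_nth i x p` inserts `x` at position `i` of the tuple `p`: in the
-- non-dependent case, inserting `x` at position `1` of a pair `(a, b)` yields `(a, x, b)`.
-- The special cases `i = 0` and `i = last n` reduce to `cons` and `snoc`; see
-- `insert_nth_zero'` and `insert_nth_last'` below.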
@[simp] lemma insert_nth_apply_same (i : fin (n + 1)) (x : α i) (p : Π j, α (i.succ_above j)) :
insert_nth i x p i = x :=
by simp [insert_nth, succ_above_cases]
@[simp] lemma insert_nth_apply_succ_above (i : fin (n + 1)) (x : α i) (p : Π j, α (i.succ_above j))
(j : fin n) :
insert_nth i x p (i.succ_above j) = p j :=
begin
simp only [insert_nth, succ_above_cases, dif_neg (succ_above_ne _ _)],
by_cases hlt : j.cast_succ < i,
{ rw [dif_pos ((succ_above_lt_iff _ _).2 hlt)],
apply eq_of_heq ((eq_rec_heq _ _).trans _),
rw [cast_lt_succ_above hlt] },
{ rw [dif_neg (mt (succ_above_lt_iff _ _).1 hlt)],
apply eq_of_heq ((eq_rec_heq _ _).trans _),
rw [pred_succ_above (le_of_not_lt hlt)] }
end
@[simp] lemma succ_above_cases_eq_insert_nth :
@succ_above_cases.{u + 1} = @insert_nth.{u} := rfl
@[simp] lemma insert_nth_comp_succ_above (i : fin (n + 1)) (x : β) (p : fin n → β) :
insert_nth i x p ∘ i.succ_above = p :=
funext $ insert_nth_apply_succ_above i x p
lemma insert_nth_eq_iff {i : fin (n + 1)} {x : α i} {p : Π j, α (i.succ_above j)} {q : Π j, α j} :
i.insert_nth x p = q ↔ q i = x ∧ p = (λ j, q (i.succ_above j)) :=
by simp [funext_iff, forall_iff_succ_above i, eq_comm]
lemma eq_insert_nth_iff {i : fin (n + 1)} {x : α i} {p : Π j, α (i.succ_above j)} {q : Π j, α j} :
q = i.insert_nth x p ↔ q i = x ∧ p = (λ j, q (i.succ_above j)) :=
eq_comm.trans insert_nth_eq_iff
lemma insert_nth_apply_below {i j : fin (n + 1)} (h : j < i) (x : α i)
(p : Π k, α (i.succ_above k)) :
i.insert_nth x p j = eq.rec_on (succ_above_cast_lt h) (p $ j.cast_lt _) :=
by rw [insert_nth, succ_above_cases, dif_neg h.ne, dif_pos h]
lemma insert_nth_apply_above {i j : fin (n + 1)} (h : i < j) (x : α i)
(p : Π k, α (i.succ_above k)) :
i.insert_nth x p j = eq.rec_on (succ_above_pred h) (p $ j.pred _) :=
by rw [insert_nth, succ_above_cases, dif_neg h.ne', dif_neg h.not_lt]
lemma insert_nth_zero (x : α 0) (p : Π j : fin n, α (succ_above 0 j)) :
insert_nth 0 x p = cons x (λ j, _root_.cast (congr_arg α (congr_fun succ_above_zero j)) (p j)) :=
begin
refine insert_nth_eq_iff.2 ⟨by simp, _⟩,
ext j,
convert (cons_succ _ _ _).symm
end
@[simp] lemma insert_nth_zero' (x : β) (p : fin n → β) :
@insert_nth _ (λ _, β) 0 x p = cons x p :=
by simp [insert_nth_zero]
lemma insert_nth_last (x : α (last n)) (p : Π j : fin n, α ((last n).succ_above j)) :
insert_nth (last n) x p =
snoc (λ j, _root_.cast (congr_arg α (succ_above_last_apply j)) (p j)) x :=
begin
refine insert_nth_eq_iff.2 ⟨by simp, _⟩,
ext j,
apply eq_of_heq,
transitivity snoc (λ j, _root_.cast (congr_arg α (succ_above_last_apply j)) (p j)) x j.cast_succ,
{ rw [snoc_cast_succ], exact (cast_heq _ _).symm },
{ apply congr_arg_heq,
rw [succ_above_last] }
end
@[simp] lemma insert_nth_last' (x : β) (p : fin n → β) :
@insert_nth _ (λ _, β) (last n) x p = snoc p x :=
by simp [insert_nth_last]
@[simp] lemma insert_nth_zero_right [Π j, has_zero (α j)] (i : fin (n + 1)) (x : α i) :
i.insert_nth x 0 = pi.single i x :=
insert_nth_eq_iff.2 $ by simp [succ_above_ne, pi.zero_def]
lemma insert_nth_binop (op : Π j, α j → α j → α j) (i : fin (n + 1))
(x y : α i) (p q : Π j, α (i.succ_above j)) :
i.insert_nth (op i x y) (λ j, op _ (p j) (q j)) =
λ j, op j (i.insert_nth x p j) (i.insert_nth y q j) :=
insert_nth_eq_iff.2 $ by simp
@[simp] lemma insert_nth_mul [Π j, has_mul (α j)] (i : fin (n + 1))
(x y : α i) (p q : Π j, α (i.succ_above j)) :
i.insert_nth (x * y) (p * q) = i.insert_nth x p * i.insert_nth y q :=
insert_nth_binop (λ _, (*)) i x y p q
@[simp] lemma insert_nth_add [Π j, has_add (α j)] (i : fin (n + 1))
(x y : α i) (p q : Π j, α (i.succ_above j)) :
i.insert_nth (x + y) (p + q) = i.insert_nth x p + i.insert_nth y q :=
insert_nth_binop (λ _, (+)) i x y p q
@[simp] lemma insert_nth_div [Π j, has_div (α j)] (i : fin (n + 1))
(x y : α i) (p q : Π j, α (i.succ_above j)) :
i.insert_nth (x / y) (p / q) = i.insert_nth x p / i.insert_nth y q :=
insert_nth_binop (λ _, (/)) i x y p q
@[simp] lemma insert_nth_sub [Π j, has_sub (α j)] (i : fin (n + 1))
(x y : α i) (p q : Π j, α (i.succ_above j)) :
i.insert_nth (x - y) (p - q) = i.insert_nth x p - i.insert_nth y q :=
insert_nth_binop (λ _, has_sub.sub) i x y p q
@[simp] lemma insert_nth_sub_same [Π j, add_group (α j)] (i : fin (n + 1))
(x y : α i) (p : Π j, α (i.succ_above j)) :
i.insert_nth x p - i.insert_nth y p = pi.single i (x - y) :=
by simp_rw [← insert_nth_sub, ← insert_nth_zero_right, pi.sub_def, sub_self, pi.zero_def]
variables [Π i, preorder (α i)]
lemma insert_nth_le_iff {i : fin (n + 1)} {x : α i} {p : Π j, α (i.succ_above j)} {q : Π j, α j} :
i.insert_nth x p ≤ q ↔ x ≤ q i ∧ p ≤ (λ j, q (i.succ_above j)) :=
by simp [pi.le_def, forall_iff_succ_above i]
lemma le_insert_nth_iff {i : fin (n + 1)} {x : α i} {p : Π j, α (i.succ_above j)} {q : Π j, α j} :
q ≤ i.insert_nth x p ↔ q i ≤ x ∧ (λ j, q (i.succ_above j)) ≤ p :=
by simp [pi.le_def, forall_iff_succ_above i]
open set
lemma insert_nth_mem_Icc {i : fin (n + 1)} {x : α i} {p : Π j, α (i.succ_above j)}
{q₁ q₂ : Π j, α j} :
i.insert_nth x p ∈ Icc q₁ q₂ ↔
x ∈ Icc (q₁ i) (q₂ i) ∧ p ∈ Icc (λ j, q₁ (i.succ_above j)) (λ j, q₂ (i.succ_above j)) :=
by simp only [mem_Icc, insert_nth_le_iff, le_insert_nth_iff, and.assoc, and.left_comm]
lemma preimage_insert_nth_Icc_of_mem {i : fin (n + 1)} {x : α i} {q₁ q₂ : Π j, α j}
(hx : x ∈ Icc (q₁ i) (q₂ i)) :
i.insert_nth x ⁻¹' (Icc q₁ q₂) = Icc (λ j, q₁ (i.succ_above j)) (λ j, q₂ (i.succ_above j)) :=
set.ext $ λ p, by simp only [mem_preimage, insert_nth_mem_Icc, hx, true_and]
lemma preimage_insert_nth_Icc_of_not_mem {i : fin (n + 1)} {x : α i} {q₁ q₂ : Π j, α j}
(hx : x ∉ Icc (q₁ i) (q₂ i)) :
i.insert_nth x ⁻¹' (Icc q₁ q₂) = ∅ :=
set.ext $ λ p, by simp only [mem_preimage, insert_nth_mem_Icc, hx, false_and, mem_empty_iff_false]
end insert_nth
section find
/-- `find p` returns the first index `n` where `p n` is satisfied, and `none` if it is never
satisfied. -/
def find : Π {n : ℕ} (p : fin n → Prop) [decidable_pred p], option (fin n)
| 0 p _ := none
| (n+1) p _ := by resetI; exact option.cases_on
(@find n (λ i, p (i.cast_lt (nat.lt_succ_of_lt i.2))) _)
(if h : p (fin.last n) then some (fin.last n) else none)
(λ i, some (i.cast_lt (nat.lt_succ_of_lt i.2)))
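-- Informally, `find` scans the indices in increasing order: for example,
-- `find (λ i : fin 3, 2 ≤ i)` should be `some 2`, while `find (λ i : fin 3, false)` is `none`
-- (compare `find_eq_none_iff` and `mem_find_iff` below).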
/-- If `find p = some i`, then `p i` holds -/
/-- `find p` does not return `none` if and only if `p i` holds at some index `i`. -/
lemma is_some_find_iff : Π {n : ℕ} {p : fin n → Prop} [decidable_pred p],
by exactI (find p).is_some ↔ ∃ i, p i
| 0 p _ := iff_of_false (λ h, bool.no_confusion h) (λ ⟨i, _⟩, fin_zero_elim i)
| (n+1) p _ := ⟨λ h, begin
rw [option.is_some_iff_exists] at h,
cases h with i hi,
exactI ⟨i, find_spec _ hi⟩
end, λ ⟨⟨i, hin⟩, hi⟩,
begin
resetI,
dsimp [find],
cases h : find (λ i : fin n, (p (i.cast_lt (nat.lt_succ_of_lt i.2)))) with j,
{ split_ifs with hl hl,
{ exact option.is_some_some },
{ have := (@is_some_find_iff n (λ x, p (x.cast_lt (nat.lt_succ_of_lt x.2))) _).2
⟨⟨i, lt_of_le_of_ne (nat.le_of_lt_succ hin)
(λ h, by clear_aux_decl; cases h; exact hl hi)⟩, hi⟩,
rw h at this,
exact this } },
{ simp }
end⟩
/-- `find p` returns `none` if and only if `p i` never holds. -/
lemma find_eq_none_iff {n : ℕ} {p : fin n → Prop} [decidable_pred p] :
find p = none ↔ ∀ i, ¬ p i :=
by rw [← not_exists, ← is_some_find_iff]; cases (find p); simp
/-- If `find p` returns `some i`, then `p j` does not hold for `j < i`, i.e., `i` is minimal among
the indices where `p` holds. -/
lemma find_min : Π {n : ℕ} {p : fin n → Prop} [decidable_pred p] {i : fin n}
(hi : i ∈ by exactI fin.find p) {j : fin n} (hj : j < i), ¬ p j
| 0 p _ i hi j hj hpj := option.no_confusion hi
| (n+1) p _ i hi ⟨j, hjn⟩ hj hpj := begin
resetI,
dsimp [find] at hi,
cases h : find (λ i : fin n, (p (i.cast_lt (nat.lt_succ_of_lt i.2)))) with k,
{ rw [h] at hi,
split_ifs at hi with hl hl,
{ subst hi,
rw [find_eq_none_iff] at h,
exact h ⟨j, hj⟩ hpj },
{ exact hi.elim } },
{ rw h at hi,
dsimp at hi,
obtain rfl := option.some_inj.1 hi,
exact find_min h (show (⟨j, lt_trans hj k.2⟩ : fin n) < k, from hj) hpj }
end
lemma find_min' {p : fin n → Prop} [decidable_pred p] {i : fin n}
(h : i ∈ fin.find p) {j : fin n} (hj : p j) : i ≤ j :=
le_of_not_gt (λ hij, find_min h hij hj)
lemma nat_find_mem_find {p : fin n → Prop} [decidable_pred p]
(h : ∃ i, ∃ hin : i < n, p ⟨i, hin⟩) :
(⟨nat.find h, (nat.find_spec h).fst⟩ : fin n) ∈ find p :=
let ⟨i, hin, hi⟩ := h in
begin
cases hf : find p with f,
{ rw [find_eq_none_iff] at hf,
exact (hf ⟨i, hin⟩ hi).elim },
{ refine option.some_inj.2 (le_antisymm _ _),
{ exact find_min' hf (nat.find_spec h).snd },
{ exact nat.find_min' _ ⟨f.2, by convert find_spec p hf;
exact fin.eta _ _⟩ } }
end
lemma mem_find_iff {p : fin n → Prop} [decidable_pred p] {i : fin n} :
i ∈ fin.find p ↔ p i ∧ ∀ j, p j → i ≤ j :=
⟨λ hi, ⟨find_spec _ hi, λ _, find_min' hi⟩,
begin
rintros ⟨hpi, hj⟩,
cases hfp : fin.find p,
{ rw [find_eq_none_iff] at hfp,
exact (hfp _ hpi).elim },
{ exact option.some_inj.2 (le_antisymm (find_min' hfp hpi) (hj _ (find_spec _ hfp))) }
end⟩
lemma find_eq_some_iff {p : fin n → Prop} [decidable_pred p] {i : fin n} :
fin.find p = some i ↔ p i ∧ ∀ j, p j → i ≤ j :=
mem_find_iff
lemma mem_find_of_unique {p : fin n → Prop} [decidable_pred p]
(h : ∀ i j, p i → p j → i = j) {i : fin n} (hi : p i) : i ∈ fin.find p :=
mem_find_iff.2 ⟨hi, λ j hj, le_of_eq $ h i j hi hj⟩
end find
/-- To show two sigma pairs of tuples agree, it suffices to show the second elements are related
via `fin.cast`. -/
lemma sigma_eq_of_eq_comp_cast {α : Type*} :
∀ {a b : Σ ii, fin ii → α} (h : a.fst = b.fst), a.snd = b.snd ∘ fin.cast h → a = b
| ⟨ai, a⟩ ⟨bi, b⟩ hi h :=
begin
dsimp only at hi,
subst hi,
simpa using h,
end
/-- `fin.sigma_eq_of_eq_comp_cast` as an `iff`. -/
lemma sigma_eq_iff_eq_comp_cast {α : Type*} {a b : Σ ii, fin ii → α} :
a = b ↔ ∃ (h : a.fst = b.fst), a.snd = b.snd ∘ fin.cast h :=
⟨λ h, h ▸ ⟨rfl, funext $ fin.rec $ by exact λ i hi, rfl⟩,
λ ⟨h, h'⟩, sigma_eq_of_eq_comp_cast _ h'⟩
end fin
|
Formal statement is: lemma prime_gt_1_nat: "prime p \<Longrightarrow> p > (1::nat)" Informal statement is: A prime number is greater than 1. |
-- ----------------------------------------------------------------- [ UML.idr ]
-- Module : UML
-- Description : UML models in Idris
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
module UML
import public UML.Class
import public UML.Sequence
import public UML.Deployment
import public UML.Component
import public UML.Code
import public UML.Types
-- --------------------------------------------------------------------- [ EOF ]
|
State Before: M : Type u_1
inst✝² : CommMonoid M
S : Submonoid M
N : Type u_2
inst✝¹ : CommMonoid N
P : Type ?u.767746
inst✝ : CommMonoid P
f : LocalizationMap S N
x : M
y : { x // x ∈ S }
⊢ ↑(toMap f) ↑y * mk' f x y = ↑(toMap f) x State After: no goals Tactic: rw [mul_comm, mk'_spec] |
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import data.multiset.basic
/-!
# The powerset of a multiset
-/
namespace multiset
open list
variables {α : Type*}
/-! ### powerset -/
/-- A helper function for the powerset of a multiset. Given a list `l`, returns a list
of sublists of `l` (using `sublists_aux`), as multisets. -/
def powerset_aux (l : list α) : list (multiset α) :=
0 :: sublists_aux l (λ x y, x :: y)
theorem powerset_aux_eq_map_coe {l : list α} :
powerset_aux l = (sublists l).map coe :=
by simp [powerset_aux, sublists];
rw [← show @sublists_aux₁ α (multiset α) l (λ x, [↑x]) =
sublists_aux l (λ x, list.cons ↑x),
from sublists_aux₁_eq_sublists_aux _ _,
sublists_aux_cons_eq_sublists_aux₁,
← bind_ret_eq_map, sublists_aux₁_bind]; refl
@[simp] theorem mem_powerset_aux {l : list α} {s} :
s ∈ powerset_aux l ↔ s ≤ ↑l :=
quotient.induction_on s $
by simp [powerset_aux_eq_map_coe, subperm, and.comm]
/-- Helper function for the powerset of a multiset. Given a list `l`, returns a list
of sublists of `l` (using `sublists'`), as multisets. -/
def powerset_aux' (l : list α) : list (multiset α) := (sublists' l).map coe
theorem powerset_aux_perm_powerset_aux' {l : list α} :
powerset_aux l ~ powerset_aux' l :=
by rw powerset_aux_eq_map_coe; exact (sublists_perm_sublists' _).map _
@[simp] theorem powerset_aux'_nil : powerset_aux' (@nil α) = [0] := rfl
@[simp] theorem powerset_aux'_cons (a : α) (l : list α) :
powerset_aux' (a::l) = powerset_aux' l ++ list.map (cons a) (powerset_aux' l) :=
by simp [powerset_aux']; refl
theorem powerset_aux'_perm {l₁ l₂ : list α} (p : l₁ ~ l₂) :
powerset_aux' l₁ ~ powerset_aux' l₂ :=
begin
induction p with a l₁ l₂ p IH a b l l₁ l₂ l₃ p₁ p₂ IH₁ IH₂, {simp},
{ simp, exact IH.append (IH.map _) },
{ simp, apply perm.append_left,
rw [← append_assoc, ← append_assoc,
(by funext s; simp [cons_swap] : cons b ∘ cons a = cons a ∘ cons b)],
exact perm_append_comm.append_right _ },
{ exact IH₁.trans IH₂ }
end
theorem powerset_aux_perm {l₁ l₂ : list α} (p : l₁ ~ l₂) :
powerset_aux l₁ ~ powerset_aux l₂ :=
powerset_aux_perm_powerset_aux'.trans $
(powerset_aux'_perm p).trans powerset_aux_perm_powerset_aux'.symm
/-- The power set of a multiset. -/
def powerset (s : multiset α) : multiset (multiset α) :=
quot.lift_on s
(λ l, (powerset_aux l : multiset (multiset α)))
(λ l₁ l₂ h, quot.sound (powerset_aux_perm h))
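-- For instance, `powerset ({1, 2} : multiset ℕ)` should be the multiset
-- `{0, {1}, {2}, {1, 2}}` of all submultisets (with `0` the empty multiset); this is an
-- informal illustration of `mem_powerset` and `card_powerset` below.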
theorem powerset_coe (l : list α) :
@powerset α l = ((sublists l).map coe : list (multiset α)) :=
congr_arg coe powerset_aux_eq_map_coe
@[simp] theorem powerset_coe' (l : list α) :
@powerset α l = ((sublists' l).map coe : list (multiset α)) :=
quot.sound powerset_aux_perm_powerset_aux'
@[simp] theorem powerset_zero : @powerset α 0 = 0 ::ₘ 0 := rfl
@[simp] theorem powerset_cons (a : α) (s) :
powerset (a ::ₘ s) = powerset s + map (cons a) (powerset s) :=
quotient.induction_on s $ λ l, by simp; refl
@[simp] theorem mem_powerset {s t : multiset α} :
s ∈ powerset t ↔ s ≤ t :=
quotient.induction_on₂ s t $ by simp [subperm, and.comm]
theorem map_single_le_powerset (s : multiset α) :
s.map (λ a, a ::ₘ 0) ≤ powerset s :=
quotient.induction_on s $ λ l, begin
simp [powerset_coe],
show l.map (coe ∘ list.ret) <+~ (sublists l).map coe,
rw ← list.map_map,
exact ((map_ret_sublist_sublists _).map _).subperm
end
@[simp] theorem card_powerset (s : multiset α) :
card (powerset s) = 2 ^ card s :=
quotient.induction_on s $ by simp
theorem revzip_powerset_aux {l : list α} ⦃x⦄
(h : x ∈ revzip (powerset_aux l)) : x.1 + x.2 = ↑l :=
begin
rw [revzip, powerset_aux_eq_map_coe, ← map_reverse, zip_map, ← revzip] at h,
simp at h, rcases h with ⟨l₁, l₂, h, rfl, rfl⟩,
exact quot.sound (revzip_sublists _ _ _ h)
end
theorem revzip_powerset_aux' {l : list α} ⦃x⦄
(h : x ∈ revzip (powerset_aux' l)) : x.1 + x.2 = ↑l :=
begin
rw [revzip, powerset_aux', ← map_reverse, zip_map, ← revzip] at h,
simp at h, rcases h with ⟨l₁, l₂, h, rfl, rfl⟩,
exact quot.sound (revzip_sublists' _ _ _ h)
end
theorem revzip_powerset_aux_lemma [decidable_eq α] (l : list α)
{l' : list (multiset α)} (H : ∀ ⦃x : _ × _⦄, x ∈ revzip l' → x.1 + x.2 = ↑l) :
revzip l' = l'.map (λ x, (x, ↑l - x)) :=
begin
have : forall₂ (λ (p : multiset α × multiset α) (s : multiset α), p = (s, ↑l - s))
(revzip l') ((revzip l').map prod.fst),
{ rw forall₂_map_right_iff,
apply forall₂_same, rintro ⟨s, t⟩ h,
dsimp, rw [← H h, add_sub_cancel_left] },
rw [← forall₂_eq_eq_eq, forall₂_map_right_iff], simpa
end
theorem revzip_powerset_aux_perm {l₁ l₂ : list α} (p : l₁ ~ l₂) :
revzip (powerset_aux l₁) ~ revzip (powerset_aux l₂) :=
begin
haveI := classical.dec_eq α,
simp [λ l:list α, revzip_powerset_aux_lemma l revzip_powerset_aux, coe_eq_coe.2 p],
exact (powerset_aux_perm p).map _
end
/-! ### powerset_len -/
/-- Helper function for `powerset_len`. Given a list `l`, `powerset_len_aux n l` is the list
of sublists of length `n`, as multisets. -/
def powerset_len_aux (n : ℕ) (l : list α) : list (multiset α) :=
sublists_len_aux n l coe []
theorem powerset_len_aux_eq_map_coe {n} {l : list α} :
powerset_len_aux n l = (sublists_len n l).map coe :=
by rw [powerset_len_aux, sublists_len_aux_eq, append_nil]
@[simp] theorem mem_powerset_len_aux {n} {l : list α} {s} :
s ∈ powerset_len_aux n l ↔ s ≤ ↑l ∧ card s = n :=
quotient.induction_on s $
by simp [powerset_len_aux_eq_map_coe, subperm]; exact
λ l₁, ⟨λ ⟨l₂, ⟨s, e⟩, p⟩, ⟨⟨_, p, s⟩, p.symm.length_eq.trans e⟩,
λ ⟨⟨l₂, p, s⟩, e⟩, ⟨_, ⟨s, p.length_eq.trans e⟩, p⟩⟩
@[simp] theorem powerset_len_aux_zero (l : list α) :
powerset_len_aux 0 l = [0] :=
by simp [powerset_len_aux_eq_map_coe]
@[simp] theorem powerset_len_aux_nil (n : ℕ) :
powerset_len_aux (n+1) (@nil α) = [] := rfl
@[simp] theorem powerset_len_aux_cons (n : ℕ) (a : α) (l : list α) :
powerset_len_aux (n+1) (a::l) =
powerset_len_aux (n+1) l ++ list.map (cons a) (powerset_len_aux n l) :=
by simp [powerset_len_aux_eq_map_coe]; refl
theorem powerset_len_aux_perm {n} {l₁ l₂ : list α} (p : l₁ ~ l₂) :
powerset_len_aux n l₁ ~ powerset_len_aux n l₂ :=
begin
induction n with n IHn generalizing l₁ l₂, {simp},
induction p with a l₁ l₂ p IH a b l l₁ l₂ l₃ p₁ p₂ IH₁ IH₂, {refl},
{ simp, exact IH.append ((IHn p).map _) },
{ simp, apply perm.append_left,
cases n, {simp, apply perm.swap},
simp,
rw [← append_assoc, ← append_assoc,
(by funext s; simp [cons_swap] : cons b ∘ cons a = cons a ∘ cons b)],
exact perm_append_comm.append_right _ },
{ exact IH₁.trans IH₂ }
end
/-- `powerset_len n s` is the multiset of all submultisets of `s` of length `n`. -/
def powerset_len (n : ℕ) (s : multiset α) : multiset (multiset α) :=
quot.lift_on s
(λ l, (powerset_len_aux n l : multiset (multiset α)))
(λ l₁ l₂ h, quot.sound (powerset_len_aux_perm h))
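-- For instance, `powerset_len 2 ({1, 2, 3} : multiset ℕ)` should be
-- `{{1, 2}, {1, 3}, {2, 3}}`, the submultisets of cardinality 2; its cardinality is
-- `(card {1, 2, 3}).choose 2 = 3`, as in `card_powerset_len` below.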
theorem powerset_len_coe' (n) (l : list α) :
@powerset_len α n l = powerset_len_aux n l := rfl
theorem powerset_len_coe (n) (l : list α) :
@powerset_len α n l = ((sublists_len n l).map coe : list (multiset α)) :=
congr_arg coe powerset_len_aux_eq_map_coe
@[simp] theorem powerset_len_zero_left (s : multiset α) :
powerset_len 0 s = 0 ::ₘ 0 :=
quotient.induction_on s $ λ l, by simp [powerset_len_coe']; refl
@[simp] theorem powerset_len_zero_right (n : ℕ) :
@powerset_len α (n + 1) 0 = 0 := rfl
@[simp] theorem powerset_len_cons (n : ℕ) (a : α) (s) :
powerset_len (n + 1) (a ::ₘ s) =
powerset_len (n + 1) s + map (cons a) (powerset_len n s) :=
quotient.induction_on s $ λ l, by simp [powerset_len_coe']; refl
@[simp] theorem mem_powerset_len {n : ℕ} {s t : multiset α} :
s ∈ powerset_len n t ↔ s ≤ t ∧ card s = n :=
quotient.induction_on t $ λ l, by simp [powerset_len_coe']
@[simp] theorem card_powerset_len (n : ℕ) (s : multiset α) :
card (powerset_len n s) = nat.choose (card s) n :=
quotient.induction_on s $ by simp [powerset_len_coe]
theorem powerset_len_le_powerset (n : ℕ) (s : multiset α) :
powerset_len n s ≤ powerset s :=
quotient.induction_on s $ λ l, by simp [powerset_len_coe]; exact
((sublists_len_sublist_sublists' _ _).map _).subperm
theorem powerset_len_mono (n : ℕ) {s t : multiset α} (h : s ≤ t) :
powerset_len n s ≤ powerset_len n t :=
le_induction_on h $ λ l₁ l₂ h, by simp [powerset_len_coe]; exact
((sublists_len_sublist_of_sublist _ h).map _).subperm
end multiset
|
subroutine EigValSym(ain, n, eval, ul)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! This subroutine will return the eigenvalues of the symmetric square
! matrix Ain, ordered from greatest to least.
!
! Calling Parameters
! IN
! Ain Input symmetric matrix. By default, only the
! upper portion is used.
! n Order of the matrix Ain.
! OUT
! eval Vector of length n of the eigenvalues of Ain.
! OPTIONAL
! ul Use the upper 'U' or lower 'L' portion of the
! input symmetric matrix.
!
! Notes:
!
! 1. The eigenvalues and eigenvectors are determined by reducing the matrix to
! A = Z L Z' = Q (S L S') Q'
! by the two operations:
!
! (1) The real symmetric square matrix is reduced to tridiagonal form
! A = Q T Q'
! where Q is orthogonal, and T is symmetric tridiagonal.
! (2) The tridiagonal matrix is reduced to
! T = S L S'
!
! The eigenvalues of A correspond to L (which is a diagonal)
!
! Dependencies: LAPACK, BLAS
!
! Written by Mark Wieczorek June 2004.
!
! Copyright (c) 2005, Mark A. Wieczorek
! All rights reserved.
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
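!
! Illustrative usage (a sketch only; the array names below are hypothetical):
!
! real*8 :: amat(3,3), evals(3)
! ! ... fill at least the upper triangle of amat ...
! call EigValSym(amat, 3, evals) ! eigenvalues returned in descending order
! call EigValSym(amat, 3, evals, "L") ! use the lower triangle instead
!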
implicit none
real*8, intent(in) :: ain(:,:)
integer, intent(in) :: n
real*8, intent(out) :: eval(:)
character, intent(in), optional :: ul
integer, parameter :: nb = 80, nbl = 10
character :: uplo
real*8 :: d(n), e(n), tau(n-1), work(nb*n), vl, vu, &
abstol, w(n)
real*8, allocatable :: a(:,:), z(:,:)
integer :: lwork, info, il, iu, m, isuppz(2*n), liwork, iwork(nbl*n), i, astat(2)
external dsytrd, dstegr
if (size(ain(:,1)) < n .or. size(ain(1,:)) < n) then
print*, "Error --- EigValSym"
print*, "AIN must be dimensioned as (N, N) where N is ", n
print*, "Input array is dimensioned as ", size(ain(:,1)), size(ain(1,:))
stop
elseif(size(eval) < n) then
print*, "Error --- EigValSym"
print*, "EVAL must be dimensioned as (N) where N is ", n
print*, "Input array is dimensioned as ", size(eval)
stop
endif
allocate(a(n,n), stat = astat(1))
allocate(z(n,n), stat = astat(2))
if (astat(1) /= 0 .or. astat(2) /= 0) then
print*, "Error --- EigValSym"
print*, "Problem allocating arrays A and Z", astat(1), astat(2)
stop
endif
lwork = nb*n
liwork = nbl*n
eval = 0.0d0
a(1:n,1:n) = ain(1:n,1:n)
if (present(ul)) then
uplo = ul
else
uplo = "U"
endif
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Factor A = Q T Q'
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
call dsytrd(uplo, n, a, n, d, e(1:n-1), tau, work, lwork, info)
if (info /= 0) then
print*, "Error --- EigValSym"
print*, "Problem tri-diagonalizing input matrix"
stop
else
if ( work(1) > dble(lwork) ) then
print*, "Warning --- EigValSym"
print*, "Consider changing value of nb to ", work(1)/n, " and recompile the SHTOOLS archive."
endif
endif
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!
! Factor T = S L S'
!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
abstol = 0.0d0
call dstegr('n','a', n, d, e, vl, vu, il, iu, abstol, m, w, &
z, n, isuppz, work, lwork, iwork, liwork, info)
if (info /= 0) then
print*, "Error --- EigValSym"
print*, "Problem determining eigenvalues and eigenvectors of tridiagonal matrix."
if (info==1) print*, "Internal error in DLARRE"
if (info==2) print*, "Internal error in DLARRV"
stop
else
if (work(1) > dble(lwork) ) then
print*, "Warning --- EigValSym"
print*, "Consider changing value of nb to ", work(1)/n, " and recompile the SHTOOLS archive."
endif
if (iwork(1) > liwork ) then
print*, "Warning --- Eigsym"
print*, "Consider changing value of nb to ", iwork(1)/n, " and recompile the SHTOOLS archive."
endif
endif
! Reorder eigenvalues from greatest to least.
do i=1, n
eval(i) = w(n+1-i)
enddo
deallocate(a)
deallocate(z)
end subroutine EigValSym
|
module Unlambda
import Control.Monad.Trans
import src.ContT
-- Based on http://www.madore.org/~david/programs/unlambda/
data Rec : (r : Type) -> (m : Type -> Type) -> Type where
RecK : (Rec r m -> ContT r m (Rec r m)) -> Rec r m
data Expr : Type where
S : Expr
K : Expr
I : Expr
R : Expr
C : Expr
Dot : Char -> Expr
Backtick : Expr -> Expr -> Expr
apply : ContT r m (Rec r m) -> ContT r m (Rec r m) -> ContT r m (Rec r m)
apply first second = do (RecK k) <- first
b <- second
k b
eval : Monad m => Expr -> (Char -> m ()) -> ContT r m (Rec r m)
eval S put = pure $ RecK (\rec =>
pure $ RecK (\rec' =>
pure $ RecK (\rec'' => apply (apply (pure rec) (pure rec'')) (apply (pure rec') (pure rec'')))))
eval K put = pure $ RecK (\rec => pure $ RecK (\_ => pure rec))
eval I put = pure $ RecK (\rec => pure rec)
eval R put = pure $ RecK (\rec => do lift $ put '\n'; pure rec)
eval C put = pure $ RecK (\(RecK k) => callCC (\k' => k $ RecK k'))
eval (Dot char) put = pure $ RecK (\rec => do lift $ put char; pure rec)
eval (Backtick expr expr') put = apply (eval expr put) (eval expr' put)
parse : List Char -> Maybe (Expr, List Char)
parse ('s' :: xs) = Just (S, xs)
parse ('k' :: xs) = Just (K, xs)
parse ('i' :: xs) = Just (I, xs)
parse ('r' :: xs) = Just (R, xs)
parse ('c' :: xs) = Just (C, xs)
parse ('.' :: c :: xs) = Just (Dot c, xs)
parse ('`' :: xs) = do (expr, xs') <- parse xs
(expr', xs'') <- parse xs'
Just (Backtick expr expr', xs'')
parse (x :: xs) = Nothing
parse [] = Nothing
callCCTest : String
callCCTest = "``cir"
helloWorldTest : String
helloWorldTest = "```si`k``s.H``s.e``s.l``s.l``s.o``s. ``s.w``s.o``s.r``s.l``s.d``s.!``sri``si``si``si``si``si``si``si``si`ki"
example : IO ()
example = case parse (unpack helloWorldTest) of
Just (expr, _) => run (\_ => pure ()) $ eval expr putChar
Nothing => putStrLn "Parse error"
|
Require Import List.
Require Import String.
Require Import ZArith.
Ltac break_if :=
match goal with
| _ : context [ if ?cond then _ else _ ] |- _ =>
destruct cond as [] eqn:?
| |- context [ if ?cond then _ else _ ] =>
destruct cond as [] eqn:?
| _ : context [ match ?cond with _ => _ end ] |- _ =>
destruct cond as [] eqn:?
| |- context [ match ?cond with _ => _ end ] =>
destruct cond as [] eqn:?
end.
(*
We will extend IMP with the ability to push and pop heaps.
In normal IMP, program states just included the heap "h" and
statement to execute "s".
In our extended version of IMP, program state will also include
the current stack of heaps "l", represented as a list.
There will be two new statements: "PushHeap" and "PopHeap x".
- "PushHeap" adds the current heap "h" to the beginning of "l".
Informally, it copies "h" all at once.
- "PopHeap x" replaces the current heap "h" with the first
element of "l" *except* "x" maps to "lkup x h" and replaces "l"
with the tail of "l". If "l" is the empty list, then
"PopHeap x" has no effect.
Both "PushHeap" and "PopHeap x" become Skip in one step.
*)
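(*
A small informal example of the intended semantics (not one of the problems below):
starting from the empty stack and a heap where "x" maps to 1, the program
PushHeap; Assign "x" (Int 2); PopHeap "x"
first saves the current heap on the stack, then updates "x" to 2, and finally restores
the saved heap except that "x" keeps its current value 2.
*)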
Set Implicit Arguments.
Definition var := string.
(* We'll start by defining the syntax of our extended IMP. *)
(* Expressions are just like those from IMP seen in lecture. *)
Inductive Expr : Type :=
| Int : Z -> Expr
| Var : var -> Expr
| Add : Expr -> Expr -> Expr
| Mul : Expr -> Expr -> Expr.
(* Add the PushHeap and PopHeap x statements to the Stmt type. *)
Inductive Stmt : Type :=
| Skip : Stmt
| Assign : var -> Expr -> Stmt
| Seq : Stmt -> Stmt -> Stmt
| Cond : Expr -> Stmt -> Stmt -> Stmt
| While : Expr -> Stmt -> Stmt
(*
[PROBLEM 1]
Add constructors for PushHeap and PopHeap x here.
*)
.
(* Next we define the semantics of our language *)
(* Heaps are represented as association lists. *)
Definition Heap := list (var * Z).
Fixpoint lkup (x: var) (h: Heap) :=
match h with
| nil => 0%Z
| (k, v) :: h' => if string_dec x k then v else lkup x h'
end.
(* Since expressions are unchanged from IMP, their semantics are the same: *)
Inductive Eval : Heap -> Expr -> Z -> Prop :=
| EInt : forall h z,
Eval h (Int z) z
| EVar : forall h v,
Eval h (Var v) (lkup v h)
| EAdd : forall h e1 e2 c1 c2 c3,
Eval h e1 c1 ->
Eval h e2 c2 ->
c3 = (c1 + c2)%Z ->
Eval h (Add e1 e2) c3
| EMul : forall h e1 e2 c1 c2 c3,
Eval h e1 c1 ->
Eval h e2 c2 ->
c3 = (c1 * c2)%Z ->
Eval h (Mul e1 e2) c3.
(*
Define a small-step operational semantics for our extended version of IMP.
Because the form of rules has changed, include all the rules.
*)
Inductive Step : list Heap -> Heap -> Stmt ->
list Heap -> Heap -> Stmt -> Prop :=
(*
[PROBLEM 2]
Add the rules (constructors) for the small step semantics of
our extended version of IMP. I have 10 rules in my solution.
* NOTE *
For statements that involve branching (Cond and While), the
"then" / "enter loop" branch should be taken when the condition
expression evaluates to something not equal to 0, and the
"else" / "exit loop" branch should be taken when the condition
expression evaluates to 0.
*)
.
(*
[PROBLEM 3]
In a short English paragraph, explain why our language would be much less
useful if popping a heap did not copy one value from the popped heap.
*)
(*
[PROBLEM 4]
Give an interesting IMP program that uses both "PushHeap" and "PopHeap x".
*)
(* Interpreters *)
(*
In class we saw how to implement and verify a function
that evaluates expressions:
*)
Fixpoint eval (h: Heap) (e: Expr) : Z :=
match e with
| Int z => z
| Var v => lkup v h
| Add e1 e2 => Z.add (eval h e1) (eval h e2)
| Mul e1 e2 => Z.mul (eval h e1) (eval h e2)
end.
Lemma eval_Eval:
forall h e c,
eval h e = c -> Eval h e c.
Proof.
intro. intro. induction e.
{ intros. simpl in *. subst. constructor. }
{ intros. simpl in *. rewrite <- H. constructor. }
{ intros. simpl in *. econstructor.
{ firstorder. }
{ firstorder. }
{ firstorder. }
}
{ intros. simpl in *. econstructor.
{ firstorder. }
{ firstorder. }
{ firstorder. }
}
Qed.
Lemma Eval_eval:
forall h e c,
Eval h e c -> eval h e = c.
Proof.
intros. induction H.
{ reflexivity. }
{ reflexivity. }
{ subst. reflexivity. }
{ subst. reflexivity. }
Qed.
Lemma Eval_eval':
forall h e,
Eval h e (eval h e).
Proof.
intros. remember (eval h e) as c. apply eval_Eval. omega.
Qed.
(* [Problem 5] *)
(* Write a function which tests whether a statement is a Skip statement. *)
Definition isSkip (s: Stmt) : bool :=
(* TODO *)
false.
(* [Problem 6] *)
(* Prove isSkip correct in the true case. *)
Lemma isSkip_t:
forall s, isSkip s = true -> s = Skip.
Proof.
(* TODO *)
admit.
Qed.
(* [Problem 7] *)
(* Prove isSkip correct in the false case. *)
Lemma isSkip_f:
forall s, isSkip s = false -> s <> Skip.
Proof.
(* TODO *)
admit.
Qed.
(* [Problem 8] *)
(* Implement step as a function. *)
(* Hint: Use your isSkip function in the Seq case. *)
(* Hint: Z.eq_dec decides if a Z is equal to 0. *)
Check Z.eq_dec.
Fixpoint step (l: list Heap) (h: Heap) (s: Stmt) :
option (list Heap * Heap * Stmt) :=
(* TODO *)
None.
(* [Problem 9] *)
(* Prove that only Skip cannot step. *)
Lemma step_None_Skip:
forall l h s, step l h s = None -> s = Skip.
Proof.
(* TODO *)
admit.
Qed.
(* [Problem 10] *)
(* Prove that your step function is SOUND with respect to the Step relation. *)
Lemma step_Step:
forall l h s l' h' s',
step l h s = Some (l', h', s') -> Step l h s l' h' s'.
Proof.
(* TODO *)
admit.
Qed.
(* [Problem 11] *)
(* Prove that your step function is COMPLETE with respect to the Step relation. *)
Lemma Step_step:
forall l h s l' h' s',
Step l h s l' h' s' -> step l h s = Some (l', h', s').
Proof.
(* TODO *)
admit.
Qed.
(* StepN as seen in class *)
Inductive StepN : list Heap -> Heap -> Stmt -> nat ->
list Heap -> Heap -> Stmt -> Prop :=
| StepN_refl : forall l h s,
StepN l h s 0 l h s
| StepN_step : forall l h s l' h' s' l'' h'' s'' n,
Step l h s l' h' s' ->
StepN l' h' s' n l'' h'' s'' ->
StepN l h s (S n) l'' h'' s''.
(* [Problem 12] *)
(* Implement stepn as a function. *)
Fixpoint stepn (l: list Heap) (h: Heap) (s: Stmt) (n: nat) :
option (list Heap * Heap * Stmt) :=
(* TODO *)
None.
(* [Problem 13] *)
(* Prove your stepn function SOUND. *)
Lemma stepn_StepN:
forall n l h s l' h' s',
stepn l h s n = Some (l', h', s') ->
StepN l h s n l' h' s'.
Proof.
(* TODO *)
admit.
Qed.
(* [Problem 14] *)
(* Prove your stepn function COMPLETE. *)
Lemma StepN_stepn:
forall l h s n l' h' s',
StepN l h s n l' h' s' ->
stepn l h s n = Some (l', h', s').
Proof.
(* TODO *)
admit.
Qed.
(* The run function, which takes up to n steps. *)
Fixpoint run (n: nat) (l: list Heap) (h: Heap) (s: Stmt) : list Heap * Heap * Stmt :=
match n with
| O => (l, h, s)
| S m =>
match step l h s with
| Some (l', h', s') => run m l' h' s'
| None => (l, h, s)
end
end.
(* [Problem 15] *)
(* Define the StepStar relation, which corresponds to taking any number of steps. *)
Inductive StepStar : list Heap -> Heap -> Stmt ->
list Heap -> Heap -> Stmt -> Prop :=
(* TODO *)
.
(* [Problem 16] *)
(* Prove that run is SOUND with respect to StepStar. *)
Lemma run_StepStar:
forall n l h s l' h' s',
run n l h s = (l', h', s') -> StepStar l h s l' h' s'.
Proof.
(* TODO *)
admit.
Qed.
(* [Problem 17] *)
(* Prove that running a state that can't step gives that same state. *)
Lemma nostep_run_refl:
forall l h s, step l h s = None ->
forall n, run n l h s = (l, h, s).
Proof.
(* TODO *)
admit.
Qed.
(* [Problem 18] *)
(* Prove that two consecutive runs are the same as one bigger run. *)
Lemma run_combine:
forall m n l h s l' h' s' l'' h'' s'',
run m l h s = (l', h', s') ->
run n l' h' s' = (l'', h'', s'') ->
run (m + n) l h s = (l'', h'', s'').
Proof.
(* TODO *)
admit.
Qed.
(* Here we define what it means for a statement to contain a while. *)
Fixpoint hasWhile (s: Stmt) : bool :=
match s with
| Skip => false
| Assign _ _ => false
| Seq s1 s2 => orb (hasWhile s1) (hasWhile s2)
| Cond _ s1 s2 => orb (hasWhile s1) (hasWhile s2)
| While _ _ => true
| PushHeap => false
| PopHeap _ => false
end.
(* Here we define the number of PushHeap statements contained in a statement. *)
Fixpoint nPushHeap (s: Stmt) : nat :=
match s with
| Skip => 0
| Assign _ _ => 0
| Seq s1 s2 => nPushHeap s1 + nPushHeap s2
| Cond _ s1 s2 => nPushHeap s1 + nPushHeap s2
| While _ s1 => nPushHeap s1
| PushHeap => 1
| PopHeap _ => 0
end.
(*
[Problem 19]
Prove that if we take a step from a statement without any whiles,
then the resulting statement still has no whiles.
*)
Lemma hasWhileStep:
forall l h s l' h' s',
Step l h s l' h' s' ->
hasWhile s = false ->
hasWhile s' = false.
Proof.
(* TODO *)
admit.
Qed.
(* *** A BIT TRICKY! *** *)
(*
[Problem 20]
State and prove the following property:
If statement s has no While loops and from the empty stack
(l = nil) and empty heap (h = nil), s can step to stack l',
heap h', and statement s', then the length of l' does not
exceed the number of PushHeap statements in s (the original
statement).
Hints:
- You will need two lemmas to prove this.
- Think carefully about your induction hypotheses.
*)
(*
[Problem 21]
Prove the previous claim is false if we allow s to contain
While loops.
Hint:
- No need to use induction.
*)
(* Define a weak notion of equivalence between programs. *)
Definition equiv (s1 s2: Stmt) :=
forall l1' h1' l2' h2',
StepStar nil nil s1 l1' h1' Skip ->
StepStar nil nil s2 l2' h2' Skip ->
l1' = l2' /\ h1' = h2'.
(*
[Problem 22]
Prove that the following equivalence claim does not hold.
*)
Lemma progs_equiv:
~ (forall s x,
equiv (Seq s (Assign x (Int 0%Z)))
(Seq PushHeap (Seq s (PopHeap x)))).
Proof.
(* TODO *)
admit.
Qed.
|
/-
Copyright (c) 2020 Fox Thomson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Fox Thomson
-/
import computability.DFA
import data.fintype.powerset
/-!
# Nondeterministic Finite Automata
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file contains the definition of a Nondeterministic Finite Automaton (NFA), a state machine
which determines whether a string (implemented as a list over an arbitrary alphabet) is in a regular
set by evaluating the string over every possible path.
We show that DFAs are equivalent to NFAs; however, the construction from NFA to DFA uses an
exponential number of states.
Note that this definition allows for automata with infinitely many states; a `fintype` instance
must be supplied for true NFAs.
-/
open set
open_locale computability
universes u v
/-- An NFA is a set of states (`σ`), a transition function from state to state labelled by the
alphabet (`step`), a starting state (`start`) and a set of acceptance states (`accept`).
Note the transition function sends a state to a `set` of states. These are the states that it
may be sent to. -/
structure NFA (α : Type u) (σ : Type v) :=
(step : σ → α → set σ)
(start : set σ)
(accept : set σ)
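/- An informal example: over the alphabet `bool`, the two-state NFA with `start = {0}`,
`accept = {1}`, and step function sending every state to `{0, 1}` on input `tt` and to `{0}`
on input `ff` accepts exactly the (nonempty) words whose last letter is `tt`. -/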
variables {α : Type u} {σ σ' : Type v} (M : NFA α σ)
namespace NFA
instance : inhabited (NFA α σ) := ⟨ NFA.mk (λ _ _, ∅) ∅ ∅ ⟩
/-- `M.step_set S a` is the union of `M.step s a` for all `s ∈ S`. -/
def step_set (S : set σ) (a : α) : set σ := ⋃ s ∈ S, M.step s a
@[simp] lemma step_set_empty (a : α) : M.step_set ∅ a = ∅ :=
by simp_rw [step_set, Union_false, Union_empty]
/-- `M.eval_from S x` computes all possible paths though `M` with input `x` starting at an element
of `S`. -/
def eval_from (start : set σ) : list α → set σ :=
list.foldl M.step_set start
@[simp] lemma eval_from_nil (S : set σ) : M.eval_from S [] = S := rfl
@[simp] lemma eval_from_singleton (S : set σ) (a : α) : M.eval_from S [a] = M.step_set S a := rfl
@[simp] lemma eval_from_append_singleton (S : set σ) (x : list α) (a : α) :
M.eval_from S (x ++ [a]) = M.step_set (M.eval_from S x) a :=
by simp only [eval_from, list.foldl_append, list.foldl_cons, list.foldl_nil]
/-- `M.eval x` computes all possible paths though `M` with input `x` starting at an element of
`M.start`. -/
def eval : list α → set σ := M.eval_from M.start
@[simp] lemma eval_nil : M.eval [] = M.start := rfl
@[simp] lemma eval_singleton (a : α) : M.eval [a] = M.step_set M.start a := rfl
@[simp] lemma eval_append_singleton (x : list α) (a : α) :
M.eval (x ++ [a]) = M.step_set (M.eval x) a :=
eval_from_append_singleton _ _ _ _
/-- `M.accepts` is the language of `x` such that there is an accept state in `M.eval x`. -/
def accepts : language α :=
λ x, ∃ S ∈ M.accept, S ∈ M.eval x
/-- `M.to_DFA` is a `DFA` constructed from an `NFA` `M` using the subset construction. Its
state type is `set σ`, the type of sets of states of `M`, and its step function is `M.step_set`. -/
def to_DFA : DFA α (set σ) :=
{ step := M.step_set,
start := M.start,
accept := {S | ∃ s ∈ S, s ∈ M.accept} }
@[simp] lemma to_DFA_correct :
M.to_DFA.accepts = M.accepts :=
begin
ext x,
rw [accepts, DFA.accepts, eval, DFA.eval],
change list.foldl _ _ _ ∈ {S | _} ↔ _,
split; { exact λ ⟨w, h2, h3⟩, ⟨w, h3, h2⟩ },
end
lemma pumping_lemma [fintype σ] {x : list α} (hx : x ∈ M.accepts)
(hlen : fintype.card (set σ) ≤ list.length x) :
∃ a b c, x = a ++ b ++ c ∧ a.length + b.length ≤ fintype.card (set σ) ∧ b ≠ [] ∧
{a} * {b}∗ * {c} ≤ M.accepts :=
begin
rw ←to_DFA_correct at hx ⊢,
exact M.to_DFA.pumping_lemma hx hlen
end
end NFA
namespace DFA
/-- `M.to_NFA` is an `NFA` constructed from a `DFA` `M` by using the same start and accept
states and a transition function which sends `s` with input `a` to the singleton `M.step s a`. -/
def to_NFA (M : DFA α σ') : NFA α σ' :=
{ step := λ s a, {M.step s a},
start := {M.start},
accept := M.accept }
@[simp] lemma to_NFA_eval_from_match (M : DFA α σ) (start : σ) (s : list α) :
M.to_NFA.eval_from {start} s = {M.eval_from start s} :=
begin
change list.foldl M.to_NFA.step_set {start} s = {list.foldl M.step start s},
induction s with a s ih generalizing start,
{ tauto },
{ rw [list.foldl, list.foldl,
show M.to_NFA.step_set {start} a = {M.step start a}, by simpa [NFA.step_set]],
tauto }
end
@[simp] lemma to_NFA_correct (M : DFA α σ) :
M.to_NFA.accepts = M.accepts :=
begin
ext x,
change (∃ S H, S ∈ M.to_NFA.eval_from {M.start} x) ↔ _,
rw to_NFA_eval_from_match,
split,
{ rintro ⟨ S, hS₁, hS₂ ⟩,
rwa set.mem_singleton_iff.mp hS₂ at hS₁ },
{ exact λ h, ⟨M.eval x, h, rfl⟩ }
end
end DFA
|
import tactic
import data.real.basic
import data.set
-----------------
-- EXPERIMENTS --
-----------------
open set
-----------
-- START --
-----------
/-- A metric space structure on a type X -/
class espace_metrique (X : Type) :=
(dist : X → X → ℝ)
(dist_pos : ∀ x y, dist x y ≥ 0)
(sep : ∀ x y, dist x y = 0 ↔ x = y)
(sym : ∀ x y, dist x y = dist y x)
(triangle : ∀ x y z, dist x z ≤ dist x y + dist y z)
open espace_metrique
/-- The distance function, with the type as an explicit argument -/
def dist' (X : Type) [espace_metrique X] : X → X → ℝ := λ x y, dist x y
notation `d` := dist
notation `d_[` X `]` := dist' X
----------------------------------------------------
section fondements
----------------------------------------------------
variables {X : Type} [espace_metrique X]
@[simp]
lemma dist_sym (x:X) (y:X) : d x y = d y x := sym x y
@[simp]
lemma dist_x_x_eq_zero (x:X) : d x x = 0 :=
(sep x x).2 rfl
lemma dist_str_pos {x:X} {y:X} : x ≠ y → d x y > 0 :=
begin
contrapose!,
intro d_neg,
have d_pos : d x y ≥ 0, from dist_pos x y,
have d_zero : d x y = 0, from antisymm d_neg d_pos,
exact iff.mp (sep x y) d_zero
end
/-- `boule x r` is the open ball with centre `x` and radius `r` -/
def boule (x : X) (r : ℝ) := {y | dist x y < r}
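-- Informally, in ℝ with the usual distance, `boule 0 1` would be the open interval (-1, 1);
-- here everything is phrased for an abstract `espace_metrique X`.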
/-- membership in a ball is equivalent to an inequality -/
@[simp]
lemma mem_boule (x : X) (r : ℝ) (y : X) : y ∈ boule x r ↔ dist x y < r :=
iff.rfl
/-- A ball of radius > 0 contains its centre -/
lemma centre_mem_boule (x : X) (r : ℝ) : r > 0 → x ∈ boule x r :=
begin
intro r_pos,
simpa [boule] -- simplifie et utilise l'hypothèse
end
/-- A subset of a metric space `X` is open if it contains an open ball of strictly positive
radius around each of its points. -/
def ouvert (A : set X) := ∀ x ∈ A, ∃ r > 0, boule x r ⊆ A
/-- Balls are open -/
lemma boule_est_ouverte : ∀ x : X, ∀ r > 0, ouvert (boule x r) :=
begin
intros x r r_pos y y_in, -- unfold the definitions:
-- we end up with a point y in the ball
-- of centre x and radius r, and we look for a ball around y that is included
-- in boule x r
set ε := r - d x y with hε,
use ε, -- the candidate radius
-- OBSOLETE rw exists_prop,
split,
{ -- The following line can be replaced by any one of the three lines after it
simp [boule] at y_in,
--change d x y < r at y_in,
--rw mem_boule at y_in,
--unfold boule at y_in, rw set.mem_set_of_eq at y_in,
linarith only [hε, y_in]}, -- the radius is indeed strictly positive
{ -- The following line is optional; it makes the goal explicit
-- change ∀ z, z ∈ boule y (r - d x y) → z ∈ boule x r,
intros z z_in,
rw mem_boule at *,
have clef : d x z ≤ d x y + d y z, from triangle x y z,
linarith only [clef, z_in, y_in, hε]} -- and the triangle inequality proves the inclusion of the balls
end
-- A set-theoretic lemma - ultimately not used
lemma inclusion_transitive {Y : Type} {A B C : set Y} : A ⊆ B → B ⊆ C → A ⊆ C :=
begin
intros AB BC a a_app_A,
exact BC (AB a_app_A),
end
/-- A union of open subsets of a metric space is open -/
lemma union_ouverts_est_ouvert (I : set (set X)) : (∀ O ∈ I, ouvert O) → ouvert (⋃₀ I) :=
begin
-- Assume that all the O in I are open.
intro O_ouverts,
-- Let x be a point of one of the O in I
rintro x ⟨O, O_app_I, x_app_O⟩,
-- Since O is open, there exists r > 0 such that B(x, r) ⊆ O
obtain ⟨r, r_positif, boule_dans_O⟩ : ∃ r > 0, boule x r ⊆ O,
from (O_ouverts O) O_app_I x x_app_O,
-- Let us show that this r works
use [r, r_positif],
-- Since B(x, r) ⊆ O, it suffices to show that O ⊆ ⋃₀ I
transitivity O, assumption,
-- But O belongs to I.
exact subset_sUnion_of_mem O_app_I
end
-- ** λ-calculus-style variant - not used
lemma union_ouverts_est_ouvert' (I : set (set X)) : (∀ O ∈ I, ouvert O) → ouvert (⋃₀ I) :=
assume O_ouverts x ⟨O, O_app_I, x_app_O⟩,
let ⟨r, r_positif, boule_dans_O⟩ := O_ouverts O O_app_I x x_app_O in
⟨r, r_positif, subset.trans boule_dans_O (subset_sUnion_of_mem O_app_I)⟩
/-- The intersection of two open sets is open -/
lemma intersection_deux_ouverts_est_ouvert : ∀ O₁ O₂ : set X, ouvert O₁ → ouvert O₂ → ouvert (O₁ ∩ O₂) :=
begin
-- Let x be a point in the intersection,
rintro O₁ O₂ ouvert_O₁ ouvert_O₂ x ⟨x_app_O₁,x_app_O₂⟩,
-- the fact that O₁ and O₂ are open provides two positive numbers
obtain ⟨r₁,r₁_pos,boule_dans_O₁⟩ : ∃ r₁>0, boule x r₁ ⊆ O₁, from ouvert_O₁ x x_app_O₁,
obtain ⟨r₂,r₂_pos,boule_dans_O₂⟩ : ∃ r₂>0, boule x r₂ ⊆ O₂, from ouvert_O₂ x x_app_O₂,
-- Let us show that the minimum r of the two works
use min r₁ r₂,
-- OBSOLETE rw exists_prop,
-- It is indeed positive
split,
by exact lt_min r₁_pos r₂_pos,
-- the four preceding lines can be replaced by:
-- use [min r₁ r₂,lt_min r₁_pos r₂_pos]
-- Take a y in the ball of radius r
intros y y_app_boule,
-- by the choice of r, we have d x y < r₁ and d x y < r₂
simp [boule] at y_app_boule,
-- so we are done
split ; tautology
-- MORE COMPLICATED ENDING:
-- simp [boule] at y_app_boule,
-- rcases y_app_boule with ⟨ineg_1,ineg_2⟩,
-- -- it is in O₁ and in O₂
-- have y_O₁ : y ∈ O₁, from boule_dans_O₁ ineg_1,
-- have y_O₂ : y ∈ O₂, from boule_dans_O₂ ineg_2,
-- -- hence in the intersection, as desired.
-- exact and.intro y_O₁ y_O₂,
end
/-- The whole space is open -/
lemma total_ouvert : ouvert (univ : set X) :=
begin
intros x hx,
use 1,
-- OBSOLETE rw exists_prop,
split,
exact zero_lt_one,
exact subset_univ (boule x 1),
end
/-- The intersection of finitely many open sets is open -/
--lemma intersection_ouverts_est_ouvert'
--(I : set (set X)) : (finite I) (∀ O ∈ I, ouvert O) → ouvert (⋂₀ I) :=
--begin
--tactic.unfreeze_local_instances,
--rcases _inst_2 with ⟨Liste, Liste_exhaustive⟩,
--sorry
--end
--{s : set β} {f : β → set α} (hs : finite s) :
--variables (β : Type)
--lemma intersection_ouverts_est_ouvert {s: set β} {O : β → set X} (hs: finite s) :
-- (∀ i, ouvert (O i)) → ouvert (⋂ i, O i) :=
--begin
-- set.finite.induction_on hs (sorry) (sorry)
-- (λ _, by rw bInter_empty; exact total_ouvert)
-- (λ a s has hs ih h, by rw bInter_insert; exact
-- is_open_inter (h a (mem_insert _ _)) (ih (λ i hi, h i (mem_insert_of_mem _ hi))))
--end
--lemma is_open_sInter {s : set (set X)} (hs : finite s) : (∀t ∈ s, ouvert t) → ouvert (⋂₀ s) :=
lemma vide_ouvert : ouvert (∅ : set X) :=
begin
intros x x_in,
exfalso,
exact x_in,
end
lemma vide_ouvert' : ouvert (∅ : set X) :=
assume x x_in, false.elim x_in
/-- The interior of a subset of X is the union of the open sets it contains -/
def Int (E : set X) := ⋃₀ {O : set X | ouvert O ∧ O ⊆ E}
/-- Metric characterization of the interior -/
@[simp]
lemma interieur_metrique {E : set X} {x : X} : x ∈ Int E ↔ ∃ r>0, boule x r ⊆ E :=
begin
split,
-- For the forward direction, assume that x is in the interior of E
intro x_dans_Int,
-- By definition of the interior, there is an open set O included in E and containing x
rcases x_dans_Int with ⟨O, ⟨ouvert_O,O_sub_E⟩ , x_app_O⟩,
-- The open set O contains a ball around x
obtain ⟨r,r_pos,boule_dans_O⟩ : ∃ r>0, boule x r ⊆ O, from ouvert_O x x_app_O,
-- This ball works
use [r, r_pos],
-- since it is included in O, which is included in E
transitivity O, assumption, assumption,
-- VARIANT: exact subset.trans boule_dans_O O_sub_E, -- One can also write: tauto,
-- For the other direction, let x be the center of a ball included in E.
rintros ⟨ r,r_pos, boule_dans_E⟩,
-- This ball is open
have ouvert_boule, from boule_est_ouverte x r r_pos,
-- and it contains x
have x_mem_boule, from centre_mem_boule x r r_pos,
-- so x is in the interior of E
use boule x r,
repeat { split }, assumption, assumption, assumption,
-- MORE COMPLICATED ENDING VARIANT: -- the ball is therefore included in the interior of E
-- let I := {O : set X | ouvert O ∧ O ⊆ E},
-- have boule_mem_I : (boule x r) ∈ I,
-- exact and.intro ouvert_boule boule_dans_E,
-- have boule_inc_Int : boule x r ⊆ Int E, from subset_sUnion_of_mem boule_mem_I,
-- -- which therefore contains x, the center of a ball included in Int E
-- exact boule_inc_Int (centre_mem_boule x r r_pos),
end
-- Less convenient variant (?)
lemma interieur_metrique' {E : set X} : Int E = { x : X | ∃ r>0, boule x r ⊆ E } :=
begin
-- We argue by double inclusion
apply subset.antisymm,
-- Let x be in the interior of E
intros x x_dans_Int, simp,
-- By definition of the interior, there is an open set O included in E and containing x
rcases x_dans_Int with ⟨O, ⟨ouvert_O,O_sub_E⟩ , x_app_O⟩,
-- The open set O contains a ball around x
obtain ⟨r,r_pos,boule_dans_O⟩ : ∃ r>0, boule x r ⊆ O, from ouvert_O x x_app_O,
-- This ball works
use [r, r_pos],
-- since it is included in O, which is included in E
transitivity O, assumption, assumption,
-- VARIANT: exact subset.trans boule_dans_O O_sub_E, -- One can also write: tauto,
-- For the other direction, let x be the center of a ball included in E.
rintros x ⟨ r,r_pos, boule_dans_E⟩,
-- This ball is open
have ouvert_boule, from boule_est_ouverte x r r_pos,
-- and it contains x
have x_mem_boule, from centre_mem_boule x r r_pos,
-- so x is in the interior of E
use boule x r,
repeat { split }, assumption, assumption, assumption,
-- MORE COMPLICATED ENDING VARIANT: -- the ball is therefore included in the interior of E
-- let I := {O : set X | ouvert O ∧ O ⊆ E},
-- have boule_mem_I : (boule x r) ∈ I,
-- exact and.intro ouvert_boule boule_dans_E,
-- have boule_inc_Int : boule x r ⊆ Int E, from subset_sUnion_of_mem boule_mem_I,
-- -- which therefore contains x, the center of a ball included in Int E
-- exact boule_inc_Int (centre_mem_boule x r r_pos),
end
def est_voisinage (V : set X) (x : X) := x ∈ Int V
-- characterization of a neighborhood in terms of open sets?
-- characterization in terms of balls?
end fondements
----------------------------------------------------
section continuite
----------------------------------------------------
variables {X Y : Type} [espace_metrique X] [espace_metrique Y]
-- in the following definition the `d_[X]` and `d_[Y]` are cosmetic; `d` alone works just as well
def continue_en (f : X → Y) (x₀ : X) :=
∀ ε > 0, ∃ δ > 0, ∀ x, d_[X] x₀ x < δ → d_[Y] (f x₀) (f x) < ε
def continue (f:X → Y) :=
∀ x : X, continue_en f x
-- Notations f continue, f continue_au_point x
-- topological characterization (pointwise, global)
lemma continuite_ouverts (f:X → Y): continue f ↔ ( ∀O, ouvert O → ouvert (f ⁻¹' O) ) :=
begin
-- We argue by double implication
split,
{ -- So suppose that f satisfies the metric definition of continuity
-- Let O be an open set of the target space; we must show that its preimage is open
-- Let x be a point of the preimage; we look for a radius
intros cont O O_ouvert x x_dans_reciproque,
-- that is, such that f(x) ∈ O
change f x ∈ O at x_dans_reciproque, -- This line is purely psychological; it can be removed
-- Since O is open, it contains a ball of radius ε around f(x)
obtain ⟨ε, ε_positif, boule_dans_O⟩ : ∃ ε > 0, boule (f x) ε ⊆ O,
from O_ouvert (f x) x_dans_reciproque,
-- The continuity hypothesis provides a δ > 0
rcases (cont x) ε ε_positif with ⟨δ , δ_positif, H⟩,
-- Let us show that the ball of radius δ is contained in the preimage
use [δ, δ_positif],
-- for this we take a point x' in the ball
intros x' hx',
-- we must see that its image is in O
change f x' ∈ O, -- another purely psychological line; Lean does not need it
-- For that, it suffices to see that f(x') is in the ball of center f(x) and radius ε,
-- since that ball is included in O
suffices hh : f x' ∈ boule (f x) ε, from boule_dans_O hh,
-- which is given by the property of δ coming from continuity
exact H x' hx'
},
{ -- For the other direction, assume that the preimage of every open set is open,
-- and take a point x and an ε > 0
rintros H x ε ε_positif,
-- The ball of center f(x) and radius ε is an open subset of Y,
have boule_ouverte, from boule_est_ouverte (f x) ε ε_positif,
-- so by hypothesis its preimage is an open subset of X
have reciproque_ouvert, from H (boule (f x) ε) boule_ouverte,
-- now x belongs to this preimage
have x_dans_reciproque: x ∈ f ⁻¹' boule (f x) ε,
simpa [boule],
-- Hence there is a ball around x included in the preimage of the first ball
obtain ⟨δ, δ_positif, H⟩: ∃ δ >0, boule x δ ⊆ f ⁻¹' boule (f x) ε , from reciproque_ouvert x x_dans_reciproque,
-- let us show that the radius of this ball satisfies the metric definition of continuity
use [δ , δ_positif],
-- So consider a point x' such that d(x,x') < δ
intros x' hx',
-- In other words, x' is in the ball B(x,δ),
change x' ∈ boule x δ at hx', -- another line that is not strictly needed
-- so its image is in the first ball
exact H hx' }
end
variables {Z : Type} [espace_metrique Z]
/-- The composition of two continuous maps is continuous -/
lemma composition_continue (f : X → Y) (g : Y → Z) : (continue f) → (continue g) → continue (g ∘ f) :=
begin
-- Assume that f and g are continuous
intros f_cont g_cont,
-- We use the topological characterization to show the continuity of g ∘ f:
rw continuite_ouverts,
-- Consider an open subset O of Z
intros O O_ouvert,
-- The topological characterization of the continuity of g tells us that g ⁻¹' O is an open subset of Y,
have ouvert1 : ouvert (g ⁻¹' O),
from (((iff.elim_left (continuite_ouverts g)) g_cont) O) O_ouvert,
-- The topological characterization of the continuity of f tells us that f ⁻¹' (g ⁻¹' O) is an open subset of X,
exact (((iff.elim_left (continuite_ouverts f)) f_cont) (g ⁻¹' O)) ouvert1,
-- and it is equal to (g ∘ f) ⁻¹' O, QED
end
-- TO DO: topological characterization of pointwise continuity via neighborhoods,
-- and pointwise composition
def lipschitzienne (k:ℝ) (f: X → Y) :=
∀ x₀ x₁ , d_[Y] (f x₀) (f x₁) ≤ ( k * d_[X] x₀ x₁ )
-- TO DO: Lipschitz implies continuous
end continuite
----------------------------------------------------
section fermes
----------------------------------------------------
variables {X:Type} [espace_metrique X]
def ferme (F : set X) := ouvert (- F)
-- TO DO: intersection, union
/-- The closure of a subset of X is the intersection of the closed sets containing it -/
def Adh (E : set X) := sInter {F : set X | ferme F ∧ E ⊆ F}
-- closure and interior correspond under passing to complements
/-- Metric characterization of the closure -/
@[simp]
lemma adherence_metrique {E : set X} {x : X} : x ∈ Adh E ↔ ∀ r>0, boule x r ∩ E ≠ ∅ :=
begin
sorry
end
end fermes
----------------------------------------------------
section suites
----------------------------------------------------
variables {X:Type} [espace_metrique X]
-- variable E : set X
-- variable x : E
-- #print x
def limite_suite (x: ℕ → X) (l : X) := ∀ ε > 0, ∃ N, ∀ n ≥ N, ((d l (x n)) < ε)
-- We will need "0 < 2" in ℝ
lemma zero_pp_2 : (0:real) < 2 :=
begin
linarith only [zero_lt_one]
end
#print zero_pp_2 -- Waou !
open classical
local attribute [instance] prop_decidable
lemma unicite_limite {x: ℕ → X} {l₁ : X} {l₂ : X} :
(limite_suite x l₁) → (limite_suite x l₂) → l₁ = l₂ :=
begin
-- Assume the sequence (x_n) converges to both l₁ and l₂
intros H₁ H₂,
-- Argue by contradiction, assuming l₁ ≠ l₂
by_contradiction lim_non_eg,
-- Then d(l₁, l₂) > 0
have dist_limites_pos : 0 < d l₁ l₂, from dist_str_pos lim_non_eg,
-- Call ε half of this distance, which is therefore also a positive number
let ε := (d l₁ l₂)/2,
have ε_pos : 0 < ε, from div_pos dist_limites_pos zero_pp_2,
-- and apply the definition of convergence to our two limits
rcases H₁ ε ε_pos with ⟨ N₁ , HN₁ ⟩,
rcases H₂ ε ε_pos with ⟨ N₂ , HN₂ ⟩,
-- We obtain two indices N₁ and N₂, and take their maximum
let N := max N₁ N₂,
-- The definition of convergence gives the two inequalities d(l₁,x_N) < ε and d(l₂,x_N) < ε
have I₁ : d l₁ (x N) < ε, from HN₁ N (le_max_left N₁ N₂),
have I₂ : d l₂ (x N) < ε, from HN₂ N (le_max_right N₁ N₂),
-- Combining them with the triangle inequality between the three points involved,
-- we obtain d(l₁,l₂) < d(l₁,l₂),
have egal : d l₁ l₂ = 2 * ε , from eq.symm (@mul_div_cancel' ℝ real.field (d l₁ l₂) 2 two_ne_zero),
have Ineg : d l₁ l₂ < d l₁ l₂, from
calc
d l₁ l₂ ≤ d l₁ (x N) + d (x N) l₂ : (triangle l₁ (x N) l₂)
... ≤ d l₁ (x N) + d l₂ (x N) : by simp
-- linarith manages without the following 3 lines:
-- ... < ε + ε : by linarith
-- ... = 2 * ε : eq.symm (two_mul ε)
-- ... = d l₁ l₂ : by rw egal,
... < d l₁ l₂ : by linarith,
-- which gives the desired contradiction.
linarith only [Ineg]
end
-- names of the lemmas found with the library_search tactic
example (ε : ℝ) : 2*ε = ε + ε := two_mul ε
example (a : ℝ) (b : ℝ) (h₁ : 0 < a) (h₂ : 0 < b) : 0 < a/b := div_pos h₁ h₂
example (a : ℝ) (b : ℝ) (H : a ≠ 0) : a * (b/a) = b := mul_div_cancel' b H
example (a : ℝ) (b : ℝ) (h₁ : a = b) : b = a := eq.symm h₁
example (a : ℝ) (h₁ : a < a) : false := by linarith
example : @has_lt.lt real real.has_lt 0 1 := zero_lt_one
-- Variant using the following lemma:
lemma pp_que_tout_pos (l : real) : (∀ ε>0, l ≤ ε) → l ≤ 0 :=
begin
contrapose!,
intro H,
use l/2,
split,
linarith,
linarith
end
lemma unicite_limite' {x: ℕ → X} {l₁ : X} {l₂ : X} :
(limite_suite x l₁) → (limite_suite x l₂) → l₁ = l₂ :=
begin
intros H1 H2,
have H : (∀ ε>0, d l₁ l₂ ≤ ε),
intros ε ε_pos,
have εs2_pos : ε/2>0, by linarith,
have H1', from H1 _ εs2_pos,
cases H1' with N₁ PN₁ ,
have H2', from H2 _ εs2_pos,
cases H2' with N₂ PN₂,
have HN₁ : max N₁ N₂ ≥ N₁ , by exact le_max_left N₁ N₂, -- library_search
have HN₂ : max N₁ N₂ ≥ N₂ , by exact le_max_right N₁ N₂, -- library_search
specialize PN₁ _ HN₁ ,
specialize PN₂ _ HN₂ ,
have T, from triangle l₁ (x (max N₁ N₂)) l₂,
have Dsym, from sym l₂ (x (max N₁ N₂)),
exact calc
d l₁ l₂ ≤ d l₁ (x (max N₁ N₂)) + d (x (max N₁ N₂)) l₂ : T
... ≤ ε : by linarith,
have D , from pp_que_tout_pos (d l₁ l₂) H, -- : (d l₁ l₂) ≤ 0
have D' , from dist_pos l₁ l₂,
have D'' , by exact le_antisymm D D', -- d l₁ l₂ =0
exact (sep l₁ l₂).1 D''
end
lemma nonvide_ssi_existe_element (A : set X) : A ≠ ∅ ↔ ∃ a : X, a ∈ A :=
ne_empty_iff_nonempty
lemma essai (a : ℝ) (b : ℝ) (c : ℝ) (H1 : a > b) (H2 : b > c) : a > c :=
begin
transitivity b, exact H1, exact H2,
end
lemma inv_inv2 {ε : ℝ} (ε_nz : ε ≠ 0) : ε = 1 / (1 / ε) :=
begin
have inv_ε_nz : (1/ε) ≠ 0, from one_div_ne_zero ε_nz,
have H : ε * (1/ε) = 1, from mul_div_cancel' 1 ε_nz,
exact (eq_div_iff_mul_eq ε 1 inv_ε_nz).2 H,
end
-- sequential criterion for closure membership (construct a sequence)
lemma critere_sequentiel_adherence (E : set X) (l : X) :
l ∈ Adh E ↔ ∃ x : ℕ → X, (∀ n, x n ∈ E) ∧ (limite_suite x l) :=
begin
split,
-- For the forward direction, take l in the closure of E
-- and try to construct a sequence of elements of E that converges to l
intros Hl,
-- How can we avoid introducing this big intermediate property?
have H1 : ∀ n : ℕ, ∃ x : X, d l x < 1/(n+1) ∧ x ∈ E,
intro n,
-- have H2, from H 1/(n+1),
-- exact adherence_metrique.mpr l Hl,
sorry,
-- H1 lets us define a sequence (x_n) that will do the job
choose x H using H1,
use x,
split,
-- The sequence indeed takes its values in E,
exact λ n, ((H n).2),
-- It remains to show that it converges to l
intros ε ε_pos,
have HN, from exists_nat_gt (1 / ε),
cases HN with N HN,
use N, intros n Hn,
specialize H n,
cases H with Hutile Hinutile,
have Ineg : N < n+1, by linarith,
have Ineg2 : ↑n+(1:ℝ) > ↑N, by exact_mod_cast Ineg,
have Ineg3 : ↑n+1 > 1/ε,
begin
transitivity ↑N,
exact Ineg2,
exact HN,
end,
have inv_ε_pos : (1/ε)>0, from one_div_pos_of_pos ε_pos,
have Ineg4, by exact one_div_lt_one_div_of_lt inv_ε_pos Ineg3,
transitivity 1 / (↑n + 1:ℝ),
exact Hutile,
have NZ : ε ≠ 0, by linarith,
-- have inv_inv_ε : ε = 1/(1/ε), by inv_inv2 NZ,
sorry,
-- For the other direction, assume the existence of a sequence
-- of elements of E converging to l
rintro ⟨x,H1,H2⟩,
-- We use the metric characterization of the closure
rw adherence_metrique,
rintro r Hr,
rw limite_suite at H2,
have H, from (H2 r) Hr,
-- obtain ⟨N, H3N⟩ : ℕ ,(∀ (n : ℕ), n ≥ N → d l (x n) < r) , from (H2 r) Hr,
cases H with N HN,
have HNN: N ≥ N, by linarith,
specialize HN N HNN,
rw← mem_boule at HN,
specialize H1 N,
rw ne_empty_iff_nonempty,
use x N,
exact and.intro HN H1
end
example (a : ℝ) (b : ℝ) (a_pos : a>0) (a_inf_b : a <b) : 1/a > 1/b :=
begin
exact one_div_lt_one_div_of_lt a_pos a_inf_b
end
example (x : ℝ) (H : x ≠ 0) : x * (1/x) = 1 := mul_div_cancel' 1 H
example (x : ℝ) (y : ℝ) (H : x ≠ 0) : ( y = 1/x ) ↔ ( y * x = 1 ) :=
eq_div_iff_mul_eq y 1 H
-- sequential criterion for closedness
-- sequential criterion for continuity
end suites
----------------------------------------------------
section sous_espaces_metriques
----------------------------------------------------
----------------------------------------------------
section distances_equivalentes
----------------------------------------------------
----------------------------------------------------
section espaces_metriques_produits
----------------------------------------------------
----------------------------------------------------
section espaces_de_fonctions
----------------------------------------------------
|
\section{Blockchain}\label{sec:blockchain}
Recently, cryptocurrencies have attracted extensive attention from both industry and academia. Bitcoin, often called the first cryptocurrency, has had massive success, reaching a market capitalization of about \$10 billion in 2016 \cite{coindesk}. Blockchain is the core mechanism of Bitcoin; it was first proposed in 2008 and implemented in 2009 \cite{nakamoto2008bitcoin}. The blockchain can be regarded as a public ledger in which all committed transactions are stored in a chain of blocks. This chain grows continuously as new blocks are appended to it \cite{zheng2016blockchain}.
At the origin of the blockchain is the Bitcoin protocol, proposed by Satoshi Nakamoto \cite{nakamoto2008bitcoin}. That article proposes a \ac{P2P} network in which transactions in the cryptocurrency bitcoin, submitted by clients, are received by servers, which decide, through a consensus protocol based on cryptographic challenges, the order in which the transactions are carried out and permanently stored in a chain of blocks replicated on each server. According to \citeonline{FORMIGONI2017}, it was the creation of a digital currency operating on a P2P network that allowed online payments to be sent in a completely secure way, without the involvement of financial institutions, to all participants in the network. In this sense, blockchain was created out of the need for an efficient, economical, reliable, and secure system for conducting and recording financial transactions. Hence the question: what is the relationship between blockchain and Bitcoin? Blockchain is the platform on which the Bitcoin network and several other cryptocurrencies operate.
While the system of financial institutions serving as trusted third parties to process payments works well for most transactions, it still suffers from the weaknesses inherent in the trust-based model. In addition, the cost of mediation increases transaction costs, limiting the minimum practical transaction size and ruling out occasional small transactions. To solve these problems, \cite{nakamoto2008bitcoin} defined an electronic payment system called Bitcoin based on cryptographic proof rather than trust, allowing any two willing parties to transact directly with each other without the need for a trusted third party.
This revolution began as a new marginal economy on the Internet. Bitcoin emerged as an alternative currency issued and backed not by a central authority but by automated consensus among networked users. Its true novelty, however, lay in the fact that it did not require users to trust each other: through algorithmic self-policing, any malicious attempt to circumvent the system is rejected. In a precise, technical definition, Bitcoin is digital money transacted over the Internet in a decentralized system without central backing, using a ledger called the blockchain. It combines peer-to-peer file sharing with public-key cryptography \cite{swan2015blockchain}.
For \cite{swan2015blockchain}, beyond currency ("Blockchain 1.0"), smart contracts ("Blockchain 2.0") show how the blockchain is positioned to become the fifth disruptive computing paradigm, after mainframes, PCs, the Internet, and mobile/social networks. Bitcoin began as a digital currency, but the blockchain technology behind it may prove far more significant.
The rapid growth in the adoption of blockchain technology and the development of applications based on it have begun to reshape the financial services industry. Beyond bitcoin, typical blockchain applications range from proprietary networks used to process financial and insurance claims to platforms that can issue and trade equity and corporate bonds \cite{michael2018blockchain}.
Blockchain has real-world implementations beyond cryptocurrencies, and these solutions deliver tangible benefits to healthcare organizations, banks, retailers, and consumers. The potential benefits of blockchain are more than just economic: they extend to the political, humanitarian, social, and scientific domains, and specific groups are already harnessing its technological capacity to solve real-world problems \cite{michael2018blockchain}.
\subsection{Blockchain Properties}\label{sec:propriedades}
Blockchain technology has key features such as decentralization, persistence, anonymity, and auditability. A blockchain can function in a decentralized environment because it integrates several core technologies, such as cryptographic hashing, digital signatures (based on asymmetric encryption), and a distributed consensus algorithm. With blockchain technology, a transaction can take place in a decentralized manner; as a result, blockchain can significantly reduce costs and improve efficiency \cite{zheng2016blockchain}. The primary properties of the blockchain are considered innovative and enable the rapid adoption of the technology \cite{greve2018blockchain}; a small illustrative sketch of the hash linking that underpins the immutability property is given after the list:
\begin{itemize}
\item Decentralization: Applications and systems run in a distributed manner, through the establishment of trust between the parties, without the need for a trusted intermediary entity. This is the primary motivator for the growing interest in the blockchain.
\item Availability and integrity: All datasets and transactions are securely replicated in different nodes to keep the system available and consistent.
\item Transparency and auditability: All transactions recorded in the ledger are public and can be verified and audited. Furthermore, the technology's source code is often open and verifiable.
\item Immutability and Irrefutability: Transactions recorded in the ledger are immutable. Once registered, they cannot be refuted. Updates are possible based on the generation of new transactions and the realization of a new consensus.
\item Privacy and Anonymity: It is possible to offer privacy to users without the third parties involved having access to or control over their data. Each user manages their own keys, and each server node stores only encrypted fragments of user data. Transactions are pseudonymous, identified only by the blockchain addresses of those involved.
\item Disintermediation: Blockchain enables the integration between different systems directly and efficiently. Thus, it is considered a connector of complex systems (systems of systems), allowing the elimination of intermediaries to simplify the design of systems and processes.
\item Cooperation and Incentives: Blockchains offer an incentive-based business model informed by game theory, and consensus is now offered on demand as a service at different levels and scopes.
\end{itemize}
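To make the hash linking mentioned above concrete, the following short Python sketch (an illustration written for this section, not code drawn from the cited works) appends blocks that each store the hash of their predecessor; re-verifying the chain then detects any later modification of an earlier block, which is the essence of the immutability property:
\begin{verbatim}
import hashlib, json

def block_hash(payload):
    # Hash the block's contents, including the previous block's hash.
    return hashlib.sha256(json.dumps(payload, sort_keys=True).encode()).hexdigest()

def append_block(chain, transactions):
    prev = chain[-1]["hash"] if chain else "0" * 64
    payload = {"prev_hash": prev, "transactions": transactions}
    chain.append({**payload, "hash": block_hash(payload)})

def verify(chain):
    # Recompute every hash; tampering with any block breaks the links after it.
    for i, block in enumerate(chain):
        prev = chain[i - 1]["hash"] if i > 0 else "0" * 64
        payload = {"prev_hash": block["prev_hash"],
                   "transactions": block["transactions"]}
        if block["prev_hash"] != prev or block["hash"] != block_hash(payload):
            return False
    return True

chain = []
append_block(chain, ["Alice pays Bob 5"])
append_block(chain, ["Bob pays Carol 2"])
assert verify(chain)
chain[0]["transactions"] = ["Alice pays Bob 500"]   # tampering...
assert not verify(chain)                            # ...is detected
\end{verbatim}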
|
(* Title: JinjaThreads/Common/Exceptions.thy
Author: Gerwin Klein, Martin Strecker, Andreas Lochbihler
Based on the Jinja theory Common/Exceptions.thy by Gerwin Klein and Martin Strecker
*)
header {* \isaheader{Exceptions} *}
theory Exceptions
imports
Value
begin
definition NullPointer :: cname
where [code_unfold]: "NullPointer = STR ''java/lang/NullPointerException''"
definition ClassCast :: cname
where [code_unfold]: "ClassCast = STR ''java/lang/ClassCastException''"
definition OutOfMemory :: cname
where [code_unfold]: "OutOfMemory = STR ''java/lang/OutOfMemoryError''"
definition ArrayIndexOutOfBounds :: cname
where [code_unfold]: "ArrayIndexOutOfBounds = STR ''java/lang/ArrayIndexOutOfBoundsException''"
definition ArrayStore :: cname
where [code_unfold]: "ArrayStore = STR ''java/lang/ArrayStoreException''"
definition NegativeArraySize :: cname
where [code_unfold]: "NegativeArraySize = STR ''java/lang/NegativeArraySizeException''"
definition ArithmeticException :: cname
where [code_unfold]: "ArithmeticException = STR ''java/lang/ArithmeticException''"
definition IllegalMonitorState :: cname
where [code_unfold]: "IllegalMonitorState = STR ''java/lang/IllegalMonitorStateException''"
definition IllegalThreadState :: cname
where [code_unfold]: "IllegalThreadState = STR ''java/lang/IllegalThreadStateException''"
definition InterruptedException :: cname
where [code_unfold]: "InterruptedException = STR ''java/lang/InterruptedException''"
definition sys_xcpts_list :: "cname list"
where
"sys_xcpts_list =
[NullPointer, ClassCast, OutOfMemory, ArrayIndexOutOfBounds, ArrayStore, NegativeArraySize, ArithmeticException,
IllegalMonitorState, IllegalThreadState, InterruptedException]"
definition sys_xcpts :: "cname set"
where [code_unfold]: "sys_xcpts = set sys_xcpts_list"
definition wf_syscls :: "'m prog \<Rightarrow> bool"
where "wf_syscls P \<equiv> (\<forall>C \<in> {Object, Throwable, Thread}. is_class P C) \<and> (\<forall>C \<in> sys_xcpts. P \<turnstile> C \<preceq>\<^sup>* Throwable)"
section "System exceptions"
lemma sys_xcpts_cases [consumes 1, cases set]:
"\<lbrakk> C \<in> sys_xcpts; P NullPointer; P OutOfMemory; P ClassCast;
P ArrayIndexOutOfBounds; P ArrayStore; P NegativeArraySize;
P ArithmeticException;
P IllegalMonitorState; P IllegalThreadState; P InterruptedException \<rbrakk>
\<Longrightarrow> P C"
by (auto simp add: sys_xcpts_def sys_xcpts_list_def)
lemma OutOfMemory_not_Object[simp]: "OutOfMemory \<noteq> Object"
by(simp add: OutOfMemory_def Object_def)
lemma ClassCast_not_Object[simp]: "ClassCast \<noteq> Object"
by(simp add: ClassCast_def Object_def)
lemma NullPointer_not_Object[simp]: "NullPointer \<noteq> Object"
by(simp add: NullPointer_def Object_def)
lemma ArrayIndexOutOfBounds_not_Object[simp]: "ArrayIndexOutOfBounds \<noteq> Object"
by(simp add: ArrayIndexOutOfBounds_def Object_def)
lemma ArrayStore_not_Object[simp]: "ArrayStore \<noteq> Object"
by(simp add: ArrayStore_def Object_def)
lemma NegativeArraySize_not_Object[simp]: "NegativeArraySize \<noteq> Object"
by(simp add: NegativeArraySize_def Object_def)
lemma ArithmeticException_not_Object[simp]: "ArithmeticException \<noteq> Object"
by(simp add: ArithmeticException_def Object_def)
lemma IllegalMonitorState_not_Object[simp]: "IllegalMonitorState \<noteq> Object"
by(simp add: IllegalMonitorState_def Object_def)
lemma IllegalThreadState_not_Object[simp]: "IllegalThreadState \<noteq> Object"
by(simp add: IllegalThreadState_def Object_def)
lemma InterruptedException_not_Object[simp]: "InterruptedException \<noteq> Object"
by(simp add: InterruptedException_def Object_def)
lemma sys_xcpts_neqs_aux:
"NullPointer \<noteq> ClassCast" "NullPointer \<noteq> OutOfMemory" "NullPointer \<noteq> ArrayIndexOutOfBounds"
"NullPointer \<noteq> ArrayStore" "NullPointer \<noteq> NegativeArraySize" "NullPointer \<noteq> IllegalMonitorState"
"NullPointer \<noteq> IllegalThreadState" "NullPointer \<noteq> InterruptedException" "NullPointer \<noteq> ArithmeticException"
"ClassCast \<noteq> OutOfMemory" "ClassCast \<noteq> ArrayIndexOutOfBounds"
"ClassCast \<noteq> ArrayStore" "ClassCast \<noteq> NegativeArraySize" "ClassCast \<noteq> IllegalMonitorState"
"ClassCast \<noteq> IllegalThreadState" "ClassCast \<noteq> InterruptedException" "ClassCast \<noteq> ArithmeticException"
"OutOfMemory \<noteq> ArrayIndexOutOfBounds"
"OutOfMemory \<noteq> ArrayStore" "OutOfMemory \<noteq> NegativeArraySize" "OutOfMemory \<noteq> IllegalMonitorState"
"OutOfMemory \<noteq> IllegalThreadState" "OutOfMemory \<noteq> InterruptedException"
"OutOfMemory \<noteq> ArithmeticException"
"ArrayIndexOutOfBounds \<noteq> ArrayStore" "ArrayIndexOutOfBounds \<noteq> NegativeArraySize" "ArrayIndexOutOfBounds \<noteq> IllegalMonitorState"
"ArrayIndexOutOfBounds \<noteq> IllegalThreadState" "ArrayIndexOutOfBounds \<noteq> InterruptedException" "ArrayIndexOutOfBounds \<noteq> ArithmeticException"
"ArrayStore \<noteq> NegativeArraySize" "ArrayStore \<noteq> IllegalMonitorState"
"ArrayStore \<noteq> IllegalThreadState" "ArrayStore \<noteq> InterruptedException"
"ArrayStore \<noteq> ArithmeticException"
"NegativeArraySize \<noteq> IllegalMonitorState"
"NegativeArraySize \<noteq> IllegalThreadState" "NegativeArraySize \<noteq> InterruptedException"
"NegativeArraySize \<noteq> ArithmeticException"
"IllegalMonitorState \<noteq> IllegalThreadState" "IllegalMonitorState \<noteq> InterruptedException"
"IllegalMonitorState \<noteq> ArithmeticException"
"IllegalThreadState \<noteq> InterruptedException"
"IllegalThreadState \<noteq> ArithmeticException"
"InterruptedException \<noteq> ArithmeticException"
by(simp_all add: NullPointer_def ClassCast_def OutOfMemory_def ArrayIndexOutOfBounds_def ArrayStore_def NegativeArraySize_def IllegalMonitorState_def IllegalThreadState_def InterruptedException_def ArithmeticException_def)
lemmas sys_xcpts_neqs = sys_xcpts_neqs_aux sys_xcpts_neqs_aux[symmetric]
lemma Thread_neq_sys_xcpts_aux:
"Thread \<noteq> NullPointer"
"Thread \<noteq> ClassCast"
"Thread \<noteq> OutOfMemory"
"Thread \<noteq> ArrayIndexOutOfBounds"
"Thread \<noteq> ArrayStore"
"Thread \<noteq> NegativeArraySize"
"Thread \<noteq> ArithmeticException"
"Thread \<noteq> IllegalMonitorState"
"Thread \<noteq> IllegalThreadState"
"Thread \<noteq> InterruptedException"
by(simp_all add: Thread_def NullPointer_def ClassCast_def OutOfMemory_def ArrayIndexOutOfBounds_def ArrayStore_def NegativeArraySize_def IllegalMonitorState_def IllegalThreadState_def InterruptedException_def ArithmeticException_def)
lemmas Thread_neq_sys_xcpts = Thread_neq_sys_xcpts_aux Thread_neq_sys_xcpts_aux[symmetric]
section {* Well-formedness for system classes and exceptions *}
lemma
assumes "wf_syscls P"
shows wf_syscls_class_Object: "\<exists>C fs ms. class P Object = Some (C,fs,ms)"
and wf_syscls_class_Thread: "\<exists>C fs ms. class P Thread = Some (C,fs,ms)"
using assms
by(auto simp: map_of_SomeI wf_syscls_def is_class_def)
lemma [simp]:
assumes "wf_syscls P"
shows wf_syscls_is_class_Object: "is_class P Object"
and wf_syscls_is_class_Thread: "is_class P Thread"
using assms by(simp_all add: is_class_def wf_syscls_class_Object wf_syscls_class_Thread)
lemma wf_syscls_xcpt_subcls_Throwable:
"\<lbrakk> C \<in> sys_xcpts; wf_syscls P \<rbrakk> \<Longrightarrow> P \<turnstile> C \<preceq>\<^sup>* Throwable"
by(simp add: wf_syscls_def is_class_def class_def)
lemma wf_syscls_is_class_Throwable:
"wf_syscls P \<Longrightarrow> is_class P Throwable"
by(auto simp add: wf_syscls_def is_class_def class_def map_of_SomeI)
lemma wf_syscls_is_class_sub_Throwable:
"\<lbrakk> wf_syscls P; P \<turnstile> C \<preceq>\<^sup>* Throwable \<rbrakk> \<Longrightarrow> is_class P C"
by(erule subcls_is_class1)(erule wf_syscls_is_class_Throwable)
lemma wf_syscls_is_class_xcpt:
"\<lbrakk> C \<in> sys_xcpts; wf_syscls P \<rbrakk> \<Longrightarrow> is_class P C"
by(blast intro: wf_syscls_is_class_sub_Throwable wf_syscls_xcpt_subcls_Throwable)
lemma wf_syscls_code [code]:
"wf_syscls P \<longleftrightarrow>
(\<forall>C \<in> set [Object, Throwable, Thread]. is_class P C) \<and> (\<forall>C \<in> sys_xcpts. P \<turnstile> C \<preceq>\<^sup>* Throwable)"
by(simp only: wf_syscls_def) simp
end
|
# Estimation of pi using Monte Carlo method
**Statement of problem**
Let two random variables $X$ and $Y$ be uniformly distributed in $[-1,1]$. The probability that the point with coordinates $(X, Y)$ lies in the unit circle (i.e. $X^2 + Y^2 < 1$) is:
\begin{equation}
p = \frac{S_C}{S_S} = \frac{r^2 \pi}{a^2} = \frac{\pi}{4}
\end{equation}
**Method**
Generate $N$ pairs $(X, Y)$ and count how many of them lie in the unit circle, $N_<$. Then estimate the probability that a point falls in the circle by:
\begin{equation}
p_< = \frac{N_<}{N}
\end{equation}
By the law of large numbers, this estimate converges to $p$ as $N \to \infty$. Hence, our estimate of $\pi$ is:
\begin{equation}
\hat \pi = 4p_<
\end{equation}
**Tasks**
1. Generate $N$ pairs $(X, Y)$ and, for each $N$, count how many of them fall in the unit circle, $N_<$. Choose the values of $N$ so that you can see how $\hat \pi$ approaches $\pi$ as $N$ increases (for example, use log-scaled values of $N$).
2. Save the data as an array recording, for each run, how many points were used and the corresponding estimate of $\pi$.
3. Plot the data.
```python
```
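A minimal sketch of one way to fill in the empty cell above, assuming `numpy` and `matplotlib` are available (the number of sample sizes, the random seed, and the file name `pi_estimates.npy` are arbitrary choices):

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(seed=0)

# Task 1: log-spaced sample sizes so the convergence is visible.
Ns = np.unique(np.logspace(2, 6, num=20, dtype=int))

estimates = []
for N in Ns:
    xy = rng.uniform(-1.0, 1.0, size=(N, 2))             # N points in the square [-1, 1]^2
    N_in = np.count_nonzero((xy**2).sum(axis=1) < 1.0)    # points inside the unit circle
    estimates.append(4.0 * N_in / N)                      # pi_hat = 4 * N_in / N

# Task 2: store (number of points, estimate of pi) as an array.
data = np.column_stack([Ns, estimates])
np.save("pi_estimates.npy", data)

# Task 3: plot the estimates against N.
plt.semilogx(data[:, 0], data[:, 1], "o-", label=r"$\hat\pi$")
plt.axhline(np.pi, color="gray", linestyle="--", label=r"$\pi$")
plt.xlabel("N")
plt.ylabel(r"estimate of $\pi$")
plt.legend()
plt.show()
```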
|
What's New: Ren Skincare, Glamglow and Marc Jacobs | Top Beauty Blog in the Philippines covering Makeup Reviews, Swatches, Skincare and More!
A revolutionary jelly-to-milk cleansing balm that instantly melts away face makeup and deeply cleanses, leaving skin purified and hydrated, a limited-edition shade of Le Marc Lip Crème Lipstick, as seen on the Marc Jacobs Fall 2017 runway and a luxurious serum full of bio-actives that leaves the skin feeling smoothed, lifted, and naturally primed, creating the perfect canvas for make-up application – all available now!
This limited-edition shade was created backstage at the Marc Jacobs Fall 2017 fashion show. Lead makeup artist Diane Kendal custom blended a new shade to be worn on the models by mixing two Le Marc Lipsticks in the shades Blacquer and Blow. Now available as a single lipstick in the shade Trax, this creamy black-burgundy Le Marc Lip Crème delivers 10-hour wear in just one sensual swipe. The unprecedented color payoff comes from concentrated color-boosting pigments that are triple-milled and enriched with hydrating ingredients. The formula imparts ultimate long-lasting color and moisture. Your lips will feel instantly plumped and nourished from powerful peptides, antioxidant-rich seaberry, and natural humectants, including meranti and cocoa butter. Its subtle vanilla taste and scent mimic a decadent dessert. Just snap the case shut and hear a satisfying click, which Marc Jacobs describes as "the sound of luxury."
Inspired by the celestial glow of the stars, GALACTICLEANSE is a luxurious cleansing experience. A splash of water activates the transforming properties of this starry midnight jelly balm to wash away daily makeup buildup and impurities, leaving you with silky smooth, hydrated, supple skin.
REN has created a clean, silicone-free primer that allows the skin to breathe freely. With added skin immunity-boosting technology from prebiotics and probiotics, Perfect Canvas boosts the skin’s defense barrier, instantly hydrating the skin, restoring moisture balance, and smoothing its surface to ‘perfect the canvas’ for a beautiful, healthy-looking complexion—with or without makeup.
All three of these sound fantastic, especially the cleanser!
Hi! Thanks for your visit and comment! |
-- ----------------------------------------------------
-- Exercise. Prove
-- ⊢ ((p → q) → p) → p
-- ----------------------------------------------------
import tactic
variables (p q : Prop)
open_locale classical
-- 1st proof
example :
((p → q) → p) → p :=
begin
intro h1,
by_cases h2 : p → q,
{ exact h1 h2, },
{ by_contra h3,
apply h2,
intro h4,
exfalso,
exact h3 h4, },
end
-- 2nd proof
example :
((p → q) → p) → p :=
begin
by_cases hp : p,
{ intro h1,
exact hp, },
{ intro h2,
exact h2 hp.elim, },
end
-- 3rd proof
example :
((p → q) → p) → p :=
if hp : p then λ h, hp else λ h, h hp.elim
-- 4th proof
example :
((p → q) → p) → p :=
-- by library_search
peirce p q
-- 5th proof
example :
((p → q) → p) → p :=
assume h1 : (p → q) → p,
show p, from
by_contradiction
( assume h2 : ¬p,
have h3 : ¬(p → q),
by exact mt h1 h2,
have h4 : p → q, from
assume h5 : p,
show q,
from not.elim h2 h5,
show false,
from h3 h4)
-- 6th proof
example :
((p → q) → p) → p :=
-- by hint
by tauto
-- 7th proof
example :
((p → q) → p) → p :=
by finish
|
theory PartialHeapSA
imports Mask PackageLogic
begin
section \<open>Definitions\<close>
type_synonym heap = "heap_loc \<rightharpoonup> val"
type_synonym pre_state = "mask \<times> heap"
definition valid_heap :: "mask \<Rightarrow> heap \<Rightarrow> bool" where
"valid_heap \<pi> h \<longleftrightarrow> (\<forall>hl. ppos (\<pi> hl) \<longrightarrow> h hl \<noteq> None)"
fun valid_state :: "pre_state \<Rightarrow> bool" where
"valid_state (\<pi>, h) \<longleftrightarrow> valid_mask \<pi> \<and> valid_heap \<pi> h"
lemma valid_stateI:
assumes "valid_mask \<pi>"
and "\<And>hl. ppos (\<pi> hl) \<Longrightarrow> h hl \<noteq> None"
shows "valid_state (\<pi>, h)"
using assms(1) assms(2) valid_heap_def valid_state.simps by blast
definition empty_heap where "empty_heap hl = None"
lemma valid_pre_unit:
"valid_state (empty_mask, empty_heap)"
using pnone.rep_eq ppos.rep_eq valid_empty valid_stateI by fastforce
typedef state = "{ \<phi> |\<phi>. valid_state \<phi> }"
using valid_pre_unit by blast
fun get_m :: "state \<Rightarrow> mask" where "get_m a = fst (Rep_state a)"
fun get_h :: "state \<Rightarrow> heap" where "get_h a = snd (Rep_state a)"
fun compatible_options where
"compatible_options (Some a) (Some b) \<longleftrightarrow> a = b"
| "compatible_options _ _ \<longleftrightarrow> True"
definition compatible_heaps :: "heap \<Rightarrow> heap \<Rightarrow> bool" where
"compatible_heaps h h' \<longleftrightarrow> (\<forall>hl. compatible_options (h hl) (h' hl))"
definition compatible :: "pre_state \<Rightarrow> pre_state \<Rightarrow> bool" where
"compatible \<phi> \<phi>' \<longleftrightarrow> compatible_heaps (snd \<phi>) (snd \<phi>') \<and> valid_mask (add_masks (fst \<phi>) (fst \<phi>'))"
fun add_states :: "pre_state \<Rightarrow> pre_state \<Rightarrow> pre_state" where
"add_states (\<pi>, h) (\<pi>', h') = (add_masks \<pi> \<pi>', h ++ h')"
definition larger_heap where
"larger_heap h' h \<longleftrightarrow> (\<forall>hl x. h hl = Some x \<longrightarrow> h' hl = Some x)"
definition unit :: "state" where
"unit = Abs_state (empty_mask, empty_heap)"
definition plus :: "state \<Rightarrow> state \<rightharpoonup> state" (infixl "\<oplus>" 63) where
"a \<oplus> b = (if compatible (Rep_state a) (Rep_state b) then Some (Abs_state (add_states (Rep_state a) (Rep_state b))) else None)"
definition core :: "state \<Rightarrow> state" (" |_| ") where
"core \<phi> = Abs_state (empty_mask, get_h \<phi>)"
definition stable :: "state \<Rightarrow> bool" where
"stable \<phi> \<longleftrightarrow> (\<forall>hl. ppos (get_m \<phi> hl) \<longleftrightarrow> get_h \<phi> hl \<noteq> None)"
section Lemmas
lemma valid_heapI:
assumes "\<And>hl. ppos (\<pi> hl) \<Longrightarrow> h hl \<noteq> None"
shows "valid_heap \<pi> h"
using assms valid_heap_def by presburger
lemma valid_state_decompose:
assumes "valid_state (add_masks a b, h)"
shows "valid_state (a, h)"
proof (rule valid_stateI)
show "valid_mask a"
using assms upper_valid_aux valid_state.simps by blast
fix hl assume "ppos (a hl)" then show "h hl \<noteq> None"
by (metis add_masks.simps assms ppos_add valid_heap_def valid_state.simps)
qed
lemma compatible_heapsI:
assumes "\<And>hl a b. h hl = Some a \<Longrightarrow> h' hl = Some b \<Longrightarrow> a = b"
shows "compatible_heaps h h'"
by (metis assms compatible_heaps_def compatible_options.elims(3))
lemma compatibleI_old:
assumes "\<And>hl x y. snd \<phi> hl = Some x \<and> snd \<phi>' hl = Some y \<Longrightarrow> x = y"
and "valid_mask (add_masks (fst \<phi>) (fst \<phi>'))"
shows "compatible \<phi> \<phi>'"
using assms(1) assms(2) compatible_def compatible_heapsI by presburger
lemma larger_heap_anti:
assumes "larger_heap a b"
and "larger_heap b a"
shows "a = b"
proof (rule ext)
fix x show "a x = b x"
proof (cases "a x")
case None
then show ?thesis
by (metis assms(1) larger_heap_def not_None_eq)
next
case (Some a)
then show ?thesis
by (metis assms(2) larger_heap_def)
qed
qed
lemma larger_heapI:
assumes "\<And>hl x. h hl = Some x \<Longrightarrow> h' hl = Some x"
shows "larger_heap h' h"
by (simp add: assms larger_heap_def)
lemma larger_heap_refl:
"larger_heap h h"
using larger_heap_def by blast
lemma compatible_heaps_comm:
assumes "compatible_heaps a b"
shows "a ++ b = b ++ a"
proof (rule ext)
fix x show "(a ++ b) x = (b ++ a) x"
proof (cases "a x")
case None
then show ?thesis
by (simp add: domIff map_add_dom_app_simps(2) map_add_dom_app_simps(3))
next
case (Some a)
then show ?thesis
by (metis (no_types, lifting) assms compatible_heaps_def compatible_options.elims(2) map_add_None map_add_dom_app_simps(1) map_add_dom_app_simps(3))
qed
qed
lemma larger_heaps_sum_ineq:
assumes "larger_heap a' a"
and "larger_heap b' b"
and "compatible_heaps a' b'"
shows "larger_heap (a' ++ b') (a ++ b)"
proof (rule larger_heapI)
fix hl x assume "(a ++ b) hl = Some x"
show "(a' ++ b') hl = Some x"
proof (cases "a hl")
case None
then show ?thesis
by (metis \<open>(a ++ b) hl = Some x\<close> assms(2) larger_heap_def map_add_SomeD map_add_find_right)
next
case (Some aa)
then show ?thesis
by (metis (mono_tags, lifting) \<open>(a ++ b) hl = Some x\<close> assms(1) assms(2) assms(3) compatible_heaps_comm larger_heap_def map_add_Some_iff)
qed
qed
lemma larger_heap_trans:
assumes "larger_heap a b"
and "larger_heap b c"
shows "larger_heap a c"
by (metis (no_types, opaque_lifting) assms(1) assms(2) larger_heap_def)
lemma larger_heap_comp:
assumes "larger_heap a b"
and "compatible_heaps a c"
shows "compatible_heaps b c"
proof (rule compatible_heapsI)
fix hl a ba
assume "b hl = Some a" "c hl = Some ba"
then show "a = ba"
by (metis assms(1) assms(2) compatible_heaps_def compatible_options.simps(1) larger_heap_def)
qed
lemma larger_heap_plus:
assumes "larger_heap a b"
and "larger_heap a c"
shows "larger_heap a (b ++ c)"
proof (rule larger_heapI)
fix hl x assume "(b ++ c) hl = Some x"
then show "a hl = Some x"
proof (cases "b hl")
case None
then show ?thesis
by (metis \<open>(b ++ c) hl = Some x\<close> assms(2) larger_heap_def map_add_SomeD)
next
case (Some bb)
then show ?thesis
by (metis \<open>(b ++ c) hl = Some x\<close> assms(1) assms(2) larger_heap_def map_add_SomeD)
qed
qed
lemma compatible_heaps_sum:
assumes "compatible_heaps a b"
and "compatible_heaps a c"
shows "compatible_heaps a (b ++ c)"
by (metis (no_types, opaque_lifting) assms(1) assms(2) compatible_heaps_def map_add_dom_app_simps(1) map_add_dom_app_simps(3))
lemma larger_compatible_sum_heaps:
assumes "larger_heap a x"
and "larger_heap b y"
and "compatible_heaps a b"
shows "compatible_heaps x y"
proof (rule compatible_heapsI)
fix hl a b assume "x hl = Some a" "y hl = Some b"
then show "a = b"
by (metis assms(1) assms(2) assms(3) compatible_heaps_def compatible_options.simps(1) larger_heap_def)
qed
lemma get_h_m:
"Rep_state x = (get_m x, get_h x)" by simp
lemma get_pre:
"get_h x = snd (Rep_state x)"
"get_m x = fst (Rep_state x)"
by simp_all
lemma plus_ab_defined:
"\<phi> \<oplus> \<phi>' \<noteq> None \<longleftrightarrow> compatible_heaps (get_h \<phi>) (get_h \<phi>') \<and> valid_mask (add_masks (get_m \<phi>) (get_m \<phi>'))"
(is "?A \<longleftrightarrow> ?B")
proof
show "?A \<Longrightarrow> ?B"
by (metis compatible_def get_pre(1) get_pre(2) plus_def)
show "?B \<Longrightarrow> ?A"
using compatible_def plus_def by auto
qed
lemma plus_charact:
assumes "a \<oplus> b = Some x"
shows "get_m x = add_masks (get_m a) (get_m b)"
and "get_h x = (get_h a) ++ (get_h b)"
proof -
have "x = (Abs_state (add_states (Rep_state a) (Rep_state b)))"
by (metis assms option.discI option.inject plus_def)
moreover have "compatible (Rep_state a) (Rep_state b)"
using assms(1) plus_def by (metis option.discI)
moreover have "valid_state (add_states (Rep_state a) (Rep_state b))"
proof -
have "valid_state (add_masks (get_m a) (get_m b), (get_h a) ++ (get_h b))"
proof (rule valid_stateI)
show "valid_mask (add_masks (get_m a) (get_m b))"
using calculation(2) compatible_def by fastforce
fix hl assume "ppos (add_masks (get_m a) (get_m b) hl)"
then show "(get_h a ++ get_h b) hl \<noteq> None"
proof (cases "ppos (get_m a hl)")
case True
then show ?thesis
by (metis Rep_state get_h_m map_add_None mem_Collect_eq valid_heap_def valid_state.simps)
next
case False
then have "ppos (get_m b hl)"
using \<open>ppos (add_masks (get_m a) (get_m b) hl)\<close> padd.rep_eq ppos.rep_eq by auto
then show ?thesis
by (metis Rep_state get_h_m map_add_None mem_Collect_eq valid_heap_def valid_state.simps)
qed
qed
then show ?thesis
using add_states.simps get_h_m by presburger
qed
ultimately show "get_m x = add_masks (get_m a) (get_m b)"
by (metis Abs_state_inverse add_states.simps fst_conv get_h_m mem_Collect_eq)
show "get_h x = (get_h a) ++ (get_h b)"
by (metis Abs_state_inject CollectI Rep_state Rep_state_inverse \<open>valid_state (add_states (Rep_state a) (Rep_state b))\<close> \<open>x = Abs_state (add_states (Rep_state a) (Rep_state b))\<close> add_states.simps eq_snd_iff get_h.simps)
qed
lemma commutative:
"a \<oplus> b = b \<oplus> a"
proof (cases "compatible_heaps (get_h a) (get_h b) \<and> valid_mask (add_masks (get_m a) (get_m b))")
case True
then have "compatible_heaps (get_h b) (get_h a) \<and> add_masks (get_m a) (get_m b) = add_masks (get_m b) (get_m a)"
by (metis add_masks_comm compatible_heapsI compatible_heaps_def compatible_options.simps(1))
then have "(get_h a) ++ (get_h b) = (get_h b) ++ (get_h a)"
by (simp add: compatible_heaps_comm)
then show ?thesis
by (metis True \<open>compatible_heaps (get_h b) (get_h a) \<and> add_masks (get_m a) (get_m b) = add_masks (get_m b) (get_m a)\<close> add_states.simps get_h_m plus_ab_defined plus_def)
next
case False
then show ?thesis
by (metis add_masks_comm compatible_heapsI compatible_heaps_def compatible_options.simps(1) plus_ab_defined)
qed
lemma asso1:
assumes "a \<oplus> b = Some ab \<and> b \<oplus> c = Some bc"
shows "ab \<oplus> c = a \<oplus> bc"
proof (cases "ab \<oplus> c")
case None
then show ?thesis
proof (cases "compatible_heaps (get_h ab) (get_h c)")
case True
then have "\<not> valid_mask (add_masks (add_masks (get_m a) (get_m b)) (get_m c))"
by (metis None assms plus_ab_defined plus_charact(1))
then show ?thesis
by (metis add_masks_asso assms plus_ab_defined plus_charact(1))
next
case False
then have "\<not> compatible_heaps (get_h a ++ get_h b) (get_h c)"
using assms plus_charact(2) by force
then obtain l x y where "(get_h a ++ get_h b) l = Some x" "get_h c l = Some y" "x \<noteq> y"
using compatible_heapsI by blast
then have "\<not> compatible_heaps (get_h a) (get_h b ++ get_h c)"
proof (cases "get_h a l")
case None
then show ?thesis
by (metis \<open>(get_h a ++ get_h b) l = Some x\<close> \<open>get_h c l = Some y\<close> \<open>x \<noteq> y\<close> assms compatible_heaps_comm map_add_dom_app_simps(1) map_add_dom_app_simps(3) map_add_find_right option.inject option.simps(3) plus_ab_defined)
next
case (Some aa)
then show ?thesis
by (metis \<open>(get_h a ++ get_h b) l = Some x\<close> \<open>get_h c l = Some y\<close> \<open>x \<noteq> y\<close> assms commutative compatible_heaps_def compatible_options.elims(2) map_add_find_right option.inject option.simps(3) plus_charact(2))
qed
then show ?thesis
by (metis None assms plus_ab_defined plus_charact(2))
qed
next
case (Some x)
then have "compatible_heaps (get_h a ++ get_h b) (get_h c)"
by (metis assms option.simps(3) plus_ab_defined plus_charact(2))
then have "compatible_heaps (get_h a) (get_h b ++ get_h c)"
by (metis (full_types) assms compatible_heaps_comm compatible_heaps_def compatible_heaps_sum compatible_options.simps(2) domIff map_add_dom_app_simps(1) option.distinct(1) plus_ab_defined)
moreover have "valid_mask (add_masks (get_m a) (add_masks (get_m b) (get_m c)))"
by (metis Some add_masks_asso assms option.distinct(1) plus_ab_defined plus_charact(1))
ultimately obtain y where "Some y = a \<oplus> bc"
by (metis assms plus_ab_defined plus_charact(1) plus_charact(2) plus_def)
then show ?thesis
by (metis (mono_tags, lifting) Some add_masks_asso add_states.simps assms get_h_m map_add_assoc option.distinct(1) plus_charact(1) plus_charact(2) plus_def)
qed
lemma asso2:
assumes "a \<oplus> b = Some ab \<and> b \<oplus> c = None"
shows " ab \<oplus> c = None"
proof (cases "valid_mask (add_masks (get_m b) (get_m c))")
case True
then have "\<not> compatible_heaps (get_h b) (get_h c)"
using assms plus_ab_defined by blast
then obtain l x y where "get_h b l = Some x" "get_h c l = Some y" "x \<noteq> y"
using compatible_heapsI by blast
then have "get_h ab l = Some x"
by (metis assms map_add_find_right plus_charact(2))
then show ?thesis
by (metis \<open>get_h c l = Some y\<close> \<open>x \<noteq> y\<close> compatible_heaps_def compatible_options.simps(1) plus_ab_defined)
next
case False
then obtain l where "\<not> (pgte pwrite (add_masks (get_m b) (get_m c) l))"
by (metis Abs_state_cases Rep_state_cases Rep_state_inverse add_masks_equiv_valid_null get_h_m mem_Collect_eq valid_mask.simps valid_null_def valid_state.simps)
then have "\<not> (pgte pwrite (add_masks (get_m ab) (get_m c) l))"
proof -
have "pgte (add_masks (get_m ab) (get_m c) l) (add_masks (get_m b) (get_m c) l)"
using assms p_greater_exists padd_asso padd_comm plus_charact(1) by auto
then show ?thesis
by (meson \<open>\<not> pgte pwrite (add_masks (get_m b) (get_m c) l)\<close> order_trans pgte.rep_eq)
qed
then show ?thesis
using plus_ab_defined valid_mask.simps by blast
qed
lemma core_defined:
"get_h |\<phi>| = get_h \<phi>"
"get_m |\<phi>| = empty_mask"
using Abs_state_inverse core_def pnone.rep_eq ppos.rep_eq valid_empty valid_stateI apply force
by (metis Abs_state_inverse CollectI core_def empty_mask.simps fst_conv get_pre(2) less_irrefl pnone.rep_eq ppos.rep_eq valid_empty valid_stateI)
lemma state_ext:
assumes "get_h a = get_h b"
and "get_m a = get_m b"
shows "a = b"
by (metis Rep_state_inverse assms(1) assms(2) get_h_m)
lemma core_is_smaller:
"Some x = x \<oplus> |x|"
proof -
obtain y where "Some y = x \<oplus> |x|"
by (metis Rep_state compatible_heapsI core_defined(1) core_defined(2) get_h_m mem_Collect_eq minus_empty option.collapse option.sel plus_ab_defined valid_state.simps)
moreover have "y = x"
proof (rule state_ext)
have "get_h x = get_h x ++ get_h x"
by (simp add: map_add_subsumed1)
then show "get_h y = get_h x"
using calculation core_defined(1) plus_charact(2) by presburger
show "get_m y = get_m x"
by (metis calculation core_defined(2) minus_empty plus_charact(1))
qed
ultimately show ?thesis by blast
qed
lemma core_is_pure:
"Some |x| = |x| \<oplus> |x|"
proof -
obtain y where "Some y = |x| \<oplus> |x|"
by (metis core_def core_defined(1) core_is_smaller)
moreover have "y = |x|"
by (metis calculation core_def core_defined(1) core_is_smaller option.sel)
ultimately show ?thesis by blast
qed
lemma core_sum:
assumes "Some c = a \<oplus> b"
shows "Some |c| = |a| \<oplus> |b|"
proof -
obtain x where "Some x = |a| \<oplus> |b|"
by (metis assms core_defined(1) core_defined(2) minus_empty option.exhaust_sel plus_ab_defined valid_empty)
moreover have "x = |c|"
by (metis assms calculation core_defined(1) core_defined(2) minus_empty plus_charact(1) plus_charact(2) state_ext)
ultimately show ?thesis by blast
qed
lemma core_max:
assumes "Some x = x \<oplus> c"
shows "\<exists>r. Some |x| = c \<oplus> r"
proof -
obtain y where "Some y = c \<oplus> |x|"
by (metis assms asso2 core_is_smaller plus_def)
moreover have "|x| = y"
by (metis (mono_tags, opaque_lifting) Rep_state_inverse add_masks_cancellative assms calculation commutative core_defined(1) core_sum get_h_m minus_empty option.inject plus_charact(1))
ultimately show ?thesis by blast
qed
lemma positivity:
assumes "a \<oplus> b = Some c"
and "Some c = c \<oplus> c"
shows "Some a = a \<oplus> a"
proof -
obtain x where "Some x = a \<oplus> a"
by (metis assms(1) assms(2) asso2 commutative option.exhaust_sel)
moreover have "x = a"
by (metis Rep_state_inverse add_masks_cancellative add_masks_comm assms(1) assms(2) calculation core_defined(1) core_defined(2) core_is_smaller get_h_m greater_mask_def greater_mask_properties(3) option.sel plus_charact(1))
ultimately show ?thesis by blast
qed
lemma cancellative:
assumes "Some a = b \<oplus> x"
and "Some a = b \<oplus> y"
and "|x| = |y|"
shows "x = y"
by (metis add_masks_cancellative assms(1) assms(2) assms(3) core_defined(1) plus_charact(1) state_ext)
lemma unit_charact:
"get_h unit = empty_heap"
"get_m unit = empty_mask"
proof -
have "valid_state (empty_mask, empty_heap)"
using valid_pre_unit by auto
then show "get_h unit = empty_heap" using unit_def
by (simp add: \<open>unit = Abs_state (empty_mask, empty_heap)\<close> Abs_state_inverse)
show "get_m unit = empty_mask"
using \<open>valid_state (empty_mask, empty_heap)\<close> unit_def Abs_state_inverse
by fastforce
qed
lemma empty_heap_neutral:
"a ++ empty_heap = a"
proof (rule ext)
fix x show "(a ++ empty_heap) x = a x"
by (simp add: domIff empty_heap_def map_add_dom_app_simps(3))
qed
lemma unit_neutral:
"Some a = a \<oplus> unit"
proof -
obtain x where "Some x = a \<oplus> unit"
by (metis Abs_state_cases Rep_state_cases Rep_state_inverse compatible_heapsI empty_heap_def fst_conv get_h_m mem_Collect_eq minus_empty option.distinct(1) option.exhaust_sel plus_ab_defined snd_conv unit_def valid_pre_unit valid_state.simps)
moreover have "x = a"
proof (rule state_ext)
show "get_h x = get_h a"
using calculation empty_heap_neutral plus_charact(2) unit_charact(1) by auto
show "get_m x = get_m a"
by (metis calculation minus_empty plus_charact(1) unit_charact(2))
qed
ultimately show ?thesis by blast
qed
lemma stableI:
assumes "\<And>hl. ppos (get_m \<phi> hl) \<longleftrightarrow> get_h \<phi> hl \<noteq> None"
shows "stable \<phi>"
using assms stable_def by blast
lemma stable_unit:
"stable unit"
by (metis empty_heap_def stable_def unit_charact(1) unit_charact(2) valid_heap_def valid_pre_unit valid_state.simps)
lemma stable_sum:
assumes "stable a"
and "stable b"
and "Some x = a \<oplus> b"
shows "stable x"
proof (rule stableI)
fix hl
show "ppos (get_m x hl) = (get_h x hl \<noteq> None)" (is "?A \<longleftrightarrow> ?B")
proof
show "?A \<Longrightarrow> ?B"
by (metis add_le_same_cancel2 add_masks.simps assms(1) assms(2) assms(3) leI less_le_trans map_add_None padd.rep_eq plus_charact(1) plus_charact(2) ppos.rep_eq stable_def)
show "?B \<Longrightarrow> ?A"
by (metis add_masks.simps assms(1) assms(2) assms(3) map_add_None padd_comm plus_charact(1) plus_charact(2) ppos_add stable_def)
qed
qed
lemma multiply_valid:
assumes "pgte pwrite p"
shows "valid_state (multiply_mask p (get_m \<phi>), get_h \<phi>)"
proof (rule valid_stateI)
show "valid_mask (multiply_mask p (get_m \<phi>))"
by (metis Rep_state assms(1) get_h_m mem_Collect_eq valid_mult valid_state.simps)
fix hl show "ppos (multiply_mask p (get_m \<phi>) hl) \<Longrightarrow> get_h \<phi> hl \<noteq> None"
by (metis Abs_state_cases Rep_state_cases Rep_state_inverse get_h_m mem_Collect_eq multiply_mask_def pmult_comm pmult_special(2) ppos_eq_pnone valid_heap_def valid_state.simps)
qed
section \<open>Instantiation of the package logic with this concrete state model\<close>
global_interpretation PartialSA: package_logic plus core unit stable
defines greater (infixl "\<succeq>" 50) = "PartialSA.greater"
and add_set (infixl "\<otimes>" 60) = "PartialSA.add_set"
and defined (infixl "|#|" 60) = "PartialSA.defined"
and greater_set (infixl "|\<ggreater>|" 50) = "PartialSA.greater_set"
and minus (infixl "|\<ominus>|" 60) = "PartialSA.minus"
apply standard
apply (simp add: commutative)
using asso1 apply blast
using asso2 apply blast
using core_is_smaller apply blast
using core_is_pure apply blast
using core_max apply blast
using core_sum apply blast
using positivity apply blast
using cancellative apply blast
using unit_neutral apply blast
using stable_sum apply blast
using stable_unit by blast
section \<open>Some lemmas about this instantiation\<close>
lemma greaterI:
assumes "larger_heap (get_h a) (get_h b)"
and "greater_mask (get_m a) (get_m b)"
shows "a \<succeq> b"
proof -
let ?m = "\<lambda>l. SOME p. get_m a l = padd (get_m b l) p"
have "get_m a = add_masks (get_m b) ?m"
proof (rule ext)
fix l
have "pgte (get_m a l) (get_m b l)"
by (meson assms(2) greater_mask_equiv_def)
then have "get_m a l = padd (get_m b l) (SOME p. get_m a l = padd (get_m b l) p)"
by (simp add: p_greater_exists verit_sko_ex')
then show "get_m a l = add_masks (get_m b) (\<lambda>l. SOME p. get_m a l = padd (get_m b l) p) l"
by simp
qed
moreover have "valid_state (?m, get_h a)"
proof (rule valid_stateI)
show "valid_mask (\<lambda>l. SOME p. get_m a l = padd (get_m b l) p)"
by (metis (no_types, lifting) Rep_state calculation get_h_m mem_Collect_eq upper_valid valid_state.simps)
fix hl
assume asm0: "ppos (SOME p. get_m a hl = padd (get_m b hl) p)"
then have "ppos (get_m a hl)"
by (metis (no_types, lifting) add_masks.elims add_masks_comm calculation greater_mask_def ppos_add)
then show "get_h a hl \<noteq> None"
by (metis Rep_state get_h.simps get_pre(2) mem_Collect_eq prod.collapse valid_heap_def valid_state.simps)
qed
moreover have "compatible_heaps (get_h b) (get_h a)"
by (metis (mono_tags, lifting) assms(1) compatible_heapsI larger_heap_def option.inject)
ultimately have "(get_m a, get_h a) = add_states (get_m b, get_h b) (?m, get_h a)"
proof -
have "get_h b ++ get_h a = get_h a"
proof (rule ext)
fix x show "(get_h b ++ get_h a) x = get_h a x"
by (metis assms(1) domIff larger_heap_def map_add_dom_app_simps(1) map_add_dom_app_simps(3) not_Some_eq)
qed
then show ?thesis
by (metis \<open>get_m a = add_masks (get_m b) (\<lambda>l. SOME p. get_m a l = padd (get_m b l) p)\<close> add_states.simps)
qed
moreover have "compatible_heaps (get_h b) (get_h a) \<and> valid_mask (add_masks (get_m b) ?m)"
by (metis Rep_state \<open>compatible_heaps (get_h b) (get_h a)\<close> \<open>get_m a = add_masks (get_m b) (\<lambda>l. SOME p. get_m a l = padd (get_m b l) p)\<close> get_h_m mem_Collect_eq valid_state.simps)
ultimately have "Some a = b \<oplus> Abs_state (?m, get_h a)"
proof -
have "Rep_state (Abs_state (?m, get_h a)) = (?m, get_h a)"
using Abs_state_inverse \<open>valid_state (\<lambda>l. SOME p. get_m a l = padd (get_m b l) p, get_h a)\<close> by blast
moreover have "compatible (Rep_state b) (?m, get_h a)"
using \<open>compatible_heaps (get_h b) (get_h a) \<and> valid_mask (add_masks (get_m b) (\<lambda>l. SOME p. get_m a l = padd (get_m b l) p))\<close> compatible_def by auto
moreover have "valid_state (add_states (Rep_state b) (?m, get_h a))"
by (metis Rep_state \<open>(get_m a, get_h a) = add_states (get_m b, get_h b) (\<lambda>l. SOME p. get_m a l = padd (get_m b l) p, get_h a)\<close> get_h_m mem_Collect_eq)
ultimately show ?thesis
by (metis (no_types, lifting) Rep_state_inverse \<open>(get_m a, get_h a) = add_states (get_m b, get_h b) (\<lambda>l. SOME p. get_m a l = padd (get_m b l) p, get_h a)\<close> get_h_m plus_def)
qed
then show ?thesis
by (meson PartialSA.greater_def)
qed
lemma larger_implies_greater_mask_hl:
assumes "a \<succeq> b"
shows "pgte (get_m a hl) (get_m b hl)"
using PartialSA.greater_def assms p_greater_exists plus_charact(1) by auto
lemma larger_implies_larger_heap:
assumes "a \<succeq> b"
shows "larger_heap (get_h a) (get_h b)"
by (metis (full_types) PartialSA.greater_equiv assms larger_heapI map_add_find_right plus_charact(2))
lemma compatibleI:
assumes "compatible_heaps (get_h a) (get_h b)"
and "valid_mask (add_masks (get_m a) (get_m b))"
shows "a |#| b"
using PartialSA.defined_def assms(1) assms(2) plus_ab_defined by presburger
end
|
lemma bfun_bounded_norm_range: "bounded (range (\<lambda>s. norm (apply_bfun v s)))"
proof -
obtain b where "\<forall>s. norm (v s) \<le> b"
using norm_le_norm_bfun by fast
thus ?thesis
by (simp add: bounded_norm_comp)
qed |
\clearpage
\subsection{C Function Prototypes} % (fold)
\label{sub:c_function_prototypes}
\csyntax{csynt:library-creation-function-prototype}{a Function Prototype}{library-creation/function-prototype-decl}
% subsection c_function_prototypes (end) |
not : (arg1 : Bool) -> (arg2 : Bool)
not True = False
not False = True
|
/-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.topology.algebra.module
import Mathlib.linear_algebra.multilinear
import Mathlib.PostPort
universes u v w₁ w₂ l u_1 w₃ w₁' w₄ w u_2 u_3
namespace Mathlib
/-!
# Continuous multilinear maps
We define continuous multilinear maps as maps from `Π(i : ι), M₁ i` to `M₂` which are multilinear
and continuous, by extending the space of multilinear maps with a continuity assumption.
Here, `M₁ i` and `M₂` are modules over a ring `R`, and `ι` is an arbitrary type, and all these
spaces are also topological spaces.
## Main definitions
* `continuous_multilinear_map R M₁ M₂` is the space of continuous multilinear maps from
`Π(i : ι), M₁ i` to `M₂`. We show that it is an `R`-module.
## Implementation notes
We mostly follow the API of multilinear maps.
## Notation
We introduce the notation `M [×n]→L[R] M'` for the space of continuous `n`-multilinear maps from
`M^n` to `M'`. This is a particular case of the general notion (where we allow varying dependent
types as the arguments of our continuous multilinear maps), but arguably the most important one,
especially when defining iterated derivatives.
-/
/-- Continuous multilinear maps over the ring `R`, from `Πi, M₁ i` to `M₂` where `M₁ i` and `M₂`
are modules over `R` with a topological structure. In applications, there will be compatibility
conditions between the algebraic and the topological structures, but this is not needed for the
definition. -/
structure continuous_multilinear_map (R : Type u) {ι : Type v} (M₁ : ι → Type w₁) (M₂ : Type w₂)
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂]
extends multilinear_map R M₁ M₂ where
cont : continuous (multilinear_map.to_fun _to_multilinear_map)
namespace continuous_multilinear_map
protected instance has_coe_to_fun {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] : has_coe_to_fun (continuous_multilinear_map R M₁ M₂) :=
has_coe_to_fun.mk (fun (f : continuous_multilinear_map R M₁ M₂) => ((i : ι) → M₁ i) → M₂)
fun (f : continuous_multilinear_map R M₁ M₂) => multilinear_map.to_fun (to_multilinear_map f)
theorem coe_continuous {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) : continuous ⇑f :=
cont f
@[simp] theorem coe_coe {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) :
⇑(to_multilinear_map f) = ⇑f :=
rfl
theorem to_multilinear_map_inj {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] : function.injective to_multilinear_map :=
sorry
theorem ext {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι] [semiring R]
[(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂] [(i : ι) → semimodule R (M₁ i)]
[semimodule R M₂] [(i : ι) → topological_space (M₁ i)] [topological_space M₂]
{f : continuous_multilinear_map R M₁ M₂} {f' : continuous_multilinear_map R M₁ M₂}
(H : ∀ (x : (i : ι) → M₁ i), coe_fn f x = coe_fn f' x) : f = f' :=
to_multilinear_map_inj (multilinear_map.ext H)
@[simp] theorem map_add {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) (m : (i : ι) → M₁ i) (i : ι)
(x : M₁ i) (y : M₁ i) :
coe_fn f (function.update m i (x + y)) =
coe_fn f (function.update m i x) + coe_fn f (function.update m i y) :=
multilinear_map.map_add' (to_multilinear_map f) m i x y
@[simp] theorem map_smul {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) (m : (i : ι) → M₁ i) (i : ι)
(c : R) (x : M₁ i) :
coe_fn f (function.update m i (c • x)) = c • coe_fn f (function.update m i x) :=
multilinear_map.map_smul' (to_multilinear_map f) m i c x
theorem map_coord_zero {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) {m : (i : ι) → M₁ i} (i : ι)
(h : m i = 0) : coe_fn f m = 0 :=
multilinear_map.map_coord_zero (to_multilinear_map f) i h
@[simp] theorem map_zero {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) [Nonempty ι] : coe_fn f 0 = 0 :=
multilinear_map.map_zero (to_multilinear_map f)
protected instance has_zero {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] : HasZero (continuous_multilinear_map R M₁ M₂) :=
{ zero := mk (multilinear_map.mk (multilinear_map.to_fun 0) sorry sorry) sorry }
protected instance inhabited {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] : Inhabited (continuous_multilinear_map R M₁ M₂) :=
{ default := 0 }
@[simp] theorem zero_apply {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (m : (i : ι) → M₁ i) : coe_fn 0 m = 0 :=
rfl
protected instance has_add {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] [has_continuous_add M₂] : Add (continuous_multilinear_map R M₁ M₂) :=
{ add :=
fun (f f' : continuous_multilinear_map R M₁ M₂) =>
mk
(multilinear_map.mk
(multilinear_map.to_fun (to_multilinear_map f + to_multilinear_map f')) sorry sorry)
sorry }
@[simp] theorem add_apply {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂)
(f' : continuous_multilinear_map R M₁ M₂) [has_continuous_add M₂] (m : (i : ι) → M₁ i) :
coe_fn (f + f') m = coe_fn f m + coe_fn f' m :=
rfl
@[simp] theorem to_multilinear_map_add {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] [has_continuous_add M₂] (f : continuous_multilinear_map R M₁ M₂)
(g : continuous_multilinear_map R M₁ M₂) :
to_multilinear_map (f + g) = to_multilinear_map f + to_multilinear_map g :=
rfl
protected instance add_comm_monoid {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] [has_continuous_add M₂] :
add_comm_monoid (continuous_multilinear_map R M₁ M₂) :=
add_comm_monoid.mk Add.add sorry 0 sorry sorry sorry
@[simp] theorem sum_apply {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] [has_continuous_add M₂] {α : Type u_1}
(f : α → continuous_multilinear_map R M₁ M₂) (m : (i : ι) → M₁ i) {s : finset α} :
coe_fn (finset.sum s fun (a : α) => f a) m = finset.sum s fun (a : α) => coe_fn (f a) m :=
sorry
/-- If `f` is a continuous multilinear map, then `f.to_continuous_linear_map m i` is the continuous
linear map obtained by fixing all coordinates but `i` equal to those of `m`, and varying the
`i`-th coordinate. -/
def to_continuous_linear_map {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) (m : (i : ι) → M₁ i) (i : ι) :
continuous_linear_map R (M₁ i) M₂ :=
continuous_linear_map.mk
(linear_map.mk (linear_map.to_fun (multilinear_map.to_linear_map (to_multilinear_map f) m i))
sorry sorry)
/-- The cartesian product of two continuous multilinear maps, as a continuous multilinear map. -/
def prod {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} {M₃ : Type w₃} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂] [add_comm_monoid M₃]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [semimodule R M₃]
[(i : ι) → topological_space (M₁ i)] [topological_space M₂] [topological_space M₃]
(f : continuous_multilinear_map R M₁ M₂) (g : continuous_multilinear_map R M₁ M₃) :
continuous_multilinear_map R M₁ (M₂ × M₃) :=
mk
(multilinear_map.mk
(multilinear_map.to_fun (multilinear_map.prod (to_multilinear_map f) (to_multilinear_map g)))
sorry sorry)
sorry
@[simp] theorem prod_apply {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
{M₃ : Type w₃} [DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)]
[add_comm_monoid M₂] [add_comm_monoid M₃] [(i : ι) → semimodule R (M₁ i)] [semimodule R M₂]
[semimodule R M₃] [(i : ι) → topological_space (M₁ i)] [topological_space M₂]
[topological_space M₃] (f : continuous_multilinear_map R M₁ M₂)
(g : continuous_multilinear_map R M₁ M₃) (m : (i : ι) → M₁ i) :
coe_fn (prod f g) m = (coe_fn f m, coe_fn g m) :=
rfl
/-- If `g` is continuous multilinear and `f` is a collection of continuous linear maps,
then `g (f₁ m₁, ..., fₙ mₙ)` is again a continuous multilinear map, that we call
`g.comp_continuous_linear_map f`. -/
def comp_continuous_linear_map {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₁' : ι → Type w₁'}
{M₄ : Type w₄} [DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)]
[(i : ι) → add_comm_monoid (M₁' i)] [add_comm_monoid M₄] [(i : ι) → semimodule R (M₁ i)]
[(i : ι) → semimodule R (M₁' i)] [semimodule R M₄] [(i : ι) → topological_space (M₁ i)]
[(i : ι) → topological_space (M₁' i)] [topological_space M₄]
(g : continuous_multilinear_map R M₁' M₄)
(f : (i : ι) → continuous_linear_map R (M₁ i) (M₁' i)) : continuous_multilinear_map R M₁ M₄ :=
mk
(multilinear_map.mk
(multilinear_map.to_fun
(multilinear_map.comp_linear_map (to_multilinear_map g)
fun (i : ι) => continuous_linear_map.to_linear_map (f i)))
sorry sorry)
sorry
@[simp] theorem comp_continuous_linear_map_apply {R : Type u} {ι : Type v} {M₁ : ι → Type w₁}
{M₁' : ι → Type w₁'} {M₄ : Type w₄} [DecidableEq ι] [semiring R]
[(i : ι) → add_comm_monoid (M₁ i)] [(i : ι) → add_comm_monoid (M₁' i)] [add_comm_monoid M₄]
[(i : ι) → semimodule R (M₁ i)] [(i : ι) → semimodule R (M₁' i)] [semimodule R M₄]
[(i : ι) → topological_space (M₁ i)] [(i : ι) → topological_space (M₁' i)]
[topological_space M₄] (g : continuous_multilinear_map R M₁' M₄)
(f : (i : ι) → continuous_linear_map R (M₁ i) (M₁' i)) (m : (i : ι) → M₁ i) :
coe_fn (comp_continuous_linear_map g f) m = coe_fn g fun (i : ι) => coe_fn (f i) (m i) :=
rfl
/-- In the specific case of continuous multilinear maps on spaces indexed by `fin (n+1)`, where one
can build an element of `Π(i : fin (n+1)), M i` using `cons`, one can express directly the
additivity of a multilinear map along the first variable. -/
theorem cons_add {R : Type u} {n : ℕ} {M : fin (Nat.succ n) → Type w} {M₂ : Type w₂} [semiring R]
[(i : fin (Nat.succ n)) → add_comm_monoid (M i)] [add_comm_monoid M₂]
[(i : fin (Nat.succ n)) → semimodule R (M i)] [semimodule R M₂]
[(i : fin (Nat.succ n)) → topological_space (M i)] [topological_space M₂]
(f : continuous_multilinear_map R M M₂) (m : (i : fin n) → M (fin.succ i)) (x : M 0) (y : M 0) :
coe_fn f (fin.cons (x + y) m) = coe_fn f (fin.cons x m) + coe_fn f (fin.cons y m) :=
multilinear_map.cons_add (to_multilinear_map f) m x y
/-- In the specific case of continuous multilinear maps on spaces indexed by `fin (n+1)`, where one
can build an element of `Π(i : fin (n+1)), M i` using `cons`, one can express directly the
multiplicativity of a multilinear map along the first variable. -/
theorem cons_smul {R : Type u} {n : ℕ} {M : fin (Nat.succ n) → Type w} {M₂ : Type w₂} [semiring R]
[(i : fin (Nat.succ n)) → add_comm_monoid (M i)] [add_comm_monoid M₂]
[(i : fin (Nat.succ n)) → semimodule R (M i)] [semimodule R M₂]
[(i : fin (Nat.succ n)) → topological_space (M i)] [topological_space M₂]
(f : continuous_multilinear_map R M M₂) (m : (i : fin n) → M (fin.succ i)) (c : R) (x : M 0) :
coe_fn f (fin.cons (c • x) m) = c • coe_fn f (fin.cons x m) :=
multilinear_map.cons_smul (to_multilinear_map f) m c x
theorem map_piecewise_add {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) (m : (i : ι) → M₁ i)
(m' : (i : ι) → M₁ i) (t : finset ι) :
coe_fn f (finset.piecewise t (m + m') m') =
finset.sum (finset.powerset t) fun (s : finset ι) => coe_fn f (finset.piecewise s m m') :=
multilinear_map.map_piecewise_add (to_multilinear_map f) m m' t
/-- Additivity of a continuous multilinear map along all coordinates at the same time,
writing `f (m + m')` as the sum of `f (s.piecewise m m')` over all sets `s`. -/
theorem map_add_univ {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) [fintype ι] (m : (i : ι) → M₁ i)
(m' : (i : ι) → M₁ i) :
coe_fn f (m + m') =
finset.sum finset.univ fun (s : finset ι) => coe_fn f (finset.piecewise s m m') :=
multilinear_map.map_add_univ (to_multilinear_map f) m m'
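/- Illustrative special case: for `ι = fin 2`, additivity in each coordinate gives
`f (m + m') = f (m 0, m 1) + f (m 0, m' 1) + f (m' 0, m 1) + f (m' 0, m' 1)`,
one summand `f (s.piecewise m m')` for each of the four subsets `s ⊆ {0, 1}`. -/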
/-- If `f` is continuous multilinear, then `f (Σ_{j₁ ∈ A₁} g₁ j₁, ..., Σ_{jₙ ∈ Aₙ} gₙ jₙ)` is the sum
of `f (g₁ (r 1), ..., gₙ (r n))` where `r` ranges over all functions with `r 1 ∈ A₁`, ...,
`r n ∈ Aₙ`. This follows from multilinearity by expanding successively with respect to each
coordinate. -/
theorem map_sum_finset {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) {α : ι → Type u_1} [fintype ι]
(g : (i : ι) → α i → M₁ i) (A : (i : ι) → finset (α i)) :
(coe_fn f fun (i : ι) => finset.sum (A i) fun (j : α i) => g i j) =
finset.sum (fintype.pi_finset A)
fun (r : (a : ι) → α a) => coe_fn f fun (i : ι) => g i (r i) :=
multilinear_map.map_sum_finset (to_multilinear_map f) (fun (i : ι) (j : α i) => g i j)
fun (i : ι) => A i
/-- If `f` is continuous multilinear, then `f (Σ_{j₁} g₁ j₁, ..., Σ_{jₙ} gₙ jₙ)` is the sum of
`f (g₁ (r 1), ..., gₙ (r n))` where `r` ranges over all functions `r`. This follows from
multilinearity by expanding successively with respect to each coordinate. -/
theorem map_sum {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) {α : ι → Type u_1} [fintype ι]
(g : (i : ι) → α i → M₁ i) [(i : ι) → fintype (α i)] :
(coe_fn f fun (i : ι) => finset.sum finset.univ fun (j : α i) => g i j) =
finset.sum finset.univ fun (r : (i : ι) → α i) => coe_fn f fun (i : ι) => g i (r i) :=
multilinear_map.map_sum (to_multilinear_map f) fun (i : ι) (j : α i) => g i j
/-- Reinterpret an `A`-multilinear map as an `R`-multilinear map, if `A` is an algebra over `R`
and their actions on all involved semimodules agree with the action of `R` on `A`. -/
def restrict_scalars (R : Type u) {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] {A : Type u_1} [semiring A] [has_scalar R A]
[(i : ι) → semimodule A (M₁ i)] [semimodule A M₂] [∀ (i : ι), is_scalar_tower R A (M₁ i)]
[is_scalar_tower R A M₂] (f : continuous_multilinear_map A M₁ M₂) :
continuous_multilinear_map R M₁ M₂ :=
mk (multilinear_map.restrict_scalars R (to_multilinear_map f)) sorry
@[simp] theorem coe_restrict_scalars (R : Type u) {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] {A : Type u_1} [semiring A] [has_scalar R A]
[(i : ι) → semimodule A (M₁ i)] [semimodule A M₂] [∀ (i : ι), is_scalar_tower R A (M₁ i)]
[is_scalar_tower R A M₂] (f : continuous_multilinear_map A M₁ M₂) :
⇑(restrict_scalars R f) = ⇑f :=
rfl
@[simp] theorem map_sub {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[ring R] [(i : ι) → add_comm_group (M₁ i)] [add_comm_group M₂] [(i : ι) → semimodule R (M₁ i)]
[semimodule R M₂] [(i : ι) → topological_space (M₁ i)] [topological_space M₂]
(f : continuous_multilinear_map R M₁ M₂) (m : (i : ι) → M₁ i) (i : ι) (x : M₁ i) (y : M₁ i) :
coe_fn f (function.update m i (x - y)) =
coe_fn f (function.update m i x) - coe_fn f (function.update m i y) :=
multilinear_map.map_sub (to_multilinear_map f) m i x y
protected instance has_neg {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [ring R] [(i : ι) → add_comm_group (M₁ i)] [add_comm_group M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] [topological_add_group M₂] : Neg (continuous_multilinear_map R M₁ M₂) :=
{ neg :=
fun (f : continuous_multilinear_map R M₁ M₂) =>
mk (multilinear_map.mk (multilinear_map.to_fun (-to_multilinear_map f)) sorry sorry) sorry }
@[simp] theorem neg_apply {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [ring R] [(i : ι) → add_comm_group (M₁ i)] [add_comm_group M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) [topological_add_group M₂]
(m : (i : ι) → M₁ i) : coe_fn (-f) m = -coe_fn f m :=
rfl
protected instance has_sub {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [ring R] [(i : ι) → add_comm_group (M₁ i)] [add_comm_group M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] [topological_add_group M₂] : Sub (continuous_multilinear_map R M₁ M₂) :=
{ sub :=
fun (f g : continuous_multilinear_map R M₁ M₂) =>
mk
(multilinear_map.mk (multilinear_map.to_fun (to_multilinear_map f - to_multilinear_map g))
sorry sorry)
sorry }
@[simp] theorem sub_apply {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [ring R] [(i : ι) → add_comm_group (M₁ i)] [add_comm_group M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂)
(f' : continuous_multilinear_map R M₁ M₂) [topological_add_group M₂] (m : (i : ι) → M₁ i) :
coe_fn (f - f') m = coe_fn f m - coe_fn f' m :=
rfl
protected instance add_comm_group {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [ring R] [(i : ι) → add_comm_group (M₁ i)] [add_comm_group M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] [topological_add_group M₂] :
add_comm_group (continuous_multilinear_map R M₁ M₂) :=
add_comm_group.mk Add.add sorry 0 sorry sorry Neg.neg Sub.sub sorry sorry
theorem map_piecewise_smul {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [comm_semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) (c : ι → R) (m : (i : ι) → M₁ i)
(s : finset ι) :
coe_fn f (finset.piecewise s (fun (i : ι) => c i • m i) m) =
(finset.prod s fun (i : ι) => c i) • coe_fn f m :=
multilinear_map.map_piecewise_smul (to_multilinear_map f) (fun (i : ι) => c i)
(fun (i : ι) => m i) s
/-- Multiplicativity of a continuous multilinear map along all coordinates at the same time,
writing `f (λ i, c i • m i)` as `(∏ i, c i) • f m`. -/
theorem map_smul_univ {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[comm_semiring R] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → semimodule R (M₁ i)] [semimodule R M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] (f : continuous_multilinear_map R M₁ M₂) [fintype ι] (c : ι → R)
(m : (i : ι) → M₁ i) :
(coe_fn f fun (i : ι) => c i • m i) =
(finset.prod finset.univ fun (i : ι) => c i) • coe_fn f m :=
multilinear_map.map_smul_univ (to_multilinear_map f) (fun (i : ι) => c i) fun (i : ι) => m i
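/- Illustrative special case: for `ι = fin 2` the statement above reads
`f (c 0 • m 0, c 1 • m 1) = (c 0 * c 1) • f (m 0, m 1)`. -/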
protected instance has_scalar {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] {R' : Type u_1} {A : Type u_2} [comm_semiring R'] [semiring A]
[algebra R' A] [(i : ι) → semimodule A (M₁ i)] [semimodule R' M₂] [semimodule A M₂]
[is_scalar_tower R' A M₂] [topological_space R'] [topological_semimodule R' M₂] :
has_scalar R' (continuous_multilinear_map A M₁ M₂) :=
has_scalar.mk
fun (c : R') (f : continuous_multilinear_map A M₁ M₂) =>
mk (multilinear_map.mk (multilinear_map.to_fun (c • to_multilinear_map f)) sorry sorry) sorry
@[simp] theorem smul_apply {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] {R' : Type u_1} {A : Type u_2} [comm_semiring R'] [semiring A]
[algebra R' A] [(i : ι) → semimodule A (M₁ i)] [semimodule R' M₂] [semimodule A M₂]
[is_scalar_tower R' A M₂] [topological_space R'] [topological_semimodule R' M₂]
(f : continuous_multilinear_map A M₁ M₂) (c : R') (m : (i : ι) → M₁ i) :
coe_fn (c • f) m = c • coe_fn f m :=
rfl
@[simp] theorem to_multilinear_map_smul {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → topological_space (M₁ i)] [topological_space M₂] {R' : Type u_1} {A : Type u_2}
[comm_semiring R'] [semiring A] [algebra R' A] [(i : ι) → semimodule A (M₁ i)]
[semimodule R' M₂] [semimodule A M₂] [is_scalar_tower R' A M₂] [topological_space R']
[topological_semimodule R' M₂] (c : R') (f : continuous_multilinear_map A M₁ M₂) :
to_multilinear_map (c • f) = c • to_multilinear_map f :=
rfl
protected instance is_scalar_tower {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] {R' : Type u_1} {A : Type u_2} [comm_semiring R'] [semiring A]
[algebra R' A] [(i : ι) → semimodule A (M₁ i)] [semimodule R' M₂] [semimodule A M₂]
[is_scalar_tower R' A M₂] [topological_space R'] [topological_semimodule R' M₂] {R'' : Type u_3}
[comm_semiring R''] [has_scalar R' R''] [algebra R'' A] [semimodule R'' M₂]
[is_scalar_tower R'' A M₂] [is_scalar_tower R' R'' M₂] [topological_space R'']
[topological_semimodule R'' M₂] : is_scalar_tower R' R'' (continuous_multilinear_map A M₁ M₂) :=
is_scalar_tower.mk
fun (c₁ : R') (c₂ : R'') (f : continuous_multilinear_map A M₁ M₂) =>
ext fun (x : (i : ι) → M₁ i) => smul_assoc c₁ c₂ (coe_fn (to_multilinear_map f) x)
/-- The space of continuous multilinear maps over an algebra over `R` is a module over `R`, for the
pointwise addition and scalar multiplication. -/
protected instance semimodule {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂} [DecidableEq ι]
[(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂] [(i : ι) → topological_space (M₁ i)]
[topological_space M₂] {R' : Type u_1} {A : Type u_2} [comm_semiring R'] [semiring A]
[algebra R' A] [(i : ι) → semimodule A (M₁ i)] [semimodule R' M₂] [semimodule A M₂]
[is_scalar_tower R' A M₂] [topological_space R'] [topological_semimodule R' M₂]
[has_continuous_add M₂] : semimodule R' (continuous_multilinear_map A M₁ M₂) :=
semimodule.mk sorry sorry
/-- Linear map version of the map `to_multilinear_map` associating to a continuous multilinear map
the corresponding multilinear map. -/
@[simp] theorem to_multilinear_map_linear_apply {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
[DecidableEq ι] [(i : ι) → add_comm_monoid (M₁ i)] [add_comm_monoid M₂]
[(i : ι) → topological_space (M₁ i)] [topological_space M₂] {R' : Type u_1} {A : Type u_2}
[comm_semiring R'] [semiring A] [algebra R' A] [(i : ι) → semimodule A (M₁ i)]
[semimodule R' M₂] [semimodule A M₂] [is_scalar_tower R' A M₂] [topological_space R']
[topological_semimodule R' M₂] [has_continuous_add M₂]
(f : continuous_multilinear_map A M₁ M₂) :
coe_fn to_multilinear_map_linear f = to_multilinear_map f :=
Eq.refl (coe_fn to_multilinear_map_linear f)
end continuous_multilinear_map
namespace continuous_linear_map
/-- Composing a continuous multilinear map with a continuous linear map gives again a
continuous multilinear map. -/
def comp_continuous_multilinear_map {R : Type u} {ι : Type v} {M₁ : ι → Type w₁} {M₂ : Type w₂}
{M₃ : Type w₃} [DecidableEq ι] [ring R] [(i : ι) → add_comm_group (M₁ i)] [add_comm_group M₂]
[add_comm_group M₃] [(i : ι) → module R (M₁ i)] [module R M₂] [module R M₃]
[(i : ι) → topological_space (M₁ i)] [topological_space M₂] [topological_space M₃]
(g : continuous_linear_map R M₂ M₃) (f : continuous_multilinear_map R M₁ M₂) :
continuous_multilinear_map R M₁ M₃ :=
continuous_multilinear_map.mk
(multilinear_map.mk
(multilinear_map.to_fun
(linear_map.comp_multilinear_map (to_linear_map g)
(continuous_multilinear_map.to_multilinear_map f)))
sorry sorry)
sorry
@[simp] theorem comp_continuous_multilinear_map_coe {R : Type u} {ι : Type v} {M₁ : ι → Type w₁}
{M₂ : Type w₂} {M₃ : Type w₃} [DecidableEq ι] [ring R] [(i : ι) → add_comm_group (M₁ i)]
[add_comm_group M₂] [add_comm_group M₃] [(i : ι) → module R (M₁ i)] [module R M₂] [module R M₃]
[(i : ι) → topological_space (M₁ i)] [topological_space M₂] [topological_space M₃]
(g : continuous_linear_map R M₂ M₃) (f : continuous_multilinear_map R M₁ M₂) :
⇑(comp_continuous_multilinear_map g f) = ⇑g ∘ ⇑f :=
funext fun (m : (i : ι) → M₁ i) => Eq.refl (coe_fn (comp_continuous_multilinear_map g f) m)
end Mathlib |
theory Indices
imports Main
begin
(**************************************************************************************************)
(**************************************************************************************************)
section\<open>Basic Lemmas for Manipulating Indices and Lists\<close>
(**************************************************************************************************)
(**************************************************************************************************)
fun index_list where
"index_list 0 = []"|
"index_list (Suc n) = index_list n @ [n]"
lemma index_list_length:
"length (index_list n) = n"
by(induction n, simp, auto )
lemma index_list_indices:
"k < n \<Longrightarrow> (index_list n)!k = k"
apply(induction n)
apply (simp; fail)
by (simp add: index_list_length nth_append)
lemma index_list_set:
"set (index_list n) = {..<n}"
apply(induction n)
apply force
by (metis Zero_not_Suc atLeastLessThan_empty atLeastLessThan_singleton atLeastLessThan_upt
diff_Suc_1 index_list.elims ivl_disj_un_singleton(2) lessI lessThan_Suc_atMost less_Suc_eq_le
set_append sorted_list_of_set_empty sorted_list_of_set_range upt_rec)
fun flat_map :: "('a => 'b list) => 'a list => 'b list" where
"flat_map f [] = []"
|"flat_map f (h#t) = (f h)@(flat_map f t)"
abbreviation(input) project_at_indices ("\<pi>\<^bsub>_\<^esub>") where
"project_at_indices S as \<equiv> nths as S"
fun insert_at_index :: "'a list \<Rightarrow> 'a \<Rightarrow> nat \<Rightarrow> 'a list" where
"insert_at_index as a n = (take n as) @ (a#(drop n as))"
lemma insert_at_index_length:
shows "length (insert_at_index as a n) = length as + 1"
by(induction n, auto)
lemma insert_at_index_eq[simp]:
assumes "n \<le> length as"
shows "(insert_at_index as a n)!n = a"
by (metis assms insert_at_index.elims length_take min.absorb2 nth_append_length)
lemma insert_at_index_eq'[simp]:
assumes "n \<le> length as"
assumes "k < n"
shows "(insert_at_index as a n)!k = as ! k"
using assms
by (simp add: nth_append)
lemma insert_at_index_eq''[simp]:
assumes "n < length as"
assumes "k \<le> n"
shows "(insert_at_index as a k)!(Suc n) = as ! n"
using assms insert_at_index.simps[of as a k]
by (smt Suc_diff_Suc append_take_drop_id diff_Suc_Suc dual_order.order_iff_strict
le_imp_less_Suc length_take less_trans min.absorb2 not_le nth_Cons_Suc nth_append)
text\<open>Correctness of project\_at\_indices\<close>
definition indices_of :: "'a list \<Rightarrow> nat set" where
"indices_of as = {..<(length as)}"
lemma proj_at_index_list_length[simp]:
assumes "S \<subseteq> indices_of as"
shows "length (project_at_indices S as) = card S"
proof-
have "S = {i. i < length as \<and> i \<in> S}"
using assms unfolding indices_of_def
by blast
thus ?thesis
using length_nths[of as S] by auto
qed
text\<open>A function which enumerates finite sets\<close>
abbreviation(input) set_to_list :: "nat set \<Rightarrow> nat list" where
"set_to_list S \<equiv> sorted_list_of_set S"
lemma set_to_list_set:
assumes "finite S"
shows "set (set_to_list S) = S"
by (simp add: assms)
lemma set_to_list_length:
assumes "finite S"
shows "length (set_to_list S) = card S"
by (metis assms length_remdups_card_conv length_sort set_sorted_list_of_set sorted_list_of_set_sort_remdups)
lemma set_to_list_empty:
assumes "card S = 0"
shows "set_to_list S = []"
by (metis assms length_0_conv length_sorted_list_of_set)
lemma set_to_list_first:
assumes "card S > 0"
shows "Min S = set_to_list S ! 0 "
proof-
have 0: "set (set_to_list S) = S"
using assms card_ge_0_finite set_sorted_list_of_set by blast
have 1: "sorted (set_to_list S)"
by simp
show ?thesis apply(rule Min_eqI)
using assms card_ge_0_finite apply blast
apply (metis "0" "1" in_set_conv_nth less_Suc0 less_or_eq_imp_le not_less_eq sorted_iff_nth_mono_less)
by (metis "0" Max_in assms card_0_eq card_ge_0_finite gr_zeroI in_set_conv_nth not_less0)
qed
lemma set_to_list_last:
assumes "card S > 0"
shows "Max S = last (set_to_list S)"
proof-
have 0: "set (set_to_list S) = S"
using assms card_ge_0_finite set_sorted_list_of_set by blast
have 1: "sorted (set_to_list S)"
by simp
show ?thesis apply(rule Max_eqI)
using assms card_ge_0_finite apply blast
apply (smt "0" "1" Suc_diff_1 in_set_conv_nth last_conv_nth le_simps(2) length_greater_0_conv
less_or_eq_imp_le nat_neq_iff neq0_conv not_less_eq sorted_iff_nth_mono_less)
by (metis "0" assms card.empty empty_set last_in_set less_numeral_extra(3))
qed
lemma set_to_list_insert_Max:
assumes "finite S"
assumes "\<And>s. s \<in> S \<Longrightarrow> a > s"
shows "set_to_list (insert a S) = set_to_list S @[a]"
by (metis assms(1) assms(2) card_0_eq card_insert_if finite.insertI infinite_growing
insert_not_empty less_imp_le_nat sorted_insort_is_snoc sorted_list_of_set(1) sorted_list_of_set(2)
sorted_list_of_set_insert)
lemma set_to_list_insert_Min:
assumes "finite S"
assumes "\<And>s. s \<in> S \<Longrightarrow> a < s"
shows "set_to_list (insert a S) = a#set_to_list S"
by (metis assms(1) assms(2) insort_is_Cons nat_less_le sorted_list_of_set(1) sorted_list_of_set_insert)
fun nth_elem where
"nth_elem S n = set_to_list S ! n"
lemma nth_elem_closed:
assumes "i < card S"
shows "nth_elem S i \<in> S"
by (metis assms card.infinite not_less0 nth_elem.elims nth_mem set_to_list_length sorted_list_of_set(1))
lemma nth_elem_Min:
assumes "card S > 0"
shows "nth_elem S 0 = Min S"
by (simp add: assms set_to_list_first)
lemma nth_elem_Max:
assumes "card S > 0"
shows "nth_elem S (card S - 1) = Max S"
proof-
have "last (set_to_list S) = set_to_list S ! (card S - 1)"
by (metis assms card_0_eq card_ge_0_finite last_conv_nth neq0_conv set_to_list_length sorted_list_of_set_eq_Nil_iff)
thus ?thesis
using assms set_to_list_last set_to_list_length
by simp
qed
lemma nth_elem_Suc:
assumes "card S > Suc n"
shows "nth_elem S (Suc n) > nth_elem S n"
using assms sorted_sorted_list_of_set[of S] set_to_list_length[of S]
by (metis Suc_lessD card.infinite distinct_sorted_list_of_set lessI nat_less_le not_less0 nth_elem.elims nth_eq_iff_index_eq sorted_iff_nth_mono_less)
lemma nth_elem_insert_Min:
assumes "card S > 0"
assumes "a < Min S"
shows "nth_elem (insert a S) (Suc i) = nth_elem S i"
using assms
by (metis Min_gr_iff card_0_eq card_ge_0_finite neq0_conv nth_Cons_Suc nth_elem.elims set_to_list_insert_Min)
lemma set_to_list_Suc_map:
assumes "finite S"
shows "set_to_list (Suc ` S) = map Suc (set_to_list S)"
proof-
obtain n where n_def: "n = card S"
by blast
have "\<And>S. card S = n \<Longrightarrow> set_to_list (Suc ` S) = map Suc (set_to_list S)"
proof(induction n)
case 0
then show ?case
by (metis card_eq_0_iff finite_imageD image_is_empty inj_Suc list.simps(8) set_to_list_empty)
next
case (Suc n)
have 0: "S = insert (Min S) (S - {Min S})"
by (metis Min_in Suc.prems card_gt_0_iff insert_Diff zero_less_Suc)
have 1: "sorted_list_of_set (Suc ` (S - {Min S})) = map Suc (sorted_list_of_set (S - {Min S}))"
by (metis "0" Suc.IH Suc.prems card_Diff_singleton card.infinite diff_Suc_1 insertI1 nat.simps(3))
have 2: "set_to_list S = (Min S)#(set_to_list (S - {Min S}))"
by (metis "0" DiffD1 Min_le Suc.prems card_Diff_singleton card.infinite card_insert_if
diff_Suc_1 finite_Diff n_not_Suc_n nat.simps(3) nat_less_le set_to_list_insert_Min)
have 3: "sorted_list_of_set (Suc ` S) = (Min (Suc ` S))#(set_to_list ((Suc ` S) - {Min (Suc ` S)}))"
by (metis DiffD1 Diff_idemp Min_in Min_le Suc.prems card_Diff1_less card_eq_0_iff finite_Diff
finite_imageI image_is_empty insert_Diff nat.simps(3) nat_less_le set_to_list_insert_Min)
have 4: "(Min (Suc ` S)) = Suc (Min S)"
by (metis Min.hom_commute Suc.prems Suc_le_mono card_eq_0_iff min_def nat.simps(3))
have 5: "sorted_list_of_set (Suc ` S) = Suc (Min S)#(set_to_list ((Suc ` S) - {Suc (Min S)}))"
using 3 4 by auto
have 6: "sorted_list_of_set (Suc ` S) = Suc (Min S)#(set_to_list (Suc ` (S - {Min S})))"
by (metis (no_types, lifting) "0" "5" Diff_insert_absorb image_insert inj_Suc inj_on_insert)
show ?case
using 6
by (simp add: "1" "2")
qed
thus ?thesis
using n_def by blast
qed
lemma nth_elem_Suc_im:
assumes "i < card S"
shows "nth_elem (Suc ` S) i = Suc (nth_elem S i) "
using set_to_list_Suc_map
by (metis assms card_ge_0_finite dual_order.strict_trans not_gr0 nth_elem.elims nth_map set_to_list_length)
lemma set_to_list_upto:
"set_to_list {..<n} = [0..<n]"
by (simp add: lessThan_atLeast0)
lemma nth_elem_upto:
assumes "i < n"
shows "nth_elem {..<n} i = i"
using set_to_list_upto
by (simp add: assms)
text\<open>Characterizing the entries of project\_at\_indices \<close>
lemma project_at_indices_append:
"project_at_indices S (as@bs) = project_at_indices S as @ project_at_indices {j. j + length as \<in> S} bs"
using nths_append[of as bs S] by auto
lemma project_at_indices_nth:
assumes "S \<subseteq> indices_of as"
assumes "card S > i"
shows "project_at_indices S as ! i = as ! (nth_elem S i)"
proof-
have "\<And> S i. S \<subseteq> indices_of as \<and> card S > i \<Longrightarrow> project_at_indices S as ! i = as ! (nth_elem S i)"
proof(induction as)
case Nil
then show ?case
by (metis list.size(3) not_less0 nths_nil proj_at_index_list_length)
next
case (Cons a as)
assume A: "S \<subseteq> indices_of (a # as) \<and> i < card S"
have 0: "nths (a # as) S = (if 0 \<in> S then [a] else []) @ nths as {j. Suc j \<in> S}"
using nths_Cons[of a as S] by simp
show "nths (a # as) S ! i = (a # as) ! nth_elem S i"
proof(cases "0 \<in> S")
case True
show ?thesis
proof(cases "S = {0}")
case True
then show ?thesis
using "0" Cons.prems by auto
next
case False
have T0: "nths (a # as) S = a#nths as {j. Suc j \<in> S}"
using 0
by (simp add: True)
have T1: "{j. Suc j \<in> S} \<subseteq> indices_of as"
proof fix x assume A: "x \<in> {j. Suc j \<in> S}"
then have "Suc x < length (a#as)"
using Cons.prems indices_of_def by blast
then show "x \<in> indices_of as"
by (simp add: indices_of_def)
qed
have T2: "\<And>i. i < card {j. Suc j \<in> S} \<Longrightarrow> nths as {j. Suc j \<in> S} ! i = as ! nth_elem {j. Suc j \<in> S} i"
using Cons.IH T1 by blast
have T3: "\<And>i. i < card {j. Suc j \<in> S} \<Longrightarrow> nth_elem {j. j > 0 \<and> j\<in> S} i = nth_elem S (Suc i)"
proof-
have 0: " 0 < card {j. Suc j \<in> S}"
by (smt Cons.prems Diff_iff Diff_subset False T0 T1 True add_diff_cancel_left'
card.insert card_0_eq card.infinite finite_subset gr_zeroI insert_Diff
length_Cons n_not_Suc_n plus_1_eq_Suc proj_at_index_list_length singletonI)
have 1: "(insert 0 {j. 0 < j \<and> j \<in> S}) = S"
apply(rule set_eqI) using True gr0I by blast
have 2: "0 < Min {j. 0 < j \<and> j \<in> S}" using False
by (metis (mono_tags, lifting) "1" Cons.prems Min_in finite_insert finite_lessThan
finite_subset indices_of_def less_Suc_eq less_Suc_eq_0_disj mem_Collect_eq singleton_conv)
show "\<And>i. i < card {j. Suc j \<in> S} \<Longrightarrow> nth_elem {j. 0 < j \<and> j \<in> S} i = nth_elem S (Suc i)"
using 0 1 2 nth_elem_insert_Min[of "{j. 0 < j \<and> j \<in> S}" 0] True False
by (metis (no_types, lifting) Cons.prems T0 T1 card_gt_0_iff finite_insert length_Cons less_SucI proj_at_index_list_length)
qed
show "nths (a # as) S ! i = (a # as) ! nth_elem S i"
apply(cases "i = 0")
apply (metis Cons.prems Min_le T0 True card_ge_0_finite le_zero_eq nth_Cons' nth_elem_Min)
proof-
assume "i \<noteq> 0"
then have "i = Suc (i - 1)"
using Suc_pred' by blast
hence "nths (a # as) S ! i = nths as {j. Suc j \<in> S} ! (i-1)"
using A by (simp add: T0)
thus "nths (a # as) S ! i = (a # as) ! nth_elem S i"
proof-
have "i - 1 < card {j. Suc j \<in> S}"
by (metis Cons.prems Suc_less_SucD T0 T1 \<open>i = Suc (i - 1)\<close> length_Cons proj_at_index_list_length)
hence 0: "nth_elem {j. 0 < j \<and> j \<in> S} (i - 1) = nth_elem S i"
using T3[of "i-1"] \<open>i = Suc (i - 1)\<close> by auto
have 1: "nths as {j. Suc j \<in> S} ! (i-1) = as ! nth_elem {j. Suc j \<in> S} (i-1)"
using T2 \<open>i - 1 < card {j. Suc j \<in> S}\<close> by blast
have 2: "(a # as) ! nth_elem S i = as! ((nth_elem S i) - 1)"
by (metis Cons.prems \<open>i = Suc (i - 1)\<close> not_less0 nth_Cons' nth_elem_Suc)
have 3: "(nth_elem S i) - 1 = nth_elem {j. Suc j \<in> S} (i-1)"
proof-
have "Suc ` {j. Suc j \<in> S} = {j. 0 < j \<and> j \<in> S}"
proof
show "Suc ` {j. Suc j \<in> S} \<subseteq> {j. 0 < j \<and> j \<in> S}"
by blast
show "{j. 0 < j \<and> j \<in> S} \<subseteq> Suc ` {j. Suc j \<in> S}"
using Suc_pred gr0_conv_Suc by auto
qed
thus ?thesis
using "0" \<open>i - 1 < card {j. Suc j \<in> S}\<close> nth_elem_Suc_im by fastforce
qed
show "nths (a # as) S ! i = (a # as) ! nth_elem S i"
using "1" "2" "3" \<open>nths (a # as) S ! i = nths as {j. Suc j \<in> S} ! (i - 1)\<close> by auto
qed
qed
qed
next
case False
have F0: "nths (a # as) S = nths as {j. Suc j \<in> S}"
by (simp add: "0" False)
have F1: "Suc `{j. Suc j \<in> S} = S"
proof show "Suc ` {j. Suc j \<in> S} \<subseteq> S" by auto
show "S \<subseteq> Suc ` {j. Suc j \<in> S}" using False Suc_pred
by (smt image_iff mem_Collect_eq neq0_conv subsetI)
qed
have F2: "{j. Suc j \<in> S} \<subseteq> indices_of as \<and> i < card {j. Suc j \<in> S}"
using F1
by (metis (mono_tags, lifting) A F0 Suc_less_SucD indices_of_def length_Cons lessThan_iff
mem_Collect_eq proj_at_index_list_length subset_iff)
have F3: "project_at_indices {j. Suc j \<in> S} as ! i = as ! (nth_elem {j. Suc j \<in> S} i)"
using F2 Cons(1)[of "{j. Suc j \<in> S}"] Cons(2)
by blast
then show ?thesis
using F0 F1 F2 nth_elem_Suc_im by fastforce
qed
qed
then show ?thesis
using assms(1) assms(2) by blast
qed
text\<open>An inverse for nth\_elem\<close>
definition set_rank where
"set_rank S x = (THE i. i < card S \<and> x = nth_elem S i)"
lemma set_rank_exist:
assumes "finite S"
assumes "x \<in> S"
shows "\<exists>i. i < card S \<and> x = nth_elem S i"
using assms nth_elem.simps[of S]
by (metis in_set_conv_nth set_to_list_length sorted_list_of_set(1))
lemma set_rank_unique:
assumes "finite S"
assumes "x \<in> S"
assumes "i < card S \<and> x = nth_elem S i"
assumes "j < card S \<and> x = nth_elem S j"
shows "i = j"
using assms nth_elem.simps[of S]
by (simp add: \<open>i < card S \<and> x = nth_elem S i\<close> \<open>j < card S \<and> x = nth_elem S j\<close>
nth_eq_iff_index_eq set_to_list_length)
lemma nth_elem_set_rank_inv:
assumes "finite S"
assumes "x \<in> S"
shows "nth_elem S (set_rank S x) = x"
using the_equality set_rank_unique set_rank_exist assms
unfolding set_rank_def
by smt
lemma set_rank_nth_elem_inv:
assumes "finite S"
assumes "i < card S"
shows "set_rank S (nth_elem S i) = i"
using the_equality set_rank_unique set_rank_exist assms
unfolding set_rank_def
proof -
show "(THE n. n < card S \<and> nth_elem S i = nth_elem S n) = i"
using assms(1) assms(2) nth_elem_closed set_rank_unique by blast
qed
lemma set_rank_range:
assumes "finite S"
assumes "x \<in> S"
shows "set_rank S x < card S"
using assms(1) assms(2) set_rank_exist set_rank_nth_elem_inv by fastforce
lemma project_at_indices_nth':
assumes "S \<subseteq> indices_of as"
assumes "i \<in> S"
shows "as ! i = project_at_indices S as ! (set_rank S i) "
by (metis assms(1) assms(2) finite_lessThan finite_subset indices_of_def nth_elem_set_rank_inv
project_at_indices_nth set_rank_range)
fun proj_away_from_index :: "nat \<Rightarrow> 'a list \<Rightarrow> 'a list" ("\<pi>\<^bsub>\<noteq>_\<^esub>") where
"proj_away_from_index n as = (take n as)@(drop (Suc n) as)"
text\<open>proj\_away\_from\_index is an inverse to insert\_at\_index\<close>
lemma insert_at_index_project_away[simp]:
assumes "k < length as"
assumes "bs = (insert_at_index as a k)"
shows "\<pi>\<^bsub>\<noteq> k\<^esub> bs = as"
using assms insert_at_index.simps[of as a k] proj_away_from_index.simps[of k bs]
by (simp add: \<open>k < length as\<close> less_imp_le_nat min.absorb2)
definition fibred_cell :: "'a list set \<Rightarrow> ('a list \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list set" where
"fibred_cell C P = {as . \<exists>x t. as = (t#x) \<and> x \<in> C \<and> (P x t)}"
definition fibred_cell_at_ind :: "nat \<Rightarrow> 'a list set \<Rightarrow> ('a list \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a list set" where
"fibred_cell_at_ind n C P = {as . \<exists>x t. as = (insert_at_index x t n) \<and> x \<in> C \<and> (P x t)}"
lemma fibred_cell_lengths:
assumes "\<And>k. k \<in> C \<Longrightarrow> length k = n"
shows "k \<in> (fibred_cell C P) \<Longrightarrow> length k = Suc n"
proof-
assume "k \<in> (fibred_cell C P)"
obtain x t where "k = (t#x) \<and> x \<in> C \<and> P x t"
proof -
assume a1: "\<And>t x. k = t # x \<and> x \<in> C \<and> P x t \<Longrightarrow> thesis"
have "\<exists>as a. k = a # as \<and> as \<in> C \<and> P as a"
using \<open>k \<in> fibred_cell C P\<close> fibred_cell_def by blast
then show ?thesis
using a1 by blast
qed
then show ?thesis
by (simp add: assms)
qed
lemma fibred_cell_at_ind_lengths:
assumes "\<And>k. k \<in> C \<Longrightarrow> length k = n"
assumes "k \<le> n"
shows "c \<in> (fibred_cell_at_ind k C P) \<Longrightarrow> length c = Suc n"
proof-
assume "c \<in> (fibred_cell_at_ind k C P)"
then obtain x t where "c = (insert_at_index x t k) \<and> x \<in> C \<and> (P x t)"
using assms
unfolding fibred_cell_at_ind_def
by blast
then show ?thesis
by (simp add: assms(1))
qed
lemma project_fibred_cell:
assumes "\<And>k. k \<in> C \<Longrightarrow> length k = n"
assumes "k < n"
assumes "\<forall>x \<in> C. \<exists>t. P x t"
shows "\<pi>\<^bsub>\<noteq> k\<^esub> ` (fibred_cell_at_ind k C P) = C"
proof
show "\<pi>\<^bsub>\<noteq>k\<^esub> ` fibred_cell_at_ind k C P \<subseteq> C"
proof
fix x
assume x_def: "x \<in> \<pi>\<^bsub>\<noteq>k\<^esub> ` fibred_cell_at_ind k C P"
then obtain c where c_def: "x = \<pi>\<^bsub>\<noteq>k\<^esub> c \<and> c \<in> fibred_cell_at_ind k C P"
by blast
then obtain y t where yt_def: "c = (insert_at_index y t k) \<and> y \<in> C \<and> (P y t)"
using assms
unfolding fibred_cell_at_ind_def
by blast
have 0: "x =\<pi>\<^bsub>\<noteq>k\<^esub> c"
by (simp add: c_def)
have 1: "y =\<pi>\<^bsub>\<noteq>k\<^esub> c"
using yt_def assms(1) assms(2)
by (metis insert_at_index_project_away)
have 2: "x = y" using 0 1 by auto
then show "x \<in> C"
by (simp add: yt_def)
qed
show "C \<subseteq> \<pi>\<^bsub>\<noteq>k\<^esub> ` fibred_cell_at_ind k C P"
proof fix x
assume A: "x \<in> C"
obtain t where t_def: "P x t"
using assms A by auto
then show "x \<in> \<pi>\<^bsub>\<noteq>k\<^esub> ` fibred_cell_at_ind k C P"
proof -
have f1: "\<forall>a n A as. take n as @ (a::'a) # drop n as \<notin> A \<or> as \<in> \<pi>\<^bsub>\<noteq>n\<^esub> ` A \<or> \<not> n < length as"
by (metis insert_at_index.simps insert_at_index_project_away rev_image_eqI)
have "\<forall>n. \<exists>as a. take n x @ t # drop n x = insert_at_index as a n \<and> as \<in> C \<and> P as a"
using A t_def by auto
then have "\<forall>n. take n x @ t # drop n x \<in> {insert_at_index as a n |as a. as \<in> C \<and> P as a}"
by blast
then have "x \<in> \<pi>\<^bsub>\<noteq>k\<^esub> ` {insert_at_index as a k |as a. as \<in> C \<and> P as a}"
using f1 by (metis (lifting) A assms(1) assms(2))
then show ?thesis
by (simp add: fibred_cell_at_ind_def)
qed
qed
qed
definition list_segment where
"list_segment i j as = map (nth as) [i..<j]"
lemma list_segment_length:
assumes "i \<le> j"
assumes "j \<le> length as"
shows "length (list_segment i j as) = j - i"
using assms
unfolding list_segment_def
by (metis length_map length_upt)
lemma list_segment_drop:
assumes "i < length as"
shows "(list_segment i (length as) as) = drop i as"
by (metis One_nat_def Suc_diff_Suc add_diff_inverse_nat drop0 drop_map drop_upt
less_Suc_eq list_segment_def map_nth neq0_conv not_less0 plus_1_eq_Suc)
lemma list_segment_concat:
assumes "j \<le> k"
assumes "i \<le> j"
shows "(list_segment i j as) @ (list_segment j k as) = (list_segment i k as)"
using assms unfolding list_segment_def
using le_Suc_ex upt_add_eq_append
by fastforce
lemma list_segment_subset:
assumes "j \<le> k"
shows "set (list_segment i j as) \<subseteq> set (list_segment i k as)"
apply(cases "i > j")
unfolding list_segment_def
apply (metis in_set_conv_nth length_map list.size(3) order.asym subsetI upt_rec zero_order(3))
proof-
assume "\<not> j < i"
then have "i \<le>j"
using not_le
by blast
then have "list_segment i j as @ list_segment j k as = list_segment i k as"
using assms list_segment_concat[of j k i as] by auto
then show "set (map ((!) as) [i..<j]) \<subseteq> set (map ((!) as) [i..<k])"
using set_append unfolding list_segment_def
by (metis Un_upper1)
qed
lemma list_segment_subset_list_set:
assumes "j \<le> length as"
shows "set (list_segment i j as) \<subseteq> set as"
apply(cases "i \<ge> j")
apply (simp add: list_segment_def)
proof-
assume A: "\<not> j \<le> i"
then have B: "i < j"
by auto
have 0: "list_segment i (length as) as = drop i as"
using B assms list_segment_drop[of i as] less_le_trans
by blast
have 1: "set (list_segment i j as) \<subseteq> set (list_segment i (length as) as)"
using B assms list_segment_subset[of j "length as" i as]
by blast
then show ?thesis
using assms 0 dual_order.trans set_drop_subset[of i as]
by metis
qed
definition fun_inv where
"fun_inv = inv"
end
|
CHARMM Element source/dimb/nmdimb.src 1.1
C.##IF DIMB
SUBROUTINE NMDIMB(X,Y,Z,NAT3,BNBND,BIMAG,LNOMA,AMASS,DDS,DDSCR,
1 PARDDV,DDV,DDM,PARDDF,DDF,PARDDE,DDEV,DD1BLK,
2 DD1BLL,NADD,LRAISE,DD1CMP,INBCMP,JNBCMP,
3 NPAR,ATMPAR,ATMPAS,BLATOM,PARDIM,NFREG,NFRET,
4 PARFRQ,CUTF1,ITMX,TOLDIM,IUNMOD,IUNRMD,
5 LBIG,LSCI,ATMPAD,SAVF,NBOND,IB,JB,DDVALM)
C-----------------------------------------------------------------------
C 01-Jul-1992 David Perahia, Liliane Mouawad
C 15-Dec-1994 Herman van Vlijmen
C
C This is the main routine for the mixed-basis diagonalization.
C See: L.Mouawad and D.Perahia, Biopolymers (1993), 33, 599,
C and: D.Perahia and L.Mouawad, Comput. Chem. (1995), 19, 241.
C The method iteratively solves the diagonalization of the
C Hessian matrix. To save memory space, it uses a compressed
C form of the Hessian, which only contains the nonzero elements.
C In the diagonalization process, approximate eigenvectors are
C mixed with Cartesian coordinates to form a reduced basis. The
C Hessian is then diagonalized in the reduced basis. By iterating
C over different sets of Cartesian coordinates the method ultimately
C converges to the exact eigenvalues and eigenvectors (up to the
C requested accuracy).
C If no existing basis set is read, an initial basis will be created
C which consists of the low-frequency eigenvectors of diagonal blocks
C of the Hessian.
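C
C In schematic form (P, H, Hred below are illustrative names only):
C with P the 3N x m matrix whose columns are the current basis
C vectors, each iteration builds the reduced Hessian Hred = P**T*H*P,
C solves the small eigenproblem Hred*c = lambda*c, and expands the
C solutions back to Cartesian space as v = P*c. These approximate
C eigenvectors, mixed with a new set of Cartesian coordinates, form
C the basis for the next iteration.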
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/impnon.fcm'
C..##IF VAX CONVEX IRIS HPUX IRIS GNU CSPP OS2 GWS CRAY ALPHA
IMPLICIT NONE
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/stream.fcm'
LOGICAL LOWER,QLONGL
INTEGER MXSTRM,POUTU
PARAMETER (MXSTRM=20,POUTU=6)
INTEGER NSTRM,ISTRM,JSTRM,OUTU,PRNLEV,WRNLEV,IOLEV
COMMON /CASE/ LOWER, QLONGL
COMMON /STREAM/ NSTRM,ISTRM,JSTRM(MXSTRM),OUTU,PRNLEV,WRNLEV,IOLEV
C..##IF SAVEFCM
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/dimens.fcm'
INTEGER LARGE,MEDIUM,SMALL,REDUCE
C..##IF QUANTA
C..##ELIF T3D
C..##ELSE
PARAMETER (LARGE=60120, MEDIUM=25140, SMALL=6120)
C..##ENDIF
PARAMETER (REDUCE=15000)
INTEGER SIZE
C..##IF XLARGE
C..##ELIF XXLARGE
C..##ELIF LARGE
C..##ELIF MEDIUM
PARAMETER (SIZE=MEDIUM)
C..##ELIF REDUCE
C..##ELIF SMALL
C..##ELIF XSMALL
C..##ENDIF
C..##IF MMFF
integer MAXDEFI
parameter(MAXDEFI=250)
INTEGER NAME0,NAMEQ0,NRES0,KRES0
PARAMETER (NAME0=4,NAMEQ0=10,NRES0=4,KRES0=4)
integer MaxAtN
parameter (MaxAtN=55)
INTEGER MAXAUX
PARAMETER (MAXAUX = 10)
C..##ENDIF
INTEGER MAXCSP, MAXHSET
C..##IF HMCM
PARAMETER (MAXHSET = 200)
C..##ELSE
C..##ENDIF
C..##IF REDUCE
C..##ELSE
PARAMETER (MAXCSP = 500)
C..##ENDIF
C..##IF HMCM
INTEGER MAXHCM,MAXPCM,MAXRCM
C...##IF REDUCE
C...##ELSE
PARAMETER (MAXHCM=500)
PARAMETER (MAXPCM=5000)
PARAMETER (MAXRCM=2000)
C...##ENDIF
C..##ENDIF
INTEGER MXCMSZ
C..##IF IBM IBMRS CRAY INTEL IBMSP T3D REDUCE
C..##ELSE
PARAMETER (MXCMSZ = 5000)
C..##ENDIF
INTEGER CHRSIZ
PARAMETER (CHRSIZ = SIZE)
INTEGER MAXATB
C..##IF REDUCE
C..##ELIF QUANTA
C..##ELSE
PARAMETER (MAXATB = 200)
C..##ENDIF
INTEGER MAXVEC
C..##IFN VECTOR PARVECT
PARAMETER (MAXVEC = 10)
C..##ELIF LARGE XLARGE XXLARGE
C..##ELIF MEDIUM
C..##ELIF SMALL REDUCE
C..##ELIF XSMALL
C..##ELSE
C..##ENDIF
INTEGER IATBMX
PARAMETER (IATBMX = 8)
INTEGER MAXHB
C..##IF LARGE XLARGE XXLARGE
C..##ELIF MEDIUM
PARAMETER (MAXHB = 8000)
C..##ELIF SMALL
C..##ELIF REDUCE XSMALL
C..##ELSE
C..##ENDIF
INTEGER MAXTRN,MAXSYM
C..##IFN NOIMAGES
PARAMETER (MAXTRN = 5000)
PARAMETER (MAXSYM = 192)
C..##ELSE
C..##ENDIF
C..##IF LONEPAIR (lonepair_max)
INTEGER MAXLP,MAXLPH
C...##IF REDUCE
C...##ELSE
PARAMETER (MAXLP = 2000)
PARAMETER (MAXLPH = 4000)
C...##ENDIF
C..##ENDIF (lonepair_max)
INTEGER NOEMAX,NOEMX2
C..##IF REDUCE
C..##ELSE
PARAMETER (NOEMAX = 2000)
PARAMETER (NOEMX2 = 4000)
C..##ENDIF
INTEGER MAXATC, MAXCB, MAXCH, MAXCI, MAXCP, MAXCT, MAXITC, MAXNBF
C..##IF REDUCE
C..##ELIF MMFF CFF
PARAMETER (MAXATC = 500, MAXCB = 1500, MAXCH = 3200, MAXCI = 600,
& MAXCP = 3000,MAXCT = 15500,MAXITC = 200, MAXNBF=1000)
C..##ELIF YAMMP
C..##ELIF LARGE
C..##ELSE
C..##ENDIF
INTEGER MAXCN
PARAMETER (MAXCN = MAXITC*(MAXITC+1)/2)
INTEGER MAXA, MAXAIM, MAXB, MAXT, MAXP
INTEGER MAXIMP, MAXNB, MAXPAD, MAXRES
INTEGER MAXSEG, MAXGRP
C..##IF LARGE XLARGE XXLARGE
C..##ELIF MEDIUM
PARAMETER (MAXA = SIZE, MAXB = SIZE, MAXT = SIZE,
& MAXP = 2*SIZE)
PARAMETER (MAXIMP = 9200, MAXNB = 17200, MAXPAD = 8160,
& MAXRES = 14000)
C...##IF MCSS
C...##ELSE
PARAMETER (MAXSEG = 1000)
C...##ENDIF
C..##ELIF SMALL
C..##ELIF XSMALL
C..##ELIF REDUCE
C..##ELSE
C..##ENDIF
C..##IF NOIMAGES
C..##ELSE
PARAMETER (MAXAIM = 2*SIZE)
PARAMETER (MAXGRP = 2*SIZE/3)
C..##ENDIF
INTEGER REDMAX,REDMX2
C..##IF REDUCE
C..##ELSE
PARAMETER (REDMAX = 20)
PARAMETER (REDMX2 = 80)
C..##ENDIF
INTEGER MXRTRS, MXRTA, MXRTB, MXRTT, MXRTP, MXRTI, MXRTX,
& MXRTHA, MXRTHD, MXRTBL, NICM
PARAMETER (MXRTRS = 200, MXRTA = 5000, MXRTB = 5000,
& MXRTT = 5000, MXRTP = 5000, MXRTI = 2000,
C..##IF YAMMP
C..##ELSE
& MXRTX = 5000, MXRTHA = 300, MXRTHD = 300,
C..##ENDIF
& MXRTBL = 5000, NICM = 10)
INTEGER NMFTAB, NMCTAB, NMCATM, NSPLIN
C..##IF REDUCE
C..##ELSE
PARAMETER (NMFTAB = 200, NMCTAB = 3, NMCATM = 12000, NSPLIN = 3)
C..##ENDIF
INTEGER MAXSHK
C..##IF XSMALL
C..##ELIF REDUCE
C..##ELSE
PARAMETER (MAXSHK = SIZE*3/4)
C..##ENDIF
INTEGER SCRMAX
C..##IF IBM IBMRS CRAY INTEL IBMSP T3D REDUCE
C..##ELSE
PARAMETER (SCRMAX = 5000)
C..##ENDIF
C..##IF TSM
INTEGER MXPIGG
C...##IF REDUCE
C...##ELSE
PARAMETER (MXPIGG=500)
C...##ENDIF
INTEGER MXCOLO,MXPUMB
PARAMETER (MXCOLO=20,MXPUMB=20)
C..##ENDIF
C..##IF ADUMB
INTEGER MAXUMP, MAXEPA, MAXNUM
C...##IF REDUCE
C...##ELSE
PARAMETER (MAXUMP = 10, MAXNUM = 4)
C...##ENDIF
C..##ENDIF
INTEGER MAXING
PARAMETER (MAXING=1000)
C..##IF MMFF
integer MAX_RINGSIZE, MAX_EACH_SIZE
parameter (MAX_RINGSIZE = 20, MAX_EACH_SIZE = 1000)
integer MAXPATHS
parameter (MAXPATHS = 8000)
integer MAX_TO_SEARCH
parameter (MAX_TO_SEARCH = 6)
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/number.fcm'
REAL*8 ZERO, ONE, TWO, THREE, FOUR, FIVE, SIX,
& SEVEN, EIGHT, NINE, TEN, ELEVEN, TWELVE, THIRTN,
& FIFTN, NINETN, TWENTY, THIRTY
C..##IF SINGLE
C..##ELSE
PARAMETER (ZERO = 0.D0, ONE = 1.D0, TWO = 2.D0,
& THREE = 3.D0, FOUR = 4.D0, FIVE = 5.D0,
& SIX = 6.D0, SEVEN = 7.D0, EIGHT = 8.D0,
& NINE = 9.D0, TEN = 10.D0, ELEVEN = 11.D0,
& TWELVE = 12.D0, THIRTN = 13.D0, FIFTN = 15.D0,
& NINETN = 19.D0, TWENTY = 20.D0, THIRTY = 30.D0)
C..##ENDIF
REAL*8 FIFTY, SIXTY, SVNTY2, EIGHTY, NINETY, HUNDRD,
& ONE2TY, ONE8TY, THRHUN, THR6TY, NINE99, FIFHUN, THOSND,
& FTHSND,MEGA
C..##IF SINGLE
C..##ELSE
PARAMETER (FIFTY = 50.D0, SIXTY = 60.D0, SVNTY2 = 72.D0,
& EIGHTY = 80.D0, NINETY = 90.D0, HUNDRD = 100.D0,
& ONE2TY = 120.D0, ONE8TY = 180.D0, THRHUN = 300.D0,
& THR6TY=360.D0, NINE99 = 999.D0, FIFHUN = 1500.D0,
& THOSND = 1000.D0,FTHSND = 5000.D0, MEGA = 1.0D6)
C..##ENDIF
REAL*8 MINONE, MINTWO, MINSIX
PARAMETER (MINONE = -1.D0, MINTWO = -2.D0, MINSIX = -6.D0)
REAL*8 TENM20,TENM14,TENM8,TENM5,PT0001,PT0005,PT001,PT005,
& PT01, PT02, PT05, PTONE, PT125, PT25, SIXTH, THIRD,
& PTFOUR, PTSIX, HALF, PT75, PT9999, ONEPT5, TWOPT4
C..##IF SINGLE
C..##ELSE
PARAMETER (TENM20 = 1.0D-20, TENM14 = 1.0D-14, TENM8 = 1.0D-8,
& TENM5 = 1.0D-5, PT0001 = 1.0D-4, PT0005 = 5.0D-4,
& PT001 = 1.0D-3, PT005 = 5.0D-3, PT01 = 0.01D0,
& PT02 = 0.02D0, PT05 = 0.05D0, PTONE = 0.1D0,
& PT125 = 0.125D0, SIXTH = ONE/SIX,PT25 = 0.25D0,
& THIRD = ONE/THREE,PTFOUR = 0.4D0, HALF = 0.5D0,
& PTSIX = 0.6D0, PT75 = 0.75D0, PT9999 = 0.9999D0,
& ONEPT5 = 1.5D0, TWOPT4 = 2.4D0)
C..##ENDIF
REAL*8 ANUM,FMARK
REAL*8 RSMALL,RBIG
C..##IF SINGLE
C..##ELSE
PARAMETER (ANUM=9999.0D0, FMARK=-999.0D0)
PARAMETER (RSMALL=1.0D-10,RBIG=1.0D20)
C..##ENDIF
REAL*8 RPRECI,RBIGST
C..##IF VAX DEC
C..##ELIF IBM
C..##ELIF CRAY
C..##ELIF ALPHA T3D T3E
C..##ELSE
C...##IF SINGLE
C...##ELSE
PARAMETER (RPRECI = 2.22045D-16, RBIGST = 4.49423D+307)
C...##ENDIF
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/consta.fcm'
REAL*8 PI,RADDEG,DEGRAD,TWOPI
PARAMETER(PI=3.141592653589793D0,TWOPI=2.0D0*PI)
PARAMETER (RADDEG=180.0D0/PI)
PARAMETER (DEGRAD=PI/180.0D0)
REAL*8 COSMAX
PARAMETER (COSMAX=0.9999999999D0)
REAL*8 TIMFAC
PARAMETER (TIMFAC=4.88882129D-02)
REAL*8 KBOLTZ
PARAMETER (KBOLTZ=1.987191D-03)
REAL*8 CCELEC
C..##IF AMBER
C..##ELIF DISCOVER
C..##ELSE
PARAMETER (CCELEC=332.0716D0)
C..##ENDIF
REAL*8 CNVFRQ
PARAMETER (CNVFRQ=2045.5D0/(2.99793D0*6.28319D0))
REAL*8 SPEEDL
PARAMETER (SPEEDL=2.99793D-02)
REAL*8 ATMOSP
PARAMETER (ATMOSP=1.4584007D-05)
REAL*8 PATMOS
PARAMETER (PATMOS = 1.D0 / ATMOSP )
REAL*8 BOHRR
PARAMETER (BOHRR = 0.529177249D0 )
REAL*8 TOKCAL
PARAMETER (TOKCAL = 627.5095D0 )
C..##IF MMFF
real*8 MDAKCAL
parameter(MDAKCAL=143.9325D0)
C..##ENDIF
REAL*8 DEBYEC
PARAMETER ( DEBYEC = 2.541766D0 / BOHRR )
REAL*8 ZEROC
PARAMETER ( ZEROC = 298.15D0 )
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/exfunc.fcm'
C..##IF ACE
C..##ENDIF
C..##IF ADUMB
C..##ENDIF
CHARACTER*4 GTRMA, NEXTA4, CURRA4
CHARACTER*6 NEXTA6
CHARACTER*8 NEXTA8
CHARACTER*20 NEXT20
INTEGER ALLCHR, ALLSTK, ALLHP, DECODI, FIND52,
* GETATN, GETRES, GETRSN, GETSEG, GTRMI, I4VAL,
* ICHAR4, ICMP16, ILOGI4, INDX, INDXA, INDXAF,
* INDXRA, INTEG4, IREAL4, IREAL8, LOCDIF,
* LUNASS, MATOM, NEXTI, NINDX, NSELCT, NSELCTV, ATMSEL,
* PARNUM, PARINS,
* SRCHWD, SRCHWS, STRLNG, DSIZE, SSIZE
C..##IF ACE
* ,GETNNB
C..##ENDIF
LOGICAL CHKPTR, EQST, EQSTA, EQSTWC, EQWDWC, DOTRIM, CHECQUE,
* HYDROG, INITIA, LONE, LTSTEQ, ORDER, ORDER5,
* ORDERR, USEDDT, QTOKDEL, QDIGIT, QALPHA
REAL*8 DECODF, DOTVEC, GTRMF, LENVEC, NEXTF, RANDOM, GTRR8,
* RANUMB, R8VAL, RETVAL8, SUMVEC
C..##IF ADUMB
* ,UMFI
C..##ENDIF
EXTERNAL GTRMA, NEXTA4, CURRA4, NEXTA6, NEXTA8,NEXT20,
* ALLCHR, ALLSTK, ALLHP, DECODI, FIND52,
* GETATN, GETRES, GETRSN, GETSEG, GTRMI, I4VAL,
* ICHAR4, ICMP16, ILOGI4, INDX, INDXA, INDXAF,
* INDXRA, INTEG4, IREAL4, IREAL8, LOCDIF,
* LUNASS, MATOM, NEXTI, NINDX, NSELCT, NSELCTV, ATMSEL,
* PARNUM, PARINS,
* SRCHWD, SRCHWS, STRLNG, DSIZE, SSIZE,
* CHKPTR, EQST, EQSTA, EQSTWC, EQWDWC, DOTRIM, CHECQUE,
* HYDROG, INITIA, LONE, LTSTEQ, ORDER, ORDER5,
* ORDERR, USEDDT, QTOKDEL, QDIGIT, QALPHA,
* DECODF, DOTVEC, GTRMF, LENVEC, NEXTF, RANDOM, GTRR8,
* RANUMB, R8VAL, RETVAL8, SUMVEC
C..##IF ADUMB
* ,UMFI
C..##ENDIF
C..##IF ACE
* ,GETNNB
C..##ENDIF
C..##IFN NOIMAGES
INTEGER IMATOM
EXTERNAL IMATOM
C..##ENDIF
C..##IF MBOND
C..##ENDIF
C..##IF MMFF
INTEGER LEN_TRIM
EXTERNAL LEN_TRIM
CHARACTER*4 AtName
external AtName
CHARACTER*8 ElementName
external ElementName
CHARACTER*10 QNAME
external QNAME
integer IATTCH, IBORDR, CONN12, CONN13, CONN14
integer LEQUIV, LPATH
integer nbndx, nbnd2, nbnd3, NTERMA
external IATTCH, IBORDR, CONN12, CONN13, CONN14
external LEQUIV, LPATH
external nbndx, nbnd2, nbnd3, NTERMA
external find_loc
real*8 vangle, OOPNGL, TORNGL, ElementMass
external vangle, OOPNGL, TORNGL, ElementMass
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/stack.fcm'
INTEGER STKSIZ
C..##IFN UNICOS
C...##IF LARGE XLARGE
C...##ELIF MEDIUM REDUCE
PARAMETER (STKSIZ=4000000)
C...##ELIF SMALL
C...##ELIF XSMALL
C...##ELIF XXLARGE
C...##ELSE
C...##ENDIF
INTEGER LSTUSD,MAXUSD,STACK
COMMON /ISTACK/ LSTUSD,MAXUSD,STACK(STKSIZ)
C..##ELSE
C..##ENDIF
C..##IF SAVEFCM
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/heap.fcm'
INTEGER HEAPDM
C..##IFN UNICOS (unicos)
C...##IF XXLARGE (size)
C...##ELIF LARGE XLARGE (size)
C...##ELIF MEDIUM (size)
C....##IF T3D (t3d2)
C....##ELIF TERRA (t3d2)
C....##ELIF ALPHA (t3d2)
C....##ELIF T3E (t3d2)
C....##ELSE (t3d2)
PARAMETER (HEAPDM=2048000)
C....##ENDIF (t3d2)
C...##ELIF SMALL (size)
C...##ELIF REDUCE (size)
C...##ELIF XSMALL (size)
C...##ELSE (size)
C...##ENDIF (size)
INTEGER FREEHP,HEAPSZ,HEAP
COMMON /HEAPST/ FREEHP,HEAPSZ,HEAP(HEAPDM)
LOGICAL LHEAP(HEAPDM)
EQUIVALENCE (LHEAP,HEAP)
C..##ELSE (unicos)
C..##ENDIF (unicos)
C..##IF SAVEFCM (save)
C..##ENDIF (save)
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/fast.fcm'
INTEGER IACNB, NITCC, ICUSED, FASTER, LFAST, LMACH, OLMACH
INTEGER ICCOUNT, LOWTP, IGCNB, NITCC2
INTEGER ICCNBA, ICCNBB, ICCNBC, ICCNBD, LCCNBA, LCCNBD
COMMON /FASTI/ FASTER, LFAST, LMACH, OLMACH, NITCC, NITCC2,
& ICUSED(MAXATC), ICCOUNT(MAXATC), LOWTP(MAXATC),
& IACNB(MAXAIM), IGCNB(MAXATC),
& ICCNBA, ICCNBB, ICCNBC, ICCNBD, LCCNBA, LCCNBD
C..##IF SAVEFCM
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/deriv.fcm'
REAL*8 DX,DY,DZ
COMMON /DERIVR/ DX(MAXAIM),DY(MAXAIM),DZ(MAXAIM)
C..##IF SAVEFCM
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/energy.fcm'
INTEGER LENENP, LENENT, LENENV, LENENA
PARAMETER (LENENP = 50, LENENT = 70, LENENV = 50,
& LENENA = LENENP + LENENT + LENENV )
INTEGER TOTE, TOTKE, EPOT, TEMPS, GRMS, BPRESS, PJNK1, PJNK2,
& PJNK3, PJNK4, HFCTE, HFCKE, EHFC, EWORK, VOLUME, PRESSE,
& PRESSI, VIRI, VIRE, VIRKE, TEPR, PEPR, KEPR, KEPR2,
& DROFFA,
& XTLTE, XTLKE, XTLPE, XTLTEM, XTLPEP, XTLKEP, XTLKP2,
& TOT4, TOTK4, EPOT4, TEM4, MbMom, BodyT, PartT
C..##IF ACE
& , SELF, SCREEN, COUL ,SOLV, INTER
C..##ENDIF
C..##IF FLUCQ
& ,FQKIN
C..##ENDIF
PARAMETER (TOTE = 1, TOTKE = 2, EPOT = 3, TEMPS = 4,
& GRMS = 5, BPRESS = 6, PJNK1 = 7, PJNK2 = 8,
& PJNK3 = 9, PJNK4 = 10, HFCTE = 11, HFCKE = 12,
     &           EHFC = 13, EWORK = 14, VOLUME = 15, PRESSE = 16,
& PRESSI = 17, VIRI = 18, VIRE = 19, VIRKE = 20,
& TEPR = 21, PEPR = 22, KEPR = 23, KEPR2 = 24,
& DROFFA = 26, XTLTE = 27, XTLKE = 28,
& XTLPE = 29, XTLTEM = 30, XTLPEP = 31, XTLKEP = 32,
& XTLKP2 = 33,
& TOT4 = 37, TOTK4 = 38, EPOT4 = 39, TEM4 = 40,
& MbMom = 41, BodyT = 42, PartT = 43
C..##IF ACE
& , SELF = 45, SCREEN = 46, COUL = 47,
& SOLV = 48, INTER = 49
C..##ENDIF
C..##IF FLUCQ
& ,FQKIN = 50
C..##ENDIF
& )
C..##IF ACE
C..##ENDIF
C..##IF GRID
C..##ENDIF
C..##IF FLUCQ
C..##ENDIF
INTEGER BOND, ANGLE, UREYB, DIHE, IMDIHE, VDW, ELEC, HBOND,
& USER, CHARM, CDIHE, CINTCR, CQRT, NOE, SBNDRY,
& IMVDW, IMELEC, IMHBND, EWKSUM, EWSELF, EXTNDE, RXNFLD,
& ST2, IMST2, TSM, QMEL, QMVDW, ASP, EHARM, GEO, MDIP,
& PRMS, PANG, SSBP, BK4D, SHEL, RESD, SHAP,
& STRB, OOPL, PULL, POLAR, DMC, RGY, EWEXCL, EWQCOR,
& EWUTIL, PBELEC, PBNP, PINT, MbDefrm, MbElec, STRSTR,
& BNDBND, BNDTW, EBST, MBST, BBT, SST, GBEnr, GSBP
C..##IF HMCM
& , HMCM
C..##ENDIF
C..##IF ADUMB
& , ADUMB
C..##ENDIF
& , HYDR
C..##IF FLUCQ
& , FQPOL
C..##ENDIF
PARAMETER (BOND = 1, ANGLE = 2, UREYB = 3, DIHE = 4,
& IMDIHE = 5, VDW = 6, ELEC = 7, HBOND = 8,
& USER = 9, CHARM = 10, CDIHE = 11, CINTCR = 12,
& CQRT = 13, NOE = 14, SBNDRY = 15, IMVDW = 16,
& IMELEC = 17, IMHBND = 18, EWKSUM = 19, EWSELF = 20,
& EXTNDE = 21, RXNFLD = 22, ST2 = 23, IMST2 = 24,
& TSM = 25, QMEL = 26, QMVDW = 27, ASP = 28,
& EHARM = 29, GEO = 30, MDIP = 31, PINT = 32,
& PRMS = 33, PANG = 34, SSBP = 35, BK4D = 36,
& SHEL = 37, RESD = 38, SHAP = 39, STRB = 40,
& OOPL = 41, PULL = 42, POLAR = 43, DMC = 44,
& RGY = 45, EWEXCL = 46, EWQCOR = 47, EWUTIL = 48,
& PBELEC = 49, PBNP = 50, MbDefrm= 51, MbElec = 52,
& STRSTR = 53, BNDBND = 54, BNDTW = 55, EBST = 56,
& MBST = 57, BBT = 58, SST = 59, GBEnr = 60,
& GSBP = 65
C..##IF HMCM
& , HMCM = 61
C..##ENDIF
C..##IF ADUMB
& , ADUMB = 62
C..##ENDIF
& , HYDR = 63
C..##IF FLUCQ
& , FQPOL = 65
C..##ENDIF
& )
INTEGER VEXX, VEXY, VEXZ, VEYX, VEYY, VEYZ, VEZX, VEZY, VEZZ,
& VIXX, VIXY, VIXZ, VIYX, VIYY, VIYZ, VIZX, VIZY, VIZZ,
& PEXX, PEXY, PEXZ, PEYX, PEYY, PEYZ, PEZX, PEZY, PEZZ,
& PIXX, PIXY, PIXZ, PIYX, PIYY, PIYZ, PIZX, PIZY, PIZZ
PARAMETER ( VEXX = 1, VEXY = 2, VEXZ = 3, VEYX = 4,
& VEYY = 5, VEYZ = 6, VEZX = 7, VEZY = 8,
& VEZZ = 9,
& VIXX = 10, VIXY = 11, VIXZ = 12, VIYX = 13,
& VIYY = 14, VIYZ = 15, VIZX = 16, VIZY = 17,
& VIZZ = 18,
& PEXX = 19, PEXY = 20, PEXZ = 21, PEYX = 22,
& PEYY = 23, PEYZ = 24, PEZX = 25, PEZY = 26,
& PEZZ = 27,
& PIXX = 28, PIXY = 29, PIXZ = 30, PIYX = 31,
& PIYY = 32, PIYZ = 33, PIZX = 34, PIZY = 35,
& PIZZ = 36)
CHARACTER*4 CEPROP, CETERM, CEPRSS
COMMON /ANER/ CEPROP(LENENP), CETERM(LENENT), CEPRSS(LENENV)
LOGICAL QEPROP, QETERM, QEPRSS
COMMON /QENER/ QEPROP(LENENP), QETERM(LENENT), QEPRSS(LENENV)
REAL*8 EPROP, ETERM, EPRESS
COMMON /ENER/ EPROP(LENENP), ETERM(LENENT), EPRESS(LENENV)
C..##IF SAVEFCM
C..##ENDIF
REAL*8 EPRPA, EPRP2A, EPRPP, EPRP2P,
& ETRMA, ETRM2A, ETRMP, ETRM2P,
& EPRSA, EPRS2A, EPRSP, EPRS2P
COMMON /ENACCM/ EPRPA(LENENP), ETRMA(LENENT), EPRSA(LENENV),
& EPRP2A(LENENP),ETRM2A(LENENT),EPRS2A(LENENV),
& EPRPP(LENENP), ETRMP(LENENT), EPRSP(LENENV),
& EPRP2P(LENENP),ETRM2P(LENENT),EPRS2P(LENENV)
C..##IF SAVEFCM
C..##ENDIF
INTEGER ECALLS, TOT1ST, TOT2ND
COMMON /EMISCI/ ECALLS, TOT1ST, TOT2ND
REAL*8 EOLD, FITA, DRIFTA, EAT0A, CORRA, FITP, DRIFTP,
& EAT0P, CORRP
COMMON /EMISCR/ EOLD, FITA, DRIFTA, EAT0A, CORRA,
& FITP, DRIFTP, EAT0P, CORRP
C..##IF SAVEFCM
C..##ENDIF
C..##IF ACE
C..##ENDIF
C..##IF FLUCQ
C..##ENDIF
C..##IF ADUMB
C..##ENDIF
C..##IF GRID
C..##ENDIF
C..##IF FLUCQ
C..##ENDIF
C..##IF TSM
REAL*8 TSMTRM(LENENT),TSMTMP(LENENT)
COMMON /TSMENG/ TSMTRM,TSMTMP
C...##IF SAVEFCM
C...##ENDIF
C..##ENDIF
REAL*8 EHQBM
LOGICAL HQBM
COMMON /HQBMVAR/HQBM
C..##IF SAVEFCM
C..##ENDIF
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/dimb.fcm'
C..##IF DIMB (dimbfcm)
INTEGER NPARMX,MNBCMP,LENDSK
PARAMETER (NPARMX=1000,MNBCMP=300,LENDSK=200000)
INTEGER IJXXCM,IJXYCM,IJXZCM,IJYXCM,IJYYCM
INTEGER IJYZCM,IJZXCM,IJZYCM,IJZZCM
INTEGER IIXXCM,IIXYCM,IIXZCM,IIYYCM
INTEGER IIYZCM,IIZZCM
INTEGER JJXXCM,JJXYCM,JJXZCM,JJYYCM
INTEGER JJYZCM,JJZZCM
PARAMETER (IJXXCM=1,IJXYCM=2,IJXZCM=3,IJYXCM=4,IJYYCM=5)
PARAMETER (IJYZCM=6,IJZXCM=7,IJZYCM=8,IJZZCM=9)
PARAMETER (IIXXCM=1,IIXYCM=2,IIXZCM=3,IIYYCM=4)
PARAMETER (IIYZCM=5,IIZZCM=6)
PARAMETER (JJXXCM=1,JJXYCM=2,JJXZCM=3,JJYYCM=4)
PARAMETER (JJYZCM=5,JJZZCM=6)
INTEGER ITER,IPAR1,IPAR2,NFSAV,PINBCM,PJNBCM,PDD1CM,LENCMP
LOGICAL QDISK,QDW,QCMPCT
COMMON /DIMBI/ ITER,IPAR1,IPAR2,NFSAV,PINBCM,PJNBCM,LENCMP
COMMON /DIMBL/ QDISK,QDW,QCMPCT
C...##IF SAVEFCM
C...##ENDIF
C..##ENDIF (dimbfcm)
C-----------------------------------------------------------------------
C-----------------------------------------------------------------------
C:::##INCLUDE '~/charmm_fcm/ctitla.fcm'
INTEGER MAXTIT
PARAMETER (MAXTIT=32)
INTEGER NTITLA,NTITLB
CHARACTER*80 TITLEA,TITLEB
COMMON /NTITLA/ NTITLA,NTITLB
COMMON /CTITLA/ TITLEA(MAXTIT),TITLEB(MAXTIT)
C..##IF SAVEFCM
C..##ENDIF
C-----------------------------------------------------------------------
C Passed variables
INTEGER NAT3,NADD,NPAR,NFREG,NFRET,BLATOM
INTEGER ATMPAR(2,*),ATMPAS(2,*),ATMPAD(2,*)
INTEGER BNBND(*),BIMAG(*)
INTEGER INBCMP(*),JNBCMP(*),PARDIM
INTEGER ITMX,IUNMOD,IUNRMD,SAVF
INTEGER NBOND,IB(*),JB(*)
REAL*8 X(*),Y(*),Z(*),AMASS(*),DDSCR(*)
REAL*8 DDV(NAT3,*),PARDDV(PARDIM,*),DDM(*),DDS(*)
REAL*8 DDF(*),PARDDF(*),DDEV(*),PARDDE(*)
REAL*8 DD1BLK(*),DD1BLL(*),DD1CMP(*)
REAL*8 TOLDIM,DDVALM
REAL*8 PARFRQ,CUTF1
LOGICAL LNOMA,LRAISE,LSCI,LBIG
C Local variables
INTEGER NATOM,NATP,NDIM,I,J,II,OLDFAS,OLDPRN,IUPD
INTEGER NPARC,NPARD,NPARS,NFCUT1,NFREG2,NFREG6
INTEGER IH1,IH2,IH3,IH4,IH5,IH6,IH7,IH8
INTEGER IS1,IS2,IS3,IS4,JSPACE,JSP,DDSS,DD5
INTEGER ISTRT,ISTOP,IPA1,IPA2,IRESF
INTEGER ATMPAF,INIDS,TRAROT
INTEGER SUBLIS,ATMCOR
INTEGER NFRRES,DDVBAS
INTEGER DDV2,DDVAL
INTEGER LENCM,NTR,NFRE,NFC,N1,N2,NFCUT,NSUBP
INTEGER SCIFV1,SCIFV2,SCIFV3,SCIFV4,SCIFV6
INTEGER DRATQ,ERATQ,E2RATQ,BDRATQ,INRATQ
INTEGER I620,I640,I660,I700,I720,I760,I800,I840,I880,I920
REAL*8 CVGMX,TOLER
LOGICAL LCARD,LAPPE,LPURG,LWDINI,QCALC,QMASWT,QMIX,QDIAG
C Begin
QCALC=.TRUE.
LWDINI=.FALSE.
INIDS=0
IS3=0
IS4=0
LPURG=.TRUE.
ITER=0
NADD=0
NFSAV=0
TOLER=TENM5
QDIAG=.TRUE.
CVGMX=HUNDRD
QMIX=.FALSE.
NATOM=NAT3/3
NFREG6=(NFREG-6)/NPAR
NFREG2=NFREG/2
NFRRES=(NFREG+6)/2
IF(NFREG.GT.PARDIM) CALL WRNDIE(-3,'<NMDIMB>',
1 'NFREG IS LARGER THAN PARDIM*3')
C
C ALLOCATE-SPACE-FOR-TRANSROT-VECTORS
ASSIGN 801 TO I800
GOTO 800
801 CONTINUE
C ALLOCATE-SPACE-FOR-DIAGONALIZATION
ASSIGN 721 TO I720
GOTO 720
721 CONTINUE
C ALLOCATE-SPACE-FOR-REDUCED-BASIS
ASSIGN 761 TO I760
GOTO 760
761 CONTINUE
C ALLOCATE-SPACE-FOR-OTHER-ARRAYS
ASSIGN 921 TO I920
GOTO 920
921 CONTINUE
C
C Space allocation for working arrays of EISPACK
C diagonalization subroutines
IF(LSCI) THEN
C ALLOCATE-SPACE-FOR-LSCI
ASSIGN 841 TO I840
GOTO 840
841 CONTINUE
ELSE
C ALLOCATE-DUMMY-SPACE-FOR-LSCI
ASSIGN 881 TO I880
GOTO 880
881 CONTINUE
ENDIF
QMASWT=(.NOT.LNOMA)
IF(.NOT. QDISK) THEN
LENCM=INBCMP(NATOM-1)*9+NATOM*6
DO I=1,LENCM
DD1CMP(I)=0.0
ENDDO
OLDFAS=LFAST
QCMPCT=.TRUE.
LFAST = -1
CALL ENERGY(X,Y,Z,DX,DY,DZ,BNBND,BIMAG,NAT3,DD1CMP,.TRUE.,1)
LFAST=OLDFAS
QCMPCT=.FALSE.
C
C Mass weight DD1CMP matrix
C
CALL MASSDD(DD1CMP,DDM,INBCMP,JNBCMP,NATOM)
ELSE
CALL WRNDIE(-3,'<NMDIMB>','QDISK OPTION NOT SUPPORTED YET')
C DO I=1,LENDSK
C DD1CMP(I)=0.0
C ENDDO
C OLDFAS=LFAST
C LFAST = -1
ENDIF
C
C Fill DDV with six translation-rotation vectors
C
CALL TRROT(X,Y,Z,DDV,NAT3,1,DDM)
CALL CPARAY(HEAP(TRAROT),DDV,NAT3,1,6,1)
NTR=6
OLDPRN=PRNLEV
PRNLEV=1
CALL ORTHNM(1,6,NTR,HEAP(TRAROT),NAT3,.FALSE.,TOLER)
PRNLEV=OLDPRN
IF(IUNRMD .LT. 0) THEN
C
C If no previous basis is read
C
IF(PRNLEV.GE.2) WRITE(OUTU,502) NPAR
502 FORMAT(/' NMDIMB: Calculating initial basis from block ',
1 'diagonals'/' NMDIMB: The number of blocks is ',I5/)
NFRET = 6
DO I=1,NPAR
IS1=ATMPAR(1,I)
IS2=ATMPAR(2,I)
NDIM=(IS2-IS1+1)*3
NFRE=NDIM
IF(NFRE.GT.NFREG6) NFRE=NFREG6
IF(NFREG6.EQ.0) NFRE=1
CALL FILUPT(HEAP(IUPD),NDIM)
CALL MAKDDU(DD1BLK,DD1CMP,INBCMP,JNBCMP,HEAP(IUPD),
1 IS1,IS2,NATOM)
IF(PRNLEV.GE.9) CALL PRINTE(OUTU,EPROP,ETERM,'VIBR',
1 'ENR',.TRUE.,1,ZERO,ZERO)
C
C Generate the lower section of the matrix and diagonalize
C
C..##IF EISPACK
C..##ENDIF
IH1=1
NATP=NDIM+1
IH2=IH1+NATP
IH3=IH2+NATP
IH4=IH3+NATP
IH5=IH4+NATP
IH6=IH5+NATP
IH7=IH6+NATP
IH8=IH7+NATP
CALL DIAGQ(NDIM,NFRE,DD1BLK,PARDDV,DDS(IH2),DDS(IH3),
1 DDS(IH4),DDS(IH5),DDS,DDS(IH6),DDS(IH7),DDS(IH8),NADD)
C..##IF EISPACK
C..##ENDIF
C
C Put the PARDDV vectors into DDV and replace the elements which do
C not belong to the considered partitioned region by zeros.
C
CALL ADJNME(DDV,PARDDV,NAT3,NDIM,NFRE,NFRET,IS1,IS2)
IF(LSCI) THEN
DO J=1,NFRE
PARDDF(J)=CNVFRQ*SQRT(ABS(PARDDE(J)))
IF(PARDDE(J) .LT. 0.0) PARDDF(J)=-PARDDF(J)
ENDDO
ELSE
DO J=1,NFRE
PARDDE(J)=DDS(J)
PARDDF(J)=CNVFRQ*SQRT(ABS(PARDDE(J)))
IF(PARDDE(J) .LT. 0.0) PARDDF(J)=-PARDDF(J)
ENDDO
ENDIF
IF(PRNLEV.GE.2) THEN
WRITE(OUTU,512) I
WRITE(OUTU,514)
WRITE(OUTU,516) (J,PARDDF(J),J=1,NFRE)
ENDIF
NFRET=NFRET+NFRE
IF(NFRET .GE. NFREG) GOTO 10
ENDDO
512 FORMAT(/' NMDIMB: Diagonalization of part',I5,' completed')
514 FORMAT(' NMDIMB: Frequencies'/)
516 FORMAT(5(I4,F12.6))
10 CONTINUE
C
C Orthonormalize the eigenvectors
C
OLDPRN=PRNLEV
PRNLEV=1
CALL ORTHNM(1,NFRET,NFRET,DDV,NAT3,LPURG,TOLER)
PRNLEV=OLDPRN
C
C Do reduced basis diagonalization using the DDV vectors
C and get eigenvectors of zero iteration
C
IF(PRNLEV.GE.2) THEN
WRITE(OUTU,521) ITER
WRITE(OUTU,523) NFRET
ENDIF
521 FORMAT(/' NMDIMB: Iteration number = ',I5)
523 FORMAT(' NMDIMB: Dimension of the reduced basis set = ',I5)
IF(LBIG) THEN
         IF(PRNLEV.GE.2) WRITE(OUTU,525) NFRET,IUNMOD
525 FORMAT(' NMDIMB: ',I5,' basis vectors are saved in unit',I5)
REWIND (UNIT=IUNMOD)
LCARD=.FALSE.
CALL WRTNMD(LCARD,1,NFRET,NAT3,DDV,DDSCR,DDEV,IUNMOD,AMASS)
CALL SAVEIT(IUNMOD)
ELSE
CALL CPARAY(HEAP(DDVBAS),DDV,NAT3,1,NFRET,1)
ENDIF
CALL RBDG(X,Y,Z,NAT3,NDIM,NFRET,DDV,DDF,DDEV,
1 DDSCR,HEAP(DD5),HEAP(DDSS),HEAP(DDV2),NADD,
2 INBCMP,JNBCMP,HEAP(DDVBAS),DD1CMP,QMIX,0,0,IS3,IS4,
3 CUTF1,NFCUT1,NFREG,HEAP(IUPD),DD1BLL,HEAP(SCIFV1),
4 HEAP(SCIFV2),HEAP(SCIFV3),HEAP(SCIFV4),HEAP(SCIFV6),
5 HEAP(DRATQ),HEAP(ERATQ),HEAP(E2RATQ),
6 HEAP(BDRATQ),HEAP(INRATQ),LSCI,LBIG,IUNMOD)
C
C DO-THE-DIAGONALISATIONS-WITH-RESIDUALS
C
ASSIGN 621 TO I620
GOTO 620
621 CONTINUE
C SAVE-MODES
ASSIGN 701 TO I700
GOTO 700
701 CONTINUE
IF(ITER.EQ.ITMX) THEN
CALL CLEANHP(NAT3,NFREG,NPARD,NSUBP,PARDIM,DDV2,DDSS,DDVBAS,
1 DDVAL,JSPACE,TRAROT,
2 SCIFV1,SCIFV2,SCIFV3,SCIFV4,SCIFV6,
3 DRATQ,ERATQ,E2RATQ,BDRATQ,INRATQ,IUPD,ATMPAF,
4 ATMCOR,SUBLIS,LSCI,QDW,LBIG)
RETURN
ENDIF
ELSE
C
C Read in existing basis
C
IF(PRNLEV.GE.2) THEN
WRITE(OUTU,531)
531 FORMAT(/' NMDIMB: Calculations restarted')
ENDIF
C READ-MODES
ISTRT=1
ISTOP=99999999
LCARD=.FALSE.
LAPPE=.FALSE.
CALL RDNMD(LCARD,NFRET,NFREG,NAT3,NDIM,
1 DDV,DDSCR,DDF,DDEV,
2 IUNRMD,LAPPE,ISTRT,ISTOP)
NFRET=NDIM
IF(NFRET.GT.NFREG) THEN
NFRET=NFREG
CALL WRNDIE(-1,'<NMDIMB>',
1 'Not enough space to hold the basis. Increase NMODes')
ENDIF
C PRINT-MODES
IF(PRNLEV.GE.2) THEN
WRITE(OUTU,533) NFRET,IUNRMD
WRITE(OUTU,514)
WRITE(OUTU,516) (J,DDF(J),J=1,NFRET)
ENDIF
533 FORMAT(/' NMDIMB: ',I5,' restart modes read from unit ',I5)
NFRRES=NFRET
ENDIF
C
C -------------------------------------------------
C Here starts the mixed-basis diagonalization part.
C -------------------------------------------------
C
C
C Check cut-off frequency
C
CALL SELNMD(DDF,NFRET,CUTF1,NFCUT1)
C TEST-NFCUT1
IF(IUNRMD.LT.0) THEN
IF(NFCUT1*2-6.GT.NFREG) THEN
IF(PRNLEV.GE.2) WRITE(OUTU,537) DDF(NFRRES)
NFCUT1=NFRRES
CUTF1=DDF(NFRRES)
ENDIF
ELSE
CUTF1=DDF(NFRRES)
ENDIF
537 FORMAT(/' NMDIMB: Too many vectors for the given cutoff frequency'
1 /' Cutoff frequency is decreased to',F9.3)
C
C Compute the new partioning of the molecule
C
CALL PARTIC(NAT3,NFREG,NFCUT1,NPARMX,NPARC,ATMPAR,NFRRES,
1 PARDIM)
NPARS=NPARC
DO I=1,NPARC
ATMPAS(1,I)=ATMPAR(1,I)
ATMPAS(2,I)=ATMPAR(2,I)
ENDDO
IF(QDW) THEN
IF(IPAR1.EQ.0.OR.IPAR2.EQ.0) LWDINI=.TRUE.
IF(IPAR1.GE.IPAR2) LWDINI=.TRUE.
IF(IABS(IPAR1).GT.NPARC*2) LWDINI=.TRUE.
IF(IABS(IPAR2).GT.NPARC*2) LWDINI=.TRUE.
IF(ITER.EQ.0) LWDINI=.TRUE.
ENDIF
ITMX=ITMX+ITER
IF(PRNLEV.GE.2) THEN
WRITE(OUTU,543) ITER,ITMX
IF(QDW) WRITE(OUTU,545) IPAR1,IPAR2
ENDIF
543 FORMAT(/' NMDIMB: Previous iteration number = ',I8/
1 ' NMDIMB: Iteration number to reach = ',I8)
545 FORMAT(' NMDIMB: Previous sub-blocks = ',I5,2X,I5)
C
IF(SAVF.LE.0) SAVF=NPARC
IF(PRNLEV.GE.2) WRITE(OUTU,547) SAVF
547 FORMAT(' NMDIMB: Eigenvectors will be saved every',I5,
1 ' iterations')
C
C If double windowing is defined, the original block sizes are divided
C in two.
C
IF(QDW) THEN
NSUBP=1
CALL PARTID(NPARC,ATMPAR,NPARD,ATMPAD,NPARMX)
ATMPAF=ALLHP(INTEG4(NPARD*NPARD))
ATMCOR=ALLHP(INTEG4(NATOM))
DDVAL=ALLHP(IREAL8(NPARD*NPARD))
CALL CORARR(ATMPAD,NPARD,HEAP(ATMCOR),NATOM)
CALL PARLIS(HEAP(ATMCOR),HEAP(ATMPAF),INBCMP,JNBCMP,NPARD,
2 NSUBP,NATOM,X,Y,Z,NBOND,IB,JB,DD1CMP,HEAP(DDVAL),DDVALM)
SUBLIS=ALLHP(INTEG4(NSUBP*2))
CALL PARINT(HEAP(ATMPAF),NPARD,HEAP(SUBLIS),NSUBP)
CALL INIPAF(HEAP(ATMPAF),NPARD)
C
C Find out with which block to continue (double window method only)
C
IPA1=IPAR1
IPA2=IPAR2
IRESF=0
IF(LWDINI) THEN
ITER=0
LWDINI=.FALSE.
GOTO 500
ENDIF
DO II=1,NSUBP
CALL IPART(HEAP(SUBLIS),II,IPAR1,IPAR2,HEAP(ATMPAF),
1 NPARD,QCALC)
IF((IPAR1.EQ.IPA1).AND.(IPAR2.EQ.IPA2)) GOTO 500
ENDDO
ENDIF
500 CONTINUE
C
C Main loop.
C
DO WHILE((CVGMX.GT.TOLDIM).AND.(ITER.LT.ITMX))
IF(.NOT.QDW) THEN
ITER=ITER+1
IF(PRNLEV.GE.2) WRITE(OUTU,553) ITER
553 FORMAT(/' NMDIMB: Iteration number = ',I8)
IF(INIDS.EQ.0) THEN
INIDS=1
ELSE
INIDS=0
ENDIF
CALL PARTDS(NAT3,NPARC,ATMPAR,NPARS,ATMPAS,INIDS,NPARMX,
1 DDF,NFREG,CUTF1,PARDIM,NFCUT1)
C DO-THE-DIAGONALISATIONS
ASSIGN 641 to I640
GOTO 640
641 CONTINUE
QDIAG=.FALSE.
C DO-THE-DIAGONALISATIONS-WITH-RESIDUALS
ASSIGN 622 TO I620
GOTO 620
622 CONTINUE
QDIAG=.TRUE.
C SAVE-MODES
ASSIGN 702 TO I700
GOTO 700
702 CONTINUE
C
ELSE
DO II=1,NSUBP
CALL IPART(HEAP(SUBLIS),II,IPAR1,IPAR2,HEAP(ATMPAF),
1 NPARD,QCALC)
IF(QCALC) THEN
IRESF=IRESF+1
ITER=ITER+1
IF(PRNLEV.GE.2) WRITE(OUTU,553) ITER
C DO-THE-DWIN-DIAGONALISATIONS
ASSIGN 661 TO I660
GOTO 660
661 CONTINUE
ENDIF
IF((IRESF.EQ.SAVF).OR.(ITER.EQ.ITMX)) THEN
IRESF=0
QDIAG=.FALSE.
C DO-THE-DIAGONALISATIONS-WITH-RESIDUALS
ASSIGN 623 TO I620
GOTO 620
623 CONTINUE
QDIAG=.TRUE.
IF((CVGMX.LE.TOLDIM).OR.(ITER.EQ.ITMX)) GOTO 600
C SAVE-MODES
ASSIGN 703 TO I700
GOTO 700
703 CONTINUE
ENDIF
ENDDO
ENDIF
ENDDO
600 CONTINUE
C
C SAVE-MODES
ASSIGN 704 TO I700
GOTO 700
704 CONTINUE
CALL CLEANHP(NAT3,NFREG,NPARD,NSUBP,PARDIM,DDV2,DDSS,DDVBAS,
1 DDVAL,JSPACE,TRAROT,
2 SCIFV1,SCIFV2,SCIFV3,SCIFV4,SCIFV6,
3 DRATQ,ERATQ,E2RATQ,BDRATQ,INRATQ,IUPD,ATMPAF,
4 ATMCOR,SUBLIS,LSCI,QDW,LBIG)
RETURN
C-----------------------------------------------------------------------
C INTERNAL PROCEDURES
C-----------------------------------------------------------------------
C TO DO-THE-DIAGONALISATIONS-WITH-RESIDUALS
620 CONTINUE
IF(IUNRMD.LT.0) THEN
CALL SELNMD(DDF,NFRET,CUTF1,NFC)
N1=NFCUT1
N2=(NFRET+6)/2
NFCUT=MAX(N1,N2)
IF(NFCUT*2-6 .GT. NFREG) THEN
NFCUT=(NFREG+6)/2
CUTF1=DDF(NFCUT)
IF(PRNLEV.GE.2) THEN
WRITE(OUTU,562) ITER
WRITE(OUTU,564) CUTF1
ENDIF
ENDIF
ELSE
NFCUT=NFRET
NFC=NFRET
ENDIF
562 FORMAT(/' NMDIMB: Not enough space to hold the residual vectors'/
1 ' into DDV array during iteration ',I5)
564 FORMAT(' Cutoff frequency is changed to ',F9.3)
C
C do reduced diagonalization with preceding eigenvectors plus
C residual vectors
C
ISTRT=1
ISTOP=NFCUT
CALL CLETR(DDV,HEAP(TRAROT),NAT3,ISTRT,ISTOP,NFCUT,DDEV,DDF)
CALL RNMTST(DDV,HEAP(DDVBAS),NAT3,DDSCR,DD1CMP,INBCMP,JNBCMP,
2 7,NFCUT,CVGMX,NFCUT,NFC,QDIAG,LBIG,IUNMOD)
NFSAV=NFCUT
IF(QDIAG) THEN
NFRET=NFCUT*2-6
IF(PRNLEV.GE.2) WRITE(OUTU,566) NFRET
566 FORMAT(/' NMDIMB: Diagonalization with residual vectors. '/
1 ' Dimension of the reduced basis set'/
2 ' before orthonormalization = ',I5)
NFCUT=NFRET
OLDPRN=PRNLEV
PRNLEV=1
CALL ORTHNM(1,NFRET,NFCUT,DDV,NAT3,LPURG,TOLER)
PRNLEV=OLDPRN
NFRET=NFCUT
IF(PRNLEV.GE.2) WRITE(OUTU,568) NFRET
568 FORMAT(' after orthonormalization = ',I5)
IF(LBIG) THEN
IF(PRNLEV.GE.2) WRITE(OUTU,570) NFCUT,IUNMOD
570 FORMAT(' NMDIMB: ',I5,' basis vectors are saved in unit',I5)
REWIND (UNIT=IUNMOD)
LCARD=.FALSE.
CALL WRTNMD(LCARD,1,NFCUT,NAT3,DDV,DDSCR,DDEV,IUNMOD,AMASS)
CALL SAVEIT(IUNMOD)
ELSE
CALL CPARAY(HEAP(DDVBAS),DDV,NAT3,1,NFCUT,1)
ENDIF
QMIX=.FALSE.
CALL RBDG(X,Y,Z,NAT3,NDIM,NFRET,DDV,DDF,DDEV,
1 DDSCR,HEAP(DD5),HEAP(DDSS),HEAP(DDV2),NADD,
2 INBCMP,JNBCMP,HEAP(DDVBAS),DD1CMP,QMIX,0,0,IS3,IS4,
3 CUTF1,NFCUT1,NFREG,HEAP(IUPD),DD1BLL,HEAP(SCIFV1),
4 HEAP(SCIFV2),HEAP(SCIFV3),HEAP(SCIFV4),HEAP(SCIFV6),
5 HEAP(DRATQ),HEAP(ERATQ),HEAP(E2RATQ),
6 HEAP(BDRATQ),HEAP(INRATQ),LSCI,LBIG,IUNMOD)
CALL SELNMD(DDF,NFRET,CUTF1,NFCUT1)
ENDIF
GOTO I620
C
C-----------------------------------------------------------------------
C TO DO-THE-DIAGONALISATIONS
640 CONTINUE
DO I=1,NPARC
NFCUT1=NFRRES
IS1=ATMPAR(1,I)
IS2=ATMPAR(2,I)
NDIM=(IS2-IS1+1)*3
IF(PRNLEV.GE.2) WRITE(OUTU,573) I,IS1,IS2
573 FORMAT(/' NMDIMB: Mixed diagonalization, part ',I5/
1 ' NMDIMB: Block limits: ',I5,2X,I5)
IF(NDIM+NFCUT1.GT.PARDIM) CALL WRNDIE(-3,'<NMDIMB>',
1 'Error in dimension of block')
NFRET=NFCUT1
IF(NFRET.GT.NFREG) NFRET=NFREG
CALL CLETR(DDV,HEAP(TRAROT),NAT3,1,NFCUT1,NFCUT,DDEV,DDF)
NFCUT1=NFCUT
CALL ADZER(DDV,1,NFCUT1,NAT3,IS1,IS2)
NFSAV=NFCUT1
OLDPRN=PRNLEV
PRNLEV=1
CALL ORTHNM(1,NFCUT1,NFCUT,DDV,NAT3,LPURG,TOLER)
PRNLEV=OLDPRN
CALL CPARAY(HEAP(DDVBAS),DDV,NAT3,1,NFCUT,1)
NFRET=NDIM+NFCUT
QMIX=.TRUE.
CALL RBDG(X,Y,Z,NAT3,NDIM,NFRET,DDV,DDF,DDEV,
1 DDSCR,HEAP(DD5),HEAP(DDSS),HEAP(DDV2),NADD,
2 INBCMP,JNBCMP,HEAP(DDVBAS),DD1CMP,QMIX,IS1,IS2,IS3,IS4,
3 CUTF1,NFCUT,NFREG,HEAP(IUPD),DD1BLL,HEAP(SCIFV1),
4 HEAP(SCIFV2),HEAP(SCIFV3),HEAP(SCIFV4),HEAP(SCIFV6),
5 HEAP(DRATQ),HEAP(ERATQ),HEAP(E2RATQ),
6 HEAP(BDRATQ),HEAP(INRATQ),LSCI,LBIG,IUNMOD)
QMIX=.FALSE.
IF(NFCUT.GT.NFRRES) NFCUT=NFRRES
NFCUT1=NFCUT
NFRET=NFCUT
ENDDO
GOTO I640
C
C-----------------------------------------------------------------------
C TO DO-THE-DWIN-DIAGONALISATIONS
660 CONTINUE
C
C Store the DDV vectors into DDVBAS
C
NFCUT1=NFRRES
IS1=ATMPAD(1,IPAR1)
IS2=ATMPAD(2,IPAR1)
IS3=ATMPAD(1,IPAR2)
IS4=ATMPAD(2,IPAR2)
NDIM=(IS2-IS1+IS4-IS3+2)*3
IF(PRNLEV.GE.2) WRITE(OUTU,577) IPAR1,IPAR2,IS1,IS2,IS3,IS4
577 FORMAT(/' NMDIMB: Mixed double window diagonalization, parts ',
1 2I5/
2 ' NMDIMB: Block limits: ',I5,2X,I5,4X,I5,2X,I5)
IF(NDIM+NFCUT1.GT.PARDIM) CALL WRNDIE(-3,'<NMDIMB>',
1 'Error in dimension of block')
NFRET=NFCUT1
IF(NFRET.GT.NFREG) NFRET=NFREG
C
C Prepare the DDV vectors consisting of 6 translations-rotations
C + eigenvectors from 7 to NFCUT1 + cartesian displacements vectors
C spanning the atoms from IS1 to IS2
C
CALL CLETR(DDV,HEAP(TRAROT),NAT3,1,NFCUT1,NFCUT,DDEV,DDF)
NFCUT1=NFCUT
NFSAV=NFCUT1
CALL ADZERD(DDV,1,NFCUT1,NAT3,IS1,IS2,IS3,IS4)
OLDPRN=PRNLEV
PRNLEV=1
CALL ORTHNM(1,NFCUT1,NFCUT,DDV,NAT3,LPURG,TOLER)
PRNLEV=OLDPRN
CALL CPARAY(HEAP(DDVBAS),DDV,NAT3,1,NFCUT,1)
C
NFRET=NDIM+NFCUT
QMIX=.TRUE.
CALL RBDG(X,Y,Z,NAT3,NDIM,NFRET,DDV,DDF,DDEV,
1 DDSCR,HEAP(DD5),HEAP(DDSS),HEAP(DDV2),NADD,
2 INBCMP,JNBCMP,HEAP(DDVBAS),DD1CMP,QMIX,IS1,IS2,IS3,IS4,
3 CUTF1,NFCUT,NFREG,HEAP(IUPD),DD1BLL,HEAP(SCIFV1),
4 HEAP(SCIFV2),HEAP(SCIFV3),HEAP(SCIFV4),HEAP(SCIFV6),
5 HEAP(DRATQ),HEAP(ERATQ),HEAP(E2RATQ),
6 HEAP(BDRATQ),HEAP(INRATQ),LSCI,LBIG,IUNMOD)
QMIX=.FALSE.
C
IF(NFCUT.GT.NFRRES) NFCUT=NFRRES
NFCUT1=NFCUT
NFRET=NFCUT
GOTO I660
C
C-----------------------------------------------------------------------
C TO SAVE-MODES
700 CONTINUE
IF(PRNLEV.GE.2) WRITE(OUTU,583) IUNMOD
583 FORMAT(/' NMDIMB: Saving the eigenvalues and eigenvectors to unit'
1 ,I4)
REWIND (UNIT=IUNMOD)
ISTRT=1
ISTOP=NFSAV
LCARD=.FALSE.
IF(PRNLEV.GE.2) WRITE(OUTU,585) NFSAV,IUNMOD
585 FORMAT(' NMDIMB: ',I5,' modes are saved in unit',I5)
CALL WRTNMD(LCARD,ISTRT,ISTOP,NAT3,DDV,DDSCR,DDEV,IUNMOD,
1 AMASS)
CALL SAVEIT(IUNMOD)
GOTO I700
C
C-----------------------------------------------------------------------
C TO ALLOCATE-SPACE-FOR-DIAGONALIZATION
720 CONTINUE
DDV2=ALLHP(IREAL8((PARDIM+3)*(PARDIM+3)))
JSPACE=IREAL8((PARDIM+4))*8
JSP=IREAL8(((PARDIM+3)*(PARDIM+4))/2)
JSPACE=JSPACE+JSP
DDSS=ALLHP(JSPACE)
DD5=DDSS+JSPACE-JSP
GOTO I720
C
C-----------------------------------------------------------------------
C TO ALLOCATE-SPACE-FOR-REDUCED-BASIS
760 CONTINUE
IF(LBIG) THEN
DDVBAS=ALLHP(IREAL8(NAT3))
ELSE
DDVBAS=ALLHP(IREAL8(NFREG*NAT3))
ENDIF
GOTO I760
C
C-----------------------------------------------------------------------
C TO ALLOCATE-SPACE-FOR-TRANSROT-VECTORS
800 CONTINUE
TRAROT=ALLHP(IREAL8(6*NAT3))
GOTO I800
C
C-----------------------------------------------------------------------
C TO ALLOCATE-SPACE-FOR-LSCI
840 CONTINUE
SCIFV1=ALLHP(IREAL8(PARDIM+3))
SCIFV2=ALLHP(IREAL8(PARDIM+3))
SCIFV3=ALLHP(IREAL8(PARDIM+3))
SCIFV4=ALLHP(IREAL8(PARDIM+3))
SCIFV6=ALLHP(IREAL8(PARDIM+3))
DRATQ=ALLHP(IREAL8(PARDIM+3))
ERATQ=ALLHP(IREAL8(PARDIM+3))
E2RATQ=ALLHP(IREAL8(PARDIM+3))
BDRATQ=ALLHP(IREAL8(PARDIM+3))
INRATQ=ALLHP(INTEG4(PARDIM+3))
GOTO I840
C
C-----------------------------------------------------------------------
C TO ALLOCATE-DUMMY-SPACE-FOR-LSCI
880 CONTINUE
SCIFV1=ALLHP(IREAL8(2))
SCIFV2=ALLHP(IREAL8(2))
SCIFV3=ALLHP(IREAL8(2))
SCIFV4=ALLHP(IREAL8(2))
SCIFV6=ALLHP(IREAL8(2))
DRATQ=ALLHP(IREAL8(2))
ERATQ=ALLHP(IREAL8(2))
E2RATQ=ALLHP(IREAL8(2))
BDRATQ=ALLHP(IREAL8(2))
INRATQ=ALLHP(INTEG4(2))
GOTO I880
C
C-----------------------------------------------------------------------
C TO ALLOCATE-SPACE-FOR-OTHER-ARRAYS
920 CONTINUE
IUPD=ALLHP(INTEG4(PARDIM+3))
GOTO I920
C.##ELSE
C.##ENDIF
END
|
[STATEMENT]
lemma valid_prepend: "\<lbrakk>length xs \<le> l t - 1; length ys = length xs; valid (xs @ as) t\<rbrakk> \<Longrightarrow> valid (ys @ as) t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>length xs \<le> l t - 1; length ys = length xs; valid (xs @ as) t\<rbrakk> \<Longrightarrow> valid (ys @ as) t
[PROOF STEP]
unfolding valid_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>length xs \<le> l t - 1; length ys = length xs; well_shaped t \<and> well_valued (xs @ as) t\<rbrakk> \<Longrightarrow> well_shaped t \<and> well_valued (ys @ as) t
[PROOF STEP]
by (auto intro: well_valued0_prepend) |
State Before: 𝕜 : Type u_3
E : Type ?u.376048
F : Type ?u.376051
G : Type ?u.376054
ι : Type u_1
π : ι → Type u_2
inst✝² : OrderedSemiring 𝕜
inst✝¹ : (i : ι) → AddCommMonoid (π i)
inst✝ : (i : ι) → Module 𝕜 (π i)
s : Set ι
x y : (i : ι) → π i
⊢ [x-[𝕜]y] ⊆ pi s fun i => [x i-[𝕜]y i] State After: case intro.intro.intro.intro.intro
𝕜 : Type u_3
E : Type ?u.376048
F : Type ?u.376051
G : Type ?u.376054
ι : Type u_1
π : ι → Type u_2
inst✝² : OrderedSemiring 𝕜
inst✝¹ : (i : ι) → AddCommMonoid (π i)
inst✝ : (i : ι) → Module 𝕜 (π i)
s : Set ι
x y z : (i : ι) → π i
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hz : a • x + b • y = z
i : ι
⊢ z i ∈ (fun i => [x i-[𝕜]y i]) i Tactic: rintro z ⟨a, b, ha, hb, hab, hz⟩ i - State Before: case intro.intro.intro.intro.intro
𝕜 : Type u_3
E : Type ?u.376048
F : Type ?u.376051
G : Type ?u.376054
ι : Type u_1
π : ι → Type u_2
inst✝² : OrderedSemiring 𝕜
inst✝¹ : (i : ι) → AddCommMonoid (π i)
inst✝ : (i : ι) → Module 𝕜 (π i)
s : Set ι
x y z : (i : ι) → π i
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hz : a • x + b • y = z
i : ι
⊢ z i ∈ (fun i => [x i-[𝕜]y i]) i State After: no goals Tactic: exact ⟨a, b, ha, hb, hab, congr_fun hz i⟩ |
Romance blooms between two soldiers stationed at an Israeli outpost on the Lebanese border.
It's a strong and powerful story, but it is not a great film. Limited in some ways by its length (60 minutes), it shows us a lot, yet the story and characters could have been developed further. The way it was shot was rather amateurish, and I think that hindered its development in many ways.
It doesn't have the strength of "Bubble", and something about the story feels gratuitous. |
// Boost.Geometry
// Copyright (c) 2016-2017 Oracle and/or its affiliates.
// Contributed and/or modified by Vissarion Fysikopoulos, on behalf of Oracle
// Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GEOMETRY_FORMULAS_MAXIMUM_LONGITUDE_HPP
#define BOOST_GEOMETRY_FORMULAS_MAXIMUM_LONGITUDE_HPP
#include <boost/geometry/formulas/spherical.hpp>
#include <boost/geometry/formulas/flattening.hpp>
#include <boost/mpl/assert.hpp>
#include <boost/math/special_functions/hypot.hpp>
namespace boost { namespace geometry { namespace formula
{
/*!
\brief Algorithm to compute the vertex longitude of a geodesic segment. Vertex is
a point on the geodesic that maximizes (or minimizes) the latitude. The algorithm
is given the vertex latitude.
*/
//Classes for specific CS
template <typename CT>
class vertex_longitude_on_sphere
{
public:
template <typename T>
static inline CT apply(T const& lat1, //segment point 1
T const& lat2, //segment point 2
T const& lat3, //vertex latitude
T const& sin_l12,
T const& cos_l12) //lon1 -lon2
{
//https://en.wikipedia.org/wiki/Great-circle_navigation#Finding_way-points
CT const A = sin(lat1) * cos(lat2) * cos(lat3) * sin_l12;
CT const B = sin(lat1) * cos(lat2) * cos(lat3) * cos_l12
- cos(lat1) * sin(lat2) * cos(lat3);
CT lon = atan2(B, A);
return lon + math::pi<CT>();
}
};
template <typename CT>
class vertex_longitude_on_spheroid
{
template<typename T>
static inline void normalize(T& x, T& y)
{
T h = boost::math::hypot(x, y);
x /= h;
y /= h;
}
public:
template <typename T, typename Spheroid>
static inline CT apply(T const& lat1, //segment point 1
T const& lat2, //segment point 2
T const& lat3, //vertex latitude
T& alp1,
Spheroid const& spheroid)
{
        // We assume that the segment points lie on different sides
        // w.r.t. the vertex
// Constants
CT const c0 = 0;
CT const c2 = 2;
CT const half_pi = math::pi<CT>() / c2;
if (math::equals(lat1, half_pi)
|| math::equals(lat2, half_pi)
|| math::equals(lat1, -half_pi)
|| math::equals(lat2, -half_pi))
{
// one segment point is the pole
return c0;
}
// More constants
CT const f = flattening<CT>(spheroid);
CT const pi = math::pi<CT>();
CT const c1 = 1;
CT const cminus1 = -1;
// First, compute longitude on auxiliary sphere
CT const one_minus_f = c1 - f;
CT const bet1 = atan(one_minus_f * tan(lat1));
CT const bet2 = atan(one_minus_f * tan(lat2));
CT const bet3 = atan(one_minus_f * tan(lat3));
CT cos_bet1 = cos(bet1);
CT cos_bet2 = cos(bet2);
CT const sin_bet1 = sin(bet1);
CT const sin_bet2 = sin(bet2);
CT const sin_bet3 = sin(bet3);
CT omg12 = 0;
if (bet1 < c0)
{
cos_bet1 *= cminus1;
omg12 += pi;
}
if (bet2 < c0)
{
cos_bet2 *= cminus1;
omg12 += pi;
}
CT const sin_alp1 = sin(alp1);
CT const cos_alp1 = math::sqrt(c1 - math::sqr(sin_alp1));
CT const norm = math::sqrt(math::sqr(cos_alp1) + math::sqr(sin_alp1 * sin_bet1));
CT const sin_alp0 = sin(atan2(sin_alp1 * cos_bet1, norm));
BOOST_ASSERT(cos_bet2 != c0);
CT const sin_alp2 = sin_alp1 * cos_bet1 / cos_bet2;
CT const cos_alp0 = math::sqrt(c1 - math::sqr(sin_alp0));
CT const cos_alp2 = math::sqrt(c1 - math::sqr(sin_alp2));
CT const sig1 = atan2(sin_bet1, cos_alp1 * cos_bet1);
CT const sig2 = atan2(sin_bet2, -cos_alp2 * cos_bet2); //lat3 is a vertex
CT const cos_sig1 = cos(sig1);
CT const sin_sig1 = math::sqrt(c1 - math::sqr(cos_sig1));
CT const cos_sig2 = cos(sig2);
CT const sin_sig2 = math::sqrt(c1 - math::sqr(cos_sig2));
CT const omg1 = atan2(sin_alp0 * sin_sig1, cos_sig1);
CT const omg2 = atan2(sin_alp0 * sin_sig2, cos_sig2);
omg12 += omg1 - omg2;
CT const sin_omg12 = sin(omg12);
CT const cos_omg12 = cos(omg12);
CT omg13 = geometry::formula::vertex_longitude_on_sphere<CT>
::apply(bet1, bet2, bet3, sin_omg12, cos_omg12);
if (lat1 * lat2 < c0)//different hemispheres
{
if ((lat2 - lat1) * lat3 > c0)// ascending segment
{
omg13 = pi - omg13;
}
}
// Second, compute the ellipsoidal longitude
CT const e2 = f * (c2 - f);
CT const ep = math::sqrt(e2 / (c1 - e2));
CT const k2 = math::sqr(ep * cos_alp0);
CT const sqrt_k2_plus_one = math::sqrt(c1 + k2);
CT const eps = (sqrt_k2_plus_one - c1) / (sqrt_k2_plus_one + c1);
CT const eps2 = eps * eps;
CT const n = f / (c2 - f);
// sig3 is the length from equator to the vertex
CT sig3;
if(sin_bet3 > c0)
{
sig3 = half_pi;
} else {
sig3 = -half_pi;
}
CT const cos_sig3 = 0;
CT const sin_sig3 = 1;
CT sig13 = sig3 - sig1;
if (sig13 > pi)
{
sig13 -= 2 * pi;
}
// Order 2 approximation
CT const c1over2 = 0.5;
CT const c1over4 = 0.25;
CT const c1over8 = 0.125;
CT const c1over16 = 0.0625;
CT const c4 = 4;
CT const c8 = 8;
CT const A3 = 1 - (c1over2 - c1over2 * n) * eps - c1over4 * eps2;
CT const C31 = (c1over4 - c1over4 * n) * eps + c1over8 * eps2;
CT const C32 = c1over16 * eps2;
CT const sin2_sig3 = c2 * cos_sig3 * sin_sig3;
CT const sin4_sig3 = sin_sig3 * (-c4 * cos_sig3
+ c8 * cos_sig3 * cos_sig3 * cos_sig3);
CT const sin2_sig1 = c2 * cos_sig1 * sin_sig1;
CT const sin4_sig1 = sin_sig1 * (-c4 * cos_sig1
+ c8 * cos_sig1 * cos_sig1 * cos_sig1);
CT const I3 = A3 * (sig13
+ C31 * (sin2_sig3 - sin2_sig1)
+ C32 * (sin4_sig3 - sin4_sig1));
CT const sign = bet3 >= c0
? c1
: cminus1;
CT const dlon_max = omg13 - sign * f * sin_alp0 * I3;
return dlon_max;
}
};
//CS_tag dispatching
template <typename CT, typename CS_Tag>
struct compute_vertex_lon
{
BOOST_MPL_ASSERT_MSG
(
false, NOT_IMPLEMENTED_FOR_THIS_COORDINATE_SYSTEM, (types<CS_Tag>)
);
};
template <typename CT>
struct compute_vertex_lon<CT, spherical_equatorial_tag>
{
template <typename Strategy>
static inline CT apply(CT const& lat1,
CT const& lat2,
CT const& vertex_lat,
CT const& sin_l12,
CT const& cos_l12,
CT,
Strategy)
{
return vertex_longitude_on_sphere<CT>
::apply(lat1,
lat2,
vertex_lat,
sin_l12,
cos_l12);
}
};
template <typename CT>
struct compute_vertex_lon<CT, geographic_tag>
{
template <typename Strategy>
static inline CT apply(CT const& lat1,
CT const& lat2,
CT const& vertex_lat,
CT,
CT,
CT& alp1,
Strategy const& azimuth_strategy)
{
return vertex_longitude_on_spheroid<CT>
::apply(lat1,
lat2,
vertex_lat,
alp1,
azimuth_strategy.model());
}
};
// Vertex longitude interface
// Assume that lon1 < lon2 and vertex_lat is the latitude of the vertex
template <typename CT, typename CS_Tag>
class vertex_longitude
{
public :
template <typename Strategy>
static inline CT apply(CT& lon1,
CT& lat1,
CT& lon2,
CT& lat2,
CT const& vertex_lat,
CT& alp1,
Strategy const& azimuth_strategy)
{
CT const c0 = 0;
CT pi = math::pi<CT>();
//Vertex is a segment's point
if (math::equals(vertex_lat, lat1))
{
return lon1;
}
if (math::equals(vertex_lat, lat2))
{
return lon2;
}
        //Segment lies on a meridian
if (math::equals(lon1, lon2))
{
return (std::max)(lat1, lat2);
}
BOOST_ASSERT(lon1 < lon2);
CT dlon = compute_vertex_lon<CT, CS_Tag>::apply(lat1, lat2,
vertex_lat,
sin(lon1 - lon2),
cos(lon1 - lon2),
alp1,
azimuth_strategy);
CT vertex_lon = std::fmod(lon1 + dlon, 2 * pi);
if (vertex_lat < c0)
{
vertex_lon -= pi;
}
if (std::abs(lon1 - lon2) > pi)
{
vertex_lon -= pi;
}
return vertex_lon;
}
};
}}} // namespace boost::geometry::formula
#endif // BOOST_GEOMETRY_FORMULAS_MAXIMUM_LONGITUDE_HPP
|
using RecurrenceAnalysis
using Test
ti = time()
@testset "RecurrenceAnalysis tests" begin
include("dynamicalsystems.jl")
include("smallmatrix.jl")
include("deprecations.jl")
end
ti = time() - ti
println("\nTest took total time of:")
println(round(ti, digits=3), " seconds or ", round(ti/60, digits=3), " minutes")
|
module Proof
%default total
%access export
invert : (a : Nat) -> (b : Nat) -> (a + a = b + b) -> a = b
invert Z Z = const Refl
invert Z (S k) = absurd . SIsNotZ . sym
invert (S k) Z = absurd . SIsNotZ
invert (S k) (S j) = rewrite sym $ plusSuccRightSucc k k in rewrite sym $ plusSuccRightSucc j j in
eqSucc k j . invert k j . succInjective (k+k) (j+j) . succInjective (S $ k+k) (S $ j+j)
|
import cedille-options
open import general-util
module toplevel-state (options : cedille-options.options) {mF : Set → Set} {{_ : monad mF}} where
open import lib
open import cedille-types
open import classify options {mF}
open import ctxt
open import constants
open import conversion
open import rename
open import spans options {mF}
open import syntax-util
open import to-string options
open import string-format
open import subst
import cws-types
record include-elt : Set where
field ast : maybe start
cwst : maybe cws-types.start
deps : 𝕃 string {- dependencies -}
import-to-dep : trie string {- map import strings in the file to their full paths -}
ss : spans ⊎ string {- spans in string form (read from disk) -}
err : 𝔹 -- is ss reporting an error
need-to-add-symbols-to-context : 𝔹
do-type-check : 𝔹
inv : do-type-check imp need-to-add-symbols-to-context ≡ tt
last-parse-time : maybe UTC
cede-up-to-date : 𝔹
rkt-up-to-date : 𝔹
blank-include-elt : include-elt
blank-include-elt = record { ast = nothing ; cwst = nothing; deps = [] ;
import-to-dep = empty-trie ; ss = inj₂ "" ; err = ff ; need-to-add-symbols-to-context = tt ;
do-type-check = tt ; inv = refl ; last-parse-time = nothing; cede-up-to-date = ff ; rkt-up-to-date = ff }
-- the dependencies should pair import strings found in the file with the full paths to those imported files
new-include-elt : filepath → (dependencies : 𝕃 (string × string)) → (ast : start) →
cws-types.start → maybe UTC → include-elt
new-include-elt filename deps x y time =
record { ast = just x ; cwst = just y ; deps = map snd deps ; import-to-dep = trie-fill empty-trie deps ; ss = inj₂ "" ; err = ff ;
need-to-add-symbols-to-context = tt ;
do-type-check = tt ; inv = refl ; last-parse-time = time ; cede-up-to-date = ff ; rkt-up-to-date = ff }
error-include-elt : string → include-elt
error-include-elt err = record blank-include-elt { ss = inj₂ (global-error-string err) ; err = tt }
error-span-include-elt : string → string → posinfo → include-elt
error-span-include-elt err errSpan pos = record blank-include-elt { ss = inj₁ (add-span (span.mk-span err pos (posinfo-plus pos 1) [] (just errSpan) ) empty-spans ) ; err = tt }
set-do-type-check-include-elt : include-elt → 𝔹 → include-elt
set-do-type-check-include-elt ie b =
record ie { need-to-add-symbols-to-context = (b || include-elt.need-to-add-symbols-to-context ie) ;
do-type-check = b ;
inv = lem b }
where lem : (b : 𝔹) → b imp (b || include-elt.need-to-add-symbols-to-context ie) ≡ tt
lem tt = refl
lem ff = refl
set-need-to-add-symbols-to-context-include-elt : include-elt → 𝔹 → include-elt
set-need-to-add-symbols-to-context-include-elt ie b =
record ie { need-to-add-symbols-to-context = b ;
do-type-check = b && include-elt.do-type-check ie ;
inv = lem b }
where lem : ∀(b : 𝔹){b' : 𝔹} → b && b' imp b ≡ tt
lem tt {tt} = refl
lem tt {ff} = refl
lem ff {tt} = refl
lem ff {ff} = refl
set-spans-include-elt : include-elt → spans → include-elt
set-spans-include-elt ie ss =
record ie { ss = inj₁ ss ;
err = spans-have-error ss }
set-last-parse-time-include-elt : include-elt → UTC → include-elt
set-last-parse-time-include-elt ie time =
record ie { last-parse-time = just time }
set-cede-file-up-to-date-include-elt : include-elt → 𝔹 → include-elt
set-cede-file-up-to-date-include-elt ie up-to-date = record ie { cede-up-to-date = up-to-date }
set-rkt-file-up-to-date-include-elt : include-elt → 𝔹 → include-elt
set-rkt-file-up-to-date-include-elt ie up-to-date = record ie { rkt-up-to-date = up-to-date }
set-spans-string-include-elt : include-elt → (err : 𝔹) → string → include-elt
set-spans-string-include-elt ie err ss = record ie { ss = inj₂ ss ; err = err }
record toplevel-state : Set where
constructor mk-toplevel-state
field include-path : 𝕃 string × stringset
files-with-updated-spans : 𝕃 string
is : trie include-elt {- keeps track of files we have parsed and/or processed -}
Γ : ctxt
new-toplevel-state : (include-path : 𝕃 string × stringset) → toplevel-state
new-toplevel-state ip = record { include-path = ip ;
files-with-updated-spans = [] ; is = empty-trie ; Γ = new-ctxt "[nofile]" "[nomod]" }
toplevel-state-lookup-occurrences : var → toplevel-state → 𝕃 (var × posinfo × string)
toplevel-state-lookup-occurrences symb (mk-toplevel-state _ _ _ Γ) = ctxt-lookup-occurrences Γ symb
get-include-elt-if : toplevel-state → filepath → maybe include-elt
get-include-elt-if s filename = trie-lookup (toplevel-state.is s) filename
-- get an include-elt assuming it will be there
get-include-elt : toplevel-state → filepath → include-elt
get-include-elt s filename with get-include-elt-if s filename
get-include-elt s filename | nothing = blank-include-elt {- should not happen -}
get-include-elt s filename | just ie = ie
set-include-elt : toplevel-state → filepath → include-elt → toplevel-state
set-include-elt s f ie = record s { is = trie-insert (toplevel-state.is s) f ie }
set-include-path : toplevel-state → 𝕃 string × stringset → toplevel-state
set-include-path s ip = record s { include-path = ip }
get-do-type-check : toplevel-state → string → 𝔹
get-do-type-check s filename = include-elt.do-type-check (get-include-elt s filename)
include-elt-spans-to-rope : include-elt → rope
include-elt-spans-to-rope ie with (include-elt.ss ie)
include-elt-spans-to-rope ie | inj₁ ss = spans-to-rope ss
include-elt-spans-to-rope ie | inj₂ ss = [[ ss ]]
include-elt-to-string : include-elt → string
include-elt-to-string ie =
" deps: " ^ (𝕃-to-string (λ x → x) "," (include-elt.deps ie)) ^
-- ast
", ast: " ^ maybe-else "not parsed" (λ ast → "parsed") (include-elt.ast ie) ^ ", " ^
" import-to-dep: " ^ (trie-to-string "," (format "filename: %s") (include-elt.import-to-dep ie)) ^
-- spans
" err: " ^ (𝔹-to-string (include-elt.err ie)) ^
", need-to-add-symbols-to-context: " ^ (𝔹-to-string (include-elt.need-to-add-symbols-to-context ie)) ^
", do-type-check: " ^ (𝔹-to-string (include-elt.do-type-check ie)) ^
", last-parse-time: " ^ (maybe-else "" utcToString (include-elt.last-parse-time ie))
params-to-string'' : params → string
params-to-string'' ParamsNil = ""
-- TODO print erased vs non-erased?
params-to-string'' (ParamsCons (Decl pi pi' me v t-k pi'') pms) = "{var: " ^ v ^ ", tk: " ^ rope-to-string (tk-to-string empty-ctxt t-k) ^ "}" ^ ", " ^ (params-to-string'' pms)
defParams-to-string : defParams → string
defParams-to-string (just pms) = params-to-string'' pms
defParams-to-string nothing = ""
-- TODO also print modname?
syms-to-string : trie (string × 𝕃 string) → string
syms-to-string = trie-to-string ", " (λ l → "{" ^ (𝕃-to-string (λ s → s) ", " (snd l)) ^ "}")
ctxt-info-to-string : ctxt-info → string
ctxt-info-to-string (term-decl tp) = "term-decl: {type: " ^ rope-to-string (to-string empty-ctxt tp) ^ "}"
ctxt-info-to-string (term-def dp opac t tp) = "term-def: {defParams: {" ^ (defParams-to-string dp) ^ "}, opacity: " ^ (opacity-to-string opac) ^ ", term: " ^ rope-to-string (to-string empty-ctxt t) ^ ", type: " ^ rope-to-string (to-string empty-ctxt tp) ^ "}"
ctxt-info-to-string (term-udef dp opac t) = "term-udef: {defParams: {" ^ (defParams-to-string dp) ^ "}, opacity: " ^ (opacity-to-string opac) ^ ", term: " ^ rope-to-string (to-string empty-ctxt t) ^ "}"
ctxt-info-to-string (type-decl k) = "type-decl: {kind: " ^ rope-to-string (to-string empty-ctxt k) ^ "}"
ctxt-info-to-string (type-def dp opac tp k) = "type-def: {defParams: {" ^ (defParams-to-string dp) ^ "}, opacity: " ^ (opacity-to-string opac) ^ ", tp: " ^ rope-to-string (to-string empty-ctxt tp) ^ ", kind: " ^ rope-to-string (to-string empty-ctxt k) ^ "}"
ctxt-info-to-string (kind-def pms k) = "kind-def: {pms: " ^ (params-to-string'' pms) ^ "kind: " ^ rope-to-string (to-string empty-ctxt k) ^ "}"
ctxt-info-to-string (rename-def v) = "rename-def: {var: " ^ v ^ "}"
ctxt-info-to-string (var-decl) = "var-decl"
ctxt-info-to-string (const-def _) = "const-def"
ctxt-info-to-string (datatype-def _ _) = "datatype-def"
sym-info-to-string : sym-info → string
sym-info-to-string (ci , (fn , pi)) = "{ctxt-info: " ^ (ctxt-info-to-string ci) ^ ", location: {filename: " ^ fn ^ ", posinfo: " ^ pi ^ "}}"
sym-infos-to-string : trie sym-info → string
sym-infos-to-string = trie-to-string ", " sym-info-to-string
occ-to-string : var × posinfo × string → string
occ-to-string (v , pi , s) = "var: " ^ v ^ ", posinfo: " ^ pi ^ ", string: " ^ s
sym-occs-to-string : trie (𝕃 (var × posinfo × string)) → string
sym-occs-to-string = trie-to-string ", " (λ l → "{" ^ (𝕃-to-string occ-to-string ", " l) ^ "}")
qualif-to-string : qualif-info → string
qualif-to-string (x , as) = x ^ rope-to-string (fst (args-to-string as {TERM} [[]] 0 [] (new-ctxt "" "") nothing neither))
mod-info-to-string : mod-info → string
mod-info-to-string (fn , mn , pms , q) = "filename: " ^ fn ^ ", modname: " ^ mn ^ ", pms: {" ^ (params-to-string'' pms) ^ "}" ^ ", qualif: {" ^ (trie-to-string ", " qualif-to-string q) ^ "}"
ctxt-to-string : ctxt → string
ctxt-to-string (mk-ctxt mi (ss , mn-fn) is os d) = "mod-info: {" ^ (mod-info-to-string mi) ^ "}, syms: {" ^ (syms-to-string ss) ^ "}, i: {" ^ (sym-infos-to-string is) ^ "}, sym-occs: {" ^ (sym-occs-to-string os) ^ "}"
toplevel-state-to-string : toplevel-state → string
toplevel-state-to-string (mk-toplevel-state include-path files is context) =
"\ninclude-path: {\n" ^ (𝕃-to-string (λ x → x) "\n" (fst include-path)) ^
"\n}\nis: {" ^ (trie-to-string "\n" include-elt-to-string is) ^
"\n}\nΓ: {" ^ (ctxt-to-string context) ^ "}"
-- check if a variable is being redefined, and if so return the first given state; otherwise the second (in the monad)
check-redefined : posinfo → var → toplevel-state → spanM toplevel-state → spanM toplevel-state
check-redefined pi x s c =
get-ctxt (λ Γ →
if ctxt-binds-var Γ x then
(spanM-add (redefined-var-span Γ pi x) ≫span spanMr s)
else c)
import-as : var → optAs → var
import-as v NoOptAs = v
import-as v (SomeOptAs pi pfx) = pfx # v
error-in-import-string = "There is an error in the imported file"
-- Traverse all imports, returning an error if we encounter the same file twice
{-# TERMINATING #-}
check-cyclic-imports : (original current : filepath) → stringset → (path : 𝕃 string) → toplevel-state → err-m
check-cyclic-imports fnₒ fn fs path s with stringset-contains fs fn
...| ff = foldr (λ fnᵢ x → x maybe-or check-cyclic-imports fnₒ fnᵢ (stringset-insert fs fn) (fn :: path) s)
nothing (include-elt.deps (get-include-elt s fn))
...| tt with fnₒ =string fn
...| tt = just (foldr (λ fnᵢ x → x ^ " → " ^ fnᵢ) ("Cyclic dependencies (" ^ fn) path ^ " → " ^ fn ^ ")")
...| ff = just error-in-import-string
scope-t : Set → Set
scope-t X = filepath → string → optAs → params → args → X → toplevel-state → toplevel-state × err-m
infixl 0 _≫=scope_
_≫=scope_ : toplevel-state × err-m → (toplevel-state → toplevel-state × err-m) → toplevel-state × err-m
_≫=scope_ (ts , err) f with f ts
...| ts' , err' = ts' , err maybe-or err'
{-# TERMINATING #-}
scope-file : toplevel-state → (original imported : filepath) → optAs → args → toplevel-state × err-m
scope-file' : scope-t ⊤
scope-cmds : scope-t cmds
scope-cmd : scope-t cmd
scope-var : scope-t var
scope-file ts fnₒ fnᵢ oa as with check-cyclic-imports fnₒ fnᵢ (trie-single fnₒ triv) [] ts
...| just e = ts , just e
...| nothing = scope-file' fnₒ fnᵢ oa ParamsNil as triv ts
scope-file' fnₒ fn oa psₒ as triv s with get-include-elt s fn
...| ie with include-elt.err ie | include-elt.ast ie
...| e | nothing = s , (maybe-if e) ≫maybe just error-in-import-string
...| e | just (File pi0 is pi1 pi2 mn ps cs pi3) =
(s , (maybe-if e) ≫maybe just error-in-import-string) ≫=scope
scope-cmds fn mn oa ps as (imps-to-cmds is) ≫=scope
scope-cmds fn mn oa ps as cs
scope-cmds fn mn oa ps as (CmdsNext c cs) s =
scope-cmd fn mn oa ps as c s ≫=scope scope-cmds fn mn oa ps as cs
scope-cmds fn mn oa ps as CmdsStart s = s , nothing
scope-cmd fn mn oa ps as (ImportCmd (Import pi NotPublic pi' ifn oa' as' pi'')) s = s , nothing
scope-cmd fn mn oa psₒ asₒ (ImportCmd (Import pi IsPublic pi' ifn oa' asᵢ' pi'')) s =
let ifn' = trie-lookup-else ifn (include-elt.import-to-dep (get-include-elt s fn)) ifn in
scope-file' fn ifn' oa psₒ asᵢ triv s
-- ^ oa' should be NoOptAs, so we can use oa ^
where
merged : trie (maybe arg) → params → args → trie (maybe arg)
merged σ (ParamsCons (Decl _ _ me x atk _) ps) (ArgsCons a as) =
merged (trie-insert σ x $ just a) ps as
merged σ (ParamsCons (Decl _ _ me x atk _) ps) ArgsNil =
merged (trie-insert σ x nothing) ps ArgsNil
merged σ _ _ = σ
arg-var : arg → maybe var
arg-var (TermArg me (Var pi x)) = just x
arg-var (TypeArg (TpVar pi x)) = just x
arg-var _ = nothing
σ = merged empty-trie psₒ asₒ
reorder : args → args
reorder (ArgsCons a as) =
maybe-else' (arg-var a ≫=maybe trie-lookup σ) (ArgsCons a $ reorder as) λ ma →
maybe-else' ma ArgsNil λ a → ArgsCons a $ reorder as
reorder ArgsNil = ArgsNil
asᵢ = reorder $ qualif-args (toplevel-state.Γ s) asᵢ'
scope-cmd fn mn oa ps as (DefKind _ v _ _ _) = scope-var fn mn oa ps as v
scope-cmd fn mn oa ps as (DefTermOrType _ (DefTerm _ v _ _) _) = scope-var fn mn oa ps as v
scope-cmd fn mn oa ps as (DefTermOrType _ (DefType _ v _ _) _) = scope-var fn mn oa ps as v
scope-cmd fn mn oa ps as (DefDatatype (Datatype _ _ v _ _ _ _) _) = scope-var fn mn oa ps as v
scope-var _ mn oa ps as v s with import-as v oa | s
...| v' | mk-toplevel-state ip fns is (mk-ctxt (mn' , fn , pms , q) ss sis os d) =
mk-toplevel-state ip fns is (mk-ctxt (mn' , fn , pms , trie-insert q v' (mn # v , as)) ss sis os d) ,
flip maybe-map (trie-lookup q v') (uncurry λ v'' as' →
"Multiple definitions of variable " ^ v' ^ " as " ^ v'' ^ " and " ^ (mn # v) ^
(if (mn # v =string v'') then " (perhaps it was already imported?)" else ""))
|
/-
Copyright (c) 2014 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Yaël Dillies, Patrick Stevens
-/
import algebra.order.field
import data.nat.cast
/-!
# Cast of naturals into fields
This file concerns the canonical homomorphism `ℕ → F`, where `F` is a field.
## Main results
* `nat.cast_div`: if `n` divides `m`, then `↑(m / n) = ↑m / ↑n`
* `nat.cast_div_le`: in all cases, `↑(m / n) ≤ ↑m / ↑ n`
-/
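/- A quick numeric sanity check of the two results above (illustrative only, not a
lemma in this file): over ℚ, taking m = 7 and n = 2 gives ((7 / 2 : ℕ) : ℚ) = 3 ≤ 7 / 2,
while in the divisible case, say m = 6 and n = 2, the cast commutes exactly:
((6 / 2 : ℕ) : ℚ) = 3 = 6 / 2. -/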
namespace nat
variables {α : Type*}
@[simp] theorem cast_div [field α] {m n : ℕ} (n_dvd : n ∣ m) (n_nonzero : (n : α) ≠ 0) :
((m / n : ℕ) : α) = m / n :=
begin
rcases n_dvd with ⟨k, rfl⟩,
have : n ≠ 0, {rintro rfl, simpa using n_nonzero},
rw [nat.mul_div_cancel_left _ this.bot_lt, cast_mul, mul_div_cancel_left _ n_nonzero],
end
section linear_ordered_field
variables [linear_ordered_field α]
/-- Natural division is always at most division in the field. -/
lemma cast_div_le {m n : ℕ} : ((m / n : ℕ) : α) ≤ m / n :=
begin
cases n,
{ rw [cast_zero, div_zero, nat.div_zero, cast_zero] },
rwa [le_div_iff, ←nat.cast_mul],
exact nat.cast_le.2 (nat.div_mul_le_self m n.succ),
{ exact nat.cast_pos.2 n.succ_pos }
end
lemma inv_pos_of_nat {n : ℕ} : 0 < ((n : α) + 1)⁻¹ :=
inv_pos.2 $ add_pos_of_nonneg_of_pos n.cast_nonneg zero_lt_one
lemma one_div_pos_of_nat {n : ℕ} : 0 < 1 / ((n : α) + 1) :=
by { rw one_div, exact inv_pos_of_nat }
lemma one_div_le_one_div {n m : ℕ} (h : n ≤ m) : 1 / ((m : α) + 1) ≤ 1 / ((n : α) + 1) :=
by { refine one_div_le_one_div_of_le _ _, exact nat.cast_add_one_pos _, simpa }
lemma one_div_lt_one_div {n m : ℕ} (h : n < m) : 1 / ((m : α) + 1) < 1 / ((n : α) + 1) :=
by { refine one_div_lt_one_div_of_lt _ _, exact nat.cast_add_one_pos _, simpa }
end linear_ordered_field
end nat
|
If $A$ is a finite set and $f$ is a function from $A$ to the positive integers, then the prime factors of $\prod_{a \in A} f(a)$ are the union of the prime factors of $f(a)$ for all $a \in A$. |
!------------------------------------------------------------*- Fortran -*-----
!
! S E R I A L B O X
!
! This file is distributed under terms of BSD license.
! See LICENSE.txt for more information.
!
!------------------------------------------------------------------------------
PROGRAM main_producer
USE m_ser
IMPLICIT NONE
REAL(KIND=8), DIMENSION(5,5,5) :: a
a = 5.0
PRINT *, 'CALL serialize with sum(a)=', sum(a)
CALL serialize(a)
END PROGRAM main_producer
|
[STATEMENT]
lemma eqButPID_sym:
assumes "eqButPID s s1" shows "eqButPID s1 s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. eqButPID s1 s
[PROOF STEP]
using assms eeqButPID_sym
[PROOF STATE]
proof (prove)
using this:
eqButPID s s1
eeqButPID ?psts ?psts1.0 \<Longrightarrow> eeqButPID ?psts1.0 ?psts
goal (1 subgoal):
1. eqButPID s1 s
[PROOF STEP]
unfolding eqButPID_def
[PROOF STATE]
proof (prove)
using this:
admin s = admin s1 \<and> pendingUReqs s = pendingUReqs s1 \<and> userReq s = userReq s1 \<and> userIDs s = userIDs s1 \<and> user s = user s1 \<and> pass s = pass s1 \<and> pendingFReqs s = pendingFReqs s1 \<and> friendReq s = friendReq s1 \<and> friendIDs s = friendIDs s1 \<and> sentOuterFriendIDs s = sentOuterFriendIDs s1 \<and> recvOuterFriendIDs s = recvOuterFriendIDs s1 \<and> postIDs s = postIDs s1 \<and> admin s = admin s1 \<and> post s = post s1 \<and> owner s = owner s1 \<and> vis s = vis s1 \<and> pendingSApiReqs s = pendingSApiReqs s1 \<and> sApiReq s = sApiReq s1 \<and> serverApiIDs s = serverApiIDs s1 \<and> serverPass s = serverPass s1 \<and> outerPostIDs s = outerPostIDs s1 \<and> eeqButPID (outerPost s) (outerPost s1) \<and> outerOwner s = outerOwner s1 \<and> outerVis s = outerVis s1 \<and> pendingCApiReqs s = pendingCApiReqs s1 \<and> cApiReq s = cApiReq s1 \<and> clientApiIDs s = clientApiIDs s1 \<and> clientPass s = clientPass s1 \<and> sharedWith s = sharedWith s1
eeqButPID ?psts ?psts1.0 \<Longrightarrow> eeqButPID ?psts1.0 ?psts
goal (1 subgoal):
1. admin s1 = admin s \<and> pendingUReqs s1 = pendingUReqs s \<and> userReq s1 = userReq s \<and> userIDs s1 = userIDs s \<and> user s1 = user s \<and> pass s1 = pass s \<and> pendingFReqs s1 = pendingFReqs s \<and> friendReq s1 = friendReq s \<and> friendIDs s1 = friendIDs s \<and> sentOuterFriendIDs s1 = sentOuterFriendIDs s \<and> recvOuterFriendIDs s1 = recvOuterFriendIDs s \<and> postIDs s1 = postIDs s \<and> admin s1 = admin s \<and> post s1 = post s \<and> owner s1 = owner s \<and> vis s1 = vis s \<and> pendingSApiReqs s1 = pendingSApiReqs s \<and> sApiReq s1 = sApiReq s \<and> serverApiIDs s1 = serverApiIDs s \<and> serverPass s1 = serverPass s \<and> outerPostIDs s1 = outerPostIDs s \<and> eeqButPID (outerPost s1) (outerPost s) \<and> outerOwner s1 = outerOwner s \<and> outerVis s1 = outerVis s \<and> pendingCApiReqs s1 = pendingCApiReqs s \<and> cApiReq s1 = cApiReq s \<and> clientApiIDs s1 = clientApiIDs s \<and> clientPass s1 = clientPass s \<and> sharedWith s1 = sharedWith s
[PROOF STEP]
by auto |
import tactic
/-
# Level 1 : Using exact
-/
namespace math3345 -- hide
/-
P and Q are propositions.
-/
variable P : Prop
variable Q : Prop
/- Lemma : no-side-bar
`h` is evidence that `P` and `k` is evidence for `Q`.
Replace `sorry` with `exact h` or `exact k` to complete the proof of `P`.
-/
lemma use_exact (h : P) (k : Q) : P :=
begin
sorry,
end
end math3345 -- hide
|
/-
Copyright (c) 2020 Fox Thomson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Fox Thomson.
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.fintype.basic
import Mathlib.data.finset.basic
import Mathlib.tactic.rcases
import Mathlib.tactic.omega.default
import Mathlib.computability.language
import Mathlib.PostPort
universes u l
namespace Mathlib
/-!
# Regular Expressions
This file contains the formal definition for regular expressions and basic lemmas. Note that
these are regular expressions in the sense of formal language theory, which is different from
the regexes used in computer science, such as the POSIX standard.
TODO
* Show that these regular expressions and DFAs/NFAs are equivalent.
* `attribute [pattern] has_mul.mul` has been added to this file; it could be moved.
-/
/--
This is the definition of regular expressions. The names used here mirror the definition
of a Kleene algebra (https://en.wikipedia.org/wiki/Kleene_algebra).
* `0` (`zero`) matches nothing
* `1` (`epsilon`) matches only the empty string
* `char a` matches only the string 'a'
* `star P` matches any finite concatenation of strings which match `P`
* `P + Q` (`plus P Q`) matches anything which matches `P` or `Q`
* `P * Q` (`comp P Q`) matches `x ++ y` if `x` matches `P` and `y` matches `Q`
-/
inductive regular_expression (α : Type u) where
| zero : regular_expression α
| epsilon : regular_expression α
| char : α → regular_expression α
| plus : regular_expression α → regular_expression α → regular_expression α
| comp : regular_expression α → regular_expression α → regular_expression α
| star : regular_expression α → regular_expression α
namespace regular_expression
protected instance inhabited {α : Type u} : Inhabited (regular_expression α) := { default := zero }
protected instance has_add {α : Type u} : Add (regular_expression α) := { add := plus }
protected instance has_mul {α : Type u} : Mul (regular_expression α) := { mul := comp }
protected instance has_one {α : Type u} : HasOne (regular_expression α) := { one := epsilon }
protected instance has_zero {α : Type u} : HasZero (regular_expression α) := { zero := zero }
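-- Added sketch (not part of the original port): with the instances above, a concrete regular
-- expression can be written with ordinary arithmetic notation. For example,
-- `char 'a' * star (char 'b') + 1` denotes the language of strings consisting of an `'a'`
-- followed by any number of `'b'`s, together with the empty string.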
@[simp] theorem zero_def {α : Type u} : zero = 0 := rfl
@[simp] theorem one_def {α : Type u} : epsilon = 1 := rfl
@[simp] theorem plus_def {α : Type u} (P : regular_expression α) (Q : regular_expression α) :
plus P Q = P + Q :=
rfl
@[simp] theorem comp_def {α : Type u} (P : regular_expression α) (Q : regular_expression α) :
comp P Q = P * Q :=
rfl
/-- `matches P` provides a language which contains all strings that `P` matches -/
def matches {α : Type u} : regular_expression α → language α := sorry
@[simp] theorem matches_zero_def {α : Type u} : matches 0 = 0 := rfl
@[simp] theorem matches_epsilon_def {α : Type u} : matches 1 = 1 := rfl
@[simp] theorem matches_add_def {α : Type u} (P : regular_expression α) (Q : regular_expression α) :
matches (P + Q) = matches P + matches Q :=
rfl
@[simp] theorem matches_mul_def {α : Type u} (P : regular_expression α) (Q : regular_expression α) :
matches (P * Q) = matches P * matches Q :=
rfl
@[simp] theorem matches_star_def {α : Type u} (P : regular_expression α) :
matches (star P) = language.star (matches P) :=
rfl
/-- `match_epsilon P` is true if and only if `P` matches the empty string -/
def match_epsilon {α : Type u} : regular_expression α → Bool := sorry
/-- `P.deriv a` matches `x` if `P` matches `a :: x`, the Brzozowski derivative of `P` with respect
to `a` -/
def deriv {α : Type u} [dec : DecidableEq α] : regular_expression α → α → regular_expression α :=
sorry
/-- `P.rmatch x` is true if and only if `P` matches `x`. This is a computable definition equivalent
to `matches`. -/
def rmatch {α : Type u} [dec : DecidableEq α] : regular_expression α → List α → Bool := sorry
@[simp] theorem zero_rmatch {α : Type u} [dec : DecidableEq α] (x : List α) : rmatch 0 x = false :=
sorry
theorem one_rmatch_iff {α : Type u} [dec : DecidableEq α] (x : List α) : ↥(rmatch 1 x) ↔ x = [] :=
sorry
theorem char_rmatch_iff {α : Type u} [dec : DecidableEq α] (a : α) (x : List α) :
↥(rmatch (char a) x) ↔ x = [a] :=
sorry
theorem add_rmatch_iff {α : Type u} [dec : DecidableEq α] (P : regular_expression α)
(Q : regular_expression α) (x : List α) : ↥(rmatch (P + Q) x) ↔ ↥(rmatch P x) ∨ ↥(rmatch Q x) :=
sorry
theorem mul_rmatch_iff {α : Type u} [dec : DecidableEq α] (P : regular_expression α)
(Q : regular_expression α) (x : List α) :
↥(rmatch (P * Q) x) ↔
∃ (t : List α), ∃ (u : List α), x = t ++ u ∧ ↥(rmatch P t) ∧ ↥(rmatch Q u) :=
sorry
theorem star_rmatch_iff {α : Type u} [dec : DecidableEq α] (P : regular_expression α) (x : List α) :
↥(rmatch (star P) x) ↔
∃ (S : List (List α)), x = list.join S ∧ ∀ (t : List α), t ∈ S → t ≠ [] ∧ ↥(rmatch P t) :=
sorry
@[simp] theorem rmatch_iff_matches {α : Type u} [dec : DecidableEq α] (P : regular_expression α)
(x : List α) : ↥(rmatch P x) ↔ x ∈ matches P :=
sorry
protected instance matches.decidable_pred {α : Type u} [dec : DecidableEq α]
(P : regular_expression α) : decidable_pred (matches P) :=
id fun (x : List α) => id (eq.mpr sorry (eq.decidable (rmatch P x) tt))
end Mathlib |
#' Download a labeled survey data set
#'
#' Download a survey data set from Qualtrics corresponding to a variable
#' dictionary generated by \code{\link[qualtdict]{dict_generate}}.
#' Question, item texts and level labels are added as attributes.
#'
#' @param dict A variable dictionary returned by
#' \code{\link[qualtdict]{dict_generate}}.
#' @param surveyID String. A variable dictionary returned by
#' \code{\link[qualtdict]{dict_generate}} has the survey ID as an
#' attribute. If it is read from elsewhere, this needs to be specified
#' manually. Defaults to \code{NULL}.
#' @param keys A character vector of variables that, when
#' \code{split_by_block} is \code{TRUE}, are added to every individual block
#' data set. Can also be used to add variables (e.g. IP address) found on
#' Qualtrics but not in the dictionary to the downloaded data sets.
#' @param skip_mistakes Logical. If \code{TRUE}, variables with potential
#' level-label mistakes will be removed from the data set.
#' @param split_by_block Logical. If \code{TRUE}, the function returns a
#' list with each element being the data set for a single survey block.
#' @param ... Other arguments passed to
#' \code{\link[qualtRics]{fetch_survey}}. Note that \code{surveyID},
#' \code{import_id}, \code{convert}, \code{label} and \code{include_questions}
#' will be overwritten by the function.
#'
#' @export
#' @examples
#' \dontrun{
#'
#' # Generate a dictionary
#' mydict <- dict_generate("SV_4YyAHbAxpdbzacl",
#' survey_name = "mysurvey",
#' var_name = "easy_name",
#' block_pattern = block_pattern,
#' block_sep = ".",
#' split_by_block = FALSE
#' )
#' survey_dat <- get_survey_data(mydict,
#' unanswer_recode = -77,
#' unanswer_recode_multi = 0
#' )
#' }
get_survey_data <- function(dict,
surveyID = NULL,
keys = NULL,
split_by_block = FALSE,
skip_mistakes = FALSE,
...) {
# Validate the dictionary
suppressWarnings(error_list <- dict_validate(dict)$error)
if (!is.null(error_list$non_unique_names)) {
message("Variables don't have unique names.")
return(error_list$non_unique_names)
}
mistake_qids <- unique(error_list$mistake_dict[["qid"]])
args <- list(...)
args$force_request <- TRUE
args$surveyID <- attr(dict, "surveyID")
args$import_id <- TRUE
args$convert <- FALSE
args$label <- FALSE
# What about loop and merge?
include_qids <- unique(str_extract(dict[["qid"]], "QID[0-9]+"))
# Somehow doesn't work when there is only one question
if (length(include_qids) > 1) {
args$include_questions <- include_qids
}
survey <- do.call(fetch_survey, args)
# Not sure why underscore is appended sometimes when include_questions is specified
colnames(survey) <- str_remove(colnames(survey), "_$")
if (!is.null(mistake_qids) & !skip_mistakes) {
warning("There are variables with potential incorrect level-label codings.
Run 'dict_validate()' on the dictionary object for details or
specify 'skip_mistakes = TRUE' to not apply recoding to
variables with mistakes.")
}
  if (skip_mistakes) {
    # Drop variables flagged by dict_validate() so they are neither renamed nor recoded
    dict <- filter(dict, !qid %in% mistake_qids)
  }
if (split_by_block == TRUE) {
    keys_dat <- dict[dict[["name"]] %in% keys, ]
    keys <- unique(unlist(keys_dat[["qid"]]))
block_dict <- map(
split(dict, dict$block),
~ bind_rows(
        keys_dat[!(keys_dat[["name"]] %in% .x[["name"]]), ],
.x
) %>%
select(keys, everything())
)
return(map(block_dict, survey_recode,
dat = survey,
keys = keys,
unanswer_recode = args$unanswer_recode,
unanswer_recode_multi = args$unanswer_recode_multi
))
} else {
return(survey_recode(dict,
dat = survey, keys = keys,
unanswer_recode = args$unanswer_recode,
      unanswer_recode_multi = args$unanswer_recode_multi
))
}
}
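# Added note: survey_recode() renames the QID columns of the downloaded responses to the
# dictionary's variable names and then applies survey_var_recode() to each variable so that
# value labels (and, where requested, "seen but not answered" codes) are attached.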
survey_recode <- function(dict, dat, keys, unanswer_recode, unanswer_recode_multi, numeric_to_pos) {
in_dat <- dict[["qid"]] %in% colnames(dat)
dict <- dict[in_dat, ]
unique_qids <- unique(dict[["qid"]])
unique_varnames <- unique(dict[["name"]])
keys <- c("externalDataReference", "startDate", "endDate", keys)
dat_cols <- c(keys, unique_qids)
varnames <- setNames(unique_qids, unique_varnames)
dat <- rename(dat[dat_cols], !!!varnames)
# level = unique to preserve ordering
split_dict <- split(dict, factor(dict$qid, level = unique(dict$qid)))
dat_vars <- map2_df(
dat[unique_varnames], split_dict,
~ survey_var_recode(.x, .y,
unanswer_recode = unanswer_recode,
unanswer_recode_multi = unanswer_recode_multi
)
)
bind_cols(dat[keys], dat_vars)
}
survey_var_recode <- function(var, var_dict, unanswer_recode, unanswer_recode_multi, numeric_to_pos) {
type <- var_dict[["type"]][1]
selector <- var_dict[["selector"]][1]
levels <- var_dict[["level"]]
labels <- var_dict[["label"]]
if (type == "TE" || any(grepl("_TEXT", levels))) {}
else if (selector == "MACOL" || selector == "MAVR" || selector == "MAHR") {
levels <- 1
if (!is.null(unanswer_recode_multi)) {
levels <- c(levels, unanswer_recode_multi)
labels <- c(labels, paste("Not", labels))
}
}
# If multiple rows it's ordinal
else if (nrow(var_dict) > 1) {
labels <- grep("TEXT", labels, invert = T, value = T)
levels <- grep("TEXT", levels, invert = T, value = T)
if (!is.null(unanswer_recode)) {
levels <- c(levels, unanswer_recode)
labels <- c(labels, "Seen but not answered")
}
}
var <- set_labels(var, labels = setNames(levels, labels))
text_label <- unique(paste_narm(var_dict[["question"]], var_dict[["item"]]))
var <- set_label(var, label = text_label)
return(var)
}
|
!> Module file parallel_types.mod
!
!> @build mpi
!
!> Module that defines the parallel environment types for ASiMoV-CCS
module parallel_types
use kinds, only: ccs_int
implicit none
private
!> placeholder reduction operator type
type, public :: reduction_operator
end type reduction_operator
!v parallel environment type with common parameters
! process id, number of processes and root process
type, public :: parallel_environment
integer(ccs_int) :: proc_id
integer(ccs_int) :: num_procs
integer(ccs_int) :: root
end type parallel_environment
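  ! Added usage sketch: a caller would typically declare
  !   type(parallel_environment) :: par_env
  ! and compare par_env%proc_id with par_env%root to restrict actions such as I/O
  ! to the root process.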
end module parallel_types
|
[STATEMENT]
lemma Lim_dist_ubound:
assumes "\<not>(trivial_limit net)"
and "(f \<longlongrightarrow> l) net"
and "eventually (\<lambda>x. dist a (f x) \<le> e) net"
shows "dist a l \<le> e"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dist a l \<le> e
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
net \<noteq> bot
(f \<longlongrightarrow> l) net
\<forall>\<^sub>F x in net. dist a (f x) \<le> e
goal (1 subgoal):
1. dist a l \<le> e
[PROOF STEP]
by (fast intro: tendsto_le tendsto_intros) |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from copy import deepcopy
import matplotlib
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import torch
from tqdm import tqdm
from fastai.callbacks import *
from fastai.distributed import *
from fastai.script import *
from fastai.vision import *
from fastai.vision.models.wrn import WideResNet as wrn
# resnet152 with adaptive pool at the end of the feature extractor
from torchvision.models import resnet152
torch.backends.cudnn.benchmark = True
def set_seed(seed=42):
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
def get_params(dataset, architecture, train_and_val):
params = {}
if dataset in ['Mini-ImageNet', 'Tiered-ImageNet']:
params['image_size'] = 84
elif dataset in ['CIFAR-FS', 'FC-100']:
params['image_size'] = 32
if train_and_val:
params['csv_file_name'] = 'train_and_val.csv'
params['parameter_file_name'] = dataset + '_' + architecture + '_TV'
else:
params['csv_file_name'] = 'train.csv'
params['parameter_file_name'] = dataset + '_' + architecture
params['parameter_path'] = os.path.join(dataset, 'models',
params['parameter_file_name'] + '.pth')
params['cycle_length'] = 40
params['relu'] = True
params['lambda'] = 1.
return params
def get_transformations(image_size):
transformations = [
flip_lr(p=0.5),
*rand_pad(padding=4, size=image_size, mode='reflection'),
brightness(change=(0.1, 0.9)),
contrast(scale=(0.6, 1.4))
]
return transformations
def get_data(dataset, train_file_name, validation_file_name, image_size,
batch_size):
train_list = ImageList.from_csv(dataset, train_file_name)
validation_list = ImageList.from_csv(dataset, validation_file_name)
loaders = ItemLists(dataset, train_list, validation_list) \
.label_from_df() \
.transform((get_transformations(image_size), []), size=image_size) \
.databunch(bs=batch_size, num_workers=4) \
.normalize(imagenet_stats)
return loaders
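# Added note: FewShotDataset samples N-way K-shot episodes -- each __getitem__ call draws `way`
# classes at random and, per class, `support_shot` labelled and `query_shot` query images; the
# draw is retried whenever a class does not contain enough images.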
class FewShotDataset(torch.utils.data.Dataset):
def save_images(self, dataset, file_name):
data = pd.read_csv(os.path.join(dataset, file_name))
classes = np.unique(data['c']).tolist()
self.images = {
cls : data.loc[data['c'] == cls]['fn'].tolist() for cls in classes
}
def __init__(self, dataset, file_name, image_size, way, support_shot,
query_shot):
self.dataset = dataset
self.save_images(dataset, file_name)
self.image_size = image_size
self.way = way
self.support_shot = support_shot
self.query_shot = query_shot
def get_way(self):
return self.way
def get_query_shot(self, classes):
query_shot = {cls : self.query_shot for cls in classes}
return query_shot
def get_support_shot(self, classes, query_shot):
support_shot = {cls : self.support_shot for cls in classes}
return support_shot
def __getitem__(self, idx):
found_episode = False
while not found_episode:
found_episode = True
way = self.get_way()
classes = np.random.choice(list(self.images.keys()), way,
replace=False)
classes = sorted(classes)
query_shot = self.get_query_shot(classes)
support_shot = self.get_support_shot(classes, query_shot)
support = dict(images=[], classes=[])
query = dict(images=[], classes=[])
for cls in classes:
try:
images = np.random.choice(self.images[cls],
support_shot[cls] + query_shot[cls], replace=False)
except:
found_episode = False
break
support['images'] += images[: support_shot[cls]].tolist()
support['classes'] += ([cls] * support_shot[cls])
query['images'] += images[support_shot[cls] :].tolist()
query['classes'] += ([cls] * query_shot[cls])
support = pd.DataFrame(
{'fn' : support['images'], 'c' : support['classes']}
)
query = pd.DataFrame(
{'fn' : query['images'], 'c' : query['classes']}
)
support = ImageList.from_df(support, self.dataset).split_none() \
.label_from_df().train
query = ImageList.from_df(query, self.dataset).split_none() \
.label_from_df().train
for ind in range(len(query.y.items)):
query.y.items[ind] = \
support.y.classes.index(query.y.classes[query.y.items[ind]])
query.y.classes = support.y.classes
support = ItemLists(self.dataset, support, support) \
.transform((get_transformations(self.image_size), []),
size=self.image_size) \
.databunch(bs=len(support), num_workers=0) \
.normalize(imagenet_stats)
query = ItemLists(self.dataset, query, query) \
.transform((get_transformations(self.image_size), []),
size=self.image_size) \
.databunch(bs=len(query), num_workers=0) \
.normalize(imagenet_stats)
return support, query
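# Added note: micro_forward runs the model on chunks of 75 samples at a time and, when a loss
# function is supplied, backpropagates each chunk's share of the (scaled) loss immediately --
# i.e. gradient accumulation -- so that large support/query batches fit in GPU memory.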
def micro_forward(model, x, y, loss_func=None, loss_coef=None):
num = x.size(0)
yhs = []
fs = []
model.zero_grad()
for x, y in zip(torch.split(x, 75), torch.split(y, 75)):
yh = model(x)
yhs.append(yh)
if loss_func:
f = x.size(0) * loss_func(yh, y) / num
(loss_coef * f).backward()
fs.append(f)
yh = torch.cat(yhs)
if loss_func:
f = torch.stack(fs).sum()
return yh, f
else:
return yh
def get_classifier(yh, y):
classifier = torch.zeros((y.unique().size(0), yh.size(1))).cuda()
for cls in torch.sort(y.unique())[0]:
classifier[cls] = yh[y == cls].mean(dim=0)
classifier = torch.nn.functional.normalize(classifier)
return classifier
class Hardness:
'''
Hardness Metric
Intuitively, classification performance on a few-shot episode is determined
by the relative location of the features corresponding to labeled and
unlabeled samples. If the unlabeled features are close to the labeled
features from the same class, a classifier can distinguish between the
classes easily to obtain a high accuracy. Otherwise, the accuracy would be
low. We define hardness as the average log-odds of a test datum being
classified incorrectly. We use the features of a generic feature extractor
(ResNet-152, pre-trained on ImageNet) to calculate this metric. The labeled
samples form class-specific cluster centers. The cluster affinities are
calculated using cosine-similarities, followed by the softmax operator to
get the probability distribution over the classes.
'''
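    # Added sketch of the metric described above: with p_y(x) the softmax probability of the
    # true class y under the cosine-similarity classifier built from the support means,
    #   hardness = mean over the query set of log((1 - p_y(x)) / p_y(x)).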
def __init__(self):
self.model = resnet152(pretrained=True)
self.model = self.model.cuda()
self.model.eval()
def get_hardness(self, support, query):
with torch.no_grad():
for xs, ys in support.valid_dl:
break
yhs = micro_forward(self.model, xs, ys)
classifier = get_classifier(yhs, ys)
for xq, yq in query.valid_dl:
break
yhq = micro_forward(self.model, xq, yq)
yhq = torch.nn.functional.normalize(yhq)
yhq = yhq @ classifier.t()
p = torch.softmax(yhq, dim=1)
p = p[torch.arange(0, yq.size(0)), yq]
hardness = ((1. - p) / p).log().mean().item()
return hardness
class FewShotModel(torch.nn.Module):
def __init__(self, backbone, support, relu):
super().__init__()
'''
Support-Based Initialization
Given the pre-trained model (backbone), we append a ReLU layer, an
l2-normalization layer and a fully-connected layer that takes the
logits of the backbone as input and predicts the few-shot labels. We
calculate the per-class l2-normalized average features to initialize
the weights of the fully-connected layer, with the biases set to 0.
'''
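        # Added sketch of the initialization above: for every class c in the support set,
        #   w_c = l2_normalize( mean of (optionally ReLU-activated) backbone features of class c ),
        #   b_c = 0,
        # so the appended linear layer starts out as a cosine-similarity classifier over
        # the normalized class means.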
self.backbone = deepcopy(backbone)
self.backbone.eval()
self.relu = relu
with torch.no_grad():
for x, y in support.valid_dl:
break
yh = micro_forward(self.backbone, x, y)
if self.relu:
yh = torch.relu(yh)
classifier = get_classifier(yh, y)
self.classifier = torch.nn.Linear(classifier.size(1),
classifier.size(0))
self.classifier = self.classifier.cuda()
self.classifier.weight.data.copy_(classifier)
self.classifier.bias.zero_()
def forward(self, x):
x = self.backbone(x)
if self.relu:
x = torch.relu(x)
x = torch.nn.functional.normalize(x)
x = self.classifier(x)
return x
def validate(model, data):
model.eval()
num = 0
correct = 0
with torch.no_grad():
for x, y in data.valid_dl:
yh = micro_forward(model, x, y)
num += x.size(0)
correct += (yh.argmax(dim=1) == y).sum().item()
accuracy = 100. * correct / num
return accuracy
cross_entropy = torch.nn.functional.cross_entropy
def entropy(yh, y):
p = torch.nn.functional.softmax(yh, dim=1)
log_p = torch.nn.functional.log_softmax(yh, dim=1)
loss = - (p * log_p).sum(dim=1).mean()
return loss
class Flatten(torch.nn.Module):
def forward(self, x):
x = x.view(x.size(0), -1)
return x
class Conv64(torch.nn.Module):
@staticmethod
def conv_bn(in_channels, out_channels, kernel_size, padding, pool):
model = torch.nn.Sequential(
torch.nn.Conv2d(in_channels, out_channels, kernel_size,
padding=padding),
torch.nn.BatchNorm2d(out_channels),
torch.nn.ReLU(True),
torch.nn.MaxPool2d(pool)
)
return model
def __init__(self, num_classes, image_size):
super().__init__()
self.model = torch.nn.Sequential(
self.conv_bn(3, 64, 3, 1, 2),
self.conv_bn(64, 64, 3, 1, 2),
self.conv_bn(64, 64, 3, 1, 2),
self.conv_bn(64, 64, 3, 1, 2),
Flatten(),
torch.nn.Linear(64 * (int(image_size / 16) ** 2), num_classes)
)
def forward(self, x):
x = self.model(x)
return x
class ResNet12(torch.nn.Module):
class Block(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1 = torch.nn.Conv2d(in_channels, out_channels, 3,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = torch.nn.Conv2d(out_channels, out_channels, 3,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = torch.nn.Conv2d(out_channels, out_channels, 3,
padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_channels)
self.conv_res = nn.Conv2d(in_channels, out_channels, 1, bias=False)
self.bn_res = nn.BatchNorm2d(out_channels)
self.maxpool = nn.MaxPool2d(2)
self.relu = nn.ReLU()
def forward(self, x):
residual = self.conv_res(x)
residual = self.bn_res(residual)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x += residual
x = self.relu(x)
x = self.maxpool(x)
return x
def __init__(self, num_classes, image_size):
super().__init__()
self.model = torch.nn.Sequential(
self.Block(3, 64),
self.Block(64, 128),
self.Block(128, 256),
self.Block(256, 512),
torch.nn.AvgPool2d(int(image_size / 16), stride=1),
Flatten(),
torch.nn.Linear(512, num_classes)
)
self.reset_parameters()
def reset_parameters(self):
for module in self.modules():
if isinstance(module, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(module.weight, mode='fan_out',
nonlinearity='relu')
def forward(self, x):
x = self.model(x)
return x
class WRN2810(torch.nn.Module):
def __init__(self, num_classes, image_size):
super().__init__()
self.model = \
partial(wrn, num_groups=3, N=4, k=10)(num_classes=num_classes)
def forward(self, x):
x = self.model(x)
return x
|
!**************************************************************
!* AceGen 6.702 Windows (4 May 16) *
!* Co. J. Korelc 2013 18 Mar 17 19:08:50 *
!**************************************************************
! User : Full professional version
! Notebook : JacobianFunction
! Evaluation time : 82 s Mode : Optimal
! Number of formulae : 1642 Method: Automatic
! Subroutine : dRdX1 size: 28013
! Total size of Mathematica code : 28013 subexpressions
! Total size of Fortran code : 72526 bytes
!******************* S U B R O U T I N E **********************
SUBROUTINE dRdX1(v,x,props,statev,Fnew,dRdX)
USE SMSUtility
IMPLICIT NONE
LOGICAL b126,b165
DOUBLE PRECISION v(2036),x(19),props(9),statev(19),Fnew(9),dRdX(19,19)
v(1881)=props(4)*x(1)
v(1765)=x(12)*x(13)-x(15)*x(19)
v(2021)=-(statev(18)*v(1765))
v(2007)=-(statev(16)*v(1765))
v(1972)=-(statev(11)*v(1765))
v(1764)=x(14)*x(16)-x(11)*x(19)
v(2030)=-(statev(13)*v(1764))
v(1995)=-(statev(17)*v(1764))
v(1981)=-(statev(15)*v(1764))
v(1763)=-(x(12)*x(16))+x(18)*x(19)
v(2023)=-(statev(15)*v(1763))
v(2019)=v(2021)+v(2023)
v(2009)=-(statev(13)*v(1763))
v(2005)=v(2007)+v(2009)
v(1974)=-(statev(17)*v(1763))
v(1970)=v(1972)+v(1974)
v(1861)=v(1763)*v(1764)
v(1762)=-(x(13)*x(14))+x(17)*x(19)
v(2028)=-(statev(16)*v(1762))
v(2031)=v(2028)+v(2030)
v(1993)=-(statev(11)*v(1762))
v(1996)=v(1993)+v(1995)
v(1979)=-(statev(18)*v(1762))
v(1982)=v(1979)+v(1981)
v(1862)=v(1762)*v(1765)
v(1863)=v(1861)+v(1862)
v(1761)=x(15)*x(16)-x(13)*x(18)
v(2020)=-(statev(12)*v(1761))
v(2024)=v(2020)+v(2023)
v(2022)=v(2020)+v(2021)
v(2018)=v(2019)+v(2020)
v(2006)=-(statev(19)*v(1761))
v(2010)=v(2006)+v(2009)
v(2008)=v(2006)+v(2007)
v(2004)=v(2005)+v(2006)
v(1971)=-(statev(14)*v(1761))
v(1975)=v(1971)+v(1974)
v(1973)=v(1971)+v(1972)
v(1969)=v(1970)+v(1971)
v(1760)=-(x(11)*x(15))+x(17)*x(18)
v(2014)=-(statev(14)*v(1760))
v(2000)=-(statev(12)*v(1760))
v(1986)=-(statev(19)*v(1760))
v(1873)=v(1760)*v(1761)
v(1759)=x(11)*x(12)-x(14)*x(18)
v(2012)=-(statev(17)*v(1759))
v(2011)=v(2012)+v(2014)
v(1998)=-(statev(15)*v(1759))
v(1997)=v(1998)+v(2000)
v(1984)=-(statev(13)*v(1759))
v(1983)=v(1984)+v(1986)
v(1874)=v(1759)*v(1763)
v(1876)=v(1873)+v(1874)
v(1868)=v(1759)*v(1764)
v(1758)=x(11)*x(13)-x(16)*x(17)
v(2027)=-(statev(19)*v(1758))
v(2029)=v(2027)+v(2028)
v(2025)=v(2027)+v(2030)
v(2026)=v(2025)+v(2028)
v(1992)=-(statev(14)*v(1758))
v(1994)=v(1992)+v(1993)
v(1990)=v(1992)+v(1995)
v(1991)=v(1990)+v(1993)
v(1978)=-(statev(12)*v(1758))
v(1980)=v(1978)+v(1979)
v(1976)=v(1978)+v(1981)
v(1977)=v(1976)+v(1979)
v(1867)=v(1758)*v(1760)
v(1870)=v(1867)+v(1868)
v(1860)=v(1758)*v(1761)
v(1865)=v(1860)+v(1862)
v(1864)=v(1860)+v(1861)
v(1757)=x(14)*x(15)-x(12)*x(17)
v(2016)=-(statev(11)*v(1757))
v(2017)=v(2014)+v(2016)
v(2013)=v(2012)+v(2016)
v(2015)=v(2013)+v(2014)
v(2002)=-(statev(18)*v(1757))
v(2003)=v(2000)+v(2002)
v(1999)=v(1998)+v(2002)
v(2001)=v(1999)+v(2000)
v(1988)=-(statev(16)*v(1757))
v(1989)=v(1986)+v(1988)
v(1985)=v(1984)+v(1988)
v(1987)=v(1985)+v(1986)
v(1875)=v(1757)*v(1765)
v(1877)=v(1873)+v(1875)
v(1872)=v(1874)+v(1875)
v(1869)=v(1757)*v(1762)
v(1871)=v(1867)+v(1869)
v(1866)=v(1868)+v(1869)
v(1745)=x(3)*x(4)-x(10)*x(6)
v(1958)=-(statev(9)*v(1745))
v(1944)=-(statev(7)*v(1745))
v(1909)=-(statev(2)*v(1745))
v(1742)=-(x(3)*x(7))+x(10)*x(9)
v(1960)=-(statev(6)*v(1742))
v(1956)=v(1958)+v(1960)
v(1946)=-(statev(4)*v(1742))
v(1942)=v(1944)+v(1946)
v(1911)=-(statev(8)*v(1742))
v(1907)=v(1909)+v(1911)
v(1740)=-(x(10)*x(2))+x(5)*x(7)
v(1967)=-(statev(4)*v(1740))
v(1932)=-(statev(8)*v(1740))
v(1918)=-(statev(6)*v(1740))
v(1738)=-(x(4)*x(5))+x(10)*x(8)
v(1965)=-(statev(7)*v(1738))
v(1968)=v(1965)+v(1967)
v(1930)=-(statev(2)*v(1738))
v(1933)=v(1930)+v(1932)
v(1916)=-(statev(9)*v(1738))
v(1919)=v(1916)+v(1918)
v(1734)=x(6)*x(7)-x(4)*x(9)
v(1957)=-(statev(3)*v(1734))
v(1961)=v(1957)+v(1960)
v(1959)=v(1957)+v(1958)
v(1955)=v(1956)+v(1957)
v(1943)=-(statev(10)*v(1734))
v(1947)=v(1943)+v(1946)
v(1945)=v(1943)+v(1944)
v(1941)=v(1942)+v(1943)
v(1908)=-(statev(5)*v(1734))
v(1912)=v(1908)+v(1911)
v(1910)=v(1908)+v(1909)
v(1906)=v(1907)+v(1908)
v(1733)=x(2)*x(3)-x(5)*x(9)
v(1949)=-(statev(8)*v(1733))
v(1935)=-(statev(6)*v(1733))
v(1921)=-(statev(4)*v(1733))
v(1732)=-(x(2)*x(6))+x(8)*x(9)
v(1951)=-(statev(5)*v(1732))
v(1948)=v(1949)+v(1951)
v(1937)=-(statev(3)*v(1732))
v(1934)=v(1935)+v(1937)
v(1923)=-(statev(10)*v(1732))
v(1920)=v(1921)+v(1923)
v(1731)=x(2)*x(4)-x(7)*x(8)
v(1964)=-(statev(10)*v(1731))
v(1966)=v(1964)+v(1965)
v(1962)=v(1964)+v(1967)
v(1963)=v(1962)+v(1965)
v(1929)=-(statev(5)*v(1731))
v(1931)=v(1929)+v(1930)
v(1927)=v(1929)+v(1932)
v(1928)=v(1927)+v(1930)
v(1915)=-(statev(3)*v(1731))
v(1917)=v(1915)+v(1916)
v(1913)=v(1915)+v(1918)
v(1914)=v(1913)+v(1916)
v(1730)=x(5)*x(6)-x(3)*x(8)
v(1953)=-(statev(2)*v(1730))
v(1954)=v(1951)+v(1953)
v(1950)=v(1949)+v(1953)
v(1952)=v(1950)+v(1951)
v(1939)=-(statev(9)*v(1730))
v(1940)=v(1937)+v(1939)
v(1936)=v(1935)+v(1939)
v(1938)=v(1936)+v(1937)
v(1925)=-(statev(7)*v(1730))
v(1926)=v(1923)+v(1925)
v(1922)=v(1921)+v(1925)
v(1924)=v(1922)+v(1923)
v(1729)=-statev(1)+x(1)
v(193)=v(1732)*x(10)+v(1733)*x(4)+v(1730)*x(7)
v(1756)=-(x(7)/v(193))
v(1755)=-(x(3)/v(193))
v(1754)=-(x(5)/v(193))
v(1753)=-(x(4)/v(193))
v(1752)=x(10)/v(193)
v(1751)=-(x(2)/v(193))
v(1750)=x(8)/v(193)
v(1749)=-(x(6)/v(193))
v(1748)=x(9)/v(193)
v(195)=1d0/v(193)**2
v(203)=-(v(1732)*v(195))
v(252)=v(1732)*v(203)
v(241)=v(1733)*v(203)
v(234)=v(1731)*v(203)
v(200)=-(v(1730)*v(195))
v(1739)=-(v(1738)*v(200))
v(259)=v(1730)*v(200)
v(250)=v(1732)*v(200)
v(247)=v(1731)*v(200)
v(238)=v(1733)*v(200)
v(198)=-(v(1734)*v(195))
v(1735)=-(v(1732)*v(198))
v(1524)=statev(3)*v(1735)
v(1519)=statev(10)*v(1735)
v(1505)=statev(5)*v(1735)
v(260)=v(1734)*v(198)
v(251)=v(1733)*v(198)
v(229)=v(1731)*v(198)
v(197)=-(v(1733)*v(195))
v(1743)=-(v(1742)*v(197))
v(1741)=-(v(1740)*v(197))
v(249)=v(1731)*v(197)
v(235)=v(1733)*v(197)
v(196)=-(v(1731)*v(195))
v(1737)=-(v(1734)*v(196))
v(1736)=-(v(1732)*v(196))
v(1528)=statev(10)*v(1736)
v(1514)=statev(5)*v(1736)
v(1509)=statev(3)*v(1736)
v(1366)=statev(10)*v(1737)
v(1358)=statev(5)*v(1737)
v(1353)=statev(3)*v(1737)
v(227)=v(1731)*v(196)
v(1487)=statev(2)*v(1739)
v(1479)=statev(9)*v(1739)
v(1474)=statev(7)*v(1739)
v(202)=-(v(1738)*v(195))
v(258)=v(1733)*v(202)
v(246)=v(1738)*v(202)
v(233)=v(1731)*v(202)
v(1391)=statev(8)*v(1741)
v(1386)=statev(6)*v(1741)
v(1381)=statev(4)*v(1741)
v(199)=-(v(1740)*v(195))
v(1744)=-(v(1742)*v(199))
v(262)=v(1740)*v(199)
v(243)=v(1738)*v(199)
v(230)=v(1731)*v(199)
v(1456)=statev(4)*v(1744)
v(1451)=statev(8)*v(1743)
v(1446)=statev(6)*v(1743)
v(1442)=statev(8)*v(1744)
v(1438)=statev(4)*v(1743)
v(1434)=statev(6)*v(1744)
v(253)=v(1742)*v(198)
v(201)=-(v(1742)*v(195))
v(261)=v(1731)*v(201)
v(256)=v(1742)*v(201)
v(257)=v(1745)*v(203)
v(255)=v(1745)*v(197)
v(245)=v(1745)*v(199)
v(242)=v(1745)*v(196)
v(225)=v(1745)*v(202)
v(224)=v(1745)*v(201)
v(223)=v(1745)*v(200)
v(221)=v(1745)*v(198)
v(194)=-(v(1745)*v(195))
v(1747)=-(v(1730)*v(194))
v(1746)=-(v(1738)*v(194))
v(1491)=statev(9)*v(1746)
v(1483)=statev(7)*v(1746)
v(1469)=statev(2)*v(1746)
v(1419)=statev(9)*v(1747)
v(1414)=statev(7)*v(1747)
v(1406)=statev(2)*v(1747)
v(218)=v(1745)*v(194)
v(1527)=statev(7)*v(1750)
v(1526)=statev(4)*v(1751)
v(1523)=statev(6)*v(1748)
v(1522)=statev(9)*v(1749)
v(1518)=statev(4)*v(1748)
v(1517)=statev(7)*v(1749)
v(1513)=statev(2)*v(1750)
v(1512)=statev(8)*v(1751)
v(1508)=statev(9)*v(1750)
v(1507)=statev(6)*v(1751)
v(1504)=statev(8)*v(1748)
v(1503)=statev(2)*v(1749)
v(1490)=statev(6)*v(1752)
v(1489)=statev(3)*v(1753)
v(1486)=statev(5)*v(1750)
v(1485)=statev(8)*v(1754)
v(1482)=statev(4)*v(1752)
v(1481)=statev(10)*v(1753)
v(1478)=statev(3)*v(1750)
v(1477)=statev(6)*v(1754)
v(1473)=statev(10)*v(1750)
v(1472)=statev(4)*v(1754)
v(1468)=statev(8)*v(1752)
v(1467)=statev(5)*v(1753)
v(1455)=statev(7)*v(1752)
v(1454)=statev(10)*v(1756)
v(1450)=statev(5)*v(1748)
v(1449)=statev(2)*v(1755)
v(1445)=statev(3)*v(1748)
v(1444)=statev(9)*v(1755)
v(1441)=statev(2)*v(1752)
v(1440)=statev(5)*v(1756)
v(1437)=statev(10)*v(1748)
v(1436)=statev(7)*v(1755)
v(1433)=statev(9)*v(1752)
v(1432)=statev(3)*v(1756)
v(1418)=-(statev(3)*v(1749))
v(1417)=statev(6)*v(1755)
v(1413)=-(statev(10)*v(1749))
v(1412)=statev(4)*v(1755)
v(1405)=-(statev(5)*v(1749))
v(1404)=statev(8)*v(1755)
v(1390)=-(statev(2)*v(1754))
v(1389)=statev(5)*v(1751)
v(1385)=-(statev(9)*v(1754))
v(1384)=statev(3)*v(1751)
v(1380)=-(statev(7)*v(1754))
v(1379)=statev(10)*v(1751)
v(1365)=-(statev(4)*v(1756))
v(1364)=statev(7)*v(1753)
v(1357)=-(statev(8)*v(1756))
v(1356)=statev(2)*v(1753)
v(1352)=-(statev(6)*v(1756))
v(1351)=statev(9)*v(1753)
v(240)=v(1754)+v(258)
v(236)=-v(1748)+v(251)
v(232)=v(1756)+v(261)
v(231)=-v(1750)+v(247)
v(228)=-v(1751)+v(249)
v(226)=v(1749)+v(257)
v(222)=-v(1752)+v(245)
v(220)=-v(1755)+v(255)
v(219)=-v(1753)+v(242)
v(516)=(v(1757)*v(1757))
v(539)=(v(1758)*v(1758))
v(515)=(v(1759)*v(1759))
v(1856)=v(515)+v(516)
v(514)=(v(1760)*v(1760))
v(1857)=v(514)+v(515)
v(1853)=v(514)+v(516)
v(1767)=v(514)+v(515)+v(516)
v(563)=(v(1761)*v(1761))
v(180)=v(1759)*x(13)+v(1757)*x(16)+v(1760)*x(19)
v(1785)=statev(18)/v(180)
v(1784)=-(x(16)/v(180))
v(1783)=-(x(12)/v(180))
v(1782)=x(18)/v(180)
v(1781)=-(x(13)/v(180))
v(1780)=statev(17)/v(180)
v(1779)=x(17)/v(180)
v(1778)=-(x(11)/v(180))
v(1777)=statev(13)/v(180)
v(1776)=x(15)/v(180)
v(1775)=statev(14)/v(180)
v(1774)=statev(15)/v(180)
v(1773)=statev(11)/v(180)
v(1772)=statev(16)/v(180)
v(495)=1d0/v(180)**3
v(1766)=(-2d0)*v(495)
v(503)=v(1760)*v(1766)
v(687)=v(503)*v(563)
v(655)=v(1860)*v(503)
v(647)=v(503)*v(539)
v(529)=v(1767)*v(503)
v(538)=-v(529)/3d0
v(502)=v(1762)*v(1766)
v(661)=v(502)*v(516)
v(501)=v(1763)*v(1766)
v(684)=v(501)*v(515)
v(500)=v(1757)*v(1766)
v(525)=v(1767)*v(500)
v(535)=-v(525)/3d0
v(499)=v(1764)*v(1766)
v(650)=v(499)*v(515)
v(498)=v(1761)*v(1766)
v(708)=v(498)*v(514)
v(621)=v(498)*v(539)
v(497)=v(1759)*v(1766)
v(520)=v(1767)*v(497)
v(532)=-v(520)/3d0
v(496)=v(1758)*v(1766)
v(672)=v(496)*v(514)
v(626)=v(496)*v(563)
v(494)=v(1765)*v(1766)
v(696)=v(494)*v(516)
v(208)=1d0/v(180)**2
v(1848)=2d0*v(208)
v(217)=-(v(1760)*v(208))
v(216)=-(v(1762)*v(208))
v(215)=-(v(1763)*v(208))
v(214)=-(v(1757)*v(208))
v(1790)=-(v(1762)*v(214))
v(213)=-(v(1764)*v(208))
v(1787)=-(v(1763)*v(213))
v(212)=-(v(1761)*v(208))
v(1768)=-(v(1760)*v(212))
v(1722)=statev(12)*v(1768)
v(1717)=statev(19)*v(1768)
v(1703)=statev(14)*v(1768)
v(211)=-(v(1759)*v(208))
v(1788)=-(v(1764)*v(211))
v(1786)=-(v(1763)*v(211))
v(210)=-(v(1758)*v(208))
v(1770)=-(v(1761)*v(210))
v(1769)=-(v(1760)*v(210))
v(1726)=statev(19)*v(1769)
v(1712)=statev(14)*v(1769)
v(1707)=statev(12)*v(1769)
v(1594)=statev(19)*v(1770)
v(1586)=statev(14)*v(1770)
v(1581)=statev(12)*v(1770)
v(209)=-(v(1765)*v(208))
v(1789)=-(v(1762)*v(209))
v(1771)=-(v(1757)*v(209))
v(1635)=statev(18)*v(1771)
v(1630)=statev(16)*v(1771)
v(1622)=statev(11)*v(1771)
v(1725)=v(1772)*x(17)
v(1715)=-(v(1772)*x(15))
v(1659)=-(v(1773)*x(12))
v(1651)=v(1773)*x(19)
v(1629)=statev(19)*v(1776)
v(1584)=-(v(1773)*x(13))
v(1694)=v(1774)*x(19)
v(1681)=-(v(1774)*x(14))
v(1660)=v(1775)*x(18)
v(1650)=-(v(1775)*x(16))
v(1724)=-(v(1777)*x(11))
v(1721)=v(1774)*x(18)
v(1720)=-(statev(18)*v(1776))
v(1716)=v(1777)*x(18)
v(1711)=v(1773)*x(17)
v(1710)=statev(17)*v(1778)
v(1706)=statev(15)*v(1778)
v(1705)=statev(18)*v(1779)
v(1702)=-(v(1773)*x(15))
v(1701)=v(1780)*x(18)
v(1693)=statev(12)*v(1781)
v(1690)=statev(14)*v(1779)
v(1689)=-(v(1780)*x(14))
v(1686)=v(1777)*x(19)
v(1685)=statev(19)*v(1781)
v(1682)=statev(12)*v(1779)
v(1677)=statev(19)*v(1779)
v(1676)=-(v(1777)*x(14))
v(1672)=-(v(1775)*x(13))
v(1671)=v(1780)*x(19)
v(1665)=v(1772)*x(19)
v(1664)=statev(19)*v(1784)
v(1655)=statev(12)*v(1782)
v(1654)=statev(18)*v(1783)
v(1647)=statev(19)*v(1782)
v(1646)=statev(16)*v(1783)
v(1643)=v(1785)*x(19)
v(1642)=statev(12)*v(1784)
v(1634)=statev(12)*v(1776)
v(1633)=-(v(1774)*x(12))
v(1628)=-(v(1777)*x(12))
v(1621)=v(1775)*x(15)
v(1620)=-(v(1780)*x(12))
v(1612)=v(1773)*x(14)
v(1611)=-(v(1775)*x(11))
v(1607)=v(1785)*x(14)
v(1606)=statev(12)*v(1778)
v(1602)=v(1772)*x(14)
v(1601)=statev(19)*v(1778)
v(1593)=v(1777)*x(16)
v(1592)=-(v(1772)*x(13))
v(1585)=v(1780)*x(16)
v(1580)=v(1774)*x(16)
v(1579)=statev(18)*v(1781)
v(1666)=statev(13)*v(1787)
v(1661)=statev(17)*v(1786)
v(1656)=statev(15)*v(1786)
v(1652)=statev(17)*v(1787)
v(1648)=statev(13)*v(1786)
v(1644)=statev(15)*v(1787)
v(565)=(v(1763)*v(1763))
v(1855)=v(563)+v(565)
v(699)=v(497)*v(565)
v(634)=v(499)*v(565)
v(1613)=statev(17)*v(1788)
v(1608)=statev(15)*v(1788)
v(1603)=statev(13)*v(1788)
v(666)=v(1861)*v(497)
v(541)=(v(1764)*v(1764))
v(1852)=v(539)+v(541)
v(658)=v(497)*v(541)
v(629)=v(501)*v(541)
v(1695)=statev(18)*v(1789)
v(1691)=statev(11)*v(1790)
v(1687)=statev(16)*v(1789)
v(1683)=statev(18)*v(1790)
v(1678)=statev(16)*v(1790)
v(1673)=statev(11)*v(1789)
v(540)=(v(1762)*v(1762))
v(1851)=v(539)+v(540)
v(1850)=v(540)+v(541)
v(1791)=v(539)+v(540)+v(541)
v(669)=v(500)*v(540)
v(637)=v(494)*v(540)
v(552)=v(1791)*v(502)
v(561)=-v(552)/3d0
v(549)=v(1791)*v(499)
v(558)=-v(549)/3d0
v(543)=v(1791)*v(496)
v(555)=-v(543)/3d0
v(644)=v(1862)*v(500)
v(564)=(v(1765)*v(1765))
v(1854)=v(564)+v(565)
v(1849)=v(563)+v(564)
v(1792)=v(563)+v(564)+v(565)
v(675)=v(500)*v(564)
v(618)=v(502)*v(564)
v(574)=v(1792)*v(501)
v(585)=-v(574)/3d0
v(570)=v(1792)*v(498)
v(582)=-v(570)/3d0
v(566)=v(1792)*v(494)
v(579)=-v(566)/3d0
v(338)=-(Fnew(1)*v(1739))
v(326)=Fnew(1)*v(223)
v(290)=Fnew(1)*v(225)
v(354)=-(Fnew(2)*v(1735))
v(342)=Fnew(2)*v(229)
v(303)=Fnew(2)*v(234)
v(366)=-(Fnew(3)*v(1744))
v(359)=-(Fnew(3)*v(1741))
v(316)=-(Fnew(3)*v(1743))
v(331)=-(Fnew(4)*v(1735))
v(328)=Fnew(4)*v(234)
v(294)=Fnew(4)*v(229)
v(347)=-(Fnew(5)*v(1744))
v(344)=-(Fnew(5)*v(1743))
v(307)=-(Fnew(5)*v(1741))
v(364)=-(Fnew(6)*v(1739))
v(356)=Fnew(6)*v(225)
v(320)=Fnew(6)*v(223)
v(340)=Fnew(7)*v(241)+Fnew(1)*v(250)+Fnew(4)*v(252)
v(339)=Fnew(7)*v(240)+Fnew(4)*v(247)+v(338)
v(336)=-(Fnew(7)*v(1743))
v(337)=Fnew(4)*v(251)+Fnew(1)*v(255)+v(336)
v(335)=Fnew(7)*v(238)+Fnew(4)*v(250)+Fnew(1)*v(259)
v(333)=-(Fnew(7)*v(1741))
v(334)=Fnew(4)*v(249)+Fnew(1)*v(258)+v(333)
v(332)=Fnew(7)*v(236)+Fnew(1)*v(257)+v(331)
v(330)=Fnew(7)*v(235)+Fnew(1)*v(238)+Fnew(4)*v(241)
v(329)=Fnew(7)*v(228)+Fnew(1)*v(231)+v(328)
v(327)=Fnew(7)*v(220)+Fnew(4)*v(226)+v(326)
v(301)=Fnew(1)*v(247)+Fnew(7)*v(249)+v(328)
v(300)=Fnew(4)*v(233)+Fnew(7)*v(243)+Fnew(1)*v(246)
v(298)=-(Fnew(7)*v(1744))
v(299)=Fnew(4)*v(232)+Fnew(1)*v(245)+v(298)
v(297)=Fnew(4)*v(231)+Fnew(7)*v(258)+v(338)
v(296)=Fnew(4)*v(230)+Fnew(1)*v(243)+Fnew(7)*v(262)
v(295)=Fnew(1)*v(242)+Fnew(7)*v(261)+v(294)
v(293)=Fnew(4)*v(228)+Fnew(1)*v(240)+v(333)
v(292)=Fnew(4)*v(227)+Fnew(7)*v(230)+Fnew(1)*v(233)
v(291)=Fnew(4)*v(219)+Fnew(7)*v(222)+v(290)
v(271)=Fnew(1)*v(226)+Fnew(7)*v(251)+v(331)
v(270)=Fnew(4)*v(242)+Fnew(7)*v(245)+v(290)
v(269)=Fnew(1)*v(224)+Fnew(4)*v(253)+Fnew(7)*v(256)
v(268)=Fnew(7)*v(255)+Fnew(4)*v(257)+v(326)
v(267)=Fnew(1)*v(222)+Fnew(4)*v(261)+v(298)
v(266)=Fnew(1)*v(221)+Fnew(7)*v(253)+Fnew(4)*v(260)
v(265)=Fnew(1)*v(220)+Fnew(4)*v(236)+v(336)
v(264)=Fnew(1)*v(219)+Fnew(7)*v(232)+v(294)
v(263)=Fnew(1)*v(218)+Fnew(4)*v(221)+Fnew(7)*v(224)
v(355)=Fnew(8)*v(226)+Fnew(5)*v(251)+v(354)
v(352)=Fnew(8)*v(225)
v(353)=Fnew(2)*v(242)+Fnew(5)*v(245)+v(352)
v(351)=Fnew(8)*v(224)+Fnew(2)*v(253)+Fnew(5)*v(256)
v(349)=Fnew(8)*v(223)
v(350)=Fnew(5)*v(255)+Fnew(2)*v(257)+v(349)
v(348)=Fnew(8)*v(222)+Fnew(2)*v(261)+v(347)
v(346)=Fnew(8)*v(221)+Fnew(5)*v(253)+Fnew(2)*v(260)
v(345)=Fnew(8)*v(220)+Fnew(2)*v(236)+v(344)
v(343)=Fnew(8)*v(219)+Fnew(5)*v(232)+v(342)
v(341)=Fnew(8)*v(218)+Fnew(2)*v(221)+Fnew(5)*v(224)
v(313)=Fnew(5)*v(241)+Fnew(8)*v(250)+Fnew(2)*v(252)
v(311)=-(Fnew(8)*v(1739))
v(312)=Fnew(5)*v(240)+Fnew(2)*v(247)+v(311)
v(310)=Fnew(2)*v(251)+Fnew(8)*v(255)+v(344)
v(309)=Fnew(5)*v(238)+Fnew(2)*v(250)+Fnew(8)*v(259)
v(308)=Fnew(2)*v(249)+Fnew(8)*v(258)+v(307)
v(306)=Fnew(5)*v(236)+Fnew(8)*v(257)+v(354)
v(305)=Fnew(5)*v(235)+Fnew(8)*v(238)+Fnew(2)*v(241)
v(304)=Fnew(5)*v(228)+Fnew(8)*v(231)+v(303)
v(302)=Fnew(5)*v(220)+Fnew(2)*v(226)+v(349)
v(280)=Fnew(8)*v(247)+Fnew(5)*v(249)+v(303)
v(279)=Fnew(2)*v(233)+Fnew(5)*v(243)+Fnew(8)*v(246)
v(278)=Fnew(2)*v(232)+Fnew(8)*v(245)+v(347)
v(277)=Fnew(2)*v(231)+Fnew(5)*v(258)+v(311)
v(276)=Fnew(2)*v(230)+Fnew(8)*v(243)+Fnew(5)*v(262)
v(275)=Fnew(8)*v(242)+Fnew(5)*v(261)+v(342)
v(274)=Fnew(2)*v(228)+Fnew(8)*v(240)+v(307)
v(273)=Fnew(2)*v(227)+Fnew(5)*v(230)+Fnew(8)*v(233)
v(272)=Fnew(2)*v(219)+Fnew(5)*v(222)+v(352)
v(369)=Fnew(9)*v(234)
v(370)=Fnew(6)*v(247)+Fnew(3)*v(249)+v(369)
v(368)=Fnew(9)*v(233)+Fnew(3)*v(243)+Fnew(6)*v(246)
v(367)=Fnew(9)*v(232)+Fnew(6)*v(245)+v(366)
v(365)=Fnew(9)*v(231)+Fnew(3)*v(258)+v(364)
v(363)=Fnew(9)*v(230)+Fnew(6)*v(243)+Fnew(3)*v(262)
v(361)=Fnew(9)*v(229)
v(362)=Fnew(6)*v(242)+Fnew(3)*v(261)+v(361)
v(360)=Fnew(9)*v(228)+Fnew(6)*v(240)+v(359)
v(358)=Fnew(9)*v(227)+Fnew(3)*v(230)+Fnew(6)*v(233)
v(357)=Fnew(9)*v(219)+Fnew(3)*v(222)+v(356)
v(324)=-(Fnew(9)*v(1735))
v(325)=Fnew(6)*v(226)+Fnew(3)*v(251)+v(324)
v(323)=Fnew(9)*v(242)+Fnew(3)*v(245)+v(356)
v(322)=Fnew(6)*v(224)+Fnew(9)*v(253)+Fnew(3)*v(256)
v(321)=Fnew(3)*v(255)+Fnew(9)*v(257)+v(320)
v(319)=Fnew(6)*v(222)+Fnew(9)*v(261)+v(366)
v(318)=Fnew(6)*v(221)+Fnew(3)*v(253)+Fnew(9)*v(260)
v(317)=Fnew(6)*v(220)+Fnew(9)*v(236)+v(316)
v(315)=Fnew(6)*v(219)+Fnew(3)*v(232)+v(361)
v(314)=Fnew(6)*v(218)+Fnew(9)*v(221)+Fnew(3)*v(224)
v(289)=Fnew(3)*v(241)+Fnew(6)*v(250)+Fnew(9)*v(252)
v(288)=Fnew(3)*v(240)+Fnew(9)*v(247)+v(364)
v(287)=Fnew(9)*v(251)+Fnew(6)*v(255)+v(316)
v(286)=Fnew(3)*v(238)+Fnew(9)*v(250)+Fnew(6)*v(259)
v(285)=Fnew(9)*v(249)+Fnew(6)*v(258)+v(359)
v(284)=Fnew(3)*v(236)+Fnew(6)*v(257)+v(324)
v(283)=Fnew(3)*v(235)+Fnew(6)*v(238)+Fnew(9)*v(241)
v(282)=Fnew(3)*v(228)+Fnew(6)*v(231)+v(369)
v(281)=Fnew(3)*v(220)+Fnew(9)*v(226)+v(320)
v(57)=v(1745)/v(193)
v(59)=v(1731)/v(193)
v(60)=v(1733)/v(193)
v(61)=v(1738)/v(193)
v(62)=v(1732)/v(193)
v(63)=v(1742)/v(193)
v(64)=v(1730)/v(193)
v(65)=v(1734)/v(193)
v(66)=v(1740)/v(193)
v(67)=Fnew(1)*v(57)+Fnew(7)*v(63)+Fnew(4)*v(65)
v(68)=Fnew(2)*v(59)+Fnew(8)*v(61)+Fnew(5)*v(66)
v(69)=Fnew(3)*v(60)+Fnew(9)*v(62)+Fnew(6)*v(64)
v(70)=Fnew(4)*v(59)+Fnew(1)*v(61)+Fnew(7)*v(66)
v(71)=Fnew(5)*v(60)+Fnew(2)*v(62)+Fnew(8)*v(64)
v(72)=Fnew(6)*v(57)+Fnew(3)*v(63)+Fnew(9)*v(65)
v(73)=Fnew(7)*v(60)+Fnew(4)*v(62)+Fnew(1)*v(64)
v(1809)=v(363)*v(69)+v(276)*v(71)+v(296)*v(73)
v(1807)=v(368)*v(69)+v(279)*v(71)+v(300)*v(73)
v(1805)=v(358)*v(69)+v(273)*v(71)+v(292)*v(73)
v(1801)=v(285)*v(69)+v(308)*v(71)+v(334)*v(73)
v(1800)=v(288)*v(69)+v(312)*v(71)+v(339)*v(73)
v(1799)=v(282)*v(69)+v(304)*v(71)+v(329)*v(73)
v(1798)=v(322)*v(69)+v(351)*v(71)+v(269)*v(73)
v(1797)=v(314)*v(69)+v(341)*v(71)+v(263)*v(73)
v(1796)=v(318)*v(69)+v(346)*v(71)+v(266)*v(73)
v(1795)=v(287)*v(69)+v(310)*v(71)+v(337)*v(73)
v(1794)=v(281)*v(69)+v(302)*v(71)+v(327)*v(73)
v(1793)=v(284)*v(69)+v(306)*v(71)+v(332)*v(73)
v(415)=2d0*(v(289)*v(69)+v(313)*v(71)+v(340)*v(73))
v(424)=-v(415)/3d0
v(414)=2d0*v(1800)
v(423)=-v(414)/3d0
v(413)=2d0*v(1795)
v(422)=-v(413)/3d0
v(412)=2d0*(v(286)*v(69)+v(309)*v(71)+v(335)*v(73))
v(421)=-v(412)/3d0
v(411)=2d0*v(1801)
v(420)=-v(411)/3d0
v(410)=2d0*v(1793)
v(419)=-v(410)/3d0
v(409)=2d0*(v(283)*v(69)+v(305)*v(71)+v(330)*v(73))
v(418)=-v(409)/3d0
v(408)=2d0*v(1799)
v(417)=-v(408)/3d0
v(407)=2d0*v(1794)
v(416)=-v(407)/3d0
v(74)=Fnew(8)*v(57)+Fnew(5)*v(63)+Fnew(2)*v(65)
v(1810)=v(300)*v(67)+v(368)*v(72)+v(279)*v(74)
v(1808)=v(292)*v(67)+v(358)*v(72)+v(273)*v(74)
v(1806)=v(296)*v(67)+v(363)*v(72)+v(276)*v(74)
v(1804)=v(270)*v(67)+v(323)*v(72)+v(353)*v(74)
v(1803)=v(264)*v(67)+v(315)*v(72)+v(343)*v(74)
v(1802)=v(267)*v(67)+v(319)*v(72)+v(348)*v(74)
v(454)=v(1793)+v(325)*v(69)+v(355)*v(71)+v(271)*v(73)
v(452)=v(1798)+v(337)*v(67)+v(287)*v(72)+v(310)*v(74)
v(451)=v(1794)+v(321)*v(69)+v(350)*v(71)+v(268)*v(73)
v(449)=v(1796)+v(332)*v(67)+v(284)*v(72)+v(306)*v(74)
v(448)=v(1795)+v(317)*v(69)+v(345)*v(71)+v(265)*v(73)
v(446)=v(1797)+v(327)*v(67)+v(281)*v(72)+v(302)*v(74)
v(442)=v(293)*v(67)+v(360)*v(72)+v(274)*v(74)
v(438)=v(301)*v(67)+v(370)*v(72)+v(280)*v(74)
v(434)=v(297)*v(67)+v(365)*v(72)+v(277)*v(74)
v(379)=2d0*v(1796)
v(388)=-v(379)/3d0
v(378)=2d0*v(1804)
v(387)=-v(378)/3d0
v(377)=2d0*(v(269)*v(67)+v(322)*v(72)+v(351)*v(74))
v(386)=-v(377)/3d0
v(376)=2d0*v(1797)
v(385)=-v(376)/3d0
v(375)=2d0*v(1802)
v(384)=-v(375)/3d0
v(374)=2d0*(v(266)*v(67)+v(318)*v(72)+v(346)*v(74))
v(383)=-v(374)/3d0
v(373)=2d0*v(1798)
v(382)=-v(373)/3d0
v(372)=2d0*v(1803)
v(381)=-v(372)/3d0
v(371)=2d0*(v(263)*v(67)+v(314)*v(72)+v(341)*v(74))
v(380)=-v(371)/3d0
v(75)=Fnew(9)*v(59)+Fnew(6)*v(61)+Fnew(3)*v(66)
v(445)=v(1799)+v(370)*v(69)+v(280)*v(71)+v(301)*v(73)
v(444)=v(1807)+v(312)*v(68)+v(339)*v(70)+v(288)*v(75)
v(443)=v(442)+v(310)*v(68)+v(337)*v(70)+v(287)*v(75)
v(441)=v(1800)+v(365)*v(69)+v(277)*v(71)+v(297)*v(73)
v(440)=v(1809)+v(308)*v(68)+v(334)*v(70)+v(285)*v(75)
v(439)=v(438)+v(306)*v(68)+v(332)*v(70)+v(284)*v(75)
v(437)=v(1801)+v(360)*v(69)+v(274)*v(71)+v(293)*v(73)
v(436)=v(1805)+v(304)*v(68)+v(329)*v(70)+v(282)*v(75)
v(435)=v(434)+v(302)*v(68)+v(327)*v(70)+v(281)*v(75)
v(433)=v(438)+v(355)*v(68)+v(271)*v(70)+v(325)*v(75)
v(447)=v(433)-2d0*v(438)+v(439)
v(432)=v(1810)+v(353)*v(68)+v(270)*v(70)+v(323)*v(75)
v(431)=v(1802)+v(299)*v(67)+v(367)*v(72)+v(278)*v(74)
v(430)=v(434)+v(350)*v(68)+v(268)*v(70)+v(321)*v(75)
v(453)=v(430)-2d0*v(434)+v(435)
v(429)=v(1806)+v(348)*v(68)+v(267)*v(70)+v(319)*v(75)
v(428)=v(1803)+v(295)*v(67)+v(362)*v(72)+v(275)*v(74)
v(427)=v(442)+v(345)*v(68)+v(265)*v(70)+v(317)*v(75)
v(450)=v(427)-2d0*v(442)+v(443)
v(426)=v(1808)+v(343)*v(68)+v(264)*v(70)+v(315)*v(75)
v(425)=v(1804)+v(291)*v(67)+v(357)*v(72)+v(272)*v(74)
v(397)=2d0*v(1805)
v(406)=-v(397)/3d0
v(396)=2d0*(v(279)*v(68)+v(300)*v(70)+v(368)*v(75))
v(405)=-v(396)/3d0
v(395)=2d0*v(1806)
v(404)=-v(395)/3d0
v(394)=2d0*v(1807)
v(403)=-v(394)/3d0
v(393)=2d0*(v(276)*v(68)+v(296)*v(70)+v(363)*v(75))
v(402)=-v(393)/3d0
v(392)=2d0*v(1808)
v(401)=-v(392)/3d0
v(391)=2d0*v(1809)
v(400)=-v(391)/3d0
v(390)=2d0*(v(273)*v(68)+v(292)*v(70)+v(358)*v(75))
v(399)=-v(390)/3d0
v(389)=2d0*v(1810)
v(398)=-v(389)/3d0
v(76)=(v(67)*v(67))+(v(72)*v(72))+(v(74)*v(74))
v(88)=-v(76)/3d0
v(77)=(v(68)*v(68))+(v(70)*v(70))+(v(75)*v(75))
v(89)=-v(77)/3d0
v(78)=(v(69)*v(69))+(v(71)*v(71))+(v(73)*v(73))
v(731)=(2d0/3d0)*v(78)+v(88)+v(89)
v(84)=-v(78)/3d0
v(721)=(2d0/3d0)*v(77)+v(84)+v(88)
v(711)=(2d0/3d0)*v(76)+v(84)+v(89)
v(79)=v(67)*v(70)+v(68)*v(74)+v(72)*v(75)
v(1811)=2d0*v(79)
v(460)=-(v(1811)*v(78))
v(457)=(v(79)*v(79))
v(1815)=-v(457)+v(76)*v(77)
v(80)=v(68)*v(71)+v(70)*v(73)+v(69)*v(75)
v(463)=v(1811)*v(80)
v(461)=(-2d0)*v(76)*v(80)
v(455)=(v(80)*v(80))
v(1813)=-v(455)+v(77)*v(78)
v(81)=v(69)*v(72)+v(67)*v(73)+v(71)*v(74)
v(1812)=2d0*v(81)
v(464)=-(v(1812)*v(77))
v(1818)=v(463)+v(464)
v(462)=v(1812)*v(79)
v(1817)=v(461)+v(462)
v(458)=v(1812)*v(80)
v(1816)=v(458)+v(460)
v(456)=(v(81)*v(81))
v(1814)=-v(456)+v(76)*v(78)
v(472)=v(1813)*v(379)+v(1814)*v(397)+v(1815)*v(415)+v(1816)*v(433)+v(1817)*v(445)+v(1818)*v(454)
v(471)=v(1813)*v(378)+v(1814)*v(396)+v(1815)*v(414)+v(1816)*v(432)+v(1817)*v(444)+v(1818)*v(453)
v(470)=v(1813)*v(377)+v(1814)*v(395)+v(1815)*v(413)+v(1816)*v(431)+v(1817)*v(443)+v(1818)*v(452)
v(469)=v(1813)*v(376)+v(1814)*v(394)+v(1815)*v(412)+v(1816)*v(430)+v(1817)*v(441)+v(1818)*v(451)
v(468)=v(1813)*v(375)+v(1814)*v(393)+v(1815)*v(411)+v(1816)*v(429)+v(1817)*v(440)+v(1818)*v(450)
v(467)=v(1813)*v(374)+v(1814)*v(392)+v(1815)*v(410)+v(1816)*v(428)+v(1817)*v(439)+v(1818)*v(449)
v(466)=v(1813)*v(373)+v(1814)*v(391)+v(1815)*v(409)+v(1816)*v(427)+v(1817)*v(437)+v(1818)*v(448)
v(465)=v(1813)*v(372)+v(1814)*v(390)+v(1815)*v(408)+v(1816)*v(426)+v(1817)*v(436)+v(1818)*v(447)
v(459)=v(1813)*v(371)+v(1814)*v(389)+v(1815)*v(407)+v(1816)*v(425)+v(1817)*v(435)+v(1818)*v(446)
v(82)=-(v(455)*v(76))-v(456)*v(77)+v(1815)*v(78)+v(458)*v(79)
v(485)=1d0/v(82)**0.13333333333333333d1
v(1819)=-v(485)/3d0
v(493)=v(1819)*v(472)
v(492)=v(1819)*v(471)
v(491)=v(1819)*v(470)
v(490)=v(1819)*v(469)
v(489)=v(1819)*v(468)
v(488)=v(1819)*v(467)
v(487)=v(1819)*v(466)
v(486)=v(1819)*v(465)
v(484)=v(1819)*v(459)
v(473)=sqrt(v(82))
v(86)=props(2)*(-v(473)+v(82))
v(85)=1d0/v(82)**0.3333333333333333d0
v(1878)=props(1)*v(85)
v(1847)=props(1)*(v(484)*v(711)+((2d0/3d0)*v(371)+v(398)+v(416))*v(85))
v(1846)=props(1)*(v(484)*v(731)+(v(380)+v(398)+(2d0/3d0)*v(407))*v(85))
v(1845)=props(1)*(v(484)*v(721)+(v(380)+(2d0/3d0)*v(389)+v(416))*v(85))
v(1844)=props(1)*(v(486)*v(711)+((2d0/3d0)*v(372)+v(399)+v(417))*v(85))
v(1843)=props(1)*(v(486)*v(731)+(v(381)+v(399)+(2d0/3d0)*v(408))*v(85))
v(1842)=props(1)*(v(486)*v(721)+(v(381)+(2d0/3d0)*v(390)+v(417))*v(85))
v(1841)=props(1)*(v(487)*v(711)+((2d0/3d0)*v(373)+v(400)+v(418))*v(85))
v(1840)=props(1)*(v(487)*v(731)+(v(382)+v(400)+(2d0/3d0)*v(409))*v(85))
v(1839)=props(1)*(v(487)*v(721)+(v(382)+(2d0/3d0)*v(391)+v(418))*v(85))
v(1838)=props(1)*(v(488)*v(711)+((2d0/3d0)*v(374)+v(401)+v(419))*v(85))
v(1837)=props(1)*(v(488)*v(731)+(v(383)+v(401)+(2d0/3d0)*v(410))*v(85))
v(1836)=props(1)*(v(488)*v(721)+(v(383)+(2d0/3d0)*v(392)+v(419))*v(85))
v(1835)=props(1)*(v(489)*v(711)+((2d0/3d0)*v(375)+v(402)+v(420))*v(85))
v(1834)=props(1)*(v(489)*v(731)+(v(384)+v(402)+(2d0/3d0)*v(411))*v(85))
v(1833)=props(1)*(v(489)*v(721)+(v(384)+(2d0/3d0)*v(393)+v(420))*v(85))
v(1832)=props(1)*(v(490)*v(711)+((2d0/3d0)*v(376)+v(403)+v(421))*v(85))
v(1831)=props(1)*(v(490)*v(731)+(v(385)+v(403)+(2d0/3d0)*v(412))*v(85))
v(1830)=props(1)*(v(490)*v(721)+(v(385)+(2d0/3d0)*v(394)+v(421))*v(85))
v(1829)=props(1)*(v(491)*v(711)+((2d0/3d0)*v(377)+v(404)+v(422))*v(85))
v(1828)=props(1)*(v(491)*v(731)+(v(386)+v(404)+(2d0/3d0)*v(413))*v(85))
v(1827)=props(1)*(v(491)*v(721)+(v(386)+(2d0/3d0)*v(395)+v(422))*v(85))
v(1826)=props(1)*(v(492)*v(711)+((2d0/3d0)*v(378)+v(405)+v(423))*v(85))
v(1825)=props(1)*(v(492)*v(731)+(v(387)+v(405)+(2d0/3d0)*v(414))*v(85))
v(1824)=props(1)*(v(492)*v(721)+(v(387)+(2d0/3d0)*v(396)+v(423))*v(85))
v(1823)=props(1)*(v(493)*v(711)+((2d0/3d0)*v(379)+v(406)+v(424))*v(85))
v(1822)=props(1)*(v(493)*v(731)+(v(388)+v(406)+(2d0/3d0)*v(415))*v(85))
v(1821)=props(1)*(v(493)*v(721)+(v(388)+(2d0/3d0)*v(397)+v(424))*v(85))
v(767)=props(1)*(v(493)*v(81)+v(454)*v(85))
v(766)=props(1)*(v(492)*v(81)+v(453)*v(85))
v(765)=props(1)*(v(491)*v(81)+v(452)*v(85))
v(764)=props(1)*(v(490)*v(81)+v(451)*v(85))
v(763)=props(1)*(v(489)*v(81)+v(450)*v(85))
v(762)=props(1)*(v(488)*v(81)+v(449)*v(85))
v(761)=props(1)*(v(487)*v(81)+v(448)*v(85))
v(760)=props(1)*(v(486)*v(81)+v(447)*v(85))
v(759)=props(1)*(v(484)*v(81)+v(446)*v(85))
v(758)=props(1)*(v(493)*v(80)+v(445)*v(85))
v(757)=props(1)*(v(492)*v(80)+v(444)*v(85))
v(756)=props(1)*(v(491)*v(80)+v(443)*v(85))
v(755)=props(1)*(v(490)*v(80)+v(441)*v(85))
v(754)=props(1)*(v(489)*v(80)+v(440)*v(85))
v(753)=props(1)*(v(488)*v(80)+v(439)*v(85))
v(752)=props(1)*(v(487)*v(80)+v(437)*v(85))
v(751)=props(1)*(v(486)*v(80)+v(436)*v(85))
v(750)=props(1)*(v(484)*v(80)+v(435)*v(85))
v(749)=props(1)*(v(493)*v(79)+v(433)*v(85))
v(748)=props(1)*(v(492)*v(79)+v(432)*v(85))
v(747)=props(1)*(v(491)*v(79)+v(431)*v(85))
v(746)=props(1)*(v(490)*v(79)+v(430)*v(85))
v(745)=props(1)*(v(489)*v(79)+v(429)*v(85))
v(744)=props(1)*(v(488)*v(79)+v(428)*v(85))
v(743)=props(1)*(v(487)*v(79)+v(427)*v(85))
v(742)=props(1)*(v(486)*v(79)+v(426)*v(85))
v(741)=props(1)*(v(484)*v(79)+v(425)*v(85))
v(705)=v(211)*x(19)
v(704)=-(v(217)*x(13))
v(703)=-(v(215)*x(14))
v(702)=v(212)*x(17)
v(693)=-(v(214)*x(19))
v(692)=v(217)*x(16)
v(691)=v(209)*x(14)
v(690)=-(v(212)*x(11))
v(681)=v(214)*x(13)
v(680)=-(v(211)*x(16))
v(679)=v(215)*x(11)
v(678)=-(v(209)*x(17))
v(665)=-(v(216)*x(12))
v(664)=v(210)*x(18)
v(654)=-(v(213)*x(18))
v(653)=v(216)*x(15)
v(643)=v(213)*x(12)
v(642)=-(v(210)*x(15))
v(576)=(-2d0)*v(215)
v(575)=2d0*v(212)
v(577)=v(1855)*v(502)+v(618)+v(575)*x(13)+v(576)*x(19)
v(586)=-v(577)/3d0
v(571)=v(1848)*x(16)
v(568)=v(1765)*v(1848)
v(572)=v(1849)*v(499)+v(1761)*v(571)+v(634)-v(568)*x(19)
v(583)=-v(572)/3d0
v(569)=v(1849)*v(497)+v(699)+v(568)*x(12)+v(575)*x(18)
v(581)=-v(569)/3d0
v(567)=v(1854)*v(496)-v(1763)*v(571)+v(626)+v(568)*x(13)
v(580)=-v(567)/3d0
v(547)=(-2d0)*v(213)
v(546)=2d0*v(216)
v(553)=v(1850)*v(503)+v(647)-v(547)*x(11)-v(546)*x(17)
v(562)=-v(553)/3d0
v(548)=v(1850)*v(498)+v(621)+v(546)*x(13)+v(547)*x(16)
v(557)=-v(548)/3d0
v(544)=(-2d0)*v(210)
v(551)=v(1851)*v(501)+v(629)-v(544)*x(16)-v(546)*x(19)
v(560)=-v(551)/3d0
v(550)=v(1852)*v(500)+v(669)+v(547)*x(14)-v(544)*x(17)
v(559)=-v(550)/3d0
v(545)=v(1851)*v(497)+v(658)+v(544)*x(11)+v(546)*x(14)
v(556)=-v(545)/3d0
v(542)=v(1852)*v(494)+v(637)+v(544)*x(13)-v(547)*x(19)
v(554)=-v(542)/3d0
v(526)=(-2d0)*v(217)
v(523)=(-2d0)*v(214)
v(527)=v(1853)*v(501)+v(684)-v(523)*x(12)+v(526)*x(18)
v(536)=-v(527)/3d0
v(524)=v(1853)*v(499)+v(650)-v(526)*x(11)+v(523)*x(14)
v(534)=-v(524)/3d0
v(521)=v(1848)*x(15)
v(578)=v(1854)*v(503)-v(1765)*v(521)+v(687)+v(576)*x(18)
v(587)=-v(578)/3d0
v(573)=v(1855)*v(500)+v(1761)*v(521)+v(675)-v(576)*x(12)
v(584)=-v(573)/3d0
v(518)=(-2d0)*v(211)
v(528)=v(1857)*v(502)+v(661)-v(518)*x(14)+v(526)*x(17)
v(537)=-v(528)/3d0
v(522)=v(1856)*v(498)+v(1757)*v(521)+v(708)-v(518)*x(18)
v(533)=-v(522)/3d0
v(519)=v(1856)*v(496)+v(672)+v(518)*x(11)-v(523)*x(17)
v(531)=-v(519)/3d0
v(517)=v(1857)*v(494)-v(1760)*v(521)+v(696)+v(518)*x(12)
v(530)=-v(517)/3d0
v(505)=1d0/v(208)**0.13333333333333333d1
v(1858)=-v(505)/3d0
v(513)=v(1858)*v(503)
v(512)=v(1858)*v(502)
v(511)=v(1858)*v(501)
v(510)=v(1858)*v(500)
v(509)=v(1858)*v(499)
v(508)=v(1858)*v(498)
v(507)=v(1858)*v(497)
v(506)=v(1858)*v(496)
v(504)=v(1858)*v(494)
v(100)=1d0/v(208)**0.3333333333333333d0
v(1859)=props(7)*v(100)
v(95)=v(1767)*v(208)
v(99)=-v(95)/3d0
v(96)=v(1791)*v(208)
v(102)=-v(96)/3d0
v(97)=v(1792)*v(208)
v(588)=v(102)+(2d0/3d0)*v(97)+v(99)
v(597)=props(7)*(v(100)*(v(538)+v(562)+(2d0/3d0)*v(578))+v(513)*v(588))
v(596)=props(7)*(v(100)*(v(537)+v(561)+(2d0/3d0)*v(577))+v(512)*v(588))
v(595)=props(7)*(v(100)*(v(536)+v(560)+(2d0/3d0)*v(574))+v(511)*v(588))
v(594)=props(7)*(v(100)*(v(535)+v(559)+(2d0/3d0)*v(573))+v(510)*v(588))
v(593)=props(7)*(v(100)*(v(534)+v(558)+(2d0/3d0)*v(572))+v(509)*v(588))
v(592)=props(7)*(v(100)*(v(533)+v(557)+(2d0/3d0)*v(570))+v(508)*v(588))
v(591)=props(7)*(v(100)*(v(532)+v(556)+(2d0/3d0)*v(569))+v(507)*v(588))
v(590)=props(7)*(v(100)*(v(531)+v(555)+(2d0/3d0)*v(567))+v(506)*v(588))
v(589)=props(7)*(v(100)*(v(530)+v(554)+(2d0/3d0)*v(566))+v(504)*v(588))
v(103)=-v(97)/3d0
v(608)=v(102)+v(103)+(2d0/3d0)*v(95)
v(617)=props(7)*(v(100)*((2d0/3d0)*v(529)+v(562)+v(587))+v(513)*v(608))
v(616)=props(7)*(v(100)*((2d0/3d0)*v(528)+v(561)+v(586))+v(512)*v(608))
v(615)=props(7)*(v(100)*((2d0/3d0)*v(527)+v(560)+v(585))+v(511)*v(608))
v(614)=props(7)*(v(100)*((2d0/3d0)*v(525)+v(559)+v(584))+v(510)*v(608))
v(613)=props(7)*(v(100)*((2d0/3d0)*v(524)+v(558)+v(583))+v(509)*v(608))
v(612)=props(7)*(v(100)*((2d0/3d0)*v(522)+v(557)+v(582))+v(508)*v(608))
v(611)=props(7)*(v(100)*((2d0/3d0)*v(520)+v(556)+v(581))+v(507)*v(608))
v(610)=props(7)*(v(100)*((2d0/3d0)*v(519)+v(555)+v(580))+v(506)*v(608))
v(609)=props(7)*(v(100)*((2d0/3d0)*v(517)+v(554)+v(579))+v(504)*v(608))
v(598)=v(103)+(2d0/3d0)*v(96)+v(99)
v(607)=props(7)*(v(100)*(v(538)+(2d0/3d0)*v(553)+v(587))+v(513)*v(598))
v(606)=props(7)*(v(100)*(v(537)+(2d0/3d0)*v(552)+v(586))+v(512)*v(598))
v(605)=props(7)*(v(100)*(v(536)+(2d0/3d0)*v(551)+v(585))+v(511)*v(598))
v(604)=props(7)*(v(100)*(v(535)+(2d0/3d0)*v(550)+v(584))+v(510)*v(598))
v(603)=props(7)*(v(100)*(v(534)+(2d0/3d0)*v(549)+v(583))+v(509)*v(598))
v(602)=props(7)*(v(100)*(v(533)+(2d0/3d0)*v(548)+v(582))+v(508)*v(598))
v(601)=props(7)*(v(100)*(v(532)+(2d0/3d0)*v(545)+v(581))+v(507)*v(598))
v(600)=props(7)*(v(100)*(v(531)+(2d0/3d0)*v(543)+v(580))+v(506)*v(598))
v(599)=props(7)*(v(100)*(v(530)+(2d0/3d0)*v(542)+v(579))+v(504)*v(598))
v(98)=v(1859)*v(588)
v(101)=v(1859)*v(598)
v(104)=v(1859)*v(608)
v(111)=(v(1860)+v(1863))*v(208)
v(641)=props(7)*(v(111)*v(513)+v(100)*(v(1863)*v(503)+v(653)+v(654)+v(655)+v(678)+v(679)))
v(639)=props(7)*(v(111)*v(512)+v(100)*(v(1864)*v(502)+v(637)+v(208)*(-(v(1758)*x(13))+v(1764)*x(19))))
v(636)=props(7)*(v(111)*v(511)+v(100)*(v(1865)*v(501)+v(634)+v(208)*(-(v(1761)*x(16))+v(1765)*x(19))))
v(633)=props(7)*(v(111)*v(510)+v(100)*(v(1864)*v(500)+v(642)+v(643)+v(644)+v(702)+v(703)))
v(631)=props(7)*(v(111)*v(509)+v(100)*(v(1865)*v(499)+v(629)+v(208)*(v(1758)*x(16)-v(1762)*x(19))))
v(628)=props(7)*(v(111)*v(508)+v(100)*(v(1863)*v(498)+v(626)+v(208)*(-(v(1765)*x(13))+v(1763)*x(16))))
v(625)=props(7)*(v(111)*v(507)+v(100)*(v(1865)*v(497)+v(664)+v(665)+v(666)+v(690)+v(691)))
v(623)=props(7)*(v(111)*v(506)+v(100)*(v(1863)*v(496)+v(621)+v(208)*(v(1762)*x(13)-v(1764)*x(16))))
v(620)=props(7)*(v(111)*v(504)+v(100)*(v(1864)*v(494)+v(618)+v(208)*(v(1761)*x(13)-v(1763)*x(19))))
v(112)=v(111)*v(1859)
v(116)=(v(1866)+v(1867))*v(208)
v(674)=props(7)*(v(116)*v(513)+v(100)*(v(1866)*v(503)+v(672)+v(208)*(-(v(1759)*x(11))+v(1757)*x(17))))
v(671)=props(7)*(v(116)*v(512)+v(100)*(v(1870)*v(502)+v(669)+v(208)*(-(v(1764)*x(14))+v(1758)*x(17))))
v(668)=props(7)*(v(116)*v(511)+v(100)*(v(1871)*v(501)-v(664)-v(665)+v(666)+v(692)+v(693)))
v(663)=props(7)*(v(116)*v(510)+v(100)*(v(1870)*v(500)+v(661)+v(208)*(v(1759)*x(14)-v(1760)*x(17))))
v(660)=props(7)*(v(116)*v(509)+v(100)*(v(1871)*v(499)+v(658)+v(208)*(-(v(1758)*x(11))+v(1762)*x(14))))
v(657)=props(7)*(v(116)*v(508)+v(100)*(v(1866)*v(498)-v(653)-v(654)+v(655)+v(680)+v(681)))
v(652)=props(7)*(v(116)*v(507)+v(100)*(v(1871)*v(497)+v(650)+v(208)*(v(1760)*x(11)-v(1757)*x(14))))
v(649)=props(7)*(v(116)*v(506)+v(100)*(v(1866)*v(496)+v(647)+v(208)*(v(1764)*x(11)-v(1762)*x(17))))
v(646)=props(7)*(v(116)*v(504)+v(100)*(v(1870)*v(494)-v(642)-v(643)+v(644)+v(704)+v(705)))
v(117)=v(116)*v(1859)
v(118)=(v(1872)+v(1873))*v(208)
v(710)=props(7)*(v(118)*v(513)+v(100)*(v(1872)*v(503)+v(708)+v(208)*(-(v(1757)*x(15))+v(1759)*x(18))))
v(707)=props(7)*(v(118)*v(512)+v(100)*(v(1876)*v(502)+v(644)-v(702)-v(703)-v(704)-v(705)))
v(701)=props(7)*(v(118)*v(511)+v(100)*(v(1877)*v(501)+v(699)+v(208)*(-(v(1765)*x(12))+v(1761)*x(18))))
v(698)=props(7)*(v(118)*v(510)+v(100)*(v(1876)*v(500)+v(696)+v(208)*(-(v(1759)*x(12))+v(1760)*x(15))))
v(695)=props(7)*(v(118)*v(509)+v(100)*(v(1877)*v(499)+v(666)-v(690)-v(691)-v(692)-v(693)))
v(689)=props(7)*(v(118)*v(508)+v(100)*(v(1872)*v(498)+v(687)+v(208)*(v(1765)*x(15)-v(1763)*x(18))))
v(686)=props(7)*(v(118)*v(507)+v(100)*(v(1877)*v(497)+v(684)+v(208)*(v(1757)*x(12)-v(1760)*x(18))))
v(683)=props(7)*(v(118)*v(506)+v(100)*(v(1872)*v(496)+v(655)-v(678)-v(679)-v(680)-v(681)))
v(677)=props(7)*(v(118)*v(504)+v(100)*(v(1876)*v(494)+v(675)+v(208)*(v(1763)*x(12)-v(1761)*x(15))))
v(119)=v(118)*v(1859)
v(120)=v(1878)*v(711)+v(86)-v(98)
v(121)=-v(101)+v(1878)*v(721)+v(86)
v(122)=-v(104)+v(1878)*v(731)+v(86)
v(123)=-v(112)+v(1878)*v(79)
v(1900)=4d0*v(123)
v(124)=-v(117)+v(1878)*v(80)
v(1901)=4d0*v(124)
v(125)=-v(119)+v(1878)*v(81)
v(1902)=4d0*v(125)
IF(dabs(props(5)).lt.0.1d-11) THEN
v(1880)=v(1881)
v(1879)=props(4)
v(768)=v(1879)
v(127)=v(1880)
ELSE
v(1882)=1d0/props(5)
v(769)=dexp(-(props(5)*v(1881)))
v(768)=props(4)*v(769)
v(127)=v(1882)*(1d0-v(769))
ENDIF
v(770)=props(3)+v(127)
v(1884)=(-0.15d1)/v(770)
v(771)=-(v(768)/v(770)**2)
v(1883)=0.15d1*v(771)
v(986)=v(125)*v(1883)
v(948)=v(124)*v(1883)
v(910)=v(123)*v(1883)
v(1004)=v(1884)*v(710)
v(1023)=-(v(1004)*v(1729))
v(1003)=v(1884)*v(707)
v(1022)=-(v(1003)*v(1729))
v(1002)=v(1884)*v(701)
v(1021)=-(v(1002)*v(1729))
v(1001)=v(1884)*v(698)
v(1020)=-(v(1001)*v(1729))
v(1000)=v(1884)*v(695)
v(1019)=-(v(1000)*v(1729))
v(999)=v(1884)*v(689)
v(1018)=-(v(1729)*v(999))
v(998)=v(1884)*v(686)
v(1017)=-(v(1729)*v(998))
v(997)=v(1884)*v(683)
v(1016)=-(v(1729)*v(997))
v(996)=v(1884)*v(677)
v(1015)=-(v(1729)*v(996))
v(995)=-(v(1884)*v(767))
v(1014)=-(v(1729)*v(995))
v(994)=-(v(1884)*v(766))
v(1013)=-(v(1729)*v(994))
v(993)=-(v(1884)*v(765))
v(1012)=-(v(1729)*v(993))
v(992)=-(v(1884)*v(764))
v(1011)=-(v(1729)*v(992))
v(991)=-(v(1884)*v(763))
v(1010)=-(v(1729)*v(991))
v(990)=-(v(1884)*v(762))
v(1009)=-(v(1729)*v(990))
v(989)=-(v(1884)*v(761))
v(1008)=-(v(1729)*v(989))
v(988)=-(v(1884)*v(760))
v(1007)=-(v(1729)*v(988))
v(987)=-(v(1884)*v(759))
v(1006)=-(v(1729)*v(987))
v(966)=v(1884)*v(674)
v(985)=-(v(1729)*v(966))
v(965)=v(1884)*v(671)
v(984)=-(v(1729)*v(965))
v(964)=v(1884)*v(668)
v(983)=-(v(1729)*v(964))
v(963)=v(1884)*v(663)
v(982)=-(v(1729)*v(963))
v(962)=v(1884)*v(660)
v(981)=-(v(1729)*v(962))
v(961)=v(1884)*v(657)
v(980)=-(v(1729)*v(961))
v(960)=v(1884)*v(652)
v(979)=-(v(1729)*v(960))
v(959)=v(1884)*v(649)
v(978)=-(v(1729)*v(959))
v(958)=v(1884)*v(646)
v(977)=-(v(1729)*v(958))
v(957)=-(v(1884)*v(758))
v(976)=-(v(1729)*v(957))
v(956)=-(v(1884)*v(757))
v(975)=-(v(1729)*v(956))
v(955)=-(v(1884)*v(756))
v(974)=-(v(1729)*v(955))
v(954)=-(v(1884)*v(755))
v(973)=-(v(1729)*v(954))
v(953)=-(v(1884)*v(754))
v(972)=-(v(1729)*v(953))
v(952)=-(v(1884)*v(753))
v(971)=-(v(1729)*v(952))
v(951)=-(v(1884)*v(752))
v(970)=-(v(1729)*v(951))
v(950)=-(v(1884)*v(751))
v(969)=-(v(1729)*v(950))
v(949)=-(v(1884)*v(750))
v(968)=-(v(1729)*v(949))
v(928)=v(1884)*v(641)
v(947)=-(v(1729)*v(928))
v(927)=v(1884)*v(639)
v(946)=-(v(1729)*v(927))
v(926)=v(1884)*v(636)
v(945)=-(v(1729)*v(926))
v(925)=v(1884)*v(633)
v(944)=-(v(1729)*v(925))
v(924)=v(1884)*v(631)
v(943)=-(v(1729)*v(924))
v(923)=v(1884)*v(628)
v(942)=-(v(1729)*v(923))
v(922)=v(1884)*v(625)
v(941)=-(v(1729)*v(922))
v(921)=v(1884)*v(623)
v(940)=-(v(1729)*v(921))
v(920)=v(1884)*v(620)
v(939)=-(v(1729)*v(920))
v(919)=-(v(1884)*v(749))
v(938)=-(v(1729)*v(919))
v(918)=-(v(1884)*v(748))
v(937)=-(v(1729)*v(918))
v(917)=-(v(1884)*v(747))
v(936)=-(v(1729)*v(917))
v(916)=-(v(1884)*v(746))
v(935)=-(v(1729)*v(916))
v(915)=-(v(1884)*v(745))
v(934)=-(v(1729)*v(915))
v(914)=-(v(1884)*v(744))
v(933)=-(v(1729)*v(914))
v(913)=-(v(1884)*v(743))
v(932)=-(v(1729)*v(913))
v(912)=-(v(1884)*v(742))
v(931)=-(v(1729)*v(912))
v(911)=-(v(1884)*v(741))
v(930)=-(v(1729)*v(911))
v(909)=v(1884)*v(617)
v(908)=v(1884)*v(616)
v(907)=v(1884)*v(615)
v(906)=v(1884)*v(614)
v(905)=v(1884)*v(613)
v(904)=v(1884)*v(612)
v(903)=v(1884)*v(611)
v(902)=v(1884)*v(610)
v(901)=v(1884)*v(609)
v(900)=-(v(1822)*v(1884))
v(899)=-(v(1825)*v(1884))
v(898)=-(v(1828)*v(1884))
v(897)=-(v(1831)*v(1884))
v(896)=-(v(1834)*v(1884))
v(895)=-(v(1837)*v(1884))
v(894)=-(v(1840)*v(1884))
v(893)=-(v(1843)*v(1884))
v(892)=-(v(1846)*v(1884))
v(890)=v(1884)*v(607)
v(889)=v(1884)*v(606)
v(888)=v(1884)*v(605)
v(887)=v(1884)*v(604)
v(886)=v(1884)*v(603)
v(885)=v(1884)*v(602)
v(884)=v(1884)*v(601)
v(883)=v(1884)*v(600)
v(882)=v(1884)*v(599)
v(881)=-(v(1821)*v(1884))
v(880)=-(v(1824)*v(1884))
v(879)=-(v(1827)*v(1884))
v(878)=-(v(1830)*v(1884))
v(877)=-(v(1833)*v(1884))
v(876)=-(v(1836)*v(1884))
v(875)=-(v(1839)*v(1884))
v(874)=-(v(1842)*v(1884))
v(873)=-(v(1845)*v(1884))
v(871)=v(1884)*v(597)
v(870)=v(1884)*v(596)
v(869)=v(1884)*v(595)
v(868)=v(1884)*v(594)
v(867)=v(1884)*v(593)
v(866)=v(1884)*v(592)
v(865)=v(1884)*v(591)
v(864)=v(1884)*v(590)
v(863)=v(1884)*v(589)
v(862)=-(v(1823)*v(1884))
v(861)=-(v(1826)*v(1884))
v(860)=-(v(1829)*v(1884))
v(859)=-(v(1832)*v(1884))
v(858)=-(v(1835)*v(1884))
v(857)=-(v(1838)*v(1884))
v(856)=-(v(1841)*v(1884))
v(855)=-(v(1844)*v(1884))
v(854)=-(v(1847)*v(1884))
v(128)=-v(121)/3d0
v(129)=-v(122)/3d0
v(132)=(2d0/3d0)*v(120)+v(128)+v(129)
v(1905)=2d0*v(132)
v(853)=v(132)*v(1883)
v(130)=-v(120)/3d0
v(137)=(2d0/3d0)*v(122)+v(128)+v(130)
v(1904)=2d0*v(137)
v(891)=v(137)*v(1883)
v(134)=(2d0/3d0)*v(121)+v(129)+v(130)
v(1903)=2d0*v(134)
v(1026)=1d0/sqrt(0.15d1*(2d0*v(123)**2+2d0*v(124)**2+2d0*v(125)**2+v(132)**2+v(134)**2+v(137)**2))
v(1899)=0.75d0*v(1026)
v(872)=v(134)*v(1883)
v(133)=-(v(132)*v(1884))
v(136)=-(v(134)*v(1884))
v(138)=-(v(137)*v(1884))
v(139)=-(v(123)*v(1884))
v(929)=-v(139)-v(1729)*v(910)
v(140)=-(v(124)*v(1884))
v(967)=-v(140)-v(1729)*v(948)
v(141)=-(v(125)*v(1884))
v(1005)=-v(141)-v(1729)*v(986)
IF(sqrt(0.15d1*(2d0*v(112)**2+2d0*v(117)**2+2d0*v(119)**2+((2d0/3d0)*v(101)-v(104)/3d0-v(98)/3d0)**2+(-v(101)/3d0+(2d0&
&/3d0)*v(104)-v(98)/3d0)**2+(-v(101)/3d0-v(104)/3d0+(2d0/3d0)*v(98))**2)).lt.0.1d-11) THEN
v(1044)=0d0
v(1045)=0d0
v(1046)=0d0
v(1047)=0d0
v(1048)=0d0
v(1049)=0d0
v(1050)=0d0
v(1051)=0d0
v(1052)=0d0
v(1053)=0d0
v(1054)=0d0
v(1055)=0d0
v(1056)=0d0
v(1057)=0d0
v(1058)=0d0
v(1059)=0d0
v(1060)=0d0
v(1061)=0d0
v(1062)=0d0
v(166)=0d0
v(1063)=0d0
v(1064)=0d0
v(1065)=0d0
v(1066)=0d0
v(1067)=0d0
v(1068)=0d0
v(1069)=0d0
v(1070)=0d0
v(1071)=0d0
v(1072)=0d0
v(1073)=0d0
v(1074)=0d0
v(1075)=0d0
v(1076)=0d0
v(1077)=0d0
v(1078)=0d0
v(1079)=0d0
v(1080)=0d0
v(1081)=0d0
v(167)=0d0
v(1082)=0d0
v(1083)=0d0
v(1084)=0d0
v(1085)=0d0
v(1086)=0d0
v(1087)=0d0
v(1088)=0d0
v(1089)=0d0
v(1090)=0d0
v(1091)=0d0
v(1092)=0d0
v(1093)=0d0
v(1094)=0d0
v(1095)=0d0
v(1096)=0d0
v(1097)=0d0
v(1098)=0d0
v(1099)=0d0
v(1100)=0d0
v(168)=0d0
v(1101)=0d0
v(1102)=0d0
v(1103)=0d0
v(1104)=0d0
v(1105)=0d0
v(1106)=0d0
v(1107)=0d0
v(1108)=0d0
v(1109)=0d0
v(1110)=0d0
v(1111)=0d0
v(1112)=0d0
v(1113)=0d0
v(1114)=0d0
v(1115)=0d0
v(1116)=0d0
v(1117)=0d0
v(1118)=0d0
v(1119)=0d0
v(169)=0d0
v(1120)=0d0
v(1121)=0d0
v(1122)=0d0
v(1123)=0d0
v(1124)=0d0
v(1125)=0d0
v(1126)=0d0
v(1127)=0d0
v(1128)=0d0
v(1129)=0d0
v(1130)=0d0
v(1131)=0d0
v(1132)=0d0
v(1133)=0d0
v(1134)=0d0
v(1135)=0d0
v(1136)=0d0
v(1137)=0d0
v(1138)=0d0
v(170)=0d0
v(1139)=0d0
v(1140)=0d0
v(1141)=0d0
v(1142)=0d0
v(1143)=0d0
v(1144)=0d0
v(1145)=0d0
v(1146)=0d0
v(1147)=0d0
v(1148)=0d0
v(1149)=0d0
v(1150)=0d0
v(1151)=0d0
v(1152)=0d0
v(1153)=0d0
v(1154)=0d0
v(1155)=0d0
v(1156)=0d0
v(1157)=0d0
v(171)=0d0
v(1158)=0d0
v(1159)=0d0
v(1160)=0d0
v(1161)=0d0
v(1162)=0d0
v(1163)=0d0
v(1164)=0d0
v(1165)=0d0
v(1166)=0d0
v(1167)=0d0
v(1168)=0d0
v(1169)=0d0
v(1170)=0d0
v(1171)=0d0
v(1172)=0d0
v(1173)=0d0
v(1174)=0d0
v(1175)=0d0
v(1176)=0d0
v(172)=0d0
v(1177)=0d0
v(1178)=0d0
v(1179)=0d0
v(1180)=0d0
v(1181)=0d0
v(1182)=0d0
v(1183)=0d0
v(1184)=0d0
v(1185)=0d0
v(1186)=0d0
v(1187)=0d0
v(1188)=0d0
v(1189)=0d0
v(1190)=0d0
v(1191)=0d0
v(1192)=0d0
v(1193)=0d0
v(1194)=0d0
v(1195)=0d0
v(173)=0d0
v(1196)=0d0
v(1197)=0d0
v(1198)=0d0
v(1199)=0d0
v(1200)=0d0
v(1201)=0d0
v(1202)=0d0
v(1203)=0d0
v(1204)=0d0
v(1205)=0d0
v(1206)=0d0
v(1207)=0d0
v(1208)=0d0
v(1209)=0d0
v(1210)=0d0
v(1211)=0d0
v(1212)=0d0
v(1213)=0d0
v(1214)=0d0
v(174)=0d0
ELSE
v(1895)=0.15d1*props(6)
v(1894)=sqrt(0.15d1*(v(101)**2+v(104)**2+2d0*v(112)**2+2d0*v(117)**2+2d0*v(119)**2+v(98)**2))
v(1893)=1d0-props(6)
v(1891)=2d0*v(119)
v(1890)=2d0*v(117)
v(1889)=2d0*v(112)
v(1888)=2d0*v(141)
v(1887)=2d0*v(140)
v(1886)=2d0*v(139)
v(1885)=props(8)**(1d0+props(9))
v(1896)=v(1885)/2d0
v(1253)=v(1004)*v(1891)+v(133)*v(597)+v(136)*v(607)+v(138)*v(617)+v(1886)*v(641)+v(1887)*v(674)+v(1888)*v(710)+v(101&
& )*v(890)+v(104)*v(909)+v(1889)*v(928)+v(1890)*v(966)+v(871)*v(98)
v(1252)=v(1003)*v(1891)+v(133)*v(596)+v(136)*v(606)+v(138)*v(616)+v(1886)*v(639)+v(1887)*v(671)+v(1888)*v(707)+v(101&
& )*v(889)+v(104)*v(908)+v(1889)*v(927)+v(1890)*v(965)+v(870)*v(98)
v(1251)=v(1002)*v(1891)+v(133)*v(595)+v(136)*v(605)+v(138)*v(615)+v(1886)*v(636)+v(1887)*v(668)+v(1888)*v(701)+v(101&
& )*v(888)+v(104)*v(907)+v(1889)*v(926)+v(1890)*v(964)+v(869)*v(98)
v(1250)=v(1001)*v(1891)+v(133)*v(594)+v(136)*v(604)+v(138)*v(614)+v(1886)*v(633)+v(1887)*v(663)+v(1888)*v(698)+v(101&
& )*v(887)+v(104)*v(906)+v(1889)*v(925)+v(1890)*v(963)+v(868)*v(98)
v(1249)=v(1000)*v(1891)+v(133)*v(593)+v(136)*v(603)+v(138)*v(613)+v(1886)*v(631)+v(1887)*v(660)+v(1888)*v(695)+v(101&
& )*v(886)+v(104)*v(905)+v(1889)*v(924)+v(1890)*v(962)+v(867)*v(98)
v(1248)=v(133)*v(592)+v(136)*v(602)+v(138)*v(612)+v(1886)*v(628)+v(1887)*v(657)+v(1888)*v(689)+v(101)*v(885)+v(104)*v&
& (904)+v(1889)*v(923)+v(1890)*v(961)+v(866)*v(98)+v(1891)*v(999)
v(1247)=v(133)*v(591)+v(136)*v(601)+v(138)*v(611)+v(1886)*v(625)+v(1887)*v(652)+v(1888)*v(686)+v(101)*v(884)+v(104)*v&
& (903)+v(1889)*v(922)+v(1890)*v(960)+v(865)*v(98)+v(1891)*v(998)
v(1246)=v(133)*v(590)+v(136)*v(600)+v(138)*v(610)+v(1886)*v(623)+v(1887)*v(649)+v(1888)*v(683)+v(101)*v(883)+v(104)*v&
& (902)+v(1889)*v(921)+v(1890)*v(959)+v(864)*v(98)+v(1891)*v(997)
v(1245)=v(133)*v(589)+v(136)*v(599)+v(138)*v(609)+v(1886)*v(620)+v(1887)*v(646)+v(1888)*v(677)+v(101)*v(882)+v(104)*v&
& (901)+v(1889)*v(920)+v(1890)*v(958)+v(863)*v(98)+v(1891)*v(996)
v(1244)=v(101)*v(881)+v(104)*v(900)+v(1889)*v(919)+v(1890)*v(957)+v(862)*v(98)+v(1891)*v(995)
v(1243)=v(101)*v(880)+v(104)*v(899)+v(1889)*v(918)+v(1890)*v(956)+v(861)*v(98)+v(1891)*v(994)
v(1242)=v(101)*v(879)+v(104)*v(898)+v(1889)*v(917)+v(1890)*v(955)+v(860)*v(98)+v(1891)*v(993)
v(1241)=v(101)*v(878)+v(104)*v(897)+v(1889)*v(916)+v(1890)*v(954)+v(859)*v(98)+v(1891)*v(992)
v(1240)=v(101)*v(877)+v(104)*v(896)+v(1889)*v(915)+v(1890)*v(953)+v(858)*v(98)+v(1891)*v(991)
v(1239)=v(101)*v(876)+v(104)*v(895)+v(1889)*v(914)+v(1890)*v(952)+v(857)*v(98)+v(1891)*v(990)
v(1238)=v(101)*v(875)+v(104)*v(894)+v(1889)*v(913)+v(1890)*v(951)+v(856)*v(98)+v(1891)*v(989)
v(1237)=v(101)*v(874)+v(104)*v(893)+v(1889)*v(912)+v(1890)*v(950)+v(855)*v(98)+v(1891)*v(988)
v(1236)=v(101)*v(873)+v(104)*v(892)+v(1889)*v(911)+v(1890)*v(949)+v(854)*v(98)+v(1891)*v(987)
v(1235)=v(101)*v(872)+v(104)*v(891)+v(1889)*v(910)+v(1890)*v(948)+v(853)*v(98)+v(1891)*v(986)
v(1215)=1d0/v(1894)
v(1892)=0.15d1*v(1215)
v(1224)=v(1892)*(v(101)*v(607)+v(104)*v(617)+v(1889)*v(641)+v(1890)*v(674)+v(1891)*v(710)+v(597)*v(98))
v(1223)=v(1892)*(v(101)*v(606)+v(104)*v(616)+v(1889)*v(639)+v(1890)*v(671)+v(1891)*v(707)+v(596)*v(98))
v(1222)=v(1892)*(v(101)*v(605)+v(104)*v(615)+v(1889)*v(636)+v(1890)*v(668)+v(1891)*v(701)+v(595)*v(98))
v(1221)=v(1892)*(v(101)*v(604)+v(104)*v(614)+v(1889)*v(633)+v(1890)*v(663)+v(1891)*v(698)+v(594)*v(98))
v(1220)=v(1892)*(v(101)*v(603)+v(104)*v(613)+v(1889)*v(631)+v(1890)*v(660)+v(1891)*v(695)+v(593)*v(98))
v(1219)=v(1892)*(v(101)*v(602)+v(104)*v(612)+v(1889)*v(628)+v(1890)*v(657)+v(1891)*v(689)+v(592)*v(98))
v(1218)=v(1892)*(v(101)*v(601)+v(104)*v(611)+v(1889)*v(625)+v(1890)*v(652)+v(1891)*v(686)+v(591)*v(98))
v(1217)=v(1892)*(v(101)*v(600)+v(104)*v(610)+v(1889)*v(623)+v(1890)*v(649)+v(1891)*v(683)+v(590)*v(98))
v(1216)=v(1892)*(v(101)*v(599)+v(104)*v(609)+v(1889)*v(620)+v(1890)*v(646)+v(1891)*v(677)+v(589)*v(98))
v(1254)=v(1894)**props(9)*v(1896)
v(1226)=-(v(1895)/v(1894)**2)
v(1234)=v(1224)*v(1226)
v(1233)=v(1223)*v(1226)
v(1232)=v(1222)*v(1226)
v(1231)=v(1221)*v(1226)
v(1230)=v(1220)*v(1226)
v(1229)=v(1219)*v(1226)
v(1228)=v(1218)*v(1226)
v(1227)=v(1217)*v(1226)
v(1225)=v(1216)*v(1226)
v(178)=v(1215)*v(1895)
v(1283)=v(119)*v(178)+v(141)*v(1893)
v(1282)=v(117)*v(178)+v(140)*v(1893)
v(1281)=v(112)*v(178)+v(139)*v(1893)
v(1280)=v(104)*v(178)+v(138)*v(1893)
v(1279)=v(101)*v(178)+v(136)*v(1893)
v(1278)=v(133)*v(1893)+v(178)*v(98)
v(176)=v(101)*v(136)+v(104)*v(138)+v(112)*v(1886)+v(117)*v(1887)+v(119)*v(1888)+v(133)*v(98)
v(1267)=v(176)+dabs(v(176))
v(1269)=props(9)*v(1267)*v(1894)**((-1d0)+props(9))*v(1896)
v(1256)=dsign(1.d0,v(176))
v(1897)=v(1254)*(1d0+v(1256))
v(1277)=v(1224)*v(1269)+v(1253)*v(1897)
v(1276)=v(1223)*v(1269)+v(1252)*v(1897)
v(1275)=v(1222)*v(1269)+v(1251)*v(1897)
v(1274)=v(1221)*v(1269)+v(1250)*v(1897)
v(1273)=v(1220)*v(1269)+v(1249)*v(1897)
v(1272)=v(1219)*v(1269)+v(1248)*v(1897)
v(1271)=v(1218)*v(1269)+v(1247)*v(1897)
v(1270)=v(1217)*v(1269)+v(1246)*v(1897)
v(1268)=v(1216)*v(1269)+v(1245)*v(1897)
v(1265)=v(1244)*v(1897)
v(1264)=v(1243)*v(1897)
v(1263)=v(1242)*v(1897)
v(1262)=v(1241)*v(1897)
v(1261)=v(1240)*v(1897)
v(1260)=v(1239)*v(1897)
v(1259)=v(1238)*v(1897)
v(1258)=v(1237)*v(1897)
v(1257)=v(1236)*v(1897)
v(1255)=v(1235)*v(1897)
v(177)=v(1254)*v(1267)
v(1898)=v(177)*v(1893)
v(1044)=v(1255)*v(1278)+v(1898)*v(853)
v(1045)=v(1257)*v(1278)+v(1898)*v(854)
v(1046)=v(1258)*v(1278)+v(1898)*v(855)
v(1047)=v(1259)*v(1278)+v(1898)*v(856)
v(1048)=v(1260)*v(1278)+v(1898)*v(857)
v(1049)=v(1261)*v(1278)+v(1898)*v(858)
v(1050)=v(1262)*v(1278)+v(1898)*v(859)
v(1051)=v(1263)*v(1278)+v(1898)*v(860)
v(1052)=v(1264)*v(1278)+v(1898)*v(861)
v(1053)=v(1265)*v(1278)+v(1898)*v(862)
v(1054)=v(1268)*v(1278)+v(177)*(v(178)*v(589)+v(1893)*v(863)+v(1225)*v(98))
v(1055)=v(1270)*v(1278)+v(177)*(v(178)*v(590)+v(1893)*v(864)+v(1227)*v(98))
v(1056)=v(1271)*v(1278)+v(177)*(v(178)*v(591)+v(1893)*v(865)+v(1228)*v(98))
v(1057)=v(1272)*v(1278)+v(177)*(v(178)*v(592)+v(1893)*v(866)+v(1229)*v(98))
v(1058)=v(1273)*v(1278)+v(177)*(v(178)*v(593)+v(1893)*v(867)+v(1230)*v(98))
v(1059)=v(1274)*v(1278)+v(177)*(v(178)*v(594)+v(1893)*v(868)+v(1231)*v(98))
v(1060)=v(1275)*v(1278)+v(177)*(v(178)*v(595)+v(1893)*v(869)+v(1232)*v(98))
v(1061)=v(1276)*v(1278)+v(177)*(v(178)*v(596)+v(1893)*v(870)+v(1233)*v(98))
v(1062)=v(1277)*v(1278)+v(177)*(v(178)*v(597)+v(1893)*v(871)+v(1234)*v(98))
v(166)=v(1278)*v(177)
v(1063)=v(1255)*v(1279)+v(1898)*v(872)
v(1064)=v(1257)*v(1279)+v(1898)*v(873)
v(1065)=v(1258)*v(1279)+v(1898)*v(874)
v(1066)=v(1259)*v(1279)+v(1898)*v(875)
v(1067)=v(1260)*v(1279)+v(1898)*v(876)
v(1068)=v(1261)*v(1279)+v(1898)*v(877)
v(1069)=v(1262)*v(1279)+v(1898)*v(878)
v(1070)=v(1263)*v(1279)+v(1898)*v(879)
v(1071)=v(1264)*v(1279)+v(1898)*v(880)
v(1072)=v(1265)*v(1279)+v(1898)*v(881)
v(1073)=v(1268)*v(1279)+v(177)*(v(101)*v(1225)+v(178)*v(599)+v(1893)*v(882))
v(1074)=v(1270)*v(1279)+v(177)*(v(101)*v(1227)+v(178)*v(600)+v(1893)*v(883))
v(1075)=v(1271)*v(1279)+v(177)*(v(101)*v(1228)+v(178)*v(601)+v(1893)*v(884))
v(1076)=v(1272)*v(1279)+v(177)*(v(101)*v(1229)+v(178)*v(602)+v(1893)*v(885))
v(1077)=v(1273)*v(1279)+v(177)*(v(101)*v(1230)+v(178)*v(603)+v(1893)*v(886))
v(1078)=v(1274)*v(1279)+v(177)*(v(101)*v(1231)+v(178)*v(604)+v(1893)*v(887))
v(1079)=v(1275)*v(1279)+v(177)*(v(101)*v(1232)+v(178)*v(605)+v(1893)*v(888))
v(1080)=v(1276)*v(1279)+v(177)*(v(101)*v(1233)+v(178)*v(606)+v(1893)*v(889))
v(1081)=v(1277)*v(1279)+v(177)*(v(101)*v(1234)+v(178)*v(607)+v(1893)*v(890))
v(167)=v(1279)*v(177)
v(1082)=v(1255)*v(1280)+v(1898)*v(891)
v(1083)=v(1257)*v(1280)+v(1898)*v(892)
v(1084)=v(1258)*v(1280)+v(1898)*v(893)
v(1085)=v(1259)*v(1280)+v(1898)*v(894)
v(1086)=v(1260)*v(1280)+v(1898)*v(895)
v(1087)=v(1261)*v(1280)+v(1898)*v(896)
v(1088)=v(1262)*v(1280)+v(1898)*v(897)
v(1089)=v(1263)*v(1280)+v(1898)*v(898)
v(1090)=v(1264)*v(1280)+v(1898)*v(899)
v(1091)=v(1265)*v(1280)+v(1898)*v(900)
v(1092)=v(1268)*v(1280)+v(177)*(v(104)*v(1225)+v(178)*v(609)+v(1893)*v(901))
v(1093)=v(1270)*v(1280)+v(177)*(v(104)*v(1227)+v(178)*v(610)+v(1893)*v(902))
v(1094)=v(1271)*v(1280)+v(177)*(v(104)*v(1228)+v(178)*v(611)+v(1893)*v(903))
v(1095)=v(1272)*v(1280)+v(177)*(v(104)*v(1229)+v(178)*v(612)+v(1893)*v(904))
v(1096)=v(1273)*v(1280)+v(177)*(v(104)*v(1230)+v(178)*v(613)+v(1893)*v(905))
v(1097)=v(1274)*v(1280)+v(177)*(v(104)*v(1231)+v(178)*v(614)+v(1893)*v(906))
v(1098)=v(1275)*v(1280)+v(177)*(v(104)*v(1232)+v(178)*v(615)+v(1893)*v(907))
v(1099)=v(1276)*v(1280)+v(177)*(v(104)*v(1233)+v(178)*v(616)+v(1893)*v(908))
v(1100)=v(1277)*v(1280)+v(177)*(v(104)*v(1234)+v(178)*v(617)+v(1893)*v(909))
v(168)=v(1280)*v(177)
v(1101)=v(1255)*v(1281)+v(1898)*v(910)
v(1102)=v(1257)*v(1281)+v(1898)*v(911)
v(1103)=v(1258)*v(1281)+v(1898)*v(912)
v(1104)=v(1259)*v(1281)+v(1898)*v(913)
v(1105)=v(1260)*v(1281)+v(1898)*v(914)
v(1106)=v(1261)*v(1281)+v(1898)*v(915)
v(1107)=v(1262)*v(1281)+v(1898)*v(916)
v(1108)=v(1263)*v(1281)+v(1898)*v(917)
v(1109)=v(1264)*v(1281)+v(1898)*v(918)
v(1110)=v(1265)*v(1281)+v(1898)*v(919)
v(1111)=v(1268)*v(1281)+v(177)*(v(112)*v(1225)+v(178)*v(620)+v(1893)*v(920))
v(1112)=v(1270)*v(1281)+v(177)*(v(112)*v(1227)+v(178)*v(623)+v(1893)*v(921))
v(1113)=v(1271)*v(1281)+v(177)*(v(112)*v(1228)+v(178)*v(625)+v(1893)*v(922))
v(1114)=v(1272)*v(1281)+v(177)*(v(112)*v(1229)+v(178)*v(628)+v(1893)*v(923))
v(1115)=v(1273)*v(1281)+v(177)*(v(112)*v(1230)+v(178)*v(631)+v(1893)*v(924))
v(1116)=v(1274)*v(1281)+v(177)*(v(112)*v(1231)+v(178)*v(633)+v(1893)*v(925))
v(1117)=v(1275)*v(1281)+v(177)*(v(112)*v(1232)+v(178)*v(636)+v(1893)*v(926))
v(1118)=v(1276)*v(1281)+v(177)*(v(112)*v(1233)+v(178)*v(639)+v(1893)*v(927))
v(1119)=v(1277)*v(1281)+v(177)*(v(112)*v(1234)+v(178)*v(641)+v(1893)*v(928))
v(169)=v(1281)*v(177)
v(1120)=v(1255)*v(1282)+v(1898)*v(948)
v(1121)=v(1257)*v(1282)+v(1898)*v(949)
v(1122)=v(1258)*v(1282)+v(1898)*v(950)
v(1123)=v(1259)*v(1282)+v(1898)*v(951)
v(1124)=v(1260)*v(1282)+v(1898)*v(952)
v(1125)=v(1261)*v(1282)+v(1898)*v(953)
v(1126)=v(1262)*v(1282)+v(1898)*v(954)
v(1127)=v(1263)*v(1282)+v(1898)*v(955)
v(1128)=v(1264)*v(1282)+v(1898)*v(956)
v(1129)=v(1265)*v(1282)+v(1898)*v(957)
v(1130)=v(1268)*v(1282)+v(177)*(v(117)*v(1225)+v(178)*v(646)+v(1893)*v(958))
v(1131)=v(1270)*v(1282)+v(177)*(v(117)*v(1227)+v(178)*v(649)+v(1893)*v(959))
v(1132)=v(1271)*v(1282)+v(177)*(v(117)*v(1228)+v(178)*v(652)+v(1893)*v(960))
v(1133)=v(1272)*v(1282)+v(177)*(v(117)*v(1229)+v(178)*v(657)+v(1893)*v(961))
v(1134)=v(1273)*v(1282)+v(177)*(v(117)*v(1230)+v(178)*v(660)+v(1893)*v(962))
v(1135)=v(1274)*v(1282)+v(177)*(v(117)*v(1231)+v(178)*v(663)+v(1893)*v(963))
v(1136)=v(1275)*v(1282)+v(177)*(v(117)*v(1232)+v(178)*v(668)+v(1893)*v(964))
v(1137)=v(1276)*v(1282)+v(177)*(v(117)*v(1233)+v(178)*v(671)+v(1893)*v(965))
v(1138)=v(1277)*v(1282)+v(177)*(v(117)*v(1234)+v(178)*v(674)+v(1893)*v(966))
v(170)=v(1282)*v(177)
v(1139)=v(1255)*v(1283)+v(1898)*v(986)
v(1140)=v(1257)*v(1283)+v(1898)*v(987)
v(1141)=v(1258)*v(1283)+v(1898)*v(988)
v(1142)=v(1259)*v(1283)+v(1898)*v(989)
v(1143)=v(1260)*v(1283)+v(1898)*v(990)
v(1144)=v(1261)*v(1283)+v(1898)*v(991)
v(1145)=v(1262)*v(1283)+v(1898)*v(992)
v(1146)=v(1263)*v(1283)+v(1898)*v(993)
v(1147)=v(1264)*v(1283)+v(1898)*v(994)
v(1148)=v(1265)*v(1283)+v(1898)*v(995)
v(1149)=v(1268)*v(1283)+v(177)*(v(119)*v(1225)+v(178)*v(677)+v(1893)*v(996))
v(1150)=v(1270)*v(1283)+v(177)*(v(119)*v(1227)+v(178)*v(683)+v(1893)*v(997))
v(1151)=v(1271)*v(1283)+v(177)*(v(119)*v(1228)+v(178)*v(686)+v(1893)*v(998))
v(1152)=v(1272)*v(1283)+v(177)*(v(119)*v(1229)+v(178)*v(689)+v(1893)*v(999))
v(1153)=v(1273)*v(1283)+v(177)*(v(119)*v(1230)+v(1000)*v(1893)+v(178)*v(695))
v(1154)=v(1274)*v(1283)+v(177)*(v(119)*v(1231)+v(1001)*v(1893)+v(178)*v(698))
v(1155)=v(1275)*v(1283)+v(177)*(v(119)*v(1232)+v(1002)*v(1893)+v(178)*v(701))
v(1156)=v(1276)*v(1283)+v(177)*(v(119)*v(1233)+v(1003)*v(1893)+v(178)*v(707))
v(1157)=v(1277)*v(1283)+v(177)*(v(119)*v(1234)+v(1004)*v(1893)+v(178)*v(710))
v(171)=v(1283)*v(177)
v(1158)=v(1139)
v(1159)=v(1140)
v(1160)=v(1141)
v(1161)=v(1142)
v(1162)=v(1143)
v(1163)=v(1144)
v(1164)=v(1145)
v(1165)=v(1146)
v(1166)=v(1147)
v(1167)=v(1148)
v(1168)=v(1149)
v(1169)=v(1150)
v(1170)=v(1151)
v(1171)=v(1152)
v(1172)=v(1153)
v(1173)=v(1154)
v(1174)=v(1155)
v(1175)=v(1156)
v(1176)=v(1157)
v(172)=v(171)
v(1177)=v(1101)
v(1178)=v(1102)
v(1179)=v(1103)
v(1180)=v(1104)
v(1181)=v(1105)
v(1182)=v(1106)
v(1183)=v(1107)
v(1184)=v(1108)
v(1185)=v(1109)
v(1186)=v(1110)
v(1187)=v(1111)
v(1188)=v(1112)
v(1189)=v(1113)
v(1190)=v(1114)
v(1191)=v(1115)
v(1192)=v(1116)
v(1193)=v(1117)
v(1194)=v(1118)
v(1195)=v(1119)
v(173)=v(169)
v(1196)=v(1120)
v(1197)=v(1121)
v(1198)=v(1122)
v(1199)=v(1123)
v(1200)=v(1124)
v(1201)=v(1125)
v(1202)=v(1126)
v(1203)=v(1127)
v(1204)=v(1128)
v(1205)=v(1129)
v(1206)=v(1130)
v(1207)=v(1131)
v(1208)=v(1132)
v(1209)=v(1133)
v(1210)=v(1134)
v(1211)=v(1135)
v(1212)=v(1136)
v(1213)=v(1137)
v(1214)=v(1138)
v(174)=v(170)
ENDIF
dRdX(1,1)=-v(768)
dRdX(1,2)=v(1899)*(v(1845)*v(1903)+v(1846)*v(1904)+v(1847)*v(1905)+v(1900)*v(741)+v(1901)*v(750)+v(1902)*v(759))
dRdX(1,3)=v(1899)*(v(1842)*v(1903)+v(1843)*v(1904)+v(1844)*v(1905)+v(1900)*v(742)+v(1901)*v(751)+v(1902)*v(760))
dRdX(1,4)=v(1899)*(v(1839)*v(1903)+v(1840)*v(1904)+v(1841)*v(1905)+v(1900)*v(743)+v(1901)*v(752)+v(1902)*v(761))
dRdX(1,5)=v(1899)*(v(1836)*v(1903)+v(1837)*v(1904)+v(1838)*v(1905)+v(1900)*v(744)+v(1901)*v(753)+v(1902)*v(762))
dRdX(1,6)=v(1899)*(v(1833)*v(1903)+v(1834)*v(1904)+v(1835)*v(1905)+v(1900)*v(745)+v(1901)*v(754)+v(1902)*v(763))
dRdX(1,7)=v(1899)*(v(1830)*v(1903)+v(1831)*v(1904)+v(1832)*v(1905)+v(1900)*v(746)+v(1901)*v(755)+v(1902)*v(764))
dRdX(1,8)=v(1899)*(v(1827)*v(1903)+v(1828)*v(1904)+v(1829)*v(1905)+v(1900)*v(747)+v(1901)*v(756)+v(1902)*v(765))
dRdX(1,9)=v(1899)*(v(1824)*v(1903)+v(1825)*v(1904)+v(1826)*v(1905)+v(1900)*v(748)+v(1901)*v(757)+v(1902)*v(766))
dRdX(1,10)=v(1899)*(v(1821)*v(1903)+v(1822)*v(1904)+v(1823)*v(1905)+v(1900)*v(749)+v(1901)*v(758)+v(1902)*v(767))
dRdX(1,11)=v(1899)*(-(v(1905)*v(589))-v(1903)*v(599)-v(1904)*v(609)-v(1900)*v(620)-v(1901)*v(646)-v(1902)*v(677))
dRdX(1,12)=v(1899)*(-(v(1905)*v(590))-v(1903)*v(600)-v(1904)*v(610)-v(1900)*v(623)-v(1901)*v(649)-v(1902)*v(683))
dRdX(1,13)=v(1899)*(-(v(1905)*v(591))-v(1903)*v(601)-v(1904)*v(611)-v(1900)*v(625)-v(1901)*v(652)-v(1902)*v(686))
dRdX(1,14)=v(1899)*(-(v(1905)*v(592))-v(1903)*v(602)-v(1904)*v(612)-v(1900)*v(628)-v(1901)*v(657)-v(1902)*v(689))
dRdX(1,15)=v(1899)*(-(v(1905)*v(593))-v(1903)*v(603)-v(1904)*v(613)-v(1900)*v(631)-v(1901)*v(660)-v(1902)*v(695))
dRdX(1,16)=v(1899)*(-(v(1905)*v(594))-v(1903)*v(604)-v(1904)*v(614)-v(1900)*v(633)-v(1901)*v(663)-v(1902)*v(698))
dRdX(1,17)=v(1899)*(-(v(1905)*v(595))-v(1903)*v(605)-v(1904)*v(615)-v(1900)*v(636)-v(1901)*v(668)-v(1902)*v(701))
dRdX(1,18)=v(1899)*(-(v(1905)*v(596))-v(1903)*v(606)-v(1904)*v(616)-v(1900)*v(639)-v(1901)*v(671)-v(1902)*v(707))
dRdX(1,19)=v(1899)*(-(v(1905)*v(597))-v(1903)*v(607)-v(1904)*v(617)-v(1900)*v(641)-v(1901)*v(674)-v(1902)*v(710))
dRdX(2,1)=-v(133)-v(1729)*v(853)
dRdX(2,2)=v(1906)*v(194)-v(1729)*v(854)
dRdX(2,3)=v(1356)+v(1357)+v(1358)+v(1907)*v(196)-v(1729)*v(855)
dRdX(2,4)=v(1449)+v(1450)+v(1451)+v(1910)*v(197)-v(1729)*v(856)
dRdX(2,5)=v(1906)*v(198)-v(1729)*v(857)
dRdX(2,6)=v(1440)+v(1441)+v(1442)+v(1910)*v(199)-v(1729)*v(858)
dRdX(2,7)=-v(1404)-v(1405)+v(1406)+v(1912)*v(200)-v(1729)*v(859)
dRdX(2,8)=v(1906)*v(201)-v(1729)*v(860)
dRdX(2,9)=-v(1467)-v(1468)+v(1469)+v(1912)*v(202)-v(1729)*v(861)
dRdX(2,10)=-v(1503)-v(1504)+v(1505)+v(1907)*v(203)-v(1729)*v(862)
dRdX(2,11)=-(v(1729)*v(863))
dRdX(2,12)=-(v(1729)*v(864))
dRdX(2,13)=-(v(1729)*v(865))
dRdX(2,14)=-(v(1729)*v(866))
dRdX(2,15)=-(v(1729)*v(867))
dRdX(2,16)=-(v(1729)*v(868))
dRdX(2,17)=-(v(1729)*v(869))
dRdX(2,18)=-(v(1729)*v(870))
dRdX(2,19)=-(v(1729)*v(871))
dRdX(3,1)=-v(136)-v(1729)*v(872)
dRdX(3,2)=v(1489)+v(1490)+v(1491)+v(1913)*v(194)-v(1729)*v(873)
dRdX(3,3)=v(1914)*v(196)-v(1729)*v(874)
dRdX(3,4)=v(1384)+v(1385)+v(1386)+v(1917)*v(197)-v(1729)*v(875)
dRdX(3,5)=-v(1351)-v(1352)+v(1353)+v(1919)*v(198)-v(1729)*v(876)
dRdX(3,6)=v(1914)*v(199)-v(1729)*v(877)
dRdX(3,7)=v(1477)+v(1478)+v(1479)+v(1913)*v(200)-v(1729)*v(878)
dRdX(3,8)=-v(1432)-v(1433)+v(1434)+v(1917)*v(201)-v(1729)*v(879)
dRdX(3,9)=v(1914)*v(202)-v(1729)*v(880)
dRdX(3,10)=-v(1507)-v(1508)+v(1509)+v(1919)*v(203)-v(1729)*v(881)
dRdX(3,11)=-(v(1729)*v(882))
dRdX(3,12)=-(v(1729)*v(883))
dRdX(3,13)=-(v(1729)*v(884))
dRdX(3,14)=-(v(1729)*v(885))
dRdX(3,15)=-(v(1729)*v(886))
dRdX(3,16)=-(v(1729)*v(887))
dRdX(3,17)=-(v(1729)*v(888))
dRdX(3,18)=-(v(1729)*v(889))
dRdX(3,19)=-(v(1729)*v(890))
dRdX(4,1)=-v(138)-v(1729)*v(891)
dRdX(4,2)=v(1412)+v(1413)+v(1414)+v(1920)*v(194)-v(1729)*v(892)
dRdX(4,3)=v(1526)+v(1527)+v(1528)+v(1922)*v(196)-v(1729)*v(893)
dRdX(4,4)=v(1924)*v(197)-v(1729)*v(894)
dRdX(4,5)=v(1517)+v(1518)+v(1519)+v(1922)*v(198)-v(1729)*v(895)
dRdX(4,6)=-v(1379)-v(1380)+v(1381)+v(1926)*v(199)-v(1729)*v(896)
dRdX(4,7)=v(1924)*v(200)-v(1729)*v(897)
dRdX(4,8)=-v(1436)-v(1437)+v(1438)+v(1926)*v(201)-v(1729)*v(898)
dRdX(4,9)=-v(1472)-v(1473)+v(1474)+v(1920)*v(202)-v(1729)*v(899)
dRdX(4,10)=v(1924)*v(203)-v(1729)*v(900)
dRdX(4,11)=-(v(1729)*v(901))
dRdX(4,12)=-(v(1729)*v(902))
dRdX(4,13)=-(v(1729)*v(903))
dRdX(4,14)=-(v(1729)*v(904))
dRdX(4,15)=-(v(1729)*v(905))
dRdX(4,16)=-(v(1729)*v(906))
dRdX(4,17)=-(v(1729)*v(907))
dRdX(4,18)=-(v(1729)*v(908))
dRdX(4,19)=-(v(1729)*v(909))
dRdX(5,1)=v(929)
dRdX(5,2)=v(1467)+v(1468)+v(1469)+v(1927)*v(194)+v(930)
dRdX(5,3)=v(1928)*v(196)+v(931)
dRdX(5,4)=v(1389)+v(1390)+v(1391)+v(1931)*v(197)+v(932)
dRdX(5,5)=-v(1356)-v(1357)+v(1358)+v(1933)*v(198)+v(933)
dRdX(5,6)=v(1928)*v(199)+v(934)
dRdX(5,7)=v(1485)+v(1486)+v(1487)+v(1927)*v(200)+v(935)
dRdX(5,8)=-v(1440)-v(1441)+v(1442)+v(1931)*v(201)+v(936)
dRdX(5,9)=v(1928)*v(202)+v(937)
dRdX(5,10)=-v(1512)-v(1513)+v(1514)+v(1933)*v(203)+v(938)
dRdX(5,11)=v(939)
dRdX(5,12)=v(940)
dRdX(5,13)=v(941)
dRdX(5,14)=v(942)
dRdX(5,15)=v(943)
dRdX(5,16)=v(944)
dRdX(5,17)=v(945)
dRdX(5,18)=v(946)
dRdX(5,19)=v(947)
dRdX(6,1)=v(967)
dRdX(6,2)=v(1417)+v(1418)+v(1419)+v(1934)*v(194)+v(968)
dRdX(6,3)=v(1507)+v(1508)+v(1509)+v(1936)*v(196)+v(969)
dRdX(6,4)=v(1938)*v(197)+v(970)
dRdX(6,5)=v(1522)+v(1523)+v(1524)+v(1936)*v(198)+v(971)
dRdX(6,6)=-v(1384)-v(1385)+v(1386)+v(1940)*v(199)+v(972)
dRdX(6,7)=v(1938)*v(200)+v(973)
dRdX(6,8)=-v(1444)-v(1445)+v(1446)+v(1940)*v(201)+v(974)
dRdX(6,9)=-v(1477)-v(1478)+v(1479)+v(1934)*v(202)+v(975)
dRdX(6,10)=v(1938)*v(203)+v(976)
dRdX(6,11)=v(977)
dRdX(6,12)=v(978)
dRdX(6,13)=v(979)
dRdX(6,14)=v(980)
dRdX(6,15)=v(981)
dRdX(6,16)=v(982)
dRdX(6,17)=v(983)
dRdX(6,18)=v(984)
dRdX(6,19)=v(985)
dRdX(7,1)=v(1005)
dRdX(7,2)=v(1006)+v(194)*v(1941)
dRdX(7,3)=v(1007)+v(1364)+v(1365)+v(1366)+v(1942)*v(196)
dRdX(7,4)=v(1008)+v(1436)+v(1437)+v(1438)+v(1945)*v(197)
dRdX(7,5)=v(1009)+v(1941)*v(198)
dRdX(7,6)=v(1010)+v(1454)+v(1455)+v(1456)+v(1945)*v(199)
dRdX(7,7)=v(1011)-v(1412)-v(1413)+v(1414)+v(1947)*v(200)
dRdX(7,8)=v(1012)+v(1941)*v(201)
dRdX(7,9)=v(1013)-v(1481)-v(1482)+v(1483)+v(1947)*v(202)
dRdX(7,10)=v(1014)-v(1517)-v(1518)+v(1519)+v(1942)*v(203)
dRdX(7,11)=v(1015)
dRdX(7,12)=v(1016)
dRdX(7,13)=v(1017)
dRdX(7,14)=v(1018)
dRdX(7,15)=v(1019)
dRdX(7,16)=v(1020)
dRdX(7,17)=v(1021)
dRdX(7,18)=v(1022)
dRdX(7,19)=v(1023)
dRdX(8,1)=v(1005)
dRdX(8,2)=v(1006)+v(1404)+v(1405)+v(1406)+v(194)*v(1948)
dRdX(8,3)=v(1007)+v(1512)+v(1513)+v(1514)+v(1950)*v(196)
dRdX(8,4)=v(1008)+v(1952)*v(197)
dRdX(8,5)=v(1009)+v(1503)+v(1504)+v(1505)+v(1950)*v(198)
dRdX(8,6)=v(1010)-v(1389)-v(1390)+v(1391)+v(1954)*v(199)
dRdX(8,7)=v(1011)+v(1952)*v(200)
dRdX(8,8)=v(1012)-v(1449)-v(1450)+v(1451)+v(1954)*v(201)
dRdX(8,9)=v(1013)-v(1485)-v(1486)+v(1487)+v(1948)*v(202)
dRdX(8,10)=v(1014)+v(1952)*v(203)
dRdX(8,11)=v(1015)
dRdX(8,12)=v(1016)
dRdX(8,13)=v(1017)
dRdX(8,14)=v(1018)
dRdX(8,15)=v(1019)
dRdX(8,16)=v(1020)
dRdX(8,17)=v(1021)
dRdX(8,18)=v(1022)
dRdX(8,19)=v(1023)
dRdX(9,1)=v(929)
dRdX(9,2)=v(194)*v(1955)+v(930)
dRdX(9,3)=v(1351)+v(1352)+v(1353)+v(1956)*v(196)+v(931)
dRdX(9,4)=v(1444)+v(1445)+v(1446)+v(1959)*v(197)+v(932)
dRdX(9,5)=v(1955)*v(198)+v(933)
dRdX(9,6)=v(1432)+v(1433)+v(1434)+v(1959)*v(199)+v(934)
dRdX(9,7)=-v(1417)-v(1418)+v(1419)+v(1961)*v(200)+v(935)
dRdX(9,8)=v(1955)*v(201)+v(936)
dRdX(9,9)=-v(1489)-v(1490)+v(1491)+v(1961)*v(202)+v(937)
dRdX(9,10)=-v(1522)-v(1523)+v(1524)+v(1956)*v(203)+v(938)
dRdX(9,11)=v(939)
dRdX(9,12)=v(940)
dRdX(9,13)=v(941)
dRdX(9,14)=v(942)
dRdX(9,15)=v(943)
dRdX(9,16)=v(944)
dRdX(9,17)=v(945)
dRdX(9,18)=v(946)
dRdX(9,19)=v(947)
dRdX(10,1)=v(967)
dRdX(10,2)=v(1481)+v(1482)+v(1483)+v(194)*v(1962)+v(968)
dRdX(10,3)=v(196)*v(1963)+v(969)
dRdX(10,4)=v(1379)+v(1380)+v(1381)+v(1966)*v(197)+v(970)
dRdX(10,5)=-v(1364)-v(1365)+v(1366)+v(1968)*v(198)+v(971)
dRdX(10,6)=v(1963)*v(199)+v(972)
dRdX(10,7)=v(1472)+v(1473)+v(1474)+v(1962)*v(200)+v(973)
dRdX(10,8)=-v(1454)-v(1455)+v(1456)+v(1966)*v(201)+v(974)
dRdX(10,9)=v(1963)*v(202)+v(975)
dRdX(10,10)=-v(1526)-v(1527)+v(1528)+v(1968)*v(203)+v(976)
dRdX(10,11)=v(977)
dRdX(10,12)=v(978)
dRdX(10,13)=v(979)
dRdX(10,14)=v(980)
dRdX(10,15)=v(981)
dRdX(10,16)=v(982)
dRdX(10,17)=v(983)
dRdX(10,18)=v(984)
dRdX(10,19)=v(985)
dRdX(11,1)=v(133)-v(166)-v(1729)*(v(1044)-v(853))
dRdX(11,2)=-(v(1729)*(v(1045)-v(854)))
dRdX(11,3)=-(v(1729)*(v(1046)-v(855)))
dRdX(11,4)=-(v(1729)*(v(1047)-v(856)))
dRdX(11,5)=-(v(1729)*(v(1048)-v(857)))
dRdX(11,6)=-(v(1729)*(v(1049)-v(858)))
dRdX(11,7)=-(v(1729)*(v(1050)-v(859)))
dRdX(11,8)=-(v(1729)*(v(1051)-v(860)))
dRdX(11,9)=-(v(1729)*(v(1052)-v(861)))
dRdX(11,10)=-(v(1729)*(v(1053)-v(862)))
dRdX(11,11)=v(1969)*v(209)-v(1729)*(v(1054)-v(863))
dRdX(11,12)=v(1584)+v(1585)+v(1586)+v(1970)*v(210)-v(1729)*(v(1055)-v(864))
dRdX(11,13)=v(1659)+v(1660)+v(1661)+v(1973)*v(211)-v(1729)*(v(1056)-v(865))
dRdX(11,14)=v(1969)*v(212)-v(1729)*(v(1057)-v(866))
dRdX(11,15)=v(1650)+v(1651)+v(1652)+v(1973)*v(213)-v(1729)*(v(1058)-v(867))
dRdX(11,16)=-v(1620)-v(1621)+v(1622)+v(1975)*v(214)-v(1729)*(v(1059)-v(868))
dRdX(11,17)=v(1969)*v(215)-v(1729)*(v(1060)-v(869))
dRdX(11,18)=-v(1671)-v(1672)+v(1673)+v(1975)*v(216)-v(1729)*(v(1061)-v(870))
dRdX(11,19)=-v(1701)-v(1702)+v(1703)+v(1970)*v(217)-v(1729)*(v(1062)-v(871))
dRdX(12,1)=v(136)-v(167)-v(1729)*(v(1063)-v(872))
dRdX(12,2)=-(v(1729)*(v(1064)-v(873)))
dRdX(12,3)=-(v(1729)*(v(1065)-v(874)))
dRdX(12,4)=-(v(1729)*(v(1066)-v(875)))
dRdX(12,5)=-(v(1729)*(v(1067)-v(876)))
dRdX(12,6)=-(v(1729)*(v(1068)-v(877)))
dRdX(12,7)=-(v(1729)*(v(1069)-v(878)))
dRdX(12,8)=-(v(1729)*(v(1070)-v(879)))
dRdX(12,9)=-(v(1729)*(v(1071)-v(880)))
dRdX(12,10)=-(v(1729)*(v(1072)-v(881)))
dRdX(12,11)=v(1693)+v(1694)+v(1695)+v(1976)*v(209)-v(1729)*(v(1073)-v(882))
dRdX(12,12)=v(1977)*v(210)-v(1729)*(v(1074)-v(883))
dRdX(12,13)=v(1606)+v(1607)+v(1608)+v(1980)*v(211)-v(1729)*(v(1075)-v(884))
dRdX(12,14)=-v(1579)-v(1580)+v(1581)+v(1982)*v(212)-v(1729)*(v(1076)-v(885))
dRdX(12,15)=v(1977)*v(213)-v(1729)*(v(1077)-v(886))
dRdX(12,16)=v(1681)+v(1682)+v(1683)+v(1976)*v(214)-v(1729)*(v(1078)-v(887))
dRdX(12,17)=-v(1642)-v(1643)+v(1644)+v(1980)*v(215)-v(1729)*(v(1079)-v(888))
dRdX(12,18)=v(1977)*v(216)-v(1729)*(v(1080)-v(889))
dRdX(12,19)=-v(1705)-v(1706)+v(1707)+v(1982)*v(217)-v(1729)*(v(1081)-v(890))
dRdX(13,1)=v(138)-v(168)-v(1729)*(v(1082)-v(891))
dRdX(13,2)=-(v(1729)*(v(1083)-v(892)))
dRdX(13,3)=-(v(1729)*(v(1084)-v(893)))
dRdX(13,4)=-(v(1729)*(v(1085)-v(894)))
dRdX(13,5)=-(v(1729)*(v(1086)-v(895)))
dRdX(13,6)=-(v(1729)*(v(1087)-v(896)))
dRdX(13,7)=-(v(1729)*(v(1088)-v(897)))
dRdX(13,8)=-(v(1729)*(v(1089)-v(898)))
dRdX(13,9)=-(v(1729)*(v(1090)-v(899)))
dRdX(13,10)=-(v(1729)*(v(1091)-v(900)))
dRdX(13,11)=v(1628)+v(1629)+v(1630)+v(1983)*v(209)-v(1729)*(v(1092)-v(901))
dRdX(13,12)=v(1724)+v(1725)+v(1726)+v(1985)*v(210)-v(1729)*(v(1093)-v(902))
dRdX(13,13)=v(1987)*v(211)-v(1729)*(v(1094)-v(903))
dRdX(13,14)=v(1715)+v(1716)+v(1717)+v(1985)*v(212)-v(1729)*(v(1095)-v(904))
dRdX(13,15)=-v(1601)-v(1602)+v(1603)+v(1989)*v(213)-v(1729)*(v(1096)-v(905))
dRdX(13,16)=v(1987)*v(214)-v(1729)*(v(1097)-v(906))
dRdX(13,17)=-v(1646)-v(1647)+v(1648)+v(1989)*v(215)-v(1729)*(v(1098)-v(907))
dRdX(13,18)=-v(1676)-v(1677)+v(1678)+v(1983)*v(216)-v(1729)*(v(1099)-v(908))
dRdX(13,19)=v(1987)*v(217)-v(1729)*(v(1100)-v(909))
dRdX(14,1)=v(139)-v(169)-v(1729)*(v(1101)-v(910))
dRdX(14,2)=-(v(1729)*(v(1102)-v(911)))
dRdX(14,3)=-(v(1729)*(v(1103)-v(912)))
dRdX(14,4)=-(v(1729)*(v(1104)-v(913)))
dRdX(14,5)=-(v(1729)*(v(1105)-v(914)))
dRdX(14,6)=-(v(1729)*(v(1106)-v(915)))
dRdX(14,7)=-(v(1729)*(v(1107)-v(916)))
dRdX(14,8)=-(v(1729)*(v(1108)-v(917)))
dRdX(14,9)=-(v(1729)*(v(1109)-v(918)))
dRdX(14,10)=-(v(1729)*(v(1110)-v(919)))
dRdX(14,11)=v(1671)+v(1672)+v(1673)+v(1990)*v(209)-v(1729)*(v(1111)-v(920))
dRdX(14,12)=v(1991)*v(210)-v(1729)*(v(1112)-v(921))
dRdX(14,13)=v(1611)+v(1612)+v(1613)+v(1994)*v(211)-v(1729)*(v(1113)-v(922))
dRdX(14,14)=-v(1584)-v(1585)+v(1586)+v(1996)*v(212)-v(1729)*(v(1114)-v(923))
dRdX(14,15)=v(1991)*v(213)-v(1729)*(v(1115)-v(924))
dRdX(14,16)=v(1689)+v(1690)+v(1691)+v(1990)*v(214)-v(1729)*(v(1116)-v(925))
dRdX(14,17)=-v(1650)-v(1651)+v(1652)+v(1994)*v(215)-v(1729)*(v(1117)-v(926))
dRdX(14,18)=v(1991)*v(216)-v(1729)*(v(1118)-v(927))
dRdX(14,19)=-v(1710)-v(1711)+v(1712)+v(1996)*v(217)-v(1729)*(v(1119)-v(928))
dRdX(15,1)=v(140)-v(170)-v(1729)*(v(1120)-v(948))
dRdX(15,2)=-(v(1729)*(v(1121)-v(949)))
dRdX(15,3)=-(v(1729)*(v(1122)-v(950)))
dRdX(15,4)=-(v(1729)*(v(1123)-v(951)))
dRdX(15,5)=-(v(1729)*(v(1124)-v(952)))
dRdX(15,6)=-(v(1729)*(v(1125)-v(953)))
dRdX(15,7)=-(v(1729)*(v(1126)-v(954)))
dRdX(15,8)=-(v(1729)*(v(1127)-v(955)))
dRdX(15,9)=-(v(1729)*(v(1128)-v(956)))
dRdX(15,10)=-(v(1729)*(v(1129)-v(957)))
dRdX(15,11)=v(1633)+v(1634)+v(1635)+v(1997)*v(209)-v(1729)*(v(1130)-v(958))
dRdX(15,12)=v(1705)+v(1706)+v(1707)+v(1999)*v(210)-v(1729)*(v(1131)-v(959))
dRdX(15,13)=v(2001)*v(211)-v(1729)*(v(1132)-v(960))
dRdX(15,14)=v(1720)+v(1721)+v(1722)+v(1999)*v(212)-v(1729)*(v(1133)-v(961))
dRdX(15,15)=-v(1606)-v(1607)+v(1608)+v(2003)*v(213)-v(1729)*(v(1134)-v(962))
dRdX(15,16)=v(2001)*v(214)-v(1729)*(v(1135)-v(963))
dRdX(15,17)=-v(1654)-v(1655)+v(1656)+v(2003)*v(215)-v(1729)*(v(1136)-v(964))
dRdX(15,18)=-v(1681)-v(1682)+v(1683)+v(1997)*v(216)-v(1729)*(v(1137)-v(965))
dRdX(15,19)=v(2001)*v(217)-v(1729)*(v(1138)-v(966))
dRdX(16,1)=v(141)-v(171)-v(1729)*(v(1139)-v(986))
dRdX(16,2)=-(v(1729)*(v(1140)-v(987)))
dRdX(16,3)=-(v(1729)*(v(1141)-v(988)))
dRdX(16,4)=-(v(1729)*(v(1142)-v(989)))
dRdX(16,5)=-(v(1729)*(v(1143)-v(990)))
dRdX(16,6)=-(v(1729)*(v(1144)-v(991)))
dRdX(16,7)=-(v(1729)*(v(1145)-v(992)))
dRdX(16,8)=-(v(1729)*(v(1146)-v(993)))
dRdX(16,9)=-(v(1729)*(v(1147)-v(994)))
dRdX(16,10)=-(v(1729)*(v(1148)-v(995)))
dRdX(16,11)=v(2004)*v(209)-v(1729)*(v(1149)-v(996))
dRdX(16,12)=v(1592)+v(1593)+v(1594)+v(2005)*v(210)-v(1729)*(v(1150)-v(997))
dRdX(16,13)=v(1646)+v(1647)+v(1648)+v(2008)*v(211)-v(1729)*(v(1151)-v(998))
dRdX(16,14)=v(2004)*v(212)-v(1729)*(v(1152)-v(999))
dRdX(16,15)=v(1664)+v(1665)+v(1666)-(-v(1000)+v(1153))*v(1729)+v(2008)*v(213)
dRdX(16,16)=-v(1628)-v(1629)+v(1630)-(-v(1001)+v(1154))*v(1729)+v(2010)*v(214)
dRdX(16,17)=-((-v(1002)+v(1155))*v(1729))+v(2004)*v(215)
dRdX(16,18)=-v(1685)-v(1686)+v(1687)-(-v(1003)+v(1156))*v(1729)+v(2010)*v(216)
dRdX(16,19)=-v(1715)-v(1716)+v(1717)-(-v(1004)+v(1157))*v(1729)+v(2005)*v(217)
dRdX(17,1)=v(141)-v(172)-v(1729)*(v(1158)-v(986))
dRdX(17,2)=-(v(1729)*(v(1159)-v(987)))
dRdX(17,3)=-(v(1729)*(v(1160)-v(988)))
dRdX(17,4)=-(v(1729)*(v(1161)-v(989)))
dRdX(17,5)=-(v(1729)*(v(1162)-v(990)))
dRdX(17,6)=-(v(1729)*(v(1163)-v(991)))
dRdX(17,7)=-(v(1729)*(v(1164)-v(992)))
dRdX(17,8)=-(v(1729)*(v(1165)-v(993)))
dRdX(17,9)=-(v(1729)*(v(1166)-v(994)))
dRdX(17,10)=-(v(1729)*(v(1167)-v(995)))
dRdX(17,11)=v(1620)+v(1621)+v(1622)+v(2011)*v(209)-v(1729)*(v(1168)-v(996))
dRdX(17,12)=v(1710)+v(1711)+v(1712)+v(2013)*v(210)-v(1729)*(v(1169)-v(997))
dRdX(17,13)=v(2015)*v(211)-v(1729)*(v(1170)-v(998))
dRdX(17,14)=v(1701)+v(1702)+v(1703)+v(2013)*v(212)-v(1729)*(v(1171)-v(999))
dRdX(17,15)=-v(1611)-v(1612)+v(1613)-(-v(1000)+v(1172))*v(1729)+v(2017)*v(213)
dRdX(17,16)=-((-v(1001)+v(1173))*v(1729))+v(2015)*v(214)
dRdX(17,17)=-v(1659)-v(1660)+v(1661)-(-v(1002)+v(1174))*v(1729)+v(2017)*v(215)
dRdX(17,18)=-v(1689)-v(1690)+v(1691)-(-v(1003)+v(1175))*v(1729)+v(2011)*v(216)
dRdX(17,19)=-((-v(1004)+v(1176))*v(1729))+v(2015)*v(217)
dRdX(18,1)=v(139)-v(173)-v(1729)*(v(1177)-v(910))
dRdX(18,2)=-(v(1729)*(v(1178)-v(911)))
dRdX(18,3)=-(v(1729)*(v(1179)-v(912)))
dRdX(18,4)=-(v(1729)*(v(1180)-v(913)))
dRdX(18,5)=-(v(1729)*(v(1181)-v(914)))
dRdX(18,6)=-(v(1729)*(v(1182)-v(915)))
dRdX(18,7)=-(v(1729)*(v(1183)-v(916)))
dRdX(18,8)=-(v(1729)*(v(1184)-v(917)))
dRdX(18,9)=-(v(1729)*(v(1185)-v(918)))
dRdX(18,10)=-(v(1729)*(v(1186)-v(919)))
dRdX(18,11)=v(2018)*v(209)-v(1729)*(v(1187)-v(920))
dRdX(18,12)=v(1579)+v(1580)+v(1581)+v(2019)*v(210)-v(1729)*(v(1188)-v(921))
dRdX(18,13)=v(1654)+v(1655)+v(1656)+v(2022)*v(211)-v(1729)*(v(1189)-v(922))
dRdX(18,14)=v(2018)*v(212)-v(1729)*(v(1190)-v(923))
dRdX(18,15)=v(1642)+v(1643)+v(1644)+v(2022)*v(213)-v(1729)*(v(1191)-v(924))
dRdX(18,16)=-v(1633)-v(1634)+v(1635)+v(2024)*v(214)-v(1729)*(v(1192)-v(925))
dRdX(18,17)=v(2018)*v(215)-v(1729)*(v(1193)-v(926))
dRdX(18,18)=-v(1693)-v(1694)+v(1695)+v(2024)*v(216)-v(1729)*(v(1194)-v(927))
dRdX(18,19)=-v(1720)-v(1721)+v(1722)+v(2019)*v(217)-v(1729)*(v(1195)-v(928))
dRdX(19,1)=v(140)-v(174)-v(1729)*(v(1196)-v(948))
dRdX(19,2)=-(v(1729)*(v(1197)-v(949)))
dRdX(19,3)=-(v(1729)*(v(1198)-v(950)))
dRdX(19,4)=-(v(1729)*(v(1199)-v(951)))
dRdX(19,5)=-(v(1729)*(v(1200)-v(952)))
dRdX(19,6)=-(v(1729)*(v(1201)-v(953)))
dRdX(19,7)=-(v(1729)*(v(1202)-v(954)))
dRdX(19,8)=-(v(1729)*(v(1203)-v(955)))
dRdX(19,9)=-(v(1729)*(v(1204)-v(956)))
dRdX(19,10)=-(v(1729)*(v(1205)-v(957)))
dRdX(19,11)=v(1685)+v(1686)+v(1687)+v(2025)*v(209)-v(1729)*(v(1206)-v(958))
dRdX(19,12)=v(2026)*v(210)-v(1729)*(v(1207)-v(959))
dRdX(19,13)=v(1601)+v(1602)+v(1603)+v(2029)*v(211)-v(1729)*(v(1208)-v(960))
dRdX(19,14)=-v(1592)-v(1593)+v(1594)+v(2031)*v(212)-v(1729)*(v(1209)-v(961))
dRdX(19,15)=v(2026)*v(213)-v(1729)*(v(1210)-v(962))
dRdX(19,16)=v(1676)+v(1677)+v(1678)+v(2025)*v(214)-v(1729)*(v(1211)-v(963))
dRdX(19,17)=-v(1664)-v(1665)+v(1666)+v(2029)*v(215)-v(1729)*(v(1212)-v(964))
dRdX(19,18)=v(2026)*v(216)-v(1729)*(v(1213)-v(965))
dRdX(19,19)=-v(1724)-v(1725)+v(1726)+v(2031)*v(217)-v(1729)*(v(1214)-v(966))
END
|
# The Unscented Kalman Filter (UKF) for Nonlinear Estimation Problems
*Cem Özen*, September 2017.
## The Kalman Filter
The Kalman filter is an algorithm that uses a series of past measurements of a system's state, together with a model of the system and the statistical noise and other inaccuracies affecting it, to estimate the system's current and future state. Its first use was on the Apollo missions to the Moon, but today Kalman filtering is used extensively in a vast array of applications, in fields ranging from robotics to econometrics.
The Kalman filter was originally formulated for linear systems. However, extensions of the method to nonlinear systems have also been developed, such as the extended Kalman filter (EKF) and the unscented Kalman filter (UKF).
In the **unscented Kalman filter (UKF)**, the state distribution is approximated by Gaussian random variables (GRVs), as in the extended Kalman filter (EKF). The two methods differ, however, in how the GRVs are propagated through the system dynamics: the EKF propagates them analytically through a first-order linearization of the nonlinear system, whereas the UKF uses a deterministic sampling approach in which a minimal set of sample points (so-called *sigma points*), chosen to capture the true mean and covariance of the GRV, is propagated through the *true* nonlinear system. The posterior mean and covariance of the EKF are accurate only to first order (in the sense of a Taylor expansion), which often yields sub-optimal performance, while the UKF achieves at least second-order accuracy; the UKF therefore presents a superior alternative to the EKF. Remarkably, this performance advantage does not come at extra computational cost.
## Problem Definition
In this notebook, I use the Kalman filter approach (specifically, the UKF) as a design tool for constructing a dynamical system with a desired type of behavior. Qualitatively, dynamical systems with attractors can exhibit three distinct types of dynamical behavior in the limit $t \rightarrow \infty$: *fixed points*, *oscillations*, and *chaos*.
Accordingly, a dynamical system with unknown parameters can be designed (i.e., its parameters can be inferred) so that it displays a desired dynamical behavior. In such a problem, the UKF is used in the context of *parameter estimation*.
The parameter estimation (inference) problem is formulated as a state-space model in which a nonlinear mapping,
$$
\begin{eqnarray*}
y_k & = & g(x_k, \theta_k)
\end{eqnarray*}
$$
with the input $x_k$, the output $y_k$, and the parameters to be inferred, $\theta_k$, is reformulated in the state-space representation:
$$
\begin{eqnarray*}
\theta_k & = & \theta_{k-1} + \nu_k \\
y_k & = & g(x_k, \theta_k) + u_k.
\end{eqnarray*}
$$
Above, $u_k \sim N(0, Q_k)$ represents the measurement noise and $\nu_k \sim N(0, R_k)$ is the artificial process noise that drives the system.
In the context of the current problem, that is, designing a dynamical system with a desired behavior, the nonlinear mapping $g(\cdot)$ will be a nonlinear numerical routine that outputs the dynamical behavior of the system. In dynamical systems theory, this behavior is encoded in the maximum of the Lyapunov exponents ($\lambda_{max}$) of the system. Hence, in our problem
$$
\begin{eqnarray*}
\theta_k & = & \theta_{k-1} + \nu_k \\
\lambda_{max} & = & \mathfrak{L}(\theta_k, y_0; f) + u_k,
\end{eqnarray*}
$$
where $\mathfrak{L}(\cdot)$ is the nonlinear mapping from the system parameters to the target dynamical behavior encoded by $\lambda_{max}$. Above, $y_0$ is the initial condition for the ($n$-dimensional) dynamical system, which evolves in time according to
$$
\frac{dy}{dt} = f(y; \theta),
$$
where $f$ is the vector field defining the dynamics. Also, notice that the role of our filter in this context will be that of a *smoother*, since at each step of the time series the observed 'data' remain the same: $(\lambda_{max}, \lambda_{max}, \lambda_{max}, \ldots)$.
**To summarize: given a dynamical system described by a vector field $f$, its initial conditions $y_0$, and a set of undetermined parameters $\theta$, we seek to infer the values of the parameters that drive the system to produce the desired behavior, as encoded by $\lambda_{max}$.**
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy
from scipy import integrate
import sympy
from IPython.display import display, Math, Latex
sympy.init_printing(use_latex='mathjax')
%matplotlib inline
```
### The Dynamical System
Here I apply the UKF technique for the purpose of parameter estimation in the Lorenz model, which serves as a good toy model for many applications involving chaos. The Lorenz system is given by the following autonomous dynamical system equations:
$$\begin{align*}
\dot{x} &= \sigma(y-x), \\
\dot{y} &= x(\rho-z)-y, \\
\dot{z} &= xy-\beta z.
\end{align*}$$
In his original paper [E. N. Lorenz, J. Atmos. Sci. 20, 130 (1963)], Lorenz used the parameters $\sigma = 10$, $\rho = 28$, and $\beta = 8/3$, for which the trajectories produce a strange attractor (i.e., chaotic dynamics). In our problem, this dynamical behavior (more precisely, the maximal Lyapunov exponent of the Lorenz system with this choice of parameter values) will be our target. We will initialize the parameter values so that the Lorenz model produces non-chaotic (say, fixed-point) dynamics. The unscented Kalman filter will then be used to drive the system from this non-chaotic behavior to the chaotic behavior defined by the target value of the maximal Lyapunov exponent; in the process, the parameter values are updated iteratively.
```python
class LorenzSystem:
def __init__(self, sigma = 10, rho = 28, beta = 8./3.):
self.sigma = sigma
self.rho = rho
self.beta = beta
def dx_dt(self, x, t = None):
return np.array([ self.sigma * (x[1] - x[0]),
x[0] * (self.rho - x[2]) - x[1],
x[0] * x[1] - self.beta * x[2]])
```
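As a quick usage sketch (my addition, not part of the original notebook; the variable names below are mine), the class can be handed directly to `scipy.integrate.odeint`, since `dx_dt(x, t)` already has the `f(x, t)` signature that `odeint` expects:
```python
# Sanity check of the LorenzSystem class (illustrative only).
lorenz_check = LorenzSystem()                          # default parameters sigma=10, rho=28, beta=8/3
print(lorenz_check.dx_dt(np.array([2., 0.5, 10.])))    # vector field evaluated at a sample state
t_check = np.arange(0., 20., 0.01)
traj = integrate.odeint(lorenz_check.dx_dt, [2., 0.5, 10.], t_check)  # trajectory, shape (len(t_check), 3)
```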
### ODE Solver
In order to integrate the dynamical system equations from given initial conditions, we could use several standard routines available in `scipy.integrate`. However, here I prefer an explicit implementation based on a 4th-order Runge-Kutta step.
```python
class ODESolver:
def __init__(self, f, dt):
""" f is function in the form f=f(x,t) """
self.f = f
self.dt = dt
def advance(self):
"""Advance solution one time step"""
raise NotImplementedError
def set_initial_condition(self, u0, t0=0.):
self.u = [] # u[k] is solution at time t[k]
self.t = [] # time levels in the solution process
self.u.append(u0)
self.t.append(t0)
self.k = 0 # time level counter
def solve(self, T, terminate=None):
""" Advance solution from t = t0 to t <= T, steps of dt
as long as terminate(u, t, k) is False.
terminate(u, t, k) is a user-given function returning True or False.
By default, a terminate function which always returns False is used """
if terminate is None:
terminate = lambda u, t, k: False
self.k = 0
tnew = 0
while tnew <= T and not terminate(self.u, self.t, self.k):
unew = self.advance()
self.u.append(unew)
tnew = self.t[-1] + self.dt
self.t.append(tnew)
self.k += 1
return np.array(self.u), np.array(self.t)
class RungeKutta4(ODESolver):
def advance(self):
u, dt, f, k, t = self.u, self.dt, self.f, self.k, self.t[-1]
dt2 = dt/2.0
k1 = dt * f(u[k], t)
k2 = dt * f(u[k] + 0.5*k1, t + dt2)
k3 = dt * f(u[k] + 0.5*k2, t + dt2)
k4 = dt * f(u[k] + k3, t + dt)
unew = u[k] + (1./6.)*(k1 + 2*k2 + 2*k3 + k4)
return unew
```
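As a usage sketch (my addition, not part of the original notebook; the variable names are mine), the RK4 solver above can integrate the Lorenz vector field defined earlier, and the resulting trajectory can be visualized as a 3-D curve:
```python
# Integrate the Lorenz system with the hand-written RK4 solver (illustrative only).
lorenz_rk4 = LorenzSystem()
rk4 = RungeKutta4(lorenz_rk4.dx_dt, dt=0.01)
rk4.set_initial_condition(np.array([2., 0.5, 10.]))
u_traj, t_traj = rk4.solve(T=40.0)                 # u_traj has shape (n_steps+1, 3)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')         # Axes3D was imported above
ax.plot(u_traj[:, 0], u_traj[:, 1], u_traj[:, 2], lw=0.5)
ax.set_xlabel('x'); ax.set_ylabel('y'); ax.set_zlabel('z')
plt.show()
```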
### Calculation of the Lyapunov Spectra
The concept of Lyapunov exponents was introduced in dynamical systems theory to measure the sensitivity of a system to its initial conditions. In an $n$-dimensional dynamical system, consider an initially orthonormal set of $n$ vectors in the tangent space at $y_0$. As the system evolves in time, the volume spanned by these initially orthonormal axes is distorted into an $n$-dimensional ellipsoid in the tangent space at each successive point of the trajectory. An algorithm due to Benettin et al. (see Parker and Chua, 1989, in the references) computes the average rate of growth, over the time evolution of the system, of the $i$th principal axis of this ellipsoid as $\lambda_i$. These quantities, sorted as $\lambda_1 \ge \lambda_2 \ge \ldots \ge \lambda_n$, are called the Lyapunov exponents. The sign of the largest of these exponents determines the asymptotic behavior of the system: systems with $\lambda_{max} < 0$ converge to a stable fixed point in phase space, systems with $\lambda_{max} = 0$ converge to an oscillatory solution defined by a limit cycle, and systems with $\lambda_{max} > 0$ converge to a limit set defined by a strange (i.e., chaotic) attractor.
In the Benettin algorithm, the Lyapunov spectrum is obtained iteratively and directly from the dynamical system equations---unlike various other methods that work with time series. Since the calculation of Lyapunov exponents is computationally expensive, in the following I implement the Benettin algorithm using the `SymPy` library to compute the derivatives (and hence the Jacobian matrix) reliably and quickly via symbolic computation.
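In symbols (the notation here is mine), the running estimate accumulated by the routine below after $k$ renormalization steps of length $T$ is
$$
\lambda_i \approx \frac{1}{kT} \sum_{j=1}^{k} \ln \left\lVert v_i^{(j)} \right\rVert ,
$$
where $v_i^{(j)}$ is the $i$th perturbation vector at the $j$th step, after Gram-Schmidt orthogonalization against the previous directions but before normalization.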
```python
def sym_to_np(x, t, params, xdot):
dim = xdot.shape[0]
eta = sympy.Matrix(sympy.symarray('eta', (dim,dim))) # perturbation matrix
Df = xdot.jacobian(x)
etadot = Df * eta
z = x.col_join(eta.reshape(dim*dim,1))
zdot = xdot.col_join(etadot.reshape(dim*dim,1))
# lambdification to obtain a NumPy-aware function to compute zdot:
mat2array = [{'ImmutableMatrix': np.array}, 'numpy']
zdot_ = sympy.lambdify((z, t, params), zdot, modules=mat2array)
zdot_np = lambda z, t, params: zdot_(z, t, params).reshape((dim*dim + dim,))
return zdot_np
def update_state_and_perturbation(zdot_np, params, x, u, tstart=0, tstop=1.0, dt=0.01, integrator='RK4'):
dim = x.shape[0]
Phi0 = np.eye(dim) # initial value for perturbation matrix Phi
Phi0_ = Phi0.reshape((dim*dim,))
z0 = np.concatenate((x, Phi0_)) # initial value for combined {x, Phi}
if integrator == 'RK4':
z_solver = RungeKutta4(lambda z, t:
zdot_np(z, t, params), dt)
z_solver.set_initial_condition(z0, tstart)
z, t = z_solver.solve(tstop)
elif integrator == 'ODEINT':
t = np.arange(tstart, tstop, dt) # time measurements
z = integrate.odeint(lambda z, t: zdot_np(z, t, params), z0, t)
x = z[-1, 0:dim]
Phi = z[-1, dim:dim*dim+dim].reshape((dim,dim))
dx = np.dot(Phi,u)
return x, dx
def lyapunov_gram_schimidt(zdot_np, params, x, T=0.2, dt=0.01, Er=1.e-4, Ea=1.e-4, kmax=1000,
integrator='RK4', complete=True, debug=False):
"""
Remarks:
1) Make sure the initial state array, x, is already on the attractor for
a chaotic system. It is best to externally evolve the system until the trajectory
is settled on the strange attractor, then an arbitrary value of the state can be taken
as an initial value for this program.
    2) How to choose a suitable value for T: too small a value means loss of accuracy
    due to excessive orthogonalization, while too large a value lets the numbers grow so
    large that numerical precision is lost. Suitable values are problem dependent.
"""
dim = x.shape[0] # dimension of state space
u = np.eye(dim) # initial perturbation matrix, Remark 2, p.79, Parker and Chua.
if complete is True: # True for computing all, False for computing maximum Lyapunov exp.
nums = dim
else:
nums = 1
lyaps = np.zeros((nums,)) # initialize lyapunov exponents
sum_ = np.zeros((nums,))
if debug is True: # True for storing Lyapunov exponent at each iteration
lyaps_evol = []
k = 0
while True:
k += 1
if (k == kmax):
print("lyapunov_gram_schimidt: no convergence!")
break
lyaps_old = lyaps.copy()
# dx is the linearized, tangent-space perturbation. We evolve the trajectory x
# and the perturbation together for a period of T. We output the result to perform a
# Gram-Schimidt orthonormalization.
x, dx = update_state_and_perturbation(zdot_np, params, x, u, tstart=0, tstop=T, dt=dt,
integrator=integrator)
# perform the orthonormalization and update Lyapunov exponents
for i in range(nums):
vi = dx[:, i]
for j in range(i):
vi = vi - np.dot(vi, u[:,j]) * u[:,j]
norm = np.linalg.norm(vi)
u[:,i] = vi / norm
sum_[i] += np.log(norm)
lyaps[i] = sum_[i] / (k * T)
#print("iter={:d} LE[{:d}] = {:6.3f}".format(k, i, lyaps[i]))
if debug is True:
lyaps_evol.append(np.copy(lyaps))
if (np.linalg.norm(lyaps_old - lyaps) < Er * np.linalg.norm(lyaps) + Ea):
break
if debug is True:
return np.array(lyaps_evol)
else:
return lyaps
```
In the following, we use the Benettin algorithm to compute only the maximal Lyapunov exponent, which alone determines the qualitative behavior of the system. Before moving on to the UKF method and its application, let's compute the maximal Lyapunov exponent of the Lorenz system for the parameter choice $\sigma=10$, $\rho=28$, and $\beta=8/3$:
```python
# Define the Lorenz system, SymPy way:
u = sympy.Matrix(sympy.symarray('u', 3)) # dynamic variable symbols
t, sigma, rho, beta = sympy.symbols("t, sigma, rho, beta") # time and system parameter symbols
params = sympy.Matrix([sigma, rho, beta]) # parameter vector
lorenz = sympy.Matrix([sigma * (u[1] - u[0]), u[0] * (rho - u[2]) - u[1], u[0] * u[1] - beta * u[2]])
# Produce the Numpy-aware function (state + perturbation) for the Lorenz system
lorenz_np = sym_to_np(u, t, params, lorenz)
# Now calculate the Lyapunov exponents of the Lorenz System for the chosen parameter set below.
x0 = np.array([ 2, 0.5, 10]) # initial condition for state
params = np.array([10., 28., 8./3.]) # Lorenz system parameters
lyap = lyapunov_gram_schimidt(zdot_np=lorenz_np, params=params, x=x0, T=1.0, dt=0.01, Er=1.e-4,Ea=1.e-4, kmax=10000,
integrator="RK4", complete=False, debug=False)
print("lambda_max = {:.3f}".format(float(lyap)))
```
lambda_max = 0.883
As a numerical check of the accuracy of the Benettin algorithm, compare the value obtained above, $\lambda_{max} = 0.883$, with the reported value of $\lambda_{max} = 0.906$ (Sprott, 1997). We can improve the accuracy by using a smaller $dt$ or by trying different values of the $T$ parameter in the Benettin algorithm (see the Parker and Chua reference), but we also want the Benettin algorithm to run reasonably fast, since it will be called many times inside the filter.
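A minimal sketch of such a sensitivity check (my addition; the $(T, dt)$ combinations below are arbitrary and this cell is not run in the original notebook):
```python
# Probe the sensitivity of lambda_max to the renormalization period T and step size dt
# (illustrative parameter combinations only; reuses lorenz_np, params, x0 from above).
for T_probe, dt_probe in [(0.5, 0.01), (1.0, 0.01), (1.0, 0.005)]:
    le = lyapunov_gram_schimidt(zdot_np=lorenz_np, params=params, x=x0,
                                T=T_probe, dt=dt_probe, Er=1.e-4, Ea=1.e-4,
                                kmax=10000, integrator="RK4",
                                complete=False, debug=False)
    print("T = {:.2f}, dt = {:.3f} -> lambda_max = {:.3f}".format(T_probe, dt_probe, float(le)))
```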
### The Unscented Kalman Filter
The main idea behind the unscented Kalman filter (UKF) is to generate a set of sampling points (the *sigma points*) around the current state estimate, based on its covariance, and to propagate these points through the nonlinear mapping in order to obtain a more accurate estimate of the mean and covariance. The application of the filter is an iterative procedure in which each iteration consists of a prediction step and an update step.
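Before turning to the filter equations, the following standalone sketch (my addition, not part of the original notebook) illustrates the sigma-point idea, i.e. the unscented transform, on a simple made-up nonlinearity: a Gaussian in polar coordinates is pushed through the polar-to-Cartesian map, and the weighted sigma points approximate the transformed mean and covariance without any linearization. The function name and the example are hypothetical, but the weights and scaling follow the same $\alpha$, $\beta$, $\kappa$ parametrization used in the `UKF` class below.
```python
import numpy as np

def unscented_transform_demo(mean, cov, f, alpha=1.0, beta=2.0, kappa=0.0):
    """Propagate a Gaussian (mean, cov) through a nonlinear map f using sigma points."""
    L = mean.shape[0]
    lam = alpha**2 * (L + kappa) - L
    # 2L+1 sigma points: the mean plus/minus the columns of a matrix square root of (L+lam)*cov
    S = np.linalg.cholesky((L + lam) * cov)              # lower-triangular Cholesky factor
    sigmas = np.vstack([mean, mean + S.T, mean - S.T])   # shape (2L+1, L)
    # weights for the mean and the covariance
    Wm = np.full(2 * L + 1, 1.0 / (2.0 * (L + lam)))
    Wc = Wm.copy()
    Wm[0] = lam / (L + lam)
    Wc[0] = Wm[0] + (1.0 - alpha**2 + beta)
    # push every sigma point through the *true* nonlinearity
    Y = np.array([f(s) for s in sigmas])
    y_mean = np.sum(Wm[:, None] * Y, axis=0)
    diff = Y - y_mean
    y_cov = (Wc[:, None, None] * diff[:, :, None] * diff[:, None, :]).sum(axis=0)
    return y_mean, y_cov

# illustrative example: polar (r, phi) -> Cartesian (x, y)
polar_to_cart = lambda p: np.array([p[0] * np.cos(p[1]), p[0] * np.sin(p[1])])
m, P = np.array([1.0, 0.5]), np.diag([0.01, 0.04])
print(unscented_transform_demo(m, P, polar_to_cart))
```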
In the "prediction step", we perturb the current parameter estimate by the driving process noise $\nu_k$ to obtain *a priori* estimates of the mean ($\hat{\theta}_k^{pr}$) and the covariance ($P_k^{pr}$) of the parameters, which are conditional on all but the current observation. In the "update step", we use the current observation ($y_k$) to calculate the *a posteriori* estimates of the mean ($\hat{\theta}_k^{po}$) and the covariance ($P_k^{po}$).
*Initialization*
$$\begin{eqnarray*}
\hat{\theta}_0^{po} & = & E(\theta) \\
P_0^{po} & = & E\left( (\hat{\theta}_0 - \theta) (\hat{\theta}_0 - \theta)^T \right)
\end{eqnarray*}$$
*Prediction*
$$\begin{eqnarray*}
\hat{\theta}_k^{pr} & = & E(\theta | y_{i \leq k-1}) \\
& = & \hat{\theta}_{k-1}^{po} \\
P_k^{pr} & = & P_{k-1}^{po} + R_{k-1}
\end{eqnarray*}$$
*Update*
$$\begin{eqnarray*}
\hat{\theta}_k^{po} & = & \hat{\theta}_k^{pr} + K_k(y_k - \hat{y}_k)\\
P_k^{po} & = & P_k^{pr} - K_k P_{\hat{y}_k} K_k^T
\end{eqnarray*}$$
where
$$\begin{eqnarray*}
Y_k & = & g(x_k, \Theta_k) \\
\hat{y}_k & = & \sum_{i=0}^{2L} \omega_i^m [Y_k]_i \\
P_{\hat{y}_k} & = & \sum_{i=0}^{2L} \omega_i^c ([Y_k]_i -\hat{y}_k ) ([Y_k]_i -\hat{y}_k )^T + Q_k \\
P_{\hat{\theta}^{pr}_k \hat{y}_k} & = & \sum_{i=0}^{2L} \omega_i^c ([\Theta_k]_i - \hat{\theta}_k^{pr}) ([Y_k]_i -\hat{y}_k )^T \\
K_k & = & P_{\hat{\theta}^{pr}_k \hat{y}_k} P_{\hat{y}_k}^{-1}
\end{eqnarray*}$$
The set of sigma points $\Theta_k$ and the corresponding weights are given by
$$\begin{align*}
[\Theta_k]_0 & = \hat{\theta}_k^{pr} & & & \omega_0^m & = \frac{\lambda}{L+\lambda} & i & =0 \\
[\Theta_k]_i & = \hat{\theta}_k^{pr} + \left[\sqrt{ (L+\lambda) P_k^{pr}}\right]_i & i & = 1, \ldots, L & \omega_0^c & = \frac{\lambda}{L + \lambda} + (1-\alpha^2 + \beta) & i & = 0 \\
[\Theta_k]_i & = \hat{\theta}_k^{pr} - \left[\sqrt{ (L+\lambda) P_k^{pr}}\right]_i & i & = L+1, \ldots, 2L & \omega_i^c & = \omega_i^m = \frac{1}{2(L + \lambda)} & i & = 1, \ldots, 2L
\end{align*}$$
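To make this construction concrete, here is a tiny standalone check with arbitrarily chosen numbers (a sketch, independent of the `UKF` class implemented below): the $2L+1$ sigma points, weighted by $\omega_i^m$, reproduce the mean exactly.
```python
import numpy as np
import scipy.linalg

L_dim = 2
alpha, kappa = 0.8, 0.0
lambda_ = alpha**2 * (L_dim + kappa) - L_dim
theta = np.array([1.0, -2.0])                      # mean of the parameter distribution
P = np.array([[0.3, 0.1],
              [0.1, 0.2]])                         # its covariance (positive definite)
U = scipy.linalg.cholesky((L_dim + lambda_) * P)   # upper-triangular matrix square root

Theta = np.vstack([theta]
                  + [theta + U[k] for k in range(L_dim)]
                  + [theta - U[k] for k in range(L_dim)])
Wm = np.full(2*L_dim + 1, 1.0 / (2*(L_dim + lambda_)))
Wm[0] = lambda_ / (L_dim + lambda_)
print(np.allclose(np.dot(Wm, Theta), theta))       # True: weighted sigma points recover the mean
```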
```python
class UKF:
def __init__(self, y_target, func, x0, theta0, P0, alpha, beta, kappa, Q, R):
"""func must return a numpy array"""
self.y_target = y_target
self.func = func # external function y=f(x0, *args). [ *list(theta0) -> *args ]
self.N, = y_target.shape # dimension of the observation space, y=f(x)
self.x0 = x0 # initial signal, numpy array of (M,)
self.theta0 = theta0 # initial mean-values of parameters, numpy array of (L,)
self.P0 = P0 # initial covariance matrix, numpy array of (L,L)
self.L, = theta0.shape # dimension of parameter space
self.M, = x0.shape # dimension of signal space
self.alpha = alpha # UKF parameter
self.beta = beta # UKF parameter
self.kappa = kappa # UKF parameter
self.Q = Q # measurement noise covariance in the state-space model
self.R = R # artifical process noise covariance in the state-space model
self.lambda_ = self.alpha**2 * (self.L + self.kappa) - self.L
self.num_sigmas = 2*self.L + 1 # UKF parameter, number of sigma points
Wc = np.full(self.num_sigmas, 1./(2*(self.L + self.lambda_)))
Wm = Wc.copy()
Wm[0] = self.lambda_ / (self.L + self.lambda_)
Wc[0] = Wm[0] + (1. - self.alpha**2 + self.beta)
self.Wc = Wc
self.Wm = Wm
# store values
self.all_theta = None
self.all_P = None
self.all_y = None
def sigma_points(self, theta, P):
#returns sigma points for a distribution of parameters (Theta) and
#for distribution of measurements (Y)
# calculate the sigma points for the parameters
Theta = np.zeros((self.num_sigmas, self.L))
U = scipy.linalg.cholesky((self.L + self.lambda_) * P)
Theta[0] = theta
for k in range(self.L):
Theta[k + 1] = theta + U[k]
Theta[self.L + k + 1] = theta - U[k]
# calculate the sigma-points for the measurements
Y = np.empty((self.num_sigmas,self.N))
for i in range(self.num_sigmas):
Y[i] = self.func(self.x0, *list(Theta[i]))
return Theta, Y
def unscented_transform(self, Y):
# use unscented transform to get the mean and covariance for observations, y and Py
y = np.dot(self.Wm, Y)
Py = np.zeros((self.N, self.N))
for i in range(self.num_sigmas):
Py += self.Wc[i] * np.outer(Y[i] - y, Y[i] - y)
return y, Py
def update(self, theta_pr, P_pr):
# the a priori (predicted) mean and covariance are updated using the current observation
Theta, Y = self.sigma_points(theta_pr, P_pr)
# mean and covariance of prediction passed through UT
y, Py = self.unscented_transform(Y)
Py += self.Q
# compute cross variance of the state and the measurements
Pty = np.zeros((self.L, self.N))
for i in range(self.num_sigmas):
Pty += self.Wc[i] * np.outer(Theta[i] - theta_pr, Y[i] - y)
# compute Kalman gain
K = np.dot(Pty, scipy.linalg.inv(Py))
theta = theta_pr + np.dot(K, self.y_target - y)
P = P_pr - K.dot(Py).dot(K.T)
return theta, P
def estimate(self, x0, iter_):
self.all_theta = np.zeros((iter_, self.L))
self.all_P = np.zeros((iter_, self.L, self.L))
self.all_y = np.zeros((iter_, self.N))
theta = self.theta0 # initial value of mean.
P = self.P0 # initial value of cov.
self.all_theta[0,:] = self.theta0
self.all_P[0, :, :] = P
for i in range(1, iter_):
# predict step
theta_pr = theta
P_pr = P + self.R
# update step
theta, P = self.update(theta_pr, P_pr)
self.all_theta[i, :] = theta
self.all_P[i, :, :] = P
self.all_y[i, :] = self.func(x0, *list(theta))
if i % 50 == 0 : # let's print updated values of the parameters at certain intervals
print("iteration = {:4d}: sigma = {:.2f}, rho = {:.2f}, beta = {:.2f}".format(i, *list(theta) ))
```
## Application: Chaotification of the Lorenz System
The Lorenz system defined above will serve as a suitable toy model for a parameter estimation application. Let us initialize the Lorenz system in a non-chaotic regime (specifically, a stable fixed-point regime). Our goal will be to drive the system into a chaotic regime: we set a positive value of the maximal Lyapunov exponent as the target, and the UKF then updates the system parameters until the system exhibits the chaotic dynamics encoded by that target.
### A Technical Side Note: Constraining the Parameters in the UKF
The UKF formulation places no constraints on the system parameters; in general, each parameter can take any value in $(-\infty, +\infty)$. However, like the parameters of many other physical systems, the Lorenz model parameters need to be constrained to positive real numbers. Although this can be achieved by modifying the UKF equations, I opt for a quick-and-dirty approach that achieves the same end. The idea is to add an extra dimension to the observable function (i.e. the nonlinear mapping $g(.)$ in the general formulation) for every parameter to be constrained. These extra "observables" assign penalties (large positive values) whenever the UKF updates a parameter to a value outside the allowed window, which may be bounded on either or both sides by a finite value. As long as the parameter values remain within the boundaries, no penalty is applied.
```python
def penalty(x, lb=0, ub=1):
const = 100.
if x < lb :
return const * np.exp(abs(x-lb)**2)
elif x > ub:
return const * np.exp(abs(x-ub)**2)
else:
return 0.
def g(x, sigma, rho, beta):
"""
non-linear observable function
x : (3, ) numpy array holding state vector coordinates
theta: (3, ) numpy array holding values of sigma, rho and beta
"""
tstart, tstop, dt = 0., 10., 0.01
lorenz_model = LorenzSystem(sigma, rho, beta)
lorenz_model_derivatives = lorenz_model.dx_dt
lorenz_solver = RungeKutta4(lorenz_model_derivatives, dt)
lorenz_solver.set_initial_condition(x, tstart)
x, tt = lorenz_solver.solve(tstop)
x0 = x[-1,:] # The end point of the trajectory is the new initial point
# for all the computations below
# OBSERVABLES TO EXPORT
# maximal Lyapunov exponent
params = np.array([sigma, rho, beta])
lyap = lyapunov_gram_schimidt(zdot_np=lorenz_np, params=params, x=x0, T=1.0, dt=0.01, Er=1.e-4,Ea=1.e-4, kmax=10000,
integrator="RK4", complete=False, debug=False)
s_penalty = penalty(sigma, lb=0.5, ub=30)
r_penalty = penalty(rho, lb=0.5, ub=30)
b_penalty = penalty(beta, lb=0.5, ub=30)
penalties = np.array([s_penalty, r_penalty, b_penalty])
return np.concatenate((lyap, penalties))
```
Note that we modified the mapping $g(.)$, which normally would calculate only the maximal Lyapunov exponent, $\lambda_{max} = \mathfrak{L}(\theta_k, y_0; f)$, to return a NumPy array of shape (4,), since we append the three penalty values. In the code above, $x$ represents the initial condition $y_0$ in the Lorenz model's 3-dimensional phase space. In our problem $x$ remains constant: every time we update the parameters and obtain a new Lorenz model, the time evolution starts from this same initial condition.
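As a quick sanity check before wiring $g(.)$ into the filter (a sketch, assuming the `LorenzSystem`, `RungeKutta4`, `lorenz_np`, `penalty` and `lyapunov_gram_schimidt` objects defined earlier are available), we can verify the output shape and that in-bounds parameters incur no penalty:
```python
# Sketch: g returns a (4,) array [lambda_max, penalty_sigma, penalty_rho, penalty_beta].
out = g(np.array([2.0, 0.5, 10.0]), 10., 28., 8./3.)
print(out.shape)   # expected: (4,)
print(out)         # penalties should be zero for these in-bounds parameter values
```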
```python
# initialize state vector in Lorenz model
x0 = np.array([2.0, 0.5, 1.0]) # arbitrarily chosen
# initialize parameters (sigma, rho, beta):
theta0 = np.array([10.0, 10.0, 10.0]) # initial mean values of the parameters
P0 = 0.01*np.diag([1, 1, 1]) # initial state covariance
# initialize measurement and noise covariances
Q = 0.01*np.diag([1, 1, 1, 1]) # measurement noise covariance
R = 0.01*np.diag([1, 1, 1]) # process noise covariance
# initialize UKF parameters (see reference Labbe for explanations)
alpha = 0.8
beta = 2.
kappa = 0.
no_iterations=300 # number of iterations
# set the target dynamics
y_target = np.array([0.883, 0., 0., 0.]) # target value of maximal Lyapunov exponent (calculated above)
# create an instance of the filter for our problem
ukf = UKF(y_target, g, x0, theta0, P0, alpha, beta, kappa, Q, R)
```
### Parameter Estimation (Inference) Using the UKF
Now we are ready to run the filter to infer the parameter values of the Lorenz model that produce the chaotic dynamics encoded by a maximal Lyapunov exponent of 0.883.
```python
ukf.estimate(x0, no_iterations)
iterations = np.arange(no_iterations)
```
iteration = 50: sigma = 8.32, rho = 20.62, beta = 1.59
iteration = 100: sigma = 10.61, rho = 25.90, beta = 2.44
iteration = 150: sigma = 10.79, rho = 27.21, beta = 2.73
iteration = 200: sigma = 10.73, rho = 27.73, beta = 2.79
iteration = 250: sigma = 10.69, rho = 27.89, beta = 2.52
Let's print out initial and final values of the relevant quantities:
```python
print("Maximal Lyapunov exponent: target value vs achieved value")
print("y_initial = {:.3f}".format(g(x0, *list(theta0))[0]))
print("y_target = {:.3f}".format(y_target[0]))
print("y last = {:.3f}\n".format( ukf.all_y[-1,0]))
print("Inferred parameters: initial and achieved values (mean)")
print("sigma = {:.2f}, rho = {:.2f}, beta = {:.2f}".format(*list(ukf.theta0) ))
print("sigma = {:.2f}, rho = {:.2f}, beta = {:.2f}\n".format(*list(ukf.all_theta[-1,:]) ))
print("Inferred parameters: initial and achieved values (covariance)")
print("P init=", P0)
print("P last=", ukf.all_P[-1,:,:])
```
Maximal Lyapunov exponent: target value vs achieved value
y_initial = -2.719
y_target = 0.883
y last = 0.916
Inferred parameters: initial and achieved values (mean)
sigma = 10.00, rho = 10.00, beta = 10.00
sigma = 10.71, rho = 27.98, beta = 2.59
Inferred parameters: initial and achieved values (covariance)
P init= [[ 0.01 0. 0. ]
[ 0. 0.01 0. ]
[ 0. 0. 0.01]]
P last= [[ 0.92755344 0.06007742 -0.12421053]
[ 0.06007742 0.63554907 -0.11750524]
[-0.12421053 -0.11750524 0.15363307]]
To visualize how the UKF drives the system toward the desired output while inferring the system parameters, we integrate the Lorenz model equations twice: once with the initial parameter values and once with the parameter values obtained at the end.
```python
# integration of the Lorenz model (using the parameter values we started with)
tstart, tstop, dt = 0., 10., 0.01
x0 = np.array([2.0, 2.5, 5.0]) # initial values for Lorenz model state variables (x,y,z)--we keep this constant
params = list(ukf.all_theta[0,:]) # initial values for sigma, rho, beta
lorenz_model = LorenzSystem(*params)
lorenz_model_derivatives = lorenz_model.dx_dt
lorenz_solver = RungeKutta4(lorenz_model_derivatives, dt)
lorenz_solver.set_initial_condition(x0, tstart)
x_ti, t = lorenz_solver.solve(tstop)
# integration of the Lorenz model (using the parameter values obtained at the end of the UKF run)
# since we do not change x0, below we use the same x0 as above.
params = list(ukf.all_theta[-1,:]) # final values for sigma, rho, beta
lorenz_model = LorenzSystem(*params)
lorenz_model_derivatives = lorenz_model.dx_dt
lorenz_solver = RungeKutta4(lorenz_model_derivatives, dt)
lorenz_solver.set_initial_condition(x0, tstart)
x_tj, t = lorenz_solver.solve(tstop)
```
```python
fig, ax0 = plt.subplots()
fig.set_size_inches(13,5)
ax0.plot(iterations[1:], ukf.all_y[1:,0], lw=3)
#ax0.annotate('', fontsize = 12, xy = (3, -2), xycoords = 'data', \
# xytext=(40, -1.5), arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.1"))
#ax0.annotate('fixed point dynamics', xy = (40, -0.6), xycoords = 'data', fontsize = 12)
ax0.axhline(y=0, c="k")
ax0.axhline(y=y_target[0], c="r")
ax0.set_ylim(ymax=3)
ax0.set_xlabel("iterations", fontsize = 20)
ax0.set_ylabel(r"$\lambda_{max}$", fontsize = 20)
ax1 = fig.add_axes([0.20, 0.15, 0.08, 0.25], projection='3d') # x,y, width, height (all relative to 1)
ax1.axis("on")
ax1.xaxis.set_major_locator(plt.NullLocator())
ax1.yaxis.set_major_locator(plt.NullLocator())
ax1.zaxis.set_major_locator(plt.NullLocator())
ax1.plot(x_ti[:,0], x_ti[:,1], x_ti[:,2], 'r-', lw=2.0)
ax1.view_init(30, 20) # altitude and azimuth in degrees
ax2 = fig.add_axes([0.80, 0.62, 0.08, 0.25], projection='3d') # x,y, width, height (all relative to 1)
ax2.axis("on")
ax2.xaxis.set_major_locator(plt.NullLocator())
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.zaxis.set_major_locator(plt.NullLocator())
ax2.plot(x_tj[:,0], x_tj[:,1], x_tj[:,2], 'r-', lw=0.5)
ax2.view_init(30, 120) # altitude and azimuth in degrees
ax0.annotate('fixed point dynamics', xy = (60, -1.5), xycoords = 'data', fontsize = 12)
ax0.annotate('chaotic dynamics', xy = (220, 1.8), xycoords = 'data', fontsize = 12)
ax0.annotate(r'$\lambda_{max}^{target}$', xy = (1.02, 0.6), xycoords = 'axes fraction', fontsize = 20, color = "r")
```
The plot above shows how the UKF drives the system from an initial configuration in the fixed-point regime into the chaotic regime (the boundary between the two regimes is the $\lambda_{max}=0$ line, where the dynamics is a limit cycle). The parameters of the Lorenz model keep being updated within the chaotic regime until the model's $\lambda_{max}$ reaches $\lambda_{max}^{target}$.
```python
fig, ax = plt.subplots(1,3)
fig.subplots_adjust(wspace=0.5)
fig.set_size_inches(15, 3)
ax[0].set_xlabel("iterations", fontsize = 20)
ax[0].set_ylabel(r"$\sigma$", fontsize = 20)
ax[0].plot(iterations, ukf.all_theta[:,0])
ax[1].set_xlabel("iterations", fontsize = 20)
ax[1].set_ylabel(r"$\rho$", fontsize = 20)
ax[1].plot(iterations, ukf.all_theta[:,1])
ax[2].set_xlabel("iterations", fontsize = 20)
ax[2].set_ylabel(r"$\beta$", fontsize = 20)
ax[2].plot(iterations, ukf.all_theta[:,2])
```
The plots above show the inferred values of the parameters $\sigma$, $\rho$, and $\beta$ at successive UKF iterations.
## References:
1) Labbe, R. R. (2018). *Kalman and Bayesian Filters in Python*. (available online) <br>
2) Parker, T.S. and Chua, L.O. (1989). *Practical Numerical Algorithms for Chaotic Systems*. New York, USA: Springer-Verlag. <br>
3) Silk et al. (2011). *Designing attractive models via automated identification of chaotic and oscillatory dynamical regimes*. Nature Communications, 2:489. <br>
4) Sprott, J.C. (1997). *Lyapunov Exponent and Dimension of the Lorenz Attractor*, http://sprott.physics.wisc.edu/chaos/lorenzle.htm
|
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(** Constant propagation over RTL. This is one of the optimizations
performed at RTL level. It proceeds by a standard dataflow analysis
and the corresponding code rewriting. *)
Require Import Coqlib.
Require Import Maps.
Require Import AST.
Require Import Integers.
Require Import Floats.
Require Import Op.
Require Import Registers.
Require Import RTL.
Require Import Lattice.
Require Import Kildall.
Require Import Liveness.
Require Import ConstpropOp.
(** * Static analysis *)
(** The type [approx] of compile-time approximations of values is
defined in the machine-dependent part [ConstpropOp]. *)
(** We equip this type of approximations with a semi-lattice structure.
The ordering is inclusion between the sets of values denoted by
the approximations. *)
Module Approx <: SEMILATTICE_WITH_TOP.
Definition t := approx.
Definition eq (x y: t) := (x = y).
Definition eq_refl: forall x, eq x x := (@refl_equal t).
Definition eq_sym: forall x y, eq x y -> eq y x := (@sym_equal t).
Definition eq_trans: forall x y z, eq x y -> eq y z -> eq x z := (@trans_equal t).
Lemma eq_dec: forall (x y: t), {x=y} + {x<>y}.
Proof.
decide equality.
apply Int.eq_dec.
apply Float.eq_dec.
apply Int64.eq_dec.
apply Int.eq_dec.
apply ident_eq.
apply Int.eq_dec.
Defined.
Definition beq (x y: t) := if eq_dec x y then true else false.
Lemma beq_correct: forall x y, beq x y = true -> x = y.
Proof.
unfold beq; intros. destruct (eq_dec x y). auto. congruence.
Qed.
Definition ge (x y: t) : Prop := x = Unknown \/ y = Novalue \/ x = y.
Lemma ge_refl: forall x y, eq x y -> ge x y.
Proof.
unfold eq, ge; tauto.
Qed.
Lemma ge_trans: forall x y z, ge x y -> ge y z -> ge x z.
Proof.
unfold ge; intuition congruence.
Qed.
Lemma ge_compat: forall x x' y y', eq x x' -> eq y y' -> ge x y -> ge x' y'.
Proof.
unfold eq, ge; intros; congruence.
Qed.
Definition bot := Novalue.
Definition top := Unknown.
Lemma ge_bot: forall x, ge x bot.
Proof.
unfold ge, bot; tauto.
Qed.
Lemma ge_top: forall x, ge top x.
Proof.
unfold ge, bot; tauto.
Qed.
Definition lub (x y: t) : t :=
if eq_dec x y then x else
match x, y with
| Novalue, _ => y
| _, Novalue => x
| _, _ => Unknown
end.
Lemma ge_lub_left: forall x y, ge (lub x y) x.
Proof.
unfold lub; intros.
case (eq_dec x y); intro.
apply ge_refl. apply eq_refl.
destruct x; destruct y; unfold ge; tauto.
Qed.
Lemma ge_lub_right: forall x y, ge (lub x y) y.
Proof.
unfold lub; intros.
case (eq_dec x y); intro.
apply ge_refl. subst. apply eq_refl.
destruct x; destruct y; unfold ge; tauto.
Qed.
End Approx.
Module D := LPMap Approx.
(** We keep track of read-only global variables (i.e. "const" global
variables in C) as a map from their names to their initialization
data. *)
Definition global_approx : Type := PTree.t (list init_data).
(** Given some initialization data and a byte offset, compute a static
approximation of the result of a memory load from a memory block
initialized with this data. *)
Fixpoint eval_load_init (chunk: memory_chunk) (pos: Z) (il: list init_data): approx :=
match il with
| nil => Unknown
| Init_int8 n :: il' =>
if zeq pos 0 then
match chunk with
| Mint8unsigned => I (Int.zero_ext 8 n)
| Mint8signed => I (Int.sign_ext 8 n)
| _ => Unknown
end
else eval_load_init chunk (pos - 1) il'
| Init_int16 n :: il' =>
if zeq pos 0 then
match chunk with
| Mint16unsigned => I (Int.zero_ext 16 n)
| Mint16signed => I (Int.sign_ext 16 n)
| _ => Unknown
end
else eval_load_init chunk (pos - 2) il'
| Init_int32 n :: il' =>
if zeq pos 0
then match chunk with Mint32 => I n | _ => Unknown end
else eval_load_init chunk (pos - 4) il'
| Init_int64 n :: il' =>
if zeq pos 0
then match chunk with Mint64 => L n | _ => Unknown end
else eval_load_init chunk (pos - 8) il'
| Init_float32 n :: il' =>
if zeq pos 0
then match chunk with
| Mfloat32 => if propagate_float_constants tt then F (Float.singleoffloat n) else Unknown
| _ => Unknown
end
else eval_load_init chunk (pos - 4) il'
| Init_float64 n :: il' =>
if zeq pos 0
then match chunk with
| Mfloat64 => if propagate_float_constants tt then F n else Unknown
| _ => Unknown
end
else eval_load_init chunk (pos - 8) il'
| Init_addrof symb ofs :: il' =>
if zeq pos 0
then match chunk with Mint32 => G symb ofs | _ => Unknown end
else eval_load_init chunk (pos - 4) il'
| Init_space n :: il' =>
eval_load_init chunk (pos - Zmax n 0) il'
end.
(** Compute a static approximation for the result of a load at an address whose
approximation is known. If the approximation points to a global variable,
and this global variable is read-only, we use its initialization data
to determine a static approximation. Otherwise, [Unknown] is returned. *)
Definition eval_static_load (gapp: global_approx) (chunk: memory_chunk) (addr: approx) : approx :=
match addr with
| G symb ofs =>
match gapp!symb with
| None => Unknown
| Some il => eval_load_init chunk (Int.unsigned ofs) il
end
| _ => Unknown
end.
(** The transfer function for the dataflow analysis is straightforward.
For [Iop] instructions, we set the approximation of the destination
register to the result of executing abstractly the operation.
For [Iload] instructions, we set the approximation of the destination
register to the result of [eval_static_load].
For [Icall] and [Ibuiltin], the destination register becomes [Unknown].
Other instructions keep the approximations unchanged, as they preserve
the values of all registers. *)
Definition approx_reg (app: D.t) (r: reg) :=
D.get r app.
Definition approx_regs (app: D.t) (rl: list reg):=
List.map (approx_reg app) rl.
Definition transfer (gapp: global_approx) (f: function) (pc: node) (before: D.t) :=
match f.(fn_code)!pc with
| None => before
| Some i =>
match i with
| Iop op args res s =>
let a := eval_static_operation op (approx_regs before args) in
D.set res a before
| Iload chunk addr args dst s =>
let a := eval_static_load gapp chunk
(eval_static_addressing addr (approx_regs before args)) in
D.set dst a before
| Icall sig ros args res s =>
D.set res Unknown before
| Ibuiltin ef args res s =>
D.set res Unknown before
| _ =>
before
end
end.
(** To reduce the size of approximations, we preventively set to [Top]
the approximations of registers used for the last time in the
current instruction. *)
Definition transfer' (gapp: global_approx) (f: function) (lastuses: PTree.t (list reg))
(pc: node) (before: D.t) :=
let after := transfer gapp f pc before in
match lastuses!pc with
| None => after
| Some regs => List.fold_left (fun a r => D.set r Unknown a) regs after
end.
(** The static analysis itself is then an instantiation of Kildall's
generic solver for forward dataflow inequations. [analyze f]
returns a mapping from program points to mappings of pseudo-registers
to approximations. It can fail to reach a fixpoint in a reasonable
number of iterations, in which case we use the trivial mapping
(program point -> [D.top]) instead. *)
Module DS := Dataflow_Solver(D)(NodeSetForward).
Definition analyze (gapp: global_approx) (f: RTL.function): PMap.t D.t :=
let lu := Liveness.last_uses f in
match DS.fixpoint (successors f) (transfer' gapp f lu)
((f.(fn_entrypoint), D.top) :: nil) with
| None => PMap.init D.top
| Some res => res
end.
(** * Code transformation *)
(** The code transformation proceeds instruction by instruction.
Operators whose arguments are all statically known are turned
into ``load integer constant'', ``load float constant'' or
``load symbol address'' operations. Likewise for loads whose
result can be statically predicted. Operators for which some
but not all arguments are known are subject to strength reduction,
and similarly for the addressing modes of load and store instructions.
Conditional branches and multi-way branches are statically resolved
into [Inop] instructions if possible. Other instructions are unchanged.
In addition, we try to jump over conditionals whose condition can
be statically resolved based on the abstract state "after" the
instruction that branches to the conditional. A typical example is:
<<
1: x := 0 and goto 2
2: if (x == 0) goto 3 else goto 4
>>
where other instructions branch into 2 with different abstract values
for [x]. We transform this code into:
<<
1: x := 0 and goto 3
2: if (x == 0) goto 3 else goto 4
>>
*)
Definition transf_ros (app: D.t) (ros: reg + ident) : reg + ident :=
match ros with
| inl r =>
match D.get r app with
| G symb ofs => if Int.eq ofs Int.zero then inr _ symb else ros
| _ => ros
end
| inr s => ros
end.
Parameter generate_float_constants : unit -> bool.
Definition const_for_result (a: approx) : option operation :=
match a with
| I n => Some(Ointconst n)
| F n => if generate_float_constants tt then Some(Ofloatconst n) else None
| G symb ofs => Some(Oaddrsymbol symb ofs)
| S ofs => Some(Oaddrstack ofs)
| _ => None
end.
Fixpoint successor_rec (n: nat) (f: function) (app: D.t) (pc: node) : node :=
match n with
| O => pc
| Datatypes.S n' =>
match f.(fn_code)!pc with
| Some (Inop s) =>
successor_rec n' f app s
| Some (Icond cond args s1 s2) =>
match eval_static_condition cond (approx_regs app args) with
| Some b => if b then s1 else s2
| None => pc
end
| _ => pc
end
end.
Definition num_iter := 10%nat.
Definition successor (f: function) (app: D.t) (pc: node) : node :=
successor_rec num_iter f app pc.
Function annot_strength_reduction
(app: D.t) (targs: list annot_arg) (args: list reg) :=
match targs, args with
| AA_arg ty :: targs', arg :: args' =>
let (targs'', args'') := annot_strength_reduction app targs' args' in
match ty, approx_reg app arg with
| Tint, I n => (AA_int n :: targs'', args'')
| Tfloat, F n => (AA_float n :: targs'', args'')
| _, _ => (AA_arg ty :: targs'', arg :: args'')
end
| targ :: targs', _ =>
let (targs'', args'') := annot_strength_reduction app targs' args in
(targ :: targs'', args'')
| _, _ =>
(targs, args)
end.
Function builtin_strength_reduction
(app: D.t) (ef: external_function) (args: list reg) :=
match ef, args with
| EF_vload chunk, r1 :: nil =>
match approx_reg app r1 with
| G symb n1 => (EF_vload_global chunk symb n1, nil)
| _ => (ef, args)
end
| EF_vstore chunk, r1 :: r2 :: nil =>
match approx_reg app r1 with
| G symb n1 => (EF_vstore_global chunk symb n1, r2 :: nil)
| _ => (ef, args)
end
| EF_annot text targs, args =>
let (targs', args') := annot_strength_reduction app targs args in
(EF_annot text targs', args')
| _, _ =>
(ef, args)
end.
Definition transf_instr (gapp: global_approx) (f: function) (apps: PMap.t D.t)
(pc: node) (instr: instruction) :=
let app := apps!!pc in
match instr with
| Iop op args res s =>
let a := eval_static_operation op (approx_regs app args) in
let s' := successor f (D.set res a app) s in
match const_for_result a with
| Some cop =>
Iop cop nil res s'
| None =>
let (op', args') := op_strength_reduction op args (approx_regs app args) in
Iop op' args' res s'
end
| Iload chunk addr args dst s =>
let a := eval_static_load gapp chunk
(eval_static_addressing addr (approx_regs app args)) in
match const_for_result a with
| Some cop =>
Iop cop nil dst s
| None =>
let (addr', args') := addr_strength_reduction addr args (approx_regs app args) in
Iload chunk addr' args' dst s
end
| Istore chunk addr args src s =>
let (addr', args') := addr_strength_reduction addr args (approx_regs app args) in
Istore chunk addr' args' src s
| Icall sig ros args res s =>
Icall sig (transf_ros app ros) args res s
| Itailcall sig ros args =>
Itailcall sig (transf_ros app ros) args
| Ibuiltin ef args res s =>
let (ef', args') := builtin_strength_reduction app ef args in
Ibuiltin ef' args' res s
| Icond cond args s1 s2 =>
match eval_static_condition cond (approx_regs app args) with
| Some b =>
if b then Inop s1 else Inop s2
| None =>
let (cond', args') := cond_strength_reduction cond args (approx_regs app args) in
Icond cond' args' s1 s2
end
| Ijumptable arg tbl =>
match approx_reg app arg with
| I n =>
match list_nth_z tbl (Int.unsigned n) with
| Some s => Inop s
| None => instr
end
| _ => instr
end
| _ =>
instr
end.
Definition transf_code (gapp: global_approx) (f: function) (app: PMap.t D.t) (instrs: code) : code :=
PTree.map (transf_instr gapp f app) instrs.
Definition transf_function (gapp: global_approx) (f: function) : function :=
let approxs := analyze gapp f in
mkfunction
f.(fn_sig)
f.(fn_params)
f.(fn_stacksize)
(transf_code gapp f approxs f.(fn_code))
f.(fn_entrypoint).
Definition transf_fundef (gapp: global_approx) (fd: fundef) : fundef :=
AST.transf_fundef (transf_function gapp) fd.
Fixpoint make_global_approx (gapp: global_approx) (gdl: list (ident * globdef fundef unit)): global_approx :=
match gdl with
| nil => gapp
| (id, gl) :: gdl' =>
let gapp1 :=
match gl with
| Gfun f => PTree.remove id gapp
| Gvar gv =>
if gv.(gvar_readonly) && negb gv.(gvar_volatile)
then PTree.set id gv.(gvar_init) gapp
else PTree.remove id gapp
end in
make_global_approx gapp1 gdl'
end.
Definition transf_program (p: program) : program :=
let gapp := make_global_approx (PTree.empty _) p.(prog_defs) in
transform_program (transf_fundef gapp) p.
|
If $m \neq 0$, then $m x + c = y$ if and only if $x = \frac{1}{m} y - \frac{c}{m}$. |
lemma comp: "bounded_linear f \<Longrightarrow> bounded_linear g \<Longrightarrow> bounded_bilinear (\<lambda>x y. f x ** g y)" |
!-------------------------------------------------------------------------------
!> Module saturation process
!!
!! @par Description
!! This module is for saturation processes
!!
!! @author NICAM developers
!<
!-------------------------------------------------------------------------------
module mod_satadjust
!-----------------------------------------------------------------------------
!
!++ Used modules
!
use mod_precision
use mod_debug
!ESC! use mod_stdio
!ESC! use mod_prof
!-----------------------------------------------------------------------------
implicit none
private
!-----------------------------------------------------------------------------
!
!++ Public parameters
!
!-----------------------------------------------------------------------------
!
!++ Public variables
!
!-----------------------------------------------------------------------------
!
!++ Public procedures
!
public :: SATURATION_setup
public :: SATURATION_setrange
public :: SATURATION_alpha
public :: SATURATION_dalphadT
public :: SATURATION_psat_all
public :: SATURATION_psat_liq
public :: SATURATION_psat_ice
public :: SATURATION_qsat_liq
public :: SATURATION_qsat_ice
interface SATURATION_alpha
module procedure SATURATION_alpha_0D
module procedure SATURATION_alpha_1D
module procedure SATURATION_alpha_2D
end interface SATURATION_alpha
interface SATURATION_dalphadT
module procedure SATURATION_dalphadT_0D
module procedure SATURATION_dalphadT_1D
module procedure SATURATION_dalphadT_2D
end interface SATURATION_dalphadT
interface SATURATION_psat_all
module procedure SATURATION_psat_all_0D
module procedure SATURATION_psat_all_1D
module procedure SATURATION_psat_all_2D
end interface SATURATION_psat_all
interface SATURATION_psat_liq
module procedure SATURATION_psat_liq_0D
module procedure SATURATION_psat_liq_1D
module procedure SATURATION_psat_liq_2D
end interface SATURATION_psat_liq
interface SATURATION_psat_ice
module procedure SATURATION_psat_ice_0D
module procedure SATURATION_psat_ice_1D
module procedure SATURATION_psat_ice_2D
end interface SATURATION_psat_ice
interface SATURATION_qsat_liq
module procedure SATURATION_qsat_liq_0D
module procedure SATURATION_qsat_liq_1D
module procedure SATURATION_qsat_liq_2D
end interface SATURATION_qsat_liq
interface SATURATION_qsat_ice
module procedure SATURATION_qsat_ice_0D
module procedure SATURATION_qsat_ice_1D
module procedure SATURATION_qsat_ice_2D
end interface SATURATION_qsat_ice
public :: SATURATION_rh
public :: SATURATION_dewtem
public :: SATURATION_adjustment
public :: SATURATION_enthalpy
public :: moist_dqsw_dtem_rho
public :: moist_dqsi_dtem_rho
public :: moist_dqsw_dtem_dpre
public :: moist_dqsi_dtem_dpre
!-----------------------------------------------------------------------------
!
!++ Public parameters & variables
!
real(RP), public :: CPovR_liq
real(RP), public :: CPovR_ice
real(RP), public :: CVovR_liq
real(RP), public :: CVovR_ice
real(RP), public :: LovR_liq
real(RP), public :: LovR_ice
!-----------------------------------------------------------------------------
!
!++ Private procedures
!
private :: satadjust_all
private :: satadjust_liq
!-----------------------------------------------------------------------------
!
!++ Private parameters & variables
!
real(RP), private, parameter :: TEM_MIN = 10.0_RP !< minimum temperature [K]
real(RP), private, parameter :: DTEM_EPS0 = 1.0E-8_RP ! temperature convergence criterion
character(len=H_SHORT), private :: ALPHA_TYPE = 'LINEAR'
real(RP), private :: SATURATION_ULIMIT_TEMP = 273.15_RP !< upper limit temperature
real(RP), private :: SATURATION_LLIMIT_TEMP = 233.15_RP !< lower limit temperature
!-----------------------------------------------------------------------------
contains
!-----------------------------------------------------------------------------
!> Setup
subroutine SATURATION_setup
!ESC! use mod_adm, only: &
!ESC! ADM_proc_stop
!ESC! use mod_const, only: &
!ESC! CONST_Rvap, &
!ESC! CONST_CPvap, &
!ESC! CONST_CVvap, &
!ESC! CONST_CL, &
!ESC! CONST_CI
!ESC! use mod_runconf, only: &
!ESC! EIN_TYPE, &
!ESC! LHV, &
!ESC! LHS
implicit none
NAMELIST / SATURATIONPARAM / &
ALPHA_TYPE
integer :: ierr
!---------------------------------------------------------------------------
!ESC! !--- read parameters
!ESC! write(IO_FID_LOG,*)
!ESC! write(IO_FID_LOG,*) '+++ Module[saturation]/Category[nhm share]'
!ESC! rewind(IO_FID_CONF)
!ESC! read(IO_FID_CONF,nml=SATURATIONPARAM,iostat=ierr)
!ESC! if ( ierr < 0 ) then
!ESC! write(IO_FID_LOG,*) '*** SATURATIONPARAM is not specified. use default.'
!ESC! elseif( ierr > 0 ) then
!ESC! write(* ,*) 'xxx Not appropriate names in namelist SATURATIONPARAM. STOP.'
!ESC! write(IO_FID_LOG,*) 'xxx Not appropriate names in namelist SATURATIONPARAM. STOP.'
!ESC! call ADM_proc_stop
!ESC! endif
!ESC! write(IO_FID_LOG,nml=SATURATIONPARAM)
if ( EIN_TYPE == 'EXACT' ) then
CPovR_liq = ( CONST_CPvap - CONST_CL ) / CONST_Rvap
CPovR_ice = ( CONST_CPvap - CONST_CI ) / CONST_Rvap
CVovR_liq = ( CONST_CVvap - CONST_CL ) / CONST_Rvap
CVovR_ice = ( CONST_CVvap - CONST_CI ) / CONST_Rvap
elseif( EIN_TYPE == 'SIMPLE' &
.OR. EIN_TYPE == 'SIMPLE2' ) then
CPovR_liq = 0.0_RP
CPovR_ice = 0.0_RP
CVovR_liq = 0.0_RP
CVovR_ice = 0.0_RP
endif
LovR_liq = LHV / CONST_Rvap
LovR_ice = LHS / CONST_Rvap
return
end subroutine SATURATION_setup
!-----------------------------------------------------------------------------
subroutine SATURATION_setrange( Tw, Ti )
implicit none
real(RP), intent(in) :: Tw
real(RP), intent(in) :: Ti
!---------------------------------------------------------------------------
SATURATION_ULIMIT_TEMP = Tw
SATURATION_LLIMIT_TEMP = Ti
return
end subroutine SATURATION_setrange
!-----------------------------------------------------------------------------
!> calc liquid/ice separation factor (0D)
subroutine SATURATION_alpha_0D( &
tem, &
alpha )
!ESC! use mod_const, only: &
!ESC! CONST_PI
implicit none
real(RP), intent(in) :: tem !< temperature [K]
real(RP), intent(out) :: alpha !< liquid/ice separation factor [0-1]
real(RP) :: fact
real(RP) :: PI
!---------------------------------------------------------------------------
if ( ALPHA_TYPE == 'COS' ) then
PI = CONST_PI
if ( tem > SATURATION_ULIMIT_TEMP ) then
alpha = 1.0_RP
elseif( tem < SATURATION_LLIMIT_TEMP ) then
alpha = 0.0_RP
else
fact = ( SATURATION_ULIMIT_TEMP - tem ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha = 0.5_RP * ( 1.0_RP + cos( fact * PI ) )
endif
elseif( ALPHA_TYPE == 'LINEAR' ) then
fact = ( tem - SATURATION_LLIMIT_TEMP ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha = min( max( fact, 0.0_RP ), 1.0_RP )
endif
return
end subroutine SATURATION_alpha_0D
!-----------------------------------------------------------------------------
!> calc liquid/ice separation factor (1D)
subroutine SATURATION_alpha_1D( &
ijdim, &
tem, &
alpha )
!ESC! use mod_const, only: &
!ESC! CONST_PI
implicit none
integer, intent(in) :: ijdim
real(RP), intent(in) :: tem (ijdim) !< temperature [K]
real(RP), intent(out) :: alpha(ijdim) !< liquid/ice separation factor [0-1]
real(RP) :: fact
real(RP) :: PI
integer :: ij
!---------------------------------------------------------------------------
if ( ALPHA_TYPE == 'COS' ) then
PI = CONST_PI
!$omp parallel default(none),private(ij,fact), &
!$omp shared(ijdim,alpha,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP,PI)
!$omp do
do ij = 1, ijdim
fact = ( SATURATION_ULIMIT_TEMP - tem(ij) ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha(ij) = 0.5_RP * ( 1.0_RP + cos( fact * PI ) )
enddo
!$omp end do
!$omp do
do ij = 1, ijdim
if ( tem(ij) > SATURATION_ULIMIT_TEMP ) then
alpha(ij) = 1.0_RP
elseif( tem(ij) < SATURATION_LLIMIT_TEMP ) then
alpha(ij) = 0.0_RP
endif
enddo
!$omp end do
!$omp end parallel
elseif( ALPHA_TYPE == 'LINEAR' ) then
!$omp parallel do default(none),private(ij,fact), &
!$omp shared(ijdim,alpha,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP)
do ij = 1, ijdim
fact = ( tem(ij) - SATURATION_LLIMIT_TEMP ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha(ij) = min( max( fact, 0.0_RP ), 1.0_RP )
enddo
!$omp end parallel do
endif
return
end subroutine SATURATION_alpha_1D
!-----------------------------------------------------------------------------
!> calc liquid/ice separation factor (2D)
subroutine SATURATION_alpha_2D( &
ijdim, &
kdim, &
tem, &
alpha )
!ESC! use mod_const, only: &
!ESC! CONST_PI
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim) !< temperature [K]
real(RP), intent(out) :: alpha(ijdim,kdim) !< liquid/ice separation factor [0-1]
real(RP) :: fact
real(RP) :: PI
integer :: ij, k
!---------------------------------------------------------------------------
if ( ALPHA_TYPE == 'COS' ) then
PI = CONST_PI
!$omp parallel default(none),private(ij,k,fact), &
!$omp shared(ijdim,kdim,alpha,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP,PI)
do k = 1, kdim
!$omp do
do ij = 1, ijdim
fact = ( SATURATION_ULIMIT_TEMP - tem(ij,k) ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha(ij,k) = 0.5_RP * ( 1.0_RP + cos( fact * PI ) )
enddo
!$omp end do
!$omp do
do ij = 1, ijdim
if ( tem(ij,k) > SATURATION_ULIMIT_TEMP ) then
alpha(ij,k) = 1.0_RP
elseif( tem(ij,k) < SATURATION_LLIMIT_TEMP ) then
alpha(ij,k) = 0.0_RP
endif
enddo
!$omp end do
enddo
!$omp end parallel
elseif( ALPHA_TYPE == 'LINEAR' ) then
!$omp parallel do default(none),private(ij,k,fact), &
!$omp shared(ijdim,kdim,alpha,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP)
do k = 1, kdim
do ij = 1, ijdim
fact = ( tem(ij,k) - SATURATION_LLIMIT_TEMP ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha(ij,k) = min( max( fact, 0.0_RP ), 1.0_RP )
enddo
enddo
!$omp end parallel do
endif
return
end subroutine SATURATION_alpha_2D
!-----------------------------------------------------------------------------
!> calc d(alpha)/d(tem), 0D
subroutine SATURATION_dalphadT_0D( &
tem, &
dalpha_dT )
!ESC! use mod_const, only: &
!ESC! CONST_PI
implicit none
real(RP), intent(in) :: tem !< temperature [K]
real(RP), intent(out) :: dalpha_dT !< d(alpha)/d(T)
real(RP) :: lim1, lim2, fact
real(RP) :: PI
!---------------------------------------------------------------------------
if ( ALPHA_TYPE == 'COS' ) then
PI = CONST_PI
! if Tup < tem, dalpha/dT = 0 (no slope)
lim1 = 0.5_RP + sign( 0.5_RP, SATURATION_ULIMIT_TEMP - tem )
! if Tdn > tem, dalpha/dT = 0 (no slope)
lim2 = 0.5_RP + sign( 0.5_RP, tem - SATURATION_LLIMIT_TEMP )
fact = ( SATURATION_ULIMIT_TEMP - tem ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
dalpha_dT = lim1 * lim2 / ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP ) &
* 0.5_RP * PI * sin( fact * PI )
elseif( ALPHA_TYPE == 'LINEAR' ) then
! if Tup < tem, dalpha/dT = 0 (no slope)
lim1 = 0.5_RP + sign( 0.5_RP, SATURATION_ULIMIT_TEMP - tem )
! if Tdn > tem, dalpha/dT = 0 (no slope)
lim2 = 0.5_RP + sign( 0.5_RP, tem - SATURATION_LLIMIT_TEMP )
dalpha_dT = lim1 * lim2 / ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
endif
return
end subroutine SATURATION_dalphadT_0D
!-----------------------------------------------------------------------------
!> calc d(alpha)/d(tem), 1D
subroutine SATURATION_dalphadT_1D( &
ijdim, &
tem, &
dalpha_dT )
!ESC! use mod_const, only: &
!ESC! CONST_PI
implicit none
integer, intent(in) :: ijdim
real(RP), intent(in) :: tem (ijdim) !< temperature [K]
real(RP), intent(out) :: dalpha_dT(ijdim) !< d(alpha)/d(T)
real(RP) :: lim1, lim2, fact
real(RP) :: PI
integer :: ij
!---------------------------------------------------------------------------
if ( ALPHA_TYPE == 'COS' ) then
PI = CONST_PI
!$omp parallel do default(none),private(ij,fact,lim1,lim2), &
!$omp shared(ijdim,dalpha_dT,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP,PI)
do ij = 1, ijdim
! if Tup < tem, dalpha/dT = 0 (no slope)
lim1 = 0.5_RP + sign( 0.5_RP, SATURATION_ULIMIT_TEMP - tem(ij) )
! if Tdn > tem, dalpha/dT = 0 (no slope)
lim2 = 0.5_RP + sign( 0.5_RP, tem(ij) - SATURATION_LLIMIT_TEMP )
fact = ( SATURATION_ULIMIT_TEMP - tem(ij) ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
dalpha_dT(ij) = lim1 * lim2 / ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP ) &
* 0.5_RP * PI * sin( fact * PI )
enddo
!$omp end parallel do
elseif( ALPHA_TYPE == 'LINEAR' ) then
!$omp parallel do default(none),private(ij,lim1,lim2), &
!$omp shared(ijdim,dalpha_dT,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP)
do ij = 1, ijdim
! if Tup < tem, dalpha/dT = 0 (no slope)
lim1 = 0.5_RP + sign( 0.5_RP, SATURATION_ULIMIT_TEMP - tem(ij) )
! if Tdn > tem, dalpha/dT = 0 (no slope)
lim2 = 0.5_RP + sign( 0.5_RP, tem(ij) - SATURATION_LLIMIT_TEMP )
dalpha_dT(ij) = lim1 * lim2 / ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
enddo
!$omp end parallel do
endif
return
end subroutine SATURATION_dalphadT_1D
!-----------------------------------------------------------------------------
!> calc d(alpha)/d(temp), 2D
subroutine SATURATION_dalphadT_2D( &
ijdim, &
kdim, &
tem, &
dalpha_dT )
!ESC! use mod_const, only: &
!ESC! CONST_PI
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim) !< temperature [K]
real(RP), intent(out) :: dalpha_dT(ijdim,kdim) !< d(alpha)/d(T)
real(RP) :: lim1, lim2, fact
real(RP) :: PI
integer :: ij, k
!---------------------------------------------------------------------------
if ( ALPHA_TYPE == 'COS' ) then
PI = CONST_PI
!$omp parallel do default(none),private(ij,k,fact,lim1,lim2), &
!$omp shared(ijdim,kdim,dalpha_dT,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP,PI)
do k = 1, kdim
do ij = 1, ijdim
! if Tup < tem, dalpha/dT = 0 (no slope)
lim1 = 0.5_RP + sign( 0.5_RP, SATURATION_ULIMIT_TEMP - tem(ij,k) )
! if Tdn > tem, dalpha/dT = 0 (no slope)
lim2 = 0.5_RP + sign( 0.5_RP, tem(ij,k) - SATURATION_LLIMIT_TEMP )
fact = ( SATURATION_ULIMIT_TEMP - tem(ij,k) ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
dalpha_dT(ij,k) = lim1 * lim2 / ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP ) &
* 0.5_RP * PI * sin( fact * PI )
enddo
enddo
!$omp end parallel do
elseif( ALPHA_TYPE == 'LINEAR' ) then
!$omp parallel do default(none),private(ij,k,lim1,lim2), &
!$omp shared(ijdim,kdim,dalpha_dT,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP)
do k = 1, kdim
do ij = 1, ijdim
! if Tup < tem, dalpha/dT = 0 (no slope)
lim1 = 0.5_RP + sign( 0.5_RP, SATURATION_ULIMIT_TEMP - tem(ij,k) )
! if Tdn > tem, dalpha/dT = 0 (no slope)
lim2 = 0.5_RP + sign( 0.5_RP, tem(ij,k) - SATURATION_LLIMIT_TEMP )
dalpha_dT(ij,k) = lim1 * lim2 / ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
enddo
enddo
!$omp end parallel do
endif
return
end subroutine SATURATION_dalphadT_2D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure (liquid/ice mixture) (0D)
subroutine SATURATION_psat_all_0D( &
tem, &
psat )
implicit none
real(RP), intent(in) :: tem !< temperature [K]
real(RP), intent(out) :: psat !< saturation vapor pressure [Pa]
real(RP) :: alpha, psatl, psati
!---------------------------------------------------------------------------
call SATURATION_alpha ( tem, alpha )
call SATURATION_psat_liq( tem, psatl )
call SATURATION_psat_ice( tem, psati )
psat = psatl * ( alpha ) &
+ psati * ( 1.0_RP - alpha )
return
end subroutine SATURATION_psat_all_0D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure (liquid/ice mixture) (1D)
subroutine SATURATION_psat_all_1D( &
ijdim, &
tem, &
psat )
implicit none
integer, intent(in) :: ijdim
real(RP), intent(in) :: tem (ijdim) !< temperature [K]
real(RP), intent(out) :: psat(ijdim) !< saturation vapor pressure [Pa]
real(RP) :: alpha(ijdim), psatl(ijdim), psati(ijdim)
integer :: ij
!---------------------------------------------------------------------------
call SATURATION_alpha ( ijdim, tem(:), alpha(:) )
call SATURATION_psat_liq( ijdim, tem(:), psatl(:) )
call SATURATION_psat_ice( ijdim, tem(:), psati(:) )
do ij = 1, ijdim
psat(ij) = psatl(ij) * ( alpha(ij) ) &
+ psati(ij) * ( 1.0_RP - alpha(ij) )
enddo
return
end subroutine SATURATION_psat_all_1D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure (liquid/ice mixture) (2D)
subroutine SATURATION_psat_all_2D( &
ijdim, &
kdim, &
tem, &
psat )
!ESC! use mod_const, only: &
!ESC! CONST_PI, &
!ESC! CONST_TEM00, &
!ESC! CONST_PSAT0
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim) !< temperature [K]
real(RP), intent(out) :: psat(ijdim,kdim) !< saturation vapor pressure [Pa]
real(RP) :: alpha, psatl, psati
real(RP) :: rtem
real(RP) :: PI, RTEM00, PSAT0
integer :: ij, k
!---------------------------------------------------------------------------
RTEM00 = 1.0_RP / CONST_TEM00
PSAT0 = CONST_PSAT0
if ( ALPHA_TYPE == 'COS' ) then
PI = CONST_PI
!$omp parallel do default(none),private(ij,k,alpha,rtem,psatl,psati), &
!$omp shared(ijdim,kdim,psat,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP,PI, &
!$omp CPovR_liq,LovR_liq,CPovR_ice,LovR_ice,RTEM00,PSAT0)
do k = 1, kdim
do ij = 1, ijdim
if ( tem(ij,k) > SATURATION_ULIMIT_TEMP ) then
alpha = 1.0_RP
elseif( tem(ij,k) < SATURATION_LLIMIT_TEMP ) then
alpha = 0.0_RP
else
alpha = ( SATURATION_ULIMIT_TEMP - tem(ij,k) ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha = 0.5_RP * ( 1.0_RP + cos( alpha * PI ) )
endif
rtem = 1.0_RP / max( tem(ij,k), TEM_MIN )
psatl = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_liq &
* exp( LovR_liq * ( RTEM00 - rtem ) )
psati = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_ice &
* exp( LovR_ice * ( RTEM00 - rtem ) )
psat(ij,k) = psatl * ( alpha ) &
+ psati * ( 1.0_RP - alpha )
enddo
enddo
!$omp end parallel do
elseif( ALPHA_TYPE == 'LINEAR' ) then
!$omp parallel do default(none),private(ij,k,alpha,rtem,psatl,psati), &
!$omp shared(ijdim,kdim,psat,tem,SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP, &
!$omp CPovR_liq,LovR_liq,CPovR_ice,LovR_ice,RTEM00,PSAT0)
do k = 1, kdim
do ij = 1, ijdim
alpha = ( tem(ij,k) - SATURATION_LLIMIT_TEMP ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha = min( max( alpha, 0.0_RP ), 1.0_RP )
rtem = 1.0_RP / max( tem(ij,k), TEM_MIN )
psatl = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_liq &
* exp( LovR_liq * ( RTEM00 - rtem ) )
psati = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_ice &
* exp( LovR_ice * ( RTEM00 - rtem ) )
psat(ij,k) = psatl * ( alpha ) &
+ psati * ( 1.0_RP - alpha )
enddo
enddo
!$omp end parallel do
endif
return
end subroutine SATURATION_psat_all_2D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure from Clausius-Clapeyron equation (0D)
subroutine SATURATION_psat_liq_0D( &
tem, &
psat )
!ESC! use mod_const, only: &
!ESC! CONST_TEM00, &
!ESC! CONST_PSAT0
implicit none
real(RP), intent(in) :: tem !< temperature [K]
real(RP), intent(out) :: psat !< saturation vapor pressure [Pa]
real(RP) :: rtem
real(RP) :: RTEM00, PSAT0
!---------------------------------------------------------------------------
RTEM00 = 1.0_RP / CONST_TEM00
PSAT0 = CONST_PSAT0
rtem = 1.0_RP / max( tem, TEM_MIN )
psat = PSAT0 * ( tem * RTEM00 )**CPovR_liq &
* exp( LovR_liq * ( RTEM00 - rtem ) )
return
end subroutine SATURATION_psat_liq_0D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure from Clausius-Clapeyron equation (1D)
subroutine SATURATION_psat_liq_1D( &
ijdim, &
tem, &
psat )
!ESC! use mod_const, only: &
!ESC! CONST_TEM00, &
!ESC! CONST_PSAT0
implicit none
integer, intent(in) :: ijdim
real(RP), intent(in) :: tem (ijdim) !< temperature [K]
real(RP), intent(out) :: psat(ijdim) !< saturation vapor pressure [Pa]
real(RP) :: rtem
real(RP) :: RTEM00, PSAT0
integer :: ij
!---------------------------------------------------------------------------
RTEM00 = 1.0_RP / CONST_TEM00
PSAT0 = CONST_PSAT0
!$omp parallel do default(none),private(ij,rtem), &
!$omp shared(ijdim,psat,tem,CPovR_liq,LovR_liq,RTEM00,PSAT0)
do ij = 1, ijdim
rtem = 1.0_RP / max( tem(ij), TEM_MIN )
psat(ij) = PSAT0 * ( tem(ij) * RTEM00 )**CPovR_liq &
* exp( LovR_liq * ( RTEM00 - rtem ) )
enddo
!$omp end parallel do
return
end subroutine SATURATION_psat_liq_1D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure from Clausius-Clapeyron equation (2D)
subroutine SATURATION_psat_liq_2D( &
ijdim, &
kdim, &
tem, &
psat )
!ESC! use mod_const, only: &
!ESC! CONST_TEM00, &
!ESC! CONST_PSAT0
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim) !< temperature [K]
real(RP), intent(out) :: psat(ijdim,kdim) !< saturation vapor pressure [Pa]
real(RP) :: rtem
real(RP) :: RTEM00, PSAT0
integer :: ij, k
!---------------------------------------------------------------------------
RTEM00 = 1.0_RP / CONST_TEM00
PSAT0 = CONST_PSAT0
!$omp parallel do default(none),private(ij,k,rtem), &
!$omp shared(ijdim,kdim,psat,tem,CPovR_liq,LovR_liq,RTEM00,PSAT0)
do k = 1, kdim
do ij = 1, ijdim
rtem = 1.0_RP / max( tem(ij,k), TEM_MIN )
psat(ij,k) = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_liq &
* exp( LovR_liq * ( RTEM00 - rtem ) )
enddo
enddo
!$omp end parallel do
return
end subroutine SATURATION_psat_liq_2D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure from Clausius-Clapeyron equation (0D)
subroutine SATURATION_psat_ice_0D( &
tem, &
psat )
!ESC! use mod_const, only: &
!ESC! CONST_TEM00, &
!ESC! CONST_PSAT0
implicit none
real(RP), intent(in) :: tem !< temperature [K]
real(RP), intent(out) :: psat !< saturation vapor pressure [Pa]
real(RP) :: rtem
real(RP) :: RTEM00, PSAT0
!---------------------------------------------------------------------------
RTEM00 = 1.0_RP / CONST_TEM00
PSAT0 = CONST_PSAT0
rtem = 1.0_RP / max( tem, TEM_MIN )
psat = PSAT0 * ( tem * RTEM00 )**CPovR_ice &
* exp( LovR_ice * ( RTEM00 - rtem ) )
return
end subroutine SATURATION_psat_ice_0D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure from Clausius-Clapeyron equation (1D)
subroutine SATURATION_psat_ice_1D( &
ijdim, &
tem, &
psat )
!ESC! use mod_const, only: &
!ESC! CONST_TEM00, &
!ESC! CONST_PSAT0
implicit none
integer, intent(in) :: ijdim
real(RP), intent(in) :: tem (ijdim) !< temperature [K]
real(RP), intent(out) :: psat(ijdim) !< saturation vapor pressure [Pa]
real(RP) :: rtem
real(RP) :: RTEM00, PSAT0
integer :: ij
!---------------------------------------------------------------------------
RTEM00 = 1.0_RP / CONST_TEM00
PSAT0 = CONST_PSAT0
!$omp parallel do default(none),private(ij,rtem), &
!$omp shared(ijdim,psat,tem,CPovR_ice,LovR_ice,RTEM00,PSAT0)
do ij = 1, ijdim
rtem = 1.0_RP / max( tem(ij), TEM_MIN )
psat(ij) = PSAT0 * ( tem(ij) * RTEM00 )**CPovR_ice &
* exp( LovR_ice * ( RTEM00 - rtem ) )
enddo
!$omp end parallel do
return
end subroutine SATURATION_psat_ice_1D
!-----------------------------------------------------------------------------
!> calc saturation vapor pressure from Clausius-Clapeyron equation (2D)
subroutine SATURATION_psat_ice_2D( &
ijdim, &
kdim, &
tem, &
psat )
!ESC! use mod_const, only: &
!ESC! CONST_TEM00, &
!ESC! CONST_PSAT0
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim) !< temperature [K]
real(RP), intent(out) :: psat(ijdim,kdim) !< saturation vapor pressure [Pa]
real(RP) :: rtem
real(RP) :: RTEM00, PSAT0
integer :: ij, k
!---------------------------------------------------------------------------
RTEM00 = 1.0_RP / CONST_TEM00
PSAT0 = CONST_PSAT0
!$omp parallel do default(none),private(ij,k,rtem), &
!$omp shared(ijdim,kdim,psat,tem,CPovR_ice,LovR_ice,RTEM00,PSAT0)
do k = 1, kdim
do ij = 1, ijdim
rtem = 1.0_RP / max( tem(ij,k), TEM_MIN )
psat(ij,k) = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_ice &
* exp( LovR_ice * ( RTEM00 - rtem ) )
enddo
enddo
!$omp end parallel do
return
end subroutine SATURATION_psat_ice_2D
!-----------------------------------------------------------------------------
subroutine SATURATION_qsat_liq_0D( &
tem, &
pre, &
qsat )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap
implicit none
real(RP), intent(in) :: pre
real(RP), intent(in) :: tem
real(RP), intent(out) :: qsat
real(RP) :: psat
real(RP) :: EPSvap
!---------------------------------------------------------------------------
EPSvap = CONST_EPSvap
call SATURATION_psat_liq_0D( tem, psat )
qsat = EPSvap * psat / ( pre - ( 1.0_RP-EPSvap ) * psat )
return
end subroutine SATURATION_qsat_liq_0D
!-----------------------------------------------------------------------------
subroutine SATURATION_qsat_liq_1D( &
ijdim, &
tem, &
pre, &
qsat )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap
implicit none
integer, intent(in) :: ijdim
real(RP), intent(in) :: pre (ijdim)
real(RP), intent(in) :: tem (ijdim)
real(RP), intent(out) :: qsat(ijdim)
real(RP) :: psat(ijdim)
real(RP) :: EPSvap
integer :: ij
!---------------------------------------------------------------------------
EPSvap = CONST_EPSvap
call SATURATION_psat_liq_1D( ijdim, tem(:), psat(:) )
!$omp parallel do default(none),private(ij), &
!$omp shared(ijdim,qsat,pre,psat,EPSvap)
do ij = 1, ijdim
qsat(ij) = EPSvap * psat(ij) / ( pre(ij) - ( 1.0_RP-EPSvap ) * psat(ij) )
enddo
!$omp end parallel do
return
end subroutine SATURATION_qsat_liq_1D
!-----------------------------------------------------------------------------
subroutine SATURATION_qsat_liq_2D( &
ijdim, &
kdim, &
tem, &
pre, &
qsat )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: pre (ijdim,kdim)
real(RP), intent(in) :: tem (ijdim,kdim)
real(RP), intent(out) :: qsat(ijdim,kdim)
real(RP) :: psat(ijdim,kdim)
real(RP) :: EPSvap
integer :: ij, k
!---------------------------------------------------------------------------
EPSvap = CONST_EPSvap
call SATURATION_psat_liq_2D( ijdim, kdim, tem(:,:), psat(:,:) )
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,qsat,pre,psat,EPSvap)
do k = 1, kdim
do ij = 1, ijdim
qsat(ij,k) = EPSvap * psat(ij,k) / ( pre(ij,k) - ( 1.0_RP-EPSvap ) * psat(ij,k) )
enddo
enddo
!$omp end parallel do
return
end subroutine SATURATION_qsat_liq_2D
!-----------------------------------------------------------------------------
subroutine SATURATION_qsat_ice_0D( &
tem, &
pre, &
qsat )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap
implicit none
real(RP), intent(in) :: pre
real(RP), intent(in) :: tem
real(RP), intent(out) :: qsat
real(RP) :: psat
real(RP) :: EPSvap
!---------------------------------------------------------------------------
EPSvap = CONST_EPSvap
call SATURATION_psat_ice_0D( tem, psat )
qsat = EPSvap * psat / ( pre - ( 1.0_RP-EPSvap ) * psat )
return
end subroutine SATURATION_qsat_ice_0D
!-----------------------------------------------------------------------------
subroutine SATURATION_qsat_ice_1D( &
ijdim, &
tem, &
pre, &
qsat )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap
implicit none
integer, intent(in) :: ijdim
real(RP), intent(in) :: pre (ijdim)
real(RP), intent(in) :: tem (ijdim)
real(RP), intent(out) :: qsat(ijdim)
real(RP) :: psat(ijdim)
real(RP) :: EPSvap
integer :: ij
!---------------------------------------------------------------------------
EPSvap = CONST_EPSvap
call SATURATION_psat_ice_1D( ijdim, tem(:), psat(:) )
!$omp parallel do default(none),private(ij), &
!$omp shared(ijdim,qsat,pre,psat,EPSvap)
do ij = 1, ijdim
qsat(ij) = EPSvap * psat(ij) / ( pre(ij) - ( 1.0_RP-EPSvap ) * psat(ij) )
enddo
!$omp end parallel do
return
end subroutine SATURATION_qsat_ice_1D
!-----------------------------------------------------------------------------
subroutine SATURATION_qsat_ice_2D( &
ijdim, &
kdim, &
tem, &
pre, &
qsat )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: pre (ijdim,kdim)
real(RP), intent(in) :: tem (ijdim,kdim)
real(RP), intent(out) :: qsat(ijdim,kdim)
real(RP) :: psat(ijdim,kdim)
real(RP) :: EPSvap
integer :: ij, k
!---------------------------------------------------------------------------
EPSvap = CONST_EPSvap
call SATURATION_psat_ice_2D( ijdim, kdim, tem(:,:), psat(:,:) )
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,qsat,pre,psat,EPSvap)
do k = 1, kdim
do ij = 1, ijdim
qsat(ij,k) = EPSvap * psat(ij,k) / ( pre(ij,k) - ( 1.0_RP-EPSvap ) * psat(ij,k) )
enddo
enddo
!$omp end parallel do
return
end subroutine SATURATION_qsat_ice_2D
!-----------------------------------------------------------------------------
subroutine SATURATION_rh( &
ijdim, &
kdim, &
rho, &
tem, &
qv, &
rh )
!ESC! use mod_const, only: &
!ESC! CONST_Rvap, &
!ESC! CONST_TMELT
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: rho(ijdim,kdim)
real(RP), intent(in) :: tem(ijdim,kdim)
real(RP), intent(in) :: qv (ijdim,kdim)
real(RP), intent(out) :: rh (ijdim,kdim)
real(RP) :: psat_liq(ijdim,kdim)
real(RP) :: psat_ice(ijdim,kdim)
real(RP) :: rh_liq, rh_ice, alpha
real(RP) :: Rvap, TMELT
integer :: ij, k
!---------------------------------------------------------------------------
Rvap = CONST_Rvap
TMELT = CONST_TMELT
call SATURATION_psat_liq( ijdim, kdim, tem(:,:), psat_liq(:,:) )
call SATURATION_psat_ice( ijdim, kdim, tem(:,:), psat_ice(:,:) )
!$omp parallel do default(none),private(ij,k,rh_liq,rh_ice,alpha), &
!$omp shared(ijdim,kdim,rh,rho,tem,qv,psat_liq,psat_ice,Rvap,TMELT)
do k = 1, kdim
do ij = 1, ijdim
rh_liq = qv(ij,k) / psat_liq(ij,k) * ( rho(ij,k) * Rvap * tem(ij,k) )
rh_ice = qv(ij,k) / psat_ice(ij,k) * ( rho(ij,k) * Rvap * tem(ij,k) )
alpha = 0.5_RP + sign(0.5_RP,tem(ij,k)-TMELT)
rh(ij,k) = ( alpha ) * rh_liq &
+ ( 1.0_RP-alpha ) * rh_ice
enddo
enddo
!$omp end parallel do
return
end subroutine SATURATION_rh
!-----------------------------------------------------------------------------
subroutine SATURATION_dewtem( &
ijdim, &
kdim, &
tem, &
pre, &
qd, &
qv, &
wtem )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap, &
!ESC! CONST_PSAT0, &
!ESC! CONST_TEM00
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim)
real(RP), intent(in) :: pre (ijdim,kdim)
real(RP), intent(in) :: qd (ijdim,kdim)
real(RP), intent(in) :: qv (ijdim,kdim)
real(RP), intent(out) :: wtem(ijdim,kdim) ! dew point temperature
real(RP) :: prev(ijdim,kdim)
logical :: flag(ijdim,kdim)
real(RP) :: rtem, lv, psatl, f, dfdtem
real(RP) :: EPSvap, RTEM00, PSAT0
real(RP), parameter :: criteria = 1.E-8_RP
integer, parameter :: itelim = 20
integer :: ite
integer :: ij, k
!---------------------------------------------------------------------------
EPSvap = CONST_EPSvap
RTEM00 = 1.0_RP / CONST_TEM00
PSAT0 = CONST_PSAT0
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,prev,wtem,flag,tem,pre,qd,qv,EPSvap)
do k = 1, kdim
do ij = 1, ijdim
prev(ij,k) = pre(ij,k) * qv(ij,k) / ( EPSvap * qd(ij,k) + qv(ij,k) )
wtem(ij,k) = tem(ij,k) * 0.98_RP
flag(ij,k) = .false.
enddo
enddo
!$omp end parallel do
!$omp parallel default(none),private(ij,k,rtem,lv,psatl,f,dfdtem), &
!$omp shared(ite,ijdim,kdim,wtem,flag,prev,CPovR_liq,LovR_liq,RTEM00,PSAT0)
do ite = 1, itelim
!$omp do
do k = 1, kdim
do ij = 1, ijdim
if ( .NOT. flag(ij,k) ) then
rtem = 1.0_RP / max( wtem(ij,k), TEM_MIN )
lv = LovR_liq + wtem(ij,k) * CPovR_liq
psatl = PSAT0 * ( wtem(ij,k) * RTEM00 )**CPovR_liq &
* exp( LovR_liq * ( RTEM00 - rtem ) )
f = psatl - prev(ij,k)
dfdtem = lv * psatl * rtem**2
wtem(ij,k) = wtem(ij,k) - f / dfdtem
if ( abs(f) < prev(ij,k) * criteria ) then
flag(ij,k) = .true.
endif
endif
enddo
enddo
!$omp end do
enddo
!$omp end parallel
return
end subroutine SATURATION_dewtem
!-----------------------------------------------------------------------------
subroutine SATURATION_adjustment( &
ijdim, &
kdim, &
rhog, &
rhoge, &
rhogq, &
tem, &
q, &
qd, &
gsgam2, &
ice_adjust )
!ESC! use mod_runconf, only: &
!ESC! nqmax => TRC_VMAX, &
!ESC! I_QV, &
!ESC! I_QC, &
!ESC! I_QI, &
!ESC! LHV, &
!ESC! LHF
use mod_thrmdyn, only: &
thrmdyn_cv
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: rhog (ijdim,kdim)
real(RP), intent(inout) :: rhoge (ijdim,kdim)
real(RP), intent(inout) :: rhogq (ijdim,kdim,nqmax)
real(RP), intent(inout) :: tem (ijdim,kdim)
real(RP), intent(inout) :: q (ijdim,kdim,nqmax)
real(RP), intent(in) :: qd (ijdim,kdim)
real(RP), intent(in) :: gsgam2(ijdim,kdim)
logical, intent(in) :: ice_adjust
real(RP) :: ein_moist(ijdim,kdim)
real(RP) :: qsum (ijdim,kdim)
real(RP) :: CVtot (ijdim,kdim)
real(RP) :: rho (ijdim,kdim)
integer :: ij, k
!---------------------------------------------------------------------------
call PROF_rapstart('____Saturation_Adjustment')
! ein_moist = U1(rho,qsum,T1) : "unsaturated temperature"
if ( I_QI > 0 .AND. ice_adjust ) then
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,ein_moist,qsum,rhoge,rhog,q,LHV,LHF,I_QV,I_QC,I_QI)
do k = 1, kdim
do ij = 1, ijdim
ein_moist(ij,k) = rhoge(ij,k) / rhog(ij,k) &
+ q(ij,k,I_QV) * LHV &
- q(ij,k,I_QI) * LHF
qsum (ij,k) = q(ij,k,I_QV) &
+ q(ij,k,I_QC) &
+ q(ij,k,I_QI)
q(ij,k,I_QV) = qsum(ij,k)
q(ij,k,I_QC) = 0.0_RP
q(ij,k,I_QI) = 0.0_RP
enddo
enddo
!$omp end parallel do
else
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,ein_moist,qsum,rhoge,rhog,q,LHV,I_QV,I_QC)
do k = 1, kdim
do ij = 1, ijdim
ein_moist(ij,k) = rhoge(ij,k) / rhog(ij,k) &
+ q(ij,k,I_QV) * LHV
qsum (ij,k) = q(ij,k,I_QV) &
+ q(ij,k,I_QC)
q(ij,k,I_QV) = qsum(ij,k)
q(ij,k,I_QC) = 0.0_RP
enddo
enddo
!$omp end parallel do
endif
call THRMDYN_cv( ijdim, & ! [IN]
kdim, & ! [IN]
qd (:,:), & ! [IN]
q (:,:,:), & ! [IN]
CVtot(:,:) ) ! [OUT]
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,rho,tem,ein_moist,q,rhog,gsgam2,CVtot,I_QV,LHV)
do k = 1, kdim
do ij = 1, ijdim
rho(ij,k) = rhog(ij,k) / gsgam2(ij,k)
tem(ij,k) = ( ein_moist(ij,k) - q(ij,k,I_QV) * LHV ) / CVtot(ij,k)
enddo
enddo
!$omp end parallel do
if ( I_QI > 0 .AND. ice_adjust ) then
call satadjust_all( ijdim, & ! [IN]
kdim, & ! [IN]
rho (:,:), & ! [IN]
ein_moist(:,:), & ! [IN]
qsum (:,:), & ! [IN]
tem (:,:), & ! [INOUT]
q (:,:,:) ) ! [INOUT]
else
call satadjust_liq( ijdim, & ! [IN]
kdim, & ! [IN]
rho (:,:), & ! [IN]
ein_moist(:,:), & ! [IN]
qsum (:,:), & ! [IN]
tem (:,:), & ! [INOUT]
q (:,:,:) ) ! [INOUT]
endif
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,rhogq,rhog,q,I_QV,I_QC)
do k = 1, kdim
do ij = 1, ijdim
rhogq(ij,k,I_QV) = rhog(ij,k) * q(ij,k,I_QV)
rhogq(ij,k,I_QC) = rhog(ij,k) * q(ij,k,I_QC)
enddo
enddo
!$omp end parallel do
if ( I_QI > 0 .AND. ice_adjust ) then
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,rhogq,rhog,q,I_QI)
do k = 1, kdim
do ij = 1, ijdim
rhogq(ij,k,I_QI) = rhog(ij,k) * q(ij,k,I_QI)
enddo
enddo
!$omp end parallel do
endif
call THRMDYN_cv( ijdim, & ! [IN]
kdim, & ! [IN]
qd (:,:), & ! [IN]
q (:,:,:), & ! [IN]
CVtot(:,:) ) ! [OUT]
!$omp parallel do default(none),private(ij,k), &
!$omp shared(ijdim,kdim,rhoge,rhog,tem,CVtot)
do k = 1, kdim
do ij = 1, ijdim
rhoge(ij,k) = rhog(ij,k) * tem(ij,k) * CVtot(ij,k)
enddo
enddo
!$omp end parallel do
call PROF_rapend ('____Saturation_Adjustment')
return
end subroutine SATURATION_adjustment
!-----------------------------------------------------------------------------
subroutine satadjust_all( &
ijdim, &
kdim, &
rho, &
Emoist, &
qsum, &
tem, &
q )
!ESC! use mod_const, only: &
!ESC! CONST_EPS, &
!ESC! CONST_CVdry, &
!ESC! CONST_Rvap, &
!ESC! CONST_PSAT0, &
!ESC! CONST_TEM00
!ESC! use mod_adm, only: &
!ESC! ADM_proc_stop, &
!ESC! kmin => ADM_kmin, &
!ESC! kmax => ADM_kmax
!ESC! use mod_runconf, only: &
!ESC! nqmax => TRC_VMAX, &
!ESC! NQW_STR, &
!ESC! NQW_END, &
!ESC! I_QV, &
!ESC! I_QC, &
!ESC! I_QI, &
!ESC! CVW, &
!ESC! LHV, &
!ESC! LHF
use mod_thrmdyn, only: &
thrmdyn_qd
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: rho (ijdim,kdim)
real(RP), intent(in) :: Emoist(ijdim,kdim)
real(RP), intent(in) :: qsum (ijdim,kdim)
real(RP), intent(inout) :: tem (ijdim,kdim)
real(RP), intent(inout) :: q (ijdim,kdim,nqmax)
real(RP) :: qd(ijdim,kdim)
real(RP) :: rtem, alpha, psatl, psati, psat, qsatl, qsati, qsat
real(RP) :: CVtot, Emoist_new, dtemp, lim1, lim2
real(RP) :: dalpha_dT, dqsatl_dT, dqsati_dT, dqsat_dT, dqc_dT, dqi_dT, dCVtot_dT, dEmoist_dT
real(RP) :: RTEM00, PSAT0, Rvap, CVdry, EPS
real(RP) :: dtemp_criteria
integer, parameter :: itelim = 100
logical :: converged
integer :: ij, k, nq, ite
!---------------------------------------------------------------------------
dtemp_criteria = 10.0_RP**(-(RP_PREC+1)/2)
EPS = CONST_EPS
CVdry = CONST_CVdry
Rvap = CONST_Rvap
PSAT0 = CONST_PSAT0
RTEM00 = 1.0_RP / CONST_TEM00
call THRMDYN_qd( ijdim, kdim, q(:,:,:), qd(:,:) )
!$omp parallel do default(none), &
!$omp private(ij,k,nq,ite,converged,rtem,lim1,lim2, &
!$omp alpha,psatl,psati,psat,qsatl,qsati,qsat,CVtot,Emoist_new,dtemp, &
!$omp dalpha_dT,dqsatl_dT,dqsati_dT,dqsat_dT,dqc_dT,dqi_dT,dCVtot_dT,dEmoist_dT), &
!$omp shared (ijdim,kmin,kmax,tem,q,qd,rho,Emoist,qsum, &
!$omp CPovR_liq,CVovR_liq,LovR_liq,CPovR_ice,CVovR_ice,LovR_ice, &
!$omp NQW_STR,NQW_END,I_QV,I_QC,I_QI,CVW,LHV,LHF,RTEM00,PSAT0,Rvap,CVdry,EPS, &
!$omp SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP,dtemp_criteria)
do k = kmin, kmax
do ij = 1, ijdim
alpha = ( tem(ij,k) - SATURATION_LLIMIT_TEMP ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha = min( max( alpha, 0.0_RP ), 1.0_RP )
rtem = 1.0_RP / max( tem(ij,k), TEM_MIN )
psatl = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_liq * exp( LovR_liq * ( RTEM00 - rtem ) )
psati = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_ice * exp( LovR_ice * ( RTEM00 - rtem ) )
psat = psatl * ( alpha ) &
+ psati * ( 1.0_RP - alpha )
qsat = psat / ( rho(ij,k) * Rvap * tem(ij,k) )
if ( qsum(ij,k)-qsat > EPS ) then
converged = .false.
do ite = 1, itelim
alpha = ( tem(ij,k) - SATURATION_LLIMIT_TEMP ) &
/ ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
alpha = min( max( alpha, 0.0_RP ), 1.0_RP )
rtem = 1.0_RP / max( tem(ij,k), TEM_MIN )
psatl = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_liq * exp( LovR_liq * ( RTEM00 - rtem ) )
psati = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_ice * exp( LovR_ice * ( RTEM00 - rtem ) )
psat = psatl * ( alpha ) &
+ psati * ( 1.0_RP - alpha )
qsatl = psatl / ( rho(ij,k) * Rvap * tem(ij,k) )
qsati = psati / ( rho(ij,k) * Rvap * tem(ij,k) )
qsat = psat / ( rho(ij,k) * Rvap * tem(ij,k) )
! Separation
q(ij,k,I_QV) = qsat
q(ij,k,I_QC) = ( qsum(ij,k)-qsat ) * ( alpha )
q(ij,k,I_QI) = ( qsum(ij,k)-qsat ) * ( 1.0_RP-alpha )
CVtot = qd(ij,k) * CVdry
do nq = NQW_STR, NQW_END
CVtot = CVtot + q(ij,k,nq) * CVW(nq)
enddo
Emoist_new = tem(ij,k) * CVtot + q(ij,k,I_QV) * LHV - q(ij,k,I_QI) * LHF
! dX/dT
lim1 = 0.5_RP + sign( 0.5_RP, SATURATION_ULIMIT_TEMP - tem(ij,k) )
lim2 = 0.5_RP + sign( 0.5_RP, tem(ij,k) - SATURATION_LLIMIT_TEMP )
dalpha_dT = lim1 * lim2 / ( SATURATION_ULIMIT_TEMP - SATURATION_LLIMIT_TEMP )
dqsatl_dT = ( LovR_liq / tem(ij,k)**2 + CVovR_liq / tem(ij,k) ) * qsatl
dqsati_dT = ( LovR_ice / tem(ij,k)**2 + CVovR_ice / tem(ij,k) ) * qsati
dqsat_dT = qsatl * dalpha_dT + dqsatl_dT * ( alpha ) &
- qsati * dalpha_dT + dqsati_dT * ( 1.0_RP-alpha )
dqc_dT = ( qsum(ij,k)-qsat ) * dalpha_dT - dqsat_dT * ( alpha )
dqi_dT = -( qsum(ij,k)-qsat ) * dalpha_dT - dqsat_dT * ( 1.0_RP-alpha )
dCVtot_dT = dqsat_dT * CVW(I_QV) &
+ dqc_dT * CVW(I_QC) &
+ dqi_dT * CVW(I_QI)
dEmoist_dT = tem(ij,k) * dCVtot_dT &
+ CVtot &
+ dqsat_dT * LHV &
- dqi_dT * LHF
dtemp = ( Emoist_new - Emoist(ij,k) ) / dEmoist_dT
tem(ij,k) = tem(ij,k) - dtemp
if ( abs(dtemp) < dtemp_criteria ) then
converged = .true.
exit
endif
if( tem(ij,k)*0.0_RP /= 0.0_RP) exit
enddo
if ( .NOT. converged ) then
write(*,*) rho(ij,k),tem(ij,k),q(ij,k,I_QV),q(ij,k,I_QC),q(ij,k,I_QI)
write(*,*) 'xxx [satadjust_all] not converged! dtemp=', dtemp,ij,k,ite
call ADM_proc_stop
endif
endif
enddo
enddo
!$omp end parallel do
return
end subroutine satadjust_all
!-----------------------------------------------------------------------------
subroutine satadjust_liq( &
ijdim, &
kdim, &
rho, &
Emoist, &
qsum, &
tem, &
q )
!ESC! use mod_const, only: &
!ESC! CONST_EPS, &
!ESC! CONST_CVdry, &
!ESC! CONST_Rvap, &
!ESC! CONST_PSAT0, &
!ESC! CONST_TEM00
!ESC! use mod_adm, only: &
!ESC! ADM_proc_stop, &
!ESC! kmin => ADM_kmin, &
!ESC! kmax => ADM_kmax
!ESC! use mod_runconf, only: &
!ESC! nqmax => TRC_VMAX, &
!ESC! NQW_STR, &
!ESC! NQW_END, &
!ESC! I_QV, &
!ESC! I_QC, &
!ESC! CVW, &
!ESC! LHV
use mod_thrmdyn, only: &
thrmdyn_qd
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: rho (ijdim,kdim)
real(RP), intent(in) :: Emoist(ijdim,kdim)
real(RP), intent(in) :: qsum (ijdim,kdim)
real(RP), intent(inout) :: tem (ijdim,kdim)
real(RP), intent(inout) :: q (ijdim,kdim,nqmax)
real(RP) :: qd(ijdim,kdim)
real(RP) :: rtem, psat, qsat
real(RP) :: CVtot, Emoist_new, dtemp
real(RP) :: dqsat_dT, dCVtot_dT, dEmoist_dT
real(RP) :: RTEM00, PSAT0, Rvap, CVdry, EPS
real(RP) :: dtemp_criteria
integer, parameter :: itelim = 100
logical :: converged
integer :: ij, k, nq, ite
!---------------------------------------------------------------------------
dtemp_criteria = 10.0_RP**(-(RP_PREC+1)/2)
EPS = CONST_EPS
CVdry = CONST_CVdry
Rvap = CONST_Rvap
PSAT0 = CONST_PSAT0
RTEM00 = 1.0_RP / CONST_TEM00
call THRMDYN_qd( ijdim, kdim, q(:,:,:), qd(:,:) )
!$omp parallel do default(none), &
!$omp private(ij,k,nq,ite,converged,rtem, &
!$omp psat,qsat,CVtot,Emoist_new,dtemp,dqsat_dT,dCVtot_dT,dEmoist_dT), &
!$omp shared (ijdim,kmin,kmax,tem,q,qd,rho,Emoist,qsum, &
!$omp CPovR_liq,CVovR_liq,LovR_liq, &
!$omp NQW_STR,NQW_END,I_QV,I_QC,CVW,LHV,RTEM00,PSAT0,Rvap,CVdry,EPS, &
!$omp SATURATION_ULIMIT_TEMP,SATURATION_LLIMIT_TEMP,dtemp_criteria)
do k = kmin, kmax
do ij = 1, ijdim
rtem = 1.0_RP / max( tem(ij,k), TEM_MIN )
psat = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_liq * exp( LovR_liq * ( RTEM00 - rtem ) )
qsat = psat / ( rho(ij,k) * Rvap * tem(ij,k) )
if ( qsum(ij,k)-qsat > EPS ) then
converged = .false.
do ite = 1, itelim
rtem = 1.0_RP / max( tem(ij,k), TEM_MIN )
psat = PSAT0 * ( tem(ij,k) * RTEM00 )**CPovR_liq * exp( LovR_liq * ( RTEM00 - rtem ) )
qsat = psat / ( rho(ij,k) * Rvap * tem(ij,k) )
! Separation
q(ij,k,I_QV) = qsat
q(ij,k,I_QC) = qsum(ij,k)-qsat
CVtot = qd(ij,k) * CVdry
do nq = NQW_STR, NQW_END
CVtot = CVtot + q(ij,k,nq) * CVW(nq)
enddo
Emoist_new = tem(ij,k) * CVtot + q(ij,k,I_QV) * LHV
! dX/dT
dqsat_dT = ( LovR_liq / tem(ij,k)**2 + CVovR_liq / tem(ij,k) ) * qsat
dCVtot_dT = dqsat_dT * ( CVW(I_QV) - CVW(I_QC) )
dEmoist_dT = tem(ij,k) * dCVtot_dT &
+ CVtot &
+ dqsat_dT * LHV
dtemp = ( Emoist_new - Emoist(ij,k) ) / dEmoist_dT
tem(ij,k) = tem(ij,k) - dtemp
if ( abs(dtemp) < dtemp_criteria ) then
converged = .true.
exit
endif
if( tem(ij,k)*0.0_RP /= 0.0_RP) exit
enddo
if ( .NOT. converged ) then
write(*,*) rho(ij,k),tem(ij,k),q(ij,k,I_QV),q(ij,k,I_QC)
write(*,*) 'xxx [satadjust_liq] not converged! dtemp=', dtemp,ij,k,ite
call ADM_proc_stop
endif
endif
enddo
enddo
return
end subroutine satadjust_liq
!-----------------------------------------------------------------------------
subroutine SATURATION_enthalpy( &
ijdim, &
kdim, &
tem, &
ent, &
pre, &
qw, &
adiabat_type )
!ESC! use mod_const, only: &
!ESC! CONST_Rdry, &
!ESC! CONST_Rvap, &
!ESC! CONST_CPdry, &
!ESC! CONST_CPvap, &
!ESC! CONST_LHV0, &
!ESC! CONST_PSAT0, &
!ESC! CONST_EPSvap, &
!ESC! CONST_PRE00, &
!ESC! CONST_TEM00
!ESC! use mod_runconf, only: &
!ESC! nqmax => TRC_VMAX, &
!ESC! I_QV, &
!ESC! I_QC
use mod_thrmdyn, only: &
thrmdyn_ent
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(inout) :: tem(ijdim,kdim)
real(RP), intent(in) :: ent(ijdim,kdim)
real(RP), intent(in) :: pre(ijdim,kdim)
real(RP), intent(in) :: qw (ijdim,kdim)
character(len=*), intent(in) :: adiabat_type
! = 'RMA' reversible: qv = qv*, qc = qw - qv*
! = 'PMA' pseudo : if qw > qv* : qv = qv*, qc=0
! = 'DA' qc = 0
real(RP) :: qd (ijdim,kdim)
real(RP) :: qsat (ijdim,kdim)
real(RP) :: q (ijdim,kdim,nqmax)
real(RP) :: pred (ijdim,kdim)
real(RP) :: prev (ijdim,kdim)
real(RP) :: tem_old(ijdim,kdim)
real(RP) :: ents (ijdim,kdim)
real(RP) :: dents (ijdim,kdim)
real(RP) :: dtem (ijdim,kdim)
logical :: flag (ijdim,kdim)
real(RP) :: PREMIN = 1.E-10_RP
real(RP) :: DENTS_FACT = 1.0_RP
real(RP) :: DENTS_MIN = 1.E-10_RP
real(RP) :: TEMMIN = 1.0_RP
integer, parameter :: itelim = 100
integer :: ite
integer :: ij, k
!---------------------------------------------------------------------------
flag(:,:) = .false.
q(:,:,:) = 0.0_RP
if ( adiabat_type == 'DA' ) then
tem (:,:) = exp( ( ent(:,:) + CONST_Rdry * log( pre(:,:) / CONST_PRE00 ) ) / CONST_CPdry ) * CONST_TEM00
qd (:,:) = 1.0_RP-qw(:,:)
q (:,:,I_QV) = qw(:,:)
pred(:,:) = pre(:,:) * CONST_EPSvap * qd(:,:) / ( CONST_EPSvap * qd(:,:) + q(:,:,I_QV) )
pred(:,:) = max( pred(:,:), PREMIN )
prev(:,:) = pre(:,:) * q(:,:,I_QV) / ( CONST_EPSvap * qd(:,:) + q(:,:,I_QV) )
prev(:,:) = max( prev(:,:), PREMIN )
tem(:,:) = exp( ( ent(:,:) &
+ qd(:,:) * ( CONST_Rdry * log( pred(:,:) / CONST_PRE00 ) )&
+ q (:,:,I_QV) * ( CONST_Rvap * log( prev(:,:) / CONST_PSAT0 ) - CONST_LHV0 / CONST_TEM00 ) ) &
/ ( qd(:,:) * CONST_CPdry + q(:,:,I_QV) * CONST_CPvap ) ) * CONST_TEM00
elseif( adiabat_type == 'RMA' .OR. adiabat_type == 'PMA' ) then
tem(:,:) = exp( ( ent(:,:) + CONST_Rdry * log( pre(:,:) / CONST_PRE00 ) ) / CONST_CPdry ) * CONST_TEM00
do ite = 1, itelim
tem_old(:,:) = tem(:,:)
call SATURATION_qsat_liq( ijdim, kdim, tem(:,:), pre(:,:), qsat(:,:) )
q(:,:,I_QV) = min ( qw(:,:), qsat(:,:) )
if ( adiabat_type == 'RMA' ) then
q (:,:,I_QC) = qw(:,:) - q(:,:,I_QV)
qd(:,:) = 1.0_RP - qw(:,:)
elseif( adiabat_type == 'PMA' ) then
q (:,:,I_QC) = 0.0_RP
qd(:,:) = 1.0_RP - q(:,:,I_QV)
endif
call thrmdyn_ent( ijdim, & ! [IN]
kdim, & ! [IN]
ents, & ! [OUT]
tem, & ! [IN]
pre, & ! [IN]
q, & ! [IN]
qd ) ! [IN]
      do k  = 1, kdim
      do ij = 1, ijdim
         dents(ij,k) = ( CONST_CPdry * qd(ij,k) + CONST_CPvap * q(ij,k,I_QV) ) / tem(ij,k)   &
                     + ( CONST_LHV0 * q(ij,k,I_QV) ) / tem(ij,k)**2                          &
                     * ( CONST_LHV0 / ( tem(ij,k) * CONST_RVAP ) - 1.0_RP )
         dents(ij,k) = max ( dents(ij,k) * DENTS_FACT, DENTS_MIN )
      enddo
      enddo
do k = 1, kdim
do ij = 1, ijdim
if ( .NOT. flag(ij,k) ) then
tem (ij,k) = tem(ij,k) - ( ents(ij,k) - ent(ij,k) ) / dents(ij,k)
tem (ij,k) = max ( tem(ij,k), TEMMIN )
dtem(ij,k) = abs( tem(ij,k) - tem_old(ij,k) )
endif
if ( dtem(ij,k) < DTEM_EPS0 ) then
dtem(ij,k) = 0.0_RP
flag(ij,k) = .true.
endif
enddo
enddo
if( maxval( dtem(:,:) ) < DTEM_EPS0 ) exit
enddo ! iteration
else
write(IO_FID_LOG,*) '### SATURATION_enthalpy: invalid adiabat_type=', adiabat_type
endif
return
end subroutine SATURATION_enthalpy
!-----------------------------------------------------------------------------
! (d qsw/d T)_{rho}: partial difference of qsat_water
subroutine moist_dqsw_dtem_rho( &
ijdim, &
kdim, &
tem, &
rho, &
dqsdtem )
!ESC! use mod_const, only: &
!ESC! CONST_Rvap, &
!ESC! CONST_TEM00
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim)
real(RP), intent(in) :: rho (ijdim,kdim)
real(RP), intent(out) :: dqsdtem(ijdim,kdim)
real(RP) :: psat(ijdim,kdim) ! saturation vapor pressure
real(RP) :: LovR(ijdim,kdim) ! latent heat for condensation
!---------------------------------------------------------------------------
call SATURATION_psat_liq_2D( ijdim, kdim, tem(:,:), psat(:,:) )
LovR (:,:) = LovR_liq + CPovR_liq * ( tem(:,:) - CONST_TEM00 )
dqsdtem(:,:) = psat(:,:) / ( rho (:,:) * CONST_Rvap * tem(:,:)**2 ) &
* ( LovR(:,:) / tem(:,:) - 1.0_RP )
return
end subroutine moist_dqsw_dtem_rho
!-----------------------------------------------------------------------------
! (d qsi/d T)_{rho}: partial difference of qsat_ice
subroutine moist_dqsi_dtem_rho( &
ijdim, &
kdim, &
tem, &
rho, &
dqsdtem )
!ESC! use mod_const, only: &
!ESC! CONST_Rvap, &
!ESC! CONST_TEM00
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim)
real(RP), intent(in) :: rho (ijdim,kdim)
real(RP), intent(out) :: dqsdtem(ijdim,kdim)
real(RP) :: psat(ijdim,kdim) ! saturation vapor pressure
real(RP) :: LovR(ijdim,kdim) ! latent heat for condensation
!---------------------------------------------------------------------------
call SATURATION_psat_ice_2D( ijdim, kdim, tem(:,:), psat(:,:) )
LovR (:,:) = LovR_ice + CPovR_ice * ( tem(:,:) - CONST_TEM00 )
dqsdtem(:,:) = psat(:,:) / ( rho (:,:) * CONST_Rvap * tem(:,:)**2 ) &
* ( LovR(:,:) / tem(:,:) - 1.0_RP )
return
end subroutine moist_dqsi_dtem_rho
!-----------------------------------------------------------------------------
! (d qs/d T)_{p} and (d qs/d p)_{T}
subroutine moist_dqsw_dtem_dpre( &
ijdim, &
kdim, &
tem, &
pre, &
dqsdtem, &
dqsdpre )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap, &
!ESC! CONST_TEM00
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim)
real(RP), intent(in) :: pre (ijdim,kdim)
real(RP), intent(out) :: dqsdtem(ijdim,kdim)
real(RP), intent(out) :: dqsdpre(ijdim,kdim)
real(RP) :: psat(ijdim,kdim) ! saturation vapor pressure
real(RP) :: LovR(ijdim,kdim) ! latent heat for condensation
real(RP) :: den1(ijdim,kdim) ! denominator
!---------------------------------------------------------------------------
call SATURATION_psat_liq_2D( ijdim, kdim, tem(:,:), psat(:,:) )
LovR (:,:) = LovR_liq + CPovR_liq * ( tem(:,:) - CONST_TEM00 )
den1 (:,:) = ( pre(:,:) - ( 1.0_RP-CONST_EPSvap ) * psat(:,:) )**2
dqsdpre(:,:) = -CONST_EPSvap * psat(:,:) / den1(:,:)
dqsdtem(:,:) = CONST_EPSvap * psat(:,:) / den1(:,:) * pre(:,:) * LovR(:,:) / tem(:,:)**2
return
end subroutine moist_dqsw_dtem_dpre
!-----------------------------------------------------------------------------
! (d qsi/d T)_{p} and (d qs/d p)_{T}
subroutine moist_dqsi_dtem_dpre( &
ijdim, &
kdim, &
tem, &
pre, &
dqsdtem, &
dqsdpre )
!ESC! use mod_const, only: &
!ESC! CONST_EPSvap, &
!ESC! CONST_TEM00
implicit none
integer, intent(in) :: ijdim
integer, intent(in) :: kdim
real(RP), intent(in) :: tem (ijdim,kdim)
real(RP), intent(in) :: pre (ijdim,kdim)
real(RP), intent(out) :: dqsdtem(ijdim,kdim)
real(RP), intent(out) :: dqsdpre(ijdim,kdim)
real(RP) :: psat(ijdim,kdim) ! saturation vapor pressure
real(RP) :: LovR(ijdim,kdim) ! latent heat for condensation
real(RP) :: den1(ijdim,kdim) ! denominator
!---------------------------------------------------------------------------
call SATURATION_psat_ice_2D( ijdim, kdim, tem(:,:), psat(:,:) )
LovR (:,:) = LovR_ice + CPovR_ice * ( tem(:,:) - CONST_TEM00 )
den1 (:,:) = ( pre(:,:) - ( 1.0_RP-CONST_EPSvap ) * psat(:,:) )**2
dqsdpre(:,:) = -CONST_EPSvap * psat(:,:) / den1(:,:)
dqsdtem(:,:) = CONST_EPSvap * psat(:,:) / den1(:,:) * pre(:,:) * LovR(:,:) / tem(:,:)**2
return
end subroutine moist_dqsi_dtem_dpre
end module mod_satadjust
|
State Before: α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
x y z : α
s t : Set α
⊢ einfsep (insert x s) = (⨅ (y : α) (_ : y ∈ s) (_ : x ≠ y), edist x y) ⊓ einfsep s State After: α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
x y z : α
s t : Set α
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : x ≠ y), edist x y) ⊓ einfsep s ≤ einfsep (insert x s) Tactic: refine' le_antisymm (le_min einfsep_insert_le (einfsep_anti (subset_insert _ _))) _ State Before: α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
x y z : α
s t : Set α
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : x ≠ y), edist x y) ⊓ einfsep s ≤ einfsep (insert x s) State After: α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
x y z : α
s t : Set α
⊢ ∀ (x_1 : α),
x_1 = x ∨ x_1 ∈ s →
∀ (y : α),
y = x ∨ y ∈ s → x_1 ≠ y → (⨅ (y : α) (_ : y ∈ s) (_ : x ≠ y), edist x y) ≤ edist x_1 y ∨ einfsep s ≤ edist x_1 y Tactic: simp_rw [le_einfsep_iff, inf_le_iff, mem_insert_iff] State Before: α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
x y z : α
s t : Set α
⊢ ∀ (x_1 : α),
x_1 = x ∨ x_1 ∈ s →
∀ (y : α),
y = x ∨ y ∈ s → x_1 ≠ y → (⨅ (y : α) (_ : y ∈ s) (_ : x ≠ y), edist x y) ≤ edist x_1 y ∨ einfsep s ≤ edist x_1 y State After: case inl.inl
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
y z✝ : α
s t : Set α
z : α
hyz : z ≠ z
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : z ≠ y), edist z y) ≤ edist z z ∨ einfsep s ≤ edist z z
case inl.inr
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
y✝ z✝ : α
s t : Set α
y z : α
hz : z ∈ s
hyz : y ≠ z
⊢ (⨅ (y_1 : α) (_ : y_1 ∈ s) (_ : y ≠ y_1), edist y y_1) ≤ edist y z ∨ einfsep s ≤ edist y z
case inr.inl
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
y✝ z✝ : α
s t : Set α
y : α
hy : y ∈ s
z : α
hyz : y ≠ z
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : z ≠ y), edist z y) ≤ edist y z ∨ einfsep s ≤ edist y z
case inr.inr
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
x y✝ z✝ : α
s t : Set α
y : α
hy : y ∈ s
z : α
hz : z ∈ s
hyz : y ≠ z
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : x ≠ y), edist x y) ≤ edist y z ∨ einfsep s ≤ edist y z Tactic: rintro y (rfl | hy) z (rfl | hz) hyz State Before: case inl.inl
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
y z✝ : α
s t : Set α
z : α
hyz : z ≠ z
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : z ≠ y), edist z y) ≤ edist z z ∨ einfsep s ≤ edist z z State After: no goals Tactic: exact False.elim (hyz rfl) State Before: case inl.inr
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
y✝ z✝ : α
s t : Set α
y z : α
hz : z ∈ s
hyz : y ≠ z
⊢ (⨅ (y_1 : α) (_ : y_1 ∈ s) (_ : y ≠ y_1), edist y y_1) ≤ edist y z ∨ einfsep s ≤ edist y z State After: no goals Tactic: exact Or.inl (iInf_le_of_le _ (iInf₂_le hz hyz)) State Before: case inr.inl
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
y✝ z✝ : α
s t : Set α
y : α
hy : y ∈ s
z : α
hyz : y ≠ z
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : z ≠ y), edist z y) ≤ edist y z ∨ einfsep s ≤ edist y z State After: case inr.inl
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
y✝ z✝ : α
s t : Set α
y : α
hy : y ∈ s
z : α
hyz : y ≠ z
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : z ≠ y), edist z y) ≤ edist z y ∨ einfsep s ≤ edist z y Tactic: rw [edist_comm] State Before: case inr.inl
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
y✝ z✝ : α
s t : Set α
y : α
hy : y ∈ s
z : α
hyz : y ≠ z
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : z ≠ y), edist z y) ≤ edist z y ∨ einfsep s ≤ edist z y State After: no goals Tactic: exact Or.inl (iInf_le_of_le _ (iInf₂_le hy hyz.symm)) State Before: case inr.inr
α : Type u_1
β : Type ?u.44124
inst✝ : PseudoEMetricSpace α
x y✝ z✝ : α
s t : Set α
y : α
hy : y ∈ s
z : α
hz : z ∈ s
hyz : y ≠ z
⊢ (⨅ (y : α) (_ : y ∈ s) (_ : x ≠ y), edist x y) ≤ edist y z ∨ einfsep s ≤ edist y z State After: no goals Tactic: exact Or.inr (einfsep_le_edist_of_mem hy hz hyz) |
function res = isdigit(s)
% res = isdigit(s);
% res(i) = 1 if s(i) is a digit between 0 and 9 (inclusive)
%
id = find(s >= '0' & s <= '9');
res = zeros(size(s));
res(id) = 1;
return;
|
@testset "@fastcall" begin
@test macroexpand(:(@fastcall f())) == :(f())
@test macroexpand(:(@fastcall f(x))) == :(f(x))
@test macroexpand(:(@fastcall f(x,y))) == :(f(x,y))
# these are confusing
@test_broken macroexpand(:(@fastcall f(a=1))) == :(GlobalRef(FastKeywords, Symbol("fastkw#f"))(GlobalRef(FastKeywords,:KW){:a}(1)))
@test_broken macroexpand(:(@fastcall f(a=1,b=2))) == :(GlobalRef(FastKeywords, Symbol("fastkw#f"))(FastKeywords.KW{:a}(1), KW{:b}(2)))
end
|
Set Warnings "-notation-overridden".
Require Import Category.Lib.
Require Export Category.Structure.Cocartesian.
Require Export Category.Construction.Coproduct.
Require Export Category.Instance.Cat.
Generalizable All Variables.
Set Primitive Projections.
Set Universe Polymorphism.
Unset Transparent Obligations.
(* Another way of reading this is that we're proving Cat^op is Cartesian. *)
Program Instance Cat_Cocartesian : @Cocartesian Cat := {
product_obj := @Coproduct;
fork := fun _ _ _ F G =>
{| fobj := fun x =>
match x with
| Datatypes.inl x => F x
| Datatypes.inr x => G x
end
; fmap := fun x y f =>
match x with
| Datatypes.inl x =>
match y with
| Datatypes.inl y => _
| Datatypes.inr y => False_rect _ _
end
| Datatypes.inr x =>
match y with
| Datatypes.inl y => False_rect _ _
| Datatypes.inr y => _
end
end |};
exl := fun _ _ =>
{| fobj := Datatypes.inl
; fmap := fun _ _ => _ |};
exr := fun _ _ =>
{| fobj := Datatypes.inr
; fmap := fun _ _ => _ |};
}.
Next Obligation. exact (fmap f). Defined.
Next Obligation. exact (fmap f). Defined.
Next Obligation.
proper.
destruct x, y; simpl in *;
solve [ apply fmap_respects; auto | contradiction ].
Qed.
Next Obligation.
destruct x; simpl in *; cat.
Qed.
Next Obligation.
destruct x, y, z; simpl in *; try tauto;
apply fmap_comp.
Qed.
Next Obligation.
rename x into A.
rename y into B.
rename z into C.
proper.
destruct x3, y1; simpl; auto; tauto.
Qed.
Next Obligation.
rename x into A.
rename y into B.
rename z into C.
split; intros; simplify.
- apply (e (Datatypes.inl x0) (Datatypes.inl y)).
- apply (e (Datatypes.inr x0) (Datatypes.inr y)).
- destruct x1; auto.
- destruct x1, y.
+ apply e0; tauto.
+ tauto.
+ tauto.
+ apply e; tauto.
Qed.
|
In 2008, Fernandez started dating Bahraini prince Hassan bin Rashid Al Khalifa, whom she met at a mutual friend's party; they separated in 2011. While filming Housefull 2 in 2011, Fernandez began a romantic relationship with director Sajid Khan. The relationship attracted media coverage in India and there was speculation of an impending wedding. However, the relationship ended in May 2013.
|
[STATEMENT]
lemma (in group) set_mult_carrier_idem:
assumes "subgroup H G"
shows "(carrier G) <#> H = carrier G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. carrier G <#> H = carrier G
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. carrier G <#> H \<subseteq> carrier G
2. carrier G \<subseteq> carrier G <#> H
[PROOF STEP]
show "(carrier G)<#>H \<subseteq> carrier G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. carrier G <#> H \<subseteq> carrier G
[PROOF STEP]
unfolding set_mult_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Union>h\<in>carrier G. \<Union>k\<in>H. {h \<otimes> k}) \<subseteq> carrier G
[PROOF STEP]
using subgroup.subset assms
[PROOF STATE]
proof (prove)
using this:
subgroup ?H ?G \<Longrightarrow> ?H \<subseteq> carrier ?G
subgroup H G
goal (1 subgoal):
1. (\<Union>h\<in>carrier G. \<Union>k\<in>H. {h \<otimes> k}) \<subseteq> carrier G
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
carrier G <#> H \<subseteq> carrier G
goal (1 subgoal):
1. carrier G \<subseteq> carrier G <#> H
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. carrier G \<subseteq> carrier G <#> H
[PROOF STEP]
have " (carrier G) #> \<one> = carrier G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. carrier G #> \<one> = carrier G
[PROOF STEP]
unfolding set_mult_def r_coset_def group_axioms
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Union>h\<in>carrier G. {h \<otimes> \<one>}) = carrier G
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
carrier G #> \<one> = carrier G
goal (1 subgoal):
1. carrier G \<subseteq> carrier G <#> H
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
carrier G #> \<one> = carrier G
goal (1 subgoal):
1. carrier G \<subseteq> carrier G <#> H
[PROOF STEP]
have "(carrier G) #> \<one> \<subseteq> (carrier G) <#> H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. carrier G #> \<one> \<subseteq> carrier G <#> H
[PROOF STEP]
unfolding set_mult_def r_coset_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Union>h\<in>carrier G. {h \<otimes> \<one>}) \<subseteq> (\<Union>h\<in>carrier G. \<Union>k\<in>H. {h \<otimes> k})
[PROOF STEP]
using assms subgroup.one_closed[OF assms]
[PROOF STATE]
proof (prove)
using this:
subgroup H G
\<one> \<in> H
goal (1 subgoal):
1. (\<Union>h\<in>carrier G. {h \<otimes> \<one>}) \<subseteq> (\<Union>h\<in>carrier G. \<Union>k\<in>H. {h \<otimes> k})
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
carrier G #> \<one> \<subseteq> carrier G <#> H
goal (1 subgoal):
1. carrier G \<subseteq> carrier G <#> H
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
carrier G #> \<one> = carrier G
carrier G #> \<one> \<subseteq> carrier G <#> H
[PROOF STEP]
show "carrier G \<subseteq> (carrier G) <#> H"
[PROOF STATE]
proof (prove)
using this:
carrier G #> \<one> = carrier G
carrier G #> \<one> \<subseteq> carrier G <#> H
goal (1 subgoal):
1. carrier G \<subseteq> carrier G <#> H
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
carrier G \<subseteq> carrier G <#> H
goal:
No subgoals!
[PROOF STEP]
qed |
namespace xena
inductive xnat
| zero : xnat
| succ : xnat → xnat
open xnat
definition add : xnat → xnat → xnat
| n zero := n
| n (succ p) := succ (add n p)
notation a + b := add a b
definition one := succ zero
definition two := succ one
definition three := succ two
definition four := succ three
example : two + two = four :=
begin
dunfold two, -- goal now succ one + succ one = four
  refl, -- both sides reduce definitionally to succ (succ (succ (succ zero)))
end
|
data MyList : (a: Type) -> Type where
Nil: MyList a
Cons: (x: a) -> MyList a -> MyList a
insertList: a -> MyList a -> MyList a
insertList = Cons
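-- A tiny usage sketch (editor's addition; the list contents are arbitrary):
exampleList: MyList Nat
exampleList = insertList 1 (insertList 2 Nil)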
|
Formal statement is: lemma coeff_monom [simp]: "coeff (monom a m) n = (if m = n then a else 0)" Informal statement is: The $n$th coefficient of the monomial $\mathrm{monom}\ a\ m$ (that is, of $a x^m$) is $a$ when $n = m$, and $0$ otherwise.
/*! \file
\brief A file location.
Copyright (C) 2019-2022 kaoru https://www.tetengo.org/
*/
#if !defined(TETENGO_JSON_FILELOCATION_HPP)
#define TETENGO_JSON_FILELOCATION_HPP
#include <cstddef>
#include <string>
#include <boost/operators.hpp>
namespace tetengo::json
{
/*!
\brief A file location.
*/
class file_location : public boost::equality_comparable<file_location>
{
public:
// constructors and destructor
/*!
\brief Creates a file location.
\param line A line.
\param line_index A line index.
\param column_index A column index.
\throw std::out_of_range When column_index is larger than the line length.
*/
file_location(std::string line, std::size_t line_index, std::size_t column_index);
// functions
/*!
\brief Returns true when one file location is equal to another.
\param one One file location.
\param another Another file location.
\retval true When one is equal to another.
\retval false Otherwise.
*/
friend bool operator==(const file_location& one, const file_location& another);
/*!
\brief Returns the line.
\return The line.
*/
[[nodiscard]] const std::string& line() const;
/*!
\brief Returns the line index.
\return The line index.
*/
[[nodiscard]] std::size_t line_index() const;
/*!
\brief Returns the column index.
\return The column index.
*/
[[nodiscard]] std::size_t column_index() const;
/*!
\brief Sets a column index.
\param index A column index.
\throw std::out_of_range When index is larger than the line length.
*/
void set_column_index(std::size_t index);
private:
// variables
std::string m_line;
std::size_t m_line_index;
std::size_t m_column_index;
};
}
#endif
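// ---------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the original header). It only
// exercises the API declared above; the JSON text and the indices are
// arbitrary example values.
//
//     const tetengo::json::file_location location{ "{ \"key\": 42 }", 3, 5 };
//     assert(location.line_index() == 3);
//     assert(location.column_index() == 5);
//     // Passing a column index past the end of the line would throw
//     // std::out_of_range, as documented above.
// ---------------------------------------------------------------------------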
|
Robi Botos will be at Chalmers United Church in Kingston, ON on Sunday, March 5, 2017. Doors Open at 1:45PM, and the Show Starts around 2:00PM. Robi Botos line-up includes Robi Botos. Chalmers United Church is located at 212 Barrie Street, Kingston, ON.
Cutoff is Sunday, March 5, 2017 10:59 am EST unless it sells out earlier. |
# Inductors
Jupyter Notebook developed by [Gustavo S.S.](https://github.com/GSimas)
**An inductor consists of a coil of conducting wire.**
Any conductor of electric current has inductive properties and
may be regarded as an inductor. However, to enhance the inductive effect, a practical
inductor is usually formed into a cylindrical coil with
many turns of conducting wire, as illustrated in Figure 6.21.
When a current passes through an inductor, the voltage across it is found to be
directly proportional to the time rate of change of the current
\begin{align}
{\Large v = L \frac{di}{dt}}
\end{align}
where **L** is the constant of proportionality, called the inductance of the inductor.
**Inductance is the property whereby an inductor opposes the change
in the current flowing through it, measured in henrys (H).**
The inductance of an inductor depends on its physical dimensions and
construction.
\begin{align}
{\Large L = \frac{N^2 µ A}{l}}
\end{align}
where N is the number of turns, l is the length, A is the cross-sectional area,
and µ is the magnetic permeability of the core.
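The formula above can be checked numerically. The cell below is an editor's sketch (not part of the original text) that evaluates L for a hypothetical air-core solenoid; the dimensions and the choice µ ≈ µ0 are assumptions made only for this example.
```python
import numpy as np

mu0 = 4*np.pi*1e-7    # permeability of free space [H/m]; air core assumed, so µ ≈ µ0
N = 200               # number of turns (hypothetical)
A = np.pi*(0.01)**2   # cross-sectional area of a coil of 1 cm radius [m^2]
l = 0.05              # coil length [m] (hypothetical)

L = N**2 * mu0 * A / l
print("Inductance:", L, "H")   # roughly 3.2e-4 H, i.e. about 0.32 mH
```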
**Current-Voltage Relationship:**
\begin{align}
{\Large i = \frac{1}{L} \int_{t_0}^{t} v(τ)dτ + i(t_0)}
\end{align}
**Power Delivered to the Inductor:**
\begin{align}
{\Large p = vi = (L \frac{di}{dt})i}
\end{align}
**Stored Energy:**
\begin{align}
{\Large w = \int_{-∞}^{t} p(τ)dτ = L \int_{-∞}^{t} \frac{di}{dτ} idτ = L \int_{-∞}^{t} i di}
\end{align}
\begin{align}
{\Large w = \frac{1}{2} Li^2}
\end{align}
1. **An inductor acts like a short circuit to DC.**
2. The current through an inductor cannot change instantaneously.
3. **Like the ideal capacitor, the ideal inductor does not dissipate energy; the energy stored in it can be retrieved later. The inductor absorbs power from the circuit when storing energy and delivers power to the circuit when returning previously stored energy.**
4. A real, nonideal inductor has a significant resistive component, as shown in Figure 6.26. This is because the inductor is made of a conducting material such as copper, which has some resistance called the **winding resistance Rw**, appearing in series with the inductance of the inductor. The presence of Rw makes it both an energy-storage device and an energy-dissipation device. Since Rw is usually **very small**, it is ignored in most cases. The nonideal inductor also has a **winding capacitance Cw**, due to the capacitive coupling between the conducting coils. Cw is very small and can be ignored in most cases, except at high frequencies.
**Example 6.8**
The current through a 0.1-H inductor is i(t) = 10te–5t A. Find the voltage across the
inductor and the energy stored in it.
```python
print("Exemplo 6.8")
import numpy as np
from sympy import *
L = 0.1
t = symbols('t')
i = 10*t*exp(-5*t)
v = L*diff(i,t)
w = (L*i**2)/2
print("Tensão no indutor:",v,"V")
print("Energia:",w,"J")
```
Exemplo 6.8
Tensão no indutor: -5.0*t*exp(-5*t) + 1.0*exp(-5*t) V
Energia: 5.0*t**2*exp(-10*t) J
**Practice Problem 6.8**
If the current through a 1-mH inductor is i(t) = 60 cos(100t) mA, find the terminal voltage and the energy stored.
```python
print("Problema Prático 6.8")
m = 10**-3 # definition of the milli prefix
L = 1*m
i = 60*cos(100*t)*m
v = L*diff(i,t)
w = (L*i**2)/2
print("Tensão:",v,"V")
print("Energia:",w,"J")
```
Problema Prático 6.8
Tensão: -0.006*sin(100*t) V
Energia: 1.8e-6*cos(100*t)**2 J
**Example 6.9**
Find the current through a 5-H inductor if the voltage across it is
v(t):
30t^2, t>0
0, t<0
Also find the energy stored at t = 5 s. Assume i(0) = 0.
```python
print("Exemplo 6.9")
L = 5
v = 30*t**2
i = integrate(v,t)/L
print("Corrente:",i,"A")
w = L*(i.subs(t,5)**2)/2
print("Energia:",w,"J")
```
Exemplo 6.9
Corrente: 2*t**3 A
Energia: 156250 J
**Practice Problem 6.9**
The terminal voltage of a 2-H inductor is v = 10(1 – t) V. Find the current
flowing through it at t = 4 s and the energy stored in it at t = 4 s.
Assume i(0) = 2 A.
```python
print("Problema Prático 6.9")
L = 2
v = 10*(1 - t)
i0 = 2
i = integrate(v,t)/L + i0
i4 = i.subs(t,4)
print("Corrente no instante t = 4s:",i4,"A")
p = v*i
w = integrate(p,(t,0,4))
print("Energia no instante t = 4s:",w,"J")
```
Problema Prático 6.9
Corrente no instante t = 4s: -18 A
Energia no instante t = 4s: 320 J
**Example 6.10**
Consider the circuit in Figure 6.27a. Under DC conditions, find:
(a) i, vC, and iL;
(b) the energy stored in the capacitor and in the inductor.
```python
print("Exemplo 6.10")
Req = 1 + 5
Vf = 12
C = 1
L = 2
i = Vf/Req
print("Corrente i:",i,"A")
#vc = voltage across the capacitor = voltage across the 5-ohm resistor
vc = 5*i
print("Tensão Vc:",vc,"V")
print("Corrente il:",i,"A")
wl = (L*i**2)/2
wc = (C*vc**2)/2
print("Energia no Indutor:",wl,"J")
print("Energia no Capacitor:",wc,"J")
```
Exemplo 6.10
Corrente i: 2.0 A
Tensão Vc: 10.0 V
Corrente il: 2.0 A
Energia no Indutor: 4.0 J
Energia no Capacitor: 50.0 J
**Practice Problem 6.10**
Determine vC, iL, and the energy stored in the capacitor and in the inductor of the
circuit in Figure 6.28 under DC conditions.
```python
print("Problema Prático 6.10")
Cf = 10
C = 4
L = 6
il = 10*6/(6 + 2) #current divider
vc = 2*il
wl = (L*il**2)/2
wc = (C*vc**2)/2
print("Corrente il:",il,"A")
print("Tensão vC:",vc,"V")
print("Energia no Capacitor:",wc,"J")
print("Energia no Indutor:",wl,"J")
```
Problema Prático 6.10
Corrente il: 7.5 A
Tensão vC: 15.0 V
Energia no Capacitor: 450.0 J
Energia no Indutor: 168.75 J
## Inductors in Series and Parallel
**The equivalent inductance of series-connected inductors is the sum of the
individual inductances.**
\begin{align}
L_{eq} = L_1 + L_2 + ... + L_N = \sum_{i = 1}^{N}L_i
\end{align}
**The equivalent inductance of parallel inductors is the reciprocal of the sum of
the reciprocals of the individual inductances.**
\begin{align}
\frac{1}{L_{eq}} = \frac{1}{L_1} + \frac{1}{L_2} + ... + \frac{1}{L_N}, \qquad L_{eq} = \left(\sum_{i = 1}^{N} \frac{1}{L_i}\right)^{-1}
\end{align}
Or, for two inductances:
\begin{align}
L_{eq} = \frac{L_1 L_2}{L_1 + L_2}
\end{align}
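These combination rules translate directly into small Python helpers. The cell below is an editor's sketch (not part of the original notebook), and the inductance values passed in are arbitrary.
```python
def L_series(inductances):
    """Equivalent inductance of inductors in series: the sum of the values."""
    return sum(inductances)

def L_parallel(inductances):
    """Equivalent inductance of inductors in parallel: reciprocal of the sum of reciprocals."""
    return 1.0 / sum(1.0/L for L in inductances)

print(L_series([4, 8, 20]))      # 32
print(L_parallel([20, 12, 10]))  # approximately 4.29
```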
**Example 6.11**
Find the equivalent inductance of the circuit shown in Figure 6.31.
```python
print("Exemplo 6.11")
Leq1 = 20 + 12 + 10
Leq2 = Leq1*7/(Leq1 + 7)
Leq3 = 4 + Leq2 + 8
print("Indutância Equivalente:",Leq3,"H")
```
Exemplo 6.11
Indutância Equivalente: 18.0 H
**Practice Problem 6.11**
Calculate the equivalent inductance for the inductive ladder network in Figure 6.32.
```python
print("Problema Prático 6.11")
def Leq(x,y): #helper function: equivalent inductance of two inductances in parallel
L = x*y/(x + y)
return L
Leq1 = 40*m + 20*m
Leq2 = Leq(30*m,Leq1)
Leq3 = Leq2 + 100*m
Leq4 = Leq(40*m,Leq3)
Leq5 = 20*m + Leq4
Leq6 = Leq(Leq5,50*m)
print("Indutância Equivalente:",Leq6,"H")
```
Problema Prático 6.11
Indutância Equivalente: 0.025000000000000005 H
**Example 6.12**
For the circuit in Figure 6.33,
i(t) = 4(2 – e–10t) mA.
If i2(0) = –1 mA, find:
(a) i1(0);
(b) v(t), v1(t), and v2(t);
(c) i1(t) and i2(t).
```python
print("Exemplo 6.12")
i = 4*(2 - exp(-10*t))*m
i2_0 = -1*m
i1_0 = i.subs(t,0) - i2_0
print("Corrente i1(0):",i1_0,"A")
Leq1 = Leq(4,12)
Leq2 = Leq1 + 2
v = Leq2*diff(i,t)
v1 = 2*diff(i,t)
v2 = v - v1
print("Tensão v(t):",v,"V")
print("Tensão v1(t):",v1,"V")
print("Tensão v2(t):",v2,"V")
i1 = integrate(v1,(t,0,t))/4 + i1_0
i2 = integrate(v2,(t,0,t))/12 + i2_0
print("Corrente i1(t):",i1,"A")
print("Corrente i2(t):",i2,"A")
```
Exemplo 6.12
Corrente i1(0): 0.00500000000000000 A
Tensão v(t): 0.2*exp(-10*t) V
Tensão v1(t): 0.08*exp(-10*t) V
Tensão v2(t): 0.12*exp(-10*t) V
Corrente i1(t): 0.007 - 0.002*exp(-10*t) A
Corrente i2(t): -0.001*exp(-10*t) A
**Practice Problem 6.12**
In the circuit of Figure 6.34,
i1(t) = 0.6e–2t A.
If i(0) = 1.4 A, find:
(a) i2(0);
(b) i2(t) and i(t);
(c) v1(t), v2(t), and v(t).
```python
print("Problema Prático 6.12")
i1 = 0.6*exp(-2*t)
i_0 = 1.4
i2_0 = i_0 - i1.subs(t,0)
print("Corrente i2(0):",i2_0,"A")
v1 = 6*diff(i1,t)
i2 = integrate(v1,(t,0,t))/3 + i2_0
i = i1 + i2
print("Corrente i2(t):",i2,"A")
print("Corrente i(t):",i,"A")
Leq1 = Leq(3,6)
Leq2 = Leq1 + 8
v = Leq2*diff(i)
v2 = v - v1
print("Tensão v1(t):",v1,"V")
print("Tensão v2(t):",v2,"V")
print("Tensão v(t):",v,"V")
```
Problema Prático 6.12
Corrente i2(0): 0.800000000000000 A
Corrente i2(t): -0.4 + 1.2*exp(-2*t) A
Corrente i(t): -0.4 + 1.8*exp(-2*t) A
Tensão v1(t): -7.2*exp(-2*t) V
Tensão v2(t): -28.8*exp(-2*t) V
Tensão v(t): -36.0*exp(-2*t) V
|
```python
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as pl
import matplotlib as mpl
```
# Simulating Reaction-Diffusion Systems
In this notebook we want to show, step by step, how to simulate a reaction-diffusion system in Python. Here, we decided on the [Gray-Scott model](http://www.karlsims.com/rd.html) because of the variety of shapes it can produce.
In this model, we have two chemicals $A$ and $B$ which are distributed across a grid of size $N$. The symbol $A_{ij}$ represents the concentration of chemical $A$ at grid coordinates $(i,j)$ (similarly for $B$).
The discrete reaction-diffusion update equations are
$$
\begin{align}
A_{ij}(t+1) &= A_{ij}(t) + \Big[D_A (\nabla^2 A)_{ij} - A_{ij}B_{ij}^2 + f(1-A_{ij}) \Big]\times\Delta t\\
B_{ij}(t+1) &= B_{ij}(t) + \Big[D_B (\nabla^2 B)_{ij} + A_{ij}B_{ij}^2 - (k+f)B_{ij} \Big]\times\Delta t
\end{align}
$$
The single terms are explained in the tutorial linked above.
First, we need to take care of the discretized Laplacian terms.
## Discrete Laplacian
As explained on [Wikipedia](https://en.wikipedia.org/wiki/Discrete_Laplace_operator#Implementation_via_operator_discretization), the discretized Laplacian of a grid cell $(i,j)$ can be computed by summing over neighboring cells and subtract the value of the original cell with the total weight. One possible implementation is to only recognize direct neighbors of grid difference $\Delta=1$, i.e. at $(i,j-1)$, $(i,j+1)$, $(i-1,j)$, and $(i+1,j)$.
The whole update formula is
$$
(\nabla^2 A)_{ij} = A_{i,j-1} + A_{i,j+1} + A_{i-1,j} + A_{i+1,j} - 4A_{ij}
$$
How can we do this efficiently in `numpy`? Let's first define a small concentration matrix `A`.
```python
A = np.ones((3,3))
A[1,1] = 0
A
```
array([[1., 1., 1.],
[1., 0., 1.],
[1., 1., 1.]])
Now for each cell, we want to add its right neighbor. We can easily access this value in a matrix sense by doing a `numpy.roll`, which shifts all elements in a certain direction, with periodic boundary conditions.
```python
right_neighbor = np.roll(A, # the matrix to permute
                         (0,-1), # we want the right neighbor, so we shift the whole matrix -1 in the x-direction
(0,1), # apply this in directions (y,x)
)
right_neighbor
```
array([[1., 1., 1.],
[0., 1., 1.],
[1., 1., 1.]])
So to compute the discrete Laplacian of a matrix $M$, one could use the following function.
```python
def discrete_laplacian(M):
"""Get the discrete Laplacian of matrix M"""
L = -4*M
L += np.roll(M, (0,-1), (0,1)) # right neighbor
L += np.roll(M, (0,+1), (0,1)) # left neighbor
L += np.roll(M, (-1,0), (0,1)) # top neighbor
L += np.roll(M, (+1,0), (0,1)) # bottom neighbor
return L
```
Let's test this with our example matrix
```python
discrete_laplacian(A)
```
array([[ 0., -1., 0.],
[-1., 4., -1.],
[ 0., -1., 0.]])
Seems like it worked! Note that periodic boundary conditions were used, too.
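As a side note (an editor's addition, not in the original tutorial), the same 5-point stencil can also be written as a 2D convolution. The sketch below uses `scipy.ndimage.convolve` with periodic (`wrap`) boundaries and should agree with `discrete_laplacian`; SciPy is an extra dependency that the rest of this notebook does not need.
```python
from scipy.ndimage import convolve

def discrete_laplacian_conv(M):
    """Same 5-point Laplacian stencil, expressed as a convolution with periodic boundaries."""
    kernel = np.array([[0.0,  1.0, 0.0],
                       [1.0, -4.0, 1.0],
                       [0.0,  1.0, 0.0]])
    return convolve(M, kernel, mode='wrap')

# should print True: both implementations agree on the test matrix A
print(np.allclose(discrete_laplacian_conv(A), discrete_laplacian(A)))
```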
## Implement update formula
Computing the Laplacian was the hardest part. The other parts are simple. Just take the concentration matrices and apply the update formula.
```python
def gray_scott_update(A, B, DA, DB, f, k, delta_t):
"""
Updates a concentration configuration according to a Gray-Scott model
with diffusion coefficients DA and DB, as well as feed rate f and
kill rate k.
"""
# Let's get the discrete Laplacians first
LA = discrete_laplacian(A)
LB = discrete_laplacian(B)
# Now apply the update formula
diff_A = (DA*LA - A*B**2 + f*(1-A)) * delta_t
diff_B = (DB*LB + A*B**2 - (k+f)*B) * delta_t
A += diff_A
B += diff_B
return A, B
```
## Choosing initial conditions
The initial conditions are very important in the Gray-Scott model. If you just randomize the initial conditions, it can happen that everything simply dies out. It seems to be a good idea to assume a homogeneous distribution of chemicals with a small disturbance, which can then produce some patterns. We can also add a bit of noise. We can make the same choices as Rajesh Singh in his [somewhat more elaborate version](https://rajeshrinet.github.io/blog/2016/gray-scott/) and disturb the system with a square in the center of the grid. Let's do the following.
```python
def get_initial_configuration(N, random_influence=0.2):
"""
Initialize a concentration configuration. N is the side length
of the (N x N)-sized grid.
`random_influence` describes how much noise is added.
"""
# We start with a configuration where on every grid cell
# there's a lot of chemical A, so the concentration is high
A = (1-random_influence) * np.ones((N,N)) + random_influence * np.random.random((N,N))
# Let's assume there's only a bit of B everywhere
B = random_influence * np.random.random((N,N))
# Now let's add a disturbance in the center
N2 = N//2
radius = r = int(N/10.0)
A[N2-r:N2+r, N2-r:N2+r] = 0.50
B[N2-r:N2+r, N2-r:N2+r] = 0.25
return A, B
```
Let's also add a function which makes nice drawings and then draw some initial configurations.
```python
def draw(A,B):
"""draw the concentrations"""
fig, ax = pl.subplots(1,2,figsize=(5.65,4))
ax[0].imshow(A, cmap='Greys')
ax[1].imshow(B, cmap='Greys')
ax[0].set_title('A')
ax[1].set_title('B')
ax[0].axis('off')
ax[1].axis('off')
```
```python
A, B = get_initial_configuration(200)
draw(A,B)
```
Now we can simulate! We should first decide on some parameter values. Let's stick with [Rajesh's choices](https://rajeshrinet.github.io/blog/2016/gray-scott/).
```python
# update in time
delta_t = 1.0
# Diffusion coefficients
DA = 0.16
DB = 0.08
# define feed/kill rates
f = 0.060
k = 0.062
# grid size
N = 200
# simulation steps
N_simulation_steps = 10000
```
And for the simulation we simply update the concentrations for `N_simulation_steps` time steps.
```python
A, B = get_initial_configuration(200)
for t in range(N_simulation_steps):
A, B = gray_scott_update(A, B, DA, DB, f, k, delta_t)
draw(A,B)
```
Isn't this nice? You might also want to try the following values (directly taken from Rajesh's version above):
```python
DA, DB, f, k = 0.14, 0.06, 0.035, 0.065 # bacteria
A, B = get_initial_configuration(200)
for t in range(N_simulation_steps):
A, B = gray_scott_update(A, B, DA, DB, f, k, delta_t)
draw(A,B)
```
|
using Juriba
using Test
@testset "Juriba.jl" begin
# Write your tests here.
end
|
State Before: K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
⊢ ∑ x : K, x ^ i = 0 State After: case pos
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : i = 0
⊢ ∑ x : K, x ^ i = 0
case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
⊢ ∑ x : K, x ^ i = 0 Tactic: by_cases hi : i = 0 State Before: case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
⊢ ∑ x : K, x ^ i = 0 State After: no goals Tactic: classical
have hiq : ¬q - 1 ∣ i := by contrapose! h; exact Nat.le_of_dvd (Nat.pos_of_ne_zero hi) h
let φ : Kˣ ↪ K := ⟨fun x ↦ x, Units.ext⟩
have : univ.map φ = univ \ {0} := by
ext x
simp only [true_and_iff, Function.Embedding.coeFn_mk, mem_sdiff, Units.exists_iff_ne_zero,
mem_univ, mem_map, exists_prop_of_true, mem_singleton]
calc
(∑ x : K, x ^ i) = ∑ x in univ \ {(0 : K)}, x ^ i := by
rw [← sum_sdiff ({0} : Finset K).subset_univ, sum_singleton,
zero_pow (Nat.pos_of_ne_zero hi), add_zero]
_ = ∑ x : Kˣ, (x ^ i : K) := by simp [← this, univ.sum_map φ]
_ = 0 := by rw [sum_pow_units K i, if_neg]; exact hiq State Before: case pos
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : i = 0
⊢ ∑ x : K, x ^ i = 0 State After: no goals Tactic: simp only [hi, nsmul_one, sum_const, pow_zero, card_univ, cast_card_eq_zero] State Before: case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
⊢ ∑ x : K, x ^ i = 0 State After: case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
⊢ ∑ x : K, x ^ i = 0 Tactic: have hiq : ¬q - 1 ∣ i := by contrapose! h; exact Nat.le_of_dvd (Nat.pos_of_ne_zero hi) h State Before: case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
⊢ ∑ x : K, x ^ i = 0 State After: case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
⊢ ∑ x : K, x ^ i = 0 Tactic: let φ : Kˣ ↪ K := ⟨fun x ↦ x, Units.ext⟩ State Before: case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
⊢ ∑ x : K, x ^ i = 0 State After: case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
this : map φ univ = univ \ {0}
⊢ ∑ x : K, x ^ i = 0 Tactic: have : univ.map φ = univ \ {0} := by
ext x
simp only [true_and_iff, Function.Embedding.coeFn_mk, mem_sdiff, Units.exists_iff_ne_zero,
mem_univ, mem_map, exists_prop_of_true, mem_singleton] State Before: case neg
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
this : map φ univ = univ \ {0}
⊢ ∑ x : K, x ^ i = 0 State After: no goals Tactic: calc
(∑ x : K, x ^ i) = ∑ x in univ \ {(0 : K)}, x ^ i := by
rw [← sum_sdiff ({0} : Finset K).subset_univ, sum_singleton,
zero_pow (Nat.pos_of_ne_zero hi), add_zero]
_ = ∑ x : Kˣ, (x ^ i : K) := by simp [← this, univ.sum_map φ]
_ = 0 := by rw [sum_pow_units K i, if_neg]; exact hiq State Before: K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
⊢ ¬q - 1 ∣ i State After: K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
hi : ¬i = 0
h : q - 1 ∣ i
⊢ q - 1 ≤ i Tactic: contrapose! h State Before: K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
hi : ¬i = 0
h : q - 1 ∣ i
⊢ q - 1 ≤ i State After: no goals Tactic: exact Nat.le_of_dvd (Nat.pos_of_ne_zero hi) h State Before: K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
⊢ map φ univ = univ \ {0} State After: case a
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
x : K
⊢ x ∈ map φ univ ↔ x ∈ univ \ {0} Tactic: ext x State Before: case a
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
x : K
⊢ x ∈ map φ univ ↔ x ∈ univ \ {0} State After: no goals Tactic: simp only [true_and_iff, Function.Embedding.coeFn_mk, mem_sdiff, Units.exists_iff_ne_zero,
mem_univ, mem_map, exists_prop_of_true, mem_singleton] State Before: K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
this : map φ univ = univ \ {0}
⊢ ∑ x : K, x ^ i = ∑ x in univ \ {0}, x ^ i State After: no goals Tactic: rw [← sum_sdiff ({0} : Finset K).subset_univ, sum_singleton,
zero_pow (Nat.pos_of_ne_zero hi), add_zero] State Before: K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
this : map φ univ = univ \ {0}
⊢ ∑ x in univ \ {0}, x ^ i = ∑ x : Kˣ, ↑(x ^ i) State After: no goals Tactic: simp [← this, univ.sum_map φ] State Before: K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
this : map φ univ = univ \ {0}
⊢ ∑ x : Kˣ, ↑(x ^ i) = 0 State After: case hnc
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
this : map φ univ = univ \ {0}
⊢ ¬q - 1 ∣ i Tactic: rw [sum_pow_units K i, if_neg] State Before: case hnc
K : Type u_1
R : Type ?u.640358
inst✝¹ : Field K
inst✝ : Fintype K
i : ℕ
h : i < q - 1
hi : ¬i = 0
hiq : ¬q - 1 ∣ i
φ : Kˣ ↪ K := { toFun := fun x => ↑x, inj' := (_ : Function.Injective Units.val) }
this : map φ univ = univ \ {0}
⊢ ¬q - 1 ∣ i State After: no goals Tactic: exact hiq |
If $p$ is a prime number, then $p$ and $n$ are coprime if and only if $p$ does not divide $n$. |
using BCTRNN
using DiffEqSensitivity
using OrdinaryDiffEq
import DiffEqFlux: FastChain, FastDense
import Flux: ClipValue, ADAM
# Not in Project.toml
using Plots
gr()
include("sine_wave_dataloader.jl")
# Build and train a fully connected LTC model (BCTRNN) on the 2-D sine-wave data
# provided by sine_wave_dataloader.jl; returns the output of BCTRNN.optimize and the model.
function train_sine_ss_fc(epochs, solver=Tsit5();
sensealg=InterpolatingAdjoint(autojacvec=ReverseDiffVJP(true)),
T=Float32, model_size=5,
mtkize=false, gen_jac=false, lr=0.02, kwargs...)
train_dl = generate_2d_data(T)
f_in = 2
f_out = 1
im = BCTRNN.InputAllToAll()
sm = BCTRNN.SynsFullyConnected()
#om = BCTRNN.OutputAll()
om = BCTRNN.OutputIdxs(collect(1:model_size))
wiring = BCTRNN.WiringConfig(f_in, model_size, im,sm,om)
model = FastChain(BCTRNN.Mapper(f_in),
BCTRNN.LTCSynState(wiring, solver, sensealg; T, mtkize, gen_jac, kwargs...),
FastDense(wiring.n_out, f_out))
  # Visualize the wiring matrices as heatmaps
  hs = []
for (k,v) in wiring.matrices
push!(hs, heatmap(v, title=k))
end
display(plot(hs..., layout=length(hs)))
cb = BCTRNN.MyCallback(T; ecb=mycb, nepochs=epochs, nsamples=length(train_dl))
#opt = GalacticOptim.Flux.Optimiser(ClipValue(0.5), ADAM(0.02))
opt = BCTRNN.ClampBoundOptim(BCTRNN.get_bounds(model,T)..., ClipValue(T(1.0)), ADAM(T(lr)))
BCTRNN.optimize(model, BCTRNN.loss_seq, cb, opt, train_dl, epochs, T), model
end
@time train_sine_ss_fc(1000, AutoTsit5(Rosenbrock23(autodiff=false)); model_size=5, mtkize=true, lr=0.004)
|
#' Unioned HUC 8 & 12 polygons for the state of Utah
#'
#' Unioned HUC 8 & 12 polygons for the state of Utah.
#'
#' @format An sf type polygon shapefile
"huc8_12_poly"
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Results concerning double negation elimination.
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module Axiom.DoubleNegationElimination where
open import Axiom.ExcludedMiddle
open import Level
open import Relation.Nullary
open import Relation.Nullary.Negation
------------------------------------------------------------------------
-- Definition
-- The classical statement of double negation elimination says that
-- if a property is not not true then it is true.
DoubleNegationElimination : (ℓ : Level) → Set (suc ℓ)
DoubleNegationElimination ℓ = {P : Set ℓ} → ¬ ¬ P → P
------------------------------------------------------------------------
-- Properties
-- Double negation elimination is equivalent to excluded middle
em⇒dne : ∀ {ℓ} → ExcludedMiddle ℓ → DoubleNegationElimination ℓ
em⇒dne em = decidable-stable em
dne⇒em : ∀ {ℓ} → DoubleNegationElimination ℓ → ExcludedMiddle ℓ
dne⇒em dne = dne excluded-middle
|
%%
%% Automatically generated file from DocOnce source
%% (https://github.com/hplgit/doconce/)
%%
% #ifdef PTEX2TEX_EXPLANATION
%%
%% The file follows the ptex2tex extended LaTeX format, see
%% ptex2tex: http://code.google.com/p/ptex2tex/
%%
%% Run
%% ptex2tex myfile
%% or
%% doconce ptex2tex myfile
%%
%% to turn myfile.p.tex into an ordinary LaTeX file myfile.tex.
%% (The ptex2tex program: http://code.google.com/p/ptex2tex)
%% Many preprocess options can be added to ptex2tex or doconce ptex2tex
%%
%% ptex2tex -DMINTED myfile
%% doconce ptex2tex myfile envir=minted
%%
%% ptex2tex will typeset code environments according to a global or local
%% .ptex2tex.cfg configure file. doconce ptex2tex will typeset code
%% according to options on the command line (just type doconce ptex2tex to
%% see examples). If doconce ptex2tex has envir=minted, it enables the
%% minted style without needing -DMINTED.
% #endif
% #define PREAMBLE
% #ifdef PREAMBLE
%-------------------- begin preamble ----------------------
\documentclass[%
twoside, % oneside: electronic viewing, twoside: printing
final, % or draft (marks overfull hboxes, figures with paths)
10pt]{article}
\listfiles % print all files needed to compile this document
\usepackage{relsize,makeidx,color,setspace,amsmath,amsfonts}
\usepackage[table]{xcolor}
\usepackage{bm,microtype}
\usepackage{ptex2tex}
\usepackage[T1]{fontenc}
%\usepackage[latin1]{inputenc}
\usepackage[utf8]{inputenc}
\usepackage{lmodern} % Latin Modern fonts derived from Computer Modern
% Hyperlinks in PDF:
\definecolor{linkcolor}{rgb}{0,0,0.4}
\usepackage[%
colorlinks=true,
linkcolor=linkcolor,
urlcolor=linkcolor,
citecolor=black,
filecolor=black,
%filecolor=blue,
pdfmenubar=true,
pdftoolbar=true,
bookmarksdepth=3 % Uncomment (and tweak) for PDF bookmarks with more levels than the TOC
]{hyperref}
%\hyperbaseurl{} % hyperlinks are relative to this root
\setcounter{tocdepth}{2} % number chapter, section, subsection
\usepackage[framemethod=TikZ]{mdframed}
% --- begin definitions of admonition environments ---
% --- end of definitions of admonition environments ---
% prevent orhpans and widows
\clubpenalty = 10000
\widowpenalty = 10000
% --- end of standard preamble for documents ---
% insert custom LaTeX commands...
\raggedbottom
\makeindex
%-------------------- end preamble ----------------------
\begin{document}
% #endif
% ------------------- main content ----------------------
% Slides for FYS-KJM4480
% ----------------- title -------------------------
\thispagestyle{empty}
\begin{center}
{\LARGE\bf
\begin{spacing}{1.25}
Slides from FYS4411/9411: Definitions of the many-body problem
\end{spacing}
}
\end{center}
% ----------------- author(s) -------------------------
\begin{center}
{\bf Morten Hjorth-Jensen${}^{1, 2}$} \\ [0mm]
\end{center}
\begin{center}
% List of all institutions:
\centerline{{\small ${}^1$Department of Physics, University of Oslo, Oslo, Norway}}
\centerline{{\small ${}^2$National Superconducting Cyclotron Laboratory, Michigan State University, East Lansing, MI 48824, USA}}
\end{center}
% ----------------- end author(s) -------------------------
\begin{center} % date
Spring 2015
\end{center}
\vspace{1cm}
% !split
\subsection{Quantum Many-particle Methods}
% --- begin paragraph admon ---
\paragraph{}
\begin{itemize}
\item Large-scale diagonalization (iterative methods such as Lanczos' method, reaching dimensionalities of $10^{10}$ states)
\item Coupled cluster theory, the favoured method in quantum chemistry, molecular and atomic physics. It is also applied to ab initio calculations in nuclear physics, including large nuclei.
\item Perturbative many-body methods
\item Density functional theories/Mean-field theory and Hartree-Fock theory (covered partly also in FYS-MENA4111)
\item Monte-Carlo methods (Only in FYS4411, Computational quantum mechanics)
\item Green's function theories (depending on interest)
\item and others. The physics of the system hints at which many-body method to use.
\end{itemize}
\noindent
% --- end paragraph admon ---
% !split
\subsection{Selected Texts and Many-body theory}
% --- begin paragraph admon ---
\paragraph{}
\begin{itemize}
\item Blaizot and Ripka, \emph{Quantum Theory of Finite systems}, MIT press 1986
\item Negele and Orland, \emph{Quantum Many-Particle Systems}, Addison-Wesley, 1987.
\item Fetter and Walecka, \emph{Quantum Theory of Many-Particle Systems}, McGraw-Hill, 1971.
\item Helgaker, Jorgensen and Olsen, \emph{Molecular Electronic Structure Theory}, Wiley, 2001.
\item Mattuck, \emph{Guide to Feynman Diagrams in the Many-Body Problem}, Dover, 1971.
\item Dickhoff and Van Neck, \emph{Many-Body Theory Exposed}, World Scientific, 2006.
\end{itemize}
\noindent
% --- end paragraph admon ---
% !split
\subsection{Definitions}
% --- begin paragraph admon ---
\paragraph{}
An operator is denoted by a caret, $\hat{O}$, throughout. Unless otherwise specified, the number of particles is
always $N$ and $d$ is the dimension of the system. In nuclear physics
we normally define the total number of particles to be $A=N+Z$, where
$N$ is the total number of neutrons and $Z$ the total number of
protons. In case of other baryons, such as isobars $\Delta$ or various
hyperons such as $\Lambda$ or $\Sigma$, one needs to add their
definitions. Hereafter, $N$ is reserved for the total number of
particles, unless otherwise specified.
% --- end paragraph admon ---
% !split
\subsection{Definitions}
% --- begin paragraph admon ---
\paragraph{}
The quantum numbers of a single-particle state in coordinate space are
defined by the variable
\[
x=({\bf r},\sigma),
\]
where
\[
{\bf r}\in {\mathbb{R}}^{d},
\]
with $d=1,2,3$, represents the spatial coordinates and $\sigma$ is the eigenspin of the particle. For fermions with eigenspin $1/2$ this means that
\[
x\in {\mathbb{R}}^{d}\oplus (\frac{1}{2}),
\]
and the integral $\int dx = \sum_{\sigma}\int d^dr = \sum_{\sigma}\int d{\bf r}$,
and
\[
\int d^Nx= \int dx_1\int dx_2\dots\int dx_N.
\]
% --- end paragraph admon ---
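As a small illustration (added here, not part of the original notes), for a spin-$1/2$ particle in $d=3$ dimensions the integral over $x$ simply means summing over the two spin projections,
\[
\int dx\, f(x) = \sum_{\sigma=\pm 1/2}\int d{\bf r}\, f({\bf r},\sigma)
= \int d{\bf r}\left[f({\bf r},+\tfrac{1}{2})+f({\bf r},-\tfrac{1}{2})\right].
\]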
% !split
\subsection{Definitions}
% --- begin paragraph admon ---
\paragraph{}
The quantum mechanical wave function of a given state with quantum numbers $\lambda$ (encompassing all quantum numbers needed to specify the system), ignoring time, is
\[
\Psi_{\lambda}=\Psi_{\lambda}(x_1,x_2,\dots,x_N),
\]
with $x_i=({\bf r}_i,\sigma_i)$ and the projection of $\sigma_i$ takes the values
$\{-1/2,+1/2\}$ for particles with spin $1/2$.
We will hereafter always refer to $\Psi_{\lambda}$ as the exact wave function, and if the ground state is not degenerate we label it as
\[
\Psi_0=\Psi_0(x_1,x_2,\dots,x_N).
\]
% --- end paragraph admon ---
% !split
\subsection{Definitions}
% --- begin paragraph admon ---
\paragraph{}
Since the solution $\Psi_{\lambda}$ can seldom be found in closed form, approximations are sought. Here we define an approximate wave function, or ansatz, to the exact wave function as
\[
\Phi_{\lambda}=\Phi_{\lambda}(x_1,x_2,\dots,x_N),
\]
with
\[
\Phi_0=\Phi_0(x_1,x_2,\dots,x_N),
\]
being the ansatz to the ground state.
% --- end paragraph admon ---
% !split
\subsection{Definitions}
% --- begin paragraph admon ---
\paragraph{}
The wave function $\Psi_{\lambda}$ is sought in the Hilbert space of either symmetric or anti-symmetric $N$-body functions, namely
\[
\Psi_{\lambda}\in {\cal H}_N:= {\cal H}_1\otimes{\cal H}_1\otimes\dots\otimes{\cal H}_1,
\]
where the single-particle Hilbert space ${\cal H}_1$ is the space of square integrable functions over
${\mathbb{R}}^{d}\oplus (\sigma)$
resulting in
\[
{\cal H}_1:= L^2(\mathbb{R}^{d}\oplus (\sigma)).
\]
% --- end paragraph admon ---
% !split
\subsection{Definitions}
% --- begin paragraph admon ---
\paragraph{}
Our Hamiltonian is invariant under the permutation (interchange) of two particles.
Since we deal with fermions however, the total wave function is antisymmetric.
Let $\hat{P}$ be an operator which interchanges two particles.
Due to the symmetries we have ascribed to our Hamiltonian, this operator commutes with the total Hamiltonian,
\[
[\hat{H},\hat{P}] = 0,
\]
meaning that $\Psi_{\lambda}(x_1, x_2, \dots , x_N)$ is an eigenfunction of
$\hat{P}$ as well, that is
\[
\hat{P}_{ij}\Psi_{\lambda}(x_1, x_2, \dots,x_i,\dots,x_j,\dots,x_N)=
\beta\Psi_{\lambda}(x_1, x_2, \dots,x_j,\dots,x_i,\dots,x_N),
\]
where $\beta$ is the eigenvalue of $\hat{P}$. We have introduced the suffix $ij$ in order to indicate that we permute particles $i$ and $j$.
The Pauli principle tells us that the total wave function for a system of fermions
has to be antisymmetric, resulting in the eigenvalue $\beta = -1$.
% --- end paragraph admon ---
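As a simple illustration (added here), for two particles the antisymmetry requirement reads
\[
\hat{P}_{12}\Psi_{\lambda}(x_1,x_2)=\Psi_{\lambda}(x_2,x_1)=-\Psi_{\lambda}(x_1,x_2),
\]
so that the wave function vanishes whenever the two particles carry the same coordinates and spin projection, $x_1=x_2$.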
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The Schrodinger equation reads
\begin{equation}
\hat{H}(x_1, x_2, \dots , x_N) \Psi_{\lambda}(x_1, x_2, \dots , x_N) =
E_\lambda \Psi_\lambda(x_1, x_2, \dots , x_N), \label{eq:basicSE1}
\end{equation}
where the vector $x_i$ represents the coordinates (spatial and spin) of particle $i$, $\lambda$ stands for all the quantum
numbers needed to classify a given $N$-particle state and $\Psi_{\lambda}$ is the pertaining eigenfunction. Throughout this course,
$\Psi$ refers to the exact eigenfunction, unless otherwise stated.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
We write the Hamilton operator, or Hamiltonian, in a generic way
\[
\hat{H} = \hat{T} + \hat{V}
\]
where $\hat{T}$ represents the kinetic energy of the system
\[
\hat{T} = \sum_{i=1}^N \frac{\mathbf{p}_i^2}{2m_i} = \sum_{i=1}^N \left( -\frac{\hbar^2}{2m_i} \mathbf{\nabla_i}^2 \right) =
\sum_{i=1}^N t(x_i)
\]
while the operator $\hat{V}$ for the potential energy is given by
\begin{equation}
 \hat{V} = \sum_{i=1}^N \hat{u}_{\mathrm{ext}}(x_i) + \sum_{i<j}^N v(x_i,x_j)+\sum_{i<j<k}^N v(x_i,x_j,x_k)+\dots
\label{eq:firstv}
\end{equation}
Hereafter we use natural units, viz.~$\hbar=c=e=1$, with $e$ the elementary charge and $c$ the speed of light. This means that momenta and masses
have dimension energy.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
In quantum chemistry, after the Born-Oppenheimer approximation has been introduced, which effectively freezes out the nucleonic degrees of freedom, the Hamiltonian for $N=n_e$ electrons takes the following form
\[
\hat{H} = \sum_{i=1}^{n_e} t(x_i) - \sum_{i=1}^{n_e} k\frac{Z}{r_i} + \sum_{i < j}^{n_e} \frac{k}{r_{ij}},
\]
with $k=e^2/4\pi\epsilon_0=1.44$ eV\,nm.
% --- end paragraph admon ---
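As a quick numerical check (added here), for hydrogen ($Z=1$, $n_e=1$) this Hamiltonian gives the familiar ground-state energy
\[
E_1=-\frac{kZ^2}{2a_0}=-\frac{1.44\ \mathrm{eV\,nm}}{2\times 0.0529\ \mathrm{nm}}\approx -13.6\ \mathrm{eV},
\]
with $a_0$ the Bohr radius.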
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
We can rewrite this as
\begin{equation}
\hat{H} = \hat{H}_0 + \hat{H}_I
= \sum_{i=1}^{n_e}\hat{h}_0(x_i) + \sum_{i < j}^{n_e}\frac{1}{r_{ij}},
\label{H1H2}
\end{equation}
where we have defined
\[
r_{ij}=| {\bf r}_i-{\bf r}_j|,
\]
and
\begin{equation}
  \hat{h}_0(x_i) = \hat{t}(x_i) - \frac{Z}{r_i}.
\label{hi}
\end{equation}
The first term of Eq.~(\ref{H1H2}), $\hat{H}_0$, is the sum of the $n_e$
\emph{one-body} Hamiltonians $\hat{h}_0$. Each individual
Hamiltonian $\hat{h}_0$ contains the kinetic energy operator of an
electron and its potential energy due to the attraction of the
nucleus. The second term, $\hat{H}_I$, is the sum of the $n_e(n_e-1)/2$
two-body interactions between each pair of electrons. Note that the double sum carries the restriction $i < j$.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The potential energy term due to the attraction of the nucleus defines the one-body field $u_i=u_{\mathrm{ext}}(x_i)$ of Eq.~(\ref{eq:firstv}).
We have moved this term into the $\hat{H}_0$ part of the Hamiltonian, instead of keeping it in $\hat{V}$ as in Eq.~(\ref{eq:firstv}).
The reason is that we will hereafter treat $\hat{H}_0$ as our non-interacting Hamiltonian. For a many-body wave function $\Phi_{\lambda}$ defined by an
appropriate single-particle basis, we may solve exactly the non-interacting eigenvalue problem
\[
\hat{H}_0\Phi_{\lambda}= w_{\lambda}\Phi_{\lambda},
\]
with $w_{\lambda}$ being the non-interacting energy. This energy is defined by the sum over single-particle energies to be defined below.
For atoms the single-particle energies could be the hydrogen-like single-particle energies corrected for the charge $Z$. For nuclei and quantum
dots, these energies could be given by the harmonic oscillator in three and two dimensions, respectively.
% --- end paragraph admon ---
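To make the last statement concrete (example added here), the harmonic-oscillator single-particle energies referred to above are
\[
\varepsilon_{nl}=\hbar\omega\left(2n+l+\frac{3}{2}\right)\quad\mathrm{in\ three\ dimensions},\qquad
\varepsilon_{nm}=\hbar\omega\left(2n+|m|+1\right)\quad\mathrm{in\ two\ dimensions},
\]
while the hydrogen-like single-particle energies corrected for the charge $Z$ are $\varepsilon_n=-Z^2/(2n^2)$ in atomic units.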
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
We will assume that the interacting part of the Hamiltonian
can be approximated by a two-body interaction.
This means that our Hamiltonian is written as
\begin{equation}
\hat{H} = \hat{H}_0 + \hat{H}_I
= \sum_{i=1}^N \hat{h}_0(x_i) + \sum_{i < j}^N V(r_{ij}),
\label{Hnuclei}
\end{equation}
with
\begin{equation}
H_0=\sum_{i=1}^N \hat{h}_0(x_i) = \sum_{i=1}^N\left(\hat{t}(x_i) + \hat{u}_{\mathrm{ext}}(x_i)\right).
\label{hinuclei}
\end{equation}
The one-body part $u_{\mathrm{ext}}(x_i)$ is normally approximated by a harmonic oscillator potential or by the Coulomb interaction an electron feels from the nucleus. However, other potentials are fully possible, such as
one derived from the self-consistent solution of the Hartree-Fock equations.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
Our Hamiltonian is invariant under the permutation (interchange) of two particles. % (exercise here, prove it)
Since we deal with fermions however, the total wave function is antisymmetric.
Let $\hat{P}$ be an operator which interchanges two particles.
Due to the symmetries we have ascribed to our Hamiltonian, this operator commutes with the total Hamiltonian,
\[
[\hat{H},\hat{P}] = 0,
\]
meaning that $\Psi_{\lambda}(x_1, x_2, \dots , x_N)$ is an eigenfunction of
$\hat{P}$ as well, that is
\[
\hat{P}_{ij}\Psi_{\lambda}(x_1, x_2, \dots,x_i,\dots,x_j,\dots,x_N)=
\beta\Psi_{\lambda}(x_1, x_2, \dots,x_j,\dots,x_i,\dots,x_N),
\]
where $\beta$ is the eigenvalue of $\hat{P}$. We have introduced the suffix $ij$ in order to indicate that we permute particles $i$ and $j$.
The Pauli principle tells us that the total wave function for a system of fermions
has to be antisymmetric, resulting in the eigenvalue $\beta = -1$.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
In our case we assume that we can approximate the exact eigenfunction with a Slater determinant
\begin{equation}
\Phi(x_1, x_2,\dots ,x_N,\alpha,\beta,\dots, \sigma)=\frac{1}{\sqrt{N!}}
\left| \begin{array}{ccccc} \psi_{\alpha}(x_1)& \psi_{\alpha}(x_2)& \dots & \dots & \psi_{\alpha}(x_N)\\
\psi_{\beta}(x_1)&\psi_{\beta}(x_2)& \dots & \dots & \psi_{\beta}(x_N)\\
\dots & \dots & \dots & \dots & \dots \\
\dots & \dots & \dots & \dots & \dots \\
\psi_{\sigma}(x_1)&\psi_{\sigma}(x_2)& \dots & \dots & \psi_{\sigma}(x_N)\end{array} \right|, \label{eq:HartreeFockDet}
\end{equation}
where $x_i$ stands for the coordinates and spin values of particle $i$ and $\alpha,\beta,\dots, \sigma$
are the quantum numbers needed to specify the single-particle states.
% --- end paragraph admon ---
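For the simplest case of $N=2$ particles in the single-particle states $\alpha$ and $\beta$, the determinant reduces to (illustration added here)
\[
\Phi(x_1,x_2,\alpha,\beta)=\frac{1}{\sqrt{2}}\left[\psi_{\alpha}(x_1)\psi_{\beta}(x_2)-\psi_{\alpha}(x_2)\psi_{\beta}(x_1)\right],
\]
which changes sign under the interchange $x_1\leftrightarrow x_2$ and vanishes if $\alpha=\beta$, in accordance with the Pauli principle.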
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The single-particle functions $\psi_{\alpha}(x_i)$ are eigenfunctions of the one-body
Hamiltonian $\hat{h}_0$, that is
\[
\hat{h}_0(x_i)=\hat{t}(x_i) + \hat{u}_{\mathrm{ext}}(x_i),
\]
with eigenvalues
\[
\hat{h}_0(x_i) \psi_{\alpha}(x_i)=\left(\hat{t}(x_i) + \hat{u}_{\mathrm{ext}}(x_i)\right)\psi_{\alpha}(x_i)=\varepsilon_{\alpha}\psi_{\alpha}(x_i).
\]
The energies $\varepsilon_{\alpha}$ are the so-called non-interacting single-particle energies, or unperturbed energies.
The total energy is in this case the sum over all single-particle energies, if no two-body or more complicated
many-body interactions are present.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
Let us denote the ground state energy by $E_0$. According to the
variational principle we have
\[
E_0 \le E[\Phi] = \int \Phi^*\hat{H}\Phi d\mathbf{\tau}
\]
where $\Phi$ is a trial function which we assume to be normalized
\[
\int \Phi^*\Phi d\mathbf{\tau} = 1,
\]
where we have used the shorthand $d\mathbf{\tau}=d\mathbf{r}_1d\mathbf{r}_2\dots d\mathbf{r}_N$.
% --- end paragraph admon ---
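The inequality can be made plausible in one line (reasoning step added here): expanding the normalized trial function in the exact eigenstates, $\Phi=\sum_{\lambda}c_{\lambda}\Psi_{\lambda}$, gives
\[
E[\Phi]=\sum_{\lambda}|c_{\lambda}|^2E_{\lambda}\ge E_0\sum_{\lambda}|c_{\lambda}|^2=E_0,
\]
with equality only if all the weight resides in the ground state.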
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
In the Hartree-Fock method the trial function is the Slater
determinant of Eq.~(\ref{eq:HartreeFockDet}) which can be rewritten as
\[
\Phi(x_1,x_2,\dots,x_N,\alpha,\beta,\dots,\nu) = \frac{1}{\sqrt{N!}}\sum_{P} (-)^P\hat{P}\psi_{\alpha}(x_1)
\psi_{\beta}(x_2)\dots\psi_{\nu}(x_N)=\sqrt{N!}\hat{A}\Phi_H,
\]
where we have introduced the antisymmetrization operator $\hat{A}$ defined by the
summation over all possible permutations of two particles.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
It is defined as
\begin{equation}
\hat{A} = \frac{1}{N!}\sum_{p} (-)^p\hat{P},
\label{antiSymmetryOperator}
\end{equation}
with $p$ standing for the number of interchanges of two particles needed to build a given permutation. We have introduced for later use the so-called
Hartree-function, defined by the simple product of all possible single-particle functions
\[
\Phi_H(x_1,x_2,\dots,x_N,\alpha,\beta,\dots,\nu) =
\psi_{\alpha}(x_1)
\psi_{\beta}(x_2)\dots\psi_{\nu}(x_N).
\]
% --- end paragraph admon ---
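For $N=2$ the antisymmetrization operator contains only the identity and a single interchange (illustration added here),
\[
\hat{A}=\frac{1}{2}\left(1-\hat{P}_{12}\right),
\]
so that $\sqrt{2!}\,\hat{A}\Phi_H$ reproduces the two-particle Slater determinant written out earlier.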
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
Both $\hat{H}_0$ and $\hat{H}_I$ are invariant under all possible permutations of any two particles
and hence commute with $\hat{A}$
\begin{equation}
[H_0,\hat{A}] = [H_I,\hat{A}] = 0. \label{commutionAntiSym}
\end{equation}
Furthermore, $\hat{A}$ satisfies
\begin{equation}
\hat{A}^2 = \hat{A}, \label{AntiSymSquared}
\end{equation}
since acting with any permutation on the Slater
determinant reproduces it up to the sign of the permutation, which cancels the corresponding factor $(-)^p$ in Eq.~(\ref{antiSymmetryOperator}).
% --- end paragraph admon ---
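The property $\hat{A}^2=\hat{A}$ is easily verified for $N=2$ (check added here): since $\hat{P}_{12}^2=1$,
\[
\hat{A}^2=\frac{1}{4}\left(1-\hat{P}_{12}\right)^2=\frac{1}{4}\left(1-2\hat{P}_{12}+\hat{P}_{12}^2\right)=\frac{1}{2}\left(1-\hat{P}_{12}\right)=\hat{A}.
\]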
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The expectation value of $\hat{H}_0$
\[
\int \Phi^*\hat{H}_0\Phi d\mathbf{\tau}
= N! \int \Phi_H^*\hat{A}\hat{H}_0\hat{A}\Phi_H d\mathbf{\tau}
\]
is readily reduced to
\[
\int \Phi^*\hat{H}_0\Phi d\mathbf{\tau}
= N! \int \Phi_H^*\hat{H}_0\hat{A}\Phi_H d\mathbf{\tau},
\]
where we have used Eqs.~(\ref{commutionAntiSym}) and
(\ref{AntiSymSquared}). The next step is to replace the antisymmetrization
operator by its definition and to
replace $\hat{H}_0$ with the sum of one-body operators
\[
\int \Phi^*\hat{H}_0\Phi d\mathbf{\tau}
= \sum_{i=1}^N \sum_{p} (-)^p\int
\Phi_H^*\hat{h}_0\hat{P}\Phi_H d\mathbf{\tau}.
\]
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The integral vanishes if two or more particles are permuted in only one
of the Hartree-functions $\Phi_H$ because the individual single-particle wave functions are
orthogonal. We obtain then
\[
\int \Phi^*\hat{H}_0\Phi d\mathbf{\tau}= \sum_{i=1}^N \int \Phi_H^*\hat{h}_0\Phi_H d\mathbf{\tau}.
\]
Orthogonality of the single-particle functions allows us to further simplify the integral, and we
arrive at the following expression for the expectation values of the
sum of one-body Hamiltonians
\begin{equation}
\int \Phi^*\hat{H}_0\Phi d\mathbf{\tau}
= \sum_{\mu=1}^N \int \psi_{\mu}^*(\mathbf{r})\hat{h}_0\psi_{\mu}(\mathbf{r})
d\mathbf{r}.
\label{H1Expectation}
\end{equation}
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
We introduce the following shorthand for the above integral
\[
  \langle \mu | \hat{h}_0 | \mu \rangle = \int \psi_{\mu}^*(\mathbf{r})\hat{h}_0\psi_{\mu}(\mathbf{r}) d\mathbf{r},
\]
and rewrite Eq.~(\ref{H1Expectation}) as
\begin{equation}
\int \Phi^*\hat{H}_0\Phi d\mathbf{\tau}
= \sum_{\mu=1}^N \langle \mu | \hat{h}_0 | \mu \rangle.
\label{H1Expectation1}
\end{equation}
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The expectation value of the two-body part of the Hamiltonian is obtained in a
similar manner. We have
\[
\int \Phi^*\hat{H}_I\Phi d\mathbf{\tau}
= N! \int \Phi_H^*\hat{A}\hat{H}_I\hat{A}\Phi_H d\mathbf{\tau},
\]
which reduces to
\[
\int \Phi^*\hat{H}_I\Phi d\mathbf{\tau}
  = \sum_{i < j=1}^N \sum_{p} (-)^p\int
\Phi_H^*V(r_{ij})\hat{P}\Phi_H d\mathbf{\tau},
\]
by following the same arguments as for the one-body
Hamiltonian.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
Because of the dependence on the inter-particle distance $r_{ij}$, the contributions from permutations of
any two particles no longer vanish, and we get
\[
\int \Phi^*\hat{H}_I\Phi d\mathbf{\tau}
= \sum_{i < j=1}^N \int
  \Phi_H^*V(r_{ij})(1-P_{ij})\Phi_H d\mathbf{\tau},
\]
where $P_{ij}$ is the permutation operator that interchanges
particle $i$ and particle $j$. Again we use the assumption that the single-particle wave functions
are orthogonal.
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
We obtain
\begin{equation}
\begin{split}
\int \Phi^*\hat{H}_I\Phi d\mathbf{\tau}
= \frac{1}{2}\sum_{\mu=1}^N\sum_{\nu=1}^N
&\left[ \int \psi_{\mu}^*(x_i)\psi_{\nu}^*(x_j)V(r_{ij})\psi_{\mu}(x_i)\psi_{\nu}(x_j)
  dx_idx_j \right.\\
&\left.
- \int \psi_{\mu}^*(x_i)\psi_{\nu}^*(x_j)
V(r_{ij})\psi_{\nu}(x_i)\psi_{\mu}(x_j)
  dx_idx_j
\right]. \label{H2Expectation}
\end{split}
\end{equation}
The first term is the so-called direct term. It is frequently also called the Hartree term,
while the second is due to the Pauli principle and is called
the exchange term or just the Fock term.
The factor $1/2$ is introduced because we now run over
all pairs twice.
% --- end paragraph admon ---
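For $N=2$ particles in the states $\alpha$ and $\beta$ (illustration added here), the terms with $\mu=\nu$ cancel between the direct and exchange contributions, and the double sum collapses to a single direct and a single exchange term,
\[
\begin{split}
  \int \Phi^*\hat{H}_I\Phi d\mathbf{\tau}
  =&\int \psi_{\alpha}^*(x_1)\psi_{\beta}^*(x_2)V(r_{12})\psi_{\alpha}(x_1)\psi_{\beta}(x_2)dx_1dx_2\\
  &-\int \psi_{\alpha}^*(x_1)\psi_{\beta}^*(x_2)V(r_{12})\psi_{\beta}(x_1)\psi_{\alpha}(x_2)dx_1dx_2.
\end{split}
\]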
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The last equation allows us to introduce some further definitions.
The single-particle wave functions $\psi_{\mu}(x)$, labelled by the quantum numbers $\mu$ and the coordinates $x$,
are given by the overlap
\[
\psi_{\alpha}(x) = \langle x | \alpha \rangle .
\]
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
We introduce the following shorthands for the above two integrals
\[
\langle \mu\nu|\hat{v}|\mu\nu\rangle = \int \psi_{\mu}^*(x_i)\psi_{\nu}^*(x_j)V(r_{ij})\psi_{\mu}(x_i)\psi_{\nu}(x_j)
  dx_idx_j,
\]
and
\[
\langle \mu\nu|\hat{v}|\nu\mu\rangle = \int \psi_{\mu}^*(x_i)\psi_{\nu}^*(x_j)
V(r_{ij})\psi_{\nu}(x_i)\psi_{\mu}(x_j)
  dx_idx_j.
\]
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The direct and exchange matrix elements can be brought together if we define the antisymmetrized matrix element
\[
\langle \mu\nu|\hat{v}|\mu\nu\rangle_{\mathrm{AS}}= \langle \mu\nu|\hat{v}|\mu\nu\rangle-\langle \mu\nu|\hat{v}|\nu\mu\rangle,
\]
or for a general matrix element
\[
\langle \mu\nu|\hat{v}|\sigma\tau\rangle_{\mathrm{AS}}= \langle \mu\nu|\hat{v}|\sigma\tau\rangle-\langle \mu\nu|\hat{v}|\tau\sigma\rangle.
\]
It has the symmetry property
\[
\langle \mu\nu|\hat{v}|\sigma\tau\rangle_{\mathrm{AS}}= -\langle \mu\nu|\hat{v}|\tau\sigma\rangle_{\mathrm{AS}}=-\langle \nu\mu|\hat{v}|\sigma\tau\rangle_{\mathrm{AS}}.
\]
% --- end paragraph admon ---
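A direct consequence of this symmetry property (remark added here) is that the matrix element with two identical single-particle states vanishes,
\[
\langle \mu\mu|\hat{v}|\mu\mu\rangle_{\mathrm{AS}}= \langle \mu\mu|\hat{v}|\mu\mu\rangle-\langle \mu\mu|\hat{v}|\mu\mu\rangle=0,
\]
reflecting the fact that two identical fermions cannot occupy the same single-particle state.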
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
The antisymmetrized matrix element is also Hermitian, implying
\[
\langle \mu\nu|\hat{v}|\sigma\tau\rangle_{\mathrm{AS}}= \langle \sigma\tau|\hat{v}|\mu\nu\rangle_{\mathrm{AS}}.
\]
With these notations we rewrite Eq.~(\ref{H2Expectation}) as
\begin{equation}
\int \Phi^*\hat{H}_I\Phi d\mathbf{\tau}
= \frac{1}{2}\sum_{\mu=1}^N\sum_{\nu=1}^N \langle \mu\nu|\hat{v}|\mu\nu\rangle_{\mathrm{AS}}.
\label{H2Expectation2}
\end{equation}
% --- end paragraph admon ---
% !split
\subsection{Definitions and notations}
% --- begin paragraph admon ---
\paragraph{}
Combining Eqs.~(\ref{H1Expectation1}) and
(\ref{H2Expectation2}) we obtain the energy functional
\begin{equation}
E[\Phi]
= \sum_{\mu=1}^N \langle \mu | \hat{h}_0 | \mu \rangle +
\frac{1}{2}\sum_{{\mu}=1}^N\sum_{{\nu}=1}^N \langle \mu\nu|\hat{v}|\mu\nu\rangle_{\mathrm{AS}}.
\label{FunctionalEPhi}
\end{equation}
This energy functional will be our starting point for the Hartree-Fock calculations later in this course.
% --- end paragraph admon ---
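As a schematic numerical illustration (added here, not part of the original notes), the energy functional of Eq.~(\ref{FunctionalEPhi}) can be evaluated directly once the one-body elements $\langle\mu|\hat{h}_0|\nu\rangle$ and the antisymmetrized two-body elements $\langle\mu\nu|\hat{v}|\sigma\tau\rangle_{\mathrm{AS}}$ are available as arrays in some single-particle basis. The Python sketch below assumes such arrays (here called \texttt{h} and \texttt{v\_as}) have already been computed and that the $N$ occupied states are the first $N$ basis states.
\begin{verbatim}
import numpy as np

def reference_energy(h, v_as, N):
    """Energy functional E[Phi] for a single Slater determinant.

    h    : (n, n) array of one-body elements <mu|h0|nu>
    v_as : (n, n, n, n) array of antisymmetrized elements <mu nu|v|sigma tau>_AS
    N    : number of occupied single-particle states, taken to be indices 0..N-1
    """
    e_one = sum(h[mu, mu] for mu in range(N))
    e_two = 0.5 * sum(v_as[mu, nu, mu, nu] for mu in range(N) for nu in range(N))
    return e_one + e_two

# Tiny smoke test: with a diagonal h and a vanishing interaction the energy
# is just the sum of the N lowest single-particle energies.
n, N = 4, 2
h = np.diag([1.0, 2.0, 3.0, 4.0])
v_as = np.zeros((n, n, n, n))
print(reference_energy(h, v_as, N))   # prints 3.0
\end{verbatim}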
% ------------------- end of main content ---------------
% #ifdef PREAMBLE
\printindex
\end{document}
% #endif
|