Datasets: AI4M
On this page you can make an online donation through iDeal or one of the other payment systems. Thank you very much for considering supporting our ministry! I hereby authorize Stichting GlobalRize Nederland to collect the amount shown above from my account periodically.
lemma zero_le_scaleR_iff: fixes b :: "'a::ordered_real_vector" shows "0 \<le> a *\<^sub>R b \<longleftrightarrow> 0 < a \<and> 0 \<le> b \<or> a < 0 \<and> b \<le> 0 \<or> a = 0" (is "?lhs = ?rhs")
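A short case analysis (an added aside, not part of the lemma) showing why the three-way disjunction on the right-hand side is exactly what is needed:

```latex
% Sketch: split on the sign of the scalar a.
\[
0 \le a \cdot b \iff
\begin{cases}
0 \le b, & a > 0 \quad \text{(scaling by a positive scalar preserves order)}\\
b \le 0, & a < 0 \quad \text{(scaling by a negative scalar reverses order)}\\
\text{always}, & a = 0 \quad (0 \cdot b = 0).
\end{cases}
\]
```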
MODULE MOD_RADNET

CONTAINS

SUBROUTINE RADNET(N, ENLTE, TL, WEIGHT, NCHARG, EION, ELEVEL, EINST,
$                 SL, EN, NOM, RRATE, XLAMBDA, FWEIGHT,
$                 XJC, NF, XJL, SIGMAKI, LASTIND,
$                 LEVEL, DP, JOBNUM, ITNEL)

!     RADIATIVE RATE COEFFICIENT MATRIX RRATE IS CALCULATED AT
!     NOTE THAT THIS SUBROUTINE CALCULATES NETTO RATE COEFFICIENTS
!     FOR NON-RUDIMENTAL LINE TRANSITIONS

USE MOD_ISRCHFGT
USE MOD_XRUDI
USE UTILS
USE PHYS
USE FILE_OPERATIONS
USE COMMON_BLOCK

IMPLICIT REAL*8(A - H, O - Z)

PARAMETER (one = 1.0D0)

DIMENSION EINST(N, N)
DIMENSION NCHARG(N), ELEVEL(N)
DIMENSION EION(N), ENLTE(N), WEIGHT(N), EN(N)
DIMENSION NOM(N)
DIMENSION SIGMAKI(NF, N)
DIMENSION XLAMBDA(NF), FWEIGHT(NF)

INTEGER, INTENT(IN) :: DP

REAL*8 :: XJ_LTE, EMINDU_LTE

REAL*8, DIMENSION(N, N), INTENT(OUT) :: RRATE

INTEGER, INTENT(IN) :: JOBNUM, ITNEL

REAL*8, DIMENSION(LASTIND), INTENT(IN) :: SL, XJL
REAL*8, DIMENSION(NF), INTENT(IN) :: XJC

CHARACTER*10, DIMENSION(N), INTENT(IN) :: LEVEL

REAL*8 :: FREQ_FACT
REAL*8 :: XJCK_LTE

LOGICAL :: ARR_LTE_COND, DEPTH_COND

REAL*4 :: JSRATIO

!*** C1 = H * C / K (CM * KELVIN)
DATA C1 / 1.4388d0 /
!*** C2 = 2 * H * C (H AND C IN CGS UNITS)
DATA C2 / 3.9724d-16 /
!*** C3 = 4 * PI / H / C (CGS UNITS)
DATA C3 / 0.06327d18 /
!*** PI8 = 8 * PI
DATA PI8 / 25.13274123d0 /

DEPTH_COND = (DP .LE. 91 .AND. DP .GE. 80) .OR. (DP .LE. 11)

ARR_LTE_COND = ALLOCATED(ARR_LTE) .AND. ITNEL .EQ. 1

ARR(DP, 1 : N, 1 : N) = 0.0D0
RBR(DP, 1 : N, 1 : N) = 0.0D0

IF (ARR_LTE_COND) ARR_LTE(DP, 1 : N, 1 : N) = 0.0D0

!*** LOOP OVER ALL TRANSITIONS ---------------------------------
IND = 0

DO NUP = 2, N

   LLOW : DO LOW = 1, NUP - 1

      IF (NOM(LOW) .NE. NOM(NUP)) GOTO 14
      IF (NCHARG(LOW) .NE. NCHARG(NUP)) GOTO 8

!***  LINE TRANSITION **************************************
      IND = IND + 1

      WAVENUM = ELEVEL(NUP) - ELEVEL(LOW)
      W3 = WAVENUM * WAVENUM * WAVENUM

!***  CHECK WHETHER THIS TRANSITION IS ONLY RUDIMENTAL

!***************************************************************************************************************************************
!***************************************************************************************************************************************
!     THAT IS HOW IT'S DONE WITH THE RADIATIVE BRACKET

      IF (EINST(LOW, NUP) .NE. -2.0D0 .AND. SL(IND) .NE. 0.0D0) THEN

         XJ = XJL(IND)

         JSRATIO = XJ / SL(IND)

!        RINAT TAGIROV:
!        At the innermost point the value of radiative bracket seems to be wrong. It abruptly increases.
!        For some reason the XJ / SL(IND) ratio there is much further from one than at the preceding point.
!        For possible reasons see the commentary in ETL.FOR about the boundary conditions and weights.
!        The explanation given there however does not seem to be in line with the fact that the value
!        of radiative bracket at the outermost point looks correct.
!        Though this may be attributed to insensitivity of the radiative bracket to the outer boundary condition.
!        But then again I can not think of any reason for such insensitivity.

         RRATE(LOW, NUP) = 0.0D0
         RRATE(NUP, LOW) = EINST(NUP, LOW) * (one - XJ / SL(IND))

!        RRATE(NUP, LOW) = EINST(NUP, LOW) * (one - JSRATIO)

!        IF (IND .EQ. 1) RRATE(NUP, LOW) = EINST(NUP, LOW) * (one - XJ / SL(IND)) + 1000 * 0.25 * 8.2249

      ELSEIF (EINST(LOW, NUP) .NE. -2.0D0 .AND. SL(IND) .EQ. 0.0D0) THEN

         XJ = XJL(IND)

         EMINDU = EINST(NUP, LOW) * XJ / C2 / W3

         RRATE(LOW, NUP) = EMINDU * WEIGHT(NUP) / WEIGHT(LOW)
         RRATE(NUP, LOW) = EINST(NUP, LOW) + EMINDU

      ELSE

!***     TRANSITION IS RUDIMENTAL -- RADIATION FIELD FROM INTERPOLATION OF CONT.
         CALL XRUDI(XJ, WAVENUM, XJC, XLAMBDA, 1, NF, 1)

         EMINDU = EINST(NUP, LOW) * XJ / C2 / W3

         RRATE(LOW, NUP) = EMINDU * WEIGHT(NUP) / WEIGHT(LOW)
         RRATE(NUP, LOW) = EINST(NUP, LOW) + EMINDU

      ENDIF
!**************************************************************************************************************************************
!**************************************************************************************************************************************

!***************************************************************************************************************************************
!***************************************************************************************************************************************
!     THAT IS HOW IT'S DONE WITHOUT THE RADIATIVE BRACKET (TO ACTUALLY REMOVE THE RADIATIVE BRACKET FROM THE CALCULATION REPLACE
!     ARR ARRAY HERE WITH THE RRATE ARRAY AND COMMENT OUT THE IMPLEMENTATION OF THE RADIATIVE BRACKET ABOVE)

      IF (EINST(LOW, NUP) .NE. -2.0D0) THEN

         XJ = XJL(IND)

         IF (ARR_LTE_COND) XJ_LTE = XJL_LTE(DP, IND)

      ELSE

!***     TRANSITION IS RUDIMENTAL -- RADIATION FIELD FROM INTERPOLATION OF CONT.
         CALL XRUDI(XJ, WAVENUM, XJC, XLAMBDA, 1, NF, 1)

         IF (ARR_LTE_COND) CALL XRUDI(XJ_LTE, WAVENUM, XJC_LTE(DP, 1 : NF), XLAMBDA, 1, NF, 1)

      ENDIF

      EMINDU = EINST(NUP, LOW) * XJ / C2 / W3

      ARR(DP, LOW, NUP) = EMINDU * WEIGHT(NUP) / WEIGHT(LOW)
      ARR(DP, NUP, LOW) = EINST(NUP, LOW) + EMINDU

      IF (ARR_LTE_COND) XJ_LTE = PLANCK_FUNC(WAVENUM * light_speed, TL)

!     RINAT TAGIROV: ONE HAS TO CALCULATE THE LTE ABSOLUTE RADIATIVE RATES
!     WITH THE PLANCK FUNCTION TO COMPARE THEM WITH THE NLTE ONES

      IF (ARR_LTE_COND) EMINDU_LTE = EINST(NUP, LOW) * XJ_LTE / C2 / W3

      IF (ARR_LTE_COND) ARR_LTE(DP, LOW, NUP) = EMINDU_LTE * WEIGHT(NUP) / WEIGHT(LOW)
      IF (ARR_LTE_COND) ARR_LTE(DP, NUP, LOW) = EINST(NUP, LOW) + EMINDU_LTE
!**************************************************************************************************************************************
!**************************************************************************************************************************************

      CYCLE LLOW

    8 CONTINUE

!***  CHARGE DIFFERENCE MUST BE 1
      IF (NCHARG(NUP) .NE. NCHARG(LOW)+1 ) GOTO 14

!***  UPPER LEVEL MUST BE A GROUND STATE
      IF (NCHARG(NUP) .NE. NCHARG(NUP-1)+1) GOTO 14

!***  CONTINUUM TRANSITION (NET RADIATIVE BRACKETS) ****************
!***  SIGMAKI = PRECALCULATED CROSS SECTION IN CM**2
!***  EDGE = THRESHOLD ENERGY IN KAYSER *******
      EDGE = EION(LOW) - ELEVEL(LOW)
      EDGELAM = 1.0D8 / EDGE

!***  RATE INTEGRAL
      REC = 0.0D0

!***  FIND EDGE FREQUENCY INDEX
      NFEDGE = ISRCHFGT(NF, XLAMBDA, 1, EDGELAM) - 1

      L2 : DO K = 1, NFEDGE

!        CALCULATION OF THE RADIATIVE BRACKET
         WAVENUM = 1.0D8 / XLAMBDA(K)
         W2 = WAVENUM * WAVENUM
         W3 = W2 * WAVENUM
         XJCK = XJC(K)
         SIGMA = SIGMAKI(K, LOW)

!        write(*, '(A,2x,2(i4,2x),2(e15.7,2x))'), 'radnet check:', k, low, xjc(k), sigmaki(k, low)

!***     CALCULATE BOUND-FREE SOURCE FUNCTION FOR TRANSITION LOW-UP ONLY
         EXFAC = EXP(-C1 * WAVENUM / TL)
         G = EXFAC * ENLTE(LOW) / ENLTE(NUP)
         SBF = C2 * W3 / (EN(LOW) / (EN(NUP) * G) - one)

         REC = REC + SIGMA * W2 * EXFAC * (one - XJCK / SBF) * FWEIGHT(K)

!        CALCULATION OF THE ABSOLUTE RADIATIVE RATES (AS GIVEN BY RUTTEN "Radiative Transfer In Stellar Atmospheres")
         FREQ_FACT = (SIGMA / WAVENUM) * FWEIGHT(K)

!        BOUND - FREE:
         ARR(DP, LOW, NUP) = ARR(DP, LOW, NUP) + FREQ_FACT * XJCK ! FWEIGHT is the frequency interval (according to the trapezoidal integration rule)

         IF (ARR_LTE_COND) XJCK_LTE = XJC_LTE(DP, K)
         IF (ARR_LTE_COND) XJCK_LTE = PLANCK_FUNC(WAVENUM * light_speed, TL)

         IF (ARR_LTE_COND) ARR_LTE(DP, LOW, NUP) = ARR_LTE(DP, LOW, NUP) + FREQ_FACT * XJCK_LTE

!        FREE - BOUND:
         ARR(DP, NUP, LOW) = ARR(DP, NUP, LOW) + FREQ_FACT *
$        (PLANCK_FUNC(WAVENUM * light_speed, TL) * (1.0D0 - EXFAC) + XJCK * EXFAC)

         IF (ARR_LTE_COND) ARR_LTE(DP, NUP, LOW) = ARR_LTE(DP, NUP, LOW) + FREQ_FACT *
$        (PLANCK_FUNC(WAVENUM * light_speed, TL) *
$        (1.0D0 - EXFAC) + XJCK_LTE * EXFAC)

      ENDDO L2

!     FINISHING THE CALCULATION OF THE NET RADIATIVE BRACKET
      RRATE(LOW, NUP) = 0.0D0
      RRATE(NUP, LOW) = PI8 * REC * ENLTE(LOW) / ENLTE(NUP)

!     FINISHING THE CALCULATION OF THE ABSOLUTE RADIATIVE RATES
      ARR(DP, LOW, NUP) = C3 * ARR(DP, LOW, NUP)! * 2.0D0
!     The factor of 2 which seemingly should be there due to the trapezoidal integration rule is missing from these formulas
!     (look into module FGRID for FWEIGHT) but the result seems to be correct so apparently I misunderstood something.
      ARR(DP, NUP, LOW) = C3 * (ENLTE(LOW) / ENLTE(NUP)) * ARR(DP, NUP, LOW)! * 2.0D0

      IF (ARR_LTE_COND) ARR_LTE(DP, LOW, NUP) = C3 * ARR_LTE(DP, LOW, NUP)
      IF (ARR_LTE_COND) ARR_LTE(DP, NUP, LOW) = C3 * (POP_LTE(DP, LOW) / POP_LTE(DP, NUP)) * ARR_LTE(DP, NUP, LOW)

      CYCLE LLOW

!***  LEVELS BELONG TO DIFFERENT ELEMENTS,
!***  CHARGE DIFFERENCE NOT 1 OR UPPER LEVEL NO GROUND STATE: ZERO RATE
   14 RRATE(LOW, NUP) = 0.0D0
      RRATE(NUP, LOW) = 0.0D0

   ENDDO LLOW

ENDDO

!*** ENDLOOP ---------------------------------------------------------

!*** DIAGONAL ELEMENTS ARE SET TO ZERO
FORALL (J = 1 : N) RRATE(J, J) = 0.0D0
FORALL (J = 1 : N) ARR(DP, J, J) = 0.0D0

RBR(DP, 1 : N, 1 : N) = RRATE(1 : N, 1 : N)

IF (ARR_LTE_COND) FORALL (J = 1 : N) ARR_LTE(DP, J, J) = 0.0D0

RETURN

END SUBROUTINE

END MODULE
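The bound-bound branches above all build the same two rate entries from the Einstein coefficient EINST(NUP, LOW) and a mean intensity XJ. A minimal Python sketch of just that bookkeeping (my paraphrase of the EMINDU/RRATE lines, with the C2 constant copied from the DATA statement; not code from the model itself):

```python
C2 = 3.9724e-16  # 2*h*c in CGS units, as in the DATA statement above

def line_rates(a_ul, xj, wavenum, g_low, g_up):
    """Upward/downward radiative rates for one line, mirroring RADNET:
    EMINDU is the induced-emission term A_ul * J / (2*h*c * nu~^3)."""
    emindu = a_ul * xj / C2 / wavenum**3
    r_low_up = emindu * g_up / g_low      # RRATE(LOW, NUP): absorption
    r_up_low = a_ul + emindu              # RRATE(NUP, LOW): spontaneous + induced
    return r_low_up, r_up_low
```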
proposition Liouville_weak: assumes "f holomorphic_on UNIV" and "(f \<longlongrightarrow> l) at_infinity" shows "f z = l"
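A standard proof sketch for this Liouville variant (an added aside, not from the source file):

```latex
% f -> l at infinity gives a bound |f z| <= |l| + 1 for |z| >= R;
% continuity bounds f on the compact disc |z| <= R as well, so f is a
% bounded entire function. Liouville's theorem makes f constant, and
% taking the limit along at_infinity identifies the constant with l.
\[
f \text{ entire} \;\wedge\; \sup_{z \in \mathbb{C}} |f z| < \infty
\;\Longrightarrow\; f \equiv c, \qquad c = \lim_{z \to \infty} f z = l.
\]
```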
% Copyright 2013 Oliver Johnson, Srikanth Patala
%
% This file is part of MisorientationMaps.
%
% MisorientationMaps is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% MisorientationMaps is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with MisorientationMaps. If not, see <http://www.gnu.org/licenses/>.

function sphcoord = polarcoord(v)
%%%%% Cartesian to spherical coordinates
%%%%% v = n x 3 array of pts
%%%%% output is n x 3 array of spherical coordinates
%%%%% [r theta phi]

r = sqrt(v(:,1).^2 + v(:,2).^2 + v(:,3).^2);
ind1 = find(r == 0);
ind2 = find(r ~= 0);
if(size(ind1,2) > 0)
    theta(ind1)=0;
    phi(ind1)=0;
end
if(size(ind2,2) > 0)
    theta(ind2) = acos(v(ind2,3)./r(ind2));
    phi(ind2) = atan2(v(ind2,2),v(ind2,1));
end
sphcoord = [r theta' phi'];
end
# Requires: data.table (fread), tidyr, plyr, dplyr, Seurat, Matrix

calc_weight<-function(cellType){
  freq<-sort((table(unlist(cellType)))/length(cellType))
  weight<-1+sqrt((max(freq)-freq)/(max(freq)-min(freq)))
  return(weight)
}

read_cell_markers_file<-function(panglao5_file, species, remove_subtype_of="", HLA_panglao5_file=""){
  #preparing cell activity database
  marker<-data.frame(fread(panglao5_file))
  if(remove_subtype_of != ""){
    remove_subtype_of<-unique(unlist(strsplit(remove_subtype_of, ",")))
    pangdb_ct <- read.table(HLA_panglao5_file,header = T,row.names = 1,sep = "\t",stringsAsFactors = F)
    remove_subtype_of<-remove_subtype_of[remove_subtype_of %in% marker$cell.type]
    layer="Layer2"
    removed<-c()
    for(layer in c("Layer1","Layer2", "Layer3")){
      layer_ct<-unique(pangdb_ct[,layer])
      for(rs in remove_subtype_of){
        if(rs %in% removed){
          next
        }
        if(rs %in% layer_ct){
          subdb<-rownames(pangdb_ct)[pangdb_ct[,layer]==rs]
          if(rs %in% subdb){
            subdb<-subdb[!(subdb %in% remove_subtype_of)]
            marker<-marker[!(marker$cell.type %in% subdb),]
          }else{
            marker$cell.type[marker$cell.type %in% subdb] = rs
          }
          removed<-c(removed, rs)
        }
      }
    }
  }
  hsind<-regexpr(species,marker$species)
  marker_species<-marker[hsind>0 & marker$ubiquitousness.index<0.05,]
  if (species=="Mm") {
    ##change the gene symbol only keep the first letter capitalize
    marker_species$official.gene.symbol<-toMouseGeneSymbol(marker_species$official.gene.symbol)
  }
  if (species=="Hs") {
    marker_species$official.gene.symbol<-toupper(marker_species$official.gene.symbol)
  }
  cellType<-tapply(marker_species$official.gene.symbol,marker_species$cell.type,list)
  weight<-calc_weight(cellType)
  return(list(cellType=cellType, weight=weight))
}

get_selfdefinedCelltype <- function(file, finalLayer="layer3"){
  file.ori <- scan(file,what = "",sep="\n")
  marker.ori <- matrix("undefined",nrow=length(file.ori)/2,ncol = 2)
  for (i in 1:length(file.ori)){
    if(i%%2==1){
      marker.ori[(i+1)/2,1]=substr(file.ori[i],2,nchar(file.ori[i]))
    }else{
      marker.ori[i/2,2]=file.ori[i]
    }
  }
  ref <- as.data.frame(marker.ori[,1])
  ref <- tidyr::separate(ref,col=colnames(ref),into = c("celltype","subtypeOf"),sep = ",")
  rownames(ref) <- ref$celltype

  celltype.ori <- sapply(tapply(marker.ori[,2],marker.ori[,1],list), function(x) strsplit(x,","))
  celltype.tem <- as.data.frame(matrix(unlist(celltype.ori)))
  celltype.tem[,2] <- rep(names(celltype.ori),lengths(celltype.ori))
  celltype.tem <- tidyr::separate(celltype.tem,col=V2,into = c("celltype","subtypeOf"),sep = ",")

  ct <- as.data.frame(matrix("Undefined",nrow = nrow(celltype.tem),ncol = length(unique(celltype.tem$subtypeOf))+2),stringsAsFactors=FALSE)
  ct[,1:2] <- celltype.tem[,1:2]
  for (i in 3:ncol(ct)){
    for (j in 1:nrow(ct)) {
      if(ct[j,i-1] %in% rownames(ref)){
        ct[j,i]<-ref[ct[j,i-1],2]
      }else{
        ct[j,i]<-ct[j,i-1]
      }
    }
  }

  layer <- ncol(ct)
  if(! identical(ct[,ncol(ct)],ct[,ncol(ct)-1])){
    layer <- ncol(ct)
  }else{
    for (i in 1:ncol(ct)) {
      if(identical(ct[,i],ct[,i+1])){
        layer <- i
        break
      }
    }
  }

  celltype.trim <- ct[,1:layer]
  for (i in 1:nrow(celltype.trim)){
    tag <- which(celltype.trim[i,]==celltype.trim[i,ncol(celltype.trim)])
    if(length(tag)>1){
      if(ncol(celltype.trim)+1-length(tag)==3){
        suf <- ct[i,3]
      }else{
        suf <- ct[i,3:(ncol(celltype.trim)+1-length(tag))]
      }
      pre <- rep(celltype.trim[i,2],length(tag)-1)
      celltype.trim[i,] <- c(celltype.trim[i,][1:2],pre,suf)
    }
  }
  for (i in 1:ncol(celltype.trim)){
    if(i == 1){
      colnames(celltype.trim)[i]="gene"
    }else{
      colnames(celltype.trim)[i]=paste0("layer",(ncol(celltype.trim)+1-i),"")
    }
  }

  ct.final <- celltype.trim
  for (i in 1:ncol(ct.final)){
    if(i >1){
      ct.final[,i]=celltype.trim[,ncol(celltype.trim)+2-i]
      colnames(ct.final)[i] <- colnames(celltype.trim)[ncol(celltype.trim)+2-i]
    }
  }

  cellType<-tapply(ct.final$gene,ct.final[[finalLayer]],list)
  weight<-calc_weight(cellType)
  return(list(cellType=cellType, weight=weight))
}

toMouseGeneSymbol<-function(x){
  result=paste0(toupper(substr(x,1,1)),tolower(substr(x,2,nchar(x))))
  return(result)
}

read_cell_cluster_file<-function(fileName, sort_cluster_name="seurat_clusters"){
  result<-read.csv(fileName, stringsAsFactors = F, row.names = 1)
  display_sort_cluster_name = paste0("display_", sort_cluster_name)
  result[,display_sort_cluster_name] = paste0("Cluster ", result[,sort_cluster_name])

  cluster_names=colnames(result)[grepl("_clusters", colnames(result))]
  sort_clusters_num = length(unique(result[,sort_cluster_name]))
  for(cluster_name in cluster_names){
    cluster_num = length(unique(result[,cluster_name]))
    if(cluster_name == sort_cluster_name){
      next
    }
    if (cluster_num != sort_clusters_num) {
      next
    }
    cf<-unique(result[,c(sort_cluster_name, cluster_name)])
    if(nrow(cf) != sort_clusters_num){
      next
    }
    cf<-cf[order(as.numeric(cf[,sort_cluster_name]), decreasing = T),]
    cf_levels=cf[,cluster_name]
    result[,cluster_name] = factor(result[,cluster_name], levels=cf_levels)
  }
  return(result)
}

find_markers<-function(object, by_sctransform, ident.1, ident.2, min.pct = 0.5, logfc.threshold = 0.6){
  assay=ifelse(by_sctransform, "SCT", "RNA")
  markers=FindMarkers(object, assay=assay, ident.1=ident.1, ident.2=ident.2, only.pos=TRUE, min.pct=min.pct, logfc.threshold=logfc.threshold)
  markers=markers[markers$p_val_adj < 0.01,]
  return(markers)
}

find_all_markers<-function(object, by_sctransform, min.pct = 0.5, logfc.threshold = 0.6){
  assay=ifelse(by_sctransform, "SCT", "RNA")
  markers=FindAllMarkers(object, assay=assay, only.pos=TRUE, min.pct=min.pct, logfc.threshold=logfc.threshold)
  markers=markers[markers$p_val_adj < 0.01,]
  return(markers)
}

get_cluster_count<-function(counts, clusters){
  if(is.null(levels(clusters))){
    allClusters=unique(clusters)
  }else{
    allClusters=levels(clusters)
  }
  cluster<-allClusters[1]
  csums=lapply(allClusters, function(cluster){
    #cat(cluster, "\n")
    cells=names(clusters)[clusters==cluster]
    subcounts=counts[,cells]
    #cat(ncol(subcounts), "\n")
    Matrix::rowSums(subcounts)
  })
  result=do.call(cbind, csums)
  colnames(result)=allClusters
  gcount=Matrix::rowSums(result)
  result=result[gcount > 0,]
  return(result)
}

get_group_count=function(curobj, groupName="active.ident") {
  counts=GetAssayData(curobj,assay="RNA",slot="counts")
  curgroups=curobj[[groupName]]
  clusters=curgroups[,1]
  names(clusters)=rownames(curgroups)
  result=get_cluster_count(counts, clusters)
  return(result)
}

add_cluster<-function(object, new.cluster.name, new.cluster.ids){
  seurat_clusters<-object[["seurat_clusters"]]$seurat_clusters
  names(new.cluster.ids) <- levels(seurat_clusters)
  new.cluster.values<-plyr::mapvalues(x = seurat_clusters, from = levels(seurat_clusters), to = new.cluster.ids)
  names(new.cluster.values)<-names(seurat_clusters)
  object[[new.cluster.name]]<-new.cluster.values
  object
}

add_celltype<-function(obj, celltype_df, celltype_column){
  # NOTE: the original referenced an undefined `celltype`; `celltype_df` is meant here.
  new.cluster.ids<-split(celltype_df[,celltype_column], celltype_df$seurat_clusters)
  obj[[celltype_column]] = unlist(new.cluster.ids[as.character(obj$seurat_clusters)])

  celltype_df$seurat_column = paste0(celltype_df$seurat_clusters, " : ", celltype_df[,celltype_column])
  new.cluster.ids<-split(celltype_df$seurat_column, celltype_df$seurat_clusters)
  obj[[paste0("seurat_", celltype_column)]] = unlist(new.cluster.ids[as.character(obj$seurat_clusters)])
  return(obj)
}

run_cluster<-function(object, Remove_Mt_rRNA, rRNApattern, Mtpattern, pca_dims, by_sctransform, min.pct = 0.5, logfc.threshold = 0.6){
  if (by_sctransform) {
    object <- RunPCA(object = object, verbose=FALSE)
  }else{
    if (Remove_Mt_rRNA) {
      rRNA.genes <- grep(pattern = rRNApattern, rownames(object), value = TRUE)
      Mt.genes<- grep(pattern= Mtpattern, rownames(object), value=TRUE)
      var.genes <- dplyr::setdiff(VariableFeatures(object), c(rRNA.genes,Mt.genes))
    } else {
      var.genes <- VariableFeatures(object)
    }
    object <- RunPCA(object = object, features = var.genes, verbose=FALSE)
  }
  object <- RunUMAP(object = object, reduction = "pca", dims=pca_dims, verbose = FALSE)
  object <- FindNeighbors(object = object, dims=pca_dims, verbose=FALSE)
  # random.seed and resolution are taken from the calling environment in the original script
  object <- FindClusters(object=object, verbose=FALSE, random.seed=random.seed, resolution=resolution)
  if (by_sctransform) {
    markers <- FindAllMarkers(object, only.pos = TRUE, min.pct = min.pct, logfc.threshold = logfc.threshold)
  }else{
    markers <- FindAllMarkers(object, features=var.genes, only.pos = TRUE, min.pct = min.pct, logfc.threshold = logfc.threshold)
  }
  markers <- markers[markers$p_val_adj < 0.01,]
  return(list(object=object, markers=markers))
}

ORA_celltype<-function(medianexp,cellType,weight){
  ORA_result<-matrix(NA, nrow=length(cellType),ncol=dim(medianexp)[2])
  CTA_result<-matrix(0,nrow=length(cellType),ncol=dim(medianexp)[2])
  colnames(ORA_result)=colnames(medianexp)
  colnames(CTA_result)=colnames(medianexp)
  exp_z<-scale(medianexp)
  genenames<-rownames(medianexp)
  for (j in 1:dim(medianexp)[2]){
    clusterexp<-medianexp[,j]
    clusterexp_z<-exp_z[,j]
    for (i in 1:length(cellType)){
      ct_exp<-length(intersect(genenames[clusterexp>0],cellType[[i]]))
      ct_not_exp<-length(cellType[[i]])-ct_exp
      exp_not_ct<-sum(clusterexp>0)-ct_exp
      not_exp_not_ct<-length(clusterexp)-ct_not_exp
      cont.table<-matrix(c(ct_exp,ct_not_exp,exp_not_ct,not_exp_not_ct),nrow=2)
      ORA_result[i,j]<-fisher.test(cont.table,alternative="greater")$p.value
      ###
      weight_ss<-weight[names(weight)%in%cellType[[i]]]
      ind<-match(names(weight_ss),genenames)
      exp_ss<-clusterexp_z[ind[!is.na(ind)]]
      weight_ss<-weight_ss[!is.na(ind)]
      CTA_result[i,j]<-sum(exp_ss*weight_ss)/(length(exp_ss)^(1/3))
    }
  }
  rownames(ORA_result)<-rownames(CTA_result)<-names(cellType)
  minp_ora_ind<- apply(ORA_result,2,function(x){which.min(x)})
  minp_ora<-apply(ORA_result,2,min)
  names(minp_ora)<-rownames(ORA_result)[minp_ora_ind]
  max_cta_ind<- apply(CTA_result,2,function(x){which.max(x)})
  max_cta<-apply(CTA_result,2,max,na.rm=T)
  names(max_cta)<-rownames(CTA_result)[max_cta_ind]
  return(list(ora=ORA_result,cta=CTA_result,min_ora=minp_ora,max_cta=max_cta))
}

get_cta_ora_mat<-function(predict_celltype) {
  cta_index<-apply(predict_celltype$cta,2,function(x){return(order(x,decreasing=T)[1:2])})
  cta_index<-unique(sort(cta_index))

  cta_mat<- predict_celltype$cta[cta_index,]
  colnames(cta_mat)<-paste0(names(predict_celltype$max_cta), " : ", colnames(predict_celltype$cta))

  ora_mat<- predict_celltype$ora[cta_index,]
  ora_mat<--log10(ora_mat)
  colnames(ora_mat)<-colnames(cta_mat)

  return(list(cta_mat=cta_mat, ora_mat=ora_mat))
}

get_cta_combined<-function(obj, predicted){
  cta_table<-data.frame(Cluster=colnames(predicted$cta),
                        CellType=names(predicted$max_cta),
                        CtaScore=round(predicted$max_cta * 10) / 10.0,
                        stringsAsFactors = F)
  cta_table$OraPvalue=apply(cta_table, 1, function(x){
    ct=x[2]
    cl=x[1]
    predicted$ora[ct, cl]
  })

  cluster_sample<-as.data.frame.matrix(table(obj$seurat_clusters, obj$orig.ident))
  cluster_sample<-cluster_sample[as.character(cta_table$Cluster),]

  nc<-apply(cluster_sample, 2, function(x){
    tc=sum(x)
    perc<-x/tc
    return(round(perc*1000) / 10.0)
  })
  colnames(nc)<-paste0(colnames(nc), "_perc")

  cta_combined<-cbind(cta_table, cluster_sample, nc)
  return(cta_combined)
}
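For the `ORA_result` entries above, each cell type/cluster pair is scored with a one-sided Fisher exact test on a 2x2 table of marker membership versus expression. A small Python sketch of that test (scipy assumed available; the helper name and the standard contingency-table construction are mine):

```python
from scipy.stats import fisher_exact

def ora_pvalue(expressed_genes, all_genes, marker_genes):
    """One-sided Fisher test: are markers over-represented among expressed genes?"""
    expressed = set(expressed_genes)
    markers = set(marker_genes) & set(all_genes)   # restrict markers to measured genes
    ct_exp = len(markers & expressed)              # markers expressed
    ct_not_exp = len(markers) - ct_exp             # markers not expressed
    exp_not_ct = len(expressed) - ct_exp           # expressed non-markers
    not_exp_not_ct = len(all_genes) - len(expressed) - ct_not_exp
    table = [[ct_exp, exp_not_ct],
             [ct_not_exp, not_exp_not_ct]]
    _, p = fisher_exact(table, alternative="greater")
    return p
```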
State Before: C : Type u_2
inst✝¹ : Category C
inst✝ : Preadditive C
X : SimplicialObject C
q : ℕ
⊢ P q ≫ P q = P q
State After: case h
C : Type u_2
inst✝¹ : Category C
inst✝ : Preadditive C
X : SimplicialObject C
q n : ℕ
⊢ HomologicalComplex.Hom.f (P q ≫ P q) n = HomologicalComplex.Hom.f (P q) n
Tactic: ext n
State Before: case h
C : Type u_2
inst✝¹ : Category C
inst✝ : Preadditive C
X : SimplicialObject C
q n : ℕ
⊢ HomologicalComplex.Hom.f (P q ≫ P q) n = HomologicalComplex.Hom.f (P q) n
State After: no goals
Tactic: exact P_f_idem q n
C$Procedure ZZEKTRPI ( EK tree, parent information )

      SUBROUTINE ZZEKTRPI ( HANDLE, TREE,  KEY,  PARENT, PKEY,  POFFST,
     .                      LPIDX,  LPKEY, LSIB, RPIDX,  RPKEY, RSIB )

C$ Abstract
C
C     Given a key, return general information pertaining to the key's
C     parent node.
C
C$ Disclaimer
C
C     THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
C     CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
C     GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
C     ADMINISTRATION (NASA). THE SOFTWARE IS TECHNOLOGY AND SOFTWARE
C     PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
C     TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
C     WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
C     PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
C     SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
C     SOFTWARE AND RELATED MATERIALS, HOWEVER USED.
C
C     IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
C     BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
C     LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
C     INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
C     REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
C     REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.
C
C     RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
C     THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
C     CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
C     ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.
C
C$ Required_Reading
C
C     EK
C
C$ Keywords
C
C     EK
C     PRIVATE
C
C$ Declarations

      INCLUDE 'ekpage.inc'
      INCLUDE 'ektree.inc'
      INCLUDE 'ektype.inc'

      INTEGER               HANDLE
      INTEGER               TREE
      INTEGER               KEY
      INTEGER               PARENT
      INTEGER               PKEY
      INTEGER               POFFST
      INTEGER               LPIDX
      INTEGER               LPKEY
      INTEGER               LSIB
      INTEGER               RPIDX
      INTEGER               RPKEY
      INTEGER               RSIB

C$ Brief_I/O
C
C     Variable  I/O  Description
C     --------  ---  --------------------------------------------------
C     HANDLE     I   File handle.
C     TREE       I   Root of tree.
C     KEY        I   Key belonging to node of interest.
C     PARENT     O   Parent node of the node containing KEY.
C     PKEY       O   A key in the parent node.
C     POFFST     O   Key offset of the parent node.
C     LPIDX      O   Node-relative index of the left parent key.
C     LPKEY      O   Left parent key.
C     LSIB       O   Node number of left sibling.
C     RPIDX      O   Node-relative index of the right parent key.
C     RPKEY      O   Right parent key.
C     RSIB       O   Node number of right sibling.
C
C$ Detailed_Input
C
C     HANDLE         is a file handle of an EK open for read or write
C                    access.
C
C     TREE           is the root node number of the tree of interest.
C
C     NODE           is the node number of interest.
C
C$ Detailed_Output
C
C     PARENT         is the number of the parent node of the node
C                    containing KEY.  If KEY is in the root, PARENT is
C                    set to zero.
C
C     PKEY           is a key in PARENT.  If PARENT is set to zero,
C                    PKEY is set to zero as well.  PKEY is used to
C                    traverse a chain of ancestors towards the root.
C
C     POFFST         is the key offset of PARENT; this is the offset
C                    that must be added to the node-relative key
C                    values in PARENT to convert them to absolute keys.
C
C     LPIDX          is the index in PARENT of the key `to the left'
C                    of the node containing KEY.  This key is the
C                    immediate predecessor of the first key in the
C                    subtree headed by the node containing KEY.
C
C                    The key indices in PARENT start at 1.  If PARENT
C                    contains no keys that precede the node containing
C                    KEY, LPIDX is set to zero.
C
C     LPKEY          is the absolute key located in PARENT at index
C                    LPIDX.  If PARENT contains no keys that precede
C                    the node containing KEY, LPKEY is set to zero.
C
C     LSIB           is the number of the left sibling node of the node
C                    containing KEY.  If PARENT contains no keys that
C                    precede the node containing KEY, then the node
C                    containing KEY has no left sibling, and LSIB is
C                    set to zero.
C
C     RPIDX          is the index in PARENT of the key `to the right'
C                    of the node containing KEY.  This key is the
C                    immediate successor of the last key in the
C                    subtree headed by the node containing KEY.
C
C                    If PARENT contains no keys that succeed the node
C                    containing KEY, RPIDX is set to zero.
C
C     RPKEY          is the absolute key located in PARENT at index
C                    RPIDX.  If PARENT contains no keys that succeed
C                    the node containing KEY, RPKEY is set to zero.
C
C     RSIB           is the number of the right sibling node of the node
C                    containing KEY.  If PARENT contains no keys that
C                    succeed the node containing KEY, then the node
C                    containing KEY has no right sibling, and RSIB is
C                    set to zero.
C
C$ Parameters
C
C     None.
C
C$ Exceptions
C
C     1) If HANDLE is invalid, the error will be diagnosed by routines
C        called by this routine.  The file will not be modified.
C
C     2) If an I/O error occurs while reading the indicated file, the
C        error will be diagnosed by routines called by this routine.
C
C     3) If the input key is out of range, the error
C        SPICE(INDEXOUTOFRANGE) is signaled.
C
C     4) If the input key is not found in the tree, the error
C        SPICE(ITEMNOTFOUND) is signaled.  This error most likely
C        indicates the presence of a serious bug in the EK software,
C        or that the input EK file has been corrupted.
C
C$ Files
C
C     See the EK Required Reading for a discussion of the EK file
C     format.
C
C$ Particulars
C
C     This routine supports tree operations that involve identifying
C     the parent node of a specified node.  In particular, this
C     routine supports updating ancestors of a node when an insertion
C     or deletion occurs.
C
C$ Examples
C
C     See ZZEKTRUD, ZZEKTRUI.
C
C$ Restrictions
C
C     None.
C
C$ Literature_References
C
C     None.
C
C$ Author_and_Institution
C
C     N.J. Bachman   (JPL)
C
C$ Version
C
C-    SPICELIB Version 1.1.0, 09-FEB-2015 (NJB)
C
C        Now uses ERRHAN to insert DAS file name into
C        long error messages.
C
C-    Beta Version 1.0.0, 23-OCT-1995 (NJB)
C
C-&

C
C     SPICELIB functions
C
      INTEGER               LSTLEI

C
C     Local variables
C
      INTEGER               CHILD
      INTEGER               LKEY
      INTEGER               MAXKEY
      INTEGER               NEWKEY
      INTEGER               OFFSET
      INTEGER               PAGE   ( PGSIZI )
      INTEGER               PREV
      INTEGER               PRVKEY
      INTEGER               TOTKEY

      LOGICAL               FOUND

C
C     Use discovery check-in in this puppy.
C
C     Nothing found to begin with.
C
      FOUND = .FALSE.

C
C     Get a local copy of the input key.  We may overwrite the input
C     key when we set PKEY.
C
      LKEY = KEY

C
C     Start out by reading in the root page.  The node level starts
C     out at 1.
C
      CALL ZZEKPGRI ( HANDLE, TREE, PAGE )

      PARENT = 0
      PKEY   = 0
      POFFST = 0
      LPIDX  = 0
      LPKEY  = 0
      LSIB   = 0
      RPIDX  = 0
      RPKEY  = 0
      RSIB   = 0

C
C     Find out how many keys are in the tree.  If LKEY is outside
C     this range, we won't find it.
C
      TOTKEY = PAGE ( TRNKEY )

      IF ( ( LKEY .LT. 1 ) .OR. ( LKEY .GT. TOTKEY ) ) THEN

         CALL CHKIN  ( 'ZZEKTRPI' )
         CALL SETMSG ( 'Key = #; valid range = 1:#. Tree = #, file = #')
         CALL ERRINT ( '#', LKEY )
         CALL ERRINT ( '#', TOTKEY )
         CALL ERRINT ( '#', TREE )
         CALL ERRHAN ( '#', HANDLE )
         CALL SIGERR ( 'SPICE(INDEXOUTOFRANGE)' )
         CALL CHKOUT ( 'ZZEKTRPI' )
         RETURN

      END IF

C
C     Find the last key at this level that is less than or equal to
C     the requested key.
C
      PREV = LSTLEI ( LKEY, PAGE(TRNKR), PAGE(TRKEYR+1) )

      IF ( PREV .GT. 0 ) THEN
         PRVKEY = PAGE(TRKEYR+PREV)
      ELSE
         PRVKEY = 0
      END IF

C
C     If we were lucky enough to get an exact match, we can quit now.
C     The root has no parent so the output values remain set to zero.
C
      IF ( PRVKEY .EQ. LKEY ) THEN
         RETURN
      END IF

C
C     Still here?  Traverse the pointer path until we find the key
C     or run out of progeny.
C
      OFFSET = PRVKEY
      PARENT = TREE
      PKEY   = PAGE ( TRKEYR + 1 )
      MAXKEY = PAGE ( TRNKR )

      IF ( PREV .GT. 0 ) THEN
         LPIDX = PREV
         LPKEY = PAGE ( TRKEYR + LPIDX )
         LSIB  = PAGE ( TRKIDR + LPIDX )
      ELSE
         LPIDX = 0
         LPKEY = 0
         LSIB  = 0
      END IF

      IF ( PREV .LT. MAXKEY ) THEN
         RPIDX = PREV + 1
         RPKEY = PAGE ( TRKEYR + RPIDX )
         RSIB  = PAGE ( TRKIDR + RPIDX + 1 )
      ELSE
         RPIDX = 0
         RPKEY = 0
         RSIB  = 0
      END IF

      CHILD = PAGE ( TRKIDR + PREV + 1 )
      FOUND = .FALSE.

      DO WHILE (  ( CHILD .GT. 0 ) .AND. ( .NOT. FOUND )  )
C
C        Read in the child page.
C
         CALL ZZEKPGRI ( HANDLE, CHILD, PAGE )

C
C        Find the last key at this level that is less than or equal to
C        the requested key.  Since the keys we're looking at now are
C        ordinal positions relative to the subtree whose root is the
C        current node, we must subtract from LKEY the position of the
C        node preceding the first key of this subtree.
C
         NEWKEY = LKEY - OFFSET
         PREV   = LSTLEI ( NEWKEY, PAGE(TRNKC), PAGE(TRKEYC+1) )

         IF ( PREV .GT. 0 ) THEN
            PRVKEY = PAGE(TRKEYC+PREV)
         ELSE
            PRVKEY = 0
         END IF

C
C        If we were lucky enough to get an exact match, we can quit.
C        The outputs are set.
C
         IF ( PRVKEY .EQ. NEWKEY ) THEN

            FOUND = .TRUE.

         ELSE
C
C           Record information from the current node before we read the
C           next child page.
C
            PARENT = CHILD
            POFFST = OFFSET
            PKEY   = PAGE ( TRKEYC + 1 ) + OFFSET
            MAXKEY = PAGE ( TRNKC )

            IF ( PREV .GT. 0 ) THEN
               LPIDX = PREV
               LPKEY = PAGE ( TRKEYC + LPIDX )
               LSIB  = PAGE ( TRKIDC + LPIDX )
            ELSE
               LPIDX = 0
               LPKEY = 0
               LSIB  = 0
            END IF

            IF ( PREV .LT. MAXKEY ) THEN
               RPIDX = PREV + 1
               RPKEY = PAGE ( TRKEYC + RPIDX )
               RSIB  = PAGE ( TRKIDC + RPIDX + 1 )
            ELSE
               RPIDX = 0
               RPKEY = 0
               RSIB  = 0
            END IF

C
C           Update the offset of the tree headed by CHILD, and set
C           the new child node.
C
            OFFSET = PRVKEY + OFFSET
            CHILD  = PAGE ( TRKIDC + PREV + 1 )

         END IF

      END DO

C
C     If we found the key, our outputs are already set.  If not, we've
C     got trouble.
C
      IF ( .NOT. FOUND ) THEN

         CALL CHKIN  ( 'ZZEKTRPI' )
         CALL SETMSG ( 'Key #; valid range = 1:#. Tree = #, file = #.'//
     .                 ' Key was not found.  This probably indicates' //
     .                 ' a corrupted file or a bug in the EK code.'   )
         CALL ERRINT ( '#', LKEY )
         CALL ERRINT ( '#', TOTKEY )
         CALL ERRINT ( '#', TREE )
         CALL ERRHAN ( '#', HANDLE )
         CALL SIGERR ( 'SPICE(ITEMNOTFOUND)' )
         CALL CHKOUT ( 'ZZEKTRPI' )
         RETURN

      END IF

      RETURN
      END
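Stripped of the EK page layout and key offsets, the descent in ZZEKTRPI is an ordinary "find the parent of the node containing a key" walk. A schematic Python sketch under an assumed node layout (`keys` sorted, `kids` one longer than `keys`, empty for leaves); this is an illustration, not SPICE code:

```python
from bisect import bisect_right

def find_parent_info(root, key):
    """Descend from the root, remembering the parent and which child
    link was taken, until the node containing `key` is reached."""
    parent, idx = None, None          # the root has no parent
    node = root
    while True:
        i = bisect_right(node.keys, key)          # kids[i] covers key
        if i > 0 and node.keys[i - 1] == key:
            break                                 # key lives in this node
        if not node.kids:
            raise KeyError(key)                   # missing key: corrupted tree
        parent, idx, node = node, i, node.kids[i]
    if parent is None:
        return None                               # analogous to PARENT = 0
    left_sib = parent.kids[idx - 1] if idx > 0 else None
    right_sib = parent.kids[idx + 1] if idx + 1 < len(parent.kids) else None
    return parent, left_sib, right_sib
```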
Formal statement is:
lemma setdist_ltE:
  assumes "setdist S T < b" "S \<noteq> {}" "T \<noteq> {}"
  obtains x y where "x \<in> S" "y \<in> T" "dist x y < b"
Informal statement is:
If the distance between two nonempty sets is less than $b$, then there exist points $x \in S$ and $y \in T$ such that the distance between $x$ and $y$ is less than $b$.
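The lemma is just the defining property of an infimum, stated in eliminator form (an added aside, not from the source):

```latex
% setdist is the infimum of pairwise distances, so a strict bound
% cannot be a lower bound of that set of distances:
\[
\operatorname{setdist} S\, T \;=\; \inf_{x \in S,\, y \in T} d(x, y) \;<\; b
\;\Longrightarrow\; \exists\, x \in S,\ y \in T.\; d(x, y) < b,
\]
% otherwise every pair would satisfy d(x, y) >= b, making b a lower
% bound and forcing setdist S T >= b, a contradiction.
```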
/* integration/jacobi.c
 *
 * Copyright (C) 2017 Konrad Griessinger, Patrick Alken
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * The code in this module is based on IQPACK, specifically the LGPL
 * implementation found in HERMITE_RULE:
 * https://people.sc.fsu.edu/~jburkardt/c_src/hermite_rule/hermite_rule.html
 */

#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_sf_gamma.h>

static int
jacobi_check(const size_t n, const gsl_integration_fixed_params * params)
{
  (void) n;

  if (fabs(params->b - params->a) <= GSL_DBL_EPSILON)
    {
      GSL_ERROR("|b - a| too small", GSL_EDOM);
    }
  else if (params->a >= params->b)
    {
      GSL_ERROR("lower integration limit must be smaller than upper limit", GSL_EDOM);
    }
  else if (params->alpha <= -1.0 || params->beta <= -1.0)
    {
      GSL_ERROR("alpha and beta must be > -1", GSL_EDOM);
    }
  else
    {
      return GSL_SUCCESS;
    }
}

static int
jacobi_init(const size_t n, double * diag, double * subdiag,
            gsl_integration_fixed_params * params)
{
  const double absum = params->beta + params->alpha;
  const double abdiff = params->beta - params->alpha;
  const double a2b2 = absum * abdiff; /* beta^2 - alpha^2 */
  size_t i;

  /* construct the diagonal and subdiagonal elements of Jacobi matrix */
  diag[0] = abdiff/(absum + 2.0);
  subdiag[0] = 2.0*sqrt((params->alpha + 1.0)*(params->beta + 1.0)/(absum + 3.0))/(absum + 2.0);

  for (i = 1; i < n; i++)
    {
      diag[i] = a2b2 / ( (absum + 2.0*i) * (absum + 2.0*i + 2.0) );
      subdiag[i] = sqrt ( 4.0*(i + 1.0) * (params->alpha + i + 1.0)
                          * (params->beta + i + 1.0) * (absum + i + 1.0)
                          / ( pow((absum + 2.0*i + 2.0), 2.0) - 1.0 ) )
                   / ( absum + 2.0*i + 2.0 );
    }

  params->zemu = pow(2.0, absum + 1.0) * gsl_sf_gamma(params->alpha + 1.0)
                 * gsl_sf_gamma(params->beta + 1.0) / gsl_sf_gamma(absum + 2.0);
  params->shft = 0.5*(params->b + params->a);
  params->slp = 0.5*(params->b - params->a);
  params->al = params->alpha;
  params->be = params->beta;

  return GSL_SUCCESS;
}

static const gsl_integration_fixed_type jacobi_type =
{
  jacobi_check,
  jacobi_init
};

const gsl_integration_fixed_type *gsl_integration_fixed_jacobi = &jacobi_type;
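`jacobi_init` only fills in the symmetric tridiagonal Jacobi matrix; the caller turns it into nodes and weights via the Golub-Welsch step (eigenvalues are the nodes, weights come from the first eigenvector components scaled by the zeroth moment `zemu`). A self-contained numpy sketch of that step on the reference interval [-1, 1] (the affine `shft`/`slp` mapping is omitted; the function name is mine):

```python
import math
import numpy as np

def gauss_jacobi(n, alpha, beta):
    """Nodes/weights for integrating f(x) * (1-x)**alpha * (1+x)**beta over [-1, 1]."""
    ab = alpha + beta
    diag = np.zeros(n)
    sub = np.zeros(n - 1)
    diag[0] = (beta - alpha) / (ab + 2.0)
    if n > 1:
        sub[0] = 2.0 * math.sqrt((alpha + 1.0) * (beta + 1.0) / (ab + 3.0)) / (ab + 2.0)
    for i in range(1, n):
        diag[i] = (beta**2 - alpha**2) / ((ab + 2.0 * i) * (ab + 2.0 * i + 2.0))
        if i < n - 1:
            sub[i] = math.sqrt(4.0 * (i + 1.0) * (alpha + i + 1.0) * (beta + i + 1.0)
                               * (ab + i + 1.0)
                               / ((ab + 2.0 * i + 2.0) ** 2 - 1.0)) / (ab + 2.0 * i + 2.0)
    # zeroth moment of the weight function (the `zemu` of jacobi_init)
    zemu = (2.0 ** (ab + 1.0) * math.gamma(alpha + 1.0) * math.gamma(beta + 1.0)
            / math.gamma(ab + 2.0))
    jacobi = np.diag(diag) + np.diag(sub, 1) + np.diag(sub, -1)
    nodes, vecs = np.linalg.eigh(jacobi)      # Golub-Welsch
    weights = zemu * vecs[0, :] ** 2
    return nodes, weights

# quick check: alpha = beta = 0 reduces to Gauss-Legendre,
# so the integral of x**2 over [-1, 1] should come out as 2/3
x, w = gauss_jacobi(5, 0.0, 0.0)
print(np.dot(w, x**2))   # ~0.6667
```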
<center>
<h4>Universidad Nacional de Córdoba - Facultad de Matemática, Astronomía, Física y Computación</h4>
<h3>Diplomatura en Ciencia de Datos, Aprendizaje Automático y sus Aplicaciones</h3>
</center>

<h1> Linear Regression - Example </h1>

<h3> Análisis y Visualización de Datos - 2019 </h3>

In this example we will see how to implement a linear regression to predict a numeric variable. We will again use the [Human Freedom Index 2018](https://www.cato.org/human-freedom-index-new) dataset from the Cato Institute, in the same version as in the practical assignment.

In this notebook we will try to estimate a linear function that models the change of personal and economic freedom over time.

```python
%matplotlib inline

import matplotlib.pyplot as plt
import numpy
import pandas
import seaborn
```

    /usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
      import pandas.util.testing as tm

```python
seaborn.set_context(context='talk', font_scale=1.2)
```

```python
BLUE = '#35A7FF'
RED = '#FF5964'
GREEN = '#6BF178'
YELLOW = '#FFE74C'
```

```python
dataset = pandas.read_csv(
    'https://object.cato.org/sites/cato.org/files/human-freedom-index-files/human-freedom-index-2019.csv')
dataset.shape
```

    (1620, 120)

```python
score_cols = [col for col in dataset.columns if 'pf_identity' in col] + [
    'pf_score',  # Personal Freedom (score)
    'pf_rank',   # Personal Freedom (rank)
    'ef_score',  # Economic Freedom (score)
    'ef_rank',   # Economic Freedom (rank)
    'hf_score',  # Human Freedom (score)
    'hf_rank',   # Human Freedom (rank)
]
important_cols = ['year', 'ISO_code', 'countries', 'region'] + score_cols
```

```python
dataset = dataset[important_cols].replace('-', numpy.nan)
for score_col in score_cols:
    dataset[score_col] = pandas.to_numeric(dataset[score_col])
dataset
```

<div>
<style scoped>
    .dataframe tbody tr th:only-of-type { vertical-align: middle; }
    .dataframe tbody tr th { vertical-align: top; }
    .dataframe thead th { text-align: right; }
</style>
<table border="1" class="dataframe">
  <thead>
    <tr style="text-align: right;"> <th></th> <th>year</th> <th>ISO_code</th> <th>countries</th> <th>region</th> <th>pf_identity_legal</th> <th>pf_identity_sex_male</th> <th>pf_identity_sex_female</th> <th>pf_identity_sex</th> <th>pf_identity_divorce</th> <th>pf_identity</th> <th>pf_score</th> <th>pf_rank</th> <th>ef_score</th> <th>ef_rank</th> <th>hf_score</th> <th>hf_rank</th> </tr>
  </thead>
  <tbody>
    <tr> <th>0</th> <td>2017</td> <td>ALB</td> <td>Albania</td> <td>Eastern Europe</td> <td>0.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>7.5</td> <td>5.8</td> <td>8.01</td> <td>46.0</td> <td>7.67</td> <td>30.0</td> <td>7.84</td> <td>38.0</td> </tr>
    <tr> <th>1</th> <td>2017</td> <td>DZA</td> <td>Algeria</td> <td>Middle East &amp; North Africa</td> <td>NaN</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>5.20</td> <td>146.0</td> <td>4.77</td> <td>159.0</td> <td>4.99</td> <td>155.0</td> </tr>
    <tr> <th>2</th> <td>2017</td> <td>AGO</td> <td>Angola</td> <td>Sub-Saharan Africa</td> <td>10.0</td> <td>0.0</td> <td>0.0</td> <td>0.0</td> <td>5.0</td> <td>5.0</td> <td>5.98</td> <td>121.0</td> <td>4.83</td> <td>158.0</td> <td>5.40</td> <td>151.0</td> </tr>
    <tr> <th>3</th> <td>2017</td> <td>ARG</td> <td>Argentina</td> <td>Latin America &amp; the Caribbean</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>8.04</td> <td>41.0</td> <td>5.67</td> <td>147.0</td> <td>6.86</td> <td>77.0</td> </tr>
    <tr> <th>4</th> <td>2017</td> <td>ARM</td> <td>Armenia</td> <td>Caucasus &amp; Central Asia</td> <td>7.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>7.5</td> <td>8.2</td> <td>7.15</td> <td>72.0</td> <td>7.70</td> <td>27.0</td> <td>7.42</td> <td>54.0</td> </tr>
    <tr> <th>...</th> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> </tr>
    <tr> <th>1615</th> <td>2008</td> <td>AUS</td> <td>Australia</td> <td>Oceania</td> <td>NaN</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>9.29</td> <td>7.0</td> <td>8.18</td> <td>6.0</td> <td>8.73</td> <td>4.0</td> </tr>
    <tr> <th>1616</th> <td>2008</td> <td>DNK</td> <td>Denmark</td> <td>Western Europe</td> <td>NaN</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>9.49</td> <td>3.0</td> <td>7.98</td> <td>9.0</td> <td>8.73</td> <td>4.0</td> </tr>
    <tr> <th>1617</th> <td>2008</td> <td>CHE</td> <td>Switzerland</td> <td>Western Europe</td> <td>NaN</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>9.31</td> <td>6.0</td> <td>8.35</td> <td>4.0</td> <td>8.83</td> <td>3.0</td> </tr>
    <tr> <th>1618</th> <td>2008</td> <td>NZL</td> <td>New Zealand</td> <td>Oceania</td> <td>NaN</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>9.42</td> <td>4.0</td> <td>8.46</td> <td>3.0</td> <td>8.94</td> <td>2.0</td> </tr>
    <tr> <th>1619</th> <td>2008</td> <td>HKG</td> <td>Hong Kong</td> <td>East Asia</td> <td>NaN</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>10.0</td> <td>9.13</td> <td>12.0</td> <td>9.11</td> <td>1.0</td> <td>9.12</td> <td>1.0</td> </tr>
  </tbody>
</table>
<p>1620 rows × 16 columns</p>
</div>

In the practical assignment we worked with the variables `ef_score` and `pf_score`, which refer to each country's economic freedom and personal freedom indices. We also know that the dataset includes an annual measurement of the index per country from 2008 to 2016, although some indicators have missing data.

The motivation for this analysis starts with this plot, which shows a decreasing trend in personal freedom and an increasing trend in economic freedom. Human freedom, or `hf_score`, is the average of both indicators.

```python
plt.figure(figsize=(10,6))
seaborn.lineplot(data=dataset, x='year', y='ef_score', label='Economic Freedom')
seaborn.lineplot(data=dataset, x='year', y='pf_score', label='Personal Freedom')
seaborn.lineplot(data=dataset, x='year', y='hf_score', label='Human Freedom')
plt.legend()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel('Index score')
seaborn.despine()
```

This phenomenon could be driven by several factors:

* There are few countries in which personal freedom is decreasing, but their economic freedom remains constant.
* The countries whose economic freedom rises decrease in personal freedom.
* **Others?**

Let's see what happens in Argentina. If we plot both variables, we see that they are "going down". Formally, this means that the line that models them has a negative slope.

**And is this serious?**

```python
plt.figure(figsize=(15, 8))
seaborn.regplot(data=dataset[(dataset.ISO_code == 'ARG')], x='year', y='ef_score', label='Economic Freedom')
seaborn.regplot(data=dataset[(dataset.ISO_code == 'ARG')], x='year', y='pf_score', label='Personal Freedom')
seaborn.regplot(data=dataset[(dataset.ISO_code == 'ARG')], x='year', y='hf_score', label='Human Freedom')
plt.legend()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
seaborn.despine()
plt.xlim(2008, 2016)
```

We can plot several countries, but with so many variables it is hard to compare visually which countries "decrease" faster than others.

```python
countries = ['Argentina', 'Brazil', 'Mexico', 'Bolivia', 'Uruguay', 'Peru',
             'Colombia', 'Venezuela']
g = seaborn.FacetGrid(dataset, col="countries", margin_titles=True,
                      aspect=2, col_wrap=2, col_order=countries)
g.map(seaborn.lineplot, "year", "ef_score", color=BLUE)
g.map(seaborn.lineplot, "year", "pf_score", color=RED)
g.set(xlim=(2008, 2016))
print('Puntajes de libertad para América Latina');
```

To compare Argentina's situation with other countries, we can compare the slope of the linear regression line. From the previous plot we could see that most countries have similar trends and can be estimated with a straight line without losing generality. This is also possible, to some extent, because we have few points to estimate from.

## Linear regression

We want to find the coefficient that relates both variables. First: **Which is the dependent variable? Which is the independent one?** Once we have determined that, what we want to find is a function of the following form:

$$ef = a * year + b$$

We rewrite this as a function $e$ (for economic) whose argument is the value $y$ (for year):

$$e(y) = a * y + b$$

We describe the examples as pairs $(x_y, x_e)$, where $x_y$ denotes the `year` and $x_e$ denotes `ef_score`. To find the line $e$ that best describes the data, we want to minimize the mean squared error, defined as:

$$mse = \frac{1}{|X|} \sum_{x \in X} (e(x_y) - x_e)^2 $$

Recall that to minimize a function, a good option is to start by looking for stationary points, where the derivative vanishes. Luckily, the $mse$ function is convex, so all of its stationary points are minimizers. The minimizer is the value of the parameters $a$ and $b$ that minimizes the function. Note that we have changed our "variables": what we are looking for is the right function, so what varies are the values of the parameters that define it.

First, note that:

$$\frac{\partial}{\partial a}e(x_y) = x_y$$

$$\frac{\partial}{\partial b}e(x_y) = 1$$

With that, we compute the partial derivative of $mse$ with respect to each parameter:

$$\frac{\partial}{\partial a}mse = \frac{2}{|X|} \sum_{x \in X} (e(x_y) - x_e) \frac{\partial}{\partial a} (e(x_y) - x_e) = \frac{2}{|X|} \sum_{x \in X} (e(x_y) - x_e)\, x_y $$

$$\frac{\partial}{\partial b}mse = \frac{2}{|X|} \sum_{x \in X} \frac{\partial}{\partial b} (e(x_y) - x_e) = \frac{2}{|X|} \sum_{x \in X} (e(x_y) - x_e) $$

Despite the formula-heavy look, this is fairly simple. We just replace $mse$ with its definition and then apply a couple of rules such as "the derivative of a sum is the sum of the derivatives", the chain rule, and the derivative of the square function. Once we have these values, we set them to zero to find the stationary points.

\begin{align}
\frac{\partial}{\partial a}mse &= \frac{2}{|X|} \sum_{x \in X} (e(x_y) - x_e) x_y = 0 \\
 &\Rightarrow a = \frac{\bar{x_y} \bar{x_e} - \overline{x_yx_e}}{(\bar{x_y})^2 - \overline{x_y^2}}
\end{align}

\begin{align}
\frac{\partial}{\partial b}mse &= \frac{2}{|X|} \sum_{x \in X} e(x_y) - x_e = 0 \\
 &\Rightarrow b = \bar{x_e} - a \bar{x_y}
\end{align}

Here $\bar{x}$ is the mean of the value over all examples. We will take these formulas on trust; a derivation can be found at: https://medium.freecodecamp.org/machine-learning-mean-squared-error-regression-line-c7dde9a26b93

```python
def estimate_params(X_y, X_e):
    """Calculates the values of a and b using all the examples."""
    num = numpy.mean(X_y)*numpy.mean(X_e) - numpy.mean(numpy.multiply(X_y, X_e))
    denom = numpy.mean(X_y)**2 - numpy.mean(numpy.multiply(X_y, X_y))
    a = num / denom
    b = numpy.mean(X_e) - a * numpy.mean(X_y)
    return a, b
```

```python
# We assume every record that has pf_score also has the year.
a, b = estimate_params(
    dataset[(dataset.ISO_code == 'ARG') & (dataset.pf_score.notnull())].year.dropna(),
    dataset[dataset.ISO_code == 'ARG'].pf_score)
a, b
```

    (-0.02048484848483261, 49.32575757572563)

```python
def base_linear_regression(x_y, a):
    return a * x_y
```

```python
def regplot2(data, x, y, reg_func, **reg_func_args):
    """Plots the x, y columns from data and builds a line with the regression reg_func."""
    seaborn.scatterplot(data=data, x=x, y=y, color=BLUE)
    minimum = data[x].min()
    maximum = data[x].max()
    plt.plot([minimum, maximum],
             [reg_func(minimum, **reg_func_args), reg_func(maximum, **reg_func_args)],
             color=GREEN)
    seaborn.despine()
    plt.show()
```

```python
regplot2(dataset[dataset.ISO_code == 'ARG'], x='year', y='pf_score',
         reg_func=base_linear_regression, a=a)
```

We see that the line goes in the right direction, but sits too low. This happens because we have not used the bias term. So let us redefine the linear regression:

```python
def linear_regression(x_y, a, b):
    return a * x_y + b
```

```python
regplot2(dataset[dataset.ISO_code == 'ARG'], x='year', y='pf_score',
         reg_func=linear_regression, a=a, b=b)
```

## Continuing the analysis

Great! Now we can compute the slopes and biases for all years, for regressions that estimate `pf_score`.
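Before running this for every country, a quick cross-check (added here; not in the original notebook) that the closed-form estimator agrees with `numpy.polyfit` of degree 1 on toy data:

```python
x = numpy.array([2008., 2009., 2010., 2011., 2012.])
y = numpy.array([7.0, 6.9, 6.95, 6.8, 6.75])

a_cf, b_cf = estimate_params(x, y)
a_np, b_np = numpy.polyfit(x, y, deg=1)    # least-squares line: slope, intercept
print(numpy.allclose([a_cf, b_cf], [a_np, b_np]))   # True
```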
```python
def build_regressions(data, x_var='year', y_var='pf_score'):
    records = []
    for code in data.ISO_code.unique():
        record = [code,
                  data[data.ISO_code == code].region.values[0],
                  data[data.ISO_code == code].countries.values[0]]
        y_data = data[data.ISO_code == code][y_var].dropna()
        # Check that there is data in the interval
        if len(y_data) <= 1:
            continue
        x_data = data[(data.ISO_code == code) &
                      (data[y_var].notnull())][x_var].dropna()
        # Estimate the parameters
        a, b = estimate_params(x_data, y_data)
        # Compute the mean squared error of the estimated linear regression
        predictions = numpy.apply_along_axis(
            lambda x: linear_regression(x, a, b), 0, x_data)
        mse = numpy.mean(numpy.power(predictions - y_data, 2))
        record.extend([a, b, mse])
        # Append the record
        records.append(record)
    return pandas.DataFrame.from_records(
        records, columns=['ISO_code', 'region', 'country', 'slope', 'bias', 'mse']
    )
```

```python
pf_regressions = build_regressions(dataset).set_index('ISO_code')
pf_regressions[:10]
```

<div>
<style scoped>
    .dataframe tbody tr th:only-of-type { vertical-align: middle; }
    .dataframe tbody tr th { vertical-align: top; }
    .dataframe thead th { text-align: right; }
</style>
<table border="1" class="dataframe">
  <thead>
    <tr style="text-align: right;"> <th></th> <th>region</th> <th>country</th> <th>slope</th> <th>bias</th> <th>mse</th> </tr>
    <tr> <th>ISO_code</th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr>
  </thead>
  <tbody>
    <tr> <th>ALB</th> <td>Eastern Europe</td> <td>Albania</td> <td>-0.036909</td> <td>82.084545</td> <td>0.044086</td> </tr>
    <tr> <th>DZA</th> <td>Middle East &amp; North Africa</td> <td>Algeria</td> <td>-0.006121</td> <td>17.587939</td> <td>0.009140</td> </tr>
    <tr> <th>AGO</th> <td>Sub-Saharan Africa</td> <td>Angola</td> <td>0.087636</td> <td>-170.678182</td> <td>0.039059</td> </tr>
    <tr> <th>ARG</th> <td>Latin America &amp; the Caribbean</td> <td>Argentina</td> <td>-0.020485</td> <td>49.325758</td> <td>0.003178</td> </tr>
    <tr> <th>ARM</th> <td>Caucasus &amp; Central Asia</td> <td>Armenia</td> <td>-0.016667</td> <td>40.756667</td> <td>0.008773</td> </tr>
    <tr> <th>AUS</th> <td>Oceania</td> <td>Australia</td> <td>-0.011576</td> <td>32.513212</td> <td>0.002516</td> </tr>
    <tr> <th>AUT</th> <td>Western Europe</td> <td>Austria</td> <td>0.037394</td> <td>-66.048303</td> <td>0.007005</td> </tr>
    <tr> <th>AZE</th> <td>Caucasus &amp; Central Asia</td> <td>Azerbaijan</td> <td>-0.105758</td> <td>218.958121</td> <td>0.060136</td> </tr>
    <tr> <th>BHS</th> <td>Latin America &amp; the Caribbean</td> <td>Bahamas</td> <td>-0.032909</td> <td>74.170545</td> <td>0.009934</td> </tr>
    <tr> <th>BHR</th> <td>Middle East &amp; North Africa</td> <td>Bahrain</td> <td>-0.089515</td> <td>186.388242</td> <td>0.048762</td> </tr>
  </tbody>
</table>
</div>

```python
plt.figure(figsize=(10,6))
seaborn.distplot(
    pf_regressions[pf_regressions.region == 'Latin America & the Caribbean'].slope,
    color=BLUE, label='Latam y Caribe')
seaborn.distplot(pf_regressions.slope, color=RED, label='Global')
plt.xlabel('Slope of linear regression that fits the pf_score of each country')
plt.legend()
seaborn.despine()
```

```python
def plot_regressions(regressions):
    plt.figure(figsize=(10,6))
    colors = seaborn.color_palette("cubehelix", len(regressions))
    for color, (year, row) in zip(colors, regressions.iterrows()):
        minimum, maximum = 2008, 2016
        plt.plot([minimum, maximum],
                 [linear_regression(minimum, row['slope'], row['bias']),
                  linear_regression(maximum, row['slope'], row['bias'])],
                 color=color, label=str(year),
                 linestyle='--')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    seaborn.despine()
```

```python
plot_regressions(pf_regressions.loc[['ARG', 'BRA', 'VEN', 'CAN']])
plt.xlabel('year')
plt.ylabel('pf_score')
plt.ylim(4, 10)
```

### Economic Freedom

```python
ef_regressions = build_regressions(dataset, y_var='ef_score').set_index('ISO_code')
ef_regressions[:10]
```

<div>
<style scoped>
    .dataframe tbody tr th:only-of-type { vertical-align: middle; }
    .dataframe tbody tr th { vertical-align: top; }
    .dataframe thead th { text-align: right; }
</style>
<table border="1" class="dataframe">
  <thead>
    <tr style="text-align: right;"> <th></th> <th>region</th> <th>country</th> <th>slope</th> <th>bias</th> <th>mse</th> </tr>
    <tr> <th>ISO_code</th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr>
  </thead>
  <tbody>
    <tr> <th>ALB</th> <td>Eastern Europe</td> <td>Albania</td> <td>0.047333</td> <td>-87.801333</td> <td>0.003397</td> </tr>
    <tr> <th>DZA</th> <td>Middle East &amp; North Africa</td> <td>Algeria</td> <td>-0.042606</td> <td>90.805697</td> <td>0.009553</td> </tr>
    <tr> <th>AGO</th> <td>Sub-Saharan Africa</td> <td>Angola</td> <td>0.015394</td> <td>-25.786303</td> <td>0.084069</td> </tr>
    <tr> <th>ARG</th> <td>Latin America &amp; the Caribbean</td> <td>Argentina</td> <td>-0.083273</td> <td>173.018364</td> <td>0.132188</td> </tr>
    <tr> <th>ARM</th> <td>Caucasus &amp; Central Asia</td> <td>Armenia</td> <td>0.013333</td> <td>-19.165333</td> <td>0.003609</td> </tr>
    <tr> <th>AUS</th> <td>Oceania</td> <td>Australia</td> <td>-0.010909</td> <td>30.026545</td> <td>0.002294</td> </tr>
    <tr> <th>AUT</th> <td>Western Europe</td> <td>Austria</td> <td>-0.008485</td> <td>24.819758</td> <td>0.000690</td> </tr>
    <tr> <th>AZE</th> <td>Caucasus &amp; Central Asia</td> <td>Azerbaijan</td> <td>0.042242</td> <td>-78.787879</td> <td>0.007204</td> </tr>
    <tr> <th>BHS</th> <td>Latin America &amp; the Caribbean</td> <td>Bahamas</td> <td>-0.026667</td> <td>61.038667</td> <td>0.003569</td> </tr>
    <tr> <th>BHR</th> <td>Middle East &amp; North Africa</td> <td>Bahrain</td> <td>-0.010364</td> <td>28.097818</td> <td>0.009243</td> </tr>
  </tbody>
</table>
</div>

```python
plot_regressions(ef_regressions.loc[['ARG', 'BRA', 'VEN', 'CAN']])
plt.xlabel('year')
plt.ylabel('ef_score')
```

```python
plt.figure(figsize=(10,6))
seaborn.distplot(
    ef_regressions[ef_regressions.region == 'Latin America & the Caribbean'].slope,
    color=BLUE, label='Latam y Caribe')
seaborn.distplot(ef_regressions.slope, color=RED, label='Global')
plt.xlabel('Slope of linear regression that fits the ef_score of each country')
plt.legend()
seaborn.despine()
```

## Joint analysis

**Which are the 10% of countries where personal freedom is decreasing fastest?**

```python
quantil = pf_regressions.slope.quantile(0.1)
pf_regressions[pf_regressions.slope < quantil].country
```

    ISO_code
    BTN               Bhutan
    BRA               Brazil
    BRN    Brunei Darussalam
    BDI              Burundi
    EGY                Egypt
    GMB          Gambia, The
    MUS            Mauritius
    NPL                Nepal
    NER                Niger
    RWA               Rwanda
    SYR                Syria
    TJK           Tajikistan
    THA             Thailand
    TUR               Turkey
    UKR              Ukraine
    VEN            Venezuela
    YEM          Yemen, Rep.
    Name: country, dtype: object

**Which are the 10% of countries where economic freedom is decreasing fastest?**

```python
quantil = ef_regressions.slope.quantile(0.1)
ef_regressions[ef_regressions.slope < quantil].country
```

    ISO_code
    ARG            Argentina
    BRA               Brazil
    BRN    Brunei Darussalam
    EGY                Egypt
    FJI                 Fiji
    GHA                Ghana
    IRQ                 Iraq
    KWT               Kuwait
    LBR              Liberia
    LBY                Libya
    PNG     Papua New Guinea
    SLE         Sierra Leone
    SDN                Sudan
    SYR                Syria
    TUN              Tunisia
    VEN            Venezuela
    ZMB               Zambia
    Name: country, dtype: object

**Which are the countries where economic freedom increases but personal freedom decreases (rapidly)?**

```python
all_countries = dataset.ISO_code.unique()
codes = []
for code in all_countries:
    if (code in ef_regressions.index and code in pf_regressions.index
            and ef_regressions.loc[code].slope > 0.02
            and pf_regressions.loc[code].slope < -0.02):
        codes.append(code)
ef_regressions.loc[codes].country
```

    ISO_code
    ALB         Albania
    AZE      Azerbaijan
    BGR        Bulgaria
    BDI         Burundi
    KHM        Cambodia
    CPV      Cape Verde
    CHN           China
    GMB     Gambia, The
    GTM       Guatemala
    GIN          Guinea
    ISL         Iceland
    IDN       Indonesia
    KAZ      Kazakhstan
    LAO            Laos
    MYS        Malaysia
    MLT           Malta
    MEX          Mexico
    MAR         Morocco
    MMR         Myanmar
    NPL           Nepal
    NER           Niger
    PRY        Paraguay
    PHL     Philippines
    RUS          Russia
    RWA          Rwanda
    ESP           Spain
    TZA        Tanzania
    VNM         Vietnam
    Name: country, dtype: object

# Errors

We computed the mse but never used it. Let's look at the countries for which the linear regression does not produce a good approximation.

```python
pf_regressions.mse.sort_values()[-10:]
```

    ISO_code
    TLS    0.106220
    SYC    0.107328
    TUR    0.126490
    CAF    0.142989
    VEN    0.145224
    COD    0.165123
    LBY    0.182368
    GNB    0.194737
    YEM    0.195064
    BDI    0.206774
    Name: mse, dtype: float64

```python
plt.figure(figsize=(10,6))
countries = ['BDI', 'YEM', 'GNB', 'LBY']
seaborn.lineplot(data=dataset[dataset.ISO_code.isin(countries)],
                 x='year', y='hf_score', hue='countries')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
seaborn.despine()
```

It is clear that these curves could not be estimated satisfactorily with a straight line; even so, the general trend (downward or upward) would have been captured approximately.
// Copyright (C) 2011-2012 The Trustees of Indiana University.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

//  Authors: Jeremiah Willcock
//           Andrew Lumsdaine

#ifndef BOOST_NWAY_BERNOULLI_DISTRIBUTION_HPP
#define BOOST_NWAY_BERNOULLI_DISTRIBUTION_HPP

#include <boost/random/uniform_01.hpp>
#include <limits>

namespace boost {
namespace graph {
namespace random {

template <int NCases, typename RAIter>
struct find_bucket_impl {
  typedef typename std::iterator_traits<RAIter>::value_type value_type;
  static int go(value_type x, RAIter start) {
    RAIter middle = start + NCases / 2;
    if (x < *middle) {
      return find_bucket_impl<NCases / 2, RAIter>::go(x, start);
    } else {
      return NCases / 2 + find_bucket_impl<NCases - NCases / 2, RAIter>::go(x, middle);
    }
  }
};

template <typename RAIter>
struct find_bucket_impl<1, RAIter> {
  typedef typename std::iterator_traits<RAIter>::value_type value_type;
  static int go(value_type /* x */, RAIter /* start */) {
    return 0;
  }
};

template <typename RAIter>
struct find_bucket_impl<0, RAIter> {};

// Find the last bucket whose value is <= x
template <int NCases, typename RAIter>
int find_bucket(typename std::iterator_traits<RAIter>::value_type x, RAIter start) {
  return find_bucket_impl<NCases, RAIter>::go(x, start);
}

template <int NCases, typename RealType = double, typename RNGResult = RealType>
class nway_bernoulli_distribution {
  RNGResult ranges[NCases + 1];

  public:
  nway_bernoulli_distribution() {}

  // Range of generator must be [min_, max_)
  template <typename RandomAccessIterator>
  explicit nway_bernoulli_distribution(RandomAccessIterator it,
                                       RNGResult min_, RNGResult max_) {
    set_probabilities(it, min_, max_);
  }

  template <typename RandomAccessIterator>
  void set_probabilities(RandomAccessIterator it, RNGResult min_, RNGResult max_) {
    RealType total = 0;
    for (size_t i = 0; i < NCases; ++i) {
      ranges[i] = RNGResult(min_ + (max_ - min_) * total);
      total += *it++;
    }
    ranges[NCases] = max_;
  }

  int compute_from_random_number(RNGResult x) const {
    return find_bucket<NCases>(x, ranges);
  }

  template <typename Gen>
  int operator()(Gen& gen) const {
    return compute_from_random_number(gen());
  }
};

template <int NCases, typename RealType, typename RandomAccessIterator, typename Gen>
nway_bernoulli_distribution<NCases, RealType, typename Gen::result_type>
make_nway_bernoulli_distribution(RandomAccessIterator it, const Gen& gen) {
  return nway_bernoulli_distribution<NCases, RealType, typename Gen::result_type>
           (it, (gen.min)(), (gen.max)() + std::numeric_limits<typename Gen::result_type>::is_integer);
}

} } } // end namespace boost::graph::random

#endif // BOOST_NWAY_BERNOULLI_DISTRIBUTION_HPP
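The same "cumulative edges + binary search" idea in a few lines of Python (an illustrative sketch, not part of Boost; `bisect_right` plays the role of `find_bucket`):

```python
import bisect
import random

class NWayBernoulli:
    """Map a uniform [0, 1) draw to one of n outcomes with given probabilities."""
    def __init__(self, probs):
        total, self.edges = 0.0, []
        for p in probs:             # cumulative lower edges, as in set_probabilities
            self.edges.append(total)
            total += p
    def __call__(self, u=None):
        if u is None:
            u = random.random()
        # index of the last edge <= u, i.e. find_bucket's answer
        return bisect.bisect_right(self.edges, u) - 1

d = NWayBernoulli([0.5, 0.3, 0.2])
print(d(0.85))   # -> 2 (0.85 falls in the [0.8, 1.0) bucket)
```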
lemma tendsto_dist [tendsto_intros]: fixes l m :: "'a::metric_space" assumes f: "(f \<longlongrightarrow> l) F" and g: "(g \<longlongrightarrow> m) F" shows "((\<lambda>x. dist (f x) (g x)) \<longlongrightarrow> dist l m) F"
The " Summaries of Work done at Little Rock Arsenal , C.S.A. " continue at about the same pace and scale from August 1862 until August 1863 . <unk> to the " Summary " for August , 1863 is the ominous notation , " During the last week in the month , nearly all stores at the Arsenal have been packed and sent to Arkadelphia , in obedience to orders from Chief of Ordnance , District of Arkansas . " This then marks the beginning of the evacuation of ordnance activities from Little Rock , with the city being surrendered to the advancing Federal troops of Frederick Steele 's Arkansas Expedition on September 11 , 1863 .
str(Titanic)
titanic = data.frame(Titanic)
str(titanic)

#filter data where Survived == "Yes"
#error--> filter(titanic, Survived == "Yes") fails here because dplyr is not loaded; base R's filter() works on time series, not data frames
(df <- titanic[which(titanic$Survived == "Yes"), ])
df
(df1 <- titanic[which(titanic$Sex == "Male" | titanic$Survived == "Yes"), ])
df1
#https://youtu.be/fbsNhipRK3Y https://youtu.be/1UJ2KUzIOKw

#attach
attach(titanic)
(df <- titanic[which(Survived == "Yes"), ])
#keep only columns 1, 3 and 5 (the column selector belongs outside which())
(df <- titanic[which(Survived == "Yes"), c(1,3,5)])
df

View(mtcars)
mpg        #error: mpg is not visible until mtcars is attached
mtcars$mpg
attach(mtcars)
mpg
detach(mtcars)
mpg        #error: mpg is no longer visible after detach

#using subset method to filter data
df2 <- subset(titanic, Survived == "Yes" & Sex == "Male")
df2
#limit the set of columns
df3 <- subset(titanic, Survived == "Yes" & Sex == "Male", select = c(1,3,5))
df3

#sample
(x <- sample(1:100))     #random permutation
(x1 <- 1:100)            #sequence
(x <- sample(50, 10))    #replace = FALSE by default
(x <- sample(2, 50, replace = T, prob = c(0.8, 0.2)))
(x <- sample(3, 50, replace = T, prob = c(0.5, 0.3, 0.2)))

#rep
(y <- rep(1:5))          #times = 1
(y <- rep(c("a","d"), 4, 5))
(y <- rep(c("a","d"), times = 4, len = 5))
(y <- rep(c("a","d"), times = 4, each = 5, len = 50))
/- Copyright (c) 2019 Kenny Lau. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Kenny Lau -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.data.equiv.basic import Mathlib.control.applicative import Mathlib.control.traversable.basic import Mathlib.PostPort universes u l v u_1 namespace Mathlib /-! # Free constructions ## Main definitions * `free_magma α`: free magma (structure with binary operation without any axioms) over alphabet `α`, defined inductively, with traversable instance and decidable equality. * `magma.free_semigroup α`: free semigroup over magma `α`. * `free_semigroup α`: free semigroup over alphabet `α`, defined as a synonym for `α × list α` (i.e. nonempty lists), with traversable instance and decidable equality. * `free_semigroup_free_magma α`: isomorphism between `magma.free_semigroup (free_magma α)` and `free_semigroup α`. -/ /-- Free magma over a given alphabet. -/ inductive free_magma (α : Type u) where | of : α → free_magma α | mul : free_magma α → free_magma α → free_magma α /-- Free nonabelian additive magma over a given alphabet. -/ inductive free_add_magma (α : Type u) where | of : α → free_add_magma α | add : free_add_magma α → free_add_magma α → free_add_magma α namespace free_magma protected instance Mathlib.free_add_magma.inhabited {α : Type u} [Inhabited α] : Inhabited (free_add_magma α) := { default := free_add_magma.of Inhabited.default } protected instance Mathlib.free_add_magma.has_add {α : Type u} : Add (free_add_magma α) := { add := free_add_magma.add } @[simp] theorem Mathlib.free_add_magma.add_eq {α : Type u} (x : free_add_magma α) (y : free_add_magma α) : free_add_magma.add x y = x + y := rfl /-- Recursor for `free_magma` using `x * y` instead of `free_magma.mul x y`. -/ def Mathlib.free_add_magma.rec_on' {α : Type u} {C : free_add_magma α → Sort l} (x : free_add_magma α) (ih1 : (x : α) → C (free_add_magma.of x)) (ih2 : (x y : free_add_magma α) → C x → C y → C (x + y)) : C x := free_add_magma.rec_on x ih1 ih2 end free_magma /-- Lifts a function `α → β` to a magma homomorphism `free_magma α → β` given a magma `β`. -/ def free_magma.lift {α : Type u} {β : Type v} [Mul β] (f : α → β) : free_magma α → β := sorry /-- Lifts a function `α → β` to an additive magma homomorphism `free_add_magma α → β` given an additive magma `β`. -/ def free_add_magma.lift {α : Type u} {β : Type v} [Add β] (f : α → β) : free_add_magma α → β := sorry namespace free_magma @[simp] theorem Mathlib.free_add_magma.lift_of {α : Type u} {β : Type v} [Add β] (f : α → β) (x : α) : free_add_magma.lift f (free_add_magma.of x) = f x := rfl @[simp] theorem lift_mul {α : Type u} {β : Type v} [Mul β] (f : α → β) (x : free_magma α) (y : free_magma α) : lift f (x * y) = lift f x * lift f y := rfl theorem lift_unique {α : Type u} {β : Type v} [Mul β] (f : free_magma α → β) (hf : ∀ (x y : free_magma α), f (x * y) = f x * f y) : f = lift (f ∘ of) := sorry end free_magma /-- The unique magma homomorphism `free_magma α → free_magma β` that sends each `of x` to `of (f x)`. -/ def free_magma.map {α : Type u} {β : Type v} (f : α → β) : free_magma α → free_magma β := sorry /-- The unique additive magma homomorphism `free_add_magma α → free_add_magma β` that sends each `of x` to `of (f x)`. 
-/ def free_add_magma.map {α : Type u} {β : Type v} (f : α → β) : free_add_magma α → free_add_magma β := sorry namespace free_magma @[simp] theorem Mathlib.free_add_magma.map_of {α : Type u} {β : Type v} (f : α → β) (x : α) : free_add_magma.map f (free_add_magma.of x) = free_add_magma.of (f x) := rfl @[simp] theorem Mathlib.free_add_magma.map_add {α : Type u} {β : Type v} (f : α → β) (x : free_add_magma α) (y : free_add_magma α) : free_add_magma.map f (x + y) = free_add_magma.map f x + free_add_magma.map f y := rfl protected instance Mathlib.free_add_magma.monad : Monad free_add_magma := sorry /-- Recursor on `free_magma` using `pure` instead of `of`. -/ protected def Mathlib.free_add_magma.rec_on'' {α : Type u} {C : free_add_magma α → Sort l} (x : free_add_magma α) (ih1 : (x : α) → C (pure x)) (ih2 : (x y : free_add_magma α) → C x → C y → C (x + y)) : C x := free_add_magma.rec_on' x ih1 ih2 @[simp] theorem Mathlib.free_add_magma.map_pure {α : Type u} {β : Type u} (f : α → β) (x : α) : f <$> pure x = pure (f x) := rfl @[simp] theorem Mathlib.free_add_magma.map_add' {α : Type u} {β : Type u} (f : α → β) (x : free_add_magma α) (y : free_add_magma α) : f <$> (x + y) = f <$> x + f <$> y := rfl @[simp] theorem Mathlib.free_add_magma.pure_bind {α : Type u} {β : Type u} (f : α → free_add_magma β) (x : α) : pure x >>= f = f x := rfl @[simp] theorem mul_bind {α : Type u} {β : Type u} (f : α → free_magma β) (x : free_magma α) (y : free_magma α) : x * y >>= f = (x >>= f) * (y >>= f) := rfl @[simp] theorem Mathlib.free_add_magma.pure_seq {α : Type u} {β : Type u} {f : α → β} {x : free_add_magma α} : pure f <*> x = f <$> x := rfl @[simp] theorem mul_seq {α : Type u} {β : Type u} {f : free_magma (α → β)} {g : free_magma (α → β)} {x : free_magma α} : f * g <*> x = (f <*> x) * (g <*> x) := rfl protected instance Mathlib.free_add_magma.is_lawful_monad : is_lawful_monad free_add_magma := sorry end free_magma /-- `free_magma` is traversable. -/ protected def free_magma.traverse {m : Type u → Type u} [Applicative m] {α : Type u} {β : Type u} (F : α → m β) : free_magma α → m (free_magma β) := sorry /-- `free_add_magma` is traversable. 
-/ protected def free_add_magma.traverse {m : Type u → Type u} [Applicative m] {α : Type u} {β : Type u} (F : α → m β) : free_add_magma α → m (free_add_magma β) := sorry namespace free_magma protected instance Mathlib.free_add_magma.traversable : traversable free_add_magma := traversable.mk free_add_magma.traverse @[simp] theorem Mathlib.free_add_magma.traverse_pure {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) (x : α) : traverse F (pure x) = pure <$> F x := rfl @[simp] theorem Mathlib.free_add_magma.traverse_pure' {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) : traverse F ∘ pure = fun (x : α) => pure <$> F x := rfl @[simp] theorem Mathlib.free_add_magma.traverse_add {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) (x : free_add_magma α) (y : free_add_magma α) : traverse F (x + y) = Add.add <$> traverse F x <*> traverse F y := rfl @[simp] theorem Mathlib.free_add_magma.traverse_add' {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) : function.comp (traverse F) ∘ Add.add = fun (x y : free_add_magma α) => Add.add <$> traverse F x <*> traverse F y := rfl @[simp] theorem Mathlib.free_add_magma.traverse_eq {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) (x : free_add_magma α) : free_add_magma.traverse F x = traverse F x := rfl @[simp] theorem mul_map_seq {α : Type u} (x : free_magma α) (y : free_magma α) : Mul.mul <$> x <*> y = x * y := rfl protected instance Mathlib.free_add_magma.is_lawful_traversable : is_lawful_traversable free_add_magma := is_lawful_traversable.mk sorry sorry sorry sorry end free_magma /-- Representation of an element of a free magma. -/ protected def free_magma.repr {α : Type u} [has_repr α] : free_magma α → string := sorry /-- Representation of an element of a free additive magma. -/ protected def free_add_magma.repr {α : Type u} [has_repr α] : free_add_magma α → string := sorry protected instance free_add_magma.has_repr {α : Type u} [has_repr α] : has_repr (free_add_magma α) := has_repr.mk free_add_magma.repr /-- Length of an element of a free magma. -/ def free_magma.length {α : Type u} : free_magma α → ℕ := sorry /-- Length of an element of a free additive magma. -/ def free_add_magma.length {α : Type u} : free_add_magma α → ℕ := sorry /-- Associativity relations for a magma. -/ inductive magma.free_semigroup.r (α : Type u) [Mul α] : α → α → Prop where | intro : ∀ (x y z : α), magma.free_semigroup.r α (x * y * z) (x * (y * z)) | left : ∀ (w x y z : α), magma.free_semigroup.r α (w * (x * y * z)) (w * (x * (y * z))) /-- Associativity relations for an additive magma. -/ inductive add_magma.free_add_semigroup.r (α : Type u) [Add α] : α → α → Prop where | intro : ∀ (x y z : α), add_magma.free_add_semigroup.r α (x + y + z) (x + (y + z)) | left : ∀ (w x y z : α), add_magma.free_add_semigroup.r α (w + (x + y + z)) (w + (x + (y + z))) namespace magma /-- Free semigroup over a magma. -/ def free_semigroup (α : Type u) [Mul α] := Quot sorry namespace free_semigroup /-- Embedding from magma to its free semigroup. 
-/ def Mathlib.add_magma.free_add_semigroup.of {α : Type u} [Add α] : α → add_magma.free_add_semigroup α := Quot.mk (add_magma.free_add_semigroup.r α) protected instance Mathlib.add_magma.free_add_semigroup.inhabited {α : Type u} [Add α] [Inhabited α] : Inhabited (add_magma.free_add_semigroup α) := { default := add_magma.free_add_semigroup.of Inhabited.default } protected theorem Mathlib.add_magma.free_add_semigroup.induction_on {α : Type u} [Add α] {C : add_magma.free_add_semigroup α → Prop} (x : add_magma.free_add_semigroup α) (ih : ∀ (x : α), C (add_magma.free_add_semigroup.of x)) : C x := quot.induction_on x ih theorem of_mul_assoc {α : Type u} [Mul α] (x : α) (y : α) (z : α) : of (x * y * z) = of (x * (y * z)) := quot.sound (r.intro x y z) theorem of_mul_assoc_left {α : Type u} [Mul α] (w : α) (x : α) (y : α) (z : α) : of (w * (x * y * z)) = of (w * (x * (y * z))) := quot.sound (r.left w x y z) theorem of_mul_assoc_right {α : Type u} [Mul α] (w : α) (x : α) (y : α) (z : α) : of (w * x * y * z) = of (w * (x * y) * z) := sorry protected instance semigroup {α : Type u} [Mul α] : semigroup (free_semigroup α) := semigroup.mk (fun (x y : free_semigroup α) => quot.lift_on x (fun (p : α) => quot.lift_on y (fun (q : α) => Quot.mk (r α) (p * q)) sorry) sorry) sorry theorem Mathlib.add_magma.free_add_semigroup.of_add {α : Type u} [Add α] (x : α) (y : α) : add_magma.free_add_semigroup.of (x + y) = add_magma.free_add_semigroup.of x + add_magma.free_add_semigroup.of y := rfl /-- Lifts a magma homomorphism `α → β` to a semigroup homomorphism `magma.free_semigroup α → β` given a semigroup `β`. -/ def lift {α : Type u} [Mul α] {β : Type v} [semigroup β] (f : α → β) (hf : ∀ (x y : α), f (x * y) = f x * f y) : free_semigroup α → β := Quot.lift f sorry @[simp] theorem lift_of {α : Type u} [Mul α] {β : Type v} [semigroup β] (f : α → β) {hf : ∀ (x y : α), f (x * y) = f x * f y} (x : α) : lift f hf (of x) = f x := rfl @[simp] theorem lift_mul {α : Type u} [Mul α] {β : Type v} [semigroup β] (f : α → β) {hf : ∀ (x y : α), f (x * y) = f x * f y} (x : free_semigroup α) (y : free_semigroup α) : lift f hf (x * y) = lift f hf x * lift f hf y := quot.induction_on x fun (p : α) => quot.induction_on y fun (q : α) => hf p q theorem Mathlib.add_magma.free_add_semigroup.lift_unique {α : Type u} [Add α] {β : Type v} [add_semigroup β] (f : add_magma.free_add_semigroup α → β) (hf : ∀ (x y : add_magma.free_add_semigroup α), f (x + y) = f x + f y) : f = add_magma.free_add_semigroup.lift (f ∘ add_magma.free_add_semigroup.of) fun (p q : α) => hf (add_magma.free_add_semigroup.of p) (add_magma.free_add_semigroup.of q) := funext fun (x : add_magma.free_add_semigroup α) => quot.induction_on x fun (p : α) => rfl /-- From a magma homomorphism `α → β` to a semigroup homomorphism `magma.free_semigroup α → magma.free_semigroup β`. 
-/ def Mathlib.add_magma.free_add_semigroup.map {α : Type u} [Add α] {β : Type v} [Add β] (f : α → β) (hf : ∀ (x y : α), f (x + y) = f x + f y) : add_magma.free_add_semigroup α → add_magma.free_add_semigroup β := add_magma.free_add_semigroup.lift (add_magma.free_add_semigroup.of ∘ f) sorry @[simp] theorem Mathlib.add_magma.free_add_semigroup.map_of {α : Type u} [Add α] {β : Type v} [Add β] (f : α → β) {hf : ∀ (x y : α), f (x + y) = f x + f y} (x : α) : add_magma.free_add_semigroup.map f hf (add_magma.free_add_semigroup.of x) = add_magma.free_add_semigroup.of (f x) := rfl @[simp] theorem map_mul {α : Type u} [Mul α] {β : Type v} [Mul β] (f : α → β) {hf : ∀ (x y : α), f (x * y) = f x * f y} (x : free_semigroup α) (y : free_semigroup α) : map f hf (x * y) = map f hf x * map f hf y := lift_mul (of ∘ f) x y end free_semigroup end magma /-- Free semigroup over a given alphabet. (Note: In this definition, the free semigroup does not contain the empty word.) -/ def free_semigroup (α : Type u) := α × List α namespace free_semigroup protected instance semigroup {α : Type u} : semigroup (free_semigroup α) := semigroup.mk (fun (L1 L2 : free_semigroup α) => (prod.fst L1, prod.snd L1 ++ prod.fst L2 :: prod.snd L2)) sorry /-- The embedding `α → free_semigroup α`. -/ def Mathlib.free_add_semigroup.of {α : Type u} (x : α) : free_add_semigroup α := (x, []) protected instance Mathlib.free_add_semigroup.inhabited {α : Type u} [Inhabited α] : Inhabited (free_add_semigroup α) := { default := free_add_semigroup.of Inhabited.default } /-- Recursor for free semigroup using `of` and `*`. -/ protected def Mathlib.free_add_semigroup.rec_on {α : Type u} {C : free_add_semigroup α → Sort l} (x : free_add_semigroup α) (ih1 : (x : α) → C (free_add_semigroup.of x)) (ih2 : (x : α) → (y : free_add_semigroup α) → C (free_add_semigroup.of x) → C y → C (free_add_semigroup.of x + y)) : C x := prod.rec_on x fun (f : α) (s : List α) => list.rec_on s ih1 (fun (hd : α) (tl : List α) (ih : (_a : α) → C (_a, tl)) (f : α) => ih2 f (hd, tl) (ih1 f) (ih hd)) f end free_semigroup /-- Auxiliary function for `free_semigroup.lift`. -/ def free_semigroup.lift' {α : Type u} {β : Type v} [semigroup β] (f : α → β) : α → List α → β := sorry /-- Auxiliary function for `free_semigroup.lift`. -/ def free_add_semigroup.lift' {α : Type u} {β : Type v} [add_semigroup β] (f : α → β) : α → List α → β := sorry namespace free_semigroup /-- Lifts a function `α → β` to a semigroup homomorphism `free_semigroup α → β` given a semigroup `β`. -/ def lift {α : Type u} {β : Type v} [semigroup β] (f : α → β) (x : free_semigroup α) : β := lift' f (prod.fst x) (prod.snd x) @[simp] theorem lift_of {α : Type u} {β : Type v} [semigroup β] (f : α → β) (x : α) : lift f (of x) = f x := rfl theorem lift_of_mul {α : Type u} {β : Type v} [semigroup β] (f : α → β) (x : α) (y : free_semigroup α) : lift f (of x * y) = f x * lift f y := rfl @[simp] theorem Mathlib.free_add_semigroup.lift_add {α : Type u} {β : Type v} [add_semigroup β] (f : α → β) (x : free_add_semigroup α) (y : free_add_semigroup α) : free_add_semigroup.lift f (x + y) = free_add_semigroup.lift f x + free_add_semigroup.lift f y := sorry theorem Mathlib.free_add_semigroup.lift_unique {α : Type u} {β : Type v} [add_semigroup β] (f : free_add_semigroup α → β) (hf : ∀ (x y : free_add_semigroup α), f (x + y) = f x + f y) : f = free_add_semigroup.lift (f ∘ free_add_semigroup.of) := sorry /-- The unique semigroup homomorphism that sends `of x` to `of (f x)`. 
-/ def Mathlib.free_add_semigroup.map {α : Type u} {β : Type v} (f : α → β) : free_add_semigroup α → free_add_semigroup β := free_add_semigroup.lift (free_add_semigroup.of ∘ f) @[simp] theorem Mathlib.free_add_semigroup.map_of {α : Type u} {β : Type v} (f : α → β) (x : α) : free_add_semigroup.map f (free_add_semigroup.of x) = free_add_semigroup.of (f x) := rfl @[simp] theorem Mathlib.free_add_semigroup.map_add {α : Type u} {β : Type v} (f : α → β) (x : free_add_semigroup α) (y : free_add_semigroup α) : free_add_semigroup.map f (x + y) = free_add_semigroup.map f x + free_add_semigroup.map f y := free_add_semigroup.lift_add (free_add_semigroup.of ∘ f) x y protected instance Mathlib.free_add_semigroup.monad : Monad free_add_semigroup := sorry /-- Recursor that uses `pure` instead of `of`. -/ def rec_on' {α : Type u} {C : free_semigroup α → Sort l} (x : free_semigroup α) (ih1 : (x : α) → C (pure x)) (ih2 : (x : α) → (y : free_semigroup α) → C (pure x) → C y → C (pure x * y)) : C x := free_semigroup.rec_on x ih1 ih2 @[simp] theorem map_pure {α : Type u} {β : Type u} (f : α → β) (x : α) : f <$> pure x = pure (f x) := rfl @[simp] theorem map_mul' {α : Type u} {β : Type u} (f : α → β) (x : free_semigroup α) (y : free_semigroup α) : f <$> (x * y) = f <$> x * f <$> y := map_mul f x y @[simp] theorem pure_bind {α : Type u} {β : Type u} (f : α → free_semigroup β) (x : α) : pure x >>= f = f x := rfl @[simp] theorem mul_bind {α : Type u} {β : Type u} (f : α → free_semigroup β) (x : free_semigroup α) (y : free_semigroup α) : x * y >>= f = (x >>= f) * (y >>= f) := lift_mul f x y @[simp] theorem Mathlib.free_add_semigroup.pure_seq {α : Type u} {β : Type u} {f : α → β} {x : free_add_semigroup α} : pure f <*> x = f <$> x := rfl @[simp] theorem mul_seq {α : Type u} {β : Type u} {f : free_semigroup (α → β)} {g : free_semigroup (α → β)} {x : free_semigroup α} : f * g <*> x = (f <*> x) * (g <*> x) := mul_bind (fun (_x : α → β) => (fun (α β : Type u) (f : α → β) (x : free_semigroup α) => lift (of ∘ f) x) α β _x x) f g protected instance Mathlib.free_add_semigroup.is_lawful_monad : is_lawful_monad free_add_semigroup := sorry /-- `free_semigroup` is traversable. 
-/ protected def Mathlib.free_add_semigroup.traverse {m : Type u → Type u} [Applicative m] {α : Type u} {β : Type u} (F : α → m β) (x : free_add_semigroup α) : m (free_add_semigroup β) := free_add_semigroup.rec_on' x (fun (x : α) => pure <$> F x) fun (x : α) (y : free_add_semigroup α) (ihx ihy : m (free_add_semigroup β)) => Add.add <$> ihx <*> ihy protected instance Mathlib.free_add_semigroup.traversable : traversable free_add_semigroup := traversable.mk free_add_semigroup.traverse @[simp] theorem traverse_pure {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) (x : α) : traverse F (pure x) = pure <$> F x := rfl @[simp] theorem Mathlib.free_add_semigroup.traverse_pure' {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) : traverse F ∘ pure = fun (x : α) => pure <$> F x := rfl @[simp] theorem Mathlib.free_add_semigroup.traverse_add {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) [is_lawful_applicative m] (x : free_add_semigroup α) (y : free_add_semigroup α) : traverse F (x + y) = Add.add <$> traverse F x <*> traverse F y := sorry @[simp] theorem Mathlib.free_add_semigroup.traverse_add' {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) [is_lawful_applicative m] : function.comp (traverse F) ∘ Add.add = fun (x y : free_add_semigroup α) => Add.add <$> traverse F x <*> traverse F y := funext fun (x : free_add_semigroup α) => funext fun (y : free_add_semigroup α) => free_add_semigroup.traverse_add F x y @[simp] theorem Mathlib.free_add_semigroup.traverse_eq {α : Type u} {β : Type u} {m : Type u → Type u} [Applicative m] (F : α → m β) (x : free_add_semigroup α) : free_add_semigroup.traverse F x = traverse F x := rfl @[simp] theorem Mathlib.free_add_semigroup.add_map_seq {α : Type u} (x : free_add_semigroup α) (y : free_add_semigroup α) : Add.add <$> x <*> y = x + y := rfl protected instance Mathlib.free_add_semigroup.is_lawful_traversable : is_lawful_traversable free_add_semigroup := is_lawful_traversable.mk sorry sorry sorry sorry protected instance Mathlib.free_add_semigroup.decidable_eq {α : Type u} [DecidableEq α] : DecidableEq (free_add_semigroup α) := prod.decidable_eq end free_semigroup /-- Isomorphism between `magma.free_semigroup (free_magma α)` and `free_semigroup α`. -/ def free_add_semigroup_free_add_magma (α : Type u) : add_magma.free_add_semigroup (free_add_magma α) ≃ free_add_semigroup α := equiv.mk (add_magma.free_add_semigroup.lift (free_add_magma.lift free_add_semigroup.of) sorry) (free_add_semigroup.lift (add_magma.free_add_semigroup.of ∘ free_add_magma.of)) sorry sorry @[simp] theorem free_semigroup_free_magma_mul {α : Type u} (x : magma.free_semigroup (free_magma α)) (y : magma.free_semigroup (free_magma α)) : coe_fn (free_semigroup_free_magma α) (x * y) = coe_fn (free_semigroup_free_magma α) x * coe_fn (free_semigroup_free_magma α) y := magma.free_semigroup.lift_mul (free_magma.lift free_semigroup.of) x y end Mathlib
Nine, the product of three and three, represents a multitude, so the Egyptians called several large groups "<unk>", or sets of nine, even if they had more than nine members. The most prominent ennead was the Ennead of Heliopolis, an extended family of deities descended from the creator god Atum, which incorporates many important gods. The term "ennead" was often extended to include all of Egypt's deities.
Formal statement is: lemma atLeastAtMost_subset_contains_Inf: fixes A :: "real set" and a b :: real shows "A \<noteq> {} \<Longrightarrow> a \<le> b \<Longrightarrow> A \<subseteq> {a..b} \<Longrightarrow> Inf A \<in> {a..b}" Informal statement is: If $A$ is a nonempty subset of the closed interval $[a,b]$, then $\inf A \in [a,b]$.
# coding: utf-8 # # Unit Commitment Model of Australia's National Electricity Market # A unit commitment (UC) model of Australia's National Electricity Market (NEM) is developed using the generator and network datasets previously constructed. The UC model is based on [1], and takes into account a number of technical and economic parameters associated with power system operation. Compared to models with linear cost minimisation objective functions, UC models introduce a number of additional continuous and binary variables, resulting in a mixed integer linear program (MILP). The complexity of solving UC models is also increased as, unlike DCOPF models with linear objective functions which are solved sequentially for each time period, UC models solve for an optimal dispatch schedule for each unit over the time interval under investigation. Computer memory limitations were encountered when attempting to use the full network representation with the UC formulation adopted here. For this reason a simplified representation of the NEM's network is constructed. This representation makes use of NEM zones which are used in AEMO's own market models [2]. Rather than considering power balance constraints for each node, nodes are aggregated into their respective NEM zones. Power balance constraints are then enforced for each zone. This network representation is similar to the network representation of the NEM provided in [3], limiting power flows between zones according to interconnector capabilities. # # The model presented here uses MMSDM data for June 2017 as an example. Data for only one month is loaded into this Jupyter Notebook to reduce the computational burden of storing many large MMSDM tables in memory. As the schema of MMSDM tables is time invariant, other periods can easily be analysed by loading alternative MMSDM tables and selecting different intervals. When solving the UC model, a 24hr period (48 trading intervals) is investigated. The results from the optimisation problem are pickled and saved (an appendix cell at the end of this notebook shows how to reload them). # # A summary of the steps taken to construct the UC model is as follows: # 1. Import packages, declare paths to files, and load data # 2. Organise model data: # * summarise important parameters for each node (e.g. assigned DUIDs, NEM region, NEM zone, proportion of regional demand consumed at node); # * compute aggregate nodal power injections from intermittent sources at each node for each time interval. # 3. Aggregate data for nodes by NEM zones # 4. Construct branch incidence matrix for AC lines # 5. Construct incidence matrix describing the connections of HVDC links # 6. Construct UC model # 7.
Solve UC model for time interval under investigation # # ## Import packages # In[1]: import os import pickle from math import pi import random import numpy as np import pandas as pd import geopandas as gp from pyomo.environ import * import matplotlib.pyplot as plt # ## Declare paths to files # In[2]: # Core data directory data_dir = os.path.abspath(os.path.join(os.path.curdir, os.path.pardir, os.path.pardir, 'data')) # Network directory network_dir = os.path.abspath(os.path.join(os.path.curdir, os.path.pardir, '1_network')) # Generators directory gens_dir = os.path.abspath(os.path.join(os.path.curdir, os.path.pardir, '2_generators')) # Signals directory signals_dir = os.path.abspath(os.path.join(os.path.curdir, os.path.pardir, '3_load_and_dispatch_signals')) # Output path output_dir = os.path.abspath(os.path.join(os.path.curdir, 'output')) # ## Load data # In[3]: # Network data # ------------ # Nodes df_n = pd.read_csv(os.path.join(network_dir, 'output', 'network_nodes.csv'), index_col='NODE_ID', dtype={'NEAREST_NODE':np.int32}) # Edges df_e = pd.read_csv(os.path.join(network_dir, 'output', 'network_edges.csv'), index_col='LINE_ID') # HVDC links df_hvdc = pd.read_csv(os.path.join(network_dir, 'output', 'network_hvdc_links.csv'), index_col='HVDC_LINK_ID') # AC interconnector links df_ac_i = pd.read_csv(os.path.join(network_dir, 'output', 'network_ac_interconnector_links.csv'), index_col='INTERCONNECTOR_ID') # AC interconnector limits df_ac_ilim = pd.read_csv(os.path.join(network_dir, 'output', 'network_ac_interconnector_flow_limits.csv'), index_col='INTERCONNECTOR_ID') # Power station - node assignments df_station_nodes = pd.read_csv(os.path.join(network_dir, 'output', 'network_power_stations-nodes.csv'), index_col='POWER_STATION_ID', dtype={'NEAREST_NODE':np.int32}) # Generator data # -------------- df_g = pd.read_csv(os.path.join(gens_dir, 'output', 'generators.csv'), index_col='DUID') # Dispatch and load signals # ------------------------- # Dispatch signals from SCADA data df_scada = pd.read_csv(os.path.join(signals_dir, 'output', 'signals_dispatch.csv'), index_col='SETTLEMENTDATE', parse_dates=['SETTLEMENTDATE']) # Regional demand signals df_regd = pd.read_csv(os.path.join(signals_dir, 'output', 'signals_regional_load.csv'), index_col='SETTLEMENTDATE', parse_dates=['SETTLEMENTDATE']) # Cross-reference tables # ---------------------- # AEMO DUIDs - Geoscience Australia power station names df_duid_stations = pd.read_csv(os.path.join(data_dir, 'cross_reference', 'DUID-GA-POWER_STATIONS.csv'), index_col='DUID') # ## Organise model data # ### Summarise model data for each node # The steps taken to collate data used in the UC model are as follows: # 1. Initialise a dataframe, `df_m`, that will be used to summarise UC model data at each node # 2. Assign DUIDs to each node # 3. Create dataframe that contains intermittent power injections (from wind and solar) at each node for each time period. # 4. Create dataframe that contains demand for each NEM region for each time period # 5. Construct branch incidence matrix for the network # In[4]: # Dataframe that will contain a summary of data used in the UC model df_m = df_n.copy() # DUIDs assigned to each node df_m['DUID'] = df_g.reset_index().groupby('NODE')[['DUID']].aggregate(lambda x: set(x)).reindex(df_m.index, fill_value=set()) df_m # ### Intermittent generation at each node # DUIDs corresponding to intermittent generators (wind and solar) are identified, and the net power injection at each node from these generators is computed.
Nodal power injections from intermittent sources are then aggregated by NEM zone. # # Note: Unit dispatch for some DUIDs is negative. This could be a result of the measurement methods used to collect dispatch data. As these negative values are small, they are unlikely to have a material impact on the final output of the model. However, the declaration of parameters in the UC model to follow may raise warnings when encountering negative values. To prevent this from occurring, these negative values are set to zero. # In[5]: # Find all intermittent DUIDs (wind and solar) mask = df_g['FUEL_CAT'].isin(['Wind', 'Solar']) ig_ids = df_g[mask].index # Find total intermittent generation at each node for each time period df_inter = df_scada.reindex(ig_ids, axis=1, fill_value=0).T.join(df_g[['NEM_ZONE']], how='left').groupby('NEM_ZONE').sum().T # Set negative dispatch values to 0 mask = df_inter < 0 df_inter[mask] = 0 # Re-index, so all zones are contained within columns df_inter = df_inter.reindex(df_n['NEM_ZONE'].unique(), axis=1, fill_value=0) df_inter.head() # ### Total demand in each NEM region # Total demand for each NEM region. # In[6]: df_regd.head() # ## Summarise model data for reduced network # In[7]: # Initialise dataframe that will contain model data for the reduced network df_rm = pd.DataFrame(index=df_m['NEM_ZONE'].unique()) # Proportion of NEM region demand for each zone df_rm = df_m.groupby('NEM_ZONE')[['PROP_REG_D']].sum() # NEM region associated with each NEM zone df_rm['NEM_REGION'] = df_rm.apply(lambda x: df_m.drop_duplicates('NEM_ZONE').set_index('NEM_ZONE').loc[x.name, 'NEM_REGION'], axis=1) df_rm['DUID'] = df_g.reset_index().groupby('NEM_ZONE')['DUID'].aggregate(lambda x: set(x)) df_rm.head() # ### Demand for each NEM zone # In[8]: # Initialise matrix containing zonal demand df_zd = pd.DataFrame(index=df_rm.index, columns=df_regd.index) def get_zone_demand(row): # NEM region corresponding to NEM zone nem_region = df_m.drop_duplicates('NEM_ZONE').set_index('NEM_ZONE').loc[row.name, 'NEM_REGION'] # Demand series for NEM zone's corresponding NEM region region_demand = df_regd.loc[:, nem_region] # Demand series for each NEM zone zone_demand = region_demand * df_rm.loc[row.name, 'PROP_REG_D'] return zone_demand df_zd = df_zd.apply(get_zone_demand, axis=1).T df_zd.head() # ### Incidence matrix for AC connections between NEM zones # In[9]: # Lines in the reduced network rn_lines = ['ADE,NSA', 'SESA,ADE', 'MEL,SESA', 'MEL,CVIC', 'MEL,LV', 'MEL,NVIC', 'CVIC,SWNSW', 'NVIC,SWNSW', 'NVIC,CAN', 'CAN,SWNSW', 'CAN,NCEN', 'NCEN,NNS', 'NNS,SWQ', 'SWQ,SEQ', 'SWQ,CQ', 'SEQ,CQ', 'CQ,NQ', 'NNS,SEQ'] # Incidence matrix for network based on NEM zones as nodes df_ac_C = pd.DataFrame(index=rn_lines, columns=df_rm.index, data=0) for line in rn_lines: # Get 'from' and 'to' nodes for each line fn, tn = line.split(',') # Assign 'from' node value of 1 df_ac_C.loc[line, fn] = 1 # Assign 'to' node value of -1 df_ac_C.loc[line, tn] = -1 df_ac_C.head() # ### Incidence matrix for HVDC links # In[10]: # Drop Directlink HVDC line (technically not an interconnector) df_hvdc = df_hvdc.drop('DIRECTLINK') # Assign 'from' and 'to' zones to each HVDC link df_hvdc['FROM_ZONE'] = df_hvdc.apply(lambda x: df_n.loc[x['FROM_NODE'], 'NEM_ZONE'], axis=1) df_hvdc['TO_ZONE'] = df_hvdc.apply(lambda x: df_n.loc[x['TO_NODE'], 'NEM_ZONE'], axis=1) # Incidence matrix for HVDC links df_hvdc_C = pd.DataFrame(index=df_hvdc.index, columns=df_rm.index, data=0) for index, row in df_hvdc.iterrows(): # Extract 'from' and 'to' zones for each
HVDC link fz, tz = row['FROM_ZONE'], row['TO_ZONE'] # Assign value of 1 to 'from' zones df_hvdc_C.loc[index, fz] = 1 # Assign value of -1 to 'to' zones df_hvdc_C.loc[index, tz] = -1 df_hvdc_C # ## Minimum Reserve levels for each NEM region # Minimum reserve levels in MW for each NEM region are obtained from [4]. # In[11]: df_mrl = pd.Series(data={'NSW1': 673.2, 'QLD1': 666.08, 'SA1': 195, 'TAS1': 194, 'VIC1': 498}) # ## Unit Commitment Model # In[12]: def run_uc_model(fname, fix_hydro=True): # Model # ----- m = ConcreteModel() # Sets # ---- # Generators if fix_hydro: mask = (df_g['SCHEDULE_TYPE'] == 'SCHEDULED') & df_g['FUEL_CAT'].isin(['Fossil']) & ~pd.isnull(df_g['MIN_ON_TIME']) m.G = Set(initialize=df_g[mask].index) mask = (df_g['SCHEDULE_TYPE'] == 'SCHEDULED') & df_g['FUEL_CAT'].isin(['Hydro']) & ~pd.isnull(df_g['MIN_ON_TIME']) m.HYDRO = Set(initialize=df_g[mask].index) else: mask = (df_g['SCHEDULE_TYPE'] == 'SCHEDULED') & df_g['FUEL_CAT'].isin(['Hydro', 'Fossil']) & ~pd.isnull(df_g['MIN_ON_TIME']) m.G = Set(initialize=df_g[mask].index) m.HYDRO = Set() # NEM zones m.J = Set(initialize=df_rm.index) # NEM regions m.S = Set(initialize=df_rm['NEM_REGION'].unique()) # HVDC links m.H = Set(initialize=df_hvdc_C.index) # AC lines m.A = Set(initialize=df_ac_C.index) # (Dictionary to convert between timestamps and time indices) t_dict = {j+1: k for j, k in enumerate(df_zd[:48].index)} # Time indices m.T = Set(initialize=list(t_dict.keys()), ordered=True) # Parameters # ---------- # Constant linear variable cost (SRMC) def C_LV_rule(m, g): return float(df_g.loc[g, 'SRMC_2016-17']) m.C_LV = Param(m.G, initialize=C_LV_rule) # No load cost def C_NL_rule(m, g): if df_g.loc[g, 'FUEL_CAT'] == 'Hydro': return float(0) else: return float((df_g.loc[g, 'NL_FUEL_CONS'] * df_g.loc[g, 'HEAT_RATE'] * df_g.loc[g, 'FC_2016-17'])) m.C_NL = Param(m.G, initialize=C_NL_rule) # Maximum power output def P_MAX_rule(m, g): return float(df_g.loc[g, 'REG_CAP']) m.P_MAX = Param(m.G, initialize=P_MAX_rule) # Minimum power output def P_MIN_rule(m, g): min_gen = df_g.loc[g, 'MIN_GEN'] # If no data for min gen as % of nameplate capacity, return 0 if pd.isnull(min_gen): return float(0) else: return float(min_gen) m.P_MIN = Param(m.G, initialize=P_MIN_rule) # Hydro output def HYDRO_OUTPUT_rule(m, hydro, t): return float(df_scada.loc[t_dict[t], hydro]) m.HYDRO_OUTPUT = Param(m.HYDRO, m.T, initialize=HYDRO_OUTPUT_rule) # Time interval length in hours m.DELT = Param(initialize=0.5) # Ramp up capability def RU_rule(m, g): return float(df_g.loc[g, 'RR_UP'] * m.DELT.value) m.RU = Param(m.G, initialize=RU_rule) # Ramp down capability def RD_rule(m, g): return float(df_g.loc[g, 'RR_DOWN'] * m.DELT.value) m.RD = Param(m.G, initialize=RD_rule) # Minimum off time expressed in terms of time intervals def TD_rule(m, g): return float(df_g.loc[g, 'MIN_OFF_TIME'] / m.DELT.value) m.TD = Param(m.G, initialize=TD_rule) # Minimum on time expressed in terms of time intervals def TU_rule(m, g): return float(df_g.loc[g, 'MIN_ON_TIME'] / m.DELT.value) m.TU = Param(m.G, initialize=TU_rule) # Demand for each NEM zone def D_rule(m, j, t): return float(df_zd.loc[t_dict[t], j]) m.D = Param(m.J, m.T, initialize=D_rule) # HVDC incidence matrix def HVDC_C_rule(m, h, j): return float(df_hvdc_C.loc[h, j]) m.HVDC_C = Param(m.H, m.J, initialize=HVDC_C_rule) # AC incidence matrix def AC_C_rule(m, a, j): return float(df_ac_C.loc[a, j]) m.AC_C = Param(m.A, m.J, initialize=AC_C_rule) # Intermittent generation for each zone def P_W_rule(m, j, t): wind_output = 
float(df_inter.loc[t_dict[t], j]) if wind_output < 0: return float(0) else: return wind_output m.P_W = Param(m.J, m.T, initialize=P_W_rule) # Start-up costs def C_SU_rule(m, g): return float(df_g.loc[g, 'SU_COST_COLD'] * m.P_MIN[g]) m.C_SU = Param(m.G, initialize=C_SU_rule) # Start-up trajectory duration (in time intervals) def SU_D_rule(m, g): ru_intervals = m.P_MIN[g] / (df_g.loc[g, 'RR_STARTUP'] * m.DELT.value) if ru_intervals > 1: return float(ceil(ru_intervals)) else: return float(0) m.SU_D = Param(m.G, initialize=SU_D_rule) # Shutdown trajectory duration (in time intervals) def SD_D_rule(m, g): rd_intervals = m.P_MIN[g] / (df_g.loc[g, 'RR_SHUTDOWN'] * m.DELT.value) if rd_intervals > 1: return float(ceil(rd_intervals)) else: return float(0) m.SD_D = Param(m.G, initialize=SD_D_rule) # Startup capability def SU_rule(m, g): if m.SU_D[g]: return m.P_MIN[g] else: return float(df_g.loc[g, 'RR_STARTUP']) m.SU = Param(m.G, initialize=SU_rule) # Shutdown capability def SD_rule(m, g): if m.SD_D[g]: return m.P_MIN[g] else: return float(df_g.loc[g, 'RR_SHUTDOWN']) m.SD = Param(m.G, initialize=SD_rule) # Minimum reserve level (up) for each NEM region def D_UP_rule(m, s): return float(df_mrl.loc[s]) m.D_UP = Param(m.S, initialize=D_UP_rule) # Minimum reserve level (down) for each NEM region def D_DOWN_rule(m, s): return float(df_mrl.loc[s] / 10) m.D_DOWN = Param(m.S, initialize=D_DOWN_rule) # Variables # --------- # On/off state for each generator m.u = Var(m.G, m.T, within=Binary) # Startup indicator m.v = Var(m.G, m.T, within=Binary) # Shutdown indicator m.w = Var(m.G, m.T, within=Binary) # Reserve (up) for each generator m.r_up = Var(m.G, m.T, within=NonNegativeReals) # Reserve (down) for each generator m.r_down = Var(m.G, m.T, within=NonNegativeReals) # Wind power output in each NEM zone m.p_w = Var(m.J, m.T, within=NonNegativeReals) # Dispatch for each generator above P_MIN m.p = Var(m.G, m.T, within=NonNegativeReals) # Power flow over AC transmission lines m.p_ac = Var(m.A, m.T, within=Reals) # Dummy variables used to compute absolute flows over AC links m.p_ac_up = Var(m.A, m.T, within=NonNegativeReals) m.p_ac_lo = Var(m.A, m.T, within=NonNegativeReals) # Power flow over HVDC links def p_hvdc_rule(m, h, t): return (-float(df_hvdc.loc[h, 'REVERSE_LIMIT_MW']), float(df_hvdc.loc[h, 'FORWARD_LIMIT_MW'])) m.p_hvdc = Var(m.H, m.T, bounds=p_hvdc_rule, initialize=0) # Dummy variables used to compute absolute flows over HVDC links m.p_hvdc_up = Var(m.H, m.T, within=NonNegativeReals) m.p_hvdc_lo = Var(m.H, m.T, within=NonNegativeReals) # Expressions # ----------- # Startup cost function def C_SU_PRIME_rule(m, g): return m.C_SU[g] m.C_SU_PRIME = Expression(m.G, rule=C_SU_PRIME_rule) # Total power output for each generator def p_hat_rule(m, g, t): if m.SU_D[g] == 0: if t != m.T.last(): return (m.P_MIN[g] * (m.u[g, t] + m.v[g, t + 1])) + m.p[g, t] else: return (m.P_MIN[g] * m.u[g, t]) + m.p[g, t] else: # Startup trajectory su_traj = {i + 1: i * (m.P_MIN[g] / m.SU_D[g]) for i in range(0, int(m.SU_D[g]) + 1)} # x index x_index = [i for i in range(1, int(m.SU_D[g]) + 1) if ((t - i + m.SU_D[g] + 2) <= m.T.last()) and ((t - i + m.SU_D[g] + 2) >= m.T.first())] # Shutdown trajectory sd_traj = {i + 1: m.P_MIN[g] - (m.P_MIN[g] / m.SD_D[g]) * i for i in range(0, int(m.SD_D[g]) + 1)} # y index y_index = [i for i in range(2, int(m.SD_D[g]) + 2) if ((t - i + 2) <= m.T.last()) and ((t - i + 2) >= m.T.first())] if t != m.T.last(): return (m.P_MIN[g] * (m.u[g, t] + m.v[g, t + 1]) + m.p[g, t] + sum(su_traj[x] * m.v[g, t - x + int(m.SU_D[g]) + 2] for x in x_index) + sum(sd_traj[y] * m.w[g, t - y +
2] for y in y_index)) else: return ((m.P_MIN[g] * m.u[g, t]) + m.p[g, t] + sum(su_traj[x] * m.v[g, t - x + int(m.SU_D[g]) + 2] for x in x_index) + sum(sd_traj[y] * m.w[g, t - y + 2] for y in y_index)) m.p_hat = Expression(m.G, m.T, rule=p_hat_rule) # Energy output for each generator def e_rule(m, g, t): if t != m.T.first(): return ((m.p_hat[g, t - 1] + m.p_hat[g, t]) / 2) * m.DELT else: return m.p_hat[g, t] m.e = Expression(m.G, m.T, rule=e_rule) # Constraints # ----------- # Power balance for each NEM zone def power_balance_rule(m, j, t): gens = [g for g in df_rm.loc[j, 'DUID'] if g in m.G or g in m.HYDRO] if gens: return (sum(m.p_hat[g, t] for g in gens if g in m.G) - m.D[j, t] + m.p_w[j, t] + sum(m.HYDRO_OUTPUT[hydro, t] for hydro in gens if hydro in m.HYDRO) == sum(m.p_ac[a, t] * m.AC_C[a, j] for a in m.A) + sum(m.p_hvdc[h, t] * m.HVDC_C[h, j] for h in m.H)) else: return m.D[j, t] == - sum(m.p_ac[a, t] * m.AC_C[a, j] for a in m.A) - sum(m.p_hvdc[h, t] * m.HVDC_C[h, j] for h in m.H) m.power_balance = Constraint(m.J, m.T, rule=power_balance_rule) # Limit flows over Heywood interconnector def heywood_rule(m, t): return -500 <= m.p_ac['MEL,SESA', t] <= 600 m.heywood = Constraint(m.T, rule=heywood_rule) # Limit flows over QNI interconnector def QNI_rule(m, t): return -1078 <= m.p_ac['NNS,SWQ', t] <= 600 m.QNI = Constraint(m.T, rule=QNI_rule) # Limit flow over Terranora interconnector def terranora_rule(m, t): return -210 <= m.p_ac['NNS,SEQ', t] <= 107 m.terranora = Constraint(m.T, rule=terranora_rule) # Limit interconnector flows between VIC and NSW def VIC_to_NSW_rule(m, t): return -1350 <= sum(m.p_ac[line, t] for line in ['CVIC,SWNSW', 'NVIC,SWNSW', 'NVIC,CAN']) <= 1600 m.VIC_to_NSW = Constraint(m.T, rule=VIC_to_NSW_rule) # DUIDs allocated to each region df_region_duids = df_g.reset_index().groupby('NEM_REGION')['DUID'].aggregate(lambda x: set(x)) # Ensure upward reserve for each NEM region is maintained def reserve_up_rule(m, s, t): return sum(m.r_up[g, t] for g in df_region_duids.loc[s] if g in m.G) >= m.D_UP[s] m.reserve_up = Constraint(m.S, m.T, rule=reserve_up_rule) # Ensure downward reserve for each NEM region is satisfied def reserve_down_rule(m, s, t): return sum(m.r_down[g, t] for g in df_region_duids.loc[s] if g in m.G) >= m.D_DOWN[s] m.reserve_down = Constraint(m.S, m.T, rule=reserve_down_rule) # Use SCADA data to initialise 'on' state for first period def u0_rule(m, g): if df_scada.loc[t_dict[m.T.first()], g] > 1: return 1 else: return 0 m.u0 = Param(m.G, initialize=u0_rule) # Logic describing relationship between generator on state, startup state, and shutdown state def logic_rule(m, g, t): if t != m.T.first(): return m.u[g, t] - m.u[g, t - 1] == m.v[g, t] - m.w[g, t] else: return m.u[g, t] - m.u0[g] == m.v[g, t] - m.w[g, t] m.logic = Constraint(m.G, m.T, rule=logic_rule) # Minimum on time (in time intervals) def min_on_time_rule(m, g, t): i_index = [i for i in range(t - int(m.TU[g]) + 1, t + 1)] if t < m.TU[g]: return Constraint.Skip else: return sum(m.v[g, i] for i in i_index) <= m.u[g, t] m.min_on_time = Constraint(m.G, m.T, rule=min_on_time_rule) # Minimum off time (in time intervals) def min_off_time_rule(m, g, t): i_index = [i for i in range(t - int(m.TD[g]) + 1, t + 1)] if t < m.TD[g]: return Constraint.Skip else: return sum(m.w[g, i] for i in i_index) <= 1 - m.u[g, t] m.min_off_time = Constraint(m.G, m.T, rule=min_off_time_rule) # Power output considering upward reserves def power_output_reserve_up_rule(m, g, t): if t != m.T.last(): return m.p[g, t] + m.r_up[g, t] <=
((m.P_MAX[g] - m.P_MIN[g]) * m.u[g, t] - (m.P_MAX[g] - m.SD[g]) * m.w[g, t + 1] + (m.SU[g] - m.P_MIN[g]) * m.v[g, t + 1]) else: return m.p[g, t] + m.r_up[g, t] <= (m.P_MAX[g] - m.P_MIN[g]) * m.u[g, t] m.power_output_reserve_up = Constraint(m.G, m.T, rule=power_output_reserve_up_rule) # Power output considering downward reserves def power_output_reserve_down_rule(m, g, t): return m.p[g, t] - m.r_down[g, t] >= 0 m.power_output_reserve_down = Constraint(m.G, m.T, rule=power_output_reserve_down_rule) # Ramp up limit def ramp_up_rule(m, g, t): if t == m.T.first(): return Constraint.Skip else: return (m.p[g, t] + m.r_up[g, t]) - m.p[g, t - 1] <= m.RU[g] m.ramp_up = Constraint(m.G, m.T, rule=ramp_up_rule) # Ramp down limit def ramp_down_rule(m, g, t): if t == m.T.first(): return Constraint.Skip else: return -(m.p[g, t] - m.r_down[g, t]) + m.p[g, t - 1] <= m.RD[g] m.ramp_down = Constraint(m.G, m.T, rule=ramp_down_rule) # Wind output limit (allows curtailment if intermittent generation is too high) def wind_output_rule(m, j, t): return m.p_w[j, t] <= m.P_W[j, t] m.wind_output = Constraint(m.J, m.T, rule=wind_output_rule) # If shutdown ramp is larger than Pmax, m.p[g,t] + m.r_up[g,t] may be greater than P_MAX[g] # Need this constraint to ensure power output is correctly constrained. def max_power_output_rule(m, g, t): return m.p[g, t] + m.P_MIN[g] + m.r_up[g, t] <= m.P_MAX[g] m.max_power_output_rule = Constraint(m.G, m.T, rule=max_power_output_rule) # Absolute flow over AC links def abs_ac_flow_up_rule(m, a, t): return m.p_ac_up[a, t] >= m.p_ac[a, t] m.abs_ac_flow_up = Constraint(m.A, m.T, rule=abs_ac_flow_up_rule) def abs_ac_flow_lo_rule(m, a, t): return m.p_ac_lo[a, t] >= - m.p_ac[a, t] m.abs_ac_flow_lo = Constraint(m.A, m.T, rule=abs_ac_flow_lo_rule) # Absolute flow over HVDC links def abs_hvdc_flow_up_rule(m, h, t): return m.p_hvdc_up[h, t] >= m.p_hvdc[h, t] m.abs_hvdc_flow_up = Constraint(m.H, m.T, rule=abs_hvdc_flow_up_rule) def abs_hvdc_flow_lo_rule(m, h, t): return m.p_hvdc_lo[h, t] >= - m.p_hvdc[h, t] m.abs_hvdc_flow_lo = Constraint(m.H, m.T, rule=abs_hvdc_flow_lo_rule) # Objective function # ------------------ # Minimise total cost of generation over the time horizon def objective_rule(m): return sum((m.C_LV[g] * m.e[g, t]) + (m.C_SU_PRIME[g] * m.v[g, t]) for g in m.G for t in m.T) + sum(5 * (m.p_ac_up[a, t] + m.p_ac_lo[a, t]) for a in m.A for t in m.T) + sum(5 * (m.p_hvdc_up[h, t] + m.p_hvdc_lo[h, t]) for h in m.H for t in m.T) m.objective = Objective(rule=objective_rule, sense=minimize) # Setup solver # ------------ solver = 'gurobi' solver_io = 'lp' stream_solver = True keepfiles = True m.dual = Suffix(direction=Suffix.IMPORT) opt = SolverFactory(solver, solver_io=solver_io) #opt.options['MIPGap'] = 2e-3 opt.options['TimeLimit'] = 3600 # Solve model results_initial = opt.solve(m, keepfiles=keepfiles, tee=stream_solver) # Fix integer variables for g in m.G: for t in m.T: m.u[g, t].fix() m.v[g, t].fix() m.w[g, t].fix() # Re-solve to obtain dual of power balance constraints results_final = opt.solve(m, keepfiles=keepfiles, tee=stream_solver) # Store all instance solutions in a results object m.solutions.store_to(results_final) # Retrieve total power and energy output values p_hat = [] e_output = [] for g in m.G: for t in m.T: p_hat.append( (g, t, t_dict[t], value(m.p_hat[g, t])) ) e_output.append( (g, t, t_dict[t], value(m.e[g, t])) ) # Dataframe for total power output from each unit for each time interval df_p_hat = pd.DataFrame(data=p_hat, columns=['DUID', 'T_INDEX', 'T_STAMP', 
'VALUE']).pivot(index='T_STAMP', columns='DUID', values='VALUE') # Dataframe for total energy output from each unit for each time interval df_e_output = pd.DataFrame(data=e_output, columns=['DUID', 'T_INDEX', 'T_STAMP', 'VALUE']).pivot(index='T_STAMP', columns='DUID', values='VALUE') # Store dataframes in results dictionary results_final['df_p_hat'] = df_p_hat results_final['df_e_output'] = df_e_output results_final['t_dict'] = t_dict # Save to file with open(os.path.join(output_dir, fname), 'wb') as f: pickle.dump(results_final, f) return m # Model with hydro output determined by UC m = run_uc_model('uc_results.pickle', fix_hydro=False) # Model with hydro output fixed to SCADA output values m_fixed_hydro = run_uc_model('uc_fixed_hydro_results.pickle', fix_hydro=True) # ## References # [1] - Morales-España, G., Gentile, C. & Ramos, A. Tight MIP formulations of the power-based unit commitment problem. OR Spectrum 37, 929–950 (2015) # # [2] - Australian Energy Markets Operator. Market Modelling Methodology and Input Assumptions - For Planning the National Electricity Market and Eastern and South-eastern Gas Systems. (AEMO, 2016). at https://www.aemo.com.au/-/media/Files/Electricity/NEM/Planning_and_Forecasting/NTNDP/2016/Dec/Market-Modelling-Methodology-And-Input-Assumptions.pdf # # [3] - Elliston, B., MacGill, I. & Diesendorf, M. Least cost 100% renewable electricity scenarios in the Australian National Electricity Market. Energy Policy 59, 270–282 (2013). # # [4] - Australian Energy Markets Operator. NTNDP Database. (2018). at https://www.aemo.com.au/Electricity/National-Electricity-Market-NEM/Planning-and-forecasting/National-Transmission-Network-Development-Plan/NTNDP-database
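# ## Appendix: reloading the saved results
# The cell below is a convenience sketch, not part of the original workflow: it shows how the results pickled by `run_uc_model` above can be reloaded for further analysis. It assumes the cells above have been run, so that `output_dir` is defined and 'uc_results.pickle' exists.

# In[13]:

import os
import pickle

# Reload the pickled results object saved by run_uc_model
with open(os.path.join(output_dir, 'uc_results.pickle'), 'rb') as f:
    uc_results = pickle.load(f)

# Total power output per DUID and trading interval, as stored above
uc_results['df_p_hat'].head()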
/** * Copyright (c) 2017 Melown Technologies SE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef vef_reader_hpp_included_ #define vef_reader_hpp_included_ #include <vector> #include <boost/optional.hpp> #include "roarchive/roarchive.hpp" #include "vef.hpp" namespace vef { /** VEF archive reader */ class Archive { public: using list = std::vector<Archive>; Archive(const boost::filesystem::path &root); Archive(roarchive::RoArchive &archive); const Manifest& manifest() const { return manifest_; } const roarchive::RoArchive archive() const { return archive_; } const boost::filesystem::path path() const { return archive_.path(); } /** Get istream for mesh */ roarchive::IStream::pointer meshIStream(const Mesh &mesh) const; private: roarchive::RoArchive archive_; /** Loaded manifest. */ Manifest manifest_; }; } // namespace vef #endif // vef_reader_hpp_included_
Mathews retired from the Royal Navy in 1881 and was appointed Brigadier-General of Zanzibar. There followed more expeditions to the African mainland, including a failed attempt to stop German expansion in East Africa. In October 1891 Mathews was appointed First Minister to the Zanzibar government, a position in which he was "irremovable by the sultan". During this time Mathews was a keen abolitionist and promoted this cause to the Sultans he worked with. This resulted in the prohibiting of the slave trade in Zanzibar's dominions in 1890 and the abolition of slavery in 1897. Mathews was appointed the British Consul-General for East Africa in 1891 but declined to take up the position, remaining in Zanzibar instead. Mathews and his troops also played a key role in the ending of the Anglo-Zanzibar War of 1896 which erupted out of an attempt to bypass the requirement that new Sultans must be vetted by the British consul. During his time as first minister Mathews continued to be involved with the military and was part of two large campaigns, one to Witu and another to Mwele.
module Utils.Num

import Data.Vect

alphabets : Vect 36 Char
alphabets = fromList $ unpack "0123456789abcdefghijklmnopqrstuvwxyz"

-- Use Integer internally for performance reasons
export
stringToNat' : Fin 36 -> String -> Maybe Integer
stringToNat' base str =
  if str == "" then Nothing
  else go (finToInteger base) 1 0 $ reverse $ unpack str
  where
    -- placeValue is the weight of the current digit (1, base, base^2, ...)
    go : Integer -> Integer -> Integer -> List Char -> Maybe Integer
    go base' placeValue acc [] = Just acc
    go base' placeValue acc (chr :: xs) = do
      i <- elemIndex chr alphabets
      if i < base
        then go base' (base' * placeValue) (acc + finToInteger i * placeValue) xs
        else Nothing

export
stringToNat : Fin 36 -> String -> Maybe Nat
stringToNat base string = integerToNat <$> stringToNat' base string

export
stringToInteger : Fin 36 -> String -> Maybe Integer
stringToInteger base str =
  case strUncons str of
    Just ('-', num) => negate <$> stringToNat' base num
    Just ('+', num) => stringToNat' base num
    _ => stringToNat' base str
using MeasureTheory using Test @testset "MeasureTheory.jl" begin # Write your own tests here. end
\section{Introduction}

Sea level rise threatens the densely populated and ecologically significant low-lying, coastal region of Bengal. Mitigating the effects of sea level rise (SLR) in this region will be especially difficult considering current widespread land management practices and the ongoing geopolitical tension between India and Bangladesh. Coastal Bengal is situated within the delta formed by the confluence of the Ganges and Brahmaputra (GB) rivers and straddles the border between West Bengal, India, to the west and Bangladesh to the east. The region is home to \textasciitilde30 million people \citep{centerforinternationalearthscienceinformationnetwork-ciesin-columbiauniversityPopulationEstimationService2018} and the ecologically critical Sundarbans mangrove forest.

Since the 1960s, the region has seen a vast transformation through the reduction in natural mangrove habitat and the widespread construction of earthen embankments. These embankments surround large swaths of clearcut land, known as polders, that accommodate the swelling population of the region. While preventing regular inundation by spring high tides, the creation of these embankments has had the unintended consequence of starving the interior of the polders of fresh sediment. Without this sediment, polder interiors have compacted, resulting in a significant elevation offset (\SIrange{1.0}{1.5}{\meter}) relative to the natural mangrove forest \citep{auerbachFloodRiskNatural2015}. Polder elevations often sit precariously below local mean high water (MHW) levels, leading to persistent waterlogging. Furthermore, many of these embankments are in disrepair and susceptible to breaching, especially by storm surge, as was the case with Cyclone Sidr (2007), Cyclone Aila (2009), and recently with Cyclone Amphan (2020).

Tidal river management (TRM) has been proposed as a possible augmentation of current land management practices to alleviate some of the issues caused by poldering. Under TRM, embankments are seasonally breached to allow tidal water inundation and sediment aggradation. Many low-lying areas may recover to an acceptable elevation within 5 to 10 years, though some may take longer. There is a host of local socioeconomic, political, and governance considerations that may influence the success of TRM. Here, we neglect those considerations and focus on the general feasibility of TRM with regard to SLR and the sediment supply of the GB system. Future studies will focus on the social dynamics surrounding TRM.

Coastal Bengal is often seen as one of the most at-risk regions for SLR due to climate change. Infographics often depict large swaths of the Bengal coastline flooded under different SLR scenarios. However, this overly simplifies the threats to the region and neglects the significant sediment contribution of the GB system in maintaining the natural elevation. Estimates of increases in Relative Mean Sea Level (RMSL) in the GB delta span \SIrange{2.8}{8.8}{\milli\meter\per\year}. However, RMSL neglects the widening of the tidal range in the polder region. On average, local high water levels in the polder region are increasing at a rate of \SI{15.9}{\milli\meter\per\year} \citep{pethickRapidRiseEffective2013}. While SLR is of paramount importance, tidal range amplification is the more imminent threat to the region. This is especially important considering the Bangladeshi government's recent reinvestment in poldering with a \$400 million loan from the World Bank for the Coastal Embankment Improvement Project - Phase I (CEIP-I).
As for the natural mangrove system, it is unclear how changing water levels will affect elevation. Some studies have shown that the region is incredibly resilient to increasing water levels due, in large part, to the abundant sediment supply of the GB system. This sediment is delivered to the platform periodically during spring high tide, which helps maintain an equilibrium elevation approximately equivalent to mean higher high water (MHHW). But this large volume of sediment delivered to coastal Bengal is not guaranteed. Water has long been the focus of the geopolitical disputes between India and Bangladesh. However, the reduction in water flow across the border portends a significant decrease in sediment flux. Estimates suggest sediment flux may be reduced by \SIrange{39}{75}{\percent} for the Ganges and \SIrange{9}{25}{\percent} for the Brahmaputra, resulting in a change in aggradation from \SI{3.6}{\milli\meter\per\year} to \SI{2.5}{\milli\meter\per\year} \citep{higginsRiverLinkingIndia2018}. The combination of increasing water levels and decreasing sediment supply may further intensify an already dire situation.

Here, we use a zero-dimensional mass balance model of sediment aggradation to understand the impact that increasing water levels and decreasing sediment flux will have on the region's equilibrium elevation and, consequently, its resilience to climate change. We consider both the resilience of the natural mangrove system and the ability of the polder system to recover to a more resilient elevation through TRM.
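Schematically, such a balance can be sketched as (an illustration only; the symbols and functional form here are ours, not the exact formulation of the model)
\begin{equation}
    \frac{dz}{dt} \;=\; \frac{w_s\,\bar{C}}{\rho_b}\,f\!\left(h(t)-z\right) \;-\; \dot{S},
\end{equation}
where $z$ is the platform elevation, $w_s$ an effective settling velocity, $\bar{C}$ the suspended sediment concentration, $\rho_b$ the dry bulk density of the deposit, $f$ the fraction of each tidal cycle during which the platform lies below the water level $h(t)$, and $\dot{S}$ the combined rate of subsidence and compaction. Elevation keeps pace with rising water levels only so long as the aggradation term can match $\dot{S}$ plus the rate of RMSL rise.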
# <center><span style="color:red">**Cálculo Numérico - Avaliação 01**</span><br/></center> # <center><font color='purple'>**Installing dependencies**</font></center> ## <font color='orange'>**To install dependencies, run this cell**</font> ```python !pip install -r requirements.txt ``` ## <font color='orange'>**Importing dependencies**</font> ### <font color='#0f3f21'> **To import dependencies, run this cell** </font> ```python from fractions import Fraction from sympy import * import numpy as np import matplotlib.pyplot as plt ``` # <center><span style="color:green">**Exercício 00**</span><br/></center> ### <center><span style="color:blue">Calculando matriz de Hilbert de ordem 4</span><br/></center> ```python def Hilbert_matrix_string(a, b): return [[str(Fraction(1 / (i + j + 1)).limit_denominator()) for j in range(b)] for i in range(a)] Hilbert_order_4_matrix = Hilbert_matrix_string(4,4) print("Matriz de Hilbert ordem 4:") Hilbert_order_4_matrix ``` ### <center><span style="color:blue">Calculando matriz inversa</span><br/></center> ```python def Hilbert_matrix_array(a, b): return np.array([[Fraction(1 / (i + j + 1)).limit_denominator() for j in range(b)] for i in range(a)]) hilbert_matrix_array_order_4 = Hilbert_matrix_array(4,4).astype('float64') Hilbert_inverse_matrix_array_order_4 = np.linalg.inv(hilbert_matrix_array_order_4) print("\n\nMatriz inversa de Hilbert de ordem 4:") Hilbert_inverse_matrix_array_order_4 ``` ### <center><span style="color:blue">Descrevendo vetor solução usando pinv (pseudo-inverse) function</span><br/></center> ```python pinv = np.linalg.pinv(hilbert_matrix_array_order_4) b = [0 ,1, 10, 100] solution_vector = pinv.dot(b) print("O vetor solucao é dado por:") solution_vector.tolist() ``` # <center><span style="color:green">**Exercício 01 parte a - Calculando xm para altura máxima**</span><br/></center> ```python x = symbols('x') g, y0, v0, m, c = 9.81, 100, 55, 80, 15 y = y0 + (m / c) * (v0 + ((m * g) / c)) * (1 - exp(- (c / m) * x)) - ((m * g) / c) * x print('Equacao:') y ``` ### <font color='orange'>Altura maxima de xm é dada para y'(x) = 0 </font> ```python y_diff = diff(y) print("y(x)' = ", y_diff) print("Calculando y(x)'= 0, resulta em xm, em metros, com o valor de") solve(y_diff)[0] ``` # <center><span style="color:green">**Parte b - Calculando x para y(x) = 0**</span><br/></center> ```python print("Valor de y(x) = 0: ") solve(y, rational = False)[0] ``` ### <center><span style="color:blue">O processo deve ser iterativo, pois a equacao -52.32 * x + 672.373333333333 - 572.373333333333 * exp(-0.1875 * x) = 0 nao possui uma fórmula simples para resolucao da equacao.</span><br/></center> ```python def Jacobian(v_str, f_list): vars = symbols(v_str) f = sympify(f_list) J = zeros(len(f),len(vars)) for i, fi in enumerate(f): for j, s in enumerate(vars): J[i,j] = diff(fi, s) return J ``` # <center><span style="color:green">**Exercicio 02 parte a - Resolvendo o primeiro sistema de equacoes**</span><br/></center> ```python x, y = symbols('x y') f1 = x**2 + y**2 f2 = -exp(x) + y print("Usando como chute inicial o par (I,I), pois a equacao nao possui solucoes reais, por conta de x² + y² = 0 possuir solucoes complexas, a solucao é dada por:") nsolve((f1, f2), (x, y), (I, I)) ``` ```python print("Matriz Jacobiana do sistema:") Jacobian('x y',['x**2 + y**2', '-exp(x) + y']) ``` ```python x = [1,2,3,4] y = [1, -1.33,-2.77,-3.233] plt.plot(x, y) plt.xlabel('Iteracoes') plt.ylabel('Valores da raíz') plt.title('Gráfico relacionando vetor solucao x X i') plt.show() ``` # 
<center><span style="color:green">**Parte b - Mostrando que o segundo sistema de equacoes possui infinitas solucoes**</span><br/></center> ```python print("Matriz Jacobiana do sistema:") jacobian_matrix = Jacobian('x y', ['-exp(x) + y','-sin(x) + y']) jacobian_matrix ``` ```python delta = 0.025 x, y = np.meshgrid(np.arange(-50, 4, delta),np.arange(-1, 1, delta)) plt.contour(x, y,- np.sin(x) + y, [0] ) plt.contour(x, y, -np.exp(x) + y, [0]) plt.show() ``` ### <center><span style="color:blue">Como mostrado no gráfico, o sistema possui infinitas solucoes.</span><br/></center> # <center><span style="color:green">**Exercicio 03 - descobrindo valor de d**</span><br/></center> ```python d = symbols('d') k1, k2, m, g, h = 40000, 40, 95, 9.81, 0.43 f = -(k1 * d + k2 * d ** (3/2)) energy_conservation = (2 * k2 * d ** (5/2))/5 + (1/2) * k1 * d ** 2 - m *g * d - m * g *h print("Equacao:") energy_conservation ``` ```python t = symbols('t') f = (0.4 * t - t**3 + 0.141) solved_equation = solve(f) print("Os valores de t para que a matriz seja singular são:") solved_equation ``` ```python print("Com a aproximacao inicial de d = 0.4, o valor de d, em metros, é igual a:") nsolve(energy_conservation,0.4) ```
// Copyright 2019 John McFarlane
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef WSS_BOARD_H
#define WSS_BOARD_H

#include "coord.h"
#include "grid.h"
#include "load_buffer.h"

#include <ssize.h>
#include <wss_assert.h>

#include <fmt/printf.h>
#include <gsl/string_span>

#include <array>
#include <memory>   // for std::unique_ptr / std::make_unique, used by board
#include <optional>
#include <unordered_map>
#include <utility>  // for std::swap, used by transpose
#include <vector>   // for std::vector, used by make_board

// A square grid of cells with a run-time edge length.
template<typename T>
class board {
public:
    board(board&&) = default;

    explicit board(int init_edge)
            :edge{init_edge},
            cells{std::make_unique<T[]>(edge*edge)}
    {
    }

    int size() const
    {
        return edge;
    }

    T const& cell(coord c) const
    {
        WSS_ASSERT(c[0]>=0);
        WSS_ASSERT(c[0]<edge);
        WSS_ASSERT(c[1]>=0);
        WSS_ASSERT(c[1]<edge);
        return cells.get()[c[0]+c[1]*edge];
    }

    T& cell(coord c)
    {
        WSS_ASSERT(c[0]>=0);
        WSS_ASSERT(c[0]<edge);
        WSS_ASSERT(c[1]>=0);
        WSS_ASSERT(c[1]<edge);
        return cells.get()[c[0]+c[1]*edge];
    }

private:
    int edge;
    std::unique_ptr<T[]> cells;
};

// Converts pre-parsed lines of characters into a board, translating each
// character through `mapping`; returns std::nullopt on malformed input.
template<typename CellType, typename TextToCell>
std::optional<board<CellType>> make_board(
        std::vector<std::vector<char>> const& lines,
        TextToCell const& mapping)
{
    auto edge{ssize(lines)};
    board<CellType> result{int(edge)};
    for (auto row_index{0}; row_index!=edge; ++row_index) {
        auto const& line{lines[row_index]};
        auto const num_fields{ssize(line)};
        if (num_fields!=edge) {
            fmt::print(stderr, "error: input row #{} has {} fields, expected {}.\n",
                    row_index+1, num_fields, edge);
            return std::nullopt;
        }

        for (auto column_index{0}; column_index!=edge; ++column_index) {
            auto const field{line[column_index]};
            auto const cell_found{mapping(field)};
            if (!cell_found) {
                fmt::print(
                        stderr,
                        "Unrecognised field, '{}', in row #{}, column #{}.\n",
                        (char)field, row_index+1, column_index+1);
                return std::nullopt;
            }
            result.cell(coord{column_index,row_index}) = *cell_found;
        }
    }
    return result;
}

// Loads and parses a board from a file; returns std::nullopt on failure.
template<typename CellType, typename TextToCell>
std::optional<board<CellType>> load_board(
        gsl::cstring_span<> filename,
        TextToCell const& mapping)
{
    auto const buffer{load_buffer(filename)};
    if (!buffer) {
        return std::nullopt;
    }

    auto const fields{parse_grid(*buffer)};
    return make_board<CellType>(fields, mapping);
}

// Mirrors the board in place about its leading diagonal.
template<typename T>
void transpose(board<T>& b)
{
    auto const edge = ssize(b);
    for (auto row = 0; row != edge; ++row) {
        for (auto column = 0; column != row; ++column) {
            std::swap(b.cell(coord{column, row}), b.cell(coord{row, column}));
        }
    }
}

#endif //WSS_BOARD_H
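// ---------------------------------------------------------------------------
// Hypothetical usage sketch (an addition, not part of board.h). It assumes
// only what the header itself relies on: that `coord` can be
// brace-initialized from {column, row} and indexed with [0]/[1], exactly as
// make_board does above.

#include "board.h"

#include <cstdio>
#include <vector>

int main()
{
    std::vector<std::vector<char>> lines{{'.', '#'}, {'#', '.'}};
    auto const mapping = [](char field) -> std::optional<char> {
        if (field=='.' || field=='#') {
            return field;
        }
        return std::nullopt;  // unrecognised fields make make_board fail
    };
    auto const b = make_board<char>(lines, mapping);
    if (!b) {
        return 1;
    }
    std::printf("%c\n", b->cell(coord{1, 0}));  // column 1, row 0 -> '#'
}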
% get vuw logos from https://wgtn.brandkit.io/assets?tags=National%0Identity \documentclass{beamer} %\usepackage[utf8]{inputenc} \usepackage{graphicx} \usepackage{amsmath,amsfonts,amssymb} \usepackage{longtable} %\usepackage{enumitem} \usepackage{mathtools} %\usepackage{amsopn} %\usepackage{pgf} \usetheme{Madrid} %\usecolortheme{beaver} \usecolortheme{crane} \title{A National Longitudinal Investigation of Environmental Attitudes and Behaviours} \subtitle{New Zealand: 2009-2018} \author{Joseph Bulbulia} \institute[Victoria University]{School of Psychology \\ Faculty of Science\\ Te Herenga Waka Victoria University of Wellington} % \date{\today} \date{Oct 28, 2020} \logo{\includegraphics[height=.45cm]{VUW_LOGO_3.png}} \begin{document} \maketitle \begin{frame} \frametitle{Outline} \tableofcontents \end{frame} \section{Background: New Zealand Attitudes and Values Study} \begin{frame}{What is the New Zealand Attitudes and Values Study (NZAVS)?} \begin{itemize} \item Planned 20-year longitudinal study, currently in its 11$^{th}$ year. \item Postal questionnaire. \item Sample frame drawn randomly from NZ Electoral Roll. \item Large multidisciplinary research team. \item Focus on personality, social attitudes, values, religion, adult character development, identity, employment, experiences of discrimination, well-being and health, and {\bf environmental attitudes}. \item Current sample contains $> 42,000$ unique people, or 1.45\% of the adult NZ population (we have responses from 61,535 individuals.) \end{itemize} \end{frame} \begin{frame}{Histogram of weekly NZAVS responses reveals magnitude of information capture} \begin{figure} \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/NZAVSHISTOGRAM.png} %\caption{} \end{figure} \end{frame} \begin{frame}{Here, we investigate change in (1) Climate Beliefs, (2) Climate Concern, (3) Environmental Efficacy, (4) Sacrificial Behaviour for the Environment in $N = 26,790$ longitudinal participants (responded to two or more waves).} % \begin{block}{Model} % \tiny{ % \begin{flalign*} % \sum_{k=1}^{K}\operatorname{Environmental Orientation}_{ij} &\sim N \left(\mu,\sigma^2 \right) \\ \mu &=\alpha_{j[i]} + \beta_{1}(\operatorname{years}) + \beta_{2}(\operatorname{Relid.S}) + \beta_{3}(\operatorname{Pol.Orient.S})\ + \\ % &\quad \beta_{4}(\operatorname{Edu.S}) + \beta_{5}(\operatorname{Age.C.decade}) + \beta_{6}(\operatorname{EthnicCats}_{\operatorname{Māori}}) + \beta_{7}(\operatorname{EthnicCats}_{\operatorname{Pacific}})\ + \\ % &\quad \beta_{8}(\operatorname{EthnicCats}_{\operatorname{Asian}}) + \beta_{9}(\operatorname{Male}_{\operatorname{1}}) + \beta_{10}(\operatorname{NZdepS}) + \beta_{11}(\operatorname{Urban}_{\operatorname{1}})\ + \\ % &\quad \beta_{12}(\operatorname{years} \times \operatorname{Relid.S}) + \beta_{13}(\operatorname{years} \times \operatorname{Pol.Orient.S}) + \beta_{14}(\operatorname{years} \times \operatorname{Edu.S}) \\ \alpha_{j} &\sim N \left(\mu_{\alpha_{j}},\sigma^2_{\alpha_{j}} \right), \operatorname{ for ~ } i = 1\dots 26,790 ~\operatorname{Id}, j = 1\dots 10~\operatorname{Waves} % \end{flalign*} % } % % \end{block} \end{frame} \section{Climate Change Beliefs} \begin{frame}{1. Climate Change Questions} \begin{alertblock}{~} "Climate change is real." \end{alertblock} \begin{alertblock}{~} "Climate change is caused by humans." 
\end{alertblock}
\end{frame}

\begin{frame}{Trend: in only nine years, there was nearly a 1 point increase in beliefs that climate change is real (!)}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/REAL_TIME.png}
%\caption{}
\end{figure}
\end{frame}
%
%
% \begin{frame}{Ethnicity: non-Europeans, and especially Pacific/Māori have the strongest climate beliefs (panels show years 2009,2013,2018).}
% \begin{figure}
% \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/REAL_EthnicCats_T.png}
% %\caption{}
% \end{figure}
% \end{frame}
%
%
% \begin{frame}{Education: the relationship between education and climate change beliefs has strengthened.}
% \begin{figure}
% \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/REAL_Edu.S.png}
% %\caption{
% \end{figure}
% \end{frame}

\begin{frame}{Political conservatism predicts weaker beliefs in climate change; however, conservatives are more convinced of climate change than liberals were a decade ago.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/REAL_Pol.Orient.S.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{Religious people have also increasingly accepted climate change, but religious identification is increasingly attenuating such beliefs.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/REAL_RELIDS_T.png}
%\caption{}
\end{figure}
\end{frame}

% \begin{frame}{Religion Climate Change is Human Caused}
% \begin{figure}
% \includegraphics[width=.99\textwidth,height=\textheight,keepaspectratio]{Figures/HUMANCAUSED_RELIDS_T.png}
% %\caption{}
% \end{figure}
% \end{frame}

\section{Concern about Climate}

\begin{frame}{2. Climate Concern Question (2013-2018)}
\begin{alertblock}{~}
"I am deeply concerned about climate change."
\end{alertblock}
\end{frame}

\begin{frame}{Trend: during the past five years, there has been a strong increase in concern about climate (about 1/2 point)}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/CONCERN_TIME.png}
%\caption{}
\end{figure}
\end{frame}
%
%
% \begin{frame}{Ethnicity: Europeans are substantially less concerned about climate than are other ethnic groups}
% \begin{figure}
% \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/CONCERN_EthnicCats_T.png}
% %\caption{}
% \end{figure}
% \end{frame}
%
%
% \begin{frame}{Education: the relationship between education and climate concern has increased.}
% \begin{figure}
% \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/CONCERN_Edu.S.png}
% %\caption{}
% \end{figure}
% \end{frame}

\begin{frame}{Political Conservatism strongly attenuates climate concern (even as conservatives accept that climate change is real).}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/CONCERN_Pol.Orient.S.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{Religious people have grown in climate concern, but religious identification is increasingly suppressing environmental concern}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/CONCERN_RELIDS_T.png}
%\caption{}
\end{figure}
\end{frame}

\section{Beliefs My Behavior Can Help The Environment}

\begin{frame}{3. Environmental Efficacy Questions}
\begin{alertblock}{~}
"By taking personal action I believe I can make a positive difference to environmental problems."
\end{alertblock}
\begin{alertblock}{~}
"I feel I can make a difference to the state of the environment."
\end{alertblock}
\end{frame}

\begin{frame}{Trend: beliefs in environmental efficacy are slowly increasing}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/EFFICACY_TIME.png}
%\caption{}
\end{figure}
\end{frame}
%
% \begin{frame}{Ethnicity: Pakeha are only now catching up with Māori a decade ago in environmental efficacy beliefs}
% \begin{figure}
% \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/EFFICACY_EthnicCats_T.png}
% %\caption{}
% \end{figure}
% \end{frame}
%
%
% \begin{frame}{Education: the relationship of education to environmental efficacy beliefs is becoming increasingly important.}
% \begin{figure}
% \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/EFFICACY_Edu.S.png}
% %\caption{}
% \end{figure}
% \end{frame}
%

\begin{frame}{Political Conservatism predicts {\bf weaker} environmental efficacy beliefs.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/EFFICACY_Pol.Orient.S.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{Religion has long been associated with {\bf stronger} environmental efficacy beliefs; however, the relationship is weakening}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/EFFICACY_RELIDS_T.png}
%\caption{}
\end{figure}
\end{frame}

\section{Sacrificial Behaviour for the Environment}

\begin{frame}{4. Climate Sacrifice Questions}
\begin{alertblock}{~}
"Have you made sacrifices to your standard of living (e.g., accepted higher prices, driven less, conserved energy) in order to protect the environment?"
\end{alertblock}
% \begin{alertblock}{~}
% "Have you made changes to your daily routine in order to protect the environment?"
% \end{alertblock}
\end{frame}

\begin{frame}{Trend: there has not been much increase in environmental sacrificial behaviours.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/SACRIFICEMADE_TIME.png}
%\caption{}
\end{figure}
%
\end{frame}
%
% \begin{frame}{Ethnicity: Māori sacrifice more for the environment, and Pakeha have yet to catch up}
% \begin{figure}
% \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/SACRIFICEMADE_EthnicCats_T.png}
% %\caption{}
% \end{figure}
% \end{frame}
%
% \begin{frame}{Education: the relationship between education and environmental sacrifice is becoming more important; those low in education are sacrificing {\bf less} now than a decade ago.}
% \begin{figure}
% \includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/SACRIFICEMADE_Edu.S.png}
% %\caption{}
% \end{figure}
% \end{frame}
%

\begin{frame}{Political Conservatism predicts {\bf lower} environmental sacrifice, with all of the growth occurring among political liberals}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/SACRIFICEMADE_Pol.Orient.S.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{Religion has long been associated with {\bf greater} sacrifice for the environment; however, the relationship is weakening.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/SACRIFICEMADE_RELIDS_T.png}
%\caption{}
\end{figure}
\end{frame}

\section{What Motivates Sacrifice for the Environment?}

\begin{frame}{How do ideologies and emotions combine to motivate environmental sacrifice?}
\begin{alertblock}{Future (in)Security}
"Satisfied with my future security."
\end{alertblock}
\begin{alertblock}{(dis)Satisfaction with New Zealand's environment}
Satisfaction with the quality of New Zealand’s natural environment.
\end{alertblock}
% \begin{alertblock}{~}
% "Have you made changes to your daily routine in order to protect the environment?"
% \end{alertblock}
\end{frame}

\begin{frame}{Political Conservatism: future (in)security does not predict greater environmental sacrifice.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/X_SACRIFICEMADE_Your_Future_SecurityS_Pol.Orient.S.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{Religious Identification: future (in)security does not predict greater environmental sacrifice}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/X_SACRIFICEMADE_Your_Future_SecurityS_Relid.S.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{(dis)Satisfaction with the environment predicts greater environmental sacrifice at low and high levels of Political Conservatism.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/X_SACRIFICEMADE_Env.SatNZEnvironmentS_Pol.Orient.S.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{(dis)Satisfaction with the environment predicts greater environmental sacrifice at low and high levels of Religious Identification.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/X_SACRIFICEMADE_Env.SatNZEnvironmentS_Relid.S.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{What predicts pro-environmental priorities?}
\begin{alertblock}{Government spending on motorways}
"Increased government spending on new motorways."
\end{alertblock}
\begin{alertblock}{Government spending on public transport}
"Government subsidy of public transport."
\end{alertblock}
\begin{alertblock}{Protection of New Zealand Native Species}
"Protecting New Zealand’s native species should be a national priority."
\end{alertblock}
\end{frame}

\begin{frame}{(dis)Satisfaction with the NZ environment predicts lower support for Motorway Spending (a revealed CO$_2$ preference).}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/XY_PLOT_Env.MotorwaySpend.SATENVIRON.png}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{(dis)Satisfaction with the NZ environment predicts greater support for public transport spending (a revealed CO$_2$ preference).}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/X_SACRIFICEMADE_Env.SatNZEnvironmentS_Relid.S}
%\caption{}
\end{figure}
\end{frame}

\begin{frame}{(dis)Satisfaction with the NZ environment predicts greater support for protection of NZ species.}
\begin{figure}
\includegraphics[width=.8\textwidth,height=\textheight,keepaspectratio]{Figures/XY_PLOT_Env.NATIVE.SPECIES.SATENVIRON.png}
%\caption{}
\end{figure}
\end{frame}

\section{Summary}

\begin{frame}{Take Homes}
% Keep the summary *very short*.
\begin{itemize}
\item
The \alert{first message}: compared with a decade ago, New Zealanders are substantially {\bf (1) more aware of} and {\bf (2) more concerned about} climate change.
\pause
\item
The \alert{second message}: {\bf sacrifice} for the environment {\bf has been slow to follow} this awareness.
\pause
% \item
% The \alert{third message}: {\bf education is increasingly important} for climate awareness and behaviours.
% \pause
%
\item
The \alert{third message}: recruit religious people: though less concerned about climate, {\bf religious people are more likely to believe they can make a difference} to the environment and {\bf more likely to sacrifice} for the environment.
\pause
\item
The \alert{fourth message}: {\bf foster frustration over fear} to promote pro-environmental behavior and policies.
\pause
\end{itemize}
\end{frame}

\begin{frame}{Frustration > Fear}
\begin{columns}
\column{0.55\textwidth}
\begin{figure}
\includegraphics[width=1.02\textwidth,height=1.02\textheight,keepaspectratio]{Figures/Greta.jpg}
%\caption{}
\end{figure}
\column{0.45\textwidth}
\begin{figure}
\includegraphics[width=1.05\textwidth,height=1.05\textheight,keepaspectratio]{Figures/Munchjpg.jpg}
%\caption{}
\end{figure}
\end{columns}
\end{frame}

% \begin{frame}{Thanks!}
% \begin{figure}
% \includegraphics[width=.99\textwidth,height=\textheight,keepaspectratio]{TRT_LOGO.png}
% %\caption{}
% \end{figure}
% \end{frame}

\begin{frame}
\frametitle{Thanks!}
\begin{columns}
\column{0.4\textwidth}
\begin{itemize}
\item Prof Dominic Johnson (University of Oxford) for the idea.
\item Prof Chris G. Sibley (University of Auckland) for the heavy lifting.
\item $>$ 50 NZAVS collaborators (join us!)
\item $>$ 61,535 New Zealanders who have participated, for their time.
\item Templeton Religion Trust for their financial support.
\end{itemize} \column{0.6\textwidth} \centering \begin{figure} \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{TRT_LOGO.png} %\caption{For their generous support} \end{figure} \end{columns} \end{frame} \begin{frame}{Cheers} \centering \texttt{[email protected]} \end{frame} %\section{Extra Slides} \begin{frame}{Extra Slides: 2009-2018 Sample Demographic Indicators} \begin{table} \centering %\caption{Demographic Indicators}\label{} \scalebox{.4}{ \begin{tabular}{ l c c c c c c c c c c } \toprule & \multicolumn{ 10 }{c}{ Wave }\\ & 2009 & 2010 & 2011 & 2012 & 2013 & 2014 & 2015 & 2016 & 2017 & 2018 \\ & n = 5523 & n = 4430 & n = 6652 & n = 11300 & n = 17241 & n = 15801 & n = 13929 & n = 20402 & n = 17017 & n = 18020 \\ \midrule Age & & & & & & & & & & \\ \hspace{6pt} & 48.7 (15.3) & 51.0 (15.2) & 50.7 (15.8) & 49.7 (14.9) & 48.1 (14.0) & 49.3 (14.0) & 50.8 (13.9) & 50.2 (13.8) & 51.3 (13.8) & 52.3 (13.6)\\ EthnicCats & & & & & & & & & & \\ \hspace{6pt} Euro & 4050 (73.3\%) & 3341 (75.4\%) & 4650 (69.9\%) & 8375 (74.1\%) & 13166 (76.4\%) & 12573 (79.6\%) & 11113 (79.8\%) & 16363 (80.2\%) & 13844 (81.4\%) & 14622 (81.1\%)\\ \hspace{6pt} Maori & 890 (16.1\%) & 687 (15.5\%) & 721 (10.8\%) & 1743 (15.4\%) & 2168 (12.6\%) & 1973 (12.5\%) & 1668 (12\%) & 2244 (11\%) & 2002 (11.8\%) & 2081 (11.5\%)\\ \hspace{6pt} Pacific & 206 (3.7\%) & 139 (3.1\%) & 140 (2.1\%) & 408 (3.6\%) & 471 (2.7\%) & 433 (2.7\%) & 351 (2.5\%) & 449 (2.2\%) & 319 (1.9\%) & 335 (1.9\%)\\ \hspace{6pt} Asian & 227 (4.1\%) & 164 (3.7\%) & 219 (3.3\%) & 478 (4.2\%) & 679 (3.9\%) & 632 (4\%) & 492 (3.5\%) & 806 (4\%) & 646 (3.8\%) & 678 (3.8\%)\\ \hspace{6pt} \emph{missing} & 150 (2.7\%) & 99 (2.2\%) & 922 (13.9\%) & 296 (2.6\%) & 757 (4.4\%) & 190 (1.2\%) & 305 (2.2\%) & 540 (2.6\%) & 206 (1.2\%) & 304 (1.7\%)\\ Edu & & & & & & & & & & \\ \hspace{6pt} & 5.1 (2.8) & NaN (\emph{missing}) & NaN (\emph{missing}) & 5.7 (2.8) & 5.9 (2.8) & 6.1 (2.8) & 6.2 (2.8) & 6.3 (2.7) & 6.5 (2.7) & 6.5 (2.7)\\ Male & & & & & & & & & & \\ \hspace{6pt} 0 & 3338 (60.4\%) & 2729 (61.6\%) & 4161 (62.6\%) & 7085 (62.7\%) & 10890 (63.2\%) & 9966 (63.1\%) & 8707 (62.5\%) & 12806 (62.8\%) & 10756 (63.2\%) & 11287 (62.6\%)\\ \hspace{6pt} 1 & 2185 (39.6\%) & 1701 (38.4\%) & 2491 (37.4\%) & 4215 (37.3\%) & 6351 (36.8\%) & 5782 (36.6\%) & 5172 (37.1\%) & 7528 (36.9\%) & 6203 (36.5\%) & 6690 (37.1\%)\\ \hspace{6pt} \emph{missing} & 0 (0\%) & 0 (0\%) & 0 (0\%) & 0 (0\%) & 0 (0\%) & 53 (0.3\%) & 50 (0.4\%) & 68 (0.3\%) & 58 (0.3\%) & 43 (0.2\%)\\ NZdep & & & & & & & & & & \\ \hspace{6pt} & 5.0 (2.8) & 4.9 (2.8) & 4.6 (2.7) & 4.9 (2.8) & 4.8 (2.8) & 4.7 (2.8) & 4.7 (2.8) & 4.6 (2.7) & 4.6 (2.7) & 4.6 (2.7)\\ Pol Orient & & & & & & & & & & \\ \hspace{6pt} & 3.8 (1.2) & 4.0 (1.3) & 3.7 (1.4) & 3.7 (1.3) & 3.6 (1.3) & 3.6 (1.3) & 3.6 (1.3) & 3.6 (1.4) & 3.6 (1.4) & 3.6 (1.4)\\ Relid & & & & & & & & & & \\ \hspace{6pt} & 2.3 (2.8) & 2.2 (2.8) & 1.9 (2.7) & 2.0 (2.7) & 1.9 (2.7) & 1.9 (2.7) & 2.0 (2.7) & 1.8 (2.6) & 1.7 (2.6) & 1.7 (2.6)\\ Urban & & & & & & & & & & \\ \hspace{6pt} 0 & 2220 (40.2\%) & 1785 (40.3\%) & 2161 (32.5\%) & 3815 (33.8\%) & 5583 (32.4\%) & 5158 (32.6\%) & 4621 (33.2\%) & 7039 (34.5\%) & 3089 (18.2\%) & 3220 (17.9\%)\\ \hspace{6pt} 1 & 3269 (59.2\%) & 2623 (59.2\%) & 4006 (60.2\%) & 7310 (64.7\%) & 11531 (66.9\%) & 10439 (66.1\%) & 8986 (64.5\%) & 13124 (64.3\%) & 13688 (80.4\%) & 14512 (80.5\%)\\ \hspace{6pt} \emph{missing} & 34 (0.6\%) & 22 (0.5\%) & 485 (7.3\%) & 175 (1.5\%) & 127 (0.7\%) & 204 (1.3\%) & 322 (2.3\%) & 239 
(1.2\%) & 240 (1.4\%) & 288 (1.6\%)\\ \bottomrule \end{tabular} } \end{table} \end{frame} \begin{frame}{Coefficient Plot: Climate Change is Real} \begin{figure} \includegraphics[width=.99\textwidth,height=\textheight,keepaspectratio]{Figures/mREAL.png} %\caption{} \end{figure} \end{frame} \begin{frame}{Coefficient Plot: Environmental Concern} \begin{figure} \includegraphics[width=.99\textwidth,height=\textheight,keepaspectratio]{Figures/mCONCERN.png} %\caption{} \end{figure} \end{frame} \begin{frame}{Coefficient Plot: Environmental Efficacy} \begin{figure} \includegraphics[width=.99\textwidth,height=\textheight,keepaspectratio]{Figures/mEFFICACY.png} %\caption{} \end{figure} \end{frame} \begin{frame}{Coefficient Plot: Sacrifice Made} \begin{figure} \includegraphics[width=.99\textwidth,height=\textheight,keepaspectratio]{Figures/mSACRIFICEMADE.png} %\caption{} \end{figure} \end{frame} \end{document}
###################################################################### # subsets(A) is the set of all subsets of A `is_element/subsets` := (A::set) -> proc(B) type(B,set) and B minus A = {}; end; `is_equal/subsets` := (A::set) -> (B,C) -> evalb(B = C): `is_leq/subsets` := (A::set) -> (B,C) -> evalb(B minus C = {}): `random_element/subsets` := (A::set) -> proc() local r,B,a; r := rand(2); B := select(a -> (r() = 1),A); return B; end; `list_elements/subsets` := (A::set) -> sort(map(sort,[op(combinat[powerset](A))])): `count_elements/subsets` := (A::set) -> 2^nops(A); `functor/subsets` := (A::set,B::set) -> (f) -> proc(T) map(t -> f[t],T); end: `cofunctor/subsets` := (A::set,B::set) -> (f) -> proc(T) select(a -> member(f[a],T),A); end:
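# A small usage sketch (an addition; it assumes the definitions above have
# been read into a Maple session):
A := {1,2,3}:
`is_element/subsets`(A)({1,3});      # true: {1,3} minus A = {}
`is_leq/subsets`(A)({1},{1,2});      # true: {1} minus {1,2} = {}
`count_elements/subsets`(A);         # 8 = 2^3
nops(`list_elements/subsets`(A));    # also 8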
State Before: ι : Type ?u.337904 ι' : Type ?u.337907 α : Type u_1 β : Type u_2 γ : Type ?u.337916 inst✝¹ : SemilatticeSup α inst✝ : NoMaxOrder α a : α f : α → β l : Filter β ⊢ Tendsto (fun x => f ↑x) atTop l ↔ Tendsto f atTop l State After: ι : Type ?u.337904 ι' : Type ?u.337907 α : Type u_1 β : Type u_2 γ : Type ?u.337916 inst✝¹ : SemilatticeSup α inst✝ : NoMaxOrder α a : α f : α → β l : Filter β ⊢ Tendsto (fun x => f ↑x) atTop l ↔ Tendsto (f ∘ Subtype.val) atTop l Tactic: rw [← map_val_Ioi_atTop a, tendsto_map'_iff] State Before: ι : Type ?u.337904 ι' : Type ?u.337907 α : Type u_1 β : Type u_2 γ : Type ?u.337916 inst✝¹ : SemilatticeSup α inst✝ : NoMaxOrder α a : α f : α → β l : Filter β ⊢ Tendsto (fun x => f ↑x) atTop l ↔ Tendsto (f ∘ Subtype.val) atTop l State After: no goals Tactic: rfl
function protocol = FSL2Protocol(bvalfile, bvecfile)
%
% function protocol = FSL2Protocol(bvalfile, bvecfile)
%
% Note: for NODDI, the exact sequence timing is not important.
% This function reverse-engineers one possible sequence timing
% given the b-values.
%
% author: Gary Hui Zhang ([email protected])
%

protocol.pulseseq = 'PGSE';
protocol.schemetype = 'multishellfixedG';
protocol.teststrategy = 'fixed';

% load bval
bval = load(bvalfile);
bval = bval';

% set total number of measurements
protocol.totalmeas = length(bval);

% set the b=0 indices
protocol.b0_Indices = find(bval==0);
protocol.numZeros = length(protocol.b0_Indices);

% find the unique non-zero b-values
B = unique(bval(bval>0));

% set the number of shells
protocol.M = length(B);
for i=1:length(B)
    protocol.N(i) = length(find(bval==B(i)));
end

% maximum b-value in the s/mm^2 unit
maxB = max(B);

% set maximum G = 40 mT/m
Gmax = 0.04;

% set smalldel and delta and G
GAMMA = 2.675987E8;
tmp = nthroot(3*maxB*10^6/(2*GAMMA^2*Gmax^2),3);
for i=1:length(B)
    protocol.udelta(i) = tmp;
    protocol.usmalldel(i) = tmp;
    protocol.uG(i) = sqrt(B(i)/maxB)*Gmax;
end

protocol.delta = zeros(size(bval))';
protocol.smalldel = zeros(size(bval))';
protocol.G = zeros(size(bval))';

for i=1:length(B)
    tmp = find(bval==B(i));
    for j=1:length(tmp)
        protocol.delta(tmp(j)) = protocol.udelta(i);
        protocol.smalldel(tmp(j)) = protocol.usmalldel(i);
        protocol.G(tmp(j)) = protocol.uG(i);
    end
end

% load bvec
bvec = load(bvecfile);
protocol.grad_dirs = bvec';

% make the gradient directions for b=0's [1 0 0]
for i=1:length(protocol.b0_Indices)
    protocol.grad_dirs(protocol.b0_Indices(i),:) = [1 0 0];
end

% make sure the gradient directions are unit vectors
for i=1:protocol.totalmeas
    protocol.grad_dirs(i,:) = protocol.grad_dirs(i,:)/norm(protocol.grad_dirs(i,:));
end
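% Hypothetical usage sketch (an addition; 'dwi.bval'/'dwi.bvec' are
% placeholder FSL file names, not files referenced anywhere above):
%
%   protocol = FSL2Protocol('dwi.bval', 'dwi.bvec');
%   fprintf('%d shells, %d b=0 measurements\n', protocol.M, protocol.numZeros);
%   max(protocol.G)   % never exceeds the assumed Gmax of 0.04 T/m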
This hop starts at 9 am Pacific on December 21 and ends on December 31. Please check the blogs that have ~ mark in front of their name first since those have been verified. Giveaway Ends 12.31.17 and open to all who can legally enter. Open to anyone who can accept and use an Amazon or Paypal code. Please add BookHounds @Gmail to your address book. You can also email to verify if you have won. I don’t know yet. I’ve got to get Christmas over with first. I’ll probably stay at home and watch the ball drop! I’m going to throw a small NYE get together at home. Safer that way. We will stay home and watch movies. I will stay home and go to bed around ten or eleven as usual. Not sure yet what we are doing but the plan is to go to the cities and stay the night. New Years Eve I’m reading, then going to bed early so I can run in the morning. We usually have family over and play cards and watch the count down on TV. I’ll be sleeping like I am every year. I get up at 12:30 every day ;). Relaxing, watching movies, reading, and eating!! Have a great holiday and thanks for this amazing giveaway! I’m spending a quiet New Year’s Eve at home with my family and our animals and a nice bottle of wine. Nothing special. A quiet night in with family. Probably be asleep before ball drops. We’re going to see a Beatles tribute band. Looking forward to it. Staying home and watching TV. I don’t know yet what we are doing. Not much! Might stay up and watch the ball drop, might not. Watch the ball drop and go to bed. Too cold to go out side. I usually stay home with my family. Besides reading more books, no real plans!!! No plans, most of the time I am in bed before it happens…lol. I don’t do much on NYE — I’ll be cozy at home. I have a 3 year old so we do not have any plans lol! I really hope we are all sleeping at midnight! No plans at all this year unfortunately! No plans yet, but probably have friends over and stay home. Partying with a few close friends. I’m staying at home, trying to go on like it’s any other day (can’t escape the Happy New Year wishes and stuff with the family, but they keep it to a minumum at least), bc I’m not much for celebrations, they make me very uncomfortable. My new years eve plans include staying at home! I plan to stay in with hubby, and have a quiet evening alone. I’ll be staying home just like any other night on NYE. The plan is to go to the Cities and enjoy a night out. We are spending it with our daughter and family of three little ones. I’m staying at home and spending time with my family! I will be staying at home with family. Sitting at home with my husband and kids. No plans for New Year’s Eve! We might invite my niece to spend the night. I will be staying home this New Year with my husband and daughter. We will have a lobster dinner, then play games and watch a movie. I usually stay home New Year’s Eve these days–I’ll probably be babysitting my grandkids. I don’t have any plans. Part of me is wondering if we should throw a party. We stay in and watch the ball drop in Time’s Square. Relax, read, watch tv and eat! Have a great holiday and thanks for this giveaway! We will be staying at home with a few close friends and family and enjoy the New Year as it comes in!!! We usually stay in because we go out on the 29th for our wedding anniversary. This year it will be our 44th ! Love is grand! I have no plans for New Year’s Eve, but will probably stay at home. I am planning to stay home and celebrate with my family. 
We just stay home and safe and play games..Scrabble, Yatzee and drink lots of champagne and beer!! Merry Christmas and Happy New year! I don’t have any plans for New Year’s Eve yet. I plan to stay home with my husband and I have to work that day. My plans are to stay at home, read, and cuddle my puppies! I’ll be at home with the cats, as usual and will be going to bed at my normal hour but will probably be woken up at midnight by fireworks! We’ll be staying in and watching movies. For New Year’s Eve I will probably be staying home and having a movie marathon! I’m going to see George Clinton and Parliament-Funkadelic at the Moon Pie Drop! No plans other than staying home with my family. We’ll be hanging out with family and cousins, playing games and chatting! WE will be home and counting down together as a family. Thank you for the wonderful giveaway! We are just going to hang out at home. It’s going to be really cold here so I’ll just be home with my family. We may go out for an early dinner but will spend the night at home. Hard to pick just one seeing I read over 200, but one that definitely caught me off guard was The Wild by K Webster. Thank you for this fabulous chance! I’m going to a work Christmas party and then will be visiting my sister. We are planning on staying home and enjoying some family time! I plan to stay home with my daughter and watch the ball drop. Staying in with movies, tv and Rum Chata in my coffee! We will be staying home and celebrating by watching TV and entering Giveaways. We are staying in and watching movies! Thanks for the chance to win & HAPPY NEW YEAR! We stay home every year. People are crazy on the roads on a normal day. There’s no way I’m going out after everyone’s been partying. I plan on staying home and spending time with my family. I plan on staying home. I’ll be at home and will watch the ball drop if I can stay up. We’ll be celebrating at home. We will be staying home and watching the festivities on TV. We will toast the New Year, then go to bed! We are going to a small house party. Staying home and staying warm. We might make it till midnight! I have to work. I am completely fine with that. We’ll be staying in for New Year’s Eve. We’re both not feeling well, so some TV, and will be sleeping before 12:00. Happy New Year!! My plans are to watch the ball drop with my family. I will be going to a party at a friends house. I will be staying home snuggled up with the family watching tv! I am spending it with family! Playing cards and watching movies with friends.
module WordLength allLengths : List String -> List Nat allLengths [] = [] allLengths (x :: xs) = length x :: allLengths xs
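-- For example, at the REPL (illustrative only):
--   *WordLength> allLengths ["Hello", "Interactive", "Editing"]
--   [5, 11, 7] : List Nat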
module BoundedNat

import Data.Fin
import Data.So

%default total

namespace MaybeResult

  atm : (xs : List a) -> (n : Nat) -> Maybe a
  atm [] _ = Nothing
  atm (x::_) Z = Just x
  atm (_::xs) (S n) = xs `atm` n

namespace SoLtParam

  ats : (xs : List a) -> (n : Nat) -> {auto ok : So (n `lt` length xs)} -> a
  ats (x::_) Z = x
  ats (_::xs) (S k) {ok} = xs `ats` k
  ats [] Z impossible
  ats [] (S k) impossible

  x0s : Char
  x0s = ['1', '2', '3'] `ats` 0

  x2s : Char
  x2s = ['1', '2', '3'] `ats` 2

  --x3s : Char
  --x3s = ['1', '2', '3'] `ats` 3

namespace SoJustParam

  ltlt : So (n < k) -> So (n `lt` k)
  ltlt {n = Z} {k = Z} so = so
  ltlt {n = Z} {k = (S k)} so = so
  ltlt {n = (S j)} {k = Z} so = so
  ltlt {n = (S j)} {k = (S l)} _ with (choose (j < l))
    ltlt _ | Left subso = ltlt subso
    ltlt {n = (S j)} {k = (S l)} _ | Right sso with (compare j l)
      ltlt _ | Right sso | LT = absurd sso

  atss : (xs : List a) -> (n : Nat) -> {auto ok : So (n < length xs)} -> a
  atss xs n {ok} = SoLtParam.ats xs n {ok = ltlt ok}

  x0ss : Char
  x0ss = ['1', '2', '3'] `atss` 0

  x2ss : Char
  x2ss = ['1', '2', '3'] `atss` 2

  --x3ss : Char
  --x3ss = ['1', '2', '3'] `atss` 3

namespace LteParam

  atl : (xs : List a) -> (n : Nat) -> {auto ok : LT n (length xs)} -> a
  atl (x::_) Z = x
  atl (_::xs) (S n) {ok = (LTESucc _)} = xs `atl` n

  x0l : Char
  x0l = ['1', '2', '3'] `atl` 0

  x2l : Char
  x2l = ['1', '2', '3'] `atl` 2

  --x3l : Char
  --x3l = ['1', '2', '3'] `atl` 3

namespace InBoundsParam

  ati : (xs : List a) -> (n : Nat) -> {auto ok : InBounds n xs} -> a
  ati (x::_) Z = x
  ati (_::xs) (S k) {ok = InLater _} = xs `ati` k

  x0i : Char
  x0i = ['1', '2', '3'] `ati` 0

  x2i : Char
  x2i = ['1', '2', '3'] `ati` 2

  --x3i : Char
  --x3i = ['1', '2', '3'] `ati` 3

namespace CustomWithLte

  data BoundedNat : Nat -> Type where
    MkBNat : (n : Nat) -> {auto ok : LT n b} -> BoundedNat b

  atb : (xs : List a) -> BoundedNat (length xs) -> a
  atb (x::_) (MkBNat Z) = x
  atb (_::xs) (MkBNat (S n) {ok = (LTESucc _)}) = xs `atb` MkBNat n
  atb [] (MkBNat n) impossible

  x0b : Char
  x0b = ['1', '2', '3'] `atb` MkBNat 0

  x2b : Char
  x2b = ['1', '2', '3'] `atb` MkBNat 2

  --x3b : Char
  --x3b = ['1', '2', '3'] `atb` MkBNat 3

namespace FinParam

  atf : (xs : List a) -> Fin (length xs) -> a
  atf (x::_) FZ = x
  atf (_::xs) (FS n) = xs `atf` n

  x0f : Char
  x0f = ['1', '2', '3'] `atf` 0

  x2f : Char
  x2f = ['1', '2', '3'] `atf` 2

  --x3f : Char
  --x3f = ['1', '2', '3'] `atf` 3
/- Copyright (c) 2020 Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta -/ import category_theory.limits.shapes.split_coequalizer import category_theory.limits.preserves.basic /-! # Preserving (co)equalizers Constructions to relate the notions of preserving (co)equalizers and reflecting (co)equalizers to concrete (co)forks. In particular, we show that `equalizer_comparison f g G` is an isomorphism iff `G` preserves the limit of the parallel pair `f,g`, as well as the dual result. -/ noncomputable theory universes v u₁ u₂ open category_theory category_theory.category category_theory.limits variables {C : Type u₁} [category.{v} C] variables {D : Type u₂} [category.{v} D] variables (G : C ⥤ D) namespace category_theory.limits section equalizers variables {X Y Z : C} {f g : X ⟶ Y} {h : Z ⟶ X} (w : h ≫ f = h ≫ g) /-- The map of a fork is a limit iff the fork consisting of the mapped morphisms is a limit. This essentially lets us commute `fork.of_ι` with `functor.map_cone`. -/ def is_limit_map_cone_fork_equiv : is_limit (G.map_cone (fork.of_ι h w)) ≃ is_limit (fork.of_ι (G.map h) (by simp only [←G.map_comp, w]) : fork (G.map f) (G.map g)) := (is_limit.postcompose_hom_equiv (diagram_iso_parallel_pair.{v} _) _).symm.trans (is_limit.equiv_iso_limit (fork.ext (iso.refl _) (by { simp [fork.ι] }))) /-- The property of preserving equalizers expressed in terms of forks. -/ def is_limit_fork_map_of_is_limit [preserves_limit (parallel_pair f g) G] (l : is_limit (fork.of_ι h w)) : is_limit (fork.of_ι (G.map h) (by simp only [←G.map_comp, w]) : fork (G.map f) (G.map g)) := is_limit_map_cone_fork_equiv G w (preserves_limit.preserves l) /-- The property of reflecting equalizers expressed in terms of forks. -/ def is_limit_of_is_limit_fork_map [reflects_limit (parallel_pair f g) G] (l : is_limit (fork.of_ι (G.map h) (by simp only [←G.map_comp, w]) : fork (G.map f) (G.map g))) : is_limit (fork.of_ι h w) := reflects_limit.reflects ((is_limit_map_cone_fork_equiv G w).symm l) variables (f g) [has_equalizer f g] /-- If `G` preserves equalizers and `C` has them, then the fork constructed of the mapped morphisms of a fork is a limit. -/ def is_limit_of_has_equalizer_of_preserves_limit [preserves_limit (parallel_pair f g) G] : is_limit (fork.of_ι (G.map (equalizer.ι f g)) (by simp only [←G.map_comp, equalizer.condition])) := is_limit_fork_map_of_is_limit G _ (equalizer_is_equalizer f g) variables [has_equalizer (G.map f) (G.map g)] /-- If the equalizer comparison map for `G` at `(f,g)` is an isomorphism, then `G` preserves the equalizer of `(f,g)`. -/ def preserves_equalizer.of_iso_comparison [i : is_iso (equalizer_comparison f g G)] : preserves_limit (parallel_pair f g) G := begin apply preserves_limit_of_preserves_limit_cone (equalizer_is_equalizer f g), apply (is_limit_map_cone_fork_equiv _ _).symm _, apply is_limit.of_point_iso (limit.is_limit (parallel_pair (G.map f) (G.map g))), apply i, end variables [preserves_limit (parallel_pair f g) G] /-- If `G` preserves the equalizer of `(f,g)`, then the equalizer comparison map for `G` at `(f,g)` is an isomorphism. 
-/ def preserves_equalizer.iso : G.obj (equalizer f g) ≅ equalizer (G.map f) (G.map g) := is_limit.cone_point_unique_up_to_iso (is_limit_of_has_equalizer_of_preserves_limit G f g) (limit.is_limit _) @[simp] lemma preserves_equalizer.iso_hom : (preserves_equalizer.iso G f g).hom = equalizer_comparison f g G := rfl instance : is_iso (equalizer_comparison f g G) := begin rw ← preserves_equalizer.iso_hom, apply_instance end end equalizers section coequalizers variables {X Y Z : C} {f g : X ⟶ Y} {h : Y ⟶ Z} (w : f ≫ h = g ≫ h) /-- The map of a cofork is a colimit iff the cofork consisting of the mapped morphisms is a colimit. This essentially lets us commute `cofork.of_π` with `functor.map_cocone`. -/ def is_colimit_map_cocone_cofork_equiv : is_colimit (G.map_cocone (cofork.of_π h w)) ≃ is_colimit (cofork.of_π (G.map h) (by simp only [←G.map_comp, w]) : cofork (G.map f) (G.map g)) := (is_colimit.precompose_inv_equiv (diagram_iso_parallel_pair.{v} _) _).symm.trans $ is_colimit.equiv_iso_colimit $ cofork.ext (iso.refl _) $ begin dsimp only [cofork.π, cofork.of_π_ι_app], dsimp, rw [category.comp_id, category.id_comp] end /-- The property of preserving coequalizers expressed in terms of coforks. -/ def is_colimit_cofork_map_of_is_colimit [preserves_colimit (parallel_pair f g) G] (l : is_colimit (cofork.of_π h w)) : is_colimit (cofork.of_π (G.map h) (by simp only [←G.map_comp, w]) : cofork (G.map f) (G.map g)) := is_colimit_map_cocone_cofork_equiv G w (preserves_colimit.preserves l) /-- The property of reflecting coequalizers expressed in terms of coforks. -/ def is_colimit_of_is_colimit_cofork_map [reflects_colimit (parallel_pair f g) G] (l : is_colimit (cofork.of_π (G.map h) (by simp only [←G.map_comp, w]) : cofork (G.map f) (G.map g))) : is_colimit (cofork.of_π h w) := reflects_colimit.reflects ((is_colimit_map_cocone_cofork_equiv G w).symm l) variables (f g) [has_coequalizer f g] /-- If `G` preserves coequalizers and `C` has them, then the cofork constructed of the mapped morphisms of a cofork is a colimit. -/ def is_colimit_of_has_coequalizer_of_preserves_colimit [preserves_colimit (parallel_pair f g) G] : is_colimit (cofork.of_π (G.map (coequalizer.π f g)) _) := is_colimit_cofork_map_of_is_colimit G _ (coequalizer_is_coequalizer f g) variables [has_coequalizer (G.map f) (G.map g)] /-- If the coequalizer comparison map for `G` at `(f,g)` is an isomorphism, then `G` preserves the coequalizer of `(f,g)`. -/ def of_iso_comparison [i : is_iso (coequalizer_comparison f g G)] : preserves_colimit (parallel_pair f g) G := begin apply preserves_colimit_of_preserves_colimit_cocone (coequalizer_is_coequalizer f g), apply (is_colimit_map_cocone_cofork_equiv _ _).symm _, apply is_colimit.of_point_iso (colimit.is_colimit (parallel_pair (G.map f) (G.map g))), apply i, end variables [preserves_colimit (parallel_pair f g) G] /-- If `G` preserves the coequalizer of `(f,g)`, then the coequalizer comparison map for `G` at `(f,g)` is an isomorphism. -/ def preserves_coequalizer.iso : coequalizer (G.map f) (G.map g) ≅ G.obj (coequalizer f g) := is_colimit.cocone_point_unique_up_to_iso (colimit.is_colimit _) (is_colimit_of_has_coequalizer_of_preserves_colimit G f g) @[simp] lemma preserves_coequalizer.iso_hom : (preserves_coequalizer.iso G f g).hom = coequalizer_comparison f g G := rfl instance : is_iso (coequalizer_comparison f g G) := begin rw ← preserves_coequalizer.iso_hom, apply_instance end /-- Any functor preserves coequalizers of split pairs. 
-/ @[priority 1] instance preserves_split_coequalizers (f g : X ⟶ Y) [has_split_coequalizer f g] : preserves_colimit (parallel_pair f g) G := begin apply preserves_colimit_of_preserves_colimit_cocone ((has_split_coequalizer.is_split_coequalizer f g).is_coequalizer), apply (is_colimit_map_cocone_cofork_equiv G _).symm ((has_split_coequalizer.is_split_coequalizer f g).map G).is_coequalizer, end end coequalizers end category_theory.limits
/- Copyright (c) 2020 Heather Macbeth. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Heather Macbeth -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.analysis.specific_limits import Mathlib.analysis.asymptotics import Mathlib.PostPort universes u_1 namespace Mathlib /-! # The group of units of a complete normed ring This file contains the basic theory for the group of units (invertible elements) of a complete normed ring (Banach algebras being a notable special case). ## Main results The constructions `one_sub`, `add` and `unit_of_nearby` state, in varying forms, that perturbations of a unit are units. The latter two are not stated in their optimal form; more precise versions would use the spectral radius. The first main result is `is_open`: the group of units of a complete normed ring is an open subset of the ring. The function `inverse` (defined in `algebra.ring`), for a ring `R`, sends `a : R` to `a⁻¹` if `a` is a unit and 0 if not. The other major results of this file (notably `inverse_add`, `inverse_add_norm` and `inverse_add_norm_diff_nth_order`) cover the asymptotic properties of `inverse (x + t)` as `t → 0`. -/ namespace units /-- In a complete normed ring, a perturbation of `1` by an element `t` of distance less than `1` from `1` is a unit. Here we construct its `units` structure. -/ def one_sub {R : Type u_1} [normed_ring R] [complete_space R] (t : R) (h : norm t < 1) : units R := mk (1 - t) (tsum fun (n : ℕ) => t ^ n) (mul_neg_geom_series t h) (geom_series_mul_neg t h) @[simp] theorem one_sub_coe {R : Type u_1} [normed_ring R] [complete_space R] (t : R) (h : norm t < 1) : ↑(one_sub t h) = 1 - t := rfl /-- In a complete normed ring, a perturbation of a unit `x` by an element `t` of distance less than `∥x⁻¹∥⁻¹` from `x` is a unit. Here we construct its `units` structure. -/ def add {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) (t : R) (h : norm t < (norm ↑(x⁻¹)⁻¹)) : units R := x * one_sub (-(↑(x⁻¹) * t)) sorry @[simp] theorem add_coe {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) (t : R) (h : norm t < (norm ↑(x⁻¹)⁻¹)) : ↑(add x t h) = ↑x + t := sorry /-- In a complete normed ring, an element `y` of distance less than `∥x⁻¹∥⁻¹` from `x` is a unit. Here we construct its `units` structure. -/ def unit_of_nearby {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) (y : R) (h : norm (y - ↑x) < (norm ↑(x⁻¹)⁻¹)) : units R := add x (y - ↑x) h @[simp] theorem unit_of_nearby_coe {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) (y : R) (h : norm (y - ↑x) < (norm ↑(x⁻¹)⁻¹)) : ↑(unit_of_nearby x y h) = y := sorry /-- The group of units of a complete normed ring is an open subset of the ring. -/ theorem is_open {R : Type u_1} [normed_ring R] [complete_space R] : is_open (set_of fun (x : R) => is_unit x) := sorry theorem nhds {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) : (set_of fun (x : R) => is_unit x) ∈ nhds ↑x := mem_nhds_sets is_open (eq.mpr (id (Eq._oldrec (Eq.refl (↑x ∈ set_of fun (x : R) => is_unit x)) set.mem_set_of_eq)) (is_unit_unit x)) end units namespace normed_ring theorem inverse_one_sub {R : Type u_1} [normed_ring R] [complete_space R] (t : R) (h : norm t < 1) : ring.inverse (1 - t) = ↑(units.one_sub t h⁻¹) := sorry /-- The formula `inverse (x + t) = inverse (1 + x⁻¹ * t) * x⁻¹` holds for `t` sufficiently small. 
-/ theorem inverse_add {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) : filter.eventually (fun (t : R) => ring.inverse (↑x + t) = ring.inverse (1 + ↑(x⁻¹) * t) * ↑(x⁻¹)) (nhds 0) := sorry theorem inverse_one_sub_nth_order {R : Type u_1} [normed_ring R] [complete_space R] (n : ℕ) : filter.eventually (fun (t : R) => ring.inverse (1 - t) = (finset.sum (finset.range n) fun (i : ℕ) => t ^ i) + t ^ n * ring.inverse (1 - t)) (nhds 0) := sorry /-- The formula `inverse (x + t) = (∑ i in range n, (- x⁻¹ * t) ^ i) * x⁻¹ + (- x⁻¹ * t) ^ n * inverse (x + t)` holds for `t` sufficiently small. -/ theorem inverse_add_nth_order {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) (n : ℕ) : filter.eventually (fun (t : R) => ring.inverse (↑x + t) = (finset.sum (finset.range n) fun (i : ℕ) => (-↑(x⁻¹) * t) ^ i) * ↑(x⁻¹) + (-↑(x⁻¹) * t) ^ n * ring.inverse (↑x + t)) (nhds 0) := sorry theorem inverse_one_sub_norm {R : Type u_1} [normed_ring R] [complete_space R] : asymptotics.is_O (fun (t : R) => ring.inverse (1 - t)) (fun (t : R) => 1) (nhds 0) := sorry /-- The function `λ t, inverse (x + t)` is O(1) as `t → 0`. -/ theorem inverse_add_norm {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) : asymptotics.is_O (fun (t : R) => ring.inverse (↑x + t)) (fun (t : R) => 1) (nhds 0) := sorry /-- The function `λ t, inverse (x + t) - (∑ i in range n, (- x⁻¹ * t) ^ i) * x⁻¹` is `O(t ^ n)` as `t → 0`. -/ theorem inverse_add_norm_diff_nth_order {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) (n : ℕ) : asymptotics.is_O (fun (t : R) => ring.inverse (↑x + t) - (finset.sum (finset.range n) fun (i : ℕ) => (-↑(x⁻¹) * t) ^ i) * ↑(x⁻¹)) (fun (t : R) => norm t ^ n) (nhds 0) := sorry /-- The function `λ t, inverse (x + t) - x⁻¹` is `O(t)` as `t → 0`. -/ theorem inverse_add_norm_diff_first_order {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) : asymptotics.is_O (fun (t : R) => ring.inverse (↑x + t) - ↑(x⁻¹)) (fun (t : R) => norm t) (nhds 0) := sorry /-- The function `λ t, inverse (x + t) - x⁻¹ + x⁻¹ * t * x⁻¹` is `O(t ^ 2)` as `t → 0`. -/ theorem inverse_add_norm_diff_second_order {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) : asymptotics.is_O (fun (t : R) => ring.inverse (↑x + t) - ↑(x⁻¹) + ↑(x⁻¹) * t * ↑(x⁻¹)) (fun (t : R) => norm t ^ bit0 1) (nhds 0) := sorry /-- The function `inverse` is continuous at each unit of `R`. -/ theorem inverse_continuous_at {R : Type u_1} [normed_ring R] [complete_space R] (x : units R) : continuous_at ring.inverse ↑x := sorry
/- Copyright (c) 2017 Johannes Hölzl. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johannes Hölzl, Yaël Dillies -/ import order.partial_sups /-! # Consecutive differences of sets > THIS FILE IS SYNCHRONIZED WITH MATHLIB4. > Any changes to this file require a corresponding PR to mathlib4. This file defines the way to make a sequence of elements into a sequence of disjoint elements with the same partial sups. For a sequence `f : ℕ → α`, this new sequence will be `f 0`, `f 1 \ f 0`, `f 2 \ (f 0 ⊔ f 1)`. It is actually unique, as `disjointed_unique` shows. ## Main declarations * `disjointed f`: The sequence `f 0`, `f 1 \ f 0`, `f 2 \ (f 0 ⊔ f 1)`, .... * `partial_sups_disjointed`: `disjointed f` has the same partial sups as `f`. * `disjoint_disjointed`: The elements of `disjointed f` are pairwise disjoint. * `disjointed_unique`: `disjointed f` is the only pairwise disjoint sequence having the same partial sups as `f`. * `supr_disjointed`: `disjointed f` has the same supremum as `f`. Limiting case of `partial_sups_disjointed`. We also provide set notation variants of some lemmas. ## TODO Find a useful statement of `disjointed_rec_succ`. One could generalize `disjointed` to any locally finite bot preorder domain, in place of `ℕ`. Related to the TODO in the module docstring of `order.partial_sups`. -/ variables {α β : Type*} section generalized_boolean_algebra variables [generalized_boolean_algebra α] /-- If `f : ℕ → α` is a sequence of elements, then `disjointed f` is the sequence formed by subtracting each element from the nexts. This is the unique disjoint sequence whose partial sups are the same as the original sequence. -/ def disjointed (f : ℕ → α) : ℕ → α | 0 := f 0 | (n + 1) := f (n + 1) \ (partial_sups f n) @[simp] lemma disjointed_zero (f : ℕ → α) : disjointed f 0 = f 0 := rfl lemma disjointed_succ (f : ℕ → α) (n : ℕ) : disjointed f (n + 1) = f (n + 1) \ (partial_sups f n) := rfl lemma disjointed_le_id : disjointed ≤ (id : (ℕ → α) → ℕ → α) := begin rintro f n, cases n, { refl }, { exact sdiff_le } end lemma disjointed_le (f : ℕ → α) : disjointed f ≤ f := disjointed_le_id f lemma disjoint_disjointed (f : ℕ → α) : pairwise (disjoint on disjointed f) := begin refine (symmetric.pairwise_on disjoint.symm _).2 (λ m n h, _), cases n, { exact (nat.not_lt_zero _ h).elim }, exact disjoint_sdiff_self_right.mono_left ((disjointed_le f m).trans (le_partial_sups_of_le f (nat.lt_add_one_iff.1 h))), end /-- An induction principle for `disjointed`. To define/prove something on `disjointed f n`, it's enough to define/prove it for `f n` and being able to extend through diffs. -/ def disjointed_rec {f : ℕ → α} {p : α → Sort*} (hdiff : ∀ ⦃t i⦄, p t → p (t \ f i)) : ∀ ⦃n⦄, p (f n) → p (disjointed f n) | 0 := id | (n + 1) := λ h, begin suffices H : ∀ k, p (f (n + 1) \ partial_sups f k), { exact H n }, rintro k, induction k with k ih, { exact hdiff h }, rw [partial_sups_succ, ←sdiff_sdiff_left], exact hdiff ih, end @[simp] lemma disjointed_rec_zero {f : ℕ → α} {p : α → Sort*} (hdiff : ∀ ⦃t i⦄, p t → p (t \ f i)) (h₀ : p (f 0)) : disjointed_rec hdiff h₀ = h₀ := rfl -- TODO: Find a useful statement of `disjointed_rec_succ`. 
lemma monotone.disjointed_eq {f : ℕ → α} (hf : monotone f) (n : ℕ) : disjointed f (n + 1) = f (n + 1) \ f n := by rw [disjointed_succ, hf.partial_sups_eq] @[simp] lemma partial_sups_disjointed (f : ℕ → α) : partial_sups (disjointed f) = partial_sups f := begin ext n, induction n with k ih, { rw [partial_sups_zero, partial_sups_zero, disjointed_zero] }, { rw [partial_sups_succ, partial_sups_succ, disjointed_succ, ih, sup_sdiff_self_right] } end /-- `disjointed f` is the unique sequence that is pairwise disjoint and has the same partial sups as `f`. -/ lemma disjointed_unique {f d : ℕ → α} (hdisj : pairwise (disjoint on d)) (hsups : partial_sups d = partial_sups f) : d = disjointed f := begin ext n, cases n, { rw [←partial_sups_zero d, hsups, partial_sups_zero, disjointed_zero] }, suffices h : d n.succ = partial_sups d n.succ \ partial_sups d n, { rw [h, hsups, partial_sups_succ, disjointed_succ, sup_sdiff, sdiff_self, bot_sup_eq] }, rw [partial_sups_succ, sup_sdiff, sdiff_self, bot_sup_eq, eq_comm, sdiff_eq_self_iff_disjoint], suffices h : ∀ m ≤ n, disjoint (partial_sups d m) (d n.succ), { exact h n le_rfl }, rintro m hm, induction m with m ih, { exact hdisj (nat.succ_ne_zero _).symm }, rw [partial_sups_succ, disjoint_iff, inf_sup_right, sup_eq_bot_iff, ←disjoint_iff, ←disjoint_iff], exact ⟨ih (nat.le_of_succ_le hm), hdisj (nat.lt_succ_of_le hm).ne⟩, end end generalized_boolean_algebra section complete_boolean_algebra variables [complete_boolean_algebra α] lemma supr_disjointed (f : ℕ → α) : (⨆ n, disjointed f n) = (⨆ n, f n) := supr_eq_supr_of_partial_sups_eq_partial_sups (partial_sups_disjointed f) lemma disjointed_eq_inf_compl (f : ℕ → α) (n : ℕ) : disjointed f n = f n ⊓ (⨅ i < n, (f i)ᶜ) := begin cases n, { rw [disjointed_zero, eq_comm, inf_eq_left], simp_rw le_infi_iff, exact λ i hi, (i.not_lt_zero hi).elim }, simp_rw [disjointed_succ, partial_sups_eq_bsupr, sdiff_eq, compl_supr], congr, ext i, rw nat.lt_succ_iff, end end complete_boolean_algebra /-! ### Set notation variants of lemmas -/ lemma disjointed_subset (f : ℕ → set α) (n : ℕ) : disjointed f n ⊆ f n := disjointed_le f n lemma Union_disjointed {f : ℕ → set α} : (⋃ n, disjointed f n) = (⋃ n, f n) := supr_disjointed f lemma disjointed_eq_inter_compl (f : ℕ → set α) (n : ℕ) : disjointed f n = f n ∩ (⋂ i < n, (f i)ᶜ) := disjointed_eq_inf_compl f n lemma preimage_find_eq_disjointed (s : ℕ → set α) (H : ∀ x, ∃ n, x ∈ s n) [∀ x n, decidable (x ∈ s n)] (n : ℕ) : (λ x, nat.find (H x)) ⁻¹' {n} = disjointed s n := by { ext x, simp [nat.find_eq_iff, disjointed_eq_inter_compl] }
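-- Illustrative example (an addition, not part of the original file): the
-- first difference unfolds to `g 1 \ g 0`, using `disjointed_succ` together
-- with `partial_sups_zero`.
example {γ : Type*} [generalized_boolean_algebra γ] (g : ℕ → γ) :
  disjointed g (0 + 1) = g (0 + 1) \ g 0 :=
by rw [disjointed_succ, partial_sups_zero]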
open import Relation.Binary.PropositionalEquality

record Group : Set₁ where
  field
    X : Set
    _·_ : X → X → X
    e : X
    i : X → X
    assoc : (x y z : X) → (x · y) · z ≡ x · (y · z)
    unit-l : (x : X) → e · x ≡ x
    unit-r : (x : X) → x · e ≡ x
    inv-l : (x : X) → i x · x ≡ e
    inv-r : (x : X) → x · i x ≡ e

inv-u-l : {G : Group} (x x' y : Group.X G) →
          Group._·_ G x y ≡ Group.e G →
          Group._·_ G x' y ≡ Group.e G →
          x ≡ x'
inv-u-l x x' y p q = ?
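The hole left in `inv-u-l` calls for the standard uniqueness-of-inverses argument. On paper (this is the equational chain the Agda proof would formalize, using `unit-r`, `inv-r`, `assoc` and the hypotheses `p`, `q`; it is not type-checked code):

$$x = x \cdot e = x \cdot (y \cdot i\,y) = (x \cdot y) \cdot i\,y = e \cdot i\,y = (x' \cdot y) \cdot i\,y = x' \cdot (y \cdot i\,y) = x' \cdot e = x'.$$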
= Of Human Feelings =
module Currencies

include("types.jl")
include("currency.jl")
include("algebra.jl")

# Syntactic sugar for a few common currencies
const BRL = Currency(:BRL)
const USD = Currency(:USD)
const EUR = Currency(:EUR)
const JPY = Currency(:JPY)
const CHF = Currency(:CHF)
const GBP = Currency(:GBP)

end # module Currencies
State Before: α : Type u_1
inst✝ : DecidableEq α
l : List α
x y z : α
h : x ∈ y :: z :: l
hx : x = y
⊢ next (y :: z :: l) x h = z
State After: no goals
Tactic: rw [next, nextOr, if_pos hx]
/- Copyright (c) 2022 Sina Hazratpour. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. ---------------- # HW 13 Sina Hazratpour Introduction to Proof MATH 301, Johns Hopkins University, Fall 2022 -/ import lectures.lec18_nat_trans import tactic.basic open PROOFS open PROOFS.STR open category_str --infixr ` ⟶ `:10 := precategory_str.hom -- type as \h --notation `𝟙` := precategory_str.id -- type as \b1 -- infixr ` ⊚ `:80 := precategory_str.comp-- type as \oo local notation f ` ⊚ `:80 g:80 := precategory_str.comp g f universes v₁ v₂ v₃ v₄ u₁ u₂ u₃ u₄ variables {𝓒 : Type u₁} [category_str.{v₁} 𝓒] {𝓓 : Type u₂} [category_str.{v₂} 𝓓] /-! ## Q1. Construct the type of all groups. Show that this type admits the structure of a (large) category where morphisms are group homomorphisms.-/ structure mult_Group' := (carrier : Type) (str : mult_group_str carrier ) #check mult_Group' -- type of all groups, carrier type equipped with a multiplicative group strucutre structure bundled (C : Type v₁ → Type u₁) := (carrier : Type*) (str : C carrier ) instance (M : mult_Group') : mult_group_str M.carrier := M.str -- OUR GOAL IS FIRST TO DEFINE A CATEGORY WHERE MORPHISMS ARE GROUP HOMOMORPHISMS @[ext] -- first we define a class of morphisms between group structures -- A group morphism between groups `M` and `N` is given by a function `f : M → N` which preserves the multiplication operation. class mult_Group.morphism (M : Type u₁) (N : Type u₁) [mult_group_str M] [mult_group_str N] := (to_fun : M → N) -- f : M → N -- the underlying function of morphism (resp_one : to_fun 1 = 1) -- f (1_M ) = 1_N (resp_mul : ∀ x y : M, to_fun (x * y) = to_fun x * to_fun y) #check mult_Group.morphism -- first we defined an identity morphism between group structures def mult_group.morphism.id {M : Type}[mult_group_str M] : mult_Group.morphism M M:= { to_fun := id, -- in order to define an identity function, we provide id resp_one := by { -- our goal is to prove `id 1 = 1` refl, -- By defintion `id 1` and `1` are equal }, resp_mul := by { -- our goal is to prove `∀ (x y : M), id ( x * y) = id x * id y` intros x y, -- First, we fix ` x y : M ` in the context simp,}, -- since ` id x = x ` by defintion, ` id y = y ` by defintion, and ` id ( x * y ) = x * y ` by defintion, the goal can be written as ` x * y = x * y ` which is resolved since both sides are defintionally equal } -- next we must define composition of group morphisms @[simp] def mult_Group.morphism.comp {L M N : Type} [mult_group_str L] [mult_group_str M] [mult_group_str N] (g : mult_Group.morphism M N) (f : mult_Group.morphism L M) : mult_Group.morphism L N := { to_fun := g.to_fun ∘ f.to_fun, -- composotion of group morphisms is defined as the composition of the to_fun component of each group morphism resp_one := by { -- our goal is to prove `(morphism.to_fun ∘ morphism.to_fun) 1 = 1` dsimp, -- through simplification, the composition is simplified so that the goal turns into ` morphism.to_fun (morphism.to_fun 1) = 1` rw mult_Group.morphism.resp_one, -- our goal is to resolve `morphism.to_fun (morphism.to_fun 1) = 1` and there exists a mult_Group.morphism on g and f, a property of this morphism is that it respects one, therefore this property can be invoked rw mult_Group.morphism.resp_one,}, -- our goal is to resolve ` morphism.to_fun 1 = 1 ` and since there exists a mult_Group.morphism on g and f, this resolves a property of this morphsim which resolves the goal resp_mul := by { -- our goal is to prove `∀ (x y : L), (morphism.to_fun ∘ 
morphism.to_fun) (x * y) = (morphism.to_fun ∘ morphism.to_fun) x * (morphism.to_fun ∘ morphism.to_fun) y` intros x y, -- we fix ` x y : L ` in the context dsimp, -- through simplification, the composition is simplified so that the goal turns into `morphism.to_fun (morphism.to_fun (x * y)) = morphism.to_fun (morphism.to_fun x) * morphism.to_fun (morphism.to_fun y)` rw mult_Group.morphism.resp_mul, -- our goal is to resolve `morphism.to_fun (morphism.to_fun (x * y)) = morphism.to_fun (morphism.to_fun x) * morphism.to_fun (morphism.to_fun y)` and since there exists a mult_Group.morphism on g and f, this resolves a property of this morphism which resolves the goal rw mult_Group.morphism.resp_mul, }, -- our goal is to resolve ` morphism.to_fun (morphism.to_fun x * morphism.to_fun y) = morphism.to_fun (morphism.to_fun x) * morphism.to_fun (morphism.to_fun y)` and since there exists a mult_Group.morphism on g and f, this resolves a property of this morphism which resolves the goal } -- in order to admit a category whose morphisms are group morphisms, we create lemmas which satisfy the fields of a category_str @[simp] lemma mult_group.morphism.id_comp {L M : Type} [mult_group_str L] [mult_group_str M] (f : mult_Group.morphism L M) : mult_Group.morphism.comp f (mult_group.morphism.id) = f := begin ext, -- By extensionality, it is sufficient to prove that the to_fun components of f and id, when composed, yield the original to_fun function refl, -- Our goal is transformed to `morphism.to_fun x = morphism.to_fun x` and both sides are definitionally equal end @[simp] lemma mult_group.morphism.comp_id {L M : Type} [mult_group_str L] [mult_group_str M] (f : mult_Group.morphism L M) : mult_Group.morphism.comp (mult_group.morphism.id) f = f := begin ext, -- By extensionality, it is sufficient to prove that the to_fun components of f and id, when composed, yield the original to_fun function refl, -- Our goal is transformed to `morphism.to_fun x = morphism.to_fun x` and both sides are definitionally equal end @[simp] def mult_group.morphism.comp_assoc {K L M N : Type} [mult_group_str K] [mult_group_str L] [mult_group_str M] [mult_group_str N] (f : mult_Group.morphism K L) (g : mult_Group.morphism L M) (h : mult_Group.morphism M N) : mult_Group.morphism.comp (mult_Group.morphism.comp h g) f = mult_Group.morphism.comp h (mult_Group.morphism.comp g f) := begin refl, -- The goal is resolved since both sides of the equal sign are definitionally equal end instance cat_of_groups : large_category_str (mult_Group') := { hom := λ X, λ Y, mult_Group.morphism X.carrier Y.carrier, -- we first must provide a function which takes `mult_Group' → mult_Group' → Type` id := λ X, mult_group.morphism.id, -- we first fix ` X : mult_Group' ` and next we provide a function ` X.carrier → X.carrier ` comp := λ X Y Z, λ f g, mult_Group.morphism.comp g f, -- we first fix ` X Y Z : mult_Group'` in the context and ` f : mult_Group.morphism X.carrier Y.carrier` and ` g : mult_Group.morphism Y.carrier Z.carrier ` in the context as well, we then must define composition of multiplicative group structures which is given by `mult_Group.morphism.comp g f` id_comp' := by { -- our goal is to prove `∀ {X Y : mult_Group'} (f : X ⟶ Y), f ⊚ 𝟙 X = f` intros X Y, -- we fix `X Y : mult_Group'` in the context exact mult_group.morphism.id_comp}, -- in order to prove that composition with the id function yields the original function, we use the previously defined lemma comp_id' := by { -- our goal is to prove `∀ {X Y : mult_Group'} (f : X ⟶ Y), 𝟙 Y ⊚
f = f` intros X Y, -- we fix `X Y : mult_Group'` in the context exact mult_group.morphism.comp_id}, -- in order to prove composition with the id function yields the original function, we use the previously defined lemma comp_assoc' := by { -- our goal is to prove `∀ {W X Y Z : mult_Group'} (f : W ⟶ X) (g : X ⟶ Y) (h : Y ⟶ Z), h ⊚ g ⊚ f = h ⊚ (g ⊚ f)` intros W X Y Z, -- we fix ` W X Y Z : mult_Group' ` in the context exact mult_group.morphism.comp_assoc,} -- in order to prove composition is associative, we use the previously defined lemma } /-! ## Q2 Show that a monoid action gives rise to a functor from the delooping category of the monoid to the category of types. You can show that by filling in for the sorry placeholder in below. -/ def delooping_monoid_action (A : Type) [M : mult_Monoid] [mult_monoid_action M.carrier A] : (delooping_cat M).carrier ⥤ Type* := { obj := λ X, A, -- for this field we must provide an instance `(delooping_cat M).carrier → Type ` mor := λ X Y, _inst_3.smul, -- we fix ` X Y : (delooping_cat M).carrier → Type` in the context, we then provide a function from `mult_monoid_action M.carrier A` resp_id' := by { -- our goal is to solve ` ∀ (X : (delooping_cat M).carrier), mult_monoid_action.smul (𝟙 X) = 𝟙 A` intro X, -- we fix ` X : (delooping_cat M).carrier ` in the context simp [precategory_str.id], -- through simplification, ` 𝟙 A ` is reduced to `id` have h₃: ∀ (x : A), mult_monoid_action.smul ( 1 : M.carrier ) x = x, by {exact _inst_3.one_smul'}, -- we create a proof in the context which proves the propostion `∀ (x : A), mult_monoid_action.smul 1 x = x` . The proof of this proposition derives from a property associated with mult_monoid_action funext, -- since we are proving equality of functions, we evoke function extenstionality simp, -- Through simplificatioin ` id x ` is reduced to ` x ` have h₅ : 𝟙 X = ( 1 : M.carrier), by {refl,}, -- we introduce a proof in the context which proves `𝟙 X` and `( 1 : M.carrier)` are equal since this is known by defintion rw h₅, -- there is `𝟙 X` in the goal and we want to replace it by `1` to prove that `mult_monoid_action.smul 1 x = x` since we know `h₅` says something about the goal exact h₃ x, -- the goal is resolved by invoking the proposition found in the context }, resp_comp' := by { -- our goal is to prove `∀ {X Y Z : (delooping_cat M).carrier} (f : X ⟶ Y) (g : Y ⟶ Z), mult_monoid_action.smul (g ⊚ f) = mult_monoid_action.smul g ⊚ mult_monoid_action.smul f` intros X Y Z f g, -- we fix ` X Y Z : (delooping_cat M).carrier → Type` and ` f : X ⟶ Y ` and `g : Y ⟶ Z` in the context funext y, rw ← _inst_3.mul_smul' f g y, } } /-! ## Question 4: The Arrow Category Given a category 𝓒 we want to construct a new category whose objects are morphisms of 𝓒 and whose morphisms are commutative squares in 𝓒. -/ @[ext] structure arrow_type (𝓒 : Type*) [small_category_str 𝓒] := (dom : 𝓒) (cod : 𝓒) (arrow : dom ⟶ cod) #check arrow_type local notation `𝔸𝕣` : 10 := arrow_type @[ext] structure arrow_type_hom {𝓒 : Type*}[small_category_str 𝓒] (α β : 𝔸𝕣 𝓒 ) := (top : α.dom ⟶ β.dom) (bot : α.cod ⟶ β.cod) (eq : β.arrow ⊚ top = bot ⊚ α.arrow) #check arrow_type_hom /- Show that we can equip `𝓒[→]` with the structure of a category where morphisms of 𝓒 and whose morphisms are commutative squares in 𝓒. 
-/ instance arrow_cat (𝓒 : Type*)[small_category_str 𝓒] : small_category_str (𝔸𝕣 𝓒) := { hom := λ α, λ β, arrow_type_hom α β , id := λ α, ⟨𝟙 α.dom, 𝟙 α.cod, by {simp,}⟩ , comp := λ X Y Z, λ f, λ g, ⟨ -- we must define composition of arrow_type_hom and in order to do this we fix ` X Y Z : 𝔸𝕣 𝓒 ` and ` f : arrow_type_hom X Y` and ` g : arrow_type_hom Y Z` in the context and provide an instance of type arrow_type_hom which defines composition precategory_str.comp f.top g.top, -- for the first field, we compose the top function of the homomorphisms of f g precategory_str.comp f.bot g.bot, -- for the second field, we compose the top function of the homomorphisms of f g by { -- our goal is to prove `Z.arrow ⊚ (g.top ⊚ f.top) = g.bot ⊚ f.bot ⊚ X.arrow` simp, -- through simplificaiton, the associativity of composition is utlized, in order to rewrite the goal as `Z.arrow ⊚ (g.top ⊚ f.top) = g.bot ⊚ (f.bot ⊚ X.arrow)` have h₂: Y.arrow ⊚ f.top = f.bot ⊚ X.arrow , by {exact arrow_type_hom.eq f}, -- we create a proof in the context which proves the proposition ` Y.arrow ⊚ f.top = f.bot ⊚ X.arrow` which is proven by the `eq` property associated with the arrow_type_hom structure have h₃: Z.arrow ⊚ g.top = g.bot ⊚ Y.arrow, by {exact arrow_type_hom.eq g}, -- we create a proof in the context which proves the proposition `Z.arrow ⊚ g.top = g.bot ⊚ Y.arrow` which is proven by the `eq` property associated ith the arrow_type_hom structure rw ← h₂, -- there is `f.bot ⊚ X.arrow` in h₂ and we want to replace it by `Y.arrow ⊚ f.top` to prove that `Z.arrow ⊚ (g.top ⊚ f.top) = g.bot ⊚ (Y.arrow ⊚ f.top)` since we know `h₂` says something about the goal have h₄: (Z.arrow ⊚ g.top) ⊚ f.top = Z.arrow ⊚ (g.top ⊚ f.top), by {exact category_str.comp_assoc f.top g.top Z.arrow}, -- we create a proof in the context that proves the associativity of composition which is a property associated with category_str rw ← h₄, -- there is `Z.arrow ⊚ (g.top ⊚ f.top)` in the goal and we want to replace it by `(Z.arrow ⊚ g.top) ⊚ f.top` to prove that `Z.arrow ⊚ g.top ⊚ f.top = g.bot (Y.arrow ⊚ f.top)` since we know `h₄` says something about the goal rw h₃, -- there is `Z.arrow ⊚ g.top` in the goal and we want to replace it by `g.bot ⊚ Y.arrow` to prove that `g.bot ⊚ Y.arrow ⊚ f.top = g.bot ⊚ (Y.arrow ⊚ f.top)` since we know `h₃` says something about the goal exact category_str.comp_assoc f.top Y.arrow g.bot, -- a property associated with category_str is the associativity of composition which is required to resolve the goal } ⟩ , id_comp' := by { -- our goal is to prove `∀ {X Y : 𝔸𝕣 𝓒} (f : X ⟶ Y), f ⊚ 𝟙 X = f` intros X Y f, -- we fix ` X Y : 𝔸𝕣 𝓒` and ` f : X ⟶ Y ` in the context simp, -- Through simplification, properties concerning the composition of 𝟙 and the components of the arrow_type_hom is broken down ext, -- By extensionality, we can prove the homomorphisms are equal by proving the f.top functions are equal and the f.bottom functions are equal { refl, -- Both sides of the propostion are definitionally equal, therefore the goal is resolved } , { refl, -- Both sides of the propostion are definitionally equal, therefore the goal is resolved }, }, comp_id' := by { -- our goal is to prove `∀ {X Y : 𝔸𝕣 𝓒} (f : X ⟶ Y), 𝟙 Y ⊚ f = f` intros X Y f, -- we fix ` X Y : 𝔸𝕣 𝓒` and ` f : X ⟶ Y ` in the context simp, -- Through simplification, properties concerning the composition of 𝟙 and the components of the arrow_type_hom is broken down ext, -- By extensionality, we can prove the homomorphisms are equal by proving the f.top functions are equal 
and the f.bottom functions are equal { refl, -- Both sides of the propostion are definitionally equal, therefore the goal is resolved } , { refl, -- Both sides of the propostion are definitionally equal, therefore the goal is resolved }, }, comp_assoc' := by { -- our goal is to prove ` ∀ {W X Y Z : 𝔸𝕣 𝓒} (f : W ⟶ X) (g : X ⟶ Y) (h : Y ⟶ Z), h ⊚ g ⊚ f = h ⊚ (g ⊚ f)` intros Q X Y Z f g h, -- we fix ` Q X Y Z : 𝔸𝕣 𝓒 ` and ` f : W ⟶ X` `g :X ⟶ Y` `h : Y ⟶ Z` in the context ext, -- By extensionality, we can prove the homomorphisms are equal by proving the f.top functions are equal and the f.bottom functions are equal { exact category_str.comp_assoc f.top g.top h.top, -- we invoke the property that composition is associative associated with category_str }, { exact category_str.comp_assoc f.bot g.bot h.bot, -- we invoke the property that composition is associative associated with category_str }, }, } /- We shall define two functors form `𝔸𝕣 𝓒` to `𝓒`: `Dom` and `Cod`. `Dom` takes an arrow `f : X ⟶ Y` to its domain `X` and `Cod` takes `f` to `Y`. -/ def Dom (𝓒 : Type*)[small_category_str 𝓒] : (𝔸𝕣 𝓒) ⥤ 𝓒 := { obj := λ α, α.dom, mor := λ X Y, λ f, arrow_type_hom.top f, -- we fix ` X Y : 𝔸𝕣 𝓒` in the context `f : X ⟶ Y` and provide a function from `X.dom ⟶ Y.dom` which is satisfied through the top function associated with arrow_type_hom structure resp_id' := by {-- our goal is to prove ` ∀ (X : 𝔸𝕣 𝓒), (𝟙 X).top = 𝟙 X.dom` intro X, -- we fix ` X : 𝔸𝕣 𝓒 ` in the context simp only [precategory_str.id]}, -- we resolve the goal through simplification of the id function associatiated with the precategory_str resp_comp' := by { -- our goal is to prove `∀ {X Y Z : 𝔸𝕣 𝓒} (f : X ⟶ Y) (g : Y ⟶ Z), (g ⊚ f).top = g.top ⊚ f.top` intros A B C f g, -- we fix ` A B C : 𝔸𝕣 𝓒 ` in the context ` f: A ⟶ B ` and ` g : B ⟶ C ` in the context simp only [precategory_str.comp],}, } -- we resolve the goal through simplification of the defntion of composition associated with the pre_category_str def Cod (𝓒 : Type*)[small_category_str 𝓒] : (𝔸𝕣 𝓒) ⥤ 𝓒 := { obj := λ α, α.cod, mor := λ X Y, λ f, arrow_type_hom.bot f, -- we fix ` X Y : 𝔸𝕣 𝓒` in the context `f : X ⟶ Y` and provide a function from `X.cod → Y.cod` which is satisfied through the top function associated with arrow_type_hom structure resp_id' := by { -- our goal is to prove `∀ (X : 𝔸𝕣 𝓒), (𝟙 X).bot = 𝟙 X.cod` intro X, -- we fix ` X : 𝔸𝕣 𝓒 ` in the context simp only [precategory_str.id]}, -- we resolve the goal through simplification of the id function associatiated with the precategory_str resp_comp' := by { -- our goal is to prove `∀ {X Y Z : 𝔸𝕣 𝓒} (f : X ⟶ Y) (g : Y ⟶ Z), (g ⊚ f).bot = g.bot ⊚ f.bot` intros A B C f g, -- we fix ` A B C : 𝔸𝕣 𝓒 ` in the context ` f: A ⟶ B ` and ` g : B ⟶ C ` in the context simp only [precategory_str.comp], -- -- we resolve the goal through simplification of the defntion of composition associated with the pre_category_str }, } /- Theorem: For functors `F G : 𝓒 ⥤ 𝓓`, the type of natural transformations `F ⟶ G` is equivalent to the type of functors `𝓒 ⥤ 𝔸𝕣 𝓓` whose composition with `Dom` and `Cod` are equal to `F` and `G` respectively. Therefore, the arrow category classifies natural transformations. 
-/ local notation F ` ⊚⊚ `:80 G:80 := category_str.functor.comp G F def arrow_cat_classifies_nat_trans {𝓒 𝓓 : Type*}[small_category_str 𝓒] [small_category_str 𝓓] (F G : 𝓒 ⥤ 𝓓) : (F ⟶ G) ≅ { H : 𝓒 ⥤ 𝔸𝕣 𝓓 // ( (Dom 𝓓) ⊚⊚ H = F ) ∧ ((Cod 𝓓) ⊚⊚ H = G) } := { to_mor := λ f, ⟨ { obj := λ a, ⟨F.obj a, G.obj a, f.cmpt a⟩ , mor := λ X Y f, sorry, resp_id' := sorry, resp_comp' := sorry, }, by {split, { sorry }, { sorry }, } ⟩, inv_mor := λ f, ⟨ { obj := λ a, sorry, mor := λ X Y f, sorry, resp_id' := sorry, resp_comp' := sorry, }, by {split, { sorry }, { sorry }, } ⟩, left_inv := _, right_inv := _ } /-! ## Question 3: Yoneda Problem -/ open nat_trans def Yoneda (X Y : 𝓒) (α : ℍom.obj X ≅ ℍom.obj Y) : X ≅ Y := { to_mor := α.to_mor.cmpt (op X) (𝟙 X), inv_mor := α.inv_mor.cmpt (op Y) (𝟙 Y), left_inv := by { have h₁, from α.inv_mor.naturality (α.to_mor.cmpt (op X) (𝟙 X)), simp at h₁, sorry, }, right_inv := sorry, }
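The sorried `Yoneda` proof above is, on paper, the standard argument (a sketch of the intended `left_inv`/`right_inv`, not the Lean formalization): write $u = \alpha_X(1_X) : X \to Y$ and $v = \alpha^{-1}_Y(1_Y) : Y \to X$. Naturality of $\alpha^{-1}$ along $u$ gives

$$v \circ u = \alpha^{-1}_Y(1_Y) \circ u = \alpha^{-1}_X(1_Y \circ u) = \alpha^{-1}_X(\alpha_X(1_X)) = 1_X,$$

and symmetrically $u \circ v = 1_Y$ by naturality of $\alpha$ along $v$.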
#include <boost/mpl/aux_/iter_push_front.hpp>
#ifndef TEST_COMMAND_INCLUDE #define TEST_COMMAND_INCLUDE #include <memory> #include <string> #include <vector> #include <gsl/gsl> #include "statement.h" #include "yaml.h" namespace execHelper { namespace test { namespace baseUtils { using CommandKey = std::string; using CommandKeys = std::vector<CommandKey>; using Statements = std::vector<std::shared_ptr<Statement>>; class TestCommand { public: TestCommand(CommandKey commandKey, Statements initialStatements = {}) noexcept; std::shared_ptr<Statement> operator[](size_t index) const noexcept; Statements::const_iterator begin() const noexcept; Statements::const_iterator end() const noexcept; size_t size() const noexcept; std::string get() const noexcept; unsigned int getNbOfStatements() const noexcept; unsigned int getNumberOfStatementExecutions() const noexcept; // Returns the sum of executions of all statements void add(std::shared_ptr<Statement> statement) noexcept; void resetExecutions() noexcept; void write(gsl::not_null<YamlWriter*> yaml) const noexcept; private: std::string m_command; Statements m_statements; }; using Commands = std::vector<TestCommand>; template <typename T, typename... Args> inline std::shared_ptr<Statement> createStatement(Args... args) noexcept { return std::static_pointer_cast<Statement>(std::make_shared<T>(args...)); } } // namespace baseUtils } // namespace test } // namespace execHelper #endif /* TEST_COMMAND_INCLUDE */
#Rscript ##################################################################################################################### ##################################################################################################################### ################################# Calculate community indexes from observation data ################################# ##################################################################################################################### ##################################################################################################################### ###################### Packages R suppressMessages(library(tidyr)) ###################### Load arguments and declaring variables args <- commandArgs(trailingOnly = TRUE) if (length(args) < 4) { stop("At least one argument must be supplied, an input dataset file (.tabular).", call. = FALSE) # if no args -> error and exit1 } else { import_data <- args[1] ###### Nom du fichier importé avec son extension / file name imported with the file type ".filetype" index <- args[2] ###### List of selected metrics to calculate source(args[3]) ###### Import functions } #### d_ata must be a dataframe with at least 3 variables : unitobs representing location and year ("observation.unit"), species code ("species.code") and abundance ("number") #Import des données / Import data obs <- read.table(import_data, sep = "\t", dec = ".", header = TRUE, encoding = "UTF-8") # obs[obs == -999] <- NA factors <- fact_det_f(obs = obs) obs_type <- def_typeobs_f(obs = obs) obs <- create_unitobs(data = obs) vars_data <- c("observation.unit", "species.code", "number") err_msg_data <- "The input dataset doesn't have the right format. It need to have at least the following 3 variables :\n- observation.unit (or location and year)\n- species.code\n- number\n" check_file(obs, err_msg_data, vars_data, 3) #################################################################################################### ################# create community metrics table ## Function : calc_biodiv_f ####################### #################################################################################################### ######################################################################################################################## calc_biodiv_f <- function(d_ata, unitobs = "observation.unit", code_species = "species.code", nombres = "number", indices = index) { ## Purpose: compute biodiversity indexes ## ---------------------------------------------------------------------- ## Arguments: d_ata : input observation file ## unitobs : name of column observation unit ## code_species : name of species column ## nombres : name of abundance column ## indices : list of indexes to compute ## ---------------------------------------------------------------------- ## Author: Yves Reecht, Date: 29 oct. 
2010, 08:58 modified by Coline ROYAUX in june 2020 ## Supress lines that doesn't represent a species : notspline <- grep("(sp\\.)$|([1-9])$|^(Absencemacrofaune)$|^(NoID)$|^(Acrobranc)$|^(Acrodigit)$|^(Acroencr)$|^(Acrosubm)$|^(Acrotabu)$|^(Adredure)$|^(Adremoll)$|^(Algaturf)$|^(Balimona)$|^(Corablan)$|^(CoradurV)$|^(Coraenal)$|^(Coramor1)$|^(Coramor2)$|^(Coramou)$|^( Dallcora)$|^(Debrcora)$|^(Debris)$|^(Hare)$|^(HexaChar)$|^(MuraCong)$|^(Nacrbran)$|^(Nacrcham)$|^(Nacrencr)$|^(Nacrfoli)$|^(Nacrmass)$|^(Nacrsubm)$|^(Recrcora)$|^(Roche)$|^(Sable)$|^(Vase)$", d_ata[, code_species], value = FALSE) if (length(notspline) != 0) { d_ata <- d_ata[-notspline, ] } ## Suppress unused factor levels : d_ata <- .GlobalEnv$drop_levels_f(df = d_ata) ## aggregation of data if not already done : if (nrow(d_ata) > nrow(expand.grid(unique(d_ata[, unitobs]), unique(d_ata[, code_species])))) { d_ata <- agregations_generic_f(d_ata = d_ata, metrics = nombres, factors = c(unitobs, code_species), list_fact = NULL) } df_biodiv <- as.data.frame(as.table(tapply(d_ata[, nombres], d_ata[, unitobs], sum, na.rm = TRUE))) colnames(df_biodiv) <- c(unitobs, nombres) ## ################################################## ## species richness : d_ata$presence_absence <- .GlobalEnv$pres_abs_f(nombres = d_ata[, nombres], logical = FALSE) df_biodiv$species_richness <- as.vector(tapply(d_ata$presence_absence, d_ata[, unitobs], sum, na.rm = TRUE), "integer") ## ... as.vector to avoid the class "array". ## ################################################## ## Simpson, Shannon indexes and derivatives : mat_nb <- tapply(d_ata[, nombres], # Matrix of individual count /species/unitobs. list(d_ata[, unitobs], d_ata[, code_species]), sum, na.rm = TRUE) mat_nb[is.na(mat_nb)] <- 0 # Vrais zéros ## each species individual proportion in the dataset : prop_indiv <- sweep(mat_nb, 1, apply(mat_nb, 1, sum, na.rm = TRUE), # individual count / unitobs ; equiv df_biodiv$nombre. FUN = "/") ## Simpson indexes : df_biodiv$simpson <- apply(prop_indiv^2, 1, sum, na.rm = TRUE) if (any(is.element(c("all", "simpson.l"), indices))) { df_biodiv$simpson_l <- 1 - df_biodiv$simpson } ## Shannon index : df_biodiv$shannon <- -1 * apply(prop_indiv * log(prop_indiv), 1, sum, na.rm = TRUE) ## Pielou index : if (any(is.element(c("all", "pielou"), indices))) { df_biodiv$pielou <- df_biodiv$shannon / log(df_biodiv$species_richness) } ## Hill index : if (any(is.element(c("all", "hill"), indices))) { df_biodiv$hill <- (1 - df_biodiv$simpson) / exp(df_biodiv$shannon) # equiv df_biodiv$l.simpson / exp(df_biodiv$shannon) } return(df_biodiv) } ################# Analysis res <- calc_numbers_f(obs, obs_type = obs_type, factors = factors, nb_name = "number") table_comm_indexes <- calc_biodiv_f(res, unitobs = "observation.unit", code_species = "species.code", nombres = "number", indices = index) table_comm_indexes <- create_year_location(table_comm_indexes) #Save dataframe in a tabular format filename_comm <- "TabCommunityIndexes.tabular" write.table(table_comm_indexes, filename_comm, row.names = FALSE, quote = FALSE, sep = "\t", dec = ".", fileEncoding = "UTF-8") cat(paste("\nWrite table with Community indexes. \n--> \"", filename_comm, "\"\n", sep = ""))
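For reference, these are the formulas implemented in `calc_biodiv_f` above (a transcription of the code, not an external definition), with $p_i$ the proportion of individuals of species $i$ in an observation unit and $S$ the species richness:

$$D = \sum_i p_i^{2}, \qquad D_L = 1 - D, \qquad H = -\sum_i p_i \ln p_i, \qquad J = \frac{H}{\ln S}, \qquad \mathrm{Hill} = \frac{1 - D}{e^{H}}.$$

Here $D$ is the `simpson` column, $D_L$ is `simpson_l`, $H$ is `shannon` (natural log, as in R's `log`), $J$ is `pielou`, and the last quantity is the `hill` column as computed by the script.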
The complex conjugate of a sum is the sum of the complex conjugates.
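In symbols, for $z_1, \dots, z_n \in \mathbb{C}$:

$$\overline{\sum_{k=1}^{n} z_k} = \sum_{k=1}^{n} \overline{z_k},$$

which follows by induction from the two-term case $\overline{z + w} = \overline{z} + \overline{w}$, itself immediate from $\overline{a + bi} = a - bi$.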
open import Agda.Primitive using (_⊔_) import Categories.Category as Category import Categories.Category.Cartesian as Cartesian open import MultiSorted.AlgebraicTheory -- Finite products indexed by contexts module MultiSorted.Product {o ℓ e} (𝒞 : Category.Category o ℓ e) {𝓈 ℴ} {Σ : Signature {𝓈} {ℴ}} (interp-sort : Signature.sort Σ → Category.Category.Obj 𝒞) where open Signature Σ open Category.Category 𝒞 open HomReasoning interp-sort-var : {Γ : Context} → var Γ → Obj interp-sort-var {Γ} x = interp-sort (sort-of Γ x) record Producted : Set (o ⊔ ℓ ⊔ e ⊔ 𝓈) where field prod : Context → Obj π : ∀ {Γ} (x : var Γ) → prod Γ ⇒ interp-sort-var x tuple : ∀ Γ {B} → ((x : var Γ) → B ⇒ interp-sort-var x) → B ⇒ prod Γ project : ∀ {Γ} {B} {x : var Γ} {fs : (y : var Γ) → B ⇒ interp-sort-var y} → π x ∘ tuple Γ fs ≈ fs x unique : ∀ {Γ} {B} {fs : (x : var Γ) → B ⇒ interp-sort-var x} {g : B ⇒ prod Γ} → (∀ i → π i ∘ g ≈ fs i) → tuple Γ fs ≈ g η : ∀ {Γ} → tuple Γ π ≈ id η = unique (λ i → identityʳ) ! : ∀ {B : Obj} → B ⇒ prod ctx-empty ! {B} = tuple ctx-empty ctx-empty-absurd !-unique : ∀ {B : Obj} {f : B ⇒ prod ctx-empty} → ! ≈ f !-unique {f = f} = unique ctx-empty-absurd !-unique₂ : ∀ {B : Obj} {f g : B ⇒ prod ctx-empty} → f ≈ g !-unique₂ = (⟺ !-unique) ○ !-unique tuple-cong : ∀ {B : Obj} {Γ} {fs gs : (x : var Γ) → B ⇒ interp-sort-var x} → (∀ i → fs i ≈ gs i) → tuple Γ fs ≈ tuple Γ gs tuple-cong ξ = unique (λ i → project ○ ⟺ (ξ i)) ∘-distribʳ-tuple : ∀ {B C} {Γ} {fs : (x : var Γ) → B ⇒ interp-sort-var x} {g : C ⇒ B} → tuple Γ (λ x → fs x ∘ g) ≈ tuple Γ fs ∘ g ∘-distribʳ-tuple = unique (λ i → ⟺ assoc ○ ∘-resp-≈ˡ project) -- A cartesian category has a standard products structure (which we need not use) module _ (cartesian-𝒞 : Cartesian.Cartesian 𝒞) where open Cartesian.Cartesian cartesian-𝒞 standard-prod : Context → Obj standard-prod ctx-empty = ⊤ standard-prod (ctx-slot A) = interp-sort A standard-prod (ctx-concat Γ Δ) = standard-prod Γ × standard-prod Δ standard-π : ∀ {Γ} (x : var Γ) → standard-prod Γ ⇒ interp-sort-var x standard-π var-var = id standard-π (var-inl i) = standard-π i ∘ π₁ standard-π (var-inr i) = standard-π i ∘ π₂ standard-tuple : ∀ Γ {B} → ((x : var Γ) → B ⇒ interp-sort-var x) → B ⇒ standard-prod Γ standard-tuple ctx-empty fs = ! standard-tuple (ctx-slot _) fs = fs var-var standard-tuple (ctx-concat Γ Δ) fs = ⟨ standard-tuple Γ (λ i → fs (var-inl i)) , standard-tuple Δ (λ j → fs (var-inr j)) ⟩ standard-project : ∀ {Γ} {B} {x : var Γ} {fs : (x : var Γ) → B ⇒ interp-sort-var x} → standard-π x ∘ standard-tuple Γ fs ≈ fs x standard-project {x = var-var} = identityˡ standard-project {x = var-inl x} = assoc ○ ((∘-resp-≈ʳ project₁) ○ standard-project {x = x}) standard-project {x = var-inr x} = assoc ○ ((∘-resp-≈ʳ project₂) ○ standard-project {x = x}) standard-unique : ∀ {Γ} {B} {fs : (x : var Γ) → B ⇒ interp-sort-var x} {g : B ⇒ standard-prod Γ} → (∀ i → standard-π i ∘ g ≈ fs i) → standard-tuple Γ fs ≈ g standard-unique {ctx-empty} ξ = !-unique _ standard-unique {ctx-slot _} ξ = ⟺ (ξ var-var) ○ identityˡ standard-unique {ctx-concat Γ Δ} {fs = fs} ξ = unique (⟺ (standard-unique (λ x → sym-assoc ○ (ξ (var-inl x))))) (⟺ (standard-unique (λ y → sym-assoc ○ (ξ (var-inr y))))) StandardProducted : Producted StandardProducted = record { prod = standard-prod ; π = standard-π ; tuple = standard-tuple ; project = λ {Γ} → standard-project {Γ} ; unique = standard-unique }
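In notation, the `standard-prod` interpretation of contexts defined above reads (a restatement of the Agda definitions, nothing more):

$$\llbracket \diamond \rrbracket = \top, \qquad \llbracket A \rrbracket = \mathrm{interp\text{-}sort}\,A, \qquad \llbracket \Gamma \mathbin{;} \Delta \rrbracket = \llbracket \Gamma \rrbracket \times \llbracket \Delta \rrbracket,$$

and `standard-π` reaches a variable's slot by composing $\pi_1$/$\pi_2$ along its path through the context tree, with `standard-tuple` built by the dual pairing.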
(* Title: L3_Lib.thy Original author: Anthony Fox, University of Cambridge Contributions by: Kyndylan Nienhuis, University of Cambridge L3 operations. *) theory L3_Lib imports "$ISABELLE_HOME/src/HOL/Word/Word" "$ISABELLE_HOME/src/HOL/Library/Code_Target_Numeral" "$ISABELLE_HOME/src/HOL/Library/Code_Char" begin (* basic state Monad *) definition "return = Pair" definition bind :: "('state \<Rightarrow> ('a \<times> 'state)) \<Rightarrow> ('a \<Rightarrow> 'state \<Rightarrow> ('b \<times> 'state)) \<Rightarrow> ('state \<Rightarrow> ('b \<times> 'state))" where "bind f g = (\<lambda>s. let (a, s') = f s in g a s')" definition read_state :: "('state \<Rightarrow> 'a) \<Rightarrow> 'state \<Rightarrow> 'a \<times> 'state" where "read_state f = (\<lambda>s. (f s, s))" definition update_state :: "('state \<Rightarrow> 'state) \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state" where "update_state f = (\<lambda>s. ((), f s))" definition extend_state :: "'b \<Rightarrow> ('b \<times> 'state \<Rightarrow> 'a \<times> 'b \<times> 'state) \<Rightarrow> 'state \<Rightarrow> 'a \<times> 'state" where "extend_state v f = (\<lambda>s. let (a, s') = f (v, s) in (a, snd s'))" definition trim_state :: "('state \<Rightarrow> 'a \<times> 'state) \<Rightarrow> 'b \<times> 'state \<Rightarrow> 'a \<times> 'b \<times> 'state" where "trim_state f = (\<lambda>(s1, s2). let (a, s') = f s2 in (a, s1, s'))" fun foreach_loop :: "'a list \<times> ('a \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state) \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state" where "foreach_loop ([], _) = return ()" | "foreach_loop (h # t, a) = bind (a h) (\<lambda>u. foreach_loop (t, a))" function for_loop :: "nat \<times> nat \<times> (nat \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state) \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state" where "for_loop (i, j, a) = (if i = j then a i else bind (a i) (\<lambda>u. for_loop ((if i < j then i + 1 else i - 1), j, a)))" by auto termination by (relation "measure (\<lambda>(i, j, _). if i < j then j - i else i - j)") auto (* Because there are no constraints on i, j and a on the left-hand side of the definition, every occurrence of for_loop can be simplified by for_loop.simps, and since the definition is recursive the simplifier might diverge. For this reason we remove for_loop.simps from the simp set. *) declare for_loop.simps [simp del] (* Monad laws *) lemma bind_left_identity [simp]: shows "bind (return a) f = f a" unfolding return_def bind_def by auto lemma bind_right_identity [simp]: shows "bind m return = m" unfolding return_def bind_def by auto lemma bind_associativity: shows "bind (bind m f) g = bind m (\<lambda>a. bind (f a) g)" (is "?l = ?r") proof fix s show "?l s = ?r s" unfolding return_def bind_def by (cases "m s") auto qed (* Projections *) lemma project_return [simp]: shows "fst (return a s) = a" and "snd (return a s) = s" unfolding return_def by auto lemma project_read_state [simp]: shows "fst (read_state f s) = f s" and "snd (read_state f s) = s" unfolding read_state_def by auto lemma project_update_state [simp]: shows "fst (update_state f s) = ()" and "snd (update_state f s) = f s" unfolding update_state_def by auto (* Other monad simplifications *) lemma read_state_constant [simp]: shows "read_state (\<lambda>s. a) = return a" unfolding read_state_def return_def .. lemma update_state_id [simp]: shows "update_state (\<lambda>s. s) = return ()" unfolding update_state_def return_def .. 
lemma foreach_loop_return [simp]: shows "foreach_loop (l, \<lambda>_. return a) = return ()" by (induct l) simp_all lemma extend_state_return [simp]: shows "extend_state v (return a) = return a" unfolding extend_state_def return_def by simp lemma extend_state_trim_state [simp]: shows "extend_state v (trim_state m) = m" (is "?l = ?r") proof fix s show "?l s = ?r s" unfolding extend_state_def trim_state_def by (cases "m s") auto qed (* extra character operations *) definition Ord :: "char \<Rightarrow> nat" where "Ord = nat_of_char" definition Chr :: "nat \<Rightarrow> char" where "Chr = char_of_nat" definition is_lower :: "char \<Rightarrow> bool" where "is_lower c = (Ord (CHR ''a'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''z''))" definition is_upper :: "char \<Rightarrow> bool" where "is_upper c = (Ord (CHR ''A'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''Z''))" definition is_space :: "char \<Rightarrow> bool" where "is_space c = (Ord (CHR '' '') = Ord c \<or> 9 \<le> Ord c \<and> Ord c \<le> 13)" definition is_digit :: "char \<Rightarrow> bool" where "is_digit c = (Ord (CHR ''0'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''9''))" definition is_hex_digit :: "char \<Rightarrow> bool" where "is_hex_digit c = (is_digit c \<or> Ord (CHR ''a'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''f'') \<or> Ord (CHR ''A'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''F''))" definition is_alpha :: "char \<Rightarrow> bool" where "is_alpha c = (is_lower c \<or> is_upper c)" definition is_alpha_num :: "char \<Rightarrow> bool" where "is_alpha_num c = (is_alpha c \<or> is_digit c)" definition to_lower :: "char \<Rightarrow> char" where "to_lower c = (if is_upper c then Chr (Ord c + 32) else c)" definition to_upper :: "char \<Rightarrow> char" where "to_upper c = (if is_lower c then Chr (Ord c - 32) else c)" (* numeric strings *) fun list_to_nat :: "nat \<Rightarrow> nat list \<Rightarrow> nat" where "list_to_nat _ [] = 0" | "list_to_nat base (h # t) = h mod base + base * list_to_nat base t" fun nat_to_list :: "nat \<Rightarrow> nat \<Rightarrow> nat list" where "nat_to_list base n = (if n < base \<or> base < 2 then [n mod base] else n mod base # nat_to_list base (n div base))" (* Because there are no constraints on n on the left-hand side of the definition, every occurrence of nat_to_list can be simplified by nat_to_list.simps, and since the definition is recursive the simplifier might diverge. For this reason we remove nat_to_list.simps from the simp set. 
*) declare nat_to_list.simps [simp del] definition hex :: "nat \<Rightarrow> char" where "hex n = (if n = 0 then CHR ''0'' else if n = 1 then CHR ''1'' else if n = 2 then CHR ''2'' else if n = 3 then CHR ''3'' else if n = 4 then CHR ''4'' else if n = 5 then CHR ''5'' else if n = 6 then CHR ''6'' else if n = 7 then CHR ''7'' else if n = 8 then CHR ''8'' else if n = 9 then CHR ''9'' else if n = 10 then CHR ''A'' else if n = 11 then CHR ''B'' else if n = 12 then CHR ''C'' else if n = 13 then CHR ''D'' else if n = 14 then CHR ''E'' else if n = 15 then CHR ''F'' else undefined)" definition unhex :: "char \<Rightarrow> nat" where "unhex c = (if c = CHR ''0'' then 0 else if c = CHR ''1'' then 1 else if c = CHR ''2'' then 2 else if c = CHR ''3'' then 3 else if c = CHR ''4'' then 4 else if c = CHR ''5'' then 5 else if c = CHR ''6'' then 6 else if c = CHR ''7'' then 7 else if c = CHR ''8'' then 8 else if c = CHR ''9'' then 9 else if c = CHR ''a'' \<or> c = CHR ''A'' then 10 else if c = CHR ''b'' \<or> c = CHR ''B'' then 11 else if c = CHR ''c'' \<or> c = CHR ''C'' then 12 else if c = CHR ''d'' \<or> c = CHR ''D'' then 13 else if c = CHR ''e'' \<or> c = CHR ''E'' then 14 else if c = CHR ''f'' \<or> c = CHR ''F'' then 15 else undefined)" definition string_to_nat :: "nat \<Rightarrow> string \<Rightarrow> nat" where "string_to_nat base s = list_to_nat base (map unhex (rev s))" definition nat_to_string :: "nat \<Rightarrow> nat \<Rightarrow> string" where "nat_to_string base n = rev (map hex (nat_to_list base n))" definition "bin_string_to_nat \<equiv> string_to_nat 2" definition "nat_to_bin_string \<equiv> nat_to_string 2" definition "dec_string_to_nat \<equiv> string_to_nat 10" definition "nat_to_dec_string \<equiv> nat_to_string 10" definition "hex_string_to_nat \<equiv> string_to_nat 16" definition "nat_to_hex_string \<equiv> nat_to_string 16" definition nat_from_bin_string :: "string \<Rightarrow> nat option" where "nat_from_bin_string s = (if s \<noteq> '''' \<and> list_all (\<lambda>c. 
c = CHR ''0'' \<or> c = CHR ''1'') s then Some (bin_string_to_nat s) else None)" definition nat_from_dec_string :: "string \<Rightarrow> nat option" where "nat_from_dec_string s = (if s \<noteq> '''' \<and> list_all is_digit s then Some (dec_string_to_nat s) else None)" definition nat_from_hex_string :: "string \<Rightarrow> nat option" where "nat_from_hex_string s = (if s \<noteq> '''' \<and> list_all is_hex_digit s then Some (hex_string_to_nat s) else None)" definition dec_string_to_int :: "string \<Rightarrow> int" where "dec_string_to_int r = (case r of [] \<Rightarrow> 0 | h # t \<Rightarrow> (if h = CHR ''-'' \<or> h = CHR ''~'' then -int (dec_string_to_nat t) else int (dec_string_to_nat r)))" definition int_to_dec_string :: "int \<Rightarrow> string" where "int_to_dec_string i = (if i < 0 then CHR ''~'' # nat_to_dec_string (nat (-i)) else nat_to_dec_string (nat i))" definition string_to_bool :: "string \<Rightarrow> bool" where "string_to_bool s = (if s = ''true'' then True else if s = ''false'' then False else undefined)" definition string_to_char :: "string \<Rightarrow> char" where "string_to_char s = (case s of [c] \<Rightarrow> c | _ \<Rightarrow> undefined)" (* extra Nat operation *) fun log2 :: "nat \<Rightarrow> nat" where "log2 n = (if n = 0 then undefined else if n = 1 then 0 else Suc (log2 (n div 2)))" (* Because there are no constraints on n on the left-hand side of the definition, every occurrence of log2 can be simplified by log2.simps, and since the definition is recursive the simplifier might diverge. For this reason we remove log2.simps from the simp set. *) declare log2.simps [simp del] lemma log2_bounds: assumes "n \<noteq> 0" shows "2 ^ (log2 n) \<le> n" and "n < 2 ^ (Suc (log2 n))" proof - -- "The induction works better if we prove one goal instead of two goals" have "2 ^ (log2 n) \<le> n \<and> n < 2 ^ (Suc (log2 n))" using assms proof (induct "log2 n" arbitrary: n) case 0 hence "n = 1" by (simp add: log2.simps) (meson nat.simps(3)) thus ?case by (simp add: log2.simps) next case (Suc k) show ?case proof (cases "n = 1") case True thus ?thesis by (simp add: log2.simps) next case False hence "1 < n" using Suc(3) by simp hence "(n div 2) \<noteq> 0" by auto have log2: "log2 n = Suc (log2 (n div 2))" using `1 < n` by (simp add: log2.simps) hence "k = log2 (n div 2)" using Suc(2) by simp note Suc(1)[OF this `(n div 2) \<noteq> 0`] thus ?thesis using log2 by auto qed qed thus "2 ^ (log2 n) \<le> n" "n < 2 ^ (Suc (log2 n))" by auto qed lemma log2_unat_bounds: fixes x :: "('a :: len) word" assumes "x \<noteq> 0" shows "log2 (unat x) < len_of TYPE('a)" proof - have "unat x \<noteq> 0" using assms by (simp add: unat_eq_zero) have "unat x < 2 ^ len_of TYPE('a)" by simp note le_less_trans[OF log2_bounds(1)[OF `unat x \<noteq> 0`] this] thus ?thesis by auto qed (* extra int operations *) definition quot :: "int \<Rightarrow> int \<Rightarrow> int" (infixl "quot" 70) where "i quot j = (if j = 0 then undefined else if 0 < j then if 0 \<le> i then i div j else -(-i div j) else if 0 \<le> i then -(i div -j) else -i div -j)" definition rem :: "int \<Rightarrow> int \<Rightarrow> int" (infixl "rem" 70) where "i rem j = (if j = 0 then undefined else i - i quot j * j)" definition quot_rem :: "int * int \<Rightarrow> int * int" where "quot_rem p = (case p of (i, j) \<Rightarrow> (i div j, i rem j))" (* extra option operations *) definition is_some :: "'a option \<Rightarrow> bool" where "is_some x = (case x of Some _ \<Rightarrow> True | _ \<Rightarrow> False)" lemma is_some_alt: 
shows "is_some x = (x \<noteq> None)" unfolding is_some_def using option.disc_eq_case(2) by auto lemma is_some_simps [simp]: shows "\<not> (is_some None)" and "is_some (Some x)" unfolding is_some_def by simp_all (* extra list operations *) fun splitl :: "('a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<times> 'a list" where "splitl _ [] = ([], [])" | "splitl P (h # t) = (if P h then let (l, r) = splitl P t in (h # l, r) else ([], h # t))" definition splitr :: "('a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<times> 'a list" where "splitr P x = (let (l, r) = splitl P (rev x) in (rev r, rev l))" definition pad_left :: "'a \<Rightarrow> nat \<Rightarrow> 'a list \<Rightarrow> 'a list" where "pad_left c n s = replicate (n - length s) c @ s" definition pad_right :: "'a \<Rightarrow> nat \<Rightarrow> 'a list \<Rightarrow> 'a list" where "pad_right c n s = s @ replicate (n - length s) c" fun index_find :: "nat \<Rightarrow> 'a \<times> 'a list \<Rightarrow> nat option" where "index_find _ (_, []) = None" | "index_find i (v, h # t) = (if v = h then Some i else index_find (Suc i) (v, t))" definition "index_of = index_find 0" definition remove :: "'a list * 'a list \<Rightarrow> 'a list" where "remove p = (case p of (l1, l2) \<Rightarrow> filter (\<lambda>x. x \<notin> set l1) l2)" definition remove_except :: "'a list * 'a list \<Rightarrow> 'a list" where "remove_except p = (case p of (l1, l2) \<Rightarrow> filter (\<lambda>x. x \<in> set l1) l2)" fun remove_duplicates :: "'a list \<Rightarrow> 'a list" where "remove_duplicates [] = []" | "remove_duplicates (h # t) = (if h \<in> set t then remove_duplicates t else h # remove_duplicates t)" lemma splitl_length: shows "length (fst (splitl P l)) + length (snd (splitl P l)) = length l" by (induct l, auto simp add: case_prod_beta) lemma splitl_fst_length [simp]: shows "length (fst (splitl P x)) \<le> length x" using splitl_length by (metis order_refl trans_le_add1) lemma splitl_snd_length [simp]: shows "length (snd (splitl P x)) \<le> length x" using splitl_length by (metis order_refl trans_le_add2) lemma pad_left_length [simp]: shows "length (pad_left e n l) = max (length l) n" unfolding pad_left_def by auto lemma pad_right_length [simp]: shows "length (pad_right e n l) = max (length l) n" unfolding pad_right_def by auto lemma pad_left_nth: shows "pad_left e n l ! m = (if m < n - List.length l then e else l ! (m - (n - List.length l)))" unfolding pad_left_def nth_append by simp (* extra string operations *) lemma fields_termination_lem [simp]: assumes "a \<noteq> []" and "length a \<le> length c" shows "length a - b < Suc (length c)" by (simp add: assms(2) le_imp_less_Suc less_imp_diff_less) function (sequential) tokens :: "(char \<Rightarrow> bool) \<Rightarrow> string \<Rightarrow> string list" where "tokens _ '''' = []" | "tokens P x = (let (l, r) = splitl (\<lambda>e. ~P e) x in if l = [] then tokens P (tl r) else l # tokens P r)" by pat_completeness auto termination tokens apply (relation "measure (length o snd)") apply auto apply (case_tac "~ P v", auto simp add: case_prod_beta le_imp_less_Suc) apply (case_tac "~ P v", auto simp add: case_prod_beta le_imp_less_Suc) done function (sequential) fields :: "(char \<Rightarrow> bool) \<Rightarrow> string \<Rightarrow> string list" where "fields _ '''' = [[]]" | "fields P x = (let (l, r) = splitl (\<lambda>e. 
~P e) x in if l = [] then [] # fields P (tl r) else if r = [] then [l] else l # fields P (tl r))" by pat_completeness auto termination fields apply (relation "measure (length o snd)") apply auto apply (case_tac "~ P v", auto simp add: case_prod_beta le_imp_less_Suc) apply (case_tac "~ P v", auto simp add: case_prod_beta) done (* bit-string operations - extends Bool_List_Representation.thy *) definition nat_to_bitstring :: "nat \<Rightarrow> bool list" where "nat_to_bitstring n = (if n = 0 then [False] else bin_to_bl (log2 n + 1) (int n))" definition "bitstring_to_nat = nat o bl_to_bin" definition fixwidth :: "nat \<Rightarrow> bool list \<Rightarrow> bool list" where "fixwidth n v = (let l = length v in if l < n then pad_left False n v else drop (l - n) v)" definition bitwise :: "(bool \<Rightarrow> bool \<Rightarrow> bool) \<Rightarrow> bool list \<Rightarrow> bool list \<Rightarrow> bool list" where "bitwise f v1 v2 = (let m = max (length v1) (length v2) in map (case_prod f) (zip (fixwidth m v1) (fixwidth m v2)))" definition "bor = bitwise (op \<or>)" definition "band = bitwise (op \<and>)" definition "bxor = bitwise (op \<noteq>)" definition bitstring_shiftl :: "bool list \<Rightarrow> nat \<Rightarrow> bool list" where "bitstring_shiftl v m = pad_right False (length v + m) v" definition bitstring_shiftr :: "bool list \<Rightarrow> nat \<Rightarrow> bool list" where "bitstring_shiftr v m = take (length v - m) v" definition bitstring_field :: "nat \<Rightarrow> nat \<Rightarrow> bool list \<Rightarrow> bool list" where "bitstring_field h l v = fixwidth (Suc h - l) (bitstring_shiftr v l)" definition bitstring_rotate :: "bool list \<Rightarrow> nat \<Rightarrow> bool list" where "bitstring_rotate v m = (let l = length v in let x = m mod l in if l = 0 \<or> x = 0 then v else bitstring_field (x - 1) 0 v @ bitstring_field (l - 1) x v)" definition bitstring_test_bit :: "bool list \<Rightarrow> nat \<Rightarrow> bool" where "bitstring_test_bit v n = (bitstring_field n n v = [True])" definition bitstring_modify :: "(nat \<times> bool \<Rightarrow> bool) \<times> bool list \<Rightarrow> bool list" where "bitstring_modify p = (case p of (f, l) \<Rightarrow> map f (zip (rev (upt 0 (length l))) l))" definition bitstring_field_insert :: "nat \<Rightarrow> nat \<Rightarrow> bool list \<Rightarrow> bool list \<Rightarrow> bool list" where "bitstring_field_insert h l v1 v2 = bitstring_modify (\<lambda>(i, b). if l \<le> i \<and> i \<le> h then bitstring_test_bit v1 (i - l) else b, v2)" lemma nat_to_bitstring_zero [simp]: shows "nat_to_bitstring 0 = [False]" unfolding nat_to_bitstring_def by simp (* We do not add the following rule to the simp set, because n occurs twice at the right hand side, and therefore the state might not become simpler when applying this rule. 
*) lemma nat_to_bitstring_length: shows "length (nat_to_bitstring n) = (if n = 0 then 1 else log2 n + 1)" unfolding nat_to_bitstring_def by (simp del: bin_to_bl_def) lemma fixwidth_length [simp]: shows "length (fixwidth n l) = n" unfolding fixwidth_def Let_def by auto lemma bitwise_length [simp]: shows "length (bitwise f v1 v2) = max (length v1) (length v2)" unfolding bitwise_def Let_def by auto (* extra word operations *) definition unsigned_min :: "'a::len word \<times> 'a::len word \<Rightarrow> 'a::len word" where "unsigned_min p = (case p of (w1, w2) \<Rightarrow> (if w1 \<le> w2 then w1 else w2))" definition unsigned_max :: "'a::len word \<times> 'a::len word \<Rightarrow> 'a::len word" where "unsigned_max p = (case p of (w1, w2) \<Rightarrow> (if w1 \<le> w2 then w2 else w1))" definition word_log2 :: "'a::len word \<Rightarrow> 'a::len word" where "word_log2 w = of_nat (log2 (unat w))" definition word_quot :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where "word_quot i j = of_int (sint i quot sint j)" definition word_rem :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where "word_rem i j = of_int (sint i rem sint j)" definition word_sdiv :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where "word_sdiv i j = of_int (sint i div sint j)" definition word_smod :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where "word_smod i j = of_int (sint i mod sint j)" definition word_modify :: "(nat \<times> bool \<Rightarrow> bool) \<times> 'a::len word \<Rightarrow> 'a::len word" where "word_modify p = (case p of (f, w) \<Rightarrow> of_bl (bitstring_modify (f, to_bl w)))" definition word_bit_field_insert :: "nat \<Rightarrow> nat \<Rightarrow> 'a::len word \<Rightarrow> 'b::len word \<Rightarrow> 'b::len word" where "word_bit_field_insert h l w1 w2 = word_modify (\<lambda>(i, b). 
if l \<le> i \<and> i \<le> h then test_bit w1 (i - l) else b, w2)" definition word_bits :: "nat \<Rightarrow> nat \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where "word_bits h l w = (w >> l) AND mask (Suc h - l)" definition word_extract :: "nat \<Rightarrow> nat \<Rightarrow> 'a::len word \<Rightarrow> 'b::len word" where "word_extract h l w = ucast (word_bits h l w)" definition word_replicate :: "nat \<Rightarrow> 'a::len word \<Rightarrow> 'b::len word" where "word_replicate n a = word_rcat (replicate n a)" (* floating-point stubs *) datatype ieee_rounding = roundTiesToEven | roundTowardPositive | roundTowardNegative | roundTowardZero datatype ieee_compare = LT | EQ | GT | UN record ieee_flags = DivideByZero :: bool InvalidOp :: bool Overflow :: bool Precision :: bool Underflow :: bool consts fp32_abs :: "32 word \<Rightarrow> 32 word" fp32_add :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word" fp32_add_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word" fp32_compare :: "32 word \<Rightarrow> 32 word \<Rightarrow> ieee_compare" fp32_div :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word" fp32_div_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word" fp32_equal :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool" fp32_from_int :: "ieee_rounding \<Rightarrow> int \<Rightarrow> 32 word" fp32_greater :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool" fp32_greater_equal :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool" fp32_is_integral :: "32 word \<Rightarrow> bool" fp32_is_finite :: "32 word \<Rightarrow> bool" fp32_is_nan :: "32 word \<Rightarrow> bool" fp32_is_normal :: "32 word \<Rightarrow> bool" fp32_is_signalling_nan :: "32 word \<Rightarrow> bool" fp32_is_subnormal :: "32 word \<Rightarrow> bool" fp32_less :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool" fp32_less_equal :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool" fp32_mul :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word" fp32_mul_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word" fp32_mul_add :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word" fp32_mul_add_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word" fp32_mul_sub :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word" fp32_mul_sub_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word" fp32_neg_inf :: "32 word" fp32_neg_max :: "32 word" fp32_neg_min :: "32 word" fp32_neg_zero :: "32 word" fp32_negate :: "32 word \<Rightarrow> 32 word" fp32_pos_inf :: "32 word" fp32_pos_max :: "32 word" fp32_pos_min :: "32 word" fp32_pos_zero :: "32 word" fp32_round_to_integral :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word" fp32_snan :: "32 word" fp32_sqrt :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word" fp32_sqrt_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word" fp32_sub :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word" fp32_sub_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 
word \<Rightarrow> ieee_flags \<times> 32 word" fp32_qnan :: "32 word" fp32_to_int :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> int option" consts fp64_abs :: "64 word \<Rightarrow> 64 word" fp64_add :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word" fp64_add_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word" fp64_compare :: "64 word \<Rightarrow> 64 word \<Rightarrow> ieee_compare" fp64_div :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word" fp64_div_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word" fp64_equal :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool" fp64_from_int :: "ieee_rounding \<Rightarrow> int \<Rightarrow> 64 word" fp64_greater :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool" fp64_greater_equal :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool" fp64_is_integral :: "64 word \<Rightarrow> bool" fp64_is_finite :: "64 word \<Rightarrow> bool" fp64_is_nan :: "64 word \<Rightarrow> bool" fp64_is_normal :: "64 word \<Rightarrow> bool" fp64_is_signalling_nan :: "64 word \<Rightarrow> bool" fp64_is_subnormal :: "64 word \<Rightarrow> bool" fp64_less :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool" fp64_less_equal :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool" fp64_mul :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word" fp64_mul_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word" fp64_mul_add :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word" fp64_mul_add_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word" fp64_mul_sub :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word" fp64_mul_sub_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word" fp64_neg_inf :: "64 word" fp64_neg_min :: "64 word" fp64_neg_max :: "64 word" fp64_neg_zero :: "64 word" fp64_negate :: "64 word \<Rightarrow> 64 word" fp64_pos_inf :: "64 word" fp64_pos_min :: "64 word" fp64_pos_max :: "64 word" fp64_pos_zero :: "64 word" fp64_round_to_integral :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word" fp64_snan :: "64 word" fp64_sqrt :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word" fp64_sqrt_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word" fp64_sub :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word" fp64_sub_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word" fp64_qnan :: "64 word" fp64_to_int :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> int option" consts fp32_to_fp64 :: "32 word \<Rightarrow> 64 word" fp64_to_fp32 :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 32 word" code_printing constant "fp32_abs" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_abs\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_abs\")" and (Haskell) "!(\\ '_ -> error \"fp32'_abs\")" | constant "fp32_add" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_add\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_add\")" and (Haskell) "!(\\ '_ '_ '_ 
-> error \"fp32'_add\")" | constant "fp32_add_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_add'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_add'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_add'_with'_flag\")" | constant "fp32_compare" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_compare\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_compare\")" and (Haskell) "!(\\ '_ '_ -> error \"fp32'_compare\")" | constant "fp32_div" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_div\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_div\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_div\")" | constant "fp32_div_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_div'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_div'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_div'_with'_flag\")" | constant "fp32_equal" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_equal\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_equal\")" and (Haskell) "!(\\ '_ '_ -> error \"fp32'_equal\")" | constant "fp32_from_int" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_from'_int\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_from'_int\")" and (Haskell) "!(\\ '_ '_ -> error \"fp32'_from'_int\")" | constant "fp32_greater" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_greater\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_greater\")" and (Haskell) "!(\\ '_ '_ -> error \"fp32'_greater\")" | constant "fp32_greater_equal" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_greater'_equal\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_greater'_equal\")" and (Haskell) "!(\\ '_ '_ -> error \"fp32'_greater'_equal\")" | constant "fp32_is_integral" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_is'_integral\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_integral\")" and (Haskell) "!(\\ '_ -> error \"fp32'_is'_intagral\")" | constant "fp32_is_finite" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_is'_finite\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_finite\")" and (Haskell) "!(\\ '_ -> error \"fp32'_is'_finite\")" | constant "fp32_is_nan" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_is'_nan\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_nan\")" and (Haskell) "!(\\ '_ -> error \"fp32'_is'_nan\")" | constant "fp32_is_normal" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_is'_normal\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_normal\")" and (Haskell) "!(\\ '_ -> error \"fp32'_is'_normal\")" | constant "fp32_is_signalling_nan" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_is'_signalling'_nan\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_signalling'_nan\")" and (Haskell) "!(\\ '_ -> error \"fp32'_is'_signalling'_nan\")" | constant "fp32_is_subnormal" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_is'_subnormal\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_subnormal\")" and (Haskell) "!(\\ '_ -> error \"fp32'_is'_subnormal\")" | constant "fp32_less" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_less\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_less\")" and (Haskell) "!(\\ '_ '_ -> error \"fp32'_less\")" | constant "fp32_less_equal" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_less'_equal\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_less'_equal\")" and (Haskell) "!(\\ '_ 
'_ -> error \"fp32'_less'_equal\")" | constant "fp32_mul" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_mul\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_mul\")" | constant "fp32_mul_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_mul'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_mul'_with'_flag\")" | constant "fp32_mul_add" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_add\")" and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp32'_mul'_add\")" and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp32'_mul'_add\")" | constant "fp32_mul_add_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_add'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_mul'_add'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_mul'_add'_with'_flag\")" | constant "fp32_mul_sub" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_sub\")" and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp32'_mul'_sub\")" and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp32'_mul'_sub\")" | constant "fp32_mul_sub_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_sub'_add'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_mul'_sub'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_mul'_sub'_with'_flag\")" | constant "fp32_neg_inf" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_neg'_inf\")" and (OCaml) "!(failwith \"fp32'_neg'_inf\")" and (Haskell) "!(error \"fp32'_neg'_inf\")" | constant "fp32_neg_min" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_neg'_min\")" and (OCaml) "!(failwith \"fp32'_neg'_min\")" and (Haskell) "!(error \"fp32'_neg'_min\")" | constant "fp32_neg_max" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_neg'_max\")" and (OCaml) "!(failwith \"fp32'_neg'_max\")" and (Haskell) "!(error \"fp32'_neg'_min\")" | constant "fp32_neg_zero" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_neg'_zero\")" and (OCaml) "!(failwith \"fp32'_neg'_zero\")" and (Haskell) "!(error \"fp32'_neg'_zero\")" | constant "fp32_negate" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_negate\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_negate\")" and (Haskell) "!(\\ '_ -> error \"fp32'_negate\")" | constant "fp32_pos_inf" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_pos'_inf\")" and (OCaml) "!(failwith \"fp32'_pos'_inf\")" and (Haskell) "!(error \"fp32'_pos'_inf\")" | constant "fp32_pos_min" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_pos'_min\")" and (OCaml) "!(failwith \"fp32'_pos'_min\")" and (Haskell) "!(error \"fp32'_pos'_min\")" | constant "fp32_pos_max" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_pos'_max\")" and (OCaml) "!(failwith \"fp32'_pos'_max\")" and (Haskell) "!(error \"fp32'_pos'_max\")" | constant "fp32_pos_zero" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_pos'_zero\")" and (OCaml) "!(failwith \"fp32'_pos'_zero\")" and (Haskell) "!(error \"fp32'_pos'_zero\")" | constant "fp32_snan" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_snan\")" and (OCaml) "!(failwith \"fp32'_snan\")" and (Haskell) "!(error \"fp32'_snan\")" | constant "fp32_round_to_integral" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_round'_to'_integral\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_round'_to'_integral\")" and (Haskell) "!(\\ '_ '_ -> error 
\"fp32'_to'_integral\")" | constant "fp32_sqrt" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_sqrt\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_sqrt\")" and (Haskell) "!(\\ '_ '_ -> error \"fp32'_sqrt\")" | constant "fp32_sqrt_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_sqrt'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_sqrt'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_sqrt'_with'_flag\")" | constant "fp32_sub" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_sub\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_sub\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_sub\")" | constant "fp32_sub_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_sub'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_sub'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_sub'_with'_flag\")" | constant "fp32_qnan" \<rightharpoonup> (SML) "!(raise Fail \"fp32'_qnan\")" and (OCaml) "!(failwith \"fp32'_qnan\")" and (Haskell) "!(error \"fp32'_qnan\")" | constant "fp32_to_int" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_to'_int\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_to'_int\")" and (Haskell) "!(\\ '_ '_ -> error \"fp32'_to'_int\")" | constant "fp64_abs" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp64'_abs\")" and (OCaml) "!(fun '_ -> failwith \"fp64'_abs\")" and (Haskell) "!(\\ '_ -> error \"fp64'_abs\")" | constant "fp64_add" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_add\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_add\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_add\")" | constant "fp64_add_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_add'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_add'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_add'_with'_flag\")" | constant "fp64_compare" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_compare\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_compare\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_compare\")" | constant "fp64_div" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_div\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_div\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_div\")" | constant "fp64_div_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_div'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_div'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_div'_with'_flag\")" | constant "fp64_equal" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_equal\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_equal\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_equal\")" | constant "fp64_from_int" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_from'_int\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_from'_int\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_from'_int\")" | constant "fp64_greater" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_greater\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_greater\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_greater\")" | constant "fp64_greater_equal" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_greater'_equal\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_greater'_equal\")" and (Haskell) "!(\\ '_ '_ -> error 
\"fp64'_greater'_equal\")" | constant "fp64_is_integral" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp64'_is'_integral\")" and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_integral\")" and (Haskell) "!(\\ '_ -> error \"fp64'_is'_intagral\")" | constant "fp64_is_finite" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp64'_is'_finite\")" and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_finite\")" and (Haskell) "!(\\ '_ -> error \"fp64'_is'_finite\")" | constant "fp64_is_nan" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp64'_is'_nan\")" and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_nan\")" and (Haskell) "!(\\ '_ -> error \"fp64'_is'_nan\")" | constant "fp64_is_normal" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp64'_is'_normal\")" and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_normal\")" and (Haskell) "!(\\ '_ -> error \"fp64'_is'_normal\")" | constant "fp64_is_signalling_nan" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp64'_is'_signalling'_nan\")" and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_signalling'_nan\")" and (Haskell) "!(\\ '_ -> error \"fp64'_is'_signalling'_nan\")" | constant "fp64_is_subnormal" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp64'_is'_subnormal\")" and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_subnormal\")" and (Haskell) "!(\\ '_ -> error \"fp64'_is'_subnormal\")" | constant "fp64_less" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_less\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_less\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_less\")" | constant "fp64_less_equal" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_less'_equal\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_less'_equal\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_less'_equal\")" | constant "fp64_mul" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_mul\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_mul\")" | constant "fp64_mul_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_mul'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_mul'_with'_flag\")" | constant "fp64_mul_add" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_add\")" and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp64'_mul'_add\")" and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp64'_mul'_add\")" | constant "fp64_mul_add_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_add'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_mul'_add'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_mul'_add'_with'_flag\")" | constant "fp64_mul_sub" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_sub\")" and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp64'_mul'_sub\")" and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp64'_mul'_sub\")" | constant "fp64_mul_sub_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_sub'_add'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_mul'_sub'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_mul'_sub'_with'_flag\")" | constant "fp64_neg_inf" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_neg'_inf\")" and (OCaml) "!(failwith \"fp64'_neg'_inf\")" and (Haskell) "!(error \"fp64'_neg'_inf\")" | constant "fp64_neg_min" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_neg'_min\")" and 
(OCaml) "!(failwith \"fp64'_neg'_min\")" and (Haskell) "!(error \"fp64'_neg'_min\")" | constant "fp64_neg_max" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_neg'_max\")" and (OCaml) "!(failwith \"fp64'_neg'_max\")" and (Haskell) "!(error \"fp64'_neg'_max\")" | constant "fp64_neg_zero" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_neg'_zero\")" and (OCaml) "!(failwith \"fp64'_neg'_zero\")" and (Haskell) "!(error \"fp64'_neg'_zero\")" | constant "fp64_negate" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp64'_negate\")" and (OCaml) "!(fun '_ -> failwith \"fp64'_negate\")" and (Haskell) "!(\\ '_ -> error \"fp64'_negate\")" | constant "fp64_pos_inf" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_pos'_inf\")" and (OCaml) "!(failwith \"fp64'_pos'_inf\")" and (Haskell) "!(error \"fp64'_pos'_inf\")" | constant "fp64_pos_min" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_pos'_min\")" and (OCaml) "!(failwith \"fp64'_pos'_min\")" and (Haskell) "!(error \"fp64'_pos'_min\")" | constant "fp64_pos_max" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_pos'_max\")" and (OCaml) "!(failwith \"fp64'_pos'_max\")" and (Haskell) "!(error \"fp64'_pos'_max\")" | constant "fp64_pos_zero" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_pos'_zero\")" and (OCaml) "!(failwith \"fp64'_pos'_zero\")" and (Haskell) "!(error \"fp64'_pos'_zero\")" | constant "fp64_snan" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_snan\")" and (OCaml) "!(failwith \"fp64'_snan\")" and (Haskell) "!(error \"fp64'_snan\")" | constant "fp64_round_to_integral" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_round'_to'_integral\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_round'_to'_integral\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_to'_integral\")" | constant "fp64_sqrt" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_sqrt\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_sqrt\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_sqrt\")" | constant "fp64_sqrt_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_sqrt'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_sqrt'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_sqrt'_with'_flag\")" | constant "fp64_qnan" \<rightharpoonup> (SML) "!(raise Fail \"fp64'_qnan\")" and (OCaml) "!(failwith \"fp64'_qnan\")" and (Haskell) "!(error \"fp64'_qnan\")" | constant "fp64_sub" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_sub\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_sub\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_sub\")" | constant "fp64_sub_with_flags" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_sub'_with'_flags\")" and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_sub'_with'_flag\")" and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_sub'_with'_flag\")" | constant "fp64_to_int" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_to'_int\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_to'_int\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_to'_int\")" | constant "fp32_to_fp64" \<rightharpoonup> (SML) "!(fn '_ => raise Fail \"fp32'_to'_fp64\")" and (OCaml) "!(fun '_ -> failwith \"fp32'_to'_fp64\")" and (Haskell) "!(\\ '_ -> error \"fp32'_to'_fp64\")" | constant "fp64_to_fp32" \<rightharpoonup> (SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_to'_fp32\")" and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_to'_fp32\")" and (Haskell) "!(\\ '_ '_ -> error \"fp64'_to'_fp32\")" end
\documentclass{article}
% amsmath package, useful for mathematical formulas
\usepackage{amsmath}
% amssymb package, useful for mathematical symbols
\usepackage{amssymb}
% gensymb package, useful for general symbols, such as degrees celsius
\usepackage{gensymb}
% graphicx package, useful for including eps and pdf graphics
% include graphics with the command \includegraphics
\usepackage{graphicx}
\usepackage{rotating}
\usepackage[margin=2cm]{geometry}
\usepackage{xspace}
% xcolor package, required by the \textcolor used in the \tb macro below
\usepackage{xcolor}

%% EW macros
\newcommand{\mul}{\ensuremath{\mu}L\xspace}
\newcommand{\mug}{\ensuremath{\mu}g\xspace}
\newcommand{\mum}{\ensuremath{\mu}m\xspace}
\newcommand{\degC}{\celsius\xspace}
\newcommand{\tb}[1]{\textcolor{blue}{#1}}
%\newcounter{PExCounter}

% enumitem allows continuous numbering across split lists.
\usepackage{enumitem}

\begin{document}

\title{RNA content of RNA-protein assemblies in yeast}
\author{Edward Wallace, [email protected]}
\date{\today}
\maketitle

\section*{Introduction}

The aim is to find out which RNAs are sequestered in RNA-protein assemblies, by extracting both supernatant and pellet of 100,000g fractionated lysate.
%The pellet fraction of heat-shocked cells is enriched for stress granules.

%\subsection*{Overview}

\subsection*{Prepare}

\begin{itemize}
\item soluble RNA buffer (SRB; 20mM HEPES-KOH pH7.4, 120mM KCl, 2mM EDTA, 0.2mM DTT, 1:100 Protease Inhibitors cocktail IV, 1:100 RNAsin plus.) Make stock of salt and buffer, then chill and add DTT and inhibitors immediately before use.
\item insoluble RNA buffer (IRB; 20mM HEPES-KOH pH7.4, 120mM KCl, 10mM EDTA, 2\% Sarkosyl, 2mM DTT, 1:100 Protease Inhibitors cocktail IV, 1:100 RNAsin plus.).
% \item
\item 10X soluble additive buffer (SAB; 20\% Sarkosyl, 100mM EDTA).
% Sarkosyl is N-lauryl-sarcosine, sigma
\item Prepare appropriate numbers of safe-lok tubes (2X number of samples) loaded with 7mm steel balls, racked in LN, labeled on sides and top. Also, labeled tubes for all steps of protocol.
\item Waterbath at desired temperature, ice, chilled centrifuge rotors, etc.
\end{itemize}

\textbf{CAUTION:} Phenol and Phenol:Chloroform are extremely dangerous, causing chemical burns, and must be handled in a fume hood.

\subsection*{Sample growth and lysis}

\begin{enumerate}[resume]
\item Grow up several cultures of yeast (BY4741) to $4 \times 10^6$ cells/mL ($OD_{600} \approx 0.4$) at 30\degC, in 100ml media in a 250ml flask.
%
\item Transfer $2 \times 10^8$ cells to a 50mL conical tube (50 mL of a $4 \times 10^6$ cells/mL culture).
\item Spin at 2500g for 30s. Gently decant and discard supernatant.
%\item For heat shock treatment, add 50ml of pre-warmed media (for treatment) or 30\degC media (control) to tube and shake to resuspend culture. Incubate for desired time (0,2,4,or 8mins). Spin at 3000g for 30s and discard supernatant.
\item For short heat shock treatment, hold tube with pellet in waterbath at desired temperature for desired time.
\item Resuspend pellet in 1 ml ice-cold SRB, on ice. Transfer to 1.5mL tube, centrifuge 1min, 5,000g, 4\degC. Discard supernatant.
\item Resuspend pellet in 150 \mul ice-cold SRB, on ice.
\item Drip $2 \times 100\mul$ of resuspended pellet onto upper wall of 2 tubes containing steel ball. 1 tube will be used for fractionation, the other for total RNA extraction. Goal is to get a nugget of frozen material on the wall, and to avoid dripping the material around the ball and thus freezing the ball to the bottom of the tube; having some LN2 remaining in the tube helps.
\item Place tubes at -80\degC; when all LN2 has boiled out of tube (listen -- if any popping or hissing, keep waiting), snap tube closed carefully, away from other tubes. Keep in LN2. (Any remaining LN2 in tube will cause tube to explode open and fire the stainless steel ball into your iPad, brain, colleague, or other important equipment.)
\item Rack the tube into the PTFE 2mL tube adaptor for the Retsch Mixer Mill MM400 (Retsch \#22.008.0005) and submerge the entire assembly in LN2. Agitate for $6 \times 90$ seconds at 30 Hz in a Retsch Mixer Mill MM400, returning sample holder to LN2 between sessions. Complete lysis produces fine snowy powder in the tube.
\item Remove sample tubes from LN2 and pop the caps to relieve pressure. Add 400 \mul SRB to each tube, and extract ball with a magnet. We rinse balls in methanol and store in 50\% ethanol.
\end{enumerate}

\subsection*{Soluble fraction extraction}

\begin{enumerate}[resume]
\item Spin at 3000g for 30 seconds (clarification step) to remove whole cells and very large aggregates.
\item Decant clarified liquid into a 1.5mL microcentrifuge tube. If desired, keep the pellet and process it alongside the insoluble fraction; this end product is the \emph{unclarified fraction}. For total RNA samples, skip next spin and move to step \ref{step:mix}.
\item Spin at 100,000g for 20 minutes (fixed-angle TLA-55 rotor at 40,309 rpm, 4\degC, in a Beckman Coulter Optimax tabletop ultracentrifuge).
\item Decant supernatant into a 1.5mL microcentrifuge tube: this is the \emph{soluble fraction}.
\item Take 10 \mul aliquot of soluble fraction and mix with Laemmli buffer; use this to run a protein gel and assess protein integrity.
\item Mix soluble fraction with 1/10 vol SAB (Sarkosyl/EDTA), and equal volume Phenol, to denature proteins and begin RNA extraction. \label{step:mix}
\end{enumerate}

\subsection*{Insoluble fraction extraction}

\begin{enumerate}[resume]
\item Violently snap pellet to clear remaining liquid.
\item Add 500 \mul soluble RNA buffer (SRB) and vortex violently. (The pellet may not resuspend; that's fine.)
\item Spin at 100,000g for 20 minutes.
\item Discard supernatant, clear residual liquid with a hard snap.
\item Add 500 \mul insoluble RNA buffer (IRB). Vortex briefly.
% \item Vortex until pellet dissolves, 10-15 minutes for clarified samples.
% \item Spin at 100,000g for 5 minutes.
% \item Decant supernatant into a 1.5mL microcentrifuge tube: this is the \emph{insoluble fraction}.
\item Add equal volume Phenol pH 8, vortex until pellet dissolves, begin RNA extraction.
\end{enumerate}

\subsection*{RNA extraction}

Extract using standard phenol:chloroform method.

\begin{enumerate}[resume]
\item Having mixed equal volumes of aqueous solution with Phenol, ensure tubes are vortexed thoroughly.
\item Spin at 14,000g for 2mins at 4\degC.
\item Transfer aqueous phase (roughly 200\mul) to new 1.5ml tube (labeled tube N), avoiding cloudy interphase and lipids on top. Add 250\mul SRB to previous tube (labeled tube A) and vortex tube A for 5mins.
\item Add 250\mul chloroform to tube A to suck off phenol from water phase. Vortex for 3 mins, then spin 2mins at 14,000g and transfer aqueous phase from tube A to tube N. Discard tube A.
\item Repeat extraction on tube N: mix with equal volume Phenol:Chloroform pH 4.5, vortex 30s, 14,000g for 2mins, remove aqueous phase to new tube. Repeat until interphase is no longer cloudy.
\item To final aqueous sample add 1/10 vol ammonium acetate, 1.5 vol 100\% EtOH, and 1\mul GlycoBlue (use a master mix!). Mix gently, precipitate overnight at -20\degC.
\item Remove the sample from freezer. Cold spin (4\degC) for 15 mins at 14,000g.
\item Thoroughly remove ethanol from pellet, and add 700\mul 80\% ethanol. Cold spin for 2 minutes at 14,000g. If desired, repeat the ethanol wash and cold spin. This removes all traces of salt, detergent, etc.
\item Dry pellets thoroughly, i.e., pipette off ethanol, removing all liquid. If necessary, dry with the tubes open on a 37\degC\ heat block (if the RNA sample is pure, this should not degrade the RNA). Resuspend pellet in 50 \mul H$_2$O.
\item To check the quality of the RNA, pour a 1\% agarose-TBE gel on RNA-free equipment, and run using NEB RNA loading dye. Heat loading dye and H$_2$O to 95\degC\ for 5 minutes, and then cool, to reduce the possibility of contamination. Mix 1\mul sample, 10\mul 1X loading dye for each well. Perform a 2X serial dilution of the sample for more precise quantification.
\end{enumerate}

\end{document}
State Before: ι : Type ?u.74711 α : Type u_1 β : Type ?u.74717 inst✝ : GeneralizedHeytingAlgebra α a b c d : α h : Codisjoint a b ⊢ b ⇨ a ⊓ b = a State After: no goals Tactic: rw [himp_inf_distrib, himp_self, inf_top_eq, h.himp_eq_right]
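A self-contained reconstruction of this goal as a Lean 4 / Mathlib declaration may make the trace easier to replay; the use of `example`, the `import`, and the variable binders are assumptions inferred from the goal state, while the statement and the rewrite chain are taken verbatim from the trace:
import Mathlib

variable {α : Type*} [GeneralizedHeytingAlgebra α] {a b : α}

-- b ⇨ a ⊓ b distributes, b ⇨ b collapses to ⊤, and codisjointness closes the goal.
example (h : Codisjoint a b) : b ⇨ a ⊓ b = a := by
  rw [himp_inf_distrib, himp_self, inf_top_eq, h.himp_eq_right]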
#!/usr/bin/Rscript
# This script converts an XPT file into a CSV file using R
library('foreign')

args <- commandArgs(trailingOnly = TRUE)

xpt_to_csv <- function(src_path, dst_path) {
    print("r: WARNING this code isn't memory efficient, it stores the XPT file in memory twice")
    print(paste("r: reading full XPT file into memory from:", src_path, sep = " "))
    # read.xport (from the foreign package) parses the SAS transport file into a data frame
    data <- read.xport(src_path)
    print(paste("r: writing data to csv file:", dst_path, sep = " "))
    write.csv(data, file = dst_path)
}

xpt_to_csv(src_path = args[1], dst_path = args[2])
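# Example usage from a shell, assuming the script above is saved as
# xpt_to_csv.R (both file names below are placeholders):
#   Rscript xpt_to_csv.R input.xpt output.csv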
We found the following attractions in the Bridgeport area. 263 Golden Hill St., Bridgeport, CT 06604. I-95, Ex. 27A to Exit 2 Golden Hill St./Main St. (203) 576-1636. Award Winning professional theater in a "BYOB & Picnic" cabaret-style seating atmosphere. Produces Professional Children's Theatre, Main Stage Concerts, & Main Stage Theatre featuring both seasoned and up-and-coming CT and NY talent. Groups, birthdays, and rentals welcome! 910 Fairfield Ave., Bridgeport, CT 06605.
-- Andreas, 2016-12-31, issue #1976 raised by nad -- Check for correct parameters in projection pattern -- {-# OPTIONS -v tc.lhs.split:40 #-} postulate A B : Set module M (_ : Set) where record R : Set₂ where field F : Set₁ open R public open M A wrong : M.R B F wrong = Set -- Expected error: -- A != B of type Set -- when checking that the clause F wrong = Set has type M.R B
{-# OPTIONS --cubical --safe #-} module Cubical.Foundations.UnivalenceId where open import Cubical.Core.Glue renaming ( isEquiv to isEquivPath ; _≃_ to EquivPath ; equivFun to equivFunPath ) open import Cubical.Core.Id open import Cubical.Foundations.Prelude public hiding ( _≡_ ; _≡⟨_⟩_ ; _∎ ) open import Cubical.Foundations.Id open import Cubical.Foundations.Equiv renaming ( isPropIsEquiv to isPropIsEquivPath ) open import Cubical.Foundations.Univalence renaming ( EquivContr to EquivContrPath ) open import Cubical.Foundations.Isomorphism path≡Id : ∀ {ℓ} {A B : Type ℓ} → Path _ (Path _ A B) (Id A B) path≡Id = isoToPath (iso pathToId idToPath idToPathToId pathToIdToPath ) equivPathToEquivPath : ∀ {ℓ} {A : Type ℓ} {B : Type ℓ} → (p : EquivPath A B) → Path _ (equivToEquivPath (equivPathToEquiv p)) p equivPathToEquivPath (f , p) i = ( f , isPropIsEquivPath f (equivToEquivPath (equivPathToEquiv (f , p)) .snd) p i ) equivPath≡Equiv : ∀ {ℓ} {A B : Type ℓ} → Path _ (EquivPath A B) (A ≃ B) equivPath≡Equiv {ℓ} = isoToPath (iso (equivPathToEquiv {ℓ}) equivToEquivPath equivToEquiv equivPathToEquivPath) univalenceId : ∀ {ℓ} {A B : Type ℓ} → (A ≡ B) ≃ (A ≃ B) univalenceId {ℓ} {A = A} {B = B} = equivPathToEquiv rem where rem0 : Path _ (Lift (EquivPath A B)) (Lift (A ≃ B)) rem0 = congPath Lift equivPath≡Equiv rem1 : Path _ (Id A B) (Lift (A ≃ B)) rem1 i = hcomp (λ j → λ { (i = i0) → path≡Id {A = A} {B = B} j ; (i = i1) → rem0 j }) (univalencePath {A = A} {B = B} i) rem : EquivPath (Id A B) (A ≃ B) rem = compEquiv (eqweqmap rem1) (invEquiv LiftEquiv)
Formal statement is: lemma decseq_Suc_iff: "decseq f \<longleftrightarrow> (\<forall>n. f (Suc n) \<le> f n)" Informal statement is: A sequence $f$ is decreasing if and only if $f(n+1) \leq f(n)$ for all $n$.
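Proof sketch (one standard argument, not part of the source text): unfolding the definition, $\mathrm{decseq}\,f$ means $f(n) \leq f(m)$ whenever $m \leq n$, so instantiating at $m = n$ and $n + 1 \geq n$ gives $f(n+1) \leq f(n)$. Conversely, if $f(n+1) \leq f(n)$ holds for every $n$, then for any $m \leq n$ the chain $f(n) \leq f(n-1) \leq \cdots \leq f(m)$ follows by induction on $n - m$ together with transitivity of $\leq$.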
\documentclass[10pt,letterpaper]{article} \include{settings} %% Define per-paper macros. \newcommand{\withurl}[2]{{#1}\footnote{\texttt{#2}}} \newcommand{\rulemajor}[1]{\section{#1}} \begin{document} \vspace*{0.2in} \begin{flushleft} {\Large \textbf\newline{Ten Quick Tips for Creating an Effective Lesson} } \newline \\ {Greg~Wilson}\textsuperscript{1,*} \\ \textbf{1} RStudio, Inc., Toronto, Ontario M4L 2T9 \\ \bigskip * [email protected] \end{flushleft} \section*{Abstract} We present ten tips for building effective lessons that are grounded in empirical research on pedagogy and cognitive psychology, and which we have found to be practically useful in both classroom and free-range settings. \section*{Author Summary} As a species, we know as much about teaching and learning as we do about public health, but most people who teach at the post-secondary level are never introduced to even the basics of evidence-based pedagogy. Knowing just a few key facts will help you build more effective lessons in less time and with less pain, and will also make those lessons easier for your peers to find and re-use. This paper presents ten tips that you can apply immediately and explains why they work. \section*{Introduction} There are many kinds of lessons, both formal and informal, from seconds long to lifelong. Most people have sat (or suffered) through hundreds of these, but have never been shown how to design ones that are effective. These ten simple tips for creating lessons are: \begin{itemize} \item based on current educational research \cite{Nuth2007,Ambr2010,DeBr2015,Dida2016,Brow2018,Mark2018}, \item filtered by what can be done by non-specialists with limited time and resources \cite{Hust2012,Lang2016}, and \item prioritized by experience teaching and training people to teach together \cite{Deve2018,Wils2016,Wils2018}. \end{itemize} The key insight that underpins all of these tips is that \emph{learning is both a cognitive and a social activity}. On the cognitive side, incoming information (the lesson) passes through a \emph{sensory register} that has physically separate channels for visual and auditory information and is stored in \emph{short-term memory}, where it is used to construct a \emph{verbal model} (sometimes also called a \emph{linguistic model}) and a separate \emph{visual model} \cite{Maye2009}. These are then integrated and stored in \emph{long-term memory} as facts and relationships. If those facts and relationships are strengthened by use, they can later be recalled and applied, and we say that learning has taken place. One key feature of this model is that short-term memory is very limited: \cite{Mill1956} famously estimated its size as $7{\pm}2$ items, and more recent studies place the figure closer to 4. If too much information is presented too quickly, material spills out of short-term memory before it can be integrated and stored, and learning does not occur. A second key feature is that the brain's processing power is also very limited. Effort spent identifying key facts or reconciling the linguistic and visual input streams reduces the power available for organizing new information and connecting it to what's already present. Learning is also a social activity. Learners who feel motivated will learn more; learners who feel that they may not be judged on their merits, or who have experienced unequal treatment in the past, will learn less (see the tip ``Motivate and Avoid Demotivating''). 
In \cite{Litt2004}, for example, Kenneth Wesson wrote, ``If poor inner-city children consistently outscored children from wealthy suburban homes on standardized tests, is anyone naive enough to believe that we would still insist on using these tests as indicators of success?'' Lesson designers must take the social aspects of learning into account if they are to create effective lessons; we discuss this further in the final tip (``Make Lessons Inclusive''). \rulemajor{Use Learner Personas to Define Your Audience} The first step in creating a good lesson is figuring out who the audience is. One way to do this is to make up biographies of two or three target learners. This technique is borrowed from user interface designers, who create short profiles of typical users to help them think about their audience, and the profiles themselves are called \emph{personas}. Learner personas have five parts: \begin{enumerate} \item the person's general background, \item what they already know, \item what \emph{they} think they want to do (as opposed to what someone who already understands the subject thinks), \item how the lesson will help them, and \item any special needs they might have. \end{enumerate} A learner persona for a weekend introduction to programming aimed at college students might be: \begin{enumerate} \item Jorge has just moved from Costa Rica to Canada to study agricultural engineering. He has joined the college soccer team, and is looking forward to learning how to play ice hockey. \item Other than using Excel, Word, and the Internet, Jorge's most significant previous experience with computers is helping his sister build a WordPress site for the family business back home in Costa Rica. \item Jorge needs to measure properties of soil from nearby farms using a handheld device that sends logs in a text format to his computer. Right now, Jorge has to open each file in Excel, crop the first and last points, and calculate an average. \item This workshop will show Jorge how to write a little Python program to read the data, select the right values from each file, and calculate the required statistics. \item Jorge can read English well, but still struggles sometimes to keep up with spoken conversation (especially if it involves a lot of new jargon). \end{enumerate} Rather than writing new personas for every lesson or course, instructors often create and share a handful that cover everyone they hope to teach, then pick a few from that set to describe who particular material is intended for. Used this way, personas become a convenient shorthand for design issues: when speaking with each other, teachers can say, ``Would Jorge understand why we're doing this?'' or, ``What installation problems would Jorge face?'' Personas help you remember one of the most important tips of teaching: \emph{you are not your learners}. The people you teach will almost always have different backgrounds, different capabilities, and different ambitions than you; personas help you keep your lessons focused on what they need rather than on what your younger self might have wanted.
You should therefore create lessons and exercises that include some older material in each new lesson. According to \cite{Mill2016}, ``The lectures that predominate in face-to-face courses are relatively ineffective ways to teach, but they probably contribute to spacing material over time, because they unfold in a set schedule over time. In contrast, depending on how the courses are set up, online students can sometimes avoid exposure to material altogether until an assignment is nigh.'' \item[Retrieval Practice:] Researchers now believe that the limiting factor for long-term memory is not retention (what is stored), but recall (what can be accessed). Recall of specific information improves with practice, so outcomes in real situations can be improved by taking practice tests or summarizing the details of a topic from memory and then checking what was and wasn't remembered. For example, \cite{Karp2008} found that repeated testing improved recall of word lists from 35\% to 80\%. \item[Interleaving:] One way to space retrieval practice is to interleave study of different topics: instead of mastering one subject, then the next, then a third, shuffle the order. Even better, switch up the order: A-B-C-B-A-C is better than A-B-C-A-B-C, which in turn is better than A-A-B-B-C-C \cite{Rohr2015}. This is effective because interleaving fosters creation of more links between different topics, which in turn increases retention and recall. \item[Elaboration:] Having learners explain things to themselves and others as they go along improves understanding and recall. One way to do this is to follow up each answer on a practice quiz with an explanation of why that answer is correct, or conversely with an explanation of why some other plausible answer isn't. Another is to have learners explain how a new idea is similar to or different from one they have seen previously. \item[Concrete Examples:] One specific form of elaboration that is useful enough to deserve its own heading is the use of concrete examples. As discussed in the tip ``Use Worked Examples and Concreteness Fading'', every statement of a general principle should be accompanied by one or more examples of its use, or conversely take each particular problem and list the general principles it embodies. \cite{Raws2014} found that interleaving examples and definitions made it more likely that learners would remember the latter correctly. Another approach is to teach by contrast, i.e., to show learners what a solution is \emph{not}, or what kind of problem a technique \emph{won't} solve. For example, when showing children how to simplify fractions, it's important to give them a few like 5/7 that can't be simplified so that they don't become frustrated looking for answers that don't exist. \item[Dual Coding:] Different subsystems in our brains handle and store linguistic and visual information, and if complementary information is presented through both channels, then they can reinforce one another. However, learning is more effective when the same information is \emph{not} presented simultaneously in two different channels \cite{Maye2003,Maye2009}, because then the brain has to expend effort to check the channels against each other. This is one of the many reasons that reading slides verbatim is ineffective: not only is the reader not adding value, they are actually adding to the load on learners whose brains are trying to check that the spoken and written inputs are consistent. 
\end{description} \rulemajor{Write Summative Assessments to Set Concrete Goals} \emph{Summative assessment} is something done at the end of a lesson to tell whether the desired learning has taken place: a driving test, performance of a piece of music, a written examination, or something else of that kind. Summative assessments are usually used as gates (e.g., ``Is it now safe for this person to drive on their own?''), but they are also a good way to clarify the learning objectives for a lesson. ``Understand linear regression'' is hopelessly vague; a much better way to set the goal for that lesson would be to define an exercise such as: \begin{quotation} \noindent Write a short R script that reads the tabular data in \texttt{housing.csv} and uses the \texttt{lm} function to calculate a regression coefficient relating house price to purchaser age. \end{quotation} This is better because it gives the lesson author a concrete goal to work toward: nothing goes in the lesson except what is needed to complete the summative assessments. This helps reduce content bloat, and also tells the author when the lesson is done. (A minimal script satisfying this assessment is sketched at the end of the tip on peer instruction below.) Writing summative assessments early in the lesson design process also helps ensure that outcomes are actually checkable. Since telepathy is not yet widely available, it is impossible for instructors to know what learners do and don't understand. Instead, we must ask them to demonstrate that they're able to do something that they couldn't do without the desired understanding. Finally, creating summative assessments early can help authors stay connected to their learners' goals. Each summative assessment should embody an \emph{authentic task}, i.e., something that an actual learner actually wants to do. Early on, authentic tasks should be learners' own goals; as they advance and are able to make sense of generalizations, these tasks may be extensions or generalizations of earlier solutions. Continuing with the statistical example above, calculating a regression coefficient may be an authentic task for someone who already knows enough statistics to understand what such coefficients are good for. If the intended learners are not yet that experienced, this exercise could be extended to have them make some sort of judgment based on the regression coefficients to exercise higher-order thinking. \rulemajor{Write Formative Assessments for Pacing, Design, Preparation, and Reinforcement} The counterpoint to summative assessment is \emph{formative assessment}, which consists of checks used while learning is taking place to form (or shape) the teaching. Asking learners for questions is a common, but relatively ineffective, kind of formative assessment. What works better is to give them a short problem---one that can be done in 1--2 minutes so as not to derail the flow of the lesson, and which will help them uncover and confront their misconceptions about the topic being taught. Checking in with learners this way every 10--15 minutes accomplishes several things: \begin{description} \item[Pacing:] Asking, ``Does everyone understand?'' almost always produces false positives. In contrast, if any substantial fraction of your learners cannot do a formative assessment correctly, you know right then and there that you need to re-explain the most recent material. When you start doing this, you will feel like you're going more slowly, but that's because you will now be teaching at the speed at which your audience can learn rather than the speed at which you can talk.
\item[Design:] Creating formative assessments that build toward a lesson's summative assessment gives you a structure for your lesson. Returning to the regression example, the summative assessment tells you that you should have exercises along the way in which learners load CSV data, use the \texttt{lm} function with appropriate parameters, and interpret the result. Writing a few minutes of material for each of these subjects is less intimidating than trying to explain the whole topic at once. \item[Preparation:] Formative assessments give learners practice with the concepts, methods, and tools they will use when doing the lesson's summative assessment, and tell them where to focus their revision. Switching from statistics to music, if a violinist is able to do the bowing and fingering exercises for a piece, but is struggling with the rhythmic patterns, that tells her where she should spend her study time. \item[Reinforcement:] Learners remember things better if they use material right away, and having formative assessments during the lesson does this. \item[Scope:] Breaking a summative assessment down into parts and creating formative assessments for each usually shows you that you are trying to cram too much into one lesson. Writing assessments is therefore iterative, as early drafts of summative assessments are re-scoped to only require as much material as can plausibly be covered. \end{description} \cite{Broo2016,Majo2015,Rice2018} offer inspiration for a wide variety of different kinds of summative and formative assessment exercises. \rulemajor{Integrate Visual and Linguistic Information} Research by Mayer and colleagues on the split-attention effect is closely related to cognitive load theory \cite{Maye2003}. As described in the introduction, linguistic and visual input are processed by different parts of the human brain, and linguistic and visual memories are stored separately as well. This means that correlating linguistic and visual streams of information takes cognitive effort: when someone reads something while hearing it spoken aloud, their brain can't help but check that it's getting the same information on both channels. Learning is therefore more effective when information is presented simultaneously in two different channels, but only when that information is complementary rather than redundant. For example, people generally find it harder to learn from a video that has both narration and on-screen captions than from one that has either the narration or the captions but not both, because some of their attention has to be devoted to checking that the narration and the captions agree with each other. Two notable exceptions to this are people who do not yet speak the language well and people with hearing impairments or other special needs, both of whom may find that the extra effort is a net benefit. This is why it's more effective to draw a diagram piece by piece while teaching rather than to present the whole thing at once. If parts of the diagram appear at the same time as things are being said, the two will be correlated in the learner's memory. Pointing at part of the diagram later is then more likely to trigger recall of what was being said when that part was being drawn. The split-attention effect does \emph{not} mean that learners shouldn't try to reconcile multiple incoming streams of information---after all, this is something they have to do in the real world \cite{Atki2000}.
Instead, it means that instruction shouldn't require it while people are mastering unit skills; instead, using multiple sources of information simultaneously should be treated as a separate learning task. \rulemajor{Design for Peer Instruction} No matter how good a teacher is, she can only say one thing at a time. How then can she clear up many different misconceptions in a reasonable time? The best solution developed so far is \emph{peer instruction}. Originally created by Eric Mazur at Harvard \cite{Crou2001}, it has been studied extensively in a wide variety of contexts (e.g., \cite{Vick2015,Port2016}). Peer instruction is essentially a scalable way to provide one-to-one mentorship. It interleaves formative assessment with student discussion as follows: \begin{enumerate} \item Give a brief introduction to the topic, either in class or in out-of-class reading. \item Give learners a multiple choice question (MCQ). \item Have all the students vote on their answers to the MCQ. \begin{enumerate} \item If the students all have the right answer, move on. \item If they all have the same wrong answer, address that specific misconception. \item If they have a mix of right and wrong answers, give them several minutes to discuss those answers with one another in small groups (typically 2--4 students) and then reconvene and vote again. \end{enumerate} \end{enumerate} The questions posed to learners don't have to be MCQs: matching terms to definitions can be equally effective, as can Parsons Problems (in which they are given the jumbled parts of a solution and must put them in the right order \cite{Pars2006}). Whatever mix is used, the lesson must build toward them, and the question must probe for conceptual understanding and misconceptions (rather than check simple factual knowledge). Group discussion significantly improves students' understanding because it forces them to clarify their thinking, which can be enough to call out gaps in reasoning. Re-polling the class then lets the teacher know if they can move on, or if further explanation is necessary. A final round of additional explanation and discussion after the correct answer is presented gives students one more chance to solidify their understanding. But could this be a false positive? Are results improving because of increased understanding during discussion, or simply from a follow-the-leader effect? \cite{Smit2009} tested this by following the first question with a second one that students answer individually and found that peer discussion actually does enhance understanding, even when none of the students in a discussion group originally knew the correct answer. It is important to have learners vote publicly so that they can't change their minds afterwards and rationalize it by making excuses to themselves like ``I just misread the question''. Some of the value of peer instruction comes from having their answer be wrong and having to think through the reasons why. This is called the \emph{hypercorrection effect} \cite{Metc2016}. Most people don't like to be told they're wrong, so it's reasonable to assume that the more confident someone is that the answer they've given in a test is correct, the harder it is to change their mind if they were actually wrong. However, it turns out that the opposite is true: the more confident someone is that they were right, the more likely they are not to repeat the error if they are corrected. 
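For concreteness, here is one minimal R script that would satisfy the summative assessment from the regression example used earlier; since \texttt{housing.csv} is hypothetical, the column names \texttt{price} and \texttt{age} are assumptions rather than part of the exercise.

\begin{verbatim}
# Read the tabular data and fit a linear model of price against age.
housing <- read.csv("housing.csv")
model <- lm(price ~ age, data = housing)
# The regression coefficient relating house price to purchaser age.
coef(model)["age"]
\end{verbatim}

A learner's solution need not match this line for line; anything that loads the data, calls \texttt{lm} with a sensible formula, and reports the coefficient meets the assessment.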
\rulemajor{Use Worked Examples and Concreteness Fading} A worked example is a step-by-step demonstration of how to solve a problem or do some task. By giving the steps in order, the instructor reduces the learner's cognitive load, which accelerates learning \cite{Atki2000,Paas2003}. However, worked examples become less effective as learners acquire more expertise \cite{Kaly2003,Kaly2007}, a phenomenon known as the \emph{expertise reversal effect}. In brief, as learners build their own mental models of what to do and how to do it, the detailed step-by-step breakdown of a worked example starts to get in the way. This is why tutorials and manual pages both need to exist: what's appropriate for a newcomer is frustrating for an expert, while what jogs an expert's memory may be incomprehensible to a novice. One powerful way to use worked examples is to present a series of \emph{faded examples} \cite{Schw2009}. The first example in the series is a complete use of a problem-solving strategy; each subsequent example gives the learner more blanks to fill in. The material that isn't blank is often referred to as \emph{scaffolding}, since it serves the same purpose as the scaffolding set up temporarily at a building site. Faded examples can be used in almost every kind of teaching, from sport and music to contract law. Someone teaching high school algebra might use them by first solving this equation for $x$: \begin{eqnarray*} (4x + 8) / 2 & = & 5 \\ 4x + 8 & = & 2 * 5 \\ 4x + 8 & = & 10 \\ 4x & = & 10 - 8 \\ 4x & = & 2 \\ x & = & 2/4 \\ x & = & 1/2 \end{eqnarray*} and then asking learners to fill in the blanks in this: \begin{eqnarray*} (3x - 1) * 3 & = & 12 \\ 3x - 1 & = & \rule{2em}{0.15mm}~/~\rule{2em}{0.15mm} \\ 3x - 1 & = & 4 \\ 3x & = & 4~\rule{2em}{0.15mm}~1 \\ 3x & = & \rule{2em}{0.15mm} \\ x & = & \rule{2em}{0.15mm}~/~3 \\ x & = & \rule{2em}{0.15mm} \\ \end{eqnarray*} The next problem might be this: \begin{eqnarray*} (5x + 1) * 3 & = & 4 \\ 5x + 1 & = & \rule{2em}{0.15mm} \\ 5x & = & \rule{2em}{0.15mm} \\ x & = & \rule{2em}{0.15mm} \\ \end{eqnarray*} Learners would finally be asked to solve an equation entirely on their own: \begin{eqnarray*} (2x + 7) / 4 & = & 1 \end{eqnarray*} At each step, learners have a slightly larger problem to solve, which is less intimidating than a blank screen or a blank sheet of paper. Faded examples also encourage learners (and instructors) to think about the similarities and differences between various approaches. Worked examples are themselves an example of \emph{concreteness fading} \cite{Gold2005,Fyfe2014}, which describes the process of starting lessons with things that are specific or tangible and then explicitly and gradually transitioning to more abstract and general concepts. Concreteness fading: \begin{enumerate} \item helps learners understand abstract symbols in terms of well-understood concrete objects, \item lets them leverage personal experience to ground abstract thinking, \item gives them a store of examples and mental images that they can fall back on when abstract symbols and reasoning fail, and \item help learners figure out what is specific to particular examples and what is generalizable across all problems of a certain kind. 
\end{enumerate} One way to remember this strategy is the acronym PETE (Problem, Explanation, Theory, Example), which encourages instructors to: \begin{itemize} \item describe an authentic problem that the lesson will solve, \item work through a solution to that problem, \item explain the general theory that underpins that solution, and \item work through a second example so that learners will understand which parts generalize. \end{itemize} \rulemajor{Show How to Detect, Diagnose, and Correct Common Mistakes} It is almost oxymoronic to say that learners spend a lot of their time trying to figure out what they've done wrong and fixing it: after all, if they knew and they had, they would already have moved on to the next subject. Most lessons devote little time to detecting, diagnosing, and correcting common mistakes, but doing this will accelerate learning---not least by reducing the time that learners spend feeling lost and frustrated. In Carroll et al.'s ``minimal manual'' approach to training materials, every topic is accompanied by descriptions of symptoms learners might see, their causes, and how to correct them \cite{Carr2014}. When studying second language acquisition, \cite{Lyst1997} identified six ways in which instructors can correct learners' mistakes: \begin{description} \item[Explicit correction:] clearly indicate that the learner is incorrect and provide the correct form. \item[Recasting:] repeat the learner's response with the mistake or mistakes corrected. \item[Clarification request:] indicate that the learner's answer is incorrect (e.g., by saying, ``Are you sure?'') but leave the correction open-ended. \item[Metalinguistic clues:] pose leading questions (e.g., ``Do we need the absolute error or the relative error here?'') \item[Elicitation:] provide the first part of the correct answer as a prompt and require the learner to fill in the rest. \item[Repetition:] repeat the learner's error, drawing attention to it but leaving the correction up to them. \end{description} All of these can be used preemptively during the design of lessons. For example, an introduction to chemical reactions could present an incomplete calculation of enthalpy and ask the learner to fill it in (elicitation) or present the complete calculation with errors, then draw attention to those errors and correct them one by one (recasting). All of these strategies provide retrieval practice by requiring learners to use what they have just learned, and encourage metacognition by requiring them to reflect on the limits and applicability of that knowledge. \rulemajor{Motivate and Avoid Demotivating} One of the strongest predictors of whether people learn something is their \emph{intrinsic motivation}, i.e., their innate desire to master the material. The term is used in contrast with \emph{extrinsic motivation}, which refers to behavior driven by rewards such as money, fame, and grades. As \cite{Wlod2017} describes, the biggest motivators for adult learners are their sense of agency (i.e., the degree to which they feel that they're in control of their lives), the utility or usefulness of what they're learning, and whether their peers are learning the same things. Letting people go through lessons at the time of their own choosing, using authentic tasks, and working in small groups speak to each of these factors. Conversely, it is very easy for educators to demotivate their learners by being unpredictable, unfair, or indifferent.
If there is no reliable relationship between effort and result, learners stop trying (a particular case of a broader phenomenon called \emph{learned helplessness}). If the learning environment is slanted to advantage some people at the expense of others, everyone will do less well on average \cite{Wilk2011}, and if the lessons make it clear that the teacher doesn't care if people learn things or not, learners will mirror that indifference. One way to tell if learners are motivated or not is to look at the incidence of cheating. In classrooms, it is usually not a symptom of moral failing, but a rational response to poorly-designed incentives. As reported in \cite{Lang2013}, some things that educators do that unintentionally encourage cheating include: \begin{itemize} \item setting the cost of failure very high, \item relying on single assessment mechanisms like multiple-choice tests, and \item using arbitrary grading criteria. \end{itemize} Eliminating these from lessons doesn't guarantee that learners won't cheat, but does reduce the incidence. (And despite what many educators believe, cheating is no more likely online than in person \cite{Beck2014}.) \rulemajor{Make Lessons Inclusive} \emph{Inclusivity} is a policy of including people who might otherwise be excluded. In STEM education, it means making a positive effort to be more welcoming to women, under-represented racial or ethnic groups, people with various sexual orientations, the elderly, the physically challenged, the economically disadvantaged, and others. The most important step is to stop thinking in terms of a \emph{deficit model}, i.e., to stop thinking that the members of marginalized groups lack something and are therefore responsible for not getting ahead. Believing that puts the burden on people who already have to work harder because of the inequities they face, and (not coincidentally) gives those who benefit from the current arrangements an excuse not to look at themselves too closely. One axis of inclusive lesson design is physical: provide descriptive text for images and videos to help the visually challenged, closed captions for videos to help those with hearing challenges, and so on. Another axis is social: \begin{itemize} \item Use gender-neutral pronouns (e.g., a singular ``they'') or alternate between male and female pronouns. \item Use culturally varied names in examples (e.g., Aisha and Boris rather than Alice and Bob). \item Avoid examples based on over-simplified or exclusionary views of gender and orientation, such as assuming that there are only two genders, that gender is fixed throughout a person's life, or that marriage is always between people of unlike gender. \end{itemize} Committing fully to inclusive teaching may mean fundamentally rethinking content. For example, \cite{Lach2018} explored two strategies for making computing education more culturally inclusive, each of which has its own traps for the unwary. The first strategy, \emph{community representation}, highlights students' social identities, histories, and community networks using after-school mentors or role models from students' neighborhoods, or activities that use community narratives and histories as a foundation for a computing project. The major risk is shallowness, e.g., using computers to build slideshows rather than do any real computing. The second strategy, \emph{computational integration}, incorporates ideas from the learner's community, e.g., by reverse engineering indigenous graphic designs in a visual programming environment. 
The major risk here is cultural appropriation, e.g., using practices without acknowledging origins. No matter which strategy is chosen, the first steps should always be to ask your learners and members of their community what \emph{they} think you ought to do, and to give them control over content and direction. \section*{Conclusion} Following the ten tips laid out above doesn't guarantee that your lessons will be great, but it will help ensure that they aren't bad. When it comes time to put them into practice, we recommend following something like the reverse design process developed independently by \cite{Wigg2005,Bigg2011,Fink2013}: \begin{enumerate} \item Figure out who your learners are and what their goals are. \item Create the summative assessment for the lesson to give yourself a target. \item Itemize the knowledge and skills that assessment relies on, and create formative assessments to check on each while learning is taking place. \item Order those formative assessments in a way that respects their dependencies, i.e., so that they build on each other. \item Estimate the time required to cover each topic and perform its related formative assessment, then cut material that there isn't time for. \item Write lessons to connect each formative assessment to the next (which is usually much easier than writing an entire lesson at once). \item Double-check your language and examples to ensure that they address your learners' goals and won't demotivate them. \item Derive learning objectives and key points from the lesson to share with your learners and co-instructors. The former make the lesson findable, while the latter give you and your co-instructors a quick way to check what the lesson actually covers. \item Put everything online for other people to download, modify, and contribute to. \end{enumerate} We also recommend that lessons be designed for sharing with other instructors. Instructors often scour the web for ideas, and it's common for people to inherit courses from previous instructors. What is far less common is collaborative lesson construction, i.e., people taking material, improving it, and then offering their changes back to the community. This model has served the open source software community well, and as \cite{Deve2018} describes, it works equally well for lessons---provided that materials are designed to make fine-grained collaboration easy. Unfortunately, widely-used systems like Git are designed to handle text files, and struggle with structured document formats like Microsoft Word or PowerPoint. In addition, their learning curve is very steep, and deters many potential users who have deadlines to meet or would rather think about engaging exercises than try to make sense of obscure error messages. One key enabler of collaborative lesson construction is licensing. We strongly recommend using one of the Creative Commons family of licenses, since they have been carefully vetted and are widely understood. % \bibliography{wilson-lesson-tips} \begin{thebibliography}{10} \bibitem{Nuth2007} Nuthall G. \newblock The Hidden Lives of Learners. \newblock {NZCER} Press; 2007. \bibitem{Ambr2010} Ambrose SA, Bridges MW, DiPietro M, Lovett MC, Norman MK. \newblock How Learning Works: Seven Research-Based Principles for Smart Teaching. \newblock Jossey-Bass; 2010. \bibitem{DeBr2015} Bruyckere PD, Kirschner PA, Hulshof CD. \newblock Urban Myths about Learning and Education. \newblock Academic Press; 2015. \bibitem{Dida2016} Didau D, Rose N. 
\newblock What Every Teacher Needs to Know About Psychology. \newblock John Catt Educational; 2016. \bibitem{Brow2018} Brown NCC, Wilson G. \newblock Ten Quick Tips for Teaching Programming. \newblock {PLoS} Computational Biology. 2018;14(4). \newblock doi:{10.1371/journal.pcbi.1006023}. \bibitem{Mark2018} Markovits RA, Weinstein Y. \newblock Can Cognitive Processes Help Explain the Success of Instructional Techniques Recommended by Behavior Analysts? \newblock {NPJ} Science of Learning. 2018;3(1). \newblock doi:{10.1038/s41539-017-0018-1}. \bibitem{Hust2012} Huston T. \newblock Teaching What You Don't Know. \newblock Harvard University Press; 2012. \bibitem{Lang2016} Lang JM. \newblock Small Teaching: Everyday Lessons from the Science of Learning. \newblock Jossey-Bass; 2016. \bibitem{Deve2018} Devenyi GA, Emonet R, Harris RM, Hertweck KL, Irving D, Milligan I, et~al. \newblock Ten Simple Rules for Collaborative Lesson Development. \newblock {PLOS} Computational Biology. 2018;14(3). \newblock doi:{10.1371/journal.pcbi.1005963}. \bibitem{Wils2016} Wilson G. \newblock Software Carpentry: Lessons Learned. \newblock {F1000Research}. 2016;doi:{10.12688/f1000research.3-62.v2}. \bibitem{Wils2018} Wilson G. \newblock Teaching Tech Together. \newblock Lulu; 2018. \bibitem{Maye2009} Mayer RE. \newblock Multimedia Learning. \newblock 2nd ed. Cambridge University Press; 2009. \bibitem{Mill1956} Miller GA. \newblock The Magical Number Seven, Plus or Minus Two: Some Limits on Our Capacity for Processing Information. \newblock Psychological Review. 1956;63(2):81--97. \newblock doi:{10.1037/h0043158}. \bibitem{Litt2004} Littky D. \newblock The Big Picture: Education Is Everyone's Business. \newblock Association for Supervision \& Curriculum Development ({ASCD}); 2004. \bibitem{Rohr2015} Rohrer D, Dedrick RF, Stershic S. \newblock Interleaved Practice Improves Mathematics Learning. \newblock Journal of Educational Psychology. 2015;107(3):900--908. \newblock doi:{10.1037/edu0000001}. \bibitem{Kang2016} Kang SHK. \newblock Spaced Repetition Promotes Efficient and Effective Learning. \newblock Policy Insights from the Behavioral and Brain Sciences. 2016;3(1):12--19. \newblock doi:{10.1177/2372732215624708}. \bibitem{Miya2018} Miyatsu T, Nguyen K, McDaniel MA. \newblock Five Popular Study Strategies: Their Pitfalls and Optimal Implementations. \newblock Perspectives on Psychological Science. 2018;13(3):390--407. \newblock doi:{10.1177/1745691617710510}. \bibitem{Wein2018a} Weinstein Y, Madan CR, Sumeracki MA. \newblock Teaching the Science of Learning. \newblock Cognitive Research: Principles and Implications. 2018;3(1). \newblock doi:{10.1186/s41235-017-0087-y}. \bibitem{Wein2018b} Weinstein Y, Sumeracki M, Caviglioli O. \newblock Understanding How We Learn: A Visual Guide. \newblock Routledge; 2018. \bibitem{Mill2016} Miller MD. \newblock Minds Online: Teaching Effectively with Technology. \newblock Harvard University Press; 2016. \bibitem{Karp2008} Karpicke JD, Roediger HL. \newblock The Critical Importance of Retrieval for Learning. \newblock Science. 2008;319(5865):966--968. \newblock doi:{10.1126/science.1152408}. \bibitem{Raws2014} Rawson KA, Thomas RC, Jacoby LL. \newblock The Power of Examples: Illustrative Examples Enhance Conceptual Learning of Declarative Concepts. \newblock Educational Psychology Review. 2014;27(3):483--504. \newblock doi:{10.1007/s10648-014-9273-3}. \bibitem{Maye2003} Mayer RE, Moreno R. \newblock Nine Ways to Reduce Cognitive Load in Multimedia Learning. 
\newblock Educational Psychologist. 2003;38(1):43--52. \newblock doi:{10.1207/s15326985ep3801\_6}. \bibitem{Broo2016} Brookfield SD, Preskill S. \newblock The Discussion Book: 50 Great Ways to Get People Talking. \newblock Jossey-Bass; 2016. \bibitem{Majo2015} Major CH, Harris MS, Zakrajsek T. \newblock Teaching for Learning: 101 Intentionally Designed Educational Activities to Put Students on the Path to Success. \newblock Routledge; 2015. \bibitem{Rice2018} Rice GT. \newblock Hitting Pause: 65 Lecture Breaks to Refresh and Reinforce Learning. \newblock Stylus Publishing; 2018. \bibitem{Atki2000} Atkinson RK, Derry SJ, Renkl A, Wortham D. \newblock Learning from Examples: Instructional Principles from the Worked Examples Research. \newblock Review of Educational Research. 2000;70(2):181--214. \newblock doi:{10.3102/00346543070002181}. \bibitem{Crou2001} Crouch CH, Mazur E. \newblock Peer Instruction: Ten Years of Experience and Results. \newblock American Journal of Physics. 2001;69(9):970--977. \newblock doi:{10.1119/1.1374249}. \bibitem{Vick2015} Vickrey T, Rosploch K, Rahmanian R, Pilarz M, Stains M. \newblock Research-Based Implementation of Peer Instruction: A Literature Review. \newblock {CBE}--Life Sciences Education. 2015;14(1). \newblock doi:{10.1187/cbe.14-11-0198}. \bibitem{Port2016} Porter L, Bouvier D, Cutts Q, Grissom S, Lee CB, McCartney R, et~al. \newblock A Multi-Institutional Study of Peer Instruction in Introductory Computing. \newblock In: Proc.\ 2016 Technical Symposium on Computer Science Education ({SIGCSE'16}). Association for Computing Machinery ({ACM}); 2016. \bibitem{Pars2006} Parsons D, Haden P. \newblock {Parson's} Programming Puzzles: A Fun and Effective Learning Tool for First Programming Courses. \newblock In: Proc.\ 2006 Australasian Conference on Computing Education ({ACE'06}). Australian Computer Society; 2006. p. 157--163. \bibitem{Smit2009} Smith MK, Wood WB, Adams WK, Wieman CE, Knight JK, Guild N, et~al. \newblock Why Peer Discussion Improves Student Performance on In-class Concept Questions. \newblock Science. 2009;323(5910):122--124. \newblock doi:{10.1126/science.1165919}. \bibitem{Metc2016} Metcalfe J. \newblock Learning from Errors. \newblock Annual Review of Psychology. 2016;68(1):465--489. \newblock doi:{10.1146/annurev-psych-010416-044022}. \bibitem{Paas2003} Paas F, Renkl A, Sweller J. \newblock Cognitive Load Theory and Instructional Design: Recent Developments. \newblock Educational Psychologist. 2003;38(1):1--4. \newblock doi:{10.1207/s15326985ep3801\_1}. \bibitem{Kaly2003} Kalyuga S, Ayres P, Chandler P, Sweller J. \newblock The Expertise Reversal Effect. \newblock Educational Psychologist. 2003;38(1):23--31. \newblock doi:{10.1207/s15326985ep3801\_4}. \bibitem{Kaly2007} Kalyuga S. \newblock Expertise Reversal Effect and Its Implications for Learner-Tailored Instruction. \newblock Educational Psychology Review. 2007;19(4):509--539. \newblock doi:{10.1007/s10648-007-9054-3}. \bibitem{Schw2009} Schwonke R, Renkl A, Krieg C, Wittwer J, Aleven V, Salden R. \newblock The worked-example effect: Not an artefact of lousy control conditions. \newblock Computers in Human Behavior. 2009;25(2):258--266. \newblock doi:{10.1016/j.chb.2008.12.011}. \bibitem{Gold2005} Goldstone RL, Son JY. \newblock The Transfer of Scientific Principles Using Concrete and Idealized Simulations. \newblock Journal of the Learning Sciences. 2005;14(1):69--110. \newblock doi:{10.1207/s15327809jls1401\_4}. \bibitem{Fyfe2014} Fyfe ER, McNeil NM, Son JY, Goldstone RL. 
\newblock Concreteness Fading in Mathematics and Science Instruction: a Systematic Review. \newblock Educational Psychology Review. 2014;26(1):9--25. \newblock doi:{10.1007/s10648-014-9249-3}. \bibitem{Carr2014} Carroll J. \newblock Creating Minimalist Instruction. \newblock International Journal of Designs for Learning. 2014;5(2). \newblock doi:{10.14434/ijdl.v5i2.12887}. \bibitem{Lyst1997} Lyster R, Ranta L. \newblock Corrective Feedback and Learner Uptake: Negotiation of Form in Communicative Classrooms. \newblock Studies in Second Language Acquisition. 1997;19(1):37--66. \bibitem{Wlod2017} Wlodkowski RJ, Ginsberg MB. \newblock Enhancing Adult Motivation to Learn: A Comprehensive Guide for Teaching All Adults. \newblock Jossey-Bass; 2017. \bibitem{Wilk2011} Wilkinson R, Pickett K. \newblock The Spirit Level: Why Greater Equality Makes Societies Stronger. \newblock Bloomsbury Press; 2011. \bibitem{Lang2013} Lang JM. \newblock Cheating Lessons: Learning from Academic Dishonesty. \newblock Harvard University Press; 2013. \bibitem{Beck2014} Beck V. \newblock Testing a Model to Predict Online Cheating---Much Ado About Nothing. \newblock Active Learning in Higher Education. 2014;15(1):65--75. \newblock doi:{10.1177/1469787413514646}. \bibitem{Lach2018} Lachney M. \newblock Computational Communities: {African-American} Cultural Capital in Computer Science Education. \newblock Computer Science Education. 2018; p. 1--22. \newblock doi:{10.1080/08993408.2018.1429062}. \bibitem{Wigg2005} Wiggins G, McTighe J. \newblock Understanding by Design. \newblock Association for Supervision \& Curriculum Development ({ASCD}); 2005. \bibitem{Bigg2011} Biggs J, Tang C. \newblock Teaching for Quality Learning at University. \newblock Open University Press; 2011. \bibitem{Fink2013} Fink LD. \newblock Creating Significant Learning Experiences: An Integrated Approach to Designing College Courses. \newblock Jossey-Bass; 2013. \end{thebibliography} \end{document}
Formal statement is: lemma contour_integral_part_circlepath_reverse': "b < a \<Longrightarrow> contour_integral (part_circlepath c r a b) f = -contour_integral (part_circlepath c r b a) f" Informal statement is: If $b < a$, then the contour integral of $f$ along the part of the circle of radius $r$ centered at $c$ from $a$ to $b$ is equal to the negative of the contour integral of $f$ along the part of the circle of radius $r$ centered at $c$ from $b$ to $a$.
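In conventional notation, and assuming the standard parametrisation of \texttt{part\_circlepath} (an assumption, since the definition is not quoted above), the identity reads $$\int_{\gamma_{a \to b}} f(z)\, dz = - \int_{\gamma_{b \to a}} f(z)\, dz, \qquad \gamma_{a \to b}(t) = c + r e^{i ((1-t) a + t b)}, \quad t \in [0, 1],$$ and follows from the substitution $t \mapsto 1 - t$, which reverses the orientation of the path.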
Formal statement is: lemma mono_vimage_algebra: "sets M \<le> sets N \<Longrightarrow> sets (vimage_algebra X f M) \<subseteq> sets (vimage_algebra X f N)" Informal statement is: If $M$ is a sub-$\sigma$-algebra of $N$, then the preimage $\sigma$-algebra of $M$ under $f$ is a sub-$\sigma$-algebra of the preimage $\sigma$-algebra of $N$ under $f$. (This is immediate from the definition: every generating set $f^{-1}(A) \cap X$ with $A$ measurable in $M$ is also of that form for some $A$ measurable in $N$.)
Lambton College, which offers two-year programs and diplomas, is one of Ontario's 21 colleges of applied arts and technology. It has a full-time enrolment of 3,500 and a part-time enrolment of about 8,000. It is the city's only post-secondary school.
program tarefad
! Entropy of the position distribution of M random walkers on the 2D
! square lattice, written out as a function of the step number j.
      parameter (M = 400, N = 1000000, Nmax = 3000, pi = 4e0*atan(1e0))
      dimension I_AndPos(M,2), I_Passo(2), Prob(0:Nmax*Nmax)
      open(10, file="saida-d-10407962")
      I_AndPos = 0
      do j = 1, N
        S = 0
        Prob = 0 ! reset the histogram every step; otherwise old counts leak into later steps
        ! move every walker one lattice step in a random direction (0, 1, 2 or 3)
        do i = 1, M
          i_rand = 4*rand()
          arg = i_rand*pi/2
          ! nint() rounds to the nearest integer; plain truncation could turn
          ! e.g. cos(pi) = -0.99999994 into 0 and silently lose steps
          I_Passo = (/nint(cos(arg)), nint(sin(arg))/)
          I_AndPos(i,:) = I_AndPos(i,:) + I_Passo
        end do
        ! histogram of walker positions, folded into one quadrant by abs();
        ! assumes |x| and |y| stay below Nmax, which holds with high
        ! probability since the RMS displacement after N steps is sqrt(N) << Nmax^2
        do i = 1, M
          i_x = abs(I_AndPos(i,1))
          i_y = abs(I_AndPos(i,2))
          iglob = i_x + i_y*Nmax
          Prob(iglob) = Prob(iglob) + 1
        end do
        Prob = Prob/M ! normalize counts to probabilities
        ! Shannon entropy S = -sum p ln p over occupied sites only
        do i = 0, Nmax*Nmax
          if (Prob(i).ne.0) then
            S = S - Prob(i)*log(Prob(i))
          end if
        end do
        write(10,'(I0," ",1F0.3)') j, S
      end do
      close(10)
end program tarefad
5. Molten SbF
library( "macroutils2" ) # ====== Read a bin file ====== # Format the path to a test binary file ( filenm <- system.file( "bintest/chat_winCer_GW-D_1kgHa_d298_annual_output.bin", package = "macroutils2", mustWork = TRUE ) ) # Read the binary file tmp1 <- macroReadBin( f = filenm ) # Inspect the table colnames( tmp1 ) dim( tmp1 ) # ====== Aggregate the results ===== # Mean by year and month (only top results shown): head( r1 <- macroAggregateBin( x = tmp1, by = "%Y-%m", FUN = mean ) ) # Mean by month too, but on one column only # (only top results shown): head( r2 <- macroAggregateBin( x = tmp1, columns = "CCET", by = "%Y-%m", FUN = mean ) ) # Mean by week of year (00 -> 53): r3 <- macroAggregateBin( x = tmp1, by = "%Y-%W", FUN = mean ) # Inspect the results head( r3 ) tail( r3 ) # Notice the new format of the column 'Date' ("character") class( r1[,"Date"] ) # Trick to convert r1$Date to POSIXct date again, by adding a virtual day: r1[,"Date"] <- as.POSIXct( paste( sep = "", r1[,"Date"], "-15" ), format = "%Y-%m-%d", tz = "GMT" ) class( r1[,"Date"] ) # Plot the results plot( r1[,2] ~ r1[,"Date"], type = "b", col = "red" ) # ====== using the original R aggregate() ===== # More code, but a bit faster r1b <- aggregate( x = tmp1[,-1], by = list( "Date" = format.POSIXct( tmp1[,"Date"], "%Y-%m" ) ), FUN = mean ) head( r1b ) identical( r1[,-1], r1b[,-1] )
\section{Resolution Effects} \label{sec:res} Figure demonstrating the imprint that SFR $= 0$ values leave on the observable space, and how we deal with them so that we can safely ignore them...
Application: motors, devices, printers, sensors, CD/DVD-ROM drives, and so on. Bonded NdFeB magnets offer new and cost-effective solutions for design and manufacture: the toughness typical of plastics, diverse and complex geometries, a preferred magnetizing pattern, as well as a wider choice of assembly methods. Extensive choice of properties in the range 1.0–11.5 MGOe. Much better corrosion resistance than sintered NdFeB magnets. Reduced size and weight of the application owing to comparatively stronger magnetic properties and lower density.
Anisotropic Bonded Magnets
Compression-bonded NdFeB magnets are extensively used in generators, motors, meters, miniaturized sensors and other applications where they offer advantages over sintered NdFeB and ferrite magnets. Because of the limitations of the binder and the high filling ratio of NdFeB alloy powder, adhesive bonding or additional molding are the proper ways to assemble a compression-bonded NdFeB magnet with other components. The compression process is usually recommended for molding magnets with simple geometries such as discs, rings, blocks and cylinders. Segments and other irregular shapes with bores, slots or chamfers are also available by the direct compaction process. The dimensional tolerance of compression-bonded NdFeB magnets in the X-Y plane is economically controlled to within +/-0.05 mm. Closer tolerances on critical dimensions are also achievable and should be negotiated.
#define DEBUG 1 /** * File : E.cpp * Author : Kazune Takahashi * Created : 11/22/2019, 4:12:54 PM * Powered by Visual Studio Code */ #include <iostream> #include <iomanip> #include <algorithm> #include <vector> #include <string> #include <complex> #include <tuple> #include <queue> #include <stack> #include <map> #include <set> #include <unordered_map> #include <unordered_set> #include <bitset> #include <functional> #include <random> #include <chrono> #include <cctype> #include <cassert> #include <cmath> #include <cstdio> #include <cstdlib> // ----- boost ----- #include <boost/rational.hpp> // ----- using directives and manipulations ----- using boost::rational; using namespace std; using ll = long long; // ----- constexpr for Mint and Combination ----- constexpr ll MOD{1000000007LL}; // constexpr ll MOD{998244353LL}; // be careful constexpr ll MAX_SIZE{3000010LL}; // constexpr ll MAX_SIZE{3000010LL}; // if 10^7 is needed // ----- ch_max and ch_min ----- template <typename T> void ch_max(T &left, T right) { if (left < right) { left = right; } } template <typename T> void ch_min(T &left, T right) { if (left > right) { left = right; } } // ----- Mint ----- template <ll MOD = MOD> class Mint { public: ll x; Mint() : x{0LL} {} Mint(ll x) : x{x % MOD} {} Mint operator-() const { return x ? MOD - x : 0; } Mint &operator+=(const Mint &a) { if ((x += a.x) >= MOD) { x -= MOD; } return *this; } Mint &operator-=(const Mint &a) { return *this += -a; } Mint &operator*=(const Mint &a) { (x *= a.x) %= MOD; return *this; } Mint &operator/=(const Mint &a) { Mint b{a}; return *this *= b.power(MOD - 2); } Mint operator+(const Mint &a) const { return Mint(*this) += a; } Mint operator-(const Mint &a) const { return Mint(*this) -= a; } Mint operator*(const Mint &a) const { return Mint(*this) *= a; } Mint operator/(const Mint &a) const { return Mint(*this) /= a; } bool operator<(const Mint &a) const { return x < a.x; } bool operator==(const Mint &a) const { return x == a.x; } const Mint power(ll N) { if (N == 0) { return 1; } else if (N % 2 == 1) { return *this * power(N - 1); } else { Mint half = power(N / 2); return half * half; } } }; template <ll MOD> Mint<MOD> operator+(ll lhs, const Mint<MOD> &rhs) { return rhs + lhs; } template <ll MOD> Mint<MOD> operator-(ll lhs, const Mint<MOD> &rhs) { return -rhs + lhs; } template <ll MOD> Mint<MOD> operator*(ll lhs, const Mint<MOD> &rhs) { return rhs * lhs; } template <ll MOD> Mint<MOD> operator/(ll lhs, const Mint<MOD> &rhs) { return Mint<MOD>{lhs} / rhs; } template <ll MOD> istream &operator>>(istream &stream, Mint<MOD> &a) { return stream >> a.x; } template <ll MOD> ostream &operator<<(ostream &stream, const Mint<MOD> &a) { return stream << a.x; } // ----- Combination ----- template <ll MOD = MOD, ll MAX_SIZE = MAX_SIZE> class Combination { public: vector<Mint<MOD>> inv, fact, factinv; Combination() : inv(MAX_SIZE), fact(MAX_SIZE), factinv(MAX_SIZE) { inv[1] = 1; for (auto i = 2LL; i < MAX_SIZE; i++) { inv[i] = (-inv[MOD % i]) * (MOD / i); } fact[0] = factinv[0] = 1; for (auto i = 1LL; i < MAX_SIZE; i++) { fact[i] = Mint<MOD>(i) * fact[i - 1]; factinv[i] = inv[i] * factinv[i - 1]; } } Mint<MOD> operator()(int n, int k) { if (n >= 0 && k >= 0 && n - k >= 0) { return fact[n] * factinv[k] * factinv[n - k]; } return 0; } Mint<MOD> catalan(int x, int y) { return (*this)(x + y, y) - (*this)(x + y, y - 1); } }; // ----- for C++14 ----- using mint = Mint<MOD>; using combination = Combination<MOD, MAX_SIZE>; ll gcd(ll x, ll y) { return y ? 
gcd(y, x % y) : x; } // ----- frequently used constexpr ----- // constexpr double epsilon{1e-10}; // constexpr ll infty{1000000000000000LL}; // constexpr int dx[4] = {1, 0, -1, 0}; // constexpr int dy[4] = {0, 1, 0, -1}; // ----- Yes() and No() ----- void Yes() { cout << "Yes" << endl; exit(0); } void No() { cout << "No" << endl; exit(0); } // ----- main() ----- int N; vector<string> S; ll dp[40][40][40][40]; ll dp2[40][40][40][40]; bool connectable(int i, int j) { return S[i][j] == '1'; } ll calc(int A, int B, int C, int D); ll calc_unit(int A, int B, int C, int D); void flush(); void flush() { ll res{0LL}; if (N >= 2) { for (auto i = 2; i <= 2 * N - 2; i++) { if (connectable(0, i)) { res += calc(1, i - 1, i + 1, 2 * N - 1); } } } else if (connectable(0, 1)) { res = 1; } else { res = 0; } cout << res << endl; } ll calc(int A, int B, int C, int D) { if (dp2[A][B][C][D] >= 0) { return dp2[A][B][C][D]; } if (A == B || C == D) { return dp2[A][B][C][D] = calc_unit(A, B, C, D); } ll res{0LL}; for (auto i = A + 1; i <= B; i++) { for (auto j = C; j <= D - 1; j++) { res += calc(A, i - 1, j + 1, D) * calc_unit(i, B, C, j); } } res += calc_unit(A, B, C, D); return dp2[A][B][C][D] = res; } ll calc_unit(int A, int B, int C, int D) { if (dp[A][B][C][D] >= 0) { return dp[A][B][C][D]; } if (A == B && C == D) { return dp[A][B][C][D] = (connectable(A, C) ? 1 : 0); } ll res{0LL}; if (A == B) { for (auto j = C + 1; j <= D - 1; j++) { if (connectable(A, j)) { res += calc(C, j - 1, j + 1, D); } } } else if (C == D) { for (auto i = A + 1; i <= B - 1; i++) { if (connectable(i, C)) { res += calc(A, i - 1, i + 1, B); } } } else { for (auto i = A + 1; i <= B - 1; i++) { for (auto j = C + 1; j <= D - 1; j++) { if (connectable(i, j)) { res += calc(A, i - 1, i + 1, B) * calc(C, j - 1, j + 1, D); } } } } return dp[A][B][C][D] = res; } int main() { cin >> N; S.resize(2 * N); for (auto i = 0; i < 2 * N; i++) { cin >> S[i]; } fill(&dp[0][0][0][0], &dp[0][0][0][0] + 40 * 40 * 40 * 40, -1); fill(&dp2[0][0][0][0], &dp2[0][0][0][0] + 40 * 40 * 40 * 40, -1); flush(); }
[STATEMENT] lemma widen_asym_1: assumes wfP: "wf_prog wf_md P" shows "P \<turnstile> C \<le> D \<Longrightarrow> C = D \<or> \<not> (P \<turnstile> D \<le> C)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. P \<turnstile> C \<le> D \<Longrightarrow> C = D \<or> \<not> P \<turnstile> D \<le> C [PROOF STEP] proof (erule widen.induct) [PROOF STATE] proof (state) goal (6 subgoals): 1. \<And>T. T = T \<or> \<not> P \<turnstile> T \<le> T 2. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 3. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 4. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 5. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 6. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] fix T [PROOF STATE] proof (state) goal (6 subgoals): 1. \<And>T. T = T \<or> \<not> P \<turnstile> T \<le> T 2. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 3. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 4. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 5. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 6. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] show "T = T \<or> \<not> (P \<turnstile> T \<le> T)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. T = T \<or> \<not> P \<turnstile> T \<le> T [PROOF STEP] by simp [PROOF STATE] proof (state) this: T = T \<or> \<not> P \<turnstile> T \<le> T goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] next [PROOF STATE] proof (state) goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] fix C D [PROOF STATE] proof (state) goal (5 subgoals): 1. 
\<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] assume CscD: "P \<turnstile> C \<preceq>\<^sup>* D" [PROOF STATE] proof (state) this: P \<turnstile> C \<preceq>\<^sup>* D goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] then [PROOF STATE] proof (chain) picking this: P \<turnstile> C \<preceq>\<^sup>* D [PROOF STEP] have CpscD: "C = D \<or> (C \<noteq> D \<and> (subcls1 P)\<^sup>+\<^sup>+ C D)" [PROOF STATE] proof (prove) using this: P \<turnstile> C \<preceq>\<^sup>* D goal (1 subgoal): 1. C = D \<or> C \<noteq> D \<and> (subcls1 P)\<^sup>+\<^sup>+ C D [PROOF STEP] by (simp add: rtranclpD) [PROOF STATE] proof (state) this: C = D \<or> C \<noteq> D \<and> (subcls1 P)\<^sup>+\<^sup>+ C D goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] { [PROOF STATE] proof (state) this: C = D \<or> C \<noteq> D \<and> (subcls1 P)\<^sup>+\<^sup>+ C D goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] assume "P \<turnstile> D \<preceq>\<^sup>* C" [PROOF STATE] proof (state) this: P \<turnstile> D \<preceq>\<^sup>* C goal (5 subgoals): 1. \<And>C D. 
P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] then [PROOF STATE] proof (chain) picking this: P \<turnstile> D \<preceq>\<^sup>* C [PROOF STEP] have DpscC: "D = C \<or> (D \<noteq> C \<and> (subcls1 P)\<^sup>+\<^sup>+ D C)" [PROOF STATE] proof (prove) using this: P \<turnstile> D \<preceq>\<^sup>* C goal (1 subgoal): 1. D = C \<or> D \<noteq> C \<and> (subcls1 P)\<^sup>+\<^sup>+ D C [PROOF STEP] by (simp add: rtranclpD) [PROOF STATE] proof (state) this: D = C \<or> D \<noteq> C \<and> (subcls1 P)\<^sup>+\<^sup>+ D C goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] { [PROOF STATE] proof (state) this: D = C \<or> D \<noteq> C \<and> (subcls1 P)\<^sup>+\<^sup>+ D C goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] assume "(subcls1 P)\<^sup>+\<^sup>+ D C" [PROOF STATE] proof (state) this: (subcls1 P)\<^sup>+\<^sup>+ D C goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. 
\<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] with wfP [PROOF STATE] proof (chain) picking this: wf_prog wf_md P (subcls1 P)\<^sup>+\<^sup>+ D C [PROOF STEP] have CnscD: "\<not> (subcls1 P)\<^sup>+\<^sup>+ C D" [PROOF STATE] proof (prove) using this: wf_prog wf_md P (subcls1 P)\<^sup>+\<^sup>+ D C goal (1 subgoal): 1. \<not> (subcls1 P)\<^sup>+\<^sup>+ C D [PROOF STEP] by (rule subcls_asym) [PROOF STATE] proof (state) this: \<not> (subcls1 P)\<^sup>+\<^sup>+ C D goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] with CpscD [PROOF STATE] proof (chain) picking this: C = D \<or> C \<noteq> D \<and> (subcls1 P)\<^sup>+\<^sup>+ C D \<not> (subcls1 P)\<^sup>+\<^sup>+ C D [PROOF STEP] have "C = D" [PROOF STATE] proof (prove) using this: C = D \<or> C \<noteq> D \<and> (subcls1 P)\<^sup>+\<^sup>+ C D \<not> (subcls1 P)\<^sup>+\<^sup>+ C D goal (1 subgoal): 1. C = D [PROOF STEP] by simp [PROOF STATE] proof (state) this: C = D goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] } [PROOF STATE] proof (state) this: (subcls1 P)\<^sup>+\<^sup>+ D C \<Longrightarrow> C = D goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. 
\<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] with DpscC [PROOF STATE] proof (chain) picking this: D = C \<or> D \<noteq> C \<and> (subcls1 P)\<^sup>+\<^sup>+ D C (subcls1 P)\<^sup>+\<^sup>+ D C \<Longrightarrow> C = D [PROOF STEP] have "C = D" [PROOF STATE] proof (prove) using this: D = C \<or> D \<noteq> C \<and> (subcls1 P)\<^sup>+\<^sup>+ D C (subcls1 P)\<^sup>+\<^sup>+ D C \<Longrightarrow> C = D goal (1 subgoal): 1. C = D [PROOF STEP] by blast [PROOF STATE] proof (state) this: C = D goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] } [PROOF STATE] proof (state) this: P \<turnstile> D \<preceq>\<^sup>* C \<Longrightarrow> C = D goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] hence "Class C = Class D \<or> \<not> (P \<turnstile> D \<preceq>\<^sup>* C)" [PROOF STATE] proof (prove) using this: P \<turnstile> D \<preceq>\<^sup>* C \<Longrightarrow> C = D goal (1 subgoal): 1. Class C = Class D \<or> \<not> P \<turnstile> D \<preceq>\<^sup>* C [PROOF STEP] by blast [PROOF STATE] proof (state) this: Class C = Class D \<or> \<not> P \<turnstile> D \<preceq>\<^sup>* C goal (5 subgoals): 1. \<And>C D. P \<turnstile> C \<preceq>\<^sup>* D \<Longrightarrow> Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C 2. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 3. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 4. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 5. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] thus "Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C" [PROOF STATE] proof (prove) using this: Class C = Class D \<or> \<not> P \<turnstile> D \<preceq>\<^sup>* C goal (1 subgoal): 1. 
Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C [PROOF STEP] by simp [PROOF STATE] proof (state) this: Class C = Class D \<or> \<not> P \<turnstile> Class D \<le> Class C goal (4 subgoals): 1. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 2. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 3. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 4. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] next [PROOF STATE] proof (state) goal (4 subgoals): 1. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 2. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 3. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 4. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] fix C [PROOF STATE] proof (state) goal (4 subgoals): 1. \<And>C. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT 2. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 3. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 4. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] show "NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT" [PROOF STATE] proof (prove) goal (1 subgoal): 1. NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT [PROOF STEP] by simp [PROOF STATE] proof (state) this: NT = Class C \<or> \<not> P \<turnstile> Class C \<le> NT goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] next [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] fix A [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. 
\<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] { [PROOF STATE] proof (state) goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] assume "P \<turnstile> A\<lfloor>\<rceil> \<le> NT" [PROOF STATE] proof (state) this: P \<turnstile> A\<lfloor>\<rceil> \<le> NT goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] hence "A\<lfloor>\<rceil> = NT" [PROOF STATE] proof (prove) using this: P \<turnstile> A\<lfloor>\<rceil> \<le> NT goal (1 subgoal): 1. A\<lfloor>\<rceil> = NT [PROOF STEP] by fastforce [PROOF STATE] proof (state) this: A\<lfloor>\<rceil> = NT goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] hence "False" [PROOF STATE] proof (prove) using this: A\<lfloor>\<rceil> = NT goal (1 subgoal): 1. False [PROOF STEP] by simp [PROOF STATE] proof (state) this: False goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] } [PROOF STATE] proof (state) this: P \<turnstile> A\<lfloor>\<rceil> \<le> NT \<Longrightarrow> False goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] hence "\<not> (P \<turnstile> A\<lfloor>\<rceil> \<le> NT)" [PROOF STATE] proof (prove) using this: P \<turnstile> A\<lfloor>\<rceil> \<le> NT \<Longrightarrow> False goal (1 subgoal): 1. 
\<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT [PROOF STEP] by blast [PROOF STATE] proof (state) this: \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT goal (3 subgoals): 1. \<And>A. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT 2. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 3. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] thus "NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT" [PROOF STATE] proof (prove) using this: \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT goal (1 subgoal): 1. NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT [PROOF STEP] by simp [PROOF STATE] proof (state) this: NT = A\<lfloor>\<rceil> \<or> \<not> P \<turnstile> A\<lfloor>\<rceil> \<le> NT goal (2 subgoals): 1. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 2. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] next [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 2. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] fix A [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>A. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> 2. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] show "A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> [PROOF STEP] by(auto dest: Object_widen) [PROOF STATE] proof (state) this: A\<lfloor>\<rceil> = Class Object \<or> \<not> P \<turnstile> Class Object \<le> A\<lfloor>\<rceil> goal (1 subgoal): 1. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] fix A B [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>A B. 
\<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] assume AsU: "P \<turnstile> A \<le> B" and BnpscA: "A = B \<or> \<not> P \<turnstile> B \<le> A" [PROOF STATE] proof (state) this: P \<turnstile> A \<le> B A = B \<or> \<not> P \<turnstile> B \<le> A goal (1 subgoal): 1. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] { [PROOF STATE] proof (state) this: P \<turnstile> A \<le> B A = B \<or> \<not> P \<turnstile> B \<le> A goal (1 subgoal): 1. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] assume "P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil>" [PROOF STATE] proof (state) this: P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> goal (1 subgoal): 1. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] hence "P \<turnstile> B \<le> A" [PROOF STATE] proof (prove) using this: P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> goal (1 subgoal): 1. P \<turnstile> B \<le> A [PROOF STEP] by (auto dest: Array_Array_widen) [PROOF STATE] proof (state) this: P \<turnstile> B \<le> A goal (1 subgoal): 1. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] with BnpscA [PROOF STATE] proof (chain) picking this: A = B \<or> \<not> P \<turnstile> B \<le> A P \<turnstile> B \<le> A [PROOF STEP] have "A = B" [PROOF STATE] proof (prove) using this: A = B \<or> \<not> P \<turnstile> B \<le> A P \<turnstile> B \<le> A goal (1 subgoal): 1. A = B [PROOF STEP] by blast [PROOF STATE] proof (state) this: A = B goal (1 subgoal): 1. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] hence "A\<lfloor>\<rceil> = B\<lfloor>\<rceil>" [PROOF STATE] proof (prove) using this: A = B goal (1 subgoal): 1. A\<lfloor>\<rceil> = B\<lfloor>\<rceil> [PROOF STEP] by simp [PROOF STATE] proof (state) this: A\<lfloor>\<rceil> = B\<lfloor>\<rceil> goal (1 subgoal): 1. \<And>A B. \<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] } [PROOF STATE] proof (state) this: P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> goal (1 subgoal): 1. \<And>A B. 
\<lbrakk>P \<turnstile> A \<le> B; A = B \<or> \<not> P \<turnstile> B \<le> A\<rbrakk> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] thus "A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil>" [PROOF STATE] proof (prove) using this: P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> \<Longrightarrow> A\<lfloor>\<rceil> = B\<lfloor>\<rceil> goal (1 subgoal): 1. A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> [PROOF STEP] by blast [PROOF STATE] proof (state) this: A\<lfloor>\<rceil> = B\<lfloor>\<rceil> \<or> \<not> P \<turnstile> B\<lfloor>\<rceil> \<le> A\<lfloor>\<rceil> goal: No subgoals! [PROOF STEP] qed
Require Import List.
Import ListNotations.
Require Import Nat.

(* [myNth n ls] returns the [n]-th element of [ls], counted from 1,
   or [None] if [n] is 0 or larger than the length of [ls]. *)
Fixpoint myNth {X : Type} (n : nat) (ls : list X) : option X :=
  match ls with
  | [] => None
  | x::xs => match n with
             | 0 => None
             | (S m) => if eqb m 0 then Some x else myNth m xs
             end
  end.

(* Looking anything up in the empty list fails. *)
Theorem emptyList {X : Type} (n : nat) : myNth n ([] : list X) = None.
Proof.
  destruct n; auto.
Qed.

(* Index 0 is always out of range, since indexing is 1-based. *)
Theorem zeroNth {X : Type} (ls : list X) : myNth 0 ls = None.
Proof.
  destruct ls; auto.
Qed.

(* Indices strictly beyond the length of the list are out of range. *)
Theorem leLtN {X : Type} (ls : list X) (n : nat) : length ls < n -> myNth n ls = None.
Proof.
  generalize dependent n. induction ls; intros.
  + destruct n; auto.
  + destruct n.
    - inversion H.
    - destruct n.
      * inversion H. inversion H1.
      * assert (myNth (S (S n)) (a::ls) = myNth (S n) ls).
        { clear; reflexivity. }
        rewrite H0. clear H0. apply IHls. simpl in H. intuition.
Qed.

(* Correctness: if [x] sits at (1-based) position [n] of [ls],
   i.e. [n] elements of [ls] end with [x], then [myNth n ls] finds it. *)
Theorem myNthCorrect {X : Type}: forall (ls xs ys : list X) n x,
  length (xs ++ [x]) = n -> ls = xs ++ x :: ys -> myNth n ls = Some x.
Proof.
  intro. induction ls; intros.
  + destruct xs; inversion H0.
  + destruct n.
    - destruct xs.
      * inversion H.
      * inversion H.
    - destruct n.
      * destruct xs.
        ++ inversion H0. auto.
        ++ destruct xs.
           -- inversion H.
           -- inversion H.
      * destruct xs.
        ++ inversion H.
        ++ inversion H0.
           assert (myNth (S (S n)) (x0 :: xs ++ x :: ys) = myNth (S n) (xs ++ x :: ys)).
           { clear. reflexivity. }
           rewrite H1. clear H1. rewrite <- H3.
           eapply IHls.
           -- simpl in H. apply eq_add_S in H. exact H.
           -- exact H3.
Qed.
\section{Theory} \label{sec:theory} The following is a short summary of the equations used, which were adopted from chapter 3 of the course book~\cite{Bonet2008}. Unless stated otherwise, the uppercase letters refer to the initial (reference) configuration of the problem, while the lowercase ones refer to the current configuration. The boldface letters denote vectors, matrices and tensors. For a given load, solving the problem implies finding a configuration that simultaneously satisfies both the global (system) equilibrium equations and the constitutive equations. The equilibrium equations are expressed in terms of the residual (out-of-balance) vector \(\bm{R} (\bm{x})\) as the balance between internal and external forces: \begin{equation} \bm{R} (\bm{x}) = \bm{T} (\bm{x}) - \bm{F} = \bm{0}, \end{equation} where \(\bm{x} =\left[ \bm{x}_{1}, \bm{x}_{2}, \cdots, \bm{x}_{N} \right]^{\text{T}}\) is the vector of current nodal positions; \(\bm{T} =\left[ \bm{T}_{1}, \bm{T}_{2}, \cdots, \bm{T}_{N} \right]^{\text{T}}\) is the vector of internal nodal forces; \(\bm{F} =\left[ \bm{F}_{1}, \bm{F}_{2}, \cdots, \bm{F}_{N} \right]^{\text{T}}\) is the vector of external nodal forces, which is assumed here to be independent of the current nodal positions \(\bm{x}\) (generally this is not true); and \(N\) is the number of nodes. \subsection{Hyperelasticity} \label{sec:hyperelasticity} In the case of hyperelastic material behaviour of a rod, i.e.\ a material whose strain energy per unit volume \(V\) does not depend on the path taken by the rod as it moves from the initial length \(L\) to the current length \(l\), the internal truss forces \(\bm{T}_{a}\) and \(\bm{T}_{b}\) can be computed as \begin{equation} \bm{T}_{b} = \frac{V E}{l} \ln \left( \frac{l}{L} \right) \bm{n} = \tau \frac{V}{l} \bm{n}, \quad \bm{T}_{a} = - \bm{T}_{b}. \end{equation} Here, a constant \(E\) analogous to Young's modulus has been used to relate the Kirchhoff stress \(\tau = E \varepsilon = \sigma v / V\) to the logarithmic strain \(\varepsilon = \ln(l/L)\). Finding the equilibrium position is carried out using the Newton--Raphson method, which involves linearisation of the equilibrium equations. The linearisation yields the directional derivative \(D \bm{T}^{(e)} (\bm{x}^{(e)}) [\bm{u}^{(e)}]\), which gives the expression for the tangent stiffness matrix: \begin{equation} D \bm{T}^{(e)} (\bm{x}^{(e)}) [\bm{u}^{(e)}] = \bm{K}^{(e)} \bm{u}^{(e)} = \begin{bmatrix} \bm{K}_{aa}^{(e)} & \bm{K}_{ab}^{(e)} \\ \bm{K}_{ba}^{(e)} & \bm{K}_{bb}^{(e)} \end{bmatrix} \begin{bmatrix} \bm{u}_{a} \\ \bm{u}_{b} \end{bmatrix} \end{equation} with \begin{align} \bm{K}_{aa}^{(e)} &= \bm{K}_{bb}^{(e)} =\left( \frac{V}{v} \frac{d \tau}{d \varepsilon} \frac{a}{l} - \frac{2 \sigma a}{l} \right) \bm{n} \otimes \bm{n} + \frac{\sigma a}{l} \bm{I} \\ \bm{K}_{ab}^{(e)} &= \bm{K}_{ba}^{(e)} = - \bm{K}_{aa}^{(e)}, \end{align} where \(d \tau / d \varepsilon = E\) is the elastic material tangent modulus, \(a\) is the current cross-sectional area of the rod, and \(v = a l\) is its current volume.
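For reference, the Newton--Raphson iteration implied by this linearisation is the standard one (spelled out here for completeness; it is not written explicitly in the summary above): \begin{equation} \bm{K} (\bm{x}_{k}) \, \bm{u} = - \bm{R} (\bm{x}_{k}), \qquad \bm{x}_{k+1} = \bm{x}_{k} + \bm{u}, \end{equation} where \(\bm{K}\) is the tangent stiffness matrix assembled from the element contributions \(\bm{K}^{(e)}\), and the iteration is repeated until \(\lVert \bm{R} (\bm{x}_{k+1}) \rVert\) drops below a chosen tolerance.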
\subsection{Rate-independent plasticity} \label{sec:plasticity} In the case of rate-independent finite strain plasticity with isotropic hardening, the stress is defined via the elastic strain \(\varepsilon_{e}\): \begin{align} \tau &= E \varepsilon_{e} = E \left( \varepsilon - \varepsilon_{p} \right) \\ \varepsilon_{p} &= \int_{0}^{t} \dot{\varepsilon}_{p} dt \label{eq:plastic-strain} \end{align} The onset of plastic deformation is governed by the yield condition, which for the problem at issue is \begin{equation} f (\tau, \bar{\varepsilon}_{p}) = \left| \tau \right| - \left( \tau_{y}^{0} + H \bar{\varepsilon}_{p} \right) \leq 0, \quad \bar{\varepsilon}_{p} \geq 0, \end{equation} where \(\tau_{y}^{0}\) is the initial yield stress, \(\bar{\varepsilon}_{p}\) is the hardening parameter and \(H\) is a material property called the plastic modulus. At its simplest, the hardening parameter is defined as the accumulated absolute plastic strain occurring over time: \begin{equation} \bar{\varepsilon}_{p} = \int_{0}^{t} \dot{\bar{\varepsilon}}_{p} dt, \quad \dot{\bar{\varepsilon}}_{p} = \left| \dot{\varepsilon}_{p} \right|, \quad \dot{\varepsilon}_{p} = \dot{\gamma} \frac{\partial f}{\partial \tau}, \end{equation} where \(\dot{\gamma}\) is the plastic multiplier. In a computational setting the time integration of \eqref{eq:plastic-strain} can only be performed approximately from a finite sequence of values determined at different time steps. In order to satisfy the yield condition exactly at each incremental time step, the return-mapping algorithm described in figure~\ref{fig:return-mapping} is used, which employs incremental kinematics (see figure~\ref{fig:incr-kinematics}). \begin{figure}[th] \begin{subfigure}[t]{0.38\textwidth} \centering \includegraphics[width=\textwidth]{return_mapping.png} \caption{Return-mapping algorithm.} \label{fig:return-mapping} \end{subfigure} ~ \begin{subfigure}[t]{0.56\textwidth} \centering \includegraphics[width=\textwidth]{incremental_kinematics.png} \caption{Incremental kinematics. \(\lambda = l / L\).} \label{fig:incr-kinematics} \end{subfigure} \caption{Two figures from \cite{Bonet2008}.} \end{figure} The last matter to be addressed in the presence of plasticity is the material tangent modulus \(d \tau / d \varepsilon\). The tangent modulus derived from incremental considerations is generally not the same as the one obtained from the rate equations. The reason is that the incremental change in stress imposed by the chosen return-mapping algorithm is different from the continuous change in stress stemming from the rate equations. That is why the algorithmic tangent modulus is used: \begin{equation} \frac{d \tau_{n+1}}{d \varepsilon_{n+1}} = \frac{E H}{E + H} \end{equation} When plasticity occurs, this replaces the elastic tangent stiffness \(d \tau / d \varepsilon = E\).
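Since figure~\ref{fig:return-mapping} is not reproduced here, the following sketch reconstructs the standard one-dimensional return-mapping update it refers to (the usual radial-return algorithm, not copied from the source; the increment \(\Delta \varepsilon = \ln(l_{n+1}/l_{n})\) follows the incremental kinematics of figure~\ref{fig:incr-kinematics}): \begin{align} \tau^{\text{trial}} &= \tau_{n} + E \Delta \varepsilon, \qquad f^{\text{trial}} = \left| \tau^{\text{trial}} \right| - \left( \tau_{y}^{0} + H \bar{\varepsilon}_{p,n} \right), \\ \Delta \gamma &= \begin{cases} 0 & \text{if } f^{\text{trial}} \leq 0 \\ f^{\text{trial}} / (E + H) & \text{if } f^{\text{trial}} > 0 \end{cases} \\ \tau_{n+1} &= \tau^{\text{trial}} - E \Delta \gamma \operatorname{sign} ( \tau^{\text{trial}} ), \qquad \bar{\varepsilon}_{p,n+1} = \bar{\varepsilon}_{p,n} + \Delta \gamma. \end{align} Differentiating \(\tau_{n+1}\) with respect to \(\varepsilon_{n+1}\) in the plastic branch recovers exactly the algorithmic modulus \(E H / (E + H)\) quoted above.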
Require Import rt.util.all. Require Import rt.model.arrival.basic.task rt.model.arrival.basic.job rt.model.priority rt.model.arrival.basic.task_arrival. Require Import rt.model.schedule.global.basic.schedule. Require Import rt.model.schedule.apa.affinity rt.model.schedule.apa.interference rt.model.schedule.apa.platform. From mathcomp Require Import ssreflect ssrbool ssrfun eqtype ssrnat seq fintype bigop. Module InterferenceEDF. Import Schedule Priority Platform Interference Priority Affinity. Section Lemmas. Context {sporadic_task: eqType}. Context {Job: eqType}. Variable job_arrival: Job -> time. Variable job_cost: Job -> time. Variable job_deadline: Job -> time. Variable job_task: Job -> sporadic_task. (* Assume any job arrival sequence... *) Variable arr_seq: arrival_sequence Job. (* Consider any schedule. *) Variable num_cpus: nat. Variable sched: schedule Job num_cpus. (* Assume that every job at any time has a processor affinity alpha. *) Variable alpha: task_affinity sporadic_task num_cpus. (* Assume that the schedule satisfies the global scheduling invariant for EDF, i.e., if any job of tsk is backlogged, every processor must be busy with jobs with no larger absolute deadline. *) Hypothesis H_scheduler_uses_EDF: respects_JLFP_policy_under_weak_APA job_arrival job_cost job_task arr_seq sched alpha (EDF job_arrival job_deadline). (* Under EDF scheduling, a job only causes interference if its deadline is not larger than the deadline of the analyzed job. *) Lemma interference_under_edf_implies_shorter_deadlines : forall j j' t1 t2, arrives_in arr_seq j' -> job_interference job_arrival job_cost job_task sched alpha j' j t1 t2 != 0 -> job_arrival j + job_deadline j <= job_arrival j' + job_deadline j'. Proof. rename H_scheduler_uses_EDF into PRIO. unfold respects_JLDP_policy_under_weak_APA in *. intros j j' t1 t2 ARR' INTERF. unfold job_interference in INTERF. destruct ([exists t': 'I_t2, [exists cpu: processor num_cpus, (t' >= t1) && backlogged job_arrival job_cost sched j' t' && can_execute_on alpha (job_task j') cpu && scheduled_on sched j cpu t']]) eqn:EX. { move: EX => /existsP [t' /existsP [cpu /andP [/andP [/andP [LE BACK] CAN] SCHED]]]. by specialize (PRIO j' j cpu t' ARR' BACK SCHED CAN). } { apply negbT in EX; rewrite negb_exists in EX; move: EX => /forallP ALL. rewrite big_nat_cond (eq_bigr (fun x => 0)) in INTERF; first by rewrite -big_nat_cond big_const_nat iter_addn mul0n addn0 eq_refl in INTERF. move => i /andP [/andP [GEi LTi] _]. specialize (ALL (Ordinal LTi)). rewrite negb_exists in ALL. move: ALL => /forallP ALL. rewrite (eq_bigr (fun x => 0)); first by rewrite big_const_ord iter_addn mul0n addn0. intros cpu _; specialize (ALL cpu); simpl in ALL. destruct (backlogged job_arrival job_cost sched j' i); last by rewrite andFb. rewrite GEi 2!andTb in ALL; rewrite andTb. by apply negbTE in ALL; rewrite ALL. } Qed. End Lemmas. End InterferenceEDF.
State Before: α : Type ?u.322539 β : Type ?u.322542 γ : Type ?u.322545 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal ⊢ sup f = lsub f ∨ succ (sup f) = lsub f State After: case inl α : Type ?u.322539 β : Type ?u.322542 γ : Type ?u.322545 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal h : sup f = lsub f ⊢ sup f = lsub f ∨ succ (sup f) = lsub f case inr α : Type ?u.322539 β : Type ?u.322542 γ : Type ?u.322545 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal h : sup f < lsub f ⊢ sup f = lsub f ∨ succ (sup f) = lsub f Tactic: cases' eq_or_lt_of_le (sup_le_lsub.{_, v} f) with h h State Before: case inl α : Type ?u.322539 β : Type ?u.322542 γ : Type ?u.322545 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal h : sup f = lsub f ⊢ sup f = lsub f ∨ succ (sup f) = lsub f State After: no goals Tactic: exact Or.inl h State Before: case inr α : Type ?u.322539 β : Type ?u.322542 γ : Type ?u.322545 r : α → α → Prop s : β → β → Prop t : γ → γ → Prop ι : Type u f : ι → Ordinal h : sup f < lsub f ⊢ sup f = lsub f ∨ succ (sup f) = lsub f State After: no goals Tactic: exact Or.inr ((succ_le_of_lt h).antisymm (lsub_le_sup_succ f))
[STATEMENT] lemma has_vector_derivative_const[simp, derivative_intros]: "((\<lambda>x. c) has_vector_derivative 0) net" [PROOF STATE] proof (prove) goal (1 subgoal): 1. ((\<lambda>x. c) has_vector_derivative (0::'a)) net [PROOF STEP] by (auto simp: has_vector_derivative_def)
/*
Author: Rohan Chetan Thanki
Date created: 16-Oct-2021
*/

/* This contains the implementation of the Hedging_Portfolio class */

#include "Hedging_Portfolio.hpp"

#include <cmath>
#include <string>

#include <boost/math/distributions.hpp>

using namespace std;

/******************************* Constructors and Destructors ***************************************/

// Default constructor
Hedging_Portfolio::Hedging_Portfolio() : Option()
{
}

// Parametrised constructor
Hedging_Portfolio::Hedging_Portfolio(const double& K1, const double& S1, const double& r1,
    const double& T1, const double& vol1, const char& optionType)
    : Option(K1, S1, r1, T1, vol1, optionType)
{
    if (!(optionType == 'c' || optionType == 'C' || optionType == 'p' || optionType == 'P'))
        throw(20);
}

// Destructor
Hedging_Portfolio::~Hedging_Portfolio(void)
{
}

/******************************* Getters ***************************************/

double Hedging_Portfolio::getDelta(void) const { return delta; }
double Hedging_Portfolio::getOptionPrice(void) const { return optionPrice; }
double Hedging_Portfolio::getB(void) const { return B; }
double Hedging_Portfolio::getHedgingError(void) const { return HE; }
string Hedging_Portfolio::getDate(void) const { return date; }
string Hedging_Portfolio::getExpDate(void) const { return expDate; }
double Hedging_Portfolio::getpnlNaked(void) const { return pnlNaked; }
double Hedging_Portfolio::getpnlHedged(void) const { return pnlHedged; }

/******************************* Setters ***************************************/

void Hedging_Portfolio::setDelta(const double& delta1) { delta = delta1; }
void Hedging_Portfolio::setOptionPrice(const double& optionPrice1) { optionPrice = optionPrice1; }
void Hedging_Portfolio::setB(const double& B1) { B = B1; }
void Hedging_Portfolio::setHedgingError(const double& HE1) { HE = HE1; }
void Hedging_Portfolio::setDate(const string& date1) { date = date1; }
void Hedging_Portfolio::setExpDate(const string& expDate1) { expDate = expDate1; }
void Hedging_Portfolio::setpnlNaked(const double& pnlNaked1) { pnlNaked = pnlNaked1; }
void Hedging_Portfolio::setpnlHedged(const double& pnlHedged1) { pnlHedged = pnlHedged1; }

/******************************* Other Functions ***************************************/

// Get CDF of Standard Normal
inline double N(const double& x)
{
    boost::math::normal_distribution<> stdNormal(0.0, 1.0);
    return (cdf(stdNormal, x));
}

// Compute the Black-Scholes delta of the option
double Hedging_Portfolio::computeDelta(void) const
{
    char flag = getFlag();
    double K = getStrikePrice();
    double S = getSpotPrice();
    double r = getRiskFreeRate();
    double T = getTimeToMaturity();
    double sigma = getVolatility();

    double d1 = (log(S / K) + ((r + (pow(sigma, 2) / 2)) * T)) / (sigma * sqrt(T));

    if (flag == 'c' || flag == 'C')
        return (N(d1));
    else if (flag == 'p' || flag == 'P')
        return (N(d1) - 1.0); // put delta is N(d1) - 1, not N(d1)
    else
        throw(10);
}

// Compute option price using Black Scholes formula
double Hedging_Portfolio::computeBlackScholesOptionPrice(const double& sigma) const
{
    char flag = getFlag();
    double K = getStrikePrice();
    double S = getSpotPrice();
    double r = getRiskFreeRate();
    double T = getTimeToMaturity();

    double d1 = (log(S / K) + ((r + (pow(sigma, 2) / 2)) * T)) / (sigma * sqrt(T));
    double d2 = d1 - (sigma * sqrt(T));

    if (flag == 'c' || flag == 'C')
        return (S * N(d1)) - (K * exp(-r * T) * N(d2));
    else if (flag == 'p' || flag == 'P')
        return (K * exp(-r * T) * N(-d2)) - (S * N(-d1));
    else
        throw(10);
}

// Compute the implied volatility by bisection on the Black-Scholes price
double Hedging_Portfolio::computeImpliedVol(double& volMax) const
{
    double volMin = 0;
    double impVol = 0.5 * (volMin + volMax);
    double epsilon = 0.001;
    double modelPrice;

    // Cap the number of bisections so that a market price outside the
    // bracketed range [BS(volMin), BS(volMax)] cannot spin forever.
    for (int iter = 0; iter < 200; ++iter)
    {
        impVol = 0.5 * (volMin + volMax);
        modelPrice = computeBlackScholesOptionPrice(impVol);

        if (modelPrice > optionPrice + epsilon)
            volMax = impVol;
        else if (modelPrice < optionPrice - epsilon)
            volMin = impVol;
        else
            return (impVol);
    }
    return (impVol); // best estimate after the iteration cap
}
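For quick validation outside C++, the same bisection can be mirrored in a few lines of Python; bs_price below is a hypothetical stand-in for computeBlackScholesOptionPrice, not part of this project:

from math import exp, log, sqrt
from statistics import NormalDist

N = NormalDist().cdf  # standard normal CDF

def bs_price(S, K, r, T, sigma, is_call):
    d1 = (log(S / K) + (r + sigma * sigma / 2) * T) / (sigma * sqrt(T))
    d2 = d1 - sigma * sqrt(T)
    if is_call:
        return S * N(d1) - K * exp(-r * T) * N(d2)
    return K * exp(-r * T) * N(-d2) - S * N(-d1)

def implied_vol(price, S, K, r, T, is_call, vol_max=5.0, eps=1e-3, max_iter=200):
    lo, hi = 0.0, vol_max
    for _ in range(max_iter):  # same iteration cap as the C++ version
        mid = 0.5 * (lo + hi)
        model = bs_price(S, K, r, T, mid, is_call)
        if model > price + eps:
            hi = mid
        elif model < price - eps:
            lo = mid
        else:
            return mid
    return 0.5 * (lo + hi)

Since bisection halves the bracket every step, 200 iterations are far more than a 1e-3 price tolerance ever needs; the cap only matters when the quoted price is unattainable within the bracket.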
module ClenshawCurtisBessel using LinearAlgebra, BandedMatrices using SparseArrays, StaticArrays, OffsetArrays import HypergeometricFunctions import ArbNumerics, DoubleFloats, SpecialFunctions include("specialfunctions.jl") # define i.e. bessel J for high precision types include("oliver.jl") # transform recurrence into a boundary value problem include("piessensbranders.jl") # compute modified moments export momentM end
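oliver.jl itself is not shown above; as background, Oliver's method recasts an unstable three-term recurrence as a tridiagonal boundary value problem, which is the standard trick when the desired solution is the minimal one (e.g. Bessel-type moments). A generic Python sketch of the idea, not the package's implementation:

import numpy as np

def oliver_solve(a, b, c, d, y0, yN):
    """Solve a[k]*y[k-1] + b[k]*y[k] + c[k]*y[k+1] = d[k] for k = 1..N-1
    (arrays indexed by k-1), given boundary values y[0] and y[N]."""
    N = len(d) + 1
    A = np.zeros((N - 1, N - 1))
    rhs = np.array(d, dtype=float)
    for i in range(N - 1):
        A[i, i] = b[i]
        if i > 0:
            A[i, i - 1] = a[i]
        else:
            rhs[i] -= a[i] * y0      # move the known y[0] to the right-hand side
        if i < N - 2:
            A[i, i + 1] = c[i]
        else:
            rhs[i] -= c[i] * yN      # move the known y[N] to the right-hand side
    interior = np.linalg.solve(A, rhs)
    return np.concatenate(([y0], interior, [yN]))

Solving the banded system is stable where naive forward recursion blows up, which is presumably why the recurrence for the modified moments is handled this way here; a production version would use a banded solver rather than a dense one.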
/* * Copyright (c) 2011-2012 by Michael Berlin, Zuse Institute Berlin * * Licensed under the BSD License, see LICENSE file for details. * */ #include <boost/scoped_ptr.hpp> #include <iostream> #include <string> #include "libxtreemfs/client.h" #include "libxtreemfs/file_handle.h" #include "libxtreemfs/helper.h" #include "libxtreemfs/system_user_mapping.h" #include "libxtreemfs/user_mapping.h" #include "libxtreemfs/volume.h" #include "libxtreemfs/xtreemfs_exception.h" #include "mkfs.xtreemfs/mkfs_options.h" #include "util/logging.h" using namespace std; using namespace xtreemfs; using namespace xtreemfs::pbrpc; using namespace xtreemfs::util; int main(int argc, char* argv[]) { // Parse command line options. MkfsOptions options; bool invalid_commandline_parameters = false; try { options.ParseCommandLine(argc, argv); } catch(const XtreemFSException& e) { cout << "Invalid parameters found, error: " << e.what() << endl << endl; invalid_commandline_parameters = true; } // Display help if needed. if (options.empty_arguments_list || invalid_commandline_parameters) { cout << options.ShowCommandLineUsage() << endl; return 1; } if (options.show_help) { cout << options.ShowCommandLineHelp() << endl; return 1; } // Show only the version. if (options.show_version) { cout << options.ShowVersion("mkfs.xtreemfs") << endl; return 1; } bool success = true; boost::scoped_ptr<SystemUserMapping> system_user_mapping; boost::scoped_ptr<Client> client; try { // Start logging manually (although it would be automatically started by // ClientImplementation()) as its required by UserMapping. initialize_logger(options.log_level_string, options.log_file_path, LEVEL_WARN); // Set user_credentials. system_user_mapping.reset(SystemUserMapping::GetSystemUserMapping()); // Check if the user specified an additional user mapping in options. UserMapping* additional_um = UserMapping::CreateUserMapping( options.additional_user_mapping_type, options); if (additional_um) { system_user_mapping->RegisterAdditionalUserMapping(additional_um); system_user_mapping->StartAdditionalUserMapping(); } // If no owner name or owning group name is specified, the MRC uses the // UserCredentials to set the owner and owning group of the new volume. // See http://code.google.com/p/xtreemfs/issues/detail?id=204. UserCredentials user_credentials; system_user_mapping->GetUserCredentialsForCurrentUser(&user_credentials); if (!options.owner_username.empty()) { user_credentials.set_username(options.owner_username); } else { #ifndef WIN32 // Warn the user if the SystemUserMapping failed to resolve the UID to // a string (e.g. when there is no entry in /etc/passwd). if (CheckIfUnsignedInteger(user_credentials.username())) { if (Logging::log->loggingActive(LEVEL_WARN)) { Logging::log->getLog(LEVEL_WARN) << "Failed to map the UID " << geteuid() << " to a username." " Now the value \"" << options.owner_username << "\" will be set" " as owner of the volume." " Keep in mind that mount.xtreemfs does" " always try to map UIDs to names. If this is not consistent" " over all your systems (the UID does not always get mapped to" " the same name), you may run into permission problems." << endl; } } #endif // !WIN32 } if (user_credentials.username().empty()) { cout << "Error: No name found for the current user\n"; return 1; } if (!options.owner_groupname.empty()) { user_credentials.add_groups(options.owner_groupname); } else { #ifndef WIN32 // Warn the user if the SystemUserMapping failed to resolve the GID to // a string (e.g. when there is no entry in /etc/group). 
if (CheckIfUnsignedInteger(user_credentials.groups(0))) { if (Logging::log->loggingActive(LEVEL_WARN)) { Logging::log->getLog(LEVEL_WARN) << "Failed to map the GID " << getegid() << " to a group name." " Now the value \"" << options.owner_groupname << "\" will be set" " as owning group of the volume." " Keep in mind that mount.xtreemfs does" " always try to map GIDs to names. If this is not consistent over" " all your systems (the GID does not always get mapped to the" " same group name), you may run into permission problems." << endl; } } #endif // !WIN32 } if (user_credentials.groups(0).empty()) { cout << "Error: No name found for the primary group of the current user" "\n"; return 1; } long quota = parseByteNumber(options.volume_quota); if (quota == -1) { cout << "Error: " << options.volume_quota << " is not a valid quota.\n"; return 1; } if (quota < 0) { cout << "Error: Quota has to be greater or equal zero \n"; return 1; } Auth auth; if (options.admin_password.empty()) { auth.set_auth_type(AUTH_NONE); } else { auth.set_auth_type(AUTH_PASSWORD); auth.mutable_auth_passwd()->set_password(options.admin_password); } // Repeat the used options. cout << "Trying to create the volume: " << options.xtreemfs_url << "\n" << "\n" << "Using options:\n"; if (!options.owner_username.empty()) { cout << " Owner:\t\t\t" << options.owner_username << "\n"; } else { if (!options.SSLEnabled()) { // We cannot tell if it's a user certificate - in that case the MRC // ignores the UserCredentials and extracts the owner from the cert. // To be on the safe side, we output the definite owner only in non-SSL // cases. cout << " Owner:\t\t\t" << user_credentials.username() << "\n"; } } if (!options.owner_groupname.empty()) { cout << " Owning group:\t\t\t" << options.owner_groupname << "\n"; } else { if (!options.SSLEnabled()) { // We cannot tell if it's a user certificate - in that case the MRC // ignores the UserCredentials and extracts the owner from the cert. // To be on the safe side, we output the definite owner only in non-SSL // cases. cout << " Owning group:\t\t\t" << user_credentials.groups(0) << "\n"; } } cout << " Mode:\t\t\t\t" << options.volume_mode_octal << "\n" << " Access Control Policy:\t" << options.access_policy_type_string << "\n" << " Quota:\t\t\t" << options.volume_quota << "\n" << "\n" << " Default striping policy:\t\t" << options.default_striping_policy_type_string << "\n" << " Default stripe size (object size):\t" << options.default_stripe_size << "\n" << " Default stripe width (# OSDs):\t" << options.default_stripe_width << "\n" << "\n"; if (options.volume_attributes.size() > 0) { cout << " Volume attributes (Name = Value)" << endl; for (list<KeyValuePair*>::iterator it = options.volume_attributes.begin(); it != options.volume_attributes.end(); ++it) { cout << " " << (*it)->key() << " = " << (*it)->value() << endl; } cout << endl; } // Create a new client and start it. client.reset(Client::CreateClient( "DIR-host-not-required-for-mkfs", // Using a bogus value as DIR address. // NOLINT user_credentials, options.GenerateSSLOptions(), options)); client->Start(); // Create the volume on the MRC. 
client->CreateVolume(options.mrc_service_address, auth, user_credentials, options.volume_name, options.volume_mode_decimal, options.owner_username, options.owner_groupname, options.access_policy_type, quota, options.default_striping_policy_type, options.default_stripe_size, options.default_stripe_width, options.volume_attributes); } catch (const XtreemFSException& e) { success = false; cout << "Failed to create the volume, error:\n" << "\t" << e.what() << endl; } // Cleanup. if (client) { client->Shutdown(); } system_user_mapping->StopAdditionalUserMapping(); if (success) { cout << "Successfully created volume \"" << options.volume_name << "\" at " "MRC: " << options.xtreemfs_url << endl; return 0; } else { return 1; } }
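parseByteNumber is implemented elsewhere in the libxtreemfs sources and is not shown above; the call site only relies on it returning a non-negative byte count, or -1 on invalid input. Purely as an illustration of that contract, a hypothetical Python sketch — the actual grammar the C++ function accepts may differ:

import re

SUFFIX = {"": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40}

def parse_byte_number(text):
    """Hypothetical analogue of parseByteNumber: '10G' -> 10737418240, -1 if invalid."""
    m = re.fullmatch(r"\s*(\d+)\s*([KMGTkmgt]?)\s*", text)
    if not m:
        return -1
    return int(m.group(1)) * SUFFIX[m.group(2).upper()]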
(* Do not edit this file, it was generated automatically *) Require Import VST.floyd.proofauto. Require Import VST.progs64.revarray. Require Import VST.floyd.sublist. Instance CompSpecs : compspecs. make_compspecs prog. Defined. Definition Vprog : varspecs. mk_varspecs prog. Defined. Definition reverse_spec := DECLARE _reverse WITH a0: val, sh : share, contents : list int, size: Z PRE [ _a OF (tptr tint), _n OF tint ] PROP (0 <= size <= Int.max_signed; writable_share sh) LOCAL (temp _a a0; temp _n (Vint (Int.repr size))) SEP (data_at sh (tarray tint size) (map Vint contents) a0) POST [ tvoid ] PROP() LOCAL() SEP(data_at sh (tarray tint size) (map Vint (rev contents)) a0). Definition main_spec := DECLARE _main WITH gv : globals PRE [] main_pre prog nil gv POST [ tint ] main_post prog nil gv. Definition Gprog : funspecs := ltac:(with_library prog [reverse_spec; main_spec]). Definition flip_ends {A} lo hi (contents: list A) := sublist 0 lo (rev contents) ++ sublist lo hi contents ++ sublist hi (Zlength contents) (rev contents). Definition reverse_Inv a0 sh contents size := (EX j:Z, (PROP (0 <= j; j <= size-j) LOCAL (temp _a a0; temp _lo (Vint (Int.repr j)); temp _hi (Vint (Int.repr (size-j)))) SEP (data_at sh (tarray tint size) (flip_ends j (size-j) contents) a0)))%assert. Lemma Zlength_flip_ends: forall A i j (al: list A), 0 <= i -> i<=j -> j <= Zlength al -> Zlength (flip_ends i j al) = Zlength al. Proof. intros. unfold flip_ends. autorewrite with sublist. omega. Qed. Hint Rewrite @Zlength_flip_ends using (autorewrite with sublist; omega) : sublist. Lemma flip_fact_1: forall A size (contents: list A) j, Zlength contents = size -> 0 <= j -> size - j - 1 <= j <= size - j -> flip_ends j (size - j) contents = rev contents. Proof. intros. unfold flip_ends. rewrite <- (Zlen_le_1_rev (sublist j (size-j) contents)) by (autorewrite with sublist; omega). rewrite !sublist_rev by (autorewrite with sublist; omega). rewrite <- !rev_app_distr, ?H. autorewrite with sublist; auto. Qed. Lemma flip_fact_3: forall A (al: list A) j size, size = Zlength al -> 0 <= j < size - j - 1 -> sublist 0 j (flip_ends j (size - j) al) ++ sublist (size - j - 1) (size - j) al ++ sublist (j + 1) size (sublist 0 (size - j - 1) (flip_ends j (size - j) al) ++ sublist j (j + 1) (flip_ends j (size - j) al) ++ sublist (size - j) size (flip_ends j (size - j) al)) = flip_ends (j + 1) (size - (j + 1)) al. Proof. intros. unfold flip_ends. rewrite <- H. autorewrite with sublist. rewrite (sublist_split 0 j (j+1)) by (autorewrite with sublist; omega). rewrite !app_ass. f_equal. f_equal. rewrite !sublist_rev, <- ?H by omega. rewrite Zlen_le_1_rev by (autorewrite with sublist; omega). f_equal; omega. rewrite (sublist_app2 (size-j) size) by (autorewrite with sublist; omega). autorewrite with sublist. rewrite sublist_app' by (autorewrite with sublist; omega). autorewrite with sublist. f_equal. f_equal; omega. autorewrite with sublist. rewrite <- (Zlen_le_1_rev (sublist j (1+j) al)) by (autorewrite with sublist; omega). rewrite !sublist_rev, <- ?H by omega. rewrite <- !rev_app_distr, <- ?H. autorewrite with sublist. f_equal; f_equal; omega. Qed. Lemma flip_ends_map: forall A B (F: A -> B) lo hi (al: list A), flip_ends lo hi (map F al) = map F (flip_ends lo hi al). Proof. intros. unfold flip_ends. rewrite !map_app. rewrite !map_sublist, !map_rev, Zlength_map. auto. Qed. 
Lemma flip_fact_2: forall {A}{d: Inhabitant A} (al: list A) size j, Zlength al = size -> j < size - j - 1 -> 0 <= j -> Znth (size - j - 1) al = Znth (size - j - 1) (flip_ends j (size - j) al). Proof. intros. unfold flip_ends. autorewrite with sublist. auto. Qed. Lemma body_reverse: semax_body Vprog Gprog f_reverse reverse_spec. Proof. start_function. forward. (* lo = 0; *) forward. (* hi = n; *) assert_PROP (Zlength (map Vint contents) = size) as ZL by entailer!. forward_while (reverse_Inv a0 sh (map Vint contents) size). * (* Prove that current precondition implies loop invariant *) Exists 0. entailer!. unfold flip_ends; autorewrite with sublist; auto. * (* Prove that loop invariant implies typechecking condition *) entailer!. * (* Prove that loop body preserves invariant *) forward. (* t = a[lo]; *) { entailer!. clear - H0 HRE. autorewrite with sublist in *|-*. rewrite flip_ends_map. rewrite Znth_map by list_solve. apply I. } forward. (* s = a[hi-1]; *) { entailer!. clear - H H0 HRE. autorewrite with sublist in *|-*. rewrite flip_ends_map. rewrite Znth_map by list_solve. apply I. } rewrite <- flip_fact_2 by (rewrite ?Zlength_flip_ends; omega). forward. (* a[hi-1] = t; *) forward. (* a[lo] = s; *) forward. (* lo++; *) forward. (* hi--; *) (* Prove postcondition of loop body implies loop invariant *) Exists (Z.succ j). entailer!. f_equal; f_equal; omega. simpl. apply derives_refl'. unfold data_at. f_equal. clear - H0 HRE H1. unfold Z.succ. rewrite <- flip_fact_3 by auto. rewrite <- (Znth_map (Zlength (map Vint contents)-j-1) Vint) by (autorewrite with sublist in *; list_solve). forget (map Vint contents) as al. clear contents. remember (Zlength al) as size. repeat match goal with |- context [reptype ?t] => change (reptype t) with val end. unfold upd_Znth. rewrite !Znth_cons_sublist by (repeat rewrite Zlength_flip_ends; try omega). rewrite ?Zlength_app, ?Zlength_firstn, ?Z.max_r by omega. rewrite ?Zlength_flip_ends by omega. rewrite ?Zlength_sublist by (rewrite ?Zlength_flip_ends ; omega). unfold Z.succ. rewrite <- Heqsize. autorewrite with sublist. replace (size - j - 1 + (1 + j)) with size by (clear; omega). reflexivity. * (* after the loop *) forward. (* return; *) rewrite map_rev. rewrite flip_fact_1; try omega; auto. cancel. Qed. Definition four_contents := [Int.repr 1; Int.repr 2; Int.repr 3; Int.repr 4]. Lemma body_main: semax_body Vprog Gprog f_main main_spec. Proof. name four _four. start_function. forward_call (* revarray(four,4); *) (gv _four, Ews, four_contents, 4). split; [computable | auto]. forward_call (* revarray(four,4); *) (gv _four,Ews, rev four_contents,4). split; [computable | auto]. rewrite rev_involutive. forward. (* return s; *) Qed. Existing Instance NullExtension.Espec. Lemma prog_correct: semax_prog prog Vprog Gprog. Proof. prove_semax_prog. semax_func_cons body_reverse. semax_func_cons body_main. Qed.
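Outside Coq, the flip_ends loop invariant is easy to sanity-check by hand. A small Python model of the same definition (illustrative only, independent of the VST development; sublist lo hi corresponds to the half-open slice [lo:hi]):

def flip_ends(lo, hi, al):
    """First lo and last (len-hi) entries come from the reversed list;
    the middle slice [lo, hi) is still in original order."""
    rev = al[::-1]
    return rev[:lo] + al[lo:hi] + rev[hi:]

# After j iterations of the loop in `reverse`, the array equals
# flip_ends(j, n - j, contents):
xs = [1, 2, 3, 4]
assert flip_ends(0, 4, xs) == [1, 2, 3, 4]   # before the loop
assert flip_ends(1, 3, xs) == [4, 2, 3, 1]   # after one swap
assert flip_ends(2, 2, xs) == [4, 3, 2, 1]   # fully reversed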
Correspondence between Rowntree and Co, the Board of Trade, A. Hughes and Sons and other companies leasing buildings (see below), reports and estimates of the cost of replacement or repair of damage. Concerning capital, policy, monthly returns, sales organisation; with draft Memoranda and Articles of Association. Sections on each of the four companies, with a brief history, organisation chart, details of departments, production and markets, etc.
/- Copyright (c) Sébastien Gouëzel. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Sébastien Gouëzel -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.analysis.normed_space.finite_dimension import Mathlib.data.complex.module import Mathlib.data.complex.is_R_or_C import Mathlib.PostPort universes u_1 u_2 namespace Mathlib /-! # Normed space structure on `ℂ`. This file gathers basic facts on complex numbers of an analytic nature. ## Main results This file registers `ℂ` as a normed field, expresses basic properties of the norm, and gives tools on the real vector space structure of `ℂ`. Notably, in the namespace `complex`, it defines functions: * `continuous_linear_map.re` * `continuous_linear_map.im` * `continuous_linear_map.of_real` They are bundled versions of the real part, the imaginary part, and the embedding of `ℝ` in `ℂ`, as continuous `ℝ`-linear maps. We also register the fact that `ℂ` is an `is_R_or_C` field. -/ namespace complex protected instance has_norm : has_norm ℂ := has_norm.mk abs protected instance normed_group : normed_group ℂ := normed_group.of_core ℂ sorry protected instance normed_field : normed_field ℂ := normed_field.mk sorry abs_mul protected instance nondiscrete_normed_field : nondiscrete_normed_field ℂ := nondiscrete_normed_field.mk sorry protected instance normed_algebra_over_reals : normed_algebra ℝ ℂ := normed_algebra.mk abs_of_real @[simp] theorem norm_eq_abs (z : ℂ) : norm z = abs z := rfl theorem dist_eq (z : ℂ) (w : ℂ) : dist z w = abs (z - w) := rfl @[simp] theorem norm_real (r : ℝ) : norm ↑r = norm r := abs_of_real r @[simp] theorem norm_rat (r : ℚ) : norm ↑r = abs ↑r := sorry @[simp] theorem norm_nat (n : ℕ) : norm ↑n = ↑n := abs_of_nat n @[simp] theorem norm_int {n : ℤ} : norm ↑n = abs ↑n := sorry theorem norm_int_of_nonneg {n : ℤ} (hn : 0 ≤ n) : norm ↑n = ↑n := eq.mpr (id (Eq._oldrec (Eq.refl (norm ↑n = ↑n)) norm_int)) (eq.mpr (id (Eq._oldrec (Eq.refl (abs ↑n = ↑n)) (abs_of_nonneg (iff.mpr int.cast_nonneg hn)))) (Eq.refl ↑n)) /-- A complex normed vector space is also a real normed vector space. -/ protected instance normed_space.restrict_scalars_real (E : Type u_1) [normed_group E] [normed_space ℂ E] : normed_space ℝ E := normed_space.restrict_scalars ℝ ℂ E /-- The space of continuous linear maps over `ℝ`, from a real vector space to a complex vector space, is a normed vector space over `ℂ`. -/ protected instance continuous_linear_map.real_smul_complex (E : Type u_1) [normed_group E] [normed_space ℝ E] (F : Type u_2) [normed_group F] [normed_space ℂ F] : normed_space ℂ (continuous_linear_map ℝ E F) := continuous_linear_map.normed_space_extend_scalars /-- Continuous linear map version of the real part function, from `ℂ` to `ℝ`. -/ def continuous_linear_map.re : continuous_linear_map ℝ ℂ ℝ := linear_map.to_continuous_linear_map linear_map.re @[simp] theorem continuous_linear_map.re_coe : ↑continuous_linear_map.re = linear_map.re := rfl @[simp] theorem continuous_linear_map.re_apply (z : ℂ) : coe_fn continuous_linear_map.re z = re z := rfl @[simp] theorem continuous_linear_map.re_norm : norm continuous_linear_map.re = 1 := sorry /-- Continuous linear map version of the real part function, from `ℂ` to `ℝ`. 
-/ def continuous_linear_map.im : continuous_linear_map ℝ ℂ ℝ := linear_map.to_continuous_linear_map linear_map.im @[simp] theorem continuous_linear_map.im_coe : ↑continuous_linear_map.im = linear_map.im := rfl @[simp] theorem continuous_linear_map.im_apply (z : ℂ) : coe_fn continuous_linear_map.im z = im z := rfl @[simp] theorem continuous_linear_map.im_norm : norm continuous_linear_map.im = 1 := sorry /-- Linear isometry version of the canonical embedding of `ℝ` in `ℂ`. -/ def linear_isometry.of_real : linear_isometry ℝ ℝ ℂ := linear_isometry.mk linear_map.of_real sorry /-- Continuous linear map version of the canonical embedding of `ℝ` in `ℂ`. -/ def continuous_linear_map.of_real : continuous_linear_map ℝ ℝ ℂ := linear_isometry.to_continuous_linear_map linear_isometry.of_real theorem isometry_of_real : isometry coe := linear_isometry.isometry linear_isometry.of_real theorem continuous_of_real : continuous coe := isometry.continuous isometry_of_real @[simp] theorem continuous_linear_map.of_real_coe : ↑continuous_linear_map.of_real = linear_map.of_real := rfl @[simp] theorem continuous_linear_map.of_real_apply (x : ℝ) : coe_fn continuous_linear_map.of_real x = ↑x := rfl @[simp] theorem continuous_linear_map.of_real_norm : norm continuous_linear_map.of_real = 1 := linear_isometry.norm_to_continuous_linear_map linear_isometry.of_real protected instance is_R_or_C : is_R_or_C ℂ := is_R_or_C.mk (add_monoid_hom.mk re zero_re add_re) (add_monoid_hom.mk im zero_im add_im) conj I sorry sorry sorry sorry sorry sorry sorry sorry sorry sorry sorry sorry sorry div_I end complex namespace is_R_or_C @[simp] theorem re_to_complex {x : ℂ} : coe_fn re x = complex.re x := rfl @[simp] theorem im_to_complex {x : ℂ} : coe_fn im x = complex.im x := rfl @[simp] theorem conj_to_complex {x : ℂ} : coe_fn conj x = coe_fn complex.conj x := rfl @[simp] theorem I_to_complex : I = complex.I := rfl @[simp] theorem norm_sq_to_complex {x : ℂ} : coe_fn norm_sq x = coe_fn complex.norm_sq x := sorry @[simp] theorem abs_to_complex {x : ℂ} : abs x = complex.abs x := sorry
#include <iostream> #include <Eigen/Core> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <pcl/common/common_headers.h> #include <pcl/io/pcd_io.h> #include "../../../core/format_helper.h" #include "../../../core/math_helper.h" void SavePointCloud(const cv::Mat& img, const cv::Mat& disp_img, const double* calib) { pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud(new pcl::PointCloud<pcl::PointXYZRGB>); Eigen::Vector4d pt3d; double min_disp = 1.0; for (int y = 0; y < disp_img.rows; y++) { for (int x = 0; x < disp_img.cols; x++) { double disp = static_cast<double>(disp_img.at<uint16_t>(y,x)) / 256.0; //std::cout << "D = " << disp << "\n"; if (disp > min_disp) { core::MathHelper::Triangulate(calib, x, y, disp, pt3d); pcl::PointXYZRGB point; point.x = pt3d[0]; point.y = pt3d[1]; point.z = pt3d[2]; if (img.channels() == 3) { point.b = img.at<cv::Vec3b>(y,x)[0]; point.g = img.at<cv::Vec3b>(y,x)[1]; point.r = img.at<cv::Vec3b>(y,x)[2]; } else if (img.channels() == 1) { point.b = img.at<uint8_t>(y,x); point.g = img.at<uint8_t>(y,x); point.r = img.at<uint8_t>(y,x); } else throw 1; point_cloud->points.push_back(point); } } } point_cloud->width = point_cloud->points.size(); point_cloud->height = 1; pcl::io::savePCDFile("pcl.pcd", *point_cloud); } int main(int argc, char** argv) { if (argc != 4) { std::cerr << "Usage:\n\t" << argv[0] << " img disp_img calib_file\n"; return 1; } std::string img_path = argv[1]; std::string dispimg_path = argv[2]; std::string calib_path = argv[3]; double mono_cam[5]; double color_cam[5]; core::FormatHelper::ReadCalibKitti(calib_path, mono_cam, color_cam); cv::Mat img = cv::imread(img_path, CV_LOAD_IMAGE_COLOR); cv::Mat disp_img = cv::imread(dispimg_path, CV_LOAD_IMAGE_ANYDEPTH); std::cout << "Bytes per pixel = " << disp_img.step / disp_img.cols << "\n"; if (img.channels() == 3) SavePointCloud(img, disp_img, color_cam); else if (img.channels() == 1) SavePointCloud(img, disp_img, mono_cam); else throw 1; return 0; }
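core::MathHelper::Triangulate is defined in the core library and not shown above. For readers without that library, a Python sketch of the standard rectified-stereo triangulation it presumably performs; the calib layout [f, cu, cv, baseline, ...] is an assumption about ReadCalibKitti's output, not verified here:

def triangulate(calib, u, v, disp):
    """Rectified-stereo triangulation: depth from disparity (disp in pixels,
    i.e. already divided by 256 as in the loop above)."""
    f, cu, cv, b = calib[0], calib[1], calib[2], calib[3]
    z = f * b / disp          # depth along the optical axis
    x = (u - cu) * z / f
    y = (v - cv) * z / f
    return x, y, z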
% the object to be controlled
function [sys,x0,str,ts]=ctrl5_2_3obj(t,x,u,flag)
switch flag,
  case 0,
    [sys,x0,str,ts]=mdlInit();
  case 1,
    sys=mdlDer(t,x,u);
  case 3,
    sys=mdlOutput(t,x,u);
  otherwise,
    sys=[];
end;

function [sys,x0,str,ts]=mdlInit()
size=simsizes;
size.NumContStates=2;
size.NumDiscStates=0;
size.NumOutputs=2;
size.NumInputs=1;
size.DirFeedthrough=0;
size.NumSampleTimes=1;
sys=simsizes(size);
x0=[0;0];   % two continuous states, so the initial state needs two entries
str=[];
ts=[0,0];

function sys=mdlDer(t,x,u)
global b0;
a=1;
sys(1)=x(2);
sys(2)=a*sign(sin(10*t))+u*b0;

function sys=mdlOutput(t,x,u)
sys(1)=x(1);
sys(2)=sign(sin(2*t));
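For reference outside Simulink, the same double-integrator plant can be reproduced in a few lines of Python; the constant input u and the value of the global b0 below are assumptions for illustration:

import numpy as np
from scipy.integrate import solve_ivp

a, b0 = 1.0, 1.0        # b0 is a global in the S-function; 1.0 is assumed here

def plant(t, x, u):
    """x1' = x2,  x2' = a*sign(sin(10 t)) + b0*u  -- same dynamics as mdlDer."""
    return [x[1], a * np.sign(np.sin(10.0 * t)) + b0 * u]

sol = solve_ivp(plant, (0.0, 5.0), [0.0, 0.0], args=(0.0,), max_step=0.01)
y1 = sol.y[0]                        # output 1: the state x1
y2 = np.sign(np.sin(2.0 * sol.t))    # output 2: the square-wave reference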
suppressPackageStartupMessages(library(kazaam)) if (comm.rank() == 0){ x = matrix(rnorm(30), 10) svd = svd(x) d = svd$d u = svd$u v = svd$v } else { x = NULL } dx = expand(x) svd_test = svd(dx) d_test = svd_test$d u_test = collapse(svd_test$u) v_test = svd_test$v comm.print(all.equal(d, d_test)) # the direction of the vectors can differ, so we have to account for sign comm.print(all.equal(abs(u), abs(u_test))) comm.print(all.equal(abs(v), abs(v_test))) x = rantshaq(stats::rnorm, 3, 10) v = svd(x)$v vt = La.svd(x)$vt comm.print(all.equal(abs(DATA(v)), abs(DATA(t(vt))))) x = t(x) v = svd(x)$v vt = La.svd(x)$vt comm.print(all.equal(abs(v), abs(t(vt)))) finalize()
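The abs() comparisons above work around the sign indeterminacy of singular vectors. An alternative is to align column signs explicitly before comparing; a NumPy sketch of that idea (illustrative, not part of the kazaam API):

import numpy as np

def align_signs(u_ref, u):
    """Flip each column of u so it points the same way as the matching u_ref column."""
    signs = np.sign(np.sum(u_ref * u, axis=0))
    signs[signs == 0] = 1.0            # guard against exactly orthogonal columns
    return u * signs

rng = np.random.default_rng(0)
x = rng.normal(size=(10, 3))
u1 = np.linalg.svd(x, full_matrices=False)[0]
u2 = -u1                               # simulate a sign flip
assert np.allclose(u1, align_signs(u1, u2))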
#!/usr/bin/env python3 import numpy as np from scipy.io import wavfile # some consts F_SAMPLE = 44100 DURATION_S = 10.0 ATTENUATION = 0.5 WAV_FILE = "strange.wav" # some vars t_sample = 1 / F_SAMPLE # init empty waveform waveform = np.zeros(int(DURATION_S * F_SAMPLE)) # add samples for i, v in enumerate(waveform): t = i * t_sample waveform[i] += np.sin(2 * np.pi * 20 * (np.sin(2 * np.pi * 0.5 * t))**2 * (np.sin(2 * np.pi * 3.0 * t))**2 * t) # write to wav file scaled = np.int16(waveform / np.max(np.abs(waveform)) * 2**16//2 * ATTENUATION) wavfile.write(WAV_FILE, F_SAMPLE, scaled)
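As a side note, the per-sample loop above can be replaced by a single vectorized expression over a time axis, equivalent up to floating-point rounding and much faster; this reuses the constants defined in the script:

t = np.arange(int(DURATION_S * F_SAMPLE)) * t_sample
waveform = np.sin(
    2 * np.pi * 20
    * np.sin(2 * np.pi * 0.5 * t) ** 2
    * np.sin(2 * np.pi * 3.0 * t) ** 2
    * t
)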
| pc = 0xc002 | a = 0x20 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | | pc = 0xc005 | a = 0x20 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x010a] = 0x20 | | pc = 0xc008 | a = 0x20 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x010a] = 0x20 | | pc = 0xc00d | a = 0x20 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | | pc = 0xc00f | a = 0x0f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
= = Early and personal life = =
""" Module to simulate realistic GPS trajectories of a number of people anywhere in the world. """ import datetime from dataclasses import dataclass from enum import Enum import re import sys import time from typing import Dict, List, Optional, Tuple, Union import numpy as np import openrouteservice import pandas as pd import requests from timezonefinder import TimezoneFinder from forest.jasmine.data2mobmat import great_circle_dist from forest.poplar.legacy.common_funcs import datetime2stamp, stamp2datetime R = 6.371*10**6 ACTIVE_STATUS_LIST = range(11) TRAVELLING_STATUS_LIST = range(11) OVERPASS_URL = "https://overpass-api.de/api/interpreter" class PossibleExits(Enum): """This class enumerates possible exits for attributes""" CAFE = "cafe" BAR = "bar" RESTAURANT = "restaurant" PARK = "park" CINEMA = "cinema" DANCE = "dance" FITNESS = "fitness_centre" class Vehicle(Enum): """This class enumerates vehicle for attributes""" BUS = "bus" CAR = "car" BICYCLE = "bicycle" FOOT = "foot" class Occupation(Enum): """This class enumerates occupation for attributes""" NONE = "none" WORK = "office" SCHOOL = "university" class ActionType(Enum): """This class enumerates action type for action""" PAUSE = "p" PAUSE_NIGHT = "p_night" FLIGHT_PAUSE_FLIGHT = "fpf" def get_path(start: Tuple[float, float], end: Tuple[float, float], transport: Vehicle, api_key: str) -> Tuple[np.ndarray, float]: """Calculates paths between sets of coordinates This function takes 2 sets of coordinates and a mean of transport and using the openroute api calculates the set of nodes to traverse from location1 to location2 along with the duration and distance of the flight. Args: start: coordinates of start point (lat, lon) end: coordinates of end point (lat, lon) transport: means of transportation, api_key: api key collected from https://openrouteservice.org/dev/#/home Returns: path_coordinates: 2d numpy array containing [lat,lon] of route distance: distance of trip in meters Raises: RuntimeError: An error when openrouteservice does not return coordinates of route as expected """ lat1, lon1 = start lat2, lon2 = end distance = great_circle_dist(lat1, lon1, lat2, lon2) if distance < 250: return (np.array([[lat1, lon1], [lat2, lon2]]), distance) if transport in (Vehicle.CAR, Vehicle.BUS): transport2 = "driving-car" elif transport == Vehicle.FOOT: transport2 = "foot-walking" elif transport == Vehicle.BICYCLE: transport2 = "cycling-regular" else: transport2 = "" client = openrouteservice.Client(key=api_key) coords = ((lon1, lat1), (lon2, lat2)) try: routes = client.directions( coords, profile=transport2, format="geojson" ) except Exception: raise RuntimeError( "Openrouteservice did not return proper trajectories." ) coordinates = routes["features"][0]["geometry"]["coordinates"] path_coordinates = [[coord[1], coord[0]] for coord in coordinates] # sometimes if exact coordinates of location are not in a road # the starting or ending coordinates of route will be returned # in the nearer road which can be slightly different than # the ones provided if path_coordinates[0] != [lat1, lon1]: path_coordinates[0] = [lat1, lon1] if path_coordinates[-1] != [lat2, lon2]: path_coordinates[-1] = [lat2, lon2] return np.array(path_coordinates), distance def get_basic_path(path: np.ndarray, transport: Vehicle) -> np.ndarray: """Subsets paths depending on transport for optimisation. This function takes a path from get_path() function and subsets it to a specific number of nodes. 
Args: path: 2d numpy array transport: Vehicle Returns: subset of original path that represents the flight """ distance = great_circle_dist(*path[0], *path[-1]) if transport in [Vehicle.FOOT, Vehicle.BICYCLE]: # slower speed thus capturing more locations length = 2 + distance // 200 elif transport == Vehicle.BUS: # higher speed thus capturing less locations # bus route start and end +2 length = 4 + distance // 400 else: # transport is car # higher speed thus capturing less locations length = 2 + distance // 400 if length >= len(path): basic_path = path else: indexes = list(range(0, len(path), int(len(path) / (length - 1)))) if len(indexes) < length: indexes.append(len(path) - 1) else: indexes[-1] = len(path) - 1 indexes2 = [] for i in range(len(indexes) - 1): if (path[indexes[i]] != path[indexes[i + 1]]).any(): indexes2.append(indexes[i]) indexes2.append(indexes[-1]) basic_path = path[indexes2] return basic_path def bounding_box(center: Tuple[float, float], radius: int) -> Tuple: """A bounding box around a set of coordinates. Args: center: set of coordinates (floats) (lat, lon) radius: radius in meters of area around coordinates Return: tuple of 4 elements that represents a bounding box around the coordinates provided """ lat, lon = center earth_radius = 6371 # kilometers lat_const = (radius / (1000 * earth_radius)) * (180 / np.pi) lon_const = lat_const / np.cos(lat * np.pi / 180) return lat - lat_const, lon - lon_const, lat + lat_const, lon + lon_const class Attributes: """This class holds the attributes needed to create an instance of a Person class""" def __init__(self, vehicle: Optional[str] = None, main_employment: Optional[str] = None, active_status: Optional[int] = None, travelling_status: Optional[int] = None, preferred_places: Optional[List[str]] = None, **kwargs): """Error check and generate missing data for attributes Args: vehicle: used for distances and time of flights main_occupation: used for routine action in weekdays active_status: used for probability in free time to take an action or stay home travelling status: used to derive amount of distance travelled preferred_places :used to sample action when free time where x1-x3 are amenities (str) Raises: ValueError: for incorrect vehicle type ValueError: for incorrect main_employment type ValueError: if active_status is not between 0 and 10 ValueError: if travelling_status is not between 0 and 10 ValueError: if an exit from possible_exits is not correct type """ if vehicle is not None: self.vehicle = Vehicle(vehicle) else: # exclude bus self.vehicle = np.random.choice(list(Vehicle)[1:]) if main_employment is not None: self.main_occupation = Occupation(main_employment) else: self.main_occupation = np.random.choice(list(Occupation)) if active_status is not None: if active_status not in ACTIVE_STATUS_LIST: raise ValueError("active_status must be between 0 and 10") self.active_status = int(active_status) else: self.active_status = np.random.choice(ACTIVE_STATUS_LIST) if travelling_status is not None: if travelling_status not in TRAVELLING_STATUS_LIST: raise ValueError("travelling_status must be between 0 and 10") self.travelling_status = int(travelling_status) else: self.travelling_status = np.random.choice(TRAVELLING_STATUS_LIST) if preferred_places is not None: self.preferred_places = [] for possible_exit in preferred_places: possible_exit2 = PossibleExits(possible_exit) self.preferred_places.append(possible_exit2) possible_exits2 = [ x for x in PossibleExits if x not in self.preferred_places ] random_exits = np.random.choice( 
possible_exits2, 3 - len(self.preferred_places), replace=False ).tolist() for choice in random_exits: self.preferred_places.append(choice) else: self.preferred_places = np.random.choice( list(PossibleExits), 3, replace=False ).tolist() @dataclass class Action: """Class containing potential actions for a Person. Args: action: ActionType, indicating pause, pause for the night or flight-pause-flight destination_coordinates: tuple, destination's coordinates duration: list, contains [minimum, maximum] duration of pause in seconds preferred_exit: str, exit code """ action: ActionType destination_coordinates: Tuple[float, float] duration: List[float] preferred_exit: str class Person: """This class represents a person whose trajectories we want to simulate""" def __init__(self, home_coordinates: Tuple[float, float], attributes: Attributes, local_places: Dict[str, list]): """This function sets the basic attributes and information to be used of the person. Args: home_coordinates: tuple, coordinates of primary home attributes: Attributes class, consists of various information local_places: dictionary, contains overpass nodes of amenities near house """ self.home_coordinates = home_coordinates self.attributes = attributes # used to update preferred exits in a day if already visited self.preferred_places_today = self.attributes.preferred_places.copy() self.office_today = False # this will hold the coordinates of paths # to each location visited self.trips: Dict[str, np.ndarray] = {} # if employed/student find a place nearby to visit # for work or studies # also set which days within the week to visit it # depending on active status if self.attributes.main_occupation != Occupation.NONE: main_occupation_locations = local_places[ self.attributes.main_occupation.value ] if len(main_occupation_locations) != 0: i = np.random.choice( range(len(main_occupation_locations)), 1, )[0] while main_occupation_locations[i] == home_coordinates: i = np.random.choice( range(len(main_occupation_locations)), 1 )[0] self.office_coordinates = main_occupation_locations[i] no_office_days = np.random.binomial( 5, self.attributes.active_status / 10 ) self.office_days = np.random.choice( range(5), no_office_days, replace=False ) self.office_days.sort() else: self.office_coordinates = (0, 0) self.office_days = np.array([]) else: self.office_coordinates = (0, 0) self.office_days = np.array([]) # define favorite places self.possible_destinations = list(PossibleExits) # for a certain venue select 3 locations for each venue randomly # these will be considered the 3 favorite places to go # 3 was chosen arbitrarily since people usually follow the # same patterns and go out mostly in the same places # order in the list of 3 matters, with order be of decreasing # preference for possible_exit in self.possible_destinations: # if there are more than 3 sets of coordinates for an venue # select 3 at random, else select all of them as preferred if len(local_places[possible_exit.value]) > 3: random_places = np.random.choice( range(len(local_places[possible_exit.value])), 3, replace=False, ).tolist() places_selected = [ tuple(place) for place in np.array(local_places[possible_exit.value])[ random_places ] if tuple(place) != home_coordinates ] setattr(self, possible_exit.value + "_places", places_selected) else: setattr( self, possible_exit.value + "_places", [ tuple(place) for place in local_places[possible_exit.value] if tuple(place) != home_coordinates ], ) # calculate distances of selected places from home # create a list of the locations 
ordered by distance distances = [ great_circle_dist(*home_coordinates, *place) for place in getattr(self, possible_exit.value + "_places") ] order = np.argsort(distances) setattr( self, possible_exit.value + "_places_ordered", np.array(getattr(self, possible_exit.value + "_places"))[ order ].tolist(), ) # remove all exits which have no places nearby possible_destinations2 = self.possible_destinations.copy() for act in possible_destinations2: if len(getattr(self, act.value + "_places")) == 0: self.possible_destinations.remove(act) # order preferred places by travelling_status # if travelling status high, preferred locations # will be the ones that are further away travelling_status_norm = (self.attributes.travelling_status ** 2) / ( self.attributes.travelling_status ** 2 + (10 - self.attributes.travelling_status) ** 2 ) for act in self.possible_destinations: act_places = getattr(self, act.value + "_places_ordered").copy() places = [] for i in range(len(act_places) - 1, -1, -1): index = np.random.binomial(i, travelling_status_norm) places.append(act_places[index]) del act_places[index] setattr(self, act.value + "_places", places) def set_travelling_status(self, travelling_status: int): """Update preferred locations of exits depending on new travelling status. Args: travelling_status: 0-10 | int indicating new travelling_status """ self.attributes.travelling_status = travelling_status travelling_status_norm = (travelling_status ** 2) / ( travelling_status ** 2 + (10 - travelling_status) ** 2 ) for act in self.possible_destinations: act_places = getattr(self, act.value + "_places_ordered").copy() places = [] for i in range(len(act_places) - 1, -1, -1): index = np.random.binomial(i, travelling_status_norm) places.append(act_places[index]) del act_places[index] setattr(self, act.value + "_places", places) def set_active_status(self, active_status: int): """Update active status. Args: active_status: 0-10 | int indicating new travelling_status """ self.attributes.active_status = active_status if ( self.attributes.main_occupation != Occupation.NONE and self.office_coordinates != (0, 0) ): no_office_days = np.random.binomial(5, active_status / 10) self.office_days = np.random.choice( range(5), no_office_days, replace=False ) self.office_days.sort() def update_preferred_places(self, exit_code: PossibleExits): """This function updates the set of preferred exits for the day, after an action has been performed. Args: exit_code: str, representing the action which was performed. """ if exit_code in self.preferred_places_today: index_of_code = self.preferred_places_today.index(exit_code) # if exit chosen is the least favorite for the day # replace it with a random venue from the rest of the # possible exits if index_of_code == (len(self.preferred_places_today) - 1): probs = np.array( [ 0 if c in self.preferred_places_today else 1 for c in self.possible_destinations ] ) probs = probs / sum(probs) self.preferred_places_today[-1] = np.random.choice( self.possible_destinations, 1, p=probs.tolist() )[0] else: # if exit selected is not the least preferred # switch positions with the next one ( self.preferred_places_today[index_of_code], self.preferred_places_today[index_of_code + 1], ) = ( self.preferred_places_today[index_of_code + 1], self.preferred_places_today[index_of_code], ) def choose_preferred_exit(self, current_time: float, update: bool = True ) -> Tuple[str, Tuple[float, float]]: """This function samples through the possible actions for the person, depending on his attributes and the time. 
Args: current_time: float, current time in seconds update: boolean, to update preferrences Returns: tuple of string and tuple: str, selected action to perform tuple, selected location's coordinates """ seconds_of_day = current_time % (24 * 60 * 60) hour_of_day = seconds_of_day / (60 * 60) # active_coef represents hours of inactivity # the larger the active status the smaller the active_coef # the less hours of inactivity # active_coef is in between [0, 2.5] active_coef = (10 - self.attributes.active_status) / 4 # too early in the morning so no action # should be taken if hour_of_day < 9 + active_coef: return "home", self.home_coordinates elif hour_of_day > 22 - active_coef: return "home_night", self.home_coordinates else: # probability of staying at home regardless the time probs_of_staying_home = [1 - self.attributes.active_status / 10, self.attributes.active_status / 10] if np.random.choice([0, 1], 1, p=probs_of_staying_home)[0] == 0: return "home", self.home_coordinates possible_destinations2 = self.possible_destinations.copy() actions = [] probabilities = np.array([]) # ratios on how probable each exit is to happen # the first venue is 2 times more likely to incur # than the second and 6 times more likely than the third ratios = [6., 3., 1.] for i, _ in enumerate(self.preferred_places_today): preferred_action = self.preferred_places_today[i] if preferred_action in possible_destinations2: actions.append(preferred_action) probabilities = np.append(probabilities, ratios[i]) possible_destinations2.remove(preferred_action) # for all the remaining venues the first venue is 24 times more likely # to occur for act in possible_destinations2: if act not in self.preferred_places_today: actions.append(act) probabilities = np.append(probabilities, 0.25) probabilities = probabilities / sum(probabilities) selected_action = np.random.choice(actions, 1, p=probabilities)[0] if update: self.update_preferred_places(selected_action) # after venue has been selected, a location for that venue # needs to be selected as well. action_locations = getattr(self, selected_action.value + "_places") ratios2 = ratios[: len(action_locations)] probabilities2 = np.array(ratios2) probabilities2 = probabilities2 / sum(probabilities2) selected_location_index = np.random.choice( range(len(action_locations)), 1, p=probabilities2 )[0] selected_location = action_locations[selected_location_index] return selected_action, selected_location def end_of_day_reset(self): """Reset preferred exits of the day. To run when a day ends""" self.preferred_places_today = self.attributes.preferred_places self.office_today = False def calculate_trip(self, origin: Tuple[float, float], destination: Tuple[float, float], api_key: str ) -> Tuple[np.ndarray, Vehicle]: """This function uses the openrouteservice api to produce the path from person's house to destination and back. 
Args: destination: tuple, coordinates for destination origin: tuple, coordinates for origin api_key: str, openrouteservice api key Returns: path: 2d numpy array, containing [lat,lon] of route from origin to destination transport: Vehicle, means of transport Raises: RuntimeError: An error when openrouteservice does not return coordinates of route as expected after 3 tries """ distance = great_circle_dist(*origin, *destination) # if very short distance do not take any vehicle (less than 1km) if distance <= 1000: transport = Vehicle.FOOT else: transport = self.attributes.vehicle coords_str = \ f"{origin[0]}_{origin[1]}_{destination[0]}_{destination[1]}" if coords_str in self.trips.keys(): path = self.trips[coords_str] else: for try_no in range(3): try: path, _ = get_path( origin, destination, transport, api_key ) except RuntimeError: if try_no == 2: raise else: time.sleep(30) continue else: path = get_basic_path(path, transport) self.trips[coords_str] = path break return path, transport def choose_action(self, current_time: float, day_of_week: int, update: bool = True) -> Action: """This function decides action for person to take. Args: current_time: int, current time in seconds day_of_week: int, day of the week update: bool, to update preferences and office day Returns: Action dataclass """ seconds_of_day = current_time % (24 * 60 * 60) if seconds_of_day == 0: # if it is a weekday and working/studying # wake up between 8am and 9am if (day_of_week < 5 and self.attributes.main_occupation != Occupation.NONE): return Action(ActionType.PAUSE, self.home_coordinates, [8 * 3600, 9 * 3600], "home_morning") # else wake up between 8am and 12pm return Action(ActionType.PAUSE, self.home_coordinates, [8 * 3600, 12 * 3600], "home_morning") # if haven't yet been to office today if not self.office_today: if update: self.office_today = not self.office_today # if today is office day go to office # work for 7 to 9 hours if day_of_week in self.office_days: return Action(ActionType.FLIGHT_PAUSE_FLIGHT, self.office_coordinates, [7 * 3600, 9 * 3600], "office") # if today is not office day # work for 7 to 9 hours from home elif day_of_week < 5: return Action(ActionType.PAUSE, self.home_coordinates, [7 * 3600, 9 * 3600], "office_home") # otherwise choose to do something in the free time preferred_exit, location = self.choose_preferred_exit(current_time, update) # if chosen to stay home if preferred_exit == "home": # if after 10pm and chosen to stay home # stay for the night until next day if seconds_of_day + 2 * 3600 > 24 * 3600 - 1: return Action(ActionType.PAUSE_NIGHT, self.home_coordinates, [24 * 3600 - seconds_of_day, 24 * 3600 - seconds_of_day], "home_night") # otherwise stay for half an hour to 2 hours and then decide again return Action(ActionType.PAUSE, self.home_coordinates, [0.5 * 3600, 2 * 3600], preferred_exit) # if deciding to stay at home for the night elif preferred_exit == "home_night": return Action(ActionType.PAUSE_NIGHT, self.home_coordinates, [24 * 3600 - seconds_of_day, 24 * 3600 - seconds_of_day], preferred_exit) # otherwise go to the location specified # spend from half an hour to 2.5 hours depending # on active status return Action(ActionType.FLIGHT_PAUSE_FLIGHT, location, [0.5 * 3600 + 1.5 * 3600 * (self.attributes.active_status - 1) / 9, 1 * 3600 + 1.5 * 3600 * (self.attributes.active_status - 1) / 9], preferred_exit) def gen_basic_traj(location_start: Tuple[float, float], location_end: Tuple[float, float], vehicle: Vehicle, time_start: float ) -> Tuple[np.ndarray, float]: """This function 
generates basic trajectories between 2 points. Args: location_start: tuple, coordinates of start point location_end: tuple, coordinates of end point vehicle: Vehicle, means of transportation, time_start: float, starting time Returns: numpy.ndarray, containing the trajectories float, total distance travelled """ traj_list = [] latitude_start, longitude_start = location_start if vehicle == Vehicle.FOOT: speed_range = [1.2, 1.6] elif vehicle == Vehicle.BICYCLE: speed_range = [7, 11] else: speed_range = [10, 14] distance = great_circle_dist(*location_start, *location_end) traveled = 0 time_end = time_start while traveled < distance: random_speed = np.random.uniform(speed_range[0], speed_range[1], 1)[0] random_time = int(np.around(np.random.uniform(30, 120, 1), 0)) mov = random_speed * random_time if ( traveled + mov > distance or distance - traveled - mov < speed_range[1] ): mov = distance - traveled random_time = int(np.around(mov / random_speed, 0)) traveled = traveled + mov time_end = time_start + random_time ratio = traveled / distance latitude_end, longitude_end = ( ratio * location_end[0] + (1 - ratio) * location_start[0], ratio * location_end[1] + (1 - ratio) * location_start[1], ) for i in range(random_time): newline = [ time_start + i + 1, (i + 1) / random_time * latitude_end + (random_time - i - 1) / random_time * latitude_start, (i + 1) / random_time * longitude_end + (random_time - i - 1) / random_time * longitude_start, ] traj_list.append(newline) latitude_start = latitude_end longitude_start = longitude_end time_start = time_end if traveled < distance and vehicle == Vehicle.BUS: random_time = int(np.around(np.random.uniform(20, 60, 1), 0)) time_end = time_start + random_time for i in range(random_time): newline = [ time_start + i + 1, latitude_start, longitude_start ] traj_list.append(newline) time_start = time_end traj_array = np.array(traj_list) err_lat = np.random.normal(loc=0.0, scale=2 * 1e-5, size=traj_array.shape[0]) err_lon = np.random.normal(loc=0.0, scale=2 * 1e-5, size=traj_array.shape[0]) traj_array[:, 1] = traj_array[:, 1] + err_lat traj_array[:, 2] = traj_array[:, 2] + err_lon return traj_array, distance def gen_basic_pause(location_start: Tuple[float, float], time_start: float, t_e_range: Union[List[float], None], t_diff_range: Union[List[float], None] ) -> np.ndarray: """This function generates basic trajectories for a pause. 
Args: location_start: tuple, coordinates of pause location time_start: float, starting time t_e_range: list, limits of ending time (None if t_diff_range used) t_diff_range: list, limits of duration (None if t_e_range used) Returns: numpy.ndarray, containing the trajectories Raises: ValueError: if both t_e_range and t_diff_range are None ValueError: if t_e_range is not None and does not have 2 elements ValueError: if t_diff_range is not None and does not have 2 elements """ traj_list = [] if t_e_range is None and t_diff_range is not None: if len(t_diff_range) == 2: random_time = int( np.around( np.random.uniform(t_diff_range[0], t_diff_range[1], 1), 0 ) ) else: raise ValueError("t_diff_range should be a list of length 2") elif t_e_range is not None and t_diff_range is None: if len(t_e_range) == 2: random_time = int( np.around( np.random.uniform(t_e_range[0], t_e_range[1], 1), 0 ) - time_start ) else: raise ValueError("t_e_range must be a list of length 2") else: raise ValueError("Either t_e_range or t_diff_range should be None") std = 1 * 1e-5 for i in range(random_time): newline = [time_start + i + 1, location_start[0], location_start[1]] traj_list.append(newline) traj_array = np.array(traj_list) err_lat = np.random.normal(loc=0.0, scale=std, size=traj_array.shape[0]) err_lon = np.random.normal(loc=0.0, scale=std, size=traj_array.shape[0]) traj_array[:, 1] = traj_array[:, 1] + err_lat traj_array[:, 2] = traj_array[:, 2] + err_lon return traj_array def gen_route_traj(route: list, vehicle: Vehicle, time_start: float) -> Tuple[np.ndarray, float]: """This function generates basic trajectories between multiple points. Args: route: list, contains coordinates of multiple locations vehicle: Vehicle, means of transportation, time_start: float, starting time Returns: numpy.ndarray, containing the trajectories float, total distance travelled """ total_distance = 0. traj = np.zeros((1, 3)) for i in range(len(route) - 1): location_start = route[i] location_end = route[i + 1] try: trip, distance = gen_basic_traj( location_start, location_end, vehicle, time_start ) except IndexError: route[i + 1] = location_start continue total_distance += distance time_start = trip[-1, 0] traj = np.vstack((traj, trip)) # generate pause if vehicle is bus for bus stop waiting time if (i + 1) != len(route) - 1 and vehicle == Vehicle.BUS: trip = gen_basic_pause(location_end, time_start, None, [5, 120]) time_start = trip[-1, 0] traj = np.vstack((traj, trip)) traj = traj[1:, :] return traj, total_distance def gen_all_traj(person: Person, switches: Dict[str, int], start_date: datetime.date, end_date: datetime.date, api_key: str) -> Tuple[np.ndarray, List[int], List[float]]: """Generates trajectories for a single person. 
Args:
        person: (Person) the person whose trajectories are generated
        switches: (dictionary) contains changes of attributes
            in between the simulation
        start_date: (datetime.date object) start date of trajectories
        end_date: (datetime.date object) end date of trajectories,
            end date is not included in the trajectories
        api_key: (str) api key for open route service
    Returns:
        traj: (numpy.ndarray) contains the gps trajectories of a single
            person, first column is time, second column is latitude and
            third column is longitude
        home_time_list: (list) contains the time spent at home each day
            in seconds
        total_d_list: (list) contains the total distance travelled each
            day in meters
    Raises:
        ValueError: if possible destinations around the house address
            are less than 4
        ValueError: if no offices around person's house address
    """
    if len(person.possible_destinations) < 4:
        raise ValueError("Not enough possible destinations")
    if (
        person.attributes.main_occupation != Occupation.NONE
        and person.office_coordinates == (0, 0)
    ):
        raise ValueError("No office coordinates")

    val_active_change = -1
    time_active_change = -1
    val_travel_change = -1
    time_travel_change = -1

    if len(switches.keys()) != 0:
        for key in switches.keys():
            key_list = key.split("-")
            if key_list[0] == "active_status":
                time_active_change = int(key_list[1]) - 1
                val_active_change = switches[key]
            elif key_list[0] == "travelling_status":
                time_travel_change = int(key_list[1]) - 1
                val_travel_change = switches[key]

    current_date = start_date
    t_s = 0
    traj = np.zeros((1, 3))
    traj[0, 0] = t_s
    traj[0, 1] = person.home_coordinates[0]
    traj[0, 2] = person.home_coordinates[1]

    home_time = 0
    total_d = 0.

    home_time_list = []
    total_d_list = []

    while current_date < end_date:
        if t_s == time_travel_change * 24 * 3600:
            person.set_travelling_status(val_travel_change)
        if t_s == time_active_change * 24 * 3600:
            person.set_active_status(val_active_change)

        current_weekdate = current_date.weekday()
        action = person.choose_action(t_s, current_weekdate)

        if action.action == ActionType.PAUSE:
            res = gen_basic_pause(
                action.destination_coordinates, t_s, None, action.duration
            )
            if action.destination_coordinates == person.home_coordinates:
                home_time += res[-1, 0] - res[0, 0] + 1
            traj = np.vstack((traj, res))
            t_s = res[-1, 0]

        elif action.action == ActionType.FLIGHT_PAUSE_FLIGHT:
            d_temp = 0.
go_path, transport = person.calculate_trip( person.home_coordinates, action.destination_coordinates, api_key ) return_path, _ = person.calculate_trip( action.destination_coordinates, person.home_coordinates, api_key ) # Flight 1 res1, distance1 = gen_route_traj(go_path.tolist(), transport, t_s) t_s1 = res1[-1, 0] traj1 = res1 d_temp += distance1 # Pause res2 = gen_basic_pause( action.destination_coordinates, t_s1, None, action.duration ) t_s2 = res2[-1, 0] traj2 = np.vstack((traj1, res2)) # Flight 2 res3, distance3 = gen_route_traj( return_path.tolist(), transport, t_s2 ) t_s3 = res3[-1, 0] traj3 = np.vstack((traj2, res3)) d_temp += distance3 dates_passed_in_hrs = (current_date - start_date).days * 24 * 3600 if t_s3 - dates_passed_in_hrs < 24 * 3600: t_s = t_s3 traj = np.vstack((traj, traj3)) total_d += d_temp else: # pause res = gen_basic_pause( person.home_coordinates, t_s, None, [15 * 60, 30 * 60] ) home_time += res[-1, 0] - res[0, 0] + 1 t_s = res[-1, 0] traj = np.vstack((traj, res)) elif action.action == ActionType.PAUSE_NIGHT: if action.duration[0] + action.duration[1] != 0: res = gen_basic_pause( action.destination_coordinates, t_s, None, action.duration ) if action.destination_coordinates == person.home_coordinates: home_time += res[-1, 0] - res[0, 0] + 1 traj = np.vstack((traj, res)) t_s = res[-1, 0] current_date += datetime.timedelta(days=1) person.end_of_day_reset() home_time_list.append(home_time) total_d_list.append(total_d) home_time = 0 total_d = 0 traj = traj[:-1, :] return traj, home_time_list, total_d_list def remove_data( full_data: np.ndarray, cycle: int, percentage: float, day: int ) -> np.ndarray: """Only keeps observed data from simulated trajectories depending on cycle and percentage. Args: full_data: (numpy.ndarray) contains the complete trajectories cycle: (int) on_period + off_period of observations, in minutes percentage: (float) off_period/cycle, in between 0 and 1 day: (int) number of days in full_data Returns: obs_data: (numpy.ndarray) contains the trajectories of the on period. """ sample_dur = int(np.around(60 * cycle * (1 - percentage), 0)) index_all = np.array([]) for i in range(day): start = int(np.around(np.random.uniform(0, 60 * cycle, 1), 0)) start += 86400 * i index_cycle = np.arange(start, start + sample_dur) if i == 0: index_all = index_cycle while index_all[-1] < 86400 * (i + 1): index_cycle = index_cycle + cycle * 60 index_all = np.concatenate((index_all, index_cycle)) index_all = index_all[index_all < 86400 * (i + 1)] index_all = np.concatenate( (np.arange(600), index_all, np.arange(86400 * day - 600, 86400 * day)) ) index_all = np.unique(index_all) obs_data = full_data[index_all, :] return obs_data def prepare_data( obs: np.ndarray, timestamp_s: int, tz_str: str ) -> pd.DataFrame: """Prepares the data in a dataframe. Args: obs: (numpy.ndarray) observed trajectories. timestamp_s: (int) timestamp of starting day tz_str: (str) timezone Returns: new: (pandas.DataFrame) final dataframe of simulated gps data. """ utc_start = stamp2datetime(timestamp_s, tz_str) utc_start_stamp = datetime2stamp(utc_start, "UTC") new = np.zeros((obs.shape[0], 6)) new[:, 0] = (obs[:, 0] + timestamp_s) * 1000 new[:, 1] = (obs[:, 0] + utc_start_stamp) * 1000 new[:, 2] = obs[:, 1] new[:, 3] = obs[:, 2] new[:, 4] = 0 new[:, 5] = 20 return pd.DataFrame( new, columns=[ "timestamp", "UTC time", "latitude", "longitude", "altitude", "accuracy", ], ) def process_switches( attributes: Dict[str, Dict], key: str, ) -> Dict[str, int]: """Preprocesses the attributes of each person. 
Args: attributes: (dictionary) contains attributes of each person, loaded from json file. key: (str) a key from attributes.keys() Returns: switches: (dictionary) contains possible changes of attributes in between of simulation """ switches = {} for x in attributes[key].keys(): key_list = x.split("-") if len(key_list) == 2: switches[x] = attributes[key][x] return switches def load_attributes( attributes: Dict[str, Dict], ) -> Tuple[Dict[int, Attributes], Dict[int, Dict[str, int]]]: """Loads the attributes of each person. Args: attributes: Dictionary of attributes of each person, loaded from json file. Returns: attributes: (dictionary) contains attributes of each person, loaded from json file. switches: (dictionary) contains possible changes of attributes in between of simulation Raises: ValueError: if the format of the json file is not correct. """ attributes_dictionary: Dict[int, Attributes] = {} switches_dictionary: Dict[int, Dict[str, int]] = {} for key in attributes.keys(): match = re.search(r"[0-9]*-?[0-9]+", key) if match is None: raise ValueError(f"Wrong format in attributes.json on {key}") users = match.group(0).split("-") if len(users) == 1: up_to = int(users[0]) else: up_to = int(users[1]) for user in range(int(users[0]), up_to + 1): attrs = Attributes(**attributes[key]) switches = process_switches(attributes, key) attributes_dictionary[user] = attrs switches_dictionary[user] = switches return attributes_dictionary, switches_dictionary def generate_addresses(country: str, city: str) -> np.ndarray: """Generates multiple addresses. Args: country: (str) country of the persons city: (str) city of the persons Returns: addresses: (np.ndarray) contains address coordinates of each person Raises: RuntimeError: if the api raises error for too many tries ValueError: if the api returns no results """ overpy_query = f""" [out:json]; area["ISO3166-1"="{country}"][admin_level=2] -> .country; area["name"="{city}"] -> .city; node(area.country)(area.city)["addr:street"]; out center 150; """ response = requests.get(OVERPASS_URL, params={"data": overpy_query}, timeout=60) response.raise_for_status() res = response.json() try: index = np.random.choice( range(len(res["elements"])), 100, replace=False ) except ValueError: sys.stderr.write( "Overpass query came back empty. Check the location argument, ISO " "code, and city name, for any misspellings." ) raise return np.array(res["elements"])[index] def generate_nodes( house_address: Tuple[float, float], employment: Occupation ) -> Dict[str, List[Tuple[float, float]]]: """Generates multiple amenities coordinates. 
Args: house_address: (tuple) coordinates of the house employment: (Occupation) occupation of the person Returns: nodes: (dictionary) contains coordinates of each amenity Raises: RuntimeError: if the api raises error for too many tries """ house_area = bounding_box(house_address, 2000) house_area2 = bounding_box(house_address, 3000) q_employment = "" if employment == Occupation.WORK: q_employment = f'node{house_area}["office"];' elif employment == Occupation.SCHOOL: q_employment = f""" node{house_area2}["amenity"="university"]; way{house_area2}["amenity"="university"] """ overpy_query2 = f""" [out:json]; ( node{house_area}["amenity"="cafe"]; node{house_area}["amenity"="bar"]; node{house_area}["amenity"="restaurant"]; node{house_area}["amenity"="cinema"]; node{house_area}["leisure"="park"]; node{house_area}["leisure"="dance"]; node{house_area}["leisure"="fitness_centre"]; way{house_area}["amenity"="cafe"]; way{house_area}["amenity"="bar"]; way{house_area}["amenity"="restaurant"]; way{house_area}["amenity"="cinema"]; way{house_area}["leisure"="park"]; way{house_area}["leisure"="dance"]; way{house_area}["leisure"="fitness_centre"]; {q_employment} ); out center; """ response = requests.get(OVERPASS_URL, params={"data": overpy_query2}, timeout=60) response.raise_for_status() res = response.json() all_nodes: Dict[str, list] = {} for place in list(PossibleExits): all_nodes[place.value] = [] all_nodes["office"] = [] all_nodes["university"] = [] for element in res["elements"]: if element["type"] == "node": lon = element["lon"] lat = element["lat"] else: lon = element["center"]["lon"] lat = element["center"]["lat"] if "office" in element["tags"]: all_nodes["office"].append((lat, lon)) if "amenity" in element["tags"]: for key in all_nodes.keys(): if element["tags"]["amenity"] == key: all_nodes[key].append((lat, lon)) elif "leisure" in element["tags"]: for key in all_nodes.keys(): if element["tags"]["leisure"] == key: all_nodes[key].append((lat, lon)) return all_nodes def sim_gps_data( n_persons: int, location: str, start_date: datetime.date, end_date: datetime.date, cycle: int, percentage: float, api_key: str, attributes_dict: Optional[Dict[str, Dict]] = None, ) -> pd.DataFrame: """Generates gps trajectories. 
Args: n_persons: (int) number of people to simulate location: (str) indicating country and city to simulate at, format "Country_2_letter_ISO_code/City_Name" start_date: (datetime.date) start date of trajectories end_date: (datetime.date) end date of trajectories, end date is not included in the trajectories cycle: (int) the sum of on-cycle and off_cycle, unit is minute percentage: (float) the missing rate, in other words, the proportion of off_cycle, should be within [0,1] api_key: (str), api key for open route service https://openrouteservice.org/ attributes_dict: (dictionary) containing attributes for each user, optional Returns: gps_data: (pandas.DataFrame) contains gps trajectories for each person Raises: ValueError: if attributes.json is not in the correct format ValueError: if location is not in the correct format RuntimeError: if too many Overpass queries are made ValueError: if Overpass query fails """ sys.stdout.write("Loading Attributes...\n") if attributes_dict is None: attributes_dictionary: Dict[int, Attributes] = {} switches_dictionary: Dict[int, Dict[str, int]] = {} else: attributes_dictionary, switches_dictionary = load_attributes( attributes_dict ) for user in range(1, n_persons + 1): if user not in attributes_dictionary.keys(): attributes_dictionary[user] = Attributes() if user not in switches_dictionary.keys(): switches_dictionary[user] = {} sys.stdout.write("Gathering Addresses...\n") try: location_ctr, location_city = location.split("/") except ValueError: sys.stderr.write("Location provided did not have the correct format.") raise nodes = generate_addresses(location_ctr, location_city) # find timezone of city location_coords = (float(nodes[0]['lat']), float(nodes[0]['lon'])) obj = TimezoneFinder() tz_str = obj.timezone_at(lng=location_coords[1], lat=location_coords[0]) no_of_days = (end_date - start_date).days timestamp_s = ( datetime2stamp( [start_date.year, start_date.month, start_date.day, 0, 0, 0], tz_str ) * 1000 ) user = 0 ind = 0 gps_data = pd.DataFrame(columns=[ "user", "timestamp", "UTC time", "latitude", "longitude", "altitude", "accuracy", ] ) sys.stdout.write("Starting to generate trajectories...\n") while user < n_persons: house_address = (float(nodes[ind]['lat']), float(nodes[ind]['lon'])) attrs = attributes_dictionary[user + 1] all_nodes = generate_nodes(house_address, attrs.main_occupation) person = Person(house_address, attrs, all_nodes) all_traj, all_times, all_distances = gen_all_traj( person, switches_dictionary[user + 1], start_date, end_date, api_key, ) if len(all_traj) == 0: ind += 1 continue all_distances_array = np.array(all_distances) / 1000 all_times_array = np.array(all_times) / 3600 sys.stdout.write(f"User_{user + 1}\n") sys.stdout.write(f" distance(km): {all_distances_array.tolist()}\n") sys.stdout.write(f" hometime(hr): {all_times_array.tolist()}\n") obs = remove_data(all_traj, cycle, percentage, no_of_days) obs_pd = prepare_data(obs, timestamp_s / 1000, tz_str) obs_pd['user'] = user + 1 gps_data = gps_data.append(obs_pd) user += 1 ind += 1 return gps_data
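For orientation, the entry point above can be driven as follows. This is a minimal usage sketch, untested; the location, dates, and API key are placeholder values, not values taken from the source.

```python
import datetime

# Hypothetical call; "GB/Bristol" and the key are placeholders.
gps_df = sim_gps_data(
    n_persons=2,
    location="GB/Bristol",               # "ISO_code/City_Name" format
    start_date=datetime.date(2021, 1, 1),
    end_date=datetime.date(2021, 1, 4),  # end date itself is excluded
    cycle=15,                            # on-period + off-period, in minutes
    percentage=0.8,                      # proportion of each cycle that is off
    api_key="YOUR_ORS_KEY",              # OpenRouteService key, placeholder
)
print(gps_df.head())
```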
State Before: ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j ⊢ update f i ⁻¹' pi s t = t i State After: case h ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i ⊢ x ∈ update f i ⁻¹' pi s t ↔ x ∈ t i Tactic: ext x State Before: case h ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i ⊢ x ∈ update f i ⁻¹' pi s t ↔ x ∈ t i State After: case h.refine'_1 ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i h : x ∈ update f i ⁻¹' pi s t ⊢ x ∈ t i case h.refine'_2 ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i hx : x ∈ t i j : ι hj : j ∈ s ⊢ update f i x j ∈ t j Tactic: refine' ⟨fun h => _, fun hx j hj => _⟩ State Before: case h.refine'_1 ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i h : x ∈ update f i ⁻¹' pi s t ⊢ x ∈ t i State After: case h.e'_4 ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i h : x ∈ update f i ⁻¹' pi s t ⊢ x = update f i x i Tactic: convert h i hi State Before: case h.e'_4 ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i h : x ∈ update f i ⁻¹' pi s t ⊢ x = update f i x i State After: no goals Tactic: simp State Before: case h.refine'_2 ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i hx : x ∈ t i j : ι hj : j ∈ s ⊢ update f i x j ∈ t j State After: case h.refine'_2.inl ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) inst✝ : DecidableEq ι f : (i : ι) → α i j : ι hj hi : j ∈ s hf : ∀ (j_1 : ι), j_1 ∈ s → j_1 ≠ j → f j_1 ∈ t j_1 x : α j hx : x ∈ t j ⊢ update f j x j ∈ t j case h.refine'_2.inr ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i hx : x ∈ t i j : ι hj : j ∈ s h : j ≠ i ⊢ update f i x j ∈ t j Tactic: obtain rfl | h := eq_or_ne j i State Before: case h.refine'_2.inl ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) inst✝ : DecidableEq ι f : (i : ι) → α i j : ι hj hi : j ∈ s hf : ∀ (j_1 : ι), j_1 ∈ s → j_1 ≠ j → f j_1 ∈ t j_1 x : α j hx : x ∈ t j ⊢ update f j x j ∈ t j State After: no goals Tactic: simpa State Before: case h.refine'_2.inr ι 
: Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i hx : x ∈ t i j : ι hj : j ∈ s h : j ≠ i ⊢ update f i x j ∈ t j State After: case h.refine'_2.inr ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i hx : x ∈ t i j : ι hj : j ∈ s h : j ≠ i ⊢ f j ∈ t j Tactic: rw [update_noteq h] State Before: case h.refine'_2.inr ι : Type u_1 α : ι → Type u_2 β : ι → Type ?u.163005 s s₁ s₂ : Set ι t t₁ t₂ : (i : ι) → Set (α i) i : ι inst✝ : DecidableEq ι f : (i : ι) → α i hi : i ∈ s hf : ∀ (j : ι), j ∈ s → j ≠ i → f j ∈ t j x : α i hx : x ∈ t i j : ι hj : j ∈ s h : j ≠ i ⊢ f j ∈ t j State After: no goals Tactic: exact hf j hj h
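The tactic states above can be reassembled into a single proof script. The following Lean 4 reconstruction is a sketch only: the lemma name and the variable binders are assumed (the trace records them as section variables), and the script is untested here.

```lean
theorem update_preimage_pi {ι : Type*} {α : ι → Type*} [DecidableEq ι]
    {s : Set ι} {t : ∀ i, Set (α i)} {i : ι} {f : ∀ i, α i}
    (hi : i ∈ s) (hf : ∀ j ∈ s, j ≠ i → f j ∈ t j) :
    Function.update f i ⁻¹' s.pi t = t i := by
  ext x
  refine' ⟨fun h => _, fun hx j hj => _⟩
  · -- forward direction: evaluate the preimage membership at i
    convert h i hi
    simp
  · -- backward direction: case on whether j = i
    obtain rfl | h := eq_or_ne j i
    · simpa
    · rw [Function.update_noteq h]
      exact hf j hj h
```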
////////////////////////////////////////////////////////////
//
// Copyright (c) 2018 Jan Filipowicz, Filip Turobos
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
////////////////////////////////////////////////////////////

#ifndef GENETIC_ALGORITHM_LIBRARY_ROULETTE_WHEEL_SELECTION_H
#define GENETIC_ALGORITHM_LIBRARY_ROULETTE_WHEEL_SELECTION_H

#include <algorithm>
#include <cstddef>
#include <limits>
#include <random>
#include <utility>
#include <type_traits>
#include <vector>
#include <gsl/gsl_assert>
#include "identity.h"

template<class UniformRandomBitGenerator, class Function = identity>
class roulette_wheel_selection {
public:
	explicit roulette_wheel_selection(UniformRandomBitGenerator& g, const Function& f = Function()) noexcept(noexcept(Function(f)));
	template<class Specimen>
	void operator()(std::vector<Specimen>& specimens, std::size_t n);
private:
	UniformRandomBitGenerator& rand;
	Function probability_function;
};

template<class UniformRandomBitGenerator, class Function>
inline roulette_wheel_selection<UniformRandomBitGenerator, Function>::roulette_wheel_selection(UniformRandomBitGenerator& g, const Function& f) noexcept(noexcept(Function(f)))
	: rand(g), probability_function(f) {}

template<class UniformRandomBitGenerator, class Function>
template<class Specimen>
inline void roulette_wheel_selection<UniformRandomBitGenerator, Function>::operator()(std::vector<Specimen>& specimens, std::size_t n) {
	Expects(specimens.size() >= n);
	using sample_type = std::common_type_t<double, decltype(probability_function(std::declval<typename Specimen::rating_type>()))>;
	using iterator_type = typename std::vector<Specimen>::iterator;
	using pair_type = std::pair<sample_type, iterator_type>;
	std::vector<pair_type> samples;
	samples.reserve(specimens.size());
	for (iterator_type it = specimens.begin(); it != specimens.end(); ++it) {
		const sample_type probability = probability_function(it->rating());
		sample_type sample = std::numeric_limits<sample_type>::max();
		if (probability > 0.0) {
			// Must not be const: the distribution's operator() is a
			// non-const member (it may mutate internal state).
			std::exponential_distribution<sample_type> distribution(probability);
			sample = distribution(rand);
		}
		samples.emplace_back(sample, it);
	}
	std::nth_element(samples.begin(), samples.begin() + n, samples.end());
	samples.resize(n);
	iterator_type it = specimens.begin();
	for (const auto& sample : samples) {
		std::iter_swap(it++, sample.second);
	}
	specimens.resize(n);
}

#endif
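The loop above implements weighted selection without replacement via an exponential race: each specimen draws a sample from Exp(p), where p comes from probability_function applied to its rating, and the n smallest samples win. A Python transliteration of that idea, as a sketch rather than part of the library:

```python
import random

def roulette_wheel_select(specimens, weights, n):
    """Keep n specimens with probability proportional to weight.

    Mirrors the C++ header: draw a sample from Exp(w) per specimen
    (infinity for non-positive weights) and keep the n smallest draws.
    """
    keyed = []
    for spec, w in zip(specimens, weights):
        key = random.expovariate(w) if w > 0 else float("inf")
        keyed.append((key, spec))
    keyed.sort(key=lambda kv: kv[0])   # nth_element would also suffice
    return [spec for _, spec in keyed[:n]]
```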
(* (c) Copyright Microsoft Corporation and Inria. All rights reserved. *) Require Import ssreflect ssrfun ssrbool eqtype ssrnat seq. (******************************************************************************) (* This file deals with divisibility for natural numbers. *) (* It contains the definitions of: *) (* edivn m d == the pair composed of the quotient and remainder *) (* of the Euclidean division of m by d. *) (* m %/ d == quotient of the Euclidean division of m by d. *) (* m %% d == remainder of the Euclidean division of m by d. *) (* m = n %[mod d] <-> m equals n modulo d. *) (* m == n %[mod d] <=> m equals n modulo d (boolean version). *) (* m <> n %[mod d] <-> m differs from n modulo d. *) (* m != n %[mod d] <=> m differs from n modulo d (boolean version). *) (* d %| m <=> d divides m. *) (* gcdn m n == the GCD of m and n. *) (* egcdn m n == the extended GCD (Bezout coefficient pair) of m and n. *) (* If egcdn m n = (u, v), then gcdn m n = m * u - n * v. *) (* lcmn m n == the LCM of m and n. *) (* coprime m n <=> m and n are coprime (:= gcdn m n == 1). *) (* chinese m n r s == witness of the chinese remainder theorem. *) (* We adjoin an m to operator suffixes to indicate a nested %% (modn), as in *) (* modnDml : m %% d + n = m + n %[mod d]. *) (******************************************************************************) Set Implicit Arguments. Unset Strict Implicit. Unset Printing Implicit Defensive. (** Euclidean division *) Definition edivn_rec d := fix loop m q := if m - d is m'.+1 then loop m' q.+1 else (q, m). Definition edivn m d := if d > 0 then edivn_rec d.-1 m 0 else (0, m). CoInductive edivn_spec m d : nat * nat -> Type := EdivnSpec q r of m = q * d + r & (d > 0) ==> (r < d) : edivn_spec m d (q, r). Lemma edivnP m d : edivn_spec m d (edivn m d). Proof. rewrite -{1}[m]/(0 * d + m) /edivn; case: d => //= d. elim: m {-2}m 0 (leqnn m) => [|n IHn] [|m] q //= le_mn. have le_m'n: m - d <= n by rewrite (leq_trans (leq_subr d m)). rewrite subn_if_gt; case: ltnP => [// | le_dm]. by rewrite -{1}(subnKC le_dm) -addSn addnA -mulSnr; apply: IHn. Qed. Lemma edivn_eq d q r : r < d -> edivn (q * d + r) d = (q, r). Proof. move=> lt_rd; have d_gt0: 0 < d by apply: leq_trans lt_rd. case: edivnP lt_rd => q' r'; rewrite d_gt0 /=. wlog: q q' r r' / q <= q' by case/orP: (leq_total q q'); last symmetry; eauto. rewrite leq_eqVlt; case/predU1P => [-> /addnI-> |] //=. rewrite -(leq_pmul2r d_gt0) => /leq_add lt_qr eq_qr _ /lt_qr {lt_qr}. by rewrite addnS ltnNge mulSn -addnA eq_qr addnCA addnA leq_addr. Qed. Definition divn m d := (edivn m d).1. Notation "m %/ d" := (divn m d) : nat_scope. (* We redefine modn so that it is structurally decreasing. *) Definition modn_rec d := fix loop m := if m - d is m'.+1 then loop m' else m. Definition modn m d := if d > 0 then modn_rec d.-1 m else m. Notation "m %% d" := (modn m d) : nat_scope. Notation "m = n %[mod d ]" := (m %% d = n %% d) : nat_scope. Notation "m == n %[mod d ]" := (m %% d == n %% d) : nat_scope. Notation "m <> n %[mod d ]" := (m %% d <> n %% d) : nat_scope. Notation "m != n %[mod d ]" := (m %% d != n %% d) : nat_scope. Lemma modn_def m d : m %% d = (edivn m d).2. Proof. case: d => //= d; rewrite /modn /edivn /=. elim: m {-2}m 0 (leqnn m) => [|n IHn] [|m] q //=. rewrite ltnS !subn_if_gt; case: (d <= m) => // le_mn. by apply: IHn; apply: leq_trans le_mn; apply: leq_subr. Qed. Lemma edivn_def m d : edivn m d = (m %/ d, m %% d). Proof. by rewrite /divn modn_def; case: (edivn m d). Qed. Lemma divn_eq m d : m = m %/ d * d + m %% d. Proof. 
by rewrite /divn modn_def; case: edivnP. Qed. Lemma div0n d : 0 %/ d = 0. Proof. by case: d. Qed. Lemma divn0 m : m %/ 0 = 0. Proof. by []. Qed. Lemma mod0n d : 0 %% d = 0. Proof. by case: d. Qed. Lemma modn0 m : m %% 0 = m. Proof. by []. Qed. Lemma divn_small m d : m < d -> m %/ d = 0. Proof. by move=> lt_md; rewrite /divn (edivn_eq 0). Qed. Lemma divnMDl q m d : 0 < d -> (q * d + m) %/ d = q + m %/ d. Proof. move=> d_gt0; rewrite {1}(divn_eq m d) addnA -mulnDl. by rewrite /divn edivn_eq // modn_def; case: edivnP; rewrite d_gt0. Qed. Lemma mulnK m d : 0 < d -> m * d %/ d = m. Proof. by move=> d_gt0; rewrite -[m * d]addn0 divnMDl // div0n addn0. Qed. Lemma mulKn m d : 0 < d -> d * m %/ d = m. Proof. by move=> d_gt0; rewrite mulnC mulnK. Qed. Lemma expnB p m n : p > 0 -> m >= n -> p ^ (m - n) = p ^ m %/ p ^ n. Proof. by move=> p_gt0 /subnK{2}<-; rewrite expnD mulnK // expn_gt0 p_gt0. Qed. Lemma modn1 m : m %% 1 = 0. Proof. by rewrite modn_def; case: edivnP => ? []. Qed. Lemma divn1 m : m %/ 1 = m. Proof. by rewrite {2}(@divn_eq m 1) // modn1 addn0 muln1. Qed. Lemma divnn d : d %/ d = (0 < d). Proof. by case: d => // d; rewrite -{1}[d.+1]muln1 mulKn. Qed. Lemma divnMl p m d : p > 0 -> p * m %/ (p * d) = m %/ d. Proof. move=> p_gt0; case: (posnP d) => [-> | d_gt0]; first by rewrite muln0. rewrite {2}/divn; case: edivnP; rewrite d_gt0 /= => q r ->{m} lt_rd. rewrite mulnDr mulnCA divnMDl; last by rewrite muln_gt0 p_gt0. by rewrite addnC divn_small // ltn_pmul2l. Qed. Implicit Arguments divnMl [p m d]. Lemma divnMr p m d : p > 0 -> m * p %/ (d * p) = m %/ d. Proof. by move=> p_gt0; rewrite -!(mulnC p) divnMl. Qed. Implicit Arguments divnMr [p m d]. Lemma ltn_mod m d : (m %% d < d) = (0 < d). Proof. by case: d => // d; rewrite modn_def; case: edivnP. Qed. Lemma ltn_pmod m d : 0 < d -> m %% d < d. Proof. by rewrite ltn_mod. Qed. Lemma leq_trunc_div m d : m %/ d * d <= m. Proof. by rewrite {2}(divn_eq m d) leq_addr. Qed. Lemma leq_mod m d : m %% d <= m. Proof. by rewrite {2}(divn_eq m d) leq_addl. Qed. Lemma leq_div m d : m %/ d <= m. Proof. by case: d => // d; apply: leq_trans (leq_pmulr _ _) (leq_trunc_div _ _). Qed. Lemma ltn_ceil m d : 0 < d -> m < (m %/ d).+1 * d. Proof. by move=> d_gt0; rewrite {1}(divn_eq m d) -addnS mulSnr leq_add2l ltn_mod. Qed. Lemma ltn_divLR m n d : d > 0 -> (m %/ d < n) = (m < n * d). Proof. move=> d_gt0; apply/idP/idP. by rewrite -(leq_pmul2r d_gt0); apply: leq_trans (ltn_ceil _ _). rewrite !ltnNge -(@leq_pmul2r d n) //; apply: contra => le_nd_floor. exact: leq_trans le_nd_floor (leq_trunc_div _ _). Qed. Lemma leq_divRL m n d : d > 0 -> (m <= n %/ d) = (m * d <= n). Proof. by move=> d_gt0; rewrite leqNgt ltn_divLR // -leqNgt. Qed. Lemma ltn_Pdiv m d : 1 < d -> 0 < m -> m %/ d < m. Proof. by move=> d_gt1 m_gt0; rewrite ltn_divLR ?ltn_Pmulr // ltnW. Qed. Lemma divn_gt0 d m : 0 < d -> (0 < m %/ d) = (d <= m). Proof. by move=> d_gt0; rewrite leq_divRL ?mul1n. Qed. Lemma leq_div2r d m n : m <= n -> m %/ d <= n %/ d. Proof. have [-> //| d_gt0 le_mn] := posnP d. by rewrite leq_divRL // (leq_trans _ le_mn) -?leq_divRL. Qed. Lemma leq_div2l m d e : 0 < d -> d <= e -> m %/ e <= m %/ d. Proof. move/leq_divRL=> -> le_de. by apply: leq_trans (leq_trunc_div m e); apply: leq_mul. Qed. Lemma leq_divDl p m n : (m + n) %/ p <= m %/ p + n %/ p + 1. Proof. have [-> //| p_gt0] := posnP p; rewrite -ltnS -addnS ltn_divLR // ltnW //. rewrite {1}(divn_eq n p) {1}(divn_eq m p) addnACA !mulnDl -3!addnS leq_add2l. by rewrite mul2n -addnn -addSn leq_add // ltn_mod. Qed. 
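The subtraction-based `edivn_rec` above is structurally recursive, which is what makes it acceptable to Coq's termination checker, and `divn_eq` states the usual specification m = m %/ d * d + m %% d. A Python transliteration of the loop, as an informal sketch:

```python
def edivn(m, d):
    """Euclidean division by repeated subtraction, mirroring edivn_rec.

    Returns (quotient, remainder); division by 0 returns (0, m),
    matching the Coq convention.
    """
    if d == 0:
        return (0, m)
    q = 0
    while m >= d:
        m, q = m - d, q + 1
    return (q, m)

assert edivn(17, 5) == (3, 2)   # 17 = 3 * 5 + 2, as divn_eq requires
```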
Lemma geq_divBl k m p : k %/ p - m %/ p <= (k - m) %/ p + 1. Proof. rewrite leq_subLR addnA; apply: leq_trans (leq_divDl _ _ _). by rewrite -maxnE leq_div2r ?leq_maxr. Qed. Lemma divnMA m n p : m %/ (n * p) = m %/ n %/ p. Proof. case: n p => [|n] [|p]; rewrite ?muln0 ?div0n //. rewrite {2}(divn_eq m (n.+1 * p.+1)) mulnA mulnAC !divnMDl //. by rewrite [_ %/ p.+1]divn_small ?addn0 // ltn_divLR // mulnC ltn_mod. Qed. Lemma divnAC m n p : m %/ n %/ p = m %/ p %/ n. Proof. by rewrite -!divnMA mulnC. Qed. Lemma modn_small m d : m < d -> m %% d = m. Proof. by move=> lt_md; rewrite {2}(divn_eq m d) divn_small. Qed. Lemma modn_mod m d : m %% d = m %[mod d]. Proof. by case: d => // d; apply: modn_small; rewrite ltn_mod. Qed. Lemma modnMDl p m d : p * d + m = m %[mod d]. Proof. case: (posnP d) => [-> | d_gt0]; first by rewrite muln0. by rewrite {1}(divn_eq m d) addnA -mulnDl modn_def edivn_eq // ltn_mod. Qed. Lemma muln_modr {p m d} : 0 < p -> p * (m %% d) = (p * m) %% (p * d). Proof. move=> p_gt0; apply: (@addnI (p * (m %/ d * d))). by rewrite -mulnDr -divn_eq mulnCA -(divnMl p_gt0) -divn_eq. Qed. Lemma muln_modl {p m d} : 0 < p -> (m %% d) * p = (m * p) %% (d * p). Proof. by rewrite -!(mulnC p); apply: muln_modr. Qed. Lemma modnDl m d : d + m = m %[mod d]. Proof. by rewrite -{1}[d]mul1n modnMDl. Qed. Lemma modnDr m d : m + d = m %[mod d]. Proof. by rewrite addnC modnDl. Qed. Lemma modnn d : d %% d = 0. Proof. by rewrite -{1}[d]addn0 modnDl mod0n. Qed. Lemma modnMl p d : p * d %% d = 0. Proof. by rewrite -[p * d]addn0 modnMDl mod0n. Qed. Lemma modnMr p d : d * p %% d = 0. Proof. by rewrite mulnC modnMl. Qed. Lemma modnDml m n d : m %% d + n = m + n %[mod d]. Proof. by rewrite {2}(divn_eq m d) -addnA modnMDl. Qed. Lemma modnDmr m n d : m + n %% d = m + n %[mod d]. Proof. by rewrite !(addnC m) modnDml. Qed. Lemma modnDm m n d : m %% d + n %% d = m + n %[mod d]. Proof. by rewrite modnDml modnDmr. Qed. Lemma eqn_modDl p m n d : (p + m == p + n %[mod d]) = (m == n %[mod d]). Proof. case: d => [|d]; first by rewrite !modn0 eqn_add2l. apply/eqP/eqP=> eq_mn; last by rewrite -modnDmr eq_mn modnDmr. rewrite -(modnMDl p m) -(modnMDl p n) !mulnSr -!addnA. by rewrite -modnDmr eq_mn modnDmr. Qed. Lemma eqn_modDr p m n d : (m + p == n + p %[mod d]) = (m == n %[mod d]). Proof. by rewrite -!(addnC p) eqn_modDl. Qed. Lemma modnMml m n d : m %% d * n = m * n %[mod d]. Proof. by rewrite {2}(divn_eq m d) mulnDl mulnAC modnMDl. Qed. Lemma modnMmr m n d : m * (n %% d) = m * n %[mod d]. Proof. by rewrite !(mulnC m) modnMml. Qed. Lemma modnMm m n d : m %% d * (n %% d) = m * n %[mod d]. Proof. by rewrite modnMml modnMmr. Qed. Lemma modn2 m : m %% 2 = odd m. Proof. by elim: m => //= m IHm; rewrite -addn1 -modnDml IHm; case odd. Qed. Lemma divn2 m : m %/ 2 = m./2. Proof. by rewrite {2}(divn_eq m 2) modn2 muln2 addnC half_bit_double. Qed. Lemma odd_mod m d : odd d = false -> odd (m %% d) = odd m. Proof. by move=> d_even; rewrite {2}(divn_eq m d) odd_add odd_mul d_even andbF. Qed. Lemma modnXm m n a : (a %% n) ^ m = a ^ m %[mod n]. Proof. by elim: m => // m IHm; rewrite !expnS -modnMmr IHm modnMml modnMmr. Qed. (** Divisibility **) Definition dvdn d m := m %% d == 0. Notation "m %| d" := (dvdn m d) : nat_scope. Lemma dvdnP d m : reflect (exists k, m = k * d) (d %| m). Proof. apply: (iffP eqP) => [md0 | [k ->]]; last by rewrite modnMl. by exists (m %/ d); rewrite {1}(divn_eq m d) md0 addn0. Qed. Implicit Arguments dvdnP [d m]. Prenex Implicits dvdnP. Lemma dvdn0 d : d %| 0. Proof. by case: d. Qed. 
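Lemmas such as `modnDm`, `modnMm`, and `modnXm` say that `%%` commutes with addition, multiplication, and exponentiation of the arguments. A quick numeric spot-check of the same identities (an illustration, not a proof):

```python
d, m, n = 7, 123, 456
assert (m % d + n % d) % d == (m + n) % d        # modnDm
assert ((m % d) * (n % d)) % d == (m * n) % d    # modnMm
assert pow(m % d, 5, d) == pow(m, 5, d)          # modnXm
```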
Lemma dvd0n n : (0 %| n) = (n == 0). Proof. by case: n. Qed. Lemma dvdn1 d : (d %| 1) = (d == 1). Proof. by case: d => [|[|d]] //; rewrite /dvdn modn_small. Qed. Lemma dvd1n m : 1 %| m. Proof. by rewrite /dvdn modn1. Qed. Lemma dvdn_gt0 d m : m > 0 -> d %| m -> d > 0. Proof. by case: d => // /prednK <-. Qed. Lemma dvdnn m : m %| m. Proof. by rewrite /dvdn modnn. Qed. Lemma dvdn_mull d m n : d %| n -> d %| m * n. Proof. by case/dvdnP=> n' ->; rewrite /dvdn mulnA modnMl. Qed. Lemma dvdn_mulr d m n : d %| m -> d %| m * n. Proof. by move=> d_m; rewrite mulnC dvdn_mull. Qed. Hint Resolve dvdn0 dvd1n dvdnn dvdn_mull dvdn_mulr. Lemma dvdn_mul d1 d2 m1 m2 : d1 %| m1 -> d2 %| m2 -> d1 * d2 %| m1 * m2. Proof. by move=> /dvdnP[q1 ->] /dvdnP[q2 ->]; rewrite mulnCA -mulnA 2?dvdn_mull. Qed. Lemma dvdn_trans n d m : d %| n -> n %| m -> d %| m. Proof. by move=> d_dv_n /dvdnP[n1 ->]; apply: dvdn_mull. Qed. Lemma dvdn_eq d m : (d %| m) = (m %/ d * d == m). Proof. apply/eqP/eqP=> [modm0 | <-]; last exact: modnMl. by rewrite {2}(divn_eq m d) modm0 addn0. Qed. Lemma dvdn2 n : (2 %| n) = ~~ odd n. Proof. by rewrite /dvdn modn2; case (odd n). Qed. Lemma dvdn_odd m n : m %| n -> odd n -> odd m. Proof. by move=> m_dv_n; apply: contraTT; rewrite -!dvdn2 => /dvdn_trans->. Qed. Lemma divnK d m : d %| m -> m %/ d * d = m. Proof. by rewrite dvdn_eq; move/eqP. Qed. Lemma leq_divLR d m n : d %| m -> (m %/ d <= n) = (m <= n * d). Proof. by case: d m => [|d] [|m] ///divnK=> {2}<-; rewrite leq_pmul2r. Qed. Lemma ltn_divRL d m n : d %| m -> (n < m %/ d) = (n * d < m). Proof. by move=> dv_d_m; rewrite !ltnNge leq_divLR. Qed. Lemma eqn_div d m n : d > 0 -> d %| m -> (n == m %/ d) = (n * d == m). Proof. by move=> d_gt0 dv_d_m; rewrite -(eqn_pmul2r d_gt0) divnK. Qed. Lemma eqn_mul d m n : d > 0 -> d %| m -> (m == n * d) = (m %/ d == n). Proof. by move=> d_gt0 dv_d_m; rewrite eq_sym -eqn_div // eq_sym. Qed. Lemma divn_mulAC d m n : d %| m -> m %/ d * n = m * n %/ d. Proof. case: d m => [[] //| d m] dv_d_m; apply/eqP. by rewrite eqn_div ?dvdn_mulr // mulnAC divnK. Qed. Lemma muln_divA d m n : d %| n -> m * (n %/ d) = m * n %/ d. Proof. by move=> dv_d_m; rewrite !(mulnC m) divn_mulAC. Qed. Lemma muln_divCA d m n : d %| m -> d %| n -> m * (n %/ d) = n * (m %/ d). Proof. by move=> dv_d_m dv_d_n; rewrite mulnC divn_mulAC ?muln_divA. Qed. Lemma divnA m n p : p %| n -> m %/ (n %/ p) = m * p %/ n. Proof. by case: p => [|p] dv_n; rewrite -{2}(divnK dv_n) // divnMr. Qed. Lemma modn_dvdm m n d : d %| m -> n %% m = n %[mod d]. Proof. by case/dvdnP=> q def_m; rewrite {2}(divn_eq n m) {3}def_m mulnA modnMDl. Qed. Lemma dvdn_leq d m : 0 < m -> d %| m -> d <= m. Proof. by move=> m_gt0 /dvdnP[[|k] Dm]; rewrite Dm // leq_addr in m_gt0 *. Qed. Lemma gtnNdvd n d : 0 < n -> n < d -> (d %| n) = false. Proof. by move=> n_gt0 lt_nd; rewrite /dvdn eqn0Ngt modn_small ?n_gt0. Qed. Lemma eqn_dvd m n : (m == n) = (m %| n) && (n %| m). Proof. case: m n => [|m] [|n] //; apply/idP/andP; first by move/eqP->; auto. rewrite eqn_leq => [[Hmn Hnm]]; apply/andP; have:= dvdn_leq; auto. Qed. Lemma dvdn_pmul2l p d m : 0 < p -> (p * d %| p * m) = (d %| m). Proof. by case: p => // p _; rewrite /dvdn -muln_modr // muln_eq0. Qed. Implicit Arguments dvdn_pmul2l [p m d]. Lemma dvdn_pmul2r p d m : 0 < p -> (d * p %| m * p) = (d %| m). Proof. by move=> p_gt0; rewrite -!(mulnC p) dvdn_pmul2l. Qed. Implicit Arguments dvdn_pmul2r [p m d]. Lemma dvdn_divLR p d m : 0 < p -> p %| d -> (d %/ p %| m) = (d %| m * p). Proof. by move=> /(@dvdn_pmul2r p _ m) <- /divnK->. Qed. 
Lemma dvdn_divRL p d m : p %| m -> (d %| m %/ p) = (d * p %| m). Proof. have [-> | /(@dvdn_pmul2r p d) <- /divnK-> //] := posnP p. by rewrite divn0 muln0 dvdn0. Qed. Lemma dvdn_div d m : d %| m -> m %/ d %| m. Proof. by move/divnK=> {2}<-; apply: dvdn_mulr. Qed. Lemma dvdn_exp2l p m n : m <= n -> p ^ m %| p ^ n. Proof. by move/subnK <-; rewrite expnD dvdn_mull. Qed. Lemma dvdn_Pexp2l p m n : p > 1 -> (p ^ m %| p ^ n) = (m <= n). Proof. move=> p_gt1; case: leqP => [|gt_n_m]; first exact: dvdn_exp2l. by rewrite gtnNdvd ?ltn_exp2l ?expn_gt0 // ltnW. Qed. Lemma dvdn_exp2r m n k : m %| n -> m ^ k %| n ^ k. Proof. by case/dvdnP=> q ->; rewrite expnMn dvdn_mull. Qed. Lemma dvdn_addr m d n : d %| m -> (d %| m + n) = (d %| n). Proof. by case/dvdnP=> q ->; rewrite /dvdn modnMDl. Qed. Lemma dvdn_addl n d m : d %| n -> (d %| m + n) = (d %| m). Proof. by rewrite addnC; apply: dvdn_addr. Qed. Lemma dvdn_add d m n : d %| m -> d %| n -> d %| m + n. Proof. by move/dvdn_addr->. Qed. Lemma dvdn_add_eq d m n : d %| m + n -> (d %| m) = (d %| n). Proof. by move=> dv_d_mn; apply/idP/idP => [/dvdn_addr | /dvdn_addl] <-. Qed. Lemma dvdn_subr d m n : n <= m -> d %| m -> (d %| m - n) = (d %| n). Proof. by move=> le_n_m dv_d_m; apply: dvdn_add_eq; rewrite subnK. Qed. Lemma dvdn_subl d m n : n <= m -> d %| n -> (d %| m - n) = (d %| m). Proof. by move=> le_n_m dv_d_m; rewrite -(dvdn_addl _ dv_d_m) subnK. Qed. Lemma dvdn_sub d m n : d %| m -> d %| n -> d %| m - n. Proof. by case: (leqP n m) => [le_nm /dvdn_subr <- // | /ltnW/eqnP ->]; rewrite dvdn0. Qed. Lemma dvdn_exp k d m : 0 < k -> d %| m -> d %| (m ^ k). Proof. by case: k => // k _ d_dv_m; rewrite expnS dvdn_mulr. Qed. Hint Resolve dvdn_add dvdn_sub dvdn_exp. Lemma eqn_mod_dvd d m n : n <= m -> (m == n %[mod d]) = (d %| m - n). Proof. by move=> le_mn; rewrite -{1}[n]add0n -{1}(subnK le_mn) eqn_modDr mod0n. Qed. Lemma divnDl m n d : d %| m -> (m + n) %/ d = m %/ d + n %/ d. Proof. by case: d => // d /divnK{1}<-; rewrite divnMDl. Qed. Lemma divnDr m n d : d %| n -> (m + n) %/ d = m %/ d + n %/ d. Proof. by move=> dv_n; rewrite addnC divnDl // addnC. Qed. (***********************************************************************) (* A function that computes the gcd of 2 numbers *) (***********************************************************************) Fixpoint gcdn_rec m n := let n' := n %% m in if n' is 0 then m else if m - n'.-1 is m'.+1 then gcdn_rec (m' %% n') n' else n'. Definition gcdn := nosimpl gcdn_rec. Lemma gcdnE m n : gcdn m n = if m == 0 then n else gcdn (n %% m) m. Proof. rewrite /gcdn; elim: m {-2}m (leqnn m) n => [|s IHs] [|m] le_ms [|n] //=. case def_n': (_ %% _) => // [n']. have{def_n'} lt_n'm: n' < m by rewrite -def_n' -ltnS ltn_pmod. rewrite {}IHs ?(leq_trans lt_n'm) // subn_if_gt ltnW //=; congr gcdn_rec. by rewrite -{2}(subnK (ltnW lt_n'm)) -addnS modnDr. Qed. Lemma gcdnn : idempotent gcdn. Proof. by case=> // n; rewrite gcdnE modnn. Qed. Lemma gcdnC : commutative gcdn. Proof. move=> m n; wlog lt_nm: m n / n < m. by case: (ltngtP n m) => [||-> //]; last symmetry; auto. by rewrite gcdnE -{1}(ltn_predK lt_nm) modn_small. Qed. Lemma gcd0n : left_id 0 gcdn. Proof. by case. Qed. Lemma gcdn0 : right_id 0 gcdn. Proof. by case. Qed. Lemma gcd1n : left_zero 1 gcdn. Proof. by move=> n; rewrite gcdnE modn1. Qed. Lemma gcdn1 : right_zero 1 gcdn. Proof. by move=> n; rewrite gcdnC gcd1n. Qed. Lemma dvdn_gcdr m n : gcdn m n %| n. Proof. elim: m {-2}m (leqnn m) n => [|s IHs] [|m] le_ms [|n] //. 
rewrite gcdnE; case def_n': (_ %% _) => [|n']; first by rewrite /dvdn def_n'. have lt_n's: n' < s by rewrite -ltnS (leq_trans _ le_ms) // -def_n' ltn_pmod. rewrite /= (divn_eq n.+1 m.+1) def_n' dvdn_addr ?dvdn_mull //; last exact: IHs. by rewrite gcdnE /= IHs // (leq_trans _ lt_n's) // ltnW // ltn_pmod. Qed. Lemma dvdn_gcdl m n : gcdn m n %| m. Proof. by rewrite gcdnC dvdn_gcdr. Qed. Lemma gcdn_gt0 m n : (0 < gcdn m n) = (0 < m) || (0 < n). Proof. by case: m n => [|m] [|n] //; apply: (@dvdn_gt0 _ m.+1) => //; apply: dvdn_gcdl. Qed. Lemma gcdnMDl k m n : gcdn m (k * m + n) = gcdn m n. Proof. by rewrite !(gcdnE m) modnMDl mulnC; case: m. Qed. Lemma gcdnDl m n : gcdn m (m + n) = gcdn m n. Proof. by rewrite -{2}(mul1n m) gcdnMDl. Qed. Lemma gcdnDr m n : gcdn m (n + m) = gcdn m n. Proof. by rewrite addnC gcdnDl. Qed. Lemma gcdnMl n m : gcdn n (m * n) = n. Proof. by case: n => [|n]; rewrite gcdnE modnMl gcd0n. Qed. Lemma gcdnMr n m : gcdn n (n * m) = n. Proof. by rewrite mulnC gcdnMl. Qed. Lemma gcdn_idPl {m n} : reflect (gcdn m n = m) (m %| n). Proof. by apply: (iffP idP) => [/dvdnP[q ->] | <-]; rewrite (gcdnMl, dvdn_gcdr). Qed. Lemma gcdn_idPr {m n} : reflect (gcdn m n = n) (n %| m). Proof. by rewrite gcdnC; apply: gcdn_idPl. Qed. Lemma expn_min e m n : e ^ minn m n = gcdn (e ^ m) (e ^ n). Proof. rewrite /minn; case: leqP; [rewrite gcdnC | move/ltnW]; by move/(dvdn_exp2l e)/gcdn_idPl. Qed. Lemma gcdn_modr m n : gcdn m (n %% m) = gcdn m n. Proof. by rewrite {2}(divn_eq n m) gcdnMDl. Qed. Lemma gcdn_modl m n : gcdn (m %% n) n = gcdn m n. Proof. by rewrite !(gcdnC _ n) gcdn_modr. Qed. (* Extended gcd, which computes Bezout coefficients. *) Fixpoint Bezout_rec km kn qs := if qs is q :: qs' then Bezout_rec kn (NatTrec.add_mul q kn km) qs' else (km, kn). Fixpoint egcdn_rec m n s qs := if s is s'.+1 then let: (q, r) := edivn m n in if r > 0 then egcdn_rec n r s' (q :: qs) else if odd (size qs) then qs else q.-1 :: qs else [::0]. Definition egcdn m n := Bezout_rec 0 1 (egcdn_rec m n n [::]). CoInductive egcdn_spec m n : nat * nat -> Type := EgcdnSpec km kn of km * m = kn * n + gcdn m n & kn * gcdn m n < m : egcdn_spec m n (km, kn). Lemma egcd0n n : egcdn 0 n = (1, 0). Proof. by case: n. Qed. Lemma egcdnP m n : m > 0 -> egcdn_spec m n (egcdn m n). Proof. rewrite /egcdn; have: (n, m) = Bezout_rec n m [::] by []. case: (posnP n) => [-> /=|]; first by split; rewrite // mul1n gcdn0. move: {2 6}n {4 6}n {1 4}m [::] (ltnSn n) => s n0 m0. elim: s n m => [[]//|s IHs] n m qs /= le_ns n_gt0 def_mn0 m_gt0. case: edivnP => q r def_m; rewrite n_gt0 /= => lt_rn. case: posnP => [r0 {s le_ns IHs lt_rn}|r_gt0]; last first. by apply: IHs => //=; [rewrite (leq_trans lt_rn) | rewrite natTrecE -def_m]. rewrite {r}r0 addn0 in def_m; set b := odd _; pose d := gcdn m n. pose km := ~~ b : nat; pose kn := if b then 1 else q.-1. rewrite (_ : Bezout_rec _ _ _ = Bezout_rec km kn qs); last first. by rewrite /kn /km; case: (b) => //=; rewrite natTrecE addn0 muln1. have def_d: d = n by rewrite /d def_m gcdnC gcdnE modnMl gcd0n -[n]prednK. have: km * m + 2 * b * d = kn * n + d. rewrite {}/kn {}/km def_m def_d -mulSnr; case: b; rewrite //= addn0 mul1n. by rewrite prednK //; apply: dvdn_gt0 m_gt0 _; rewrite def_m dvdn_mulr. have{def_m}: kn * d <= m. have q_gt0 : 0 < q by rewrite def_m muln_gt0 n_gt0 ?andbT in m_gt0. by rewrite /kn; case b; rewrite def_d def_m leq_pmul2r // leq_pred. have{def_d}: km * d <= n by rewrite -[n]mul1n def_d leq_pmul2r // leq_b1. move: km {q}kn m_gt0 n_gt0 def_mn0; rewrite {}/d {}/b. 
elim: qs m n => [|q qs IHq] n r kn kr n_gt0 r_gt0 /=. case=> -> -> {m0 n0}; rewrite !addn0 => le_kn_r _ def_d; split=> //. have d_gt0: 0 < gcdn n r by rewrite gcdn_gt0 n_gt0. have: 0 < kn * n by rewrite def_d addn_gt0 d_gt0 orbT. rewrite muln_gt0 n_gt0 andbT; move/ltn_pmul2l <-. by rewrite def_d -addn1 leq_add // mulnCA leq_mul2l le_kn_r orbT. rewrite !natTrecE; set m:= _ + r; set km := _ * _ + kn; pose d := gcdn m n. have ->: gcdn n r = d by rewrite [d]gcdnC gcdnMDl. have m_gt0: 0 < m by rewrite addn_gt0 r_gt0 orbT. have d_gt0: 0 < d by rewrite gcdn_gt0 m_gt0. move/IHq=> {IHq} IHq le_kn_r le_kr_n def_d; apply: IHq => //; rewrite -/d. by rewrite mulnDl leq_add // -mulnA leq_mul2l le_kr_n orbT. apply: (@addIn d); rewrite -!addnA addnn addnCA mulnDr -addnA addnCA. rewrite /km mulnDl mulnCA mulnA -addnA; congr (_ + _). by rewrite -def_d addnC -addnA -mulnDl -mulnDr addn_negb -mul2n. Qed. Lemma Bezoutl m n : m > 0 -> {a | a < m & m %| gcdn m n + a * n}. Proof. move=> m_gt0; case: (egcdnP n m_gt0) => km kn def_d lt_kn_m. exists kn; last by rewrite addnC -def_d dvdn_mull. apply: leq_ltn_trans lt_kn_m. by rewrite -{1}[kn]muln1 leq_mul2l gcdn_gt0 m_gt0 orbT. Qed. Lemma Bezoutr m n : n > 0 -> {a | a < n & n %| gcdn m n + a * m}. Proof. by rewrite gcdnC; apply: Bezoutl. Qed. (* Back to the gcd. *) Lemma dvdn_gcd p m n : p %| gcdn m n = (p %| m) && (p %| n). Proof. apply/idP/andP=> [dv_pmn | [dv_pm dv_pn]]. by rewrite !(dvdn_trans dv_pmn) ?dvdn_gcdl ?dvdn_gcdr. case (posnP n) => [->|n_gt0]; first by rewrite gcdn0. case: (Bezoutr m n_gt0) => // km _ /(dvdn_trans dv_pn). by rewrite dvdn_addl // dvdn_mull. Qed. Lemma gcdnAC : right_commutative gcdn. Proof. suffices dvd m n p: gcdn (gcdn m n) p %| gcdn (gcdn m p) n. by move=> m n p; apply/eqP; rewrite eqn_dvd !dvd. rewrite !dvdn_gcd dvdn_gcdr. by rewrite !(dvdn_trans (dvdn_gcdl _ p)) ?dvdn_gcdl ?dvdn_gcdr. Qed. Lemma gcdnA : associative gcdn. Proof. by move=> m n p; rewrite !(gcdnC m) gcdnAC. Qed. Lemma gcdnCA : left_commutative gcdn. Proof. by move=> m n p; rewrite !gcdnA (gcdnC m). Qed. Lemma gcdnACA : interchange gcdn gcdn. Proof. by move=> m n p q; rewrite -!gcdnA (gcdnCA n). Qed. Lemma muln_gcdr : right_distributive muln gcdn. Proof. move=> p m n; case: (posnP p) => [-> //| p_gt0]. elim: {m}m.+1 {-2}m n (ltnSn m) => // s IHs m n; rewrite ltnS => le_ms. rewrite gcdnE [rhs in _ = rhs]gcdnE muln_eq0 (gtn_eqF p_gt0) -muln_modr //=. by case: posnP => // m_gt0; apply: IHs; apply: leq_trans le_ms; apply: ltn_pmod. Qed. Lemma muln_gcdl : left_distributive muln gcdn. Proof. by move=> m n p; rewrite -!(mulnC p) muln_gcdr. Qed. Lemma gcdn_def d m n : d %| m -> d %| n -> (forall d', d' %| m -> d' %| n -> d' %| d) -> gcdn m n = d. Proof. move=> dv_dm dv_dn gdv_d; apply/eqP. by rewrite eqn_dvd dvdn_gcd dv_dm dv_dn gdv_d ?dvdn_gcdl ?dvdn_gcdr. Qed. Lemma muln_divCA_gcd n m : n * (m %/ gcdn n m) = m * (n %/ gcdn n m). Proof. by rewrite muln_divCA ?dvdn_gcdl ?dvdn_gcdr. Qed. (* We derive the lcm directly. *) Definition lcmn m n := m * n %/ gcdn m n. Lemma lcmnC : commutative lcmn. Proof. by move=> m n; rewrite /lcmn mulnC gcdnC. Qed. Lemma lcm0n : left_zero 0 lcmn. Proof. by move=> n; apply: div0n. Qed. Lemma lcmn0 : right_zero 0 lcmn. Proof. by move=> n; rewrite lcmnC lcm0n. Qed. Lemma lcm1n : left_id 1 lcmn. Proof. by move=> n; rewrite /lcmn gcd1n mul1n divn1. Qed. Lemma lcmn1 : right_id 1 lcmn. Proof. by move=> n; rewrite lcmnC lcm1n. Qed. Lemma muln_lcm_gcd m n : lcmn m n * gcdn m n = m * n. Proof. by apply/eqP; rewrite divnK ?dvdn_mull ?dvdn_gcdr. Qed. 
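`egcdn_spec` packages the Bezout identity in the subtraction-free form km * m = kn * n + gcdn m n. A brute-force Python sketch of a pair satisfying that convention (for small inputs only; the Coq `egcdn` computes it in logarithmically many steps):

```python
from math import gcd

def egcdn(m, n):
    """Return (km, kn) with km*m == kn*n + gcd(m, n), as in egcdn_spec.

    Brute force over km; fine for small positive inputs, unlike egcdn.
    """
    g = gcd(m, n)
    if n == 0:
        return 1, 0
    km = next(k for k in range(1, n + 1) if (k * m - g) % n == 0)
    return km, (km * m - g) // n

km, kn = egcdn(42, 30)
assert km * 42 == kn * 30 + gcd(42, 30)   # 3 * 42 == 4 * 30 + 6
```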
Lemma lcmn_gt0 m n : (0 < lcmn m n) = (0 < m) && (0 < n). Proof. by rewrite -muln_gt0 ltn_divRL ?dvdn_mull ?dvdn_gcdr. Qed. Lemma muln_lcmr : right_distributive muln lcmn. Proof. case=> // m n p; rewrite /lcmn -muln_gcdr -!mulnA divnMl // mulnCA. by rewrite muln_divA ?dvdn_mull ?dvdn_gcdr. Qed. Lemma muln_lcml : left_distributive muln lcmn. Proof. by move=> m n p; rewrite -!(mulnC p) muln_lcmr. Qed. Lemma lcmnA : associative lcmn. Proof. move=> m n p; rewrite {1 3}/lcmn mulnC !divn_mulAC ?dvdn_mull ?dvdn_gcdr //. rewrite -!divnMA ?dvdn_mulr ?dvdn_gcdl // mulnC mulnA !muln_gcdr. by rewrite ![_ * lcmn _ _]mulnC !muln_lcm_gcd !muln_gcdl -!(mulnC m) gcdnA. Qed. Lemma lcmnCA : left_commutative lcmn. Proof. by move=> m n p; rewrite !lcmnA (lcmnC m). Qed. Lemma lcmnAC : right_commutative lcmn. Proof. by move=> m n p; rewrite -!lcmnA (lcmnC n). Qed. Lemma lcmnACA : interchange lcmn lcmn. Proof. by move=> m n p q; rewrite -!lcmnA (lcmnCA n). Qed. Lemma dvdn_lcml d1 d2 : d1 %| lcmn d1 d2. Proof. by rewrite /lcmn -muln_divA ?dvdn_gcdr ?dvdn_mulr. Qed. Lemma dvdn_lcmr d1 d2 : d2 %| lcmn d1 d2. Proof. by rewrite lcmnC dvdn_lcml. Qed. Lemma dvdn_lcm d1 d2 m : lcmn d1 d2 %| m = (d1 %| m) && (d2 %| m). Proof. case: d1 d2 => [|d1] [|d2]; try by case: m => [|m]; rewrite ?lcmn0 ?andbF. rewrite -(@dvdn_pmul2r (gcdn d1.+1 d2.+1)) ?gcdn_gt0 // muln_lcm_gcd. by rewrite muln_gcdr dvdn_gcd {1}mulnC andbC !dvdn_pmul2r. Qed. Lemma lcmnMl m n : lcmn m (m * n) = m * n. Proof. by case: m => // m; rewrite /lcmn gcdnMr mulKn. Qed. Lemma lcmnMr m n : lcmn n (m * n) = m * n. Proof. by rewrite mulnC lcmnMl. Qed. Lemma lcmn_idPr {m n} : reflect (lcmn m n = n) (m %| n). Proof. by apply: (iffP idP) => [/dvdnP[q ->] | <-]; rewrite (lcmnMr, dvdn_lcml). Qed. Lemma lcmn_idPl {m n} : reflect (lcmn m n = m) (n %| m). Proof. by rewrite lcmnC; apply: lcmn_idPr. Qed. Lemma expn_max e m n : e ^ maxn m n = lcmn (e ^ m) (e ^ n). Proof. rewrite /maxn; case: leqP; [rewrite lcmnC | move/ltnW]; by move/(dvdn_exp2l e)/lcmn_idPr. Qed. (* Coprime factors *) Definition coprime m n := gcdn m n == 1. Lemma coprime1n n : coprime 1 n. Proof. by rewrite /coprime gcd1n. Qed. Lemma coprimen1 n : coprime n 1. Proof. by rewrite /coprime gcdn1. Qed. Lemma coprime_sym m n : coprime m n = coprime n m. Proof. by rewrite /coprime gcdnC. Qed. Lemma coprime_modl m n : coprime (m %% n) n = coprime m n. Proof. by rewrite /coprime gcdn_modl. Qed. Lemma coprime_modr m n : coprime m (n %% m) = coprime m n. Proof. by rewrite /coprime gcdn_modr. Qed. Lemma coprime2n n : coprime 2 n = odd n. Proof. by rewrite -coprime_modr modn2; case: (odd n). Qed. Lemma coprimen2 n : coprime n 2 = odd n. Proof. by rewrite coprime_sym coprime2n. Qed. Lemma coprimeSn n : coprime n.+1 n. Proof. by rewrite -coprime_modl (modnDr 1) coprime_modl coprime1n. Qed. Lemma coprimenS n : coprime n n.+1. Proof. by rewrite coprime_sym coprimeSn. Qed. Lemma coprimePn n : n > 0 -> coprime n.-1 n. Proof. by case: n => // n _; rewrite coprimenS. Qed. Lemma coprimenP n : n > 0 -> coprime n n.-1. Proof. by case: n => // n _; rewrite coprimeSn. Qed. Lemma coprimeP n m : n > 0 -> reflect (exists u, u.1 * n - u.2 * m = 1) (coprime n m). Proof. move=> n_gt0; apply: (iffP eqP) => [<-| [[kn km] /= kn_km_1]]. by have [kn km kg _] := egcdnP m n_gt0; exists (kn, km); rewrite kg addKn. apply gcdn_def; rewrite ?dvd1n // => d dv_d_n dv_d_m. by rewrite -kn_km_1 dvdn_subr ?dvdn_mull // ltnW // -subn_gt0 kn_km_1. Qed. Lemma modn_coprime k n : 0 < k -> (exists u, (k * u) %% n = 1) -> coprime k n. Proof. 
move=> k_gt0 [u Hu]; apply/coprimeP=> //. by exists (u, k * u %/ n); rewrite /= mulnC {1}(divn_eq (k * u) n) addKn. Qed. Lemma Gauss_dvd m n p : coprime m n -> (m * n %| p) = (m %| p) && (n %| p). Proof. by move=> co_mn; rewrite -muln_lcm_gcd (eqnP co_mn) muln1 dvdn_lcm. Qed. Lemma Gauss_dvdr m n p : coprime m n -> (m %| n * p) = (m %| p). Proof. case: n => [|n] co_mn; first by case: m co_mn => [|[]] // _; rewrite !dvd1n. by symmetry; rewrite mulnC -(@dvdn_pmul2r n.+1) ?Gauss_dvd // andbC dvdn_mull. Qed. Lemma Gauss_dvdl m n p : coprime m p -> (m %| n * p) = (m %| n). Proof. by rewrite mulnC; apply: Gauss_dvdr. Qed. Lemma dvdn_double_leq m n : m %| n -> odd m -> ~~ odd n -> 0 < n -> m.*2 <= n. Proof. move=> m_dv_n odd_m even_n n_gt0. by rewrite -muln2 dvdn_leq // Gauss_dvd ?coprimen2 ?m_dv_n ?dvdn2. Qed. Lemma dvdn_double_ltn m n : m %| n.-1 -> odd m -> odd n -> 1 < n -> m.*2 < n. Proof. by case: n => //; apply: dvdn_double_leq. Qed. Lemma Gauss_gcdr p m n : coprime p m -> gcdn p (m * n) = gcdn p n. Proof. move=> co_pm; apply/eqP; rewrite eqn_dvd !dvdn_gcd !dvdn_gcdl /=. rewrite andbC dvdn_mull ?dvdn_gcdr //= -(@Gauss_dvdr _ m) ?dvdn_gcdr //. by rewrite /coprime gcdnAC (eqnP co_pm) gcd1n. Qed. Lemma Gauss_gcdl p m n : coprime p n -> gcdn p (m * n) = gcdn p m. Proof. by move=> co_pn; rewrite mulnC Gauss_gcdr. Qed. Lemma coprime_mulr p m n : coprime p (m * n) = coprime p m && coprime p n. Proof. case co_pm: (coprime p m) => /=; first by rewrite /coprime Gauss_gcdr. apply/eqP=> co_p_mn; case/eqnP: co_pm; apply gcdn_def => // d dv_dp dv_dm. by rewrite -co_p_mn dvdn_gcd dv_dp dvdn_mulr. Qed. Lemma coprime_mull p m n : coprime (m * n) p = coprime m p && coprime n p. Proof. by rewrite -!(coprime_sym p) coprime_mulr. Qed. Lemma coprime_pexpl k m n : 0 < k -> coprime (m ^ k) n = coprime m n. Proof. case: k => // k _; elim: k => [|k IHk]; first by rewrite expn1. by rewrite expnS coprime_mull -IHk; case coprime. Qed. Lemma coprime_pexpr k m n : 0 < k -> coprime m (n ^ k) = coprime m n. Proof. by move=> k_gt0; rewrite !(coprime_sym m) coprime_pexpl. Qed. Lemma coprime_expl k m n : coprime m n -> coprime (m ^ k) n. Proof. by case: k => [|k] co_pm; rewrite ?coprime1n // coprime_pexpl. Qed. Lemma coprime_expr k m n : coprime m n -> coprime m (n ^ k). Proof. by rewrite !(coprime_sym m); apply: coprime_expl. Qed. Lemma coprime_dvdl m n p : m %| n -> coprime n p -> coprime m p. Proof. by case/dvdnP=> d ->; rewrite coprime_mull => /andP[]. Qed. Lemma coprime_dvdr m n p : m %| n -> coprime p n -> coprime p m. Proof. by rewrite !(coprime_sym p); apply: coprime_dvdl. Qed. Lemma coprime_egcdn n m : n > 0 -> coprime (egcdn n m).1 (egcdn n m).2. Proof. move=> n_gt0; case: (egcdnP m n_gt0) => kn km /= /eqP. have [/dvdnP[u defn] /dvdnP[v defm]] := (dvdn_gcdl n m, dvdn_gcdr n m). rewrite -[gcdn n m]mul1n {1}defm {1}defn !mulnA -mulnDl addnC. rewrite eqn_pmul2r ?gcdn_gt0 ?n_gt0 //; case: kn => // kn /eqP def_knu _. by apply/coprimeP=> //; exists (u, v); rewrite mulnC def_knu mulnC addnK. Qed. Lemma dvdn_pexp2r m n k : k > 0 -> (m ^ k %| n ^ k) = (m %| n). Proof. move=> k_gt0; apply/idP/idP=> [dv_mn_k|]; last exact: dvdn_exp2r. case: (posnP n) => [-> | n_gt0]; first by rewrite dvdn0. have [n' def_n] := dvdnP (dvdn_gcdr m n); set d := gcdn m n in def_n. have [m' def_m] := dvdnP (dvdn_gcdl m n); rewrite -/d in def_m. have d_gt0: d > 0 by rewrite gcdn_gt0 n_gt0 orbT. rewrite def_m def_n !expnMn dvdn_pmul2r ?expn_gt0 ?d_gt0 // in dv_mn_k. have: coprime (m' ^ k) (n' ^ k). 
rewrite coprime_pexpl // coprime_pexpr // /coprime -(eqn_pmul2r d_gt0) mul1n. by rewrite muln_gcdl -def_m -def_n. rewrite /coprime -gcdn_modr (eqnP dv_mn_k) gcdn0 -(exp1n k). by rewrite (inj_eq (expIn k_gt0)) def_m; move/eqP->; rewrite mul1n dvdn_gcdr. Qed. Section Chinese. (***********************************************************************) (* The chinese remainder theorem *) (***********************************************************************) Variables m1 m2 : nat. Hypothesis co_m12 : coprime m1 m2. Lemma chinese_remainder x y : (x == y %[mod m1 * m2]) = (x == y %[mod m1]) && (x == y %[mod m2]). Proof. wlog le_yx : x y / y <= x; last by rewrite !eqn_mod_dvd // Gauss_dvd. by case/orP: (leq_total y x); last rewrite !(eq_sym (x %% _)); auto. Qed. (***********************************************************************) (* A function that solves the chinese remainder problem *) (***********************************************************************) Definition chinese r1 r2 := r1 * m2 * (egcdn m2 m1).1 + r2 * m1 * (egcdn m1 m2).1. Lemma chinese_modl r1 r2 : chinese r1 r2 = r1 %[mod m1]. Proof. rewrite /chinese; case: (posnP m2) co_m12 => [-> /eqnP | m2_gt0 _]. by rewrite gcdn0 => ->; rewrite !modn1. case: egcdnP => // k2 k1 def_m1 _. rewrite mulnAC -mulnA def_m1 gcdnC (eqnP co_m12) mulnDr mulnA muln1. by rewrite addnAC (mulnAC _ m1) -mulnDl modnMDl. Qed. Lemma chinese_modr r1 r2 : chinese r1 r2 = r2 %[mod m2]. Proof. rewrite /chinese; case: (posnP m1) co_m12 => [-> /eqnP | m1_gt0 _]. by rewrite gcd0n => ->; rewrite !modn1. case: (egcdnP m2) => // k1 k2 def_m2 _. rewrite addnC mulnAC -mulnA def_m2 (eqnP co_m12) mulnDr mulnA muln1. by rewrite addnAC (mulnAC _ m2) -mulnDl modnMDl. Qed. Lemma chinese_mod x : x = chinese (x %% m1) (x %% m2) %[mod m1 * m2]. Proof. apply/eqP; rewrite chinese_remainder //. by rewrite chinese_modl chinese_modr !modn_mod !eqxx. Qed. End Chinese.
```python from IPython.display import Image from IPython.core.display import HTML from sympy import *; x,h,t,y,z,a,b,c = symbols("x h t y z a b c", real=True) Image(url= "https://i.imgur.com/Szz3QgV.png") ``` ```python f = 4*sin(x) + 2*cos(y) -4*sin(x)*cos(y)+x - 6*pi f ``` $\displaystyle x - 4 \sin{\left(x \right)} \cos{\left(y \right)} + 4 \sin{\left(x \right)} + 2 \cos{\left(y \right)} - 6 \pi$ ```python solve(f,y) ``` [-acos((x + 4*sin(x) - 6*pi)/(2*(2*sin(x) - 1))) + 2*pi, acos((x + 4*sin(x) - 6*pi)/(2*(2*sin(x) - 1)))] ```python idiff(f,y,x) ``` $\displaystyle \frac{4 \cos{\left(x \right)} \cos{\left(y \right)} - 4 \cos{\left(x \right)} - 1}{2 \left(2 \sin{\left(x \right)} - 1\right) \sin{\left(y \right)}}$ ```python idiff(f,y,x).subs(x,6*pi).subs(y,(7*pi)/2) ``` $\displaystyle - \frac{5}{2}$ ```python Image(url= "https://i.imgur.com/BZrIT5A.png") ``` ```python ```
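A worthwhile sanity check, added here rather than taken from the notebook: the point (6π, 7π/2) actually lies on the curve, since sin(6π) = 0 and cos(7π/2) = 0 make every term cancel, so evaluating the implicit derivative there is legitimate.

```python
f.subs({x: 6*pi, y: 7*pi/2})   # evaluates to 0, so the point is on the curve
```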
function prediction = AlexNet(varargin) %AlexNet Returns an AlexNet model for ImageNet % M = models.AlexNet() returns the model proposed in: % % Krizhevsky et al., "ImageNet classification with deep convolutional % neural networks", NIPS 2012. % % models.AlexNet(..., 'option', value, ...) accepts the following % options: % % `pretrained`:: false % If true, returns a model pre-trained on ImageNet (using the % MatConvNet example code). % % `input`:: default input % Specifies an input (images) layer for the network. If unspecified, a % new one is created. % % `numClasses`:: 1000 % Number of output classes. % % `batchNorm`:: true % Whether to use batch normalization. % % `normalization`:: [5 1 0.0001/5 0.75] % Parameters for vl_nnnormalize layer (only used without batch-norm). % % Any other options will be passed to models.ConvBlock(), and can be used % to change the activation function, weight initialization, etc. % % Suggested SGD training options are also returned in the struct M.meta. % Copyright (C) 2018 Joao F. Henriques, Andrea Vedaldi. % All rights reserved. % % This file is part of the VLFeat library and is made available under % the terms of the BSD license (see the COPYING file). % parse options. unknown arguments will be passed to ConvBlock (e.g. % activation). opts.pretrained = false ; % whether to fetch a pre-trained model opts.input = Input('name', 'images', 'gpu', true) ; % default input layer opts.numClasses = 1000 ; % number of predicted classes opts.batchNorm = true ; % whether to use batch normalization opts.normalization = [5 1 0.0001/5 0.75] ; % for LRN layer (vl_nnnormalize) [opts, convBlockArgs] = vl_argparse(opts, varargin, 'nonrecursive') ; % default training options for this network (returned as output.meta) meta.batchSize = 256 ; meta.imageSize = [227, 227, 3] ; meta.augmentation.crop = 227 / 256; meta.augmentation.location = true ; meta.augmentation.flip = true ; meta.augmentation.brightness = 0.1 ; meta.augmentation.aspect = [2/3, 3/2] ; meta.weightDecay = 0.0005 ; % the default learning rate schedule if ~opts.pretrained if ~opts.batchNorm meta.learningRate = logspace(-2, -4, 60) ; else meta.learningRate = logspace(-1, -4, 20) ; end meta.numEpochs = numel(meta.learningRate) ; else % fine-tuning has lower LR meta.learningRate = 1e-5 ; meta.numEpochs = 20 ; end % return a pre-trained model if opts.pretrained if opts.batchNorm warning('The pre-trained model does not include batch-norm (set batchNorm to false).') ; end if opts.numClasses ~= 1000 warning('Model options are ignored when loading a pre-trained model.') ; end prediction = models.pretrained('imagenet-matconvnet-alex') ; % return prediction layer (not softmax) assert(isequal(prediction{1}.func, @vl_nnsoftmax)) ; prediction = prediction{1}.inputs{1} ; % replace input layer with the given one input = prediction.find('Input', 1) ; prediction.replace(input, opts.input) ; prediction.meta = meta ; return end % get conv block generator with the given options. default activation is % ReLU, with pre-activation batch normalization (can be overriden). conv = models.ConvBlock('batchNorm', opts.batchNorm, ... 
  'preActivationBatchNorm', true, 'weightScale', 0.01, convBlockArgs{:}) ;

% build network
images = opts.input ;

% first conv block
x = conv(images, 'size', [11, 11, 3, 96], 'stride', 4) ;
if ~opts.batchNorm
  x = vl_nnnormalize(x, opts.normalization) ;
end
x = vl_nnpool(x, 3, 'stride', 2) ;

% second conv block
x = conv(x, 'size', [5, 5, 48, 256], 'pad', 2) ;
if ~opts.batchNorm
  x = vl_nnnormalize(x, opts.normalization) ;
end
x = vl_nnpool(x, 3, 'stride', 2) ;

% conv blocks 3-5
x = conv(x, 'size', [3, 3, 256, 384], 'pad', 1) ;
x = conv(x, 'size', [3, 3, 192, 384], 'pad', 1) ;
x = conv(x, 'size', [3, 3, 192, 256], 'pad', 1) ;
x = vl_nnpool(x, 3, 'stride', 2) ;

% first fully-connected block
x = conv(x, 'size', [6, 6, 256, 4096]) ;
if ~opts.batchNorm
  x = vl_nndropout(x) ;
end

% second fully-connected block
x = conv(x, 'size', [1, 1, 4096, 4096]) ;
if ~opts.batchNorm
  x = vl_nndropout(x) ;
end

% prediction layer
prediction = conv(x, 'size', [1, 1, 4096, opts.numClasses], ...
  'batchNorm', false, 'activation', 'none') ;

prediction.meta = meta ;

end
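% Example usage (a sketch; it assumes the autonn `models` package is on the
% MATLAB path, and the option names are the ones parsed by vl_argparse above):
%
%   net = models.AlexNet('batchNorm', false, 'numClasses', 10) ;
%   % net is the prediction layer; suggested training options are in net.meta.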
/- LoVe Exercise 4: Functional Programming -/

import .love04_functional_programming_demo

namespace LoVe


/- Question 1: Reverse of a List -/

/- We define a new accumulator-based version of `reverse`. The first argument
serves as the accumulator. This definition is _tail-recursive_, meaning that
compilers and interpreters can easily optimize the recursion away, resulting
in more efficient code. -/

def areverse {α : Type} : list α → list α → list α
| ys []        := ys
| ys (x :: xs) := areverse (x :: ys) xs

/- 1.1. Our intention is that `areverse [] xs` should be equal to
`reverse xs`. But if we start an induction, we quickly see that the induction
hypothesis is not strong enough. Start by proving the following
generalization (using pattern matching or the `induction` tactic): -/

lemma areverse_eq_reverse_append {α : Type} :
  ∀ys xs : list α, areverse ys xs = reverse xs ++ ys
| ys []        := by refl
| ys (x :: xs) := by simp [reverse, areverse, areverse_eq_reverse_append _ xs]

/- 1.2. Derive the desired equation. -/

lemma areverse_eq_reverse {α : Type} (xs : list α) :
  areverse [] xs = reverse xs :=
by simp [areverse_eq_reverse_append]

/- 1.3. Prove the following property. Hint: A one-line inductionless proof is
possible. -/

lemma areverse_areverse {α : Type} (xs : list α) :
  areverse [] (areverse [] xs) = xs :=
by simp [areverse_eq_reverse, reverse_reverse]


/- Question 2: Drop and Take -/

/- The `drop` function removes the first `n` elements from the front of a
list. -/

def drop {α : Type} : ℕ → list α → list α
| 0       xs        := xs
| (_ + 1) []        := []
| (m + 1) (x :: xs) := drop m xs

/- Its relative `take` returns a list consisting of the first `n` elements at
the front of a list. -/

/- 2.1. Define `take`. -/

/- To avoid unpleasant surprises in the proofs, we recommend that you follow
the same recursion pattern as for `drop` above. -/

def take {α : Type} : ℕ → list α → list α
| 0       _         := []
| (_ + 1) []        := []
| (m + 1) (x :: xs) := x :: take m xs

#reduce take 0 [3, 7, 11]   -- expected: []
#reduce take 1 [3, 7, 11]   -- expected: [3]
#reduce take 2 [3, 7, 11]   -- expected: [3, 7]
#reduce take 3 [3, 7, 11]   -- expected: [3, 7, 11]
#reduce take 4 [3, 7, 11]   -- expected: [3, 7, 11]

-- when `#reduce` fails for some obscure reason, try `#eval`:
#eval take 2 ["a", "b", "c"]   -- expected: ["a", "b"]

/- 2.2. Prove the following lemmas. Notice that they are registered as
simplification rules thanks to the `@[simp]` attribute. -/

@[simp] lemma drop_nil {α : Type} :
  ∀n : ℕ, drop n ([] : list α) = []
| 0       := by refl
| (_ + 1) := by refl

@[simp] lemma take_nil {α : Type} :
  ∀n : ℕ, take n ([] : list α) = []
| 0       := by refl
| (_ + 1) := by refl

/- 2.3. Follow the recursion pattern of `drop` and `take` to prove the
following lemmas. In other words, for each lemma, there should be three
cases, and the third case will need to invoke the induction hypothesis. The
first case is shown for `drop_drop`. Beware of the fact that there are three
variables in the `drop_drop` lemma (but only two arguments to `drop`).
Hint: The `refl` tactic might be useful in the third case of `drop_drop`.
-/

lemma drop_drop {α : Type} :
  ∀(m n : ℕ) (xs : list α), drop n (drop m xs) = drop (n + m) xs
| 0       n xs        := by refl
| (_ + 1) _ []        := by simp [drop]
| (m + 1) n (x :: xs) :=
  begin
    simp [drop, drop_drop m n xs],
    refl
  end

lemma take_take {α : Type} :
  ∀(m : ℕ) (xs : list α), take m (take m xs) = take m xs
| 0       _         := by refl
| (_ + 1) []        := by refl
| (m + 1) (x :: xs) := by simp [take, take_take m xs]

lemma take_drop {α : Type} :
  ∀(n : ℕ) (xs : list α), take n xs ++ drop n xs = xs
| 0       _         := by refl
| (_ + 1) []        := by refl
| (m + 1) (x :: xs) := by simp [take, drop, take_drop m]


/- Question 3: λ-Terms -/

/- 3.1. Define an inductive type corresponding to the untyped λ-terms, as
given by the following context-free grammar:

<lam> ::= 'var' <string>
        | 'abs' <string> <lam>
        | 'app' <lam> <lam> -/

inductive lam : Type
| var : string → lam
| abs : string → lam → lam
| app : lam → lam → lam

export lam (var abs app)

/- 3.2. Register a textual representation of the type `lam`. Make sure to
supply enough parentheses to guarantee that the output is unambiguous. -/

def lam.repr : lam → string
| (var s)   := s
| (abs s t) := "(λ" ++ s ++ ", " ++ lam.repr t ++ ")"
| (app t u) := "(" ++ lam.repr t ++ " " ++ lam.repr u ++ ")"

instance : has_repr lam :=
⟨lam.repr⟩


/- Question 4 (**optional**): Concatenation -/

/- Consider the following Lean definition of 2–3 trees as an inductive
type: -/

inductive tttree (α : Type) : Type
| empty {} : tttree
| bin      : α → tttree → tttree → tttree
| ter      : α → tttree → tttree → tttree → tttree

export tttree (empty bin ter)

/- 4.1 (**optional**). Complete the following Lean definition. The `map_tree`
function should apply its argument `f` to all values of type α stored in the
tree and otherwise preserve the tree's structure. -/

def map_tttree {α β : Type} (f : α → β) : tttree α → tttree β
| empty         := empty
| (bin a l r)   := bin (f a) (map_tttree l) (map_tttree r)
| (ter a l m r) := ter (f a) (map_tttree l) (map_tttree m) (map_tttree r)

/- 4.2 (**optional**). Prove the following lemma about your definition of
`map_tree`. -/

lemma map_tttree_id {α : Type} :
  ∀t : tttree α, map_tttree (λx : α, x) t = t
| empty         := by refl
| (bin a l r)   := by simp [map_tttree, map_tttree_id l, map_tttree_id r]
| (ter a l m r) := by simp [map_tttree, map_tttree_id l, map_tttree_id m,
    map_tttree_id r]

/- 4.3 (**optional**). Complete the following Lean definition. The `set_tree`
function should return the set of all values of type α stored in the tree. In
your answer, you may use traditional set notations regardless of whether they
are actually supported by Lean. -/

def set_tttree {α : Type} : tttree α → set α
| empty         := ∅
| (bin a l r)   := insert a (set_tttree l ∪ set_tttree r)
| (ter a l m r) := insert a (set_tttree l ∪ set_tttree m ∪ set_tttree r)

/- A _congruence rule_ is a lemma that can be used to lift an equivalence
relation between terms to the same terms occurring under a common context.
Congruence rules for equality are built into Lean's logic. In the following
example, the equivalence relation is `=`, the terms are `f` and `g`, and the
context is `map_tree … t`: -/

lemma map_tttree_congr_weak {α β : Type} (f g : α → β) (h : f = g)
    (t : tttree α) :
  map_tttree f t = map_tttree g t :=
by simp *

/- 4.4 (**optional**). The above rule is not as flexible as it could be,
because it requires `f = g`. As long as `f` and `g` are equal for all values
`x : α` stored in `t`, we have `map_tree f t = map_tree g t`, even if `f` and
`g` disagree on other `α` values.
Inspired by this observation, prove the following stronger congruence
rule. -/

lemma map_tttree_congr_strong {α β : Type} (f g : α → β) :
  ∀t : tttree α, (∀x, x ∈ set_tttree t → f x = g x) →
    map_tttree f t = map_tttree g t
| empty         := by intros; refl
| (bin a l r)   :=
  begin
    intros,
    simp [map_tttree],
    apply and.intro,
    { apply a_1, simp [set_tttree] },
    apply and.intro,
    { apply map_tttree_congr_strong, intros, apply a_1,
      simp [set_tttree], cc },
    { apply map_tttree_congr_strong, intros, apply a_1,
      simp [set_tttree], cc },
  end
| (ter a l m r) :=
  begin
    intros,
    simp [map_tttree],
    apply and.intro,
    { apply a_1, simp [set_tttree] },
    apply and.intro,
    { apply map_tttree_congr_strong, intros, apply a_1,
      simp [set_tttree], cc },
    apply and.intro,
    { apply map_tttree_congr_strong, intros, apply a_1,
      simp [set_tttree], cc },
    { apply map_tttree_congr_strong, intros, apply a_1,
      simp [set_tttree], cc }
  end

end LoVe
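/- A small demo of the `has_repr` instance above (a sketch; the term below is
   an arbitrary example, not part of the exercise sheet). The fully
   parenthesized output shows that the representation is unambiguous. -/
#eval LoVe.app (LoVe.abs "x" (LoVe.var "x")) (LoVe.var "y")
-- expected output: ((λx, x) y)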
module Operations

using ..Types
using ..Universes: Element, AreaElements

export check, Fraction, exists, not_exists, new

function check end

exists(element::Element) = convert(Bool, element)
exists(elements::AreaElements) = convert(Bool, elements)

not_exists(element::Element) = !exists(element)
not_exists(elements::AreaElements) = !exists(elements)

function Base.count(elements::AreaElements)
    counter = 0
    for element in elements
        if isenabled(element)
            counter += 1
        end
    end
    return counter
end

struct Fraction <: Checkable
    border::Float32
end

function check(element::Element, parameters::Fraction)
    return rand(Float32) < parameters.border
end

function (checker::Checkable)(element::E) where {E<:Element}
    if check(element, checker)
        return element
    end
    return disable(element)
end

function (checker::Checkable)(elements::Vector{E}) where {E<:Element}
    # TODO: replace with boolean template assignment
    for (i, e) in enumerate(elements)
        if isenabled(e) && !check(e, checker)
            elements[i] = disable(elements[i])
        end
    end
    return elements
end

function new(elements::AreaElements)
    for (i, element) in enumerate(elements)
        if isenabled(element)
            elements[i] = construct_new_element(typeof(element),
                                                element.universe,
                                                element.topology_index)
        end
    end
    return elements
end

function new(element::Element)
    if isenabled(element)
        return construct_new_element(typeof(element), element.universe,
                                     element.topology_index)
    end
    return element
end

end
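# Usage sketch (hypothetical; `elements` stands for any `AreaElements`
# collection from Universes, and `isenabled`/`disable` come from the
# surrounding package):
#
#   checker = Operations.Fraction(0.3f0)   # keep each element with probability ~0.3
#   kept = checker(collect(elements))      # disables elements failing the check
#   n_alive = count(elements)              # number of still-enabled elements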
From iris.algebra Require Import excl auth cmra gmap agree gset numbers. From iris.algebra.lib Require Import frac_agree. From iris.heap_lang Require Export notation locations lang. From iris.base_logic.lib Require Export invariants. From iris.program_logic Require Export atomic. From iris.proofmode Require Import tactics. From iris.heap_lang Require Import proofmode par. From iris.bi.lib Require Import fractional. Set Default Proof Using "All". Require Export lock multicopy multicopy_util. Parameter inContents : val. Parameter findNext : val. Parameter addContents: val. Parameter atCapacity: val. Parameter chooseNext: val. Parameter mergeContents: val. Parameter allocNode: val. Parameter insertNode: val. (** Template algorithms *) Definition traverse : val := rec: "t_rec" "n" "k" := lockNode "n" ;; match: (inContents "n" "k") with SOME "v" => unlockNode "n";; "v" | NONE => match: (findNext "n" "k") with SOME "n'" => unlockNode "n" ;; "t_rec" "n'" "k" | NONE => unlockNode "n" ;; #bot end end. Definition search (r: Node) : val := λ: "k", traverse #r "k". Definition upsert (r: Node) : val := rec: "upsert_rec" "k" "v" := lockNode #r ;; let: "res" := addContents #r "k" "v" in if: "res" then unlockNode #r else unlockNode #r;; "upsert_rec" "k" "v". Definition compact (r: Node) : val := rec: "compact_rec" "n" := lockNode "n" ;; if: atCapacity "n" then match: (chooseNext "n") with SOME "m" => lockNode "m" ;; mergeContents "n" "m" ;; unlockNode "n" ;; unlockNode "m" ;; "compact_rec" "m" | NONE => let: "m" := allocNode #() in insertNode #r "n" "m";; mergeContents "n" "m" ;; unlockNode "n" ;; unlockNode "m";; "compact_rec" "m" end else unlockNode "n". (** Proof setup **) Definition esT : Type := gmap Node (gset K). Canonical Structure esRAC := leibnizO esT. (* RAs used in proof *) Definition prod4O A B C D := prodO (prodO (prodO A B) C) D. Definition per_node_gl := agreeR (prod4O gnameO gnameO gnameO (gmapO K gnameO)). Definition ghost_heapUR := gmapUR Node $ per_node_gl. Definition KT: Type := K*T. Definition flow_KTR := authR (multiset_flowint_ur KT). Definition flow_KR := authR (multiset_flowint_ur K). Definition set_nodeR := authR (gsetUR Node). Definition frac_contR := frac_agreeR (gmapUR K natUR). Definition frac_esR := frac_agreeR (esRAC). Definition timeR := authR (max_natUR). Definition ghR := authR $ ghost_heapUR. Class multicopy_lsmG Σ := MULTICOPY_LSM { multicopy_lsm_flow_KTG :> inG Σ flow_KTR; multicopy_lsm_flow_KG :> inG Σ flow_KR; multicopy_lsm_set_nodeG :> inG Σ set_nodeR; multicopy_lsm_frac_contG :> inG Σ frac_contR; multicopy_lsm_frac_esG :> inG Σ frac_esR; multicopy_lsm_timeG :> inG Σ timeR; multicopy_lsm_ghG :> inG Σ ghR; }. Definition multicopy_lsmΣ : gFunctors := #[GFunctor flow_KTR; GFunctor flow_KR; GFunctor set_nodeR; GFunctor frac_contR; GFunctor frac_esR; GFunctor timeR; GFunctor ghR ]. Instance subG_multicopy_lsmΣ {Σ} : subG multicopy_lsmΣ Σ → multicopy_lsmG Σ. Proof. solve_inG. Qed. Section multicopy_lsm. Context {Σ} `{!heapG Σ, !multicopyG Σ, !multicopy_lsmG Σ}. Notation iProp := (iProp Σ). Local Notation "m !1 i" := (nzmap_total_lookup i m) (at level 20). (** Assumptions on the implementation made by the template algorithms. *) (* The node predicate is specific to each template implementation. See GRASShopper files multicopy-lsm.spl for the concrete definition. *) Parameter node : Node → Node → esT → (gmap K V) → iProp. Parameter nodeSpatial : Node → iProp. Parameter needsNewNode : Node → Node → esT → (gmap K V) → iProp. 
(* The following assumption is justified by the fact that GRASShopper uses a * first-order separation logic. *) Parameter node_timeless_proof : ∀ r n es V, Timeless (node r n es V). Global Instance node_timeless r n es V: Timeless (node r n es V). Proof. apply node_timeless_proof. Qed. (* The following hypothesis are proved as a GRASShopper lemma in * multicopy-lsm.spl *) Parameter node_sep_star: ∀ r n es V es' V', node r n es V ∗ node r n es' V' -∗ False. Parameter node_es_disjoint: ∀ r n es V, node r n es V -∗ ⌜∀ n1 n2, n1 ≠ n2 → es !!! n1 ∩ es !!! n2 = ∅⌝. Parameter node_es_empty: ∀ r n es V, node r n es V -∗ ⌜es !!! r = ∅ ∧ es !!! n = ∅⌝. (** The LSM multicopy structure invariant *) Definition inFP γ_f (n: Node) : iProp := own γ_f (◯ {[n]}). Definition closed γ_f (es: esT) : iProp := ∀ n, ⌜es !!! n ≠ ∅⌝ → inFP γ_f n. Definition inflow_zero (I: multiset_flowint_ur KT) := ∀ n, inset KT I n = ∅. Definition outflow_zero (I: multiset_flowint_ur KT) := out_map I = ∅. Definition outflow_zero_J (I: multiset_flowint_ur K) := out_map I = ∅. Definition inflow_J (J: multiset_flowint_ur K) r := ∀ n k, k ∈ KS → if decide (n = r) then in_inset K k J n else ¬ in_inset K k J n. Definition outflow_le_1 (I: multiset_flowint_ur KT) := ∀ n kt, out I n !1 kt ≤ 1. Definition outflow_constraint_I (In: multiset_flowint_ur KT) (es: esT) (Qn: gmap K T) := ∀ n' k t, k ∈ KS → ((k,t) ∈ outset KT In n' ↔ k ∈ es !!! n' ∧ (Qn !! k = Some t)). Definition outflow_constraint_J (Jn: multiset_flowint_ur K) (es: esT) n := ∀ n' k, k ∈ KS → (k ∈ outset K Jn n' ↔ k ∈ es !!! n' ∧ k ∈ inset K Jn n). (* This constraint is implicit in the paper. We track B_n explicitly as ghost state here. That is the following captures the definition of B_n in terms of C_n/Q_n given in the paper. *) Definition contents_in_reach (Bn Tn Qn: gmap K T) := ∀ k t, k ∈ KS → ((Tn !! k = Some t → Bn !! k = Some t) ∧ (Tn !! k = None → Bn !! k = Qn !! k)). (** ϕ_1 in the paper *) Definition φ1 (es: esT) (Qn: gmap K T) := ∀ k, k ∈ KS → ((∀ n', k ∉ es !!! n') → Qn !! k = None). (** ϕ_2 in the paper *) Definition φ2 n (Bn: gmap K T) In := ∀ k t, k ∈ KS → ((k,t) ∈ inset KT In n → Bn !!! k = t). (** ϕ_3 in the paper *) Definition φ3 (Bn Qn: gmap K T) := ∀ k, k ∈ KS → (Qn !!! k ≤ Bn !!! k). (** ϕ_4 in the paper *) Definition φ4 n (Bn: gmap K T) Jn := ∀ k, k ∈ KS → (Bn !! k = None ∨ k ∈ inset K Jn n). (** ϕ_5 in the paper *) Definition φ5 n (Jn: multiset_flowint_ur K) := ∀ k, inf Jn n !1 k ≤ 1. (** The following two constraints are inductive consequences of ϕ1..ϕ5. We track them explicitly in the invariant for convenience. *) Definition φ6 n (es: esT) (Jn: multiset_flowint_ur K) (Qn: gmap K T) := ∀ k, k ∈ KS → ((∃ n', k ∈ es !!! n') ∧ k ∈ inset K Jn n → k ∈ dom (gset K) Qn). Definition φ7 (n: Node) (In: multiset_flowint_ur KT) := ∀ kt, inf In n !1 kt ≤ 1. Definition f_mergeLeft {A} (K1: gset K) (Vn: gmap K A) (Es: gset K) (Vm: gmap K A) := λ k (o1: option A) (o2: option A), if (decide (k ∈ (KS ∩ K1 ∩ dom (gset K) Vn ∩ Es))) then (None: option A) else o1. Global Instance f_mergeLeft_diag_none {A} (K1: gset K) (Vn: gmap K A) (Es: gset K) (Vm: gmap K A) k: DiagNone (f_mergeLeft K1 Vn Es Vm k). Proof. unfold DiagNone. unfold f_mergeLeft. destruct (decide (k ∈ ((KS ∩ K1) ∩ dom (gset K) Vn) ∩ Es)); try by simpl. Qed. Definition mergeLeft {A} (K1: gset K) (Vn: gmap K A) (Es: gset K) (Vm: gmap K A) : gmap K A := gmap_imerge (f_mergeLeft K1 Vn Es Vm) Vn Vm. 
Definition f_mergeRight {A} (K1: gset K) (Vn: gmap K A) (Es: gset K) (Vm: gmap K A) := λ k (o1: option A) (o2: option A), if (decide (k ∈ (KS ∩ K1 ∩ dom (gset K) Vn ∩ Es))) then o1 else o2. Global Instance f_mergeRight_diag_none {A} (K1: gset K) (Vn: gmap K A) (Es: gset K) (Vm: gmap K A) k: DiagNone (f_mergeRight K1 Vn Es Vm k). Proof. unfold DiagNone. unfold f_mergeRight. destruct (decide (k ∈ ((KS ∩ K1) ∩ dom (gset K) Vn) ∩ Es)); try by simpl. Qed. Definition mergeRight {A} (K1: gset K) (Vn: gmap K A) (Es: gset K) (Vm: gmap K A) : gmap K A := gmap_imerge (f_mergeRight K1 Vn Es Vm) Vn Vm. Definition frac_ghost_state γ_en γ_cn γ_qn (es: esT) (Tn Qn: gmap K T): iProp := own (γ_en) (to_frac_agree (1/2) (es)) ∗ own (γ_cn) (to_frac_agree (1/2) (Tn)) ∗ own (γ_qn) (to_frac_agree (1/2) (Qn)). Definition singleton_interfaces_ghost_state (γ_I γ_J: gname) (n: Node) (In: multiset_flowint_ur KT) (Jn: multiset_flowint_ur K) : iProp := own γ_I (◯ In) ∗ own γ_J (◯ Jn) ∗ ⌜domm In = {[n]}⌝ ∗ ⌜domm Jn = {[n]}⌝. Definition outflow_constraints n In Jn es Qn : iProp := ⌜outflow_constraint_I In es Qn⌝ ∗ ⌜outflow_constraint_J Jn es n⌝ ∗ ⌜outflow_le_1 In⌝. Definition ghost_loc γ_en γ_cn γ_qn (γ_cirn: gmap K gnameO) : per_node_gl := to_agree (γ_en, γ_cn, γ_qn, γ_cirn). Definition contents_proj (Cn: gmap K (V*T)) (Vn: gmap K V) (Tn: gmap K T) : iProp := ⌜dom (gset K) Cn = dom (gset K) Vn⌝ ∗ ⌜dom (gset K) Cn = dom (gset K) Tn⌝ ∗ ⌜∀ k v t, Cn !! k = Some (v, t) ↔ Vn !! k = Some v ∧ Tn !! k = Some t⌝. Definition nodePred' γ_gh γ_s r n (Cn: gmap K (V*T)) (Vn: gmap K V) (Tn Qn: gmap K T) γ_en γ_cn γ_qn γ_cirn es: iProp := node r n es Vn ∗ own γ_gh (◯ {[n := ghost_loc γ_en γ_cn γ_qn γ_cirn]}) ∗ frac_ghost_state γ_en γ_cn γ_qn es Tn Qn ∗ own γ_s (◯ set_of_map Cn) ∗ contents_proj Cn Vn Tn. (** Predicate N_L in the paper *) Definition nodePred γ_gh γ_s r n (Cn: gmap K (V*T)) (Qn: gmap K T) : iProp := ∃ γ_en γ_cn γ_qn γ_cirn es Vn Tn, nodePred' γ_gh γ_s r n Cn Vn Tn Qn γ_en γ_cn γ_qn γ_cirn es. Definition nodeShared' (γ_I γ_J γ_f: gname) γ_gh r n (Tn Qn Bn: gmap K T) H γ_en γ_cn γ_qn γ_cirn es In Jn: iProp := own γ_gh (◯ {[n := ghost_loc γ_en γ_cn γ_qn γ_cirn]}) ∗ frac_ghost_state γ_en γ_cn γ_qn es Tn Qn ∗ singleton_interfaces_ghost_state γ_I γ_J n In Jn ∗ inFP γ_f n ∗ closed γ_f es ∗ outflow_constraints n In Jn es Qn ∗ ⌜contents_in_reach Bn Tn Qn⌝ ∗ (if decide (n = r) then ⌜∀ k, Bn !!! k = ((map_of_set H) !!! k).2⌝ ∗ ⌜inflow_zero In⌝ else True)%I ∗ ([∗ set] k ∈ KS, own (γ_cirn !!! (k)) (● (MaxNat (Bn !!! k)))) ∗ ⌜φ1 es Qn⌝ ∗ ⌜φ2 n Bn In⌝ ∗ ⌜φ3 Bn Qn⌝ ∗ ⌜φ4 n Bn Jn⌝ ∗ ⌜φ5 n Jn⌝ ∗ ⌜φ6 n es Jn Qn⌝ ∗ ⌜φ7 n In⌝. (** Predicate N_S in the paper *) Definition nodeShared (γ_I γ_J γ_f: gname) γ_gh r n (Qn : gmap K T) H: iProp := ∃ γ_en γ_cn γ_qn γ_cirn es Tn Bn In Jn, nodeShared' γ_I γ_J γ_f γ_gh r n Tn Qn Bn H γ_en γ_cn γ_qn γ_cirn es In Jn. (** Predicate G in the paper *) Definition global_state (γ_I γ_J γ_f γ_gh: gname) (r: Node) (hγ: gmap Node per_node_gl) (I: multiset_flowint_ur KT) (J: multiset_flowint_ur K) : iProp := own γ_I (● I) ∗ ⌜outflow_zero I⌝ ∗ own γ_J (● J) ∗ ⌜outflow_zero_J J⌝ ∗ ⌜inflow_J J r⌝ ∗ own γ_f (● domm I) ∗ own γ_gh (● hγ) ∗ inFP γ_f r ∗ ⌜domm I = domm J⌝ ∗ ⌜domm I = dom (gset Node) hγ⌝. Definition Inv_LSM γ_s γ_I γ_J γ_f γ_gh r H : iProp := ∃ hγ (I: multiset_flowint_ur KT) (J: multiset_flowint_ur K), global_state γ_I γ_J γ_f γ_gh r hγ I J ∗ ([∗ set] n ∈ (domm I), ∃ (bn: bool) Cn Qn, (lockR bn n (nodePred γ_gh γ_s r n Cn Qn)) ∗ (nodeShared γ_I γ_J γ_f γ_gh r n Qn H))%I. 
Global Instance Inv_LSM_timeless γ_s γ_I γ_J γ_f γ_gh r H: Timeless (Inv_LSM γ_s γ_I γ_J γ_f γ_gh r H). Proof. rewrite /Inv_LSM. repeat (apply bi.exist_timeless; intros). repeat apply bi.sep_timeless; try apply _. apply big_sepS_timeless. repeat (intros; apply bi.exist_timeless; intros). apply bi.sep_timeless; try apply _. destruct x3; try apply _. repeat apply bi.sep_timeless; try apply _. repeat (apply bi.exist_timeless; intros). repeat apply bi.sep_timeless; try apply _. destruct (decide (x2 = r)); try apply _. Qed. (** Helper functions specs *) (* The following specs are proved for each implementation in GRASShopper * (see multicopy-lsm.spl *) Parameter inContents_spec : ∀ r n esn (Vn: gmap K V) (k: K), ⊢ ({{{ node r n esn Vn }}} inContents #n #k {{{ (v: option V), RET (match v with Some v => SOMEV #v | None => NONEV end); node r n esn Vn ∗ ⌜Vn !! k = v⌝ }}})%I. Parameter findNext_spec : ∀ r n esn (Vn: gmap K V) (k: K), ⊢ ({{{ node r n esn Vn }}} findNext #n #k {{{ (n': option Node), RET (match n' with Some n' => SOMEV #n' | None => NONEV end); node r n esn Vn ∗ (match n' with Some n' => ⌜k ∈ esn !!! n'⌝ | None => ⌜∀ n'', k ∉ esn !!! n''⌝ end) }}})%I. Parameter addContents_spec : ∀ r n esn (Vn: gmap K V) (k: K) (v: V), ⊢ ({{{ node r n esn Vn ∗ ⌜n = r⌝ }}} addContents #r #k #v {{{ (succ: bool) (Vn': gmap K V), RET #succ; node r n esn Vn' ∗ if succ then ⌜Vn' = <[k := v]> Vn⌝ else ⌜Vn' = Vn⌝ }}})%I. Parameter atCapacity_spec : ∀ r n esn (Vn: gmap K V), ⊢ ({{{ node r n esn Vn }}} atCapacity #n {{{ (b: bool), RET #b; node r n esn Vn }}})%I. Parameter chooseNext_spec : ∀ r n esn (Vn: gmap K V), ⊢ ({{{ node r n esn Vn }}} chooseNext #n {{{ (n1: option Node), RET (match n1 with Some n1 => SOMEV #n1 | None => NONEV end); node r n esn Vn ∗ (match n1 with Some n1 => ⌜esn !!! n1 ≠ ∅⌝ | None => needsNewNode r n esn Vn end) }}})%I. Parameter allocNode_spec : ⊢ ({{{ True }}} allocNode #() {{{ (m: Node) (l:loc), RET #m; nodeSpatial m ∗ ⌜lockLoc m = l⌝ ∗ l ↦ #true }}})%I. Parameter insertNode_spec : ∀ r n m esn Vn, ⊢ {{{ node r n esn Vn ∗ needsNewNode r n esn Vn ∗ nodeSpatial m ∗ ⌜m ≠ r⌝ }}} insertNode #r #n #m {{{ esn' esm Vm, RET #(); node r n esn' Vn ∗ node r m esm Vm ∗ ⌜esn' = <[m:=esn' !!! m]> esn⌝ ∗ ⌜esn' !!! m ≠ ∅⌝ ∗ ⌜Vm = ∅⌝ ∗ ⌜esm = ∅⌝ }}}. Parameter mergeContents_spec : ∀ r n m esn esm (Vn Vm: gmap K V), ⊢ ({{{ node r n esn Vn ∗ node r m esm Vm ∗ ⌜esn !!! m ≠ ∅⌝ }}} mergeContents #n #m {{{ (K1: gset K) Vn' Vm', RET #(); node r n esn Vn' ∗ node r m esm Vm' ∗ ⌜Vn' = mergeLeft K1 Vn (esn !!! m) Vm⌝ ∗ ⌜Vm' = mergeRight K1 Vn (esn !!! m) Vm⌝ }}})%I. End multicopy_lsm.
For people with relapsed AML, the only proven potentially curative therapy is a hematopoietic stem cell transplant, if one has not already been performed. In 2000, the monoclonal antibody-linked cytotoxic agent gemtuzumab ozogamicin (Mylotarg) was approved in the United States for people aged more than 60 years with relapsed AML who are not candidates for high-dose chemotherapy. This drug was voluntarily withdrawn from the market by its manufacturer, Pfizer, in 2010.
# <center>Non-Linear Regression Analysis</center>

If the data shows a curvy trend, then linear regression will not produce very accurate results when compared to a non-linear regression because, as the name implies, linear regression presumes that the data is linear. Let's learn about non-linear regressions and work through an example in Python.

In this notebook, we fit a non-linear model to the datapoints corresponding to China's GDP from 1960 to 2014.

### Importing required libraries

```python
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```

Though linear regression is very good at solving many problems, it cannot be used for all datasets. First, recall how linear regression could model a dataset. It models a linear relation between a dependent variable y and an independent variable x. It has a simple equation of degree 1, for example y = 2*(x) + 3.

```python
x = np.arange(-5.0, 5.0, 0.1)

## You can adjust the slope and intercept to verify the changes in the graph
y = 2*(x) + 3
y_noise = 2 * np.random.normal(size=x.size)
ydata = y + y_noise
#plt.figure(figsize=(8,6))
plt.plot(x, ydata, 'bo')
plt.plot(x, y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```

Non-linear regressions model a relationship between independent variables $x$ and a dependent variable $y$ through a non-linear function. Essentially, any relationship that is not linear can be termed non-linear, and is usually represented by a polynomial of degree $k$ (the maximum power of $x$).

$$ \ y = a x^3 + b x^2 + c x + d \ $$

Non-linear functions can have elements like exponentials, logarithms, fractions, and others. For example:

$$ y = \log(x)$$

Or even, more complicated such as:

$$ y = \log(a x^3 + b x^2 + c x + d)$$

Let's take a look at a cubic function's graph.

```python
x = np.arange(-5.0, 5.0, 0.1)

## You can adjust the slope and intercept to verify the changes in the graph
y = 1*(x**3) + 1*(x**2) + 1*x + 3
y_noise = 20 * np.random.normal(size=x.size)
ydata = y + y_noise
plt.plot(x, ydata, 'bo')
plt.plot(x, y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```

As you can see, this function has $x^3$ and $x^2$ as independent variables. Also, the graph of this function is not a straight line over the 2D plane. So this is a non-linear function.

Some other types of non-linear functions are:

### Quadratic

$$ Y = X^2 $$

```python
x = np.arange(-5.0, 5.0, 0.1)

## You can adjust the slope and intercept to verify the changes in the graph
y = np.power(x, 2)
y_noise = 2 * np.random.normal(size=x.size)
ydata = y + y_noise
plt.plot(x, ydata, 'bo')
plt.plot(x, y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```

### Exponential

An exponential function with base c is defined by

$$ Y = a + b c^X$$

where b ≠ 0, c > 0, c ≠ 1, and x is any real number. The base, c, is constant and the exponent, x, is a variable.

```python
X = np.arange(-5.0, 5.0, 0.1)

## You can adjust the slope and intercept to verify the changes in the graph
Y = np.exp(X)

plt.plot(X, Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```

### Logarithmic

The response $y$ is a result of applying a logarithmic map from the input $x$'s to the output variable $y$. It is one of the simplest forms of __log()__: i.e.

$$ y = \log(x)$$

Please consider that instead of $x$, we can use $X$, which can be a polynomial representation of the $x$'s.
In general form it would be written as

\begin{equation}
y = \log(X)
\end{equation}

```python
X = np.arange(-5.0, 5.0, 0.1)

Y = np.log(X)

plt.plot(X, Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```

### Sigmoidal/Logistic

$$ Y = a + \frac{b}{1+ c^{(X-d)}}$$

```python
X = np.arange(-5.0, 5.0, 0.1)

Y = 1 - 4/(1 + np.power(3, X-2))

plt.plot(X, Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```

<a id="ref2"></a>

# Non-Linear Regression example

For an example, we're going to try and fit a non-linear model to the datapoints corresponding to China's GDP from 1960 to 2014. We download a dataset with two columns, the first, a year between 1960 and 2014, the second, China's corresponding annual gross domestic income in US dollars for that year.

```python
import numpy as np
import pandas as pd

# downloading dataset
# !wget -nv -O china_gdp.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/china_gdp.csv

df = pd.read_csv("china_gdp.csv")
df.head(10)
```

__Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)

### Plotting the Dataset ###

This is what the datapoints look like. It kind of looks like either a logistic or an exponential function. The growth starts off slow, then from 2005 onward, the growth is very significant. And finally, it decelerates slightly in the 2010s.

```python
plt.figure(figsize=(8,5))
x_data, y_data = (df["Year"].values, df["Value"].values)
plt.plot(x_data, y_data, 'bo')
plt.ylabel('GDP')
plt.xlabel('Year')
plt.show()
```

### Choosing a model ###

From an initial look at the plot, we determine that the logistic function could be a good approximation, since it has the property of starting with a slow growth, increasing growth in the middle, and then decreasing again at the end; as illustrated below:

```python
X = np.arange(-5.0, 5.0, 0.1)
Y = 1.0 / (1.0 + np.exp(-X))

plt.plot(X, Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```

The formula for the logistic function is the following:

$$ \hat{Y} = \frac1{1+e^{\beta_1(X-\beta_2)}}$$

$\beta_1$: Controls the curve's steepness,

$\beta_2$: Slides the curve on the x-axis.

### Building The Model ###

Now, let's build our regression model and initialize its parameters.

```python
def sigmoid(x, Beta_1, Beta_2):
     y = 1 / (1 + np.exp(-Beta_1*(x-Beta_2)))
     return y
```

Let's look at a sample sigmoid line that might fit with the data:

```python
beta_1 = 0.10
beta_2 = 1990.0

# logistic function
Y_pred = sigmoid(x_data, beta_1, beta_2)

# plot initial prediction against datapoints
plt.plot(x_data, Y_pred*15000000000000.)
plt.plot(x_data, y_data, 'ro')
```

Our task here is to find the best parameters for our model. Let's first normalize our x and y:

```python
# Let's normalize our data
xdata = x_data/max(x_data)
ydata = y_data/max(y_data)
```

#### How do we find the best parameters for our fit line?

We can use __curve_fit__, which uses non-linear least squares to fit our sigmoid function to the data: it finds optimal values for the parameters so that the sum of the squared residuals of sigmoid(xdata, *popt) - ydata is minimized. popt contains our optimized parameters.
```python
from scipy.optimize import curve_fit
popt, pcov = curve_fit(sigmoid, xdata, ydata)

# print the final parameters
print(" beta_1 = %f, beta_2 = %f" % (popt[0], popt[1]))
```

Now we plot our resulting regression model.

```python
x = np.linspace(1960, 2015, 55)
x = x/max(x)
plt.figure(figsize=(8,5))
y = sigmoid(x, *popt)
plt.plot(xdata, ydata, 'ro', label='data')
plt.plot(x, y, linewidth=3.0, label='fit')
plt.legend(loc='best')
plt.ylabel('GDP')
plt.xlabel('Year')
plt.show()
```

## Practice

Can you calculate the accuracy of our model?

```python
# write your code here

# split the data into train/test sets
msk = np.random.rand(len(df)) < 0.8
train_x = xdata[msk]
test_x = xdata[~msk]
train_y = ydata[msk]
test_y = ydata[~msk]

# build the model using the train set
popt, pcov = curve_fit(sigmoid, train_x, train_y)

# predict using the test set
y_hat = sigmoid(test_x, *popt)

# evaluation (note: r2_score expects the true values first)
print("Mean absolute error: %.2f" % np.mean(np.absolute(y_hat - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((y_hat - test_y) ** 2))
from sklearn.metrics import r2_score
print("R2-score: %.2f" % r2_score(test_y, y_hat))
```

Double-click __here__ for the solution.

<!-- Your answer is below:

# split data into train/test
msk = np.random.rand(len(df)) < 0.8
train_x = xdata[msk]
test_x = xdata[~msk]
train_y = ydata[msk]
test_y = ydata[~msk]

# build the model using train set
popt, pcov = curve_fit(sigmoid, train_x, train_y)

# predict using test set
y_hat = sigmoid(test_x, *popt)

# evaluation
print("Mean absolute error: %.2f" % np.mean(np.absolute(y_hat - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((y_hat - test_y) ** 2))
from sklearn.metrics import r2_score
print("R2-score: %.2f" % r2_score(test_y, y_hat))

-->

## Want to learn more?

IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: [SPSS Modeler](http://cocl.us/ML0101EN-SPSSModeler).

Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at [Watson Studio](https://cocl.us/ML0101EN_DSX)

### Thanks for completing this lesson!

Notebook created by: <a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>

<hr>

Copyright &copy; 2018 [Cognitive Class](https://cocl.us/DX0108EN_CC). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
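As an addendum to the practice above: the fitted parameters live on the normalized scale, so predictions have to be rescaled by the original maxima before they can be read as GDP values. A minimal sketch (it assumes `x_data`, `y_data`, `sigmoid`, and `popt` from the cells above):

```python
# Predict GDP for a calendar year on the original (unnormalized) scale.
def predict_gdp(year):
    x_norm = year / max(x_data)              # same normalization used for fitting
    return sigmoid(x_norm, *popt) * max(y_data)

print("Predicted GDP for 2014: %.3e" % predict_gdp(2014))
```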
(* Title: Isabelle Collections Library Author: Peter Lammich <peter dot lammich at uni-muenster.de> Maintainer: Peter Lammich <peter dot lammich at uni-muenster.de> *) (* Changes since submission on 2009-11-26: 2009-12-10: OrderedMap, algorithms for iterators, min, max, to_sorted_list *) header {* \isaheader{Generic Algorithms for Maps} *} theory MapGA imports SetIteratorCollectionsGA begin text_raw {*\label{thy:MapGA}*} record ('k,'v,'s) map_basic_ops = bmap_op_\<alpha> :: "('k,'v,'s) map_\<alpha>" bmap_op_invar :: "('k,'v,'s) map_invar" bmap_op_empty :: "('k,'v,'s) map_empty" bmap_op_lookup :: "('k,'v,'s) map_lookup" bmap_op_update :: "('k,'v,'s) map_update" bmap_op_update_dj :: "('k,'v,'s) map_update_dj" bmap_op_delete :: "('k,'v,'s) map_delete" bmap_op_list_it :: "('k,'v,'s) map_list_it" record ('k,'v,'s) omap_basic_ops = "('k,'v,'s) map_basic_ops" + bmap_op_ordered_list_it :: "'s \<Rightarrow> ('k,'v,('k\<times>'v) list) map_iterator" bmap_op_rev_list_it :: "'s \<Rightarrow> ('k,'v,('k\<times>'v) list) map_iterator" locale StdBasicMapDefs = poly_map_iteratei_defs "bmap_op_list_it ops" for ops :: "('k,'v,'s,'more) map_basic_ops_scheme" begin abbreviation \<alpha> where "\<alpha> == bmap_op_\<alpha> ops" abbreviation invar where "invar == bmap_op_invar ops" abbreviation empty where "empty == bmap_op_empty ops" abbreviation lookup where "lookup == bmap_op_lookup ops" abbreviation update where "update == bmap_op_update ops" abbreviation update_dj where "update_dj == bmap_op_update_dj ops" abbreviation delete where "delete == bmap_op_delete ops" abbreviation list_it where "list_it == bmap_op_list_it ops" end locale StdBasicOMapDefs = StdBasicMapDefs ops + poly_map_iterateoi_defs "bmap_op_ordered_list_it ops" + poly_map_rev_iterateoi_defs "bmap_op_rev_list_it ops" for ops :: "('k::linorder,'v,'s,'more) omap_basic_ops_scheme" begin abbreviation ordered_list_it where "ordered_list_it \<equiv> bmap_op_ordered_list_it ops" abbreviation rev_list_it where "rev_list_it \<equiv> bmap_op_rev_list_it ops" end locale StdBasicMap = StdBasicMapDefs ops + map \<alpha> invar + map_empty \<alpha> invar empty + map_lookup \<alpha> invar lookup + map_update \<alpha> invar update + map_update_dj \<alpha> invar update_dj + map_delete \<alpha> invar delete + poly_map_iteratei \<alpha> invar list_it for ops :: "('k,'v,'s,'more) map_basic_ops_scheme" begin lemmas correct[simp] = empty_correct lookup_correct update_correct update_dj_correct delete_correct end locale StdBasicOMap = StdBasicOMapDefs ops + StdBasicMap ops + poly_map_iterateoi \<alpha> invar ordered_list_it + poly_map_rev_iterateoi \<alpha> invar rev_list_it for ops :: "('k::linorder,'v,'s,'more) omap_basic_ops_scheme" begin end context StdBasicMapDefs begin definition "g_sng k v \<equiv> update k v (empty ())" definition "g_add m1 m2 \<equiv> iterate m2 (\<lambda>(k,v) \<sigma>. update k v \<sigma>) m1" definition "g_sel m P \<equiv> iteratei m (\<lambda>\<sigma>. \<sigma> = None) (\<lambda>x \<sigma>. if P x then Some x else None) None" definition "g_bex m P \<equiv> iteratei m (\<lambda>x. \<not>x) (\<lambda>kv \<sigma>. P kv) False" definition "g_ball m P \<equiv> iteratei m id (\<lambda>kv \<sigma>. P kv) True" definition "g_size m \<equiv> iterate m (\<lambda>_. Suc) (0::nat)" definition "g_size_abort b m \<equiv> iteratei m (\<lambda>s. s<b) (\<lambda>_. 
Suc) (0::nat)" definition "g_isEmpty m \<equiv> g_size_abort 1 m = 0" definition "g_isSng m \<equiv> g_size_abort 2 m = 1" definition "g_to_list m \<equiv> iterate m (op #) []" definition "g_list_to_map l \<equiv> foldl (\<lambda>m (k,v). update k v m) (empty ()) (rev l)" definition "g_add_dj m1 m2 \<equiv> iterate m2 (\<lambda>(k,v) \<sigma>. update_dj k v \<sigma>) m1" definition "g_restrict P m \<equiv> iterate m (\<lambda>(k,v) \<sigma>. if P (k,v) then update_dj k v \<sigma> else \<sigma>) (empty ())" definition dflt_ops :: "('k,'v,'s) map_ops" where [icf_rec_def]: "dflt_ops \<equiv> \<lparr> map_op_\<alpha> = \<alpha>, map_op_invar = invar, map_op_empty = empty, map_op_lookup = lookup, map_op_update = update, map_op_update_dj = update_dj, map_op_delete = delete, map_op_list_it = list_it, map_op_sng = g_sng, map_op_restrict = g_restrict, map_op_add = g_add, map_op_add_dj = g_add_dj, map_op_isEmpty = g_isEmpty, map_op_isSng = g_isSng, map_op_ball = g_ball, map_op_bex = g_bex, map_op_size = g_size, map_op_size_abort = g_size_abort, map_op_sel = g_sel, map_op_to_list = g_to_list, map_op_to_map = g_list_to_map \<rparr>" local_setup {* Locale_Code.lc_decl_del @{term dflt_ops}*} end lemma update_dj_by_update: assumes "map_update \<alpha> invar update" shows "map_update_dj \<alpha> invar update" proof - interpret map_update \<alpha> invar update by fact show ?thesis apply (unfold_locales) apply (auto simp add: update_correct) done qed lemma map_iterator_linord_is_it: "map_iterator_linord m it \<Longrightarrow> map_iterator m it" unfolding set_iterator_def set_iterator_map_linord_def apply (erule set_iterator_genord.set_iterator_weaken_R) .. context StdBasicMap begin lemma g_add_impl: "map_add \<alpha> invar g_add" proof fix m1 m2 assume "invar m1" "invar m2" have A: "g_add m1 m2 = iterate_add_to_map m1 update (iteratei m2)" unfolding g_add_def iterate_add_to_map_def by simp have "\<alpha> (g_add m1 m2) = \<alpha> m1 ++ \<alpha> m2 \<and> invar (g_add m1 m2)" unfolding A apply (rule iterate_add_to_map_correct[of \<alpha> invar update m1 "iteratei m2" "\<alpha> m2"]) apply unfold_locales [] apply fact apply (rule iteratei_correct, fact) done thus "\<alpha> (g_add m1 m2) = \<alpha> m1 ++ \<alpha> m2" "invar (g_add m1 m2)" by auto qed lemma g_sel_impl: "map_sel' \<alpha> invar g_sel" proof - have A: "\<And>m P. g_sel m P = iterate_sel_no_map (iteratei m) P" unfolding g_sel_def iterate_sel_no_map_def iterate_sel_def by simp { fix m P assume I: "invar m" note iterate_sel_no_map_correct[OF iteratei_correct[OF I], of P] } thus ?thesis apply unfold_locales unfolding A apply (simp add: Bex_def Ball_def image_iff map_to_set_def) apply clarify apply (metis option.exhaust PairE) apply (simp add: Bex_def Ball_def image_iff map_to_set_def) done qed lemma g_bex_impl: "map_bex \<alpha> invar g_bex" apply unfold_locales unfolding g_bex_def apply (rule_tac I="\<lambda>it \<sigma>. \<sigma> \<longleftrightarrow> (\<exists>kv\<in>it. 
P kv)" in iteratei_rule_insert_P) by (auto simp: map_to_set_def) lemma g_size_impl: "map_size \<alpha> invar g_size" proof fix m assume I: "invar m" have A: "g_size m \<equiv> iterate_size (iteratei m)" unfolding g_size_def iterate_size_def by simp from iterate_size_correct [OF iteratei_correct[OF I]] show "g_size m = card (dom (\<alpha> m))" unfolding A by (simp_all add: card_map_to_set) qed lemma g_size_abort_impl: "map_size_abort \<alpha> invar g_size_abort" proof fix s m assume I: "invar m" have A: "g_size_abort s m \<equiv> iterate_size_abort (iteratei m) s" unfolding g_size_abort_def iterate_size_abort_def by simp from iterate_size_abort_correct [OF iteratei_correct[OF I]] show "g_size_abort s m = min s (card (dom (\<alpha> m)))" unfolding A by (simp_all add: card_map_to_set) qed lemma g_isEmpty_impl: "map_isEmpty \<alpha> invar g_isEmpty" proof fix m assume I: "invar m" interpret map_size_abort \<alpha> invar g_size_abort by (rule g_size_abort_impl) from size_abort_correct[OF I] have "g_size_abort 1 m = min 1 (card (dom (\<alpha> m)))" . thus "g_isEmpty m = (\<alpha> m = Map.empty)" unfolding g_isEmpty_def by (auto simp: min_def card_0_eq[OF finite] I) qed lemma g_isSng_impl: "map_isSng \<alpha> invar g_isSng" proof fix m assume I: "invar m" interpret map_size_abort \<alpha> invar g_size_abort by (rule g_size_abort_impl) from size_abort_correct[OF I] have "g_size_abort 2 m = min 2 (card (dom (\<alpha> m)))" . thus "g_isSng m = (\<exists>k v. \<alpha> m = [k \<mapsto> v])" unfolding g_isSng_def by (auto simp: min_def I card_Suc_eq dom_eq_singleton_conv) qed lemma g_to_list_impl: "map_to_list \<alpha> invar g_to_list" proof fix m assume I: "invar m" have A: "g_to_list m = iterate_to_list (iteratei m)" unfolding g_to_list_def iterate_to_list_def by simp from iterate_to_list_correct [OF iteratei_correct[OF I]] have set_l_eq: "set (g_to_list m) = map_to_set (\<alpha> m)" and dist_l: "distinct (g_to_list m)" unfolding A by simp_all from dist_l show dist_fst_l: "distinct (map fst (g_to_list m))" by (simp add: distinct_map set_l_eq map_to_set_def inj_on_def) from map_of_map_to_set[of "(g_to_list m)" "\<alpha> m", OF dist_fst_l] set_l_eq show "map_of (g_to_list m) = \<alpha> m" by simp qed lemma g_list_to_map_impl: "list_to_map \<alpha> invar g_list_to_map" proof - { fix m0 l assume "invar m0" hence "invar (foldl (\<lambda>s (k,v). update k v s) m0 l) \<and> \<alpha> (foldl (\<lambda>s (k,v). update k v s) m0 l) = \<alpha> m0 ++ map_of (rev l)" proof (induction l arbitrary: m0) case Nil thus ?case by simp next case (Cons kv l) obtain k v where [simp]: "kv=(k,v)" by (cases kv) auto have "invar (foldl (\<lambda>s (k, v). update k v s) m0 (kv # l))" apply simp apply (rule conjunct1[OF Cons.IH]) apply (simp add: update_correct Cons.prems) done moreover have "\<alpha> (foldl (\<lambda>s (k, v). 
update k v s) m0 (kv # l)) = \<alpha> m0 ++ map_of (rev (kv # l))" apply simp thm trans[OF conjunct2[OF Cons.IH]] apply (rule trans[OF conjunct2[OF Cons.IH]]) apply (auto simp: update_correct Cons.prems Map.map_add_def[abs_def] split: option.split ) done ultimately show ?case by simp qed } thus ?thesis apply unfold_locales unfolding g_list_to_map_def apply (auto simp: empty_correct) done qed lemma g_add_dj_impl: "map_add_dj \<alpha> invar g_add_dj" proof fix m1 m2 assume "invar m1" "invar m2" and DJ: "dom (\<alpha> m1) \<inter> dom (\<alpha> m2) = {}" have A: "g_add_dj m1 m2 = iterate_add_to_map m1 update_dj (iteratei m2)" unfolding g_add_dj_def iterate_add_to_map_def by simp have "\<alpha> (g_add_dj m1 m2) = \<alpha> m1 ++ \<alpha> m2 \<and> invar (g_add_dj m1 m2)" unfolding A apply (rule iterate_add_to_map_dj_correct[ of \<alpha> invar update_dj m1 "iteratei m2" "\<alpha> m2"]) apply unfold_locales [] apply fact apply (rule iteratei_correct, fact) using DJ apply (simp add: Int_ac) done thus "\<alpha> (g_add_dj m1 m2) = \<alpha> m1 ++ \<alpha> m2" "invar (g_add_dj m1 m2)" by auto qed lemma g_restrict_impl: "map_restrict \<alpha> invar \<alpha> invar g_restrict" proof fix m P assume I: "invar m" have AUX: "\<And>k v it \<sigma>. \<lbrakk>it \<subseteq> {(k, v). \<alpha> m k = Some v}; \<alpha> m k = Some v; (k, v) \<notin> it; {(k, v). \<alpha> \<sigma> k = Some v} = it \<inter> Collect P\<rbrakk> \<Longrightarrow> k \<notin> dom (\<alpha> \<sigma>)" proof (rule ccontr, simp) fix k v it \<sigma> assume "k\<in>dom (\<alpha> \<sigma>)" then obtain v' where "\<alpha> \<sigma> k = Some v'" by auto moreover assume "{(k, v). \<alpha> \<sigma> k = Some v} = it \<inter> Collect P" ultimately have MEM: "(k,v')\<in>it" by auto moreover assume "it \<subseteq> {(k, v). \<alpha> m k = Some v}" and "\<alpha> m k = Some v" ultimately have "v'=v" by auto moreover assume "(k,v)\<notin>it" moreover note MEM ultimately show False by simp qed have "\<alpha> (g_restrict P m) = \<alpha> m |` {k. \<exists>v. \<alpha> m k = Some v \<and> P (k, v)} \<and> invar (g_restrict P m)" unfolding g_restrict_def apply (rule_tac I="\<lambda>it \<sigma>. invar \<sigma> \<and> map_to_set (\<alpha> \<sigma>) = it \<inter> Collect P" in iterate_rule_insert_P) apply (auto simp: I empty_correct update_dj_correct map_to_set_def AUX) apply (auto split: split_if_asm) apply (rule ext) apply (auto simp: Map.restrict_map_def) apply force apply (rule ccontr) apply force done thus "\<alpha> (g_restrict P m) = \<alpha> m |` {k. \<exists>v. \<alpha> m k = Some v \<and> P (k, v)}" "invar (g_restrict P m)" by auto qed context StdBasicOMapDefs begin definition "g_min m P \<equiv> iterateoi m (\<lambda>\<sigma>. \<sigma> = None) (\<lambda>x \<sigma>. if P x then Some x else None) None" definition "g_max m P \<equiv> rev_iterateoi m (\<lambda>\<sigma>. \<sigma> = None) (\<lambda>x \<sigma>. 
if P x then Some x else None) None" definition "g_to_sorted_list m \<equiv> rev_iterateo m (op #) []" definition "g_to_rev_list m \<equiv> iterateo m (op #) []" definition dflt_oops :: "('k,'v,'s) omap_ops" where [icf_rec_def]: "dflt_oops \<equiv> map_ops.extend dflt_ops \<lparr> map_op_ordered_list_it = ordered_list_it, map_op_rev_list_it = rev_list_it, map_op_min = g_min, map_op_max = g_max, map_op_to_sorted_list = g_to_sorted_list, map_op_to_rev_list = g_to_rev_list \<rparr>" local_setup {* Locale_Code.lc_decl_del @{term dflt_oops}*} end context StdBasicOMap begin lemma g_min_impl: "map_min \<alpha> invar g_min" proof fix m P assume I: "invar m" from iterateoi_correct[OF I] have iti': "map_iterator_linord (iterateoi m) (\<alpha> m)" by simp note sel_correct = iterate_sel_no_map_map_linord_correct[OF iti', of P] have A: "g_min m P = iterate_sel_no_map (iterateoi m) P" unfolding g_min_def iterate_sel_no_map_def iterate_sel_def by simp { assume "rel_of (\<alpha> m) P \<noteq> {}" with sel_correct show "g_min m P \<in> Some ` rel_of (\<alpha> m) P" unfolding A by (auto simp add: image_iff rel_of_def) } { assume "rel_of (\<alpha> m) P = {}" with sel_correct show "g_min m P = None" unfolding A by (auto simp add: image_iff rel_of_def) } { fix k v assume "(k, v) \<in> rel_of (\<alpha> m) P" with sel_correct show "fst (the (g_min m P)) \<le> k" unfolding A by (auto simp add: image_iff rel_of_def) } qed lemma g_max_impl: "map_max \<alpha> invar g_max" proof fix m P assume I: "invar m" from rev_iterateoi_correct[OF I] have iti': "map_iterator_rev_linord (rev_iterateoi m) (\<alpha> m)" by simp note sel_correct = iterate_sel_no_map_map_rev_linord_correct[OF iti', of P] have A: "g_max m P = iterate_sel_no_map (rev_iterateoi m) P" unfolding g_max_def iterate_sel_no_map_def iterate_sel_def by simp { assume "rel_of (\<alpha> m) P \<noteq> {}" with sel_correct show "g_max m P \<in> Some ` rel_of (\<alpha> m) P" unfolding A by (auto simp add: image_iff rel_of_def) } { assume "rel_of (\<alpha> m) P = {}" with sel_correct show "g_max m P = None" unfolding A by (auto simp add: image_iff rel_of_def) } { fix k v assume "(k, v) \<in> rel_of (\<alpha> m) P" with sel_correct show "fst (the (g_max m P)) \<ge> k" unfolding A by (auto simp add: image_iff rel_of_def) } qed lemma g_to_sorted_list_impl: "map_to_sorted_list \<alpha> invar g_to_sorted_list" proof fix m assume I: "invar m" note iti = rev_iterateoi_correct[OF I] from iterate_to_list_map_rev_linord_correct[OF iti] show "sorted (map fst (g_to_sorted_list m))" "distinct (map fst (g_to_sorted_list m))" "map_of (g_to_sorted_list m) = \<alpha> m" unfolding g_to_sorted_list_def iterate_to_list_def by simp_all qed lemma g_to_rev_list_impl: "map_to_rev_list \<alpha> invar g_to_rev_list" proof fix m assume I: "invar m" note iti = iterateoi_correct[OF I] from iterate_to_list_map_linord_correct[OF iti] show "sorted (rev (map fst (g_to_rev_list m)))" "distinct (map fst (g_to_rev_list m))" "map_of (g_to_rev_list m) = \<alpha> m" unfolding g_to_rev_list_def iterate_to_list_def by (simp_all add: rev_map) qed lemma dflt_oops_impl: "StdOMap dflt_oops" proof - interpret aux!: StdMap dflt_ops by (rule dflt_ops_impl) show ?thesis apply (rule StdOMap_intro) apply icf_locales apply (simp_all add: icf_rec_unf) apply (rule g_min_impl) apply (rule g_max_impl) apply (rule g_to_sorted_list_impl) apply (rule g_to_rev_list_impl) done qed end locale g_image_filter_defs_loc = m1!: StdMapDefs ops1 + m2!: StdMapDefs ops2 for ops1 :: "('k1,'v1,'s1,'m1) map_ops_scheme" and ops2 :: 
"('k2,'v2,'s2,'m2) map_ops_scheme" begin definition "g_image_filter f m1 \<equiv> m1.iterate m1 (\<lambda>kv \<sigma>. case f kv of None => \<sigma> | Some (k',v') => m2.update_dj k' v' \<sigma> ) (m2.empty ())" end locale g_image_filter_loc = g_image_filter_defs_loc ops1 ops2 + m1!: StdMap ops1 + m2!: StdMap ops2 for ops1 :: "('k1,'v1,'s1,'m1) map_ops_scheme" and ops2 :: "('k2,'v2,'s2,'m2) map_ops_scheme" begin lemma g_image_filter_impl: "map_image_filter m1.\<alpha> m1.invar m2.\<alpha> m2.invar g_image_filter" proof fix m k' v' and f :: "('k1 \<times> 'v1) \<Rightarrow> ('k2 \<times> 'v2) option" assume invar_m: "m1.invar m" and unique_f: "transforms_to_unique_keys (m1.\<alpha> m) f" have A: "g_image_filter f m = iterate_to_map m2.empty m2.update_dj ( set_iterator_image_filter f (m1.iteratei m))" unfolding g_image_filter_def iterate_to_map_alt_def set_iterator_image_filter_def case_prod_beta by simp from m1.iteratei_correct[OF invar_m] have iti_m: "map_iterator (m1.iteratei m) (m1.\<alpha> m)" by simp from unique_f have inj_on_f: "inj_on f (map_to_set (m1.\<alpha> m) \<inter> dom f)" unfolding transforms_to_unique_keys_def inj_on_def Ball_def map_to_set_def by auto (metis option.inject) def vP \<equiv> "\<lambda>k v. \<exists>k' v'. m1.\<alpha> m k' = Some v' \<and> f (k', v') = Some (k, v)" have vP_intro: "\<And>k v. (\<exists>k' v'. m1.\<alpha> m k' = Some v' \<and> f (k', v') = Some (k, v)) \<longleftrightarrow> vP k v" unfolding vP_def by simp { fix k v have "Eps_Opt (vP k) = Some v \<longleftrightarrow> vP k v" using unique_f unfolding vP_def transforms_to_unique_keys_def apply (rule_tac Eps_Opt_eq_Some) apply (metis Pair_eq option.inject) done } note Eps_vP_elim[simp] = this have map_intro: "{y. \<exists>x. x \<in> map_to_set (m1.\<alpha> m) \<and> f x = Some y} = map_to_set (\<lambda>k. Eps_Opt (vP k))" by (simp add: map_to_set_def vP_intro set_eq_iff split: prod.splits) from set_iterator_image_filter_correct [OF iti_m, OF inj_on_f, unfolded map_intro] have iti_filter: "map_iterator (set_iterator_image_filter f (m1.iteratei m)) (\<lambda>k. Eps_Opt (vP k))" by auto have upd: "map_update_dj m2.\<alpha> m2.invar m2.update_dj" by unfold_locales have emp: "map_empty m2.\<alpha> m2.invar m2.empty" by unfold_locales from iterate_to_map_correct[OF upd emp iti_filter] show "map_op_invar ops2 (g_image_filter f m) \<and> (map_op_\<alpha> ops2 (g_image_filter f m) k' = Some v') = (\<exists>k v. map_op_\<alpha> ops1 m k = Some v \<and> f (k, v) = Some (k', v'))" unfolding A vP_def[symmetric] by (simp add: vP_intro) qed end sublocale g_image_filter_loc < map_image_filter m1.\<alpha> m1.invar m2.\<alpha> m2.invar g_image_filter by (rule g_image_filter_impl) locale g_value_image_filter_defs_loc = m1!: StdMapDefs ops1 + m2!: StdMapDefs ops2 for ops1 :: "('k,'v1,'s1,'m1) map_ops_scheme" and ops2 :: "('k,'v2,'s2,'m2) map_ops_scheme" begin definition "g_value_image_filter f m1 \<equiv> m1.iterate m1 (\<lambda>(k,v) \<sigma>. 
case f k v of None => \<sigma> | Some v' => m2.update_dj k v' \<sigma> ) (m2.empty ())" end (* TODO: Move to Misc *) lemma restrict_map_dom_subset: "\<lbrakk> dom m \<subseteq> R\<rbrakk> \<Longrightarrow> m|`R = m" apply (rule ext) apply (auto simp: restrict_map_def) apply (case_tac "m x") apply auto done locale g_value_image_filter_loc = g_value_image_filter_defs_loc ops1 ops2 + m1!: StdMap ops1 + m2!: StdMap ops2 for ops1 :: "('k,'v1,'s1,'m1) map_ops_scheme" and ops2 :: "('k,'v2,'s2,'m2) map_ops_scheme" begin lemma g_value_image_filter_impl: "map_value_image_filter m1.\<alpha> m1.invar m2.\<alpha> m2.invar g_value_image_filter" apply unfold_locales unfolding g_value_image_filter_def apply (rule_tac I="\<lambda>it \<sigma>. m2.invar \<sigma> \<and> m2.\<alpha> \<sigma> = (\<lambda>k. Option.bind (map_op_\<alpha> ops1 m k) (f k)) |` it" in m1.old_iterate_rule_insert_P) apply auto [] apply (auto simp: m2.empty_correct) [] defer apply simp [] apply (rule restrict_map_dom_subset) apply (auto) [] apply (case_tac "m1.\<alpha> m x") apply (auto) [2] apply (auto split: option.split simp: m2.update_dj_correct intro!: ext) apply (auto simp: restrict_map_def) done end sublocale g_value_image_filter_loc < map_value_image_filter m1.\<alpha> m1.invar m2.\<alpha> m2.invar g_value_image_filter by (rule g_value_image_filter_impl) end
The end of the war drastically reduced military investment in the island. Increasing enforcement of gambling laws and the growth of Las Vegas, Nevada as a competitive center of gambling and entertainment put pressure on the gaming industry on the island. Finally in 1957, Texas Attorney General Will Wilson and the Texas Rangers began a massive campaign of raids which disrupted gambling and prostitution in the city. As these vice industries crashed, so did tourism, taking the rest of the Galveston economy with it. Neither the economy nor the culture of the city was the same afterward.
#= We use two sets of infixable comparatives. The conventional symbols {==,!=,<,<=,>,>=} are defined using the Arb C library's implementation of eq,ne,lt,le,gt,ge. The predecessor/successor symbols {≃,≄,≺,≼,≻,≽} are defined from Hend Dawood's non-strict total ordering for interval values. (q.v. Hend's Master's thesis: Interval Mathematics Foundations, Algebraic Structures, and Applications) =# for (op,cop) in ((:(==), :(arb_eq)), (:(!=), :(arb_ne)), (:(<=), :(arb_le)), (:(>=), :(arb_ge)), (:(<), :(arb_lt)), (:(>), :(arb_gt)) ) @eval begin function ($op){T<:ArbFloat}(a::T, b::T) return Bool(ccall(@libarb($cop), Cint, (Ptr{T}, Ptr{T}), &a, &b) ) end ($op){P,Q}(a::ArbFloat{P}, b::ArbFloat{Q}) = ($op)(promote(a,b)...) ($op){T<:ArbFloat,R<:Real}(a::T, b::R) = ($op)(promote(a,b)...) ($op){T<:ArbFloat,R<:Real}(a::R, b::T) = ($op)(promote(a,b)...) end end function (≃){T<:ArbFloat}(a::T, b::T) return Bool(ccall(@libarb(arb_eq), Cint, (Ptr{T}, Ptr{T}), &a, &b)) end simeq{T<:ArbFloat}(a::T, b::T) = (≃)(a,b) function (≄){T<:ArbFloat}(a::T, b::T) return !Bool(ccall(@libarb(arb_eq), Cint, (Ptr{T}, Ptr{T}), &a, &b)) end nsime{T<:ArbFloat}(a::T, b::T) = (≄)(a,b) function (⪰){T<:ArbFloat}(a::T, b::T) alo, ahi = bounds(a) blo, bhi = bounds(b) return (alo < blo) || ((alo == blo) & (ahi <= bhi)) end succeq{T<:ArbFloat}(a::T, b::T) = (⪰)(a,b) function (≻){T<:ArbFloat}(a::T, b::T) # (a ≼ b) & (a ≄ b) alo, ahi = bounds(a) blo, bhi = bounds(b) return (alo < blo) || ((alo == blo) & (ahi < bhi)) end succ{T<:ArbFloat}(a::T, b::T) = (≻)(a,b) function (⪯){T<:ArbFloat}(a::T, b::T) alo, ahi = bounds(a) blo, bhi = bounds(b) return (alo > blo) || ((alo == blo) & (ahi >= bhi)) end preceq{T<:ArbFloat}(a::T, b::T) = (⪯)(a,b) function (≺){T<:ArbFloat}(a::T, b::T) alo, ahi = bounds(a) blo, bhi = bounds(b) return (alo > blo) || ((alo == blo) & (ahi > bhi)) end prec{T<:ArbFloat}(a::T, b::T) = (≺)(a,b) # for sorted ordering isequal{T<:ArbFloat}(a::T, b::T) = !(a != b) isless{ T<:ArbFloat}(a::T, b::T) = b == max(a,b) # !(a >= b) isequal{T<:ArbFloat}(a::Void, b::T) = false isequal{T<:ArbFloat}(a::T, b::Void) = false isless{ T<:ArbFloat}(a::Void, b::T) = true isless{ T<:ArbFloat}(a::T, b::Void) = true # experimental ≖ ≗ (eq){T<:ArbFloat}(x::T, y::T) = !(x != y) (eq){P,Q}(x::ArbFloat{P}, y::ArbFloat{Q}) = (eq)(promote(x,y)...) (eq){T1<:ArbFloat,T2<:Real}(x::T1, y::T2) = (eq)(promote(x,y)...) (eq){T1<:ArbFloat,T2<:Real}(x::T2, y::T1) = (eq)(promote(x,y)...) (≗){T<:ArbFloat}(x::T, y::T) = !(x != y) (≗){P,Q}(x::ArbFloat{P}, y::ArbFloat{Q}) = (≗)(promote(x,y)...) (≗){T1<:ArbFloat,T2<:Real}(x::T1, y::T2) = (≗)(promote(x,y)...) (≗){T1<:ArbFloat,T2<:Real}(x::T2, y::T1) = (≗)(promote(x,y)...) (neq){T<:ArbFloat}(x::T, y::T) = donotoverlap(x, y) (neq){P,Q}(x::ArbFloat{P}, y::ArbFloat{Q}) = (donotoverlap)(promote(x,y)...) (neq){T1<:ArbFloat,T2<:Real}(x::T1, y::T2) = (donotoverlap)(promote(x,y)...) (neq){T1<:ArbFloat,T2<:Real}(x::T2, y::T1) = (donotoverlap)(promote(x,y)...)
/***************************************************************************
 *
 * Copyright (c) 2013 Baidu.com, Inc. All Rights Reserved
 *
 **************************************************************************/

/**
 * @file   client_mock.cpp
 *
 * @author liuming03
 * @date   2013-9-11
 * @brief
 */

#include <cstdio>
#include <cstdlib>

#include <sys/socket.h>
#include <netinet/in.h>

#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/protocol/TBinaryProtocol.h>
#include <boost/thread.hpp>
#include <boost/bind.hpp>

#include "Announce.h"

using std::string;
using boost::shared_ptr;
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace apache::thrift;
using namespace bbts::tracker;

static void connect_server(int thread_id, int repeat_id, string host, int port) {
    // The format string produces 20 significant characters, so the buffer
    // needs 21 bytes; a size-20 buffer would make snprintf truncate the
    // last character.
    char peerid[21];
    snprintf(peerid, sizeof(peerid), "%10d%10d", thread_id, repeat_id);
    Peer peer;
    peer.__set_ip("192.168.1.1");
    peer.__set_idc("tc");
    peer.__set_port(1234);
    peer.__set_peerid(peerid);
    Stat stat;
    stat.__set_downloaded(thread_id * repeat_id);
    stat.__set_left(thread_id + repeat_id);
    stat.__set_uploaded(thread_id);
    stat.__set_status(static_cast<Status::type>(thread_id * repeat_id % 5));
    AnnounceRequest request;
    request.__set_infohash(peerid);
    request.__set_is_seed(thread_id * repeat_id % 2);
    request.__set_num_want(50);
    request.__set_peer(peer);
    request.__set_stat(stat);

    AnnounceResponse response;
    shared_ptr<TSocket> socket(new TSocket(host.c_str(), port));
    socket->setConnTimeout(3000);
    socket->setSendTimeout(3000);
    socket->setRecvTimeout(5000);
    shared_ptr<TTransport> transport(new TFramedTransport(socket));
    shared_ptr<TBinaryProtocol> protocol(new TBinaryProtocol(transport));
    AnnounceClient client(protocol);
    try {
        transport->open();
        client.announce(response, request);
        transport->close();
        //printf("success\n");
    } catch (TException &tx) {
        printf("ERROR: %s\n", tx.what());
    }
}

static void main_thread(int thread_id, int count, string host, int port) {
    for (int i = 0; i < count; ++i) {
        connect_server(thread_id, i, host, port);
    }
}

int main(int argc, char* argv[]) {
    if (argc < 5) {
        printf("usage: %s host port thread repeat\n", argv[0]);
        return 0;
    }
    string host = argv[1];
    int port = atoi(argv[2]);
    int thread_num = atoi(argv[3]);
    int count = atoi(argv[4]);
    printf("%s:%d %d %d\n", host.c_str(), port, thread_num, count);
    boost::thread_group threads;
    for (int i = 0; i < thread_num; ++i) {
        threads.create_thread(boost::bind(&main_thread, i, count, host, port));
    }
    threads.join_all();
}
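// Example invocation (a sketch; host and port are placeholders for a tracker
// that actually serves the Announce thrift service):
//
//   ./client_mock 127.0.0.1 8080 4 1000
//
// This spawns 4 threads, each issuing 1000 announce requests sequentially.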
%-------------------------------------------------------------------------------
\section{Introduction}
%-------------------------------------------------------------------------------

Consider the motivating models of \cite{Spence.1973} and \cite{Ben-Porath.1967} as discussed in class.

\begin{boenumerate}

\item For both models, provide a brief description of the question they are designed to address and their key ingredients. Provide two examples of important economic features that are missing from the formal analysis.\\

\noindent{Consider the model developed in \cite{Spence.1973} in more detail. Figure \ref{Model parametrization} visualizes the information about the wage schedule and the cost of education in the parametrized model. Please assume throughout that employers believe that individuals with a level of education $y^* \geq \tfrac{5}{4}$ have a high productivity.}

\begin{figure}[h]\centering
\caption{Model parametrization}\label{Model parametrization}
\subfloat[Wage schedule]{\scalebox{0.25}{\includegraphics{fig-introduction-spence-benefit}}}
\subfloat[Cost of education]{\scalebox{0.25}{\includegraphics{fig-introduction-spence-cost}}}
\end{figure}

\item Write down the parametrization of the cost $(c_L, c_H)$ and wage $(w_L, w_H)$ functions for the high and low productivity individuals.

\item Complete Figure \ref{Canvas Surplus} by adding the surplus functions for each of the two groups over the specified range. Also, indicate the optimal level of schooling for each of the two groups.

\begin{figure}[h]\centering
\caption{Surplus of education}\label{Canvas Surplus}
\scalebox{0.35}{\includegraphics{fig-introduction-spence-surplus-canvas}}
\end{figure}

\item Calculate the range of the separating schooling level $y^*$ that confirms the employers' beliefs.\\

\noindent{Now consider the case where individuals do not have the ability to signal their productivity and the share of individuals with low productivity is denoted by $q_L$.\\}

\item What is the wage for each of the two groups in this scenario as a function of $q_L$?

\item Complete Figure \ref{Canvas Market Structure} by adding the surplus and wage for the two groups under the scenario where individuals are able to signal their ability and when they are not.

\begin{figure}[h]\centering
\caption{Surplus and market structure}\label{Canvas Market Structure}
\scalebox{0.35}{\includegraphics{fig-introduction-spence-market-structure-canvas}}
\end{figure}

\item Which scenario do high-productivity individuals prefer? What does their assessment depend on, and when exactly do they change their mind?

\end{boenumerate}
%File: formatting-instruction.tex
\documentclass[letterpaper]{article}
\usepackage{graphicx}
\usepackage{float}
\graphicspath{ {Images/} }
\usepackage{aaai}
\usepackage{times}
\usepackage{helvet}
\usepackage{courier}
\frenchspacing
\setlength{\pdfpagewidth}{8.5in}
\setlength{\pdfpageheight}{11in}
\pdfinfo{
/Title {Unsupervised Decomposition of Multi-Author Document}
/Author (Sayantan Sengupta, Kautsya Kanu)}
\setcounter{secnumdepth}{0}
\begin{document}
% The file aaai.sty is the style file for AAAI Press
% proceedings, working notes, and technical reports.
%
\title{Unsupervised Decomposition of Multi-Author Document:\\\textit{Exploiting the difference of Syntactic writing styles}}
\author{Kautsya Kanu \and Sayantan Sengupta\\
Indian Institute of Technology Delhi\\
}
\maketitle
\begin{abstract}
\begin{quote}
This paper proposes an improvement over a recent paper~[1]. We work on two aspects. In the first aspect, we try to capture the writing style of an author with n-gram models of words and POS tags and a PQ-gram model of syntactic parses, rather than the basic unigram model used previously. In the second aspect, we add layers of refinement to the existing baseline model and introduce a new term, ``similarity index'', to distinguish between pure and mixed segments before unsupervised labeling. The similarity index uses the overall and sudden changes of writing style (via the PQ-gram model) and of word usage (via the n-gram model) between lexicalised/unlexicalised sentences in a segment for refinement. We investigate the role of feature selection in capturing the syntactic patterns specific to an author and its overall effect on the final accuracy of the baseline system. More specifically, we insert a layer of refinement into the baseline system and define a threshold, based on the similarity measure among the sentences, to assess the purity of the segments given as input to the GMM. The key idea of our approach is to provide the GMM clustering with ``good'' segments so that the clustering precision is maximised; the resulting clusters are then used as labels to train a classifier. We also try different feature sets, such as bigrams and trigrams of POS tags and a PQ-gram feature based on unlexicalised PCFG parses, to capture distinct writing styles; these are given as input to a GMM trained by an iterative EM algorithm to generate good clusters of the segments of the merged document.
\end{quote}
\end{abstract}

\section{Introduction}
\noindent Here we propose a new unsupervised method for decomposing a multi-author document into authorial components. We assume that we have no prior information about the authors and the document, except the number of authors. The key idea is to exploit the differences in the grammatical writing styles of the authors and to use this information to build paragraph clusters. This problem is difficult on several levels. Decomposing a document by topic and context, known in the literature as text segmentation, is comparatively easy; distinguishing authors becomes hard when several of them have written on the same topic. Quantifying the differences in the grammatical writing styles of authors is another big challenge. As there is no prior information about, or access to, the authors' written texts, supervised classification approaches cannot be applied directly. On top of this, the number of authors of an arbitrary document or article is generally unknown (as in cases of plagiarism), so fixing the number of clusters is another big task.
Considering the above constraints, this paper focuses on the feature-selection part, which is the most important part of the whole unsupervised clustering: good features lead to more precise assignment of sentences to their respective clusters. The traditional studies on text segmentation, such as Choi (2000), Brants et al. (2002), Misra et al. (2009) and Henning and Labor (2009), focus on dividing the text into significant components such as words, sentences and topics, rather than authors. Other approaches, such as those of Schaalje et al. (2013), Segarra et al. (2014) and Layton et al. (2013), deal with documents written by a single author only. Koppel et al. (2011) considered the segmentation of a document according to multi-authorship, but their approach requires manual translations and concordances to be available beforehand; hence it can only be applied to particular types of documents, such as Bible books. Akiva and Koppel (2013) proposed a method that relies on distance measurements to increase the precision and accuracy of the clustering and classification process; its performance degrades when the number of authors grows beyond two.

\section{Modified Baseline}
The modified state-of-the-art technique is described below. Given a multi-author document written by \textit{l} authors, it is assumed that every sentence is written entirely by one of the authors. The approach goes through the following steps:
\begin{itemize}
\item Divide the document into segments of fixed length.
\item Represent each sentence inside a segment as a vector, using n-grams of words and PQ grams as the feature set.
\item Separate pure and mixed segments by analyzing sudden changes of writing style or word usage between sentences inside a segment, using the ``similarity index'' of the segment.
\item Represent the resulting pure segments as vectors, using an appropriate feature set (words, POS tags, PQ grams) over the whole merged document that can differentiate the writing styles of the authors.
\item Cluster the resulting vectors into \textit{l} clusters using an appropriate clustering algorithm targeting high recall rates (GMM with an iterative EM algorithm).
\item Re-vectorize the segments using a different feature set to more accurately discriminate the segments in each cluster.
\item Apply the segment elicitation procedure, which identifies the vital segments from each cluster to improve the precision rates.
\item Re-vectorize all selected segments using another feature set that can capture the differences in the writing styles of all the sentences in a document.
\item Train the classifier using a Naive Bayesian model.
\item Classify each sentence using the learned classifier.
\end{itemize}

\section{Data Set}
The data sets we have used to evaluate our model are:
\begin{itemize}
\item 690 blogs written by Gary Becker and Richard Posner.
\item 1,182 New York Times articles written by Maureen Dowd, Gail Collins, Thomas Friedman and Paul Krugman.
\end{itemize}
Each data set poses its own challenges, since each author has written on many different topics and some topics are covered by more than one author.

The resulting table is shown below:
\begin{table}[h]
\caption{Sentence-level accuracy per data set}\label{tab:title}
\begin{center}
\begin{tabular}{||c c c c||}
\hline
Dataset & Accuracy & Sentences & Authors \\ [0.5ex]
\hline\hline
Becker-Posner & 0.82 & 26922 & 2 \\
\hline
GC-TF-PK & 0.67 & 11984 & 3 \\
\hline
MD-TF-PK & 0.70 & 13422 & 3 \\
\hline
MD-GC-PK & 0.66 & 13448 & 3 \\
\hline
MD-GC-TF-PK & 0.61 & 15584 & 4 \\ [1ex]
\hline
\end{tabular}
\end{center}
\end{table}

\begin{figure}
\caption{•}
\includegraphics[width=8cm]{u4.jpg}
\caption{•}
\includegraphics[width=8cm]{u5.jpg}
\end{figure}

\section{Limitations of the Baseline System}
We can see that no deep NLP features are used for the task; a bag-of-words model is too weak to discriminate between the authors. Moreover, the accuracy of the final classification stage depends on the ``chunk'' size (V) of sentences picked from the individual authors to form the merged document: changing V from 200 to 50 reduces the final accuracy from 82\% to 49\%. Training on segments and testing on sentences is also problematic, as the clustering algorithm is the bottleneck for achieving high accuracy. Finally, mixed segments (segments containing sentences by both authors) pose a problem during the clustering process, which hurts precision and recall badly.

\subsection{Preliminary: PQ Grams}
Similar to n-grams, which represent the subparts of length n of a string, p-q grams extract substructures of an ordered, labelled tree. The size of a p-q gram is determined by the stem (p) and the base (q): p defines how many nodes are included vertically, and q defines the number of nodes to be considered horizontally. For example, a valid p-q gram with p=2 and q=3 starting from PP at the left side of the tree (S2) shown in the figure would be [PP-NP-DT-JJ-NNS]. The p-q gram index then consists of all possible p-q grams of a tree. In order to obtain all p-q grams, the base is additionally shifted left and right. If fewer than q nodes exist horizontally, the corresponding places in the p-q gram are filled with \textbf{*}, indicating missing nodes.\\

\section{Proposed Methodology}
There are two different aspects to this paper. The first aspect is to use a PQ-gram-based model to featurise the sentences of the segments and to study its effect on the final accuracy. The second aspect is to introduce a layer of filtering over the segments, identifying the segments that are pure (written completely by one author) to the largest extent possible by defining a threshold value, which is essentially a similarity score among the sentences of each segment. This helps us obtain better cluster assignments.\\
\textbf{In the first aspect,} the main idea is to quantify the differences of the grammatical writing styles, which the earlier baseline model was lacking, and to use this information to build paragraph clusters.
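For concreteness, the PQ-gram extraction sketched above can be written down compactly. The following is a minimal illustration only, not our actual implementation: it assumes NLTK-style \texttt{Tree} objects for the parses, and the function names are ours.

\begin{verbatim}
from collections import Counter
from nltk.tree import Tree

def pq_grams(node, stem=(), p=2, q=3):
    # stem: the last p labels on the path from the root, padded with '*'
    grams = []
    stem = (('*',) * p + stem + (node.label(),))[-p:]
    kids = [k for k in node if isinstance(k, Tree)]  # unlexicalised: skip words
    if not kids:
        grams.append(stem + ('*',) * q)              # leaf: base is all '*'
    else:
        ext = ('*',) * (q - 1) + tuple(k.label() for k in kids) + ('*',) * (q - 1)
        for i in range(len(ext) - q + 1):            # slide the base window
            grams.append(stem + ext[i:i + q])
        for k in kids:
            grams.extend(pq_grams(k, stem, p, q))
    return grams

def pq_profile(parsed_sentences, p=2, q=3):
    # frequency of every p-q gram over all sentences of a text
    profile = Counter()
    for tree in parsed_sentences:
        profile.update(pq_grams(tree, (), p, q))
    return profile
\end{verbatim}

With p=2 and q=3 (the setting used below), the stem [PP-NP] with base [DT-JJ-NNS] from the example above appears as one entry of the index.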
So, by doing this, what kind of sentences can we decompose? The example below illustrates this. Consider the two sentences:\\
\textit{S1: My chair started squeaking a few days ago and its driving me nuts.\\
S2: Since a few days my chair is squeaking-it’s simply annoying.\\}
\begin{figure}[H]
\caption{Different parses for similar meaning sentence}
\includegraphics[width=8cm]{u2.jpg}
\end{figure}
The above sentences are semantically similar, yet they differ considerably in syntax (as shown in the figure above), and a bag-of-words model, which relies only on word occurrences and counts, cannot distinguish between them, since they contain more or less the same kinds of words. The main idea is to quantify those differences by calculating grammar profiles and to use this information to decompose a collaboratively written document.
\begin{figure}[H]
\caption{The overview of our Method}
\includegraphics[width=8cm]{u3.jpg}
\end{figure}
As seen in the flow chart above, paragraphs are extracted from the text, and each sentence is extracted from the paragraphs. For each sentence, a parse tree is built using the standard Stanford parser; we call this the grammar tree. From this grammar tree, we extract the PQ-gram index of the sentence: all possible p-q grams of the sentence, whereby multiple occurrences of the same p-q gram appear multiple times in the index. By combining the p-q gram indices of all sentences, a p-q gram profile is created, which contains a list of all p-q grams and their corresponding frequencies of appearance in the text. For our experiment, we have used p=2 and q=3. Finally, each paragraph profile is provided as input to a clustering algorithm, which is asked to build clusters based on the p-q grams contained. The node labels are Penn Treebank POS tags; we have not used the head words, since we want to capture only the structure of each sentence and not each author's choice of words.\\
Another feature set we tried is POS tags of each sentence in a bigram/trigram setting, building a paragraph profile that is given as input to a clustering algorithm (GMM with EM).\\
\\
\textbf{In the second aspect:}
\begin{itemize}
\item Calculate the similarity between each pair of sentences in a segment as the number of common PQ-grams divided by the product of the total PQ-gram counts of the two sentences, i.e.\ $\mathrm{sim}(s_i,s_j) = \lvert PQ(s_i) \cap PQ(s_j) \rvert \,/\, \bigl( \lvert PQ(s_i) \rvert \cdot \lvert PQ(s_j) \rvert \bigr)$.
\item Sum these scores over all sentence pairs to get the similarity score of the segment, and repeat for all segments.
\item This yields a relative measure of the purity of the segments: a higher segment score means the segment is more pure or, in simple words, that its mixture is biased towards one of the authors.
\item It also indicates which segments to use for the clustering algorithm, leaving the evenly mixed segments with low similarity scores out of the clustering process.
\item Train the GMM on the pure segments and leave the mixed segments out.
\item Use the posteriors of the clustered segments to identify the class of the left-out segments.
\item Identify vital segments as before and train using Naive Bayes.
\item Bypassing some of the segments in the GMM step in this way enhances the selection of vital segments in the baseline system.
\end{itemize}

\begin{figure}[H]
\caption{•}
\includegraphics[width=8cm]{4.png}
\end{figure}
\begin{figure}[H]
\caption{•}
\includegraphics[width=8cm]{1.png}
\end{figure}

\section{Results}
\textbf{First Aspect}
\begin{table}[H]
\caption{Becker-Posner}\label{tab:bp}
\begin{center}
\begin{tabular}{||c c c c||}
\hline
Features & \multicolumn{3}{c||}{Accuracy} \\ [0.5ex]
 & V=200 & V=100 & V=50 \\
\hline\hline
Baseline & 0.82 & 0.57 & 0.51 \\
\hline
POS-tags (bigram) & 0.67 & 0.64 & 0.63 \\
\hline
POS-tags (trigram) & 0.70 & 0.67 & 0.65 \\
\hline
PQ-grams & 0.66 & 0.63 & 0.62 \\
\hline
PQ-grams + tf-idf & 0.69 & 0.65 & 0.63 \\ [1ex]
\hline
\end{tabular}
\end{center}
\end{table}

\subsection{Analysis}
When applied to the Becker-Posner data set (26,922 sentences), we encountered long sentences that the parser was not able to parse (out of memory). We ignored those sentences (only 3) and evaluated the above strategies on the reduced data. Our observation from this experiment is that the approach is intuitively a good way to capture the different syntactic aspects of writers, although it pushes the dimensionality of the feature space quite high compared to the baseline, whose feature set was much smaller, simpler and faster to compute. We also see that introducing these features does not actually increase the final accuracy of the baseline model.\\
However, the model with the new features remains quite robust when the chunk size (V) is reduced: its accuracy does not vary drastically, as it does for the baseline system. We conclude that this set of syntactic features is very stable.

\subsection{Future Scope}
The whole investigation assumed that the number of authors in the document is known a priori. Future work could address determining the number of authors automatically using different clustering algorithms, and study the effect of these features on the discriminating properties of those classifiers.

\subsection{References}
\textit{(i)} Navot Akiva and Moshe Koppel. 2013. A generic unsupervised method for decomposing multi-author documents. \textit{Journal of the American Society for Information Science and Technology}, 64: 2256--2264.\\
\textit{(ii)} Khaled Aldebei, Xiangjian He and Jie Yang. 2015. Unsupervised decomposition of a multi-author document based on Naive-Bayesian model. \textit{Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics (Short Papers)}, pages 501--505, Beijing, China.\\
\textit{(iii)} Michael Tschuggnall and Günther Specht. Automatic decomposition of multi-author documents using grammar analysis. Databases and Information Systems, Institute of Computer Science, University of Innsbruck, Austria.
\end{document}
[STATEMENT] lemma capped_pnu: "y \<le> u \<Longrightarrow> z \<le> v \<Longrightarrow> u * v \<le> v \<Longrightarrow> p\<nu>(\<lambda>x . (y * x \<squnion> z) \<sqinter> v) = y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>y \<le> u; z \<le> v; u * v \<le> v\<rbrakk> \<Longrightarrow> p\<nu> (\<lambda>x. (y * x \<squnion> z) \<sqinter> v) = y\<^sup>\<omega>\<^sub>v \<squnion> y\<^sup>\<star> * z [PROOF STEP] by (metis capped_greatest_postfixpoint greatest_postfixpoint_same)