Dataset: AI4M
Column: text (string lengths 0 to 3.34M)

open import Prelude

module Implicits.Syntax.Context where

open import Implicits.Syntax.Type
open import Data.List.All
open import Data.Vec
open import Data.List

Ctx : ℕ → ℕ → Set
Ctx ν n = Vec (Type ν) n

ICtx : ℕ → Set
ICtx ν = List (Type ν)

-- wellformed implicit contexts
_⊢OK : ∀ {ν} → ICtx ν → Set
Δ ⊢OK = All (λ a → List.[] ⊢unamb a) Δ

Ktx : ℕ → ℕ → Set
Ktx ν n = Ctx ν n × ICtx ν

_∷Γ_ : ∀ {ν n} → Type ν → Ktx ν n → Ktx ν (suc n)
a ∷Γ (Γ , Δ) = (a ∷ Γ) , Δ

_∷Δ_ : ∀ {ν n} → Type ν → Ktx ν n → Ktx ν n
a ∷Δ (Γ , Δ) = Γ , a List.∷ Δ

_∷K_ : ∀ {ν n} → Type ν → Ktx ν n → Ktx ν (suc n)
a ∷K (Γ , Δ) = a ∷ Γ , a List.∷ Δ

nil : ∀ {ν} → Ktx ν 0
nil = [] , List.[]
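Here Ktx ν n pairs an explicit context (a length-indexed Vec of types) with an implicit context (a plain List of types): _∷Γ_ extends only the explicit part and so raises the length index to suc n, _∷Δ_ extends only the implicit part and leaves the index unchanged, and _∷K_ pushes the same type onto both at once.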
cc ------------ dpmjet3.4 - authors: S.Roesler, R.Engel, J.Ranft -------
cc -------- phojet1.12-40 - authors: S.Roesler, R.Engel, J.Ranft -------
cc                                                      - oct'13 -------
cc ----------- pythia-6.4 - authors: Torbjorn Sjostrand, Lund'10 -------
cc ---------------------------------------------------------------------
cc                                   converted for use with FLUKA -------
cc                                                      - oct'13 -------

C...PYLAMF
C...The standard lambda function.

      DOUBLE PRECISION FUNCTION PYLAMF(X,Y,Z)

C...Double precision and integer declarations.
      IMPLICIT DOUBLE PRECISION(A-H, O-Z)
      IMPLICIT INTEGER(I-N)
C...Local variables.
      DOUBLE PRECISION X,Y,Z

      PYLAMF=(X-(Y+Z))**2-4D0*Y*Z
      IF(PYLAMF.LT.0D0) PYLAMF=0D0

      RETURN
      END
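The value PYLAMF returns (before being clamped at zero by the IF line) is the standard kinematic lambda function of two-body phase space, usually attributed to Källén; expanding the square shows the two common forms coincide:

  \lambda(x,y,z) = \bigl(x - (y+z)\bigr)^{2} - 4yz = x^{2} + y^{2} + z^{2} - 2xy - 2yz - 2zx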
State Before: α : Type u_1 β : Type ?u.7051 m : Type ?u.7054 n : Type u_2 R : Type ?u.7060 inst✝¹ : Star α inst✝ : AddGroup n v : n → α ⊢ (circulant v)ᴴ = circulant (star fun i => v (-i))
State After: case a.h α : Type u_1 β : Type ?u.7051 m : Type ?u.7054 n : Type u_2 R : Type ?u.7060 inst✝¹ : Star α inst✝ : AddGroup n v : n → α i✝ x✝ : n ⊢ (circulant v)ᴴ i✝ x✝ = circulant (star fun i => v (-i)) i✝ x✝
Tactic: ext

State Before: case a.h α : Type u_1 β : Type ?u.7051 m : Type ?u.7054 n : Type u_2 R : Type ?u.7060 inst✝¹ : Star α inst✝ : AddGroup n v : n → α i✝ x✝ : n ⊢ (circulant v)ᴴ i✝ x✝ = circulant (star fun i => v (-i)) i✝ x✝
State After: no goals
Tactic: simp
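The ext step reduces the matrix equation to an entrywise goal, which simp closes. Assuming mathlib's definitions Matrix.circulant v i j = v (i - j) and Mᴴ i j = star (M j i) (worth checking against the current source), the computation behind that simp call is:

  (circulant v)ᴴ i j
      = star ((circulant v) j i)              -- conjugate transpose, entrywise
      = star (v (j - i))                      -- definition of circulant
      = star (v (-(i - j)))                   -- j - i = -(i - j) in the additive group n
      = (star fun k => v (-k)) (i - j)        -- star on functions acts pointwise
      = circulant (star fun k => v (-k)) i j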
Formal statement is: lemma locallyE: assumes "locally P S" "openin (top_of_set S) w" "x \<in> w" obtains u v where "openin (top_of_set S) u" "P v" "x \<in> u" "u \<subseteq> v" "v \<subseteq> w" Informal statement is: If $S$ is locally $P$, then for any open set $w$ in $S$ and any point $x$ in $w$, there exists an open set $u$ in $S$ containing $x$ and a set $v$ containing $u$ such that $v$ is contained in $w$ and $v$ is $P$.
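For instance, taking P to be connectedness, this specialises to the familiar local-connectedness property: every point x of an open subset w of S admits an open neighbourhood u lying inside a connected set v that is still contained in w.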
#!/usr/bin/env Rscript # This script is used to perform internal library search using various parameters write("loading required packages ...\n", stdout()) list.of.packages <- c("argparse","MSnbase","intervals","tools","BiocParallel") new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] if(length(new.packages)) install.packages(new.packages) # load argparse for parsing arguments suppressWarnings(library("argparse")) suppressWarnings(library("MSnbase")) suppressWarnings(library("intervals")) suppressWarnings(library("tools")) suppressWarnings(library("BiocParallel")) # set require arguments parser <- ArgumentParser() # set require arguments. parser$add_argument("-v", "--verbose", action="store_true", default=TRUE, help="Print extra output [default]") parser$add_argument("-q", "--quietly", action="store_false", dest="verbose", help="Print little output") parser$add_argument("-i", "--inputMS2",type="character", help="Input MS2 file (metfrag format). This can also be a zip file containing a number of metfrag MS2 file. In the later, a batch version of the script will be run.") parser$add_argument("-l", "--inputLibrary",type="character", help="Input MS2 library file") parser$add_argument("-ri", "--readNameOftheMS2", type="character", help="In the calse that inputMS2 is a single metfrag input file, real name of the file should be supplied. This option is for Galaxy that changes the name of the files but still keeps it in the metadata") parser$add_argument("-out", "--outputCSV", type="character", help="The results the search engine. If inputMS2 is zip, then the output will be concatenated unless option split is set.") parser$add_argument("-s", "--split", type="logical", default=FALSE, help="Set if you DONT want the output to be concatenated") parser$add_argument("-mz", "--precursorPPMTol", type="double", default=10.0, help="Precursors mz tolerance in ppm") parser$add_argument("-mzf", "--fragmentPPMTol", type="double", default=10.0, help="Fragments tolerance in ppm") parser$add_argument("-mzfa", "--fragmentabsTol", type="double", default=0.07, help="Fragments absolute tolerance") parser$add_argument("-rt", "--precursorRTTol", type="double", default=0.07, help="Precursors RT tolerance in sec") parser$add_argument("-sr", "--searchRange", type="logical", default=T, help="search based on feature RT or parent MS2") parser$add_argument("-pr", "--preprocess", type="logical", default=F, help="preprocess MS2 intensities") parser$add_argument("-dec", "--outputSemiDecoy", type="logical", default=F, help="estimate e-value for each MS2. 
This will take a lot of time unless you set low resampling number") parser$add_argument("-rs", "--resample", type="integer", default=1000, help="How many MS2s used to estimate e-value") parser$add_argument("-th", "--topHits", type="integer", default=-1, help="How many IDs per MS2 are reported (0 is the top score, -1 is all)") parser$add_argument("-ts", "--topScore", type="character", default="Scoredotproduct", help="which score to choose for selecting the top ions") parser$add_argument("-im", "--ionMode", type="character", default="pos", help="set ionization mode pos or neg, is not used now!") parser$add_argument("-ncore", "--numberOfCores", type="integer", default=1, help="Number of cores") parser$add_argument("-outT", "--outTable", type="logical", default=T, help="If set, the aggregatated results will be written as table otherwise CSV") write("loading the main function ...\n", stdout()) main<-function() { ################# the following packages have been adapted from msnbasea and maldiquant ################# numberOfCommonPeaks <- function(x, y, tolerance=25e-6, relative=TRUE) { sum(commonPeaks(x, y, tolerance=tolerance, relative=relative)) } commonPeaks <- function(x, y, method=c("highest", "closest"), tolerance=25e-6, relative=TRUE) { m <- matchPeaks(x, y, method=match.arg(method), tolerance=tolerance, relative=relative) m[which(is.na(m))] <- 0L as.logical(m) } matchPeaks <- function(x, y, method=c("highest", "closest", "all"), tolerance=25e-6, relative=TRUE) { method <- match.arg(method) y<-y[,"mz"] if (nrow(x) == 0 || length(y) == 0) { return(integer(peaksCount(x))) } m <- relaxedMatch((x[,"mz"]), y, nomatch=NA, tolerance=tolerance, relative=relative) if (anyDuplicated(m)) { o <- order((x[,"intensity"]), decreasing=TRUE) sortedMatches <- m[o] sortedMatches[which(duplicated(sortedMatches))] <- NA m[o] <- sortedMatches } as.integer(m) } relaxedMatch <- function(x, table, nomatch=NA_integer_, tolerance=25e-6, relative=TRUE) { if (relative) { if (tolerance > 1L) { stop(sQuote("tolerance"), " must be smaller than 1 for relative deviations.") } tolerance <- table*tolerance } MALDIquant:::match.closest(x, table, tolerance=tolerance, nomatch=nomatch) } bin_Spectra <- function(object1, object2, binSize = 1L, breaks = seq(floor(min(c((object1[,"mz"]), (object2[,"mz"])))), ceiling(max(c((object1[,"mz"]), (object2[,"mz"])))), by = binSize)) { breaks <- .fix_breaks(breaks, range((object1[,"mz"]), (object2[,"mz"]))) list(bin_Spectrum(object1, breaks = breaks), bin_Spectrum(object2, breaks = breaks)) } bin_Spectrum <- function(object, binSize = 1L, breaks = seq(floor(min((object[,"mz"]))), ceiling(max((object[,"mz"]))), by = binSize), fun = sum, msLevel.) { ## If msLevel. not missing, perform the trimming only if the msLevel ## of the spectrum matches (any of) the specified msLevels. # print(length(object[,"intensity"])) # print(length(object[,"mz"])) bins <- .bin_values(object[,"intensity"], object[,"mz"], binSize = binSize, breaks = breaks, fun = fun) #data.frame(bins$mids,bins$x) object<-matrix(c(bins$mids,bins$x), nrow = length(bins$x), dimnames = list(1:length(bins$x), c("mz","intensity"))) # object<-data.frame("mz" = bins$mids,"intensity" = bins$x) return(object) } dotproduct<-function (x, y) { as.vector(x %*% y)/(sqrt(sum(x * x)) * sqrt(sum(y * y))) } compare_Spectra2 <- function(x, y, fun=c("common", "cor", "dotproduct"),binSize=0,tolerance =0,relative = F) { { if (fun == "cor" || fun == "dotproduct") { #binnedSpectra <- bin_Spectra(x, y, ...) 
binSize<-binSize breaks = seq(floor(min(c((x[,"mz"]), (y[,"mz"])))), ceiling(max(c((x[,"mz"]), (y[,"mz"])))), by = binSize) # breaks <- .fix_breaks(brks = breaks, rng = range((x[,"mz"]), (y[,"mz"]))) # fix! brks = breaks rng = range((x[,"mz"]), (y[,"mz"])) if (brks[length(brks)] <= rng[2]) breaks <- c(brks, max((rng[2] + 1e-6), brks[length(brks)] + mean(diff(brks)))) breaks1 = breaks rng = range(x[,"mz"]) if (brks[length(brks)] <= rng[2]) breaks1 <- c(brks, max((rng[2] + 1e-6), brks[length(brks)] + mean(diff(brks)))) nbrks <- length(breaks1) idx <- findInterval(x[,"mz"], breaks1) ## Ensure that indices are within breaks. idx[which(idx < 1L)] <- 1L idx[which(idx >= nbrks)] <- nbrks - 1L ints <- double(nbrks - 1L) ints[unique(idx)] <- unlist(lapply(base::split(x[,"intensity"], idx), sum), use.names = FALSE) binsx=ints binsmids = (breaks1[-nbrks] + breaks1[-1L]) / 2L list1<-binsx#matrix(c(binsmids,binsx), nrow = length(binsx), dimnames = list(1:length(binsx), c("mz","intensity"))) # breaks2 <- .fix_breaks(breaks, range(y[,"mz"])) breaks2 = breaks brks = breaks rng = range(y[,"mz"]) if (brks[length(brks)] <= rng[2]) breaks2 <- c(brks, max((rng[2] + 1e-6), brks[length(brks)] + mean(diff(brks)))) nbrks <- length(breaks2) idx <- findInterval(y[,"mz"], breaks2) ## Ensure that indices are within breaks. idx[which(idx < 1L)] <- 1L idx[which(idx >= nbrks)] <- nbrks - 1L ints <- double(nbrks - 1L) ints[unique(idx)] <- unlist(lapply(base::split(y[,"intensity"], idx), sum), use.names = FALSE) bins<-list(x = ints, mids = (breaks2[-nbrks] + breaks[-1L]) / 2L) binsmids = (breaks2[-nbrks] + breaks2[-1L]) / 2L binsx=ints list2<-binsx#matrix(c(binsmids,binsx), nrow = length(binsx), dimnames = list(1:length(binsx), c("mz","intensity"))) #inten <- lapply(list(list1,list2), function(x){x[,"intensity"]}) # ifelse(fun == "dotproduct",dotproduct(list1,list2),cor(list1,list2) return(c(dotproduct(list1,list2),cor(list1,list2))) } else if (fun == "common") { return(numberOfCommonPeaks(x, y, tolerance =tolerance,relative = relative)) } } } .fix_breaks <- function(brks, rng) { ## Assuming breaks being sorted. if (brks[length(brks)] <= rng[2]) brks <- c(brks, max((rng[2] + 1e-6), brks[length(brks)] + mean(diff(brks)))) brks } .bin_values <- function(x, toBin, binSize = 1, breaks = seq(floor(min(toBin)), ceiling(max(toBin)), by = binSize), fun = max) { if (length(x) != length(toBin)) stop("lengths of 'x' and 'toBin' have to match.") fun <- match.fun(fun) breaks <- .fix_breaks(breaks, range(toBin)) nbrks <- length(breaks) idx <- findInterval(toBin, breaks) ## Ensure that indices are within breaks. idx[which(idx < 1L)] <- 1L idx[which(idx >= nbrks)] <- nbrks - 1L ints <- double(nbrks - 1L) ints[unique(idx)] <- unlist(lapply(base::split(x, idx), fun), use.names = FALSE) list(x = ints, mids = (breaks[-nbrks] + breaks[-1L]) / 2L) } ################# ################# ################# doSearch<-function(args) { inputLibrary<-NULL inputMS2<-NULL outputCSV<-NULL readNameOftheMS2<-NULL # MS1 PPM tol precursorPPMTol<-10 # MS2 abs tol fragmentabsTol<-0.07 # MS2 ppm tol fragmentPPMTol<-10 # MS1 RT tol precursorRTTol<-20 # search based on feature RT or parent MS2 searchRange<-T # preprocess MS2 ? preprocess<-F # estimate decoy ? 
outputSemiDecoy<-T # how many we should peak 0 is top -1 is all topHits<--1 # set ionization mode pos or neg ionMode<-"pos" # which score to choose topScore<-"Scoredotproduct" # resample resample=1000 # libdata for parallel package libdata<-NULL # read the parameters for(arg in names(args)) { argCase<-arg value<-args[[argCase]] if(argCase=="inputMS2" & !is.null(value)) { inputMS2=as.character(value) } if(argCase=="inputLibrary" & !is.null(value)) { inputLibrary=as.character(value) } if(argCase=="readNameOftheMS2" & !is.null(value)) { readNameOftheMS2=as.character(value) } if(argCase=="outputCSV" & !is.null(value)) { outputCSV=as.character(value) } if(argCase=="precursorPPMTol" & !is.null(value)) { precursorPPMTol=as.numeric(value) } if(argCase=="fragmentabsTol" & !is.null(value)) { fragmentabsTol=as.numeric(value) } if(argCase=="fragmentPPMTol" & !is.null(value)) { fragmentPPMTol=as.numeric(value) } if(argCase=="precursorRTTol" & !is.null(value)) { precursorRTTol=as.numeric(value) } if(argCase=="searchRange" & !is.null(value)) { searchRange=as.logical(value) } if(argCase=="outputSemiDecoy" & !is.null(value)) { outputSemiDecoy=as.logical(value) } if(argCase=="topHits" & !is.null(value)) { topHits=as.numeric(value) } if(argCase=="ionMode" & !is.null(value)) { ionMode=as.character(value) } if(argCase=="topScore" & !is.null(value)) { topScore=as.character(value) } if(argCase=="resample" & !is.null(value)) { resample=as.numeric(value) } if(argCase=="libdata" & !is.null(value)) { libdata=value } } # load MSnbase package for comparing spectra if((is.null(inputLibrary) & is.null(libdata))| is.null(inputMS2)) stop("Both inputs (library & MS2) are required") # read library file ######################### UNCOMMENT ############################# MSlibrary<-NA if(is.null(libdata)) { MSlibrary<-read.csv(inputLibrary,stringsAsFactors = F) }else{ MSlibrary<-libdata } if(nrow(MSlibrary)<1) stop("Library is empty!") checkAllCol<-all(sapply(c("startRT","endRT","startMZ","endMZ","centermz","centerrt","intensity","MS2fileName" ,"ID","Name", "nmass","MS2mz","MS2rt","MS2intensity","MS2mzs","MS2intensities","featureGroup" ), function(x){x%in%colnames(MSlibrary)})) if(!checkAllCol)stop("Check the library file! it has to include the following columns: startRT,endRT,startMZ,endMZ,centermz,centerrt, intensity,fileName ,ID,Name, nmass,MS2mz,MS2rt,MS2intensity,MS2mzs,featureGroup and MS2intensities" ) # limit the library to those with MS2 MSlibrary<-MSlibrary[MSlibrary[,"MS2mzs"]!="",] # extract name of the file MS2NameFileName<-readNameOftheMS2 if(is.null(MS2NameFileName))MS2NameFileName<-basename(inputMS2) # extract precursor RT and mz from name of the file precursorRT<-as.numeric(strsplit(x = MS2NameFileName,split = "_",fixed = T)[[1]][2]) precursorMZ<-as.numeric(strsplit(x = MS2NameFileName,split = "_",fixed = T)[[1]][3]) if(is.na(precursorRT) | is.na(precursorMZ)) stop("File name does not contain RT or mz. 
Check the file name!") # extract MS2 information from MS2 file MS2Information<-readLines(inputMS2) # split the data into a dataframe for easy access MS2DataFrame<-sapply(X = strsplit(x = MS2Information,split = " ",fixed = T)[[1]],FUN = function(x){strsplit(x = x,split = "=",fixed = T)[[1]]}) colnames(MS2DataFrame)<-MS2DataFrame[1,] # set parameters from input file # mz ppm if(!"DatabaseSearchRelativeMassDeviation"%in%colnames(MS2DataFrame))stop("DatabaseSearchRelativeMassDeviation is not in the parameter file!") if(is.null(precursorPPMTol))precursorPPMTol<-as.numeric(MS2DataFrame[2,"DatabaseSearchRelativeMassDeviation"]) if(is.na(precursorPPMTol)) stop("precursorPPMTol has not been provided and is not in the parameter file!") # fragment absolute deviation if(!"FragmentPeakMatchAbsoluteMassDeviation"%in%colnames(MS2DataFrame))stop("FragmentPeakMatchAbsoluteMassDeviation is not in the parameter file!") if(is.null(fragmentabsTol))fragmentabsTol<-as.numeric(MS2DataFrame[2,"FragmentPeakMatchAbsoluteMassDeviation"]) if(is.na(fragmentabsTol)) stop("fragmentabsTol has not been provided and is not in the parameter file!") # fragment PPM deviation if(is.null(fragmentPPMTol))fragmentPPMTol<-as.numeric(MS2DataFrame[2,"FragmentPeakMatchRelativeMassDeviation"]) if(is.na(fragmentPPMTol)) stop("fragmentPPMTol has not been provided and is not in the parameter file!") # fix if RT tol is missing! if(is.na(precursorRTTol)) {cat("WARNNING: precursorRTTol has not been provided using largest RT region:",.Machine$double.xmax);precursorRTTol<-.Machine$double.xmax} parentFile<-"NotFound" if(!"SampleName"%in%colnames(MS2DataFrame))stop("SampleName is not in the parameter file!") parentFile<-basename(MS2DataFrame[2,"SampleName"]) if(parentFile=="NotFound") {warning("SampleName was not found in the parameter file setting to NotFound")} # extract MS2 peaks if(!"PeakListString"%in%colnames(MS2DataFrame))stop("PeakListString is not in the parameter file!") MS2TMP<-MS2DataFrame[2,"PeakListString"] # if spectrum is empty do not continue isMS2Emtpy<-MS2TMP=="" # define a function for calculating ppm! ppmCal<-function(run,ppm) { return((run*ppm)/1000000) } temp<-NA if(!isMS2Emtpy) { temp<-t(sapply(X=(strsplit(x = strsplit(x = MS2TMP,split = ";",fixed = T)[[1]],split = "_",fixed = T)),FUN = function(x){c(mz=as.numeric(x[1]), int=as.numeric(x[2]))})) temp<-temp[temp[,1]!=0,] if(nrow(temp)<1)isMS2Emtpy<-F } if(!isMS2Emtpy){ # read MS2 and convert to dataframe # create a msnbase file! 
targetMS2<-new("Spectrum2", mz=temp[,1], intensity=temp[,2]) targetMS2DataFrame<-data.frame(mz=temp[,1],intensity=temp[,2]) targetMS2DataFrame<- matrix(c(temp[,1],temp[,2]), nrow = length(temp[,2]), dimnames = list(1:length(temp[,2]), c("mz","intensity"))) #names(targetMS2DataFrame)<-c("mz","intensity") # set search interval for MS2 mzTarget<-Intervals_full(cbind(precursorMZ,precursorMZ)) rtTarget<-Intervals_full(cbind(precursorRT,precursorRT)) # set search interval for library that is either as range or centroid mzLib<-NA rtLib<-NA if(searchRange) { if(!"startMZ"%in%colnames(MSlibrary))stop("startMZ is not in library file!") if(!"endMZ"%in%colnames(MSlibrary))stop("endMZ is not in library file!") mzLib<-Intervals_full(cbind(MSlibrary$startMZ- ppmCal(MSlibrary$startMZ,precursorPPMTol), MSlibrary$endMZ+ ppmCal(MSlibrary$endMZ,precursorPPMTol))) if(!"startRT"%in%colnames(MSlibrary))stop("startRT is not in library file!") if(!"endRT"%in%colnames(MSlibrary))stop("endRT is not in library file!") rtLib<-Intervals_full(cbind(MSlibrary$startRT- precursorRTTol, MSlibrary$endRT+ precursorRTTol)) }else{ if(!"centermz"%in%colnames(MSlibrary))stop("centermz is not in library file!") mzLib<-Intervals_full(cbind(MSlibrary$centermz- ppmCal(MSlibrary$centermz,precursorPPMTol), MSlibrary$centermz+ ppmCal(MSlibrary$centermz,precursorPPMTol))) if(!"centerrt"%in%colnames(MSlibrary))stop("centerrt is not in library file!") rtLib<-Intervals_full(cbind(MSlibrary$centerrt- precursorRTTol, MSlibrary$centerrt+ precursorRTTol)) } # do precursor mass search Mass_iii <- interval_overlap(mzTarget,mzLib) # do precursor mass search Time_ii <- interval_overlap(rtTarget,rtLib) # check if there is any hit ?! imatch = mapply(intersect,Time_ii,Mass_iii) foundHit<-length(imatch[[1]])>0 # create an empty array for the results results<-c() # compareSpectra needs relative deviation in fractions fragmentPPMTol<-fragmentPPMTol/1000000 if(foundHit) { cat("Number of hits: ",length(imatch),"\n") for(i in imatch) { hitTMP<-MSlibrary[i,] if(hitTMP[,"MS2mzs"]!="") { # tmpResults<-c() # for(j in 1:length(parentmzs)) { #MS2sTMPLib<-parentMS2s[[j]] tempmz<-as.numeric(strsplit(hitTMP[,"MS2mzs"],split = ";",fixed = T)[[1]]) tempint<-as.numeric(strsplit(hitTMP[,"MS2intensities"],split = ";",fixed = T)[[1]]) tempmz<-tempmz[tempint!=0] tempint<-tempint[tempint!=0] # if all the peaks were zero, skip rest of the loop! 
if(length(tempint)<1)next # extract name of the metabolite hitName<-hitTMP[,"Name"] hitChI<-NA Identifier<-ifelse(test = is.null(results),yes = 1,no = (nrow(results)+1)) fileName<-parentFile parentMZ<-precursorMZ parentRT<-precursorRT restOfLibInformation<-hitTMP[,c("startRT","endRT","startMZ","endMZ","centermz","centerrt","intensity","MS2fileName" ,"ID","Name", "nmass","MS2mz","MS2rt","MS2intensity","featureGroup" )] featureGroup<-hitTMP[,"featureGroup"] nmass<-as.numeric(hitTMP[,"nmass"]) MS1mzTolTh<-NA MS1mzTolTh<-((parentMZ-(nmass))/(nmass))*1000000 MS1RTTol<-NA centerRT<-as.numeric(hitTMP[,"centerrt"]) MS1RTTol<-parentRT-centerRT # create MS2 object libMS2Obj<-new("Spectrum2", mz=tempmz, intensity=tempint) ## output three types of scores: dotproduct, common peaks and correlation (dotproduct will be our main score) dotPeaks<-NA tryCatch({ dotPeaks<-compareSpectra(targetMS2, libMS2Obj, fun="dotproduct",binSize =fragmentabsTol) }, warning = function(w) { }, error = function(e) { }, finally = { }) nPeakCommon<-NA tryCatch({ nPeakCommon<-compareSpectra(targetMS2, libMS2Obj, fun="common",tolerance =fragmentPPMTol,relative = TRUE) }, warning = function(w) { }, error = function(e) { }, finally = { }) corPeaks<-NA tryCatch({ corPeaks<-compareSpectra(targetMS2, libMS2Obj, fun="cor",binSize =fragmentabsTol) }, warning = function(w) { }, error = function(e) { }, finally = { }) ## # if requrested create a decoyDatabase for this specific MS2 and estimate "e-value" # this will repeat the whole process but for rest of the peaks # it will take LONG time! decoyScore<-list() decoyScore[["dotproduct"]]<-c() decoyScore[["common"]]<-c() decoyScore[["cor"]]<-c() dotPeaksDecoy<-NA nPeakCommonDecoy<-NA corPeaksDecoy<-NA if(outputSemiDecoy) { decoyLib<-MSlibrary[-as.vector(imatch),] decoyScoresTMP<-c() start_time <- Sys.time() allMzs<-as.character(decoyLib[,"MS2mzs"]) allInts<-as.character(decoyLib[,"MS2intensities"]) rowNumbersDecoy<-1:nrow(decoyLib) if(resample>0) { set.seed(resample) rowNumbersDecoy<-sample(x = rowNumbersDecoy,size = resample,replace = F) } for(k in rowNumbersDecoy) { tempmzDecoy<-as.numeric(strsplit(allMzs[k],split = ";",fixed = T)[[1]]) tempintDecoy<-as.numeric(strsplit(allInts[k],split = ";",fixed = T)[[1]]) tempmzDecoy<-tempmzDecoy[tempintDecoy!=0] tempintDecoy<-tempintDecoy[tempintDecoy!=0] tempDecoy<- matrix(c(tempmzDecoy,tempintDecoy), nrow = length(tempintDecoy), dimnames = list(1:length(tempintDecoy), c("mz","intensity"))) dotPeaksDecoy<-NA nPeakCommonDecoy<-NA tryCatch({ tmp<-c(NA,NA) tmp<-compare_Spectra2(targetMS2DataFrame, tempDecoy, fun="dotproduct",binSize =fragmentabsTol) dotPeaksDecoy<-tmp[1] corPeaksDecoy<-tmp[2] }, warning = function(w) { }, error = function(e) { }, finally = { }) nPeakCommonDecoy<-NA tryCatch({ nPeakCommonDecoy<-compare_Spectra2(targetMS2DataFrame, tempDecoy, fun="common",tolerance =fragmentPPMTol,relative = TRUE) }, warning = function(w) { }, error = function(e) { }, finally = { }) decoyScore[["dotproduct"]]<-c(decoyScore[["dotproduct"]],dotPeaksDecoy) decoyScore[["common"]]<-c(decoyScore[["common"]],nPeakCommonDecoy) decoyScore[["cor"]]<-c(decoyScore[["cor"]],corPeaksDecoy) } decoyScore[["dotproduct"]]<-na.omit(decoyScore[["dotproduct"]]) decoyScore[["common"]]<-na.omit(decoyScore[["common"]]) decoyScore[["cor"]]<-na.omit(decoyScore[["cor"]]) dotPeaksDecoy<-NA nPeakCommonDecoy<-NA corPeaksDecoy<-NA if(!is.na(dotPeaks)) dotPeaksDecoy<-sum(decoyScore[["dotproduct"]]>dotPeaks)/length(decoyScore[["dotproduct"]] ) if(!is.na(nPeakCommon)) 
nPeakCommonDecoy<-sum(decoyScore[["common"]]>nPeakCommon)/length(decoyScore[["common"]]) if(!is.na(corPeaks)) corPeaksDecoy<-sum(decoyScore[["cor"]]>corPeaks)/length(decoyScore[["cor"]]) } results<-rbind(results, data.frame(fileName=parentFile,parentMZ=parentMZ,parentRT=parentRT,Name=fileName,Identifier=Identifier,InChI=NA, MS1mzTolTh,MS1RTTol, Scoredotproduct=dotPeaks,Scorecommon=nPeakCommon,ScoreCorrelation=corPeaks, ScoredotproductEValue=dotPeaksDecoy,ScorecommonEValue=nPeakCommonDecoy,ScoreCorrelationEValue=corPeaksDecoy, score=dotPeaks,scoreEValue=dotPeaksDecoy,restOfLibInformation,MS1RTTol=MS1RTTol,featureGroup=featureGroup)) } } } # limit the results as requrested by user: tophit:-1 = all, tophit:0 = top, tophit:>0 = top tophits score higher the better (for now) resTMP<-c() if(topHits!=-1 & nrow(results)>1) { if(topHits==0) { for(groupNumber in unique(results[,"featureGroup"])) { tmpResults<-data.frame(results[results[,"featureGroup"]==groupNumber,]) resTMP<-rbind(resTMP,tmpResults[which.max(tmpResults[,topScore]),]) } }else{ for(groupNumber in unique(results[,"featureGroup"])) { tmpResults<-data.frame(results[results[,"featureGroup"]==groupNumber,]) resTMP<-rbind(resTMP,tmpResults[order(tmpResults[,topScore],decreasing = T,na.last = T),][seq(1,topHits),]) } } results<-resTMP } write.csv(x =results, outputCSV) }else{file.create(outputCSV)} }else{ file.create(outputCSV) } } # parsing the arguments args <- parser$parse_args() # args<-list() # args$inputMS2<-"res.zip" # args$inputLibrary<-"library.csv" # args$outputCSV<-"ot.csv" # args$verbose<-T # args$numberOfCores<-5 if ( args$verbose ) { write("Checking if the inputs and outputs have been given ...\n", stdout()) } # check if the input MS2 has been given if(is.null(args$inputMS2)) { errorMessage<-"No inputMS2 file has been specified. You MUST specify the input file see the help (-h)!" write(errorMessage,stderr()) stop(errorMessage, call. = FALSE) } # check if the input inputLibrary has been given if(is.null(args$inputLibrary)) { errorMessage<-"No library file has been specified. You MUST specify the library file see the help (-h)!" write(errorMessage,stderr()) stop(errorMessage, call. = FALSE) } # check if the input outputCSV has been given if(is.null(args$outputCSV)) { errorMessage<-"No output file has been specified. You MUST specify the output file see the help (-h)!" write(errorMessage,stderr()) stop(errorMessage, call. = FALSE) } # this is a helper function to fix the input names and output names # This is only used in parallel model and when TMP dir has been specified. prepareOut<-function(argsIn,input,outputDIR) { suppressWarnings(library("argparse")) suppressWarnings(library("MSnbase")) suppressWarnings(library("intervals")) suppressWarnings(library("tools")) suppressWarnings(library("BiocParallel")) argsIn$inputMS2<-input # print(input) argsIn$outputCSV<-paste(outputDIR,"/",file_path_sans_ext(basename(input)),".csv",sep="") argsIn$readNameOftheMS2<-basename(input) #print(argsIn) doSearch(argsIn) } # now if the file extension is zip we assume it has a number of MS2 files in it. 
# we unzip to a temp folder and go forward with rest of the pipeline if(file_ext(args$inputMS2)=="zip" | sum(file_ext(args$readNameOftheMS2)=="zip")==1){ if ( args$verbose ) { write("Seems like zip file have been provided, unzipping ...\n", stdout()) } if ( args$verbose ) { write("creating temp folder ...\n", stdout()) } baseTMP<-"tmp" dir.create(baseTMP) tmpDIR<-paste(baseTMP,"/","inputs",sep = "") dir.create(tmpDIR) # file.copy(list.files("wetransfer-a2e025/res/out/")[4000:5000],tmpDIR) #tmpDIR<-"wetransfer-a2e025/res/out/" if ( args$verbose ) { write("Unzipping the file ...\n", stdout()) } unzip(args$inputMS2, overwrite = TRUE, junkpaths = TRUE, exdir = tmpDIR, unzip = "internal", setTimes = FALSE) if ( args$verbose ) { write("The data has been unzipped, reading list of the MS2 files ...\n", stdout()) } # Get a the list of uinzip files AllMS2Files<-list.files(tmpDIR,full.names = T) if ( args$verbose ) { write("Done!\n", stdout()) } if ( args$verbose ) { write("Reading the library file ...\n", stdout()) } # We load the whole database in the memory instead of reading it for every single MS2. args$libdata<-read.csv(args$inputLibrary,stringsAsFactors = F) print(dim(args$libdata)) if ( args$verbose ) { write("Done!\n", stdout()) } if ( args$verbose ) { write("Setting Snow parameters ...\n", stdout()) } # we set thge Snow parameters snow<-SnowParam(workers = 1, type = "SOCK") # More than 1 core is requested, we try to set it. if(args$numberOfCores>1) { snow <- SnowParam(workers = args$numberOfCores, type = "SOCK") } if ( args$verbose ) { write("Done!\n", stdout()) } if ( args$verbose ) { write("Creating a temp folder for the results ...\n", stdout()) } tmpDIROut<-paste(baseTMP,"/","outputs",sep = "") #tmpDIROut<-"test" dir.create(tmpDIROut) print(tmpDIROut) # tmpDIR if ( args$verbose ) { write("Done!\n", stdout()) } if ( args$verbose ) { write("Running the database search ...\n", stdout()) } # this will iterate through the MS2 files and run them. bplapply(AllMS2Files, FUN = function(x){prepareOut(args,input = x,outputDIR = tmpDIROut)},BPPARAM = snow) if ( args$verbose ) { write("Done!\n", stdout()) } # if the user wants to aggregate the results right here if(!args$split) { if ( args$verbose ) { write("Combining the results of the MS2s ...\n", stdout()) } inputs<-list.files(tmpDIROut,full.names = T) realNamesTMP<-inputs allMS2IDs<-c() for(i in 1:length(inputs)) { # check if the file is empty info = file.info(inputs[i]) if(info$size!=0 & !is.na(info$size)) { tmpFile<-read.csv(inputs[i]) # check if the file has any IDs if(nrow(tmpFile)>0) { # Extract mz and rt from the real file names rt<-as.numeric(strsplit(x = realNamesTMP[i],split = "_",fixed = T)[[1]][2]) mz<-as.numeric(strsplit(x = realNamesTMP[i],split = "_",fixed = T)[[1]][3]) allMS2IDs<-rbind(allMS2IDs,data.frame(parentMZ=mz,parentRT=rt,tmpFile)) } } } if ( args$verbose ) { write("Done!\n", stdout()) } if ( args$verbose ) { write("Writing the results ...\n", stdout()) } if(args$outTable) { if(is.null(allMS2IDs) || nrow(allMS2IDs)<1) { file.create(args$outputCSV) }else{ write.table(x=allMS2IDs,file=args$outputCSV,quote=F,sep="\t") } }else{ if(is.null(allMS2IDs) || nrow(allMS2IDs)<1) { file.create(args$outputCSV) }else{ write.csv(x = allMS2IDs,file = args$outputCSV) } } }else{ if ( args$verbose ) { write("Writing the results ...\n", stdout()) } file.copy(list.files(tmpDIROut,full.names = T),getwd()) } }else{ doSearch(args) } } # running the main function. This is to make parallel stuff easier. Otherwise, not other value :) main()
[STATEMENT]
lemma a_star_AndRL:
  "M \<longrightarrow>\<^sub>a* M' \<Longrightarrow> AndR <a>.M <b>.N c \<longrightarrow>\<^sub>a* AndR <a>.M' <b>.N c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. M \<longrightarrow>\<^sub>a* M' \<Longrightarrow> AndR <a>.M <b>.N c \<longrightarrow>\<^sub>a* AndR <a>.M' <b>.N c
[PROOF STEP]
by (induct set: rtranclp) (blast intro: rtranclp.rtrancl_into_rtrancl)+
#Open psv file
fil <- file(file.path(data.path, "sms.psv"), "rb")
#Read dimension of vector = number of parameters
dim <- readBin(fil, integer(), 1)
dim
#read values, assume a very high number of observations
psv <- readBin(fil, numeric(), 1E6)
close(fil)

# calc number of MCMC set
dim2 <- length(psv) / dim
#reformat
psv <- matrix(psv, dim, dim2)
CV <- round(abs(apply(psv, 1, sd) / apply(psv, 1, mean) * 100))

a <- read.table(file.path(data.path, "sms.std"), comment.char = "#", header = FALSE, skip = 1)
tmp <- data.frame(index = a$V1, name = a$V2, mean = a$V3, CV.round = round(a$V4 / a$V3 * 100), std = a$V4)
print(tmp)

cleanup()
dev <- "screen"
nox <- 4
noy <- 4
newplot(dev, nox, noy)
j <- 1
first <- 600
last <- 789
for (i in (first:last)) {
  j <- j + 1
  if (j == noxy) { newplot(dev, nox, noy); j <<- 0 }
  plot(density(psv[i, ]), main = paste(i, tmp[i, 'name']))
  # plot((psv[i,]), main = paste("par:", i), type = 'l', ylab = '')
}
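A minimal sketch of the same read in Python, assuming only the sms.psv layout the R code above implies (one native 4-byte integer giving the number of parameters, followed by 8-byte doubles holding the MCMC draws one after another); the helper name read_psv is purely illustrative:

import numpy as np

def read_psv(path):
    # Layout assumption: an int32 parameter count, then float64 draws.
    with open(path, "rb") as fh:
        n_par = int(np.fromfile(fh, dtype=np.int32, count=1)[0])
        draws = np.fromfile(fh, dtype=np.float64)
    n_sets = draws.size // n_par                                # number of complete MCMC sets
    # parameters x draws, matching R's column-major matrix(psv, dim, dim2)
    psv = draws[: n_par * n_sets].reshape(n_sets, n_par).T
    # coefficient of variation per parameter, as in the R script (ddof=1 matches R's sd)
    cv = np.round(np.abs(psv.std(axis=1, ddof=1) / psv.mean(axis=1) * 100))
    return psv, cv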
import feather_model.basic

open_locale classical
open feather_model

@[reducible] def 𝕋 := mterm (finset V)

/-- To establish a base case for the model, we create the "empty" model level. -/
instance : term_struct (finset V) :=
{ var := λ v, {v},
  bound := id,
  subst := λ v e f, if v ∈ f then f.erase v ∪ e else f,
  is_type := λ _ _, true,
  runtime_ok := λ _, true,
  rir_ok := λ _, true,
  runtime_judgments := λ _, true,
  rir_judgments := λ _, true,
  defeq := λ _ _ _ _, true,
  sort := λ _, ∅,
  representable := λ s f, f, }

instance : type_data (finset V) := ⟨λ _ _, true, λ _, true⟩

instance : term (finset V) :=
begin
  refine_struct { .. }; intros; try { trivial },
  { unfold subst, rw if_neg, assumption, },
  { unfold subst, rw if_pos, refl, assumption, },
end

example : term_struct 𝕋 := infer_instance
[GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G ⊢ DifferentiableWithinAt 𝕜 (↑iso ∘ f) s x ↔ DifferentiableWithinAt 𝕜 f s x [PROOFSTEP] refine' ⟨fun H => _, fun H => iso.differentiable.differentiableAt.comp_differentiableWithinAt x H⟩ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G H : DifferentiableWithinAt 𝕜 (↑iso ∘ f) s x ⊢ DifferentiableWithinAt 𝕜 f s x [PROOFSTEP] have : DifferentiableWithinAt 𝕜 (iso.symm ∘ iso ∘ f) s x := iso.symm.differentiable.differentiableAt.comp_differentiableWithinAt x H [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G H : DifferentiableWithinAt 𝕜 (↑iso ∘ f) s x this : DifferentiableWithinAt 𝕜 (↑(ContinuousLinearEquiv.symm iso) ∘ ↑iso ∘ f) s x ⊢ DifferentiableWithinAt 𝕜 f s x [PROOFSTEP] rwa [← Function.comp.assoc iso.symm iso f, iso.symm_comp_self] at this [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G ⊢ DifferentiableAt 𝕜 (↑iso ∘ f) x ↔ DifferentiableAt 𝕜 f x [PROOFSTEP] rw [← differentiableWithinAt_univ, ← differentiableWithinAt_univ, iso.comp_differentiableWithinAt_iff] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G ⊢ DifferentiableOn 𝕜 (↑iso ∘ f) s ↔ DifferentiableOn 𝕜 f s [PROOFSTEP] rw [DifferentiableOn, DifferentiableOn] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' 
f₀' f₁' g' e : E →L[𝕜] F x : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G ⊢ (∀ (x : G), x ∈ s → DifferentiableWithinAt 𝕜 (↑iso ∘ f) s x) ↔ ∀ (x : G), x ∈ s → DifferentiableWithinAt 𝕜 f s x [PROOFSTEP] simp only [iso.comp_differentiableWithinAt_iff] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E ⊢ Differentiable 𝕜 (↑iso ∘ f) ↔ Differentiable 𝕜 f [PROOFSTEP] rw [← differentiableOn_univ, ← differentiableOn_univ] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E ⊢ DifferentiableOn 𝕜 (↑iso ∘ f) univ ↔ DifferentiableOn 𝕜 f univ [PROOFSTEP] exact iso.comp_differentiableOn_iff [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] E ⊢ HasFDerivWithinAt (↑iso ∘ f) (comp (↑iso) f') s x ↔ HasFDerivWithinAt f f' s x [PROOFSTEP] refine' ⟨fun H => _, fun H => iso.hasFDerivAt.comp_hasFDerivWithinAt x H⟩ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] E H : HasFDerivWithinAt (↑iso ∘ f) (comp (↑iso) f') s x ⊢ HasFDerivWithinAt f f' s x [PROOFSTEP] have A : f = iso.symm ∘ iso ∘ f := by rw [← Function.comp.assoc, iso.symm_comp_self] rfl [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] E H : HasFDerivWithinAt (↑iso ∘ f) (comp (↑iso) f') s x ⊢ f = ↑(ContinuousLinearEquiv.symm iso) ∘ ↑iso ∘ f [PROOFSTEP] rw [← Function.comp.assoc, iso.symm_comp_self] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G 
inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] E H : HasFDerivWithinAt (↑iso ∘ f) (comp (↑iso) f') s x ⊢ f = _root_.id ∘ f [PROOFSTEP] rfl [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] E H : HasFDerivWithinAt (↑iso ∘ f) (comp (↑iso) f') s x A : f = ↑(ContinuousLinearEquiv.symm iso) ∘ ↑iso ∘ f ⊢ HasFDerivWithinAt f f' s x [PROOFSTEP] have B : f' = (iso.symm : F →L[𝕜] E).comp ((iso : E →L[𝕜] F).comp f') := by rw [← ContinuousLinearMap.comp_assoc, iso.coe_symm_comp_coe, ContinuousLinearMap.id_comp] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] E H : HasFDerivWithinAt (↑iso ∘ f) (comp (↑iso) f') s x A : f = ↑(ContinuousLinearEquiv.symm iso) ∘ ↑iso ∘ f ⊢ f' = comp (↑(ContinuousLinearEquiv.symm iso)) (comp (↑iso) f') [PROOFSTEP] rw [← ContinuousLinearMap.comp_assoc, iso.coe_symm_comp_coe, ContinuousLinearMap.id_comp] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] E H : HasFDerivWithinAt (↑iso ∘ f) (comp (↑iso) f') s x A : f = ↑(ContinuousLinearEquiv.symm iso) ∘ ↑iso ∘ f B : f' = comp (↑(ContinuousLinearEquiv.symm iso)) (comp (↑iso) f') ⊢ HasFDerivWithinAt f f' s x [PROOFSTEP] rw [A, B] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] E H : HasFDerivWithinAt (↑iso ∘ f) (comp (↑iso) f') s x A : f = ↑(ContinuousLinearEquiv.symm iso) ∘ ↑iso ∘ f B : f' = comp (↑(ContinuousLinearEquiv.symm iso)) (comp (↑iso) f') ⊢ HasFDerivWithinAt (↑(ContinuousLinearEquiv.symm iso) ∘ ↑iso ∘ f) (comp (↑(ContinuousLinearEquiv.symm iso)) (comp (↑iso) f')) s x [PROOFSTEP] exact iso.symm.hasFDerivAt.comp_hasFDerivWithinAt x H [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : 
NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G f' : G →L[𝕜] E ⊢ HasStrictFDerivAt (↑iso ∘ f) (comp (↑iso) f') x ↔ HasStrictFDerivAt f f' x [PROOFSTEP] refine' ⟨fun H => _, fun H => iso.hasStrictFDerivAt.comp x H⟩ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G f' : G →L[𝕜] E H : HasStrictFDerivAt (↑iso ∘ f) (comp (↑iso) f') x ⊢ HasStrictFDerivAt f f' x [PROOFSTEP] convert iso.symm.hasStrictFDerivAt.comp x H using 1 [GOAL] case h.e'_9 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G f' : G →L[𝕜] E H : HasStrictFDerivAt (↑iso ∘ f) (comp (↑iso) f') x ⊢ f = fun x => ↑(ContinuousLinearEquiv.symm iso) ((↑iso ∘ f) x) [PROOFSTEP] ext z [GOAL] case h.e'_10 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G f' : G →L[𝕜] E H : HasStrictFDerivAt (↑iso ∘ f) (comp (↑iso) f') x ⊢ f' = comp (↑(ContinuousLinearEquiv.symm iso)) (comp (↑iso) f') [PROOFSTEP] ext z [GOAL] case h.e'_9.h 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G f' : G →L[𝕜] E H : HasStrictFDerivAt (↑iso ∘ f) (comp (↑iso) f') x z : G ⊢ f z = ↑(ContinuousLinearEquiv.symm iso) ((↑iso ∘ f) z) [PROOFSTEP] apply (iso.symm_apply_apply _).symm [GOAL] case h.e'_10.h 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G f' : G →L[𝕜] E H : HasStrictFDerivAt (↑iso ∘ f) (comp (↑iso) f') x z : G ⊢ ↑f' z = ↑(comp (↑(ContinuousLinearEquiv.symm iso)) (comp (↑iso) f')) z [PROOFSTEP] 
apply (iso.symm_apply_apply _).symm [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G f' : G →L[𝕜] E ⊢ HasFDerivAt (↑iso ∘ f) (comp (↑iso) f') x ↔ HasFDerivAt f f' x [PROOFSTEP] simp_rw [← hasFDerivWithinAt_univ, iso.comp_hasFDerivWithinAt_iff] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G f' : G →L[𝕜] F ⊢ HasFDerivWithinAt (↑iso ∘ f) f' s x ↔ HasFDerivWithinAt f (comp (↑(ContinuousLinearEquiv.symm iso)) f') s x [PROOFSTEP] rw [← iso.comp_hasFDerivWithinAt_iff, ← ContinuousLinearMap.comp_assoc, iso.coe_comp_coe_symm, ContinuousLinearMap.id_comp] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G f' : G →L[𝕜] F ⊢ HasFDerivAt (↑iso ∘ f) f' x ↔ HasFDerivAt f (comp (↑(ContinuousLinearEquiv.symm iso)) f') x [PROOFSTEP] simp_rw [← hasFDerivWithinAt_univ, iso.comp_hasFDerivWithinAt_iff'] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G hxs : UniqueDiffWithinAt 𝕜 s x ⊢ fderivWithin 𝕜 (↑iso ∘ f) s x = comp (↑iso) (fderivWithin 𝕜 f s x) [PROOFSTEP] by_cases h : DifferentiableWithinAt 𝕜 f s x [GOAL] case pos 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G hxs : UniqueDiffWithinAt 𝕜 s x h : DifferentiableWithinAt 𝕜 f s x ⊢ fderivWithin 𝕜 (↑iso ∘ f) s x = comp (↑iso) (fderivWithin 𝕜 f s x) [PROOFSTEP] rw [fderiv.comp_fderivWithin x iso.differentiableAt h hxs, iso.fderiv] [GOAL] case neg 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : 
NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G hxs : UniqueDiffWithinAt 𝕜 s x h : ¬DifferentiableWithinAt 𝕜 f s x ⊢ fderivWithin 𝕜 (↑iso ∘ f) s x = comp (↑iso) (fderivWithin 𝕜 f s x) [PROOFSTEP] have : ¬DifferentiableWithinAt 𝕜 (iso ∘ f) s x := mt iso.comp_differentiableWithinAt_iff.1 h [GOAL] case neg 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E s : Set G x : G hxs : UniqueDiffWithinAt 𝕜 s x h : ¬DifferentiableWithinAt 𝕜 f s x this : ¬DifferentiableWithinAt 𝕜 (↑iso ∘ f) s x ⊢ fderivWithin 𝕜 (↑iso ∘ f) s x = comp (↑iso) (fderivWithin 𝕜 f s x) [PROOFSTEP] rw [fderivWithin_zero_of_not_differentiableWithinAt h, fderivWithin_zero_of_not_differentiableWithinAt this, ContinuousLinearMap.comp_zero] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G ⊢ fderiv 𝕜 (↑iso ∘ f) x = comp (↑iso) (fderiv 𝕜 f x) [PROOFSTEP] rw [← fderivWithin_univ, ← fderivWithin_univ] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : G → E x : G ⊢ fderivWithin 𝕜 (↑iso ∘ f) univ x = comp (↑iso) (fderivWithin 𝕜 f univ x) [PROOFSTEP] exact iso.comp_fderivWithin uniqueDiffWithinAt_univ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E ⊢ DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x ↔ DifferentiableWithinAt 𝕜 f s (↑iso x) [PROOFSTEP] refine' ⟨fun H => _, fun H => H.comp x iso.differentiableWithinAt (mapsTo_preimage _ s)⟩ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E H : DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso 
⁻¹' s) x ⊢ DifferentiableWithinAt 𝕜 f s (↑iso x) [PROOFSTEP] have : DifferentiableWithinAt 𝕜 ((f ∘ iso) ∘ iso.symm) s (iso x) := by rw [← iso.symm_apply_apply x] at H apply H.comp (iso x) iso.symm.differentiableWithinAt intro y hy simpa only [mem_preimage, apply_symm_apply] using hy [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E H : DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x ⊢ DifferentiableWithinAt 𝕜 ((f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso)) s (↑iso x) [PROOFSTEP] rw [← iso.symm_apply_apply x] at H [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E H : DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) ⊢ DifferentiableWithinAt 𝕜 ((f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso)) s (↑iso x) [PROOFSTEP] apply H.comp (iso x) iso.symm.differentiableWithinAt [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E H : DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) ⊢ MapsTo (↑(ContinuousLinearEquiv.symm iso)) s (↑iso ⁻¹' s) [PROOFSTEP] intro y hy [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E H : DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) y : F hy : y ∈ s ⊢ ↑(ContinuousLinearEquiv.symm iso) y ∈ ↑iso ⁻¹' s [PROOFSTEP] simpa only [mem_preimage, apply_symm_apply] using hy [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E H : DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x this : DifferentiableWithinAt 𝕜 ((f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm 
iso)) s (↑iso x) ⊢ DifferentiableWithinAt 𝕜 f s (↑iso x) [PROOFSTEP] rwa [Function.comp.assoc, iso.self_comp_symm] at this [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G x : E ⊢ DifferentiableAt 𝕜 (f ∘ ↑iso) x ↔ DifferentiableAt 𝕜 f (↑iso x) [PROOFSTEP] simp only [← differentiableWithinAt_univ, ← iso.comp_right_differentiableWithinAt_iff, preimage_univ] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F ⊢ DifferentiableOn 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) ↔ DifferentiableOn 𝕜 f s [PROOFSTEP] refine' ⟨fun H y hy => _, fun H y hy => iso.comp_right_differentiableWithinAt_iff.2 (H _ hy)⟩ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F H : DifferentiableOn 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) y : F hy : y ∈ s ⊢ DifferentiableWithinAt 𝕜 f s y [PROOFSTEP] rw [← iso.apply_symm_apply y, ← comp_right_differentiableWithinAt_iff] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F H : DifferentiableOn 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) y : F hy : y ∈ s ⊢ DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) y) [PROOFSTEP] apply H [GOAL] case a 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F H : DifferentiableOn 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) y : F hy : y ∈ s ⊢ ↑(ContinuousLinearEquiv.symm iso) y ∈ ↑iso ⁻¹' s [PROOFSTEP] simpa only [mem_preimage, apply_symm_apply] using hy [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : 
NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G ⊢ Differentiable 𝕜 (f ∘ ↑iso) ↔ Differentiable 𝕜 f [PROOFSTEP] simp only [← differentiableOn_univ, ← iso.comp_right_differentiableOn_iff, preimage_univ] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G ⊢ HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) x ↔ HasFDerivWithinAt f f' s (↑iso x) [PROOFSTEP] refine' ⟨fun H => _, fun H => H.comp x iso.hasFDerivWithinAt (mapsTo_preimage _ s)⟩ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) x ⊢ HasFDerivWithinAt f f' s (↑iso x) [PROOFSTEP] rw [← iso.symm_apply_apply x] at H [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) ⊢ HasFDerivWithinAt f f' s (↑iso x) [PROOFSTEP] have A : f = (f ∘ iso) ∘ iso.symm := by rw [Function.comp.assoc, iso.self_comp_symm] rfl [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) ⊢ f = (f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso) [PROOFSTEP] rw [Function.comp.assoc, iso.self_comp_symm] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) 
(↑(ContinuousLinearEquiv.symm iso) (↑iso x)) ⊢ f = f ∘ _root_.id [PROOFSTEP] rfl [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) A : f = (f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso) ⊢ HasFDerivWithinAt f f' s (↑iso x) [PROOFSTEP] have B : f' = (f'.comp (iso : E →L[𝕜] F)).comp (iso.symm : F →L[𝕜] E) := by rw [ContinuousLinearMap.comp_assoc, iso.coe_comp_coe_symm, ContinuousLinearMap.comp_id] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) A : f = (f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso) ⊢ f' = comp (comp f' ↑iso) ↑(ContinuousLinearEquiv.symm iso) [PROOFSTEP] rw [ContinuousLinearMap.comp_assoc, iso.coe_comp_coe_symm, ContinuousLinearMap.comp_id] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) A : f = (f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso) B : f' = comp (comp f' ↑iso) ↑(ContinuousLinearEquiv.symm iso) ⊢ HasFDerivWithinAt f f' s (↑iso x) [PROOFSTEP] rw [A, B] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) A : f = (f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso) B : f' = comp (comp f' ↑iso) ↑(ContinuousLinearEquiv.symm iso) ⊢ HasFDerivWithinAt ((f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso)) (comp (comp f' ↑iso) ↑(ContinuousLinearEquiv.symm iso)) s (↑iso x) [PROOFSTEP] apply H.comp (iso x) iso.symm.hasFDerivWithinAt [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : 
NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) A : f = (f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso) B : f' = comp (comp f' ↑iso) ↑(ContinuousLinearEquiv.symm iso) ⊢ MapsTo (↑(ContinuousLinearEquiv.symm iso)) s (↑iso ⁻¹' s) [PROOFSTEP] intro y hy [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : F →L[𝕜] G H : HasFDerivWithinAt (f ∘ ↑iso) (comp f' ↑iso) (↑iso ⁻¹' s) (↑(ContinuousLinearEquiv.symm iso) (↑iso x)) A : f = (f ∘ ↑iso) ∘ ↑(ContinuousLinearEquiv.symm iso) B : f' = comp (comp f' ↑iso) ↑(ContinuousLinearEquiv.symm iso) y : F hy : y ∈ s ⊢ ↑(ContinuousLinearEquiv.symm iso) y ∈ ↑iso ⁻¹' s [PROOFSTEP] simpa only [mem_preimage, apply_symm_apply] using hy [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G x : E f' : F →L[𝕜] G ⊢ HasFDerivAt (f ∘ ↑iso) (comp f' ↑iso) x ↔ HasFDerivAt f f' (↑iso x) [PROOFSTEP] simp only [← hasFDerivWithinAt_univ, ← comp_right_hasFDerivWithinAt_iff, preimage_univ] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E f' : E →L[𝕜] G ⊢ HasFDerivWithinAt (f ∘ ↑iso) f' (↑iso ⁻¹' s) x ↔ HasFDerivWithinAt f (comp f' ↑(ContinuousLinearEquiv.symm iso)) s (↑iso x) [PROOFSTEP] rw [← iso.comp_right_hasFDerivWithinAt_iff, ContinuousLinearMap.comp_assoc, iso.coe_symm_comp_coe, ContinuousLinearMap.comp_id] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G x : E f' : E →L[𝕜] G ⊢ HasFDerivAt (f ∘ ↑iso) f' x ↔ HasFDerivAt f (comp f' ↑(ContinuousLinearEquiv.symm iso)) (↑iso x) [PROOFSTEP] simp only [← hasFDerivWithinAt_univ, ← iso.comp_right_hasFDerivWithinAt_iff', preimage_univ] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : 
NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E hxs : UniqueDiffWithinAt 𝕜 (↑iso ⁻¹' s) x ⊢ fderivWithin 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x = comp (fderivWithin 𝕜 f s (↑iso x)) ↑iso [PROOFSTEP] by_cases h : DifferentiableWithinAt 𝕜 f s (iso x) [GOAL] case pos 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E hxs : UniqueDiffWithinAt 𝕜 (↑iso ⁻¹' s) x h : DifferentiableWithinAt 𝕜 f s (↑iso x) ⊢ fderivWithin 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x = comp (fderivWithin 𝕜 f s (↑iso x)) ↑iso [PROOFSTEP] exact (iso.comp_right_hasFDerivWithinAt_iff.2 h.hasFDerivWithinAt).fderivWithin hxs [GOAL] case neg 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E hxs : UniqueDiffWithinAt 𝕜 (↑iso ⁻¹' s) x h : ¬DifferentiableWithinAt 𝕜 f s (↑iso x) ⊢ fderivWithin 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x = comp (fderivWithin 𝕜 f s (↑iso x)) ↑iso [PROOFSTEP] have : ¬DifferentiableWithinAt 𝕜 (f ∘ iso) (iso ⁻¹' s) x := by intro h' exact h (iso.comp_right_differentiableWithinAt_iff.1 h') [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E hxs : UniqueDiffWithinAt 𝕜 (↑iso ⁻¹' s) x h : ¬DifferentiableWithinAt 𝕜 f s (↑iso x) ⊢ ¬DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x [PROOFSTEP] intro h' [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E hxs : UniqueDiffWithinAt 𝕜 (↑iso ⁻¹' s) x h : ¬DifferentiableWithinAt 𝕜 f s (↑iso x) h' : DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x ⊢ False [PROOFSTEP] exact h (iso.comp_right_differentiableWithinAt_iff.1 h') [GOAL] case neg 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : 
NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s✝ t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G s : Set F x : E hxs : UniqueDiffWithinAt 𝕜 (↑iso ⁻¹' s) x h : ¬DifferentiableWithinAt 𝕜 f s (↑iso x) this : ¬DifferentiableWithinAt 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x ⊢ fderivWithin 𝕜 (f ∘ ↑iso) (↑iso ⁻¹' s) x = comp (fderivWithin 𝕜 f s (↑iso x)) ↑iso [PROOFSTEP] rw [fderivWithin_zero_of_not_differentiableWithinAt h, fderivWithin_zero_of_not_differentiableWithinAt this, ContinuousLinearMap.zero_comp] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G x : E ⊢ fderiv 𝕜 (f ∘ ↑iso) x = comp (fderiv 𝕜 f (↑iso x)) ↑iso [PROOFSTEP] rw [← fderivWithin_univ, ← fderivWithin_univ, ← iso.comp_right_fderivWithin, preimage_univ] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E iso : E ≃L[𝕜] F f : F → G x : E ⊢ UniqueDiffWithinAt 𝕜 (↑iso ⁻¹' univ) x [PROOFSTEP] exact uniqueDiffWithinAt_univ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasStrictFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y ⊢ HasStrictFDerivAt g (↑(ContinuousLinearEquiv.symm f')) a [PROOFSTEP] replace hg := hg.prod_map' hg [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) ⊢ HasStrictFDerivAt g (↑(ContinuousLinearEquiv.symm f')) a [PROOFSTEP] replace hfg := hfg.prod_mk_nhds hfg [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : 
Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd ⊢ HasStrictFDerivAt g (↑(ContinuousLinearEquiv.symm f')) a [PROOFSTEP] have : (fun p : F × F => g p.1 - g p.2 - f'.symm (p.1 - p.2)) =O[𝓝 (a, a)] fun p : F × F => f' (g p.1 - g p.2) - (p.1 - p.2) := by refine' ((f'.symm : F →L[𝕜] E).isBigO_comp _ _).congr (fun x => _) fun _ => rfl simp [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd ⊢ (fun p => g p.fst - g p.snd - ↑(ContinuousLinearEquiv.symm f') (p.fst - p.snd)) =O[𝓝 (a, a)] fun p => ↑f' (g p.fst - g p.snd) - (p.fst - p.snd) [PROOFSTEP] refine' ((f'.symm : F →L[𝕜] E).isBigO_comp _ _).congr (fun x => _) fun _ => rfl [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd x : F × F ⊢ ↑↑(ContinuousLinearEquiv.symm f') (↑f' (g x.fst - g x.snd) - (x.fst - x.snd)) = g x.fst - g x.snd - ↑(ContinuousLinearEquiv.symm f') (x.fst - x.snd) [PROOFSTEP] simp [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd this : (fun p => g p.fst - g p.snd - ↑(ContinuousLinearEquiv.symm f') (p.fst - p.snd)) =O[𝓝 (a, a)] fun p => ↑f' (g p.fst - g p.snd) - (p.fst - p.snd) ⊢ HasStrictFDerivAt g (↑(ContinuousLinearEquiv.symm f')) a [PROOFSTEP] refine' this.trans_isLittleO _ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : 
ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd this : (fun p => g p.fst - g p.snd - ↑(ContinuousLinearEquiv.symm f') (p.fst - p.snd)) =O[𝓝 (a, a)] fun p => ↑f' (g p.fst - g p.snd) - (p.fst - p.snd) ⊢ (fun p => ↑f' (g p.fst - g p.snd) - (p.fst - p.snd)) =o[𝓝 (a, a)] fun p => p.fst - p.snd [PROOFSTEP] clear this [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd ⊢ (fun p => ↑f' (g p.fst - g p.snd) - (p.fst - p.snd)) =o[𝓝 (a, a)] fun p => p.fst - p.snd [PROOFSTEP] refine' ((hf.comp_tendsto hg).symm.congr' (hfg.mono _) (eventually_of_forall fun _ => rfl)).trans_isBigO _ [GOAL] case refine'_1 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd ⊢ ∀ (x : F × F), f (g x.fst) = x.fst ∧ f (g x.snd) = x.snd → (fun x => ↑↑f' (((fun p => (g p.fst, g p.snd)) x).fst - ((fun p => (g p.fst, g p.snd)) x).snd) - (f ((fun p => (g p.fst, g p.snd)) x).fst - f ((fun p => (g p.fst, g p.snd)) x).snd)) x = ↑f' (g x.fst - g x.snd) - (x.fst - x.snd) [PROOFSTEP] rintro p ⟨hp1, hp2⟩ [GOAL] case refine'_1.intro 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd p : F × F hp1 : f (g p.fst) = p.fst hp2 : f (g p.snd) = p.snd ⊢ (fun x => ↑↑f' (((fun p => (g p.fst, g p.snd)) x).fst - ((fun p => (g p.fst, g p.snd)) x).snd) - (f ((fun p => (g p.fst, g p.snd)) x).fst - f ((fun p => (g p.fst, g p.snd)) x).snd)) p = ↑f' (g p.fst - g p.snd) - (p.fst - p.snd) [PROOFSTEP] simp [hp1, hp2] [GOAL] case refine'_2 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : 
Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd ⊢ (fun x => ((fun p => p.fst - p.snd) ∘ fun p => (g p.fst, g p.snd)) x) =O[𝓝 (a, a)] fun p => p.fst - p.snd [PROOFSTEP] refine (hf.isBigO_sub_rev.comp_tendsto hg).congr' (eventually_of_forall fun _ => rfl) (hfg.mono ?_) [GOAL] case refine'_2 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd ⊢ ∀ (x : F × F), f (g x.fst) = x.fst ∧ f (g x.snd) = x.snd → ((fun p => f p.fst - f p.snd) ∘ fun p => (g p.fst, g p.snd)) x = (fun p => p.fst - p.snd) x [PROOFSTEP] rintro p ⟨hp1, hp2⟩ [GOAL] case refine'_2.intro 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hf : HasStrictFDerivAt f (↑f') (g a) hg : ContinuousAt (fun p => (g p.fst, g p.snd)) (a, a) hfg : ∀ᶠ (p : F × F) in 𝓝 (a, a), f (g p.fst) = p.fst ∧ f (g p.snd) = p.snd p : F × F hp1 : f (g p.fst) = p.fst hp2 : f (g p.snd) = p.snd ⊢ ((fun p => f p.fst - f p.snd) ∘ fun p => (g p.fst, g p.snd)) p = (fun p => p.fst - p.snd) p [PROOFSTEP] simp only [(· ∘ ·), hp1, hp2] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y ⊢ HasFDerivAt g (↑(ContinuousLinearEquiv.symm f')) a [PROOFSTEP] have : (fun x : F => g x - g a - f'.symm (x - a)) =O[𝓝 a] fun x : F => f' (g x - g a) - (x - a) := by refine' ((f'.symm : F →L[𝕜] E).isBigO_comp _ _).congr (fun x => _) fun _ => rfl simp [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y ⊢ (fun x => g x - g a - ↑(ContinuousLinearEquiv.symm f') (x - a)) =O[𝓝 a] fun x => ↑f' (g 
x - g a) - (x - a) [PROOFSTEP] refine' ((f'.symm : F →L[𝕜] E).isBigO_comp _ _).congr (fun x => _) fun _ => rfl [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x✝ : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y x : F ⊢ ↑↑(ContinuousLinearEquiv.symm f') (↑f' (g x - g a) - (x - a)) = g x - g a - ↑(ContinuousLinearEquiv.symm f') (x - a) [PROOFSTEP] simp [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y this : (fun x => g x - g a - ↑(ContinuousLinearEquiv.symm f') (x - a)) =O[𝓝 a] fun x => ↑f' (g x - g a) - (x - a) ⊢ HasFDerivAt g (↑(ContinuousLinearEquiv.symm f')) a [PROOFSTEP] refine' this.trans_isLittleO _ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y this : (fun x => g x - g a - ↑(ContinuousLinearEquiv.symm f') (x - a)) =O[𝓝 a] fun x => ↑f' (g x - g a) - (x - a) ⊢ (fun x => ↑f' (g x - g a) - (x - a)) =o[𝓝 a] fun x' => x' - a [PROOFSTEP] clear this [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y ⊢ (fun x => ↑f' (g x - g a) - (x - a)) =o[𝓝 a] fun x' => x' - a [PROOFSTEP] refine' ((hf.comp_tendsto hg).symm.congr' (hfg.mono _) (eventually_of_forall fun _ => rfl)).trans_isBigO _ [GOAL] case refine'_1 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g 
a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y ⊢ ∀ (x : F), f (g x) = x → (fun x => ↑↑f' (g x - g a) - (f (g x) - f (g a))) x = ↑f' (g x - g a) - (x - a) [PROOFSTEP] rintro p hp [GOAL] case refine'_1 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y p : F hp : f (g p) = p ⊢ (fun x => ↑↑f' (g x - g a) - (f (g x) - f (g a))) p = ↑f' (g p - g a) - (p - a) [PROOFSTEP] simp [hp, hfg.self_of_nhds] [GOAL] case refine'_2 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y ⊢ (fun x => ((fun x' => x' - g a) ∘ g) x) =O[𝓝 a] fun x' => x' - a [PROOFSTEP] refine' ((hf.isBigO_sub_rev f'.antilipschitz).comp_tendsto hg).congr' (eventually_of_forall fun _ => rfl) (hfg.mono _) [GOAL] case refine'_2 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y ⊢ ∀ (x : F), f (g x) = x → ((fun x' => f x' - f (g a)) ∘ g) x = (fun x' => x' - a) x [PROOFSTEP] rintro p hp [GOAL] case refine'_2 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f✝ f₀ f₁ g✝ : E → F f'✝ f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E f : E → F f' : E ≃L[𝕜] F g : F → E a : F hg : ContinuousAt g a hf : HasFDerivAt f (↑f') (g a) hfg : ∀ᶠ (y : F) in 𝓝 a, f (g y) = y p : F hp : f (g p) = p ⊢ ((fun x' => f x' - f (g a)) ∘ g) p = (fun x' => x' - a) p [PROOFSTEP] simp only [(· ∘ ·), hp, hfg.self_of_nhds] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E h : HasFDerivWithinAt f f' s x hf' : ∃ C, ∀ (z : E), ‖z‖ ≤ C * ‖↑f' z‖ ⊢ ∀ᶠ (z : E) in 𝓝[s \ {x}] x, f z ≠ f x [PROOFSTEP] rw [nhdsWithin, 
diff_eq, ← inf_principal, ← inf_assoc, eventually_inf_principal] [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E h : HasFDerivWithinAt f f' s x hf' : ∃ C, ∀ (z : E), ‖z‖ ≤ C * ‖↑f' z‖ ⊢ ∀ᶠ (x_1 : E) in 𝓝 x ⊓ 𝓟 s, x_1 ∈ {x}ᶜ → f x_1 ≠ f x [PROOFSTEP] have A : (fun z => z - x) =O[𝓝[s] x] fun z => f' (z - x) := isBigO_iff.2 <| hf'.imp fun C hC => eventually_of_forall fun z => hC _ [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E h : HasFDerivWithinAt f f' s x hf' : ∃ C, ∀ (z : E), ‖z‖ ≤ C * ‖↑f' z‖ A : (fun z => z - x) =O[𝓝[s] x] fun z => ↑f' (z - x) ⊢ ∀ᶠ (x_1 : E) in 𝓝 x ⊓ 𝓟 s, x_1 ∈ {x}ᶜ → f x_1 ≠ f x [PROOFSTEP] have : (fun z => f z - f x) ~[𝓝[s] x] fun z => f' (z - x) := h.trans_isBigO A [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E h : HasFDerivWithinAt f f' s x hf' : ∃ C, ∀ (z : E), ‖z‖ ≤ C * ‖↑f' z‖ A : (fun z => z - x) =O[𝓝[s] x] fun z => ↑f' (z - x) this : (fun z => f z - f x) ~[𝓝[s] x] fun z => ↑f' (z - x) ⊢ ∀ᶠ (x_1 : E) in 𝓝 x ⊓ 𝓟 s, x_1 ∈ {x}ᶜ → f x_1 ≠ f x [PROOFSTEP] simpa [not_imp_not, sub_eq_zero] using (A.trans this.isBigO_symm).eq_zero_imp [GOAL] 𝕜 : Type u_1 inst✝⁸ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝⁷ : NormedAddCommGroup E inst✝⁶ : NormedSpace 𝕜 E F : Type u_3 inst✝⁵ : NormedAddCommGroup F inst✝⁴ : NormedSpace 𝕜 F G : Type u_4 inst✝³ : NormedAddCommGroup G inst✝² : NormedSpace 𝕜 G G' : Type u_5 inst✝¹ : NormedAddCommGroup G' inst✝ : NormedSpace 𝕜 G' f f₀ f₁ g : E → F f' f₀' f₁' g' e : E →L[𝕜] F x : E s t : Set E L L₁ L₂ : Filter E h : HasFDerivAt f f' x hf' : ∃ C, ∀ (z : E), ‖z‖ ≤ C * ‖↑f' z‖ ⊢ ∀ᶠ (z : E) in 𝓝[{x}ᶜ] x, f z ≠ f x [PROOFSTEP] simpa only [compl_eq_univ_diff] using (hasFDerivWithinAt_univ.2 h).eventually_ne hf' [GOAL] E : Type u_1 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace ℝ E F : Type u_2 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace ℝ F f : E → F f' : E →L[ℝ] F x : E L : Filter E ⊢ Tendsto (fun x' => ‖x' - x‖⁻¹ * ‖f x' - f x - ↑f' (x' - x)‖) L (𝓝 0) ↔ Tendsto (fun x' => ‖x' - x‖⁻¹ • (f x' - f x - ↑f' (x' - x))) L (𝓝 0) [PROOFSTEP] symm [GOAL] E : Type u_1 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace ℝ E F : Type u_2 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace ℝ F f : E → F f' : E →L[ℝ] F x : E L : Filter E ⊢ Tendsto (fun x' => ‖x' - x‖⁻¹ • (f x' - f x - ↑f' (x' - x))) L (𝓝 0) ↔ Tendsto (fun x' => ‖x' - x‖⁻¹ * ‖f x' - f x - ↑f' (x' - x)‖) L (𝓝 0) [PROOFSTEP] rw [tendsto_iff_norm_tendsto_zero] [GOAL] E : Type u_1 inst✝³ : NormedAddCommGroup E inst✝² : 
NormedSpace ℝ E F : Type u_2 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace ℝ F f : E → F f' : E →L[ℝ] F x : E L : Filter E ⊢ Tendsto (fun e => ‖‖e - x‖⁻¹ • (f e - f x - ↑f' (e - x)) - 0‖) L (𝓝 0) ↔ Tendsto (fun x' => ‖x' - x‖⁻¹ * ‖f x' - f x - ↑f' (x' - x)‖) L (𝓝 0) [PROOFSTEP] refine' tendsto_congr fun x' => _ [GOAL] E : Type u_1 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace ℝ E F : Type u_2 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace ℝ F f : E → F f' : E →L[ℝ] F x : E L : Filter E x' : E ⊢ ‖‖x' - x‖⁻¹ • (f x' - f x - ↑f' (x' - x)) - 0‖ = ‖x' - x‖⁻¹ * ‖f x' - f x - ↑f' (x' - x)‖ [PROOFSTEP] simp [norm_smul] [GOAL] E : Type u_1 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace ℝ E F : Type u_2 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace ℝ F f : E → F f' : E →L[ℝ] F x : E hf : HasFDerivAt f f' x v : E ⊢ Tendsto (fun c => c • (f (x + c⁻¹ • v) - f x)) atTop (𝓝 (↑f' v)) [PROOFSTEP] apply hf.lim v [GOAL] E : Type u_1 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace ℝ E F : Type u_2 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace ℝ F f : E → F f' : E →L[ℝ] F x : E hf : HasFDerivAt f f' x v : E ⊢ Tendsto (fun n => ‖n‖) atTop atTop [PROOFSTEP] rw [tendsto_atTop_atTop] [GOAL] E : Type u_1 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace ℝ E F : Type u_2 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace ℝ F f : E → F f' : E →L[ℝ] F x : E hf : HasFDerivAt f f' x v : E ⊢ ∀ (b : ℝ), ∃ i, ∀ (a : ℝ), i ≤ a → b ≤ ‖a‖ [PROOFSTEP] exact fun b => ⟨b, fun a ha => le_trans ha (le_abs_self _)⟩ [GOAL] 𝕜 : Type u_1 inst✝⁴ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E F : Type u_3 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace 𝕜 F f : E → F s : Set E f' : E →L[𝕜] F x : E h : HasFDerivWithinAt f f' s x ⊢ MapsTo (↑f') (tangentConeAt 𝕜 s x) (tangentConeAt 𝕜 (f '' s) (f x)) [PROOFSTEP] rintro v ⟨c, d, dtop, clim, cdlim⟩ [GOAL] case intro.intro.intro.intro 𝕜 : Type u_1 inst✝⁴ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E F : Type u_3 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace 𝕜 F f : E → F s : Set E f' : E →L[𝕜] F x : E h : HasFDerivWithinAt f f' s x v : E c : ℕ → 𝕜 d : ℕ → E dtop : ∀ᶠ (n : ℕ) in atTop, x + d n ∈ s clim : Tendsto (fun n => ‖c n‖) atTop atTop cdlim : Tendsto (fun n => c n • d n) atTop (𝓝 v) ⊢ ↑f' v ∈ tangentConeAt 𝕜 (f '' s) (f x) [PROOFSTEP] refine' ⟨c, fun n => f (x + d n) - f x, mem_of_superset dtop _, clim, h.lim atTop dtop clim cdlim⟩ [GOAL] case intro.intro.intro.intro 𝕜 : Type u_1 inst✝⁴ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E F : Type u_3 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace 𝕜 F f : E → F s : Set E f' : E →L[𝕜] F x : E h : HasFDerivWithinAt f f' s x v : E c : ℕ → 𝕜 d : ℕ → E dtop : ∀ᶠ (n : ℕ) in atTop, x + d n ∈ s clim : Tendsto (fun n => ‖c n‖) atTop atTop cdlim : Tendsto (fun n => c n • d n) atTop (𝓝 v) ⊢ {x_1 | (fun n => x + d n ∈ s) x_1} ⊆ {x_1 | (fun n => f x + (fun n => f (x + d n) - f x) n ∈ f '' s) x_1} [PROOFSTEP] simp (config := { contextual := true }) [-mem_image, mem_image_of_mem] [GOAL] 𝕜 : Type u_1 inst✝⁴ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E F : Type u_3 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace 𝕜 F f : E → F s : Set E f' : E →L[𝕜] F x : E h : HasFDerivWithinAt f f' s x hs : UniqueDiffWithinAt 𝕜 s x h' : DenseRange ↑f' ⊢ UniqueDiffWithinAt 𝕜 (f '' s) (f x) [PROOFSTEP] refine' ⟨h'.dense_of_mapsTo f'.continuous 
hs.1 _, h.continuousWithinAt.mem_closure_image hs.2⟩ [GOAL] 𝕜 : Type u_1 inst✝⁴ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E F : Type u_3 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace 𝕜 F f : E → F s : Set E f' : E →L[𝕜] F x : E h : HasFDerivWithinAt f f' s x hs : UniqueDiffWithinAt 𝕜 s x h' : DenseRange ↑f' ⊢ MapsTo ↑f' ↑(Submodule.span 𝕜 (tangentConeAt 𝕜 s x)) ↑(Submodule.span 𝕜 (tangentConeAt 𝕜 (f '' s) (f x))) [PROOFSTEP] show Submodule.span 𝕜 (tangentConeAt 𝕜 s x) ≤ (Submodule.span 𝕜 (tangentConeAt 𝕜 (f '' s) (f x))).comap f' [GOAL] 𝕜 : Type u_1 inst✝⁴ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E F : Type u_3 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace 𝕜 F f : E → F s : Set E f' : E →L[𝕜] F x : E h : HasFDerivWithinAt f f' s x hs : UniqueDiffWithinAt 𝕜 s x h' : DenseRange ↑f' ⊢ Submodule.span 𝕜 (tangentConeAt 𝕜 s x) ≤ Submodule.comap f' (Submodule.span 𝕜 (tangentConeAt 𝕜 (f '' s) (f x))) [PROOFSTEP] rw [Submodule.span_le] [GOAL] 𝕜 : Type u_1 inst✝⁴ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E F : Type u_3 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace 𝕜 F f : E → F s : Set E f' : E →L[𝕜] F x : E h : HasFDerivWithinAt f f' s x hs : UniqueDiffWithinAt 𝕜 s x h' : DenseRange ↑f' ⊢ tangentConeAt 𝕜 s x ⊆ ↑(Submodule.comap f' (Submodule.span 𝕜 (tangentConeAt 𝕜 (f '' s) (f x)))) [PROOFSTEP] exact h.mapsTo_tangent_cone.mono Subset.rfl Submodule.subset_span [GOAL] 𝕜 : Type u_1 inst✝⁴ : NontriviallyNormedField 𝕜 E : Type u_2 inst✝³ : NormedAddCommGroup E inst✝² : NormedSpace 𝕜 E F : Type u_3 inst✝¹ : NormedAddCommGroup F inst✝ : NormedSpace 𝕜 F f : E → F s : Set E f' : E →L[𝕜] F e : F ≃L[𝕜] E ⊢ UniqueDiffOn 𝕜 (↑e ⁻¹' s) ↔ UniqueDiffOn 𝕜 s [PROOFSTEP] rw [← e.image_symm_eq_preimage, e.symm.uniqueDiffOn_image_iff]
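In conventional notation (a paraphrase, not taken verbatim from the proof states above), the goals in this block concern composition with a continuous linear equivalence $A = \mathrm{iso} : E \simeq F$ and assert the chain-rule facts
$$\text{$f \circ A$ is differentiable within $A^{-1}(s)$ at $x$} \iff \text{$f$ is differentiable within $s$ at $A x$}, \qquad D(f \circ A)(x) = Df(Ax) \circ A,$$
together with the inverse-function statement: if $f$ has an invertible derivative $f'$ at $g(a)$, $g$ is continuous at $a$, and $f(g(y)) = y$ for $y$ near $a$, then $g$ has derivative $(f')^{-1}$ at $a$.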
State Before: α : Type u_1 l : List α inst✝ : DecidableEq α head : α tail : List α a : α h : 0 < length (head :: tail) ⊢ count a (List.tail (head :: tail)) = count a (head :: tail) - if a = get (head :: tail) { val := 0, isLt := h } then 1 else 0 State After: α : Type u_1 l : List α inst✝ : DecidableEq α head : α tail : List α a : α h : 0 < length (head :: tail) ⊢ count a (List.tail (head :: tail)) = (count a tail + if a = head then 1 else 0) - if a = get (head :: tail) { val := 0, isLt := h } then 1 else 0 Tactic: rw [count_cons'] State Before: α : Type u_1 l : List α inst✝ : DecidableEq α head : α tail : List α a : α h : 0 < length (head :: tail) ⊢ count a (List.tail (head :: tail)) = (count a tail + if a = head then 1 else 0) - if a = get (head :: tail) { val := 0, isLt := h } then 1 else 0 State After: no goals Tactic: split_ifs <;> simp at * <;> contradiction
Way back in the late '90s and early 2000s, many Perl fans could rattle off a list of big projects using Perl: Slashdot, Amazon.com, IMDB. Eyebrows popped up (maybe at one point), as if the fact that billions of dollars of online sales went through Perl were validation of a language. Today much of the online Perl community discussion reads as reactionary, at least to me. Some random Internet argument will degrade into "Perl? Isn't that insert negative description here?" versus "Nuh uh, Perl isn't insert negative description here!" Me, I'd rather hear about interesting new projects written in Perl. Take the recent Duck Duck Go written in Perl story. Repeat this a few dozen times (especially with new projects created in the past year or two) and responses will move from "Perl? People still use that?" to "Wow, people who know Perl can certainly do a lot of interesting things!" I'd rather see the latter message spread than almost any other marketing message—so tell the world, what are you working on with Perl? Chris Hardie has a nice talk about framing any discussion. When you use the same words of the negative frame ("Nuh uh, Perl isn't insert negative description here!"), you beg the question that that frame is valid even though you don't intend to reinforce it. Although chromatic, and most seasoned writers, already know this, every word you use contributes to a proposition that you convey to users. Good writers are good because they don't convey any proposition other than what they intend or care to tolerate. Some people just don't want to know. Only yesterday I made a post on reddit about a project I was working on. Somewhere in the comments a guy asked me what it was written in (which had nothing to do with the original post). I told him Perl, and he replied "Perl might be part of your problem.. no one uses Perl, etc. etc.". I pointed out that many big sites (BBC, Amazon, Craigslist) use Perl. But he refused to even believe these companies used it, or claimed that if they did, they were anomalies and everyone else doesn't use Perl. Ok, so there are always going to be people who refuse to listen to anything beyond their preconceived views... but it's hard to know whether there are so many people who think like this that they'll never look at Perl seriously, or if people will eventually come around and give Perl a chance. It has been said, but it seems like the Perl community values internal infrastructure work much more than outwardly facing projects (websites, applications, etc.) It would be great if more people talked about the fun projects they are doing. I know I am guilty of not blogging about my personal projects. Anyways, I'll try to brag here. :) I started a web-based scrabble clone at www.leisurelyletters.com. I also have a hosted (or standalone) web-based IRC client running at www.usealice.org. The source code for both sites is on github. Neither is particularly popular (yet?!), but hopefully they show people that Perl can be used for the fun stuff too.
lemma lim_cnj: "((\<lambda>x. cnj(f x)) \<longlongrightarrow> cnj l) F \<longleftrightarrow> (f \<longlongrightarrow> l) F"
module TerminalLoggers

using Logging: AbstractLogger, LogLevel, BelowMinLevel, Debug, Info, Warn, Error, AboveMaxLevel

import Logging: handle_message, shouldlog, min_enabled_level, catch_exceptions

export TerminalLogger

include("StickyMessages.jl")
include("TerminalLogger.jl")

end # module
[STATEMENT] lemma out546_suma__conga: assumes "A B C D E F SumA G H I" and "E Out D F" shows "A B C CongA G H I" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A B C CongA G H I [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. A B C CongA G H I [PROOF STEP] have "A B C D E F SumA A B C" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] have "C B C CongA D E F" [PROOF STATE] proof (prove) goal (1 subgoal): 1. C B C CongA D E F [PROOF STEP] by (metis assms(1) assms(2) l11_21_b out_trivial suma_distincts) [PROOF STATE] proof (state) this: C B C CongA D E F goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] moreover [PROOF STATE] proof (state) this: C B C CongA D E F goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] have "\<not> B C OS A C" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<not> B C OS A C [PROOF STEP] using os_distincts [PROOF STATE] proof (prove) using this: ?A ?B OS ?X ?Y \<Longrightarrow> ?A \<noteq> ?B \<and> ?A \<noteq> ?X \<and> ?A \<noteq> ?Y \<and> ?B \<noteq> ?X \<and> ?B \<noteq> ?Y goal (1 subgoal): 1. \<not> B C OS A C [PROOF STEP] by auto [PROOF STATE] proof (state) this: \<not> B C OS A C goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] moreover [PROOF STATE] proof (state) this: \<not> B C OS A C goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] have "Coplanar A B C C" [PROOF STATE] proof (prove) goal (1 subgoal): 1. Coplanar A B C C [PROOF STEP] using ncop_distincts [PROOF STATE] proof (prove) using this: \<not> Coplanar ?A ?B ?C ?D \<Longrightarrow> ?A \<noteq> ?B \<and> ?A \<noteq> ?C \<and> ?A \<noteq> ?D \<and> ?B \<noteq> ?C \<and> ?B \<noteq> ?D \<and> ?C \<noteq> ?D goal (1 subgoal): 1. Coplanar A B C C [PROOF STEP] by auto [PROOF STATE] proof (state) this: Coplanar A B C C goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] moreover [PROOF STATE] proof (state) this: Coplanar A B C C goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] have "A B C CongA A B C" [PROOF STATE] proof (prove) goal (1 subgoal): 1. A B C CongA A B C [PROOF STEP] by (metis Tarski_neutral_dimensionless.suma_distincts Tarski_neutral_dimensionless_axioms assms(1) conga_pseudo_refl conga_right_comm) [PROOF STATE] proof (state) this: A B C CongA A B C goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: C B C CongA D E F \<not> B C OS A C Coplanar A B C C A B C CongA A B C [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: C B C CongA D E F \<not> B C OS A C Coplanar A B C C A B C CongA A B C goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] using SumA_def [PROOF STATE] proof (prove) using this: C B C CongA D E F \<not> B C OS A C Coplanar A B C C A B C CongA A B C ?A ?B ?C ?D ?E ?F SumA ?G ?H ?I \<equiv> \<exists>J. ?C ?B J CongA ?D ?E ?F \<and> \<not> ?B ?C OS ?A J \<and> Coplanar ?A ?B ?C J \<and> ?A ?B J CongA ?G ?H ?I goal (1 subgoal): 1. A B C D E F SumA A B C [PROOF STEP] by blast [PROOF STATE] proof (state) this: A B C D E F SumA A B C goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: A B C D E F SumA A B C goal (1 subgoal): 1. A B C CongA G H I [PROOF STEP] then [PROOF STATE] proof (chain) picking this: A B C D E F SumA A B C [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: A B C D E F SumA A B C goal (1 subgoal): 1. 
A B C CongA G H I [PROOF STEP] using suma2__conga assms(1) [PROOF STATE] proof (prove) using this: A B C D E F SumA A B C \<lbrakk>?A ?B ?C ?D ?E ?F SumA ?G ?H ?I; ?A ?B ?C ?D ?E ?F SumA ?G' ?H' ?I'\<rbrakk> \<Longrightarrow> ?G ?H ?I CongA ?G' ?H' ?I' A B C D E F SumA G H I goal (1 subgoal): 1. A B C CongA G H I [PROOF STEP] by blast [PROOF STATE] proof (state) this: A B C CongA G H I goal: No subgoals! [PROOF STEP] qed
Formal statement is: lemma homotopic_with_sym: "homotopic_with P X Y f g \<longleftrightarrow> homotopic_with P X Y g f" Informal statement is: Two continuous maps $f, g: X \to Y$ are homotopic through maps satisfying $P$ if and only if $g$ and $f$ are; in other words, the relation homotopic_with P X Y is symmetric.
{-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE DeriveDataTypeable, DeriveGeneric #-} -- | -- Module : Statistics.Distribution.StudentT -- Copyright : (c) 2011 Aleksey Khudyakov -- License : BSD3 -- -- Maintainer : [email protected] -- Stability : experimental -- Portability : portable -- -- Student-T distribution module Statistics.Distribution.StudentT ( StudentT -- * Constructors , studentT , studentTE , studentTUnstandardized -- * Accessors , studentTndf ) where import Control.Applicative import Data.Aeson (FromJSON(..), ToJSON, Value(..), (.:)) import Data.Binary (Binary(..)) import Data.Data (Data, Typeable) import GHC.Generics (Generic) import Numeric.SpecFunctions ( logBeta, incompleteBeta, invIncompleteBeta, digamma) import qualified Statistics.Distribution as D import Statistics.Distribution.Transform (LinearTransform (..)) import Statistics.Internal -- | Student-T distribution newtype StudentT = StudentT { studentTndf :: Double } deriving (Eq, Typeable, Data, Generic) instance Show StudentT where showsPrec i (StudentT ndf) = defaultShow1 "studentT" ndf i instance Read StudentT where readPrec = defaultReadPrecM1 "studentT" studentTE instance ToJSON StudentT instance FromJSON StudentT where parseJSON (Object v) = do ndf <- v .: "studentTndf" maybe (fail $ errMsg ndf) return $ studentTE ndf parseJSON _ = empty instance Binary StudentT where put = put . studentTndf get = do ndf <- get maybe (fail $ errMsg ndf) return $ studentTE ndf -- | Create Student-T distribution. Number of parameters must be positive. studentT :: Double -> StudentT studentT ndf = maybe (error $ errMsg ndf) id $ studentTE ndf -- | Create Student-T distribution. Number of parameters must be positive. studentTE :: Double -> Maybe StudentT studentTE ndf | ndf > 0 = Just (StudentT ndf) | otherwise = Nothing errMsg :: Double -> String errMsg _ = modErr "studentT" "non-positive number of degrees of freedom" instance D.Distribution StudentT where cumulative = cumulative complCumulative = complCumulative instance D.ContDistr StudentT where density d@(StudentT ndf) x = exp (logDensityUnscaled d x) / sqrt ndf logDensity d@(StudentT ndf) x = logDensityUnscaled d x - log (sqrt ndf) quantile = quantile cumulative :: StudentT -> Double -> Double cumulative (StudentT ndf) x | x > 0 = 1 - 0.5 * ibeta | otherwise = 0.5 * ibeta where ibeta = incompleteBeta (0.5 * ndf) 0.5 (ndf / (ndf + x*x)) complCumulative :: StudentT -> Double -> Double complCumulative (StudentT ndf) x | x > 0 = 0.5 * ibeta | otherwise = 1 - 0.5 * ibeta where ibeta = incompleteBeta (0.5 * ndf) 0.5 (ndf / (ndf + x*x)) logDensityUnscaled :: StudentT -> Double -> Double logDensityUnscaled (StudentT ndf) x = log (ndf / (ndf + x*x)) * (0.5 * (1 + ndf)) - logBeta 0.5 (0.5 * ndf) quantile :: StudentT -> Double -> Double quantile (StudentT ndf) p | p >= 0 && p <= 1 = let x = invIncompleteBeta (0.5 * ndf) 0.5 (2 * min p (1 - p)) in case sqrt $ ndf * (1 - x) / x of r | p < 0.5 -> -r | otherwise -> r | otherwise = modErr "quantile" $ "p must be in [0,1] range. Got: "++show p instance D.MaybeMean StudentT where maybeMean (StudentT ndf) | ndf > 1 = Just 0 | otherwise = Nothing instance D.MaybeVariance StudentT where maybeVariance (StudentT ndf) | ndf > 2 = Just $! ndf / (ndf - 2) | otherwise = Nothing instance D.Entropy StudentT where entropy (StudentT ndf) = 0.5 * (ndf+1) * (digamma ((1+ndf)/2) - digamma(ndf/2)) + log (sqrt ndf) + logBeta (ndf/2) 0.5 instance D.MaybeEntropy StudentT where maybeEntropy = Just . 
D.entropy instance D.ContGen StudentT where genContVar = D.genContinuous -- | Create an unstandardized Student-t distribution. studentTUnstandardized :: Double -- ^ Number of degrees of freedom -> Double -- ^ Central value (0 for standard Student T distribution) -> Double -- ^ Scale parameter -> LinearTransform StudentT studentTUnstandardized ndf mu sigma | sigma > 0 = LinearTransform mu sigma $ studentT ndf | otherwise = modErr "studentTUnstandardized" $ "sigma must be > 0. Got: " ++ show sigma modErr :: String -> String -> a modErr fun msg = error $ "Statistics.Distribution.StudentT." ++ fun ++ ": " ++ msg
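A minimal usage sketch for the module above (a hypothetical example, not part of the package source; it relies only on the studentT constructor exported above and the density, cumulative, and quantile methods of the Statistics.Distribution classes that the instances above implement):

import qualified Statistics.Distribution as D
import Statistics.Distribution.StudentT (studentT)

main :: IO ()
main = do
  -- Student-t distribution with 5 degrees of freedom
  let d = studentT 5
  print (D.density d 0)         -- value of the pdf at 0
  print (D.cumulative d 1.96)   -- P(X <= 1.96)
  print (D.quantile d 0.975)    -- 97.5% quantile (inverse cdf)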
header {* Assertions, commands, and separation logic proof rules *} theory Ribbons_Basic imports Main begin text {* We define a command language, assertions, and the rules of separation logic, plus some derived rules that are used by our tool. This is the only theory file that is loaded by the tool. We keep it as small as possible. *} subsection {* Assertions *} text {* The language of assertions includes (at least) an emp constant, a star-operator, and existentially-quantified logical variables. *} typedecl assertion axiomatization Emp :: "assertion" axiomatization Star :: "assertion \<Rightarrow> assertion \<Rightarrow> assertion" (infixr "\<star>" 55) where star_comm: "p \<star> q = q \<star> p" and star_assoc: "(p \<star> q) \<star> r = p \<star> (q \<star> r)" and star_emp: "p \<star> Emp = p" and emp_star: "Emp \<star> p = p" lemma star_rot: "q \<star> p \<star> r = p \<star> q \<star> r" using star_assoc star_comm by auto axiomatization Exists :: "string \<Rightarrow> assertion \<Rightarrow> assertion" text {* Extracting the set of program variables mentioned in an assertion. *} axiomatization rd_ass :: "assertion \<Rightarrow> string set" where rd_emp: "rd_ass Emp = {}" and rd_star: "rd_ass (p \<star> q) = rd_ass p \<union> rd_ass q" and rd_exists: "rd_ass (Exists x p) = rd_ass p" subsection {* Commands *} text {* The language of commands comprises (at least) non-deterministic choice, non-deterministic looping, skip and sequencing. *} typedecl command axiomatization Choose :: "command \<Rightarrow> command \<Rightarrow> command" axiomatization Loop :: "command \<Rightarrow> command" axiomatization Skip :: "command" axiomatization Seq :: "command \<Rightarrow> command \<Rightarrow> command" (infixr ";;" 55) where seq_assoc: "c1 ;; (c2 ;; c3) = (c1 ;; c2) ;; c3" and seq_skip: "c ;; Skip = c" and skip_seq: "Skip ;; c = c" text {* Extracting the set of program variables read by a command. *} axiomatization rd_com :: "command \<Rightarrow> string set" where rd_com_choose: "rd_com (Choose c1 c2) = rd_com c1 \<union> rd_com c2" and rd_com_loop: "rd_com (Loop c) = rd_com c" and rd_com_skip: "rd_com (Skip) = {}" and rd_com_seq: "rd_com (c1 ;; c2) = rd_com c1 \<union> rd_com c2" text {* Extracting the set of program variables written by a command. *} axiomatization wr_com :: "command \<Rightarrow> string set" where wr_com_choose: "wr_com (Choose c1 c2) = wr_com c1 \<union> wr_com c2" and wr_com_loop: "wr_com (Loop c) = wr_com c" and wr_com_skip: "wr_com (Skip) = {}" and wr_com_seq: "wr_com (c1 ;; c2) = wr_com c1 \<union> wr_com c2" subsection {* Separation logic proof rules *} text {* Note that the frame rule has a side-condition concerning program variables. When proving the soundness of our graphical formalisation of ribbon proofs, we shall omit this side-condition. 
*} inductive prov_triple :: "assertion \<times> command \<times> assertion \<Rightarrow> bool" where exists: "prov_triple (p, c, q) \<Longrightarrow> prov_triple (Exists x p, c, Exists x q)" | choose: "\<lbrakk> prov_triple (p, c1, q); prov_triple (p, c2, q) \<rbrakk> \<Longrightarrow> prov_triple (p, Choose c1 c2, q)" | loop: "prov_triple (p, c, p) \<Longrightarrow> prov_triple (p, Loop c, p)" | frame: "\<lbrakk> prov_triple (p, c, q); wr_com(c) \<inter> rd_ass(r) = {} \<rbrakk> \<Longrightarrow> prov_triple (p \<star> r, c, q \<star> r)" | skip: "prov_triple (p, Skip, p)" | seq: "\<lbrakk> prov_triple (p, c1, q); prov_triple (q, c2, r) \<rbrakk> \<Longrightarrow> prov_triple (p, c1 ;; c2, r)" text {* Here are some derived proof rules, which are used in our ribbon-checking tool. *} lemma choice_lemma: assumes "prov_triple (p1, c1, q1)" and "prov_triple (p2, c2, q2)" and "p = p1" and "p1 = p2" and "q = q1" and "q1 = q2" shows "prov_triple (p, Choose c1 c2, q)" using assms prov_triple.choose by auto lemma loop_lemma: assumes "prov_triple (p1, c, q1)" and "p = p1" and "p1 = q1" and "q1 = q" shows "prov_triple (p, Loop c, q)" using assms prov_triple.loop by auto lemma seq_lemma: assumes "prov_triple (p1, c1, q1)" and "prov_triple (p2, c2, q2)" and "q1 = p2" shows "prov_triple (p1, c1 ;; c2, q2)" using assms prov_triple.seq by auto end
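For readability, the frame axiom of prov_triple above corresponds to the usual inference-rule presentation; this is only a LaTeX rendering of the rule already stated in the theory, with wr and rd abbreviating wr_com and rd_ass:

\[
  \frac{\{p\}\; c\; \{q\} \qquad \mathrm{wr}(c) \cap \mathrm{rd}(r) = \emptyset}
       {\{p \star r\}\; c\; \{q \star r\}} \quad \textsc{Frame}
\]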
lemma Lim_null: fixes f :: "'a \<Rightarrow> 'b::real_normed_vector" shows "(f \<longlongrightarrow> l) net \<longleftrightarrow> ((\<lambda>x. f(x) - l) \<longlongrightarrow> 0) net"
#' semanticaxelrod #' #' @name semanticaxelrod #' @docType package NULL
Collection, use, access and/or disclosure of personal health information is governed by Ontario law (Personal Health Information Protection Act, 2004). You have a right, by law, to access your own hospital health record, either by viewing it or by requesting a copy. You can obtain a copy of your own hospital health record by submitting a written request to the Health Records department. A person can review their own health record at the health care facility where they were treated by submitting a written request to the Health Records department. In the case of a conflict, the capable patient's decision prevails with respect to consent for the collection, use and/or disclosure of their personal health information. If you are requesting a copy of the hospital record of a patient who is deceased, you must submit proof of your legal signing authority, along with a written request, to the Health Records department. You can request that a copy of your hospital health record be released to a lawyer, insurance company, or any other third party specified by you, by submitting a written request to the Health Records department. When requested, copies of your health record may be released to health care providers outside the hospital to ensure the best continuing care for you. Your attending physician at the hospital may also share reports or summaries of your treatment at the hospital with other physicians and health care providers involved in your care, to ensure they are aware of treatments or medications that may affect your ongoing care.
--
-- A module about shapes
--

||| a Shape
data Shape = ||| Triangle with base and height
             Triangle Double Double
           | ||| Circle with radius
             Circle Double

||| computes the area of a shape
area : Shape -> Double
area (Triangle x y) = 0.5 * x * y
area (Circle x) = pi * x * x -- pi is known by idris

{- commented out code -}
[STATEMENT] lemma fmdrop_eq_iff: "fmdrop x B = fmdrop y B \<longleftrightarrow> x = y \<or> (x \<notin> fmdom' B \<and> y \<notin> fmdom' B)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (fmdrop x B = fmdrop y B) = (x = y \<or> x \<notin> fmdom' B \<and> y \<notin> fmdom' B) [PROOF STEP] by transfer (auto simp: map_drop_def map_filter_def fun_eq_iff, metis)
[STATEMENT] lemma (in monoid_add) length_interact: "length (interact xs ys) = sum_list (map length xs) + sum_list (map length ys)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. length (interact xs ys) = sum_list (map length xs) + sum_list (map length ys) [PROOF STEP] by (induction rule: interact.induct) (auto simp: length_concat)
module imper-nouni where -- -- TO-DOs -- -- * change use of =string -- * prove that =string can't be both tt and ff -- * prove reverse theorems for passes/fails/chck -- * prove semantic equivalence for execs and execsTo -- + this would be s-thm and s-det open import lib open import eq-reas-nouni _=nat_ = _=ℕ_ _-nat_ = _∸_ cross = _×_ equiv = _≡_ bottom = ⊥ bottom-elim = ⊥-elim -- -- inspect/with-eq idiom -- data Singleton {a} {A : Set a} (x : A) : Set a where _with-eq_ : (y : A) → equiv x y → Singleton x inspect : forall {a} {A : Set a} (x : A) -> Singleton x inspect x = x with-eq refl -- -- variable identifiers -- Id : Set Id = string _=Id_ : Id -> Id -> bool _=Id_ = _=string_ -- -- values (just natural numbers here) -- Val : Set Val = nat -- -- value and variable expressions -- data Expn : Set where val : Val -> Expn var : Id -> Expn plus : Expn -> Expn -> Expn minus : Expn -> Expn -> Expn scaleBy : Expn -> Val -> Expn -- -- conditions on values and variables -- data Cond : Set where true : Cond false : Cond and : Cond -> Cond -> Cond or : Cond -> Cond -> Cond not : Cond -> Cond less : Expn -> Expn -> Cond equal : Expn -> Expn -> Cond -- -- stack frames containing variable bindings -- Frm : Set Frm = list (cross Id Val) -- -- program statements that transform a frame -- data Stmt : Set where skip : Stmt assign : Id -> Expn -> Stmt seq : Stmt -> Stmt -> Stmt ifThenElse : Cond -> Stmt -> Stmt -> Stmt repeatBy : Id -> Stmt -> Stmt returns : Expn -> Stmt -- -- functional SEMANTICS of frames -- lkup : Id -> Frm -> Val lkup x [] = 0 lkup x ((y , w) :: F) = if (x =Id y) then w else (lkup x F) update : Id -> Val -> Frm -> Frm update x v [] = (x , v) :: [] update x v ((y , w) :: F) = if (x =Id y) then (y , v) :: F else (y , w) :: (update x v F) -- -- functional SEMANTICS of expressions -- eval : Expn -> Frm -> Val eval (val v) F = v eval (var x) F = lkup x F eval (plus e1 e2) F = (eval e1 F) + (eval e2 F) eval (minus e1 e2) F = (eval e1 F) -nat (eval e2 F) eval (scaleBy e1 v2) F = (eval e1 F) * v2 -- -- functional SEMANTICS of conditions -- chck : Cond -> Frm -> bool chck true F = tt chck false F = ff chck (and c1 c2) F = (chck c1 F) && (chck c2 F) chck (or c1 c2) F = (chck c1 F) || (chck c2 F) chck (not c) F = ~ (chck c F) chck (less e1 e2) F = (eval e1 F) < (eval e2 F) chck (equal e1 e2) F = (eval e1 F) =nat (eval e2 F) -- -- functional SEMANTICS of program execution -- exec : Stmt -> Frm -> Frm repeatedly : Stmt -> Id -> nat -> Frm -> Frm repeatedly s x 0 F = F repeatedly s x (suc n) F = repeatedly s x n (update x n (exec s F)) exec skip F = F exec (seq s1 s2) F = (exec s2 (exec s1 F)) exec (assign x e) F = (update x (eval e F) F) exec (ifThenElse c s1 s2) F = if (chck c F) then (exec s1 F) else (exec s2 F) exec (repeatBy x s) F = repeatedly s x (lkup x F) F exec (returns e) F = (update "retval" (eval e F) F) -- -- SEMANTICS of stack bindings as a relation -- data mapsTo : Frm -> Id -> Val -> Set where var-undef : forall {x : Id} ---------------- -> (mapsTo [] x 0) var-match : forall {x y : Id} {F : Frm} {v : Val} -> (equiv (x =string y) tt) --------------------------- -> (mapsTo ((y , v) :: F) x v) var-mismatch : forall {x y : Id} {F : Frm} {v w : Val} -> (equiv (x =string y) ff) -> (mapsTo F x v) ---------------------------- -> (mapsTo ((y , w) :: F) x v) -- -- THEOREM: mapsTo agrees with lookup -- var-thm : forall (x : Id) (F : Frm) -> mapsTo F x (lkup x F) var-thm x [] = var-undef var-thm x ((y , w) :: F) with (inspect (x =string y)) ... 
| tt with-eq match = let lkup-is-w : (equiv (lkup x ((y , w) :: F)) w) lkup-is-w = cong3 if_then_else_ match refl refl in cong-pred (mapsTo ((y , w) :: F) x) (sym lkup-is-w) (var-match match) ... | ff with-eq mismatch = let lkup-is-lkup : (equiv (lkup x ((y , w) :: F)) (lkup x F)) lkup-is-lkup = cong3 if_then_else_ mismatch refl refl in cong-pred (mapsTo ((y , w) :: F) x) (sym lkup-is-lkup) (var-mismatch mismatch (var-thm x F)) postulate =Id-det : ∀ {x y : Id} -> (equiv (x =string y) tt) -> (equiv (x =string y) ff) -> bottom var-det : forall{x : Id}{F : Frm}{u1 u2 : Val} -> mapsTo F x u1 -> mapsTo F x u2 -> equiv u1 u2 var-det{x}{[]}{u1}{u2} var-undef var-undef = refl var-det{x}{(y , w) :: F}{u1}{u2} (var-match _) (var-match _) = refl var-det{x}{(y , w) :: F}{u1}{u2} (var-mismatch _ lkup-is-u1) (var-mismatch _ lkup-is-u2) = var-det lkup-is-u1 lkup-is-u2 var-det{x}{(y , w) :: F}{u1}{u2} (var-match{.x}{.y}{.F}{.u1} same) (var-mismatch{.x}{.y}{.F}{.u2} diff _) = bottom-elim (=Id-det{x}{y} same diff) var-det{x}{(y , w) :: F}{u1}{u2} (var-mismatch{.x}{.y}{.F}{.u1} diff _) (var-match{.x}{.y}{.F}{.u2} same) = bottom-elim (=Id-det{x}{y} same diff) -- -- SEMANTICS of expression evaluation as a relation -- data evalsTo : Frm -> Expn -> Val -> Set where e-val : forall {v : Val} {F : Frm} ------------------------ -> (evalsTo F (val v) v) e-var : forall {x : Id} {F : Frm} {v : Val} -> (mapsTo F x v) ------------------------ -> (evalsTo F (var x) v) e-add : forall {e1 e2 : Expn} {F : Frm} {v1 v2 : Val} -> (evalsTo F e1 v1) -> (evalsTo F e2 v2) ------------------------------------- -> (evalsTo F (plus e1 e2) (v1 + v2)) e-sub : forall {e1 e2 : Expn} {F : Frm} {v1 v2 : Val} -> (evalsTo F e1 v1) -> (evalsTo F e2 v2) ----------------------------------------- -> (evalsTo F (minus e1 e2) (v1 -nat v2)) e-scale : forall {e1 : Expn} {F : Frm} {v1 v2 : Val} -> (evalsTo F e1 v1) ---------------------------------------- -> (evalsTo F (scaleBy e1 v2) (v1 * v2)) e-thm : forall (e : Expn) -> (F : Frm) -> (evalsTo F e (eval e F)) e-thm (val e) F = e-val e-thm (var x) F = e-var (var-thm x F) e-thm (plus e1 e2) F = (e-add (e-thm e1 F) (e-thm e2 F)) e-thm (minus e1 e2) F = (e-sub (e-thm e1 F) (e-thm e2 F)) e-thm (scaleBy e1 v2) F = (e-scale (e-thm e1 F)) e-det : forall {e : Expn}{F : Frm}{u w : Val} -> (evalsTo F e u) -> (evalsTo F e w) -> (equiv u w) e-det{val v}{F}{u}{w} e-val e-val = refl e-det{var x}{F}{u}{w} (e-var var-lkup-u) (e-var var-lkup-v) = var-det var-lkup-u var-lkup-v e-det{plus e1 e2}{F}{u}{w} (e-add e-u1 e-u2) (e-add e-w1 e-w2) = cong2 _+_ (e-det e-u1 e-w1) (e-det e-u2 e-w2) e-det{minus e1 e2}{F}{u}{w} (e-sub e-u1 e-u2) (e-sub e-w1 e-w2) = cong2 _-nat_ (e-det e-u1 e-w1) (e-det e-u2 e-w2) e-det{scaleBy e1 v2}{F}{u}{w} (e-scale e-u1) (e-scale e-w1) = cong2 _*_ (e-det e-u1 e-w1) refl e-thm-fwd : forall {e : Expn}{F : Frm}{v : Val} -> (evalsTo F e v) -> (equiv v (eval e F)) e-thm-fwd{e}{F}{v} ev = let p1 : evalsTo F e (eval e F) p1 = e-thm e F in e-det ev p1 e-thm-rev : forall {e : Expn}{F : Frm}{v : Val} -> (equiv v (eval e F)) -> (evalsTo F e v) e-thm-rev{e}{F}{v} v-is = cong-pred (evalsTo F e) (sym v-is) (e-thm e F) -- -- SEMANTICS of conditions as a decidable relation -- data passes : Frm -> Cond -> Set data fails : Frm -> Cond -> Set data passes where c-tt : forall {F : Frm} ---------------- -> passes F true c-and : forall {c1 c2 : Cond} {F : Frm} -> passes F c1 -> passes F c2 ----------------------- -> passes F (and c1 c2) c-or1 : forall {c1 c2 : Cond} {F : Frm} -> passes F c1 ---------------------- -> 
passes F (or c1 c2) c-or2 : forall {c1 c2 : Cond} {F : Frm} -> passes F c2 ---------------------- -> passes F (or c1 c2) c-less : forall {e1 e2 : Expn} {F : Frm} {v1 v2 : Val} -> equiv (v1 < v2) tt -> evalsTo F e1 v1 -> evalsTo F e2 v2 ------------------------- -> passes F (less e1 e2) c-eq : forall {e1 e2 : Expn} {F : Frm} {v1 v2 : Val} -> equiv (v1 =nat v2) tt -> evalsTo F e1 v1 -> evalsTo F e2 v2 -------------------------- -> passes F (equal e1 e2) c-not : forall {c : Cond} {F : Frm} -> fails F c ------------------- -> passes F (not c) data fails where ~c-ff : forall {F : Frm} ---------------- -> fails F false ~c-or : forall {c1 c2 : Cond} {F : Frm} -> fails F c1 -> fails F c2 ----------------------- -> fails F (or c1 c2) ~c-and1 : forall {c1 c2 : Cond} {F : Frm} -> fails F c1 ---------------------- -> fails F (and c1 c2) ~c-and2 : forall {c1 c2 : Cond} {F : Frm} -> fails F c2 ---------------------- -> fails F (and c1 c2) ~c-less : forall {e1 e2 : Expn} {F : Frm} {v1 v2 : Val} -> equiv (v1 < v2) ff -> evalsTo F e1 v1 -> evalsTo F e2 v2 ------------------------- -> fails F (less e1 e2) ~c-eq : forall {e1 e2 : Expn} {F : Frm} {v1 v2 : Val} -> equiv (v1 =nat v2) ff -> evalsTo F e1 v1 -> evalsTo F e2 v2 -------------------------- -> fails F (equal e1 e2) ~c-not : forall {c : Cond} {F : Frm} -> passes F c ------------------- -> fails F (not c) c-thm-fwd : forall {c : Cond}{F : Frm} -> (passes F c) -> (equiv (chck c F) tt) ~c-thm-fwd : forall {c : Cond}{F : Frm} -> (fails F c) -> (equiv (chck c F) ff) c-thm-fwd (c-tt{F}) = refl c-thm-fwd (c-and{c1}{c2}{F} passes-c1 passes-c2) = cong2 _&&_ (c-thm-fwd passes-c1) (c-thm-fwd passes-c2) c-thm-fwd (c-or1{c1}{c2}{F} passes-c1) = cong2 _||_ (c-thm-fwd passes-c1) refl c-thm-fwd (c-or2{c1}{c2}{F} passes-c2) = trans (cong2 _||_ refl (c-thm-fwd passes-c2)) (||-tt (chck c1 F)) c-thm-fwd (c-less{e1}{e2}{F}{v1}{v2} v1-less-v2 evalsTo-e1-v1 evalsTo-e2-v2) = let eval-e1-is-v1 : (equiv (eval e1 F) v1) eval-e1-is-v1 = sym (e-thm-fwd evalsTo-e1-v1) eval-e2-is-v2 : (equiv (eval e2 F) v2) eval-e2-is-v2 = sym (e-thm-fwd evalsTo-e2-v2) in begin chck (less e1 e2) F equiv[ refl ] (eval e1 F) < (eval e2 F) equiv[ cong2 _<_ eval-e1-is-v1 eval-e2-is-v2 ] v1 < v2 equiv[ v1-less-v2 ] tt qed c-thm-fwd (c-eq{e1}{e2}{F}{v1}{v2} v1-equals-v2 evalsTo-e1-v1 evalsTo-e2-v2) = let eval-e1-is-v1 : (equiv (eval e1 F) v1) eval-e1-is-v1 = sym (e-thm-fwd evalsTo-e1-v1) eval-e2-is-v2 : (equiv (eval e2 F) v2) eval-e2-is-v2 = sym (e-thm-fwd evalsTo-e2-v2) in begin chck (equal e1 e2) F equiv[ refl ] (eval e1 F) =nat (eval e2 F) equiv[ cong2 _=nat_ eval-e1-is-v1 eval-e2-is-v2 ] v1 =nat v2 equiv[ v1-equals-v2 ] tt qed c-thm-fwd (c-not{c}{F} c-fails) = cong ~_ (~c-thm-fwd c-fails) ~c-thm-fwd (~c-ff{F}) = refl ~c-thm-fwd (~c-or{c1}{c2}{F} fails-c1 fails-c2) = cong2 _||_ (~c-thm-fwd fails-c1) (~c-thm-fwd fails-c2) ~c-thm-fwd (~c-and1{c1}{c2}{F} fails-c1) = cong2 _&&_ (~c-thm-fwd fails-c1) refl ~c-thm-fwd (~c-and2{c1}{c2}{F} fails-c2) = trans (cong2 _&&_ refl (~c-thm-fwd fails-c2)) (&&-ff (chck c1 F)) ~c-thm-fwd (~c-less{e1}{e2}{F}{v1}{v2} v1-not-less-v2 evalsTo-e1-v1 evalsTo-e2-v2) = let eval-e1-is-v1 : (equiv (eval e1 F) v1) eval-e1-is-v1 = sym (e-thm-fwd evalsTo-e1-v1) eval-e2-is-v2 : (equiv (eval e2 F) v2) eval-e2-is-v2 = sym (e-thm-fwd evalsTo-e2-v2) in begin chck (less e1 e2) F equiv[ refl ] (eval e1 F) < (eval e2 F) equiv[ cong2 _<_ eval-e1-is-v1 eval-e2-is-v2 ] v1 < v2 equiv[ v1-not-less-v2 ] ff qed ~c-thm-fwd (~c-eq{e1}{e2}{F}{v1}{v2} v1-not-equals-v2 evalsTo-e1-v1 evalsTo-e2-v2) = let 
eval-e1-is-v1 : (equiv (eval e1 F) v1) eval-e1-is-v1 = sym (e-thm-fwd evalsTo-e1-v1) eval-e2-is-v2 : (equiv (eval e2 F) v2) eval-e2-is-v2 = sym (e-thm-fwd evalsTo-e2-v2) in begin chck (equal e1 e2) F equiv[ refl ] (eval e1 F) =nat (eval e2 F) equiv[ cong2 _=nat_ eval-e1-is-v1 eval-e2-is-v2 ] v1 =nat v2 equiv[ v1-not-equals-v2 ] ff qed ~c-thm-fwd (~c-not{c}{F} c-passes) = cong ~_ (c-thm-fwd c-passes) -- These can probably be shown just by using -- the contrapositives of ~c-thm-fwd and ~~c-thm-fwd postulate c-thm-rev : forall {c : Cond}{F : Frm} -> (equiv (chck c F) tt) -> (passes F c) ~c-thm-rev : forall {c : Cond}{F : Frm} -> (equiv (chck c F) ff) -> (fails F c) -- -- SEMANTICS of program statements -- as a state transformation relation -- data execsTo : Frm -> Stmt -> Frm -> Set where s-skip : forall {F : Frm} ------------------- -> execsTo F skip F s-assign : forall {x : Id} {e : Expn} {F : Frm} {v : Val} -> evalsTo F e v ---------------------------------------- -> execsTo F (assign x e) (update x v F) s-seq : forall {s1 s2 : Stmt} {F0 F1 F2 : Frm} -> (execsTo F0 s1 F1) -> (execsTo F1 s2 F2) ------------------------------ -> (execsTo F0 (seq s1 s2) F2) s-if-then : forall {c : Cond} {s1 s2 : Stmt} {F F' : Frm} -> (passes F c) -> (execsTo F s1 F') -------------------------------------- -> (execsTo F (ifThenElse c s1 s2) F') s-if-else : forall {c : Cond} {s1 s2 : Stmt} {F F' : Frm} -> (fails F c) -> (execsTo F s2 F') -------------------------------------- -> (execsTo F (ifThenElse c s1 s2) F') s-repeat-0 : forall {s : Stmt} {x : Id} {F : Frm} -> (mapsTo F x 0) ------------------------------- -> (execsTo F (repeatBy x s) F) s-repeat-suc : forall {n : nat} {s : Stmt} {x : Id} {F F' : Frm} -> (mapsTo F x (suc n)) -> (execsTo F (seq (seq s (assign x (val n))) (repeatBy x s)) F') ----------------------------------------------------------------- -> (execsTo F (repeatBy x s) F') -- -- A lil cheat: "returns" is just assign; doesn't exit s-return : forall {e : Expn} {F : Frm} {rv : Val} -> (evalsTo F e rv) ------------------------------------------------- -> (execsTo F (returns e) (update "retval" rv F)) postulate frm-compare : Frm -> Frm -> bool frm-iso : Frm -> Frm -> Set frm-not-iso : Frm -> Frm -> Set postulate s-thm : forall {s : Stmt}{F Ff Fr : Frm} -> execsTo F s Fr -> equiv (exec s F) Ff ---------------------- -> frm-iso Ff Fr
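The functional semantics above (lkup, update, eval) has a direct executable reading; the following is a small Haskell sketch for illustration only, with hypothetical names, Int standing in for the Agda naturals, and truncated subtraction matching ∸.

type Id  = String
type Val = Int
type Frm = [(Id, Val)]

data Expn = Val Val | Var Id | Plus Expn Expn | Minus Expn Expn | ScaleBy Expn Val

lkup :: Id -> Frm -> Val
lkup _ []           = 0                                  -- unbound variables read as 0
lkup x ((y, w) : f) = if x == y then w else lkup x f

update :: Id -> Val -> Frm -> Frm
update x v []           = [(x, v)]
update x v ((y, w) : f) = if x == y then (y, v) : f else (y, w) : update x v f

eval :: Expn -> Frm -> Val
eval (Val v)       _ = v
eval (Var x)       f = lkup x f
eval (Plus e1 e2)  f = eval e1 f + eval e2 f
eval (Minus e1 e2) f = max 0 (eval e1 f - eval e2 f)     -- truncated, like Agda's ∸
eval (ScaleBy e v) f = eval e f * v

-- e.g. eval (Plus (Var "x") (Val 1)) [("x", 41)]  ==  42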
module KROME using Reexport: @reexport using CBinding: 𝐣𝐥 include("libkrome.jl") """ examples_dir() Return the directory where the examples provided with KROME.jl are located. If KROME is installed as a regular package (with `]add KROME`), these files are read-only and should *not* be modified. To find out which files are available, use, e.g., `readdir`: # Examples ```julia julia> readdir(examples_dir()) 2-element Array{String,1}: "av-slab-benchmark" "test_hello" ``` """ examples_dir() = joinpath(pathof(KROME) |> dirname |> dirname, "examples") end
Recreating the story of humanity's past by studying ancient bones can hit a snag when they deteriorate, but scientists are now reporting an advance inspired by seashells that can better preserve valuable remains. Their findings, which appear in the ACS journal Langmuir, could have wide-ranging implications for both archeology and paleontology. Luigi Dei and colleagues explain that a process similar to osteoporosis causes bones discovered at historically significant sites to become brittle and fragile — and in the process, lose clues to the culture they were once part of. Preserving them has proved challenging. Current techniques to harden and strengthen bones use vinyl and acrylic polymers. They act as a sort of glue, filling in cracks and holding fragments together, but they are not ideal. In an effort to stanch the loss of information due to damage, Dei's team set out to find a better way to preserve old bones. The researchers turned to seashells for inspiration. Using skeletal fragments from the Late Middle Ages, they grew aragonite, a kind of lime that some sea animals produce to shore up their shells, on the bones in a controlled way. The treatment hardened the surfaces of the bones, as well as the pores inside them, making the ancient remains 50 to 70 percent sturdier. "These results could have immediate impact for preserving archeological and paleontological bone remains," the scientists conclude. The authors acknowledge funding from Consorzio Interuniversitario per lo Sviluppo dei Sistemi a Grande Interfase (CSGI), Florence, Italy; the University of Florence; the TEMART Project funded by the European Fund for Regional Development; the Tuscany region; and the S.I.C.A.M.O.R. PAR-FAS Project Tuscany Region.
State Before: α : Type u_1 β : Type ?u.4109768 E : Type u_2 F : Type ?u.4109774 inst✝⁴ : MeasurableSpace α inst✝³ : NormedAddCommGroup E inst✝² : PartialOrder α inst✝¹ : MeasurableSingletonClass α f : α → E μ : MeasureTheory.Measure α a b : α inst✝ : NoAtoms μ ⊢ ↑↑μ {b} ≠ ⊤ State After: α : Type u_1 β : Type ?u.4109768 E : Type u_2 F : Type ?u.4109774 inst✝⁴ : MeasurableSpace α inst✝³ : NormedAddCommGroup E inst✝² : PartialOrder α inst✝¹ : MeasurableSingletonClass α f : α → E μ : MeasureTheory.Measure α a b : α inst✝ : NoAtoms μ ⊢ 0 ≠ ⊤ Tactic: rw [measure_singleton] State Before: α : Type u_1 β : Type ?u.4109768 E : Type u_2 F : Type ?u.4109774 inst✝⁴ : MeasurableSpace α inst✝³ : NormedAddCommGroup E inst✝² : PartialOrder α inst✝¹ : MeasurableSingletonClass α f : α → E μ : MeasureTheory.Measure α a b : α inst✝ : NoAtoms μ ⊢ 0 ≠ ⊤ State After: no goals Tactic: exact ENNReal.zero_ne_top
(* Author: Tobias Nipkow *) theory Abs_Int3 imports Abs_Int2_ivl begin subsection "Widening and Narrowing" class widen = fixes widen :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infix "\<nabla>" 65) class narrow = fixes narrow :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infix "\<triangle>" 65) class wn = widen + narrow + order + assumes widen1: "x \<le> x \<nabla> y" assumes widen2: "y \<le> x \<nabla> y" assumes narrow1: "y \<le> x \<Longrightarrow> y \<le> x \<triangle> y" assumes narrow2: "y \<le> x \<Longrightarrow> x \<triangle> y \<le> x" begin lemma narrowid[simp]: "x \<triangle> x = x" by (metis eq_iff narrow1 narrow2) end lemma top_widen_top[simp]: "\<top> \<nabla> \<top> = (\<top>::_::{wn,order_top})" by (metis eq_iff top_greatest widen2) instantiation ivl :: wn begin definition "widen_rep p1 p2 = (if is_empty_rep p1 then p2 else if is_empty_rep p2 then p1 else let (l1,h1) = p1; (l2,h2) = p2 in (if l2 < l1 then Minf else l1, if h1 < h2 then Pinf else h1))" lift_definition widen_ivl :: "ivl \<Rightarrow> ivl \<Rightarrow> ivl" is widen_rep by(auto simp: widen_rep_def eq_ivl_iff) definition "narrow_rep p1 p2 = (if is_empty_rep p1 \<or> is_empty_rep p2 then empty_rep else let (l1,h1) = p1; (l2,h2) = p2 in (if l1 = Minf then l2 else l1, if h1 = Pinf then h2 else h1))" lift_definition narrow_ivl :: "ivl \<Rightarrow> ivl \<Rightarrow> ivl" is narrow_rep by(auto simp: narrow_rep_def eq_ivl_iff) instance proof qed (transfer, auto simp: widen_rep_def narrow_rep_def le_iff_subset \<gamma>_rep_def subset_eq is_empty_rep_def empty_rep_def eq_ivl_def split: if_splits extended.splits)+ end instantiation st :: ("{order_top,wn}")wn begin lift_definition widen_st :: "'a st \<Rightarrow> 'a st \<Rightarrow> 'a st" is "map2_st_rep (op \<nabla>)" by(auto simp: eq_st_def) lift_definition narrow_st :: "'a st \<Rightarrow> 'a st \<Rightarrow> 'a st" is "map2_st_rep (op \<triangle>)" by(auto simp: eq_st_def) instance proof (standard, goal_cases) case 1 thus ?case by transfer (simp add: less_eq_st_rep_iff widen1) next case 2 thus ?case by transfer (simp add: less_eq_st_rep_iff widen2) next case 3 thus ?case by transfer (simp add: less_eq_st_rep_iff narrow1) next case 4 thus ?case by transfer (simp add: less_eq_st_rep_iff narrow2) qed end instantiation option :: (wn)wn begin fun widen_option where "None \<nabla> x = x" | "x \<nabla> None = x" | "(Some x) \<nabla> (Some y) = Some(x \<nabla> y)" fun narrow_option where "None \<triangle> x = None" | "x \<triangle> None = None" | "(Some x) \<triangle> (Some y) = Some(x \<triangle> y)" instance proof (standard, goal_cases) case (1 x y) thus ?case by(induct x y rule: widen_option.induct)(simp_all add: widen1) next case (2 x y) thus ?case by(induct x y rule: widen_option.induct)(simp_all add: widen2) next case (3 x y) thus ?case by(induct x y rule: narrow_option.induct) (simp_all add: narrow1) next case (4 y x) thus ?case by(induct x y rule: narrow_option.induct) (simp_all add: narrow2) qed end definition map2_acom :: "('a \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> 'a acom \<Rightarrow> 'a acom \<Rightarrow> 'a acom" where "map2_acom f C1 C2 = annotate (\<lambda>p. f (anno C1 p) (anno C2 p)) (strip C1)" instantiation acom :: (widen)widen begin definition "widen_acom = map2_acom (op \<nabla>)" instance .. end instantiation acom :: (narrow)narrow begin definition "narrow_acom = map2_acom (op \<triangle>)" instance .. 
end lemma strip_map2_acom[simp]: "strip C1 = strip C2 \<Longrightarrow> strip(map2_acom f C1 C2) = strip C1" by(simp add: map2_acom_def) (*by(induct f C1 C2 rule: map2_acom.induct) simp_all*) lemma strip_widen_acom[simp]: "strip C1 = strip C2 \<Longrightarrow> strip(C1 \<nabla> C2) = strip C1" by(simp add: widen_acom_def) lemma strip_narrow_acom[simp]: "strip C1 = strip C2 \<Longrightarrow> strip(C1 \<triangle> C2) = strip C1" by(simp add: narrow_acom_def) lemma narrow1_acom: "C2 \<le> C1 \<Longrightarrow> C2 \<le> C1 \<triangle> (C2::'a::wn acom)" by(simp add: narrow_acom_def narrow1 map2_acom_def less_eq_acom_def size_annos) lemma narrow2_acom: "C2 \<le> C1 \<Longrightarrow> C1 \<triangle> (C2::'a::wn acom) \<le> C1" by(simp add: narrow_acom_def narrow2 map2_acom_def less_eq_acom_def size_annos) subsubsection "Pre-fixpoint computation" definition iter_widen :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> ('a::{order,widen})option" where "iter_widen f = while_option (\<lambda>x. \<not> f x \<le> x) (\<lambda>x. x \<nabla> f x)" definition iter_narrow :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> ('a::{order,narrow})option" where "iter_narrow f = while_option (\<lambda>x. x \<triangle> f x < x) (\<lambda>x. x \<triangle> f x)" definition pfp_wn :: "('a::{order,widen,narrow} \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a option" where "pfp_wn f x = (case iter_widen f x of None \<Rightarrow> None | Some p \<Rightarrow> iter_narrow f p)" lemma iter_widen_pfp: "iter_widen f x = Some p \<Longrightarrow> f p \<le> p" by(auto simp add: iter_widen_def dest: while_option_stop) lemma iter_widen_inv: assumes "!!x. P x \<Longrightarrow> P(f x)" "!!x1 x2. P x1 \<Longrightarrow> P x2 \<Longrightarrow> P(x1 \<nabla> x2)" and "P x" and "iter_widen f x = Some y" shows "P y" using while_option_rule[where P = "P", OF _ assms(4)[unfolded iter_widen_def]] by (blast intro: assms(1-3)) lemma strip_while: fixes f :: "'a acom \<Rightarrow> 'a acom" assumes "\<forall>C. strip (f C) = strip C" and "while_option P f C = Some C'" shows "strip C' = strip C" using while_option_rule[where P = "\<lambda>C'. strip C' = strip C", OF _ assms(2)] by (metis assms(1)) lemma strip_iter_widen: fixes f :: "'a::{order,widen} acom \<Rightarrow> 'a acom" assumes "\<forall>C. strip (f C) = strip C" and "iter_widen f C = Some C'" shows "strip C' = strip C" proof- have "\<forall>C. strip(C \<nabla> f C) = strip C" by (metis assms(1) strip_map2_acom widen_acom_def) from strip_while[OF this] assms(2) show ?thesis by(simp add: iter_widen_def) qed lemma iter_narrow_pfp: assumes mono: "!!x1 x2::_::wn acom. P x1 \<Longrightarrow> P x2 \<Longrightarrow> x1 \<le> x2 \<Longrightarrow> f x1 \<le> f x2" and Pinv: "!!x. P x \<Longrightarrow> P(f x)" "!!x1 x2. P x1 \<Longrightarrow> P x2 \<Longrightarrow> P(x1 \<triangle> x2)" and "P p0" and "f p0 \<le> p0" and "iter_narrow f p0 = Some p" shows "P p \<and> f p \<le> p" proof- let ?Q = "%p. P p \<and> f p \<le> p \<and> p \<le> p0" { fix p assume "?Q p" note P = conjunct1[OF this] and 12 = conjunct2[OF this] note 1 = conjunct1[OF 12] and 2 = conjunct2[OF 12] let ?p' = "p \<triangle> f p" have "?Q ?p'" proof auto show "P ?p'" by (blast intro: P Pinv) have "f ?p' \<le> f p" by(rule mono[OF `P (p \<triangle> f p)` P narrow2_acom[OF 1]]) also have "\<dots> \<le> ?p'" by(rule narrow1_acom[OF 1]) finally show "f ?p' \<le> ?p'" . have "?p' \<le> p" by (rule narrow2_acom[OF 1]) also have "p \<le> p0" by(rule 2) finally show "?p' \<le> p0" . 
qed } thus ?thesis using while_option_rule[where P = ?Q, OF _ assms(6)[simplified iter_narrow_def]] by (blast intro: assms(4,5) le_refl) qed lemma pfp_wn_pfp: assumes mono: "!!x1 x2::_::wn acom. P x1 \<Longrightarrow> P x2 \<Longrightarrow> x1 \<le> x2 \<Longrightarrow> f x1 \<le> f x2" and Pinv: "P x" "!!x. P x \<Longrightarrow> P(f x)" "!!x1 x2. P x1 \<Longrightarrow> P x2 \<Longrightarrow> P(x1 \<nabla> x2)" "!!x1 x2. P x1 \<Longrightarrow> P x2 \<Longrightarrow> P(x1 \<triangle> x2)" and pfp_wn: "pfp_wn f x = Some p" shows "P p \<and> f p \<le> p" proof- from pfp_wn obtain p0 where its: "iter_widen f x = Some p0" "iter_narrow f p0 = Some p" by(auto simp: pfp_wn_def split: option.splits) have "P p0" by (blast intro: iter_widen_inv[where P="P"] its(1) Pinv(1-3)) thus ?thesis by - (assumption | rule iter_narrow_pfp[where P=P] mono Pinv(2,4) iter_widen_pfp its)+ qed lemma strip_pfp_wn: "\<lbrakk> \<forall>C. strip(f C) = strip C; pfp_wn f C = Some C' \<rbrakk> \<Longrightarrow> strip C' = strip C" by(auto simp add: pfp_wn_def iter_narrow_def split: option.splits) (metis (mono_tags) strip_iter_widen strip_narrow_acom strip_while) locale Abs_Int_wn = Abs_Int_inv_mono where \<gamma>=\<gamma> for \<gamma> :: "'av::{wn,bounded_lattice} \<Rightarrow> val set" begin definition AI_wn :: "com \<Rightarrow> 'av st option acom option" where "AI_wn c = pfp_wn (step' \<top>) (bot c)" lemma AI_wn_correct: "AI_wn c = Some C \<Longrightarrow> CS c \<le> \<gamma>\<^sub>c C" proof(simp add: CS_def AI_wn_def) assume 1: "pfp_wn (step' \<top>) (bot c) = Some C" have 2: "strip C = c \<and> step' \<top> C \<le> C" by(rule pfp_wn_pfp[where x="bot c"]) (simp_all add: 1 mono_step'_top) have pfp: "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c C" proof(rule order_trans) show "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c (step' \<top> C)" by(rule step_step') show "... \<le> \<gamma>\<^sub>c C" by(rule mono_gamma_c[OF conjunct2[OF 2]]) qed have 3: "strip (\<gamma>\<^sub>c C) = c" by(simp add: strip_pfp_wn[OF _ 1]) have "lfp c (step (\<gamma>\<^sub>o \<top>)) \<le> \<gamma>\<^sub>c C" by(rule lfp_lowerbound[simplified,where f="step (\<gamma>\<^sub>o \<top>)", OF 3 pfp]) thus "lfp c (step UNIV) \<le> \<gamma>\<^sub>c C" by simp qed end global_interpretation Abs_Int_wn where \<gamma> = \<gamma>_ivl and num' = num_ivl and plus' = "op +" and test_num' = in_ivl and inv_plus' = inv_plus_ivl and inv_less' = inv_less_ivl defines AI_wn_ivl = AI_wn .. subsubsection "Tests" definition "step_up_ivl n = ((\<lambda>C. C \<nabla> step_ivl \<top> C)^^n)" definition "step_down_ivl n = ((\<lambda>C. C \<triangle> step_ivl \<top> C)^^n)" text{* For @{const test3_ivl}, @{const AI_ivl} needed as many iterations as the loop took to execute. 
In contrast, @{const AI_wn_ivl} converges in a constant number of steps: *} value "show_acom (step_up_ivl 1 (bot test3_ivl))" value "show_acom (step_up_ivl 2 (bot test3_ivl))" value "show_acom (step_up_ivl 3 (bot test3_ivl))" value "show_acom (step_up_ivl 4 (bot test3_ivl))" value "show_acom (step_up_ivl 5 (bot test3_ivl))" value "show_acom (step_up_ivl 6 (bot test3_ivl))" value "show_acom (step_up_ivl 7 (bot test3_ivl))" value "show_acom (step_up_ivl 8 (bot test3_ivl))" value "show_acom (step_down_ivl 1 (step_up_ivl 8 (bot test3_ivl)))" value "show_acom (step_down_ivl 2 (step_up_ivl 8 (bot test3_ivl)))" value "show_acom (step_down_ivl 3 (step_up_ivl 8 (bot test3_ivl)))" value "show_acom (step_down_ivl 4 (step_up_ivl 8 (bot test3_ivl)))" value "show_acom_opt (AI_wn_ivl test3_ivl)" text{* Now all the analyses terminate: *} value "show_acom_opt (AI_wn_ivl test4_ivl)" value "show_acom_opt (AI_wn_ivl test5_ivl)" value "show_acom_opt (AI_wn_ivl test6_ivl)" subsubsection "Generic Termination Proof" lemma top_on_opt_widen: "top_on_opt o1 X \<Longrightarrow> top_on_opt o2 X \<Longrightarrow> top_on_opt (o1 \<nabla> o2 :: _ st option) X" apply(induct o1 o2 rule: widen_option.induct) apply (auto) by transfer simp lemma top_on_opt_narrow: "top_on_opt o1 X \<Longrightarrow> top_on_opt o2 X \<Longrightarrow> top_on_opt (o1 \<triangle> o2 :: _ st option) X" apply(induct o1 o2 rule: narrow_option.induct) apply (auto) by transfer simp (* FIXME mk anno abbrv *) lemma annos_map2_acom[simp]: "strip C2 = strip C1 \<Longrightarrow> annos(map2_acom f C1 C2) = map (%(x,y).f x y) (zip (annos C1) (annos C2))" by(simp add: map2_acom_def list_eq_iff_nth_eq size_annos anno_def[symmetric] size_annos_same[of C1 C2]) lemma top_on_acom_widen: "\<lbrakk>top_on_acom C1 X; strip C1 = strip C2; top_on_acom C2 X\<rbrakk> \<Longrightarrow> top_on_acom (C1 \<nabla> C2 :: _ st option acom) X" by(auto simp add: widen_acom_def top_on_acom_def)(metis top_on_opt_widen in_set_zipE) lemma top_on_acom_narrow: "\<lbrakk>top_on_acom C1 X; strip C1 = strip C2; top_on_acom C2 X\<rbrakk> \<Longrightarrow> top_on_acom (C1 \<triangle> C2 :: _ st option acom) X" by(auto simp add: narrow_acom_def top_on_acom_def)(metis top_on_opt_narrow in_set_zipE) text{* The assumptions for widening and narrowing differ because during narrowing we have the invariant @{prop"y \<le> x"} (where @{text y} is the next iterate), but during widening there is no such invariant, there we only have that not yet @{prop"y \<le> x"}. This complicates the termination proof for widening. *} locale Measure_wn = Measure1 where m=m for m :: "'av::{order_top,wn} \<Rightarrow> nat" + fixes n :: "'av \<Rightarrow> nat" assumes m_anti_mono: "x \<le> y \<Longrightarrow> m x \<ge> m y" assumes m_widen: "~ y \<le> x \<Longrightarrow> m(x \<nabla> y) < m x" assumes n_narrow: "y \<le> x \<Longrightarrow> x \<triangle> y < x \<Longrightarrow> n(x \<triangle> y) < n x" begin lemma m_s_anti_mono_rep: assumes "\<forall>x. S1 x \<le> S2 x" shows "(\<Sum>x\<in>X. m (S2 x)) \<le> (\<Sum>x\<in>X. m (S1 x))" proof- from assms have "\<forall>x. m(S1 x) \<ge> m(S2 x)" by (metis m_anti_mono) thus "(\<Sum>x\<in>X. m (S2 x)) \<le> (\<Sum>x\<in>X. m (S1 x))" by (metis sum_mono) qed lemma m_s_anti_mono: "S1 \<le> S2 \<Longrightarrow> m_s S1 X \<ge> m_s S2 X" unfolding m_s_def apply (transfer fixing: m) apply(simp add: less_eq_st_rep_iff eq_st_def m_s_anti_mono_rep) done lemma m_s_widen_rep: assumes "finite X" "S1 = S2 on -X" "\<not> S2 x \<le> S1 x" shows "(\<Sum>x\<in>X. 
m (S1 x \<nabla> S2 x)) < (\<Sum>x\<in>X. m (S1 x))" proof- have 1: "\<forall>x\<in>X. m(S1 x) \<ge> m(S1 x \<nabla> S2 x)" by (metis m_anti_mono wn_class.widen1) have "x \<in> X" using assms(2,3) by(auto simp add: Ball_def) hence 2: "\<exists>x\<in>X. m(S1 x) > m(S1 x \<nabla> S2 x)" using assms(3) m_widen by blast from sum_strict_mono_ex1[OF `finite X` 1 2] show ?thesis . qed lemma m_s_widen: "finite X \<Longrightarrow> fun S1 = fun S2 on -X ==> ~ S2 \<le> S1 \<Longrightarrow> m_s (S1 \<nabla> S2) X < m_s S1 X" apply(auto simp add: less_st_def m_s_def) apply (transfer fixing: m) apply(auto simp add: less_eq_st_rep_iff m_s_widen_rep) done lemma m_o_anti_mono: "finite X \<Longrightarrow> top_on_opt o1 (-X) \<Longrightarrow> top_on_opt o2 (-X) \<Longrightarrow> o1 \<le> o2 \<Longrightarrow> m_o o1 X \<ge> m_o o2 X" proof(induction o1 o2 rule: less_eq_option.induct) case 1 thus ?case by (simp add: m_o_def)(metis m_s_anti_mono) next case 2 thus ?case by(simp add: m_o_def le_SucI m_s_h split: option.splits) next case 3 thus ?case by simp qed lemma m_o_widen: "\<lbrakk> finite X; top_on_opt S1 (-X); top_on_opt S2 (-X); \<not> S2 \<le> S1 \<rbrakk> \<Longrightarrow> m_o (S1 \<nabla> S2) X < m_o S1 X" by(auto simp: m_o_def m_s_h less_Suc_eq_le m_s_widen split: option.split) lemma m_c_widen: "strip C1 = strip C2 \<Longrightarrow> top_on_acom C1 (-vars C1) \<Longrightarrow> top_on_acom C2 (-vars C2) \<Longrightarrow> \<not> C2 \<le> C1 \<Longrightarrow> m_c (C1 \<nabla> C2) < m_c C1" apply(auto simp: m_c_def widen_acom_def map2_acom_def size_annos[symmetric] anno_def[symmetric]sum_list_sum_nth) apply(subgoal_tac "length(annos C2) = length(annos C1)") prefer 2 apply (simp add: size_annos_same2) apply (auto) apply(rule sum_strict_mono_ex1) apply(auto simp add: m_o_anti_mono vars_acom_def anno_def top_on_acom_def top_on_opt_widen widen1 less_eq_acom_def listrel_iff_nth) apply(rule_tac x=p in bexI) apply (auto simp: vars_acom_def m_o_widen top_on_acom_def) done definition n_s :: "'av st \<Rightarrow> vname set \<Rightarrow> nat" ("n\<^sub>s") where "n\<^sub>s S X = (\<Sum>x\<in>X. n(fun S x))" lemma n_s_narrow_rep: assumes "finite X" "S1 = S2 on -X" "\<forall>x. S2 x \<le> S1 x" "\<forall>x. S1 x \<triangle> S2 x \<le> S1 x" "S1 x \<noteq> S1 x \<triangle> S2 x" shows "(\<Sum>x\<in>X. n (S1 x \<triangle> S2 x)) < (\<Sum>x\<in>X. n (S1 x))" proof- have 1: "\<forall>x. n(S1 x \<triangle> S2 x) \<le> n(S1 x)" by (metis assms(3) assms(4) eq_iff less_le_not_le n_narrow) have "x \<in> X" by (metis Compl_iff assms(2) assms(5) narrowid) hence 2: "\<exists>x\<in>X. 
n(S1 x \<triangle> S2 x) < n(S1 x)" by (metis assms(3-5) eq_iff less_le_not_le n_narrow) show ?thesis apply(rule sum_strict_mono_ex1[OF `finite X`]) using 1 2 by blast+ qed lemma n_s_narrow: "finite X \<Longrightarrow> fun S1 = fun S2 on -X \<Longrightarrow> S2 \<le> S1 \<Longrightarrow> S1 \<triangle> S2 < S1 \<Longrightarrow> n\<^sub>s (S1 \<triangle> S2) X < n\<^sub>s S1 X" apply(auto simp add: less_st_def n_s_def) apply (transfer fixing: n) apply(auto simp add: less_eq_st_rep_iff eq_st_def fun_eq_iff n_s_narrow_rep) done definition n_o :: "'av st option \<Rightarrow> vname set \<Rightarrow> nat" ("n\<^sub>o") where "n\<^sub>o opt X = (case opt of None \<Rightarrow> 0 | Some S \<Rightarrow> n\<^sub>s S X + 1)" lemma n_o_narrow: "top_on_opt S1 (-X) \<Longrightarrow> top_on_opt S2 (-X) \<Longrightarrow> finite X \<Longrightarrow> S2 \<le> S1 \<Longrightarrow> S1 \<triangle> S2 < S1 \<Longrightarrow> n\<^sub>o (S1 \<triangle> S2) X < n\<^sub>o S1 X" apply(induction S1 S2 rule: narrow_option.induct) apply(auto simp: n_o_def n_s_narrow) done definition n_c :: "'av st option acom \<Rightarrow> nat" ("n\<^sub>c") where "n\<^sub>c C = sum_list (map (\<lambda>a. n\<^sub>o a (vars C)) (annos C))" lemma less_annos_iff: "(C1 < C2) = (C1 \<le> C2 \<and> (\<exists>i<length (annos C1). annos C1 ! i < annos C2 ! i))" by(metis (hide_lams, no_types) less_le_not_le le_iff_le_annos size_annos_same2) lemma n_c_narrow: "strip C1 = strip C2 \<Longrightarrow> top_on_acom C1 (- vars C1) \<Longrightarrow> top_on_acom C2 (- vars C2) \<Longrightarrow> C2 \<le> C1 \<Longrightarrow> C1 \<triangle> C2 < C1 \<Longrightarrow> n\<^sub>c (C1 \<triangle> C2) < n\<^sub>c C1" apply(auto simp: n_c_def narrow_acom_def sum_list_sum_nth) apply(subgoal_tac "length(annos C2) = length(annos C1)") prefer 2 apply (simp add: size_annos_same2) apply (auto) apply(simp add: less_annos_iff le_iff_le_annos) apply(rule sum_strict_mono_ex1) apply (auto simp: vars_acom_def top_on_acom_def) apply (metis n_o_narrow nth_mem finite_cvars less_imp_le le_less order_refl) apply(rule_tac x=i in bexI) prefer 2 apply simp apply(rule n_o_narrow[where X = "vars(strip C2)"]) apply (simp_all) done end lemma iter_widen_termination: fixes m :: "'a::wn acom \<Rightarrow> nat" assumes P_f: "\<And>C. P C \<Longrightarrow> P(f C)" and P_widen: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> P(C1 \<nabla> C2)" and m_widen: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> ~ C2 \<le> C1 \<Longrightarrow> m(C1 \<nabla> C2) < m C1" and "P C" shows "EX C'. iter_widen f C = Some C'" proof(simp add: iter_widen_def, rule measure_while_option_Some[where P = P and f=m]) show "P C" by(rule `P C`) next fix C assume "P C" "\<not> f C \<le> C" thus "P (C \<nabla> f C) \<and> m (C \<nabla> f C) < m C" by(simp add: P_f P_widen m_widen) qed lemma iter_narrow_termination: fixes n :: "'a::wn acom \<Rightarrow> nat" assumes P_f: "\<And>C. P C \<Longrightarrow> P(f C)" and P_narrow: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> P(C1 \<triangle> C2)" and mono: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> C1 \<le> C2 \<Longrightarrow> f C1 \<le> f C2" and n_narrow: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> C2 \<le> C1 \<Longrightarrow> C1 \<triangle> C2 < C1 \<Longrightarrow> n(C1 \<triangle> C2) < n C1" and init: "P C" "f C \<le> C" shows "EX C'. iter_narrow f C = Some C'" proof(simp add: iter_narrow_def, rule measure_while_option_Some[where f=n and P = "%C. 
P C \<and> f C \<le> C"]) show "P C \<and> f C \<le> C" using init by blast next fix C assume 1: "P C \<and> f C \<le> C" and 2: "C \<triangle> f C < C" hence "P (C \<triangle> f C)" by(simp add: P_f P_narrow) moreover then have "f (C \<triangle> f C) \<le> C \<triangle> f C" by (metis narrow1_acom narrow2_acom 1 mono order_trans) moreover have "n (C \<triangle> f C) < n C" using 1 2 by(simp add: n_narrow P_f) ultimately show "(P (C \<triangle> f C) \<and> f (C \<triangle> f C) \<le> C \<triangle> f C) \<and> n(C \<triangle> f C) < n C" by blast qed locale Abs_Int_wn_measure = Abs_Int_wn where \<gamma>=\<gamma> + Measure_wn where m=m for \<gamma> :: "'av::{wn,bounded_lattice} \<Rightarrow> val set" and m :: "'av \<Rightarrow> nat" subsubsection "Termination: Intervals" definition m_rep :: "eint2 \<Rightarrow> nat" where "m_rep p = (if is_empty_rep p then 3 else let (l,h) = p in (case l of Minf \<Rightarrow> 0 | _ \<Rightarrow> 1) + (case h of Pinf \<Rightarrow> 0 | _ \<Rightarrow> 1))" lift_definition m_ivl :: "ivl \<Rightarrow> nat" is m_rep by(auto simp: m_rep_def eq_ivl_iff) lemma m_ivl_nice: "m_ivl[l,h] = (if [l,h] = \<bottom> then 3 else (if l = Minf then 0 else 1) + (if h = Pinf then 0 else 1))" unfolding bot_ivl_def by transfer (auto simp: m_rep_def eq_ivl_empty split: extended.split) lemma m_ivl_height: "m_ivl iv \<le> 3" by transfer (simp add: m_rep_def split: prod.split extended.split) lemma m_ivl_anti_mono: "y \<le> x \<Longrightarrow> m_ivl x \<le> m_ivl y" by transfer (auto simp: m_rep_def is_empty_rep_def \<gamma>_rep_cases le_iff_subset split: prod.split extended.splits if_splits) lemma m_ivl_widen: "~ y \<le> x \<Longrightarrow> m_ivl(x \<nabla> y) < m_ivl x" by transfer (auto simp: m_rep_def widen_rep_def is_empty_rep_def \<gamma>_rep_cases le_iff_subset split: prod.split extended.splits if_splits) definition n_ivl :: "ivl \<Rightarrow> nat" where "n_ivl iv = 3 - m_ivl iv" lemma n_ivl_narrow: "x \<triangle> y < x \<Longrightarrow> n_ivl(x \<triangle> y) < n_ivl x" unfolding n_ivl_def apply(subst (asm) less_le_not_le) apply transfer by(auto simp add: m_rep_def narrow_rep_def is_empty_rep_def empty_rep_def \<gamma>_rep_cases le_iff_subset split: prod.splits if_splits extended.split) global_interpretation Abs_Int_wn_measure where \<gamma> = \<gamma>_ivl and num' = num_ivl and plus' = "op +" and test_num' = in_ivl and inv_plus' = inv_plus_ivl and inv_less' = inv_less_ivl and m = m_ivl and n = n_ivl and h = 3 proof (standard, goal_cases) case 2 thus ?case by(rule m_ivl_anti_mono) next case 1 thus ?case by(rule m_ivl_height) next case 3 thus ?case by(rule m_ivl_widen) next case 4 from 4(2) show ?case by(rule n_ivl_narrow) -- "note that the first assms is unnecessary for intervals" qed lemma iter_winden_step_ivl_termination: "\<exists>C. iter_widen (step_ivl \<top>) (bot c) = Some C" apply(rule iter_widen_termination[where m = "m_c" and P = "%C. strip C = c \<and> top_on_acom C (- vars C)"]) apply (auto simp add: m_c_widen top_on_bot top_on_step'[simplified comp_def vars_acom_def] vars_acom_def top_on_acom_widen) done lemma iter_narrow_step_ivl_termination: "top_on_acom C (- vars C) \<Longrightarrow> step_ivl \<top> C \<le> C \<Longrightarrow> \<exists>C'. iter_narrow (step_ivl \<top>) C = Some C'" apply(rule iter_narrow_termination[where n = "n_c" and P = "%C'. 
strip C = strip C' \<and> top_on_acom C' (-vars C')"]) apply(auto simp: top_on_step'[simplified comp_def vars_acom_def] mono_step'_top n_c_narrow vars_acom_def top_on_acom_narrow) done theorem AI_wn_ivl_termination: "\<exists>C. AI_wn_ivl c = Some C" apply(auto simp: AI_wn_def pfp_wn_def iter_winden_step_ivl_termination split: option.split) apply(rule iter_narrow_step_ivl_termination) apply(rule conjunct2) apply(rule iter_widen_inv[where f = "step' \<top>" and P = "%C. c = strip C & top_on_acom C (- vars C)"]) apply(auto simp: top_on_acom_widen top_on_step'[simplified comp_def vars_acom_def] iter_widen_pfp top_on_bot vars_acom_def) done (*unused_thms Abs_Int_init - *) subsubsection "Counterexamples" text{* Widening is increasing by assumption, but @{prop"x \<le> f x"} is not an invariant of widening. It can already be lost after the first step: *} lemma assumes "!!x y::'a::wn. x \<le> y \<Longrightarrow> f x \<le> f y" and "x \<le> f x" and "\<not> f x \<le> x" shows "x \<nabla> f x \<le> f(x \<nabla> f x)" nitpick[card = 3, expect = genuine, show_consts, timeout = 120] (* 1 < 2 < 3, f x = 2, x widen y = 3 -- guarantees termination with top=3 x = 1 Now f is mono, x <= f x, not f x <= x but x widen f x = 3, f 3 = 2, but not 3 <= 2 *) oops text{* Widening terminates but may converge more slowly than Kleene iteration. In the following model, Kleene iteration goes from 0 to the least pfp in one step but widening takes 2 steps to reach a strictly larger pfp: *} lemma assumes "!!x y::'a::wn. x \<le> y \<Longrightarrow> f x \<le> f y" and "x \<le> f x" and "\<not> f x \<le> x" and "f(f x) \<le> f x" shows "f(x \<nabla> f x) \<le> x \<nabla> f x" nitpick[card = 4, expect = genuine, show_consts, timeout = 120] (* 0 < 1 < 2 < 3 f: 1 1 3 3 0 widen 1 = 2 2 widen 3 = 3 and x widen y arbitrary, eg 3, which guarantees termination Kleene: f(f 0) = f 1 = 1 <= 1 = f 1 but because not f 0 <= 0, we obtain 0 widen f 0 = 0 wide 1 = 2, which is again not a pfp: not f 2 = 3 <= 2 Another widening step yields 2 widen f 2 = 2 widen 3 = 3 *) oops end
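The interval widening and narrowing operators used above (widen_rep and narrow_rep on interval representations) also have a straightforward executable reading. Below is a Haskell sketch with hypothetical names, under the assumption that empty intervals never arise (the theory handles them separately); widening pushes any bound that grew to infinity, which is what makes iter_widen terminate, while narrowing only refines bounds that are still infinite.

data Bound = MinusInf | Fin Integer | PlusInf
  deriving (Eq, Ord, Show)            -- derived order: MinusInf < Fin _ < PlusInf

data Ivl = Ivl Bound Bound deriving (Eq, Show)

-- widening: a bound that grew is pushed to infinity
widen :: Ivl -> Ivl -> Ivl
widen (Ivl l1 h1) (Ivl l2 h2) =
  Ivl (if l2 < l1 then MinusInf else l1)
      (if h1 < h2 then PlusInf  else h1)

-- narrowing: only infinite bounds may be refined by the new iterate
narrow :: Ivl -> Ivl -> Ivl
narrow (Ivl l1 h1) (Ivl l2 h2) =
  Ivl (if l1 == MinusInf then l2 else l1)
      (if h1 == PlusInf  then h2 else h1)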
(* * Copyright 2014, NICTA * * This software may be distributed and modified according to the terms of * the BSD 2-Clause license. Note that NO WARRANTY is provided. * See "LICENSE_BSD2.txt" for details. * * @TAG(NICTA_BSD) *) (* License: BSD, terms see file ./LICENSE *) theory SepInv imports SepCode begin (* FIXME: temporary hack for compatability - should generalise earlier proofs to avoid all the duplication in here *) definition inv_footprint :: "'a::c_type ptr \<Rightarrow> heap_assert" where "inv_footprint p \<equiv> \<lambda>s. dom s = {(x,y). x \<in> {ptr_val p..+size_of TYPE('a)}} - s_footprint p" text {* Like in Separation.thy, these arrows are defined using bsub and esub but have an \emph{input} syntax abbreviation with just sub. See original comment there for justification. *} definition sep_map_inv :: "'a::c_type ptr \<Rightarrow> 'a ptr_guard \<Rightarrow> 'a \<Rightarrow> heap_assert" ("_ \<mapsto>\<^sup>i\<^bsub>_\<^esub> _" [56,0,51] 56) where "p \<mapsto>\<^sup>i\<^bsub>g\<^esub> v \<equiv> p \<mapsto>\<^sub>g v \<and>\<^sup>* inv_footprint p" notation (input) sep_map_inv ("_ \<mapsto>\<^sup>i\<^sub>_ _" [56,1000,51] 56) definition sep_map_any_inv :: "'a ::c_type ptr \<Rightarrow> 'a ptr_guard \<Rightarrow> heap_assert" ("_ \<mapsto>\<^sup>i\<^bsub>_\<^esub> -" [56,0] 56) where "p \<mapsto>\<^sup>i\<^bsub>g\<^esub> - \<equiv> p \<mapsto>\<^sub>g - \<and>\<^sup>* inv_footprint p" notation (input) sep_map_any_inv ("_ \<mapsto>\<^sup>i\<^sub>_ -" [56,0] 56) definition sep_map'_inv :: "'a::c_type ptr \<Rightarrow> 'a ptr_guard \<Rightarrow> 'a \<Rightarrow> heap_assert" ("_ \<hookrightarrow>\<^sup>i\<^bsub>_\<^esub> _" [56,0,51] 56) where "p \<hookrightarrow>\<^sup>i\<^bsub>g\<^esub> v \<equiv> p \<hookrightarrow>\<^sub>g v \<and>\<^sup>* inv_footprint p" notation (input) sep_map'_inv ("_ \<hookrightarrow>\<^sup>i\<^sub>_ _" [56,1000,51] 56) definition sep_map'_any_inv :: "'a::c_type ptr \<Rightarrow> 'a ptr_guard \<Rightarrow> heap_assert" ("_ \<hookrightarrow>\<^sup>i\<^bsub>_\<^esub> -" [56,0] 56) where "p \<hookrightarrow>\<^sup>i\<^bsub>g\<^esub> - \<equiv> p \<hookrightarrow>\<^sub>g - \<and>\<^sup>* inv_footprint p" notation (input) sep_map'_any_inv ("_ \<hookrightarrow>\<^sup>i\<^sub>_ -" [56,0] 56) definition tagd_inv :: "'a ptr_guard \<Rightarrow> 'a::c_type ptr \<Rightarrow> heap_assert" (infix "\<turnstile>\<^sub>s\<^sup>i" 100) where "g \<turnstile>\<^sub>s\<^sup>i p \<equiv> g \<turnstile>\<^sub>s p \<and>\<^sup>* inv_footprint p" text {* ---- *} lemma sep_map'_g: "(p \<hookrightarrow>\<^sup>i\<^sub>g v) s \<Longrightarrow> g p" apply(unfold sep_map'_inv_def) apply(drule sep_conjD) apply clarsimp apply(erule sep_map'_g_exc) done lemma sep_map'_unfold: "(p \<hookrightarrow>\<^sup>i\<^sub>g v) = ((p \<hookrightarrow>\<^sup>i\<^sub>g v) \<and>\<^sup>* sep_true)" by (simp add: sep_map'_inv_def sep_map'_def sep_conj_ac) lemma sep_map'_any_unfold: "(i \<hookrightarrow>\<^sup>i\<^sub>g -) = ((i \<hookrightarrow>\<^sup>i\<^sub>g -) \<and>\<^sup>* sep_true)" apply(rule ext, simp add: sep_map'_any_inv_def sep_map'_any_def sep_conj_ac) apply rule apply(subst sep_conj_com) apply(subst sep_conj_assoc)+ apply(erule (1) sep_conj_impl) apply(clarsimp simp: sep_conj_ac) apply(subst (asm) sep_map'_unfold_exc, subst sep_conj_com) apply(subst sep_conj_exists, fast) apply(subst (asm) sep_conj_com) apply(subst (asm) sep_conj_assoc)+ apply(erule (1) sep_conj_impl) apply(subst sep_map'_unfold_exc) apply(subst (asm) sep_conj_exists, fast) done lemma sep_map'_conjE1: "\<lbrakk> (P 
\<and>\<^sup>* Q) s; \<And>s. P s \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g v) s \<rbrakk> \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g v) s" by (subst sep_map'_unfold, erule sep_conj_impl, simp+) lemma sep_map'_conjE2: "\<lbrakk> (P \<and>\<^sup>* Q) s; \<And>s. Q s \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g v) s \<rbrakk> \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g v) s" by (subst (asm) sep_conj_com, erule sep_map'_conjE1, simp) lemma sep_map'_any_conjE1: "\<lbrakk> (P \<and>\<^sup>* Q) s; \<And>s. P s \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g -) s \<rbrakk> \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g -) s" by (subst sep_map'_any_unfold, erule sep_conj_impl, simp+) lemma sep_map'_any_conjE2: "\<lbrakk> (P \<and>\<^sup>* Q) s; \<And>s. Q s \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g -) s \<rbrakk> \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g -) s" by (subst (asm) sep_conj_com, erule sep_map'_any_conjE1, simp) lemma sep_map_any_old: "(p \<mapsto>\<^sup>i\<^sub>g -) = (\<lambda>s. \<exists>v. (p \<mapsto>\<^sup>i\<^sub>g v) s)" apply(rule ext) apply(simp add: sep_map_inv_def sep_map_any_inv_def sep_map_any_def sep_conj_ac) apply(subst sep_conj_com) apply(subst sep_conj_exists) apply(simp add: sep_conj_com) done lemma sep_map'_old: "(p \<hookrightarrow>\<^sup>i\<^sub>g v) = ((p \<mapsto>\<^sup>i\<^sub>g v) \<and>\<^sup>* sep_true)" apply(rule ext) apply(simp add: sep_map'_inv_def sep_map_inv_def sep_map'_def sep_conj_ac) done lemma sep_map'_any_old: "(p \<hookrightarrow>\<^sup>i\<^sub>g -) = (\<lambda>s. \<exists>v. (p \<hookrightarrow>\<^sup>i\<^sub>g v) s)" apply(rule ext) apply(simp add: sep_map'_inv_def sep_map'_any_inv_def sep_map'_any_def sep_conj_exists) done lemma sep_map_sep_map' [simp]: "(p \<mapsto>\<^sup>i\<^sub>g v) s \<Longrightarrow> (p \<hookrightarrow>\<^sup>i\<^sub>g v) s" apply(unfold sep_map_inv_def sep_map'_inv_def sep_map'_def) apply(simp add: sep_conj_ac) apply(subst sep_conj_com) apply(subst sep_conj_assoc)+ apply(erule (1) sep_conj_impl) apply(erule sep_conj_sep_true) done lemmas guardI = sep_map'_g[OF sep_map_sep_map'] lemma sep_map_anyI [simp]: "(p \<mapsto>\<^sup>i\<^sub>g v) s \<Longrightarrow> (p \<mapsto>\<^sup>i\<^sub>g -) s" apply(simp add: sep_map_any_inv_def sep_map_inv_def sep_map_any_def sep_conj_ac) apply(erule (1) sep_conj_impl) apply fast done lemma sep_map_anyD: "(p \<mapsto>\<^sup>i\<^sub>g -) s \<Longrightarrow> \<exists>v. 
(p \<mapsto>\<^sup>i\<^sub>g v) s" apply(simp add: sep_map_any_def sep_map_any_inv_def sep_map_inv_def sep_conj_ac) apply(subst (asm) sep_conj_com) apply(subst (asm) sep_conj_exists) apply(clarsimp simp: sep_conj_ac) done lemma sep_conj_mapD: "((i \<mapsto>\<^sup>i\<^sub>g v) \<and>\<^sup>* P) s \<Longrightarrow> (i \<hookrightarrow>\<^sup>i\<^sub>g v) s \<and> ((i \<mapsto>\<^sup>i\<^sub>g -) \<and>\<^sup>* P) s" apply rule apply(rule sep_map'_conjE2) apply (simp add:sep_conj_ac)+ apply(erule sep_conj_impl) apply simp+ done lemma sep_map'_ptr_safe: "(p \<hookrightarrow>\<^sup>i\<^sub>g (v::'a::mem_type)) (lift_state (h,d)) \<Longrightarrow> ptr_safe p d" apply(unfold sep_map'_inv_def) apply(rule sep_map'_ptr_safe_exc) apply(subst sep_map'_unfold_exc) apply(erule (1) sep_conj_impl) apply simp done lemmas sep_map_ptr_safe = sep_map'_ptr_safe[OF sep_map_sep_map'] lemma sep_map_any_ptr_safe: fixes p::"'a::mem_type ptr" shows "(p \<mapsto>\<^sup>i\<^sub>g -) (lift_state (h, d)) \<Longrightarrow> ptr_safe p d" apply(drule sep_map_anyD) apply(blast intro:sep_map_ptr_safe) done lemma sep_heap_update': "(g \<turnstile>\<^sub>s\<^sup>i p \<and>\<^sup>* (p \<mapsto>\<^sup>i\<^sub>g v \<longrightarrow>\<^sup>* P)) (lift_state (h,d)) \<Longrightarrow> P (lift_state (heap_update p (v::'a::mem_type) h,d))" apply(rule_tac g=g in sep_heap_update'_exc) apply(unfold tagd_inv_def) apply(subst (asm) sep_conj_assoc)+ apply(erule (1) sep_conj_impl) apply(subst (asm) sep_map_inv_def) apply(simp add: sep_conj_ac) apply(drule sep_conjD, clarsimp) apply(rule sep_implI, clarsimp) apply(drule sep_implD) apply(drule_tac x="s\<^sub>0 ++ s'" in spec) apply(simp add: map_disj_com map_add_disj) apply(clarsimp simp: map_disj_com) apply(erule notE) apply(erule (1) sep_conjI) apply(simp add: map_disj_com) apply(subst map_add_com) apply simp+ done lemma tagd_g: "(g \<turnstile>\<^sub>s\<^sup>i p \<and>\<^sup>* P) s \<Longrightarrow> g p" apply(unfold tagd_inv_def) apply(auto simp: tagd_def dest!: sep_conjD) apply(erule s_valid_g) done lemma tagd_ptr_safe: "(g \<turnstile>\<^sub>s\<^sup>i p \<and>\<^sup>* sep_true) (lift_state (h,d)) \<Longrightarrow> ptr_safe p d" apply(rule tagd_ptr_safe_exc) apply(unfold tagd_inv_def) apply(subst (asm) sep_conj_assoc) apply(erule (1) sep_conj_impl) apply simp done lemma sep_map_tagd: "(p \<mapsto>\<^sup>i\<^sub>g (v::'a::mem_type)) s \<Longrightarrow> (g \<turnstile>\<^sub>s\<^sup>i p) s" apply(unfold sep_map_inv_def) apply(unfold tagd_inv_def) apply(erule sep_conj_impl) apply(erule sep_map_tagd_exc) apply assumption done lemma sep_map_any_tagd: "(p \<mapsto>\<^sup>i\<^sub>g -) s \<Longrightarrow> (g \<turnstile>\<^sub>s\<^sup>i (p::'a::mem_type ptr)) s" by (clarsimp dest!: sep_map_anyD, erule sep_map_tagd) lemma sep_heap_update: "\<lbrakk> (p \<mapsto>\<^sup>i\<^sub>g - \<and>\<^sup>* (p \<mapsto>\<^sup>i\<^sub>g v \<longrightarrow>\<^sup>* P)) (lift_state (h,d)) \<rbrakk> \<Longrightarrow> P (lift_state (heap_update p (v::'a::mem_type) h,d))" by (force intro: sep_heap_update' dest: sep_map_anyD sep_map_tagd elim: sep_conj_impl) lemma sep_heap_update_global': "(g \<turnstile>\<^sub>s\<^sup>i p \<and>\<^sup>* R) (lift_state (h,d)) \<Longrightarrow> ((p \<mapsto>\<^sup>i\<^sub>g v) \<and>\<^sup>* R) (lift_state (heap_update p (v::'a::mem_type) h,d))" by (rule sep_heap_update', erule sep_conj_sep_conj_sep_impl_sep_conj) lemma sep_heap_update_global: "(p \<mapsto>\<^sup>i\<^sub>g - \<and>\<^sup>* R) (lift_state (h,d)) \<Longrightarrow> ((p \<mapsto>\<^sup>i\<^sub>g v) \<and>\<^sup>* R) 
(lift_state (heap_update p (v::'a::mem_type) h,d))" by (fast intro: sep_heap_update_global' sep_conj_impl sep_map_any_tagd) lemma sep_heap_update_global_super_fl_inv: "\<lbrakk> (p \<mapsto>\<^sup>i\<^sub>g u \<and>\<^sup>* R) (lift_state (h,d)); field_lookup (typ_info_t TYPE('b::mem_type)) f 0 = Some (t,n); export_uinfo t = (typ_uinfo_t TYPE('a)) \<rbrakk> \<Longrightarrow> ((p \<mapsto>\<^sup>i\<^sub>g update_ti_t t (to_bytes_p v) u) \<and>\<^sup>* R) (lift_state (heap_update (Ptr &(p\<rightarrow>f)) (v::'a::mem_type) h,d))" apply(unfold sep_map_inv_def) apply(simp only: sep_conj_assoc) apply(erule (2) sep_heap_update_global_super_fl) done lemma sep_map'_inv: "(p \<hookrightarrow>\<^sup>i\<^sub>g v) s \<Longrightarrow> (p \<hookrightarrow>\<^sub>g v) s" apply(unfold sep_map'_inv_def) apply(subst sep_map'_unfold_exc) apply(erule (1) sep_conj_impl, simp) done lemma sep_map'_lift: "(p \<hookrightarrow>\<^sup>i\<^sub>g (v::'a::mem_type)) (lift_state (h,d)) \<Longrightarrow> lift h p = v" apply(drule sep_map'_inv) apply(erule sep_map'_lift_exc) done lemma sep_map_lift: "((p::'a::mem_type ptr) \<mapsto>\<^sup>i\<^sub>g -) (lift_state (h,d)) \<Longrightarrow> (p \<mapsto>\<^sup>i\<^sub>g lift h p) (lift_state (h,d))" apply(frule sep_map_anyD) apply clarsimp apply(frule sep_map_sep_map') apply(drule sep_map'_lift) apply simp done lemma sep_map_lift_wp: "\<lbrakk> \<exists>v. (p \<mapsto>\<^sup>i\<^sub>g v \<and>\<^sup>* (p \<mapsto>\<^sup>i\<^sub>g v \<longrightarrow>\<^sup>* P v)) (lift_state (h,d)) \<rbrakk> \<Longrightarrow> P (lift h (p::'a::mem_type ptr)) (lift_state (h,d))" apply clarsimp thm sep_map'_lift apply(subst sep_map'_lift [where g=g and d=d]) apply(subst sep_map'_inv_def) apply(subst sep_map'_def) apply(subst sep_conj_assoc)+ apply(subst sep_conj_com[where P=sep_true]) apply(subst sep_conj_assoc [symmetric]) apply(erule sep_conj_impl) apply(unfold sep_map_inv_def) apply assumption apply simp apply(rule_tac P="p \<mapsto>\<^sup>i\<^sub>g v" and Q="P v" in sep_conj_impl_same) apply(unfold sep_map_inv_def) apply(erule (2) sep_conj_impl) done lemma sep_map'_anyI [simp]: "(p \<hookrightarrow>\<^sup>i\<^sub>g v) s \<Longrightarrow> (p \<hookrightarrow>\<^sup>i\<^sub>g -) s" apply(unfold sep_map'_inv_def sep_map'_any_inv_def) apply(erule sep_conj_impl) apply(erule sep_map'_anyI_exc) apply assumption done lemma sep_map'_anyD: "(p \<hookrightarrow>\<^sup>i\<^sub>g -) s \<Longrightarrow> \<exists>v. (p \<hookrightarrow>\<^sup>i\<^sub>g v) s" apply(unfold sep_map'_inv_def sep_map'_any_inv_def sep_map'_any_def) apply(subst (asm) sep_conj_exists) apply clarsimp done lemma sep_map'_lift_rev: "\<lbrakk> lift h p = (v::'a::mem_type); (p \<hookrightarrow>\<^sup>i\<^sub>g -) (lift_state (h,d)) \<rbrakk> \<Longrightarrow> (p \<hookrightarrow>\<^sup>i\<^sub>g v) (lift_state (h,d))" apply(drule sep_map'_anyD) apply clarsimp apply(frule sep_map'_lift) apply simp done lemma sep_map'_any_g: "(p \<hookrightarrow>\<^sup>i\<^sub>g -) s \<Longrightarrow> g p" apply(drule sep_map'_anyD) apply(blast intro:sep_map'_g) done lemma any_guardI: "(p \<mapsto>\<^sup>i\<^sub>g -) s \<Longrightarrow> g p" apply(drule sep_map_anyD) apply(blast intro:guardI) done lemma sep_map_sep_map_any: "(p \<mapsto>\<^sup>i\<^sub>g v) s \<Longrightarrow> (p \<mapsto>\<^sup>i\<^sub>g -) s" apply(simp) done (* FIXME: can be made more flexible when generalised separation conjunction is added lsf: should be fine with sep_select_tac *) lemma sep_lift_exists: fixes p :: "'a::mem_type ptr" assumes ex: "((\<lambda>s. \<exists>v. 
(p \<hookrightarrow>\<^sup>i\<^sub>g v) s \<and> P v s) \<and>\<^sup>* Q) (lift_state (h,d))" shows "(P (lift h p) \<and>\<^sup>* Q) (lift_state (h,d))" proof - from ex obtain v where "((\<lambda>s. (p \<hookrightarrow>\<^sup>i\<^sub>g v) s \<and> P v s) \<and>\<^sup>* Q) (lift_state (h,d))" by (subst (asm) sep_conj_exists, clarsimp) thus ?thesis by (force simp: sep_map'_lift sep_conj_ac dest: sep_map'_conjE2 dest!: sep_conj_conj) qed lemma sep_map_dom: "(p \<mapsto>\<^sup>i\<^sub>g (v::'a::c_type)) s \<Longrightarrow> dom s = {(a,b). a \<in> {ptr_val p..+size_of TYPE('a)}}" apply(unfold sep_map_inv_def) apply(drule sep_conjD, clarsimp) apply(drule sep_map_dom_exc) apply(clarsimp simp: inv_footprint_def) apply auto apply(erule s_footprintD) done lemma sep_map'_dom: "(p \<hookrightarrow>\<^sup>i\<^sub>g (v::'a::mem_type)) s \<Longrightarrow> (ptr_val p,SIndexVal) \<in> dom s" apply(unfold sep_map'_inv_def) apply(drule sep_conjD, clarsimp) apply(drule sep_map'_dom_exc, clarsimp) done lemma sep_map'_inj: "\<lbrakk> (p \<hookrightarrow>\<^sup>i\<^sub>g (v::'a::c_type)) s; (p \<hookrightarrow>\<^sup>i\<^sub>h v') s \<rbrakk> \<Longrightarrow> v=v'" apply(drule sep_map'_inv)+ apply(drule (2) sep_map'_inj_exc) done lemma ptr_retyp_tagd: "\<lbrakk> g (p::'a::mem_type ptr); {(a, b). a \<in> {ptr_val p..+size_of TYPE('a)}} \<subseteq> dom_s d \<rbrakk> \<Longrightarrow> (g \<turnstile>\<^sub>s\<^sup>i p) (lift_state (h, ptr_retyp p d))" apply(simp add: tagd_inv_def tagd_def ptr_retyp_s_valid lift_state_dom) oops lemma ptr_retyp_sep_cut': fixes p::"'a::mem_type ptr" assumes sc: "(sep_cut' (ptr_val p) (size_of TYPE('a)) \<and>\<^sup>* P) (lift_state (h,d))" and "g p" shows "(g \<turnstile>\<^sub>s\<^sup>i p \<and>\<^sup>* P) (lift_state (h,(ptr_retyp p d)))" proof - from sc obtain s\<^sub>0 and s\<^sub>1 where "s\<^sub>0 \<bottom> s\<^sub>1" and "lift_state (h,d) = s\<^sub>1 ++ s\<^sub>0" and "P s\<^sub>1" and d: "dom s\<^sub>0 = {(a,b). a \<in> {ptr_val p..+size_of TYPE('a)}}" and k: "dom s\<^sub>0 \<subseteq> dom_s d" apply - apply(drule sep_conjD) apply clarsimp apply(drule sep_cut'_dom) apply(subgoal_tac "dom s\<^sub>0 \<subseteq> dom_s d") apply fast apply(subst dom_lift_state_dom_s [where h=h,symmetric]) apply auto done moreover hence "lift_state (h, ptr_retyp p d) = s\<^sub>1 ++ lift_state (h, ptr_retyp p d) |` (dom s\<^sub>0)" apply - apply(rule ext, case_tac "x \<in> dom s\<^sub>0") apply(case_tac "x \<in> dom s\<^sub>1") apply(clarsimp simp: map_disj_def) apply fast apply(subst map_add_com) apply(clarsimp simp: map_disj_def) apply fast apply(clarsimp simp: map_add_def split: option.splits) apply(case_tac x, clarsimp) apply(clarsimp simp: lift_state_ptr_retyp_d merge_dom2) done moreover have "g p" by fact with d k have "(g \<turnstile>\<^sub>s\<^sup>i p) (lift_state (h, ptr_retyp p d) |` dom s\<^sub>0)" apply - apply(auto simp: lift_state_ptr_retyp_restrict sep_conj_ac) apply(unfold tagd_inv_def) apply(simp add: sep_conj_ac) apply(rule_tac s\<^sub>0="lift_state (h,d) |` ({(a, b). 
a \<in> {ptr_val p..+size_of TYPE('a)}} - s_footprint p)" in sep_conjI) apply(clarsimp simp: inv_footprint_def) apply fast apply(erule_tac h=h in ptr_retyp_tagd_exc) apply(clarsimp simp: map_disj_def) apply fast apply(subst map_add_comm[of "lift_state (h, ptr_retyp p empty_htd)"]) apply(simp, fast) apply(rule ext) apply(clarsimp simp: map_add_def split: option.splits) apply(subgoal_tac "(a,b) \<notin> s_footprint p") apply(clarsimp simp: restrict_map_def) apply(subgoal_tac "s_footprint p = dom (lift_state (h, ptr_retyp p empty_htd) )") apply(simp only:) apply fast apply simp done ultimately show ?thesis apply - apply(rule_tac s\<^sub>0="(lift_state (h,ptr_retyp p d))|`dom s\<^sub>0" and s\<^sub>1=s\<^sub>1 in sep_conjI, auto simp: map_disj_def) done qed lemma ptr_retyp_sep_cut'_wp: "\<lbrakk> (sep_cut' (ptr_val p) (size_of TYPE('a)) \<and>\<^sup>* (g \<turnstile>\<^sub>s\<^sup>i p \<longrightarrow>\<^sup>* P)) (lift_state (h,d)); g (p::'a::mem_type ptr) \<rbrakk> \<Longrightarrow> P (lift_state (h,(ptr_retyp p d)))" apply(rule_tac P="g \<turnstile>\<^sub>s\<^sup>i p" and Q=P in sep_conj_impl_same) apply(rule ptr_retyp_sep_cut') apply simp+ done lemma tagd_dom: "(g \<turnstile>\<^sub>s\<^sup>i p) s \<Longrightarrow> dom s = {(a,b). a \<in> {ptr_val (p::'a::c_type ptr)..+size_of TYPE('a)}}" apply (clarsimp simp: tagd_inv_def) apply(drule sep_conjD, clarsimp) apply(clarsimp simp: inv_footprint_def) apply(drule tagd_dom_exc) apply auto apply(erule s_footprintD) done lemma tagd_dom_p: "(g \<turnstile>\<^sub>s\<^sup>i p) s \<Longrightarrow> (ptr_val (p::'a::mem_type ptr),SIndexVal) \<in> dom s" apply(drule tagd_dom) apply(clarsimp) done end
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef INCLUDED_QNAME_TO_STRING_HXX #define INCLUDED_QNAME_TO_STRING_HXX #include <boost/shared_ptr.hpp> #include <map> #include <string> #include <iostream> #include <resourcemodel/WW8ResourceModel.hxx> #include <com/sun/star/beans/XPropertySet.hpp> namespace writerfilter { using namespace ::std; class WRITERFILTER_DLLPUBLIC QNameToString { typedef boost::shared_ptr<QNameToString> Pointer_t; typedef map < Id, string > Map; static Pointer_t pInstance; void init_doctok(); void init_ooxml(); Map mMap; protected: /** Generated. */ QNameToString(); public: static Pointer_t Instance(); string operator()(Id qName); }; class WRITERFILTER_DLLPUBLIC SprmIdToString { typedef boost::shared_ptr<SprmIdToString> Pointer_t; static Pointer_t pInstance; map<sal_uInt32, string> mMap; protected: /** Generated automatically. */ SprmIdToString(); public: static Pointer_t Instance(); string operator()(sal_uInt32 nId); }; } #endif // INCLUDED_QNAME_TO_STRING_HXX
theory T111 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) " nitpick[card nat=4,timeout=86400] oops end
[GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α ⊢ OrderTop α ⊕' NoTopOrder α [PROOFSTEP] by_cases H : ∀ a : α, ∃ b, ¬b ≤ a [GOAL] case pos α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α H : ∀ (a : α), ∃ b, ¬b ≤ a ⊢ OrderTop α ⊕' NoTopOrder α [PROOFSTEP] exact PSum.inr ⟨H⟩ [GOAL] case neg α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α H : ¬∀ (a : α), ∃ b, ¬b ≤ a ⊢ OrderTop α ⊕' NoTopOrder α [PROOFSTEP] push_neg at H [GOAL] case neg α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α H : ∃ a, ∀ (b : α), b ≤ a ⊢ OrderTop α ⊕' NoTopOrder α [PROOFSTEP] letI : Top α := ⟨Classical.choose H⟩ [GOAL] case neg α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α H : ∃ a, ∀ (b : α), b ≤ a this : Top α := { top := Classical.choose H } ⊢ OrderTop α ⊕' NoTopOrder α [PROOFSTEP] exact PSum.inl ⟨Classical.choose_spec H⟩ [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝² : LinearOrder α inst✝¹ : Preorder β inst✝ : OrderTop β f : α → β H : StrictMono f a : α h_top : f a = ⊤ x : α p : β ⊢ p ≤ f a [PROOFSTEP] rw [h_top] [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝² : LinearOrder α inst✝¹ : Preorder β inst✝ : OrderTop β f : α → β H : StrictMono f a : α h_top : f a = ⊤ x : α p : β ⊢ p ≤ ⊤ [PROOFSTEP] exact le_top [GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 hA : PartialOrder α A : OrderTop α hB : PartialOrder α B : OrderTop α H : ∀ (x y : α), x ≤ y ↔ x ≤ y ⊢ ⊤ = ⊤ [PROOFSTEP] cases PartialOrder.ext H [GOAL] case refl α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 hA : PartialOrder α A B : OrderTop α H : ∀ (x y : α), x ≤ y ↔ x ≤ y ⊢ ⊤ = ⊤ [PROOFSTEP] apply top_unique [GOAL] case refl.h α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 hA : PartialOrder α A B : OrderTop α H : ∀ (x y : α), x ≤ y ↔ x ≤ y ⊢ ⊤ ≤ ⊤ [PROOFSTEP] exact @le_top _ _ A _ [GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α A B : OrderTop α ⊢ A = B [PROOFSTEP] rcases A with ⟨ha⟩ [GOAL] case mk α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α B : OrderTop α toTop✝ : Top α ha : ∀ (a : α), a ≤ ⊤ ⊢ mk ha = B [PROOFSTEP] rcases B with ⟨hb⟩ [GOAL] case mk.mk α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α toTop✝¹ : Top α ha : ∀ (a : α), a ≤ ⊤ toTop✝ : Top α hb : ∀ (a : α), a ≤ ⊤ ⊢ mk ha = mk hb [PROOFSTEP] congr [GOAL] case mk.mk.e_toTop α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α toTop✝¹ : Top α ha : ∀ (a : α), a ≤ ⊤ toTop✝ : Top α hb : ∀ (a : α), a ≤ ⊤ ⊢ toTop✝¹ = toTop✝ [PROOFSTEP] ext [GOAL] case mk.mk.e_toTop.top α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α toTop✝¹ : Top α ha : ∀ (a : α), a ≤ ⊤ toTop✝ : Top α hb : ∀ (a : α), a ≤ ⊤ ⊢ ⊤ = ⊤ [PROOFSTEP] exact le_antisymm (hb _) (ha _) [GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α ⊢ OrderBot α ⊕' NoBotOrder α [PROOFSTEP] by_cases H : ∀ a : α, ∃ b, ¬a ≤ b [GOAL] case pos α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α H : ∀ (a : α), ∃ b, ¬a ≤ b ⊢ OrderBot α ⊕' NoBotOrder α [PROOFSTEP] exact PSum.inr ⟨H⟩ [GOAL] case neg α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α H : ¬∀ (a : α), ∃ b, ¬a ≤ b ⊢ OrderBot α ⊕' NoBotOrder α [PROOFSTEP] push_neg at H [GOAL] case neg α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α 
: Type u_3 inst✝ : LE α H : ∃ a, ∀ (b : α), a ≤ b ⊢ OrderBot α ⊕' NoBotOrder α [PROOFSTEP] letI : Bot α := ⟨Classical.choose H⟩ [GOAL] case neg α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : LE α H : ∃ a, ∀ (b : α), a ≤ b this : Bot α := { bot := Classical.choose H } ⊢ OrderBot α ⊕' NoBotOrder α [PROOFSTEP] exact PSum.inl ⟨Classical.choose_spec H⟩ [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝² : LinearOrder α inst✝¹ : PartialOrder β inst✝ : OrderBot β f : α → β H : StrictMono f a : α h_bot : f a = ⊥ x : α p : β ⊢ f a ≤ p [PROOFSTEP] rw [h_bot] [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝² : LinearOrder α inst✝¹ : PartialOrder β inst✝ : OrderBot β f : α → β H : StrictMono f a : α h_bot : f a = ⊥ x : α p : β ⊢ ⊥ ≤ p [PROOFSTEP] exact bot_le [GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 hA : PartialOrder α A : OrderBot α hB : PartialOrder α B : OrderBot α H : ∀ (x y : α), x ≤ y ↔ x ≤ y ⊢ ⊥ = ⊥ [PROOFSTEP] cases PartialOrder.ext H [GOAL] case refl α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 hA : PartialOrder α A B : OrderBot α H : ∀ (x y : α), x ≤ y ↔ x ≤ y ⊢ ⊥ = ⊥ [PROOFSTEP] apply bot_unique [GOAL] case refl.h α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 hA : PartialOrder α A B : OrderBot α H : ∀ (x y : α), x ≤ y ↔ x ≤ y ⊢ ⊥ ≤ ⊥ [PROOFSTEP] exact @bot_le _ _ A _ [GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α A B : OrderBot α ⊢ A = B [PROOFSTEP] rcases A with ⟨ha⟩ [GOAL] case mk α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α B : OrderBot α toBot✝ : Bot α ha : ∀ (a : α), ⊥ ≤ a ⊢ mk ha = B [PROOFSTEP] rcases B with ⟨hb⟩ [GOAL] case mk.mk α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α toBot✝¹ : Bot α ha : ∀ (a : α), ⊥ ≤ a toBot✝ : Bot α hb : ∀ (a : α), ⊥ ≤ a ⊢ mk ha = mk hb [PROOFSTEP] congr [GOAL] case mk.mk.e_toBot α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α toBot✝¹ : Bot α ha : ∀ (a : α), ⊥ ≤ a toBot✝ : Bot α hb : ∀ (a : α), ⊥ ≤ a ⊢ toBot✝¹ = toBot✝ [PROOFSTEP] ext [GOAL] case mk.mk.e_toBot.bot α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α toBot✝¹ : Bot α ha : ∀ (a : α), ⊥ ≤ a toBot✝ : Bot α hb : ∀ (a : α), ⊥ ≤ a ⊢ ⊥ = ⊥ [PROOFSTEP] exact le_antisymm (ha _) (hb _) [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝¹ : SemilatticeSup α inst✝ : OrderBot α a b : α ⊢ a ⊔ b = ⊥ ↔ a = ⊥ ∧ b = ⊥ [PROOFSTEP] rw [eq_bot_iff, sup_le_iff] [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝¹ : SemilatticeSup α inst✝ : OrderBot α a b : α ⊢ a ≤ ⊥ ∧ b ≤ ⊥ ↔ a = ⊥ ∧ b = ⊥ [PROOFSTEP] simp [GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α A B : BoundedOrder α ⊢ A = B [PROOFSTEP] have ht : @BoundedOrder.toOrderTop α _ A = @BoundedOrder.toOrderTop α _ B := OrderTop.ext [GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α A B : BoundedOrder α ht : toOrderTop = toOrderTop ⊢ A = B [PROOFSTEP] have hb : @BoundedOrder.toOrderBot α _ A = @BoundedOrder.toOrderBot α _ B := OrderBot.ext [GOAL] α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α A B : BoundedOrder α ht : toOrderTop = toOrderTop hb : toOrderBot = toOrderBot ⊢ A = B [PROOFSTEP] cases A [GOAL] case mk α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α B : BoundedOrder α toOrderTop✝ : 
OrderTop α toOrderBot✝ : OrderBot α ht : toOrderTop = toOrderTop hb : toOrderBot = toOrderBot ⊢ mk = B [PROOFSTEP] cases B [GOAL] case mk.mk α✝ : Type u β : Type v γ : Type u_1 δ : Type u_2 α : Type u_3 inst✝ : PartialOrder α toOrderTop✝¹ : OrderTop α toOrderBot✝¹ : OrderBot α toOrderTop✝ : OrderTop α toOrderBot✝ : OrderBot α ht : toOrderTop = toOrderTop hb : toOrderBot = toOrderBot ⊢ mk = mk [PROOFSTEP] congr [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝³ : LE α inst✝² : Top α inst✝¹ : LE β inst✝ : OrderTop β f : α → β map_le : ∀ (a b : α), f a ≤ f b → a ≤ b map_top : f ⊤ = ⊤ a : α ⊢ f a ≤ f ⊤ [PROOFSTEP] rw [map_top] -- Porting note: lean3 didn't need the type annotation [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝³ : LE α inst✝² : Top α inst✝¹ : LE β inst✝ : OrderTop β f : α → β map_le : ∀ (a b : α), f a ≤ f b → a ≤ b map_top : f ⊤ = ⊤ a : α ⊢ f a ≤ ⊤ [PROOFSTEP] exact @le_top β _ _ _ [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝³ : LE α inst✝² : Bot α inst✝¹ : LE β inst✝ : OrderBot β f : α → β map_le : ∀ (a b : α), f a ≤ f b → a ≤ b map_bot : f ⊥ = ⊥ a : α ⊢ f ⊥ ≤ f a [PROOFSTEP] rw [map_bot] -- Porting note: lean3 didn't need the type annotation [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝³ : LE α inst✝² : Bot α inst✝¹ : LE β inst✝ : OrderBot β f : α → β map_le : ∀ (a b : α), f a ≤ f b → a ≤ b map_bot : f ⊥ = ⊥ a : α ⊢ ⊥ ≤ f a [PROOFSTEP] exact @bot_le β _ _ _ [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 p : α → Prop inst✝² : PartialOrder α inst✝¹ : OrderBot α inst✝ : OrderBot (Subtype p) hbot : p ⊥ x : { x // p x } ⊢ ↑x = ⊥ ↔ x = ⊥ [PROOFSTEP] rw [← coe_bot hbot, ext_iff] [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 p : α → Prop inst✝² : PartialOrder α inst✝¹ : OrderTop α inst✝ : OrderTop (Subtype p) htop : p ⊤ x : { x // p x } ⊢ ↑x = ⊤ ↔ x = ⊤ [PROOFSTEP] rw [← coe_top htop, ext_iff] [GOAL] α : Type u β : Type v γ : Type u_1 δ : Type u_2 inst✝¹ : LinearOrder α inst✝ : OrderBot α a b : α ⊢ min a b = ⊥ ↔ a = ⊥ ∨ b = ⊥ [PROOFSTEP] simp only [← inf_eq_min, ← le_bot_iff, inf_le_iff]
/- Copyright (c) 2019 Johan Commelin. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johan Commelin -/ import logic.basic universes u v w variables {α : Sort u} {β : Sort v} {γ : Sort w} structure unique (α : Sort u) extends inhabited α := (uniq : ∀ a:α, a = default) attribute [class] unique instance punit.unique : unique punit.{u} := { default := punit.star, uniq := λ x, punit_eq x _ } namespace unique open function section variables [unique α] instance : inhabited α := to_inhabited ‹unique α› lemma eq_default (a : α) : a = default α := uniq _ a lemma default_eq (a : α) : default α = a := (uniq _ a).symm instance : subsingleton α := ⟨λ a b, by rw [eq_default a, eq_default b]⟩ end protected lemma subsingleton_unique' : ∀ (h₁ h₂ : unique α), h₁ = h₂ | ⟨⟨x⟩, h⟩ ⟨⟨y⟩, _⟩ := by congr; rw [h x, h y] instance subsingleton_unique : subsingleton (unique α) := ⟨unique.subsingleton_unique'⟩ def of_surjective {f : α → β} (hf : surjective f) [unique α] : unique β := { default := f (default _), uniq := λ b, begin cases hf b with a ha, subst ha, exact congr_arg f (eq_default a) end } end unique
{-# OPTIONS --cubical #-} module Multidimensional.Data.Dir.Properties where open import Cubical.Core.Everything open import Cubical.Foundations.Prelude open import Cubical.Data.Empty open import Cubical.Relation.Nullary open import Multidimensional.Data.Dir.Base ¬↓≡↑ : ¬ ↓ ≡ ↑ ¬↓≡↑ eq = subst (caseDir Dir ⊥) eq ↓ ¬↑≡↓ : ¬ ↑ ≡ ↓ ¬↑≡↓ eq = subst (caseDir ⊥ Dir) eq ↓
module Data.Nat.Properties.Extra where open import Data.Nat open import Data.Product open import Data.Nat.Properties open import Data.Nat.Properties.Simple open import Function open import Level using () open import Relation.Binary open import Relation.Binary.Core open import Relation.Nullary.Negation open import Relation.Nullary -- open import Relation.Nullary.Decidable open import Relation.Binary.PropositionalEquality hiding (isPreorder) open ≡-Reasoning open ≤-Reasoning renaming (begin_ to start_; _∎ to _□; _≡⟨_⟩_ to _≈⟨_⟩_) open DecTotalOrder decTotalOrder using (reflexive) renaming (refl to ≤-refl) ------------------------------------------------------------------------ -- Misc ------------------------------------------------------------------------ isDecTotalOrder : IsDecTotalOrder {A = ℕ} _≡_ _≤_ isDecTotalOrder = DecTotalOrder.isDecTotalOrder decTotalOrder isTotalOrder : IsTotalOrder {A = ℕ} _≡_ _≤_ isTotalOrder = IsDecTotalOrder.isTotalOrder isDecTotalOrder isPartialOrder : IsPartialOrder {A = ℕ} _≡_ _≤_ isPartialOrder = IsTotalOrder.isPartialOrder isTotalOrder isPreorder : IsPreorder {A = ℕ} _≡_ _≤_ isPreorder = IsPartialOrder.isPreorder isPartialOrder open import Algebra.Structures import Algebra.FunctionProperties as P; open P (_≡_ {A = ℕ}) isSemiringWithoutOne : IsSemiringWithoutOne _≡_ _⊔_ _⊓_ zero isSemiringWithoutOne = IsCommutativeSemiringWithoutOne.isSemiringWithoutOne ⊔-⊓-0-isCommutativeSemiringWithoutOne +-isCommutativeMonoid : IsCommutativeMonoid _≡_ _⊔_ zero +-isCommutativeMonoid = IsSemiringWithoutOne.+-isCommutativeMonoid isSemiringWithoutOne cmp : Trichotomous _≡_ _<_ cmp = StrictTotalOrder.compare strictTotalOrder ⊓-comm : Commutative _⊓_ ⊓-comm = IsCommutativeSemiringWithoutOne.*-comm ⊔-⊓-0-isCommutativeSemiringWithoutOne ⊔-comm : Commutative _⊔_ ⊔-comm = IsCommutativeMonoid.comm +-isCommutativeMonoid ------------------------------------------------------------------------ -- Equational ------------------------------------------------------------------------ -- ℕ cancel-suc : ∀ {x y} → suc x ≡ suc y → x ≡ y cancel-suc {x} {.x} refl = refl -- cancel-suc : ∀ {x y} → suc x ≡ suc y → x ≡ y -- cancel-suc {x} {.x} refl = refl -- _+_ [a+b]+c≡[a+c]+b : ∀ a b c → a + b + c ≡ a + c + b [a+b]+c≡[a+c]+b a b c = begin a + b + c ≡⟨ +-assoc a b c ⟩ a + (b + c) ≡⟨ cong (λ x → a + x) (+-comm b c) ⟩ a + (c + b) ≡⟨ sym (+-assoc a c b) ⟩ a + c + b ∎ a+[b+c]≡b+[a+c] : ∀ a b c → a + (b + c) ≡ b + (a + c) a+[b+c]≡b+[a+c] a b c = begin a + (b + c) ≡⟨ sym (+-assoc a b c) ⟩ a + b + c ≡⟨ cong (λ x → x + c) (+-comm a b) ⟩ b + a + c ≡⟨ +-assoc b a c ⟩ b + (a + c) ∎ cancel-+-right : ∀ k {i j} → i + k ≡ j + k → i ≡ j cancel-+-right zero {i} {j} p = begin i ≡⟨ sym (+-right-identity i) ⟩ i + zero ≡⟨ p ⟩ j + zero ≡⟨ +-right-identity j ⟩ j ∎ cancel-+-right (suc k) {i} {j} p = cancel-+-right k lemma where lemma : i + k ≡ j + k lemma = cancel-suc $ begin suc (i + k) ≡⟨ sym (+-suc i k) ⟩ i + suc k ≡⟨ p ⟩ j + suc k ≡⟨ +-suc j k ⟩ suc (j + k) ∎ -- _*_ *-right-identity : ∀ n → n * 1 ≡ n *-right-identity zero = refl *-right-identity (suc n) = cong suc (*-right-identity n) *-left-identity : ∀ n → 1 * n ≡ n *-left-identity zero = refl *-left-identity (suc n) = cong suc (*-left-identity n) distrib-left-*-+ : ∀ m n o → m * (n + o) ≡ m * n + m * o distrib-left-*-+ m n o = begin m * (n + o) ≡⟨ *-comm m (n + o) ⟩ (n + o) * m ≡⟨ distribʳ-*-+ m n o ⟩ n * m + o * m ≡⟨ cong₂ _+_ (*-comm n m) (*-comm o m) ⟩ m * n + m * o ∎ -- _∸_ m∸n+n≡m : ∀ {m n} → n ≤ m → m ∸ n + n ≡ m m∸n+n≡m {m} {n} n≤m = begin m ∸ n + n ≡⟨ 
+-comm (m ∸ n) n ⟩ n + (m ∸ n) ≡⟨ m+n∸m≡n n≤m ⟩ m ∎ m∸[o∸n]+o≡m+n : ∀ m n o → n ≤ o → o ∸ n ≤ m → m ∸ ( o ∸ n ) + o ≡ m + n m∸[o∸n]+o≡m+n m n o n≤o o∸n≤m = begin m ∸ (o ∸ n) + o ≡⟨ +-comm (m ∸ (o ∸ n)) o ⟩ o + (m ∸ (o ∸ n)) ≡⟨ cong (λ x → x + (m ∸ (o ∸ n))) (sym (m+n∸m≡n {n} {o} n≤o)) ⟩ (n + (o ∸ n)) + (m ∸ (o ∸ n)) ≡⟨ +-assoc n (o ∸ n) (m ∸ (o ∸ n)) ⟩ n + ((o ∸ n) + (m ∸ (o ∸ n))) ≡⟨ cong (λ w → n + w) (m+n∸m≡n {o ∸ n} o∸n≤m) ⟩ n + m ≡⟨ +-comm n m ⟩ m + n ∎ ------------------------------------------------------------------------ -- Relational ------------------------------------------------------------------------ -- _≤_ ≤-trans : Transitive _≤_ ≤-trans = IsPreorder.trans isPreorder <⇒≤ : _<_ ⇒ _≤_ <⇒≤ {zero} p = z≤n <⇒≤ {suc i} (s≤s p) = s≤s (<⇒≤ p) >⇒≰ : _>_ ⇒ _≰_ >⇒≰ {zero} () q >⇒≰ {suc m} (s≤s p) (s≤s q) = >⇒≰ p q ≤⇒≯ : _≤_ ⇒ _≯_ ≤⇒≯ {zero} p () ≤⇒≯ {suc m} (s≤s p) (s≤s q) = ≤⇒≯ p q <⇒≱ : _<_ ⇒ _≱_ <⇒≱ {zero} () z≤n <⇒≱ {suc i} (s≤s p) (s≤s q) = <⇒≱ p q >⇒≢ : _>_ ⇒ _≢_ >⇒≢ {zero} () refl >⇒≢ {suc m} (s≤s m>n) refl = >⇒≢ m>n refl ≥⇒≮ : _≥_ ⇒ _≮_ ≥⇒≮ z≤n () ≥⇒≮ (s≤s m) (s≤s p) = ≥⇒≮ m p <⇒≢ : _<_ ⇒ _≢_ <⇒≢ {zero} () refl <⇒≢ {suc m} (s≤s m<n) refl = <⇒≢ m<n refl ≤∧≢⇒< : ∀ {m n} → m ≤ n → m ≢ n → m < n ≤∧≢⇒< {zero} {zero} p q = contradiction refl q ≤∧≢⇒< {zero} {suc n} p q = s≤s z≤n ≤∧≢⇒< {suc m} {zero} () q ≤∧≢⇒< {suc m} {suc n} (s≤s p) q = s≤s (≤∧≢⇒< p (q ∘ cong suc)) ≥∧≢⇒> : ∀ {m n} → m ≥ n → m ≢ n → m > n ≥∧≢⇒> {zero} {zero} p q = contradiction refl q ≥∧≢⇒> {zero} {suc n} () q ≥∧≢⇒> {suc m} {zero} p q = s≤s z≤n ≥∧≢⇒> {suc m} {suc n} (s≤s p) q = s≤s (≥∧≢⇒> p (q ∘ cong suc)) ≤0⇒≡0 : ∀ n → n ≤ 0 → n ≡ 0 ≤0⇒≡0 zero n≤0 = refl ≤0⇒≡0 (suc n) () -- ≤-suc : ∀ {m n} → m ≤ n → suc m ≤ suc n -- ≤-suc z≤n = s≤s z≤n -- ≤-suc (s≤s rel) = s≤s (s≤s rel) -- i*j>0⇒i>0∧j>0 : ∀ i j → i * j > 0 → (i > 0 × j > 0) -- i*j>0⇒i>0∧j>0 zero j () -- i*j>0⇒i>0∧j>0 (suc i) zero p = contradiction (proj₂ (i*j>0⇒i>0∧j>0 i 0 p)) (λ ()) -- i*j>0⇒i>0∧j>0 (suc i) (suc j) p = s≤s z≤n , s≤s z≤n -- _+_ -- m≡n+o⇒m≥o : ∀ {m} {o} n → m ≡ n + o → m ≥ o -- m≡n+o⇒m≥o {.(n + o)} {o} n refl = n≤m+n o n n+-mono : ∀ n → (λ x → n + x) Preserves _≤_ ⟶ _≤_ n+-mono n = _+-mono_ {n} {n} ≤-refl n+-mono-inverse : ∀ n → ∀ {a b} → n + a ≤ n + b → a ≤ b n+-mono-inverse zero a≤b = a≤b n+-mono-inverse (suc n) (s≤s a≤b) = n+-mono-inverse n a≤b +n-mono : ∀ n → (λ x → x + n) Preserves _≤_ ⟶ _≤_ +n-mono n {a} {b} a≤b = _+-mono_ {a} {b} {n} {n} a≤b ≤-refl +n-mono-inverse : ∀ n → ∀ {a b} → a + n ≤ b + n → a ≤ b +n-mono-inverse zero {a} {b} p = start a ≈⟨ sym (+-right-identity a) ⟩ a + 0 ≤⟨ p ⟩ b + 0 ≈⟨ +-right-identity b ⟩ b □ +n-mono-inverse (suc n) {a} {b} p = +n-mono-inverse n p' where p' : a + n ≤ b + n p' = ≤-pred $ start suc (a + n) ≈⟨ sym (+-suc a n) ⟩ a + suc n ≤⟨ p ⟩ b + suc n ≈⟨ +-suc b n ⟩ suc (b + n) □ +-mono-contra : ∀ {a b c d} → a ≥ b → a + c < b + d → c < d +-mono-contra {zero} {zero} p q = q +-mono-contra {zero} {suc b} () q +-mono-contra {suc a} {zero} {c} {d} p q = start suc c ≤⟨ n≤m+n (suc a) (suc c) ⟩ suc a + suc c ≈⟨ +-suc (suc a) c ⟩ suc (suc a) + c ≤⟨ q ⟩ d □ +-mono-contra {suc a} {suc b} (s≤s p) (s≤s q) = +-mono-contra p q -- _∸_ ∸n-mono : ∀ n → (λ x → x ∸ n) Preserves _≤_ ⟶ _≤_ ∸n-mono n {a} {b} a≤b = ∸-mono {a} {b} {n} {n} a≤b ≤-refl ∸n-mono-inverse : ∀ n → ∀ {a b} → a > n → a ∸ n ≤ b ∸ n → a ≤ b ∸n-mono-inverse zero p q = q ∸n-mono-inverse (suc n) {zero} {zero} p q = q ∸n-mono-inverse (suc n) {zero} {suc b} p q = z≤n ∸n-mono-inverse (suc n) {suc a} {zero} p q = contradiction (≤-pred p) (≤⇒≯ q') where q' : a ≤ n q' = 
∸n-mono-inverse n (≤-pred p) $ start a ∸ n ≤⟨ q ⟩ zero ≈⟨ sym (n∸n≡0 n) ⟩ n ∸ n □ ∸n-mono-inverse (suc n) {suc a} {suc b} p q = s≤s (∸n-mono-inverse n (≤-pred p) q) n∸-mono : ∀ n → (λ x → n ∸ x) Preserves _≥_ ⟶ _≤_ n∸-mono n {a} {b} a≥b = ∸-mono {n} {n} {a} {b} ≤-refl a≥b m≥n+o⇒m∸o≥n : ∀ m n o → m ≥ n + o → m ∸ o ≥ n m≥n+o⇒m∸o≥n m n o p = start n ≈⟨ sym (m+n∸n≡m n o) ⟩ n + o ∸ o ≤⟨ ∸-mono {n + o} {m} {o} p ≤-refl ⟩ m ∸ o □ cancel-∸-right : ∀ {m n} o → m ≥ o → n ≥ o → m ∸ o ≡ n ∸ o → m ≡ n cancel-∸-right zero p q eq = eq cancel-∸-right {zero} (suc o) () q eq cancel-∸-right {suc m} {zero} (suc o) p () eq cancel-∸-right {suc m} {suc n} (suc o) p q eq = cong suc (cancel-∸-right o (≤-pred p) (≤-pred q) eq) -- _*_ n*-mono : ∀ n → (λ x → n * x) Preserves _≤_ ⟶ _≤_ n*-mono n = _*-mono_ {n} {n} ≤-refl n*-mono-strict-inverse : ∀ n {a} {b} → n * a < n * b → a < b n*-mono-strict-inverse zero () n*-mono-strict-inverse (suc n) {a} {b} p with suc a ≤? b n*-mono-strict-inverse (suc n) {a} {b} p | yes q = q n*-mono-strict-inverse (suc n) {a} {b} p | no ¬q = contradiction p ¬p where ¬q' : b ≤ a ¬q' = ≤-pred (≰⇒> ¬q) ¬p : suc (suc n * a) ≰ suc n * b ¬p = >⇒≰ (s≤s (n*-mono (suc n) ¬q')) *n-mono : ∀ n → (λ x → x * n) Preserves _≤_ ⟶ _≤_ *n-mono n {a} {b} a≤b = _*-mono_ {a} {b} {n} {n} a≤b ≤-refl *-comm-mono : ∀ {a b c d} → a * b ≤ c * d → b * a ≤ d * c *-comm-mono {a} {b} {c} {d} p = start b * a ≈⟨ *-comm b a ⟩ a * b ≤⟨ p ⟩ c * d ≈⟨ *-comm c d ⟩ d * c □ *-comm-mono-strict : ∀ {a b c d} → a * b < c * d → b * a < d * c *-comm-mono-strict {a} {b} {c} {d} p = start suc (b * a) ≈⟨ cong suc (*-comm b a) ⟩ suc (a * b) ≤⟨ p ⟩ c * d ≈⟨ *-comm c d ⟩ d * c □ *n-mono-inverse : ∀ n {a} {b} → a * (suc n) ≤ b * (suc n) → a ≤ b *n-mono-inverse n {zero} p = z≤n *n-mono-inverse n {suc a} {zero} () *n-mono-inverse n {suc a} {suc b} (s≤s p) = s≤s (*n-mono-inverse n (n+-mono-inverse n p)) *n-mono-strict-inverse : ∀ n {a} {b} → a * n < b * n → a < b *n-mono-strict-inverse n {a} {b} p = n*-mono-strict-inverse n (*-comm-mono-strict {a} {n} {b} {n} p) m≤m*1+n : ∀ m n → m ≤ m * suc n m≤m*1+n m zero = reflexive (sym (*-right-identity m)) m≤m*1+n m (suc n) = start m ≤⟨ m≤m+n m (m * suc n) ⟩ m + m * suc n ≈⟨ sym (+-*-suc m (suc n)) ⟩ m * suc (suc n) □ -- _⊔_ m≤n⊔m : ∀ m n → m ≤ n ⊔ m m≤n⊔m zero n = z≤n m≤n⊔m (suc m) zero = s≤s (m≤n⊔m m zero) m≤n⊔m (suc m) (suc n) = s≤s (m≤n⊔m m n) m⊓n≤n : ∀ m n → m ⊓ n ≤ n m⊓n≤n zero n = z≤n m⊓n≤n (suc m) zero = z≤n m⊓n≤n (suc m) (suc n) = s≤s (m⊓n≤n m n) ⊔-upper-bound : ∀ m n o → m + n ≥ o → o ⊔ n ≤ m + n ⊔-upper-bound zero zero zero p = p ⊔-upper-bound zero zero (suc o) () ⊔-upper-bound zero (suc n) zero p = s≤s (⊔-upper-bound zero n zero z≤n) ⊔-upper-bound zero (suc n) (suc o) p = s≤s (⊔-upper-bound zero n o (≤-pred p)) ⊔-upper-bound (suc m) zero zero p = p ⊔-upper-bound (suc m) zero (suc o) p = p ⊔-upper-bound (suc m) (suc n) zero p = start suc n ≤⟨ s≤s (≤-step ≤-refl) ⟩ suc (suc n) ≤⟨ s≤s (n≤m+n m (suc n)) ⟩ suc (m + suc n) □ ⊔-upper-bound (suc m) (suc n) (suc o) p = start suc (o ⊔ n) ≤⟨ s≤s (⊔-upper-bound (suc m) n o $ start o ≤⟨ ≤-pred p ⟩ m + suc n ≈⟨ +-suc m n ⟩ suc (m + n) □ ) ⟩ suc (suc (m + n)) ≈⟨ cong suc (sym (+-suc m n)) ⟩ suc (m + suc n) □ m⊔n≤m+n : ∀ m n → m ⊔ n ≤ m + n m⊔n≤m+n zero n = ≤-refl m⊔n≤m+n (suc m) zero = s≤s (m≤m+n m zero) m⊔n≤m+n (suc m) (suc n) = s≤s $ start m ⊔ n ≤⟨ m⊔n≤m+n m n ⟩ m + n ≤⟨ n+-mono m (n≤m+n 1 n) ⟩ m + suc n □ -- double : ∀ m → m + m ≡ m * 2 -- double m = -- begin -- m + m -- ≡⟨ cong (λ w → m + w) (sym (+-right-identity m)) ⟩ -- m + (m + zero) -- ≡⟨ *-comm 
(suc (suc zero)) m ⟩ -- m * suc (suc zero) -- ∎ -- cancel-∸-right-inverse : ∀ m n o → n ≥ o → m + o ≡ n → m ≡ n ∸ o -- cancel-∸-right-inverse m n zero p eq = -- begin -- m -- ≡⟨ sym (+-right-identity m) ⟩ -- m + 0 -- ≡⟨ eq ⟩ -- n -- ∎ -- cancel-∸-right-inverse m zero (suc o) () eq -- cancel-∸-right-inverse m (suc n) (suc o) p eq = -- begin -- m -- ≡⟨ cancel-∸-right-inverse m n o (≤-pred p) $ cancel-+-left 1 $ -- begin -- suc m + o -- ≡⟨ sym (+-suc m o) ⟩ -- m + suc o -- ≡⟨ eq ⟩ -- suc n -- ∎ -- ⟩ -- n ∸ o -- ∎
Awesome product.... makes me feel more comfortable when I have to leave the room for a moment and Grandma is in the other room! Gives added security for Grandma and me. Thanks Lumex! Graham-Field Lumex - Fast Alert Basic Patient Alarm is a chair pad alarm system that sends an alert to warn the staff or caregiver when the user slips or gets out of their chair. This alarm pad works well in a variety of chair styles including wheelchairs. The alarm secures to the seat with a metal clip. Pad Size: 10" x 15". Available as a system, alarm only and sensor pad only. The Fast Alert Basic Alarm is a home health product that serves as a personal protection device for both the patient and the caregiver. It is often seen in hospitals and nursing homes. It is an ideal safety alarm for the elderly and anyone who needs the security of an alert system while sitting. The Fast Alert Basic Patient Alarm is sold as a complete system. The Alarm and chair replacement sensor are also sold as replacement parts and can be purchased separately - see options above. Note: Protect the Fast-Alert sensor pad with a waterproof disposable pad if there is a risk of incontinence or liquid spills.
module Data.Fuel %default total ||| Fuel for running total operations potentially indefinitely. public export data Fuel = Dry | More (Lazy Fuel) ||| Provide `n` units of fuel. export limit : Nat -> Fuel limit Z = Dry limit (S n) = More (limit n) ||| Provide fuel indefinitely. ||| This function is fundamentally partial. partial export forever : Fuel forever = More forever
Formal statement is: lemma contour_integral_eq: "(\<And>x. x \<in> path_image p \<Longrightarrow> f x = g x) \<Longrightarrow> contour_integral p f = contour_integral p g" Informal statement is: If two functions $f$ and $g$ are equal on the image of a path $p$, then the contour integral of $f$ along $p$ is equal to the contour integral of $g$ along $p$.
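A hedged restatement of the informal claim above in display notation (the integral sign and variable names are illustrative glosses of the Isabelle statement, not part of the original entry):
\[ \bigl(\forall x \in \operatorname{path\_image}(p).\; f(x) = g(x)\bigr) \;\Longrightarrow\; \int_{p} f \;=\; \int_{p} g. \]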
[GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 n : ℕ f : α ≃ Fin n ⊢ Nat.card α = n [PROOFSTEP] simpa only [card_eq_fintype_card, Fintype.card_fin] using card_congr f [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 h : Nat.card α ≠ 0 ⊢ α ≃ Fin (Nat.card α) [PROOFSTEP] cases fintypeOrInfinite α [GOAL] case inl α✝ : Type u_1 β : Type u_2 α : Type u_3 h : Nat.card α ≠ 0 val✝ : Fintype α ⊢ α ≃ Fin (Nat.card α) [PROOFSTEP] simpa only [card_eq_fintype_card] using Fintype.equivFin α [GOAL] case inr α✝ : Type u_1 β : Type u_2 α : Type u_3 h : Nat.card α ≠ 0 val✝ : Infinite α ⊢ α ≃ Fin (Nat.card α) [PROOFSTEP] simp only [card_eq_zero_of_infinite, ne_eq] at h [GOAL] α : Type u_1 β : Type u_2 a : α inst✝ : Subsingleton α ⊢ Nat.card α = 1 [PROOFSTEP] letI := Fintype.ofSubsingleton a [GOAL] α : Type u_1 β : Type u_2 a : α inst✝ : Subsingleton α this : Fintype α := Fintype.ofSubsingleton a ⊢ Nat.card α = 1 [PROOFSTEP] rw [card_eq_fintype_card, Fintype.card_ofSubsingleton a] [GOAL] α : Type u_1 β : Type u_2 inst✝ : IsEmpty α ⊢ Nat.card α = 0 [PROOFSTEP] simp [GOAL] α : Type u_1 β : Type u_2 inst✝¹ : Finite α inst✝ : Finite β ⊢ Nat.card (α ⊕ β) = Nat.card α + Nat.card β [PROOFSTEP] have := Fintype.ofFinite α [GOAL] α : Type u_1 β : Type u_2 inst✝¹ : Finite α inst✝ : Finite β this : Fintype α ⊢ Nat.card (α ⊕ β) = Nat.card α + Nat.card β [PROOFSTEP] have := Fintype.ofFinite β [GOAL] α : Type u_1 β : Type u_2 inst✝¹ : Finite α inst✝ : Finite β this✝ : Fintype α this : Fintype β ⊢ Nat.card (α ⊕ β) = Nat.card α + Nat.card β [PROOFSTEP] simp_rw [Nat.card_eq_fintype_card, Fintype.card_sum] [GOAL] α✝ : Type u_1 β✝ : Type u_2 α : Type u_3 β : Type u_4 ⊢ Nat.card (α × β) = Nat.card α * Nat.card β [PROOFSTEP] simp only [Nat.card, mk_prod, toNat_mul, toNat_lift] [GOAL] α : Type u_1 β✝ : Type u_2 β : α → Type u_3 inst✝ : Fintype α ⊢ Nat.card ((a : α) → β a) = ∏ a : α, Nat.card (β a) [PROOFSTEP] simp_rw [Nat.card, mk_pi, prod_eq_of_fintype, toNat_lift, toNat_finset_prod] [GOAL] α : Type u_1 β : Type u_2 inst✝ : Finite α ⊢ Nat.card (α → β) = Nat.card β ^ Nat.card α [PROOFSTEP] haveI := Fintype.ofFinite α [GOAL] α : Type u_1 β : Type u_2 inst✝ : Finite α this : Fintype α ⊢ Nat.card (α → β) = Nat.card β ^ Nat.card α [PROOFSTEP] rw [Nat.card_pi, Finset.prod_const, Finset.card_univ, ← Nat.card_eq_fintype_card] [GOAL] α : Type u_1 β : Type u_2 n : ℕ ⊢ Nat.card (ZMod n) = n [PROOFSTEP] cases n [GOAL] case zero α : Type u_1 β : Type u_2 ⊢ Nat.card (ZMod zero) = zero [PROOFSTEP] exact @Nat.card_eq_zero_of_infinite _ Int.infinite [GOAL] case succ α : Type u_1 β : Type u_2 n✝ : ℕ ⊢ Nat.card (ZMod (succ n✝)) = succ n✝ [PROOFSTEP] rw [Nat.card_eq_fintype_card, ZMod.card] [GOAL] α✝ : Type u_1 β✝ : Type u_2 α : Type u_3 β : Type u_4 ⊢ card (α ⊕ β) = card α + card β [PROOFSTEP] simp only [PartENat.card, Cardinal.mk_sum, map_add, Cardinal.toPartENat_lift] [GOAL] α : Type u_1 β : Type u_2 n : ℕ c : Cardinal.{u_3} ⊢ ↑n ≤ ↑toPartENat c ↔ ↑n ≤ c [PROOFSTEP] rw [← toPartENat_cast n, toPartENat_le_iff_of_le_aleph0 (le_of_lt (nat_lt_aleph0 n))] [GOAL] α : Type u_1 β : Type u_2 c : Cardinal.{u_3} n : ℕ ⊢ ↑toPartENat c ≤ ↑n ↔ c ≤ ↑n [PROOFSTEP] rw [← toPartENat_cast n, toPartENat_le_iff_of_lt_aleph0 (nat_lt_aleph0 n)] [GOAL] α : Type u_1 β : Type u_2 n : ℕ c : Cardinal.{u_3} ⊢ ↑n = ↑toPartENat c ↔ ↑n = c [PROOFSTEP] rw [le_antisymm_iff, le_antisymm_iff, Cardinal.toPartENat_le_natCast_iff, Cardinal.natCast_le_toPartENat_iff] [GOAL] α : Type u_1 β : Type u_2 c : Cardinal.{u_3} n : ℕ ⊢ ↑toPartENat c = ↑n ↔ c = ↑n [PROOFSTEP] rw [eq_comm, 
Cardinal.natCast_eq_toPartENat_iff, eq_comm] [GOAL] α : Type u_1 β : Type u_2 n : ℕ c : Cardinal.{u_3} ⊢ ↑n < ↑toPartENat c ↔ ↑n < c [PROOFSTEP] simp only [← not_le, Cardinal.toPartENat_le_natCast_iff] [GOAL] α : Type u_1 β : Type u_2 n : ℕ c : Cardinal.{u_3} ⊢ ↑toPartENat c < ↑n ↔ c < ↑n [PROOFSTEP] simp only [← not_le, Cardinal.natCast_le_toPartENat_iff] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ card α = 0 ↔ IsEmpty α [PROOFSTEP] rw [← Cardinal.mk_eq_zero_iff] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ card α = 0 ↔ #α = 0 [PROOFSTEP] conv_rhs => rw [← Nat.cast_zero] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | #α = 0 [PROOFSTEP] rw [← Nat.cast_zero] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | #α = 0 [PROOFSTEP] rw [← Nat.cast_zero] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | #α = 0 [PROOFSTEP] rw [← Nat.cast_zero] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ card α = 0 ↔ #α = ↑0 [PROOFSTEP] simp only [← Cardinal.toPartENat_eq_natCast_iff] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ card α = 0 ↔ ↑toPartENat #α = ↑0 [PROOFSTEP] simp only [PartENat.card, Nat.cast_zero] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ card α ≤ 1 ↔ Subsingleton α [PROOFSTEP] rw [← le_one_iff_subsingleton] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ card α ≤ 1 ↔ #α ≤ 1 [PROOFSTEP] conv_rhs => rw [← Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | #α ≤ 1 [PROOFSTEP] rw [← Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | #α ≤ 1 [PROOFSTEP] rw [← Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | #α ≤ 1 [PROOFSTEP] rw [← Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ card α ≤ 1 ↔ #α ≤ ↑1 [PROOFSTEP] rw [← Cardinal.toPartENat_le_natCast_iff] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ card α ≤ 1 ↔ ↑toPartENat #α ≤ ↑1 [PROOFSTEP] simp only [PartENat.card, Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ 1 < card α ↔ Nontrivial α [PROOFSTEP] rw [← Cardinal.one_lt_iff_nontrivial] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ 1 < card α ↔ 1 < #α [PROOFSTEP] conv_rhs => rw [← Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | 1 < #α [PROOFSTEP] rw [← Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | 1 < #α [PROOFSTEP] rw [← Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 | 1 < #α [PROOFSTEP] rw [← Nat.cast_one] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ 1 < card α ↔ ↑1 < #α [PROOFSTEP] rw [← natCast_lt_toPartENat_iff] [GOAL] α✝ : Type u_1 β : Type u_2 α : Type u_3 ⊢ 1 < card α ↔ ↑1 < ↑toPartENat #α [PROOFSTEP] simp only [PartENat.card, Nat.cast_one]
module Test.Char import Data.Prim.Char import Data.SOP import Hedgehog allChar : Gen Char allChar = unicodeAll prop_ltMin : Property prop_ltMin = property $ do b8 <- forAll allChar (b8 >= MinChar) === True prop_comp : Property prop_comp = property $ do [m,n] <- forAll $ np [allChar, allChar] toOrdering (comp m n) === compare m n export props : Group props = MkGroup "Char" [ ("prop_ltMin", prop_ltMin) , ("prop_comp", prop_comp) ]
header {* Monad-Plus Class *} theory Monad_Plus imports Monad begin hide_const (open) Fixrec.mplus class plusU = tycon + fixes plusU :: "udom\<cdot>'a \<rightarrow> udom\<cdot>'a \<rightarrow> udom\<cdot>'a::tycon" class functor_plus = plusU + "functor" + assumes fmapU_plusU [coerce_simp]: "fmapU\<cdot>f\<cdot>(plusU\<cdot>a\<cdot>b) = plusU\<cdot>(fmapU\<cdot>f\<cdot>a)\<cdot>(fmapU\<cdot>f\<cdot>b)" assumes plusU_assoc: "plusU\<cdot>(plusU\<cdot>a\<cdot>b)\<cdot>c = plusU\<cdot>a\<cdot>(plusU\<cdot>b\<cdot>c)" class monad_plus = plusU + monad + assumes bindU_plusU: "bindU\<cdot>(plusU\<cdot>xs\<cdot>ys)\<cdot>k = plusU\<cdot>(bindU\<cdot>xs\<cdot>k)\<cdot>(bindU\<cdot>ys\<cdot>k)" assumes plusU_assoc': "plusU\<cdot>(plusU\<cdot>a\<cdot>b)\<cdot>c = plusU\<cdot>a\<cdot>(plusU\<cdot>b\<cdot>c)" instance monad_plus \<subseteq> functor_plus by default (simp_all only: fmapU_eq_bindU bindU_plusU plusU_assoc') definition fplus :: "'a\<cdot>'f::functor_plus \<rightarrow> 'a\<cdot>'f \<rightarrow> 'a\<cdot>'f" where "fplus = coerce\<cdot>(plusU :: udom\<cdot>'f \<rightarrow> _)" lemma fmap_fplus: fixes f :: "'a \<rightarrow> 'b" and a b :: "'a\<cdot>'f::functor_plus" shows "fmap\<cdot>f\<cdot>(fplus\<cdot>a\<cdot>b) = fplus\<cdot>(fmap\<cdot>f\<cdot>a)\<cdot>(fmap\<cdot>f\<cdot>b)" unfolding fmap_def fplus_def by (simp add: coerce_simp) lemma fplus_assoc: fixes a b c :: "'a\<cdot>'f::functor_plus" shows "fplus\<cdot>(fplus\<cdot>a\<cdot>b)\<cdot>c = fplus\<cdot>a\<cdot>(fplus\<cdot>b\<cdot>c)" unfolding fplus_def by (simp add: coerce_simp plusU_assoc) abbreviation mplus :: "'a\<cdot>'m::monad_plus \<rightarrow> 'a\<cdot>'m \<rightarrow> 'a\<cdot>'m" where "mplus \<equiv> fplus" lemmas mplus_def = fplus_def [where 'f="'m::monad_plus" for f] lemmas fmap_mplus = fmap_fplus [where 'f="'m::monad_plus" for f] lemmas mplus_assoc = fplus_assoc [where 'f="'m::monad_plus" for f] lemma bind_mplus: fixes a b :: "'a\<cdot>'m::monad_plus" shows "bind\<cdot>(mplus\<cdot>a\<cdot>b)\<cdot>k = mplus\<cdot>(bind\<cdot>a\<cdot>k)\<cdot>(bind\<cdot>b\<cdot>k)" unfolding bind_def mplus_def by (simp add: coerce_simp bindU_plusU) end
/** * Swagger Petstore * This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters. * * OpenAPI spec version: 1.0.0 * Contact: [email protected] * * NOTE: This class is auto generated by the swagger code generator 1.5.0. * https://github.com/swagger-api/swagger-codegen.git * Do not edit the class manually. */ #include "Category.h" #include <string> #include <sstream> #include <boost/property_tree/ptree.hpp> #include <boost/property_tree/json_parser.hpp> using boost::property_tree::ptree; using boost::property_tree::read_json; using boost::property_tree::write_json; Category::Category() { m_id = 0; m_name = ""; } Category::~Category() { } std::string Category::toJsonString() { std::stringstream ss; ptree pt; pt.put("id", m_id); pt.put("name", m_name); write_json(ss, pt, false); return ss.str(); } void Category::fromJsonString(std::string const& jsonString) { std::stringstream ss(jsonString); ptree pt; read_json(ss, pt); m_id = pt.get("id", 0); m_name = pt.get("name", ""); } int Category::getId() const { return m_id; } void Category::setId(int value) { m_id = value; } std::string Category::getName() const { return m_name; } void Category::setName(std::string value) { m_name = value; }
Require Import VST.veric.expr. Require Import VST.veric.SeparationLogic. Require Import VST.floyd.local2ptree. Require Import VST.floyd.client_lemmas. Require Import VST.floyd.efield_lemmas. Require Import Coq.Bool.Bool. Require Import Coq.Lists.List. Require Import mc_reify.clight_expr_eq. Fixpoint denote_tc_assert_b_norho a:= match a with | tc_TT => true | tc_andp' a b => andb (denote_tc_assert_b_norho a) (denote_tc_assert_b_norho b) | tc_orp' a b => orb (denote_tc_assert_b_norho a) (denote_tc_assert_b_norho b) | _ => false end. Fixpoint denote_tc_assert_b_norho_forgive_isptr a e:= match a with | tc_TT => true | tc_andp' a b => andb (denote_tc_assert_b_norho_forgive_isptr a e) (denote_tc_assert_b_norho_forgive_isptr b e) | tc_orp' a b => orb (denote_tc_assert_b_norho_forgive_isptr a e) (denote_tc_assert_b_norho_forgive_isptr b e) | tc_isptr e0 => expr_beq e e0 | _ => false end. Lemma denote_tc_assert_b_norho_sound: forall a rho, denote_tc_assert_b_norho a = true -> denote_tc_assert a rho. Proof. intros. induction a; simpl in *; unfold_lift; simpl; auto; try congruence. rewrite andb_true_iff in *. intuition. rewrite orb_true_iff in *. intuition. Qed. Lemma denote_tc_assert_b_norho_forgive_isptr_sound: forall a e rho, denote_tc_assert_b_norho_forgive_isptr a e = true -> isptr (expr.eval_expr e rho) -> denote_tc_assert a rho. Proof. intros. induction a; simpl in *; unfold_lift; simpl; auto; try congruence. rewrite andb_true_iff in *. intuition. rewrite orb_true_iff in *. intuition. apply expr_beq_spec in H; subst; auto. Qed. Definition tc_lvalue_b_norho Delta e := denote_tc_assert_b_norho (typecheck_lvalue Delta e). Definition tc_expr_b_norho Delta e := denote_tc_assert_b_norho (typecheck_expr Delta e). Definition tc_temp_id_b_norho id t Delta e:= denote_tc_assert_b_norho (typecheck_temp_id id t Delta e). Definition tc_lvalue_b_norho' Delta e := match e with | Ederef e0 t => denote_tc_assert_b_norho_forgive_isptr (typecheck_lvalue Delta e) e0 | _ => denote_tc_assert_b_norho (typecheck_lvalue Delta e) end. Lemma tc_lvalue_b_sound : forall e Delta rho, tc_lvalue_b_norho Delta e = true -> tc_lvalue Delta e rho . Proof. intros. apply denote_tc_assert_b_norho_sound; auto. Qed. Lemma tc_expr_b_sound : forall e Delta rho, tc_expr_b_norho Delta e = true -> tc_expr Delta e rho . Proof. intros. apply denote_tc_assert_b_norho_sound; auto. Qed. Lemma tc_temp_id_b_sound : forall id t Delta e rho, tc_temp_id_b_norho id t Delta e= true -> tc_temp_id id t Delta e rho . Proof. intros. apply denote_tc_assert_b_norho_sound; auto. Qed. Lemma tc_lvalue_b'_sound : forall e Delta rho, tc_lvalue_b_norho' Delta e = true -> isptr (expr.eval_lvalue e rho) -> tc_lvalue Delta e rho . Proof. intros. destruct e eqn:HH; try solve [apply tc_lvalue_b_sound; auto]. eapply denote_tc_assert_b_norho_forgive_isptr_sound; [exact H |]. simpl in H0. unfold_lift in H0. destruct (expr.eval_expr e0 rho); try inversion H0. simpl. auto. Qed. Fixpoint tc_efield_b_norho Delta efs := match efs with | nil => true | eArraySubsc ei :: efs' => (tc_expr_b_norho Delta ei && tc_efield_b_norho Delta efs')%bool | eStructField _ :: efs' => tc_efield_b_norho Delta efs' | eUnionField _ :: efs' => tc_efield_b_norho Delta efs' end. Lemma tc_efield_b_sound: forall efs Delta rho, tc_efield_b_norho Delta efs = true -> tc_efield Delta efs rho. Proof. intros. induction efs. + simpl; auto. + destruct a; simpl in H |- *. - apply andb_true_iff in H. destruct H. apply tc_expr_b_sound with (rho := rho) in H. tauto. - tauto. - tauto. Qed. 
Definition tc_LR_b_norho Delta e lr := match lr with | LLLL => tc_lvalue_b_norho' Delta e | RRRR => tc_expr_b_norho Delta e end. Definition type_is_int (e: Clight.expr) : bool := match typeof e with | Tint _ _ _ => true | _ => false end. (* Definition writable_share_b (sh: share) : bool := if (seplog.writable_share_dec sh) then true else false. Lemma writable_share_b_sound: forall sh, writable_share_b sh = true -> writable_share sh. Proof. intros. unfold writable_share_b in H. destruct (seplog.writable_share_dec sh). auto. congruence. Qed. *)
SUBROUTINE fdjac2(fcn, m, n, x, fvec, fjac, ldfjac, iflag, 1 ncnt, epsfcn, wa, time, fnorm_min, x_min, fvec_min) USE stel_kinds !DEC$ IF .NOT.DEFINED (MPI_OPT) USE fdjac_mod, m1=>m, n1=>n, eps1=>eps, ncnt1=>ncnt IMPLICIT NONE C----------------------------------------------- C D u m m y A r g u m e n t s C----------------------------------------------- INTEGER, INTENT(in) :: m, n, ldfjac, ncnt INTEGER, TARGET :: iflag REAL(rprec) :: epsfcn, time REAL(rprec), DIMENSION(n), TARGET :: x(n), wa(m) REAL(rprec), DIMENSION(m), INTENT(in) :: fvec REAL(rprec), DIMENSION(ldfjac,n), INTENT(out) :: fjac REAL(rprec), INTENT(out) :: fnorm_min, x_min(n), fvec_min(m) C----------------------------------------------- C L o c a l P a r a m e t e r s C----------------------------------------------- REAL(rprec), PARAMETER :: zero = 0 C----------------------------------------------- C L o c a l V a r i a b l e s C----------------------------------------------- INTEGER :: j, istat, iread, ic1, ic2, irate, count_max REAL(rprec) :: eps, epsmch, h, dpmpar, temp, cur_norm C----------------------------------------------- C E x t e r n a l F u n c t i o n s C----------------------------------------------- EXTERNAL fcn, dpmpar, multiprocess, fdjac_parallel REAL(rprec), EXTERNAL :: enorm C----------------------------------------------- c c SUBROUTINE fdjac2 c c this SUBROUTINE computes a forward-difference approximation c to the m by n jacobian matrix ASSOCIATED with a specified c problem of m functions in n variables. c c Here c c fcn is the name of the user-supplied SUBROUTINE which c calculates the functions. fcn must be declared c in an EXTernal statement in the user calling c program (see LMDIF1 for documentation), and should be written as follows: c c SUBROUTINE fcn(m,n,x,fvec,iflag,ncnt) c INTEGER m,n,iflag c REAL(rprec) x(n),fvec(m) c ---------- c calculate the functions at x and c RETURN this vector in fvec. c ---------- c RETURN c END c c fjac is an output m by n array which CONTAINS the c approximation to the jacobian matrix evaluated at x. c c ldfjac is a positive INTEGER input variable not less than m c which specifies the leading DIMENSION of the array fjac. c c iflag is an INTEGER variable which can be used to terminate c the execution of fdjac2. see description of fcn. c c epsfcn is an input variable used in determining a suitable c step length for the forward-difference approximation. this c approximation assumes that the relative errors in the c functions are of the order of epsfcn. IF epsfcn is less c than the machine precision, it is assumed that the relative c errors in the functions are of the order of the machine c precision. c c wa is a work array of length m. c c subprograms called c c user-supplied ...... fcn c c MINpack-supplied ... dpmpar c c fortran-supplied ... ABS,max,sqrt c c argonne national laboratory. MINpack project. march 1980. c burton s. garbow, kenneth e. hillstrom, jorge j. more c c ********** c c epsmch is the machine precision. c epsmch = dpmpar(1) c eps = SQRT(MAX(epsfcn,epsmch)) ! ! Load MODULE values. Pointers will automatically update TARGETs... ! Prepare for multi-processing... ! m1 = m n1 = n ncnt1 = ncnt eps1 = eps xp => x wap => wa ! Find MIN chisq = fnorm**2 state for this jacobian evaluation ! (Do NOT retain from previous evaluations, or could get into a non-converging loop...) fnorm_min = HUGE(fnorm_min) CALL system_clock(ic1, irate) CALL multiprocess(n, max_processors, fdjac_parallel, fcn) CALL system_clock(ic2, irate, count_max) IF (ic2 .lt. 
ic1) ic2 = ic2 + count_max c DO j = 1, n c temp = x(j) c h = eps*ABS(temp) c IF (h .eq. zero) h = eps c x(j) = temp + h c CALL fcn (m, n, x, wa, iflag, ncnt) c IF (iflag .lt. 0) EXIT c x(j) = temp c fjac(:m,j) = (wa - fvec)/h c END DO cur_norm = enorm(m,fvec) ! WHERE are we now? DO j = 1, n READ (j+1000, iostat=iread) istat, iflag, h, temp IF (iREAD .ne. 0) THEN WRITE (6, *) 'Error reading from file fort.', j+1000, 1 ' in fdjac2: IOSTAT = ', iread iflag = -14 ELSE IF (j .ne. istat) THEN WRITE (6, *) 'Wrong value for INDEX j READ in fdjac2' iflag = -14 END IF IF (iflag .ne. 0) EXIT !DEC$ IF DEFINED (CRAY) DO k = 1, m READ (j+1000) wa(k) END DO !DEC$ ELSE READ (j+1000) wa !DEC$ ENDIF fjac(:m,j) = (wa - fvec)/h IF( temp > cur_norm) flip(j) = .not. flip(j) ! flip for next time IF( temp < fnorm_min) THEN fnorm_min = temp fvec_min = wa !DEC$ IF DEFINED (CRAY) DO k = 1, n READ (j+1000) x_min(k) END DO !DEC$ ELSE READ (j+1000) x_min !DEC$ ENDIF END IF CLOSE (j+1000, status='delete') !!Needed to run correctly in multi-tasking... END DO ! ! Do ANY special cleanup now for IFLAG = flag_cleanup ! iflag = flag_cleanup CALL fcn(m, n, x, wa, iflag, ncnt) time = time + REAL(ic2 - ic1)/REAL(irate) !!Time in multi-process CALL !DEC$ ENDIF END SUBROUTINE fdjac2
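The comment block in fdjac2 above describes the forward-difference Jacobian approximation only in words; as a hedged summary of the step the code performs (eps = SQRT(MAX(epsfcn,epsmch)), h = eps*ABS(temp), fjac(:m,j) = (wa - fvec)/h), with symbol names chosen here purely for illustration:
\[ h_j = \sqrt{\max(\varepsilon_{\mathrm{fcn}},\varepsilon_{\mathrm{mach}})}\,\lvert x_j\rvert, \qquad \frac{\partial f_i}{\partial x_j}(x) \;\approx\; \frac{f_i(x + h_j e_j) - f_i(x)}{h_j}, \]
where $e_j$ is the $j$-th unit vector and $h_j$ falls back to $\sqrt{\max(\varepsilon_{\mathrm{fcn}},\varepsilon_{\mathrm{mach}})}$ itself when $x_j = 0$.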
/- Copyright (c) 2020 Aaron Anderson. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Aaron Anderson -/ import data.finset.fold import algebra.gcd_monoid.multiset /-! # GCD and LCM operations on finsets ## Main definitions - `finset.gcd` - the greatest common denominator of a `finset` of elements of a `gcd_monoid` - `finset.lcm` - the least common multiple of a `finset` of elements of a `gcd_monoid` ## Implementation notes Many of the proofs use the lemmas `gcd.def` and `lcm.def`, which relate `finset.gcd` and `finset.lcm` to `multiset.gcd` and `multiset.lcm`. TODO: simplify with a tactic and `data.finset.lattice` ## Tags finset, gcd -/ variables {α β γ : Type*} namespace finset open multiset variables [cancel_comm_monoid_with_zero α] [normalized_gcd_monoid α] /-! ### lcm -/ section lcm /-- Least common multiple of a finite set -/ def lcm (s : finset β) (f : β → α) : α := s.fold gcd_monoid.lcm 1 f variables {s s₁ s₂ : finset β} {f : β → α} lemma lcm_def : s.lcm f = (s.1.map f).lcm := rfl @[simp] lemma lcm_empty : (∅ : finset β).lcm f = 1 := fold_empty @[simp] lemma lcm_dvd_iff {a : α} : s.lcm f ∣ a ↔ (∀b ∈ s, f b ∣ a) := begin apply iff.trans multiset.lcm_dvd, simp only [multiset.mem_map, and_imp, exists_imp_distrib], exact ⟨λ k b hb, k _ _ hb rfl, λ k a' b hb h, h ▸ k _ hb⟩, end lemma lcm_dvd {a : α} : (∀b ∈ s, f b ∣ a) → s.lcm f ∣ a := lcm_dvd_iff.2 lemma dvd_lcm {b : β} (hb : b ∈ s) : f b ∣ s.lcm f := lcm_dvd_iff.1 dvd_rfl _ hb @[simp] lemma lcm_insert [decidable_eq β] {b : β} : (insert b s : finset β).lcm f = gcd_monoid.lcm (f b) (s.lcm f) := begin by_cases h : b ∈ s, { rw [insert_eq_of_mem h, (lcm_eq_right_iff (f b) (s.lcm f) (multiset.normalize_lcm (s.1.map f))).2 (dvd_lcm h)] }, apply fold_insert h, end @[simp] lemma lcm_singleton {b : β} : ({b} : finset β).lcm f = normalize (f b) := multiset.lcm_singleton @[simp] lemma normalize_lcm : normalize (s.lcm f) = s.lcm f := by simp [lcm_def] lemma lcm_union [decidable_eq β] : (s₁ ∪ s₂).lcm f = gcd_monoid.lcm (s₁.lcm f) (s₂.lcm f) := finset.induction_on s₁ (by rw [empty_union, lcm_empty, lcm_one_left, normalize_lcm]) $ λ a s has ih, by rw [insert_union, lcm_insert, lcm_insert, ih, lcm_assoc] theorem lcm_congr {f g : β → α} (hs : s₁ = s₂) (hfg : ∀a ∈ s₂, f a = g a) : s₁.lcm f = s₂.lcm g := by { subst hs, exact finset.fold_congr hfg } lemma lcm_mono_fun {g : β → α} (h : ∀ b ∈ s, f b ∣ g b) : s.lcm f ∣ s.lcm g := lcm_dvd (λ b hb, (h b hb).trans (dvd_lcm hb)) lemma lcm_mono (h : s₁ ⊆ s₂) : s₁.lcm f ∣ s₂.lcm f := lcm_dvd $ assume b hb, dvd_lcm (h hb) theorem lcm_eq_zero_iff [nontrivial α] : s.lcm f = 0 ↔ 0 ∈ f '' s := by simp only [multiset.mem_map, lcm_def, multiset.lcm_eq_zero_iff, set.mem_image, mem_coe, ← finset.mem_def] end lcm /-! 
### gcd -/ section gcd /-- Greatest common divisor of a finite set -/ def gcd (s : finset β) (f : β → α) : α := s.fold gcd_monoid.gcd 0 f variables {s s₁ s₂ : finset β} {f : β → α} lemma gcd_def : s.gcd f = (s.1.map f).gcd := rfl @[simp] lemma gcd_empty : (∅ : finset β).gcd f = 0 := fold_empty lemma dvd_gcd_iff {a : α} : a ∣ s.gcd f ↔ ∀b ∈ s, a ∣ f b := begin apply iff.trans multiset.dvd_gcd, simp only [multiset.mem_map, and_imp, exists_imp_distrib], exact ⟨λ k b hb, k _ _ hb rfl, λ k a' b hb h, h ▸ k _ hb⟩, end lemma gcd_dvd {b : β} (hb : b ∈ s) : s.gcd f ∣ f b := dvd_gcd_iff.1 dvd_rfl _ hb lemma dvd_gcd {a : α} : (∀b ∈ s, a ∣ f b) → a ∣ s.gcd f := dvd_gcd_iff.2 @[simp] lemma gcd_insert [decidable_eq β] {b : β} : (insert b s : finset β).gcd f = gcd_monoid.gcd (f b) (s.gcd f) := begin by_cases h : b ∈ s, { rw [insert_eq_of_mem h, (gcd_eq_right_iff (f b) (s.gcd f) (multiset.normalize_gcd (s.1.map f))).2 (gcd_dvd h)] ,}, apply fold_insert h, end @[simp] lemma gcd_singleton {b : β} : ({b} : finset β).gcd f = normalize (f b) := multiset.gcd_singleton @[simp] lemma normalize_gcd : normalize (s.gcd f) = s.gcd f := by simp [gcd_def] lemma gcd_union [decidable_eq β] : (s₁ ∪ s₂).gcd f = gcd_monoid.gcd (s₁.gcd f) (s₂.gcd f) := finset.induction_on s₁ (by rw [empty_union, gcd_empty, gcd_zero_left, normalize_gcd]) $ λ a s has ih, by rw [insert_union, gcd_insert, gcd_insert, ih, gcd_assoc] theorem gcd_congr {f g : β → α} (hs : s₁ = s₂) (hfg : ∀a ∈ s₂, f a = g a) : s₁.gcd f = s₂.gcd g := by { subst hs, exact finset.fold_congr hfg } lemma gcd_mono_fun {g : β → α} (h : ∀ b ∈ s, f b ∣ g b) : s.gcd f ∣ s.gcd g := dvd_gcd (λ b hb, (gcd_dvd hb).trans (h b hb)) lemma gcd_mono (h : s₁ ⊆ s₂) : s₂.gcd f ∣ s₁.gcd f := dvd_gcd $ assume b hb, gcd_dvd (h hb) theorem gcd_image {g : γ → β} (s: finset γ) [decidable_eq β] [is_idempotent α gcd_monoid.gcd] : (s.image g).gcd f = s.gcd (f ∘ g) := by simp [gcd, fold_image_idem] theorem gcd_eq_gcd_image [decidable_eq α] [is_idempotent α gcd_monoid.gcd] : s.gcd f = (s.image f).gcd id := (@gcd_image _ _ _ _ _ id _ _ _ _).symm theorem gcd_eq_zero_iff : s.gcd f = 0 ↔ ∀ (x : β), x ∈ s → f x = 0 := begin rw [gcd_def, multiset.gcd_eq_zero_iff], split; intro h, { intros b bs, apply h (f b), simp only [multiset.mem_map, mem_def.1 bs], use b, simp [mem_def.1 bs] }, { intros a as, rw multiset.mem_map at as, rcases as with ⟨b, ⟨bs, rfl⟩⟩, apply h b (mem_def.1 bs) } end lemma gcd_eq_gcd_filter_ne_zero [decidable_pred (λ (x : β), f x = 0)] : s.gcd f = (s.filter (λ x, f x ≠ 0)).gcd f := begin classical, transitivity ((s.filter (λ x, f x = 0)) ∪ (s.filter (λ x, f x ≠ 0))).gcd f, { rw filter_union_filter_neg_eq }, rw gcd_union, transitivity gcd_monoid.gcd (0 : α) _, { refine congr (congr rfl _) rfl, apply s.induction_on, { simp }, intros a s has h, rw filter_insert, split_ifs with h1; simp [h, h1], }, simp [gcd_zero_left, normalize_gcd], end lemma gcd_mul_left {a : α} : s.gcd (λ x, a * f x) = normalize a * s.gcd f := begin classical, apply s.induction_on, { simp }, intros b t hbt h, rw [gcd_insert, gcd_insert, h, ← gcd_mul_left], apply ((normalize_associated a).mul_right _).gcd_eq_right end lemma gcd_mul_right {a : α} : s.gcd (λ x, f x * a) = s.gcd f * normalize a := begin classical, apply s.induction_on, { simp }, intros b t hbt h, rw [gcd_insert, gcd_insert, h, ← gcd_mul_right], apply ((normalize_associated a).mul_left _).gcd_eq_right end end gcd end finset namespace finset section is_domain variables [comm_ring α] [is_domain α] [normalized_gcd_monoid α] lemma gcd_eq_of_dvd_sub {s : finset β} 
{f g : β → α} {a : α} (h : ∀ x : β, x ∈ s → a ∣ f x - g x) : gcd_monoid.gcd a (s.gcd f) = gcd_monoid.gcd a (s.gcd g) := begin classical, revert h, apply s.induction_on, { simp }, intros b s bs hi h, rw [gcd_insert, gcd_insert, gcd_comm (f b), ← gcd_assoc, hi (λ x hx, h _ (mem_insert_of_mem hx)), gcd_comm a, gcd_assoc, gcd_comm a (gcd_monoid.gcd _ _), gcd_comm (g b), gcd_assoc _ _ a, gcd_comm _ a], exact congr_arg _ (gcd_eq_of_dvd_sub_right (h _ (mem_insert_self _ _))) end end is_domain end finset
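A concrete sanity check of the two fold definitions above (my illustration, not part of the mathlib source), taking `f = id` over small finsets of naturals: `gcd` folds `gcd_monoid.gcd` starting from `0` and `lcm` folds `gcd_monoid.lcm` starting from `1`, so

$$\{4,6,10\}.\mathrm{gcd}\ \mathrm{id} = \gcd(4,\gcd(6,\gcd(10,0))) = \gcd(4,\gcd(6,10)) = \gcd(4,2) = 2, \qquad \{4,6\}.\mathrm{lcm}\ \mathrm{id} = \mathrm{lcm}(4,\mathrm{lcm}(6,1)) = \mathrm{lcm}(4,6) = 12,$$

consistent with `gcd_dvd`/`dvd_gcd` and `dvd_lcm`/`lcm_dvd`: $2$ divides every element of $\{4,6,10\}$ and any common divisor divides $2$; every element of $\{4,6\}$ divides $12$ and $12$ divides any common multiple.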
lemma connected_uncountable: fixes S :: "'a::metric_space set" assumes "connected S" "a \<in> S" "b \<in> S" "a \<noteq> b" shows "uncountable S"
State Before: α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 ⊢ lp.single p i (c • a) = c • lp.single p i a State After: α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 j : α ⊢ ↑(lp.single p i (c • a)) j = ↑(c • lp.single p i a) j Tactic: refine' ext (funext (fun (j : α) => _)) State Before: α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 j : α ⊢ ↑(lp.single p i (c • a)) j = ↑(c • lp.single p i a) j State After: case pos α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 j : α hi : j = i ⊢ ↑(lp.single p i (c • a)) j = ↑(c • lp.single p i a) j case neg α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 j : α hi : ¬j = i ⊢ ↑(lp.single p i (c • a)) j = ↑(c • lp.single p i a) j Tactic: by_cases hi : j = i State Before: case pos α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 j : α hi : j = i ⊢ ↑(lp.single p i (c • a)) j = ↑(c • lp.single p i a) j State After: case pos α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ c : 𝕜 j : α a : E j ⊢ ↑(lp.single p j (c • a)) j = ↑(c • lp.single p j a) j Tactic: subst hi State Before: case pos α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ c : 𝕜 j : α a : E j ⊢ ↑(lp.single p j (c • a)) j = ↑(c • lp.single p j a) j State After: case pos α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ c : 𝕜 j : α a : E j ⊢ ↑(lp.single p j (c • a)) j = c • ↑(lp.single p j a) j Tactic: dsimp State Before: case pos α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ c : 𝕜 j : α a : E j ⊢ ↑(lp.single p j (c • a)) j = c • ↑(lp.single p j a) j State After: no goals Tactic: simp [lp.single_apply_self] State Before: case neg α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : 
NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 j : α hi : ¬j = i ⊢ ↑(lp.single p i (c • a)) j = ↑(c • lp.single p i a) j State After: case neg α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 j : α hi : ¬j = i ⊢ ↑(lp.single p i (c • a)) j = c • ↑(lp.single p i a) j Tactic: dsimp State Before: case neg α : Type u_1 E : α → Type u_2 p✝ q : ℝ≥0∞ inst✝⁴ : (i : α) → NormedAddCommGroup (E i) 𝕜 : Type u_3 inst✝³ : NormedRing 𝕜 inst✝² : (i : α) → Module 𝕜 (E i) inst✝¹ : ∀ (i : α), BoundedSMul 𝕜 (E i) inst✝ : DecidableEq α p : ℝ≥0∞ i : α a : E i c : 𝕜 j : α hi : ¬j = i ⊢ ↑(lp.single p i (c • a)) j = c • ↑(lp.single p i a) j State After: no goals Tactic: simp [lp.single_apply_ne p i _ hi]
```python from sympy import * init_printing() ``` ```python x1, x2, a, b, w, p1, p2, L = symbols('x_1 x_2 alpha beta w p_1 p_2 lambda') ``` ```python U = (x1**a) * (x2**b) budget_constraint = w - p1*x1 - p2*x2 U, budget_constraint ``` ## Constrained Maximization Problem The consumer chooses $\mathbf{x} = (x_1, x_2)$ to solve, \begin{align} \max_{\mathbf{x}} &\; U(\mathbf{x}) = x_1^{\alpha} x_2^{\beta} \\ s.t. &\; \mathbf{p}\cdot \mathbf{x} = w \end{align} where $\mathbf{p} = (p_1, p_2) \gg \mathbf{0}$ ## Convert to Unconstrained Problem We can convert the constrained optimization problem by forming the *Lagrangian* function as our objective function. \begin{align} \mathcal{L}(\mathbf{x},\lambda) &= U(\mathbf{x}) + \lambda \left(w - \mathbf{p}\cdot \mathbf{x} \right) \\ &= x_1^{\alpha} x_2^{\beta} + \lambda (w - p_1 x_1 - p_2 x_2) \end{align} Using this as the objective function we can derive the first order condition for a maximum (yielding the *critical points*) and test the second order condition at those critical points to verify we have a local maximum and not a minimum or saddle point. ```python Lagrange = U + L*budget_constraint Lagrange_x1 = Lagrange.diff(x1) Lagrange_x2 = Lagrange.diff(x2) Lagrange_L = Lagrange.diff(L) Lagrange_x1, Lagrange_x2, Lagrange_L ``` We solve the first two conditions (partial w.r.t. $x_1,x_2$) for the multiplier $\lambda$ and then set them equal to each other to get rid of the $\lambda$ term and solve for $x_1$ in terms of $x_2$ and parameters. ```python term1 = solve(Lagrange_x1, L)[0].simplify() term2 = solve(Lagrange_x2, L)[0] x1_temp = solve(term1 - term2, x1)[0].simplify() Eq(x1, x1_temp) ``` Now we can substitute our expression for $x_1$, that summarizes the first 2 conditions, into the third first order condition (associated with $\lambda$), solve for $x_2$ to get the **Marshalian Demand** function for $x_2$. ```python subbed_term = Lagrange_L.subs(x1, x1_temp) x2_star = solve(subbed_term, x2)[0] Eq(Symbol('x_2^*'), x2_star) ``` We can now substitute the demand function $x_2^*(\mathbf{p},w)$ into another first order condition, solve for $x_1$ to get its demand function. ```python subbed_term = Lagrange_L.subs(x2, x2_star) x1_star = solve(subbed_term, x1)[0] Eq(Symbol('x_1^*'), x1_star) ``` Now that we know $x_1^*$ and $x_2^*$ we can substitute them into one the first order conditions that were solved for $\lambda$ and derive the optimal Lagrange multiplier value (which represents the marginal utility of wealth at the optimal choice). ```python L_star = term1.subs([(x1, x1_star), (x2, x2_star)]).simplify() Eq(Symbol('lambda^*'), L_star) ``` The Walrasian demand is the "argmax" of the utility maximization problem. \begin{align} \mathbf{x}(\mathbf{p},w) &= (x_1^*(\mathbf{p},w), x_2^*(\mathbf{p},w)) \end{align} substituting the solutions to the maximization problem into the utility function we can derive the **indirect utility**. ```python V = U.subs([(x1, x1_star), (x2, x2_star)]).simplify() Eq(Symbol('V(\mathbf{p},w)'), V) ```
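A quick numeric spot-check of the closed-form demands can be run in the same notebook session (the parameter values below are arbitrary, chosen only for illustration):

```python
# Illustrative check: with alpha = beta = 1, p1 = 2, p2 = 4, w = 12 the
# Cobb-Douglas demands should split expenditure equally between the goods
# and exhaust the budget.
vals = {a: 1, b: 1, p1: 2, p2: 4, w: 12}
x1_num = x1_star.subs(vals)                       # expected: 3   (= (1/2) * 12 / 2)
x2_num = x2_star.subs(vals)                       # expected: 3/2
budget_left = (w - p1*x1_star - p2*x2_star).subs(vals)
x1_num, x2_num, simplify(budget_left)             # (3, 3/2, 0)
```

Spending on good 1 is $p_1 x_1^* = 6$ and on good 2 is $p_2 x_2^* = 6$, i.e. the $\alpha : \beta$ expenditure shares expected from a Cobb-Douglas utility.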
subroutine sumrun(wmelt,hascon) c******************************************************************** c * c This subroutine is called from SR CONTIN to generate a summary * c of the number of runoff events and total runoff volume generated * c during the simulation period. * c * c******************************************************************** c real wmelt integer hascon c include 'pmxelm.inc' include 'pmxpln.inc' include 'pmxprt.inc' include 'pmxhil.inc' c c******************************************************************** c * c Common Blocks * c * c******************************************************************** c include 'cavloss.inc' c include 'cclim.inc' include 'cefflen.inc' c read: efflen(mxplan) include 'chydrol.inc' c include 'cirriga.inc' c include 'cslpopt.inc' c include 'cstmflg.inc' c include 'cstruc.inc' include 'csumirr.inc' include 'csumout.inc' c c******************************************************************** c * c sumout variables updated * c nrunot(mxplan),nrunoy(mxplan),nrunom(13,mxplan) * c trunot(mxplan),trunoy(mxplan),trunom(13,mxplan) * c * c******************************************************************** cReza put IF here 3/4/94 c Change to include consideration of irrigation-induced runoff. c dcf 8/25/94 c if(rain(iplane) .gt. 0.0 .or. irrund(iplane) .gt. 0.0) then nrunot(iplane) = nrunot(iplane) + 1 nrunoy(iplane) = nrunoy(iplane) + 1 nrunom(iplane) = nrunom(iplane) + 1 c if (hascon.ne.0) then trunot(iplane) = trunot(iplane) + (runoff(iplane)*1000.0) trunoy(iplane) = trunoy(iplane) + (runoff(iplane)*1000.0) trunom(iplane) = trunom(iplane) + (runoff(iplane)*1000.0) else trunot(iplane) = trunot(iplane) + (runoff(iplane)*1000.0) * 1 efflen(iplane) / totlen(iplane) trunoy(iplane) = trunoy(iplane) + (runoff(iplane)*1000.0) * 1 efflen(iplane) / totlen(iplane) trunom(iplane) = trunom(iplane) + (runoff(iplane)*1000.0) * 1 efflen(iplane) / totlen(iplane) endif c c monthly rainfall runoff totals 06-27-94 sjl c else c Reza 3/7/94. c c XXX this appears to be incorrect. Will calculate melt runoff if c irrigation has occured on a day without rainfall in the middle c of the summer. 06-22-94 04:49pm sjl c however there is probably a flag for frozen soil c that i do not know about, i added following condition for my work c c c ...if 5 day avg temp less than freezing count runoff as melt runoff c otherwise it will be calculated as irrigation runoff06-27-94 10:49am sjl c c XXX The following causes problems with the water balance, because c melt runoff events on days in which average temp is greater c than 0 degrees Centigrade are not added in. Change this so that c it checks melt values (wmelt(iplane)) - dcf 7/6/94 c if(tmnavg.lt.0)then c if(wmelt .gt. 0.0)then nmunot(iplane) = nmunot(iplane) + 1 nmunoy(iplane) = nmunoy(iplane) + 1 nmunom(iplane) = nmunom(iplane) + 1 c if (hascon.ne.0) then tmunot(iplane) = tmunot(iplane) + (runoff(iplane)*1000.0) tmunoy(iplane) = tmunoy(iplane) + (runoff(iplane)*1000.0) tmunom(iplane) = tmunom(iplane) + (runoff(iplane)*1000.0) else tmunot(iplane) = tmunot(iplane) + (runoff(iplane)*1000.0) * 1 efflen(iplane) / totlen(iplane) tmunoy(iplane) = tmunoy(iplane) + (runoff(iplane)*1000.0) * 1 efflen(iplane) / totlen(iplane) tmunom(iplane) = tmunom(iplane) + (runoff(iplane)*1000.0) * 1 efflen(iplane) / totlen(iplane) endif endif endif c return end
Formal statement is: lemma closed_injective_linear_image: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" assumes S: "closed S" and f: "linear f" "inj f" shows "closed (f ` S)" Informal statement is: If $f$ is a linear injection from a Euclidean space to another Euclidean space, then the image of a closed set under $f$ is closed.
theory T39 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) " nitpick[card nat=4,timeout=86400] oops end
Formal statement is: lemma closed_Times: "closed S \<Longrightarrow> closed T \<Longrightarrow> closed (S \<times> T)" Informal statement is: The product of two closed sets is closed.
//================================================================================================== /** Copyright 2016 NumScale SAS Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) **/ //================================================================================================== #ifndef BOOST_SIMD_ARCH_COMMON_SCALAR_FUNCTION_REC_HPP_INCLUDED #define BOOST_SIMD_ARCH_COMMON_SCALAR_FUNCTION_REC_HPP_INCLUDED #include <boost/simd/detail/overload.hpp> #include <boost/simd/constant/one.hpp> #include <boost/simd/constant/valmax.hpp> #include <boost/simd/constant/zero.hpp> #include <boost/simd/function/abs.hpp> #include <boost/simd/function/raw.hpp> #include <boost/simd/function/sign.hpp> namespace boost { namespace simd { namespace ext { namespace bd = boost::dispatch; namespace bs = boost::simd; #ifdef BOOST_MSVC #pragma warning(push) #pragma warning(disable: 4723) // potential divide by 0 #endif BOOST_DISPATCH_OVERLOAD ( rec_ , (typename A0) , bd::cpu_ , bd::scalar_< bd::floating_<A0> > ) { BOOST_FORCEINLINE A0 operator() ( A0 a0) const BOOST_NOEXCEPT { return One<A0>()/a0; } }; BOOST_DISPATCH_OVERLOAD ( rec_ , (typename A0) , bd::cpu_ , bd::scalar_< bd::arithmetic_<A0> > ) { BOOST_FORCEINLINE A0 operator() ( A0 a0) const BOOST_NOEXCEPT { return (a0 ? ((bs::abs(a0) == One<A0>()) ? sign(a0) : Zero<A0>()) : Valmax<A0>()); } }; BOOST_DISPATCH_OVERLOAD ( rec_ , (typename T) , bd::cpu_ , bs::raw_tag , bd::scalar_<bd::unspecified_<T>> ) { BOOST_FORCEINLINE T operator()(const raw_tag &, T a) const BOOST_NOEXCEPT { return rec(a); } }; } } } #ifdef BOOST_MSVC #pragma warning(pop) #endif #endif
Formal statement is: proposition homotopic_loops_conjugate: fixes s :: "'a::real_normed_vector set" assumes "path p" "path q" and pip: "path_image p \<subseteq> s" and piq: "path_image q \<subseteq> s" and pq: "pathfinish p = pathstart q" and qloop: "pathfinish q = pathstart q" shows "homotopic_loops s (p +++ q +++ reversepath p) q" Informal statement is: If $p$ and $q$ are paths in a set $s$ such that $p$ ends where $q$ starts and $q$ is a loop, then the loop $p q p^{-1}$ is homotopic to $q$ within $s$.
theory T111 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) " nitpick[card nat=7,timeout=86400] oops end
/- Copyright (c) 2020 Simon Hudon. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Simon Hudon -/ import tactic.core /-! # `pretty_cases` tactic When using `induction` and `cases`, `pretty_cases` prints a `"Try this:"` advice that shows how to structure the proof with `case { ... }` commands. In the following example, we apply induction on a permutation assumption about lists. `pretty_cases` gives us a proof skeleton that explicit selects the branches and explicit names the new local constants: ```lean example {α} (xs ys : list α) (h : xs ~ ys) : true := begin induction h, pretty_cases, -- Try this: -- case list.perm.nil : -- { admit }, -- case list.perm.cons : h_x h_l₁ h_l₂ h_a h_ih -- { admit }, -- case list.perm.swap : h_x h_y h_l -- { admit }, -- case list.perm.trans : h_l₁ h_l₂ h_l₃ h_a h_a_1 h_ih_a h_ih_a_1 -- { admit }, end ``` ## Main definitions * `pretty_cases_advice` return `pretty_cases` advice without printing it * `pretty_cases` main tactic -/ namespace tactic /-- Query the proof goal and print the skeleton of a proof by cases. -/ meta def pretty_cases_advice : tactic string := retrieve $ do gs ← get_goals, cases ← gs.mmap $ λ g, do { t : list name ← get_tag g, let vs := t.tail, let ⟨vs,ts⟩ := vs.span (λ n, name.last_string n = "_arg"), set_goals [g], ls ← local_context, let m := native.rb_map.of_list $ (ls.map expr.local_uniq_name).zip (ls.map expr.local_pp_name), let vs := vs.map $ λ v, (m.find v.get_prefix).get_or_else `_, let var_decls := string.intercalate " " $ vs.map to_string, let var_decls := if vs.empty then "" else " : " ++ var_decls, pure sformat!" case {ts.head}{var_decls}\n {{ admit }}" }, let cases := string.intercalate ",\n" cases, pure sformat!"Try this:\n{cases}" namespace interactive /-- Query the proof goal and print the skeleton of a proof by cases. For example, let us consider the following proof: ```lean example {α} (xs ys : list α) (h : xs ~ ys) : true := begin induction h, pretty_cases, -- Try this: -- case list.perm.nil : -- { admit }, -- case list.perm.cons : h_x h_l₁ h_l₂ h_a h_ih -- { admit }, -- case list.perm.swap : h_x h_y h_l -- { admit }, -- case list.perm.trans : h_l₁ h_l₂ h_l₃ h_a h_a_1 h_ih_a h_ih_a_1 -- { admit }, end ``` The output helps the user layout the cases and rename the introduced variables. -/ meta def pretty_cases : tactic unit := pretty_cases_advice >>= trace add_tactic_doc { name := "pretty_cases", category := doc_category.tactic, decl_names := [``tactic.interactive.pretty_cases], tags := ["context management", "goal management"] } end interactive end tactic
// Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved // Use of this source code is governed by an apache-2.0 license that can be // found in the LICENSE file. #include "common/step/filesystem/step_recover_storage_directories.h" #include <boost/filesystem/path.hpp> #include <boost/system/error_code.hpp> #include "common/paths.h" #include "common/utils/file_util.h" namespace { const char kDataLocation[] = "data"; const char kSharedResLocation[] = "shared"; } // namespace namespace bf = boost::filesystem; namespace common_installer { namespace filesystem { bool StepRecoverStorageDirectories::MoveAppStorage( const bf::path& in_src, const bf::path& in_dst, const char *key) { bf::path src = in_src / key; if (!bf::exists(src)) return false; bf::path dst = in_dst / key; return common_installer::MoveDir(src, dst); } Step::Status StepRecoverStorageDirectories::RecoveryUpdate() { if (!context_->pkg_path.get().empty()) { bf::path backup_path = common_installer::GetBackupPathForPackagePath( context_->pkg_path.get()); if (bf::exists(backup_path)) { MoveAppStorage(context_->pkg_path.get(), backup_path, kDataLocation); MoveAppStorage(context_->pkg_path.get(), backup_path, kSharedResLocation); } } return Status::OK; } } // namespace filesystem } // namespace common_installer
theory T123 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) " nitpick[card nat=8,timeout=86400] oops end
/- Copyright (c) 2021 Eric Wieser. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Eric Wieser ! This file was ported from Lean 3 source module algebra.dual_number ! leanprover-community/mathlib commit b8d2eaa69d69ce8f03179a5cda774fc0cde984e4 ! Please do not edit these lines, except to modify the commit id ! if you have ported upstream changes. -/ import Mathlib.Algebra.TrivSqZeroExt /-! # Dual numbers The dual numbers over `R` are of the form `a + bε`, where `a` and `b` are typically elements of a commutative ring `R`, and `ε` is a symbol satisfying `ε^2 = 0`. They are a special case of `TrivSqZeroExt R M` with `M = R`. ## Notation In the `DualNumber` locale: * `R[ε]` is a shorthand for `DualNumber R` * `ε` is a shorthand for `DualNumber.eps` ## Main definitions * `DualNumber` * `DualNumber.eps` * `DualNumber.lift` ## Implementation notes Rather than duplicating the API of `TrivSqZeroExt`, this file reuses the functions there. ## References * https://en.wikipedia.org/wiki/Dual_number -/ variable {R : Type _} /-- The type of dual numbers, numbers of the form $a + bε$ where $ε^2 = 0$.-/ abbrev DualNumber (R : Type _) : Type _ := TrivSqZeroExt R R #align dual_number DualNumber /-- The unit element $ε$ that squares to zero. -/ def DualNumber.eps [Zero R] [One R] : DualNumber R := TrivSqZeroExt.inr 1 #align dual_number.eps DualNumber.eps -- mathport name: dual_number.eps scoped[DualNumber] notation "ε" => DualNumber.eps -- mathport name: dual_number scoped[DualNumber] postfix:1024 "[ε]" => DualNumber open DualNumber namespace DualNumber open TrivSqZeroExt @[simp] theorem fst_eps [Zero R] [One R] : fst ε = (0 : R) := fst_inr _ _ #align dual_number.fst_eps DualNumber.fst_eps @[simp] theorem snd_eps [Zero R] [One R] : snd ε = (1 : R) := snd_inr _ _ #align dual_number.snd_eps DualNumber.snd_eps /-- A version of `TrivSqZeroExt.snd_mul` with `*` instead of `•`. -/ @[simp] theorem snd_mul [Semiring R] (x y : R[ε]) : snd (x * y) = fst x * snd y + snd x * fst y := TrivSqZeroExt.snd_mul _ _ #align dual_number.snd_mul DualNumber.snd_mul @[simp] theorem eps_mul_eps [Semiring R] : (ε * ε : R[ε]) = 0 := inr_mul_inr _ _ _ #align dual_number.eps_mul_eps DualNumber.eps_mul_eps @[simp] theorem inr_eq_smul_eps [MulZeroOneClass R] (r : R) : inr r = (r • ε : R[ε]) := ext (MulZeroClass.mul_zero r).symm (mul_one r).symm #align dual_number.inr_eq_smul_eps DualNumber.inr_eq_smul_eps /-- For two algebra morphisms out of `R[ε]` to agree, it suffices for them to agree on `ε`. -/ @[ext] variable {A : Type _} [CommSemiring R] [Semiring A] [Algebra R A] /-- A universal property of the dual numbers, providing a unique `R[ε] →ₐ[R] A` for every element of `A` which squares to `0`. This isomorphism is named to match the very similar `complex.lift`. -/ @[simps!] def lift : { e : A // e * e = 0 } ≃ (R[ε] →ₐ[R] A) := Equiv.trans (show { e : A // e * e = 0 } ≃ { f : R →ₗ[R] A // ∀ x y, f x * f y = 0 } from (LinearMap.ringLmapEquivSelf R ℕ A).symm.toEquiv.subtypeEquiv fun a => by dsimp simp_rw [smul_mul_smul] refine' ⟨fun h x y => h.symm ▸ smul_zero _, fun h => by simpa using h 1 1⟩) TrivSqZeroExt.lift #align dual_number.lift DualNumber.lift -- When applied to `ε`, `DualNumber.lift` produces the element of `A` that squares to 0. 
-- @[simp] -- Porting note: simp can prove this theorem lift_apply_eps (e : { e : A // e * e = 0 }) : @lift R _ _ _ _ e (ε : R[ε]) = e := by simp only [lift_apply_apply, fst_eps, map_zero, snd_eps, one_smul, zero_add] #align dual_number.lift_apply_eps DualNumber.lift_apply_eps -- Lifting `DualNumber.eps` itself gives the identity. @[simp] theorem lift_eps : lift ⟨ε, eps_mul_eps⟩ = AlgHom.id R R[ε] := algHom_ext <| lift_apply_eps _ #align dual_number.lift_eps DualNumber.lift_eps end DualNumber
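For orientation (my own gloss, not part of the Mathlib file): writing a dual number $x$ as $a + b\varepsilon$ with $a = \mathrm{fst}\,x$ and $b = \mathrm{snd}\,x$, the ring structure inherited from `TrivSqZeroExt` multiplies as

$$(a + b\varepsilon)(c + d\varepsilon) = ac + (ad + bc)\varepsilon,$$

the $bd\varepsilon^2$ term vanishing because $\varepsilon^2 = 0$; this is exactly the coefficient formula recorded by `snd_mul` above. Likewise `lift_apply_eps` says that the algebra morphism produced by `lift` from a square-zero element $e$ of $A$ sends $\varepsilon$ to $e$.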
Formal statement is: lemma LIM_offset: "f \<midarrow>a\<rightarrow> L \<Longrightarrow> (\<lambda>x. f (x + k)) \<midarrow>(a - k)\<rightarrow> L" for a :: "'a::real_normed_vector" Informal statement is: If $f$ converges to $L$ as $x$ approaches $a$, then $f(x+k)$ converges to $L$ as $x$ approaches $a-k$.
State Before: α : Type u_1 β : Type u_2 γ : Type ?u.109928 δ : Type ?u.109931 σ : Type ?u.109934 inst✝⁴ : Primcodable α inst✝³ : Primcodable β inst✝² : Primcodable γ inst✝¹ : Primcodable δ inst✝ : Primcodable σ f : α → ℕ g : α → β h : α → β → β hf : Primrec f hg : Primrec g hh : Primrec₂ h a : α ⊢ Nat.rec (g a) (fun n IH => h a (n, IH).snd) (f a) = (h a^[f a]) (g a) State After: no goals Tactic: induction f a <;> simp [*, -Function.iterate_succ, Function.iterate_succ']
subsection \<open>Example: A Reference Monitor for a Simple Language\<close> theory Simple_Lang imports Main begin text \<open>In this example we will use the above theorem to prove the security of a very simplistic programming language incorporating a reference monitor.\<close> subsubsection \<open>Language\<close> text \<open>As actions, we only consider assignments of the form @{text "n :=\<^bsub>d\<^esub> \<tau>"}, where @{text n} is a variable name, @{text d} is a domain, and @{text \<tau>} is an expression. We only consider basic expressions that are built using constants, variable references, subtraction, and addition:\<close> datatype 'var expr = Var 'var | Const integer | Plus "('var expr)" "('var expr)" | Minus "('var expr)" "('var expr)" text \<open>For example, the term @{text "Minus (Plus (Var Y) (Var Z)) (Const 5)"} is a syntactic representation of an expression which would more commonly be written as @{text "(Y + Z) - 5"}. The state of the automaton is simply a mapping from variable names to integers.\<close> type_synonym 'var state = "'var \<Rightarrow> integer" text \<open>The function @{text \<E>} evaluates an expression in a given state.\<close> fun \<E> :: "'var expr \<Rightarrow> 'var state \<Rightarrow> integer" where "\<E> (Var v) s = s v" | "\<E> (Const c) s = c" | "\<E> (Plus e1 e2) s = \<E> e1 s + \<E> e2 s" | "\<E> (Minus e1 e2) s = \<E> e1 s - \<E> e2 s" text \<open>The function @{text Vars} returns the set of variables appearing in an expression.\<close> fun Vars :: "'var expr \<Rightarrow> 'var set" where "Vars (Var v) = {v}" | "Vars (Const c) = {}" | "Vars (Plus e1 e2) = Vars e1 \<union> Vars e2" | "Vars (Minus e1 e2) = Vars e1 \<union> Vars e2" text \<open>Actions are assignments of expressions to variables, annotated with a domain.\<close> datatype ('var, 'dom) cmd = Assign 'var (dom: 'dom) "'var expr" ("_ :=\<^bsub>_\<^esub> _") lemma "dom (v :=\<^bsub>d\<^esub> e) = d" by auto text \<open>The effect of an assignment @{text "v :=\<^bsub>d\<^esub> e"} in a state @{text s} is that the value of @{text v} is updated to the evaluation of @{text "e"} in @{text s}.\<close> fun execute :: "'var state \<Rightarrow> ('var, 'dom) cmd \<Rightarrow> 'var state" where "execute s (v :=\<^bsub>d\<^esub> e) = s(v := \<E> e s)" text \<open>The following coincidence lemma will be useful: If two states coincide on the variables appearing in an expression, then the result of evaluating the expression in both states is equal.\<close> lemma coincidence: assumes "\<forall>v \<in> Vars e. s v = t v" shows "\<E> e s = \<E> e t" using assms by (induction e) auto end
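A small worked example using the expression already quoted in the theory text (my illustration, not part of the theory): take a state $s$ with $s\,Y = 3$ and $s\,Z = 4$. Unfolding the defining equations of the evaluator gives

$$\mathcal{E}\,(\mathrm{Minus}\ (\mathrm{Plus}\ (\mathrm{Var}\ Y)\ (\mathrm{Var}\ Z))\ (\mathrm{Const}\ 5))\ s = (s\,Y + s\,Z) - 5 = (3 + 4) - 5 = 2,$$

and the variable set of the same expression is $\{Y, Z\}$, so by the coincidence lemma any state agreeing with $s$ on $Y$ and $Z$ also evaluates it to $2$. Executing the assignment "X :=⇘d⇙ (Minus (Plus (Var Y) (Var Z)) (Const 5))" in $s$ therefore yields the state $s(X := 2)$, unchanged on every other variable.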
/- Copyright (c) 2022 Markus Himmel. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Markus Himmel -/ import category_theory.limits.yoneda import category_theory.preadditive.opposite import algebra.category.Module.basic import algebra.category.Group.preadditive /-! # The Yoneda embedding for preadditive categories The Yoneda embedding for preadditive categories sends an object `Y` to the presheaf sending an object `X` to the group of morphisms `X ⟶ Y`. At each point, we get an additional `End Y`-module structure. We also show that this presheaf is additive and that it is compatible with the normal Yoneda embedding in the expected way and deduce that the preadditive Yoneda embedding is fully faithful. ## TODO * The Yoneda embedding is additive itself -/ universes v u open category_theory.preadditive opposite category_theory.limits noncomputable theory namespace category_theory variables {C : Type u} [category.{v} C] [preadditive C] /-- The Yoneda embedding for preadditive categories sends an object `Y` to the presheaf sending an object `X` to the `End Y`-module of morphisms `X ⟶ Y`. -/ @[simps] def preadditive_yoneda_obj (Y : C) : Cᵒᵖ ⥤ Module.{v} (End Y) := { obj := λ X, Module.of _ (X.unop ⟶ Y), map := λ X X' f, { to_fun := λ g, f.unop ≫ g, map_add' := λ g g', comp_add _ _ _ _ _ _, map_smul' := λ r g, eq.symm $ category.assoc _ _ _ } } /-- The Yoneda embedding for preadditive categories sends an object `Y` to the presheaf sending an object `X` to the group of morphisms `X ⟶ Y`. At each point, we get an additional `End Y`-module structure, see `preadditive_yoneda_obj`. -/ @[simps] def preadditive_yoneda : C ⥤ (Cᵒᵖ ⥤ AddCommGroup.{v}) := { obj := λ Y, preadditive_yoneda_obj Y ⋙ forget₂ _ _, map := λ Y Y' f, { app := λ X, { to_fun := λ g, g ≫ f, map_zero' := limits.zero_comp, map_add' := λ g g', add_comp _ _ _ _ _ _ }, naturality' := λ X X' g, AddCommGroup.ext _ _ _ _ $ λ x, category.assoc _ _ _ }, map_id' := λ X, by { ext, simp }, map_comp' := λ X Y Z f g, by { ext, simp } } /-- The Yoneda embedding for preadditive categories sends an object `X` to the copresheaf sending an object `Y` to the `End X`-module of morphisms `X ⟶ Y`. -/ @[simps] def preadditive_coyoneda_obj (X : Cᵒᵖ) : C ⥤ Module.{v} (End X) := { obj := λ Y, Module.of _ (unop X ⟶ Y), map := λ Y Y' f, { to_fun := λ g, g ≫ f, map_add' := λ g g', add_comp _ _ _ _ _ _, map_smul' := λ r g, category.assoc _ _ _ } } /-- The Yoneda embedding for preadditive categories sends an object `X` to the copresheaf sending an object `Y` to the group of morphisms `X ⟶ Y`. At each point, we get an additional `End X`-module structure, see `preadditive_coyoneda_obj`. 
-/ @[simps] def preadditive_coyoneda : Cᵒᵖ ⥤ (C ⥤ AddCommGroup.{v}) := { obj := λ X, preadditive_coyoneda_obj X ⋙ forget₂ _ _, map := λ X X' f, { app := λ Y, { to_fun := λ g, f.unop ≫ g, map_zero' := limits.comp_zero, map_add' := λ g g', comp_add _ _ _ _ _ _ }, naturality' := λ Y Y' g, AddCommGroup.ext _ _ _ _ $ λ x, eq.symm $ category.assoc _ _ _ }, map_id' := λ X, by { ext, simp }, map_comp' := λ X Y Z f g, by { ext, simp } } instance additive_yoneda_obj (X : C) : functor.additive (preadditive_yoneda_obj X) := {} instance additive_yoneda_obj' (X : C) : functor.additive (preadditive_yoneda.obj X) := {} instance additive_coyoneda_obj (X : Cᵒᵖ) : functor.additive (preadditive_coyoneda_obj X) := {} instance additive_coyoneda_obj' (X : Cᵒᵖ) : functor.additive (preadditive_coyoneda.obj X) := {} /-- Composing the preadditive yoneda embedding with the forgetful functor yields the regular Yoneda embedding. -/ @[simp] /-- Composing the preadditive yoneda embedding with the forgetful functor yields the regular Yoneda embedding. -/ @[simp] lemma whiskering_preadditive_coyoneda : preadditive_coyoneda ⋙ (whiskering_right C AddCommGroup (Type v)).obj (forget AddCommGroup) = coyoneda := rfl instance preadditive_yoneda_full : full (preadditive_yoneda : C ⥤ Cᵒᵖ ⥤ AddCommGroup) := let yoneda_full : full (preadditive_yoneda ⋙ (whiskering_right Cᵒᵖ AddCommGroup (Type v)).obj (forget AddCommGroup)) := yoneda.yoneda_full in by exactI full.of_comp_faithful preadditive_yoneda ((whiskering_right Cᵒᵖ AddCommGroup (Type v)).obj (forget AddCommGroup)) instance preadditive_coyoneda_full : full (preadditive_coyoneda : Cᵒᵖ ⥤ C ⥤ AddCommGroup) := let coyoneda_full : full (preadditive_coyoneda ⋙ (whiskering_right C AddCommGroup (Type v)).obj (forget AddCommGroup)) := coyoneda.coyoneda_full in by exactI full.of_comp_faithful preadditive_coyoneda ((whiskering_right C AddCommGroup (Type v)).obj (forget AddCommGroup)) instance preadditive_yoneda_faithful : faithful (preadditive_yoneda : C ⥤ Cᵒᵖ ⥤ AddCommGroup) := faithful.of_comp_eq whiskering_preadditive_yoneda instance preadditive_coyoneda_faithful : faithful (preadditive_coyoneda : Cᵒᵖ ⥤ C ⥤ AddCommGroup) := faithful.of_comp_eq whiskering_preadditive_coyoneda end category_theory
/- Copyright (c) 2021 Bhavik Mehta. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Bhavik Mehta, Alena Gusakov, Yaël Dillies -/ import data.finset.slice import logic.function.iterate /-! # Shadows This file defines shadows of a set family. The shadow of a set family is the set family of sets we get by removing any element from any set of the original family. If one pictures `finset α` as a big hypercube (each dimension being membership of a given element), then taking the shadow corresponds to projecting each finset down once in all available directions. ## Main definitions * `finset.shadow`: The shadow of a set family. Everything we can get by removing a new element from some set. * `finset.up_shadow`: The upper shadow of a set family. Everything we can get by adding an element to some set. ## Notation We define notation in locale `finset_family`: * `∂ 𝒜`: Shadow of `𝒜`. * `∂⁺ 𝒜`: Upper shadow of `𝒜`. We also maintain the convention that `a, b : α` are elements of the ground type, `s, t : finset α` are finsets, and `𝒜, ℬ : finset (finset α)` are finset families. ## References * https://github.com/b-mehta/maths-notes/blob/master/iii/mich/combinatorics.pdf * http://discretemath.imp.fu-berlin.de/DMII-2015-16/kruskal.pdf ## Tags shadow, set family -/ open finset nat variables {α : Type*} namespace finset section shadow variables [decidable_eq α] {𝒜 : finset (finset α)} {s t : finset α} {a : α} {k r : ℕ} /-- The shadow of a set family `𝒜` is all sets we can get by removing one element from any set in `𝒜`, and the (`k` times) iterated shadow (`shadow^[k]`) is all sets we can get by removing `k` elements from any set in `𝒜`. -/ def shadow (𝒜 : finset (finset α)) : finset (finset α) := 𝒜.sup (λ s, s.image (erase s)) localized "notation `∂ `:90 := finset.shadow" in finset_family /-- The shadow of the empty set is empty. -/ @[simp] lemma shadow_empty : ∂ (∅ : finset (finset α)) = ∅ := rfl @[simp] lemma shadow_singleton_empty : ∂ ({∅} : finset (finset α)) = ∅ := rfl --TODO: Prove `∂ {{a}} = {∅}` quickly using `covers` and `grade_order` /-- The shadow is monotone. -/ @[mono] lemma shadow_monotone : monotone (shadow : finset (finset α) → finset (finset α)) := λ 𝒜 ℬ, sup_mono /-- `s` is in the shadow of `𝒜` iff there is an `t ∈ 𝒜` from which we can remove one element to get `s`. -/ lemma mem_shadow_iff : s ∈ ∂ 𝒜 ↔ ∃ t ∈ 𝒜, ∃ a ∈ t, erase t a = s := by simp only [shadow, mem_sup, mem_image] lemma erase_mem_shadow (hs : s ∈ 𝒜) (ha : a ∈ s) : erase s a ∈ ∂ 𝒜 := mem_shadow_iff.2 ⟨s, hs, a, ha, rfl⟩ /-- `t` is in the shadow of `𝒜` iff we can add an element to it so that the resulting finset is in `𝒜`. -/ lemma mem_shadow_iff_insert_mem : s ∈ ∂ 𝒜 ↔ ∃ a ∉ s, insert a s ∈ 𝒜 := begin refine mem_shadow_iff.trans ⟨_, _⟩, { rintro ⟨s, hs, a, ha, rfl⟩, refine ⟨a, not_mem_erase a s, _⟩, rwa insert_erase ha }, { rintro ⟨a, ha, hs⟩, exact ⟨insert a s, hs, a, mem_insert_self _ _, erase_insert ha⟩ } end /-- The shadow of a family of `r`-sets is a family of `r - 1`-sets. 
-/ protected lemma _root_.set.sized.shadow (h𝒜 : (𝒜 : set (finset α)).sized r) : (∂ 𝒜 : set (finset α)).sized (r - 1) := begin intros A h, obtain ⟨A, hA, i, hi, rfl⟩ := mem_shadow_iff.1 h, rw [card_erase_of_mem hi, h𝒜 hA], end lemma sized_shadow_iff (h : ∅ ∉ 𝒜) : (∂ 𝒜 : set (finset α)).sized r ↔ (𝒜 : set (finset α)).sized (r + 1) := begin refine ⟨λ h𝒜 s hs, _, set.sized.shadow⟩, obtain ⟨a, ha⟩ := nonempty_iff_ne_empty.2 (ne_of_mem_of_not_mem hs h), rw [←h𝒜 (erase_mem_shadow hs ha), card_erase_add_one ha], end /-- `s ∈ ∂ 𝒜` iff `s` is exactly one element less than something from `𝒜` -/ lemma mem_shadow_iff_exists_mem_card_add_one : s ∈ ∂ 𝒜 ↔ ∃ t ∈ 𝒜, s ⊆ t ∧ t.card = s.card + 1 := begin refine mem_shadow_iff_insert_mem.trans ⟨_, _⟩, { rintro ⟨a, ha, hs⟩, exact ⟨insert a s, hs, subset_insert _ _, card_insert_of_not_mem ha⟩ }, { rintro ⟨t, ht, hst, h⟩, obtain ⟨a, ha⟩ : ∃ a, t \ s = {a} := card_eq_one.1 (by rw [card_sdiff hst, h, add_tsub_cancel_left]), exact ⟨a, λ hat, not_mem_sdiff_of_mem_right hat ((ha.ge : _ ⊆ _) $ mem_singleton_self a), by rwa [insert_eq a s, ←ha, sdiff_union_of_subset hst]⟩ } end /-- Being in the shadow of `𝒜` means we have a superset in `𝒜`. -/ lemma exists_subset_of_mem_shadow (hs : s ∈ ∂ 𝒜) : ∃ t ∈ 𝒜, s ⊆ t := let ⟨t, ht, hst⟩ := mem_shadow_iff_exists_mem_card_add_one.1 hs in ⟨t, ht, hst.1⟩ /-- `t ∈ ∂^k 𝒜` iff `t` is exactly `k` elements less than something in `𝒜`. -/ lemma mem_shadow_iff_exists_mem_card_add : s ∈ (∂^[k]) 𝒜 ↔ ∃ t ∈ 𝒜, s ⊆ t ∧ t.card = s.card + k := begin induction k with k ih generalizing 𝒜 s, { refine ⟨λ hs, ⟨s, hs, subset.refl _, rfl⟩, _⟩, rintro ⟨t, ht, hst, hcard⟩, rwa eq_of_subset_of_card_le hst hcard.le }, simp only [exists_prop, function.comp_app, function.iterate_succ], refine ih.trans _, clear ih, split, { rintro ⟨t, ht, hst, hcardst⟩, obtain ⟨u, hu, htu, hcardtu⟩ := mem_shadow_iff_exists_mem_card_add_one.1 ht, refine ⟨u, hu, hst.trans htu, _⟩, rw [hcardtu, hcardst], refl }, { rintro ⟨t, ht, hst, hcard⟩, obtain ⟨u, hsu, hut, hu⟩ := finset.exists_intermediate_set k (by { rw [add_comm, hcard], exact le_succ _ }) hst, rw add_comm at hu, refine ⟨u, mem_shadow_iff_exists_mem_card_add_one.2 ⟨t, ht, hut, _⟩, hsu, hu⟩, rw [hcard, hu], refl } end end shadow open_locale finset_family section up_shadow variables [decidable_eq α] [fintype α] {𝒜 : finset (finset α)} {s t : finset α} {a : α} {k r : ℕ} /-- The upper shadow of a set family `𝒜` is all sets we can get by adding one element to any set in `𝒜`, and the (`k` times) iterated upper shadow (`up_shadow^[k]`) is all sets we can get by adding `k` elements from any set in `𝒜`. -/ def up_shadow (𝒜 : finset (finset α)) : finset (finset α) := 𝒜.sup $ λ s, sᶜ.image $ λ a, insert a s localized "notation `∂⁺ `:90 := finset.up_shadow" in finset_family /-- The upper shadow of the empty set is empty. -/ @[simp] lemma up_shadow_empty : ∂⁺ (∅ : finset (finset α)) = ∅ := rfl /-- The upper shadow is monotone. -/ @[mono] lemma up_shadow_monotone : monotone (up_shadow : finset (finset α) → finset (finset α)) := λ 𝒜 ℬ, sup_mono /-- `s` is in the upper shadow of `𝒜` iff there is an `t ∈ 𝒜` from which we can remove one element to get `s`. -/ lemma mem_up_shadow_iff : s ∈ ∂⁺ 𝒜 ↔ ∃ t ∈ 𝒜, ∃ a ∉ t, insert a t = s := by simp_rw [up_shadow, mem_sup, mem_image, exists_prop, mem_compl] lemma insert_mem_up_shadow (hs : s ∈ 𝒜) (ha : a ∉ s) : insert a s ∈ ∂⁺ 𝒜 := mem_up_shadow_iff.2 ⟨s, hs, a, ha, rfl⟩ /-- The upper shadow of a family of `r`-sets is a family of `r + 1`-sets. 
-/ protected lemma _root_.set.sized.up_shadow (h𝒜 : (𝒜 : set (finset α)).sized r) : (∂⁺ 𝒜 : set (finset α)).sized (r + 1) := begin intros A h, obtain ⟨A, hA, i, hi, rfl⟩ := mem_up_shadow_iff.1 h, rw [card_insert_of_not_mem hi, h𝒜 hA], end /-- `t` is in the upper shadow of `𝒜` iff we can remove an element from it so that the resulting finset is in `𝒜`. -/ lemma mem_up_shadow_iff_erase_mem : s ∈ ∂⁺ 𝒜 ↔ ∃ a ∈ s, s.erase a ∈ 𝒜 := begin refine mem_up_shadow_iff.trans ⟨_, _⟩, { rintro ⟨s, hs, a, ha, rfl⟩, refine ⟨a, mem_insert_self a s, _⟩, rwa erase_insert ha }, { rintro ⟨a, ha, hs⟩, exact ⟨s.erase a, hs, a, not_mem_erase _ _, insert_erase ha⟩ } end /-- `s ∈ ∂⁺ 𝒜` iff `s` is exactly one element less than something from `𝒜`. -/ lemma mem_up_shadow_iff_exists_mem_card_add_one : s ∈ ∂⁺ 𝒜 ↔ ∃ t ∈ 𝒜, t ⊆ s ∧ t.card + 1 = s.card := begin refine mem_up_shadow_iff_erase_mem.trans ⟨_, _⟩, { rintro ⟨a, ha, hs⟩, exact ⟨s.erase a, hs, erase_subset _ _, card_erase_add_one ha⟩ }, { rintro ⟨t, ht, hts, h⟩, obtain ⟨a, ha⟩ : ∃ a, s \ t = {a} := card_eq_one.1 (by rw [card_sdiff hts, ←h, add_tsub_cancel_left]), refine ⟨a, sdiff_subset _ _ ((ha.ge : _ ⊆ _) $ mem_singleton_self a), _⟩, rwa [←sdiff_singleton_eq_erase, ←ha, sdiff_sdiff_eq_self hts] } end /-- Being in the upper shadow of `𝒜` means we have a superset in `𝒜`. -/ lemma exists_subset_of_mem_up_shadow (hs : s ∈ ∂⁺ 𝒜) : ∃ t ∈ 𝒜, t ⊆ s := let ⟨t, ht, hts, _⟩ := mem_up_shadow_iff_exists_mem_card_add_one.1 hs in ⟨t, ht, hts⟩ /-- `t ∈ ∂^k 𝒜` iff `t` is exactly `k` elements more than something in `𝒜`. -/ lemma mem_up_shadow_iff_exists_mem_card_add : s ∈ (∂⁺^[k]) 𝒜 ↔ ∃ t ∈ 𝒜, t ⊆ s ∧ t.card + k = s.card := begin induction k with k ih generalizing 𝒜 s, { refine ⟨λ hs, ⟨s, hs, subset.refl _, rfl⟩, _⟩, rintro ⟨t, ht, hst, hcard⟩, rwa ←eq_of_subset_of_card_le hst hcard.ge }, simp only [exists_prop, function.comp_app, function.iterate_succ], refine ih.trans _, clear ih, split, { rintro ⟨t, ht, hts, hcardst⟩, obtain ⟨u, hu, hut, hcardtu⟩ := mem_up_shadow_iff_exists_mem_card_add_one.1 ht, refine ⟨u, hu, hut.trans hts, _⟩, rw [←hcardst, ←hcardtu, add_right_comm], refl }, { rintro ⟨t, ht, hts, hcard⟩, obtain ⟨u, htu, hus, hu⟩ := finset.exists_intermediate_set 1 (by { rw [add_comm, ←hcard], exact add_le_add_left (zero_lt_succ _) _ }) hts, rw add_comm at hu, refine ⟨u, mem_up_shadow_iff_exists_mem_card_add_one.2 ⟨t, ht, htu, hu.symm⟩, hus, _⟩, rw [hu, ←hcard, add_right_comm], refl } end @[simp] lemma shadow_image_compl : (∂ 𝒜).image compl = ∂⁺ (𝒜.image compl) := begin ext s, simp only [mem_image, exists_prop, mem_shadow_iff, mem_up_shadow_iff], split, { rintro ⟨_, ⟨s, hs, a, ha, rfl⟩, rfl⟩, exact ⟨sᶜ, ⟨s, hs, rfl⟩, a, not_mem_compl.2 ha, compl_erase.symm⟩ }, { rintro ⟨_, ⟨s, hs, rfl⟩, a, ha, rfl⟩, exact ⟨s.erase a, ⟨s, hs, a, not_mem_compl.1 ha, rfl⟩, compl_erase⟩ } end @[simp] lemma up_shadow_image_compl : (∂⁺ 𝒜).image compl = ∂ (𝒜.image compl) := begin ext s, simp only [mem_image, exists_prop, mem_shadow_iff, mem_up_shadow_iff], split, { rintro ⟨_, ⟨s, hs, a, ha, rfl⟩, rfl⟩, exact ⟨sᶜ, ⟨s, hs, rfl⟩, a, mem_compl.2 ha, compl_insert.symm⟩ }, { rintro ⟨_, ⟨s, hs, rfl⟩, a, ha, rfl⟩, exact ⟨insert a s, ⟨s, hs, a, mem_compl.1 ha, rfl⟩, compl_insert⟩ } end end up_shadow end finset
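A tiny worked instance (my illustration, not in the mathlib source): over a ground type containing $1,2,3$, the family $\mathcal{A} = \{\{1,2\},\{2,3\}\}$ has shadow $\partial \mathcal{A} = \{\{1\},\{2\},\{3\}\}$, obtained by erasing one element from each member, and every member drops from a $2$-set to a $1$-set as `set.sized.shadow` predicts. In the other direction, with the three-element fintype $\{1,2,3\}$ as ground set, $\partial^{+} \{\{1\}\} = \{\{1,2\},\{1,3\}\}$; `shadow_image_compl` and `up_shadow_image_compl` say the two operations are exchanged by taking complements of every member.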
SUBROUTINE PrintGloc(fh_dos, fh_gc, fh_dt, Glc, gloc, gtot, Deltac, omega, csize, csizes, nl, ll, legend, iatom, ncix, nomega, natom, norbitals, maxsize, Ry2eV) !-- In the old version we used: fh_dos = 100, fh_gc = 120, fh_dt = 140 !---Currently we use : fh_dos = 500, fh_gc = 180, fh_dl = 280 USE com_mpi, ONLY: myrank, master IMPLICIT NONE INTEGER, intent(in) :: fh_dos, fh_gc, fh_dt COMPLEX*16, intent(in) :: Glc(maxsize,ncix,nomega), gloc(norbitals,nomega), gtot(nomega), Deltac(maxsize,ncix,nomega) REAL*8, intent(in) :: omega(nomega) INTEGER, intent(in) :: csize(ncix), csizes(ncix), iatom(natom) INTEGER, intent(in) :: nl(natom), ll(natom,4) CHARACTER*30, intent(in):: legend(maxsize,ncix) INTEGER, intent(in) :: nomega, norbitals, natom, ncix, maxsize REAL*8, intent(in) :: Ry2eV ! local INTEGER :: L, iom, lcase, i, icix, itape, jtape, icase, iorb REAL*8 :: pi if (myrank.NE.master) RETURN pi=ACOS(-1.0D0) ! Header for correlated do icix=1,ncix ! Header itape = fh_gc+icix jtape = fh_dt+icix write(itape,'(A7,14x)',advance='no') '# omega' write(jtape,'(A7,14x)',advance='no') '# omega' do i=1,csize(icix) write(itape,'(A28)',advance='no') legend(i,icix) !end relativistic DOS write(jtape,'(A28)',advance='no') legend(i,icix) !end relativistic DOS enddo write(itape,*) write(jtape,*) enddo do iom=1,nomega do icix=1,ncix ! Header itape = fh_gc+icix jtape = fh_dt+icix write(itape,'(f19.12,1x)',advance='no') omega(iom)*Ry2eV write(jtape,'(f19.12,1x)',advance='no') omega(iom)*Ry2eV do i=1,csizes(icix) write(jtape,'(2f19.12)',advance='no') Deltac(i,icix,iom)*Ry2eV enddo do i=1,csize(icix) write(itape,'(2f19.12)',advance='no') Glc(i,icix,iom)/Ry2eV enddo write(itape,*) write(jtape,*) enddo enddo ! Header for non-correlated itape = fh_dos write(itape,'(A14,6x)',advance='no') '# omega' write(itape,'(A5,9x)',advance='no') 'total' do icase=1,natom do lcase=1,nl(icase) L=ll(icase,lcase) write(itape,'(A2,I2,1x,A2,I2,5x)',advance='no') 'a=', iatom(icase), 'L=', L enddo enddo write(itape,*) do iom=1,nomega write(itape,'(f14.8,1x)',advance='no') omega(iom)*Ry2eV write(itape,'(f14.8)',advance='no') -aimag(gtot(iom))/pi/Ry2eV do iorb=1,norbitals write(itape,'(f14.8)',advance='no') -aimag(gloc(iorb,iom))/pi/Ry2eV enddo write(itape,*) enddo do icix=1,ncix close(fh_gc+icix) close(fh_dt+icix) enddo close(fh_dos) return END SUBROUTINE PrintGloc
Formal statement is: lemma prime_elem_dvd_power_iff: "prime_elem p \<Longrightarrow> n > 0 \<Longrightarrow> p dvd x ^ n \<longleftrightarrow> p dvd x" Informal statement is: If $p$ is a prime element and $n > 0$, then $p$ divides $x^n$ if and only if $p$ divides $x$.
import numpy as np

from untitlednn.tensor import Tensor


class Initializer(object):
    """Initializer initializes parameters of layers.

    Example:
        initer = RandomInitializer()
        params = initer(shape)
    """

    def init(self, shape):
        raise NotImplementedError

    def __call__(self, shape):
        return Tensor(self.init(shape).astype(np.float32))


class RandomInitializer(Initializer):
    """RandomInitializer initializes parameters of layers
    with random floats in the interval (-0.05, 0.05)
    """

    def init(self, shape):
        return 0.1 * (np.random.random(shape) - 0.5)


class ZeroInitializer(Initializer):
    """ZeroInitializer initializes parameters of layers with zeros"""

    def init(self, shape):
        return np.zeros(shape)
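A minimal usage sketch (my addition, assuming only the classes defined above and the `untitlednn.tensor.Tensor` wrapper they import): the target shape is supplied when the initializer is *called*, not when it is constructed.

```python
# Illustrative only: create weight and bias parameters for a 784 -> 128 layer.
w_init = RandomInitializer()      # samples uniformly from (-0.05, 0.05)
b_init = ZeroInitializer()

weights = w_init((784, 128))      # Tensor wrapping a float32 ndarray of shape (784, 128)
biases = b_init((128,))           # Tensor wrapping a float32 zero vector of length 128
```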
library(WGCNA) options(stringsAsFactors = FALSE) enableWGCNAThreads() args = commandArgs(trailingOnly=TRUE) #***********************************step1 cluster Sample************************************* prefix = strsplit(args[1], '.', fixed=TRUE)[[1]][1] d=read.table(args[1],sep="\t",header=T,check.names=F, row.names=1) #d=d[1:10000,] datExpr = t(d) gsg = goodSamplesGenes(datExpr, verbose = 3) if (!gsg$allOK) { if (sum(!gsg$goodGenes)>0) printFlush(paste("Removing genes:", paste(names(datExpr)[!gsg$goodGenes], collapse = ", "))) if (sum(!gsg$goodSamples)>0) printFlush(paste("Removing samples:", paste(rownames(datExpr)[!gsg$goodSamples], collapse = ", "))) datExpr = datExpr[gsg$goodSamples, gsg$goodGenes] } #write.table(names(datExpr)[!gsg$goodGenes], file="removeGene.xls", row.names=FALSE, col.names=FALSE, quote=FALSE) #write.table(names(datExpr)[!gsg$goodSamples], file="removeSample.xls", row.names=FALSE, col.names=FALSE, quote=FALSE) sampleTree = hclust(dist(datExpr), method = "average") pdf(file = paste(prefix,"sampleClustering.pdf", sep='_'), width = 12, height = 9) par(cex = 0.6) par(mar = c(0,4,2,0)) plot(sampleTree, main = "Sample clustering", sub="", xlab="", cex.lab = 1.5, cex.axis = 1.5, cex.main = 2) dev.off() #***********************************step2 Choosing the soft threshold beta via scale free topology************************************* powers = c(c(1:10), seq(from = 12, to=20, by=2)) #sft = pickSoftThreshold(datExpr, powerVector = powers, verbose = 5) sft=pickSoftThreshold(datExpr,dataIsExpr = TRUE,powerVector = powers,corFnc = cor,corOptions = list(use = 'p'),networkType = "unsigned") pdf(file=paste(prefix,'pickSoftThreshold.pdf', sep='_'), wi=12, he=9) par(mfrow = c(1,2)) cex1 = 0.9 # Plot the results: par(mfrow = c(1, 2)) # SFT index as a function of different powers plot(sft$fitIndices[,1], -sign(sft$fitIndices[, 3])*sft$fitIndices[, 2], xlab = "Soft Threshold (power)", ylab = "Scale Free Topology Model Fit, signed R^2", type = "n", main = paste("Scale independence")) text(sft$fitIndices[, 1], -sign(sft$fitIndices[, 3]) * sft$fitIndices[, 2], labels = powers, col = "red") # this line corresponds to using an R^2 cut-off of h abline(h = 0.9, col = "red") # Mean connectivity as a function of different powers plot(sft$fitIndices[, 1], sft$fitIndices[, 5], type = "n", xlab = "Soft Threshold (power)", ylab = "Mean Connectivity", main = paste("Mean connectivity")) text(sft$fitIndices[, 1], sft$fitIndices[, 5], labels = powers, col = "red") dev.off() save(datExpr, file='datExpr.RData')
[STATEMENT] lemma eqButUID12_trans: assumes "eqButUID12 freq freq1" and "eqButUID12 freq1 freq2" shows "eqButUID12 freq freq2" [PROOF STATE] proof (prove) goal (1 subgoal): 1. eqButUID12 freq freq2 [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: eqButUID12 freq freq1 eqButUID12 freq1 freq2 goal (1 subgoal): 1. eqButUID12 freq freq2 [PROOF STEP] unfolding eqButUID12_def [PROOF STATE] proof (prove) using this: \<forall>uid uid'. if (uid, uid') \<in> {(UID1, UID2), (UID2, UID1)} then True else freq uid uid' = freq1 uid uid' \<forall>uid uid'. if (uid, uid') \<in> {(UID1, UID2), (UID2, UID1)} then True else freq1 uid uid' = freq2 uid uid' goal (1 subgoal): 1. \<forall>uid uid'. if (uid, uid') \<in> {(UID1, UID2), (UID2, UID1)} then True else freq uid uid' = freq2 uid uid' [PROOF STEP] by (auto split: if_splits)
Formal statement is: lemma simple_path_image_uncountable: fixes g :: "real \<Rightarrow> 'a::metric_space" assumes "simple_path g" shows "uncountable (path_image g)" Informal statement is: If $g$ is a simple path, then the image of $g$ is uncountable.
SUBROUTINE DSLINE ( iltyp, ilthw, iwidth, ilwhw, + jltyp, jlthw, jwidth, jlwhw, iret ) C************************************************************************ C* DSLINE * C* * C* This subroutine sets the line attributes including the line type * C* number, the software/hardware line type flag, the line width size * C* multiplier and the software/hardware line width flag. * C* * C* DSLINE ( ILTYP, ILTHW, IWIDTH, ILWHW, JLTYP, JLTHW, JWIDTH, JLWHW, * C* IRET ) * C* * C* Input parameters: * C* ILTYP INTEGER Line type * C* <=0 = no change * C* ILTHW INTEGER Sw/hw line type flag * C* 1 = software line type * C* 2 = hardware line type * C* otherwise no change * C* IWIDTH INTEGER Line width size multiplier * C* <=0 = no change * C* ILWHW INTEGER Sw/hw line width flag * C* 1 = software line width * C* 2 = hardware line width * C* otherwise no change * C* * C* Output parameters: * C* JLTYP INTEGER Line type * C* JLTHW INTEGER Sw/hw line type flag * C* JWIDTH INTEGER Line width size multiplier * C* JLWHW INTEGER Sw/hw line width flag * C* IRET INTEGER Return code * C** * C* Log: * C* M. Goodman/RDS 5/85 GEMPLT Version 3.1 * C* M. desJardins/GSFC 5/89 Documentation * C************************************************************************ INCLUDE 'FUNCCODE.PRM' INCLUDE 'ERROR.PRM' INCLUDE 'DEVACT.CMN' C* INTEGER isend (6), ircv (5) C------------------------------------------------------------------------ C* Load input parameters into buffer and write them to the mailbox. C isend (1) = 6 isend (2) = CSLINE isend (3) = iltyp isend (4) = ilthw isend (5) = iwidth isend (6) = ilwhw C CALL GPUT ( isend, 6, iret ) IF ( iret .ne. NORMAL ) RETURN C CALL GGET ( ircv, 5, iret ) IF ( iret .ne. NORMAL ) RETURN C C* Fill output variables. C iret = ircv (1) jltyp = ircv (2) jlthw = ircv (3) jwidth = ircv (4) jlwhw = ircv (5) C C* Fill the ACTIVE common block variables C mltyp = ircv (2) mlthw = ircv (3) mlwid = ircv (4) mlwhw = ircv (5) C* RETURN END
theory T154 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) " nitpick[card nat=7,timeout=86400] oops end
/- Copyright (c) 2019 Scott Morrison. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Scott Morrison -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.set_theory.game.state import Mathlib.PostPort namespace Mathlib /-! # Domineering as a combinatorial game. We define the game of Domineering, played on a chessboard of arbitrary shape (possibly even disconnected). Left moves by placing a domino vertically, while Right moves by placing a domino horizontally. This is only a fragment of a full development; in order to successfully analyse positions we would need some more theorems. Most importantly, we need a general statement that allows us to discard irrelevant moves. Specifically to domineering, we need the fact that disjoint parts of the chessboard give sums of games. -/ namespace pgame namespace domineering /-- The embedding `(x, y) ↦ (x, y+1)`. -/ def shift_up : ℤ × ℤ ↪ ℤ × ℤ := function.embedding.prod_map (function.embedding.refl ℤ) (function.embedding.mk (fun (n : ℤ) => n + 1) sorry) /-- The embedding `(x, y) ↦ (x+1, y)`. -/ def shift_right : ℤ × ℤ ↪ ℤ × ℤ := function.embedding.prod_map (function.embedding.mk (fun (n : ℤ) => n + 1) sorry) (function.embedding.refl ℤ) /-- A Domineering board is an arbitrary finite subset of `ℤ × ℤ`. -/ def board := finset (ℤ × ℤ) /-- Left can play anywhere that a square and the square below it are open. -/ /-- Right can play anywhere that a square and the square to the left are open. -/ def left (b : board) : finset (ℤ × ℤ) := b ∩ finset.map shift_up b def right (b : board) : finset (ℤ × ℤ) := b ∩ finset.map shift_right b /-- After Left moves, two vertically adjacent squares are removed from the board. -/ def move_left (b : board) (m : ℤ × ℤ) : board := finset.erase (finset.erase b m) (prod.fst m, prod.snd m - 1) /-- After Left moves, two horizontally adjacent squares are removed from the board. -/ def move_right (b : board) (m : ℤ × ℤ) : board := finset.erase (finset.erase b m) (prod.fst m - 1, prod.snd m) theorem card_of_mem_left {b : board} {m : ℤ × ℤ} (h : m ∈ left b) : bit0 1 ≤ finset.card b := sorry theorem card_of_mem_right {b : board} {m : ℤ × ℤ} (h : m ∈ right b) : bit0 1 ≤ finset.card b := sorry theorem move_left_card {b : board} {m : ℤ × ℤ} (h : m ∈ left b) : finset.card (move_left b m) + bit0 1 = finset.card b := sorry theorem move_right_card {b : board} {m : ℤ × ℤ} (h : m ∈ right b) : finset.card (move_right b m) + bit0 1 = finset.card b := sorry theorem move_left_smaller {b : board} {m : ℤ × ℤ} (h : m ∈ left b) : finset.card (move_left b m) / bit0 1 < finset.card b / bit0 1 := sorry theorem move_right_smaller {b : board} {m : ℤ × ℤ} (h : m ∈ right b) : finset.card (move_right b m) / bit0 1 < finset.card b / bit0 1 := sorry /-- The instance describing allowed moves on a Domineering board. -/ protected instance state : state board := state.mk (fun (s : board) => finset.card s / bit0 1) (fun (s : board) => finset.image (move_left s) (left s)) (fun (s : board) => finset.image (move_right s) (right s)) sorry sorry end domineering /-- Construct a pre-game from a Domineering board. -/ def domineering (b : domineering.board) : pgame := of b /-- All games of Domineering are short, because each move removes two squares. -/ protected instance short_domineering (b : domineering.board) : short (domineering b) := id (pgame.short_of b) /-- The Domineering board with two squares arranged vertically, in which Left has the only move. 
-/ def domineering.one : pgame := domineering (list.to_finset [(0, 0), (0, 1)]) /-- The `L` shaped Domineering board, in which Left is exactly half a move ahead. -/ def domineering.L : pgame := domineering (list.to_finset [(0, bit0 1), (0, 1), (0, 0), (1, 0)]) protected instance short_one : short domineering.one := id (pgame.short_domineering (list.to_finset [(0, 0), (0, 1)])) protected instance short_L : short domineering.L := id (pgame.short_domineering (list.to_finset [(0, bit0 1), (0, 1), (0, 0), (1, 0)]))
-- ---------------------------------------------------------------------
-- Exercise. Prove that for every set s, s ⊆ s.
-- ---------------------------------------------------------------------

import tactic

variables {α : Type*} (s : set α)

-- 1st proof
-- ===============

example : s ⊆ s :=
begin
  assume x,
  assume xs: x ∈ s,
  show x ∈ s,
  by exact xs,
end

-- 2nd proof
-- ===============

example : s ⊆ s :=
begin
  intros x xs,
  exact xs,
end

-- 3rd proof
-- ===============

example : s ⊆ s :=
λ x (xs : x ∈ s), xs

-- 4th proof
-- ===============

example : s ⊆ s :=
-- by library_search
rfl.subset

-- 5th proof
-- ===============

example : s ⊆ s :=
-- by hint
by refl
data Vect : Nat -> Type -> Type where
  Nil  : Vect Z a
  (::) : a -> Vect k a -> Vect (S k) a

%name Vect xs, ys, zs

dupAll : Vect n a -> Vect n (a, a)
dupAll xs = zipHere xs xs
  where
    zipHere : forall n . Vect n a -> Vect n b -> Vect n (a, b)
Formal statement is: lemma prime_elem_dvd_multD: "prime_elem p \<Longrightarrow> p dvd (a * b) \<Longrightarrow> p dvd a \<or> p dvd b" Informal statement is: If $p$ is a prime element and $p$ divides $ab$, then $p$ divides $a$ or $p$ divides $b$.
theory T58 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) " nitpick[card nat=7,timeout=86400] oops end
Formal statement is: lemma diameter_bounded_bound: fixes S :: "'a :: metric_space set" assumes S: "bounded S" "x \<in> S" "y \<in> S" shows "dist x y \<le> diameter S" Informal statement is: If $S$ is a bounded set in a metric space, then the distance between any two points in $S$ is less than or equal to the diameter of $S$.
/- Copyright (c) 2022 Joël Riou. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Joël Riou -/ import for_mathlib.dold_kan.p_infty /-! # Construction of functors N for the Dold-Kan correspondence TODO (@joelriou) continue adding the various files referenced below In this file, we construct functors `N₁ : simplicial_object C ⥤ karoubi (chain_complex C ℕ)` and `N₂ : karoubi (simplicial_object C) ⥤ karoubi (chain_complex C ℕ)` for any preadditive category `C`. (The indices of these functors are the number of occurrences of `karoubi` at the source or the target.) In the case `C` is additive, the functor `N₂` shall be the functor of the equivalence `category_theory.preadditive.dold_kan.equivalence` defined in `equivalence_additive.lean`. In the case the category `C` is pseudoabelian, the composition of `N₁` with the inverse of the equivalence `chain_complex C ℕ ⥤ karoubi (chain_complex C ℕ)` will be the functor `category_theory.idempotents.dold_kan.N` of the equivalence of categories `category_theory.idempotents.dold_kan.equivalence : simplicial_object C ≌ chain_complex C ℕ` defined in `equivalence_pseudoabelian.lean`. When the category `C` is abelian, a relation between `N₁` and the normalized Moore complex functor shall be obtained in `normalized.lean`. (See `equivalence.lean` for the general strategy of proof.) -/ open category_theory open category_theory.category open category_theory.idempotents noncomputable theory namespace algebraic_topology namespace dold_kan variables {C : Type*} [category C] [preadditive C] /-- The functor `simplicial_object C ⥤ karoubi (chain_complex C ℕ)` which maps `X` to the formal direct factor of `K[X]` defined by `P_infty`. -/ @[simps] def N₁ : simplicial_object C ⥤ karoubi (chain_complex C ℕ) := { obj := λ X, { X := alternating_face_map_complex.obj X, p := P_infty, idem := P_infty_idem, }, map := λ X Y f, { f := P_infty ≫ alternating_face_map_complex.map f, comm := by { ext, simp }, }, map_id' := λ X, by { ext, dsimp, simp }, map_comp' := λ X Y Z f g, by { ext, simp } } /-- The extension of `N₁` to the Karoubi envelope of `simplicial_object C`. -/ @[simps] def N₂ : karoubi (simplicial_object C) ⥤ karoubi (chain_complex C ℕ) := (functor_extension₁ _ _).obj N₁ lemma to_karoubi_comp_N₂ : to_karoubi (simplicial_object C) ⋙ N₂ = N₁ := functor.congr_obj (functor_extension₁_comp_whiskering_left_to_karoubi _ _) N₁ end dold_kan end algebraic_topology
module Main

import Conway.Conway
import Conway.Graphics
import Conway.RLE
import Effect.File
import Data.Vect

%default total

showCW : Conway m n -> String
showCW (MkConway v) = concat $ map (\row => showRow row ++ "\n") v
  where
    showRow : Vect n CellState -> String
    showRow v = concat $ map (\cs => case cs of
                                       Alive => ". "
                                       Dead => "  ") v

loop : Conway m n -> IO ()
loop cw = do
  putStrLn $ showCW cw
  getLine
  loop $ iterateGame cw

maxLength : List (List a) -> Nat
maxLength xs = maxLength' 0 xs
  where
    maxLength' : Nat -> List (List a) -> Nat
    maxLength' n [] = n
    maxLength' n (x::xs) = maxLength' (max n (length x)) xs

repeatN : (n : Nat) -> a -> Vect n a
repeatN Z _ = []
repeatN (S n) a = (a::(repeatN n a))

lToV : (n : Nat) -> List (List CellState) -> List (Vect n CellState)
lToV n [] = []
lToV n ys = map (\x => f n x) ys
  where
    f : (n : Nat) -> List CellState -> Vect n CellState
    f Z _ = []
    f (S n) [] = (Dead::(f n []))
    f (S n) (y::ys) = (y::(f n ys))

main : IO ()
main = do
  args <- getArgs
  let fNameM = index' 1 args
  case fNameM of
    Nothing => print "Usage ./Conway 'filename'"
    Just fName => do
      cwM <- readCFile fName
      case cwM of
        Nothing => print "Error in converting file contents\n"
        Just cw => do
          let lVec = lToV (maxLength cw) cw
          let vec = fromList lVec
          let conway = MkConway vec
          conwayLoop conway
{-
  conwayLoop cw
  where
    row1 : Vect 4 CellState
    row1 = fromList [Dead, Alive, Dead, Dead]
    row2 : Vect 4 CellState
    row2 = fromList [Dead, Alive, Dead, Dead]
    row3 : Vect 4 CellState
    row3 = fromList [Dead, Alive, Dead, Dead]
    row4 : Vect 4 CellState
    row4 = fromList [Dead, Dead, Dead, Dead]
    v : Vect 4 (Vect 4 CellState)
    v = fromList [row1, row2, row3, row4]
    cw : Conway 4 4
    cw = MkConway v
-}
FUNCTION:NAME :BEGIN The pid is -- @@stderr -- dtrace: script 'test/unittest/scripting/tst.pid.d' matched probe
module Blender

using PyCall
using GeometryBasics
using CoordinateTransformations
using ColorTypes
using PrettyTables

include("bpy.jl")
include("materials.jl")
include("object.jl")
include("camera.jl")
include("render.jl")

end # module
(* * Copyright 2014, General Dynamics C4 Systems * * SPDX-License-Identifier: GPL-2.0-only *) (* CSpace invariants *) theory ArchCSpaceInv_AI imports "../CSpaceInv_AI" begin context Arch begin global_naming RISCV64 definition safe_ioport_insert :: "cap \<Rightarrow> cap \<Rightarrow> 'a::state_ext state \<Rightarrow> bool" where "safe_ioport_insert newcap oldcap \<equiv> \<lambda>_. True" lemma safe_ioport_insert_triv: "\<not>is_arch_cap newcap \<Longrightarrow> safe_ioport_insert newcap oldcap s" by (clarsimp simp: safe_ioport_insert_def) lemma set_cap_ioports': "\<lbrace>\<lambda>s. valid_ioports s \<and> cte_wp_at (\<lambda>cap'. safe_ioport_insert cap cap' s) ptr s\<rbrace> set_cap cap ptr \<lbrace>\<lambda>rv. valid_ioports\<rbrace>" by wpsimp lemma replace_cap_invs: "\<lbrace>\<lambda>s. invs s \<and> cte_wp_at (replaceable s p cap) p s \<and> cap \<noteq> cap.NullCap \<and> ex_cte_cap_wp_to (appropriate_cte_cap cap) p s \<and> s \<turnstile> cap\<rbrace> set_cap cap p \<lbrace>\<lambda>rv s. invs s\<rbrace>" apply (simp add: invs_def valid_state_def valid_mdb_def2 valid_arch_mdb_def) apply (rule hoare_pre) apply (wp replace_cap_valid_pspace set_cap_caps_of_state2 set_cap_idle replace_cap_ifunsafe valid_irq_node_typ set_cap_typ_at set_cap_irq_handlers set_cap_valid_arch_caps set_cap_cap_refs_respects_device_region_replaceable) apply (clarsimp simp: valid_pspace_def cte_wp_at_caps_of_state replaceable_def) apply (rule conjI) apply (fastforce simp: tcb_cap_valid_def dest!: cte_wp_tcb_cap_valid [OF caps_of_state_cteD]) apply (rule conjI) apply (erule_tac P="\<lambda>cps. mdb_cte_at cps (cdt s)" in rsubst) apply (rule ext) apply (safe del: disjE)[1] apply (simp add: gen_obj_refs_empty final_NullCap)+ apply (rule conjI) apply (simp add: untyped_mdb_def is_cap_simps) apply (erule disjE) apply (clarsimp, rule conjI, clarsimp+)[1] apply (erule allEI, erule allEI) apply (drule_tac x="fst p" in spec, drule_tac x="snd p" in spec) apply (clarsimp simp: gen_obj_refs_subset) apply (drule(1) disjoint_subset, simp) apply (rule conjI) apply (erule descendants_inc_minor) apply simp apply (elim disjE) apply clarsimp apply clarsimp apply (rule conjI) apply (erule disjE) apply (simp add: fun_upd_def[symmetric] fun_upd_idem) apply (simp add: untyped_inc_def not_is_untyped_no_range) apply (rule conjI) apply (erule disjE) apply (simp add: fun_upd_def[symmetric] fun_upd_idem) apply (simp add: ut_revocable_def) apply (rule conjI) apply (erule disjE) apply (clarsimp simp: irq_revocable_def) apply clarsimp apply (clarsimp simp: irq_revocable_def) apply (rule conjI) apply (erule disjE) apply (simp add: fun_upd_def[symmetric] fun_upd_idem) apply (simp add: reply_master_revocable_def) apply (rule conjI) apply (erule disjE) apply (simp add: fun_upd_def[symmetric] fun_upd_idem) apply (clarsimp simp add: reply_mdb_def) apply (thin_tac "\<forall>a b. 
(a, b) \<in> cte_refs cp nd \<and> Q a b\<longrightarrow> R a b" for cp nd Q R) apply (thin_tac "is_pt_cap cap \<longrightarrow> P" for cap P)+ apply (rule conjI) apply (unfold reply_caps_mdb_def)[1] apply (erule allEI, erule allEI) apply (clarsimp split: if_split simp add: is_cap_simps simp del: split_paired_Ex split_paired_All) apply (rename_tac ptra ptrb rights') apply (rule_tac x="(ptra,ptrb)" in exI) apply fastforce apply (unfold reply_masters_mdb_def)[1] apply (erule allEI, erule allEI) subgoal by (fastforce split: if_split_asm simp: is_cap_simps) apply (rule conjI) apply (erule disjE) apply (clarsimp simp add: is_reply_cap_to_def) apply (drule caps_of_state_cteD) apply (subgoal_tac "cte_wp_at (is_reply_cap_to t) p s") apply (erule(1) valid_reply_capsD [OF has_reply_cap_cte_wpD]) apply (erule cte_wp_at_lift) apply (fastforce simp add:is_reply_cap_to_def) apply (simp add: is_cap_simps) apply (frule(1) valid_global_refsD2) apply (frule(1) cap_refs_in_kernel_windowD) apply (rule conjI) apply (erule disjE) apply (clarsimp simp: valid_reply_masters_def cte_wp_at_caps_of_state) apply (cases p, fastforce simp:is_master_reply_cap_to_def) apply (simp add: is_cap_simps) apply (elim disjE) apply simp apply (clarsimp simp: valid_table_capsD[OF caps_of_state_cteD] valid_arch_caps_def unique_table_refs_no_cap_asidE) apply (rule conjI, clarsimp) apply (rule conjI, rule Ball_emptyI, simp add: gen_obj_refs_subset) by clarsimp definition "is_simple_cap_arch cap \<equiv> \<not>is_pt_cap cap" lemma is_simple_cap_arch: "\<not>is_arch_cap cap \<Longrightarrow> is_simple_cap_arch cap" by (simp add: is_cap_simps is_simple_cap_arch_def) (* True when cap' is derived from cap. *) definition "is_derived_arch cap' cap \<equiv> (is_pt_cap cap' \<longrightarrow> cap_asid cap = cap_asid cap' \<and> cap_asid cap' \<noteq> None) \<and> (vs_cap_ref cap = vs_cap_ref cap' \<or> is_frame_cap cap)" lemma is_derived_arch_non_arch: "\<lbrakk> \<not> is_arch_cap cap; \<not> is_arch_cap cap' \<rbrakk> \<Longrightarrow> is_derived_arch cap cap'" unfolding is_derived_arch_def vs_cap_ref_def is_arch_cap_def is_pt_cap_def by (auto split: cap.splits) lemmas cap_master_arch_cap_simps = cap_master_arch_cap_def[split_simps arch_cap.split] lemmas cap_master_cap_def = cap_master_cap_def[simplified cap_master_arch_cap_def] lemma same_master_cap_same_types: "cap_master_cap cap = cap_master_cap cap' \<Longrightarrow> is_pt_cap cap = is_pt_cap cap' \<and> is_frame_cap cap = is_frame_cap cap' \<and> is_ap_cap cap = is_ap_cap cap'" by (clarsimp simp: cap_master_cap_def is_cap_simps split: cap.splits arch_cap.splits) lemma is_derived_cap_arch_asid_issues: "\<lbrakk> is_derived_arch cap cap'; cap_master_cap cap = cap_master_cap cap' \<rbrakk> \<Longrightarrow> (is_pt_cap cap \<longrightarrow> cap_asid cap \<noteq> None) \<and> (is_frame_cap cap \<or> (vs_cap_ref cap = vs_cap_ref cap'))" apply (simp add: is_derived_arch_def) by (auto simp: cap_master_cap_def is_cap_simps cap_asid_def split: cap.splits arch_cap.splits option.splits) lemma is_derived_cap_arch_asid: "\<lbrakk> is_derived_arch cap cap'; cap_master_cap cap = cap_master_cap cap'; is_pt_cap cap' \<rbrakk> \<Longrightarrow> cap_asid cap = cap_asid cap'" unfolding is_derived_arch_def apply (cases cap; cases cap'; simp) by (auto simp: is_cap_simps cap_master_cap_def split: arch_cap.splits) definition safe_parent_for_arch :: "cap \<Rightarrow> cap \<Rightarrow> bool" where "safe_parent_for_arch cap parent \<equiv> False" lemma safe_parent_for_arch_not_arch: "\<not>is_arch_cap cap 
\<Longrightarrow> \<not>safe_parent_for_arch cap p" by (clarsimp simp: safe_parent_for_arch_def is_cap_simps) lemma safe_parent_cap_range_arch: "\<And>cap pcap. safe_parent_for_arch cap pcap \<Longrightarrow> cap_range cap \<subseteq> cap_range pcap" by (clarsimp simp: safe_parent_for_arch_def cap_range_def) definition "cap_asid_base_arch cap \<equiv> case cap of ASIDPoolCap _ asid \<Rightarrow> Some asid | _ \<Rightarrow> None" declare cap_asid_base_arch_def[abs_def, simp] definition cap_asid_base :: "cap \<Rightarrow> asid option" where "cap_asid_base cap \<equiv> arch_cap_fun_lift cap_asid_base_arch None cap" lemmas cap_asid_base_simps [simp] = cap_asid_base_def [simplified, split_simps cap.split arch_cap.split] definition "cap_vptr_arch acap \<equiv> case acap of (FrameCap _ _ _ _ (Some (_, vptr))) \<Rightarrow> Some vptr | (PageTableCap _ (Some (_, vptr))) \<Rightarrow> Some vptr | _ \<Rightarrow> None" definition "cap_vptr cap \<equiv> arch_cap_fun_lift cap_vptr_arch None cap" declare cap_vptr_arch_def[abs_def, simp] lemmas cap_vptr_simps [simp] = cap_vptr_def [simplified, split_simps cap.split arch_cap.split option.split prod.split] end context begin interpretation Arch . requalify_facts replace_cap_invs end end
[STATEMENT]
lemma allDefs_narrows[simp]: "v \<in> step.allDefs g n \<Longrightarrow> v \<in> allDefs g n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. v \<in> step.allDefs g n \<Longrightarrow> v \<in> allDefs g n
[PROOF STEP]
by (auto simp:step.allDefs_def step.phiDefs_def phiDefs_def allDefs_def split:if_split_asm)
{-# OPTIONS --safe #-}

module Issue2250-2 where

open import Agda.Builtin.Bool
open import Agda.Builtin.Equality

data ⊥ : Set where

abstract
  f : Bool → Bool
  f x = true

{-# INJECTIVE f #-}

same : f true ≡ f false
same = refl

not-same : f true ≡ f false → ⊥
not-same ()

absurd : ⊥
absurd = not-same same
[GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g✝ : α → β c c₁ c₂ x : α inst✝ : Add α h : Periodic f c g : β → γ ⊢ Periodic (g ∘ f) c [PROOFSTEP] simp_all [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g✝ : α → β c c₁ c₂ x✝ : α inst✝¹ : Add α inst✝ : Add γ h : Periodic f c g : AddHom γ α g_inv : α → γ hg : RightInverse g_inv ↑g x : γ ⊢ (f ∘ ↑g) (x + g_inv c) = (f ∘ ↑g) x [PROOFSTEP] simp only [hg c, h (g x), map_add, comp_apply] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Add α inst✝ : Mul β hf : Periodic f c hg : Periodic g c ⊢ Periodic (f * g) c [PROOFSTEP] simp_all [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Add α inst✝ : Div β hf : Periodic f c hg : Periodic g c ⊢ Periodic (f / g) c [PROOFSTEP] simp_all [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Add α inst✝ : Monoid β l : List (α → β) hl : ∀ (f : α → β), f ∈ l → Periodic f c ⊢ Periodic (List.prod l) c [PROOFSTEP] induction' l with g l ih hl [GOAL] case nil α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Add α inst✝ : Monoid β l : List (α → β) hl✝ : ∀ (f : α → β), f ∈ l → Periodic f c hl : ∀ (f : α → β), f ∈ [] → Periodic f c ⊢ Periodic (List.prod []) c [PROOFSTEP] simp [GOAL] case cons α : Type u_1 β : Type u_2 γ : Type u_3 f g✝ : α → β c c₁ c₂ x : α inst✝¹ : Add α inst✝ : Monoid β l✝ : List (α → β) hl✝ : ∀ (f : α → β), f ∈ l✝ → Periodic f c g : α → β l : List (α → β) ih : (∀ (f : α → β), f ∈ l → Periodic f c) → Periodic (List.prod l) c hl : ∀ (f : α → β), f ∈ g :: l → Periodic f c ⊢ Periodic (List.prod (g :: l)) c [PROOFSTEP] rw [List.forall_mem_cons] at hl [GOAL] case cons α : Type u_1 β : Type u_2 γ : Type u_3 f g✝ : α → β c c₁ c₂ x : α inst✝¹ : Add α inst✝ : Monoid β l✝ : List (α → β) hl✝ : ∀ (f : α → β), f ∈ l✝ → Periodic f c g : α → β l : List (α → β) ih : (∀ (f : α → β), f ∈ l → Periodic f c) → Periodic (List.prod l) c hl : Periodic g c ∧ ∀ (x : α → β), x ∈ l → Periodic x c ⊢ Periodic (List.prod (g :: l)) c [PROOFSTEP] simpa only [List.prod_cons] using hl.1.mul (ih hl.2) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f✝ g : α → β c c₁ c₂ x : α inst✝¹ : Add α inst✝ : CommMonoid β ι : Type u_4 f : ι → α → β s : Finset ι hs : ∀ (i : ι), i ∈ s → Periodic (f i) c ⊢ ∀ (f_1 : α → β), f_1 ∈ List.map f (Finset.toList s) → Periodic f_1 c [PROOFSTEP] simpa [-Periodic] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Add α inst✝ : SMul γ β h : Periodic f c a : γ ⊢ Periodic (a • f) c [PROOFSTEP] simp_all [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝² : AddMonoid α inst✝¹ : Group γ inst✝ : DistribMulAction γ α h : Periodic f c a : γ x : α ⊢ (fun x => f (a • x)) (x + a⁻¹ • c) = (fun x => f (a • x)) x [PROOFSTEP] simpa only [smul_add, smul_inv_smul] using h (a • x) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝² : AddCommMonoid α inst✝¹ : DivisionSemiring γ inst✝ : Module γ α h : Periodic f c a : γ x : α ⊢ (fun x => f (a • x)) (x + a⁻¹ • c) = (fun x => f (a • x)) x [PROOFSTEP] by_cases ha : a = 0 [GOAL] case pos α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝² : AddCommMonoid α inst✝¹ : DivisionSemiring γ inst✝ : Module γ α h : Periodic f c a : γ x : α ha : a = 0 ⊢ (fun x => f (a • x)) (x + a⁻¹ • c) = (fun x => f (a • x)) x [PROOFSTEP] simp only [ha, zero_smul] [GOAL] case neg α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝² : AddCommMonoid α inst✝¹ : 
DivisionSemiring γ inst✝ : Module γ α h : Periodic f c a : γ x : α ha : ¬a = 0 ⊢ (fun x => f (a • x)) (x + a⁻¹ • c) = (fun x => f (a • x)) x [PROOFSTEP] simpa only [smul_add, smul_inv_smul₀ ha] using h (a • x) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝² : AddMonoid α inst✝¹ : Group γ inst✝ : DistribMulAction γ α h : Periodic f c a : γ ⊢ Periodic (fun x => f (a⁻¹ • x)) (a • c) [PROOFSTEP] simpa only [inv_inv] using h.const_smul a⁻¹ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝² : AddCommMonoid α inst✝¹ : DivisionSemiring γ inst✝ : Module γ α h : Periodic f c a : γ ⊢ Periodic (fun x => f (a⁻¹ • x)) (a • c) [PROOFSTEP] simpa only [inv_inv] using h.const_smul₀ a⁻¹ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : DivisionSemiring α h : Periodic f c a : α ⊢ Periodic (fun x => f (x * a)) (c / a) [PROOFSTEP] simpa only [div_eq_mul_inv] using h.mul_const a [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : DivisionSemiring α h : Periodic f c a : α ⊢ Periodic (fun x => f (x / a)) (c * a) [PROOFSTEP] simpa only [div_eq_mul_inv] using h.mul_const_inv a [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddSemigroup α h1 : Periodic f c₁ h2 : Periodic f c₂ ⊢ Periodic f (c₁ + c₂) [PROOFSTEP] simp_all [← add_assoc] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddGroup α h : Periodic f c x : α ⊢ f (x - c) = f x [PROOFSTEP] simpa only [sub_add_cancel] using (h (x - c)).symm [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddCommGroup α h : Periodic f c ⊢ f (c - x) = f (-x) [PROOFSTEP] simpa only [sub_eq_neg_add] using h (-x) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddGroup α h : Periodic f c ⊢ Periodic f (-c) [PROOFSTEP] simpa only [sub_eq_add_neg, Periodic] using h.sub_eq [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddGroup α h1 : Periodic f c₁ h2 : Periodic f c₂ x : α ⊢ f (x + (c₁ - c₂)) = f x [PROOFSTEP] rw [sub_eq_add_neg, ← add_assoc, h2.neg, h1] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddSemigroup α h : Periodic f c a x : α ⊢ (fun x => f (a + x)) (x + c) = (fun x => f (a + x)) x [PROOFSTEP] simpa [add_assoc] using h (a + x) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddCommSemigroup α h : Periodic f c a x : α ⊢ (fun x => f (x + a)) (x + c) = (fun x => f (x + a)) x [PROOFSTEP] simpa only [add_right_comm] using h (x + a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddCommGroup α h : Periodic f c a x : α ⊢ (fun x => f (a - x)) (x + c) = (fun x => f (a - x)) x [PROOFSTEP] simp only [← sub_sub, h.sub_eq] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddCommGroup α h : Periodic f c a : α ⊢ Periodic (fun x => f (x - a)) c [PROOFSTEP] simpa only [sub_eq_add_neg] using h.add_const (-a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddMonoid α h : Periodic f c n : ℕ ⊢ Periodic f (n • c) [PROOFSTEP] induction n [GOAL] case zero α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddMonoid α h : Periodic f c ⊢ Periodic f (Nat.zero • c) [PROOFSTEP] simp_all [Nat.succ_eq_add_one, add_nsmul, ← add_assoc, zero_nsmul] [GOAL] case succ α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddMonoid α h : 
Periodic f c n✝ : ℕ n_ih✝ : Periodic f (n✝ • c) ⊢ Periodic f (Nat.succ n✝ • c) [PROOFSTEP] simp_all [Nat.succ_eq_add_one, add_nsmul, ← add_assoc, zero_nsmul] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : Semiring α h : Periodic f c n : ℕ ⊢ Periodic f (↑n * c) [PROOFSTEP] simpa only [nsmul_eq_mul] using h.nsmul n [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddGroup α h : Periodic f c n : ℕ ⊢ f (x - n • c) = f x [PROOFSTEP] simpa only [sub_eq_add_neg] using h.neg_nsmul n x [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : Ring α h : Periodic f c n : ℕ ⊢ f (x - ↑n * c) = f x [PROOFSTEP] simpa only [nsmul_eq_mul] using h.sub_nsmul_eq n [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : Ring α h : Periodic f c n : ℕ ⊢ f (↑n * c - x) = f (-x) [PROOFSTEP] simpa only [sub_eq_neg_add] using h.nat_mul n (-x) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddGroup α h : Periodic f c n : ℤ ⊢ Periodic f (n • c) [PROOFSTEP] cases' n with n n [GOAL] case ofNat α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddGroup α h : Periodic f c n : ℕ ⊢ Periodic f (Int.ofNat n • c) [PROOFSTEP] simpa only [Int.ofNat_eq_coe, coe_nat_zsmul] using h.nsmul n [GOAL] case negSucc α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddGroup α h : Periodic f c n : ℕ ⊢ Periodic f (Int.negSucc n • c) [PROOFSTEP] simpa only [negSucc_zsmul] using (h.nsmul (n + 1)).neg [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : Ring α h : Periodic f c n : ℤ ⊢ Periodic f (↑n * c) [PROOFSTEP] simpa only [zsmul_eq_mul] using h.zsmul n [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝ : AddZeroClass α h : Periodic f c ⊢ f c = f 0 [PROOFSTEP] simpa only [zero_add] using h 0 [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : LinearOrderedAddCommGroup α inst✝ : Archimedean α h : Periodic f c hc : c ≠ 0 a : α ⊢ f '' uIcc a (a + c) = range f [PROOFSTEP] cases hc.lt_or_lt with | inl hc => rw [uIcc_of_ge (add_le_of_nonpos_right hc.le), ← h.neg.image_Icc (neg_pos.2 hc) (a + c), add_neg_cancel_right] | inr hc => rw [uIcc_of_le (le_add_of_nonneg_right hc.le), h.image_Icc hc] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : LinearOrderedAddCommGroup α inst✝ : Archimedean α h : Periodic f c hc : c ≠ 0 a : α x✝ : c < 0 ∨ 0 < c ⊢ f '' uIcc a (a + c) = range f [PROOFSTEP] cases hc.lt_or_lt with | inl hc => rw [uIcc_of_ge (add_le_of_nonpos_right hc.le), ← h.neg.image_Icc (neg_pos.2 hc) (a + c), add_neg_cancel_right] | inr hc => rw [uIcc_of_le (le_add_of_nonneg_right hc.le), h.image_Icc hc] [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : LinearOrderedAddCommGroup α inst✝ : Archimedean α h : Periodic f c hc✝ : c ≠ 0 a : α hc : c < 0 ⊢ f '' uIcc a (a + c) = range f [PROOFSTEP] | inl hc => rw [uIcc_of_ge (add_le_of_nonpos_right hc.le), ← h.neg.image_Icc (neg_pos.2 hc) (a + c), add_neg_cancel_right] [GOAL] case inl α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : LinearOrderedAddCommGroup α inst✝ : Archimedean α h : Periodic f c hc✝ : c ≠ 0 a : α hc : c < 0 ⊢ f '' uIcc a (a + c) = range f [PROOFSTEP] rw [uIcc_of_ge (add_le_of_nonpos_right hc.le), ← h.neg.image_Icc (neg_pos.2 hc) (a + c), add_neg_cancel_right] [GOAL] case inr α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β 
c c₁ c₂ x : α inst✝¹ : LinearOrderedAddCommGroup α inst✝ : Archimedean α h : Periodic f c hc✝ : c ≠ 0 a : α hc : 0 < c ⊢ f '' uIcc a (a + c) = range f [PROOFSTEP] | inr hc => rw [uIcc_of_le (le_add_of_nonneg_right hc.le), h.image_Icc hc] [GOAL] case inr α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : LinearOrderedAddCommGroup α inst✝ : Archimedean α h : Periodic f c hc✝ : c ≠ 0 a : α hc : 0 < c ⊢ f '' uIcc a (a + c) = range f [PROOFSTEP] rw [uIcc_of_le (le_add_of_nonneg_right hc.le), h.image_Icc hc] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f✝ g : α → β c c₁ c₂ x✝ : α inst✝ : AddZeroClass α f : α → β x : α ⊢ f (x + 0) = f x [PROOFSTEP] rw [add_zero] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddCommGroup α hf : Periodic f c a : { x // x ∈ AddSubgroup.zmultiples c } x : α ⊢ f (a +ᵥ x) = f x [PROOFSTEP] rcases a with ⟨_, m, rfl⟩ [GOAL] case mk.intro α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddCommGroup α hf : Periodic f c x : α m : ℤ ⊢ f ({ val := (fun x => x • c) m, property := (_ : ∃ y, (fun x => x • c) y = (fun x => x • c) m) } +ᵥ x) = f x [PROOFSTEP] simp [AddSubgroup.vadd_def, add_comm _ x, hf.zsmul m x] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddCommMonoid α hf : Periodic f c a : { x // x ∈ AddSubmonoid.multiples c } x : α ⊢ f (a +ᵥ x) = f x [PROOFSTEP] rcases a with ⟨_, m, rfl⟩ [GOAL] case mk.intro α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddCommMonoid α hf : Periodic f c x : α m : ℕ ⊢ f ({ val := (fun i => i • c) m, property := (_ : ∃ y, (fun i => i • c) y = (fun i => i • c) m) } +ᵥ x) = f x [PROOFSTEP] simp [AddSubmonoid.vadd_def, add_comm _ x, hf.nsmul m x] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddGroup α h : Periodic f c x : α ⧸ AddSubgroup.zmultiples c a b : α h' : Setoid.r a b ⊢ f a = f b [PROOFSTEP] rw [QuotientAddGroup.leftRel_apply] at h' [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddGroup α h : Periodic f c x : α ⧸ AddSubgroup.zmultiples c a b : α h' : -a + b ∈ AddSubgroup.zmultiples c ⊢ f a = f b [PROOFSTEP] obtain ⟨k, hk⟩ := h' [GOAL] case intro α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝ : AddGroup α h : Periodic f c x : α ⧸ AddSubgroup.zmultiples c a b : α k : ℤ hk : (fun x => x • c) k = -a + b ⊢ f a = f b [PROOFSTEP] exact (h.zsmul k _).symm.trans (congr_arg f (add_eq_of_eq_neg_add hk)) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Semiring α inst✝ : InvolutiveNeg β h : Antiperiodic f c ⊢ Periodic f (2 * c) [PROOFSTEP] simp [two_mul, ← add_assoc, h _] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddZeroClass α inst✝ : Neg β h : Antiperiodic f c ⊢ f c = -f 0 [PROOFSTEP] simpa only [zero_add] using h 0 [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝¹ : Semiring α inst✝ : InvolutiveNeg β h : Antiperiodic f c n : ℕ x : α ⊢ f (x + (↑n * (2 * c) + c)) = -f x [PROOFSTEP] rw [← add_assoc, h, h.periodic.nat_mul] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝¹ : Ring α inst✝ : InvolutiveNeg β h : Antiperiodic f c n : ℤ x : α ⊢ f (x + (↑n * (2 * c) + c)) = -f x [PROOFSTEP] rw [← add_assoc, h, h.periodic.int_mul] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝¹ : AddGroup α inst✝ : InvolutiveNeg β h : Antiperiodic f c x : α ⊢ f (x - c) = -f 
x [PROOFSTEP] simp only [← neg_eq_iff_eq_neg, ← h (x - c), sub_add_cancel] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddCommGroup α inst✝ : Neg β h : Antiperiodic f c ⊢ f (c - x) = -f (-x) [PROOFSTEP] simpa only [sub_eq_neg_add] using h (-x) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddGroup α inst✝ : InvolutiveNeg β h : Antiperiodic f c ⊢ Antiperiodic f (-c) [PROOFSTEP] simpa only [sub_eq_add_neg, Antiperiodic] using h.sub_eq [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddGroup α inst✝ : InvolutiveNeg β h : Antiperiodic f c ⊢ f (-c) = -f 0 [PROOFSTEP] simpa only [zero_add] using h.neg 0 [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Semiring α inst✝ : NegZeroClass β h : Antiperiodic f c hi : f 0 = 0 ⊢ f (↑0 * c) = 0 [PROOFSTEP] rwa [Nat.cast_zero, zero_mul] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Semiring α inst✝ : NegZeroClass β h : Antiperiodic f c hi : f 0 = 0 n : ℕ ⊢ f (↑(n + 1) * c) = 0 [PROOFSTEP] simp [add_mul, h _, Antiperiodic.nat_mul_eq_of_eq_zero h hi n] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Ring α inst✝ : SubtractionMonoid β h : Antiperiodic f c hi : f 0 = 0 n : ℕ ⊢ f (↑↑n * c) = 0 [PROOFSTEP] rw [Int.cast_ofNat, h.nat_mul_eq_of_eq_zero hi n] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : Ring α inst✝ : SubtractionMonoid β h : Antiperiodic f c hi : f 0 = 0 n : ℕ ⊢ f (↑(Int.negSucc n) * c) = 0 [PROOFSTEP] rw [Int.cast_negSucc, neg_mul, ← mul_neg, h.neg.nat_mul_eq_of_eq_zero hi] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝¹ : AddSemigroup α inst✝ : Neg β h : Antiperiodic f c a x : α ⊢ (fun x => f (a + x)) (x + c) = -(fun x => f (a + x)) x [PROOFSTEP] simpa [add_assoc] using h (a + x) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝¹ : AddCommSemigroup α inst✝ : Neg β h : Antiperiodic f c a x : α ⊢ (fun x => f (x + a)) (x + c) = -(fun x => f (x + a)) x [PROOFSTEP] simpa only [add_right_comm] using h (x + a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝¹ : AddCommGroup α inst✝ : InvolutiveNeg β h : Antiperiodic f c a x : α ⊢ (fun x => f (a - x)) (x + c) = -(fun x => f (a - x)) x [PROOFSTEP] simp only [← sub_sub, h.sub_eq] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddCommGroup α inst✝ : Neg β h : Antiperiodic f c a : α ⊢ Antiperiodic (fun x => f (x - a)) c [PROOFSTEP] simpa only [sub_eq_add_neg] using h.add_const (-a) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝³ : Add α inst✝² : Monoid γ inst✝¹ : AddGroup β inst✝ : DistribMulAction γ β h : Antiperiodic f c a : γ ⊢ Antiperiodic (a • f) c [PROOFSTEP] simp_all [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝³ : AddMonoid α inst✝² : Neg β inst✝¹ : Group γ inst✝ : DistribMulAction γ α h : Antiperiodic f c a : γ x : α ⊢ (fun x => f (a • x)) (x + a⁻¹ • c) = -(fun x => f (a • x)) x [PROOFSTEP] simpa only [smul_add, smul_inv_smul] using h (a • x) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x✝ : α inst✝³ : AddCommMonoid α inst✝² : Neg β inst✝¹ : DivisionSemiring γ inst✝ : Module γ α h : Antiperiodic f c a : γ ha : a ≠ 0 x : α ⊢ (fun x => f (a • x)) (x + a⁻¹ • c) = -(fun x => f (a • x)) x [PROOFSTEP] simpa only [smul_add, smul_inv_smul₀ ha] using h (a • x) [GOAL] 
α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝³ : AddMonoid α inst✝² : Neg β inst✝¹ : Group γ inst✝ : DistribMulAction γ α h : Antiperiodic f c a : γ ⊢ Antiperiodic (fun x => f (a⁻¹ • x)) (a • c) [PROOFSTEP] simpa only [inv_inv] using h.const_smul a⁻¹ [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝³ : AddCommMonoid α inst✝² : Neg β inst✝¹ : DivisionSemiring γ inst✝ : Module γ α h : Antiperiodic f c a : γ ha : a ≠ 0 ⊢ Antiperiodic (fun x => f (a⁻¹ • x)) (a • c) [PROOFSTEP] simpa only [inv_inv] using h.const_smul₀ (inv_ne_zero ha) [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : DivisionSemiring α inst✝ : Neg β h : Antiperiodic f c a : α ha : a ≠ 0 ⊢ Antiperiodic (fun x => f (x * a)) (c / a) [PROOFSTEP] simpa only [div_eq_mul_inv] using h.mul_const ha [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : DivisionSemiring α inst✝ : Neg β h : Antiperiodic f c a : α ha : a ≠ 0 ⊢ Antiperiodic (fun x => f (x / a)) (c * a) [PROOFSTEP] simpa only [div_eq_mul_inv] using h.mul_const_inv ha [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddGroup α inst✝ : InvolutiveNeg β h1 : Antiperiodic f c₁ h2 : Antiperiodic f c₂ ⊢ Periodic f (c₁ + c₂) [PROOFSTEP] simp_all [← add_assoc] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddGroup α inst✝ : InvolutiveNeg β h1 : Antiperiodic f c₁ h2 : Antiperiodic f c₂ ⊢ Periodic f (c₁ - c₂) [PROOFSTEP] simpa only [sub_eq_add_neg] using h1.add h2.neg [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddGroup α inst✝ : Neg β h1 : Periodic f c₁ h2 : Antiperiodic f c₂ ⊢ Antiperiodic f (c₁ + c₂) [PROOFSTEP] simp_all [← add_assoc] [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝¹ : AddGroup α inst✝ : InvolutiveNeg β h1 : Periodic f c₁ h2 : Antiperiodic f c₂ ⊢ Antiperiodic f (c₁ - c₂) [PROOFSTEP] simpa only [sub_eq_add_neg] using h1.add_antiperiod h2.neg [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝² : Add α inst✝¹ : Mul β inst✝ : HasDistribNeg β hf : Antiperiodic f c hg : Antiperiodic g c ⊢ Periodic (f * g) c [PROOFSTEP] simp_all [GOAL] α : Type u_1 β : Type u_2 γ : Type u_3 f g : α → β c c₁ c₂ x : α inst✝² : Add α inst✝¹ : DivisionMonoid β inst✝ : HasDistribNeg β hf : Antiperiodic f c hg : Antiperiodic g c ⊢ Periodic (f / g) c [PROOFSTEP] simp_all [neg_div_neg_eq] [GOAL] α✝ : Type u_1 β : Type u_2 γ : Type u_3 f g : α✝ → β c c₁ c₂ x : α✝ α : Type u_4 inst✝¹ : LinearOrderedRing α inst✝ : FloorRing α a : α ⊢ fract (a + 1) = fract a [PROOFSTEP] exact_mod_cast Int.fract_add_int a 1
-- MIT License
-- Copyright (c) 2021 Luca Ciccone and Luca Padovani
--
-- Permission is hereby granted, free of charge, to any person
-- obtaining a copy of this software and associated documentation
-- files (the "Software"), to deal in the Software without
-- restriction, including without limitation the rights to use,
-- copy, modify, merge, publish, distribute, sublicense, and/or sell
-- copies of the Software, and to permit persons to whom the
-- Software is furnished to do so, subject to the following
-- conditions:
--
-- The above copyright notice and this permission notice shall be
-- included in all copies or substantial portions of the Software.
--
-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-- OTHER DEALINGS IN THE SOFTWARE.

{-# OPTIONS --guardedness #-}

open import Data.Product
open import Data.Sum
open import Common

module Progress {ℙ : Set} (message : Message ℙ) where

open import SessionType message
open import Transitions message
open import Session message

data Progress : Session -> Set where
  win#def : ∀{T S} (w : Win T) (def : Defined S) -> Progress (T # S)
  inp#out : ∀{f g} (W : Witness g) -> Progress (inp f # out g)
  out#inp : ∀{f g} (W : Witness f) -> Progress (out f # inp g)

progress-sound : ∀{S} -> Progress S -> ProgressS S
progress-sound (win#def e def) = inj₁ (win#def e def)
progress-sound (inp#out (_ , !x)) = inj₂ (_ , sync inp (out !x))
progress-sound (out#inp (_ , !x)) = inj₂ (_ , sync (out !x) inp)

progress-complete : ∀{S} -> ProgressS S -> Progress S
progress-complete (inj₁ (win#def e def)) = win#def e def
progress-complete (inj₂ (_ , sync inp (out !x))) = inp#out (_ , !x)
progress-complete (inj₂ (_ , sync (out !x) inp)) = out#inp (_ , !x)
lemma contour_integral_diff: "f1 contour_integrable_on g \<Longrightarrow> f2 contour_integrable_on g \<Longrightarrow> contour_integral g (\<lambda>x. f1 x - f2 x) = contour_integral g f1 - contour_integral g f2"
State Before: α : Type u_1 β : Type u_2 l l₁ l₂ l₃ : List α a b : α m n : ℕ h : l₁ <+: l₂ f : α → β ⊢ List.map f l₁ <+: List.map f l₂ State After: case nil α : Type u_1 β : Type u_2 l l₁ l₂✝ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂✝ f : α → β l₂ : List α h : [] <+: l₂ ⊢ List.map f [] <+: List.map f l₂ case cons α : Type u_1 β : Type u_2 l l₁ l₂✝ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂✝ f : α → β hd : α tl : List α hl : ∀ {l₂ : List α}, tl <+: l₂ → List.map f tl <+: List.map f l₂ l₂ : List α h : hd :: tl <+: l₂ ⊢ List.map f (hd :: tl) <+: List.map f l₂ Tactic: induction' l₁ with hd tl hl generalizing l₂ State Before: case nil α : Type u_1 β : Type u_2 l l₁ l₂✝ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂✝ f : α → β l₂ : List α h : [] <+: l₂ ⊢ List.map f [] <+: List.map f l₂ State After: no goals Tactic: simp only [nil_prefix, map_nil] State Before: case cons α : Type u_1 β : Type u_2 l l₁ l₂✝ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂✝ f : α → β hd : α tl : List α hl : ∀ {l₂ : List α}, tl <+: l₂ → List.map f tl <+: List.map f l₂ l₂ : List α h : hd :: tl <+: l₂ ⊢ List.map f (hd :: tl) <+: List.map f l₂ State After: case cons.nil α : Type u_1 β : Type u_2 l l₁ l₂ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂ f : α → β hd : α tl : List α hl : ∀ {l₂ : List α}, tl <+: l₂ → List.map f tl <+: List.map f l₂ h : hd :: tl <+: [] ⊢ List.map f (hd :: tl) <+: List.map f [] case cons.cons α : Type u_1 β : Type u_2 l l₁ l₂ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂ f : α → β hd : α tl : List α hl : ∀ {l₂ : List α}, tl <+: l₂ → List.map f tl <+: List.map f l₂ hd₂ : α tl₂ : List α h : hd :: tl <+: hd₂ :: tl₂ ⊢ List.map f (hd :: tl) <+: List.map f (hd₂ :: tl₂) Tactic: cases' l₂ with hd₂ tl₂ State Before: case cons.nil α : Type u_1 β : Type u_2 l l₁ l₂ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂ f : α → β hd : α tl : List α hl : ∀ {l₂ : List α}, tl <+: l₂ → List.map f tl <+: List.map f l₂ h : hd :: tl <+: [] ⊢ List.map f (hd :: tl) <+: List.map f [] State After: no goals Tactic: simpa only using eq_nil_of_prefix_nil h State Before: case cons.cons α : Type u_1 β : Type u_2 l l₁ l₂ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂ f : α → β hd : α tl : List α hl : ∀ {l₂ : List α}, tl <+: l₂ → List.map f tl <+: List.map f l₂ hd₂ : α tl₂ : List α h : hd :: tl <+: hd₂ :: tl₂ ⊢ List.map f (hd :: tl) <+: List.map f (hd₂ :: tl₂) State After: case cons.cons α : Type u_1 β : Type u_2 l l₁ l₂ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂ f : α → β hd : α tl : List α hl : ∀ {l₂ : List α}, tl <+: l₂ → List.map f tl <+: List.map f l₂ hd₂ : α tl₂ : List α h : hd = hd₂ ∧ tl <+: tl₂ ⊢ List.map f (hd :: tl) <+: List.map f (hd₂ :: tl₂) Tactic: rw [cons_prefix_iff] at h State Before: case cons.cons α : Type u_1 β : Type u_2 l l₁ l₂ l₃ : List α a b : α m n : ℕ h✝ : l₁ <+: l₂ f : α → β hd : α tl : List α hl : ∀ {l₂ : List α}, tl <+: l₂ → List.map f tl <+: List.map f l₂ hd₂ : α tl₂ : List α h : hd = hd₂ ∧ tl <+: tl₂ ⊢ List.map f (hd :: tl) <+: List.map f (hd₂ :: tl₂) State After: no goals Tactic: simp only [List.map_cons, h, prefix_cons_inj, hl, map]
{-# OPTIONS --cubical --safe #-}
module Cubical.Structures.InftyMagma where

open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.FunExtEquiv
open import Cubical.Foundations.SIP renaming (SNS-PathP to SNS)

private
  variable
    ℓ ℓ' ℓ'' : Level

-- ∞-Magmas with SNS

∞-magma-structure : Type ℓ → Type ℓ
∞-magma-structure X = X → X → X

∞-Magma : Type (ℓ-suc ℓ)
∞-Magma {ℓ} = TypeWithStr ℓ ∞-magma-structure

∞-magma-iso : StrIso ∞-magma-structure ℓ
∞-magma-iso (X , _·_) (Y , _∗_) f =
  (x x' : X) → equivFun f (x · x') ≡ equivFun f x ∗ equivFun f x'

∞-magma-is-SNS : SNS {ℓ} ∞-magma-structure ∞-magma-iso
∞-magma-is-SNS f = SNS-≡→SNS-PathP ∞-magma-iso (λ _ _ → funExt₂Equiv) f
# modules prevent namespace pollution

module CoreTest
using ParserCombinator
using Test

@testset "core" begin
    include("core/sources.jl")
    include("core/fix.jl")
    include("core/print.jl")
    include("core/names.jl")
    include("core/tests.jl")
    include("core/slow.jl")
    include("core/case.jl")
    include("core/calc.jl")
    include("core/debug.jl")
    include("core/try.jl")
end
end

module GmlTest
using ParserCombinator
using ParserCombinator.Parsers.GML
using Test

@testset "GML" begin
    include("gml/ok.jl")
    include("gml/errors.jl")
    include("gml/example1.jl")
    include("gml/example2.jl")
    # need zip files unpacking
    #include("gml/celegansneural.jl")
    #include("gml/polblogs.jl")
    #include("gml/10k-49963.jl")
end
end

module DotTest
using ParserCombinator
using ParserCombinator.Parsers.DOT
using Test

@testset "DOT" begin
    include("dot/example.jl")
    include("dot/fragments.jl")
    include("dot/subgraphs.jl")
    include("dot/examples.jl")
end
end
module Keypad

import Data.Vect

%default total

export
record Keypad where
  constructor MkKeypad
  -- 16 key hexadecimal keypad
  K : Vect 16 Bool

export
Show Keypad where
  show k = show (K k)

export
isKeyPressed : (keypad : Keypad) -> (n : Fin 16) -> Bool
isKeyPressed keypad n = Vect.index n (K keypad)

export
setKeyPress : (keypad : Keypad) -> (n : Fin 16) -> Keypad
setKeyPress keypad n =
  let newK = Vect.replaceAt n True (K keypad) in
      record { K = newK } keypad

export
clearKeyPress : (keypad : Keypad) -> (n : Fin 16) -> Keypad
clearKeyPress keypad n =
  let newK = Vect.replaceAt n False (K keypad) in
      record { K = newK } keypad
module KentFinancialEconomics

using HTTP
using CSV
using JSON
using DataFrames
using TimeSeries
using ZipFile
using Dates

export fundamentals, events, metadata, stockprices, fundprices

include("data_sources/quandl.jl")

end
(* Property from Productive Use of Failure in Inductive Proof,
   Andrew Ireland and Alan Bundy, JAR 1996.
   This Isabelle theory is produced using the TIP tool offered at the following website:
     https://github.com/tip-org/tools
   This file was originally provided as part of TIP benchmark at the following website:
     https://github.com/tip-org/benchmarks
   Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
   to make it compatible with Isabelle2017.*)
theory TIP_prop_49
imports "../../Test_Base"
begin

datatype 'a list = nil2 | cons2 "'a" "'a list"

datatype Nat = Z | S "Nat"

fun y :: "Nat => Nat => bool" where
"y (Z) (Z) = True"
| "y (Z) (S z2) = False"
| "y (S x2) (Z) = False"
| "y (S x2) (S y22) = y x2 y22"

fun x :: "bool => bool => bool" where
"x True y2 = True"
| "x False y2 = y2"

fun elem :: "Nat => Nat list => bool" where
"elem z (nil2) = False"
| "elem z (cons2 z2 xs) = x (y z z2) (elem z xs)"

fun t2 :: "Nat => Nat => bool" where
"t2 (Z) y2 = True"
| "t2 (S z2) (Z) = False"
| "t2 (S z2) (S x2) = t2 z2 x2"

fun insert :: "Nat => Nat list => Nat list" where
"insert z (nil2) = cons2 z (nil2)"
| "insert z (cons2 z2 xs) =
     (if t2 z z2 then cons2 z (cons2 z2 xs) else cons2 z2 (insert z xs))"

fun isort :: "Nat list => Nat list" where
"isort (nil2) = nil2"
| "isort (cons2 y2 xs) = insert y2 (isort xs)"

theorem property0 :
  "((elem z (isort y2)) ==> (elem z y2))"
  oops

end
Formal statement is: lemma LIMSEQ_iff: fixes L :: "'a::real_normed_vector" shows "(X \<longlonglongrightarrow> L) = (\<forall>r>0. \<exists>no. \<forall>n \<ge> no. norm (X n - L) < r)" Informal statement is: A sequence $X$ converges to $L$ if and only if for every $\epsilon > 0$, there exists $N$ such that for all $n \geq N$, we have $|X_n - L| < \epsilon$.
theory T154 imports Main begin lemma "( (\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) & (\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) & (\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) & (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) & (\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) & (\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) & (\<forall> x::nat. invo(invo(x)) = x) ) \<longrightarrow> (\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) " nitpick[card nat=4,timeout=86400] oops end
\ TOOLS LOADSCREEN 22MAR85RE) ONLYFORTH \NEEDS CODE abort( Assembler is needed ) VOCABULARY TOOLS TOOLS ALSO DEFINITIONS hex 1 &11 +THRU decimal ONLYFORTH \ HANDLE STEPS BP 10 02 85) ASSEMBLER ALSO DEFINITIONS ONLY FORTH ALSO TOOLS ALSO DEFINITIONS | VARIABLE (W | VARIABLE RPT | CODE STEP RPT DEC RP X) LDA IP STA RP )Y LDA IP 1+ STA RP 2INC (W LDA W STA (W 1+ LDA W 1+ STA W 1- JMP END-CODE | CREATE NEXTSTEP ] STEP [ \ THROW STATUS ON R-STACK B 23JUL85RE) | CREATE NPULL 0 ] RP@ COUNT 2DUP + RP! R> SWAP CMOVE ; : NPUSH ( ADDR LEN -) R> -ROT OVER >R RP@ OVER 1+ - DUP RP! PLACE NPULL >R >R ; | : ONELINE .STATUS SPACE QUERY INTERPRET -82 ALLOT RDROP ( DELETE QUIT FROM TNEXT ) ; \ TRAP AND DISPLAY KS 26MAR85RE) LABEL TNEXT IP 2INC RP LDA RPT CMP 0<> ?[ [[ W 1- JMP SWAP ]? RP 1+ LDA RPT 1+ CMP 0= ?] LABEL DOTRACE RPT INC ( DISABLE TRACER ) W LDA (W STA W 1+ LDA (W 1+ STA ;C: R@ NEXTSTEP >R INPUT PUSH KEYBOARD OUTPUT PUSH DISPLAY CR 2- DUP 4 U.R @ DUP 5 U.R 2 SPACES >NAME .NAME 1C COL - 0 MAX SPACES .S STATE PUSH BLK PUSH >IN PUSH [ ' 'QUIT >BODY ] LITERAL PUSH [ ' >INTERPRET >BODY ] LITERAL PUSH \ #TIB PUSH TIB #TIB @ NPUSH R0 PUSH RP@ R0 ! 082 ALLOT ['] ONELINE IS 'QUIT QUIT ; -2 ALLOT \ TRACER COMMANDS BP 23JUL85RE) | CODE (TRACE TNEXT 0 100 M/MOD # LDA NEXT 0C + STA # LDA NEXT 0B + STA 04C # LDA NEXT 0A + STA NEXT JMP END-CODE : TRACE' RP@ 2- RPT ! ' (TRACE EXECUTE END-TRACE ; : BREAK RP@ 2+ RPT ! (TRACE ; RESTRICT : TRACEL: CREATE , DOES> @ RPT +! ; -6 TRACEL: +DO 6 TRACEL: -DO -2 TRACEL: +R 2 TRACEL: -R -6 TRACEL: +PUSH 6 TRACEL: -PUSH \ WATCH TRAP BP 10 02 85 ) | VARIABLE WATCHPT 2 ALLOT LABEL WNEXT IP 2INC WATCHPT LDA N STA WATCHPT 1+ LDA N 1+ STA N X) LDA WATCHPT 2+ CMP 0<> ?[ [[ RP LDA RPT STA RP 1+ LDA RPT 1+ STA ( SET TO TNEXT) TNEXT 0 100 M/MOD # LDA NEXT 0C + STA # LDA NEXT 0B + STA DOTRACE JMP SWAP ]? N )Y LDA WATCHPT 3 + CMP 0= ?] W 1- JMP END-CODE \ WATCH COMMANDS BP 10 02 85 ) | CODE (WATCH WNEXT 0 100 M/MOD # LDA NEXT 0C + STA # LDA NEXT 0B + STA 04C # LDA NEXT 0A + STA NEXT JMP END-CODE : WATCH' ( ADR -- ) DUP WATCHPT ! @ WATCHPT 2+ ! ' (WATCH EXECUTE END-TRACE ; : CONT ( -) WATCHPT @ @ WATCHPT 2+ ! (WATCH ; ( SYNTAX : <VARNAME> WATCH' <PROCEDURE> ) \ TOOLS FOR DECOMPILING, KS 4 APR 83 ) ( INTERACTIVE USE ) | : ?: DUP 4 U.R ." :" ; | : @? DUP @ 6 U.R ; | : C? DUP C@ 3 .R ; | : BL 024 COL - 0 MAX SPACES ; : S ( ADR - ADR+) ( PRINT LITERAL STRING) ?: SPACE C? 4 SPACES DUP COUNT TYPE DUP C@ + 1+ BL ; ( COUNT + RE) : N ( ADR - ADR+2) ( PRINT NAME OF NEXT WORD BY ITS CFA) ?: @? 2 SPACES DUP @ >NAME .NAME 2+ BL ; : L ( ADR - ADR+2) ( PRINT LITERAL VALUE) ?: @? 2+ BL ; \ TOOLS FOR DECOMPILING, INTERACTIVE ) : D ( ADR N - ADR+N) ( DUMP N BYTES) 2DUP SWAP ?: 3 SPACES SWAP 0 DO C? 1+ LOOP 4 SPACES -ROT TYPE BL ; : C ( ADR - ADR+1) ( PRINT BYTE AS UNSIGNED VALUE) 1 D ; : B ( ADR - ADR+2) ( PRINT BRANCH TARGET LOCATION ) ?: @? DUP @ OVER + 6 U.R 2+ BL ; ( USED FOR : ) ( NAME STRING LITERAL DUMP CLIT BRANCH ) ( - - - - - - ) \ DEBUGGING UTILITIES BP 19 02 85 ) : UNRAVEL \ UNRAVEL PERFORM (ABORT" RDROP RDROP RDROP CR ." TRACE DUMP IS " CR BEGIN RP@ R0 @ - WHILE R> DUP 8 U.R SPACE 2- @ >NAME .NAME CR REPEAT (ERROR ; ' UNRAVEL ERRORHANDLER !
module TestClock

using Mimi
using Test

import Mimi: AbstractTimestep, FixedTimestep, VariableTimestep, Clock, timestep, time_index, advance

t_f = FixedTimestep{1850, 10, 3000}(1)
c_f = Clock{FixedTimestep}(1850, 10, 3000)

@test timestep(c_f) == t_f
@test time_index(c_f) == 1

advance(c_f)
@test time_index(c_f) == 2

years = Tuple([2000:1:2024; 2025:5:2105])

t_v = VariableTimestep{years}()
c_v = Clock{VariableTimestep}(years)

@test timestep(c_v) == t_v

end #module
[STATEMENT]
lemma min_coin_sym_iff: "g r =\<^sub>m h s \<longleftrightarrow> h s =\<^sub>m g r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. g r =\<^sub>m h s = h s =\<^sub>m g r
[PROOF STEP]
using min_coin_sym
[PROOF STATE]
proof (prove)
using this:
?g ?r =\<^sub>m ?h ?s \<Longrightarrow> ?h ?s =\<^sub>m ?g ?r

goal (1 subgoal):
 1. g r =\<^sub>m h s = h s =\<^sub>m g r
[PROOF STEP]
by auto
function [y,varname]=xml2struct(filename_or_xml_string)

%XML2STRUCT reads non-MbML compliant xmlfile into matlab structure
%
% Syntax: [y,varname]=xml2struct(filename_or_xml_string)
%
% Description:
%   1. Convert any non-MbML xml into MbML compliant string
%   2. Stores consecutive structures in a dimensional structure
%
% If the non-MbML compliant XML has a consistent internal reference structure
% (those that were derived from explicit data models often do)
% this conversion will produce the best results, by building
%
% Note: if your XML string is MbML compliant use XML2MAT instead
%
% See also: XML2STRUCT
%
% Jonas Almeida, [email protected], 19 May 2003, MAT4NAT Tbox

[y,varname]=xml2cell(filename_or_xml_string);
y=consolidateall(y);