#' @import ggplot2 #' @import stats #' @importFrom crayon green #' @importFrom crayon bold #' @importFrom crayon italic #' @importFrom crayon red #' @importFrom crayon blue #' @importFrom crayon black #' @importFrom nortest lillie.test #' @importFrom nortest ad.test #' @importFrom nortest cvm.test #' @importFrom nortest pearson.test #' @importFrom nortest sf.test #' @importFrom utils read.table #' @importFrom lmtest dwtest #' @importFrom stats cor.test dic.analysis <- function(trat, response, norm="sw", homog="bt", alpha.f=0.05, alpha.t=0.05, quali=TRUE, mcomp="tukey", grau=1, transf=1, constant=0, test="parametric", p.adj="holm", geom="bar", theme=theme_classic(), ylab="Response", sup=NA, CV=TRUE, xlab="", fill="lightblue", angle=0, family="sans", textsize=12, labelsize=4, dec=3, addmean=TRUE, errorbar=TRUE, posi="top", point="mean_sd", angle.label=0){ mean.stat <- function (y, x, stat = "mean") {k<-0 numerico<- NULL if(is.null(ncol(x))){ if(is.numeric(x)){ k<-1 numerico[1]<-1}} else{ ncolx<-ncol(x) for (i in 1:ncolx) { if(is.numeric(x[,i])){ k<-k+1 numerico[k]<-i }}} cx <- deparse(substitute(x)) cy <- deparse(substitute(y)) x <- data.frame(c1 = 1, x) y <- data.frame(v1 = 1, y) nx <- ncol(x) ny <- ncol(y) namex <- names(x) namey <- names(y) if (nx == 2) namex <- c("c1", cx) if (ny == 2) namey <- c("v1", cy) namexy <- c(namex, namey) for (i in 1:nx) { x[, i] <- as.character(x[, i])} z <- NULL for (i in 1:nx){z <- paste(z, x[, i], sep = "&")} w <- NULL for (i in 1:ny) { m <- tapply(y[, i], z, stat) m <- as.matrix(m) w <- cbind(w, m)} nw <- nrow(w) c <- rownames(w) v <- rep("", nw * nx) dim(v) <- c(nw, nx) for (i in 1:nw) { for (j in 1:nx) { v[i, j] <- strsplit(c[i], "&")[[1]][j + 1]}} rownames(w) <- NULL junto <- data.frame(v[, -1], w) junto <- junto[, -nx] names(junto) <- namexy[c(-1, -(nx + 1))] if(k==1 & nx==2) { junto[,numerico[1]]<-as.character(junto[,numerico[1]]) junto[,numerico[1]]<-as.numeric(junto[,numerico[1]]) junto<-junto[order(junto[,1]),]} if (k>0 & nx > 2) { for (i in 1:k){ junto[,numerico[i]]<-as.character(junto[,numerico[i]]) junto[,numerico[i]]<-as.numeric(junto[,numerico[i]])} junto<-junto[do.call("order", c(junto[,1:(nx-1)])),]} rownames(junto)<-1:(nrow(junto)) return(junto)} regression=function(trat, resp, ylab="Response", xlab="Independent", yname.poly="y", xname.poly="x", grau=NA, theme=theme_classic(), point="mean_sd", color="gray80", posi="top", textsize=12, se=FALSE, ylim=NA, family="sans", pointsize=4.5, linesize=0.8, width.bar=NA, n=NA, SSq=NA, DFres=NA){ requireNamespace("ggplot2") if(is.na(width.bar)==TRUE){width.bar=0.1*mean(trat)} if(is.na(grau)==TRUE){grau=1} dados=data.frame(trat,resp) medias=c() dose=tapply(trat, trat, mean, na.rm=TRUE) mod=c() mod1=c() mod2=c() modm=c() mod1m=c() mod2m=c() text1=c() text2=c() text3=c() mods=c() mod1s=c() mod2s=c() fparcial1=c() fparcial2=c() fparcial3=c() media=tapply(resp, trat, mean, na.rm=TRUE) desvio=tapply(resp, trat, sd, na.rm=TRUE) erro=tapply(resp, trat, sd, na.rm=TRUE)/sqrt(table(trat)) dose=tapply(trat, trat, mean, na.rm=TRUE) moda=lm(resp~trat) mod1a=lm(resp~trat+I(trat^2)) mod2a=lm(resp~trat+I(trat^2)+I(trat^3)) mods=summary(moda)$coefficients mod1s=summary(mod1a)$coefficients mod2s=summary(mod2a)$coefficients modm=lm(media~dose) mod1m=lm(media~dose+I(dose^2)) mod2m=lm(media~dose+I(dose^2)+I(dose^3)) modf1=lm(resp~trat) modf2=lm(resp~trat+I(trat^2)) modf3=lm(resp~trat+I(trat^2)+I(trat^3)) modf1ql=anova(modf1) modf2ql=anova(modf2) modf3ql=anova(modf3) modf1q=aov(resp~as.factor(trat)) res=anova(modf1q) 
fadj1=anova(modf1,modf1q)[2,c(3,4,5,6)] fadj2=anova(modf2,modf1q)[2,c(3,4,5,6)] fadj3=anova(modf3,modf1q)[2,c(3,4,5,6)] if(is.na(DFres)==TRUE){DFres=res[2,1]} if(is.na(SSq)==TRUE){SSq=res[2,2]} df1=c(modf3ql[1,1],fadj1[1,1],DFres) df2=c(modf3ql[1:2,1],fadj2[1,1],DFres) df3=c(modf3ql[1:3,1],fadj3[1,1],DFres) sq1=c(modf3ql[1,2],fadj1[1,2],SSq) sq2=c(modf3ql[1:2,2],fadj2[1,2],SSq) sq3=c(modf3ql[1:3,2],fadj3[1,2],SSq) qm1=sq1/df1 qm2=sq2/df2 qm3=sq3/df3 if(grau=="1"){fa1=data.frame(cbind(df1,sq1,qm1)) fa1$f1=c(fa1$qm1[1:2]/fa1$qm1[3],NA) fa1$p=c(pf(fa1$f1[1:2],fa1$df1[1:2],fa1$df1[3],lower.tail = F),NA) colnames(fa1)=c("Df","SSq","MSQ","F","p-value");rownames(fa1)=c("Linear","Deviation","Residual")} if(grau=="2"){fa2=data.frame(cbind(df2,sq2,qm2)) fa2$f2=c(fa2$qm2[1:3]/fa2$qm2[4],NA) fa2$p=c(pf(fa2$f2[1:3],fa2$df2[1:3],fa2$df2[4],lower.tail = F),NA) colnames(fa2)=c("Df","SSq","MSQ","F","p-value");rownames(fa2)=c("Linear","Quadratic","Deviation","Residual")} if(grau=="3"){ fa3=data.frame(cbind(df3,sq3,qm3)) fa3$f3=c(fa3$qm3[1:4]/fa3$qm3[5],NA) fa3$p=c(pf(fa3$f3[1:4],fa3$df3[1:4],fa3$df3[5],lower.tail = F),NA) colnames(fa3)=c("Df","SSq","MSQ","F","p-value");rownames(fa3)=c("Linear","Quadratic","Cubic","Deviation","Residual")} if(grau=="1"){r2=round(summary(modm)$r.squared, 2)} if(grau=="2"){r2=round(summary(mod1m)$r.squared, 2)} if(grau=="3"){r2=round(summary(mod2m)$r.squared, 2)} if(grau=="1"){ if(is.na(n)==FALSE){coef1=round(coef(moda)[1],n)}else{coef1=coef(moda)[1]} if(is.na(n)==FALSE){coef2=round(coef(moda)[2],n)}else{coef2=coef(moda)[2]} s1=s <- sprintf("%s == %e %s %e*%s ~~~~~ italic(R^2) == %0.2f", yname.poly, coef1, ifelse(coef2 >= 0, "+", "-"), abs(coef2), xname.poly, r2)} if(grau=="2"){ if(is.na(n)==FALSE){coef1=round(coef(mod1a)[1],n)}else{coef1=coef(mod1a)[1]} if(is.na(n)==FALSE){coef2=round(coef(mod1a)[2],n)}else{coef2=coef(mod1a)[2]} if(is.na(n)==FALSE){coef3=round(coef(mod1a)[3],n)}else{coef3=coef(mod1a)[3]} s2=s <- sprintf("%s == %e %s %e * %s %s %e * %s^2 ~~~~~ italic(R^2) == %0.2f", yname.poly, coef1, ifelse(coef2 >= 0, "+", "-"), abs(coef2), xname.poly, ifelse(coef3 >= 0, "+", "-"), abs(coef3), xname.poly, r2)} if(grau=="3"){ if(is.na(n)==FALSE){coef1=round(coef(mod2a)[1],n)}else{coef1=coef(mod2a)[1]} if(is.na(n)==FALSE){coef2=round(coef(mod2a)[2],n)}else{coef2=coef(mod2a)[2]} if(is.na(n)==FALSE){coef3=round(coef(mod2a)[3],n)}else{coef3=coef(mod2a)[3]} if(is.na(n)==FALSE){coef4=round(coef(mod2a)[4],n)}else{coef4=coef(mod2a)[4]} s3=s <- sprintf("%s == %e %s %e * %s %s %e * %s^2 %s %0.e * %s^3 ~~~~~ italic(R^2) == %0.2f", yname.poly, coef1, ifelse(coef2 >= 0, "+", "-"), abs(coef2), xname.poly, ifelse(coef3 >= 0, "+", "-"), abs(coef3), xname.poly, ifelse(coef4 >= 0, "+", "-"), abs(coef4), xname.poly, r2)} data1=data.frame(trat,resp) data1=data.frame(trat=dose,#as.numeric(as.character(names(media))), resp=media, desvio, erro) grafico=ggplot(data1,aes(x=trat,y=resp)) if(point=="all"){grafico=grafico+ geom_point(data=dados, aes(y=resp,x=trat),shape=21, fill=color,color="black")} if(point=="mean_sd"){grafico=grafico+ geom_errorbar(aes(ymin=resp-desvio,ymax=resp+desvio),width=width.bar,size=linesize)} if(point=="mean_se"){grafico=grafico+ geom_errorbar(aes(ymin=resp-erro,ymax=resp+erro),width=width.bar,size=linesize)} if(point=="mean"){grafico=grafico} grafico=grafico+geom_point(aes(fill=as.factor(rep(1,length(resp)))),na.rm=TRUE, size=pointsize,shape=21, color="black")+ theme+ylab(ylab)+xlab(xlab) if(is.na(ylim[1])==TRUE){grafico=grafico}else{grafico=grafico+ylim(ylim)} 
if(grau=="0"){grafico=grafico+geom_line(y=mean(resp),size=linesize,lty=2)} if(grau=="1"){grafico=grafico+geom_smooth(method = "lm",se=se, na.rm=TRUE, formula = y~x,size=linesize,color="black")} if(grau=="2"){grafico=grafico+geom_smooth(method = "lm",se=se, na.rm=TRUE, formula = y~x+I(x^2),size=linesize,color="black")} if(grau=="3"){grafico=grafico+geom_smooth(method = "lm",se=se, na.rm=TRUE, formula = y~x+I(x^2)+I(x^3),size=linesize,color="black")} if(grau=="0"){grafico=grafico+ scale_fill_manual(values=color,label=paste("y =",round(mean(resp),3)),name="")} if(grau=="1"){grafico=grafico+ scale_fill_manual(values=color,label=c(parse(text=s1)),name="")} if(grau=="2"){grafico=grafico+ scale_fill_manual(values=color,label=c(parse(text=s2)),name="")} if(grau=="3"){grafico=grafico+ scale_fill_manual(values=color,label=c(parse(text=s3)),name="")} if(color=="gray"){if(grau=="1"){grafico=grafico+ scale_fill_manual(values="black",label=c(parse(text=s1)),name="")} if(grau=="2"){grafico=grafico+ scale_fill_manual(values="black",label=c(parse(text=s2)),name="")} if(grau=="3"){grafico=grafico+ scale_fill_manual(values="black",label=c(parse(text=s3)),name="")} } grafico=grafico+ theme(text = element_text(size=textsize,color="black",family=family), axis.text = element_text(size=textsize,color="black",family=family), axis.title = element_text(size=textsize,color="black",family=family), legend.position = posi, legend.text=element_text(size=textsize), legend.direction = "vertical", legend.text.align = 0, legend.justification = 0) print(grafico) if(grau==1){ cat("\n----------------------------------------------------\n") cat("Regression Models") cat("\n----------------------------------------------------\n") print(mods) cat("\n----------------------------------------------------\n") cat("Deviations from regression") cat("\n----------------------------------------------------\n") print(as.matrix(fa1),na.print=" ") } if(grau==2){ cat("\n----------------------------------------------------\n") cat("Regression Models") cat("\n----------------------------------------------------\n") print(mod1s) cat("\n----------------------------------------------------\n") cat("Deviations from regression") cat("\n----------------------------------------------------\n") print(as.matrix(fa2),na.print=" ") } if(grau==3){ cat("\n----------------------------------------------------\n") cat("Regression Models") cat("\n----------------------------------------------------\n") print(mod2s) cat("\n----------------------------------------------------\n") cat("Deviations from regression") cat("\n----------------------------------------------------\n") print(as.matrix(fa3),na.print=" ") } graficos=list(grafico) } levenehomog <- function (y, ...) { UseMethod("levenehomog")} levenehomog.default <- function (y, group, center=median, ...) { if (!is.numeric(y)) stop(deparse(substitute(y)), " is not a numeric variable") if (!is.factor(group)){warning(deparse(substitute(group)), " coerced to factor.") group <- as.factor(group)} valid <- complete.cases(y, group) meds <- tapply(y[valid], group[valid], center, ...) resp <- abs(y - meds[group]) table <- anova(lm(resp ~ group))[, c(1, 4, 5)] rownames(table)[2] <- " " dots <- deparse(substitute(...)) attr(table, "heading") <- paste("Levene's Test (center = ", deparse(substitute(center)), if(!(dots == "NULL")) paste(":", dots), ")", sep="") table} levenehomog.formula <- function(y, data, ...) 
{ form <- y mf <- if (missing(data)) model.frame(form) else model.frame(form, data) if (any(sapply(2:dim(mf)[2], function(j) is.numeric(mf[[j]])))) stop("Levene's test is not appropriate with quantitative explanatory variables.") y <- mf[,1] if(dim(mf)[2]==2) group <- mf[,2] else { if (length(grep("\\+ | \\| | \\^ | \\:",form))>0) stop("Model must be completely crossed formula only.") group <- interaction(mf[,2:dim(mf)[2]])} levenehomog.default(y=y, group=group, ...)} levenehomog.lm <- function(y, ...) { m <- model.frame(y) m$..y <- model.response(m) f <- formula(y) f[2] <- expression(..y) levenehomog.formula(f, data=m, ...)} ordenacao=function (treatment, means, alpha, pvalue, console){ n <- length(means) z <- data.frame(treatment, means) letras<-c(letters[1:26],LETTERS[1:26],1:9, c(".","+","-","*","/","#","$","%","&","^","[","]",":", "@",";","_","?","!","=","#",rep(" ",2000))) w <- z[order(z[, 2], decreasing = TRUE), ] M<-rep("",n) k<-1 k1<-0 j<-1 i<-1 cambio<-n cambio1<-0 chequeo=0 M[1]<-letras[k] q <- as.numeric(rownames(w)) #Check while(j<n) { chequeo<-chequeo+1 if (chequeo > n) break for(i in j:n) { s<-pvalue[q[i],q[j]]>alpha if(s) { if(lastC(M[i]) != letras[k])M[i]<-paste(M[i],letras[k],sep="") } else { k<-k+1 cambio<-i cambio1<-0 ja<-j for(jj in cambio:n) M[jj]<-paste(M[jj],"",sep="") # El espacio M[cambio]<-paste(M[cambio],letras[k],sep="") for( v in ja:cambio) { if(pvalue[q[v],q[cambio]]<=alpha) {j<-j+1 cambio1<-1 } else break } break } } if (cambio1 ==0 )j<-j+1 } w<-data.frame(w,stat=M) trt <- as.character(w$treatment) means <- as.numeric(w$means) output <- data.frame(means, groups=M) rownames(output)<-trt if(k>81) cat("\n",k,"groups are estimated.The number of groups exceeded the maximum of 81 labels. change to group=FALSE.\n") invisible(output) } lastC <- function(x) { y<-sub(" +$", "",x) p1<-nchar(y) cc<-substr(y,p1,p1) return(cc)} duncan <- function(y, trt, DFerror, MSerror, alpha = 0.05, group = TRUE, main = NULL, console = FALSE) {name.y <- paste(deparse(substitute(y))) name.t <- paste(deparse(substitute(trt))) if(is.null(main))main<-paste(name.y,"~", name.t) clase<-c("aov","lm") if("aov"%in%class(y) | "lm"%in%class(y)){ if(is.null(main))main<-y$call A<-y$model DFerror<-df.residual(y) MSerror<-deviance(y)/DFerror y<-A[,1] ipch<-pmatch(trt,names(A)) nipch<- length(ipch) for(i in 1:nipch){ if (is.na(ipch[i])) return(if(console)cat("Name: ", trt, "\n", names(A)[-1], "\n"))} name.t<- names(A)[ipch][1] trt <- A[, ipch] if (nipch > 1){ trt <- A[, ipch[1]] for(i in 2:nipch){ name.t <- paste(name.t,names(A)[ipch][i],sep=":") trt <- paste(trt,A[,ipch[i]],sep=":") }} name.y <- names(A)[1] } junto <- subset(data.frame(y, trt), is.na(y) == FALSE) Mean<-mean(junto[,1]) CV<-sqrt(MSerror)*100/Mean medians<-mean.stat(junto[,1],junto[,2],stat="median") for(i in c(1,5,2:4)) { x <- mean.stat(junto[,1],junto[,2],function(x)quantile(x)[i]) medians<-cbind(medians,x[,2]) } medians<-medians[,3:7] names(medians)<-c("Min","Max","Q25","Q50","Q75") means <- mean.stat(junto[,1],junto[,2],stat="mean") # change sds <- mean.stat(junto[,1],junto[,2],stat="sd") #change nn <- mean.stat(junto[,1],junto[,2],stat="length") # change means<-data.frame(means,std=sds[,2],r=nn[,2],medians) names(means)[1:2]<-c(name.t,name.y) ntr<-nrow(means) Tprob<-NULL k<-0 for(i in 2:ntr){ k<-k+1 x <- suppressWarnings(warning(qtukey((1-alpha)^(i-1), i, DFerror))) if(x=="NaN")break else Tprob[k]<-x } if(k<(ntr-1)){ for(i in k:(ntr-1)){ f <- Vectorize(function(x)ptukey(x,i+1,DFerror)-(1-alpha)^i) Tprob[i]<-uniroot(f, c(0,100))$root 
} } Tprob<-as.numeric(Tprob) nr <- unique(nn[,2]) if(console){ cat("\nStudy:", main) cat("\n\nDuncan's new multiple range test\nfor",name.y,"\n") cat("\nMean Square Error: ",MSerror,"\n\n") cat(paste(name.t,",",sep="")," means\n\n") print(data.frame(row.names = means[,1], means[,2:6])) } if(length(nr) == 1 ) sdtdif <- sqrt(MSerror/nr) else { nr1 <- 1/mean(1/nn[,2]) sdtdif <- sqrt(MSerror/nr1) } DUNCAN <- Tprob * sdtdif names(DUNCAN)<-2:ntr duncan<-data.frame(Table=Tprob,CriticalRange=DUNCAN) if ( group & length(nr) == 1 & console){ cat("\nAlpha:",alpha,"; DF Error:",DFerror,"\n") cat("\nCritical Range\n") print(DUNCAN)} if ( group & length(nr) != 1 & console) cat("\nGroups according to probability of means differences and alpha level(",alpha,")\n") if ( length(nr) != 1) duncan<-NULL Omeans<-order(means[,2],decreasing = TRUE) #correccion 2019, 1 abril. Ordindex<-order(Omeans) comb <-utils::combn(ntr,2) nn<-ncol(comb) dif<-rep(0,nn) DIF<-dif LCL<-dif UCL<-dif pvalue<-dif odif<-dif sig<-NULL for (k in 1:nn) { i<-comb[1,k] j<-comb[2,k] dif[k]<-means[i,2]-means[j,2] DIF[k]<-abs(dif[k]) nx<-abs(i-j)+1 odif[k] <- abs(Ordindex[i]- Ordindex[j])+1 pvalue[k]<- round(1-ptukey(DIF[k]/sdtdif,odif[k],DFerror)^(1/(odif[k]-1)),4) LCL[k] <- dif[k] - DUNCAN[odif[k]-1] UCL[k] <- dif[k] + DUNCAN[odif[k]-1] sig[k]<-" " if (pvalue[k] <= 0.001) sig[k]<-"***" else if (pvalue[k] <= 0.01) sig[k]<-"**" else if (pvalue[k] <= 0.05) sig[k]<-"*" else if (pvalue[k] <= 0.1) sig[k]<-"." } if(!group){ tr.i <- means[comb[1, ],1] tr.j <- means[comb[2, ],1] comparison<-data.frame("difference" = dif, pvalue=pvalue,"signif."=sig,LCL,UCL) rownames(comparison)<-paste(tr.i,tr.j,sep=" - ") if(console){cat("\nComparison between treatments means\n\n") print(comparison)} groups=NULL } if (group) { comparison=NULL Q<-matrix(1,ncol=ntr,nrow=ntr) p<-pvalue k<-0 for(i in 1:(ntr-1)){ for(j in (i+1):ntr){ k<-k+1 Q[i,j]<-p[k] Q[j,i]<-p[k] } } groups <- ordenacao(means[, 1], means[, 2],alpha, Q,console) names(groups)[1]<-name.y if(console) { cat("\nMeans with the same letter are not significantly different.\n\n") print(groups) } } parameters<-data.frame(test="Duncan",name.t=name.t,ntr = ntr,alpha=alpha) statistics<-data.frame(MSerror=MSerror,Df=DFerror,Mean=Mean,CV=CV) rownames(parameters)<-" " rownames(statistics)<-" " rownames(means)<-means[,1] means<-means[,-1] output<-list(statistics=statistics,parameters=parameters, duncan=duncan, means=means,comparison=comparison,groups=groups) class(output)<-"group" invisible(output) } TUKEY <- function(y, trt, DFerror, MSerror, alpha=0.05, group=TRUE, main = NULL,unbalanced=FALSE,console=FALSE){ name.y <- paste(deparse(substitute(y))) name.t <- paste(deparse(substitute(trt))) if(is.null(main))main<-paste(name.y,"~", name.t) clase<-c("aov","lm") if("aov"%in%class(y) | "lm"%in%class(y)){ if(is.null(main))main<-y$call A<-y$model DFerror<-df.residual(y) MSerror<-deviance(y)/DFerror y<-A[,1] ipch<-pmatch(trt,names(A)) nipch<- length(ipch) for(i in 1:nipch){ if (is.na(ipch[i])) return(if(console)cat("Name: ", trt, "\n", names(A)[-1], "\n")) } name.t<- names(A)[ipch][1] trt <- A[, ipch] if (nipch > 1){ trt <- A[, ipch[1]] for(i in 2:nipch){ name.t <- paste(name.t,names(A)[ipch][i],sep=":") trt <- paste(trt,A[,ipch[i]],sep=":") }} name.y <- names(A)[1] } junto <- subset(data.frame(y, trt), is.na(y) == FALSE) Mean<-mean(junto[,1]) CV<-sqrt(MSerror)*100/Mean medians<-mean.stat(junto[,1],junto[,2],stat="median") for(i in c(1,5,2:4)) { x <- mean.stat(junto[,1],junto[,2],function(x)quantile(x)[i]) 
medians<-cbind(medians,x[,2]) } medians<-medians[,3:7] names(medians)<-c("Min","Max","Q25","Q50","Q75") means <- mean.stat(junto[,1],junto[,2],stat="mean") sds <- mean.stat(junto[,1],junto[,2],stat="sd") nn <- mean.stat(junto[,1],junto[,2],stat="length") means<-data.frame(means,std=sds[,2],r=nn[,2],medians) names(means)[1:2]<-c(name.t,name.y) ntr<-nrow(means) Tprob <- qtukey(1-alpha,ntr, DFerror) nr<-unique(nn[, 2]) nr1<-1/mean(1/nn[,2]) if(console){ cat("\nStudy:", main) cat("\n\nHSD Test for",name.y,"\n") cat("\nMean Square Error: ",MSerror,"\n\n") cat(paste(name.t,",",sep="")," means\n\n") print(data.frame(row.names = means[,1], means[,2:6])) cat("\nAlpha:",alpha,"; DF Error:",DFerror,"\n") cat("Critical Value of Studentized Range:", Tprob,"\n") } HSD <- Tprob * sqrt(MSerror/nr) statistics<-data.frame(MSerror=MSerror,Df=DFerror,Mean=Mean,CV=CV,MSD=HSD) if ( group & length(nr) == 1 & console) cat("\nMinimun Significant Difference:",HSD,"\n") if ( group & length(nr) != 1 & console) cat("\nGroups according to probability of means differences and alpha level(",alpha,")\n") if ( length(nr) != 1) statistics<-data.frame(MSerror=MSerror,Df=DFerror,Mean=Mean,CV=CV) comb <-utils::combn(ntr,2) nn<-ncol(comb) dif<-rep(0,nn) sig<-NULL LCL<-dif UCL<-dif pvalue<-rep(0,nn) for (k in 1:nn) { i<-comb[1,k] j<-comb[2,k] dif[k]<-means[i,2]-means[j,2] sdtdif<-sqrt(MSerror * 0.5*(1/means[i,4] + 1/means[j,4])) if(unbalanced)sdtdif<-sqrt(MSerror /nr1) pvalue[k]<- round(1-ptukey(abs(dif[k])/sdtdif,ntr,DFerror),4) LCL[k] <- dif[k] - Tprob*sdtdif UCL[k] <- dif[k] + Tprob*sdtdif sig[k]<-" " if (pvalue[k] <= 0.001) sig[k]<-"***" else if (pvalue[k] <= 0.01) sig[k]<-"**" else if (pvalue[k] <= 0.05) sig[k]<-"*" else if (pvalue[k] <= 0.1) sig[k]<-"." } if(!group){ tr.i <- means[comb[1, ],1] tr.j <- means[comb[2, ],1] comparison<-data.frame("difference" = dif, pvalue=pvalue,"signif."=sig,LCL,UCL) rownames(comparison)<-paste(tr.i,tr.j,sep=" - ") if(console){cat("\nComparison between treatments means\n\n") print(comparison)} groups=NULL } if (group) { comparison=NULL Q<-matrix(1,ncol=ntr,nrow=ntr) p<-pvalue k<-0 for(i in 1:(ntr-1)){ for(j in (i+1):ntr){ k<-k+1 Q[i,j]<-p[k] Q[j,i]<-p[k] } } groups <- ordenacao(means[, 1], means[, 2],alpha, Q,console) names(groups)[1]<-name.y if(console) { cat("\nTreatments with the same letter are not significantly different.\n\n") print(groups) } } parameters<-data.frame(test="Tukey",name.t=name.t,ntr = ntr, StudentizedRange=Tprob,alpha=alpha) rownames(parameters)<-" " rownames(statistics)<-" " rownames(means)<-means[,1] means<-means[,-1] output<-list(statistics=statistics,parameters=parameters, means=means,comparison=comparison,groups=groups) class(output)<-"group" invisible(output) } sk<-function(y, trt, DFerror, SSerror, alpha = 0.05, group = TRUE, main = NULL){ sk <- function(medias,s2,dfr,prob){ bo <- 0 si2 <- s2 defr <- dfr parou <- 1 np <- length(medias) - 1 for (i in 1:np){ g1 <- medias[1:i] g2 <- medias[(i+1):length(medias)] B0 <- sum(g1)^2/length(g1) + sum(g2)^2/length(g2) - (sum(g1) + sum(g2))^2/length(c(g1,g2)) if (B0 > bo) {bo <- B0 parou <- i} } g1 <- medias[1:parou] g2 <- medias[(parou+1):length(medias)] teste <- c(g1,g2) sigm2 <- (sum(teste^2) - sum(teste)^2/length(teste) + defr*si2)/(length(teste) + defr) lamb <- pi*bo/(2*sigm2*(pi-2)) v0 <- length(teste)/(pi-2) p <- pchisq(lamb,v0,lower.tail = FALSE) if (p < prob) { for (i in 1:length(g1)){ cat(names(g1[i]),"\n",file="sk_groups",append=TRUE)} cat("*","\n",file="sk_groups",append=TRUE)} if 
(length(g1)>1){sk(g1,s2,dfr,prob)} if (length(g2)>1){sk(g2,s2,dfr,prob)} } trt=factor(trt,unique(trt)) trt1=trt levels(trt)=paste("T",1:length(levels(trt)),sep = "") medias <- sort(tapply(y,trt,mean),decreasing=TRUE) dfr <- DFerror rep <- tapply(y,trt,length) s0 <- MSerror <-SSerror/DFerror s2 <- s0/rep[1] prob <- alpha sk(medias,s2,dfr,prob) f <- names(medias) names(medias) <- 1:length(medias) resultado <- data.frame("r"=0,"f"=f,"m"=medias) if (file.exists("sk_groups") == FALSE) {stop} else{ xx <- read.table("sk_groups") file.remove("sk_groups") x <- xx[[1]] x <- as.vector(x) z <- 1 for (j in 1:length(x)){ if (x[j] == "*") {z <- z+1} for (i in 1:length(resultado$f)){ if (resultado$f[i]==x[j]){ resultado$r[i] <- z;} } } } letras<-letters if(length(resultado$r)>26) { l<-floor(length(resultado$r)/26) for(i in 1:l) letras<-c(letras,paste(letters,i,sep='')) } res <- 1 for (i in 1:(length(resultado$r)-1)) { if (resultado$r[i] != resultado$r[i+1]){ resultado$r[i] <- letras[res] res <- res+1 if (i == (length(resultado$r)-1)){ resultado$r[i+1] <- letras[res]} } else{ resultado$r[i] <- letras[res] if (i == (length(resultado$r)-1)){ resultado$r[i+1] <- letras[res] } } } names(resultado) <- c("groups","Tratamentos","Means") resultado1=resultado[,c(3,1)] rownames(resultado1)=resultado$Tratamentos final=list(resultado1)[[1]] final=final[as.character(unique(trt)),] rownames(final)=as.character(unique(trt1)) final } scottknott=function(means, df1, QME, nrep, alpha=0.05){ sk1=function(means, df1, QME, nrep, alpha=alpha) { means=sort(means,decreasing=TRUE) n=1:(length(means)-1) n=as.list(n) f=function(n){list(means[c(1:n)],means[-c(1:n)])} g=lapply(n, f) b1=function(x){(sum(g[[x]][[1]])^2)/length(g[[x]][[1]]) + (sum(g[[x]][[2]])^2)/length(g[[x]][[2]])- (sum(c(g[[x]][[1]],g[[x]][[2]]))^2)/length(c(g[[x]][[1]],g[[x]][[2]]))} p=1:length(g) values=sapply(p,b1) minimo=min(values); maximo=max(values) alfa=(1/(length(means)+df1))*(sum((means-mean(means))^2)+(df1*QME/nrep)) lambda=(pi/(2*(pi-2)))*(maximo/alfa) vq=qchisq((alpha),lower.tail=FALSE, df=length(means)/(pi-2)) ll=1:length(values); da=data.frame(ll,values); da=da[order(-values),] ran=da$ll[1] r=g[[ran]]; r=as.list(r) i=ifelse(vq>lambda|length(means)==1, 1,2) means=list(means) res=list(means, r) return(res[[i]]) } u=sk1(means, df1, QME, nrep, alpha=alpha) u=lapply(u, sk1, df1=df1, QME=QME, nrep=nrep, alpha=alpha) sk2=function(u){ v1=function(...){c(u[[1]])} v2=function(...){c(u[[1]],u[[2]])} v3=function(...){c(u[[1]],u[[2]],u[[3]])} v4=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]])} v5=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]])} v6=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]])} v7=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]],u[[7]])} v8=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]],u[[7]],u[[8]])} v9=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]],u[[7]],u[[8]],u[[9]])} v10=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]],u[[7]],u[[8]],u[[9]],u[[10]])} lv=list(v1,v2,v3,v4,v5,v6,v7,v8,v9,v10) l=length(u) ti=lv[[l]] u=ti() u=lapply(u, sk1, df1=df1, QME=QME, nrep=nrep, alpha=alpha) return(u) } u=sk2(u);u=sk2(u);u=sk2(u);u=sk2(u);u=sk2(u) u=sk2(u);u=sk2(u);u=sk2(u);u=sk2(u);u=sk2(u) v1=function(...){c(u[[1]])} v2=function(...){c(u[[1]],u[[2]])} v3=function(...){c(u[[1]],u[[2]],u[[3]])} v4=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]])} v5=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]])} v6=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]])} 
v7=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]],u[[7]])} v8=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]],u[[7]],u[[8]])} v9=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]],u[[7]],u[[8]],u[[9]])} v10=function(...){c(u[[1]],u[[2]],u[[3]],u[[4]],u[[5]],u[[6]],u[[7]],u[[8]],u[[9]],u[[10]])} lv=list(v1,v2,v3,v4,v5,v6,v7,v8,v9,v10) l=length(u) ti=lv[[l]] u=ti() rp=u l2=lapply(rp, length) l2=unlist(l2) rp2=rep(letters[1:length(rp)], l2) return(rp2)} if(is.na(sup==TRUE)){sup=0.1*mean(response)} if(angle.label==0){hjust=0.5}else{hjust=0} requireNamespace("nortest") requireNamespace("crayon") requireNamespace("ggplot2") if(test=="parametric"){ if(transf==1){resp=response+constant}else{resp=((response+constant)^transf-1)/transf} if(transf==0){resp=log(response+constant)} if(transf==0.5){resp=sqrt(response+constant)} if(transf==-0.5){resp=1/sqrt(response+constant)} if(transf==-1){resp=1/(response+constant)} trat1=trat trat=as.factor(trat) a = anova(aov(resp ~ trat)) aa = summary(aov(resp ~ trat)) b = aov(resp ~ trat) anava=a colnames(anava)=c("GL","SQ","QM","Fcal","p-value") respad=b$residuals/sqrt(a$`Mean Sq`[2]) out=respad[respad>3 | respad<(-3)] out=names(out) out=if(length(out)==0)("No discrepant point")else{out} if(norm=="sw"){norm1 = shapiro.test(b$res)} if(norm=="li"){norm1=nortest::lillie.test(b$residuals)} if(norm=="ad"){norm1=nortest::ad.test(b$residuals)} if(norm=="cvm"){norm1=nortest::cvm.test(b$residuals)} if(norm=="pearson"){norm1=nortest::pearson.test(b$residuals)} if(norm=="sf"){norm1=nortest::sf.test(b$residuals)} if(homog=="bt"){ homog1 = bartlett.test(b$res ~ trat) statistic=homog1$statistic phomog=homog1$p.value method=paste("Bartlett test","(",names(statistic),")",sep="") } if(homog=="levene"){ homog1 = levenehomog(b$res~trat)[1,] statistic=homog1$`F value`[1] phomog=homog1$`Pr(>F)`[1] method="Levene's Test (center = median)(F)" names(homog1)=c("Df", "F value","p.value")} indep = dwtest(b) resids=b$residuals/sqrt(a$`Mean Sq`[2]) Ids=ifelse(resids>3 | resids<(-3), "darkblue","black") residplot=ggplot(data=data.frame(resids,Ids),aes(y=resids,x=1:length(resids)))+ geom_point(shape=21,color="gray",fill="gray",size=3)+ labs(x="",y="Standardized residuals")+ geom_text(x=1:length(resids),label=1:length(resids),color=Ids,size=4)+ scale_x_continuous(breaks=1:length(resids))+ theme_classic()+theme(axis.text.y = element_text(size=12), axis.text.x = element_blank())+ geom_hline(yintercept = c(0,-3,3),lty=c(1,2,2),color="red",size=1) print(residplot) cat(green(bold("\n-----------------------------------------------------------------\n"))) cat(green(bold("Normality of errors"))) cat(green(bold("\n-----------------------------------------------------------------\n"))) normal=data.frame(Method=paste(norm1$method,"(",names(norm1$statistic),")",sep=""), Statistic=norm1$statistic, "p-value"=norm1$p.value) rownames(normal)="" print(normal) cat("\n") message(if(norm1$p.value>0.05){ black("As the calculated p-value is greater than the 5% significance level, hypothesis H0 is not rejected. Therefore, errors can be considered normal")} else {"As the calculated p-value is less than the 5% significance level, H0 is rejected. 
Therefore, errors do not follow a normal distribution"})
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  cat(green(bold("Homogeneity of Variances")))
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  homoge=data.frame(Method=method,
                    Statistic=statistic,
                    "p-value"=phomog)
  rownames(homoge)=""
  print(homoge)
  cat("\n")
  message(if(homog1$p.value>0.05){
    black("As the calculated p-value is greater than the 5% significance level, hypothesis H0 is not rejected. Therefore, the variances can be considered homogeneous")}
    else {"As the calculated p-value is less than the 5% significance level, H0 is rejected. Therefore, the variances are not homogeneous"})
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  cat(green(bold("Independence from errors")))
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  indepe=data.frame(Method=paste(indep$method,"(", names(indep$statistic),")",sep=""),
                    Statistic=indep$statistic,
                    "p-value"=indep$p.value)
  rownames(indepe)=""
  print(indepe)
  cat("\n")
  message(if(indep$p.value>0.05){
    black("As the calculated p-value is greater than the 5% significance level, hypothesis H0 is not rejected. Therefore, errors can be considered independent")}
    else {"As the calculated p-value is less than the 5% significance level, H0 is rejected. Therefore, errors are not independent"})
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  cat(green(bold("Additional Information")))
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  cat(paste("\nCV (%) = ",round(sqrt(a$`Mean Sq`[2])/mean(resp,na.rm=TRUE)*100,2)))
  cat(paste("\nR-squared = ",round(a$`Mean Sq`[1]/(a$`Mean Sq`[2]+a$`Mean Sq`[1]),2)))
  cat(paste("\nMean = ",round(mean(response,na.rm=TRUE),4)))
  cat(paste("\nMedian = ",round(median(response,na.rm=TRUE),4)))
  cat("\nPossible outliers = ", out)
  cat("\n")
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  cat(green(bold("Analysis of Variance")))
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  anava1=as.matrix(data.frame(anava))
  colnames(anava1)=c("Df","Sum Sq","Mean.Sq","F value","Pr(F)")
  print(anava1,na.print = "")
  cat("\n\n")
  message(if (a$`Pr(>F)`[1]<alpha.f){
    black("As the calculated p-value is less than the 5% significance level, the hypothesis H0 of equality of means is rejected. Therefore, at least two treatments differ")}
    else {"As the calculated p-value is greater than the 5% significance level, H0 is not rejected"})
  cat(green(bold("\n\n-----------------------------------------------------------------\n")))
  if(quali==TRUE){cat(green(bold("Multiple Comparison Test")))}else{cat(green(bold("Regression")))}
  cat(green(bold("\n-----------------------------------------------------------------\n")))
  if(quali==TRUE){
    if(mcomp=="tukey"){
      letra <- TUKEY(b, "trat", alpha=alpha.t)
      letra1 <- letra$groups; colnames(letra1)=c("resp","groups")}
    if(mcomp=="sk"){
      nrep=table(trat)[1]
      medias=sort(tapply(resp,trat,mean),decreasing = TRUE)
      letra=scottknott(means = medias,
                       df1 = a$Df[2],
                       nrep = nrep,
                       QME = a$`Mean Sq`[2],
                       alpha = alpha.t)
      letra1=data.frame(resp=medias,groups=letra)}
    if(mcomp=="duncan"){
      letra <- duncan(b, "trat", alpha=alpha.t)
      letra1 <- letra$groups; colnames(letra1)=c("resp","groups")}
    media = tapply(response, trat, mean, na.rm=TRUE)
    if(transf=="1"){letra1}else{letra1$respO=media[rownames(letra1)]}
    print(if(a$`Pr(>F)`[1]<alpha.f){letra1}else{"H0 is not rejected"})
    cat("\n")
    message(if(transf=="1"){}else{blue("\nNOTE: resp = transformed means; respO = means without transformation\n")})
    if(transf==1 && norm1$p.value<0.05 | transf==1 && indep$p.value<0.05 | transf==1 && homog1$p.value<0.05){
      message("\nYour analysis may not be valid: consider using a non-parametric test or transforming the data")}
    if(transf != 1 && norm1$p.value<0.05 | transf!=1 && indep$p.value<0.05 | transf!=1 && homog1$p.value<0.05){
      cat(red("\nWarning!!! Your analysis may not be valid: consider using a non-parametric test"))}
    if(point=="mean_sd"){
      dadosm=data.frame(letra1,
                        media=tapply(response, trat, mean, na.rm=TRUE)[rownames(letra1)],
                        desvio=tapply(response, trat, sd, na.rm=TRUE)[rownames(letra1)])}
    if(point=="mean_se"){
      dadosm=data.frame(letra1,
                        media=tapply(response, trat, mean, na.rm=TRUE)[rownames(letra1)],
                        desvio=(tapply(response, trat, sd, na.rm=TRUE)/sqrt(tapply(response, trat, length)))[rownames(letra1)])}
    dadosm$trats=factor(rownames(dadosm),levels = unique(trat))
    dadosm$limite=dadosm$media+dadosm$desvio
    dadosm=dadosm[unique(as.character(trat)),]
    if(addmean==TRUE){dadosm$letra=paste(format(dadosm$media,digits = dec),dadosm$groups)}
    if(addmean==FALSE){dadosm$letra=dadosm$groups}
    trats=dadosm$trats
    limite=dadosm$limite
    media=dadosm$media
    desvio=dadosm$desvio
    letra=dadosm$letra
    if(geom=="bar"){grafico=ggplot(dadosm,aes(x=trats,y=media))
      if(fill=="trat"){grafico=grafico+
        geom_col(aes(fill=trats),color=1)}else{grafico=grafico+
          geom_col(aes(fill=trats),fill=fill,color=1)}
      if(errorbar==TRUE){grafico=grafico+
        geom_text(aes(y=media+sup+if(sup<0){-desvio}else{desvio},
                      label=letra),family=family,angle=angle.label,size=labelsize, hjust=hjust)}
      if(errorbar==FALSE){grafico=grafico+
        geom_text(aes(y=media+sup,label=letra),family=family,size=labelsize,angle=angle.label, hjust=hjust)}
      if(errorbar==TRUE){grafico=grafico+
        geom_errorbar(data=dadosm,aes(ymin=media-desvio,
                                      ymax=media+desvio,color=1),
                      color="black",width=0.3)}}
    if(geom=="point"){grafico=ggplot(dadosm,aes(x=trats, y=media))
      if(errorbar==TRUE){grafico=grafico+
        geom_text(aes(y=media+sup+if(sup<0){-desvio}else{desvio},
                      label=letra),family=family,angle=angle.label,size=labelsize, hjust=hjust)}
      if(errorbar==FALSE){grafico=grafico+
        geom_text(aes(y=media+sup,
                      label=letra),family=family,angle=angle.label,
                  size=labelsize,hjust=hjust)}
if(errorbar==TRUE){grafico=grafico+ geom_errorbar(data=dadosm, aes(ymin=media-desvio, ymax=media+desvio,color=1), color="black",width=0.3)} if(fill=="trat"){grafico=grafico+ geom_point(aes(color=trats),size=5)} else{grafico=grafico+ geom_point(aes(color=trats), color="black", fill=fill,shape=21,size=5)}} if(geom=="box"){ datam1=data.frame(trats=factor(trat,levels = unique(as.character(trat))), response) dadosm2=data.frame(letra1, superior=tapply(response, trat, mean, na.rm=TRUE)[rownames(letra1)]) dadosm2$trats=rownames(dadosm2) dadosm2=dadosm2[unique(as.character(trat)),] dadosm2$limite=dadosm$media+dadosm$desvio dadosm2$letra=paste(format(dadosm$media,digits = dec),dadosm$groups) trats=dadosm2$trats limite=dadosm2$limite superior=dadosm2$superior letra=dadosm2$letra stat_box=ggplot(datam1,aes(x=trats,y=response))+geom_boxplot() superior=ggplot_build(stat_box)$data[[1]]$ymax dadosm2$superior=superior+sup grafico=ggplot(datam1,aes(x=trats,y=response)) if(fill=="trat"){grafico=grafico+geom_boxplot(aes(fill=trats))} else{grafico=grafico+ geom_boxplot(aes(fill=trats),fill=fill)} grafico=grafico+ geom_text(data=dadosm2, aes(y=superior, label=letra), family = family,size=labelsize,angle=angle.label, hjust=hjust)} grafico=grafico+ theme+ ylab(ylab)+ xlab(xlab)+ theme(text = element_text(size=textsize,color="black", family = family), axis.text = element_text(size=textsize,color="black", family = family), axis.title = element_text(size=textsize,color="black", family = family), legend.position = "none") if(angle !=0){grafico=grafico+ theme(axis.text.x=element_text(hjust = 1.01,angle = angle))} if(CV==TRUE){grafico=grafico+ labs(caption=paste("p-value", if(a$`Pr(>F)`[1]<0.0001){paste("<", 0.0001)} else{paste("=", round(a$`Pr(>F)`[1],4))},"; CV = ", round(abs(sqrt(a$`Mean Sq`[2])/mean(resp))*100,2),"%"))} grafico=as.list(grafico) } if(quali==FALSE){ trat=trat1 # trat=as.numeric(as.character(trat)) if(grau==1){graph=regression(trat,response, grau = 1,textsize=textsize,xlab=xlab,ylab=ylab, family=family,posi=posi,point=point)} if(grau==2){graph=regression(trat,response, grau = 2,textsize=textsize,xlab=xlab,ylab=ylab, family=family,posi=posi,point=point)} if(grau==3){graph=regression(trat,response, grau = 3,textsize=textsize,xlab=xlab,ylab=ylab, family=family,posi=posi,point=point)} grafico=graph[[1]] }} if(test=="noparametric"){ kruskal=function (y, trt, alpha = 0.05, p.adj = c("none", "holm", "hommel", "hochberg", "bonferroni", "BH", "BY", "fdr"), group = TRUE, main = NULL,console=FALSE){ name.y <- paste(deparse(substitute(y))) name.t <- paste(deparse(substitute(trt))) if(is.null(main))main<-paste(name.y,"~", name.t) p.adj <- match.arg(p.adj) junto <- subset(data.frame(y, trt), is.na(y) == FALSE) N <- nrow(junto) medians<-mean.stat(junto[,1],junto[,2],stat="median") for(i in c(1,5,2:4)) { x <- mean.stat(junto[,1],junto[,2],function(x)quantile(x)[i]) medians<-cbind(medians,x[,2])} medians<-medians[,3:7] names(medians)<-c("Min","Max","Q25","Q50","Q75") Means <- mean.stat(junto[,1],junto[,2],stat="mean") sds <- mean.stat(junto[,1],junto[,2], stat="sd") nn <- mean.stat(junto[,1],junto[,2],stat="length") Means<-data.frame(Means,std=sds[,2],r=nn[,2],medians) rownames(Means)<-Means[,1] Means<-Means[,-1] names(Means)[1]<-name.y junto[, 1] <- rank(junto[, 1]) means <- mean.stat(junto[, 1], junto[, 2], stat = "sum") sds <- mean.stat(junto[, 1], junto[, 2], stat = "sd") nn <- mean.stat(junto[, 1], junto[, 2], stat = "length") means <- data.frame(means, r = nn[, 2]) names(means)[1:2] <- c(name.t, name.y) ntr <- 
nrow(means) nk <- choose(ntr, 2) DFerror <- N - ntr rs <- 0 U <- 0 for (i in 1:ntr) { rs <- rs + means[i, 2]^2/means[i, 3] U <- U + 1/means[i, 3] } S <- (sum(junto[, 1]^2) - (N * (N + 1)^2)/4)/(N - 1) H <- (rs - (N * (N + 1)^2)/4)/S p.chisq <- 1 - pchisq(H, ntr - 1) if(console){ cat("\nStudy:", main) cat("\nKruskal-Wallis test's\nTies or no Ties\n") cat("\nCritical Value:", H) cat("\nDegrees of freedom:", ntr - 1) cat("\nPvalue Chisq :", p.chisq, "\n\n")} DFerror <- N - ntr Tprob <- qt(1 - alpha/2, DFerror) MSerror <- S * ((N - 1 - H)/(N - ntr)) means[, 2] <- means[, 2]/means[, 3] if(console){cat(paste(name.t, ",", sep = ""), " means of the ranks\n\n") print(data.frame(row.names = means[, 1], means[, -1])) cat("\nPost Hoc Analysis\n")} if (p.adj != "none") { if(console)cat("\nP value adjustment method:", p.adj) a <- 1e-06 b <- 1 for (i in 1:100) { x <- (b + a)/2 xr <- rep(x, nk) d <- p.adjust(xr, p.adj)[1] - alpha ar <- rep(a, nk) fa <- p.adjust(ar, p.adj)[1] - alpha if (d * fa < 0) b <- x if (d * fa > 0) a <- x} Tprob <- qt(1 - x/2, DFerror) } nr <- unique(means[, 3]) if (group & console){ cat("\nt-Student:", Tprob) cat("\nAlpha :", alpha)} if (length(nr) == 1) LSD <- Tprob * sqrt(2 * MSerror/nr) statistics<-data.frame(Chisq=H,Df=ntr-1,p.chisq=p.chisq) if ( group & length(nr) == 1 & console) cat("\nMinimum Significant Difference:",LSD,"\n") if ( group & length(nr) != 1 & console) cat("\nGroups according to probability of treatment differences and alpha level.\n") if ( length(nr) == 1) statistics<-data.frame(statistics,t.value=Tprob,MSD=LSD) comb <- utils::combn(ntr, 2) nn <- ncol(comb) dif <- rep(0, nn) LCL <- dif UCL <- dif pvalue <- dif sdtdif <- dif for (k in 1:nn) { i <- comb[1, k] j <- comb[2, k] dif[k] <- means[i, 2] - means[j, 2] sdtdif[k] <- sqrt(MSerror * (1/means[i,3] + 1/means[j, 3])) pvalue[k] <- 2*(1 - pt(abs(dif[k])/sdtdif[k],DFerror)) } if (p.adj != "none") pvalue <- p.adjust(pvalue, p.adj) pvalue <- round(pvalue,4) sig <- rep(" ", nn) for (k in 1:nn) { if (pvalue[k] <= 0.001) sig[k] <- "***" else if (pvalue[k] <= 0.01) sig[k] <- "**" else if (pvalue[k] <= 0.05) sig[k] <- "*" else if (pvalue[k] <= 0.1) sig[k] <- "." 
} tr.i <- means[comb[1, ], 1] tr.j <- means[comb[2, ], 1] LCL <- dif - Tprob * sdtdif UCL <- dif + Tprob * sdtdif comparison <- data.frame(Difference = dif, pvalue = pvalue, "Signif."=sig, LCL, UCL) if (p.adj !="bonferroni" & p.adj !="none"){ comparison<-comparison[,1:3] statistics<-data.frame(Chisq=H,p.chisq=p.chisq)} rownames(comparison) <- paste(tr.i, tr.j, sep = " - ") if (!group) { groups<-NULL if(console){ cat("\nComparison between treatments mean of the ranks.\n\n") print(comparison) } } if (group) { comparison=NULL Q<-matrix(1,ncol=ntr,nrow=ntr) p<-pvalue k<-0 for(i in 1:(ntr-1)){ for(j in (i+1):ntr){ k<-k+1 Q[i,j]<-p[k] Q[j,i]<-p[k] } } groups <- ordenacao(means[, 1], means[, 2],alpha, Q, console) names(groups)[1]<-name.y if(console) { cat("\nTreatments with the same letter are not significantly different.\n\n") print(groups) } } ranks=means Means<-data.frame(rank=ranks[,2],Means) Means<-Means[,c(2,1,3:9)] parameters<-data.frame(test="Kruskal-Wallis",p.ajusted=p.adj,name.t=name.t,ntr = ntr,alpha=alpha) rownames(parameters)<-" " rownames(statistics)<-" " output<-list(statistics=statistics,parameters=parameters, means=Means,comparison=comparison,groups=groups) class(output)<-"group" invisible(output) } krusk=kruskal(response,trat,p.adj = p.adj,alpha=alpha.t) cat(green(bold("\n\n-----------------------------------------------------------------\n"))) cat(green(italic("Statistics"))) cat(green(bold("\n-----------------------------------------------------------------\n"))) print(krusk$statistics) cat(green(bold("\n\n-----------------------------------------------------------------\n"))) cat(green(italic("Parameters"))) cat(green(bold("\n-----------------------------------------------------------------\n"))) print(krusk$parameters) cat(green(bold("\n\n-----------------------------------------------------------------\n"))) cat(green(italic("Multiple Comparison Test"))) cat(green(bold("\n-----------------------------------------------------------------\n"))) saida=cbind(krusk$means[,c(1,3)],krusk$groups[rownames(krusk$means),]) colnames(saida)=c("Mean","SD","Rank","Groups") print(saida) dadosm=data.frame(krusk$means,krusk$groups[rownames(krusk$means),]) dadosm$trats=factor(rownames(dadosm),levels = unique(trat)) dadosm$media=tapply(response,trat,mean, na.rm=TRUE)[rownames(krusk$means)] if(point=="mean_sd"){dadosm$std=tapply(response,trat,sd, na.rm=TRUE)[rownames(krusk$means)]} if(point=="mean_se"){dadosm$std=tapply(response, trat, sd, na.rm=TRUE)/ sqrt(tapply(response, trat, length))[rownames(krusk$means)]} if(addmean==TRUE){dadosm$letra=paste(format(dadosm$response,digits = dec),dadosm$groups)} if(addmean==FALSE){dadosm$letra=dadosm$groups} trats=dadosm$trats limite=dadosm$limite media=dadosm$media std=dadosm$std letra=dadosm$letra if(geom=="bar"){grafico=ggplot(dadosm, aes(x=trats,y=response)) if(fill=="trat"){grafico=grafico+ geom_col(aes(fill=trats),color=1)} else{grafico=grafico+ geom_col(aes(fill=trats),fill=fill,color=1)} if(errorbar==TRUE){grafico=grafico+ geom_text(aes(y=media+sup+if(sup<0){-std}else{std}, label=letra),family=family,size=labelsize,angle=angle.label, hjust=hjust)} if(errorbar==FALSE){grafico=grafico+ geom_text(aes(y=media+sup,label=letra),size=labelsize,family=family,angle=angle.label, hjust=hjust)} if(errorbar==TRUE){grafico=grafico+ geom_errorbar(data=dadosm,aes(ymin=response-std, ymax=response+std, color=1), color="black",width=0.3)}} if(geom=="point"){grafico=ggplot(dadosm, aes(x=trats, y=response)) if(errorbar==TRUE){grafico=grafico+ 
geom_text(aes(y=media+sup+if(sup<0){-std}else{std}, label=letra), family=family,angle=angle.label,size=labelsize, hjust=hjust)} if(errorbar==FALSE){grafico=grafico+ geom_text(aes(y=media+sup, label=letra), family=family,angle=angle.label, size=labelsize,hjust=hjust)} if(errorbar==TRUE){grafico=grafico+ geom_errorbar(data=dadosm, aes(ymin=response-std, ymax=response+std, color=1), color="black",width=0.3)} if(fill=="trat"){grafico=grafico+ geom_point(aes(color=trats),size=5)} else{grafico=grafico+ geom_point(aes(color=trats), color="black", fill=fill,shape=21,size=5)}} if(geom=="box"){ datam1=data.frame(trats=factor(trat,levels = unique(as.character(trat))),response) dadosm2=data.frame(krusk$means) dadosm2$trats=rownames(dadosm2) dadosm2$limite=dadosm2$response+dadosm2$std dadosm2$letra=paste(format(dadosm2$response,digits = dec), dadosm$groups) dadosm2=dadosm2[unique(as.character(trat)),] trats=dadosm2$trats limite=dadosm2$limite letra=dadosm2$letra stat_box=ggplot(datam1,aes(x=trats,y=response))+geom_boxplot() superior=ggplot_build(stat_box)$data[[1]]$ymax dadosm2$superior=superior+sup grafico=ggplot(datam1, aes(x=trats, y=response)) if(fill=="trat"){grafico=grafico+ geom_boxplot(aes(fill=1))} else{grafico=grafico+ geom_boxplot(aes(fill=trats),fill=fill)} grafico=grafico+ geom_text(data=dadosm2, aes(y=superior, label=letra), family = family,angle=angle.label, size=labelsize,hjust=hjust)} grafico=grafico+theme+ ylab(ylab)+ xlab(xlab)+ theme(text = element_text(size=textsize,color="black", family = family), axis.title = element_text(size=textsize,color="black", family = family), axis.text = element_text(size=textsize,color="black", family = family), legend.position = "none") if(angle !=0){grafico=grafico+theme(axis.text.x=element_text(hjust = 1.01,angle = angle))} } if(quali==TRUE){print(grafico)} graficos=list(grafico)#[[1]] }
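# dic.analysis() chains the standard steps of a one-way (CRD) analysis:
# residual normality (shapiro.test by default), variance homogeneity
# (bartlett.test by default), error independence (lmtest::dwtest), the ANOVA
# itself and, for qualitative treatments, a multiple-comparison test. A
# minimal base-R sketch of the same pipeline, with invented data for
# illustration only (TukeyHSD is the base-R analogue of the internal TUKEY()):
# trat=factor(rep(paste0("T",1:4),each=5))
# response=rnorm(20,mean=rep(c(10,12,12,15),each=5))
# mod=aov(response~trat)
# shapiro.test(residuals(mod))        # normality of errors
# bartlett.test(residuals(mod)~trat)  # homogeneity of variances
# lmtest::dwtest(mod)                 # independence of errors
# anova(mod)
# TukeyHSD(mod)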
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/dic_analysis.R
#' Dataset: Example markblue
#'
#' The data are part of an experiment that studied the spray deposit
#'
#' @docType data
#'
#' @usage data(example_markblue)
#'
#' @format data.frame containing data set
#' \describe{
#'   \item{\code{curva}}{Vector with curves}
#'   \item{\code{TRATAMENTO}}{Numeric vector with treatment}
#'   \item{\code{repe}}{Numeric vector with repetition}
#'   \item{\code{Abs}}{Numeric vector with absorbance}
#'   \item{\code{area}}{Numeric vector with area}
#' }
#' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]}
#' @author Gabriel Danilo Shimizu
#' @author Otavio Jorge Grigoli Abi Saab
#' @keywords datasets
#' @examples
#' data(example_markblue)
"example_markblue"
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/example_markblue.R
#' Dataset: Example markbluecurve
#'
#' The data are part of an experiment that studied the spray deposit
#'
#' @docType data
#'
#' @usage data(example_markbluecurve)
#'
#' @format data.frame containing data set
#' \describe{
#'   \item{\code{curva}}{Vector with curve}
#'   \item{\code{Amostra}}{Numeric vector with sample}
#'   \item{\code{Abs}}{Numeric vector with absorbance}
#'   \item{\code{ppm}}{Numeric vector with concentration}
#' }
#' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]}
#' @author Gabriel Danilo Shimizu
#' @author Otavio Jorge Grigoli Abi Saab
#' @keywords datasets
#' @examples
#' data(example_markbluecurve)
"example_markbluecurve"
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/example_markbluecurve.R
#' Dataset: Example markmet
#'
#' The data are part of an experiment that studied the spray deposit
#'
#' @docType data
#'
#' @usage data(example_markmet)
#'
#' @format data.frame containing data set
#' \describe{
#'   \item{\code{trat}}{Vector with treatment}
#'   \item{\code{repe}}{Numeric vector with repetition}
#'   \item{\code{ppm}}{Numeric vector with concentration}
#' }
#' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]}
#' @author Gabriel Danilo Shimizu
#' @author Otavio Jorge Grigoli Abi Saab
#' @keywords datasets
#' @examples
#' data(example_markmet)
"example_markmet"
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/example_markmet.R
#' Dataset: Example meteorological
#'
#' The data come from a meteorological station on a rural property in the city of Rolandia/PR
#'
#' @docType data
#'
#' @usage data("example_meteorological")
#'
#' @format data.frame containing data set
#' \describe{
#'   \item{\code{tempo}}{Numeric vector with time}
#'   \item{\code{temp}}{Numeric vector with air temperature}
#'   \item{\code{ur}}{Numeric vector with relative humidity}
#'   \item{\code{vento}}{Numeric vector with wind speed}
#' }
#' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]}
#' @author Gabriel Danilo Shimizu
#' @author Otavio Jorge Grigoli Abi Saab
#' @keywords datasets
#' @examples
#' data(example_meteorological)
"example_meteorological"
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/example_meteorological.R
#' Flow graph of nozzles on a spray bar
#'
#' @description This is a function to check the condition of the spray nozzles
#'
#' @param file Numeric vector with the flows, or the path to an Excel file whose first column contains the flows
#' @param pointsize Point size (\emph{default} 3.5)
#' @param xsup Upper limit, as a multiple of the mean flow (\emph{default} 1.1)
#' @param xinf Lower limit, as a multiple of the mean flow (\emph{default} 0.9)
#' @param pointcolor Color used to highlight problem nozzles (\emph{default} "red")
#' @param xlab x axis legend
#' @param ylab y axis legend
#'
#' @return Returns a ggplot2 graph
#'
#' @import ggplot2
#'
#' @importFrom gridExtra grid.arrange
#' @importFrom stats na.omit
#' @importFrom stats coef
#' @importFrom readxl read_excel
#' @importFrom ggrepel geom_text_repel
#'
#' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]}
#' @author Gabriel Danilo Shimizu
#' @author Otavio Jorge Grigoli Abi Saab
#'
#' @references
#' ANDEF Associacao Nacional de Defesa Vegetal. Manual de tecnologia de aplicacao de produtos fitossanitarios. Campinas: Linea Creativa, 2004. 50p.
#'
#' BOLLER, W.; RAETANO, C. G. Bicos e pontas de pulverizacao de energia hidraulica, regulagens e calibracao de pulverizadores de barras. In: ANTUNIASSI, U. R.; BOLLER, W. (Organizadores). Tecnologia de aplicacao para culturas anuais. Passo Fundo: Aldeia Norte; Botucatu: FEPAF, 2011. p.51-82.
#'
#' SPRAYING SYSTEMS CO. Catalogo 51A-PT - Produtos de pulverizacao para agricultura. Wheaton: Spraying Systems Co., 2014. 160p.
#'
#' @export
#'
#' @examples
#' resp=c(881,854,865,876,906.3,
#'        874.7,868.3,878.7,872.7,901.7,
#'        823.3,889.7,861.3,900.3,890.3,
#'        886.7,916.7,872,912.7,894)
#' flowgrap(resp)
#' # flowgrap("file.xlsx")
flowgrap=function(file,
                  pointsize=3.5,
                  xsup=1.1,
                  xinf=0.9,
                  pointcolor="red",
                  xlab="Nozzle number",
                  ylab=NA){
  requireNamespace("crayon")
  if(is.na(ylab)==TRUE){ylab=expression("Nozzle flow"~(mL~min^{-1}))}
  # Accept either a vector of flows or the path to an Excel file
  if(length(file)==1){dados=read_excel(file)
    x=as.vector(unlist(dados[,1]))}else{x=file}
  ponta=1:length(x)
  d=data.frame(ponta,x)
  m1=mean(x)
  m2=mean(x)*xsup
  m3=mean(x)*xinf
  # Nozzles outside the xinf-xsup band around the mean flow are flagged
  outlier=d[x<m3 | x>m2,]
  requireNamespace("ggplot2")
  requireNamespace("ggrepel")
  graph=ggplot(d,aes(x = ponta, y = x)) +
    geom_hline(yintercept = m1, color="black",size=1) +
    geom_hline(yintercept = m2, linetype = 2,color="black",size=1) +
    geom_hline(yintercept = m3, linetype = 2,color="black",size=1) +
    theme_classic()+
    geom_point(data=outlier,aes(x=ponta,y=x),color=pointcolor,size=pointsize)+
    geom_text_repel(data=outlier,aes(x=ponta,y=x,label=ponta))+
    labs(x = xlab, y = ylab) +
    geom_point(shape=21,size=pointsize) +
    scale_x_continuous(breaks = seq(1,length(x),1))
  print(graph)
  if(length(outlier$x)==0){print("No problem nozzles")}else{
    message(black("\n-----------------------------------------------\n"))
    message(black("Problem 1: Worn nozzles "))
    message(outlier$ponta[outlier$x>m2])
    message(black("\n-----------------------------------------------"))
    message("\n")
    message(black("Problem 2: Clogged nozzles "))
    message(outlier$ponta[outlier$x<m3])
    message(black("\n-----------------------------------------------"))}
}
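# The band used above follows the usual inspection rule for hydraulic nozzles:
# flows above mean*xsup suggest a worn orifice, flows below mean*xinf suggest
# clogging. A minimal base-R sketch of the same classification, with
# illustrative values (not part of the package API):
# flows=c(881,854,865,876,906.3,823.3,916.7)
# m=mean(flows)
# which(flows>1.1*m)  # candidate worn nozzles
# which(flows<0.9*m)  # candidate clogged nozzles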
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/flowgrap.R
#' Flow calculation as a function of working pressure
#'
#' @description This is a function to determine the flow rate of a spray nozzle as a function of the working pressure
#'
#' @param q1 Nozzle flow 1 (L/min)
#' @param q2 Nozzle flow 2 (L/min)
#' @param p1 Nozzle pressure 1 (bar)
#' @param p2 Nozzle pressure 2 (bar)
#'
#' @return Returns values of flow (L/min) or pressure (bar)
#'
#' @details
#' Nozzle flow 1:
#' \deqn{q1=\frac{\sqrt{p1}}{\sqrt{p2}}*q2}
#' Nozzle flow 2:
#' \deqn{q2=\frac{q1}{\frac{\sqrt{p1}}{\sqrt{p2}}}}
#' Nozzle pressure 1:
#' \deqn{p1=(\sqrt{p2}*\frac{q1}{q2})^2}
#' Nozzle pressure 2:
#' \deqn{p2=(\frac{\sqrt{p1}}{(\frac{q1}{q2})})^2}
#'
#' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]}
#' @author Gabriel Danilo Shimizu
#' @author Otavio Jorge Grigoli Abi Saab
#'
#' @references
#' ANDEF Associacao Nacional de Defesa Vegetal. Manual de tecnologia de aplicacao de produtos fitossanitarios. Campinas: Linea Creativa, 2004. 50p.
#'
#' BOLLER, W.; RAETANO, C. G. Bicos e pontas de pulverizacao de energia hidraulica, regulagens e calibracao de pulverizadores de barras. In: ANTUNIASSI, U. R.; BOLLER, W. (Organizadores). Tecnologia de aplicacao para culturas anuais. Passo Fundo: Aldeia Norte; Botucatu: FEPAF, 2011. p.51-82.
#'
#' SPRAYING SYSTEMS CO. Catalogo 51A-PT - Produtos de pulverizacao para agricultura. Wheaton: Spraying Systems Co., 2014. 160p.
#'
#' @export
#'
#' @seealso \link{flowrat}
#' \link{product}
#'
#' @examples
#' flowpres(q1=NA,q2=0.80,p1=1.00,p2=2.80)
#' flowpres(q1=0.48,q2=0.80,p1=1.00,p2=NA)
flowpres=function(q1,q2,p1,p2){
  requireNamespace("crayon")
  if(is.na(q1)==TRUE){q1=(sqrt(p1)/sqrt(p2))*q2
    message(black("Nozzle flow 1 (L/min): "))
    message(round(q1,2))}
  if(is.na(q2)==TRUE){q2=q1/(sqrt(p1)/sqrt(p2))
    message(black("Nozzle flow 2 (L/min): "))
    message(round(q2,2))}
  if(is.na(p1)==TRUE){p1=(sqrt(p2)*(q1/q2))^2
    message(black("Nozzle pressure 1 (bar): "))
    message(round(p1,2))}
  if(is.na(p2)==TRUE){p2=(sqrt(p1)/(q1/q2))^2
    message(black("Nozzle pressure 2 (bar): "))
    message(round(p2,2))}
}
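# Worked check of the square-root pressure law used above: for q2 = 0.80 L/min
# at p2 = 2.80 bar, dropping to p1 = 1.00 bar gives
# q1 = sqrt(1.00/2.80)*0.80 ≈ 0.48 L/min,
# which matches flowpres(q1=NA,q2=0.80,p1=1.00,p2=2.80):
# sqrt(1.00/2.80)*0.80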
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/flowpres.R
#' Calculation of required spray nozzle flow
#'
#' @description This is a function to determine the required flow rate of a spray nozzle
#'
#' @param q Nozzle flow (L/min)
#' @param Q Application rate (L/ha)
#' @param V Sprayer speed (km/h)
#' @param W Spacing between spray nozzles (cm)
#'
#' @note 60000 is the units conversion factor
#'
#' @return Returns values for flow, application rate, sprayer speed, or spacing between spray nozzles
#'
#' @details
#' Application rate (L/ha):
#' \deqn{Q=\frac{60000*q}{V*W}}
#' Nozzle flow (L/min):
#' \deqn{q=\frac{Q*(V*W)}{60000}}
#' Sprayer speed (km/h):
#' \deqn{V=\frac{\frac{60000*q}{Q}}{W}}
#' Spacing between spray nozzles (cm):
#' \deqn{W=\frac{\frac{60000*q}{Q}}{V}}
#'
#' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]}
#' @author Gabriel Danilo Shimizu
#' @author Otavio Jorge Grigoli Abi Saab
#'
#' @references
#' ANDEF Associacao Nacional de Defesa Vegetal. Manual de tecnologia de aplicacao de produtos fitossanitarios. Campinas: Linea Creativa, 2004. 50p.
#'
#' BOLLER, W.; RAETANO, C. G. Bicos e pontas de pulverizacao de energia hidraulica, regulagens e calibracao de pulverizadores de barras. In: ANTUNIASSI, U. R.; BOLLER, W. (Organizadores). Tecnologia de aplicacao para culturas anuais. Passo Fundo: Aldeia Norte; Botucatu: FEPAF, 2011. p.51-82.
#'
#' SPRAYING SYSTEMS CO. Catalogo 51A-PT - Produtos de pulverizacao para agricultura. Wheaton: Spraying Systems Co., 2014. 160p.
#'
#' @export
#'
#' @seealso \link{flowpres}
#' \link{product}
#'
#' @examples
#' flowrat(Q = 190,q = NA,V = 10,W = 50)
flowrat=function(Q,q,V,W=50){
  requireNamespace("crayon")
  if(is.na(Q)==TRUE){Q=(60000*q)/(V*W)
    message(black("Application rate (L/ha): "))
    message(round(Q,2))}
  if(is.na(q)==TRUE){q=(Q*(V*W))/60000
    message(black("Nozzle flow (L/min): "))
    message(round(q,2))}
  if(is.na(V)==TRUE){V=((60000*q)/Q)/W
    message(black("Sprayer speed (km/h): "))
    message(round(V,2))}
  if(is.na(W)==TRUE){W=((60000*q)/Q)/V
    message(black("Spacing between nozzles (cm): "))
    message(round(W,2))}
}
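# Worked check of the conversion factor: with Q = 190 L/ha, V = 10 km/h and
# W = 50 cm, the required nozzle flow is
# q = 190*10*50/60000 ≈ 1.58 L/min,
# matching flowrat(Q = 190,q = NA,V = 10,W = 50). The factor 60000 converts
# the swath rate V (km/h) times W (cm) into hectares per minute:
# 1 km/h * 1 cm = 10 m2/h = (1/60000) ha/min.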
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/flowrat.R
#' Spray deposit (glowing blue marker) #' #' @description This is a function to determine spray deposit using bright blue marker and then after performing tests of assumptions, analysis of variance and comparison of means #' #' @param d1 Curved worksheet #' @param d2 Experiment worksheet #' @param vl Wash volume (mL) #' @param ci Initial marker concentration #' @param naf2 Sheet area (cm2) #' @param ncu2 Column referring to the curve (\emph{default} is 1) #' @param nresp2 Column referring to absorbance #' @param ntrat2 Column referring to treatment #' @param nrep2 Column referring to repetition #' @param analysis Perform statistical analysis #' @param design Experiment design #' @param transf Data transformation #' @param quali Qualitative or quantitative treatment (\emph{default} is TRUE) #' @param grau degree of the polynomial (when treatment is quantitative) #' @param test Parametric or Nonparametric (\emph{default} is "parametric") #' @param mcomp Mean comparison test (\emph{default} is "tukey") #' @param ylab y axis name (\emph{default} is expression(mu~cm^2)) #' @param save.xlsx Want to export in excel format (\emph{default} is FALSE) #' #' @note Curve name on the curve worksheet (d1) must be the same as the curve name on the experiment worksheet (d2) #' #' @return Returns the comparison between the treatments of the experiment #' #' @import ggplot2 #' #' @importFrom gridExtra grid.arrange #' @importFrom stats na.omit #' @importFrom stats coef #' @importFrom readxl read_excel #' @importFrom ggrepel geom_text_repel #' @importFrom xlsx write.xlsx #' #' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]} #' @author Gabriel Danilo Shimizu #' @author Otavio Jorge Grigoli Abi Saab #' #' @references #' No reference #' #' @export #' #' @seealso \link{markmet} #' #' @examples #' data("example_markbluecurve") #' data("example_markblue") #' markblue(d1=example_markbluecurve, #' d2=example_markblue, #' vl=20, #' ci=1500, #' ncu2 = 1, #' ntrat2 = 2, #' nrep2 = 3, #' nresp2 = 4, #' naf2 = 5) markblue=function(d1, d2, vl, ci, ncu2=1, ntrat2=2, nrep2=3, nresp2=5, naf2=6, analysis=TRUE, design="DIC", transf=1, quali=TRUE, grau=1, test="parametric", mcomp="tukey", ylab=expression(mu~cm^2), save.xlsx=FALSE){ if(is.data.frame(d1)==FALSE){d1=read_excel(d1)} if(is.data.frame(d2)==FALSE){d2=read_excel(d2)} curva=factor(as.vector(unlist(d1[,1])),unique(unlist(d1[,1]))) ncurvas=nlevels(curva) modelos=c() response=as.vector(unlist(d2[,nresp2])) curvas=as.vector(unlist(d2[,ncu2])) af=as.vector(unlist(d2[,naf2])) curvas=factor(curvas,unique(curvas)) d2a=data.frame(curvas,response) xmax=tapply(response,curvas, max) xmin=tapply(response,curvas, min) for(i in 1:ncurvas){ maximo=xmax[levels(curvas)[i]] minimo=xmin[levels(curvas)[i]] abs=as.vector(unlist(d1[,3])) ppm=as.vector(unlist(d1[,4])) data=data.frame(curva,abs,ppm) abst=abs[abs>minimo & abs<maximo] ppmt=ppm[abs>minimo & abs<maximo] curvat=curva[abs>minimo & abs<maximo] abst1=abs[abs>maximo] curvat1=curva[abs>maximo] ocultos=length(na.omit(abst1[curvat1==levels(curva)[i]])) datat=data.frame(curvat,abst,ppmt) aa=na.omit(datat[curvat==levels(curva)[i],]) numeros=as.numeric(rownames(aa)) minimos=numeros[1]+ocultos-1 maximos=numeros[length(numeros)]+ocultos+1 data=data[c(minimos:maximos),] modelos[[i]]=with(data,#[curva==levels(curva)[i],], lm(abs~ppm)) } names(modelos)=levels(curva) respostas=c() models=c() for(i in 1:ncurvas){ resps=d2a[curvas==levels(curva)[i],] response=as.vector(unlist(resps[,2])) mod=modelos[levels(curva)[i]][[1]] 
b0=coef(mod)[1] b1=coef(mod)[2] preditos=function(y){(y-b0)/b1} respostas[[i]]=preditos(response) models[[i]]=summary(mod) } names(models)=levels(curva) mgL=unlist(respostas) ml=(mgL*vl)/ci microl=ml*1000 microlcm2=microl/af if(analysis==FALSE){ respostas=data.frame(Abs=as.vector(unlist(d2[,nresp2])), mgL,ml, microl,microlcm2)} if(analysis==TRUE){ respostas=data.frame(trat=as.vector(unlist(d2[,ntrat2])), rep=as.vector(unlist(d2[,nrep2])), Abs=as.vector(unlist(d2[,nresp2])), mgL,ml, microl,microlcm2) if(design=="DIC"){ with(respostas,dic.analysis(trat, microlcm2, transf = transf, quali=quali, grau = grau, test=test, mcomp=mcomp, ylab=ylab))} if(design=="DBC"){ with(respostas,dbc.analysis(trat, rep, microlcm2, transf = transf, quali=quali, grau = grau, test=test, mcomp = mcomp, ylab=ylab))}} cat("\n") if(isTRUE(save.xlsx)){xlsx::write.xlsx(respostas,"result.xlsx")} list(models=models,results=respostas) }
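# A minimal sketch (illustrative values, not package data) of the inverse
# prediction performed above: markblue() fits lm(abs ~ ppm) on the standard
# curve, back-calculates concentration from a sample's absorbance, and
# converts it to deposit per unit leaf area.
ppm <- c(0, 250, 500, 750, 1000)        # curve concentrations (mg/L)
abs <- c(0.00, 0.17, 0.35, 0.52, 0.70)  # curve absorbances
fit <- lm(abs ~ ppm)
b0 <- coef(fit)[1]; b1 <- coef(fit)[2]
y <- 0.05                               # absorbance of a washed sample
mgL <- (y - b0)/b1                      # inverse prediction (mg/L)
ml <- (mgL*20)/1500                     # vl = 20 mL wash, ci = 1500 mg/L
microlcm2 <- (ml*1000)/50               # deposit (uL/cm2) for a 50 cm2 leaf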
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/markblue.R
#' Spray deposit (metallic marker) #' #' @description This is a function to determine spray deposit using metallic markers, followed by tests of assumptions, analysis of variance and comparison of means #' #' @param ppm Concentration reading (mg/L) #' @param white Blank (white) reading #' @param VL Wash volume (mL) #' @param AL Leaf blade area (cm2) #' @param trat Vector with treatment #' @param block Vector with block (if design = "DBC") #' @param analysis Perform statistical analysis #' @param design Experiment design #' @param transf Data transformation #' @param quali Qualitative or quantitative treatment (\emph{default} is TRUE) #' @param grau Degree of the polynomial (when treatment is quantitative) #' @param test Parametric or nonparametric (\emph{default} is "parametric") #' @param mcomp Mean comparison test (\emph{default} is "tukey") #' @param ylab y axis name (\emph{default} is expression(mu~cm^2)) #' @param save.xlsx Export results in Excel format (\emph{default} is FALSE) #' #' @return Returns the comparison between the treatments of the experiment #' #' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]} #' @author Gabriel Danilo Shimizu #' @author Otavio Jorge Grigoli Abi Saab #' #' @references #' No reference #' #' @export #' #' @seealso \link{markblue} #' #' @examples #' library(AgroTech) #' data("example_markmet") #' with(example_markmet, #' markmet(ppm = ppm, #' white = 0.02, #' VL = 35, #' AL = 63.61, #' analysis = TRUE, #' trat=trat)) markmet=function(ppm, white, VL, AL, analysis=TRUE, trat, block, design="DIC", transf=1, quali=TRUE, grau=1, test="parametric", mcomp="tukey", ylab=expression(mu~cm^2), save.xlsx=FALSE){ ppmc=(ppm-white)*1000 microg=(ppmc*VL)/1000 mcm2=microg/AL if(analysis==FALSE){ respostas=data.frame(conc=ppm, microg, microcm2=mcm2)} if(analysis==TRUE){ respostas=data.frame(conc=ppm, microg, microcm2=mcm2) if(design=="DIC"){ with(respostas,dic.analysis(trat, microcm2, transf = transf, quali=quali, grau = grau, test=test, mcomp=mcomp, ylab=ylab))} if(design=="DBC"){ with(respostas,dbc.analysis(trat, block, microcm2, transf = transf, quali=quali, grau = grau, test=test, mcomp = mcomp, ylab=ylab))}} cat("\n") if(isTRUE(save.xlsx)){xlsx::write.xlsx(respostas,"result.xlsx")} list(response=respostas) }
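# A worked sketch (illustrative values) of the metallic-marker arithmetic in
# markmet(): subtract the blank reading, scale by the wash volume, then
# divide by the leaf blade area.
ppm   <- 0.50                  # reading (mg/L)
white <- 0.02                  # blank reading
VL    <- 35                    # wash volume (mL)
AL    <- 63.61                 # blade area (cm2)
ppmc   <- (ppm - white)*1000   # 480 ug/L
microg <- (ppmc*VL)/1000       # 16.8 ug recovered in the wash
mcm2   <- microg/AL            # ~0.264 ug/cm2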
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/markmet.R
#' @keywords internal #' @md #' @name AgroTech-package #' @docType package "_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/package.R
#' Amount of phytosanitary product per spray tank #' #' @description This is a function to determine the amount of commercial product to be placed in the sprayer tank at each fill #' #' @param Ct Spray tank volumetric capacity (L) #' @param Dose Product dose to be applied (L/ha, mL/ha, kg/ha, g/ha) #' @param Q Application rate (L/ha) #' #' @return Returns (invisibly) the amount of product (L or kg) to add at each sprayer refill #' #' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]} #' @author Gabriel Danilo Shimizu #' @author Otavio Jorge Grigoli Abi Saab #' #' @references #' ANDEF Associacao Nacional de Defesa Vegetal. Manual de tecnologia de aplicacao de produtos fitossanitarios. Campinas: Linea Creativa, 2004. 50p. #' #' BOLLER, W.; RAETANO, C. G. Bicos e pontas de pulverizacao de energia hidraulica, regulagens e calibracao de pulverizadores de barras. In: ANTUNIASSI, U. R.; BOLLER, W. (Organizadores). Tecnologia de aplicacao para culturas anuais. Passo Fundo: Aldeia Norte; Botucatu: FEPAF, 2011. p.51-82. #' #' SPRAYING SYSTEMS CO. Catalogo 51A-PT - Produtos de pulverizacao para agricultura. Wheaton: Spraying Systems Co., 2014. 160p. #' #' @export #' #' @seealso \link{flowpres} #' \link{flowrat} #' #' @examples #' product(Ct = 800,Dose = 200,Q = 100) product=function(Ct,Dose,Q){ product=(Ct*Dose)/Q message(black(paste("Amount of product (L or kg) to be added to the tank at each sprayer refill: ",product))) invisible(product) }
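# Worked example of the formula above: an 800 L tank at 100 L/ha covers 8 ha
# per fill, so at 200 mL/ha the refill needs (800*200)/100 = 1600 mL of product.
product(Ct = 800, Dose = 200, Q = 100)   # 1600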
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/produtc.R
#' Temporal variability graph of weather conditions #' #' @description This is a function to check weather conditions in agricultural spraying #' #' @param file Excel file (xlsx) or data frame #' @param nx Column containing time (\emph{default} is 1) #' @param ny Column containing the weather variable (\emph{default} is 2) #' @param variable Variable name #' @param ylab y axis (Dependent) #' @param xlab x axis (Independent) #' @param size.text Text size (\emph{default} is 12) #' @param size.title Title size (\emph{default} is 12) #' @param size.strip Strip size (\emph{default} is 12) #' @param size.lty Line width (\emph{default} is 0.7) #' #' @return Returns a ggplot2 graph #' #' @author Rodrigo Yudi Palhaci Marubayashi, \email{[email protected]} #' @author Gabriel Danilo Shimizu #' @author Otavio Jorge Grigoli Abi Saab #' #' @references #' No reference #' #' @export #' #' @examples #' data("example_meteorological") #' vartemp(example_meteorological) vartemp=function(file, nx=1, ny=2, variable=NA, ylab="Dependent", xlab="Independent", size.text=12, size.title=12, size.strip=12, size.lty=0.7){ requireNamespace("readxl") requireNamespace("ggplot2") if(is.data.frame(file)==TRUE){dados=file}else{dados=read_excel(file)} time=unlist(dados[,nx]) resp=unlist(dados[,ny]) data1=data.frame(time=time, resp=resp) a=ggplot(data1, aes(x=time))+ geom_line(aes(y=resp),size=size.lty)+ylab(ylab)+xlab(xlab)+ facet_wrap(~as.character(variable))+theme_bw()+ theme(axis.text = element_text(size=size.text,color="black"), strip.text = element_text(size=size.strip,color="black"), axis.title = element_text(size=size.title,color="black")) a }
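# A minimal sketch of calling vartemp() with an in-memory data frame instead
# of an xlsx file (the function accepts either); the values are illustrative.
weather <- data.frame(
  time = seq(as.POSIXct("2021-01-01 08:00"), by = "30 min", length.out = 10),
  temp = c(18, 19, 21, 23, 25, 26, 27, 27, 26, 25)
)
vartemp(weather, nx = 1, ny = 2, variable = "Temperature",
        ylab = "Temperature (C)", xlab = "Time")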
/scratch/gouwar.j/cran-all/cranData/AgroTech/R/vartemp.R
#' AhoCorasickTrie: fast searching for multiple keywords in multiple texts #' #' @docType package #' @name AhoCorasickTrie #' @importFrom Rcpp evalCpp #' @useDynLib AhoCorasickTrie #' @description Builds an Aho-Corasick trie from one or more keywords and uses it to #' search one or more texts. For a large number of keywords, Aho-Corasick is much faster #' than a naive approach (such as \code{lapply(keywords, gregexpr, text)}). NULL
/scratch/gouwar.j/cran-all/cranData/AhoCorasickTrie/R/AhoCorasickTrie.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' Fast searching for one or more keywords in a list of texts #' #' @param keywords Character vector of one or more keywords #' @param textList List of lists, each sublist with one or more texts to search #' @param alphabet Alphabet to use; one of \code{ascii}, \code{aminoacid}, or \code{nucleicacid} #' @param groupByKeyword If true, matches are grouped by keyword (instead of by text) #' @param iterationFeedback When set to a positive integer \code{i}, console output will indicate when searching every \code{i}th text #' @return List of lists of matches, grouped by either text or by keyword (each list of texts gets its own list of matches) #' @description Builds an Aho-Corasick trie from one or more keywords and uses it to search a list of #' one or more texts. For a large number of keywords, Aho-Corasick is much faster #' than a naive approach (such as \code{lapply(keywords, gregexpr, text)}). #' #' Use \code{\link{AhoCorasickSearchList}} instead of \code{\link{AhoCorasickSearch}} when you want to keep the matches #' of each input sublist separate. If the sublists of the input list have names, the resulting list of lists #' will use those names, but sublists with no matches will still be in the resulting list. #' If the texts of the sublists have names, the resulting sublists of matches will use #' those names, and the texts with no matches will be dropped. If the input texts do #' not have names, then the resulting sublists of matches will be in the same order as the #' input texts, and non-matched texts will be kept to preserve that order. Thus, it is more #' efficient to use named input texts (so non-matched texts can be dropped). #' #' The default alphabet allows all 128 ASCII characters in the keywords and the texts. #' Characters outside this range will cause an error. A more efficient trie is possible #' if the alphabet size can be reduced. For example, DNA sequences use at most 19 distinct #' characters and usually only 4; protein sequences use at most 26 distinct characters and #' usually only 20. Set the \code{alphabet} parameter if a reduced alphabet is appropriate. #' #' UTF-8 (Unicode) matching is not currently supported. #' @seealso #' \itemize{ #' \item \href{https://www.codeproject.com/Articles/12383/Aho-Corasick-string-matching-in-C}{Aho-Corasick string matching in C#} for the article this package is based on #' \item \code{\link[Biostrings]{matchPDict}} for a more memory efficient, but DNA-only, implementation of the algorithm #' } #' @examples #' listEquals = function(a, b) { is.null(unlist(a)) && is.null(unlist(b)) || #' !is.null(a) && !is.null(b) && all(unlist(a) == unlist(b)) } #' keywords = c("Abra", "cadabra", "is", "the", "Magic", "Word") #' #' # 1. 
Search a list of lists without names #' # * sublists are accessed by index #' # * texts are accessed by index #' # * non-matched texts are kept (input index order is preserved) #' listSearch = AhoCorasickSearchList(keywords, #' list(c("What in", "the world"), #' c("is"), #' "secret about", #' "the Magic Word?")) #' stopifnot(listEquals(listSearch[[1]][[1]], list())) #' stopifnot(listEquals(listSearch[[1]][[2]][[1]], list(keyword="the", offset=1))) #' stopifnot(listEquals(listSearch[[2]][[1]][[1]], list(keyword="is", offset=1))) #' stopifnot(listEquals(listSearch[[3]], list())) #' stopifnot(listEquals(listSearch[[4]][[1]][[1]], list(keyword="the", offset=1))) #' stopifnot(listEquals(listSearch[[4]][[1]][[2]], list(keyword="Magic", offset=5))) #' stopifnot(listEquals(listSearch[[4]][[1]][[3]], list(keyword="Word", offset=11))) #' #' # 2. Search a named list of named lists #' # * sublists are accessed by name #' # * matched texts are accessed by name #' # * non-matched texts are dropped #' namedSearch = AhoCorasickSearchList(keywords, #' list(subject=c(phrase1="What in", phrase2="the world"), #' verb=c(phrase1="is"), #' predicate1=c(phrase1="secret about"), #' predicate2=c(phrase1="the Magic Word?"))) #' stopifnot(listEquals(namedSearch$subject$phrase2[[1]], list(keyword="the", offset=1))) #' stopifnot(listEquals(namedSearch$verb$phrase1[[1]], list(keyword="is", offset=1))) #' stopifnot(listEquals(namedSearch$predicate1, list())) #' stopifnot(listEquals(namedSearch$predicate2$phrase1[[1]], list(keyword="the", offset=1))) #' stopifnot(listEquals(namedSearch$predicate2$phrase1[[2]], list(keyword="Magic", offset=5))) #' stopifnot(listEquals(namedSearch$predicate2$phrase1[[3]], list(keyword="Word", offset=11))) #' @export AhoCorasickSearchList <- function(keywords, textList, alphabet = "ascii", groupByKeyword = FALSE, iterationFeedback = 0L) { .Call('_AhoCorasickTrie_AhoCorasickSearchList', PACKAGE = 'AhoCorasickTrie', keywords, textList, alphabet, groupByKeyword, iterationFeedback) } #' Fast searching for one or more keywords in one or more texts #' #' @param text Character vector of one or more texts to search #' @inheritParams AhoCorasickSearchList #' @return List of matches, grouped by either text or by keyword #' @description Builds an Aho-Corasick trie from one or more keywords and uses it to #' search one or more texts. For a large number of keywords, Aho-Corasick is much faster #' than a naive approach (such as \code{lapply(keywords, gregexpr, text)}). #' #' Use \code{\link{AhoCorasickSearchList}} instead of \code{\link{AhoCorasickSearch}} when you want to keep the matches #' of each input text separate. If the input texts have names, the resulting list of matches will include those #' names and non-matched texts will be excluded from the results. If the input texts do #' not have names, then the resulting list of matches will be in the same order as the #' input texts, and non-matched texts will be kept to preserve that order. Thus, it is more #' efficient to use named input texts (so non-matched texts can be dropped). #' #' The default alphabet allows all 128 ASCII characters in the keywords and the texts. #' Characters outside this range will cause an error. A more efficient trie is possible #' if the alphabet size can be reduced. For example, DNA sequences use at most 19 distinct #' characters and usually only 4; protein sequences use at most 26 distinct characters and #' usually only 20. Set the \code{alphabet} parameter if a reduced alphabet is appropriate. 
#' #' UTF-8 (Unicode) matching is not currently supported. #' @seealso #' \itemize{ #' \item \href{https://www.codeproject.com/Articles/12383/Aho-Corasick-string-matching-in-C}{Aho-Corasick string matching in C#} for the article this package is based on #' \item \code{\link[Biostrings]{matchPDict}} for a more memory efficient, but DNA-only, implementation of the algorithm #' } #' @examples #' listEquals = function(a, b) { is.null(unlist(a)) && is.null(unlist(b)) || #' !is.null(a) && !is.null(b) && all(unlist(a) == unlist(b)) } #' #' # 1. Search for multiple keywords in a single text #' keywords = c("Abra", "cadabra", "is", "the", "Magic", "Word") #' oneSearch = AhoCorasickSearch(keywords, "Is Abracadabra the Magic Word?") #' stopifnot(listEquals(oneSearch[[1]][[1]], list(keyword="Abra", offset=4))) #' stopifnot(listEquals(oneSearch[[1]][[2]], list(keyword="cadabra", offset=8))) #' stopifnot(listEquals(oneSearch[[1]][[3]], list(keyword="the", offset=16))) #' stopifnot(listEquals(oneSearch[[1]][[4]], list(keyword="Magic", offset=20))) #' stopifnot(listEquals(oneSearch[[1]][[5]], list(keyword="Word", offset=26))) #' #' # 2. Search multiple named texts in a named list with keyword grouping and aminoacid alphabet #' # * all matches to a keyword are accessed by name #' # * non-matched keywords are dropped #' proteins = c(protein1="PEPTIDEPEPTIDEDADADARARARARAKEKEKEKEPEPTIDE", #' protein2="DERPADERPAPEWPEWPEEPEERAWRAWWARRAGTAGPEPTIDEKESEQUENCE") #' peptides = c("PEPTIDE", "DERPA", "SEQUENCE", "KEKE", "PEPPIE") #' #' peptideSearch = AhoCorasickSearch(peptides, proteins, alphabet="aminoacid", groupByKeyword=TRUE) #' stopifnot(listEquals(peptideSearch$PEPTIDE, list(list(keyword="protein1", offset=1), #' list(keyword="protein1", offset=8), #' list(keyword="protein1", offset=37), #' list(keyword="protein2", offset=38)))) #' stopifnot(listEquals(peptideSearch$DERPA, list(list(keyword="protein2", offset=1), #' list(keyword="protein2", offset=6)))) #' stopifnot(listEquals(peptideSearch$SEQUENCE, list(list(keyword="protein2", offset=47)))) #' stopifnot(listEquals(peptideSearch$KEKE, list(list(keyword="protein1", offset=29), #' list(keyword="protein1", offset=31), #' list(keyword="protein1", offset=33)))) #' stopifnot(listEquals(peptideSearch$PEPPIE, NULL)) #' #' # 3. Grouping by keyword without text names: offsets are given without reference to the text #' names(proteins) = NULL #' peptideSearch = AhoCorasickSearch(peptides, proteins, groupByKeyword=TRUE) #' stopifnot(listEquals(peptideSearch$PEPTIDE, list(1, 8, 37, 38))) #' stopifnot(listEquals(peptideSearch$DERPA, list(1, 6))) #' stopifnot(listEquals(peptideSearch$SEQUENCE, list(47))) #' stopifnot(listEquals(peptideSearch$KEKE, list(29, 31, 33))) #' @export AhoCorasickSearch <- function(keywords, text, alphabet = "ascii", groupByKeyword = FALSE, iterationFeedback = 0L) { .Call('_AhoCorasickTrie_AhoCorasickSearch', PACKAGE = 'AhoCorasickTrie', keywords, text, alphabet, groupByKeyword, iterationFeedback) }
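# For comparison, the naive base-R search mentioned in the docs above; fine
# for a few keywords, but much slower than the trie when there are many.
keywords <- c("Abra", "cadabra", "is", "the", "Magic", "Word")
text <- "Is Abracadabra the Magic Word?"
naive <- lapply(keywords, function(k) {
  m <- gregexpr(k, text, fixed = TRUE)[[1]]
  if (m[1] == -1) integer(0) else as.integer(m)
})
names(naive) <- keywords
# naive$Abra is 4 and naive$cadabra is 8, matching AhoCorasickSearch() offsets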
/scratch/gouwar.j/cran-all/cranData/AhoCorasickTrie/R/RcppExports.R
#' #' @docType package #' @name AirMonitor #' @aliases AirMonitor-package #' @title Air Quality Data Analysis #' @description #' \code{ #' Utilities for working with hourly air quality monitoring data #' with a focus on small particulates (PM2.5). A compact data model is #' structured as a list with two dataframes. A 'meta' dataframe contains #' spatial and measuring device metadata associated with deployments at known #' locations. A 'data' dataframe contains a 'datetime' column followed by #' columns of measurements associated with each "device-deployment". #' } NULL # ----- Internal Data ------------------------------------------------- #' coreMetadataNames #' #' @export #' @docType data #' @name coreMetadataNames #' @title Names of standard metadata columns #' @format A vector of character strings #' @description Vector of names of the required \code{monitor$meta} columns. #' These represent metadata columns that must exist in every valid #' \emph{mts_monitor} object. Any number of additional columns may also be present. #' @examples #' print(coreMetadataNames, width = 80) # NOTE: AirNow units include: # NOTE: "C", "DEGREES", "KNOTS", "M/S", "MILLIBAR", "MM", # NOTE: "PERCENT", "PPB", "PPM", "UG/M3", "WATTS/M2" coreMetadataNames <- c( # Specific to AirMonitor "deviceDeploymentID", # -- timeseries unique identifier "deviceID", # -- device unique identifier "deviceType", # -- internally-standardized identifier for the type of device (e.g. EBAM, ESAM, PA, ...) "deviceDescription", # -- human readable device details "deviceExtra", # -- extra device information (possibly as JSON) "pollutant", # -- one of "OZONE|CO|NO2|PM2.5|PM10" "units", # -- one of "PPM|PPB|UG/M3" "dataIngestSource", # -- internally-standardized identifier for the data source (e.g. AIRNOW, WRCC, ...) "dataIngestURL", # -- top level URL "dataIngestUnitID", # -- unique identifier used to extract data from the URL "dataIngestExtra", # -- extra data ingest information (possibly as JSON) "dataIngestDescription", # -- human readable data ingest details # Defined in MazamaLocationUtils "locationID", # -- location unique identifier "locationName", # -- human readable location name "longitude", # -- "latitude", # -- "elevation", # -- "countryCode", # -- ISO 3166-1 alpha-2 "stateCode", # -- ISO 3166-2 alpha-2 "countyName", # -- "timezone", # -- Olson time zone "houseNumber", # -- "street", # -- "city", # -- "zip", # -- # Extras "AQSID", # -- EPA AQS site identifier (widely used for North American air quality data) "fullAQSID" # -- Updated, scalable and future-oriented EPA unique identifier ) #' pollutantNames #' #' @export #' @docType data #' @name pollutantNames #' @title Names of standard pollutants #' @format A vector of character strings #' @description Character string identifiers of recognized pollutant names. #' @examples #' print(pollutantNames, width = 80) pollutantNames <- c( "PM2.5", "AQI", "CO", "NO", "OZONE", "PM10", "SO2" ) #' AirFire_S3_archiveBaseUrl #' #' @export #' @docType data #' @name AirFire_S3_archiveBaseUrl #' @title USFS maintained archive base URL #' @format A URL #' @description The US Forest Service AirFire group maintains an archive of #' processed monitoring data. The base URL for this archive is used as the #' default in all \code{~_load()} functions.
#' #' \preformatted{ #' "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2" #' } AirFire_S3_archiveBaseUrl <- "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2" # ----- State codes ----------------------------------------------------------- #' CONUS state codes #' #' @export #' @docType data #' @name CONUS #' @title CONUS state codes #' @format A vector with 49 elements #' @description #' State codes for the 48 contiguous states +DC that make up the CONtinental US. #' #' \code{ #' CONUS <- c( #' "AL","AZ","AR","CA","CO","CT","DE","FL","GA", #' "ID","IL","IN","IA","KS","KY","LA","ME","MD", #' "MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ", #' "NM","NY","NC","ND","OH","OK","OR","PA","RI","SC", #' "SD","TN","TX","UT","VT","VA","WA","WV","WI","WY", #' "DC" #' ) #' } CONUS <- c( "AL","AZ","AR","CA","CO","CT","DE","FL","GA", "ID","IL","IN","IA","KS","KY","LA","ME","MD", "MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ", "NM","NY","NC","ND","OH","OK","OR","PA","RI","SC", "SD","TN","TX","UT","VT","VA","WA","WV","WI","WY", "DC" ) #' US state codes #' #' @export #' @docType data #' @name US_52 #' @title US state codes #' @format A vector with 52 elements #' @description #' State codes for the 50 states +DC +PR (Puerto Rico). #' #' \code{ #' US_52 <- c( #' "AK","AL","AZ","AR","CA","CO","CT","DE","FL","GA", #' "HI","ID","IL","IN","IA","KS","KY","LA","ME","MD", #' "MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ", #' "NM","NY","NC","ND","OH","OK","OR","PA","RI","SC", #' "SD","TN","TX","UT","VT","VA","WA","WV","WI","WY", #' "DC","PR" #' ) #' } US_52 <- c( "AK","AL","AZ","AR","CA","CO","CT","DE","FL","GA", "HI","ID","IL","IN","IA","KS","KY","LA","ME","MD", "MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ", "NM","NY","NC","ND","OH","OK","OR","PA","RI","SC", "SD","TN","TX","UT","VT","VA","WA","WV","WI","WY", "DC","PR" ) # ----- AQI categories --------------------------------------------------------- #' AQI breaks and associated names and colors #' #' @export #' @docType data #' @name US_AQI #' @title US EPA AQI Index levels, names, colors and action text #' @format A list with named elements #' @description #' Official US EPA AQI levels, names, colors and action text are provided in a #' list for easy coloring and labeling. #' #' @section Breaks: #' #' Breakpoints are given in units reported for each parameter and include: #' \itemize{ #' \item{\code{breaks_AQI}} #' \item{\code{breaks_CO}} #' \item{\code{breaks_NO2}} #' \item{\code{breaks_OZONE_1hr}} #' \item{\code{breaks_OZONE_8hr}} #' \item{\code{breaks_PM2.5}} #' \item{\code{breaks_PM10}} #' } #' #' @section Colors: #' #' Several different color palettes are provided: #' \itemize{ #' \item{\code{colors_EPA} -- official EPA AQI colors} #' \item{\code{colors_subdued} -- subdued colors for use with leaflet maps} #' \item{\code{colors_deuteranopia} -- color vision impaired colors} #' } #' #' @section Names: #' #' Names of AQI categories are provided in several languages identified by the #' ISO 639-2 alpha-3 code: #' \itemize{ #' \item{\code{names_eng}} #' \item{\code{names_spa}} #' } #' #' @section Actions: #' #' Text for "actions to protect yourself" are provided for each #' category in several languages identified by the #' ISO 639-2 alpha-3 code: #' \itemize{ #' \item{\code{actions_eng}} #' \item{\code{actions_spa}} #' } #' #' Currently supported languages include English (eng) and Spanish (spa).
#' #' AQI breaks are defined at #' \url{https://www.airnow.gov/sites/default/files/2020-05/aqi-technical-assistance-document-sept2018.pdf} #' and are given in units appropriate for each pollutant. #' #' AQI colors are defined at \url{https://docs.airnowapi.org/aq101} #' @note #' The low end of each break category is used as the breakpoint. #' #' @examples #' print(US_AQI$breaks_AQI) #' print(US_AQI$colors_EPA) #' print(US_AQI$names_eng) #' print(US_AQI$names_spa) US_AQI <- list( # NOTE: We must have default breaks with just the parameter name # Breaks for all supported parameters breaks_AQI = c(-Inf, 50, 100, 150, 200, 300, Inf), breaks_CO = c(-Inf, 4.5, 9.5, 12.5, 15.5, 30.5, Inf), breaks_NO2 = c(-Inf, 54, 101, 361, 650, 2501, Inf), breaks_OZONE = c(-Inf, 0, .125, .165, .205, .405, Inf), # Using OZONE_1hr breaks_PM2.5 = c(-Inf, 12, 35.5, 55.5, 150.5, 250.5, Inf), # Using PM2.5_24hr breaks_PM2.5_2024 = c(-Inf, 9, 35, 55, 125, 225, Inf), # https://www.epa.gov/system/files/documents/2024-02/pm-naaqs-air-quality-index-fact-sheet.pdf breaks_PM10 = c(-Inf, 55, 155, 255, 355, 425, Inf), # Special breaks breaks_OZONE_1hr = c(-Inf, 0, .125, .165, .205, .405, Inf), # GOOD, MOD undefined at EPA breaks_OZONE_8hr = c(-Inf, .055, .071, .086, .106, .405, Inf), # HAZ undefined at EPA # Official EPA colors colors_EPA = c( grDevices::rgb(0,228/255,0), grDevices::rgb(255/255,255/255,0), grDevices::rgb(255/255,126/255,0), grDevices::rgb(255/255,0,0), grDevices::rgb(143/255,63/255,151/255), grDevices::rgb(126/255,0,35/255) ), # Subdued colors used by USFS AirFire Monitoring (Mv4) site colors_subdued = c("#2ecc71", "#f1c40f", "#e67e22", "#e74c3c", "#9b59b6", "#8c3a3a"), # Color vision impaired colors recommended by Mazama Science colors_deuteranopia = c("#8cddf5", "#ffef00", "#f7921f", "#ed1d24", "#a3064b", "#6d0526"), # Names in different languages names_eng = c('Good', 'Moderate', 'USG', 'Unhealthy', 'Very Unhealthy', 'Hazardous'), names_spa = c('Buena', 'Moderada', 'IGS', 'Insalubre', 'Muy insalubre', 'Peligrosa'), # Action text in different languages # NOTE: R packages require that unicode characters be escaped. actions_eng = c( 'None.', 'Unusually sensitive individuals should consider limiting prolonged or heavy exertion.', 'People within Sensitive Groups should reduce prolonged or heavy outdoor exertion.', 'People within Sensitive Groups should avoid all physical outdoor activity.', 'Everyone should avoid prolonged or heavy exertion.', 'Everyone should avoid any outdoor activity.' ), actions_spa = c( 'Ninguna.', 'Personas inusualmente sensitivas deber\\u00edan considerar limitar la labor prolongada \\u00f3 intensa.', 'Personas dentro de los grupos sensitivos deben reducir la labor prolongada \\u00f3 intensa al aire libre.', 'Personas dentro de los grupos sensitivos deben evitar toda actividad f\\u00edsica al aire libre.', 'Todos deben evitar la labor prolongada \\u00f3 intensa.', 'Todos deben evitar cualquier actividad al aire libre.' ) )
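# A minimal sketch (not a package function) of applying the PM2.5 breakpoints
# above with base::cut(); right = FALSE reflects the note that the low end of
# each category is used as the breakpoint.
pm25 <- c(8, 20, 45, 90, 180, 300)
aqiCategory <- cut(
  pm25,
  breaks = US_AQI$breaks_PM2.5,
  labels = US_AQI$names_eng,
  right = FALSE
)
# Good, Moderate, USG, Unhealthy, Very Unhealthy, Hazardous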
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/AirMonitor.R
#' @export #' @title Add an AQI legend to a map #' @param x x Coordinate passed on to the \code{legend()} command. #' @param y y Coordinate passed on to the \code{legend()} command. #' @param pollutant EPA AQS criteria pollutant. #' @param palette Named color palette to use for AQI categories. #' @param languageCode ISO 639-2 alpha-3 language code. #' @param ... Additional arguments to be passed to \code{legend()}. #' @param NAAQS Version of NAAQS levels to use. See Note. #' #' @description This function is a convenience wrapper around #' \code{graphics::legend()}. It will show the AQI colors and #' names by default if \code{col} and \code{legend} are not specified. #' #' AQI categories are arranged with lower levels at the bottom of the legend #' to match the arrangement in the plot. This is different from the default #' "reading order" so you may wish to reverse the order of user supplied #' arguments with \code{rev()}. #' @return A list with components \code{rect} and \code{text} is returned #' invisibly. (See \link{legend}.) #' #' @note #' On February 7, 2024, EPA strengthened the National Ambient Air Quality #' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans #' from harmful and costly health impacts, such as heart attacks and premature #' death. Particle or soot pollution is one of the most dangerous forms of air #' pollution, and an extensive body of science links it to a range of serious #' and sometimes deadly illnesses. EPA is setting the level of the primary #' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to #' provide increased public health protection, consistent with the available #' health science. #' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}. #' addAQILegend <- function( x = "topright", y = NULL, pollutant = c("PM2.5", "CO", "OZONE", "PM10", "AQI"), palette = c("EPA", "subdued", "deuteranopia"), languageCode = c("eng", "spa"), NAAQS = c("PM2.5", "PM2.5_2024"), ... ) { # ----- Validate parameters -------------------------------------------------- pollutant <- match.arg(pollutant) palette <- match.arg(palette) languageCode <- match.arg(languageCode) NAAQS <- match.arg(NAAQS) colors <- US_AQI[[paste0("colors_", palette)]] names <- US_AQI[[paste0("names_", languageCode)]] # Handle the added NAAQS argument if ( pollutant == "PM2.5" && NAAQS == "PM2.5_2024" ) { breaks <- US_AQI$breaks_PM2.5_2024 } # ----- Create argsList ------------------------------------------------------ argsList <- list(...) argsList$x <- x argsList$y <- y if ( !("col" %in% names(argsList)) ) { argsList$col <- rev(colors) # Lower levels on the bottom } if ( !("legend" %in% names(argsList)) ) { argsList$legend <- rev(names) # Lower levels on the bottom } if ( !("pch" %in% names(argsList)) ) { argsList$pch <- 16 } if ( !("title" %in% names(argsList)) ) argsList$title <- paste0(pollutant, " Air Quality Index") do.call(legend, argsList) }
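# Usage sketch: addAQILegend() wraps graphics::legend(), so it can be applied
# to any open base plot (illustrative data below).
pm25 <- c(5, 18, 42, 80, 160, 260)
plot(seq_along(pm25), pm25, pch = 16, xlab = "hour", ylab = "PM2.5 (ug/m3)")
addAQILegend("topleft", pollutant = "PM2.5")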
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/addAQILegend.R
#' @export #' @title Add AQI lines to a plot #' @param pollutant EPA AQS criteria pollutant. #' @param palette Named color palette to use for AQI categories. #' @param ... additional arguments to be passed to \code{abline()} #' @param NAAQS Version of NAAQS levels to use. See Note. #' #' @description Draws AQI lines across a plot at the levels appropriate for #' \code{pollutant}. The \link{monitor_timeseriesPlot} function uses this function internally when #' specifying \code{addAQI = TRUE}. #' @return No return value, called to add lines to a time series plot. #' #' @note #' On February 7, 2024, EPA strengthened the National Ambient Air Quality #' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans #' from harmful and costly health impacts, such as heart attacks and premature #' death. Particle or soot pollution is one of the most dangerous forms of air #' pollution, and an extensive body of science links it to a range of serious #' and sometimes deadly illnesses. EPA is setting the level of the primary #' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to #' provide increased public health protection, consistent with the available #' health science. #' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}. #' addAQILines <- function( pollutant = c("PM2.5", "CO", "OZONE", "PM10", "AQI"), palette = c("EPA", "subdued", "deuteranopia"), NAAQS = c("PM2.5", "PM2.5_2024"), ... ) { pollutant <- match.arg(pollutant) palette <- match.arg(palette) NAAQS <- match.arg(NAAQS) breaks <- US_AQI[[paste0("breaks_", pollutant)]] colors <- US_AQI[[paste0("colors_", palette)]] # Handle the added NAAQS argument if ( pollutant == "PM2.5" && NAAQS == "PM2.5_2024" ) { breaks <- US_AQI$breaks_PM2.5_2024 } graphics::abline( h = breaks, col = colors, ... ) # NOTE: Most breaks begin with -Inf so we add a zero line here graphics::abline(h = 0, col = colors[1]) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/addAQILines.R
#' @export #' @title Create stacked AQI bar #' @param pollutant EPA AQS criteria pollutant. #' @param width Width of the bar as a fraction of the width of the plot area. #' @param height Height of the bar as a fraction of the height of the plot area. #' @param pos Position of the stacked bar relative to the plot. #' @param palette Named color palette to use for AQI categories. #' @param NAAQS Version of NAAQS levels to use. See Note. #' #' @description Draws a stacked bar indicating AQI levels on one side of a plot. #' The \link{monitor_timeseriesPlot} function uses this function internally when #' specifying \code{addAQI = TRUE}. #' @return No return value, called to add color bars to a time series plot. #' #' @note #' On February 7, 2024, EPA strengthened the National Ambient Air Quality #' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans #' from harmful and costly health impacts, such as heart attacks and premature #' death. Particle or soot pollution is one of the most dangerous forms of air #' pollution, and an extensive body of science links it to a range of serious #' and sometimes deadly illnesses. EPA is setting the level of the primary #' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to #' provide increased public health protection, consistent with the available #' health science. #' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}. #' addAQIStackedBar <- function( pollutant = c("PM2.5", "CO", "OZONE", "PM10", "AQI"), palette = c("EPA", "subdued", "deuteranopia"), width = .01, height = 1, pos = c("left", "right"), NAAQS = c("PM2.5", "PM2.5_2024") ) { pollutant <- match.arg(pollutant) pos <- match.arg(pos) palette <- match.arg(palette) NAAQS <- match.arg(NAAQS) usr <- par("usr") if (pos == "right") { l <- usr[2] - width*(usr[2] - usr[1]) r <- usr[2] } else if (pos == "left") { l <- usr[1] r <- usr[1] + width*(usr[2] - usr[1]) } breaks <- US_AQI[[paste0("breaks_", pollutant)]] colors <- US_AQI[[paste0("colors_", palette)]] # Handle the added NAAQS argument if ( pollutant == "PM2.5" && NAAQS == "PM2.5_2024" ) { breaks <- US_AQI$breaks_PM2.5_2024 } for (i in 1:6) { rect( xleft = l, ybottom = min(max(0, breaks[i]), height*usr[4]), xright = r, ytop = min(breaks[i + 1], height*usr[4]), col = colors[i], xpd = NA, border = NA ) } }
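# Usage sketch combining the annotation helpers above on a base plot with
# illustrative data; with monitor objects, monitor_timeseriesPlot(addAQI = TRUE)
# calls these internally.
pm25 <- c(5, 18, 42, 80, 160, 260)
plot(seq_along(pm25), pm25, pch = 16, ylim = c(0, 300),
     xlab = "hour", ylab = "PM2.5 (ug/m3)")
addAQILines(pollutant = "PM2.5", lty = "dashed")
addAQIStackedBar(pollutant = "PM2.5", pos = "left")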
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/addAQIStackedBar.R
#' @export #' @importFrom grDevices adjustcolor #' @importFrom graphics rect par #' @title Add nighttime shading to a timeseries plot #' @param timeInfo dataframe as returned by \code{MazamaTimeSeries::monitor_timeInfo()} #' @param col Color used to shade nights. #' @description Draw shading rectangles on a plot to indicate nighttime hours. #' The \link{monitor_timeseriesPlot} function uses this function internally when #' specifying \code{shadedNight = TRUE}. #' @return No return value, called to add day/night shading to a timeseries plot. addShadedNight <- function( timeInfo, col = adjustcolor('black', 0.1) ) { # ----- Validate parameters -------------------------------------------------- localTime <- timeInfo$localTime sunrise <- timeInfo$sunrise[!duplicated(timeInfo$sunrise)] sunset <- timeInfo$sunset[!duplicated(timeInfo$sunset)] # Sanity check if ( any(sunset < sunrise) ) stop("sunset before sunrise???") # ----- Shaded nights -------------------------------------------------------- # Left edge to first sunrise if ( localTime[1] < sunrise[1] ) { rect( xleft = par('usr')[1], ybottom = par('usr')[3], xright = sunrise[1], ytop = par('usr')[4], col = col, border = NA ) } # Complete nights if ( length(sunset) > 1 ) { for ( i in seq(length(sunset) - 1) ) { rect( xleft = sunset[i], ybottom = par('usr')[3], xright = sunrise[i + 1], ytop = par('usr')[4], col = col, border = NA ) } } # Last sunset to right edge rect( xleft = sunset[length(sunset)], ybottom = par('usr')[3], xright = par('usr')[2], ytop = par('usr')[4], col = col, border = NA ) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/addShadedNight.R
#' @export #' @importFrom dplyr across #' #' @title Load annual AirNow monitoring data #' #' @param year Year [YYYY]. #' @param archiveBaseUrl Base URL for monitoring v2 data files. #' @param archiveBaseDir Local base directory for monitoring v2 data files. #' @param QC_negativeValues Type of QC to apply to negative values. #' @param parameterName One of the EPA AQS criteria parameter names. #' #' @return A \emph{mts_monitor} object with AirNow data. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Loads pre-generated .rda files containing hourly AirNow data. #' #' If \code{archiveBaseDir} is defined, data will be loaded from this local #' archive. Otherwise, data will be loaded from the monitoring data repository #' maintained by the USFS AirFire team. #' #' The files loaded by this function contain a single year's worth of data. #' #' For the most recent data in the last 10 days, use \code{airnow_loadLatest()}. #' #' For daily updates covering the most recent 45 days, use \code{airnow_loadDaily()}. #' #' For archival data for a specific month, use \code{airnow_loadMonthly()}. #' #' Pre-processed AirNow data exists for the following parameters: #' \enumerate{ # #' \item{BARPR} # #' \item{BC} # #' \item{CO} # #' \item{NO} # #' \item{NO2} # #' \item{NO2Y} # #' \item{NO2X} # #' \item{NOX} # #' \item{NOOY} # #' \item{OC} # #' \item{OZONE} # #' \item{PM10} #' \item{PM2.5} # #' \item{PM2.5_nowcast} # #' \item{PRECIP} # #' \item{RHUM} # #' \item{SO2} # #' \item{SRAD} # #' \item{TEMP} # #' \item{UV-AETH} # #' \item{WD} # #' \item{WS} #' } #' #' @seealso \code{\link{airnow_loadDaily}} #' @seealso \code{\link{airnow_loadLatest}} #' @seealso \code{\link{airnow_loadMonthly}} #' #' @examples #' \dontrun{ #' library(AirMonitor) #' #' # Fail gracefully if any resources are not available #' try({ #' #' # See https://en.wikipedia.org/wiki/2017_Montana_wildfires #' #' # Daily Barplot of Montana wildfires #' airnow_loadAnnual(2017) \%>\% #' monitor_filter(stateCode == "MT") \%>\% #' monitor_filterDate(20170701, 20170930, timezone = "America/Denver") \%>\% #' monitor_dailyStatistic() \%>\% #' monitor_timeseriesPlot( #' ylim = c(0, 300), #' xpd = NA, #' addAQI = TRUE, #' main = "Montana 2017 -- AirNow Daily Average PM2.5" #' ) #' #' }, silent = FALSE) #' } airnow_loadAnnual <- function( year = NULL, archiveBaseUrl = paste0( "https://airfire-data-exports.s3.us-west-2.amazonaws.com/", "monitoring/v2" ), archiveBaseDir = NULL, QC_negativeValues = c("zero", "na", "ignore"), parameterName = "PM2.5" ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(year) MazamaCoreUtils::stopIfNull(parameterName) QC_negativeValues <- match.arg(QC_negativeValues) if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) ) stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined") # Parameter code validParameterNames <- c( # "BARPR", # "BC", # "CO", # "NO", # "NO2", # "NO2Y", # "NO2X", # "NOX", # "NOOY", # "OC", # "OZONE", # "PM10", "PM2.5" # "PM2.5_nowcast" # "PRECIP", # "RHUM", # "SO2", # "SRAD", # "TEMP", # "UV-AETH", # "WD", # "WS" ) parameterName <- as.character(parameterName) if ( !parameterName %in% validParameterNames ) { stop(sprintf( "data for parameterName '%s' has not been processed", parameterName )) } # ----- Load data ------------------------------------------------------------ # Create file name and path according to the AirMonitorIngest scheme if ( is.null(archiveBaseUrl) ) { dataUrl <- NULL } else { dataUrl <-
file.path(archiveBaseUrl, "airnow", year, "data") } if ( is.null(archiveBaseDir) ) { dataDir <- NULL } else { dataDir <- file.path(archiveBaseDir, "airnow", year, "data") } metaFileName <- sprintf("airnow_%s_%s_meta.rda", parameterName, year) dataFileName <- sprintf("airnow_%s_%s_data.rda", parameterName, year) meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir) data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir) # Guarantee that 'meta' and 'data' match ids <- names(data)[-1] meta <- meta %>% dplyr::filter(.data$deviceDeploymentID %in% ids) # Guarantee presence of fullAQSID if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_ data <- data %>% dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>% # Replace any NaN that snuck in dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric), function(x) ifelse(is.nan(x), NA, x))) # Create monitor object monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) MazamaTimeSeries::mts_check(monitor) # ----- Apply QC ------------------------------------------------------------- if ( QC_negativeValues == "zero" ) { monitor <- monitor_replaceValues(monitor, data < 0, 0) } else if ( QC_negativeValues == "na" ) { monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA)) } # ----- Return --------------------------------------------------------------- return(monitor) }
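# A sketch of the archive layout assembled above (no download is performed
# here; files are only fetched inside the loader).
year <- 2017
parameterName <- "PM2.5"
dataUrl <- file.path(AirFire_S3_archiveBaseUrl, "airnow", year, "data")
file.path(dataUrl, sprintf("airnow_%s_%s_meta.rda", parameterName, year))
# ".../monitoring/v2/airnow/2017/data/airnow_PM2.5_2017_meta.rda"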
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/airnow_loadAnnual.R
#' @export #' @importFrom dplyr across #' #' @title Load daily AirNow monitoring data #' #' @param parameterName One of the EPA AQS criteria parameter names. #' @param archiveBaseUrl Base URL for monitoring v2 data files. #' @param archiveBaseDir Local base directory for monitoring v2 data files. #' @param QC_negativeValues Type of QC to apply to negative values. #' #' @return A \emph{mts_monitor} object with AirNow data. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Loads pre-generated .rda files containing hourly #' AirNow data. #' #' If \code{archiveBaseDir} is defined, data will be loaded from this local #' archive. Otherwise, data will be loaded from the monitoring data repository #' maintained by the USFS AirFire team. #' #' The files loaded by this function are updated once per day and #' contain data for the previous 45 days. #' #' For the most recent data in the last 10 days, use \code{airnow_loadLatest()}. #' #' For data extending more than 45 days into the past, use \code{airnow_loadAnnual()}. #' #' Pre-processed AirNow data exists for the following parameters: #' \enumerate{ # #' \item{BARPR} # #' \item{BC} # #' \item{CO} # #' \item{NO} # #' \item{NO2} # #' \item{NO2Y} # #' \item{NO2X} # #' \item{NOX} # #' \item{NOOY} # #' \item{OC} # #' \item{OZONE} # #' \item{PM10} #' \item{PM2.5} #' \item{PM2.5_nowcast} # #' \item{PRECIP} # #' \item{RHUM} # #' \item{SO2} # #' \item{SRAD} # #' \item{TEMP} # #' \item{UV-AETH} # #' \item{WD} # #' \item{WS} #' } #' #' @seealso \code{\link{airnow_loadAnnual}} #' @seealso \code{\link{airnow_loadLatest}} #' @seealso \code{\link{airnow_loadMonthly}} #' #' @examples #' \dontrun{ #' library(AirMonitor) #' #' # Fail gracefully if any resources are not available #' try({ #' #' airnow_loadDaily() \%>\% #' monitor_filter(stateCode == "WA") \%>\% #' monitor_leaflet() #' #' }, silent = FALSE) #' } airnow_loadDaily <- function( archiveBaseUrl = paste0( "https://airfire-data-exports.s3.us-west-2.amazonaws.com/", "monitoring/v2" ), archiveBaseDir = NULL, QC_negativeValues = c("zero", "na", "ignore"), parameterName = "PM2.5" ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(parameterName) QC_negativeValues <- match.arg(QC_negativeValues) if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) ) stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined") # Parameter code validParameterNames <- c( # "BARPR", # "BC", # "CO", # "NO", # "NO2", # "NO2Y", # "NO2X", # "NOX", # "NOOY", # "OC", # "OZONE", # "PM10", "PM2.5", "PM2.5_nowcast" # "PRECIP", # "RHUM", # "SO2", # "SRAD", # "TEMP", # "UV-AETH", # "WD", # "WS" ) parameterName <- as.character(parameterName) if ( !parameterName %in% validParameterNames ) { stop(sprintf( "data for parameterName '%s' has not been processed", parameterName )) } # ----- Load data ------------------------------------------------------------ # Create file name and path according to the AirMonitorIngest scheme if ( is.null(archiveBaseUrl) ) { dataUrl <- NULL } else { dataUrl <- file.path(archiveBaseUrl, "daily/data") } if ( is.null(archiveBaseDir) ) { dataDir <- NULL } else { dataDir <- file.path(archiveBaseDir, "daily/data") } metaFileName <- sprintf("airnow_%s_daily_meta.rda", parameterName) dataFileName <- sprintf("airnow_%s_daily_data.rda", parameterName) meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir) data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir) # Guarantee that 'meta' and 'data' match ids <-
names(data)[-1] meta <- meta %>% dplyr::filter(.data$deviceDeploymentID %in% ids) # Guarantee presence of fullAQSID if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_ data <- data %>% dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>% # Replace any NaN that snuck in dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric), function(x) ifelse(is.nan(x), NA, x))) # Create monitor object monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) MazamaTimeSeries::mts_check(monitor) # ----- Apply QC ------------------------------------------------------------- if ( QC_negativeValues == "zero" ) { monitor <- monitor_replaceValues(monitor, data < 0, 0) } else if ( QC_negativeValues == "na" ) { monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA)) } # ----- Return --------------------------------------------------------------- return(monitor) } # ===== DEBUG ================================================================== if ( FALSE ) { parameterName <- "PM2.5" archiveBaseUrl <- "https://data-monitoring1.airfire.org/monitoring-v2" archiveBaseDir <- NULL QC_negativeValues = "zero" }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/airnow_loadDaily.R
#' @export #' @importFrom dplyr across #' #' @title Load most recent AirNow monitoring data #' #' @param parameterName One of the EPA AQS criteria parameter names. #' @param archiveBaseUrl Base URL for monitoring v2 data files. #' @param archiveBaseDir Local base directory for monitoring v2 data files. #' @param QC_negativeValues Type of QC to apply to negative values. #' #' @return A \emph{mts_monitor} object with AirNow data. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Loads pre-generated .rda files containing the most recent #' AirNow data. #' #' If \code{archiveBaseDir} is defined, data will be loaded from this local #' archive. Otherwise, data will be loaded from the monitoring data repository #' maintained by the USFS AirFire team. #' #' The files loaded by this function are updated multiple times an hour and #' contain data for the previous 10 days. #' #' For daily updates covering the most recent 45 days, use \code{airnow_loadDaily()}. #' #' For data extending more than 45 days into the past, use \code{airnow_loadAnnual()}. #' #' Pre-processed AirNow data exists for the following parameters: #' \enumerate{ # #' \item{BARPR} # #' \item{BC} # #' \item{CO} # #' \item{NO} # #' \item{NO2} # #' \item{NO2Y} # #' \item{NO2X} # #' \item{NOX} # #' \item{NOOY} # #' \item{OC} # #' \item{OZONE} # #' \item{PM10} #' \item{PM2.5} #' \item{PM2.5_nowcast} # #' \item{PRECIP} # #' \item{RHUM} # #' \item{SO2} # #' \item{SRAD} # #' \item{TEMP} # #' \item{UV-AETH} # #' \item{WD} # #' \item{WS} #' } #' #' @seealso \code{\link{airnow_loadAnnual}} #' @seealso \code{\link{airnow_loadDaily}} #' @seealso \code{\link{airnow_loadMonthly}} #' #' @examples #' \dontrun{ #' library(AirMonitor) #' #' # Fail gracefully if any resources are not available #' try({ #' #' airnow_loadLatest() \%>\% #' monitor_filter(stateCode == "WA") \%>\% #' monitor_leaflet() #' #' }, silent = FALSE) #' } airnow_loadLatest <- function( archiveBaseUrl = paste0( "https://airfire-data-exports.s3.us-west-2.amazonaws.com/", "monitoring/v2" ), archiveBaseDir = NULL, QC_negativeValues = c("zero", "na", "ignore"), parameterName = "PM2.5" ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(parameterName) QC_negativeValues <- match.arg(QC_negativeValues) if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) ) stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined") # Parameter code validParameterNames <- c( # "BARPR", # "BC", # "CO", # "NO", # "NO2", # "NO2Y", # "NO2X", # "NOX", # "NOOY", # "OC", # "OZONE", # "PM10", "PM2.5", "PM2.5_nowcast" # "PRECIP", # "RHUM", # "SO2", # "SRAD", # "TEMP", # "UV-AETH", # "WD", # "WS" ) parameterName <- as.character(parameterName) if ( !parameterName %in% validParameterNames ) { stop(sprintf( "data for parameterName '%s' has not been processed", parameterName )) } # ----- Load data ------------------------------------------------------------ # Create file name and path according to the AirMonitorIngest scheme if ( is.null(archiveBaseUrl) ) { dataUrl <- NULL } else { dataUrl <- file.path(archiveBaseUrl, "latest/data") } if ( is.null(archiveBaseDir) ) { dataDir <- NULL } else { dataDir <- file.path(archiveBaseDir, "latest/data") } metaFileName <- sprintf("airnow_%s_latest_meta.rda", parameterName) dataFileName <- sprintf("airnow_%s_latest_data.rda", parameterName) meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir) data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir) # Guarantee that
'meta' and 'data' match ids <- names(data)[-1] meta <- meta %>% dplyr::filter(.data$deviceDeploymentID %in% ids) # Guarantee presence of fullAQSID if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_ data <- data %>% dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>% # Replace any NaN that snuck in dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric), function(x) ifelse(is.nan(x), NA, x))) # Create monitor object monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) MazamaTimeSeries::mts_check(monitor) # ----- Apply QC ------------------------------------------------------------- if ( QC_negativeValues == "zero" ) { monitor <- monitor_replaceValues(monitor, data < 0, 0) } else if ( QC_negativeValues == "na" ) { monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA)) } # ----- Return --------------------------------------------------------------- return(monitor) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/airnow_loadLatest.R
#' @export #' @importFrom dplyr across #' #' @title Load monthly AirNow monitoring data #' #' @param monthStamp Year-month [YYYYmm]. #' @param parameterName One of the EPA AQS criteria parameter names. #' @param archiveBaseUrl Base URL for monitoring v2 data files. #' @param archiveBaseDir Local base directory for monitoring v2 data files. #' @param QC_negativeValues Type of QC to apply to negative values. #' #' @return A \emph{mts_monitor} object with AirNow data. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Loads pre-generated .rda files containing hourly AirNow data. #' #' If \code{archiveBaseDir} is defined, data will be loaded from this local #' archive. Otherwise, data will be loaded from the monitoring data repository #' maintained by the USFS AirFire team. #' #' The files loaded by this function contain a single month's worth of data. #' #' For the most recent data in the last 10 days, use \code{airnow_loadLatest()}. #' #' For daily updates covering the most recent 45 days, use \code{airnow_loadDaily()}. #' #' For data extending more than 45 days into the past, use \code{airnow_loadAnnual()}. #' #' Pre-processed AirNow data exists for the following parameters: #' #' \enumerate{ # #' \item{BARPR} # #' \item{BC} # #' \item{CO} # #' \item{NO} # #' \item{NO2} # #' \item{NO2Y} # #' \item{NO2X} # #' \item{NOX} # #' \item{NOOY} # #' \item{OC} # #' \item{OZONE} # #' \item{PM10} #' \item{PM2.5} # #' \item{PM2.5_nowcast} # #' \item{PRECIP} # #' \item{RHUM} # #' \item{SO2} # #' \item{SRAD} # #' \item{TEMP} # #' \item{UV-AETH} # #' \item{WD} # #' \item{WS} #' } airnow_loadMonthly <- function( monthStamp = NULL, archiveBaseUrl = paste0( "https://airfire-data-exports.s3.us-west-2.amazonaws.com/", "monitoring/v2" ), archiveBaseDir = NULL, QC_negativeValues = c("zero", "na", "ignore"), parameterName = "PM2.5" ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(monthStamp) result <- MazamaCoreUtils::parseDatetime(monthStamp, timezone = "UTC") year <- stringr::str_sub(monthStamp, 1, 4) MazamaCoreUtils::stopIfNull(parameterName) QC_negativeValues <- match.arg(QC_negativeValues) if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) ) stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined") # Parameter code validParameterNames <- c( # "BARPR", # "BC", # "CO", # "NO", # "NO2", # "NO2Y", # "NO2X", # "NOX", # "NOOY", # "OC", # "OZONE", # "PM10", "PM2.5" # "PM2.5_nowcast" # "PRECIP", # "RHUM", # "SO2", # "SRAD", # "TEMP", # "UV-AETH", # "WD", # "WS" ) parameterName <- as.character(parameterName) if ( !parameterName %in% validParameterNames ) { stop(sprintf( "data for parameterName '%s' has not been processed", parameterName )) } # ----- Load data ------------------------------------------------------------ # Create file name and path according to the AirMonitorIngest scheme if ( is.null(archiveBaseUrl) ) { dataUrl <- NULL } else { dataUrl <- file.path(archiveBaseUrl, "airnow", year, "data") } if ( is.null(archiveBaseDir) ) { dataDir <- NULL } else { dataDir <- file.path(archiveBaseDir, "airnow", year, "data") } metaFileName <- sprintf("airnow_%s_%s_meta.rda", parameterName, monthStamp) dataFileName <- sprintf("airnow_%s_%s_data.rda", parameterName, monthStamp) meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir) data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir) # Guarantee that 'meta' and 'data' match ids <- names(data)[-1] meta <- meta %>%
dplyr::filter(.data$deviceDeploymentID %in% ids) # Guarantee presence of fullAQSID if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_ data <- data %>% dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>% # Replace any NaN that snuck in dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric), function(x) ifelse(is.nan(x), NA, x))) # Create monitor object monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) MazamaTimeSeries::mts_check(monitor) # ----- Apply QC ------------------------------------------------------------- if ( QC_negativeValues == "zero" ) { monitor <- monitor_replaceValues(monitor, data < 0, 0) } else if ( QC_negativeValues == "na" ) { monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA)) } # ----- Return --------------------------------------------------------------- return(monitor) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/airnow_loadMonthly.R
#' @export #' @importFrom dplyr across #' #' @title Load annual AIRSIS monitoring data #' #' @param year Year [YYYY]. #' @param archiveBaseUrl Base URL for monitoring v2 data files. #' @param archiveBaseDir Local base directory for monitoring v2 data files. #' @param QC_negativeValues Type of QC to apply to negative values. #' @param QC_removeSuspectData Removes monitors determined to be misbehaving. #' #' @return A \emph{mts_monitor} object with AIRSIS data. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Loads pre-generated .rda files containing annual #' AIRSIS data. #' #' If \code{archiveBaseDir} is defined, data will be loaded from this local #' archive. Otherwise, data will be loaded from the monitoring data repository #' maintained by the USFS AirFire team. #' #' Current year files loaded by this function are updated once per week. #' #' For the most recent data in the last 10 days, use \code{airsis_loadLatest()}. #' #' For daily updates covering the most recent 45 days, use \code{airsis_loadDaily()}. #' #' @note #' Some older AIRSIS timeseries contain only values of 0, 1000, 2000, 3000, ... ug/m3. #' Data from these deployments pass instrument-level QC checks but these #' timeseries generally do not represent valid data and should be removed. #' With \code{QC_removeSuspectData = TRUE} (the default), data is checked and #' periods reporting only values of 0:10 * 1000 ug/m3 are invalidated. #' #' Only those personally familiar with the individual instrument deployments #' should work with the "suspect" data. #' #' @seealso \code{\link{airsis_loadDaily}} #' @seealso \code{\link{airsis_loadLatest}} #' #' @examples #' \dontrun{ #' library(AirMonitor) #' #' # Fail gracefully if any resources are not available #' try({ #' #' # See https://en.wikipedia.org/wiki/Camp_Fire_(2018) #' #' # AIRSIS monitors during the Camp Fire #' airsis_loadAnnual(2018) \%>\% #' monitor_filter(stateCode == "CA") \%>\% #' monitor_filterDate(20181101, 20181201) \%>\% #' monitor_dropEmpty() \%>\% #' monitor_leaflet() #' #' }, silent = FALSE) #' } airsis_loadAnnual <- function( year = NULL, archiveBaseUrl = paste0( "https://airfire-data-exports.s3.us-west-2.amazonaws.com/", "monitoring/v2" ), archiveBaseDir = NULL, QC_negativeValues = c("zero", "na", "ignore"), QC_removeSuspectData = TRUE ) { parameterName <- "PM2.5" # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(year) MazamaCoreUtils::stopIfNull(parameterName) if ( as.numeric(year) < 2014 ) stop("No AIRSIS data is available before 2014") QC_negativeValues <- match.arg(QC_negativeValues) if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) ) stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined") # Parameter code validParameterNames <- c( # "BARPR", # "BC", # "CO", # "NO", # "NO2", # "NO2Y", # "NO2X", # "NOX", # "NOOY", # "OC", # "OZONE", # "PM10", "PM2.5" # "PM2.5_nowcast" # "PRECIP", # "RHUM", # "SO2", # "SRAD", # "TEMP", # "UV-AETH", # "WD", # "WS" ) parameterName <- as.character(parameterName) if ( !parameterName %in% validParameterNames ) { stop(sprintf( "data for parameterName '%s' has not been processed", parameterName )) } # ----- Load data ------------------------------------------------------------ # Create file name and path according to the AirMonitorIngest scheme if ( is.null(archiveBaseUrl) ) { dataUrl <- NULL } else { dataUrl <- file.path(archiveBaseUrl, "airsis", year, "data") } if ( is.null(archiveBaseDir) ) { dataDir <- NULL } else {
dataDir <- file.path(archiveBaseDir, "airsis", year, "data") } metaFileName <- sprintf("airsis_%s_%s_meta.rda", parameterName, year) dataFileName <- sprintf("airsis_%s_%s_data.rda", parameterName, year) meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir) data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir) # Guarantee that 'meta' and 'data' match ids <- names(data)[-1] meta <- meta %>% dplyr::filter(.data$deviceDeploymentID %in% ids) # Guarantee presence of fullAQSID if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_ data <- data %>% dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>% # Replace any NaN that snuck in dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric), function(x) ifelse(is.nan(x), NA, x))) # Create monitor object monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) MazamaTimeSeries::mts_check(monitor) # ----- Apply QC ------------------------------------------------------------- # Handle negative values if ( QC_negativeValues == "zero" ) { monitor <- monitor_replaceValues(monitor, data < 0, 0) } else if ( QC_negativeValues == "na" ) { monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA)) } # NOTE: Several monitors in 2015 have values only at 0, 1000, 2000, 3000, ... if ( QC_removeSuspectData ) { monitor <- monitor %>% monitor_mutate(QC_invalidateConsecutiveSuspectValues) %>% monitor_dropEmpty() } # ----- Return --------------------------------------------------------------- return(monitor) } # ===== DEBUG ================================================================== if ( FALSE ) { year <- 2015 archiveBaseUrl <- "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2" archiveBaseDir <- NULL QC_negativeValues = "zero" QC_removeSuspectData = TRUE }
# ===== end of R/airsis_loadAnnual.R ===========================================
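# ---- Usage sketch (editorial addition; not part of the package) -------------
# A minimal illustration of the AirMonitorIngest file naming scheme that
# airsis_loadAnnual() relies on. The URL mirrors the function's default;
# actual file availability and network access are assumptions here, so the
# sketch is wrapped in if ( FALSE ).

if ( FALSE ) {

  year <- 2018
  dataUrl <- file.path(
    "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2",
    "airsis", year, "data"
  )

  # Annual files follow the "airsis_<parameter>_<year>_<part>.rda" pattern
  metaFileName <- sprintf("airsis_%s_%s_meta.rda", "PM2.5", year)
  dataFileName <- sprintf("airsis_%s_%s_data.rda", "PM2.5", year)

  # loadDataFile() resolves the named .rda file against dataUrl (or a local dataDir)
  meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, NULL)
  data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, NULL)

}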
#' @export
#' @importFrom dplyr across
#'
#' @title Load daily AIRSIS monitoring data
#'
#' @param archiveBaseUrl Base URL for monitoring v2 data files.
#' @param archiveBaseDir Local base directory for monitoring v2 data files.
#' @param QC_negativeValues Type of QC to apply to negative values.
#' @param QC_removeSuspectData Removes monitors determined to be misbehaving.
#'
#' @return A \emph{mts_monitor} object with AIRSIS data. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description Loads pre-generated .rda files containing daily
#' AIRSIS data.
#'
#' If \code{archiveBaseDir} is defined, data will be loaded from this local
#' archive. Otherwise, data will be loaded from the monitoring data repository
#' maintained by the USFS AirFire team.
#'
#' The files loaded by this function are updated once per day and
#' contain data for the previous 45 days.
#'
#' For the most recent data in the last 10 days, use \code{airsis_loadLatest()}.
#'
#' For data extending more than 45 days into the past, use \code{airsis_loadAnnual()}.
#'
#' @note
#' Some older AIRSIS timeseries contain only values of 0, 1000, 2000, 3000, ... ug/m3.
#' Data from these deployments pass instrument-level QC checks but these
#' timeseries generally do not represent valid data and should be removed.
#' With \code{QC_removeSuspectData = TRUE} (the default), data is checked and
#' periods reporting only values of 0:10 * 1000 ug/m3 are invalidated.
#'
#' Only those personally familiar with the individual instrument deployments
#' should work with the "suspect" data.
#'
#' @seealso \code{\link{airsis_loadAnnual}}
#' @seealso \code{\link{airsis_loadLatest}}
#'
#' @examples
#' \dontrun{
#' library(AirMonitor)
#'
#' # Fail gracefully if any resources are not available
#' try({
#'
#' airsis_loadDaily() \%>\%
#'   monitor_filter(stateCode == "CA") \%>\%
#'   monitor_leaflet()
#'
#' }, silent = FALSE)
#' }

airsis_loadDaily <- function(
  archiveBaseUrl = paste0(
    "https://airfire-data-exports.s3.us-west-2.amazonaws.com/",
    "monitoring/v2"
  ),
  archiveBaseDir = NULL,
  QC_negativeValues = c("zero", "na", "ignore"),
  QC_removeSuspectData = TRUE
) {

  parameterName <- "PM2.5"

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(parameterName)

  QC_negativeValues <- match.arg(QC_negativeValues)

  if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) )
    stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined")

  # Parameter code
  validParameterNames <- c(
    # "BARPR",
    # "BC",
    # "CO",
    # "NO",
    # "NO2",
    # "NO2Y",
    # "NO2X",
    # "NOX",
    # "NOOY",
    # "OC",
    # "OZONE",
    # "PM10",
    "PM2.5"
    # "PM2.5_nowcast"
    # "PRECIP",
    # "RHUM",
    # "SO2",
    # "SRAD",
    # "TEMP",
    # "UV-AETH",
    # "WD",
    # "WS"
  )

  parameterName <- as.character(parameterName)
  if ( !parameterName %in% validParameterNames ) {
    stop(sprintf(
      "data for parameterName '%s' has not been processed",
      parameterName
    ))
  }

  # ----- Load data ------------------------------------------------------------

  # Create file name and path according to the AirMonitorIngest scheme

  if ( is.null(archiveBaseUrl) ) {
    dataUrl <- NULL
  } else {
    dataUrl <- file.path(archiveBaseUrl, "daily/data")
  }

  if ( is.null(archiveBaseDir) ) {
    dataDir <- NULL
  } else {
    dataDir <- file.path(archiveBaseDir, "daily/data")
  }

  metaFileName <- sprintf("airsis_%s_daily_meta.rda", parameterName)
  dataFileName <- sprintf("airsis_%s_daily_data.rda", parameterName)

  meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir)
  data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir)

  # Guarantee that 'meta' and 'data' match
  ids <- names(data)[-1]

  meta <-
    meta %>%
    dplyr::filter(.data$deviceDeploymentID %in% ids)

  # Guarantee presence of fullAQSID
  if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_

  data <-
    data %>%
    dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>%
    # Replace any NaN that snuck in
    dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric),
                         function(x) ifelse(is.nan(x), NA, x)))

  # Create monitor object
  monitor <- list(meta = meta, data = data)

  monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor)))

  MazamaTimeSeries::mts_check(monitor)

  # ----- Apply QC -------------------------------------------------------------

  # Handle negative values
  if ( QC_negativeValues == "zero" ) {
    monitor <- monitor_replaceValues(monitor, data < 0, 0)
  } else if ( QC_negativeValues == "na" ) {
    monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA))
  }

  # NOTE: Several monitors in 2015 have values only at 0, 1000, 2000, 3000, ...
  if ( QC_removeSuspectData ) {
    monitor <-
      monitor %>%
      monitor_mutate(QC_invalidateConsecutiveSuspectValues) %>%
      monitor_dropEmpty()
  }

  # ----- Return ---------------------------------------------------------------

  return(monitor)
}

# ===== DEBUG ==================================================================

if ( FALSE ) {

  archiveBaseUrl <- "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2"
  archiveBaseDir <- NULL
  QC_negativeValues = "zero"

}
# ===== end of R/airsis_loadDaily.R ============================================
#' @export
#' @importFrom dplyr across
#'
#' @title Load most recent AIRSIS monitoring data
#'
#' @param archiveBaseUrl Base URL for monitoring v2 data files.
#' @param archiveBaseDir Local base directory for monitoring v2 data files.
#' @param QC_negativeValues Type of QC to apply to negative values.
#' @param QC_removeSuspectData Removes monitors determined to be misbehaving.
#'
#' @return A \emph{mts_monitor} object with AIRSIS data. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description Loads pre-generated .rda files containing the most recent
#' AIRSIS data.
#'
#' If \code{archiveBaseDir} is defined, data will be loaded from this local
#' archive. Otherwise, data will be loaded from the monitoring data repository
#' maintained by the USFS AirFire team.
#'
#' The files loaded by this function are updated multiple times an hour and
#' contain data for the previous 10 days.
#'
#' For daily updates covering the most recent 45 days, use \code{airsis_loadDaily()}.
#'
#' For data extending more than 45 days into the past, use \code{airsis_loadAnnual()}.
#'
#' @note
#' Some older AIRSIS timeseries contain only values of 0, 1000, 2000, 3000, ... ug/m3.
#' Data from these deployments pass instrument-level QC checks but these
#' timeseries generally do not represent valid data and should be removed.
#' With \code{QC_removeSuspectData = TRUE} (the default), data is checked and
#' periods reporting only values of 0:10 * 1000 ug/m3 are invalidated.
#'
#' Only those personally familiar with the individual instrument deployments
#' should work with the "suspect" data.
#'
#' @seealso \code{\link{airsis_loadAnnual}}
#' @seealso \code{\link{airsis_loadDaily}}
#'
#' @examples
#' \dontrun{
#' library(AirMonitor)
#'
#' # Fail gracefully if any resources are not available
#' try({
#'
#' airsis_loadLatest() \%>\%
#'   monitor_filter(stateCode == "CA") \%>\%
#'   monitor_leaflet()
#'
#' }, silent = FALSE)
#' }

airsis_loadLatest <- function(
  archiveBaseUrl = paste0(
    "https://airfire-data-exports.s3.us-west-2.amazonaws.com/",
    "monitoring/v2"
  ),
  archiveBaseDir = NULL,
  QC_negativeValues = c("zero", "na", "ignore"),
  QC_removeSuspectData = TRUE
) {

  parameterName <- "PM2.5"

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(parameterName)

  QC_negativeValues <- match.arg(QC_negativeValues)

  if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) )
    stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined")

  # Parameter code
  validParameterNames <- c(
    # "BARPR",
    # "BC",
    # "CO",
    # "NO",
    # "NO2",
    # "NO2Y",
    # "NO2X",
    # "NOX",
    # "NOOY",
    # "OC",
    # "OZONE",
    # "PM10",
    "PM2.5"
    # "PM2.5_nowcast"
    # "PRECIP",
    # "RHUM",
    # "SO2",
    # "SRAD",
    # "TEMP",
    # "UV-AETH",
    # "WD",
    # "WS"
  )

  parameterName <- as.character(parameterName)
  if ( !parameterName %in% validParameterNames ) {
    stop(sprintf(
      "data for parameterName '%s' has not been processed",
      parameterName
    ))
  }

  # ----- Load data ------------------------------------------------------------

  # Create file name and path according to the AirMonitorIngest scheme

  if ( is.null(archiveBaseUrl) ) {
    dataUrl <- NULL
  } else {
    dataUrl <- file.path(archiveBaseUrl, "latest/data")
  }

  if ( is.null(archiveBaseDir) ) {
    dataDir <- NULL
  } else {
    dataDir <- file.path(archiveBaseDir, "latest/data")
  }

  metaFileName <- sprintf("airsis_%s_latest_meta.rda", parameterName)
  dataFileName <- sprintf("airsis_%s_latest_data.rda", parameterName)

  meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir)
  data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir)

  # Guarantee that 'meta' and 'data' match
  ids <- names(data)[-1]

  meta <-
    meta %>%
    dplyr::filter(.data$deviceDeploymentID %in% ids)

  # Guarantee presence of fullAQSID
  if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_

  data <-
    data %>%
    dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>%
    # Replace any NaN that snuck in
    dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric),
                         function(x) ifelse(is.nan(x), NA, x)))

  # Create monitor object
  monitor <- list(meta = meta, data = data)

  monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor)))

  MazamaTimeSeries::mts_check(monitor)

  # ----- Apply QC -------------------------------------------------------------

  # Handle negative values
  if ( QC_negativeValues == "zero" ) {
    monitor <- monitor_replaceValues(monitor, data < 0, 0)
  } else if ( QC_negativeValues == "na" ) {
    monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA))
  }

  # NOTE: Several monitors in 2015 have values only at 0, 1000, 2000, 3000, ...
  if ( QC_removeSuspectData ) {
    monitor <-
      monitor %>%
      monitor_mutate(QC_invalidateConsecutiveSuspectValues) %>%
      monitor_dropEmpty()
  }

  # ----- Return ---------------------------------------------------------------

  return(monitor)
}

# ===== DEBUG ==================================================================

if ( FALSE ) {

  archiveBaseUrl <- "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2"
  archiveBaseDir <- NULL
  QC_negativeValues = "zero"

}
# ===== end of R/airsis_loadLatest.R ===========================================
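# ---- Usage sketch (editorial addition; not part of the package) -------------
# The three AIRSIS loaders trade recency against time span:
#   airsis_loadLatest() -- last 10 days, updated multiple times an hour
#   airsis_loadDaily()  -- last 45 days, updated once per day
#   airsis_loadAnnual() -- a full calendar year, updated weekly for the current year
# A hedged sketch of choosing a loader from a requested start date; the
# selection logic here is illustrative, not a package function. Wrapped in
# if ( FALSE ) because the loaders require network access.

if ( FALSE ) {

  startdate <- lubridate::now(tzone = "UTC") - lubridate::ddays(30)
  daysBack <- as.numeric(difftime(lubridate::now(tzone = "UTC"), startdate, units = "days"))

  monitor <-
    if ( daysBack <= 10 ) {
      airsis_loadLatest()
    } else if ( daysBack <= 45 ) {
      airsis_loadDaily()
    } else {
      airsis_loadAnnual(lubridate::year(startdate))
    }

}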
#' @export
#'
#' @title Generate AQI categories
#'
#' @param x Vector or matrix of PM2.5 values or an \emph{mts_monitor} object.
#' @param pollutant EPA AQS criteria pollutant.
#' @param NAAQS Version of NAAQS levels to use. See Note.
#' @param conversionArray Array of six text or other values to return instead of integers.
#'
#' @return A vector or matrix of AQI category indices in the range 1:6.
#'
#' @description
#' This function converts hourly PM2.5 measurements into AQI category levels.
#' These levels can then be converted to colors or names using the arrays found
#' in \code{\link{US_AQI}}.
#'
#' @details
#' By default, return values will be integers in the range 1:6 or \code{NA}. The
#' \code{conversionArray} parameter can be used to convert these integers into
#' whatever is specified in the first six elements of \code{conversionArray}. A
#' typical usage would be: \code{conversionArray = US_AQI$names_eng}.
#'
#' @note
#' On February 7, 2024, EPA strengthened the National Ambient Air Quality
#' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans
#' from harmful and costly health impacts, such as heart attacks and premature
#' death. Particle or soot pollution is one of the most dangerous forms of air
#' pollution, and an extensive body of science links it to a range of serious
#' and sometimes deadly illnesses. EPA is setting the level of the primary
#' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to
#' provide increased public health protection, consistent with the available
#' health science.
#' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}.
#'
#' @seealso \code{\link{aqiColors}}
#'
#' @examples
#' library(AirMonitor)
#'
#' # Lane County, Oregon AQSIDs all begin with "41039"
#' LaneCounty <-
#'   NW_Megafires %>%
#'   monitor_filter(stringr::str_detect(AQSID, '^41039')) %>%
#'   monitor_filterDate(20150822, 20150823)
#'
#' LaneCounty %>%
#'   aqiCategories()
#'
#' LaneCounty %>%
#'   aqiCategories(conversionArray = US_AQI$names_eng)

aqiCategories <- function(
  x,
  pollutant = c("PM2.5", "AQI", "CO", "NO", "OZONE", "PM10", "SO2"),
  NAAQS = c("PM2.5", "PM2.5_2024"),
  conversionArray = NULL
) {

  # ----- Validate parameters --------------------------------------------------

  pollutant <- match.arg(pollutant)
  NAAQS = match.arg(NAAQS)

  breaks <- US_AQI[[paste0("breaks_", pollutant)]]

  # Handle the added NAAQS argument
  if ( pollutant == "PM2.5" && NAAQS == "PM2.5_2024" ) {
    breaks <- US_AQI$breaks_PM2.5_2024
  }

  # ----- Prepare data ---------------------------------------------------------

  # Pull data out of mts_monitor object if necessary
  if ( !is.numeric(x) ) {
    if ( !monitor_isValid(x) )
      stop("'x' is neither numeric nor a valid mts_monitor object")
    x <- x$data[,-1]
  }

  # Convert to matrix if necessary
  ncol <- 1
  if ( !is.null(ncol(x)) ) {
    ncol <- ncol(x)
    x <- as.matrix(x)
  }

  # Force conversion to a numeric vector
  x <- as.numeric(x)

  # ----- Create categories ----------------------------------------------------

  categories <- .bincode(x, breaks)

  if ( !is.null(conversionArray) ) {
    categories <- conversionArray[categories]
  }

  # ----- Return ---------------------------------------------------------------

  # Restore shape
  if ( ncol > 1 ) {
    categories <- matrix(categories, ncol = ncol, byrow = FALSE)
  }

  return(categories)
}
# ===== end of R/aqiCategories.R ===============================================
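# ---- Usage sketch (editorial addition; not part of the package) -------------
# aqiCategories() ultimately reduces to base::.bincode() applied with AQI
# breaks. A self-contained illustration with hand-entered pre-2024 PM2.5
# breaks (values assumed here for illustration; the package itself uses
# US_AQI$breaks_PM2.5):

breaks <- c(-Inf, 12, 35.4, 55.4, 150.4, 250.4, Inf)
pm25 <- c(3, 20, 48, 90, 200, 400, NA)

.bincode(pm25, breaks)
# [1]  1  2  3  4  5  6 NA

# A conversionArray simply indexes six replacement values by category:
names_eng <- c("Good", "Moderate", "USG", "Unhealthy", "Very Unhealthy", "Hazardous")
names_eng[.bincode(pm25, breaks)]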
#' @export
#'
#' @title Generate AQI colors
#'
#' @param x Vector or matrix of PM2.5 values or an \emph{mts_monitor} object.
#' @param pollutant EPA AQS criteria pollutant.
#' @param palette Named color palette to use for AQI categories.
#' @param na.color Color assigned to missing values.
#' @param NAAQS Version of NAAQS levels to use. See Note.
#'
#' @return A vector or matrix of AQI colors to be used in maps and plots.
#'
#' @description
#' This function uses the \code{leaflet::colorBin()} function to return a
#' vector or matrix of colors derived from data values.
#'
#' @note
#' On February 7, 2024, EPA strengthened the National Ambient Air Quality
#' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans
#' from harmful and costly health impacts, such as heart attacks and premature
#' death. Particle or soot pollution is one of the most dangerous forms of air
#' pollution, and an extensive body of science links it to a range of serious
#' and sometimes deadly illnesses. EPA is setting the level of the primary
#' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to
#' provide increased public health protection, consistent with the available
#' health science.
#' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}.
#'
#' @seealso \code{\link{aqiCategories}}
#'
#' @examples
#' library(AirMonitor)
#'
#' # Fancy plot based on pm2.5 values
#' pm2.5 <- Carmel_Valley$data[,2]
#'
#' Carmel_Valley %>%
#'   monitor_timeseriesPlot(
#'     shadedNight = TRUE,
#'     pch = 16,
#'     cex = pmax(pm2.5 / 100, 0.5),
#'     col = aqiColors(pm2.5),
#'     opacity = 0.8
#'   )

aqiColors <- function(
  x,
  pollutant = c("PM2.5", "AQI", "CO", "NO", "OZONE", "PM10", "SO2"),
  palette = c("EPA", "subdued", "deuteranopia"),
  na.color = NA,
  NAAQS = c("PM2.5", "PM2.5_2024")
) {

  # ----- Validate parameters --------------------------------------------------

  pollutant <- match.arg(pollutant)
  palette <- match.arg(palette)
  NAAQS = match.arg(NAAQS)

  breaks <- US_AQI[[paste0("breaks_", pollutant)]]
  colors <- US_AQI[[paste0("colors_", palette)]]

  # Handle the added NAAQS argument
  if ( pollutant == "PM2.5" && NAAQS == "PM2.5_2024" ) {
    breaks <- US_AQI$breaks_PM2.5_2024
  }

  # ----- Prepare data ---------------------------------------------------------

  # Pull data out of mts_monitor object if necessary
  if ( !is.numeric(x) ) {
    if ( !monitor_isValid(x) )
      stop("'x' is neither numeric nor a valid mts_monitor object")
    x <- x$data[,-1]
  }

  # Convert to matrix if necessary
  ncol <- 1
  if ( !is.null(ncol(x)) ) {
    ncol <- ncol(x)
    x <- as.matrix(x)
  }

  # Force conversion to a numeric vector
  x <- as.numeric(x)

  # ----- Create colors ---------------------------------------------------------

  # Generate color function
  colorFUN <- leaflet::colorBin(
    palette = colors,
    domain = c(0, 1e6),
    bins = breaks,
    na.color = na.color
  )

  # Assign colors
  cols <- colorFUN(x)

  # ----- Return ---------------------------------------------------------------

  # Restore shape
  if ( ncol > 1 ) {
    cols <- matrix(cols, ncol = ncol, byrow = FALSE)
  }

  return(cols)
}
# ===== end of R/aqiColors.R ===================================================
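# ---- Usage sketch (editorial addition; not part of the package) -------------
# leaflet::colorBin() returns a palette *function* that maps values into the
# supplied bins. A self-contained sketch with assumed breaks and colors (the
# package itself uses US_AQI$breaks_PM2.5 and US_AQI$colors_EPA):

breaks <- c(-Inf, 12, 35.4, 55.4, 150.4, 250.4, Inf)
colors <- c("#00E400", "#FFFF00", "#FF7E00", "#FF0000", "#8F3F97", "#7E0023")

colorFUN <- leaflet::colorBin(
  palette = colors,
  domain = c(0, 1e6),
  bins = breaks,
  na.color = NA
)

colorFUN(c(3, 20, 400))   # returns one color per input value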
# ----- Example datasets ------------------------------------------------------

#' @encoding UTF-8
#' @title NW_Megafires example dataset
#' @format A \emph{mts_monitor} object with 1080 rows and 143 columns of data.
#' @description The \code{NW_Megafires} dataset provides a quickly loadable
#' version of a \emph{mts_monitor} object for practicing and code examples.
#'
#' @details
#' In the summer of 2015, Washington state had several catastrophic wildfires
#' that led to many days of heavy smoke in eastern Washington, Oregon and
#' northern Idaho. The NW_Megafires dataset contains monitoring data for the
#' Pacific Northwest from July 24 through September 06, 2015.
#'
#' This dataset was generated on 2022-10-28 by running:
#'
#' \preformatted{
#' library(AirMonitor)
#'
#' NW_Megafires <-
#'   monitor_loadAnnual(2015, epaPreference = "epa_aqs") %>%
#'   monitor_filterMeta(stateCode %in% c("WA", "OR", "ID")) %>%
#'   monitor_filterDate(20150724, 20150907, timezone = "America/Los_Angeles") %>%
#'   monitor_dropEmpty()
#'
#' save(NW_Megafires, file = "data/NW_Megafires.rda")
#' }
#'
"NW_Megafires"


#' @encoding UTF-8
#' @title Carmel Valley example dataset
#' @format A \emph{mts_monitor} object with 576 rows and 2 columns of data.
#' @description The \code{Carmel_Valley} dataset provides a quickly loadable
#' version of a \emph{mts_monitor} object for practicing and code examples.
#'
#' @details
#' In August of 2016, the Soberanes fire in California burned along the Big Sur
#' coast. At the time, it was the most expensive wildfire in US history. This
#' dataset contains PM2.5 monitoring data for the monitor in Carmel Valley which
#' shows heavy smoke as well as strong diurnal cycles associated with sea
#' breezes. Data are stored as a \emph{mts_monitor} object and are used in some
#' examples in the package documentation.
#'
#' This dataset was generated on 2022-10-12 by running:
#'
#' \preformatted{
#' library(AirMonitor)
#'
#' Carmel_Valley <-
#'   airnow_loadAnnual(2016) \%>\%
#'   monitor_filterMeta(deviceDeploymentID == "a9572a904a4ed46d_840060530002") \%>\%
#'   monitor_filterDate(20160722, 20160815)
#'
#' save(Carmel_Valley, file = "data/Carmel_Valley.rda")
#' }
#'
"Carmel_Valley"


#' @encoding UTF-8
#' @title Camp Fire example dataset
#' @format A \emph{mts_monitor} object with 360 rows and 134 columns of data.
#' @description The \code{Camp_Fire} dataset provides a quickly loadable
#' version of a \emph{mts_monitor} object for practicing and code examples.
#'
#' @details
#' The 2018 Camp Fire was the deadliest and most destructive wildfire in California's
#' history, and the most expensive natural disaster in the world in 2018 in
#' terms of insured losses. The fire caused at least 85 civilian fatalities and
#' injured 12 civilians and five firefighters. It covered an area of 153,336
#' acres and destroyed more than 18,000 structures, most within the first 4 hours.
#' Smoke from the fire resulted in the worst air pollution ever for the
#' San Francisco Bay Area and Sacramento Valley.
#'
#' This dataset was generated on 2022-10-12 by running:
#'
#' \preformatted{
#' library(AirMonitor)
#'
#' Camp_Fire <-
#'   monitor_loadAnnual(2018) \%>\%
#'   monitor_filter(stateCode == 'CA') \%>\%
#'   monitor_filterDate(
#'     startdate = 20181108,
#'     enddate = 20181123,
#'     timezone = "America/Los_Angeles"
#'   ) \%>\%
#'   monitor_dropEmpty()
#'
#' save(Camp_Fire, file = "data/Camp_Fire.rda")
#' }
"Camp_Fire"
# ===== end of R/data.R ========================================================
#' @export
#' @importFrom dplyr across
#'
#' @title Load annual EPA AQS monitoring data
#'
#' @param year Year [YYYY].
#' @param archiveBaseUrl Base URL for monitoring v2 data files.
#' @param archiveBaseDir Local base directory for monitoring v2 data files.
#' @param QC_negativeValues Type of QC to apply to negative values.
#' @param parameterCode One of the EPA AQS criteria parameter codes.
#'
#' @return A \emph{mts_monitor} object with EPA AQS data. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description Loads pre-generated .rda files containing hourly EPA AQS data.
#'
#' If \code{archiveBaseDir} is defined, data will be loaded from this local
#' archive. Otherwise, data will be loaded from the monitoring data repository
#' maintained by the USFS AirFire team.
#'
#' The files loaded by this function contain a single year's worth of data.
#'
#' Pre-processed EPA AQS data exists for the following parameter codes:
#' \enumerate{
#' \item{88101 -- PM2.5 FRM/FEM Mass}
#' \item{88502 -- PM2.5 non FRM/FEM Mass}
#' }
#'
#' Specifying \code{parameterCode = "PM2.5"} will merge records from both
#' sources.
#'
#' @examples
#' \dontrun{
#' library(AirMonitor)
#'
#' # Fail gracefully if any resources are not available
#' try({
#'
#' # Daily average PM2.5 timeseries during the 2015 Washington wildfires
#' epa_aqs_loadAnnual(2015) \%>\%
#'   monitor_filter(stateCode == "WA") \%>\%
#'   monitor_filterDate(20150724, 20150907) \%>\%
#'   monitor_dailyStatistic() \%>\%
#'   monitor_timeseriesPlot(
#'     main = "Washington 2015 -- EPA AQS Daily Average PM2.5"
#'   )
#'
#' }, silent = FALSE)
#' }

epa_aqs_loadAnnual <- function(
  year = NULL,
  archiveBaseUrl = paste0(
    "https://airfire-data-exports.s3.us-west-2.amazonaws.com/",
    "monitoring/v2"
  ),
  archiveBaseDir = NULL,
  QC_negativeValues = c("zero", "na", "ignore"),
  parameterCode = c("PM2.5", "88101", "88502")
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(year)

  QC_negativeValues <- match.arg(QC_negativeValues)

  if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) )
    stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined")

  parameterCode <- match.arg(parameterCode)

  if ( parameterCode == "PM2.5" ) {
    parameterCodes <- c("88101", "88502")
  } else {
    parameterCodes <- parameterCode
  }

  # ----- Load data ------------------------------------------------------------

  # Create file name and path according to the AirMonitorIngest scheme

  if ( is.null(archiveBaseUrl) ) {
    dataUrl <- NULL
  } else {
    dataUrl <- file.path(archiveBaseUrl, "epa_aqs", year, "data")
  }

  if ( is.null(archiveBaseDir) ) {
    dataDir <- NULL
  } else {
    dataDir <- file.path(archiveBaseDir, "epa_aqs", year, "data")
  }

  monitorList <- list()

  for ( parameterCode in parameterCodes ) {

    result <- try({

      suppressWarnings({

        metaFileName <- sprintf("epa_aqs_%s_%s_meta.rda", parameterCode, year)
        dataFileName <- sprintf("epa_aqs_%s_%s_data.rda", parameterCode, year)

        meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir)
        data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir)

        # Guarantee that 'meta' and 'data' match
        ids <- names(data)[-1]

        meta <-
          meta %>%
          dplyr::filter(.data$deviceDeploymentID %in% ids)

        # Guarantee presence of fullAQSID
        if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_

        data <-
          data %>%
          dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>%
          # Replace any NaN that snuck in
          dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric),
                               function(x) ifelse(is.nan(x), NA, x)))

        # Create monitor object
        monitor <- list(meta = meta, data = data)

        monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor)))

        MazamaTimeSeries::mts_check(monitor)

        monitorList[[parameterCode]] <- monitor

      })

    }, silent = TRUE)

  }

  # Test for data
  if ( length(monitorList) == 0 ) {
    if ( is.null(dataDir) ) {
      err_msg <- sprintf("no data could be loaded from dataUrl: %s\n\nDid you mean to specify dataDir?", dataUrl)
    } else {
      err_msg <- sprintf("no data could be loaded from dataDir: %s\n\nDid you mean to specify dataUrl?", dataDir)
    }
    stop(err_msg)
  }

  if ( length(monitorList) > 1 ) {

    # NOTE: Combine with "replace na" to handle cases where identical
    # NOTE: deviceDeploymentIDs represent a "temporary" monitor from 88502 being
    # NOTE: replaced with a "permanent" monitor from 88101.
    monitor <- monitor_combine(
      monitorList,
      replaceMeta = TRUE,
      overlapStrategy = "replace na"
    )

  }

  # ----- Apply QC -------------------------------------------------------------

  if ( QC_negativeValues == "zero" ) {
    monitor <- monitor_replaceValues(monitor, data < 0, 0)
  } else if ( QC_negativeValues == "na" ) {
    monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA))
  }

  # ----- Return ---------------------------------------------------------------

  return(monitor)
}
# ===== end of R/epa_aqs_loadAnnual.R ==========================================
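# ---- Usage sketch (editorial addition; not part of the package) -------------
# Why overlapStrategy = "replace na" above: when a "temporary" 88502 monitor is
# superseded by a "permanent" 88101 monitor at the same deviceDeploymentID,
# their records overlap and only the missing values in one series should be
# filled from the other. A hedged sketch, assuming archive files for both
# parameter codes are reachable (hence the if ( FALSE ) wrapper):

if ( FALSE ) {

  monitor_88101 <- epa_aqs_loadAnnual(2018, parameterCode = "88101")
  monitor_88502 <- epa_aqs_loadAnnual(2018, parameterCode = "88502")

  both <- monitor_combine(
    monitor_88101,
    monitor_88502,
    replaceMeta = TRUE,
    overlapStrategy = "replace na"
  )

}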
#' @export
#'
#' @title Calculate hourly NowCast-based AQI values
#'
#' @param monitor \emph{mts_monitor} object.
#' @param version Name of the type of nowcast algorithm to be used.
#' @param includeShortTerm Logical specifying whether to calculate preliminary
#' NowCast values starting with the 2nd hour.
#' @param NAAQS Version of NAAQS levels to use. See Note.
#'
#' @return A modified \code{mts_monitor} object containing AQI values. (A list
#' with \code{meta} and \code{data} dataframes.)
#'
#' @description Nowcast and AQI algorithms are applied to the data in the
#' monitor object. A modified \code{mts_monitor} object is returned where values
#' have been replaced with their Air Quality Index equivalents. See \link{monitor_nowcast}.
#'
#' @note
#' On February 7, 2024, EPA strengthened the National Ambient Air Quality
#' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans
#' from harmful and costly health impacts, such as heart attacks and premature
#' death. Particle or soot pollution is one of the most dangerous forms of air
#' pollution, and an extensive body of science links it to a range of serious
#' and sometimes deadly illnesses. EPA is setting the level of the primary
#' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to
#' provide increased public health protection, consistent with the available
#' health science.
#' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}.
#'
#' @references \url{https://en.wikipedia.org/wiki/Nowcast_(Air_Quality_Index)}
#' @references \url{https://www.airnow.gov/aqi/aqi-basics/}
#'

monitor_aqi <- function(
  monitor,
  version = c("pm", "pmAsian", "ozone"),
  includeShortTerm = FALSE,
  NAAQS = c("PM2.5", "PM2.5_2024")
) {

  parameterName <- "PM2.5"

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  version <- match.arg(version)
  includeShortTerm <- MazamaCoreUtils::setIfNull(includeShortTerm, FALSE)
  NAAQS = match.arg(NAAQS)

  # A little involved to catch the case where the user forgets to pass in 'monitor'
  result <- try({
    if ( !monitor_isValid(monitor) )
      stop("First argument is not a valid 'mts_monitor' object.")
  }, silent = TRUE)

  if ( class(result) %in% "try-error" ) {
    err_msg <- geterrmessage()
    if ( stringr::str_detect(err_msg, "object .* not found") ) {
      stop(paste0(err_msg, "\n(Did you forget to pass in the 'monitor' object?)"))
    }
  }

  if ( monitor_isEmpty(monitor) )
    stop("Parameter 'monitor' has no data.")

  # ----- AQI algorithm --------------------------------------------------------

  # Assign breakpoints
  breakpointsTable <- .assignBreakpointsTable(parameterName, NAAQS)

  # Calculate NowCast
  monitor <-
    monitor %>%
    # NOTE: see https://forum.airnowtech.org/t/how-does-airnow-handle-negative-hourly-concentrations/143
    monitor_replaceValues(data < 0, 0) %>%
    monitor_nowcast(version = version, includeShortTerm = includeShortTerm)

  # pull out data for AQI calculation
  data <- dplyr::select(monitor$data, -1)

  # TODO: include/expand checks to ensure values are appropriately truncated
  if ( parameterName == "PM2.5" || version == "pm" ) {
    digits <- 1
  } else {
    digits <- 0
  }
  data <- trunc(data*10^digits)/10^digits

  # For each datapoint find the breakpointsTable row index that corresponds to the concentration
  rowIndex <- apply(
    X = data,
    MARGIN = 2,
    FUN = findInterval,
    vec = breakpointsTable$rangeHigh,
    left.open = TRUE
  )
  rowIndex <- rowIndex + 1

  # From 40 CFR 58 Appendix G.12.ii:
  #   If the concentration is larger than the highest breakpoint in Table 2
  #   then you may use the last two breakpoints in Table 2 when you apply Equation 1.
  rowIndex[rowIndex > nrow(breakpointsTable)] <- nrow(breakpointsTable)

  # Assign breakpoints and corresponding index values
  I_Hi <- breakpointsTable$aqiHigh[rowIndex]
  I_Lo <- breakpointsTable$aqiLow[rowIndex]
  BP_Hi <- breakpointsTable$rangeHigh[rowIndex]
  BP_Lo <- breakpointsTable$rangeLow[rowIndex]

  # Apply Equation 1 from 40 CFR 58 Appendix G and round to the nearest integer
  I_p <- (I_Hi-I_Lo)/(BP_Hi-BP_Lo)*(data-BP_Lo) + I_Lo
  I_p <- round(I_p, 0)

  monitor$data[,-1] <- I_p

  # ----- Update meta ----------------------------------------------------------

  monitor$meta$pollutant <- "AQI"
  monitor$meta$units <- ""

  # ----- Return ---------------------------------------------------------------

  return(monitor)
}

# ===== Internal Functions =====================================================

.assignBreakpointsTable <- function(parameterName = "PM2.5", NAAQS = "PM2.5") {

  # TODO: Add other breakpoint table options

  if ( parameterName == "PM2.5") {

    # PM2.5 -- From Appendix G, Table 2 at https://www.ecfr.gov/current/title-40/part-58
    # PM2.5_2024 -- https://www.epa.gov/system/files/documents/2024-02/pm-naaqs-air-quality-index-fact-sheet.pdf

    if ( NAAQS == "PM2.5" ) {

      breakpointsTable <- data.frame(
        rangeLow = c(0.0, 12.001, 35.5, 55.5, 150.5, 250.5, 350.5),
        rangeHigh = c(12.0, 35.4, 55.4, 150.4, 250.4, 350.4, 500.4),
        aqiLow = c(0, 51, 101, 151, 201, 301, 401),
        aqiHigh = c(50, 100, 150, 200, 300, 400, 500)
      )

    } else {

      # NOTE: The 2024 AQI scale has six categories, with "Hazardous" spanning
      # NOTE: AQI 301-500, so the aqi columns have six entries to match the six
      # NOTE: concentration ranges above.
      breakpointsTable <- data.frame(
        rangeLow = c(0.0, 9.1, 35.5, 55.5, 125.5, 225.5),
        rangeHigh = c(9.0, 35.4, 55.4, 125.4, 225.4, 500),
        aqiLow = c(0, 51, 101, 151, 201, 301),
        aqiHigh = c(50, 100, 150, 200, 300, 500)
      )

    }

  } else {

    stop("only PM2.5 currently supported")

  }

  return(breakpointsTable)
}
# ===== end of R/monitor_aqi.R =================================================
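# ---- Usage sketch (editorial addition; not part of the package) -------------
# Equation 1 from 40 CFR 58 Appendix G, applied to a single truncated PM2.5
# concentration. The helper below is hypothetical, written only to make the
# interpolation concrete:
#
#   I_p = (I_Hi - I_Lo) / (BP_Hi - BP_Lo) * (C_p - BP_Lo) + I_Lo

aqi_equation1 <- function(Cp, BP_Lo, BP_Hi, I_Lo, I_Hi) {
  # Linear interpolation within one breakpoint range, rounded to an integer AQI
  round((I_Hi - I_Lo) / (BP_Hi - BP_Lo) * (Cp - BP_Lo) + I_Lo, 0)
}

# 40.0 ug/m3 falls in the (pre-2024) 35.5-55.4 range, which maps to AQI 101-150:
aqi_equation1(40.0, 35.5, 55.4, 101, 150)   # 112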
#' @export
#'
#' @title Order \emph{mts_monitor} time series by metadata values
#'
#' @param monitor \emph{mts_monitor} object.
#' @param ... variables in \code{monitor$meta}.
#'
#' @description The variable(s) in \code{...} are used to specify columns of
#' \code{monitor$meta} to use for ordering. Under the hood, this
#' function uses \code{\link[dplyr]{arrange}} on \code{monitor$meta} and then
#' reorders \code{monitor$data} to match.
#'
#' @return A reordered version of the incoming \emph{mts_monitor} object.
#' (A list with \code{meta} and \code{data} dataframes.)
#'
#' @seealso \link{monitor_select}
#'
#' @examples
#' library(AirMonitor)
#'
#' Camp_Fire$meta$elevation[1:10]
#'
#' byElevation <-
#'   Camp_Fire %>%
#'   monitor_arrange(elevation)
#'
#' byElevation$meta$elevation[1:10]
#'

monitor_arrange <- function(
  monitor,
  ...
) {

  # ----- Validate parameters --------------------------------------------------

  # A little involved to catch the case where the user forgets to pass in 'monitor'
  result <- try({
    if ( !monitor_isValid(monitor) )
      stop("First argument is not a valid 'mts_monitor' object.")
  }, silent = TRUE)

  if ( class(result) %in% "try-error" ) {
    err_msg <- geterrmessage()
    if ( stringr::str_detect(err_msg, "object .* not found") ) {
      stop(paste0(err_msg, "\n(Did you forget to pass in the 'monitor' object?)"))
    }
  }

  # ----- Call MazamaTimeSeries function ----------------------------------------

  monitor <- MazamaTimeSeries::mts_arrange(monitor, ...)

  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ---------------------------------------------------------------

  return(monitor)
}
# ===== end of R/monitor_arrange.R =============================================
#' @export
#'
#' @title Return the most common timezone
#'
#' @param monitor \emph{mts_monitor} object.
#'
#' @description Evaluates all timezones in \code{monitor} and returns the
#' most common one. In the case of a tie, the alphabetically first one is
#' returned.
#'
#' @return A valid \code{base::OlsonNames()} timezone.
#'

monitor_bestTimezone <- function(
  monitor = NULL
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)

  # ----- Find most common timezone ---------------------------------------------

  timezoneTable <- sort(table(monitor$meta$timezone), decreasing = TRUE)

  timezone <- names(timezoneTable)[1]

  if ( !timezone %in% OlsonNames() )
    stop(sprintf("timezone '%s' is not a valid OlsonNames() timezone", timezone))

  # ----- Return ---------------------------------------------------------------

  return(timezone)
}
# ===== end of R/monitor_bestTimezone.R ========================================
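# ---- Usage sketch (editorial addition; not part of the package) -------------
# The "most common, ties broken alphabetically" behavior relies on table()
# returning alphabetically sorted names and on sort() being stable:

timezones <- c("America/Denver", "America/Chicago", "America/Denver", "America/Chicago")
sort(table(timezones), decreasing = TRUE)
# Both timezones appear twice; "America/Chicago" sorts first and would be returned.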
#' @export
#'
#' @title Collapse an \code{mts_monitor} object into a single time series
#'
#' @param monitor \emph{mts_monitor} object.
#' @param longitude Longitude of the collapsed time series.
#' @param latitude Latitude of the collapsed time series.
#' @param deviceID Device identifier for the collapsed time series.
#' @param FUN Function used to collapse multiple time series.
#' @param na.rm Logical specifying whether NA values should be ignored when FUN
#' is applied.
#' @param ... additional arguments to be passed on to the \code{apply()} function.
#'
#' @return A \emph{mts_monitor} object representing a single time series. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description
#' Collapses data from all time series in a \code{mts_monitor} into a
#' single time series \emph{mts_monitor} object using the function provided in the
#' \code{FUN} argument. The single time series result will be located at the mean
#' longitude and latitude unless \code{longitude} and \code{latitude}
#' parameters are specified.
#'
#' Any columns of \code{monitor$meta} that are constant across all records will
#' be retained in the returned \emph{mts_monitor} \code{meta} dataframe.
#'
#' The core metadata associated with this location (\emph{e.g.}
#' \code{countryCode, stateCode, timezone, ...}) will be determined from
#' the most common (or average) value found in \code{monitor$meta}. This will be
#' a reasonable assumption for the vast majority of intended use cases where
#' data from multiple instruments in close proximity are averaged together.
#'
#' @note
#' After \code{FUN} is applied, values of \code{+/-Inf} and \code{NaN} are
#' converted to \code{NA}. This is a convenience for the common case where
#' \code{FUN = min/max} or \code{FUN = mean} and some of the time steps have all
#' missing values. See the R documentation for \code{min} for an explanation.
#'
#' @examples
#' library(AirMonitor)
#'
#' # Lane County, Oregon AQSIDs all begin with "41039"
#' LaneCounty <-
#'   NW_Megafires %>%
#'   monitor_filter(stringr::str_detect(AQSID, '^41039')) %>%
#'   monitor_filterDate(20150821, 20150828)
#'
#' # Get min/max for all monitors
#' LaneCounty_min <- monitor_collapse(LaneCounty, deviceID = 'LaneCounty_min', FUN = min)
#' LaneCounty_max <- monitor_collapse(LaneCounty, deviceID = 'LaneCounty_max', FUN = max)
#'
#' # Create plot
#' monitor_timeseriesPlot(
#'   LaneCounty,
#'   shadedNight = TRUE,
#'   main = "Lane County Range of PM2.5 Values"
#' )
#'
#' # Add min/max lines
#' monitor_timeseriesPlot(LaneCounty_max, col = 'red', type = 's', add = TRUE)
#' monitor_timeseriesPlot(LaneCounty_min, col = 'blue', type = 's', add = TRUE)

monitor_collapse <- function(
  monitor,
  longitude = NULL,
  latitude = NULL,
  deviceID = "generatedID",
  FUN = mean,
  na.rm = TRUE,
  ...
) {

  # ----- Validate parameters --------------------------------------------------

  # NOTE: Validation is handled by MazamaTimeSeries::mts_collapse()

  # ----- Call MazamaTimeSeries function ----------------------------------------

  monitor <-
    MazamaTimeSeries::mts_collapse(
      mts = monitor,
      longitude = longitude,
      latitude = latitude,
      deviceID = deviceID,
      FUN = FUN,
      na.rm = na.rm,
      ...
    )

  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ---------------------------------------------------------------

  return(invisible(monitor))
}
# ===== end of R/monitor_collapse.R ============================================
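# ---- Usage sketch (editorial addition; not part of the package) -------------
# Why the +/-Inf cleanup described in the @note matters: with na.rm = TRUE,
# min()/max() over a time step where every value is missing return Inf/-Inf
# (with a warning) rather than NA, so the collapse converts them afterwards.

min(c(NA_real_, NA_real_), na.rm = TRUE)   #  Inf, with a warning
max(c(NA_real_, NA_real_), na.rm = TRUE)   # -Inf, with a warning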
#' @export
#'
#' @title Combine multiple \code{mts_monitor} objects
#'
#' @param ... Any number of valid \emph{mts_monitor} objects or a list of objects.
#' @param replaceMeta Logical specifying whether to allow replacement of metadata
#' when duplicate \code{deviceDeploymentIDs} are encountered.
#' @param overlapStrategy Strategy to use when data found in time series
#' overlaps.
#'
#' @return A combined \code{mts_monitor} object. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description Create a combined \emph{mts_monitor} from any number of \emph{mts_monitor}
#' objects or from a list of \emph{mts_monitor} objects. The resulting \emph{mts_monitor}
#' object will contain all \code{deviceDeploymentIDs} found in any incoming
#' \emph{mts_monitor} and will have a regular time axis covering the entire range
#' of incoming data.
#'
#' If incoming time ranges are temporally non-contiguous, the resulting
#' \emph{mts_monitor} will have gaps filled with \code{NA} values.
#'
#' An error is generated if the incoming \emph{mts_monitor} objects have
#' non-identical metadata for the same \code{deviceDeploymentID} unless
#' \code{replaceMeta = TRUE}.
#'
#' @note Data are combined with a "later is better" sensibility where any
#' data overlaps exist. Incoming \emph{mts_monitor} objects are ordered based on the
#' time stamp of their last record. Any data records found in a "later" \emph{mts_monitor}
#' will overwrite data associated with an "earlier" \emph{mts_monitor}.
#'
#' With \code{overlapStrategy = "replace all"}, any data records found
#' in "later" \emph{mts_monitor} objects are preferentially retained before the "shared"
#' data are finally reordered by ascending \code{datetime}.
#'
#' With \code{overlapStrategy = "replace na"}, only missing values in "earlier"
#' \emph{mts_monitor} objects are replaced with data records from "later" time series.
#'
#' @examples
#' library(AirMonitor)
#'
#' # Two monitors near Pendleton, Oregon
#' #
#' # Use the interactive map to get the deviceDeploymentIDs
#' #   NW_Megafires %>% monitor_leaflet()
#'
#' Pendleton_West <-
#'   NW_Megafires %>%
#'   monitor_select("f187226671d1109a_410590121_03") %>%
#'   monitor_filterDatetime(2015082300, 2015082305)
#'
#' Pendleton_East <-
#'   NW_Megafires %>%
#'   monitor_select("6c906c6d1cf46b53_410597002_02") %>%
#'   monitor_filterDatetime(2015082300, 2015082305)
#'
#' monitor_combine(Pendleton_West, Pendleton_East) %>%
#'   monitor_getData()
#'

monitor_combine <- function(
  ...,
  replaceMeta = FALSE,
  overlapStrategy = c("replace all", "replace na")
) {

  # Accept any number of monitor objects
  monitorList <- list(...)

  # ----- Validate parameters --------------------------------------------------

  if ( length(monitorList) == 0 )
    stop("no 'monitor' arguments provided")

  overlapStrategy <- match.arg(overlapStrategy)

  # ----- Call MazamaTimeSeries function ----------------------------------------

  result <- try({
    monitor <-
      MazamaTimeSeries::mts_combine(
        ...,
        replaceMeta = replaceMeta,
        overlapStrategy = overlapStrategy
      )
  }, silent = TRUE)

  # Handle errors
  if ( "try-error" %in% class(result) ) {
    err_msg <- geterrmessage()
    if ( stringr::str_detect(err_msg, "non-identical metadata") ) {
      stop(paste(
        "device-deployments have non-identical metadata\n\n",
        "Use 'replaceMeta = TRUE' to avoid this error message."
      ))
    } else {
      stop(err_msg)
    }
  }

  # If we didn't stop, we succeeded, so continue.

  # Ensure we have the proper class name
  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ---------------------------------------------------------------

  return(invisible(monitor))
}
# ===== end of R/monitor_combine.R =============================================
#' @title Create daily barplot
#'
#' @description
#' Creates a daily barplot of data from a \emph{mts_monitor} object.
#'
#' Reasonable defaults are chosen for annotations and plot characteristics.
#' Users can override any defaults by passing in parameters accepted by
#' \code{graphics::barplot}.
#'
#' @note
#' The underlying axis for this plot is not a time axis so you cannot use this
#' function to "add" bars on top of a \code{monitor_timeseriesPlot()}. See
#' the \pkg{AirMonitorPlots} package for more flexibility in plotting.
#'
#' @param monitor \emph{mts_monitor} object.
#' @param id \code{deviceDeploymentID} for a single time series found in \code{monitor}.
#' (Optional if \code{monitor} contains only a single time series.)
#' @param add Logical specifying whether to add to the current plot.
#' @param addAQI Logical specifying whether to add visual AQI decorations.
#' @param palette Named color palette to use when adding AQI decorations.
#' @param opacity Opacity to use for bars.
#' @param minHours Minimum number of valid hourly records per day required to
#' calculate statistics. Days with fewer valid records will be assigned \code{NA}.
#' @param dayBoundary Treatment of daylight savings time: "clock" uses daylight
#' savings time as defined in the local timezone, "LST" uses "local standard time"
#' all year round.
#' @param NAAQS Version of NAAQS levels to use. See Note.
#' @param ... Additional arguments to be passed to \code{graphics::barplot()}.
#'
#' @return No return value. This function is called to draw an air quality
#' daily average plot on the active graphics device.
#'
#' @note
#' On February 7, 2024, EPA strengthened the National Ambient Air Quality
#' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans
#' from harmful and costly health impacts, such as heart attacks and premature
#' death. Particle or soot pollution is one of the most dangerous forms of air
#' pollution, and an extensive body of science links it to a range of serious
#' and sometimes deadly illnesses. EPA is setting the level of the primary
#' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to
#' provide increased public health protection, consistent with the available
#' health science.
#' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}.
#'
#' @import graphics
#' @importFrom grDevices adjustcolor
#' @export
#'
#' @examples
#' library(AirMonitor)
#'
#' layout(matrix(seq(2)))
#'
#' Carmel_Valley %>% monitor_dailyBarplot()
#' title("(pre-2024 PM NAAQS)", line = 0)
#'
#' Carmel_Valley %>% monitor_dailyBarplot(NAAQS = "PM2.5_2024")
#' title("(updated PM NAAQS)", line = 0)
#'
#' layout(1)
#'

monitor_dailyBarplot <- function(
  monitor = NULL,
  id = NULL,
  add = FALSE,
  addAQI = FALSE,
  palette = c("EPA", "subdued", "deuteranopia"),
  opacity = NULL,
  minHours = 18,
  dayBoundary = c("clock", "LST"),
  NAAQS = c("PM2.5", "PM2.5_2024"),
  ...
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  palette <- match.arg(palette)
  MazamaCoreUtils::stopIfNull(minHours)
  dayBoundary <- match.arg(dayBoundary)
  NAAQS = match.arg(NAAQS)

  # Subset 'monitor' to a single time series
  if ( nrow(monitor$meta) > 1 ) {
    MazamaCoreUtils::stopIfNull(id)
    if ( !id %in% monitor$meta$deviceDeploymentID )
      stop(sprintf("id = \"%s\" is not found in 'monitor'", id))
    monitor <-
      monitor %>%
      monitor_filter(.data$deviceDeploymentID == !!id)
  }

  monitor <- monitor_dropEmpty(monitor)

  if ( ncol(monitor$data) < 2 )
    stop("no valid data in 'monitor'")

  if ( nrow(monitor$meta) > 1 )
    stop("multiple records found in 'monitor$meta'")

  # ----- Calculate daily average ----------------------------------------------

  daily <- monitor_dailyStatistic(
    monitor = monitor,
    FUN = mean,
    na.rm = TRUE,
    minHours = minHours,
    dayBoundary = dayBoundary
  )

  meta <- daily$meta
  data <- daily$data

  pollutant <- meta$pollutant
  units <- meta$units
  locationName <- meta$locationName
  timezone <- meta$timezone

  localTime <- data$datetime
  dailyAverage <- data %>% dplyr::pull(2)

  if ( all(is.na(dailyAverage)) )
    stop("not enough data to calculate daily averages")

  # ----- argsList -------------------------------------------------------------

  argsList <- list(...)

  # Height and color
  argsList$height <- dailyAverage

  argsList$col <- aqiColors(
    dailyAverage,
    pollutant = pollutant,
    palette = palette,
    na.color = NA,
    NAAQS = NAAQS
  )

  # X axis labeling is handled after the plot
  # NOTE: For mathematical notation in R see:
  # NOTE: https://magnusmetz.github.io/2013/04/mathematical-annotation-in-r/

  # Y axis labeling
  if ( !("ylab" %in% names(argsList)) ) {
    if ( meta$units == "UG/M3") {
      # Most common case
      argsList$ylab <- expression(paste(PM[2.5] * " (", mu, "g/m"^3, ")"))
    } else {
      argsList$ylab <- sprintf("%s (%s)", meta$pollutant[1], meta$units[1])
    }
  }

  # Additional small tweaks
  argsList$las <- ifelse("las" %in% names(argsList), argsList$las, 1)

  # Title
  argsList$main <- ifelse(
    "main" %in% names(argsList),
    argsList$main,
    sprintf("%s -- Daily Average %s", locationName, pollutant)
  )

  # Subtitle
  argsList$sub <- ifelse(
    "sub" %in% names(argsList),
    argsList$sub,
    strftime(localTime[1], format = "%Y", tz = timezone)
  )

  # Explicitly declare defaults for use in creating the x axis
  argsList$axes <- ifelse("axes" %in% names(argsList), argsList$axes, TRUE)
  argsList$space <- ifelse("space" %in% names(argsList), argsList$space, 0.2)
  argsList$cex.names <- ifelse("cex.names" %in% names(argsList), argsList$cex.names, par("cex.axis") * 0.8)

  # ----- Plotting -------------------------------------------------------------

  if ( addAQI ) {
    do.call(barplot, argsList)
    addAQIStackedBar(pollutant = pollutant, palette = palette, NAAQS = NAAQS)
    addAQILines(pollutant = pollutant, palette = palette, NAAQS = NAAQS)
    argsList$add <- TRUE
  }

  do.call(barplot, argsList)

  # Add default X axis
  if ( argsList$axes && !("names.arg" %in% names(argsList)) ) {

    barCount <- length(argsList$height)
    allIndices <- 1:barCount
    allLabels <- strftime(localTime, "%b %d", tz = timezone)

    maxLabelCount <- 16
    stride <- round(barCount / maxLabelCount)

    if ( stride == 0 ) {
      indices <- allIndices
      labels <- allLabels
    } else {
      indices <- allIndices[seq(1, barCount, by = stride)]
      labels <- allLabels[seq(1, barCount, by = stride)]
    }

    labels_x <- (indices - 0.5) + (indices * argsList$space)
    labels_y <- -0.06 * (par("usr")[4] - par("usr")[3])

    text(labels_x, labels_y, labels, cex = argsList$cex.names, xpd = NA)

    # Now add tick marks
    axis(1, at = labels_x, labels = FALSE, lwd = 0, lwd.ticks = 1)

  }

}

# ===== DEBUG ==================================================================

if ( FALSE ) {

  monitor = Carmel_Valley
  id = NULL
  add = FALSE
  addAQI = FALSE
  palette = "EPA"
  opacity = NULL
  minHours = 18
  dayBoundary = "clock"

}
# ===== end of R/monitor_dailyBarplot.R ========================================
#' @export
#'
#' @title Create daily statistics for each monitor in an \emph{mts_monitor} object
#'
#' @param monitor \emph{mts_monitor} object.
#' @param FUN Function used to create daily statistics.
#' @param na.rm Value passed on to \code{FUN}. If \code{FUN} does not use
#' \code{na.rm}, this should be set to \code{NULL}.
#' @param minHours Minimum number of valid hourly records per day required to
#' calculate statistics. Days with fewer valid records will be assigned \code{NA}.
#' @param dayBoundary Treatment of daylight savings time: "clock" uses daylight
#' savings time as defined in the local timezone, "LST" uses "local standard time"
#' all year round.
#' @param ... Additional arguments to be passed to \code{FUN}.
#'
#' @return A \emph{mts_monitor} object containing daily statistical summaries. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description
#' Daily statistics are calculated for each time series in \code{monitor$data}
#' using \code{FUN} and any arguments passed in \code{...}.
#'
#' Because the returned \emph{mts_monitor} object is defined on a daily axis in a
#' specific time zone, it is important that the incoming \code{monitor} contain
#' timeseries associated with a single time zone.
#'
#' @note
#' When \code{dayBoundary = "clock"}, the returned \code{monitor$data$datetime}
#' time axis will be defined in the local timezone (not "UTC") with days defined
#' by midnight as it appears on a clock in that timezone. The transition from
#' DST to standard time will result in a 23 hour day and standard to DST in a
#' 25 hour day.
#'
#' When \code{dayBoundary = "LST"}, the returned \code{monitor$data$datetime}
#' time axis will be defined in "UTC" with times as they \emph{appear} in standard
#' time in the local timezone. These days will be one hour off from clock
#' time during DST but every day will consist of 24 hours.
#'
#' @examples
#' library(AirMonitor)
#'
#' Carmel_Valley %>%
#'   monitor_dailyStatistic(max) %>%
#'   monitor_getData()
#'
#' Carmel_Valley %>%
#'   monitor_dailyStatistic(min) %>%
#'   monitor_getData()

monitor_dailyStatistic <- function(
  monitor = NULL,
  FUN = mean,
  na.rm = TRUE,
  minHours = 18,
  dayBoundary = c("clock", "LST"),
  ...
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  MazamaCoreUtils::stopIfNull(FUN)
  MazamaCoreUtils::stopIfNull(minHours)
  dayBoundary <- match.arg(dayBoundary)

  if ( length(unique(monitor$meta$timezone)) > 1 )
    stop("'monitor' has multiple timezones")

  timezone <- unique(monitor$meta$timezone)

  # ----- Create LST time axis --------------------------------------------------

  if ( dayBoundary == "LST" ) {

    # NOTE: There is no recognized timezone where LST exists so we have to be clever.
    # NOTE: The EPA defines regulatory daily averages as midnight-to-midnight
    # NOTE: in local-standard-time-all-year. Here we calculate LST times
    # NOTE: but move them to the UTC timezone where no daylight savings
    # NOTE: adjustment will be applied by the lubridate package.

    # Calculate the Local Standard Time offset
    Christmas_UTC <- lubridate::ymd_h("2019-12-25 00", tz = "UTC")
    Christmas_localTime <- lubridate::with_tz(Christmas_UTC, tzone = timezone)
    Christmas_localTime_UTC <- lubridate::force_tz(Christmas_localTime, tzone = "UTC")
    lst_offset <- as.numeric(difftime(Christmas_localTime_UTC, Christmas_UTC, units = "hours"))

    localStandardTime_UTC <-
      lubridate::with_tz(monitor$data$datetime, tzone = "UTC") +
      lst_offset * lubridate::dhours(1)

    monitor$data$datetime <- localStandardTime_UTC

  }

  # ----- Create daily statistic ------------------------------------------------

  # MazamaTimeSeries::mts_summarize() function signature:
  #
  # mts_summarize <- function(
  #   mts,
  #   timezone = NULL,
  #   unit = c("day", "week", "month", "year"),
  #   FUN = NULL,
  #   ...,
  #   minCount = NULL
  # ) {

  argsList <- list(...)

  if ( !is.null(na.rm) ) argsList$na.rm <- na.rm

  if ( dayBoundary == "LST" ) argsList$timezone = "UTC"

  argsList$mts <- monitor
  argsList$unit <- "day"
  argsList$FUN <- FUN
  argsList$minCount <- minHours

  daily <- do.call(MazamaTimeSeries::mts_summarize, argsList)

  # ----- Return ---------------------------------------------------------------

  return(daily)
}
# ===== end of R/monitor_dailyStatistic.R ======================================
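# ---- Usage sketch (editorial addition; not part of the package) -------------
# The "Christmas trick" above, evaluated standalone for America/Los_Angeles.
# Late December falls in standard (non-DST) time in northern-hemisphere US
# timezones, so the offset computed this way is the Local Standard Time offset:

timezone <- "America/Los_Angeles"
Christmas_UTC <- lubridate::ymd_h("2019-12-25 00", tz = "UTC")
Christmas_localTime <- lubridate::with_tz(Christmas_UTC, tzone = timezone)
Christmas_localTime_UTC <- lubridate::force_tz(Christmas_localTime, tzone = "UTC")
as.numeric(difftime(Christmas_localTime_UTC, Christmas_UTC, units = "hours"))   # -8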
#' @export
#'
#' @title Daily counts of values at or above a threshold
#'
#' @param monitor \emph{mts_monitor} object.
#' @param threshold AQI level name (e.g. \code{"unhealthy"}) or numerical
#' threshold at and above which a measurement is counted.
#' @param na.rm Logical value indicating whether NA values should be ignored.
#' @param minHours Minimum number of valid hourly records per day required to
#' calculate statistics. Days with fewer valid records will be assigned \code{NA}.
#' @param dayBoundary Treatment of daylight savings time: "clock" uses daylight
#' savings time as defined in the local timezone, "LST" uses "local standard time"
#' all year round.
#' @param NAAQS Version of NAAQS levels to use. See Note.
#'
#' @return A \emph{mts_monitor} object containing daily counts of hours at or above
#' a threshold value. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description
#' Calculates the number of hours per day each time series in \code{monitor} was
#' at or above a given threshold.
#'
#' Because the returned \emph{mts_monitor} object is defined on a daily axis in a
#' specific time zone, it is important that the incoming \code{monitor} contain
#' only timeseries within a single time zone.
#'
#' @note
#' When \code{dayBoundary = "clock"}, the returned \code{monitor$data$datetime}
#' time axis will be defined in the local timezone (not "UTC") with days defined
#' by midnight as it appears on a clock in that timezone. The transition from
#' DST to standard time will result in a 23 hour day and standard to DST in a
#' 25 hour day.
#'
#' When \code{dayBoundary = "LST"}, the returned \code{monitor$data$datetime}
#' time axis will be defined in "UTC" with times as they \emph{appear} in standard
#' time in the local timezone. These days will be one hour off from clock
#' time during DST but every day will consist of 24 hours.
#'
#' @note
#' On February 7, 2024, EPA strengthened the National Ambient Air Quality
#' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans
#' from harmful and costly health impacts, such as heart attacks and premature
#' death. Particle or soot pollution is one of the most dangerous forms of air
#' pollution, and an extensive body of science links it to a range of serious
#' and sometimes deadly illnesses. EPA is setting the level of the primary
#' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to
#' provide increased public health protection, consistent with the available
#' health science.
#' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}.
#'
#' @examples
#' library(AirMonitor)
#'
#' # Hours at MODERATE or above
#' Carmel_Valley %>%
#'   monitor_dailyThreshold("Moderate") %>%
#'   monitor_getData()
#'
#' # Hours at MODERATE or above with the 2024 updated NAAQS
#' Carmel_Valley %>%
#'   monitor_dailyThreshold("Moderate", NAAQS = "PM2.5_2024") %>%
#'   monitor_getData()
#'
#' # Hours at UNHEALTHY or above
#' Carmel_Valley %>%
#'   monitor_dailyThreshold("Unhealthy") %>%
#'   monitor_getData()
#'

monitor_dailyThreshold <- function(
  monitor = NULL,
  threshold = NULL,
  na.rm = TRUE,
  minHours = 18,
  dayBoundary = c("clock", "LST"),
  NAAQS = c("PM2.5", "PM2.5_2024")
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  MazamaCoreUtils::stopIfNull(threshold)
  na.rm <- MazamaCoreUtils::setIfNull(na.rm, TRUE)
  MazamaCoreUtils::stopIfNull(minHours)
  dayBoundary <- match.arg(dayBoundary)
  NAAQS = match.arg(NAAQS)

  if ( length(unique(monitor$meta$timezone)) > 1 )
    stop("'monitor' has multiple timezones")

  # Check if official AQI level name is provided
  if ( typeof(threshold) == "character" ) {

    if ( !tolower(threshold) %in% tolower(US_AQI$names_eng) )
      stop(sprintf("'%s' is not a recognized AQI level. Please use one from US_AQI$names_eng.", threshold))

    breaks <- US_AQI$breaks_PM2.5

    # Handle the added NAAQS argument
    if ( NAAQS == "PM2.5_2024" ) {
      breaks <- US_AQI$breaks_PM2.5_2024
    }

    breaks[1] <- 0
    index <- which(tolower(US_AQI$names_eng) == tolower(threshold))
    threshold <- breaks[index]

  }

  # ----- Create threshold count ------------------------------------------------

  # Threshold function
  myFUN <- function(x, threshold, na.rm = TRUE) {
    return(sum(x >= threshold, na.rm = na.rm))
  }

  # Use monitor_dailyStatistic to calculate counts
  overThreshold <- monitor_dailyStatistic(
    monitor = monitor,
    FUN = myFUN,
    na.rm = na.rm,
    threshold = threshold,
    minHours = minHours,
    dayBoundary = dayBoundary
  )

  # ----- Return ---------------------------------------------------------------

  return(overThreshold)
}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_dailyThreshold.R
#' @export #' @importFrom rlang .data #' #' @title Drop device deployments with all missing data #' #' @param monitor \emph{mts_monitor} object. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description The incoming \emph{mts_monitor} object is subset to retain #' only time series with valid data. #' #' @return A subset of the incoming \code{mts_monitor}. (A list with #' \code{meta} and \code{data} dataframes.) #' monitor_dropEmpty <- function( monitor ) { # ----- Validate parameters -------------------------------------------------- # A little involved to catch the case where the user forgets to pass in 'monitor' result <- try({ if ( !monitor_isValid(monitor) ) stop("First argument is not a valid 'mts_monitor' object.") }, silent = TRUE) if ( class(result) %in% "try-error" ) { err_msg <- geterrmessage() if ( stringr::str_detect(err_msg, "object .* not found") ) { stop(paste0(err_msg, "\n(Did you forget to pass in the 'monitor' object?)")) } } if ( monitor_isEmpty(monitor) ) stop("Parameter 'monitor' has no data.") # Remove any duplicate data records monitor <- monitor_distinct(monitor) # ----- Select monitors with data -------------------------------------------- any_finite <- function(x) any(is.finite(x)) # https://stackoverflow.com/questions/62459736/how-do-i-use-tidyselect-where-in-a-custom-package monitor$data <- monitor$data %>% dplyr::select(tidyselect::vars_select_helpers$where(any_finite)) ids <- names(monitor$data)[-1] monitor <- monitor %>% MazamaTimeSeries::mts_filterMeta(.data$deviceDeploymentID %in% ids) class(monitor) <- union("mts_monitor", class(monitor)) # ----- Return --------------------------------------------------------------- return(monitor) }
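# ===== Sketch =================================================================

# A minimal sketch (not run) of the column-selection idea used above: keep
# only columns containing at least one finite value. The tibble is
# hypothetical.

if ( FALSE ) {

  df <- dplyr::tibble(
    datetime = Sys.time() + 0:2 * 3600,
    a = c(1, 2, NA),
    b = c(NA_real_, NA_real_, NA_real_)
  )

  any_finite <- function(x) any(is.finite(x))

  # 'b' is dropped; 'datetime' survives because valid POSIXct values are finite
  df %>%
    dplyr::select(tidyselect::vars_select_helpers$where(any_finite))

}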
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_dropEmpty.R
#' @export #' #' @title Create Interactive Time Series Plot #' #' @param monitor \emph{mts_monitor} object. #' @param title Title text. #' @param ylab Title for the y axis #' @param rollPeriod Rolling mean to be applied to the data. #' @param showLegend Logical to toggle display of the legend. #' #' @description This function creates interactive graphs that will be displayed #' in RStudio's 'Viewer' tab. #' #' @return Initiates the interactive dygraph plot in RStudio's 'Viewer' tab. #' #' @examples #' \dontrun{ #' library(AirMonitor) #' #' # Multiple monitors #' Camp_Fire %>% #' monitor_filter(countyName == "Alameda") %>% #' monitor_dygraph() #' } monitor_dygraph <- function( monitor, title = "title", ylab = "PM2.5 Concentration", rollPeriod = 1, showLegend = TRUE ) { # ----- Validate parameters -------------------------------------------------- monitor_check(monitor) if ( monitor_isEmpty(monitor) ) stop("monitor object has no data") # Set timezone timezone <- monitor_bestTimezone(monitor) # Simplify access to variables datetime <- monitor$data$datetime # Create an xts from all data columns except the first which is 'datetime' timeseriesData <- xts::xts(monitor$data[, -1], datetime, tzone = timezone) names(timeseriesData) <- monitor$meta$locationName show <- ifelse(showLegend, "always", "never") # Create dygraph dygraphs::dygraph(timeseriesData, main = title, ylab = ylab) %>% dygraphs::dyOptions(useDataTimezone = TRUE) %>% # Always show local time dygraphs::dyLegend(show = show, width = 250, labelsSeparateLines = TRUE) %>% ###dygraphs::dyRangeSelector(dateWindow = dateWindow) %>% dygraphs::dyRoller(rollPeriod = rollPeriod) }
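# ===== Sketch =================================================================

# A minimal sketch (not run) of the xts + dygraphs pattern used above, with a
# hypothetical single-column data frame standing in for 'monitor$data'.

if ( FALSE ) {

  datetime <- seq(
    ISOdatetime(2018, 11, 15, 0, 0, 0, tz = "UTC"),
    by = "hour",
    length.out = 24
  )
  data <- data.frame(datetime = datetime, site_1 = rnorm(24, 20, 5))

  timeseriesData <- xts::xts(data[, -1, drop = FALSE], datetime, tzone = "UTC")

  dygraphs::dygraph(timeseriesData, main = "Example") %>%
    dygraphs::dyOptions(useDataTimezone = TRUE) %>%
    dygraphs::dyRoller(rollPeriod = 3)

}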
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_dygraph.R
#' @export
#'
#' @title Filter by distance from a target location
#'
#' @param monitor \emph{mts_monitor} object.
#' @param longitude Target longitude.
#' @param latitude Target latitude.
#' @param radius Distance (m) of radius defining a target area.
#' @param count Number of time series to return.
#' @param addToMeta Logical specifying whether to add \code{distanceFromTarget}
#' as a field in \code{monitor$meta}.
#'
#' @return A \emph{mts_monitor} object with monitors near a location.
#'
#' @description Filters the \code{monitor} argument to include only those time series
#' located within a certain radius of a target location. If no time series fall
#' within the specified \code{radius}, an empty \emph{mts_monitor} object will
#' be returned.
#'
#' When \code{count} is used, a \emph{mts_monitor} object is
#' created containing \strong{up to} \code{count} time series, ordered by
#' increasing distance from the target location. Note that the number
#' of monitors returned may be less than the specified \code{count} value if
#' fewer than \code{count} time series are found within the target area.
#'
#' @note When \code{addToMeta = TRUE}, the returned \emph{mts_monitor} will have
#' an extra \code{distanceFromTarget} field in \code{monitor$meta}. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @examples
#' library(AirMonitor)
#'
#' # Walla Walla
#' longitude <- -118.330278
#' latitude <- 46.065
#'
#' Walla_Walla_monitors <-
#'   NW_Megafires %>%
#'   monitor_filterByDistance(
#'     longitude = -118.330,
#'     latitude = 46.065,
#'     radius = 50000,     # 50 km
#'     addToMeta = TRUE
#'   )
#'
#' Walla_Walla_monitors %>%
#'   monitor_getMeta() %>%
#'   dplyr::select(c("locationName", "distanceFromTarget"))
#'
monitor_filterByDistance <- function(
  monitor,
  longitude = NULL,
  latitude = NULL,
  radius = 50,
  count = NULL,
  addToMeta = FALSE
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  MazamaCoreUtils::validateLonLat(longitude, latitude)

  if ( monitor_isEmpty(monitor) )
    stop("monitor is empty")

  # ----- Filter by distance ----------------------------------------------------

  # * Create distance mask -----

  distance <- monitor_getDistance(monitor, longitude, latitude)
  distanceMask <- distance <= radius

  # * Return if empty -----

  if ( !(any(distanceMask)) ) {
    monitor <- monitor %>% monitor_filter(.data$deviceDeploymentID == "DONT FIND ME")
    return(monitor)
  }

  # * Apply distance filter -----

  ids <- monitor$meta$deviceDeploymentID[distanceMask]
  monitor <- monitor %>% monitor_select(ids)

  # Update the distance vector for the new subset of monitors
  distance <- distance[distanceMask]

  if ( addToMeta )
    monitor$meta$distanceFromTarget <- round(as.numeric(distance))

  # * Handle count -----

  if ( is.null(count) )
    count <- nrow(monitor$meta)

  # NOTE:  When using count, return monitors in distance order and make sure
  # NOTE:  that the distances are also subset and returned in distance order.

  # Find the 'count' closest monitors
  closestIndices <- order(distance)[1:count]

  # Reorder
  ids <- ids[closestIndices]
  monitor <- monitor %>% monitor_select(ids)

  # ----- Return ----------------------------------------------------------------

  return(monitor)

}
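# ===== Sketch =================================================================

# A minimal sketch (not run) of the mask-then-order logic above, using a
# hypothetical named distance vector like the one monitor_getDistance()
# returns.

if ( FALSE ) {

  distance <- c(id_a = 12000, id_b = 48000, id_c = 91000)  # meters
  radius <- 50000

  distanceMask <- distance <= radius   # id_a and id_b are within the radius
  distance <- distance[distanceMask]

  count <- 1
  closestIndices <- order(distance)[1:count]
  names(distance)[closestIndices]      # "id_a"

}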
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_filterByDistance.R
#' @export #' #' @title Date filtering for \emph{mts_monitor} objects #' #' @param monitor \emph{mts_monitor} object. #' @param startdate Desired start datetime (ISO 8601). #' @param enddate Desired end datetime (ISO 8601). #' @param timezone Olson timezone used to interpret dates. #' @param unit Units used to determine time at end-of-day. #' @param ceilingStart Logical instruction to apply #' \code{\link[lubridate]{ceiling_date}} to the \code{startdate} rather than #' \code{\link[lubridate]{floor_date}} #' @param ceilingEnd Logical instruction to apply #' \code{\link[lubridate]{ceiling_date}} to the \code{enddate} rather than #' \code{\link[lubridate]{floor_date}} #' #' @description Subsets a \emph{mts_monitor} object by date. This function #' always filters to day-boundaries. For sub-day filtering, use #' \code{monitor_filterDatetime()}. #' #' Dates can be anything that is understood by \code{MazamaCoreUtils::parseDatetime()} #' including either of the following recommended formats: #' #' \itemize{ #' \item{\code{"YYYYmmdd"}} #' \item{\code{"YYYY-mm-dd"}} #' } #' #' If either \code{startdate} or \code{enddate} is not provided, the start/end of #' the \emph{mts_monitor} time axis will be used. #' #' Timezone determination precedence assumes that if you are passing in #' \code{POSIXct} values then you know what you are doing. #' #' \enumerate{ #' \item{get timezone from \code{startdate} if it is \code{POSIXct}} #' \item{use passed in \code{timezone}} #' \item{get timezone from \code{mts_monitor}} #' } #' #' @note The returned data will run from the beginning of \code{startdate} until #' the \strong{beginning} of \code{enddate} -- \emph{i.e.} no values associated #' with \code{enddate} will be returned. The exception being when #' \code{enddate} is less than 24 hours after \code{startdate}. In that case, a #' single day is returned. #' #' @return A subset of the given \emph{mts_monitor} object. (A list with #' \code{meta} and \code{data} dataframes.) 
#'
#' @seealso \link{monitor_filterDatetime}
#' @seealso \link{monitor_filterMeta}
#'
#' @examples
#' library(AirMonitor)
#'
#' Camp_Fire %>%
#'   monitor_timeRange()
#'
#' # Day boundaries returned in "UTC"
#' Camp_Fire %>%
#'   monitor_filterDate(
#'     "2018-11-15",
#'     "2018-11-22",
#'     timezone = "America/Los_Angeles"
#'   ) %>%
#'   monitor_timeRange()
#'
#' # Day boundaries returned in "America/Los_Angeles"
#' Camp_Fire %>%
#'   monitor_filterDate(
#'     "20181115",
#'     "20181122",
#'     timezone = "America/Los_Angeles"
#'   ) %>%
#'   monitor_timeRange(
#'     timezone = "America/Los_Angeles"
#'   )
#'
monitor_filterDate <- function(
  monitor = NULL,
  startdate = NULL,
  enddate = NULL,
  timezone = NULL,
  unit = "sec",
  ceilingStart = FALSE,
  ceilingEnd = FALSE
) {

  # ----- Validate parameters --------------------------------------------------

  # A little involved to catch the case where the user forgets to pass in 'monitor'
  result <- try({
    if ( !monitor_isValid(monitor) )
      stop("First argument is not a valid 'mts_monitor' object.")
  }, silent = TRUE)

  if ( class(result) %in% "try-error" ) {
    err_msg <- geterrmessage()
    if ( stringr::str_detect(err_msg, "object .* not found") ) {
      stop(paste0(err_msg, "\n(Did you forget to pass in the 'monitor' object?)"))
    }
  }

  if ( monitor_isEmpty(monitor) )
    stop("Parameter 'monitor' has no data.")

  # Handle missing times
  if ( is.null(startdate) ) startdate <- min(monitor$data$datetime, na.rm = TRUE)
  if ( is.null(enddate) ) enddate <- max(monitor$data$datetime, na.rm = TRUE)

  # Deal with missing timezones
  if ( is.null(timezone) ) {
    if ( length(unique(monitor$meta$timezone)) == 1 ) {
      timezone <- monitor$meta$timezone[1]
    } else {
      if ( lubridate::is.POSIXct(startdate) ) {
        timezone <- lubridate::tz(startdate)
      } else {
        message("Multiple timezones found and none specified. Using 'UTC'.")
        timezone <- "UTC"
      }
    }
  }

  # ----- Call MazamaTimeSeries function ----------------------------------------

  monitor <-
    MazamaTimeSeries::mts_filterDate(
      mts = monitor,
      startdate = startdate,
      enddate = enddate,
      timezone = timezone,
      unit = unit,
      ceilingStart = ceilingStart,
      ceilingEnd = ceilingEnd
    )

  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ----------------------------------------------------------------

  return(monitor)

}
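# ===== Sketch =================================================================

# A minimal sketch (not run) of the date parsing this function relies on:
# MazamaCoreUtils::parseDatetime() understands both recommended formats.

if ( FALSE ) {

  MazamaCoreUtils::parseDatetime("20181115", timezone = "America/Los_Angeles")
  MazamaCoreUtils::parseDatetime("2018-11-15", timezone = "America/Los_Angeles")

}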
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_filterDate.R
#' @export #' #' @title Datetime filtering for \code{mts_monitor} objects #' #' @param monitor \emph{mts_monitor} object. #' @param startdate Desired start datetime (ISO 8601). #' @param enddate Desired end datetime (ISO 8601). #' @param timezone Olson timezone used to interpret \code{startdate} and \code{enddate}. #' @param unit Units used to determine time at end-of-day. #' @param ceilingStart Logical specifying application of #' \code{\link[lubridate]{ceiling_date}} to the \code{startdate} rather than #' \code{\link[lubridate]{floor_date}} #' @param ceilingEnd Logical specifying application of #' \code{\link[lubridate]{ceiling_date}} to the \code{enddate} rather than #' \code{\link[lubridate]{floor_date}} #' #' @description Subsets a \emph{mts_monitor} object by datetime. This function #' allows for sub-day filtering as opposed to \code{monitor_filterDate()} which #' always filters to day-boundaries. #' #' Datetimes can be anything that is understood by #' \code{MazamaCoreUtils::parseDatetime()}. For non-\code{POSIXct} values, #' the recommended format is \code{"YYYY-mm-dd HH:MM:SS"}. #' #' If either \code{startdate} or \code{enddate} is not provided, the start/end of #' the \emph{mts_monitor} time axis will be used. #' #' Timezone determination precedence assumes that if you are passing in #' \code{POSIXct} values then you know what you are doing. #' #' \enumerate{ #' \item{get timezone from \code{startdate} if it is \code{POSIXct}} #' \item{use passed in \code{timezone}} #' \item{get timezone from \code{mts_monitor}} #' } #' #' @return A subset of the given \emph{mts_monitor} object. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @seealso \link{monitor_filterDate} #' @seealso \link{monitor_filterMeta} #' #' @examples #' library(AirMonitor) #' #' Camp_Fire %>% #' monitor_timeRange() #' #' # Reduced time range returned in "UTC" #' Camp_Fire %>% #' monitor_filterDatetime( #' "2018-11-15 02:00:00", #' "2018-11-22 06:00:00", #' timezone = "America/Los_Angeles" #' ) %>% #' monitor_timeRange() #' #' # Reduced time range returned in "America/Los_Angeles" #' Camp_Fire %>% #' monitor_filterDatetime( #' "2018111502", #' "2018112206", #' timezone = "America/Los_Angeles" #' ) %>% #' monitor_timeRange( #' timezone = "America/Los_Angeles" #' ) #' monitor_filterDatetime <- function( monitor = NULL, startdate = NULL, enddate = NULL, timezone = NULL, unit = "sec", ceilingStart = FALSE, ceilingEnd = FALSE ) { # ----- Validate parameters -------------------------------------------------- # A little involved to catch the case where the user forgets to pass in 'monitor' result <- try({ if ( !monitor_isValid(monitor) ) stop("First argument is not a valid 'mts_monitor' object.") }, silent = TRUE) if ( class(result) %in% "try-error" ) { err_msg <- geterrmessage() if ( stringr::str_detect(err_msg, "object .* not found") ) { stop(paste0(err_msg, "\n(Did you forget to pass in the 'monitor' object?)")) } } if ( monitor_isEmpty(monitor) ) stop("Parameter 'monitor' has no data.") # Handle missing times if ( is.null(startdate) ) startdate <- min(monitor$data$datetime, na.rm = TRUE) if ( is.null(enddate) ) enddate <- max(monitor$data$datetime, na.rm = TRUE) # Deal with missing timezones if ( is.null(timezone) ) { if ( length(unique(monitor$meta$timezone)) == 1 ) { timezone <- monitor$meta$timezone[1] } else { if ( lubridate::is.POSIXct(startdate) ) { timezone <- lubridate::tz(startdate) } else { message("Multiple timezones found and none specified. 
Using 'UTC'.") timezone <- "UTC" } } } # ----- Call MazamaTimeSeries function --------------------------------------- monitor <- MazamaTimeSeries::mts_filterDatetime( mts = monitor, startdate = startdate, enddate = enddate, timezone = timezone, unit = unit, ceilingStart = ceilingStart, ceilingEnd = ceilingEnd ) class(monitor) <- union("mts_monitor", class(monitor)) # ----- Return --------------------------------------------------------------- return(monitor) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_filterDatetime.R
#' @export
#'
#' @title General purpose metadata filtering for \emph{mts_monitor} objects
#'
#' @param monitor \emph{mts_monitor} object.
#' @param ... Logical predicates defined in terms of the variables in
#' \code{monitor$meta}.
#'
#' @description A generalized metadata filter for \emph{mts_monitor} objects to
#' choose cases where conditions are true. Multiple conditions are
#' combined with \code{&} or separated by a comma. Only rows where the condition
#' evaluates to TRUE are kept. Rows of \code{monitor$meta} where the condition
#' evaluates to \code{NA} are dropped. Associated columns of \code{monitor$data}
#' are also dropped for internal consistency in the returned \emph{mts_monitor}
#' object.
#'
#' \code{monitor_filter()} is an alias for \code{monitor_filterMeta()}.
#'
#' @note Filtering is done on variables in \code{monitor$meta}.
#'
#' @return A subset of the incoming \code{mts_monitor}. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @seealso \link{monitor_filterDate}
#' @seealso \link{monitor_filterDatetime}
#'
#' @examples
#' library(AirMonitor)
#'
#' # Filter based on countyName field
#' Camp_Fire %>%
#'   monitor_filter(countyName == "Alameda") %>%
#'   monitor_timeseriesPlot(main = "All Alameda County Monitors")
#'
#' # Filter combining two fields
#' Camp_Fire %>%
#'   monitor_filter(latitude > 39.5, longitude > -121.5) %>%
#'   monitor_pull("locationName")
#'
#' # Filter using string matching
#' Camp_Fire %>%
#'   monitor_filter(stringr::str_detect(locationName, "^San")) %>%
#'   monitor_pull("locationName")
#'
monitor_filterMeta <- function(
  monitor,
  ...
) {

  # ----- Validate parameters --------------------------------------------------

  # A little involved to catch the case where the user forgets to pass in 'monitor'
  result <- try({
    if ( !monitor_isValid(monitor) )
      stop("First argument is not a valid 'mts_monitor' object.")
  }, silent = TRUE)

  if ( class(result) %in% "try-error" ) {
    err_msg <- geterrmessage()
    if ( stringr::str_detect(err_msg, "object .* not found") ) {
      stop(paste0(err_msg, "\n(Did you forget to pass in the 'monitor' object?)"))
    }
  }

  # ----- Call MazamaTimeSeries function ----------------------------------------

  monitor <- MazamaTimeSeries::mts_filterMeta(monitor, ...)

  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ----------------------------------------------------------------

  return(monitor)

}

# ===== Alias ==================================================================

# TODO:  Add examples to the alias

#' @rdname monitor_filterMeta
#' @export
monitor_filter <- monitor_filterMeta
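# ===== Sketch =================================================================

# A minimal sketch (not run) showing why rows evaluating to NA are dropped:
# dplyr::filter() keeps only rows where the condition is TRUE. The tibble is
# hypothetical.

if ( FALSE ) {

  meta <- dplyr::tibble(
    deviceDeploymentID = c("a", "b", "c"),
    countyName = c("Alameda", NA, "Napa")
  )

  meta %>% dplyr::filter(countyName == "Alameda")  # row "b" (NA) is dropped

}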
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_filterMeta.R
#' @export #' @importFrom dplyr all_of #' #' @title Convert a ws_monitor object from the PWFSLSmoke package #' #' @param ws_monitor \emph{ws_monitor} object. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @return A \emph{mts_monitor} object. #' #' @description A \pkg{PWFSLSmoke} package \emph{ws_monitor} object is enhanced #' and modified so that it becomes a valid \emph{mts_monitor} object. This is #' a lossless operation and can be reversed with \code{monitor_toPWFSLSmoke()}. #' #' monitor_fromPWFSLSmoke <- function( ws_monitor = NULL ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(ws_monitor) # ----- Create meta ---------------------------------------------------------- # > print(names(ws_monitor$meta), width = 75) # [1] "monitorID" "longitude" # [3] "latitude" "elevation" # [5] "timezone" "countryCode" # [7] "stateCode" "siteName" # [9] "agencyName" "countyName" # [11] "msaName" "monitorType" # [13] "siteID" "instrumentID" # [15] "aqsID" "pwfslID" # [17] "pwfslDataIngestSource" "telemetryAggregator" # [19] "telemetryUnitID" commonColumns <- intersect(names(ws_monitor$meta), coreMetadataNames) # > print(commonColumns, width = 75) # [1] "longitude" "latitude" "elevation" "timezone" "countryCode" # [6] "stateCode" "countyName" missingColumns <- setdiff(coreMetadataNames, names(ws_monitor$meta)) # > print(missingColumns, width = 75) # [1] "deviceDeploymentID" "deviceID" # [3] "deviceType" "deviceDescription" # [5] "deviceExtra" "pollutant" # [7] "units" "dataIngestSource" # [9] "dataIngestURL" "dataIngestUnitID" # [11] "dataIngestExtra" "dataIngestDescription" # [13] "locationID" "locationName" # [15] "houseNumber" "street" # [17] "city" "zip" # [19] "AQSID" "fullAQSID" meta <- ws_monitor$meta %>% # NOTE: ws_monitor$meta$instrumentID has no useful information # Rename some columns dplyr::rename( deviceID = .data$monitorID, deviceType = .data$monitorType, locationName = .data$siteName, AQSID = .data$aqsID, dataIngestSource = .data$pwfslDataIngestSource, dataIngestUnitID = .data$telemetryUnitID ) %>% # Other core monitoring metadata dplyr::mutate( fullAQSID = .data$AQSID, locationID = MazamaCoreUtils::createLocationID(.data$longitude, .data$latitude), pollutant = "PM2.5", units = "UG/M3", deviceDescription = as.character(NA), deviceExtra = as.character(NA), dataIngestURL = as.character(NA), dataIngestExtra = as.character(NA), dataIngestDescription = as.character(NA), deploymentType = as.character(NA) ) # Fix deviceID, deploymentType: # for AirNow data, deviceID = monitorID minus the "_01" # for AIRSIS/WRCC, deviceID = unitID mask <- (meta$pwfslDataIngestSource == "AIRNOW") meta$deviceID[mask] <- stringr::str_replace(meta$deviceID[mask], "_01$", "") # NOTE: PWFSLSmoke AirNow data is assumed to be "Permanent" meta$deploymentType[mask] = "Permanent" mask <- (meta$pwfslDataIngestSource == "AIRSIS") meta$deviceID[mask] <- meta$instrumentID[mask] meta$deploymentType[mask] = "Temporary" mask <- (meta$pwfslDataIngestSource == "WRCC") meta$deviceID[mask] <- meta$instrumentID[mask] meta$deploymentType[mask] = "Temporary" # Other core location metadata meta <- meta %>% dplyr::mutate( deviceDeploymentID = paste0(.data$locationID, "_", .data$deviceID), houseNumber = as.character(NA), street = as.character(NA), city = as.character(NA), zip = as.character(NA) ) # TODO: We need an example object with the updated (2022-08-10) metadata # TODO: so we can pull fields from that object rather than creating them # TODO: manually. 
# airsis_loadLatest() %>% monitor_getMeta() %>% names() %>% paste0(collapse = '", "') %>% cat() desiredColumns <- c( "deviceDeploymentID", "deviceID", "deviceType", "deviceDescription", "deviceExtra", "pollutant", "units", "dataIngestSource", "dataIngestURL", "dataIngestUnitID", "dataIngestExtra", "dataIngestDescription", "locationID", "locationName", "longitude", "latitude", "elevation", "countryCode", "stateCode", "countyName", "timezone", "houseNumber", "street", "city", "zip", "AQSID", "fullAQSID", ###"airnow_stationID", "airnow_parameterName", "airnow_monitorType", "airnow_siteCode", "airnow_status", "airnow_agencyID", "airnow_agencyName", "airnow_EPARegion", "airnow_GMTOffsetHours", "airnow_CBSA_ID", "airnow_CBSA_Name", "airnow_stateAQSCode", "airnow_countyAQSCode", "airnow_MSAName", "address", "airnow_countryCode", "airnow_stateCode", "airnow_timezone", "airnow_houseNumber", "airnow_street", "airnow_city", "airnow_zip", "airsis_Alias", "airsis_dataFormat", "airsis_provider", "airsis_unitID", "deploymentType" ) # Reorganize the columns newColumns <- intersect(names(meta), desiredColumns) meta <- meta %>% dplyr::select(all_of(newColumns)) # ----- Create data ---------------------------------------------------------- # Guarantee columns are in the correct order oldColumnNames <- c('datetime', meta$deviceID) newColumnNames <- c('datetime', meta$deviceDeploymentID) data <- ws_monitor$data %>% dplyr::select(all_of(oldColumnNames)) names(data) <- newColumnNames # ----- Create mts_monitor --------------------------------------------------- monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) # ----- Return --------------------------------------------------------------- monitor_check(monitor) return(monitor) } # ===== DEBUG ================================================================== if ( FALSE ) { library(AirMonitor) ws_monitor <- PWFSLSmoke::monitor_loadLatest() monitor <- monitor_fromPWFSLSmoke(ws_monitor) monitor %>% monitor_filter(stateCode == "IA") %>% monitor_timeseriesPlot() }
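# ===== Sketch =================================================================

# A minimal sketch (not run) of how deviceDeploymentID values are constructed
# from location and device identifiers, as done above. The coordinates and
# deviceID are hypothetical.

if ( FALSE ) {

  longitude <- -118.3302
  latitude <- 46.065

  locationID <- MazamaCoreUtils::createLocationID(longitude, latitude)
  deviceID <- "060010011"   # hypothetical AirNow monitorID with "_01" removed

  paste0(locationID, "_", deviceID)

}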
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_fromPWFSLSmoke.R
#' @export
#' @importFrom rlang .data
#'
#' @title Get current status of monitors
#'
#' @description
#' This function augments \code{monitor$meta} with summary information derived
#' from \code{monitor$data} reflecting recent measurements.
#'
#' @param monitor \emph{mts_monitor} object.
#' @param enddate Time relative to which current status is calculated. By
#' default, it is the latest time in \code{monitor$data$datetime}. This time can
#' be given as a POSIXct time, or a string/numeric value in ymd format (\emph{e.g.}
#' 20190301). This time is converted to UTC.
#' @param minHours Minimum number of valid hourly records required to calculate
#' \code{yesterday_PM2.5_avg}. Days with fewer valid records will be assigned \code{NA}.
#' @param dayBoundary Treatment of daylight savings time:  "clock" uses daylight
#' savings time as defined in the local timezone, "LST" uses "local standard time"
#' all year round. (See \code{monitor_dailyStatistic()} for more details.)
#'
#' @return The \code{monitor$meta} table augmented with current status
#' information for each time series.
#'
#' @section "Last" and "Previous":
#' The goal of this function is to provide useful information about what
#' happened recently with each time series in the provided \emph{mts_monitor} object.
#' Devices don't always consistently report data, however, and it is not always
#' useful to have \code{NA}'s reported when there is recent valid data at earlier
#' times. To address this, \code{monitor_getCurrentStatus()} uses \emph{last} and
#' \emph{previous} valid times. These are the time when a monitor most recently
#' reported data, and the most recent time of valid data before that,
#' respectively. By reporting on these times, this function ensures that valid
#' data is returned and provides information on how outdated this information
#' is. This information can be used in maps to show AQI colored dots when data
#' is only a few hours old but gray dots when data is older than some threshold.
#'
#' @section Calculating latency:
#' According to https://docs.airnowapi.org/docs/HourlyDataFactSheet.pdf
#' a datum assigned to 2pm represents the average of data between 2pm and 3pm.
#' So, if we check at 3:15pm and see that we have a value for 2pm but not 3pm
#' then the data are completely up-to-date with zero latency.
#'
#' \code{monitor_getCurrentStatus()} defines latency as the difference between
#' a time index and the next most recent time index associated with a
#' valid value. If there is no more recent time index, then the difference is
#' measured to the given \code{enddate} parameter. Because \emph{mts_monitor}
#' objects are defined on an hourly axis, these differences have units of hours.
#'
#' For example, if the recorded values for a monitor are
#' \code{[16.2, 15.8, 16.4, NA, 14.0, 12.5, NA, NA, 13.3, NA]}, then the \emph{last}
#' valid value is 13.3 with an index of 9, and the \emph{previous} valid value is 12.5
#' with an index of 6. The \emph{last} latency is then 1 (hour before the end), and the
#' \emph{previous} latency is 3 (hours before the last valid value).
#'
#' @section Summary data:
#' The table created by \code{monitor_getCurrentStatus()} includes per-time series
#' summary information calculated from \code{monitor$data}.
#' The additional data fields added to \code{monitor$meta} are listed below:
#'
#' \describe{
#'   \item{currentStatus_processingTime}{Time at which this function was run}
#'   \item{currentStatus_enddate}{Time relative to which "currency" is calculated}
#'   \item{last_validIndex}{Row index of the last valid measurement in \code{monitor$data}}
#'   \item{previous_validIndex}{Row index of the previous valid measurement in \code{monitor$data}}
#'   \item{last_validTime}{UTC time associated with \code{last_validIndex}}
#'   \item{previous_validTime}{UTC time associated with \code{previous_validIndex}}
#'   \item{last_latency}{Hours between \code{last_validTime} and \code{endtime}}
#'   \item{previous_latency}{Hours between \code{previous_validTime} and \code{last_validTime}}
#'   \item{last_validLocalTimestamp}{Local time representation of \code{last_validTime}}
#'   \item{previous_validLocalTimestamp}{Local time representation of \code{previous_validTime}}
#'   \item{last_PM2.5}{Last valid PM2.5 measurement}
#'   \item{previous_PM2.5}{Previous valid PM2.5 measurement}
#'   \item{last_nowcast}{Last valid PM2.5 NowCast value}
#'   \item{previous_nowcast}{Previous valid PM2.5 NowCast value}
#'   \item{yesterday_PM2.5_avg}{Daily average PM2.5 for the day prior to \code{enddate}}
#' }
#'
#' @examples
#' \donttest{
#' # Fail gracefully if any resources are not available
#' try({
#'
#' library(AirMonitor)
#'
#' monitor <- airnow_loadLatest()
#' # TODO:  Needed before rebuilding of v2 database with fullAQSID
#' monitor$meta$fullAQSID <- paste0("840", monitor$meta$AQSID)
#'
#' currentStatus <-
#'   monitor %>%
#'   monitor_filter(stateCode == "WA") %>%
#'   monitor_getCurrentStatus()
#'
#' }, silent = FALSE)
#' }

monitor_getCurrentStatus <- function(
  monitor,
  enddate = NULL,
  minHours = 18,
  dayBoundary = c("clock", "LST")
) {

  # ----- Validate parameters --------------------------------------------------

  monitor_check(monitor)

  if ( monitor_isEmpty(monitor) )
    stop("monitor is empty")

  MazamaCoreUtils::stopIfNull(minHours)

  dayBoundary <- match.arg(dayBoundary)

  # ----- Subset data ----------------------------------------------------------

  startdate <- min(monitor$data$datetime)

  if ( is.null(enddate) ) {
    enddate <- max(monitor$data$datetime)
  } else {
    enddate <-
      MazamaCoreUtils::parseDatetime(enddate, timezone = "UTC") %>%
      lubridate::floor_date(unit = "hour")
    # NOTE:  Previous version subtracted another hour here. Not sure why?
} monitor <- monitor %>% monitor_filterDatetime(startdate, enddate, timezone = "UTC") # Check again to make sure subset includes data if ( monitor_isEmpty(monitor) ) { stop(sprintf( "monitor contains zero valid data before %s", strftime(enddate, "%Y-%m-%d %H:00 UTC", tz = "UTC") )) } # ----- Prepare data --------------------------------------------------------- data <- monitor_getData(monitor) meta <- monitor_getMeta(monitor) nowcast_data <- monitor %>% monitor_nowcast(includeShortTerm = TRUE) %>% monitor_getData() # ----- Create validTimeIndices ---------------------------------------------- # This is a tibble identifying recent valid indices for each device validTimeIndices <- # Start with data data %>% # Ensure rows are arranged by datetime and then remove 'datetime' dplyr::arrange(.data$datetime) %>% dplyr::select(-.data$datetime) %>% # Find last two non-NA indices apply(2, function(x) { rev(which(!is.na(x)))[1:2] }) # Provide rownames that will end up as colnames rownames(validTimeIndices) <- c("last_validIndex", "previous_validIndex") # Transpose to have a row for each deviceDeploymentID validTimeIndices <- t(validTimeIndices) %>% # Convert matrix to tibble with sensible names dplyr::as_tibble(rownames = "deviceDeploymentID") # ----- Add latency values --------------------------------------------------- enhancedMeta <- # Start with monitor$meta meta %>% # Add times dplyr::mutate( currentStatus_processingTime = lubridate::now(tzone = "UTC"), currentStatus_enddate = enddate ) %>% # Add valid data indices dplyr::left_join(validTimeIndices, by = "deviceDeploymentID") %>% # Add POSIXct times dplyr::mutate( last_validTime = data$datetime[.data$last_validIndex], previous_validTime = data$datetime[.data$previous_validIndex] ) %>% # Add latency dplyr::mutate( last_latency = as.numeric(difftime( enddate, .data$last_validTime, units = "hour" )), previous_latency = as.numeric(difftime( .data$last_validTime, .data$previous_validTime, units = "hour" )) ) # NOTE: The strftime() function accepts arguments 'x, 'format' and 'tz'. # NOTE: While strftime() is vectorized for 'x', this is not true for 'format' or 'tz'. # NOTE: So we create local timestamps below in the timezone-dependent section. # ----- Add last/previous values --------------------------------------------- # Order data columns to match currentStatus dataBrick <- data %>% dplyr::select(enhancedMeta$deviceDeploymentID) nowcast_dataBrick <- nowcast_data %>% dplyr::select(enhancedMeta$deviceDeploymentID) # TODO: We may need to separately determine last_validNowcastIndex as it will # TODO: normally be one hour later than the pm25 value. 
enhancedMeta$last_PM2.5 <- mapply( function(x, y) { return(round(x[y], 1)) }, dataBrick, enhancedMeta$last_validIndex ) enhancedMeta$previous_PM2.5 <- mapply( function(x, y) { return(round(x[y], 1)) }, dataBrick, enhancedMeta$previous_validIndex ) enhancedMeta$last_nowcast <- mapply( function(x, y) { return(round(x[y], 1)) }, nowcast_dataBrick, enhancedMeta$last_validIndex ) enhancedMeta$previous_nowcast <- mapply( function(x, y) { return(round(x[y], 1)) }, nowcast_dataBrick, enhancedMeta$previous_validIndex ) # ----- Timezone dependent section ------------------------------------------- emList <- list() for ( timezone in unique(enhancedMeta$timezone) ) { # * Add local timestamps ----- # Subset enhancedMeta em <- enhancedMeta %>% dplyr::filter(.data$timezone == !!timezone) em$last_validLocalTimestamp <- strftime( em$last_validTime, format = "%Y-%m-%d %H:%M:%S", tz = timezone, usetz = TRUE ) em$previous_validLocalTimestamp <- strftime( em$previous_validTime, format = "%Y-%m-%d %H:%M:%S", tz = timezone, usetz = TRUE ) # * Add yesterday avg ----- # Local time 24 hours representing "yesterday" relative to enddate dateRange <- MazamaCoreUtils::dateRange( enddate - lubridate::ddays(1), timezone = timezone, days = 1 ) # Get yesterday mean for a single timezone timezoneMon <- monitor %>% monitor_filter(timezone == !!timezone) # NOTE: monitor_filterDate() will fail if timezoneMon is empty if ( monitor_isEmpty(timezoneMon) ) { em$yesterday_PM2.5_avg <- as.numeric(NA) } else { # Calculate yesterday average em$yesterday_PM2.5_avg <- timezoneMon %>% monitor_filterDate(dateRange[1], dateRange[2], timezone = timezone) %>% # Guarantee the same order as in cs monitor_reorder(em$deviceDeploymentID) %>% # Calculate yesterday mean monitor_dailyStatistic( FUN = mean, na.rm = TRUE, minHours = minHours, dayBoundary = dayBoundary ) %>% # Pull out the daily means, omitting the 'datetime' column monitor_getData() %>% dplyr::select(-c("datetime")) %>% # Convert single row dataframe to numeric dplyr::slice(1) %>% as.numeric() %>% round(1) } # END monitor_isEmpty() emList[[timezone]] <- em } timezoneMeta <- dplyr::bind_rows(emList) # ----- Retain original ordering --------------------------------------------- currentStatus <- enhancedMeta %>% dplyr::select(.data$deviceDeploymentID) %>% dplyr::left_join(timezoneMeta, by = "deviceDeploymentID") # ----- Return --------------------------------------------------------------- return(currentStatus) }
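# ===== Sketch =================================================================

# A minimal sketch (not run) of the last/previous valid-index logic documented
# above, applied to the hypothetical vector from the "Calculating latency"
# example.

if ( FALSE ) {

  x <- c(16.2, 15.8, 16.4, NA, 14.0, 12.5, NA, NA, 13.3, NA)

  validIndices <- rev(which(!is.na(x)))[1:2]
  last_validIndex <- validIndices[1]       # 9
  previous_validIndex <- validIndices[2]   # 6

  last_latency <- length(x) - last_validIndex                # 1 hour
  previous_latency <- last_validIndex - previous_validIndex  # 3 hours

}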
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_getCurrentStatus.R
#' @export
#'
#' @title Calculate distances from \emph{mts_monitor} locations to a location of interest
#'
#' @param monitor \emph{mts_monitor} object.
#' @param longitude Longitude of the location of interest.
#' @param latitude Latitude of the location of interest.
#' @param measure One of "geodesic", "haversine", "vincenty", or "cheap".
#'
#' @description
#' This function returns the distances (meters) between \code{monitor} locations
#' and a location of interest. These distances can be used to create a
#' mask identifying monitors within a certain radius of the location of interest.
#'
#' @note The measure \code{"cheap"} may be used to speed things up depending on
#' the spatial scale being considered. Distances calculated with
#' \code{measure = "cheap"} will vary by a few meters compared with those
#' calculated using \code{measure = "geodesic"}.
#'
#' @return Named vector of distances (meters) with each distance identified
#' by \code{deviceDeploymentID}.
#'
#' @examples
#' library(AirMonitor)
#'
#' # Walla Walla
#' longitude <- -118.3302
#' latitude <- 46.065
#'
#' distance <- monitor_getDistance(NW_Megafires, longitude, latitude)
#' closestIndex <- which(distance == min(distance))
#'
#' # Distance in meters
#' distance[closestIndex]
#'
#' # Monitor core metadata
#' str(NW_Megafires$meta[closestIndex, AirMonitor::coreMetadataNames])
#'
monitor_getDistance <- function(
  monitor = NULL,
  longitude = NULL,
  latitude = NULL,
  measure = c("geodesic", "haversine", "vincenty", "cheap")
) {

  # ----- Validate parameters --------------------------------------------------

  # NOTE:  Validation is handled by MazamaTimeSeries::mts_getDistance()

  # ----- Call MazamaTimeSeries function ----------------------------------------

  distance <-
    MazamaTimeSeries::mts_getDistance(
      mts = monitor,
      longitude = longitude,
      latitude = latitude,
      measure = measure
    )

  # ----- Return ----------------------------------------------------------------

  return(distance)

}
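# ===== Sketch =================================================================

# A minimal sketch (not run): using the named distance vector to find monitors
# within a radius. Coordinates are the Walla Walla example values from above.

if ( FALSE ) {

  distance <- monitor_getDistance(NW_Megafires, -118.3302, 46.065)

  withinRadius <- distance[distance <= 50000]  # meters
  names(withinRadius)                          # deviceDeploymentIDs

}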
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_getDistance.R
#' @export
#'
#' @title Leaflet interactive map of monitor locations
#'
#' @param monitor \emph{mts_monitor} object.
#' @param slice Either a formatted time string, a time index, or the name of a
#' (potentially user defined) function used to collapse the time axis.
# @param breaks set of breaks used to assign colors
# @param colors a set of colors for different levels of air quality data
# determined by \code{breaks}
# @param labels a set of text labels, one for each color
# @param legendTitle legend title
#' @param radius radius of monitor circles
#' @param opacity opacity of monitor circles
#' @param maptype optional name of leaflet ProviderTiles to use, e.g. "terrain"
# @param popupInfo a vector of column names from monitor$meta to be shown in
# a popup window
#' @param extraVars Character vector of additional column names from \code{monitor$meta}
#' to be shown in leaflet popups.
#' @param jitter Amount to use to slightly adjust locations so that multiple
#' monitors at the same location can be seen. Use zero or \code{NA} to see
#' precise locations.
#' @param NAAQS Version of NAAQS levels to use. See Note.
#' @param ... Additional arguments passed to \code{leaflet::addCircleMarkers()}.
#'
#' @description
#' This function creates interactive maps that will be displayed in RStudio's
#' 'Viewer' tab. The \code{slice} argument is used to collapse a
#' \emph{mts_monitor} timeseries into a single value. If \code{slice} is an
#' integer, that row index will be selected from the \code{monitor$data}
#' dataframe. If \code{slice} is a function (unquoted), that function will be
#' applied to the timeseries with the argument \code{na.rm=TRUE} (e.g.
#' \code{max(..., na.rm=TRUE)}).
#'
#' If \code{slice} is a user defined function, it will be used with argument
#' \code{na.rm=TRUE} to collapse the time dimension. Thus, user defined
#' functions must accept \code{na.rm} as an argument.
#'
#' @details
#' The \code{maptype} argument is mapped onto leaflet "ProviderTile" names.
#' Current map types include:
#'
#' \describe{
#' \item{"roadmap"}{ -- "OpenStreetMap"}
#' \item{"satellite"}{ -- "Esri.WorldImagery"}
#' \item{"terrain"}{ -- "Esri.WorldTopoMap"}
#' \item{"toner"}{ -- "Stamen.Toner"}
#' }
#'
#' If a character string not listed above is provided, it will be used as the
#' underlying map tile if available. See
#' \url{https://leaflet-extras.github.io/leaflet-providers/} for a list of
#' "provider tiles" to use as the background map.
#'
#' @note
#' On February 7, 2024, EPA strengthened the National Ambient Air Quality
#' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans
#' from harmful and costly health impacts, such as heart attacks and premature
#' death. Particle or soot pollution is one of the most dangerous forms of air
#' pollution, and an extensive body of science links it to a range of serious
#' and sometimes deadly illnesses. EPA is setting the level of the primary
#' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to
#' provide increased public health protection, consistent with the available
#' health science.
#' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}.
#'
#' @return Invisibly returns a leaflet map of class "leaflet".
#' #' @examples #' \dontrun{ #' library(AirMonitor) #' # Fail gracefully if any resources are not available #' try({ #' #' # Maximum AQI category at each site #' monitor_loadLatest() %>% #' monitor_filter(stateCode %in% CONUS) %>% #' monitor_leaflet() #' #' # Mean AQI category at each site #' monitor_loadLatest() %>% #' monitor_filter(stateCode %in% CONUS) %>% #' monitor_leaflet( #' slice = "mean" #' ) #' #' # Mean AQI category at each site using the updated NAAQS #' monitor_loadLatest() %>% #' monitor_filter(stateCode %in% CONUS) %>% #' monitor_leaflet( #' slice = "mean", #' NAAQS = "PM2.5_2024" #' ) #' #' }, silent = FALSE) #' } monitor_leaflet <- function( monitor, slice = "max", radius = 10, opacity = 0.7, maptype = "terrain", extraVars = NULL, jitter = 5e-4, NAAQS = c("PM2.5", "PM2.5_2024"), ... ) { # Configurable values na.color = "#cccccc" # ----- Validate parameters -------------------------------------------------- monitor_check(monitor) if ( monitor_isEmpty(monitor) ) stop("monitor object has no data") slice <- MazamaCoreUtils::setIfNull(slice, "max") maptype <- MazamaCoreUtils::setIfNull(maptype, "terrain") if ( !is.null(extraVars) ) { unrecognizedVars <- setdiff(extraVars, names(monitor$meta)) if ( length(unrecognizedVars) > 0 ) { stop("variables in 'extraVars' not found in 'monitor$meta'") } } if ( is.null(jitter) || is.na(jitter) ) { jitter <- 0 } NAAQS = match.arg(NAAQS) # ----- Pollutant dependent AQI ---------------------------------------------- # See: https://www.airnow.gov/sites/default/files/2020-05/aqi-technical-assistance-document-sept2018.pdf pollutant <- toupper(unique(monitor$meta$pollutant)) if ( length(pollutant) > 1 ) { pollutantString <- paste0(pollutant, collapse = ", ") stop(sprintf("multiple pollutants found: %s", pollutantString)) } if ( pollutant == "CO" ) { legendTitle <- "CO AQI Level" units <- "ppm" digits <- 1 } else if ( pollutant == "OZONE" ) { legendTitle <- "Ozone AQI Level" units <- "ppm" digits <- 2 } else if ( pollutant == "PM2.5" ) { legendTitle <- "PM2.5 AQI Level" units <- "\U00B5g/m3" digits <- 0 } else if ( pollutant == "PM10" ) { legendTitle <- "PM10 AQI Level" units <- "\U00B5g/m3" digits <- 0 } AQI_breaks <- US_AQI[[paste0("breaks_", pollutant)]] AQI_colors <- US_AQI[[paste0("colors_", "EPA")]] AQI_names <- US_AQI$names_eng # Handle the added NAAQS argument if ( pollutant == "PM2.5" && NAAQS == "PM2.5_2024" ) { AQI_breaks <- US_AQI$breaks_PM2.5_2024 } breaks <- AQI_breaks colors <- AQI_colors labels <- AQI_names # ----- Create the 'slice' values -------------------------------------------- if ( is.numeric(slice) ) { # * slice = number ----- if ( slice < 1 || slice > nrow(monitor$data) ) stop(sprintf("slice = %d is outside the range 1:%d", slice, nrow(monitor$data))) slice <- round(slice) popupValue <- as.numeric(dplyr::slice(monitor$data, slice)[-1]) popupWhen <- strftime(monitor$data$datetime[slice], "on %B %d, %Y at %H:00", tz = "UTC", usetz = TRUE) } else if ( is.character(slice) ) { # * slice = character ----- if ( exists(slice) && is.function(get(slice)) ) { # ** slice = function ----- FUN <- get(slice) # NOTE: min/max will warn and return Inf/-Inf when all data are missing # NOTE: while mean returns NaN so we need to suppress warnings and replace # NOTE: all those non-finite values with NA. 
    suppressWarnings({
      popupValue <- base::apply(dplyr::select(monitor$data, -1), 2, FUN, na.rm = TRUE)
    })
    popupValue[!is.finite(popupValue)] <- NA

    popupWhen <- ""

    legendTitle <- sprintf("%s %s", stringr::str_to_title(slice), legendTitle)

    if ( slice == "max" ) {

      # Can't find a good dplyr way to get the first occurrence of each value so we roll our own
      suppressWarnings({
        dataBrick <- dplyr::select(monitor$data, -1)
        sliceValueBrick <- matrix(rep(popupValue, nrow(dataBrick)), nrow = nrow(dataBrick), byrow = TRUE)
        logicalBrick <- dataBrick == sliceValueBrick
        logicalBrick[is.na(logicalBrick)] <- FALSE
        firstRowAtMax <- base::apply(logicalBrick, 2, function(x) { min(which(x), na.rm = TRUE) })
        firstRowAtMax[!is.finite(firstRowAtMax)] <- NA
        firstTimeAtMax <- monitor$data$datetime[firstRowAtMax]
      })
      popupWhen <- strftime(firstTimeAtMax, "on %B %d, %Y at %H:00", tz = "UTC", usetz = TRUE)

      # TODO:  Idea for local time not working yet.
      # popupWhen <- vector("character", length(firstTimeAtMax))
      # for ( i in seq_len(firstTimeAtMax) ) {
      #   popupWhen[i] <-
      #     strftime(firstTimeAtMax[i], "on %B %d, %Y at %H:00", tz = monitor$meta$timezone[i], usetz = TRUE)
      # }

      popupWhen[is.na(popupWhen)] <- ""

    } # END of slice == "max"

    } else {

      # ** slice = datetime -----

      result <- try({
        sliceTime <- MazamaCoreUtils::parseDatetime(slice, timezone = "UTC")
      }, silent = TRUE)

      if ( "try-error" %in% class(result) ) {
        stop("improper use of slice parameter")
      } else {
        # Now proceed as if slice were an integer
        slice <- which(monitor$data$datetime == sliceTime)
        popupValue <- as.numeric(dplyr::slice(monitor$data, slice)[-1])
        popupWhen <- strftime(monitor$data$datetime[slice], "on %B %d, %Y at %H:00", tz = "UTC", usetz = TRUE)
      }

    } # END slice = character

  } else {

    # * slice = neither -----
    stop("improper use of slice parameter")

  }

  # ----- Order by popupValue ---------------------------------------------------

  # NOTE:  This step is required if you want to have higher valued locations
  # NOTE:  plotted on top so that they aren't hidden by lower valued locations.

  orderedIndices <- order(popupValue)
  popupValue <- popupValue[orderedIndices]
  popupWhen <- popupWhen[orderedIndices]
  monitor$meta <- monitor$meta[orderedIndices,]

  # monitor$data is no longer needed

  # ----- Create colors and legend labels ---------------------------------------

  # If the user didn't use custom breaks then use AQI names and colors
  if ( all.equal(breaks, AQI_breaks) && all.equal(colors, AQI_colors) ) {

    # Ignore warnings from RColorBrewer as leaflet::colorBin does the right thing
    suppressWarnings({
      colorFunc <- leaflet::colorBin(AQI_colors, bins = AQI_breaks, na.color = na.color)
      cols <- colorFunc(popupValue)
      colors <- AQI_colors
      labels <- AQI_names
    })

  } else {

    if ( length(breaks) <= 2 ) {
      stop("Please specify the correct vector of breaks")
    }

    if ( !(length(breaks) - 1 == length(colors)) ) {
      stop("The number of colors provided should be one less than the number of breaks")
    }

    if ( missing(labels) ) {
      labels <- paste(sprintf("%.1f", breaks[-length(breaks)]), "--", sprintf("%.1f", breaks[-1]))
    } else if ( length(labels) != length(colors) ) {
      stop("The number of labels should be equal to the number of colors")
    }

    # Create levels and use them to create a color mask
    levels <- .bincode(popupValue, breaks, include.lowest = TRUE)
    if ( !all(!is.na(levels)) ) {
      print("NOTE that there are data points outside of your specified breaks, non-requested color(s) might be displayed on your map.")
    }
    cols <- colors[levels]

  }

  # ----- Create popup text -----------------------------------------------------

  popupText <- paste0(
    "<b>", monitor$meta$locationName, "</b><br>",
    "<b>", round(popupValue, digits), " ", units, "</b> ", popupWhen, "<br><br>",
    "<b>", monitor$meta$deviceDeploymentID, "</b><br>",
    "locationID = ", monitor$meta$locationID, "<br>",
    "deviceID = ", monitor$meta$deviceID, "<br><br>",
    monitor$meta$countyName, " County, ", monitor$meta$stateCode, "<br>",
    "timezone = ", monitor$meta$timezone, "<br>",
    "longitude = ", monitor$meta$longitude, ", ", "latitude = ", monitor$meta$latitude, "<br>"
  )

  # Add extra vars
  for ( i in seq_along(popupText) ) {

    extraText <- vector("character", length(extraVars))
    for ( j in seq_along(extraVars) ) {
      var <- extraVars[j]
      extraText[j] <- paste0(var, " = ", monitor$meta[i, var], "<br>")
    }
    extraText <- paste0(extraText, collapse = "")

    popupText[i] <- paste0(popupText[i], "<hr>", extraText)

  }

  monitor$meta$popupText <- popupText

  # ----- Create leaflet map ----------------------------------------------------

  # Filter out missing location data
  monitor$meta <-
    monitor$meta %>%
    dplyr::filter(!is.na(.data$longitude)) %>%
    dplyr::filter(!is.na(.data$latitude))

  # Spread out locations if requested
  if ( jitter > 0 ) {
    # Use the 'jitter' argument as the jitter amount, assigned to a separate
    # name so the call to base::jitter() below stays unambiguous.
    jitterAmount <- jitter
    monitor$meta <-
      monitor$meta %>%
      dplyr::mutate(
        longitude = jitter(.data$longitude, amount = jitterAmount),
        latitude = jitter(.data$latitude, amount = jitterAmount)
      )
  }

  # Convert maptype to a character string that addProviderTiles can read
  if ( missing(maptype) || maptype == "terrain" ) {
    providerTiles <- "Esri.WorldTopoMap"
  } else if ( maptype == "roadmap" ) {
    providerTiles <- "OpenStreetMap"
  } else if ( maptype == "toner" ) {
    providerTiles <- "Stamen.Toner"
  } else if ( maptype == "satellite" ) {
    providerTiles <- "Esri.WorldImagery"
  } else {
    providerTiles <- maptype
  }

  # Determine appropriate zoom level
  lonRange <- range(monitor$meta$longitude, na.rm = TRUE)
  latRange <- range(monitor$meta$latitude, na.rm = TRUE)

  # Create leaflet map
  leafletMap <-
    leaflet::leaflet(monitor$meta) %>%
    leaflet::fitBounds(lonRange[1], latRange[1], lonRange[2], latRange[2]) %>%
    leaflet::addProviderTiles(providerTiles) %>%
    leaflet::addCircleMarkers(
      lat = ~latitude,
      lng = ~longitude,
      radius = radius,
      fillColor = cols,
      fillOpacity = opacity,
      stroke = FALSE,
      popup = monitor$meta$popupText,
      ...
    ) %>%
    leaflet::addLegend(
      position = "bottomright",
      colors = rev(colors),  # show low levels at the bottom
      labels = rev(labels),  # show low levels at the bottom
      opacity = 1,
      title = legendTitle
    )

  return(leafletMap)

}

# ===== DEBUGGING ==============================================================

if ( FALSE ) {

  library(AirMonitor)

  archiveBaseUrl <- "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2"

  monitor <-
    airnow_loadDaily(archiveBaseUrl = archiveBaseUrl) %>%
    ##monitor_select(c("089a067f92712ad1_530750003", "d121a99bc6c2ac7f_160570005"))
    monitor_filter(countryCode %in% c("US", "CA", "MX"))

  slice = "max"
  radius = 10
  opacity = 0.7
  maptype = "terrain"
  extraVars = NULL
  jitter = 5e-4

  monitor_leaflet(
    monitor,
    slice = "max",
    radius = 10,
    opacity = 0.7,
    maptype = "terrain",
    extraVars = NULL,
    jitter = 5e-4
  )

}
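# ===== Sketch =================================================================

# A minimal sketch (not run) of the .bincode() color assignment used above for
# custom breaks, with hypothetical values.

if ( FALSE ) {

  breaks <- c(0, 12, 35.5, 55.5, Inf)
  colors <- c("green", "yellow", "orange", "red")
  popupValue <- c(5, 20, 60, NA)

  levels <- .bincode(popupValue, breaks, include.lowest = TRUE)
  colors[levels]   # "green" "yellow" "red" NA

}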
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_leaflet.R
#' @export
#'
#' @title Load monitoring data from all sources
#'
#' @param startdate Desired start datetime (ISO 8601).
#' @param enddate Desired end datetime (ISO 8601).
#' @param timezone Olson timezone used to interpret dates.
#' @param archiveBaseUrl Base URL for monitoring v2 data files.
#' @param archiveBaseDir Local base directory for monitoring v2 data files.
#' @param QC_negativeValues Type of QC to apply to negative values.
#' @param epaPreference Preferred data source for EPA data when annual data
#' files are available from both `epa_aqs` and `airnow`.
#'
#' @description Loads monitoring data for a given time range. Data from AirNow,
#' AIRSIS and WRCC are combined into a single \emph{mts_monitor} object.
#'
#' Archival datasets are combined with 'daily' and 'latest' datasets as needed to
#' satisfy the requested date range.
#'
#' @seealso \code{\link{monitor_loadAnnual}}
#' @seealso \code{\link{monitor_loadDaily}}
#' @seealso \code{\link{monitor_loadLatest}}
#'
#' @return A \emph{mts_monitor} object with PM2.5 monitoring data. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @examples
#' \dontrun{
#' library(AirMonitor)
#'
#' # Fail gracefully if any resources are not available
#' try({
#'
#' wa <-
#'   monitor_load(20210601, 20211001) %>%
#'   monitor_filter(stateCode == "WA")
#'
#' monitor_timeseriesPlot(wa)
#'
#' }, silent = FALSE)
#' }

monitor_load <- function(
  startdate = NULL,
  enddate = NULL,
  timezone = NULL,
  archiveBaseUrl = paste0(
    "https://airfire-data-exports.s3.us-west-2.amazonaws.com/",
    "monitoring/v2"
  ),
  archiveBaseDir = NULL,
  QC_negativeValues = c("zero", "na", "ignore"),
  epaPreference = c("airnow", "epa_aqs")
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(startdate)
  MazamaCoreUtils::stopIfNull(enddate)
  QC_negativeValues <- match.arg(QC_negativeValues)
  epaPreference <- match.arg(epaPreference)

  # Deal with missing timezones
  if ( is.null(timezone) ) {
    if ( lubridate::is.POSIXct(startdate) ) {
      timezone <- lubridate::tz(startdate)
    } else {
      timezone <- "UTC"
    }
  }

  tRange <- MazamaCoreUtils::dateRange(
    startdate = startdate,
    enddate = enddate,
    timezone = timezone,
    unit = "hour",
    ceilingStart = FALSE,
    ceilingEnd = FALSE
  )

  starttime <- tRange[1]
  endtime <- tRange[2]

  now <- lubridate::now(tzone = "UTC")
  now_m1 <- now - lubridate::ddays(1)
  now_m10 <- now - lubridate::ddays(10)
  now_m45 <- now - lubridate::ddays(45)

  # ----- Load annual data ------------------------------------------------------

  if ( starttime < now_m45 ) {

    y1 <- lubridate::year(starttime)
    y2 <- lubridate::year(endtime)

    monitorList <- list()

    # NOTE:  Failure to load any annual data for a given year is not trapped.
    for ( year in y1:y2 ) {
      monitorList[[as.character(year)]] <-
        monitor_loadAnnual(year, archiveBaseUrl, archiveBaseDir, QC_negativeValues, epaPreference)
    }

    annualData <- monitor_combine(monitorList)

  }

  # ----- Load daily data -------------------------------------------------------

  if ( endtime >= now_m45 && starttime < now_m10 ) {
    dailyData <- monitor_loadDaily(archiveBaseUrl, archiveBaseDir, QC_negativeValues)
  }

  # ----- Load latest data ------------------------------------------------------

  if ( starttime >= now_m10 || endtime >= now_m1 ) {
    latestData <- monitor_loadLatest(archiveBaseUrl, archiveBaseDir, QC_negativeValues)
  }

  # ----- Join mts_monitor objects ----------------------------------------------

  if ( exists("annualData") ) {
    monitor <- annualData
  }

  if ( exists("dailyData") ) {
    # NOTE:  'monitor' exists only if an earlier block assigned it
    if ( exists("monitor") ) {
      monitor <- monitor_combine(monitor, dailyData)
    } else {
      monitor <- dailyData
    }
  }

  if ( exists("latestData") ) {
    if ( exists("monitor") ) {
      monitor <- monitor_combine(monitor, latestData)
    } else {
      monitor <- latestData
    }
  }

  # Filter to the requested time range
  monitor <- monitor_filterDate(monitor, starttime, endtime)

  # ----- Return ----------------------------------------------------------------

  return(monitor)

}
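# ===== Sketch =================================================================

# A minimal sketch (not run) of the combine-then-trim pattern used above.
# The three input objects are hypothetical results of the loading calls.

if ( FALSE ) {

  monitor <- monitor_combine(list(annualData, dailyData, latestData))
  monitor <- monitor_filterDate(monitor, 20210601, 20211001, timezone = "UTC")

}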
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_load.R
#' @export
#'
#' @title Load annual monitoring data from all sources
#'
#' @param year Year [YYYY].
#' @param archiveBaseUrl Base URL for monitoring v2 data files.
#' @param archiveBaseDir Local base directory for monitoring v2 data files.
#' @param QC_negativeValues Type of QC to apply to negative values.
#' @param epaPreference Preferred data source for EPA data when annual data
#' files are available from both `epa_aqs` and `airnow`.
#'
#' @return A \emph{mts_monitor} object with PM2.5 monitoring data. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description Combine annual data from AirNow, AIRSIS and WRCC:
#'
#' If \code{archiveBaseDir} is defined, data will be loaded from this local
#' archive. Otherwise, data will be loaded from the monitoring data repository
#' maintained by the USFS AirFire team.
#'
#' Current year files loaded by this function are updated once per week.
#'
#' For the most recent data in the last 10 days, use \code{monitor_loadLatest()}.
#'
#' For daily updates covering the most recent 45 days, use \code{monitor_loadDaily()}.
#'
#' For data extending more than 45 days into the past, use \code{monitor_load()}.
#'
#' @note This function guarantees that only a single time series will be
#' associated with each \code{locationID} using the following logic:
#' \enumerate{
#' \item{AirNow data takes precedence over data from AIRSIS or WRCC}
#' \item{more recent data takes precedence over older data}
#' }
#' This is relevant mostly for "temporary" monitors which may be replaced after they
#' are initially deployed. If you want access to all device deployments associated
#' with a specific \code{locationID}, you can use the provider specific functions:
#' \code{\link{airnow_loadAnnual}},
#' \code{\link{airsis_loadAnnual}} and
#' \code{\link{wrcc_loadAnnual}}
#'
#
#' @seealso \code{\link{monitor_load}}
#' @seealso \code{\link{monitor_loadDaily}}
#' @seealso \code{\link{monitor_loadLatest}}
#' @examples
#' \dontrun{
#' library(AirMonitor)
#' # Fail gracefully if any resources are not available
#' try({
#'
#' monitor_loadAnnual() %>%
#'   monitor_filter(stateCode %in% CONUS) %>%
#'   monitor_leaflet()
#'
#' }, silent = FALSE)
#' }

monitor_loadAnnual <- function(
  year = NULL,
  archiveBaseUrl = paste0(
    "https://airfire-data-exports.s3.us-west-2.amazonaws.com/",
    "monitoring/v2"
  ),
  archiveBaseDir = NULL,
  QC_negativeValues = c("zero", "na", "ignore"),
  epaPreference = c("airnow", "epa_aqs")
) {

  parameterName <- "PM2.5"

  # ----- Validate parameters --------------------------------------------------

  # Cutoff years
  firstAirnowYear <- 2014
  firstAirsisYear <- 2004
  firstWrccYear <- 2010
  firstEpa88101Year <- 2008
  firstEpa88502Year <- 1998

  MazamaCoreUtils::stopIfNull(year)
  year <- as.numeric(year)
  if ( year < firstEpa88502Year )
    stop("no data available prior to 1998")

  QC_negativeValues <- match.arg(QC_negativeValues)

  if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) )
    stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined")

  epaPreference <- match.arg(epaPreference)

  # Override epaPreference if year is before any AirNow data
  if ( year < firstAirnowYear )
    epaPreference <- "epa_aqs"

  # ----- Load data -------------------------------------------------------------

  monitorList <- list()

  if ( year >= firstAirnowYear ) {

    if ( epaPreference == "airnow" ) {

      # AirNow annual files
      try({
        monitorList[["airnow"]] <-
          airnow_loadAnnual(year, archiveBaseUrl, archiveBaseDir, QC_negativeValues, parameterName) %>%
          monitor_dropEmpty()
      }, silent = TRUE)

    } else {

      # EPA AQS 88101 + 88502 files
      try({
        monitorList[["epa_aqs"]] <-
          epa_aqs_loadAnnual(year, archiveBaseUrl, archiveBaseDir, QC_negativeValues, parameterCode = "PM2.5") %>%
          monitor_dropEmpty()
      }, silent = TRUE)

    }

  } else {

    # EPA AQS 88101 + 88502 files
    if ( year >= firstEpa88101Year ) {
      try({
        monitorList[["epa_aqs"]] <-
          epa_aqs_loadAnnual(year, archiveBaseUrl, archiveBaseDir, QC_negativeValues, parameterCode = "PM2.5") %>%
          monitor_dropEmpty()
      }, silent = TRUE)
    }

  }

  # AIRSIS annual files
  if ( year >= firstAirsisYear ) {
    try({
      monitorList[["airsis"]] <-
        airsis_loadAnnual(year, archiveBaseUrl, archiveBaseDir, QC_negativeValues) %>%
        monitor_dropEmpty()
    }, silent = TRUE)
  }

  # WRCC annual files
  if ( year >= firstWrccYear ) {
    try({
      monitorList[["wrcc"]] <-
        wrcc_loadAnnual(year, archiveBaseUrl, archiveBaseDir, QC_negativeValues) %>%
        monitor_dropEmpty()
    }, silent = TRUE)
  }

  # ----- Remove older deployments ----------------------------------------------

  for ( name in names(monitorList) ) {

    monitor <- monitorList[[name]]

    # Find locations with multiple deployments
    duplicateLocationIDs <-
      monitor$meta$locationID[duplicated(monitor$meta$locationID)] %>%
      unique()

    # Filter to include only locations with multiple deployments
    monitor <-
      monitor %>%
      monitor_filter(.data$locationID %in% duplicateLocationIDs)

    # Find last valid datum for each deployment (see monitor_getCurrentStatus.R)
    monitor$meta$lastValidIndex <-
      # Start with data
      monitor$data %>%
      # Ensure rows are arranged by datetime and then remove 'datetime'
      dplyr::arrange(.data$datetime) %>%
      dplyr::select(-.data$datetime) %>%
      # Find last non-NA index
      apply(2, function(x) { rev(which(!is.na(x)))[1] })

    # Find deployments to be removed
    deploymentList <- list()

    for (locationID in duplicateLocationIDs) {
      latestValid <-
        monitor$meta %>%
        dplyr::filter(.data$locationID == !!locationID) %>%
        dplyr::pull(.data$lastValidIndex) %>%
        max()

      deploymentList[[locationID]] <-
        monitor$meta %>%
        dplyr::filter(.data$locationID == !!locationID) %>%
        dplyr::filter(.data$lastValidIndex != !!latestValid) %>%
        dplyr::pull(.data$deviceDeploymentID)
    }

    deploymentsToRemove <- unlist(deploymentList)

    # Replace monitor object with only the most recent deployments
    deploymentsToRetain <-
      setdiff(monitorList[[name]]$meta$deviceDeploymentID, deploymentsToRemove)

    monitorList[[name]] <-
      monitorList[[name]] %>%
      monitor_select(deploymentsToRetain)

    # NOTE:  Some locations like the Rocky Mtn Fire Cache will have multiple
    # NOTE:  monitors all producing data at the same time. In this case, we
    # NOTE:  rely on dplyr::distinct() below to simply pick the first one.

  }

  # ----- Remove duplicate locations --------------------------------------------

  # NOTE:  This applies only to AirNow data, not EPA AQS data.
  # NOTE:
  # NOTE:  Whenever we have multiple monitors reporting from the same location,
  # NOTE:  we always favor the data from AirNow over AIRSIS and WRCC.
  # NOTE:  Because airnow comes first in monitor_combine() above, AirNow data
  # NOTE:  will be preferentially retained.

  monitor_all <- monitor_combine(monitorList)

  ids <-
    monitor_all$meta %>%
    dplyr::distinct(.data$locationID, .keep_all = TRUE) %>%
    dplyr::pull(.data$deviceDeploymentID)

  monitor <- monitor_select(monitor_all, ids)

  # ----- Return ----------------------------------------------------------------

  return(monitor)

}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_loadAnnual.R
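A minimal usage sketch (not part of the package source), assuming the default AirFire archive is reachable and that 2020 annual files exist for both EPA AQS and AirNow:

library(AirMonitor)

try({
  # Prefer regulatory EPA AQS data over AirNow for a past year
  monitor <-
    monitor_loadAnnual(
      year = 2020,
      QC_negativeValues = "zero",
      epaPreference = "epa_aqs"
    )
  # The loader guarantees one time series per locationID
  any(duplicated(monitor$meta$locationID))  # expected: FALSE
}, silent = FALSE)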
#' @export
#'
#' @title Load daily monitoring data from all sources
#'
#' @param archiveBaseUrl Base URL for monitoring v2 data files.
#' @param archiveBaseDir Local base directory for monitoring v2 data files.
#' @param QC_negativeValues Type of QC to apply to negative values.
#'
#' @return A \emph{mts_monitor} object with PM2.5 monitoring data. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description Combine daily data from AirNow, AIRSIS and WRCC:
#'
#' If \code{archiveBaseDir} is defined, data will be loaded from this local
#' archive. Otherwise, data will be loaded from the monitoring data repository
#' maintained by the USFS AirFire team.
#'
#' The files loaded by this function are updated once per day and
#' contain data for the previous 45 days.
#'
#' For the most recent data in the last 10 days, use \code{monitor_loadLatest()}.
#'
#' For data extending more than 45 days into the past, use \code{monitor_load()}.
#'
#' @note This function guarantees that only a single time series will be
#' associated with each \code{locationID} using the following logic:
#' \enumerate{
#' \item{AirNow data takes precedence over data from AIRSIS or WRCC}
#' \item{more recent data takes precedence over older data}
#' }
#' This is relevant mostly for "temporary" monitors which may be replaced after they
#' are initially deployed. If you want access to all device deployments associated
#' with a specific \code{locationID}, you can use the provider specific functions:
#' \code{\link{airnow_loadDaily}},
#' \code{\link{airsis_loadDaily}} and
#' \code{\link{wrcc_loadDaily}}
#'
#
#' @seealso \code{\link{monitor_load}}
#' @seealso \code{\link{monitor_loadAnnual}}
#' @seealso \code{\link{monitor_loadLatest}}
#' @examples
#' \dontrun{
#' library(AirMonitor)
#' # Fail gracefully if any resources are not available
#' try({
#'
#' monitor_loadDaily() %>%
#'   monitor_filter(stateCode %in% CONUS) %>%
#'   monitor_leaflet()
#'
#' }, silent = FALSE)
#' }

monitor_loadDaily <- function(
  archiveBaseUrl = paste0(
    "https://airfire-data-exports.s3.us-west-2.amazonaws.com/",
    "monitoring/v2"
  ),
  archiveBaseDir = NULL,
  QC_negativeValues = c("zero", "na", "ignore")
) {

  parameterName <- "PM2.5"

  # ----- Validate parameters --------------------------------------------------

  QC_negativeValues <- match.arg(QC_negativeValues)

  if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) )
    stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined")

  # ----- Load data ------------------------------------------------------------

  monitorList <- list()

  try({
    monitorList[["airnow"]] <-
      airnow_loadDaily(archiveBaseUrl, archiveBaseDir, QC_negativeValues, parameterName) %>%
      monitor_dropEmpty()
  }, silent = TRUE)

  try({
    monitorList[["airsis"]] <-
      airsis_loadDaily(archiveBaseUrl, archiveBaseDir, QC_negativeValues) %>%
      monitor_dropEmpty()
  }, silent = TRUE)

  try({
    monitorList[["wrcc"]] <-
      wrcc_loadDaily(archiveBaseUrl, archiveBaseDir, QC_negativeValues) %>%
      monitor_dropEmpty()
  }, silent = TRUE)

  # ----- Remove older deployments ---------------------------------------------

  for ( name in names(monitorList) ) {

    monitor <- monitorList[[name]]

    # Find locations with multiple deployments
    duplicateLocationIDs <-
      monitor$meta$locationID[duplicated(monitor$meta$locationID)] %>%
      unique()

    # Filter to include only locations with multiple deployments
    monitor <-
      monitor %>%
      monitor_filter(.data$locationID %in% duplicateLocationIDs)

    # Find last valid datum for each deployment (see monitor_getCurrentStatus.R)
    monitor$meta$lastValidIndex <-
      # Start with data
      monitor$data %>%
      # Ensure rows are arranged by datetime and then remove 'datetime'
      dplyr::arrange(.data$datetime) %>%
      dplyr::select(-.data$datetime) %>%
      # Find last non-NA index
      apply(2, function(x) { rev(which(!is.na(x)))[1] })

    # Find deployments to be removed
    deploymentList <- list()

    for (locationID in duplicateLocationIDs) {
      latestValid <-
        monitor$meta %>%
        dplyr::filter(.data$locationID == !!locationID) %>%
        dplyr::pull(.data$lastValidIndex) %>%
        max()

      deploymentList[[locationID]] <-
        monitor$meta %>%
        dplyr::filter(.data$locationID == !!locationID) %>%
        dplyr::filter(.data$lastValidIndex != !!latestValid) %>%
        dplyr::pull(.data$deviceDeploymentID)
    }

    deploymentsToRemove <- unlist(deploymentList)

    # Replace monitor object with only the most recent deployments
    deploymentsToRetain <-
      setdiff(monitorList[[name]]$meta$deviceDeploymentID, deploymentsToRemove)

    monitorList[[name]] <-
      monitorList[[name]] %>%
      monitor_select(deploymentsToRetain)

    # NOTE: Some locations like the Rocky Mtn Fire Cache will have multiple
    # NOTE: monitors all producing data at the same time. In this case, we
    # NOTE: rely on dplyr::distinct() below to simply pick the first one.

  }

  # ----- Remove duplicate locations -------------------------------------------

  # NOTE: Whenever we have multiple monitors reporting from the same location,
  # NOTE: we always favor the data from AirNow over AIRSIS and WRCC.
  # NOTE: Because "airnow" comes first in monitorList, AirNow data
  # NOTE: will be preferentially retained.

  monitor_all <-
    monitor_combine(monitorList)

  ids <-
    monitor_all$meta %>%
    dplyr::distinct(.data$locationID, .keep_all = TRUE) %>%
    dplyr::pull(.data$deviceDeploymentID)

  monitor <-
    monitor_all %>%
    monitor_select(ids)

  # ----- Return ---------------------------------------------------------------

  return(monitor)

}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_loadDaily.R
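A sketch of loading from a local archive instead of the AWS bucket (not part of the package source); the directory path is hypothetical and must contain a mirror of the v2 file layout:

library(AirMonitor)

try({
  monitor_loadDaily(
    archiveBaseUrl = NULL,
    archiveBaseDir = "~/Data/monitoring/v2"  # hypothetical local mirror
  ) %>%
    monitor_timeRange()  # should span roughly the previous 45 days
}, silent = FALSE)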
#' @export
#'
#' @title Load most recent monitoring data from all sources
#'
#' @param archiveBaseUrl Base URL for monitoring v2 data files.
#' @param archiveBaseDir Local base directory for monitoring v2 data files.
#' @param QC_negativeValues Type of QC to apply to negative values.
#'
#' @return A \emph{mts_monitor} object with PM2.5 monitoring data. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description Combine recent data from AirNow, AIRSIS and WRCC:
#'
#' If \code{archiveBaseDir} is defined, data will be loaded from this local
#' archive. Otherwise, data will be loaded from the monitoring data repository
#' maintained by the USFS AirFire team.
#'
#' The files loaded by this function are updated multiple times an hour and
#' contain data for the previous 10 days.
#'
#' For daily updates covering the most recent 45 days, use \code{monitor_loadDaily()}.
#'
#' For data extending more than 45 days into the past, use \code{monitor_load()}.
#'
#' @note This function guarantees that only a single time series will be
#' associated with each \code{locationID} using the following logic:
#' \enumerate{
#' \item{AirNow data takes precedence over data from AIRSIS or WRCC}
#' \item{more recent data takes precedence over older data}
#' }
#' This is relevant mostly for "temporary" monitors which may be replaced after they
#' are initially deployed. If you want access to all device deployments associated
#' with a specific \code{locationID}, you can use the provider specific functions:
#' \code{\link{airnow_loadLatest}},
#' \code{\link{airsis_loadLatest}} and
#' \code{\link{wrcc_loadLatest}}
#'
#
#' @seealso \code{\link{monitor_load}}
#' @seealso \code{\link{monitor_loadAnnual}}
#' @seealso \code{\link{monitor_loadDaily}}
#' @examples
#' \dontrun{
#' library(AirMonitor)
#' # Fail gracefully if any resources are not available
#' try({
#'
#' monitor_loadLatest() %>%
#'   monitor_filter(stateCode %in% CONUS) %>%
#'   monitor_leaflet()
#'
#' }, silent = FALSE)
#' }

monitor_loadLatest <- function(
  archiveBaseUrl = paste0(
    "https://airfire-data-exports.s3.us-west-2.amazonaws.com/",
    "monitoring/v2"
  ),
  archiveBaseDir = NULL,
  QC_negativeValues = c("zero", "na", "ignore")
) {

  parameterName <- "PM2.5"

  # ----- Validate parameters --------------------------------------------------

  QC_negativeValues <- match.arg(QC_negativeValues)

  if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) )
    stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined")

  # ----- Load data ------------------------------------------------------------

  monitorList <- list()

  try({
    monitorList[["airnow"]] <-
      airnow_loadLatest(archiveBaseUrl, archiveBaseDir, QC_negativeValues, parameterName) %>%
      monitor_dropEmpty()
  }, silent = TRUE)

  try({
    monitorList[["airsis"]] <-
      airsis_loadLatest(archiveBaseUrl, archiveBaseDir, QC_negativeValues) %>%
      monitor_dropEmpty()
  }, silent = TRUE)

  try({
    monitorList[["wrcc"]] <-
      wrcc_loadLatest(archiveBaseUrl, archiveBaseDir, QC_negativeValues) %>%
      monitor_dropEmpty()
  }, silent = TRUE)

  # ----- Remove older deployments ---------------------------------------------

  for ( name in names(monitorList) ) {

    monitor <- monitorList[[name]]

    # Find locations with multiple deployments
    duplicateLocationIDs <-
      monitor$meta$locationID[duplicated(monitor$meta$locationID)] %>%
      unique()

    # Filter to include only locations with multiple deployments
    monitor <-
      monitor %>%
      monitor_filter(.data$locationID %in% duplicateLocationIDs)

    # Find last valid datum for each deployment (see monitor_getCurrentStatus.R)
    monitor$meta$lastValidIndex <-
      # Start with data
      monitor$data %>%
      # Ensure rows are arranged by datetime and then remove 'datetime'
      dplyr::arrange(.data$datetime) %>%
      dplyr::select(-.data$datetime) %>%
      # Find last non-NA index
      apply(2, function(x) { rev(which(!is.na(x)))[1] })

    # Find deployments to be removed
    deploymentList <- list()

    for (locationID in duplicateLocationIDs) {
      latestValid <-
        monitor$meta %>%
        dplyr::filter(.data$locationID == !!locationID) %>%
        dplyr::pull(.data$lastValidIndex) %>%
        max()

      deploymentList[[locationID]] <-
        monitor$meta %>%
        dplyr::filter(.data$locationID == !!locationID) %>%
        dplyr::filter(.data$lastValidIndex != !!latestValid) %>%
        dplyr::pull(.data$deviceDeploymentID)
    }

    deploymentsToRemove <- unlist(deploymentList)

    # Replace monitor object with only the most recent deployments
    deploymentsToRetain <-
      setdiff(monitorList[[name]]$meta$deviceDeploymentID, deploymentsToRemove)

    monitorList[[name]] <-
      monitorList[[name]] %>%
      monitor_select(deploymentsToRetain)

    # NOTE: Some locations like the Rocky Mtn Fire Cache will have multiple
    # NOTE: monitors all producing data at the same time. In this case, we
    # NOTE: rely on dplyr::distinct() below to simply pick the first one.

  }

  # ----- Remove duplicate locations -------------------------------------------

  # NOTE: Whenever we have multiple monitors reporting from the same location,
  # NOTE: we always favor the data from AirNow over AIRSIS and WRCC.
  # NOTE: Because "airnow" comes first in monitorList, AirNow data
  # NOTE: will be preferentially retained.

  monitor_all <-
    monitor_combine(monitorList)

  ids <-
    monitor_all$meta %>%
    dplyr::distinct(.data$locationID, .keep_all = TRUE) %>%
    dplyr::pull(.data$deviceDeploymentID)

  monitor <-
    monitor_all %>%
    monitor_select(ids)

  # ----- Return ---------------------------------------------------------------

  return(monitor)

}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_loadLatest.R
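A sketch contrasting the QC_negativeValues options (not part of the package source), assuming, as the option names suggest, that "zero" replaces negative values with 0 while "na" invalidates them:

library(AirMonitor)

try({
  latest_zero <- monitor_loadLatest()                          # default: "zero"
  latest_na   <- monitor_loadLatest(QC_negativeValues = "na")
  # "na" can only increase the number of missing values
  sum(is.na(latest_na$data)) >= sum(is.na(latest_zero$data))   # expected: TRUE
}, silent = FALSE)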
#' @export
#'
#' @title Apply a function to \emph{mts_monitor} time series
#'
#' @param monitor \emph{mts_monitor} object.
#' @param FUN Function used to modify time series.
#' @param ... Additional arguments to be passed to \code{FUN}.
#'
#' @description
#' This function works similarly to \code{dplyr::mutate()} and applies
#' \code{FUN} to each time series found in \code{monitor$data}. \code{FUN} must
#' be a function that accepts a numeric vector as its first argument and returns
#' a vector of the same length.
#'
#' @return A modified \code{mts_monitor} object. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @examples
#' library(AirMonitor)
#'
#' Carmel_Valley %>%
#'   monitor_filterDatetime(2016080207, 2016080212) %>%
#'   monitor_toCSV(includeMeta = FALSE) %>%
#'   cat()
#'
#' Carmel_Valley %>%
#'   monitor_filterDatetime(2016080207, 2016080212) %>%
#'   monitor_mutate(function(x) { return(x / 2) }) %>%
#'   monitor_toCSV(includeMeta = FALSE) %>%
#'   cat()
#'

monitor_mutate <- function(
  monitor = NULL,
  FUN = NULL,
  ...
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  MazamaCoreUtils::stopIfNull(FUN)

  # A little involved to catch the case where the user forgets to pass in 'monitor'
  result <- try({
    if ( !monitor_isValid(monitor) )
      stop("First argument is not a valid 'mts_monitor' object.")
  }, silent = TRUE)

  if ( "try-error" %in% class(result) ) {
    err_msg <- geterrmessage()
    if ( stringr::str_detect(err_msg, "object .* not found") ) {
      stop(paste0(err_msg, "\n(Did you forget to pass in the 'monitor' object?)"))
    }
  }

  if ( monitor_isEmpty(monitor) )
    stop("Parameter 'monitor' has no data.")

  # ----- Apply function -------------------------------------------------------

  dataBrick <- base::apply(
    dplyr::select(monitor$data, -1),
    2,
    FUN,
    ...
  )

  monitor$data <- cbind(dplyr::select(monitor$data, 1), dataBrick)

  # ----- Return ---------------------------------------------------------------

  return(monitor)

}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_mutate.R
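Beyond the halving example in the roxygen block, a short sketch (not part of the package source) using monitor_mutate() to cap implausible spikes; because FUN must return a same-length vector, vectorized functions like pmin() fit naturally and extra arguments pass through '...':

library(AirMonitor)

capped <-
  Carmel_Valley %>%
  monitor_mutate(pmin, 500)  # pmin(column, 500) for every data column

max(capped$data[, -1], na.rm = TRUE) <= 500  # expected: TRUE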
#' @export
#'
#' @title Apply NowCast algorithm to \emph{mts_monitor} data
#'
#' @param monitor \emph{mts_monitor} object.
#' @param version Name of the type of NowCast algorithm to be used.
#' @param includeShortTerm Logical specifying whether to calculate preliminary
#' NowCast values starting with the 2nd hour.
#'
#' @return A modified \code{mts_monitor} object. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @description A NowCast algorithm is applied to the data in the
#' \code{monitor} object. The \code{version} argument specifies the minimum
#' weight factor and number of hours to be used in the calculation.
#'
#' Available versions include:
#' \enumerate{
#' \item{\code{pm}: hours = 12, weight = 0.5}
#' \item{\code{pmAsian}: hours = 3, weight = 0.1}
#' \item{\code{ozone}: hours = 8, weight = NA}
#' }
#'
#' The default, \code{version = "pm"}, is appropriate for typical usage.
#'
#' @details
#' This function calculates each hour's NowCast value based on the value
#' for the given hour and the previous N-1 hours, where N is the number
#' of hours appropriate for the \code{version} argument. For example, if
#' \code{version = "pm"}, the NowCast value for Hour 12 is based on the data
#' from hours 1-12.
#'
#' The function returns values when at least two of the most recent three hours
#' have data. NA's are returned for hours where this condition is not met.
#'
#' By default, the function will not return a valid value until the Nth hour.
#' If \code{includeShortTerm = TRUE}, the function will return a valid value
#' after only the 2nd hour (provided, of course, that both hours are valid).
#'
#' Calculated NowCast values are truncated to the nearest 0.1 ug/m3 for 'pm' and
#' nearest 0.001 ppm for 'ozone' regardless of the precision of the data in the
#' incoming \emph{mts_monitor} object.
#'
#' @references \url{https://en.wikipedia.org/wiki/Nowcast_(Air_Quality_Index)}
#' @references \href{https://www.airnow.gov/sites/default/files/2020-05/aqi-technical-assistance-document-sept2018.pdf}{AQI Technical Assistance Document}
#'

# NOTE: This script is based on the javascript code at:
# NOTE: https://github.com/chatch/nowcast-aqi/blob/master/nowcast-aqi.js
# NOTE: To compute a valid NowCast, you must have at least two of the most recent 3 hours

# TODO: python-aqi at: https://pypi.python.org/pypi/python-aqi

#### ----- NowCast Calculation Overview -----
#
# The process for calculating the NowCast concentration and AQI for PM2.5 or PM10 is as follows:
#
# 1. Compute the concentration range (max-min) over the last 12 hours.
# 2. Divide the range by the maximum concentration in the 12 hour period to obtain the scaled rate of change.
# 3. Compute the weight factor by subtracting the scaled rate from 1. The weight factor must be between 0.5 and 1.
#    The minimum limit approximates a 3-hour average. If the weight factor is less than 0.5, then set it equal to 0.5.
# 4. Multiply each hourly concentration by the weight factor raised to the power of how many hours ago the concentration
#    was measured (for the current hour, the factor is raised to the zero power).
# 5. Compute the NowCast by summing these products and dividing by the sum of the weight factors raised to the power of
#    how many hours ago the concentration was measured.
monitor_nowcast <- function( monitor, version = c("pm", "pmAsian", "ozone"), includeShortTerm = FALSE ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(monitor) version <- match.arg(version) includeShortTerm <- MazamaCoreUtils::setIfNull(includeShortTerm, FALSE) # A little involved to catch the case where the user forgets to pass in 'monitor' result <- try({ if ( !monitor_isValid(monitor) ) stop("First argument is not a valid 'mts_monitor' object.") }, silent = TRUE) if ( "try-error" %in% class(result) ) { err_msg <- geterrmessage() if ( stringr::str_detect(err_msg, "object .* not found") ) { stop(paste0(err_msg, "\n(Did you forget to pass in the 'monitor' object?)")) } } if ( monitor_isEmpty(monitor) ) stop("Parameter 'monitor' has no data.") # ----- Choose settings -------------------------------------------------------- # Set parameters based on version if ( version == "pm" ) { numHrs <- 12 weightFactorMin <- 0.5 digits <- 1 } else if ( version == "pmAsian" ) { numHrs <- 3 weightFactorMin <- 0.1 digits <- 1 } else if ( version == "ozone" ) { numHrs <- 8 weightFactorMin <- NA # negative values adjusted up to 0 in .weightFactor() digits <- 3 # NOTE: digits = 3 assumes Ozone values given in ppm; update to 0 if values given in ppb } # ----- Apply function ------------------------------------------------------- # Apply nowcast to each data column newData <- apply( dplyr::select(monitor$data, -1), 2, function(x) { .nowcast(x, numHrs, weightFactorMin, includeShortTerm) } ) # NOTE: Truncate, rather than round, per the following: # NOTE: https://forum.airnowtech.org/t/the-nowcast-for-ozone-and-pm/172 monitor$data[2:ncol(monitor$data)] <- as.data.frame(trunc(newData*10^digits)/10^digits) # ----- Return --------------------------------------------------------------- return( structure(monitor, class = c("mts_monitor", "list")) ) } # ===== Internal Functions ===================================================== .nowcast <- function(x, numHrs, weightFactorMin, includeShortTerm) { if ( includeShortTerm ) { firstHr <- 2 } else { firstHr <- numHrs } # Start at the end of the data (most recent hour) and work backwards # The oldest hour for which we can calculate nowcast is numHrs, unless # includeShortTerm = TRUE, in which case we can go back to the 2nd hour. for ( i in length(x):firstHr ) { # Apply nowcast algorithm to numHrs data points in order with more recent first concByHour <- x[i:max(1, i - numHrs + 1)] if ( sum( is.na(concByHour[1:3]) ) >= 2 ) { # If two or more of the most recent 3 hours are missing, no valid Nowcast will be reported x[i] <- NA } else if ( is.na(concByHour[1]) ) { # If the current hour is missing, no valid Nowcast will be reported # NOTE: This conflicts with the algorithm as described here: # NOTE: https://forum.airnowtech.org/t/daily-and-hourly-aqi-pm2-5/171 # NOTE: # NOTE: But experience shows that NowCast replacements for missing # NOTE: PM2.5 values are very problematic. # NOTE: # NOTE: The Wikipedia page: https://en.wikipedia.org/wiki/NowCast_(air_quality_index) # NOTE: has the following statement without citation: # NOTE: "Because the most recent hours of data are weighted so heavily in the NowCast when # NOTE: PM levels are changing, EPA does not report the NowCast when data is missing for c1 or c2." # NOTE: # NOTE: We take a compromise approach and only invalidate NowCast when data is missing for c1. 
        x[i] <- NA

      } else {

        # Calculate the weight factor according to the type of air quality data
        weightFactor <- .weightFactor(concByHour, weightFactorMin)

        # NOTE: We need to create vectors so that we can sum at the end with na.rm = TRUE
        weightedConcs <- rep(as.numeric(NA), numHrs)
        weightFactors <- rep(as.numeric(NA), numHrs)

        # Loop over hours to get individual elements
        for (j in 1:numHrs) {
          if ( !is.na( concByHour[j]) ) {
            weightedConcs[j] <- concByHour[j] * weightFactor^(j - 1)
            weightFactors[j] <- weightFactor^(j - 1)
          }
        }

        x[i] <- sum(weightedConcs, na.rm = TRUE) / sum(weightFactors, na.rm = TRUE)

      }

  }

  # Set missing values when there are not enough preceding hours
  x[1:(firstHr - 1)] <- NA

  return(x)

}

# Calculate the weight factor ('w' in the NowCast formula)
#   concByHour: vector of hourly concentration values
#   weightFactorMin (optional): weight factor minimum
#
# Assumes concByHour has at least one valid value to calculate min & max. In
# fact, .nowcast won't even call this function if more than one of the three
# most recent hours is invalid.

.weightFactor <- function(concByHour, weightFactorMin) {

  # NOTE: Use distinct names rather than shadowing base::min() and base::max()
  minConc <- min(concByHour, na.rm = TRUE)
  maxConc <- max(concByHour, na.rm = TRUE)

  # Calculate weight factor
  # NOTE: https://forum.airnowtech.org/t/the-nowcast-for-ozone-and-pm/172 says that there is "no minimum
  # NOTE: weight factor" for ozone; however, we limit the value to zero since otherwise it would be possible
  # NOTE: to get negative weights, even as large as -Inf (i.e. if min < 0 & max = 0).
  # NOTE: Otherwise, we don't worry about negatives, per the following:
  # NOTE: https://forum.airnowtech.org/t/how-does-airnow-handle-negative-hourly-concentrations/143
  weightFactor <- 1 - (maxConc - minConc)/maxConc
  weightFactor <- min(weightFactor, 1, na.rm = TRUE)
  weightFactor <- max(weightFactor, weightFactorMin, 0, na.rm = TRUE)

  return(weightFactor)

}

# ===== DEBUG =================================================================

if ( FALSE ) {

  # From: https://forum.airnowtech.org/t/the-nowcast-for-pm2-5-and-pm10/172
  x <- c(34.9, 43, 50, 64.9, 69.2, 66.2, 53.7, 48.6, 49.2, 35, NA, 21)

  .nowcast(x, 12, 0.5, FALSE)

  print(.nowcast(x, 12, 0.5, TRUE)[12], digits = 8)
  # 28.409801 matches the result in the web page.

}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_nowcast.R
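A hand check of overview steps 1-3 (a sketch, not package code) for the test vector used in the DEBUG block above:

x <- c(34.9, 43, 50, 64.9, 69.2, 66.2, 53.7, 48.6, 49.2, 35, NA, 21)
concByHour <- rev(x)                                      # most recent hour first
rangeConc  <- diff(range(concByHour, na.rm = TRUE))       # step 1: max - min
scaledRate <- rangeConc / max(concByHour, na.rm = TRUE)   # step 2
w <- max(1 - scaledRate, 0.5)                             # step 3: 'pm' floor of 0.5
w  # 0.5 -- the scaled rate of change here exceeds 0.5, so the floor applies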
#' @export #' #' @title Extract a column of metadata or data #' #' @param monitor \emph{mts_monitor} object. #' @param var A variable name found in the \code{meta} or \code{data} #' dataframe of the incoming \emph{mts_monitor} time series object. #' #' @description #' This function acts similarly to \code{\link[dplyr]{pull}} working on #' \code{monitor$meta} or \code{monitor$data}. Data are returned as a simple array. #' Data are pulled from whichever dataframe contains \code{var}. #' #' @return An array of values. #' #' @examples #' library(AirMonitor) #' #' # Metadata #' Camp_Fire %>% #' monitor_pull("deploymentType") %>% #' table() #' #' # Data for a specific ID #' Camp_Fire %>% #' monitor_dailyStatistic(mean) %>% #' monitor_pull("6bbab08e3786ef66_840060450006") %>% #' round(0) #' #' # Associated dates #' Camp_Fire %>% #' monitor_dailyStatistic(mean) %>% #' monitor_pull("datetime") #' monitor_pull <- function( monitor = NULL, var = NULL ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(monitor) MazamaCoreUtils::stopIfNull(var) # NOTE: Additional validation is handled by MazamaTimeSeries::mts_pull() # ----- Call MazamaTimeSeries function --------------------------------------- values <- MazamaTimeSeries::mts_pull( mts = monitor, var = var ) # ----- Return --------------------------------------------------------------- return(values) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_pull.R
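A small sketch (not part of the package source): because monitor_pull() searches both dataframes, the same call pattern retrieves either metadata or a data column:

library(AirMonitor)

Carmel_Valley %>% monitor_pull("timezone")               # from meta
Carmel_Valley %>% monitor_pull("datetime") %>% range()   # from data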
#' @export #' #' @title Replace \emph{mts_monitor} data with another value #' #' @param monitor \emph{mts_monitor} object. #' @param filter R expression used to identify values for replacement. #' @param value Numeric replacement value. #' #' @description Use an R expression to identify values for replacement. #' #' The R expression given in \code{filter} is used to identify elements #' in \code{monitor$data} that should be replaced. The \code{datetime} column #' will be retained unmodified. Typical usage would include #' #' \enumerate{ #' \item{replacing negative values with 0} #' \item{replacing unreasonably high values with \code{NA}} #' } #' #' Expressions should use \code{data} for the left hand side of the comparison. #' #' @return A modified \code{mts_monitor} object. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @examples #' library(AirMonitor) #' #' wa <- monitor_filterMeta(NW_Megafires, stateCode == 'WA') #' any(wa$data < 5, na.rm = TRUE) #' #' wa_zero <- monitor_replaceValues(wa, data < 5, 5) #' any(wa_zero$data < 5, na.rm = TRUE) monitor_replaceValues <- function( monitor = NULL, filter = NULL, value = NULL ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(monitor) MazamaCoreUtils::stopIfNull(value) if ( monitor_isEmpty(monitor) ) stop("'monitor' has no data") # Remove any duplicate data records monitor <- monitor_distinct(monitor) # NOTE: Test this with: condition_call <- substitute(data < 0) # Create a "condition call" -- basically, an expression that isn't run yet. condition_call <- substitute(filter) filterString <- paste(as.character(condition_call)[2], as.character(condition_call)[1], as.character(condition_call)[3]) # NOTE: Example condition_call: # NOTE: > as.character(condition_call) # NOTE: [1] "<" "data" "0" if ( !any(stringr::str_detect(filterString, 'data')) ) stop( sprintf("bad filter: \"%s\". Try something like \"data < 0\".", filterString) ) # ----- Replace data --------------------------------------------------------- # Create a data-only tibble by omitting the first 'datetime' column data <- monitor$data %>% dplyr::select(-.data$datetime) # Find places where condition is true dataMask <- eval(condition_call) # NOTE: Below is a previous version which passes a second argument to eval. # # # Use FUN to create a mask # FUN <- function(list) { eval(condition_call, data.frame(data = list)) } # dataMask <- apply(data, 2, FUN) # dataMask <- replace(dataMask, is.na(dataMask), FALSE) # Replace matching data with value data[dataMask] <- value # Replace monitor$data data columns with new data monitor$data[,-1] <- data # ----- Return ------------------------------------------------------------- return( monitor ) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_replaceValues.R
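A sketch (not part of the package source) of the two typical uses named in the description:

library(AirMonitor)

cleaned <-
  Carmel_Valley %>%
  monitor_replaceValues(data < 0, 0) %>%       # negative values -> zero
  monitor_replaceValues(data > 1000, NA)       # implausibly high values -> NA

any(cleaned$data[, -1] < 0, na.rm = TRUE)      # expected: FALSE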
#' @export
#' @importFrom rlang .data
#'
#' @title Subset and reorder time series within an \emph{mts_monitor} object
#'
#' @param monitor \emph{mts_monitor} object.
#' @param id Vector of \code{deviceDeploymentIDs}.
#'
#' @description
#' This function acts similarly to \code{dplyr::select()} working on
#' \code{monitor$data}. The returned \emph{mts_monitor} object will contain only
#' those time series identified by \code{id} in the order specified.
#'
#' This can be helpful when using faceted plot functions based on \pkg{ggplot}
#' such as those found in the \pkg{AirMonitorPlots} package.
#'
#' @return A reordered (subset) of the incoming \emph{mts_monitor} object. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @seealso \link{monitor_filterMeta}
#'

monitor_select <- function(
  monitor,
  id
) {

  # ----- Validate parameters --------------------------------------------------

  # NOTE: Validation is handled by MazamaTimeSeries::mts_select()

  # ----- Call MazamaTimeSeries function ---------------------------------------

  monitor <-
    MazamaTimeSeries::mts_select(
      mts = monitor,
      deviceDeploymentID = id
    )

  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ---------------------------------------------------------------

  return(invisible(monitor))

}

# ===== Alias ==================================================================

# TODO: Add examples to the alias

#' @rdname monitor_select
#' @export
monitor_reorder <- monitor_select
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_select.R
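A sketch (not part of the package source) showing that monitor_select() honors the order of 'id', which matters for faceted plots:

library(AirMonitor)

ids <- rev(Camp_Fire$meta$deviceDeploymentID[1:3])

reordered <- Camp_Fire %>% monitor_select(ids)
identical(reordered$meta$deviceDeploymentID, ids)  # expected: TRUE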
#' @export
#' @importFrom rlang .data
#'
#' @title Data-based subsetting of time series within an \emph{mts_monitor} object.
#'
#' @param monitor \emph{mts_monitor} object.
#' @param FUN A function applied to time series data that returns TRUE or FALSE.
#'
#' @description
#' Subsetting of \code{monitor} acts similarly to \code{tidyselect::where()} working on
#' \code{monitor$data}. The returned \emph{mts_monitor} object will contain only
#' those time series where \code{FUN} applied to the time series data returns \code{TRUE}.
#'
#' @return A subset of the incoming \emph{mts_monitor} object. (A list with
#' \code{meta} and \code{data} dataframes.)
#'
#' @seealso \link{monitor_select}
#'
#' @examples
#' library(AirMonitor)
#'
#' # Show all Camp_Fire locations
#' Camp_Fire$meta$locationName
#'
#' # Use package US_AQI data for HAZARDOUS
#' name <- US_AQI$names_eng[6]
#' threshold <- US_AQI$breaks_PM2.5[6]
#'
#' # Find HAZARDOUS locations
#' worst_sites <-
#'   Camp_Fire %>%
#'   monitor_selectWhere(
#'     function(x) { any(x >= threshold, na.rm = TRUE) }
#'   )
#'
#' # Show the worst locations
#' worst_sites$meta$locationName
#'

monitor_selectWhere <- function(
  monitor,
  FUN
) {

  # NOTE: Validation is handled by MazamaTimeSeries::mts_selectWhere()

  # ----- Call MazamaTimeSeries function ---------------------------------------

  monitor <-
    MazamaTimeSeries::mts_selectWhere(
      mts = monitor,
      FUN = FUN
    )

  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ---------------------------------------------------------------

  return(invisible(monitor))

}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_selectWhere.R
#' @export #' #' @title Extend/contract \emph{mts_monitor} time series to new start and end times #' #' @param monitor \emph{mts_monitor} object. #' @param startdate Desired start date (ISO 8601). #' @param enddate Desired end date (ISO 8601). #' @param timezone Olson timezone used to interpret \code{startdate} and \code{enddate}. #' #' @description Extends or contracts the time range of an \emph{mts_monitor} object by #' adding/removing time steps at the start and end and filling any new time #' steps with missing values. The resulting time axis is guaranteed to be #' a regular, hourly axis with no gaps using the same timezone as the incoming #' \emph{mts_monitor} object. This is useful when you want to place separate \emph{mts_monitor} #' objects on the same time axis for plotting. #' #' If either \code{startdate} or \code{enddate} is missing, the start or end of #' the timeseries in \code{monitor} will be used. #' #' @note If \code{startdate} or \code{enddate} is a \code{POSIXct} value, then #' \code{timezone} will be set to the timezone associated with \code{startdate} #' or \code{enddate}. #' In this common case, you don't need to specify \code{timezone} explicitly. #' #' If neither \code{startdate} nor \code{enddate} is a \code{POSIXct} value #' AND no \code{timezone} is supplied, the timezone will be inferred from #' the most common timezone found in \code{monitor}. #' #' @return The incoming \emph{mts_monitor} time series object defined on a new time axis. #' (A list with \code{meta} and \code{data} dataframes.) #' #' @examples #' library(AirMonitor) #' #' # Default range #' Carmel_Valley %>% #' monitor_timeRange() #' #' # One-sided extend with user specified timezone #' Carmel_Valley %>% #' monitor_setTimeAxis(enddate = 20160820, timezone = "UTC") %>% #' monitor_timeRange() #' #' # Two-sided extend with user specified timezone #' Carmel_Valley %>% #' monitor_setTimeAxis(20190720, 20190820, timezone = "UTC") %>% #' monitor_timeRange() #' #' # Two-sided extend without timezone (uses monitor$meta$timezone) #' Carmel_Valley %>% #' monitor_setTimeAxis(20190720, 20190820) %>% #' monitor_timeRange() #' monitor_setTimeAxis <- function( monitor = NULL, startdate = NULL, enddate = NULL, timezone = NULL ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(monitor) # NOTE: Additional validation is handled by MazamaTimeSeries::mts_setTimeAxis() # ----- Call MazamaTimeSeries function --------------------------------------- monitor <- MazamaTimeSeries::mts_setTimeAxis( mts = monitor, startdate = startdate, enddate = enddate, timezone = timezone ) class(monitor) <- union("mts_monitor", class(monitor)) # ----- Return --------------------------------------------------------------- return(invisible(monitor)) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_setTimeAxis.R
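A sketch (not part of the package source) of the stated use case, placing two objects on one time axis; it assumes both example datasets are available and that identical start/end dates yield identical hourly axes:

library(AirMonitor)

a <- Carmel_Valley %>% monitor_setTimeAxis(20160801, 20160810, timezone = "UTC")
b <- NW_Megafires %>% monitor_setTimeAxis(20160801, 20160810, timezone = "UTC")

identical(a$data$datetime, b$data$datetime)  # expected: TRUE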
#' @export
#'
#' @title Subset time series based on their position within an \emph{mts_monitor} object
#'
#' @param monitor \emph{mts_monitor} object.
#' @param n Number of rows of \code{monitor$meta} to select.
#'
#' @description An \emph{mts_monitor} object is reduced so as to contain only
#' the first or last \code{n} timeseries. These functions work similarly to
#' \code{\link[dplyr:slice_head]{dplyr::slice_head}} and
#' \code{\link[dplyr:slice_tail]{dplyr::slice_tail}}
#' but apply to both dataframes in the \emph{mts_monitor} object.
#'
#' This is primarily useful when the \emph{mts_monitor} object has been ordered
#' by a previous call to \code{\link{monitor_arrange}} or by some other means.
#'
#' \code{monitor_slice_head()} selects the first \code{n} and
#' \code{monitor_slice_tail()} selects the last \code{n} time series in the object.
#'
#' @return A subset of the incoming \emph{mts_monitor} time series object.
#' (A list with \code{meta} and \code{data} dataframes.)
#'
#' @examples
#' library(AirMonitor)
#'
#' # Find lowest elevation sites
#' Camp_Fire %>%
#'   monitor_filter(!is.na(elevation)) %>%
#'   monitor_arrange(elevation) %>%
#'   monitor_slice_head(n = 5) %>%
#'   monitor_getMeta() %>%
#'   dplyr::select(elevation, locationName)
#'
#' # Find highest elevation sites
#' Camp_Fire %>%
#'   monitor_filterMeta(!is.na(elevation)) %>%
#'   monitor_arrange(elevation) %>%
#'   monitor_slice_tail(n = 5) %>%
#'   monitor_getMeta() %>%
#'   dplyr::select(elevation, locationName)
#'
#' @export
#' @rdname monitor_slice
monitor_slice_head <- function(
  monitor,
  n = 5
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  MazamaCoreUtils::stopIfNull(n)

  # NOTE: Additional validation is handled by MazamaTimeSeries::mts_slice_head()

  # ----- Call MazamaTimeSeries function ---------------------------------------

  monitor <-
    MazamaTimeSeries::mts_slice_head(
      mts = monitor,
      n = n
    )

  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ---------------------------------------------------------------

  return(invisible(monitor))

}

#' @export
#' @rdname monitor_slice
monitor_slice_tail <- function(
  monitor,
  n = 5
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  MazamaCoreUtils::stopIfNull(n)

  # NOTE: Additional validation is handled by MazamaTimeSeries::mts_slice_tail()

  # ----- Call MazamaTimeSeries function ---------------------------------------

  monitor <-
    MazamaTimeSeries::mts_slice_tail(
      mts = monitor,
      n = n
    )

  class(monitor) <- union("mts_monitor", class(monitor))

  # ----- Return ---------------------------------------------------------------

  return(invisible(monitor))

}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_slice.R
#' @title Get time related information for a monitor
#'
#' @description Calculate the local time for a monitor, as well as
#' sunrise, sunset and solar noon times, and create several temporal masks.
#'
#' The returned dataframe will have as many rows as the length of the incoming
#' UTC \code{time} vector and will contain the following columns:
#'
#' \itemize{
#' \item{\code{localStdTime_UTC} -- UTC representation of local \strong{standard} time}
#' \item{\code{daylightSavings} -- logical mask = TRUE if daylight savings is in effect}
#' \item{\code{localTime} -- local clock time}
#' \item{\code{sunrise} -- time of sunrise on each localTime day}
#' \item{\code{sunset} -- time of sunset on each localTime day}
#' \item{\code{solarnoon} -- time of solar noon on each localTime day}
#' \item{\code{day} -- logical mask = TRUE between sunrise and sunset}
#' \item{\code{morning} -- logical mask = TRUE between sunrise and solarnoon}
#' \item{\code{afternoon} -- logical mask = TRUE between solarnoon and sunset}
#' \item{\code{night} -- logical mask = opposite of day}
#' }
#'
#' @details
#' While the \pkg{lubridate} package makes it easy to work in local timezones,
#' there is no easy way in R to work in "Local Standard Time" (LST) (\emph{i.e.
#' never shifting to daylight savings}) as is often required when working with
#' air quality data. US EPA regulations mandate that daily averages be calculated
#' based on LST.
#'
#' The \code{localStdTime_UTC} is primarily for use internally and provides
#' an important tool for creating LST daily averages and LST axis labeling.
#'
#' @param monitor \emph{mts_monitor} object.
#' @param id \code{deviceDeploymentID} used to select a single time
#' series found in \code{monitor}. -- optional if \code{monitor} only has one
#' time series.
#'
#' @return A dataframe with times and masks.
#'
#' @export
#' @importFrom rlang .data
#'
#' @examples
#' library(AirMonitor)
#'
#' carmel <-
#'   Carmel_Valley %>%
#'   monitor_filterDate(20160801, 20160810)
#'
#' # Create timeInfo object for this monitor
#' ti <- monitor_timeInfo(carmel)
#'
#' # Subset the data based on day/night masks
#' data_day <- carmel$data[ti$day,]
#' data_night <- carmel$data[ti$night,]
#'
#' # Build two monitor objects
#' carmel_day <- list(meta = carmel$meta, data = data_day)
#' carmel_night <- list(meta = carmel$meta, data = data_night)
#'
#' # Plot them
#' carmel_day %>%
#'   monitor_timeseriesPlot(
#'     pch = 8,
#'     col = "goldenrod",
#'     shadedNight = TRUE
#'   )
#'
#' carmel_night %>%
#'   monitor_timeseriesPlot(
#'     add = TRUE,
#'     pch = 16,
#'     col = "darkblue"
#'   )

monitor_timeInfo <- function(
  monitor = NULL,
  id = NULL
) {

  # ----- Validate parameters --------------------------------------------------

  monitor_check(monitor)

  if ( nrow(monitor$meta) == 1 ) {
    id <- monitor$meta$deviceDeploymentID[1]
  } else {
    if ( is.null(id) ) {
      stop("'id' must be specified if more than one monitor is present")
    } else if ( !id %in% monitor$meta$deviceDeploymentID ) {
      stop(sprintf("id '%s' is not found in 'monitor'", id))
    }
    monitor <-
      monitor %>%
      monitor_filterMeta(.data$deviceDeploymentID == !!id)
  }

  # ----- Return ---------------------------------------------------------------

  timeInfo <-
    MazamaTimeSeries::timeInfo(
      monitor$data$datetime,
      monitor$meta$longitude,
      monitor$meta$latitude,
      monitor$meta$timezone
    )

  return(timeInfo)

}
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_timeInfo.R
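A sketch (not part of the package source) using the day/night masks to compare daytime and nighttime means, one of the stated uses of these temporal masks:

library(AirMonitor)

carmel <- Carmel_Valley %>% monitor_filterDate(20160801, 20160810)
ti <- monitor_timeInfo(carmel)

mean(carmel$data[ti$day, 2], na.rm = TRUE)     # daytime mean PM2.5
mean(carmel$data[ti$night, 2], na.rm = TRUE)   # nighttime mean PM2.5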
#' @export #' @title Get the time range for a monitor #' #' @description This function is a wrapper for \code{range(monitor$data$datetime)} #' and is convenient for use in data pipelines. #' #' Dates will be returned in the timezone associated with #' \code{monitor$data$datetime} which is typically "UTC" unless #' \code{timezone} is specified. #' #' @param monitor \emph{mts_monitor} object. #' @param timezone Olson timezone for the returned dates. #' #' @return A vector containing the minimum and maximum times of a #' \emph{mts_monitor} object. #' #' @examples #' Carmel_Valley %>% #' monitor_timeRange(timezone = "America/Los_Angeles") monitor_timeRange <- function( monitor = NULL, timezone = NULL ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(monitor) if ( !is.null(timezone) ) { if ( !timezone %in% OlsonNames() ) stop(sprintf("timezone '%s' is not a valid OlsonNames() timezone", timezone)) } # ----- Return --------------------------------------------------------------- timeRange <- range(monitor$data$datetime) if ( !is.null(timezone) ) timeRange <- lubridate::with_tz(timeRange, tzone = timezone) return(timeRange) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_timeRange.R
#' @export #' @import graphics #' @importFrom grDevices adjustcolor #' #' @title Create timeseries plot #' #' @description #' Creates a time series plot of data from a \emph{mts_monitor} object. #' By default, points are plotted as semi-transparent squares. All data values #' are plotted from all monitors found in the \emph{mts_monitor} object. #' #' Reasonable defaults are chosen for annotations and plot characteristics. #' Users can override any defaults by passing in parameters accepted by #' \code{graphics::plot.default}. #' #' @param monitor \emph{mts_monitor} object. #' @param id \code{deviceDeploymentID} used to limit plotting to a single time #' series found in \code{monitor}. #' @param shadedNight Logical specifying whether to add nighttime shading. #' @param add Logical specifying whether to add to the current plot. #' @param addAQI Logical specifying whether to add visual AQI decorations. #' @param palette Named color palette to use when adding AQI decorations. #' @param opacity Opacity to use for points. By default, an opacity is chosen based #' on the number of points so that trends are highlighted while outliers diminish #' in visual importance as the number of points increases. #' @param NAAQS Version of NAAQS levels to use. See Note. #' @param ... Additional arguments to be passed to \code{graphics::plot.default()}. #' #' @return No return value. This function is called to draw an air quality #' time series plot on the active graphics device. #' #' @note #' On February 7, 2024, EPA strengthened the National Ambient Air Quality #' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans #' from harmful and costly health impacts, such as heart attacks and premature #' death. Particle or soot pollution is one of the most dangerous forms of air #' pollution, and an extensive body of science links it to a range of serious #' and sometimes deadly illnesses. EPA is setting the level of the primary #' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to #' provide increased public health protection, consistent with the available #' health science. #' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}. #' #' @examples #' library(AirMonitor) #' #' # Single monitor #' Carmel_Valley %>% #' monitor_timeseriesPlot() #' #' # Multiple monitors #' Camp_Fire %>% #' monitor_filter(countyName == "Alameda") %>% #' monitor_timeseriesPlot(main = "All Alameda County Monitors") #' #' # Standard extras #' Carmel_Valley %>% #' monitor_timeseriesPlot( #' shadedNight = TRUE, #' addAQI = TRUE #' ) #' addAQILegend() #' #' # Standard extras using the updated PM NAAQS #' Carmel_Valley %>% #' monitor_timeseriesPlot( #' shadedNight = TRUE, #' addAQI = TRUE, #' NAAQS = "PM2.5_2024" #' ) #' addAQILegend(NAAQS = "PM2.5_2024") #' #' # Fancy plot based on pm2.5 values #' pm2.5 <- Carmel_Valley$data[,2] #' Carmel_Valley %>% #' monitor_timeseriesPlot( #' shadedNight = TRUE, #' pch = 16, #' cex = pmax(pm2.5 / 100, 0.5), #' col = aqiColors(pm2.5), #' opacity = 0.8 #' ) #' addAQILegend(pch = 16, cex = 0.6, bg = "white") monitor_timeseriesPlot <- function( monitor = NULL, id = NULL, shadedNight = FALSE, add = FALSE, addAQI = FALSE, palette = c("EPA", "subdued", "deuteranopia"), opacity = NULL, NAAQS = c("PM2.5", "PM2.5_2024"), ... 
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  shadedNight <- MazamaCoreUtils::setIfNull(shadedNight, FALSE)
  add <- MazamaCoreUtils::setIfNull(add, FALSE)
  addAQI <- MazamaCoreUtils::setIfNull(addAQI, FALSE)
  palette <- match.arg(palette)
  NAAQS <- match.arg(NAAQS)

  # Subset 'monitor' to a single time series
  if ( !is.null(id) ) {
    if ( !id %in% monitor$meta$deviceDeploymentID )
      stop(sprintf("id = \"%s\" is not found in 'monitor'", id))
    monitor <-
      monitor %>%
      monitor_filter(.data$deviceDeploymentID == !!id)
  }

  monitor <- monitor_dropEmpty(monitor)

  if ( ncol(monitor$data) < 2 )
    stop("no valid data in 'monitor'")

  meta <- monitor$meta
  data <- monitor$data

  # ----- Time axis ------------------------------------------------------------

  # Identify timezone(s)
  timezone <- monitor_bestTimezone(monitor)

  # Pull out time data
  datetime <- lubridate::with_tz(data$datetime, tzone = timezone)

  # ----- argsList -------------------------------------------------------------

  argsList <- list(...)

  argsList$x <- data$datetime
  argsList$y <- data %>% dplyr::pull(2)

  # * Plot limits -----

  if ( !("ylim" %in% names(argsList)) ) {
    ymin <- min(data[, -1], na.rm = TRUE)
    ymin <- min(0, ymin)
    ymax <- max(data[, -1], na.rm = TRUE)
    buffer <- 0.04 * (ymax - ymin) # Standard R buffer around min/max
    argsList$ylim <- c(ymin - buffer, ymax + buffer)
  }

  # * Annotations -----

  middleDatetime <- datetime[round(length(datetime)/2)]
  year <- MazamaCoreUtils::timeStamp(middleDatetime, timezone, unit = "year")

  if ( !("xlab" %in% names(argsList)) ) {
    if ( timezone == "UTC" ) {
      argsList$xlab <- paste0(year[1], " (UTC)")
    } else {
      argsList$xlab <- paste0(year[1], " (local time)")
    }
  }

  # NOTE: For mathematical notation in R see:
  # NOTE: https://magnusmetz.github.io/2013/04/mathematical-annotation-in-r/

  if ( !("ylab" %in% names(argsList)) ) {
    if ( meta$units[1] == "UG/M3") {
      # Most common case
      argsList$ylab <- expression(paste(PM[2.5] * " (", mu, "g/m"^3, ")"))
    } else if ( meta$units[1] == "" ) {
      argsList$ylab <- sprintf("%s", meta$pollutant[1])
    } else {
      argsList$ylab <- sprintf("%s (%s)", meta$pollutant[1], meta$units[1])
    }
  }

  if ( !("main" %in% names(argsList)) ) {
    if ( nrow(meta) == 1 )
      argsList$main <- sprintf("Hourly %s at %s", meta$pollutant[1], meta$locationName)
    else
      argsList$main <- paste0("Hourly ", meta$pollutant[1])
  }

  # * Plot style -----

  if ( !("pch" %in% names(argsList)) )
    argsList$pch <- 15

  # NOTE: Save the color outside of argsList so that opacity can be applied below
  if ( "col" %in% names(argsList) ) {
    my_col <- argsList$col
  } else {
    my_col <- "black"
  }

  # * argsListBlank -----

  argsListBlank <- argsList

  argsListBlank$col <- "transparent"
  argsListBlank$axes <- FALSE

  # ----- Base plot ------------------------------------------------------------

  needToResetMargins <- FALSE

  # Base plot for background
  if ( !add ) {

    # Add space to the left if default margins are in place
    if ( all(par("mar") == c(5.1, 4.1, 4.1, 2.1)) ) {
      par("mar" = c(5.1, 5.1, 4.1, 2.1))
      needToResetMargins <- TRUE
    }

    # Create blank plot
    do.call(plot, argsListBlank)

    # Shaded Night
    if ( shadedNight ) {
      lat <- mean(meta$latitude)
      lon <- mean(meta$longitude)
      timeInfo <- MazamaTimeSeries::timeInfo(datetime, lon, lat, timezone)
      addShadedNight(timeInfo)
    }

    # Add AQI decorations underneath
    if ( addAQI ) {
      addAQIStackedBar(pollutant = meta$pollutant[1], palette = palette, NAAQS = NAAQS)
      addAQILines(pollutant = meta$pollutant[1], palette = palette, NAAQS = NAAQS)
    }

    # Put a box around the plot area
    box()

    # Add axes
    axis(2, las = 1)
# TODO: better x axis smarts, e.g. keep from saying "Monday, Tuesday" etc... axis.POSIXct(1, datetime) } # ----- Overlay data --------------------------------------------------------- if ( is.null(opacity) ) { # Set opacity based on total number of valid measurements dims <- dim(as.matrix(data[, -1])) naCount <- length(which(is.na(data[, -1]))) validCount <- dims[1] * dims[2] - naCount if ( validCount < 2 ) opacity <- 1.0 else if ( validCount < 200 ) opacity <- 0.9 else if ( validCount < 500 ) opacity <- 0.7 else if ( validCount < 1000 ) opacity <- 0.5 else if ( validCount < 2000 ) opacity <- 0.3 else if ( validCount < 5000 ) opacity <- 0.2 else opacity <- 0.15 } for ( id in meta$deviceDeploymentID ) { argsList$y <- data[[id]] # same as data[, id] argsList$col <- adjustcolor(my_col, alpha.f = opacity) # Add the points do.call(points, argsList) } if ( needToResetMargins ) par("mar" = c(5.1, 4.1, 4.1, 2.1)) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_timeseriesPlot.R
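A sketch (not part of the package source) of the 'id' argument, which restricts plotting to one time series in a multi-monitor object:

library(AirMonitor)

id <- Camp_Fire$meta$deviceDeploymentID[1]  # any valid deviceDeploymentID

Camp_Fire %>%
  monitor_timeseriesPlot(
    id = id,
    shadedNight = TRUE,
    addAQI = TRUE
  )
addAQILegend()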
#' @export
#' @importFrom rlang .data
#' @importFrom dplyr across everything na_if
#'
#' @title Convert monitor data into an AQI category table
#'
#' @param monitor \emph{mts_monitor} object.
#' @param NAAQS Version of NAAQS levels to use. See Note.
#' @param siteIdentifier Metadata column used to identify sites or a character
#' vector with site identifiers.
#'
#' @description Creates a table of AQI category vs monitoring site with a count
#' of the number of times each AQI category was experienced at each site. The
#' count will be a count of hours or days depending on the averaging period of
#' the incoming \code{monitor} object.
#'
#' When \code{siteIdentifier} is used, the identifiers must be in the same
#' order as \code{monitor$meta}.
#'
#' @return Table of AQI category counts.
#'
#' @note
#' On February 7, 2024, EPA strengthened the National Ambient Air Quality
#' Standards for Particulate Matter (PM NAAQS) to protect millions of Americans
#' from harmful and costly health impacts, such as heart attacks and premature
#' death. Particle or soot pollution is one of the most dangerous forms of air
#' pollution, and an extensive body of science links it to a range of serious
#' and sometimes deadly illnesses. EPA is setting the level of the primary
#' (health-based) annual PM2.5 standard at 9.0 micrograms per cubic meter to
#' provide increased public health protection, consistent with the available
#' health science.
#' See \href{https://www.epa.gov/pm-pollution/final-reconsideration-national-ambient-air-quality-standards-particulate-matter-pm}{PM NAAQS update}.
#'
#' @examples
#' library(AirMonitor)
#'
#' # Lane County, Oregon AQSIDs all begin with "41039"
#' LaneCounty <-
#'   NW_Megafires %>%
#'   monitor_filter(stringr::str_detect(AQSID, '^41039')) %>%
#'   monitor_filterDate(20150801, 20150901)
#'
#' # Count of hours each site spent in each AQ category in August
#' LaneCounty %>%
#'   monitor_toAQCTable()
#'
#' # Count of days each site spent in each AQ category
#' LaneCounty %>%
#'   monitor_dailyStatistic(mean) %>%
#'   monitor_toAQCTable()
#'
#' # Count of days each site spent in each AQ category (simplified names)
#' siteNames <- c(
#'   "Eugene 1", "Eugene 2", "Eugene 3",
#'   "Springfield", "Oakridge", "Cottage Grove"
#' )
#' LaneCounty %>%
#'   monitor_dailyStatistic(mean) %>%
#'   monitor_toAQCTable(siteIdentifier = siteNames)
#'
#' # Count of days at each AQ level with the new, 2024 NAAQS
#' LaneCounty %>%
#'   monitor_dailyStatistic(mean) %>%
#'   monitor_toAQCTable(NAAQS = "PM2.5_2024")
#'

monitor_toAQCTable <- function(
  monitor,
  NAAQS = c("PM2.5", "PM2.5_2024"),
  siteIdentifier = "locationName"
) {

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(monitor)
  NAAQS <- match.arg(NAAQS)

  if ( !monitor_isValid(monitor) )
    stop("Parameter 'monitor' is not a valid 'mts_monitor' object.")

  if ( monitor_isEmpty(monitor) )
    stop("Parameter 'monitor' has no data.")

  pollutant <- unique(monitor$meta$pollutant)

  if ( length(pollutant) > 1 ) {
    pollutantString <- paste(pollutant, collapse = ", ")
    stop(sprintf("Monitor object contains multiple pollutants: %s", pollutantString))
  }

  if ( length(siteIdentifier) == 1 ) {

    if ( !siteIdentifier %in% names(monitor$meta) ) {
      stop(sprintf("siteIdentifier '%s' is not found in monitor$meta", siteIdentifier))
    } else {
      siteNames <- monitor$meta[[siteIdentifier]]
    }

  } else {

    if ( length(siteIdentifier) != nrow(monitor$meta) ) {
      stop(sprintf("siteIdentifier array length: %d does not match monitor$meta rows: %d",
                   length(siteIdentifier),
nrow(monitor$meta))) } else { siteNames <- siteIdentifier } } # ----- Create table --------------------------------------------------------- # Get AQC matrix (rows = datetime, cols = site, cells = PM2.5) aqcMatrix <- monitor %>% aqiCategories( pollutant, NAAQS ) # NOTE: aqiCategories() can (intentionally) return a vector if ( "integer" %in% class(aqcMatrix)) { aqcMatrix <- as.matrix(aqcMatrix, ncol = 1) } # Create the empty counts table (rows = AQC, cols = site, cells = count) countsTable <- data.frame(row.names = c(US_AQI$names_eng, "Missing")) for ( i in 1:ncol(aqcMatrix) ) { # AQC counts per site aqcCounts <- aqcMatrix[,i] %>% factor(levels = 1:6, labels = US_AQI$names_eng, exclude = NA) %>% table(useNA = "always") %>% as.numeric() # Add another column of data named with siteIdentifier countsTable[[siteNames[i]]] = aqcCounts } # Create AQC table (rows = site, cols = AQC, cells = count) aqcTable <- t(countsTable) # ---- Return ---------------------------------------------------------------- return(aqcTable) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_toAQCTable.R
#' @export #' @importFrom rlang .data #' @importFrom dplyr across everything na_if #' #' @title Convert monitor data to CSV #' #' @param monitor \emph{mts_monitor} object. #' @param includeMeta Logical specifying whether to include \code{monitor$meta}. #' @param includeData Logical specifying whether to include \code{monitor$data}. #' #' @description Converts the contents of the \code{monitor} argument to CSV. #' By default, the output is a text string with "human readable" CSV that #' includes both \code{meta} and \code{data}. When saved as a file, this format #' is useful for point-and-click spreadsheet users who want to have everything #' on a single sheet. #' #' To obtain a machine-parseable CSV string for just the data, you can use #' \code{includeMeta = FALSE}. To obtain machine-parseable metadata, use #' \code{includeData = FALSE}. #' #' @return CSV formatted text. #' #' @examples #' library(AirMonitor) #' #' monitor <- #' Carmel_Valley %>% #' monitor_filterDate(20160802, 20160803) #' #' monitor_toCSV(monitor) %>% cat() #' monitor_toCSV(monitor, includeData = FALSE) %>% cat() #' monitor_toCSV(monitor, includeMeta = FALSE) %>% cat() #' monitor_toCSV <- function( monitor, includeMeta = TRUE, includeData = TRUE ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(monitor) includeMeta <- MazamaCoreUtils::setIfNull(includeMeta, TRUE) includeData <- MazamaCoreUtils::setIfNull(includeData, TRUE) if ( !monitor_isValid(monitor) ) stop("Parameter 'monitor' is not a valid 'mts_monitor' object.") if ( monitor_isEmpty(monitor) ) stop("Parameter 'monitor' has no data.") # ----- Early return if possible --------------------------------------------- if ( !includeMeta && !includeData ) { return("") } else if ( includeMeta && !includeData ) { # Nothing fancy, just the monitor$meta dataframe csvText <- readr::format_csv(monitor$meta, na = "NA", col_names = TRUE) return(csvText) } # ----- Create metaTbl ------------------------------------------------------- # Structure the metaMatrix so that columns match up with dataTbl columns # NOTE: second column is blank to align with the data 'Local Time' column metaMatrix <- cbind( AirMonitor::coreMetadataNames, "", t(monitor$meta[,AirMonitor::coreMetadataNames]) ) # To avoid dplyr .name_repair issues colnames(metaMatrix) <- c("parameter", "blank", monitor$meta$deviceDeploymentID) metaTbl <- dplyr::as_tibble(metaMatrix, .name_repair = "check_unique") # ---- Create dataTbl -------------------------------------------------------- # localTime determination timezones <- monitor$meta$timezone timezone <- ifelse( length(unique(timezones)) == 1, unique(timezones), "UTC" ) # datetime from a monitor object should always be UTC utcTime <- lubridate::with_tz(monitor$data$datetime, tzone = "UTC") # Save character string representations of utcTime and localTime # localTime defaults to UTC if > 1 timezone involved utcTimeString <- strftime(utcTime, "%Y-%m-%d %H:%M:%S %Z", tz = "UTC") localTimeString <- strftime(utcTime, "%Y-%m-%d %H:%M:%S %Z", tz = timezone) dataMatrix <- cbind( utcTimeString, localTimeString, monitor$data[,-1] ) # To avoid dplyr .name_repair issues colnames(dataMatrix) <- make.names(1:ncol(dataMatrix)) dataTbl <- dplyr::as_tibble(dataMatrix, .name_repair = "check_unique") %>% # Convert "NaN" to NA dplyr::mutate(across(everything(), ~ na_if(., "NaN"))) if ( length(unique(timezones)) == 1 ) { names(dataTbl) <- c("UTC Time", "Local Time", monitor$meta$deviceDeploymentID) } else { names(dataTbl) <- c("UTC Time", "UTC Time (no Local Time because > 1 monitor timezone)", monitor$meta$deviceDeploymentID) }
# ---- Assemble desired output ----------------------------------------------- # Two possible output formats remain if ( !includeMeta && includeData ) { # Use the improved 'data' dataframe csvText <- readr::format_csv(dataTbl, na = "NA", col_names = TRUE) } else { # Fancy, "human readable" format appropriate for point-and-click Excel users # Create fake tibble to use as human-readable separators emptyRow <- metaTbl %>% dplyr::filter(.data$parameter == "DONT MATCH ME") metaSeparator <- emptyRow metaSeparator[1,1] <- "##### Site metadata begins below here" dataSeparator <- emptyRow dataSeparator[1,1] <- "##### Hourly data begins below here" # Format as CSV and combine into a "fake file" text string emptyRowText <- readr::format_csv(emptyRow, na = "NA", col_names = FALSE) metaHeaderText <- readr::format_csv(metaSeparator, na = "NA", col_names = FALSE) metaBodyText <- readr::format_csv(metaTbl, na = "NA", col_names = TRUE) dataHeaderText <- readr::format_csv(dataSeparator, na = "NA", col_names = FALSE) dataBodyText <- readr::format_csv(dataTbl, na = "NA", col_names = TRUE) csvText <- paste0( metaHeaderText, metaBodyText, emptyRowText, dataHeaderText, dataBodyText, collapse = "\n" ) } # ---- Return ---------------------------------------------------------------- return(csvText) }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_toCSV.R
#' @export #' @importFrom dplyr all_of #' #' @title Convert a mts_monitor object to a ws_monitor object for the PWFSLSmoke package #' #' @param monitor \emph{mts_monitor} object #' #' @return A \pkg{PWFSLSmoke} \emph{ws_monitor} object. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description A \emph{mts_monitor} object is modified so that it becomes #' a \pkg{PWFSLSmoke} package \emph{ws_monitor} object. While some information #' will be lost, this operation can be reversed with \code{monitor_fromPWFSLSmoke()}. #' #' @note In order to avoid duplicated \code{monitorID} values in the returned #' \emph{ws_monitor} object, the full \code{deviceDeploymentID} will be used #' as the \code{monitorID}. #' monitor_toPWFSLSmoke <- function( monitor = NULL ) { # ----- Validate parameters -------------------------------------------------- monitor_check(monitor) # ----- Create meta ---------------------------------------------------------- # > names(PWFSLSmoke::Carmel_Valley$meta) %>% print(width = 75) # [1] "monitorID" "longitude" # [3] "latitude" "elevation" # [5] "timezone" "countryCode" # [7] "stateCode" "siteName" # [9] "agencyName" "countyName" # [11] "msaName" "monitorType" # [13] "siteID" "instrumentID" # [15] "aqsID" "pwfslID" # [17] "pwfslDataIngestSource" "telemetryAggregator" # [19] "telemetryUnitID" newColumns <- c( "monitorID", "longitude", "latitude", "elevation", "timezone", "countryCode", "stateCode", "siteName", "agencyName", "countyName", "msaName", "monitorType", "siteID", "instrumentID", "aqsID", "pwfslID", "pwfslDataIngestSource", "telemetryAggregator", "telemetryUnitID" ) commonColumns <- intersect(newColumns, AirMonitor::coreMetadataNames) # > print(commonColumns, width = 75) # [1] "longitude" "latitude" "elevation" "timezone" "countryCode" # [6] "stateCode" "countyName" missingColumns <- setdiff(newColumns, AirMonitor::coreMetadataNames) # > print(missingColumns, width = 75) # [1] "monitorID" "siteName" # [3] "agencyName" "msaName" # [5] "monitorType" "siteID" # [7] "instrumentID" "aqsID" # [9] "pwfslID" "pwfslDataIngestSource" # [11] "telemetryAggregator" "telemetryUnitID" # Available columns # > print(coreMetadataNames, width = 75) # [1] "deviceDeploymentID" "deviceID" # [3] "deviceType" "deviceDescription" # [5] "deviceExtra" "pollutant" # [7] "units" "dataIngestSource" # [9] "dataIngestURL" "dataIngestUnitID" # [11] "dataIngestExtra" "dataIngestDescription" # [13] "locationID" "locationName" # [15] "longitude" "latitude" # [17] "elevation" "countryCode" # [19] "stateCode" "countyName" # [21] "timezone" "houseNumber" # [23] "street" "city" # [25] "zip" "AQSID" # [27] "fullAQSID" meta <- monitor$meta %>% # Add other metadata dplyr::mutate( monitorID = .data$deviceDeploymentID, siteName = .data$locationName, agencyName = as.character(NA), msaName = as.character(NA), monitorType = .data$deviceType, siteID = as.character(NA), instrumentID = as.character(NA), aqsID = .data$AQSID, pwfslID = as.character(NA), pwfslDataIngestSource = toupper(.data$dataIngestSource), telemetryAggregator = as.character(NA), telemetryUnitID = as.character(NA) ) # Fix siteID: # for AirNow data, siteID = aqsID mask <- (meta$pwfslDataIngestSource == "AIRNOW") meta$siteID[mask] <- meta$aqsID[mask] # Reorganize the columns meta <- meta %>% dplyr::select(all_of(newColumns)) %>% as.data.frame() rownames(meta) <- meta$monitorID # ----- Create data ---------------------------------------------------------- # Guarantee columns are in the correct order dataColumns <- c('datetime', 
meta$monitorID) data <- monitor$data %>% dplyr::select(all_of(dataColumns)) %>% as.data.frame() # ----- Create ws_monitor ---------------------------------------------------- ws_monitor <- list(meta = meta, data = data) ws_monitor <- structure(ws_monitor, class = c("ws_monitor", "list")) # ----- Return --------------------------------------------------------------- return(ws_monitor) } # ===== DEBUG ================================================================== if ( FALSE ) { library(AirMonitor) monitor <- AirMonitor::airnow_loadLatest() ws_monitor <- monitor_toPWFSLSmoke(monitor) ws_monitor %>% PWFSLSmoke::monitor_subset(stateCodes = "IA") %>% AirMonitorPlots::monitor_ggDailyHourlyBarplot() }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_toPWFSLSmoke.R
#' @export #' #' @title Trim a \emph{mts_monitor} object to full days #' #' @param monitor \emph{mts_monitor} object. #' @param timezone Olson timezone used to interpret dates. #' @param trimEmptyDays Logical specifying whether to remove days with no data #' at the beginning and end of the time range. #' #' @description Trims the date range of a \emph{mts_monitor} object to local time date #' boundaries which are \emph{within} the range of data. This has the effect #' of removing partial-day data records at the start and end of the timeseries #' and is useful when calculating full-day statistics. #' #' By default, multi-day periods of all-missing data at the beginning and end #' of the timeseries are removed before trimming to date boundaries. If #' \code{trimEmptyDays = FALSE} all records are retained except for partial days #' before the first and after the last date boundary. #' #' Day boundaries are calculated using the specified \code{timezone} or, if #' \code{NULL}, from \code{monitor$meta$timezone}. #' #' @return A subset of the given \emph{mts_monitor} object. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @examples #' library(AirMonitor) #' #' # Non-day boundaries #' monitor <- #' Camp_Fire %>% #' monitor_filterDatetime( #' "2018111502", #' "2018112206", #' timezone = "America/Los_Angeles" #' ) #' #' monitor %>% #' monitor_timeRange(timezone = "America/Los_Angeles") #' #' # Trim to full days only #' monitor %>% #' monitor_trimDate() %>% #' monitor_timeRange(timezone = "America/Los_Angeles") #' monitor_trimDate <- function( monitor = NULL, timezone = NULL, trimEmptyDays = TRUE ) { # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(monitor) if ( !monitor_isValid(monitor) ) stop("Parameter 'monitor' is not a valid 'mts_monitor' object.") if ( monitor_isEmpty(monitor) ) stop("Parameter 'monitor' has no data.") # ----- Call MazamaTimeSeries function --------------------------------------- monitor <- MazamaTimeSeries::mts_trimDate(monitor, timezone, trimEmptyDays) class(monitor) <- union("mts_monitor", class(monitor)) # ----- Return --------------------------------------------------------------- return(monitor) }
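# ===== Usage sketch ===========================================================

# NOTE: This block is an added sketch, not part of the package, contrasting the
# NOTE: default behavior with 'trimEmptyDays = FALSE' using the Camp_Fire
# NOTE: dataset from the @examples above.

if ( FALSE ) {

  library(AirMonitor)

  monitor <-
    Camp_Fire %>%
    monitor_filterDatetime(
      "2018111502",
      "2018112206",
      timezone = "America/Los_Angeles"
    )

  # Default: all-missing days at the ends are dropped before trimming
  monitor %>%
    monitor_trimDate(timezone = "America/Los_Angeles") %>%
    monitor_timeRange(timezone = "America/Los_Angeles")

  # Retain empty days; only partial days outside the date boundaries are dropped
  monitor %>%
    monitor_trimDate(timezone = "America/Los_Angeles", trimEmptyDays = FALSE) %>%
    monitor_timeRange(timezone = "America/Los_Angeles")

}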
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/monitor_trimDate.R
#' @export #' #' @title Invalidate consecutive suspect values. #' #' @param x Timeseries data. #' @param suspectValues Vector of numeric values considered suspect. #' @param consecutiveCount How many \code{suspectValues} must appear in a row #' before they are invalidated. #' #' @description Invalidates values within a timeseries that appear "sticky". #' Some temporary monitoring data has stretches of consecutive values, sometimes #' well outside the range of reasonable values. This QC function identifies these #' "sticky" stretches and returns the original timeseries data with "sticky" #' stretches replaced with \code{NA}. #' #' @return Returns \code{x} with some values potentially replaced with \code{NA}. QC_invalidateConsecutiveSuspectValues <- function( x = NULL, suspectValues = c(0:10 * 1000, NA), consecutiveCount = 2 ) { # Create a mask of suspect values using 1/0 instead of T/F isSuspect <- as.numeric(x %in% suspectValues) # Left-aligned consecutive count left <- MazamaRollUtils::roll_sum( isSuspect, width = consecutiveCount, by = 1, align = "left", na.rm = FALSE ) # Right-aligned consecutive count right <- MazamaRollUtils::roll_sum( isSuspect, width = consecutiveCount, by = 1, align = "right", na.rm = FALSE ) # NOTE: Mask is TRUE only when a value is part of a sequence of N or more # NOTE: consecutive suspect values. mask <- (left >= consecutiveCount) | (right >= consecutiveCount) # Invalidate these values x[mask] <- as.numeric(NA) return(x) }
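# ===== Usage sketch ===========================================================

# NOTE: This block is an added sketch, not part of the package. With the
# NOTE: default arguments, the isolated 1000 is retained while the pair of
# NOTE: consecutive 2000s is invalidated.

if ( FALSE ) {

  x <- c(12, 1000, 15, 2000, 2000, 18)

  QC_invalidateConsecutiveSuspectValues(x)
  # [1]   12 1000   15   NA   NA   18

}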
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/utils-QC.R
#' @export #' #' @title Check an \emph{mts_monitor} object for validity. #' #' @param monitor \emph{mts_monitor} object. #' #' @description Checks on the validity of an \emph{mts_monitor} object. If any test #' fails, this function will stop with an error message. #' #' @return Invisibly returns \code{TRUE} if \code{mts_monitor} has the correct #' structure. Stops with an error message otherwise. #' #' monitor_check <- function(monitor) { tryCatch( monitor_isValid(monitor, verbose = TRUE), warning = function(w) stop(w), finally = invisible(TRUE) ) } #' @export #' #' @name monitor_isValid #' @title Test \emph{mts_monitor} object for correct structure #' #' @param monitor \emph{mts_monitor} object #' @param verbose Logical specifying whether to produce detailed warning messages. #' #' @description The \code{mts_monitor} is checked for the presence of core #' \code{meta} and \code{data} columns. #' #' Core \code{meta} columns include: (TODO: complete this list) #' #' \itemize{ #' \item{\code{deviceDeploymentID} -- unique identifier (see \pkg{MazamaLocationUtils})} #' \item{\code{deviceID} -- device identifier} #' \item{\code{locationID} -- location identifier (see \pkg{MazamaLocationUtils})} #' \item{\code{locationName} -- English language name} #' \item{\code{longitude} -- decimal degrees E} #' \item{\code{latitude} -- decimal degrees N} #' \item{\code{elevation} -- elevation of station in m} #' \item{\code{countryCode} -- ISO 3166-1 alpha-2} #' \item{\code{stateCode} -- ISO 3166-2 alpha-2} #' \item{\code{timezone} -- Olson time zone} #' } #' #' Core \code{data} columns include: #' #' \itemize{ #' \item{\code{datetime} -- measurement time (UTC)} #' } #' #' @return Invisibly returns \code{TRUE} if \code{mts_monitor} has the correct #' structure, \code{FALSE} otherwise. #' #' monitor_isValid <- function( monitor = NULL, verbose = FALSE ) { MazamaCoreUtils::stopIfNull(monitor) if ( verbose ) { msg <- function(m) warning(m, call. = FALSE, immediate. = TRUE) } else { msg <- function(m) NULL } if ( !"mts_monitor" %in% class(monitor) ) { msg("'monitor' is not of class 'mts_monitor'") return(invisible(FALSE)) } # Check that it is a valid 'mts' object MazamaTimeSeries::mts_check(monitor) # Test for metadata missingNames <- setdiff(coreMetadataNames, names(monitor$meta)) if ( length(missingNames) > 0 ) { msg(sprintf( "monitor$meta is missing columns: %s", paste0(missingNames, collapse = ", ") )) return(invisible(FALSE)) } return(invisible(TRUE)) } #' @export #' #' @title Test for an empty \emph{mts_monitor} object #' #' @param monitor \emph{mts_monitor} object #' @return Invisibly returns \code{TRUE} if no data exist in \code{mts_monitor}, \code{FALSE} otherwise. #' @description This function returns \code{TRUE} under the following conditions: #' \itemize{ #' \item{no time series: \code{ncol(monitor$data) == 1}} #' \item{no time series records: \code{nrow(monitor$data) == 0}} #' \item{all timeseries values are \code{NA}} #' } #' This makes for more readable code in functions that need to test for this. #' monitor_isEmpty <- function(monitor) { MazamaCoreUtils::stopIfNull(monitor) # NOTE: Use minimal validation for improved speed if ( !'data' %in% names(monitor) || !'data.frame' %in% class(monitor$data) ) stop("monitor is not a valid 'mts_monitor' object") if ( ncol(monitor$data) == 1 ) { # No time series returnVal <- TRUE } else if ( nrow(monitor$data) == 0 ) { # No time series records returnVal <- TRUE } else { # Is every record in every non-datetime column NA?
returnVal <- all(sapply(monitor$data[,-1], function(x) { all(is.na(x)) })) } return(invisible(returnVal)) } #' @importFrom rlang .data #' @export #' #' @title Retain only distinct data records in monitor$data #' #' @param monitor \emph{mts_monitor} object #' #' @return A \emph{mts_monitor} object with no duplicated data records. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Two successive steps are used to guarantee that the #' \code{datetime} axis contains no repeated values: #' #' \enumerate{ #' \item{remove any duplicate records} #' \item{guarantee that rows are in \code{datetime} order} #' } #' #' @note This function is primarily for package-internal use. #' monitor_distinct <- function(monitor) { # NOTE: Use minimal validation for improved speed if ( !'data' %in% names(monitor) || !'data.frame' %in% class(monitor$data) ) stop("monitor is not a valid 'mts_monitor' object") monitor$data <- monitor$data %>% dplyr::distinct() %>% dplyr::arrange(.data$datetime) if ( any(duplicated(monitor$data$datetime)) ) stop("duplicate timesteps with differing values found in 'monitor' object") return(monitor) } #' @title Extract dataframes from \emph{mts_monitor} objects #' #' @description #' These functions are convenient wrappers for extracting the dataframes that #' comprise a \emph{mts_monitor} object. These functions are designed to be #' useful when manipulating data in a pipeline using \code{\%>\%}. #' #' Below is a table showing equivalent operations for each function. #' #' \tabular{ll}{ #' \strong{Function} \tab \strong{Equivalent Operation}\cr #' \code{monitor_getData(monitor)} \tab \code{monitor$data}\cr #' \code{monitor_getMeta(monitor)} \tab \code{monitor$meta} #' } #' #' @param monitor \emph{mts_monitor} object to extract dataframe from. #' #' @return A dataframe from the given \emph{mts_monitor} object. #' #' @name monitor_getDataFrame #' @aliases monitor_getData monitor_getMeta #' NULL #' @export #' @rdname monitor_getDataFrame #' monitor_getData <- function(monitor) { # NOTE: Use minimal validation for improved speed if ( !'data' %in% names(monitor) || !'data.frame' %in% class(monitor$data) ) stop("monitor is not a valid 'mts_monitor' object") return(monitor$data) } #' @export #' @rdname monitor_getDataFrame #' monitor_getMeta <- function(monitor) { # NOTE: Use minimal validation for improved speed if ( !'meta' %in% names(monitor) || !'data.frame' %in% class(monitor$meta) ) stop("monitor is not a valid 'mts_monitor' object") return(monitor$meta) }
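# ===== Usage sketch ===========================================================

# NOTE: This block is an added sketch, not part of the package, exercising the
# NOTE: validation and accessor utilities defined above with the built-in
# NOTE: Carmel_Valley dataset.

if ( FALSE ) {

  library(AirMonitor)

  monitor <- Carmel_Valley

  # Stops with an error message if the structure is invalid
  monitor_check(monitor)

  # FALSE for a monitor object containing valid data
  monitor_isEmpty(monitor)

  # Pipeline-friendly accessors
  monitor %>% monitor_getMeta() %>% names()
  monitor %>% monitor_getData() %>% head()

}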
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/utils-monitor.R
#' Pipe operator #' #' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. #' #' @name %>% #' @rdname pipe #' @keywords internal #' @export #' @importFrom magrittr %>% #' @usage lhs \%>\% rhs #' @param lhs A value or the magrittr placeholder. #' @param rhs A function call using the magrittr semantics. #' @return The result of calling `rhs(lhs)`. NULL
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/utils-pipe.R
#' @export #' @importFrom dplyr across #' #' @title Load annual WRCC monitoring data #' #' @param year Year [YYYY]. #' @param archiveBaseUrl Base URL for monitoring v2 data files. #' @param archiveBaseDir Local base directory for monitoring v2 data files. #' @param QC_negativeValues Type of QC to apply to negative values. #' @param QC_removeSuspectData Removes monitors determined to be misbehaving. #' #' @return A \emph{mts_monitor} object with WRCC data. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Loads pre-generated .rda files containing annual #' WRCC data. #' #' If \code{archiveBaseDir} is defined, data will be loaded from this local #' archive. Otherwise, data will be loaded from the monitoring data repository #' maintained by the USFS AirFire team. #' #' Current year files loaded by this function are updated once per week. #' #' For the most recent data in the last 10 days, use \code{wrcc_loadLatest()}. #' #' For daily updates covering the most recent 45 days, use \code{wrcc_loadDaily()}. #' #' @note #' Some older WRCC timeseries contain only values of 0, 1000, 2000, 3000, ... ug/m3. #' Data from these deployments pass instrument-level QC checks but these #' timeseries generally do not represent valid data and should be removed. #' With \code{QC_removeSuspectData = TRUE} (the default), data is checked and #' periods reporting only values of 0:10 * 1000 ug/m3 are invalidated. #' #' Only those personally familiar with the individual instrument deployments #' should work with the "suspect" data. #' #' @seealso \code{\link{wrcc_loadDaily}} #' @seealso \code{\link{wrcc_loadLatest}} #' #' @examples #' \dontrun{ #' library(AirMonitor) #' #' # Fail gracefully if any resources are not available #' try({ #' #' # See https://en.wikipedia.org/wiki/Snake_River_Complex_Fire #' #' # WRCC monitors during the Snake River Complex Fire #' wrcc_loadAnnual(2021) \%>\% #' monitor_filter(stateCode \%in\% c("ID", "MT")) \%>\% #' monitor_filterDate(20210707, 20210820, timezone = "America/Denver") \%>\% #' monitor_timeseriesPlot( #' ylim = c(0, 300), #' xpd = NA, #' addAQI = TRUE, #' main = "WRCC monitors during Snake River Complex Fire" #' ) #' #' }, silent = FALSE) #' } wrcc_loadAnnual <- function( year = NULL, archiveBaseUrl = paste0( "https://airfire-data-exports.s3.us-west-2.amazonaws.com/", "monitoring/v2" ), archiveBaseDir = NULL, QC_negativeValues = c("zero", "na", "ignore"), QC_removeSuspectData = TRUE ) { parameterName <- "PM2.5" # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(year) MazamaCoreUtils::stopIfNull(parameterName) if ( as.numeric(year) < 2014 ) stop("No WRCC data is available before 2014") QC_negativeValues <- match.arg(QC_negativeValues) if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) ) stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined") # Parameter code validParameterNames <- c( # "BARPR", # "BC", # "CO", # "NO", # "NO2", # "NO2Y", # "NO2X", # "NOX", # "NOOY", # "OC", # "OZONE", # "PM10", "PM2.5" # "PM2.5_nowcast" # "PRECIP", # "RHUM", # "SO2", # "SRAD", # "TEMP", # "UV-AETH", # "WD", # "WS" ) parameterName <- as.character(parameterName) if ( !parameterName %in% validParameterNames ) { stop(sprintf( "data for parameterName '%s' has not been processed", parameterName )) } # ----- Load data ------------------------------------------------------------ # Create file name and path according to the AirMonitorIngest scheme if ( is.null(archiveBaseUrl) ) { dataUrl <- NULL
} else { dataUrl <- file.path(archiveBaseUrl, "wrcc", year, "data") } if ( is.null(archiveBaseDir) ) { dataDir <- NULL } else { dataDir <- file.path(archiveBaseDir, "wrcc", year, "data") } metaFileName <- sprintf("wrcc_%s_%s_meta.rda", parameterName, year) dataFileName <- sprintf("wrcc_%s_%s_data.rda", parameterName, year) meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir) data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir) # Guarantee that 'meta' and 'data' match ids <- names(data)[-1] meta <- meta %>% dplyr::filter(.data$deviceDeploymentID %in% ids) # Guarantee presence of fullAQSID if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_ data <- data %>% dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>% # Replace any NaN that snuck in dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric), function(x) ifelse(is.nan(x), NA, x))) # Create monitor object monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) MazamaTimeSeries::mts_check(monitor) # ----- Apply QC ------------------------------------------------------------- # Handle negative values if ( QC_negativeValues == "zero" ) { monitor <- monitor_replaceValues(monitor, data < 0, 0) } else if ( QC_negativeValues == "na" ) { monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA)) } # NOTE: Several monitors in 2015 have values only at 0, 1000, 2000, 3000, ... if ( QC_removeSuspectData ) { monitor <- monitor %>% monitor_mutate(QC_invalidateConsecutiveSuspectValues) %>% monitor_dropEmpty() } # ----- Return --------------------------------------------------------------- return(monitor) } # ===== DEBUG ================================================================== if ( FALSE ) { year <- 2021 archiveBaseUrl <- "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2" archiveBaseDir <- NULL QC_negativeValues = "zero" }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/wrcc_loadAnnual.R
#' @export #' @importFrom dplyr across #' #' @title Load daily WRCC monitoring data #' #' @param archiveBaseUrl Base URL for monitoring v2 data files. #' @param archiveBaseDir Local base directory for monitoring v2 data files. #' @param QC_negativeValues Type of QC to apply to negative values. #' @param QC_removeSuspectData Removes monitors determined to be misbehaving. #' #' @return A \emph{mts_monitor} object with WRCC data. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Loads pre-generated .rda files containing daily #' WRCC data. #' #' If \code{archiveBaseDir} is defined, data will be loaded from this local #' archive. Otherwise, data will be loaded from the monitoring data repository #' maintained by the USFS AirFire team. #' #' The files loaded by this function are updated once per day and #' contain data for the previous 45 days. #' #' For the most recent data in the last 10 days, use \code{wrcc_loadLatest()}. #' #' For data extending more than 45 days into the past, use \code{wrcc_loadAnnual()}. #' #' @note #' Some older WRCC timeseries contain only values of 0, 1000, 2000, 3000, ... ug/m3. #' Data from these deployments pass instrument-level QC checks but these #' timeseries generally do not represent valid data and should be removed. #' With \code{QC_removeSuspectData = TRUE} (the default), data is checked and #' periods reporting only values of 0:10 * 1000 ug/m3 are invalidated. #' #' Only those personally familiar with the individual instrument deployments #' should work with the "suspect" data. #' #' @seealso \code{\link{wrcc_loadAnnual}} #' @seealso \code{\link{wrcc_loadLatest}} #' #' @examples #' \dontrun{ #' library(AirMonitor) #' #' # Fail gracefully if any resources are not available #' try({ #' #' wrcc_loadDaily() \%>\% #' monitor_leaflet() #' #' }, silent = FALSE) #' } wrcc_loadDaily <- function( archiveBaseUrl = paste0( "https://airfire-data-exports.s3.us-west-2.amazonaws.com/", "monitoring/v2" ), archiveBaseDir = NULL, QC_negativeValues = c("zero", "na", "ignore"), QC_removeSuspectData = TRUE ) { parameterName <- "PM2.5" # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(parameterName) QC_negativeValues <- match.arg(QC_negativeValues) if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) ) stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined") # Parameter code validParameterNames <- c( # "BARPR", # "BC", # "CO", # "NO", # "NO2", # "NO2Y", # "NO2X", # "NOX", # "NOOY", # "OC", # "OZONE", # "PM10", "PM2.5", "PM2.5_nowcast" # "PRECIP", # "RHUM", # "SO2", # "SRAD", # "TEMP", # "UV-AETH", # "WD", # "WS" ) parameterName <- as.character(parameterName) if ( !parameterName %in% validParameterNames ) { stop(sprintf( "data for parameterName '%s' has not been processed", parameterName )) } # ----- Load data ------------------------------------------------------------ # Create file name and path according to the AirMonitorIngest scheme if ( is.null(archiveBaseUrl) ) { dataUrl <- NULL } else { dataUrl <- file.path(archiveBaseUrl, "daily/data") } if ( is.null(archiveBaseDir) ) { dataDir <- NULL } else { dataDir <- file.path(archiveBaseDir, "daily/data") } metaFileName <- sprintf("wrcc_%s_daily_meta.rda", parameterName) dataFileName <- sprintf("wrcc_%s_daily_data.rda", parameterName) meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir) data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir) # Guarantee that 'meta' and 'data' match ids <- names(data)[-1] meta
<- meta %>% dplyr::filter(.data$deviceDeploymentID %in% ids) # Guarantee presence of fullAQSID if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_ data <- data %>% dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>% # Replace any NaN that snuck in dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric), function(x) ifelse(is.nan(x), NA, x))) # Create monitor object monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) MazamaTimeSeries::mts_check(monitor) # ----- Apply QC ------------------------------------------------------------- # Handle negative values if ( QC_negativeValues == "zero" ) { monitor <- monitor_replaceValues(monitor, data < 0, 0) } else if ( QC_negativeValues == "na" ) { monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA)) } # NOTE: Several monitors in 2015 have values only at 0, 1000, 2000, 3000, ... if ( QC_removeSuspectData ) { monitor <- monitor %>% monitor_mutate(QC_invalidateConsecutiveSuspectValues) %>% monitor_dropEmpty() } # ----- Return --------------------------------------------------------------- return(monitor) } # ===== DEBUG ================================================================== if ( FALSE ) { archiveBaseUrl <- "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2" archiveBaseDir <- NULL QC_negativeValues = "zero" }
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/wrcc_loadDaily.R
#' @export #' @importFrom dplyr across #' #' @title Load most recent WRCC monitoring data #' #' @param archiveBaseUrl Base URL for monitoring v2 data files. #' @param archiveBaseDir Local base directory for monitoring v2 data files. #' @param QC_negativeValues Type of QC to apply to negative values. #' @param QC_removeSuspectData Removes monitors determined to be misbehaving. #' #' @return A \emph{mts_monitor} object with WRCC data. (A list with #' \code{meta} and \code{data} dataframes.) #' #' @description Loads pre-generated .rda files containing the most recent #' WRCC data. #' #' If \code{archiveBaseDir} is defined, data will be loaded from this local #' archive. Otherwise, data will be loaded from the monitoring data repository #' maintained by the USFS AirFire team. #' #' The files loaded by this function are updated multiple times an hour and #' contain data for the previous 10 days. #' #' For daily updates covering the most recent 45 days, use \code{wrcc_loadDaily()}. #' #' For data extending more than 45 days into the past, use \code{wrcc_loadAnnual()}. #' #' @note #' Some older WRCC timeseries contain only values of 0, 1000, 2000, 3000, ... ug/m3. #' Data from these deployments pass instrument-level QC checks but these #' timeseries generally do not represent valid data and should be removed. #' With \code{QC_removeSuspectData = TRUE} (the default), data is checked and #' periods reporting only values of 0:10 * 1000 ug/m3 are invalidated. #' #' Only those personally familiar with the individual instrument deployments #' should work with the "suspect" data. #' #' @seealso \code{\link{wrcc_loadAnnual}} #' @seealso \code{\link{wrcc_loadDaily}} #' #' @examples #' \dontrun{ #' library(AirMonitor) #' #' # Fail gracefully if any resources are not available #' try({ #' #' wrcc_loadLatest() \%>\% #' monitor_leaflet() #' #' }, silent = FALSE) #' } wrcc_loadLatest <- function( archiveBaseUrl = paste0( "https://airfire-data-exports.s3.us-west-2.amazonaws.com/", "monitoring/v2" ), archiveBaseDir = NULL, QC_negativeValues = c("zero", "na", "ignore"), QC_removeSuspectData = TRUE ) { parameterName <- "PM2.5" # ----- Validate parameters -------------------------------------------------- MazamaCoreUtils::stopIfNull(parameterName) QC_negativeValues <- match.arg(QC_negativeValues) if ( is.null(archiveBaseUrl) && is.null(archiveBaseDir) ) stop("one of 'archiveBaseUrl' or 'archiveBaseDir' must be defined") # Parameter code validParameterNames <- c( # "BARPR", # "BC", # "CO", # "NO", # "NO2", # "NO2Y", # "NO2X", # "NOX", # "NOOY", # "OC", # "OZONE", # "PM10", "PM2.5", "PM2.5_nowcast" # "PRECIP", # "RHUM", # "SO2", # "SRAD", # "TEMP", # "UV-AETH", # "WD", # "WS" ) parameterName <- as.character(parameterName) if ( !parameterName %in% validParameterNames ) { stop(sprintf( "data for parameterName '%s' has not been processed", parameterName )) } # ----- Load data ------------------------------------------------------------ # Create file name and path according to the AirMonitorIngest scheme if ( is.null(archiveBaseUrl) ) { dataUrl <- NULL } else { dataUrl <- file.path(archiveBaseUrl, "latest/data") } if ( is.null(archiveBaseDir) ) { dataDir <- NULL } else { dataDir <- file.path(archiveBaseDir, "latest/data") } metaFileName <- sprintf("wrcc_%s_latest_meta.rda", parameterName) dataFileName <- sprintf("wrcc_%s_latest_data.rda", parameterName) meta <- MazamaCoreUtils::loadDataFile(metaFileName, dataUrl, dataDir) data <- MazamaCoreUtils::loadDataFile(dataFileName, dataUrl, dataDir) # Guarantee that 'meta' and
'data' match ids <- names(data)[-1] meta <- meta %>% dplyr::filter(.data$deviceDeploymentID %in% ids) # Guarantee presence of fullAQSID if ( !"fullAQSID" %in% names(meta) ) meta$fullAQSID <- NA_character_ data <- data %>% dplyr::select(dplyr::all_of(c("datetime", meta$deviceDeploymentID))) %>% # Replace any NaN that snuck in dplyr::mutate(across(tidyselect::vars_select_helpers$where(is.numeric), function(x) ifelse(is.nan(x), NA, x))) # Create monitor object monitor <- list(meta = meta, data = data) monitor <- structure(monitor, class = c("mts_monitor", "mts", class(monitor))) MazamaTimeSeries::mts_check(monitor) # ----- Apply QC ------------------------------------------------------------- # Handle negative values if ( QC_negativeValues == "zero" ) { monitor <- monitor_replaceValues(monitor, data < 0, 0) } else if ( QC_negativeValues == "na" ) { monitor <- monitor_replaceValues(monitor, data < 0, as.numeric(NA)) } # NOTE: Several monitors in 2015 have values only at 0, 1000, 2000, 3000, ... if ( QC_removeSuspectData ) { monitor <- monitor %>% monitor_mutate(QC_invalidateConsecutiveSuspectValues) %>% monitor_dropEmpty() } # ----- Return --------------------------------------------------------------- return(monitor) } # ===== DEBUG ================================================================== if ( FALSE ) { archiveBaseUrl <- "https://airfire-data-exports.s3.us-west-2.amazonaws.com/monitoring/v2" archiveBaseDir <- NULL QC_negativeValues = "zero" }
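# ===== Usage sketch ===========================================================

# NOTE: This block is an added sketch, not part of the package, showing the QC
# NOTE: options described in the @note above. Keeping "suspect" data is only
# NOTE: appropriate for users familiar with the individual deployments.

if ( FALSE ) {

  library(AirMonitor)

  # Default QC: negative values set to zero, "sticky" timeseries invalidated
  monitor <- wrcc_loadLatest()

  # Convert negative values to NA and retain "sticky" timeseries
  monitor_raw <- wrcc_loadLatest(
    QC_negativeValues = "na",
    QC_removeSuspectData = FALSE
  )

}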
/scratch/gouwar.j/cran-all/cranData/AirMonitor/R/wrcc_loadLatest.R
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, fig.width = 7, fig.height = 5) ## ----library, echo = FALSE---------------------------------------------------- suppressPackageStartupMessages({ library(AirMonitor) Camp_Fire <- Camp_Fire }) ## ----Sacramento_2------------------------------------------------------------- monitor_leaflet(Camp_Fire) ## ----Sacramento_3------------------------------------------------------------- # create single-monitor Sacramento Sacramento <- # 1) start with Camp_Fire Camp_Fire %>% # 2) select a specific device-deployment monitor_select("127e996697f9731c_840060670010") # review timeseries plot Sacramento %>% monitor_timeseriesPlot( shadedNight = TRUE, addAQI = TRUE, main = "Hourly PM2.5 Concentration in Sacramento" ) # add the AQI legend addAQILegend(cex = 0.8) ## ----Sacramento_4------------------------------------------------------------- Sacramento_area <- # 1) start with Camp_Fire Camp_Fire %>% # 2) find all monitors within 50km of Sacramento monitor_filterByDistance( longitude = Sacramento$meta$longitude, latitude = Sacramento$meta$latitude, radius = 50000 ) monitor_leaflet(Sacramento_area) ## ----Sacramento_5------------------------------------------------------------- Sacramento_area %>% monitor_timeseriesPlot( shadedNight = TRUE, addAQI = TRUE, main = "Wildfire Smoke within 30 miles of Sacramento" ) addAQILegend(lwd = 1, pch = NA, bg = "white", cex = 0.8) ## ----Sacramento_6------------------------------------------------------------- # 1) start with Sacramento_area Sacramento_area %>% # 2) average together all timeseries hour-by-hour monitor_collapse( deviceID = "Sacramento_area" ) %>% # 3) calculate the local-time daily average (default) monitor_dailyStatistic() %>% # 4) pull out the $data dataframe monitor_getData() ## ----Sacramento_7------------------------------------------------------------- # 1) start with Sacramento_area Sacramento_area %>% # 2) average together all timeseries hour-by-hour monitor_collapse() %>% # 3) create daily barplot monitor_dailyBarplot( main = "Daily Average PM2.5 in the Sacramento Area" ) # add the AQI legend addAQILegend(pch = 15, bg = "white", cex = 0.8)
/scratch/gouwar.j/cran-all/cranData/AirMonitor/inst/doc/AirMonitor.R
--- title: "Introduction to AirMonitor" author: "Mazama Science" date: "2022-10-31" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Introduction to AirMonitor} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, fig.width = 7, fig.height = 5) ``` ## Installation Install from CRAN with: `install.packages('AirMonitor')` Install the latest version from GitHub with: `devtools::install_github('mazamascience/AirMonitor')` ## Available data The USFS AirFire group regularly processes monitoring data in support of their various operational tools. Pre-processed, harmonized and QC'ed data files can be loaded with the following functions: * `~_load()` -- load data based on a start- and end-time * `~loadAnnual()` -- load a year's worth of data * `~loadDaily()` -- load the most recent 45 days of data (updated once per day) * `~loadLatest()` -- load the most recent 10 days of data (updated every hour) Data archives go back to 2014 or earlier depending on the data source. ## Recipes We encourage people to embrace "recipe" style coding as enabled by **dplyr** and related packages. The special `%>%` operator uses the output of one function as the first argument of the next function, thus allowing for easy "chaining" of results to create a step-by-step recipe. With only a few exceptions, all the `monitor_` functions accept a _mts_monitor_ object as their first argument and generate a _mts_monitor_ object as a result so they can be chained together. ## A first example ```{r library, echo = FALSE} suppressPackageStartupMessages({ library(AirMonitor) Camp_Fire <- Camp_Fire }) ``` Let's say we are interested in the impact of smoke from the 2018 [Camp Fire](https://en.wikipedia.org/wiki/Camp_Fire_(2018)) in the Sacramento area. We would begin by creating a `Camp_Fire` object that has all the monitors in California for the period of interest. The recipe for creating `Camp_Fire` has four steps: 1) load annual data; 2) filter for monitors in California; 3) restrict the date range to Camp Fire dates; 4) remove any monitors with no valid data in this range. ``` # create the Camp_Fire 'mts_monitor' object Camp_Fire <- # 1) load annual data monitor_loadAnnual(2018) %>% # 2) filter for California monitor_filter(stateCode == 'CA') %>% # 3) restrict date range monitor_filterDate( startdate = 20181108, enddate = 20181123, timezone = "America/Los_Angeles" ) %>% # 4) remove monitors with no valid data monitor_dropEmpty() ``` We can use the `monitor_leaflet()` function to display these monitors (colored by maximum PM2.5 value) in an interactive map. This map allows us to zoom in and click on the monitor in downtown Sacramento to get it's `deviceDeploymentID` -- "127e996697f9731c_840060670010". ```{r Sacramento_2} monitor_leaflet(Camp_Fire) ``` We can use this `deviceDeploymentID` to create a _mts_monitor_ object for this single monitor and take a look at a time series plot. 
Day-night shading and AQI decorations create a publication-ready plot: ```{r Sacramento_3} # create single-monitor Sacramento Sacramento <- # 1) start with Camp_Fire Camp_Fire %>% # 2) select a specific device-deployment monitor_select("127e996697f9731c_840060670010") # review timeseries plot Sacramento %>% monitor_timeseriesPlot( shadedNight = TRUE, addAQI = TRUE, main = "Hourly PM2.5 Concentration in Sacramento" ) # add the AQI legend addAQILegend(cex = 0.8) ``` Next, we can use this specific location to create a _mts_monitor_ object containing all monitors within 50 kilometers (31 miles) of Sacramento. ```{r Sacramento_4} Sacramento_area <- # 1) start with Camp_Fire Camp_Fire %>% # 2) find all monitors within 50km of Sacramento monitor_filterByDistance( longitude = Sacramento$meta$longitude, latitude = Sacramento$meta$latitude, radius = 50000 ) monitor_leaflet(Sacramento_area) ``` We can use the same `monitor_timeseriesPlot()` function to display the hourly data for _all_ the monitors in the Sacramento area in a single plot. This gives a sense of the range of values within the area at any given hour. ```{r Sacramento_5} Sacramento_area %>% monitor_timeseriesPlot( shadedNight = TRUE, addAQI = TRUE, main = "Wildfire Smoke within 30 miles of Sacramento" ) addAQILegend(lwd = 1, pch = NA, bg = "white", cex = 0.8) ``` Now we can average together all the monitors and create a local-time, daily average for the Sacramento area. ```{r Sacramento_6} # 1) start with Sacramento_area Sacramento_area %>% # 2) average together all timeseries hour-by-hour monitor_collapse( deviceID = "Sacramento_area" ) %>% # 3) calculate the local-time daily average (default) monitor_dailyStatistic() %>% # 4) pull out the $data dataframe monitor_getData() ``` Alternatively, we can plot the daily averages. ```{r Sacramento_7} # 1) start with Sacramento_area Sacramento_area %>% # 2) average together all timeseries hour-by-hour monitor_collapse() %>% # 3) create daily barplot monitor_dailyBarplot( main = "Daily Average PM2.5 in the Sacramento Area" ) # add the AQI legend addAQILegend(pch = 15, bg = "white", cex = 0.8) ``` ---- Best of luck analyzing your local air quality data!
/scratch/gouwar.j/cran-all/cranData/AirMonitor/inst/doc/AirMonitor.Rmd
## ---- echo=FALSE-------------------------------------------------------------- knitr::opts_chunk$set(fig.width = 7, fig.height = 5) ## ----data_model_1------------------------------------------------------------- library(AirMonitor) # Recipe to select Washington state monitors in August of 2015: monitor <- # 1) start with NW Megafires NW_Megafires %>% # 2) filter to only include Washington state monitor_filter(stateCode == "WA") %>% # 3) filter to only include August monitor_filterDate(20150801, 20150901) %>% # 4) remove monitors with all missing values monitor_dropEmpty() # 'mts_monitor' objects can be identified by their class class(monitor) # They always have two elements called 'meta' and 'data' names(monitor) # Examine the 'meta' dataframe dim(monitor$meta) names(monitor$meta) # Examine the 'data' dataframe dim(monitor$data) # This should always be true identical(names(monitor$data), c('datetime', monitor$meta$deviceDeploymentID)) ## ----monitor_leaflet, results = "hold"---------------------------------------- # First, obtain the monitor IDs by clicking on dots in the interactive map: NW_Megafires %>% monitor_leaflet() ## ----Methow_Valley, results = "hold"------------------------------------------ # Calculate daily means for the Methow Valley from monitors in Twisp and Winthrop TwispID <- "99a6ee8e126ff8cf_530470009_04" WinthropID <- "123035bbdc2bc702_530470010_04" # Recipe to calculate Methow Valley August Means: Methow_Valley_AugustMeans <- # 1) start with NW Megafires NW_Megafires %>% # 2) select monitors from Twisp and Winthrop monitor_select(c(TwispID, WinthropID)) %>% # 3) average them together hour-by-hour monitor_collapse(deviceID = 'MethowValley') %>% # 4) restrict data to August monitor_filterDate(20150801, 20150901) %>% # 5) calculate daily mean monitor_dailyStatistic(mean, minHours = 18) %>% # 6) round data to one decimal place monitor_mutate(round, 1) # Look at the first week Methow_Valley_AugustMeans$data[1:7,] ## ----custom_use1-------------------------------------------------------------- # Monitors within 100 km of Spokane, WA Spokane <- NW_Megafires %>% monitor_filterByDistance(-117.42, 47.70, 100000) %>% monitor_filterDate(20150801, 20150901) %>% monitor_dropEmpty() # Show the daily statistic for one week Spokane %>% monitor_filterDate(20150801, 20150808) %>% monitor_dailyStatistic(mean) %>% monitor_getData() # Custom function to convert from metric ug/m3 to imperial grain/gallon my_FUN <- function(x) { return( x * 15.43236 / 0.004546 ) } Spokane %>% monitor_filterDate(20150801, 20150808) %>% monitor_mutate(my_FUN) %>% monitor_dailyStatistic(mean) %>% monitor_getData() ## ----custom_use2-------------------------------------------------------------- # Pull out the time series data to calculate correlations Spokane_data <- Spokane %>% monitor_getData() %>% dplyr::select(-1) # omit 'datetime' column # Provide human readable names names(Spokane_data) <- Spokane$meta$locationName # Find correlation among monitors cor(Spokane_data, use = "complete.obs")
/scratch/gouwar.j/cran-all/cranData/AirMonitor/inst/doc/Data_Model.R
--- title: "Data Model" author: "Jonathan Callahan" date: "2022-10-31" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Data Model} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, echo=FALSE} knitr::opts_chunk$set(fig.width = 7, fig.height = 5) ``` This vignette explores the _mts_monitor_ data model used throughout the **AirMonitor** package to store and work with monitoring data. The **AirMonitor** package is designed to provide a compact, full-featured suite of utilities for working with PM2.5 data. A uniform data model provides consistent data access across monitoring data available from different agencies. The core data model in this package is defined by the _mts_monitor_ object used to store data associated with groups of individual monitors. To work efficiently with the package it is important that you understand the structure of this data object and the functions that operate on it. Package functions whose names begin with `monitor_`, expect objects of class _mts_monitor_ as their first argument. (*'mts' stands for 'Multiple Time Series'*) ## Data Model The **AirMonitor** package uses the _mts_ data model defined in the **[MazamaTimeSeries](https://mazamascience.github.io/MazamaTimeSeries/)** package. In this data model, each unique time series is referred to as a _"device-deployment"_ -- a time series collected by a particular device at a specific location. Multiple device-deployments are stored in memory as a _mts_monitor_ object, typically called `monitor`. Each `monitor` is just an \pkg{R} list with two dataframes. `monitor$meta` -- rows = unique device-deployments; cols = device/location metadata `monitor$data` -- rows = UTC times; cols = device-deployment data (plus an additional `datetime` column) A key feature of this data model is the use of the `deviceDeploymentID` as a "foreign key" that allows `data` columns to be mapped onto the associated spatial and device metadata in a `meta` row. The following will always be true: ``` identical(names(monitor$data), c('datetime', monitor$meta$deviceDeploymentID)) ``` Each column of `monitor$data` represents a time series associated with a particular device-deployment while each row of `monitor$data` represents a _synoptic_ snapshot of all measurements made at a particular time. In this manner, software can create both time series plots and maps from a single `monitor` object in memory. The `data` dataframe contains all hourly measurements organized with rows (the 'unlimited' dimension) as unique timesteps and columns as unique device-deployments. The very first column is always named `datetime` and contains the `POSIXct` datetime in Coordinated Universal Time (UTC). This time axis is guaranteed to be a regular hourly axis with no gaps. The `meta` dataframe contains all metadata associated with device-deployments and is organized with rows as unique device-deployments and columns containing both location and device metadata. The following columns are guaranteed to exist in the `meta` dataframe. Those marked with "(optional)" may contain `NA`s. Additional columns may also be present depending on the data source. 
* `deviceDeploymentID` -- unique ID associated with a time series * `deviceID` -- unique device ID * `deviceType` -- (optional) device type * `deviceDescription` -- (optional) human readable device description * `deviceExtra` -- (optional) additional human readable device information * `pollutant` -- pollutant name from `AirMonitor::pollutantNames` * `units` -- one of `"PPM|PPB|UG/M3"` * `dataIngestSource` -- (optional) source of data * `dataIngestURL` -- (optional) URL used to access data * `dataIngestUnitID` -- (optional) instrument identifier used at `dataIngestSource` * `dataIngestExtra` -- (optional) human readable data ingest information * `dataIngestDescription` -- (optional) human readable data ingest instructions * `locationID` -- unique location ID from `MazamaLocationUtils::location_createID()` * `locationName` -- human readable location name * `longitude` -- longitude * `latitude` -- latitude * `elevation` -- (optional) elevation * `countryCode` -- ISO 3166-1 alpha-2 country code * `stateCode` -- ISO 3166-2 alpha-2 state code * `countyName` -- US county name * `timezone` -- Olson time zone * `houseNumber` -- (optional) * `street` -- (optional) * `city` -- (optional) * `zip` -- (optional) * `AQSID` -- (optional) EPA AQS unique identifier * `fullAQSID` -- (optional) EPA AQS unique identifier **Example 1: Exploring _mts_monitor_ objects** We will use the built-in "NW_Megafires" dataset and various `monitor_filter~()` functions to subset a _mts_monitor_ object which we then examine. ```{r data_model_1} library(AirMonitor) # Recipe to select Washington state monitors in August of 2015: monitor <- # 1) start with NW Megafires NW_Megafires %>% # 2) filter to only include Washington state monitor_filter(stateCode == "WA") %>% # 3) filter to only include August monitor_filterDate(20150801, 20150901) %>% # 4) remove monitors with all missing values monitor_dropEmpty() # 'mts_monitor' objects can be identified by their class class(monitor) # They always have two elements called 'meta' and 'data' names(monitor) # Examine the 'meta' dataframe dim(monitor$meta) names(monitor$meta) # Examine the 'data' dataframe dim(monitor$data) # This should always be true identical(names(monitor$data), c('datetime', monitor$meta$deviceDeploymentID)) ``` **Example 2: Basic manipulation of _mts_monitor_ objects** The **AirMonitor** package has numerous functions that work with _mts_monitor_ objects, all of which begin with `monitor_`. If you need to do something that the package functions do not provide, you can manipulate _mts_monitor_ objects directly as long as you retain the structure of the data model, as sketched below.
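For instance, the following hypothetical snippet (a sketch only -- the `monitor_select()` function listed below does this for you) subsets a monitor object by hand while keeping `meta` and `data` in sync:

```
# Keep only the first two device-deployments
ids <- monitor$meta$deviceDeploymentID[1:2]
monitor$meta <- monitor$meta[monitor$meta$deviceDeploymentID %in% ids, ]
monitor$data <- monitor$data[, c("datetime", ids)]

# The data model invariant still holds
identical(names(monitor$data), c("datetime", monitor$meta$deviceDeploymentID))
```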
Functions that accept and return _mts_monitor_ objects include: * `monitor_aqi()` * `monitor_collapse()` * `monitor_combine()` * `monitor_dailyStatistic()` * `monitor_dailyThreshold()` * `monitor_dropEmpty()` * `monitor_filter()` ( aka `monitor_filterMeta()`) * `monitor_filterByDistance()` * `monitor_filterDate()` * `monitor_filterDatetime()` * `monitor_mutate()` * `monitor_nowcast()` * `monitor_replaceValues()` * `monitor_select()` ( aka `monitor_reorder()`) * `monitor_selectWhere()` * `monitor_trimDate()` These functions can be used with the **magrittr** package pipe operator (`%>%`) as in the following example: ```{r monitor_leaflet, results = "hold"} # First, obtain the monitor IDs by clicking on dots in the interactive map: NW_Megafires %>% monitor_leaflet() ``` ```{r Methow_Valley, results = "hold"} # Calculate daily means for the Methow Valley from monitors in Twisp and Winthrop TwispID <- "99a6ee8e126ff8cf_530470009_04" WinthropID <- "123035bbdc2bc702_530470010_04" # Recipe to calculate Methow Valley August Means: Methow_Valley_AugustMeans <- # 1) start with NW Megafires NW_Megafires %>% # 2) select monitors from Twisp and Winthrop monitor_select(c(TwispID, WinthropID)) %>% # 3) average them together hour-by-hour monitor_collapse(deviceID = 'MethowValley') %>% # 4) restrict data to August monitor_filterDate(20150801, 20150901) %>% # 5) calculate daily mean monitor_dailyStatistic(mean, minHours = 18) %>% # 6) round data to one decimal place monitor_mutate(round, 1) # Look at the first week Methow_Valley_AugustMeans$data[1:7,] ``` **Example 3: Advanced manipulation of _mts_monitor_ objects** The following code demonstrates user creation of a custom function to manipulate the `data` tibble from a _mts_monitor_ object with `monitor_mutate()`. ```{r custom_use1} # Monitors within 100 km of Spokane, WA Spokane <- NW_Megafires %>% monitor_filterByDistance(-117.42, 47.70, 100000) %>% monitor_filterDate(20150801, 20150901) %>% monitor_dropEmpty() # Show the daily statistic for one week Spokane %>% monitor_filterDate(20150801, 20150808) %>% monitor_dailyStatistic(mean) %>% monitor_getData() # Custom function to convert from metric ug/m3 to imperial grain/gallon my_FUN <- function(x) { return( x * 15.43236 / 0.004546 ) } Spokane %>% monitor_filterDate(20150801, 20150808) %>% monitor_mutate(my_FUN) %>% monitor_dailyStatistic(mean) %>% monitor_getData() ``` Understanding that `monitor$data` is just a dataframe of measurements prepended with a `datetime` column, we can pull out the measurements and do analyses independent of the _mts_monitor_ data model. Here we look for correlations among the PM2.5 time series. ```{r custom_use2} # Pull out the time series data to calculate correlations Spokane_data <- Spokane %>% monitor_getData() %>% dplyr::select(-1) # omit 'datetime' column # Provide human readable names names(Spokane_data) <- Spokane$meta$locationName # Find correlation among monitors cor(Spokane_data, use = "complete.obs") ``` This introduction to the _mts_monitor_ data model should be enough to get you started. Lots more examples are available in the package documentation. ---- _Best of luck exploring and understanding PM2.5 air quality data!_
/scratch/gouwar.j/cran-all/cranData/AirMonitor/inst/doc/Data_Model.Rmd
--- title: "Developer Style Guide" author: "Jonathan Callahan" date: "2022-02-15" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Developer Style Guide} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # R Style Guide This document describes the coding style used within the package. Having a consistent style enhances the readability and "understandability" of the code and makes it easier for users and developers to work with this package and with other, related [Mazama Science packages](https://github.com/MazamaScience). ## Naming Objects Naming variables is one of the most important things to get right to make your code readable and understandable to future readers of the code. _(Perhaps even yourself!)_. Having a system for creating names also makes it easier to come up with new ones. Mazama Science packages embrace **`lowerCamelCase`** for object names. With the casing settled, we use an ornithologist’s sensibility for how to identify things: * What is it? — a `bird` * What kind of bird is it? — a `blackBird` * What kind of blackBird is it? — a `redwingedBlackBird` It’s a simple system: start with a noun and prefix it with descriptors until it is uniquely identified. In this system we would never have a variable called: `num_hours`. Instead we go through our process: * What is it? — _(Hmm. What noun describes this? Ah yes!)_ — a `count` * What kind of count is it? — _(It's not a "head count" or a "body count".)_ It's an `hourCount`. For complex objects it is often helpful to give readers of the code a hint as to what type of object it is so they will know how to work with it. We often use variable names like: * `monitor` — a _mts_monitor_ object We occasionally use ‘_’ to create classes of similar variables that are otherwise hard to name, _e.g._: ``` QC_negativeValues ``` ## Naming Functions Most functions should strive to be atomic in nature and should do one thing really well. Think of them as functional _Lego_ bricks that we click together to achieve more advanced functionality. Where objects are _**well described nouns**_, functions are _**well described verbs**_ that describe what they do as in: ``` monitor_collapse() monitor_combine() monitor_dropEmpty() monitor_filter() monitor_filterDate() monitor_select() ... ``` All of these functions begin with `monitor_` because they are for creating or working with _mts_monitor_ objects. Many of these functions accept a _mts_monitor_ object as their first argument and return a modified _mts_monitor_. This means that they can be used with the `%>%` "pipe" operator and chained together as in: ``` # Create a daily average dataframe (aka tibble) Sacramento_area_daily_avg <- # get all data for 2018 monitor_loadAnnual(2018) %>% # filter to a 2-week period monitor_filterDate( startdate = 20181108, enddate = 20181123, timezone = "America/Los_Angeles" ) %>% # filter to monitors near Sacramento monitor_filterByDistance( longitude = -121.4931, latitude = 38.56844 , radius = 50000 ) %>% # combine by averaging hourly values from different monitors monitor_collapse() %>% # calculate local time daily averages monitor_dailyStatistic() %>% # extract daily average values from 'monitor' object monitor_getData() ``` ## Naming Files Each file should contain a single function of the same name. Thus, the function named `monitor_filterDate()` is defined in `monitor_filterDate.R`. An exception is made for small, mostly internal functions used in conjunction with a particular type of object or activity. 
These can be stored together in a file named `utils-~`: ``` utils-monitor.R ``` ## Syntax We generally adhere to the [Wickham Style Guide](http://adv-r.had.co.nz/Style.html) for syntax with a few exceptions: ### Spacing **Do** place spaces around code in parentheses if it is an `if` test: ``` if ( <logical expression part1> && <logical expression part2> ) { ... } ``` When debugging, this makes it much easier to select the logical test with a cursor and paste it into the RStudio console. ### Lists We generally like to specify R lists with each `parameter = value` pair on a separate line. This goes for regular lists and for named argument lists passed to a function: ``` # Filter to a 2-week period monitor_filterDate( startdate = 20181108, enddate = 20181123, timezone = "America/Los_Angeles" ) %>% ``` Coding this way makes it easy to see which function arguments are being passed. It also eases future refactoring of the code when additional arguments need to be added or the order of arguments needs to be changed. ----- It is our belief that good code should be both readable and understandable and should inspire others to copy and innovate on their own.
/scratch/gouwar.j/cran-all/cranData/AirMonitor/inst/doc/Developer_Style_Guide.Rmd
--- title: "Introduction to AirMonitor" author: "Mazama Science" date: "2022-10-31" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Introduction to AirMonitor} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, fig.width = 7, fig.height = 5) ``` ## Installation Install from CRAN with: `install.packages('AirMonitor')` Install the latest version from GitHub with: `devtools::install_github('mazamascience/AirMonitor')` ## Available data The USFS AirFire group regularly processes monitoring data in support of their various operational tools. Pre-processed, harmonized and QC'ed data files can be loaded with the following functions: * `~_load()` -- load data based on a start- and end-time * `~loadAnnual()` -- load a year's worth of data * `~loadDaily()` -- load the most recent 45 days of data (updated once per day) * `~loadLatest()` -- load the most recent 10 days of data (updated every hour) Data archives go back to 2014 or earlier depending on the data source. ## Recipes We encourage people to embrace "recipe" style coding as enabled by **dplyr** and related packages. The special `%>%` operator uses the output of one function as the first argument of the next function, thus allowing for easy "chaining" of results to create a step-by-step recipe. With only a few exceptions, all the `monitor_` functions accept a _mts_monitor_ object as their first argument and generate a _mts_monitor_ object as a result so they can be chained together. ## A first example ```{r library, echo = FALSE} suppressPackageStartupMessages({ library(AirMonitor) Camp_Fire <- Camp_Fire }) ``` Let's say we are interested in the impact of smoke from the 2018 [Camp Fire](https://en.wikipedia.org/wiki/Camp_Fire_(2018)) in the Sacramento area. We would begin by creating a `Camp_Fire` object that has all the monitors in California for the period of interest. The recipe for creating `Camp_Fire` has four steps: 1) load annual data; 2) filter for monitors in California; 3) restrict the date range to Camp Fire dates; 4) remove any monitors with no valid data in this range. ``` # create the Camp_Fire 'mts_monitor' object Camp_Fire <- # 1) load annual data monitor_loadAnnual(2018) %>% # 2) filter for California monitor_filter(stateCode == 'CA') %>% # 3) restrict date range monitor_filterDate( startdate = 20181108, enddate = 20181123, timezone = "America/Los_Angeles" ) %>% # 4) remove monitors with no valid data monitor_dropEmpty() ``` We can use the `monitor_leaflet()` function to display these monitors (colored by maximum PM2.5 value) in an interactive map. This map allows us to zoom in and click on the monitor in downtown Sacramento to get it's `deviceDeploymentID` -- "127e996697f9731c_840060670010". ```{r Sacramento_2} monitor_leaflet(Camp_Fire) ``` We can use this `deviceDeploymentID` to create a _mts_monitor_ object for this single monitor and take a look at a time series plot. 
Day-night shading and AQI decorations create a publication-ready plot:

```{r Sacramento_3}
# create single-monitor Sacramento
Sacramento <-

  # 1) start with Camp_Fire
  Camp_Fire %>%

  # 2) select a specific device-deployment
  monitor_select("127e996697f9731c_840060670010")

# review timeseries plot
Sacramento %>%
  monitor_timeseriesPlot(
    shadedNight = TRUE,
    addAQI = TRUE,
    main = "Hourly PM2.5 Concentration in Sacramento"
  )

# add the AQI legend
addAQILegend(cex = 0.8)
```

Next, we can use this specific location to create a _mts_monitor_ object
containing all monitors within 50 kilometers (31 miles) of Sacramento.

```{r Sacramento_4}
Sacramento_area <-

  # 1) start with Camp_Fire
  Camp_Fire %>%

  # 2) find all monitors within 50km of Sacramento
  monitor_filterByDistance(
    longitude = Sacramento$meta$longitude,
    latitude = Sacramento$meta$latitude,
    radius = 50000
  )

monitor_leaflet(Sacramento_area)
```

We can use the same `monitor_timeseriesPlot()` function to display the hourly
data for _all_ the monitors in the Sacramento area in a single plot. This gives
a sense of the range of values within the area at any given hour.

```{r Sacramento_5}
Sacramento_area %>%
  monitor_timeseriesPlot(
    shadedNight = TRUE,
    addAQI = TRUE,
    main = "Wildfire Smoke within 31 miles of Sacramento"
  )

addAQILegend(lwd = 1, pch = NA, bg = "white", cex = 0.8)
```

Now we can average together all the monitors and create a local-time, daily
average for the Sacramento area.

```{r Sacramento_6}
# 1) start with Sacramento_area
Sacramento_area %>%

  # 2) average together all timeseries hour-by-hour
  monitor_collapse(
    deviceID = "Sacramento_area"
  ) %>%

  # 3) calculate the local-time daily average (default)
  monitor_dailyStatistic() %>%

  # 4) pull out the $data dataframe
  monitor_getData()
```

Alternatively, we can plot the daily averages.

```{r Sacramento_7}
# 1) start with Sacramento_area
Sacramento_area %>%

  # 2) average together all timeseries hour-by-hour
  monitor_collapse() %>%

  # 3) create daily barplot
  monitor_dailyBarplot(
    main = "Daily Average PM2.5 in the Sacramento Area"
  )

# add the AQI legend
addAQILegend(pch = 15, bg = "white", cex = 0.8)
```

----

Best of luck analyzing your local air quality data!
/scratch/gouwar.j/cran-all/cranData/AirMonitor/vignettes/AirMonitor.Rmd
--- title: "Data Model" author: "Jonathan Callahan" date: "2022-10-31" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Data Model} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, echo=FALSE} knitr::opts_chunk$set(fig.width = 7, fig.height = 5) ``` This vignette explores the _mts_monitor_ data model used throughout the **AirMonitor** package to store and work with monitoring data. The **AirMonitor** package is designed to provide a compact, full-featured suite of utilities for working with PM2.5 data. A uniform data model provides consistent data access across monitoring data available from different agencies. The core data model in this package is defined by the _mts_monitor_ object used to store data associated with groups of individual monitors. To work efficiently with the package it is important that you understand the structure of this data object and the functions that operate on it. Package functions whose names begin with `monitor_`, expect objects of class _mts_monitor_ as their first argument. (*'mts' stands for 'Multiple Time Series'*) ## Data Model The **AirMonitor** package uses the _mts_ data model defined in the **[MazamaTimeSeries](https://mazamascience.github.io/MazamaTimeSeries/)** package. In this data model, each unique time series is referred to as a _"device-deployment"_ -- a time series collected by a particular device at a specific location. Multiple device-deployments are stored in memory as a _mts_monitor_ object, typically called `monitor`. Each `monitor` is just an \pkg{R} list with two dataframes. `monitor$meta` -- rows = unique device-deployments; cols = device/location metadata `monitor$data` -- rows = UTC times; cols = device-deployment data (plus an additional `datetime` column) A key feature of this data model is the use of the `deviceDeploymentID` as a "foreign key" that allows `data` columns to be mapped onto the associated spatial and device metadata in a `meta` row. The following will always be true: ``` identical(names(monitor$data), c('datetime', monitor$meta$deviceDeploymentID)) ``` Each column of `monitor$data` represents a time series associated with a particular device-deployment while each row of `monitor$data` represents a _synoptic_ snapshot of all measurements made at a particular time. In this manner, software can create both time series plots and maps from a single `monitor` object in memory. The `data` dataframe contains all hourly measurements organized with rows (the 'unlimited' dimension) as unique timesteps and columns as unique device-deployments. The very first column is always named `datetime` and contains the `POSIXct` datetime in Coordinated Universal Time (UTC). This time axis is guaranteed to be a regular hourly axis with no gaps. The `meta` dataframe contains all metadata associated with device-deployments and is organized with rows as unique device-deployments and columns containing both location and device metadata. The following columns are guaranteed to exist in the `meta` dataframe. Those marked with "(optional)" may contain `NA`s. Additional columns may also be present depending on the data source. 
* `deviceDeploymentID` -- unique ID associated with a time series
* `deviceID` -- unique device ID
* `deviceType` -- (optional) device type
* `deviceDescription` -- (optional) human readable device description
* `deviceExtra` -- (optional) additional human readable device information
* `pollutant` -- pollutant name from `AirMonitor::pollutantNames`
* `units` -- one of `"PPM|PPB|UG/M3"`
* `dataIngestSource` -- (optional) source of data
* `dataIngestURL` -- (optional) URL used to access data
* `dataIngestUnitID` -- (optional) instrument identifier used at `dataIngestSource`
* `dataIngestExtra` -- (optional) human readable data ingest information
* `dataIngestDescription` -- (optional) human readable data ingest instructions
* `locationID` -- unique location ID from `MazamaLocationUtils::location_createID()`
* `locationName` -- human readable location name
* `longitude` -- longitude
* `latitude` -- latitude
* `elevation` -- (optional) elevation
* `countryCode` -- ISO 3166-1 alpha-2 country code
* `stateCode` -- ISO 3166-2 alpha-2 state code
* `countyName` -- US county name
* `timezone` -- Olson time zone
* `houseNumber` -- (optional)
* `street` -- (optional)
* `city` -- (optional)
* `zip` -- (optional)
* `AQSID` -- (optional) EPA AQS unique identifier
* `fullAQSID` -- (optional) EPA AQS unique identifier

**Example 1: Exploring _mts_monitor_ objects**

We will use the built-in "NW_Megafires" dataset and various
`monitor_filter~()` functions to subset a _mts_monitor_ object which we then
examine.

```{r data_model_1}
library(AirMonitor)

# Recipe to select Washington state monitors in August of 2015:
monitor <-

  # 1) start with NW Megafires
  NW_Megafires %>%

  # 2) filter to only include Washington state
  monitor_filter(stateCode == "WA") %>%

  # 3) filter to only include August
  monitor_filterDate(20150801, 20150901) %>%

  # 4) remove monitors with all missing values
  monitor_dropEmpty()

# 'mts_monitor' objects can be identified by their class
class(monitor)

# They always have two elements called 'meta' and 'data'
names(monitor)

# Examine the 'meta' dataframe
dim(monitor$meta)
names(monitor$meta)

# Examine the 'data' dataframe
dim(monitor$data)

# This should always be true
identical(names(monitor$data), c('datetime', monitor$meta$deviceDeploymentID))
```

**Example 2: Basic manipulation of _mts_monitor_ objects**

The **AirMonitor** package has numerous functions that work with
_mts_monitor_ objects, all of which begin with `monitor_`. If you need to do
something that the package functions do not provide, you can manipulate
_mts_monitor_ objects directly as long as you retain the structure of the
data model.
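For instance, here is a minimal sketch (assuming a `monitor` object like the
one created above is in memory) of subsetting device-deployments by hand while
keeping `meta` rows and `data` columns in sync:

```
# Hypothetical direct manipulation -- keep only the first two device-deployments
ids <- monitor$meta$deviceDeploymentID[1:2]

# Subset 'meta' rows and the matching 'data' columns (plus 'datetime')
monitor$meta <- monitor$meta[monitor$meta$deviceDeploymentID %in% ids, ]
monitor$data <- monitor$data[, c("datetime", ids)]

# The data model invariant still holds
identical(names(monitor$data), c("datetime", monitor$meta$deviceDeploymentID))
```

In practice, `monitor_select()` does this for you; the point is that any
direct manipulation must preserve the `meta`/`data` correspondence.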
Functions that accept and return _mts_monitor_ objects include:

* `monitor_aqi()`
* `monitor_collapse()`
* `monitor_combine()`
* `monitor_dailyStatistic()`
* `monitor_dailyThreshold()`
* `monitor_dropEmpty()`
* `monitor_filter()` ( aka `monitor_filterMeta()`)
* `monitor_filterByDistance()`
* `monitor_filterDate()`
* `monitor_filterDatetime()`
* `monitor_mutate()`
* `monitor_nowcast()`
* `monitor_replaceValues()`
* `monitor_select()` ( aka `monitor_reorder()`)
* `monitor_selectWhere()`
* `monitor_trimDate()`

These functions can be used with the **magrittr** package pipe operator
(`%>%`) as in the following example:

```{r monitor_leaflet, results = "hold"}
# First, obtain the monitor ids by clicking on dots in the interactive map:
NW_Megafires %>% monitor_leaflet()
```

```{r Methow_Valley, results = "hold"}
# Calculate daily means for the Methow Valley from monitors in Twisp and Winthrop
TwispID <- "99a6ee8e126ff8cf_530470009_04"
WinthropID <- "123035bbdc2bc702_530470010_04"

# Recipe to calculate Methow Valley August Means:
Methow_Valley_AugustMeans <-

  # 1) start with NW Megafires
  NW_Megafires %>%

  # 2) select monitors from Twisp and Winthrop
  monitor_select(c(TwispID, WinthropID)) %>%

  # 3) average them together hour-by-hour
  monitor_collapse(deviceID = 'MethowValley') %>%

  # 4) restrict data to August
  monitor_filterDate(20150801, 20150901) %>%

  # 5) calculate daily mean
  monitor_dailyStatistic(mean, minHours = 18) %>%

  # 6) round data to one decimal place
  monitor_mutate(round, 1)

# Look at the first week
Methow_Valley_AugustMeans$data[1:7,]
```

**Example 3: Advanced manipulation of _mts_monitor_ objects**

The following code demonstrates user creation of a custom function to
manipulate the `data` tibble from a _mts_monitor_ object with
`monitor_mutate()`.

```{r custom_use1}
# Monitors within 100 km of Spokane, WA
Spokane <-
  NW_Megafires %>%
  monitor_filterByDistance(-117.42, 47.70, 100000) %>%
  monitor_filterDate(20150801, 20150901) %>%
  monitor_dropEmpty()

# Show the daily statistic for one week
Spokane %>%
  monitor_filterDate(20150801, 20150808) %>%
  monitor_dailyStatistic(mean) %>%
  monitor_getData()

# Custom function to convert from metric ug/m3 to imperial grain/gallon
my_FUN <- function(x) { return( x * 15.43236 / 0.004546 ) }

Spokane %>%
  monitor_filterDate(20150801, 20150808) %>%
  monitor_mutate(my_FUN) %>%
  monitor_dailyStatistic(mean) %>%
  monitor_getData()
```

Understanding that `monitor$data` is just a dataframe of measurements
prepended with a `datetime` column, we can pull out the measurements and do
analyses independent of the _mts_monitor_ data model. Here we look for
correlations among the PM2.5 time series.

```{r custom_use2}
# Pull out the time series data to calculate correlations
Spokane_data <-
  Spokane %>%
  monitor_getData() %>%
  dplyr::select(-1) # omit 'datetime' column

# Provide human readable names
names(Spokane_data) <- Spokane$meta$locationName

# Find correlation among monitors
cor(Spokane_data, use = "complete.obs")
```

This introduction to the _mts_monitor_ data model should be enough to get you
started. Lots more examples are available in the package documentation.

----

_Best of luck exploring and understanding PM2.5 air quality data!_
/scratch/gouwar.j/cran-all/cranData/AirMonitor/vignettes/Data_Model.Rmd
--- title: "Developer Style Guide" author: "Jonathan Callahan" date: "2022-02-15" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Developer Style Guide} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # R Style Guide This document describes the coding style used within the package. Having a consistent style enhances the readability and "understandability" of the code and makes it easier for users and developers to work with this package and with other, related [Mazama Science packages](https://github.com/MazamaScience). ## Naming Objects Naming variables is one of the most important things to get right to make your code readable and understandable to future readers of the code. _(Perhaps even yourself!)_. Having a system for creating names also makes it easier to come up with new ones. Mazama Science packages embrace **`lowerCamelCase`** for object names. With the casing settled, we use an ornithologist’s sensibility for how to identify things: * What is it? — a `bird` * What kind of bird is it? — a `blackBird` * What kind of blackBird is it? — a `redwingedBlackBird` It’s a simple system: start with a noun and prefix it with descriptors until it is uniquely identified. In this system we would never have a variable called: `num_hours`. Instead we go through our process: * What is it? — _(Hmm. What noun describes this? Ah yes!)_ — a `count` * What kind of count is it? — _(It's not a "head count" or a "body count".)_ It's an `hourCount`. For complex objects it is often helpful to give readers of the code a hint as to what type of object it is so they will know how to work with it. We often use variable names like: * `monitor` — a _mts_monitor_ object We occasionally use ‘_’ to create classes of similar variables that are otherwise hard to name, _e.g._: ``` QC_negativeValues ``` ## Naming Functions Most functions should strive to be atomic in nature and should do one thing really well. Think of them as functional _Lego_ bricks that we click together to achieve more advanced functionality. Where objects are _**well described nouns**_, functions are _**well described verbs**_ that describe what they do as in: ``` monitor_collapse() monitor_combine() monitor_dropEmpty() monitor_filter() monitor_filterDate() monitor_select() ... ``` All of these functions begin with `monitor_` because they are for creating or working with _mts_monitor_ objects. Many of these functions accept a _mts_monitor_ object as their first argument and return a modified _mts_monitor_. This means that they can be used with the `%>%` "pipe" operator and chained together as in: ``` # Create a daily average dataframe (aka tibble) Sacramento_area_daily_avg <- # get all data for 2018 monitor_loadAnnual(2018) %>% # filter to a 2-week period monitor_filterDate( startdate = 20181108, enddate = 20181123, timezone = "America/Los_Angeles" ) %>% # filter to monitors near Sacramento monitor_filterByDistance( longitude = -121.4931, latitude = 38.56844 , radius = 50000 ) %>% # combine by averaging hourly values from different monitors monitor_collapse() %>% # calculate local time daily averages monitor_dailyStatistic() %>% # extract daily average values from 'monitor' object monitor_getData() ``` ## Naming Files Each file should contain a single function of the same name. Thus, the function named `monitor_filterDate()` is defined in `monitor_filterDate.R`. An exception is made for small, mostly internal functions used in conjunction with a particular type of object or activity. 
These can be stored together in a file named `utils-~`: ``` utils-monitor.R ``` ## Syntax We generally adhere to the [Wickham Style Guide](http://adv-r.had.co.nz/Style.html) for syntax with a few exceptions: ### Spacing **Do** place spaces around code in parentheses if it is an `if` test: ``` if ( <logical expression part1> && <logical expression part2> ) { ... } ``` When debugging, this makes it much easier to select the logical test with a cursor and paste it into the RStudio console. ### Lists We generally like to specify R lists with each `parameter = value` pair on a separate line. This goes for regular lists and for named argument lists passed to a function: ``` # Filter to a 2-week period monitor_filterDate( startdate = 20181108, enddate = 20181123, timezone = "America/Los_Angeles" ) %>% ``` Coding this way makes it easy to see which function arguments are being passed. It also eases future refactoring of the code when additional arguments need to be added or the order of arguments needs to be changed. ----- It is our belief that good code should be both readable and understandable and should inspire others to copy and innovate on their own.
/scratch/gouwar.j/cran-all/cranData/AirMonitor/vignettes/Developer_Style_Guide.Rmd
--- title: "Citations" author: "Jonathan Callahan" date: "2022-09-03" output: html_document --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` The **AirMonitor** package's predecessor, the **PWFSLSmoke** package is widely used in air quality analysis throughout North America and the world. This document provides links to projects and publications that mention the **PWFSLSmoke** package. ## PWFSLSmoke The predecessor to the **AirMonitor** package is the [PWFSLSmoke](https://github.com/MazamaScience/PWFSLSmoke) package from which many functions were copied and refactored. ### Packages Dependent Upon PWFSLSmoke The following R packages depend upon PWFSLSmoke: * [AirSensor](https://CRAN.R-project.org/package=AirSensor) -- Process and Display Data from Air Quality Sensors ### Publications that mention PWFSLSmoke * **Feb, 2022 -- Environmental Modeling & Software**<br> https://doi.org/10.1016/j.envsoft.2021.105256<br> "the AirSensor package leverages the _PWFSLSmoke_ package to provide access to PM2.5 data" * **Feb, 2021 -- Eos**<br> https://doi.org/10.1029/2021EO155076<br> "Special thanks go to ... Mazama Science for the _PWFSLSmoke_ R statistical package" * **Dec, 2020 -- Environmental Modeling & Software**<br> https://doi.org/10.1016/j.envsoft.2020.104832<br> "AirSensor has been integrated with the _PWFSLSmoke_ R package for access to regulatory AM data" * **Sep, 2020 -- Journal of the Air & Waste Management Association**<br> https://doi.org/10.1080/10962247.2021.1891994<br> "Data were accessed and processed by the _PWFSLSmoke_ statistical package" * **Sep, 2020 -- Atmosphere**<br> https://doi.org/10.3390/atmos11090970<br> "daily average concentrations of PM2.5 were downloaded in R using the _PWFSLSmoke_ Package" ### Student Theses that mention PWFSLSmoke * **2021 -- UW Masters Thesis**<br> http://hdl.handle.net/1773/48258<br> "Many thanks to the USFS Pacific Wildland Fire Sciences Lab Airfire team for developing the _PWFSLSmoke_ R package and making it available for researchers."
/scratch/gouwar.j/cran-all/cranData/AirMonitor/vignettes/articles/Citations.Rmd
--- title: "NowCast" author: "Rex Thompson and Jonathan Callahan" date: "2022-10-31" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{NowCast} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, echo = FALSE} knitr::opts_chunk$set(fig.width = 7, fig.height = 5, comment = NA) options(width = 105) ``` This vignette documents the `monitor_nowcast()` function, which converts a _mts_monitor_ object's values to NowCast values. We provide details on the NowCast algorithm and our implementation of it. We also provide examples to highlight specific attributes and potential points of confusion in the algorithm. This vignette also briefly covers the `monitor_aqi()` function, which uses the `monitor_nowcast()` function to convert raw PM2.5 data into NowCast-based AQI values. # What is NowCast? NowCast is an air quality data smoothing algorithm that puts an emphasis on recent values when measurements are unstable, and approaches a long-term (*e.g.* 12-hour) average when measurements are stable. The original algorithm, known as the Conroy method, was developed in 2003 to make real-time air quality measurements roughly comparable to established regulatory air quality health thresholds (*e.g.* 24‐hour PM2.5 standards). However, that method was shown to be slow to respond to rapidly changing air quality conditions, which, at best, reduced public confidence in disseminated AQI values an, at worst, had the potential to adversely affect the health of those in high impact areas (*e.g.* near wildfires). In response, EPA developed a new method -- the Reff method -- in 2013 to be more responsive to rapidly changing air quality conditions. The **AirMonitor** package provides support for applying the new NowCast algorithm to hourly PM2.5 data, though theoretically it could be applied to regular-interval time series data of any type, including other criteria pollutants. Algorithm specifics are provided below. Sources: * https://en.wikipedia.org/wiki/NowCast_(air_quality_index) * https://forum.airnowtech.org/t/the-nowcast-for-pm2-5-and-pm10/172 # The NowCast Algorithm (Reff Method) ## NowCast Equation for PM Data (PM~2.5~ or PM~10~) The NowCast value for a given hour can be calculated as follows: $$ NowCast = \frac{\sum_{i=1}^{12}{w^{i-1}c_i}}{\sum_{i=1}^{12}w^{i-1}} $$ where $i$, $w$ and $c$ are as defined in the sections below. Source: * https://en.wikipedia.org/wiki/NowCast_(air_quality_index) ## Data Selection For PM2.5 data, the NowCast algorithm uses hourly averages from the prior 12 clock hours. The hourly averages are denoted below by $c_i$, where $i$ represents the number of hours before present. For example, $c_1$, $c_2$, $c_3$, $...$, $c_N$ represent the hourly averages for the most recent 1, 2, 3, $...$, $N$ hours. Source: * https://forum.airnowtech.org/t/the-nowcast-for-pm2-5-and-pm10/172 ## Weight Factor Using data points as specified above, define the weight factor $w^*$ as follows: $$ w^* = 1- \frac{c_{max}-c_{min}}{c_{max}} = \frac{c_{min}}{c_{max}} $$ where - $c_{max}$ = the highest value in $c_1$, $...$, $c_N$ - $c_{min}$ = the lowest value in $c_1$, $...$, $c_N$ NowCast-related literature usually gives the equation for $w^*$ in one of two basic forms, both of which are included above for reference. Note that both forms are equivalent. 
Sources:

* https://en.wikipedia.org/wiki/NowCast_(air_quality_index)
* https://forum.airnowtech.org/t/the-nowcast-for-pm2-5-and-pm10/172

## Minimum Weight Factor

Before plugging into the NowCast equation, $w^*$ is updated to $w$ for PM2.5
and PM10 as follows:

$$
w = \begin{cases}
w^* & \text{if} & w^*>\frac{1}{2} \\
\frac{1}{2} & \text{if} & w^*\leq \frac{1}{2} \\
\end{cases}
$$

Source:

* https://forum.airnowtech.org/t/the-nowcast-for-pm2-5-and-pm10/172

## Truncation

Final NowCast values are truncated based on the type of data being processed:

- **PM2.5**: 0.1 µg/m3
- **PM10**: 1 µg/m3

Source:

* https://forum.airnowtech.org/t/the-nowcast-for-pm2-5-and-pm10/172

# Algorithm Expansion

For PM2.5, the NowCast equation can be expanded as follows:

$$
NowCast = \frac
{w^0c_1 + w^1c_2 + w^2c_3 + w^3c_4 + w^4c_5 + w^5c_6 + w^6c_7 + w^7c_8 + w^8c_9 + w^9c_{10} + w^{10}c_{11} + w^{11}c_{12}}
{w^0 + w^1 + w^2 + w^3 + w^4 + w^5 + w^6 + w^7 + w^8 + w^9 + w^{10} + w^{11}}
$$

Note that:

- $w^0$, $w^1$, $w^2$, $...$ represent $w$ to the power of 0, 1, 2, $...$
- $c_1$, $c_2$, $c_3$, $...$ represent the hourly average for the most recent
1, 2, 3, $...$ hours.

When written this way, it is easy to see that in the extreme case where
$w = 1$ (i.e. if $c_{min} = c_{max}$) the equation above reduces to:

$$ NowCast = \frac{\sum_{i=1}^{12}{c_i}}{12} $$

which is just a simple 12-hour arithmetic average. Incidentally, in this case
all 12 hourly averages and the 12-hour average itself would be equal.

In the case of highly variable PM2.5 data, $w$ would be set to the minimum
value of $1/2$, and the most recent data would carry the majority of the
weight in the equation above.

# Implementation Details

## Missing Data

The NowCast algorithm ignores terms corresponding to hours for which a valid
observation is not available. For example, suppose PM2.5 is invalid for all
but the first three and last three hours of a 12-hour period. Then the PM2.5
NowCast equation takes the following form:

$$
NowCast = \frac
{w^0c_1 + w^1c_2 + w^2c_3 + \color{gray}{\text{[note: middle values ignored]}} + w^9c_{10} + w^{10}c_{11} + w^{11}c_{12}}
{w^0 + w^1 + w^2 + \color{gray}{\text{[note: middle values ignored]}} + w^9 + w^{10} + w^{11}}
$$

Minimum data availability requirements do apply, however. See the following
section for details.

## Data Availability Requirements

To get a valid NowCast value, the NowCast algorithm simply requires valid data
for at least two of the three most recent clock hours. This means that a valid
NowCast value can be calculated from as little as two valid hours, even if 12
hours are typically used in the calculation.

However, note that https://en.wikipedia.org/wiki/NowCast_(air_quality_index)
says:

> Because the most recent hours of data are weighted so heavily in the NowCast
> when PM levels are changing, EPA does not report the NowCast when data is
> missing for c~1~ or c~2~.

While this seems like a reasonable approach, we could not find a source for
this statement (in the Wikipedia page sources or elsewhere). We take a
compromise approach and still allow valid NowCast values to be calculated when
c~2~ is invalid but return `NA` when c~1~ is invalid.

## Argument: `includeShortTerm`

As mentioned above, the NowCast algorithm only requires two valid hours to
calculate valid values. Does this mean that the `monitor_nowcast()` function
can begin reporting valid values after the second hour in the data (assuming
the first two hours are both valid)? We assert that it would be inappropriate
to do so, _usually_.
In most cases a user will have created a _mts_monitor_ object using one of the
`~_load()` functions, which return data for a specific period of time. Data
before this period is not necessarily invalid; it was simply not retrieved.
But the function itself has no way of knowing whether such earlier data
exists, so it has no choice but to consider earlier hours "invalid".

This means that, if followed by the book, the NowCast algorithm could return
different values for a given hour depending on whether or not the earlier data
had been retrieved. This is not a desirable behavior, so, by default, the
`monitor_nowcast()` function returns invalid NowCast values until the
$N$^th^ hour of data.

However, we provide a manual override, `includeShortTerm = TRUE`, which causes
the function to return valid values as per the bare-bones data availability
requirements described above, treating the hours before the beginning of the
data as invalid. Thus, it can return "valid" NowCast values as early as the
second (valid) hour in the data.

This argument may be useful (or even appropriate) for datasets where the
beginning of the data truly corresponds to the beginning of the monitoring,
such as when a new monitor has just been installed. In this case, data prior
to the first hour is truly invalid since it does not exist.

The argument may also be useful for field personnel looking to ensure that
their monitor has been successfully plugged in to the data processing
pipeline, even if the values themselves are not truly representative of the
data for the past $N$ hours.

## Negative Values

In the NowCast literature we find no mention of negative values, which, while
aphysical, are common in air quality monitoring data. Thus, we do not adjust
negative values up to zero in the `monitor_nowcast()` function itself.
Negative values should always be handled prior to converting raw data to
NowCast or AQI values. All of the `~_load()` functions handle negative values
by _lifting_ them to zero as part of the data loading process.

## Argument: `version`

The `version` argument sets defaults for the number of hours in the lookback
$N$ (`numHrs`), the minimum weight factor $w_{min}$ (`weightFactorMin`), and
the number of digits to which the final data is truncated (`digits`).

* `version='pm25'` (default)
  + `numHrs <- 12`
  + `weightFactorMin <- 0.5`
  + `digits <- 1`
* `version='ozone'`
  + `numHrs <- 8`
  + `weightFactorMin <- NA`
  + `digits <- 3`
* `version='pmAsian'`
  + `numHrs <- 3`
  + `weightFactorMin <- 0.1`
  + `digits <- 1`

The default setting is `version = 'pm25'` since this is the parameter most
commonly stored in _mts_monitor_ objects. `version = 'ozone'` supports the O3
NowCast as described above. `version='pmAsian'` supports an alternative
shorter-term NowCast as proposed here:
https://aqicn.org/faq/2015-03-15/air-quality-nowcast-a-beginners-guide/

Although the NowCast algorithm itself supports PM10, we do not currently
provide functionality for this parameter in the `monitor_nowcast()` function.

# NowCast and AQI Calculations

The EPA uses an [Air Quality Index](https://www.airnow.gov/aqi/aqi-basics/) to
put different pollutants on the same scale. From their site:

> Think of the AQI as a yardstick that runs from 0 to 500. The higher the AQI
> value, the greater the level of air pollution and the greater the health
> concern. For example, an AQI value of 50 represents good air quality with
> little potential to affect public health, while an AQI value over 300
> represents hazardous air quality.
We provide the `monitor_aqi()` function to convert the PM2.5 data in a
_mts_monitor_ object into NowCast-based AQI values in the 0-500 range.

# Examples

The following examples demonstrate the functionality of `monitor_nowcast()`
and specifics of its implementation.

## Setup

For the following examples we will use the Northwest Megafires data from the
**AirMonitor** package. In particular, we will look at PM2.5 data from Omak,
WA, which was heavily impacted by smoke from wildfires during the second half
of August, 2015:

```{r Omak_nowcast}
library(AirMonitor)

Omak_ID <- "decef259cdefa79f_530470013_04"

# Recipe to obtain monitoring data for Omak, WA
Omak <-

  # start with NW Megafires dataset
  NW_Megafires %>%

  # select the Omak time series
  monitor_select(Omak_ID) %>%

  # filter to include only August, 2015
  monitor_filterDate(20150801, 20150901)
```

```{r Omak_nowcast_plot, echo = FALSE}
monitor_timeseriesPlot(Omak, type = 'l', lwd = 2)
monitor_timeseriesPlot(
  monitor_nowcast(Omak),
  add = TRUE,
  type = 'l',
  lwd = 2,
  col = 'purple',
  main = "Hourly and Nowcast PM2.5 Values\nOmak, Washington; August, 2015"
)
addAQILines()
addAQILegend('topleft', lwd = 1, pch = NULL)
legend(
  'topright',
  lwd = 2,
  col = c('black', 'purple'),
  legend = c('hourly', 'nowcast')
)
```

## Example 1: Basic Formula Verification

In the code above we used the `monitor_nowcast()` function to calculate PM2.5
NowCast values for the Omak _mts_monitor_ object. Let's see if we can verify
the function's output for a single hour.

Below is the Omak PM2.5 data for the first 12 hours of 8/21/15. Let's see if
we can verify the accuracy of the NowCast value for the last hour in this
series, 8/21/15 Hour 11.

```{r}
Omak_1 <-
  Omak %>%
  monitor_filterDatetime(2015082100, 2015082112, timezone = "UTC")

print(Omak_1$data)
```

First we'll pull out just the values themselves, and reverse them so the most
recent values come first (_i.e._ so the vector represents $c_1$, $c_2$, $...$,
$c_N$).

```{r example_1_values}
example_1_values <-
  Omak_1 %>%
  monitor_getData() %>%
  dplyr::pull(2) %>%
  rev()

print(example_1_values)
```

Per the NowCast algorithm, we define $w^*$ as $\frac{c_{min}}{c_{max}}$:

```{r}
(w_star <- min(example_1_values)/max(example_1_values))
```

We now define $w$ based on $w^*$ and the minimum weight factor; recall that
$w_{min}=\frac{1}{2}$ for PM2.5:

```{r}
(w <- max(1/2, w_star))
```

Thus, the numerator of the NowCast equation for Hour 11

$$
w^0c_1 + w^1c_2 + w^2c_3 + w^3c_4 + w^4c_5 + w^5c_6 + w^6c_7 + w^7c_8 + w^8c_9 + w^9c_{10} + w^{10}c_{11} + w^{11}c_{12}
$$

becomes the following:

$0.5^0 \times 46.3 + 0.5^1 \times 27.4 + 0.5^2 \times 59.8 + 0.5^3 \times 129.2 + 0.5^4 \times 130.6 + 0.5^5 \times 215.4$ +
$0.5^6 \times 143.2 + 0.5^7 \times 93.7 + 0.5^8 \times 101.8 + 0.5^9 \times 49.3 + 0.5^{10} \times 80.2 + 0.5^{11} \times 123.3$

which we can calculate in R as follows

```{r}
(numerator <- sum(w^(0:11) * example_1_values))
```

Meanwhile, the denominator

$$
w^0 + w^1 + w^2 + w^3 + w^4 + w^5 + w^6 + w^7 + w^8 + w^9 + w^{10} + w^{11}
$$

becomes the following:

$0.5^0 + 0.5^1 + 0.5^2 + 0.5^3 + 0.5^4 + 0.5^5 + 0.5^6 + 0.5^7 + 0.5^8 + 0.5^9 + 0.5^{10} + 0.5^{11}$

which we can calculate in R as follows

```{r}
(denominator <- sum(w^(0:11)))
```

Dividing the numerator by the denominator gives a value of

```{r echo = FALSE}
(numerator/denominator)
```

which we truncate to one decimal place for a final NowCast value of
`r trunc(10*numerator/denominator)/10` for 2015-08-21 11:00 local time.
So how does this compare to our `monitor_nowcast()` output for the same hour?

```{r}
Omak %>%
  monitor_nowcast() %>%
  monitor_filterDatetime(2015082111, 2015082112, timezone = "UTC") %>%
  monitor_getData() %>%
  dplyr::pull(Omak_ID)
```

Right on!

## Example 2: Short Missing Data Period

So we have verified the calculation for a period with 12 valid hours. But how
does the function handle missing data periods? Let's take a look at a short
period of missing data to find out. We will invalidate a single hour and see
how the NowCast algorithm responds.

```{r Omak_missing_data}
# Create Omak_2 with one value missing
Omak_2 <- Omak
Omak_2$data[564, 2] <- NA

Omak_2 %>%
  monitor_filterDatetime(2015082412, 2015082500, timezone = "UTC") %>%
  monitor_getData()
```

Here we see that the monitored data is missing for 18:00. We want to verify
that this hour is properly excluded from the NowCast calculation for
subsequent hours. We also want to ensure that NowCast returns valid values for
all hours in the vicinity, since all hours meet the minimum data availability
requirements (i.e. two of three most recent hours valid).

### Formula Verification

Let's see if we can verify the accuracy of the NowCast value for Hour 23. We
again begin by pulling out our vector of values $c_1$, $c_2$, $...$, $c_N$

```{r example_2_values}
example_2_values <-
  Omak_2 %>%
  monitor_filterDatetime(2015082412, 2015082500, timezone = "UTC") %>%
  monitor_getData() %>%
  dplyr::pull(2) %>%
  rev()

print(example_2_values)
```

and calculating $w^*$ and $w$

```{r}
w_star <- min(example_2_values, na.rm = TRUE)/max(example_2_values, na.rm = TRUE)
(w <- max(1/2, w_star))
```

This time our numerator and denominator should both exclude the 6th term,
since $c_6$ is invalid. So we have

```{r}
validIndexes <- which(!is.na(example_2_values))
numerator <- sum(w^(validIndexes - 1) * example_2_values[validIndexes])
denominator <- sum(w^(validIndexes - 1))
numerator/denominator
```

We truncate the value above to one decimal place for a final NowCast value of
`r trunc(10*numerator/denominator)/10` for 2015-08-24 23:00.

So how does this compare to our `monitor_nowcast()` output for the same hour?

```{r}
Omak_2 %>%
  monitor_nowcast() %>%
  monitor_filterDatetime(2015082423, 2015082500, timezone = "UTC") %>%
  monitor_getData() %>%
  dplyr::pull(Omak_ID)
```

Excellent!

### Missing Data Verification

A quick look at the monitored data alongside the NowCast data shows that all
NowCast hours are valid except for the c~1~ hour when the monitored data is
also invalid.

```{r}
tbl <-
  Omak_2 %>%
  monitor_filterDatetime(2015082412, 2015082500, timezone = "UTC") %>%
  monitor_getData()

tbl$nowcast <-
  Omak_2 %>%
  monitor_nowcast() %>%
  monitor_filterDatetime(2015082412, 2015082500, timezone = "UTC") %>%
  monitor_getData() %>%
  dplyr::pull(Omak_ID)

print(tbl)
```

## Example 3: Long Missing Data Period

We now look at a longer period of missing data, on 8/23/15, the day prior.

```{r Omak_long_missing_data}
# Create Omak_3 with 5 missing values
Omak_3 <- Omak
Omak_3$data[538:542, 2] <- NA

Omak_3 %>%
  monitor_filterDatetime(2015082312, 2015082400, timezone = "UTC") %>%
  monitor_getData()
```

Here we see that the monitored data is missing for Hours 16-20. We again want
to verify that these hours are properly excluded from the NowCast calculation
for subsequent hours. This time we also want to ensure that NowCast returns
invalid values for hours in which the minimum data availability requirements
are not met (i.e. two of three most recent hours valid).
### Formula Verification

Let's see if we can verify the accuracy of the NowCast value for Hour 23. We
again begin by pulling out our vector of values $c_1$, $c_2$, $...$, $c_N$

```{r example_3_values}
example_3_values <-
  Omak_3 %>%
  monitor_filterDatetime(2015082312, 2015082400, timezone = "UTC") %>%
  monitor_getData() %>%
  dplyr::pull(2) %>%
  rev()

print(example_3_values)
```

and calculating $w^*$ and $w$

```{r}
w_star <- min(example_3_values, na.rm = TRUE)/max(example_3_values, na.rm = TRUE)
(w <- max(1/2, w_star))
```

This time our numerator and denominator should both exclude the 4th-8th terms,
since $c_4$, $c_5$, $c_6$, $c_7$ and $c_8$ are invalid. So we have

```{r}
validIndexes <- which(!is.na(example_3_values))
numerator <- sum(w^(validIndexes - 1) * example_3_values[validIndexes])
denominator <- sum(w^(validIndexes - 1))
numerator/denominator
```

We truncate the value above to one decimal place for a final NowCast value of
`r trunc(10*numerator/denominator)/10` for 2015-08-23 23:00.

So how does this compare to our `monitor_nowcast()` output for the same hour?

```{r}
Omak_3 %>%
  monitor_nowcast() %>%
  monitor_filterDatetime(2015082323, 2015082400, timezone = "UTC") %>%
  monitor_getData() %>%
  dplyr::pull(Omak_ID)
```

Again, right on!

### Missing Data Verification

Now we'll look at the monitored data alongside the NowCast data to see how the
validity of the NowCast data was affected by the missing monitored hours.

```{r}
tbl <-
  Omak_3 %>%
  monitor_filterDatetime(2015082312, 2015082400, timezone = "UTC") %>%
  monitor_getData()

tbl$nowcast <-
  Omak_3 %>%
  monitor_nowcast() %>%
  monitor_filterDatetime(2015082312, 2015082400, timezone = "UTC") %>%
  monitor_getData() %>%
  dplyr::pull(Omak_ID)

print(tbl)
```

There's a lot going on here so let's walk through the data one step at a time.

**Hours 12-15**: NowCast data is valid because the monitored data is valid for
all of the three most recent hours. Status: <span style="color:limegreen">GOOD</span>

**Hour 16**: This is the first hour for which the monitored data is invalid.
NowCast should also return invalid for this c~1~ timestep. Status: <span style="color:limegreen">GOOD</span>

**Hour 17**: This NowCast value is invalid because the monitored data is only
valid for one of the three most recent hours (Hour 15). Status: <span style="color:limegreen">GOOD</span>

**Hours 18-20**: NowCast is invalid since the monitored data is valid for none
of the three most recent hours. Status: <span style="color:limegreen">GOOD</span>

**Hour 21**: This is the first hour for which the monitored data is valid
again. However, NowCast still returns an invalid value this hour since the
monitored data is only valid for one of the three most recent hours (Hour 21).
Status: <span style="color:limegreen">GOOD</span>

**Hour 22**: This is the first hour for which the NowCast data is valid again.
This is because the monitored data is again valid for two of the three most
recent hours (Hours 21 and 22). Status: <span style="color:limegreen">GOOD</span>

**Hour 23**: NowCast data is valid because the monitored data is valid for all
of the three most recent hours. Status: <span style="color:limegreen">GOOD</span>

So, it appears the `monitor_nowcast()` function returns data that has been
correctly validated according to the NowCast algorithm and associated data
availability requirements.

### Does "most recent" include "current"?
A subtle point about the `monitor_nowcast()` function which falls out of the
example above: **the "current" hour is considered to be a part of the three
"most recent clock hours"**.

This may seem strange at first, but to understand why we chose this approach,
one must think about how the data is captured and how measurement timestamps
correspond to the period they actually represent.

For particle pollution, measurements typically represent the mass of particles
that accumulate on a filter during a set period of time, e.g. the past hour.
Measurements come in at the end of the hour, but these measurements aren't
necessarily representative of conditions at the exact times of the
measurements. Instead, they are actually representative of concentrations
**during the previous clock hour**.

For example, if an hourly measurement comes in at 12:00, the measurement is
actually representative of data during Hour 11, **NOT** Hour 12. So we call
the 12:00 measurement the Hour 11 data point.

This is really just a labeling convention, and it is no different from how we
treat other datasets. For example, suppose you wanted to take an average of
the low temperatures for every day in July. You would have to wait until
August 1st to do so, since the temperature might still be dropping at the end
of the day on July 31st. But does this mean you would call this value the
average low temperature for August, since it wasn't calculated until August?
Of course not: the data belongs to July, even if it couldn't be calculated
until August. It is no different with particle pollution: the Hour 11 data
belongs to Hour 11, even if it wasn't calculated until 12:00.

As a result of this convention, timestamps are usually an entire hour (or
more) earlier than the time the measurements were actually taken (exact
differences depend on several factors).

Another reason for including the "current" hour in the NowCast "three most
recent hours" is for speed of updates. Suppose it is 12:04, and a measurement
just came in at 12:00 (the Hour 11 measurement). It would be inappropriate to
wait until 13:00 to calculate the updated NowCast value. For this reason, we
calculate NowCast values using the monitored data for the "current" hour and
the $N-1$ prior hours.

## Example 4: `includeShortTerm` argument

For our final NowCast example we will explore the `includeShortTerm` argument,
including a demonstration of why it defaults to `FALSE`.

Suppose we wanted to know the NowCast values for Omak for the first half of
8/25/15, since concentrations were extremely high at the time. Not thinking
about the details of the NowCast algorithm, one might proceed by filtering in
time before creating the NowCast:

```{r}
Omak %>%
  monitor_filterDatetime(2015082500, 2015082600, timezone = "UTC") %>%
  monitor_nowcast() %>%
  monitor_getData()
```

Unfortunately, the first 11 hours end up as invalid. But you read in the
documentation that `includeShortTerm = TRUE` will return values for the second
valid hour onwards. So you try again, this time setting
`includeShortTerm = TRUE`. Let's check it out.

```{r}
Omak %>%
  monitor_filterDatetime(2015082500, 2015082600, timezone = "UTC") %>%
  monitor_nowcast(includeShortTerm = TRUE) %>%
  monitor_getData()
```

Nice. We were able to get NowCast values for all but the first hour!

But there's a problem lurking. Setting `includeShortTerm = TRUE` caused the
`monitor_nowcast()` function to treat hours prior to Hour 0 as invalid.
**It has no idea that valid data is available for this period.**

As an experiment, let's see how the NowCast values would look if we calculated
them from a larger dataset that includes the data prior to 8/25/15 Hour 0.

```{r}
tbl <-
  Omak %>%
  # NowCast first
  monitor_nowcast() %>%
  # then restrict times
  monitor_filterDatetime(2015082500, 2015082600, timezone = "UTC") %>%
  monitor_getData()

tbl$shortTerm_T <-
  Omak %>%
  # restrict times first
  monitor_filterDatetime(2015082500, 2015082600, timezone = "UTC") %>%
  # then NowCast
  monitor_nowcast(includeShortTerm = TRUE) %>%
  monitor_getData() %>%
  dplyr::pull(Omak_ID)

print(tbl)
```

While not too extreme, we see differences in the NowCast values calculated by
the different approaches, even as late as the 11th hour of the period. As
expected, the values match from the 12th hour on.

The discrepancies displayed above are why, by default,
`includeShortTerm = FALSE`. It should be used only when necessary, and with an
understanding that the first $N$ hours' values might not necessarily be true
NowCast values.

If earlier data exists, we recommend grabbing an extra day of data at the
beginning of the period whenever you think you might want to calculate NowCast
values for a dataset.

----

Hopefully, we have thoroughly addressed any questions about the
**AirMonitor** implementation of the NowCast algorithm.
/scratch/gouwar.j/cran-all/cranData/AirMonitor/vignettes/articles/NowCast.Rmd
--- title: "Example: Save Data as CSV" author: "Jonathan Callahan" date: "2022-10-14" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Example: Save Data as CSV} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, echo = FALSE} knitr::opts_chunk$set(fig.width = 7, fig.height = 5) ``` > How can I save data from temporary monitors to a CSV file? If you have **AirMonitor** installed, you should be able to copy and paste this code into the RStudio console. The coding style takes advantage of the "pipe" operator, `%>%`, which uses the output of the preceding function as the first argument of the next function. Package functions are specifically designed to work well in this manner, encouraging **_readable and understandable code_**. Think of each chunk as a _recipe_ that begins with what you want to make and is followed by the steps needed to make it. Enjoy! ```{r workedExample, eval = FALSE} library(AirMonitor) # AIRSIS in California # - start with all AIRSIS monitors in 2019 # - subset for those where stateCode is one of "CA" airnow_ca <- airnow_loadAnnual(2019) %>% monitor_filter(stateCode == "CA") # Interactive map to pick a monitor monitor_leaflet(airnow_ca) # Select a single monitor by deviceDeploymentID # - start with airnow_ca # - subset for the "Mariposa" monitorID Mariposa <- airnow_ca %>% monitor_select("9d60fd2e746019a5_840MMMPC1000") # Interactive graph to pick some time limits monitor_dygraph(Mariposa) # Trim empty periods from the beginning and end of this time series # - start with Mariposa # - trim periods with missing data Mariposa <- Mariposa %>% monitor_trimDate() monitor_timeRange(Mariposa) # Subset this time series to October through December # - start with Mariposa # - subset based on a date range Mariposa <- Mariposa %>% monitor_filterDate(20191008, 20200101) monitor_timeRange(Mariposa) # A quick plot for October through December monitor_timeseriesPlot( Mariposa, addAQI = TRUE, main = "Mariposa 2019" ) addAQILegend("topright") # Dump out a meta/data combined CSV file for a subset of Mariposa Mariposa %>% monitor_filterDate(20191101, 20191102) %>% monitor_toCSV() %>% cat(file = "") # Review the metadata at the command line dplyr::glimpse(Mariposa$meta) # Alternatively, View the data (or metadata) in the RStudio table viewer: View(Mariposa$data) # Set the output directory outputDir <- tempdir() # Dump out all of airnow_ca meta to a CSV file airnow_ca %>% monitor_toCSV(includeData = FALSE) %>% cat(file = file.path(outputDir, "airnow_CA_meta.csv")) # Dump out all of airnow_ca data to a CSV file airnow_ca %>% monitor_toCSV(includeMeta = FALSE) %>% cat(file = file.path(outputDir, "airnow_CA_data.csv")) # Alternatively, View() the metadata and data in RStudio: View(airnow_ca$meta) View(airnow_ca$data) # ============================================================================== # Everything above also applies to monitoring data from other sources. 
# Just start with one of the following:

# All of AIRSIS for 2019
#airsis_loadAnnual(2019)

# All of WRCC for 2019
#wrcc_loadAnnual(2019)
```

Finally, to emphasize the compact expressiveness of "recipe"-style syntax, the
following calculates daily mean timeseries for each California monitor and
displays the `data` dataframe in the RStudio viewer:

```{r recipe, eval = FALSE}
airnow_loadAnnual(2019) %>%
  monitor_filter(stateCode == "CA") %>%
  monitor_filterDate(20191001, 20201101, timezone = "America/Los_Angeles") %>%
  monitor_trimDate(timezone = "America/Los_Angeles") %>%
  monitor_dailyStatistic(mean) %>%
  monitor_mutate(round, 1) %>%
  monitor_getData() %>%
  View()
```
/scratch/gouwar.j/cran-all/cranData/AirMonitor/vignettes/articles/Save_Data_as_CSV.Rmd
dke.fun <- function(Vec, ...) UseMethod("dke.fun")
/scratch/gouwar.j/cran-all/cranData/Ake/R/dke.fun.R
dke.fun.default <-
  function(Vec, h, type_data = c("discrete", "continuous"),
           ker = c("BE", "GA", "LN", "RIG"), x = NULL, a0 = 0, a1 = 1, ...)
# INPUTS:
#   "ker" kernel function: "GA" gamma, "BE" extended beta,
#         "LN" lognormal, "RIG" reciprocal inverse Gaussian
#   "Vec" sample of data
#   "h"   bandwidth
#   "x"   single value or grid where the kernel estimation is computed
#   "a0"  left bound of the support
#   "a1"  right bound of the support
# OUTPUT: Returns a list containing:
#   "C_n"    the normalizing constant
#   "est.fn" vector containing the estimated function at the grid values
{
  if (is.null(x)) {
    x <- seq(min(Vec), max(Vec), length.out = 100)
  }
  # Each row of 'aux' holds the data vector; kef() recycles 'x' down the rows
  # so that entry [i, j] is the kernel at target x[i] evaluated at Vec[j].
  aux <- matrix(data = Vec, nrow = length(x), ncol = length(Vec), byrow = TRUE)
  aux <- kef(x, aux, h, "continuous", ker, a0, a1)
  res <- apply(aux, 1, mean)   # density estimate before normalization
  C <- simp_int(x, res)        # normalizing constant (Simpson integration)
  result <- res / C            # normalized density
  structure(
    list(data = Vec, n = length(Vec), hist = hist(Vec, prob = TRUE),
         eval.points = x, h = h, kernel = ker, C_n = C, est.fn = result),
    class = "dke.fun")
}
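# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the package source;
# the data and bandwidth below are invented for demonstration):
#
#   set.seed(1)
#   Vec <- rgamma(100, shape = 2, rate = 1)          # nonnegative sample
#   est <- dke.fun(Vec, h = 0.2, type_data = "continuous", ker = "GA")
#   lines(est$eval.points, est$est.fn, lty = 2)      # overlay on the histogram
#   est$C_n                                          # normalizing constant
# ---------------------------------------------------------------------------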
/scratch/gouwar.j/cran-all/cranData/Ake/R/dke.fun.default.R
hbay.fun <- function(Vec, ...) UseMethod("hbay.fun")
/scratch/gouwar.j/cran-all/cranData/Ake/R/hbay.fun.R
hbay.fun.default <- function(Vec, x = NULL, ...) {
  y1 <- sort(Vec)
  if (is.null(x)) { x <- 0:max(y1) }
  vec1 <- 0
  vec2 <- 0
  alp <- 0.5  # prior parameter
  bet <- 15   # prior parameter
  for (i in 1:length(x)) {
    # Guard against negative factorials below; y1 is sorted, so y1[1] is the
    # smallest observation.
    if (x[i] <= y1[1] + 1) {
      k <- seq(0, x[i], by = 1)
      vec1[i] <- sum((factorial(y1+1)*(y1^k)*beta(x[i]+alp-k+1, y1+bet-x[i]+1)) /
                     (factorial(y1+1-x[i])*factorial(k)*factorial(x[i]-k)*(y1+1)^(y1+1)))
      vec2[i] <- sum((factorial(y1+1)*(y1^k)*beta(x[i]+alp-k, y1+bet-x[i]+1)) /
                     (factorial(y1+1-x[i])*factorial(k)*factorial(x[i]-k)*(y1+1)^(y1+1)))
    } else {
      vec1[i] <- 0
      vec2[i] <- 0
    }
  }
  result <- sum(vec1) / sum(vec2)
  structure(list(hby = result), class = "hbay.fun")
}
/scratch/gouwar.j/cran-all/cranData/Ake/R/hbay.fun.default.R
hcvc.fun <- function(Vec, ...) UseMethod("hcvc.fun")
/scratch/gouwar.j/cran-all/cranData/Ake/R/hcvc.fun.R
hcvc.fun.default <- function(Vec, bw = NULL, type_data, ker, a0 = 0, a1 = 1, ...)
{
  if (is.null(bw)) {
    bw <- seq((max(Vec)-min(Vec))/200, (max(Vec)-min(Vec))/2, length.out = 100)
  }
  result1 <- 0
  result2 <- 0
  x <- seq(min(Vec), max(Vec), length.out = 100)
  n2 <- length(Vec)
  Dak <- Vectorize(kef, vectorize.args = c('x', 't'))
  for (k in 1:length(bw)) {
    m1 <- outer(x, Vec, Dak, bw[k], "continuous", ker, a0, a1)
    m2 <- outer(Vec, Vec, Dak, bw[k], "continuous", ker, a0, a1)
    res1 <- apply(m1, 1, mean)
    diag(m2) <- 0                      # leave-one-out: drop the i = j terms
    res2 <- apply(m2, 1, sum)
    result1[k] <- simp_int(x, res1^2)  # integral of the squared estimate
    result2[k] <- (2/((n2-1)*n2)) * sum(res2)
  }
  CV <- result1 - result2              # cross-validation criterion
  index <- which.min(CV)               # the optimal bandwidth minimizes CV
  hcv <- bw[index]
  structure(list(hcv = hcv, seq_h = bw, CV = CV), class = "hcvc.fun")
}
/scratch/gouwar.j/cran-all/cranData/Ake/R/hcvc.fun.default.R
hcvd.fun <- function(Vec, ...) UseMethod("hcvd.fun")
/scratch/gouwar.j/cran-all/cranData/Ake/R/hcvd.fun.R
hcvd.fun.default <- function(Vec, seq_bws = NULL, ker = c("bino", "triang", "dirDU"), a = 1, c = 2, ...)
{
  if (is.null(seq_bws)) {
    if (ker == "bino") {
      seq_bws <- seq((max(Vec)-min(Vec))/500, 1, length.out = 100)
    } else {
      seq_bws <- seq((max(Vec)-min(Vec))/200, (max(Vec)-min(Vec))/2, length.out = 100)
    }
  }
  result1 <- 0
  result2 <- 0
  if (ker == "dirDU") {
    x <- 0:(max(Vec))      # the values on the support must go up to the max
  } else {
    x <- 0:(max(Vec) + 2)  # and up to two points after the max for other kernels
  }
  n2 <- length(Vec)
  Dak <- Vectorize(kef, vectorize.args = c('x', 't'))
  for (k in 1:length(seq_bws)) {
    # 'a' and 'c' are passed by name so they reach kef()'s 'a' (arm) and 'c'
    # (number of categories) arguments rather than its 'a0'/'a1' slots.
    m1 <- outer(x, Vec, Dak, seq_bws[k], "discrete", ker, a = a, c = c)
    m2 <- outer(Vec, Vec, Dak, seq_bws[k], "discrete", ker, a = a, c = c)
    res1 <- apply(m1, 1, mean)
    diag(m2) <- 0                    # leave-one-out: drop the i = j terms
    res2 <- apply(m2, 1, sum)
    result1[k] <- sum(res1^2)
    result2[k] <- (2/((n2-1)*n2)) * sum(res2)
  }
  CV <- result1 - result2            # cross-validation criterion
  index <- which.min(CV)             # the optimal bandwidth minimizes CV
  hcv <- seq_bws[index]
  structure(list(hcv = hcv, seq_h = seq_bws, CV = CV), class = "hcvd.fun")
}
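# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the counts below are invented):
#
#   Vec <- c(0, 1, 1, 2, 2, 2, 3, 4, 4, 6)      # small count sample
#   res <- hcvd.fun(Vec, ker = "bino")          # cross-validated bandwidth
#   res$hcv                                     # optimal h
#   plot(res$seq_h, res$CV, type = "l")         # CV curve vs. bandwidth
# ---------------------------------------------------------------------------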
/scratch/gouwar.j/cran-all/cranData/Ake/R/hcvd.fun.default.R
hcvreg.fun <- function(Vec, ...) UseMethod("hcvreg.fun")
/scratch/gouwar.j/cran-all/cranData/Ake/R/hcvreg.fun.R
hcvreg.fun.default <- function(Vec,y,type_data=c("discrete","continuous"), ker=c("bino","triang","dirDU","BE","GA","LN","RIG"), h=NULL, a0=0, a1=1, a=1, c=2, ...) { if(is.null(h)) { h=seq(0.001,(max(Vec)-min(Vec))/2, length.out=1000) } x<-Vec n <- length(x) m=matrix(0,n,length(Vec)) m2=matrix(0,n,length(Vec)) A=rep(0,length(h)) for(k in 1:length(h)){ for(i in 1:n){ m[i,]= kef(x[i],Vec,h[k],type_data,ker,a0,a1,a,c) m2[i,]= m[i,]*y } diag(m)<-0 diag(m2)<-0 G<-apply(m2,1, sum) E<-apply(m,1, sum) A[k]<-(1/length(Vec))*sum((y-(G/E))^2) } index<-which.min(A) hcv<-h[index] structure(list(kernel = ker,hcv=hcv,CV=A,seq_bws=h),class="hcvreg.fun") }
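# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the data below are invented):
#
#   x <- seq(0.1, 5, length.out = 50)
#   y <- sin(x) + rnorm(50, sd = 0.1)
#   res <- hcvreg.fun(x, y, type_data = "continuous", ker = "GA")
#   res$hcv   # cross-validated bandwidth for the associated-kernel regression
# ---------------------------------------------------------------------------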
/scratch/gouwar.j/cran-all/cranData/Ake/R/hcvreg.fun.default.R
kef <- function(x, t, h, type_data = c("discrete", "continuous"),
                ker = c("bino", "triang", "dirDU", "BE", "GA", "LN", "RIG"),
                a0 = 0, a1 = 1, a = 1, c = 2) {
###########################################################################
# INPUTS:
#   "x"  : the target.
#   "t"  : the single or the grid value where the function is computed.
#   "h"  : the bandwidth parameter.
#   "ker": the kernel: "bino" binomial, "triang" discrete triangular,
#          "dirDU" Dirac discrete uniform, "BE" extended beta, "GA" gamma,
#          "LN" lognormal, "RIG" reciprocal inverse Gaussian.
#   "a0" : the left bound of the support of the distribution for the extended
#          beta kernel. Default value is 0.
#   "a1" : the right bound of the support of the distribution for the extended
#          beta kernel. Default value is 1.
#   "a"  : the arm, used only for the discrete triangular distribution.
#   "c"  : the number of categories, used only for the DiracDU kernel.
# OUTPUT:
#   Returns the discrete associated kernel value at t.
###########################################################################
  if (missing(type_data))
    stop("argument 'type_data' is omitted")
  if ((type_data == "discrete") & (ker == "GA" || ker == "LN" || ker == "BE" || ker == "RIG"))
    stop(" Not appropriate kernel for type_data")
  if ((type_data == "continuous") & (ker == "bino" || ker == "triang" || ker == "dirDU"))
    stop(" Not appropriate kernel for 'type_data'")
  if ((type_data == "discrete") & missing(ker)) ker <- "bino"
  if ((type_data == "continuous") & missing(ker)) ker <- "GA"

  # Discrete triangular kernel
  dtrg <- function(x, t, h, a) {
    if (a == 0) {
      result <- t
      Logic1 <- (t == x)
      Logic0 <- (t != x)
      result[Logic1] <- 1
      result[Logic0] <- 0
    }
    else {
      u <- 0:a
      u <- sum(u^h)
      D <- (2*a + 1)*(a + 1)^h - 2*u  # normalizing constant
      result <- t
      Logic0 <- ((t >= (x - a)) & (t <= (x + a)))  # support Sx = {x-a, ..., x+a}
      Logic1 <- ((t < (x - a)) | (t > (x + a)))
      tval <- result[Logic0]
      result[Logic1] <- 0
      result[Logic0] <- ((a + 1)^h - (abs(tval - x))^h)/D  # discrete triangular
    }
    return(result)
  }

  # DiracDU (Aitchison and Aitken) kernel
  diracDU <- function(x, t, h, c) {
    result <- t
    Logic1 <- (t == x)
    Logic0 <- (t != x)
    result[Logic1] <- (1 - h)
    result[Logic0] <- (h/(c - 1))
    # Explicit return: otherwise the value of the last assignment (a scalar)
    # would be returned instead of the full vector of kernel values.
    return(result)
  }

  if (ker == "bino") {
    result <- t
    Logic0 <- (t <= x + 1)  # support Sx = {0, 1, ..., x+1}
    Logic1 <- (x + 1 < t)
    tval <- result[Logic0]
    result[Logic1] <- 0
    result[Logic0] <- dbinom(tval, x + 1, (x + h)/(x + 1))  # the binomial kernel
  }
  else if (ker == "triang") {
    result <- dtrg(x, t, h, a)     # the discrete triangular kernel
  }
  else if (ker == "dirDU") {
    result <- diracDU(x, t, h, c)  # the Dirac discrete uniform kernel
  }

  if (ker == "BE") {
    result <- t
    Logic0 <- ((a0 <= t) & (t <= a1))  # support [a0, a1]
    Logic1 <- ((t < a0) | (a1 < t))
    tval <- result[Logic0]
    result[Logic1] <- 0
    result[Logic0] <- ((1/((a1 - a0)^(1 + h^(-1))*beta(((x - a0)/((a1 - a0)*h)) + 1, ((a1 - x)/((a1 - a0)*h)) + 1))))*((tval - a0)^((x - a0)/((a1 - a0)*h)))*((a1 - tval)^((a1 - x)/((a1 - a0)*h)))
  }
  else if (ker == "GA") {
    result <- t
    Logic0 <- (0 <= t)  # support [0, Inf)
    Logic1 <- (t < 0)
    tval <- result[Logic0]
    result[Logic1] <- 0
    result[Logic0] <- dgamma(tval, (x/h) + 1, 1/h)
  }
  else if (ker == "LN") {
    result <- t
    Logic0 <- (0 <= t)  # support [0, Inf)
    Logic1 <- (t < 0)
    tval <- result[Logic0]
    result[Logic1] <- 0
    result[Logic0] <- dlnorm(tval, meanlog = log(x) + h^2, sdlog = h)
  }
  else if (ker == "RIG") {
    result <- t
    Logic0 <- (0 < t)   # support (0, Inf)
    Logic1 <- (t <= 0)
    tval <- result[Logic0]
    result[Logic1] <- 0
    eps <- sqrt(x^2 + x*h)  # see Libengué (2013)
    result[Logic0] <- (1/sqrt(2*pi*h*tval))*exp((-eps/(2*h))*((tval/eps) - 2 + (eps/tval)))
  }
  return(result)
}
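# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only):
#
#   # Binomial kernel weights at t = 0,...,6 for target x = 3, bandwidth 0.3
#   kef(3, 0:6, h = 0.3, type_data = "discrete", ker = "bino")
#
#   # Gamma kernel evaluated on a grid for target x = 2, bandwidth 0.5
#   kef(2, seq(0, 6, by = 0.5), h = 0.5, type_data = "continuous", ker = "GA")
# ---------------------------------------------------------------------------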
/scratch/gouwar.j/cran-all/cranData/Ake/R/kef.R
kern.fun <- function(x,...) UseMethod("kern.fun")
/scratch/gouwar.j/cran-all/cranData/Ake/R/kern.fun.R
kern.fun.default <- function(x,t,h,type_data=c("discrete","continuous"), ker=c("bino","triang","dirDU","BE","GA","LN","RIG"),a0=0,a1=1,a=1,c=2,...) { if (missing(type_data)) stop("argument 'type_data' is omitted") if ((type_data=="discrete") & (ker=="GA"||ker=="LN"||ker=="BE" ||ker=="RIG")) stop(" Not appropriate kernel for type_data") if ((type_data=="continuous") & (ker=="bino"||ker=="triang"||ker=="dirDU")) stop(" Not appropriate kernel for 'type_data'") if ((type_data=="discrete") & missing(ker)) ker<-"bino" if ((type_data=="continuous") & missing(ker)) ker<-"GA" kx <- kef(x,t,h,type_data,ker,a0,a1,a,c) structure(list(kernel = ker,x=x,t=t,kx=kx),class="kern.fun") }
/scratch/gouwar.j/cran-all/cranData/Ake/R/kern.fun.default.R
kpmfe.fun <- function(Vec, ...) UseMethod("kpmfe.fun")
/scratch/gouwar.j/cran-all/cranData/Ake/R/kpmfe.fun.R
kpmfe.fun.default <- function(Vec, h, type_data = c("discrete", "continuous"),
                              ker = c("bino", "triang", "dirDU"),
                              x = NULL, a = 1, c = 2, ...)
{
###########################################################################
# INPUTS:
#   "Vec" : sample of data.
#   "h"   : bandwidth.
#   "ker" : the kernel function: "dirDU" DiracDU, "bino" binomial,
#           "triang" discrete triangular.
#   "a"   : the arm, used only for the discrete triangular kernel.
#           The default value is 1.
#   "c"   : the number of categories in the Aitchison and Aitken kernel,
#           used only for DiracDU. The default value is 2.
# OUTPUT: Returns a list containing:
#   "n"       : the number of observations.
#   "support" : the support of fn.
#   "C_n"     : the normalizing constant.
#   "ISE_0"   : the integrated squared error when using the naive
#               distribution instead of fn.
#   "f_0"     : the couples (x, f_0(x)).
#   "f_n"     : the couples (x, f_n(x)).
#   "f0"      : the empirical p.m.f.
#   "fn"      : the estimated p.m.f. containing estimated values after
#               normalization.
###########################################################################
  V <- data.frame(table(Vec), row.names = NULL)
  N <- V$Freq
  if (is.null(x)) {
    if (ker == "dirDU") { x <- 0:(max(Vec)) }
    else { x <- 0:(max(Vec) + 2) }
  }
  t1 <- rep(0, length(x))
  t2 <- rep(0, length(x))
  n <- length(x)
  f0 <- c(N/sum(N), rep(0, length(x) - length(N)))  # empirical p.m.f.
  m <- matrix(0, n, length(Vec))
  for (i in 1:n) {
    # 'a' and 'c' are passed by name so they reach kef()'s 'a' (arm) and 'c'
    # (number of categories) arguments rather than its 'a0'/'a1' slots.
    m[i, ] <- kef(x[i], Vec, h, type_data, ker, a = a, c = c)
  }
  res <- apply(m, 1, mean)
  result <- res/sum(res)        # normalized estimate
  E0 <- sum((result - f0)^2)    # ISE against the empirical p.m.f.
  for (i in 1:n) {
    t1[i] <- paste(x[i], ";", f0[i])
    t2[i] <- paste(x[i], ";", result[i])
  }
  structure(
    list(data = Vec, n = length(Vec), eval.points = x, h = h, kernel = ker,
         C_n = sum(res), ISE_0 = E0, f_0 = t1, f_n = t2, f0 = f0,
         est.fn = result),
    class = "kpmfe.fun")
}
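# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the counts below are invented):
#
#   Vec <- c(0, 1, 1, 2, 2, 2, 3, 3, 4)
#   est <- kpmfe.fun(Vec, h = 0.2, type_data = "discrete", ker = "bino")
#   est$C_n      # normalizing constant (close to 1 for a good fit)
#   est$est.fn   # estimated p.m.f. on the support 0:(max(Vec) + 2)
# ---------------------------------------------------------------------------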
/scratch/gouwar.j/cran-all/cranData/Ake/R/kpmfe.fun.default.R