#' @title Cross Covariance of a TS Pair #' @description #' Calculates the Cross-covariance between two wavelet transfomations (dwt or modwt) #' @param x A \code{vector} with dimensions N x 1. #' @param y A \code{vector} with dimensions N x 1. #' @param decomp A \code{string} that indicates whether to use the "dwt" or "modwt" decomposition. #' @param filter A \code{string} that specifies what wavelet filter to use. #' @param nlevels An \code{integer} that indicates the level of decomposition. It must be less than or equal to floor(log2(length(x))). #' @return Returns a \code{list} of a \code{matrix} containing cross-covariance, variance of each wavelet cross-covariance and its 95% CI. #' @importFrom coda spectrum0 #' @importFrom stats qnorm #' @details #' If \code{nlevels} is not specified, it is set to \eqn{\left\lfloor {{{\log }_2}\left( {length\left( x \right)} \right)} \right\rfloor}{floor(log2(length(x)))} #' @export #' @author Justin Lee wccv_pair = function(x, y, decomp = "modwt", filter = "haar", nlevels = NULL){ if (sum(class(x) %in% "gts") == 1){ x = as.numeric(x) } if (sum(class(y) %in% "gts") == 1){ y = as.numeric(y) } if(is.null(x) || is.null(y)){ stop("`x` or `y` must contain a value.") }else if((is.data.frame(x) || is.matrix(x)) || is.data.frame(y) || is.matrix(y)){ if(ncol(x) > 1) stop("There must be only one column of data supplied.") } if(length(x)!=length(y)){ stop("`x` and `y` must be of same length.") } if(decomp == "modwt"){ if(is.null(nlevels)){ nlevels = floor(log2(length(x))-1) } f = modwt }else if(decomp == "dwt"){ if(is.null(nlevels)){ nlevels = floor(log2(length(x))) } f = dwt } coef1 = f(x = x, nlevels = nlevels, filter = filter) coef2 = f(x = y, nlevels = nlevels, filter = filter) # Slightly inefficient! # Better method may be to implement spectrum0 in Rcpp product = mapply("*", coef1, coef2, SIMPLIFY = FALSE) const = 1/sapply(product, length) variance = const * unlist(sapply(product, spectrum0, max.freq = 0, order = 2, max.length = 130)) lower = sqrt(unname(variance)) * qnorm(0.025) upper = sqrt(unname(variance)) * qnorm(0.975) obj = compute_cov_cpp(coef1 = coef1, coef2 = coef2, variance = variance, lower = lower, upper = upper) colnames(obj) = c("Cross-Covariance", "Variance", "Lower Bound", "Upper Bound") ret = list(obj) mostattributes(ret) = list(filter = filter, J = nlevels, N = length(x), class=c("wccv_pair","list","matrix")) return(ret) } #' @title Cross Covariance of Matrix #' @description #' Calculates the Cross-covariance between multiple wavelet transfomations (dwt or modwt) #' @param x A \code{vector} with dimensions N x M. #' @param decomp A \code{string} that indicates whether to use the "dwt" or "modwt" decomposition. #' @param filter A \code{string} that specifies what wavelet filter to use. #' @param nlevels An \code{integer} that indicates the level of decomposition. It must be less than or equal to floor(log2(length(x))). #' @return Returns a \code{matrix} of \code{lists} of all the possible pair cross-covariance, variance of each wavelet cross-covariance and its 95% CI. 
#' @details #' If \code{nlevels} is not specified, it is set to \eqn{\left\lfloor {{{\log }_2}\left( {length\left( x \right)} \right)} \right\rfloor}{floor(log2(length(x)))} #' @export #' @author Justin Lee wccv = function(x, decomp = "modwt", filter = "haar", nlevels = NULL){ if (sum(class(x) %in% "gts") == 1){ x = as.numeric(x) } if(is.null(x)) stop("`x` must contain a value.") if(decomp == "modwt"){ if(is.null(nlevels)){ nlevels = floor(log2(nrow(x))-1) } }else if(decomp == "dwt"){ if(is.null(nlevels)){ nlevels = floor(log2(nrow(x))) } } mat = matrix(list(), ncol(x), ncol(x)) for(i in seq_len(ncol(x))){ j = i for(j in i:ncol(x)){ mat[i,j] = wccv_pair(x[,i], x[,j], decomp = decomp, filter = filter, nlevels = nlevels) } } # mostattributes(mat) = list(filter = filter, class=c("wccv","matrix","list")) return(mat) } #' @title Plot Cross Covariance Pair #' @description #' Plots results of the a wccv_pair list in which additional parameters can be specified #' @author Justin Lee, Haotian Xu, and Stephane Guerrier #' @method plot wccv_pair #' @keywords internal #' @export #' @examples #' n = 10^5 #' Xt = cumsum(rnorm(n, 0, 0.01)) #' Wt = Xt + rnorm(n) #' Yt = Xt + rnorm(n) #' wcov = wccv_pair(Wt, Yt) #' plot(wcov) plot.wccv_pair = function(x, theo.wccv = NULL, main = NULL, xlab = NULL, ylab = NULL, units = NULL, col_wccv = NULL, col_ci = NULL, nb_ticks_x = NULL, nb_ticks_y = NULL, ...){ J = attr(x,"J") N = attr(x, "N") scales = scales_cpp(J) x = x[[1]] # simplify # Include all CI values combCI = c(x[,3], x[,4]) abscombCI = abs(combCI) # Labels if (is.null(xlab)){ if (is.null(units)){ xlab = expression(paste("Scale ", tau, sep ="")) }else{ xlab = bquote(paste("Scale ", "", tau, " [", .(units), "]", sep = "")) } } if (is.null(ylab)){ if(is.null(units)){ ylab = expression(paste("Wavelet Cross Covariance ", "", (nu^2), "", sep = "")) }else{ ylab = bquote(paste("Wavelet Cross Covariance ", "", (nu^2), " [", .(units)^2, "]", sep = "")) } } # Line and CI colors if(is.null(col_wccv)){ col_wccv = "darkblue" } if(is.null(col_ci)){ col_ci = hcl(h = 210, l = 65, c = 100, alpha = 0.2) } if(is.null(main)){ main = "Sample Wavelet Cross-Covariance" } # Axes if (is.null(nb_ticks_x)){ nb_ticks_x = 4 } if (is.null(nb_ticks_y)){ nb_ticks_y = 7 } # set ticks and labels tick_y_max = ceiling(max(log10(abscombCI))) tick_y_min = floor(min(log10(abscombCI))) tick_y_step = 2*(tick_y_max - tick_y_min)/(nb_ticks_y - 1) if (tick_y_step < 0.75){ tick_y_step = 0.5 }else{ tick_y_step = round(tick_y_step) } y_at_lower = y_at_upper = y_at = seq(tick_y_min, tick_y_max, by = tick_y_step) upper_labels = sapply(y_at_upper, function(i) as.expression(bquote(10^ .(i)))) lower_labels = sapply(y_at_lower, function(i) as.expression(bquote(-10^ .(i)))) m = length(y_at_lower) ticks_y = c(-(m:1), 0, 1:m) labels = c(rev(lower_labels), 0, upper_labels) x_high = ceiling(log10(scales[J])) x_low = floor(log10(scales[1])) x_ticks = seq(x_low, x_high, by = 1) if (length(x_ticks) > nb_ticks_x){ x_ticks = x_low + ceiling((x_high - x_low)/(nb_ticks_x + 1))*(0:nb_ticks_x) } x_labels = sapply(x_ticks, function(i) as.expression(bquote(10^ .(i)))) x_at = 10^x_ticks x_actual_length = sum((x_at < x_high)*(x_at > x_low)) if (x_actual_length < 4){ x_low = floor(log2(scales[1])) x_high = ceiling(log2(scales[J])) x_ticks = seq(x_low, x_high, by = 1) if (length(x_ticks) > 8){ x_ticks = seq(x_low, x_high, by = 2) } x_labels = sapply(x_ticks, function(i) as.expression(bquote(2^ .(i)))) x_at = 2^x_ticks } plot(NA, log = "x", xlim = c(scales[1], scales[J]), ylim = 
c(min(ticks_y), max(1.09*ticks_y)), xaxt = "n", yaxt = "n", main = main, xlab = xlab, ylab = ylab, ann = FALSE, bty = "n") # Main plot win_dim = par("usr") par(new = TRUE) plot(NA, log = "x", xlim = c(scales[1], scales[J]), ylim = c(win_dim[3], win_dim[4] + 0.09*(win_dim[4] - win_dim[3])), xaxt = "n", yaxt = "n", main = main, xlab = xlab, ylab = ylab, ann = FALSE, bty = "n") win_dim = par("usr") # Add grid abline(v = x_at, lty = 1, col = "grey95") abline(h = ticks_y, lty = 1, col = "grey95") abline(h = 0) # Add title x_vec = 10^c(win_dim[1], win_dim[2], win_dim[2], win_dim[1]) y_vec = c(win_dim[4], win_dim[4], win_dim[4] - 0.09*(win_dim[4] - win_dim[3]), win_dim[4] - 0.09*(win_dim[4] - win_dim[3])) polygon(x_vec, y_vec, col = "grey95", border = NA) text(x = 10^mean(c(win_dim[1], win_dim[2])), y = (win_dim[4] - 0.09/2*(win_dim[4] - win_dim[3])), main) lines(x_vec[1:2], rep((win_dim[4] - 0.09*(win_dim[4] - win_dim[3])),2), col = 1) box() axis(2, at = ticks_y, labels = labels) axis(1, at = x_at, labels = x_labels) # Add CI y_low_ci = wccv_get_y(x[,3], tick_y_min, tick_y_step) y_high_ci = wccv_get_y(x[,4], tick_y_min, tick_y_step) polygon(c(scales, rev(scales)), c(y_low_ci, rev(y_high_ci)), border = NA, col = col_ci) # Add wccv y_wccv = wccv_get_y(x[,1], tick_y_min, tick_y_step) lines(x = scales, y = y_wccv, type = "l", col = col_wccv, pch = 16, cex = 1.25) lines(x = scales, y = y_wccv, type = "p", col = col_wccv, pch = 16) # not sure what this is for if (is.null(theo.wccv) == F){ # log.theo.positive = sapply(theo.wccv, function(x){ifelse(x < 0, NA, log(x))}) # log.theo.negative = sapply(theo.wccv, function(x){ifelse(x > 0, NA, log(-x))}) # lines(x = scales, y = log.theo.positive, lty = 3) lines(scales, wccv_get_y(theo.wccv, tick_y_min, tick_y_step), col="orange", lty = 3, lwd = 2) } # # not sure what this is for # if (is.null(theo.wccv) == F){ # lines(x = scales, y = -log.theo.negative, lty = 3) # } } #' @title Mapping to log10 scale #' @description #' Map x to the value in log10 scale #' @export #' @param x A \code{vector} with dimensions J x 1. #' @param tick_y_min A \code{negtive integer} the minimum power of 10, which corresponds to the smallest scale on y-axis. #' @param tick_y_step An \code{integer} indicating the increment of the sequence. #' @return A \code{field<vec>} that contains values in log10 scale. #' @details #' \code{tick_y_min} is usually chosen as \eqn{floor(min(log10(abs(x))))} #' @author James Balamuta and Justin Lee #' @examples #' x = 2^(-1:-9) #' y.min = floor(min(log10(abs(x)))) #' y.step = 2 #' wccv_get_y(x, y.min, y.step) wccv_get_y = function(x, tick_y_min, tick_y_step){ if (sum(class(x) %in% "gts") == 1){ x = as.numeric(x) } n = length(x) for (i in 1:n){ if (x[i] > 0){ x[i] = (log10(x[i]) - tick_y_min)/tick_y_step + 1 }else{ x[i] = -(log10(abs(x[i])) - tick_y_min)/tick_y_step - 1 } } x }
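# Illustrative usage sketch, assuming the wv package is installed and attached.
# The roxygen block for `wccv()` above carries no @examples entry; this sketch
# mirrors the simulation used in the plot.wccv_pair example and exercises both
# the pairwise and the matrix interfaces. Data, seed, and sizes are arbitrary.
library(wv)
set.seed(1)
n  = 2^10
Xt = cumsum(rnorm(n, 0, 0.01))   # latent random walk
Wt = Xt + rnorm(n)               # first noisy measurement of Xt
Yt = Xt + rnorm(n)               # second noisy measurement of Xt

# Pairwise wavelet cross-covariance with 95% confidence bounds at each scale
wcov = wccv_pair(Wt, Yt)
plot(wcov)

# All pairwise cross-covariances of a multivariate series (upper triangle filled)
wccv(cbind(Wt, Yt, Xt))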
/scratch/gouwar.j/cran-all/cranData/wv/R/covwv.R
#' @title Wavelet variance of IMU Data from a navchip sensor #' @description This data set contains wavelet variance of gyroscope and accelerometer data from a navchip sensor. #' @format A list of the following elements: #' \itemize{ #' \item "sensor": Name of the sensor. #' \item "freq": The frequency at which the error signal is measured. #' \item "n": Sample size of the data. #' \item "type": The types of sensors considered in the data. #' \item "axis": The axes of sensors considered in the data. #' \item "wvar": A list containing the computed wavelet variance based on the data. #' } #' @source The IMU data of the navchip sensor comes from Geodetic Engineering Laboratory (TOPO) and Swiss Federal Institute of Technology Lausanne (EPFL). "navchip_wv" #' @title Wavelet variance of IMU Data from an ADIS 16405 sensor #' @description This data set contains wavelet variance of gyroscope and accelerometer data from an ADIS 16405 sensor. #' @format A list of the following elements: #' \itemize{ #' \item "sensor": Name of the sensor. #' \item "freq": The frequency at which the error signal is measured. #' \item "n": Sample size of the data. #' \item "type": The types of sensors considered in the data. #' \item "axis": The axes of sensors considered in the data. #' \item "wvar": A list containing the computed wavelet variance based on the data. #' } #' @source The IMU data comes from Department of Geomatics Engineering, University of Calgary. "adis_wv" #' @title Wavelet variance of IMU Data from a KVH1750 IMU sensor #' @description This data set contains wavelet variance of gyroscope and accelerometer data from an KVH1750 sensor. #' @format A list of the following elements: #' \itemize{ #' \item "sensor": Name of the sensor. #' \item "freq": The frequency at which the error signal is measured. #' \item "n": Sample size of the data. #' \item "type": The types of sensors considered in the data. #' \item "axis": The axes of sensors considered in the data. #' \item "wvar": A list containing the computed wavelet variance based on the data. #' } #' @source The IMU data comes from Department of Geomatics Engineering, University of Calgary. "kvh1750_wv" #' @title Wavelet variance of IMU Data from IMAR Gyroscopes #' @description This data set contains wavelet variance of IMAR gyroscopes data. #' @format A list of the following elements: #' \itemize{ #' \item "sensor": Name of the sensor. #' \item "freq": The frequency at which the error signal is measured. #' \item "n": Sample size of the data. #' \item "type": The types of sensors considered in the data. #' \item "axis": The axes of sensors considered in the data. #' \item "wvar": A list containing the computed wavelet variance based on the data. #' } #' @source The IMU data comes from Geodetic Engineering Laboratory (TOPO) and Swiss Federal Institute of Technology Lausanne (EPFL). "imar_wv" #' @title Wavelet variance of IMU Data from a LN200 sensor #' @description This data set contains wavelet variance of LN200 gyroscope and accelerometer data. #' @format A list of the following elements: #' \itemize{ #' \item "sensor": Name of the sensor. #' \item "freq": The frequency at which the error signal is measured. #' \item "n": Sample size of the data. #' \item "type": The types of sensors considered in the data. #' \item "axis": The axes of sensors considered in the data. #' \item "wvar": A list containing the computed wavelet variance based on the data. 
#' } #' @source The IMU data comes from Geodetic Engineering Laboratory (TOPO) and Swiss Federal Institute of Technology Lausanne (EPFL). "ln200_wv"
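# Illustrative usage sketch, assuming the wv package is installed and attached.
# The bundled wavelet-variance objects documented above can be loaded and
# inspected directly; plot() dispatches to plot.imu_wvar() for these objects,
# as in the plot.imu_wvar example elsewhere in the package.
library(wv)
data("navchip_wv")
str(navchip_wv, max.level = 1)   # sensor, freq, n, type, axis, wvar
data("kvh1750_wv")
plot(kvh1750_wv)                 # same call as in the plot.imu_wvar example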
/scratch/gouwar.j/cran-all/cranData/wv/R/data.R
#' @title Discrete Wavelet Transform #' @name dwt #' @description #' Calculation of the coefficients for the discrete wavelet transformation #' @export #' @param x A \code{vector} with dimensions N x 1. #' @param nlevels A \code{integer} indicating the \eqn{J} levels of decomposition. #' @param filter A \code{string} indicating the filter name #' @return A \code{field<vec>} that contains the wavelet coefficients for each decomposition level #' @details #' Performs a level \eqn{J} decomposition of the time series using the pyramid algorithm. #' The default \eqn{J} is determined by \eqn{floor\left(log_2 \left(length\left(x\right)\right)\right)}{floor(log2(length(x)))} #' @author James Balamuta, Justin Lee and Stephane Guerrier #' @examples #' set.seed(999) #' x = rnorm(2^8) #' ret = dwt(x) #' #' summary(ret) #' #' plot(ret) dwt = function(x, nlevels = floor(log2(length(x))), filter = "haar") { if (sum(class(x) %in% "gts") == 1){ x = as.numeric(x) } if(is.vector(x) && length(x) %% 2^nlevels != 0){ warning("The data has been truncated so that it is divisible by `nlevels` (e.g. 2^*)") x = x[1:2^nlevels] }else if(is.matrix(x) || is.data.frame(x)){ if(ncol(x) != 1){ stop("Only one column is allowed to be decomposed at a time.") } if(nrow(x) %% nlevels !=0){ warning("The data has been truncated so that it is divisible by `nlevels` (e.g. 2^*)") idx = 1:2^nlevels x[idx,1] = x[idx,1] } } ret = dwt_cpp(x = x, filter_name = filter, nlevels) # call to C++ version of dwt mostattributes(ret) = list(J=nrow(ret), filter = filter, class=c("dwt","list")) ret } #' @title Print Discrete Wavelet Transform #' @name print.dwt #' @description #' Prints the results of the modwt list #' @method print dwt #' @export #' @param x A \code{dwt} object #' @param ... further arguments passed to or from other methods. #' @return Prints the dwt decomposition #' @author James Balamuta and Nathanael Claussen #' @keywords internal #' @examples #' set.seed(999) #' x = rnorm(2^8) #' print(dwt(x)) print.dwt=function(x, ...){ NextMethod("print") } #' @title Summary Discrete Wavelet Transform #' @name summary.dwt #' @description #' Prints DWT object in a concise format #' @method summary dwt #' @importFrom utils head #' @export #' @keywords internal #' @param object A \code{dwt} object #' @param ... additional arguments affecting the summary produced. #' @return Prints the dwt matrix decomposition #' @author Nathanael Claussen and Justin Lee #' @examples #' set.seed(999) #' x = rnorm(2^8) #' summary(dwt(x)) summary.dwt=function(object, ...) { cat("\n") cat("Results of DWT using",attr(object,"filter"),"filter with",attr(object, "J"),"levels:\n") cat("Displaying only the first 6 coefficients...\n") y = as.list(object) j = length(y) for( i in 1:j ) { cat("Level",i,"Wavelet Coefficients\n", c(head(y[[i]])), "...\n") } } #' @title Plot Discrete Wavelet Transform #' @name plot.dwt #' @description #' Plots results of the dwt list in which additional parameters can be specified #' @method plot dwt #' @export #' @param x A \code{dwt} object. #' @param index A \code{vector} containing the indices to scales to be included in #' the graph. By default \code{index = 1:(min(c(J,4)))}, where \code{J} denotes the #' number of scales in \code{y}. #' @param couleur A \code{vector} of colors of the same size as \code{index} used #' for the different scales depicted in the graph. If \code{couleur} contains a single #' value the the same color will be used for all scales. #' @param ... additional arguments affecting the plot produced. 
#' @author Justin Lee and Stephane Guerrier #' @keywords internal #' @examples #' # Simulate a Gaussian white noise #' n = 10^3 #' Xt = rnorm(n) #' #' # dwt #' Yt = dwt(Xt) #' #' # Graph examples #' plot(Yt) #' plot(Yt, index = c(1,4,5,6,8,2)) #' plot(Yt, index = c(1,4,5,6), couleur = "blue") #' plot(Yt, index = c(1,4,5,6), couleur = rep(c("blue","yellow"),2)) plot.dwt = function(x, index = NULL, couleur = NULL, ...){ J = attr(x,"J") if (is.null(index)){ index = 1:(min(c(4,J))) }else{ if (max(index) > J || min(index) < 1){ stop("Incorrect index specified") } } nb_plot = length(index) if (is.null(couleur)){ hues = seq(15, 375, length = nb_plot + 1) couleur = hcl(h = hues, l = 65, c = 100, alpha = 1)[seq_len(nb_plot)] }else{ if (length(couleur) == 1 || length(couleur) != nb_plot){ couleur = rep(couleur[1],nb_plot) } } old_pars = par(mfrow = c(nb_plot,1), mar = c(0,3,0,0), oma = c(5,2,1,1)) on.exit(par(old_pars)) x_range = length(x[[1]]) for (i in seq_len(nb_plot)){ current_time_series = x[[index[i]]] plot(NA, xlim = c(1,x_range), ylim = range(current_time_series), bty = "n", axes = FALSE) box(col = "lightgrey") grid() axis(2) mtext(paste("Scale ",index[i], sep = ""), side = 2, line = 3, cex = 0.8) lines(unlist(current_time_series), col = couleur[i]) # Add bottom axis if (i == nb_plot){ axis(1) mtext("Time", side = 1, line = 3, cex = 0.8) } } }
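# Illustrative usage sketch, assuming the wv package is installed and attached.
# The DWT is a decimated transform: a level-J decomposition of N observations
# stores N / 2^j coefficients at scale j. Seed and sample size are arbitrary.
library(wv)
set.seed(999)
x = rnorm(2^8)
ret = dwt(x, nlevels = 4)
sapply(ret, length)   # 128 64 32 16 coefficients at scales 1 to 4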
/scratch/gouwar.j/cran-all/cranData/wv/R/dwt.R
#' @title Maximum Overlap Discrete Wavelet Transform #' @description #' Calculates the coefficients for the discrete wavelet transformation #' @param x A \code{vector} with dimensions N x 1. #' @param nlevels A \code{integer} indicating the \eqn{J} levels of decomposition. #' @param filter A \code{string} indicating the filter name #' @return A \code{field<vec>} that contains the wavelet coefficients for each decomposition level #' @details #' Performs a level \eqn{J} decomposition of the time series using the pyramid algorithm. #' The default \eqn{J} is determined by \eqn{floor\left(log_2 \left(length\left(x\right)\right)\right)}{floor(log2(length(x)))} #' @author James Balamuta, Justin Lee and Stephane Guerrier #' @export #' @examples #' set.seed(999) #' x = rnorm(100) #' ret = modwt(x) #' #' summary(ret) #' #' plot(ret) modwt = function(x, nlevels = floor(log2(length(x)-1)), filter = "haar") { if (sum(class(x) %in% "gts") == 1){ x = as.numeric(x) } ret = modwt_cpp(x = x, filter_name = filter, nlevels) mostattributes(ret) = list(J=nlevels, filter = filter, class=c("modwt","list")) ret } #' @title Print Maximum Overlap Discrete Wavelet Transform #' @description #' Prints the results of the modwt list #' @method print modwt #' @export #' @param x A \code{modwt} object #' @param ... further arguments passed to or from other methods. #' @return Prints the modwt decomposition #' @author James Balamuta and Nathanael Claussen #' @keywords internal #' @examples #' set.seed(999) #' x = rnorm(100) #' print(modwt(x)) print.modwt = function(x, ...){ NextMethod("print") } #' @title Summary Maximum Overlap Discrete Wavelet Transform #' @description #' Prints MODWT object in a concise format #' @method summary modwt #' @importFrom utils head #' @export #' @keywords internal #' @param object A \code{modwt} object #' @param ... additional arguments affecting the summary produced. #' @return Prints the modwt matrix decomposition #' @author Nathanael Claussen and Justin Lee #' @examples #' set.seed(999) #' x = rnorm(100) #' summary(modwt(x)) summary.modwt=function(object, ...) { cat("\n") cat("Results of MODWT using",attr(object,"filter"),"filter with",attr(object, "J"),"levels:\n") cat("Displaying only the first 6 coefficients...\n") y = as.list(object) j = length(y) for( i in 1:j ) { cat("Level",i,"Wavelet Coefficients\n", c(head(y[[i]])), "...\n") } } #' @title Plot Maximum Overlap Discrete Wavelet Transform #' @description #' Plots results of the modwt list in which additional parameters can be specified #' @method plot modwt #' @export #' @param x A \code{modwt} object. #' @param index A \code{vector} containing the indices to scales to be included in #' the graph. By default \code{index = 1:(min(c(J,4)))}, where \code{J} denotes the #' number of scales in \code{y}. #' @param couleur A \code{vector} of colors of the same size as \code{index} used #' for the different scales depicted in the graph. If \code{couleur} contains a single #' value the the same color will be used for all scales. #' @param ... additional arguments affecting the plot produced. 
#' @author Justin Lee and Stephane Guerrier #' @keywords internal #' @examples #' # Simulate a Gaussian white noise #' n = 10^3 #' Xt = rnorm(n) #' #' # MODWT #' Yt = modwt(Xt) #' #' # Graph examples #' plot(Yt) #' plot(Yt, index = c(1,4,5,6,8,2)) #' plot(Yt, index = c(1,4,5,6), couleur = "blue") #' plot(Yt, index = c(1,4,5,6), couleur = rep(c("blue","yellow"),2)) plot.modwt = function(x, index = NULL, couleur = NULL, ...){ J = attr(x,"J") if (is.null(index)){ index = 1:(min(c(4,J))) }else{ if (sum(index == "all") == 1){ # i.e. index == "all" index = 1:J } if (max(index) > J || min(index) < 1){ stop("Incorrect index specified") } } nb_plot = length(index) if (is.null(couleur)){ hues = seq(15, 375, length = nb_plot + 1) couleur = hcl(h = hues, l = 65, c = 100, alpha = 1)[seq_len(nb_plot)] }else{ if (length(couleur) == 1 || length(couleur) != nb_plot){ couleur = rep(couleur[1],nb_plot) } } old_pars = par(mfrow = c(nb_plot,1), mar = c(0,3,0,0), oma = c(5,2,1,1)) on.exit(par(old_pars)) x_range = length(x[[1]]) for (i in seq_len(nb_plot)){ current_time_series = x[[index[i]]] plot(NA, xlim = c(1,x_range), ylim = range(current_time_series), bty = "n", axes = FALSE) box(col = "lightgrey") grid() axis(2) mtext(paste("Scale ",index[i], sep = ""), side = 2, line = 3, cex = 0.8) lines(unlist(current_time_series), col = couleur[i]) # Add bottom axis if (i == nb_plot){ axis(1) mtext("Time", side = 1, line = 3, cex = 0.8) } } }
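# Illustrative usage sketch, assuming the wv package is installed and attached.
# In contrast to the DWT, the MODWT is not decimated, so every scale keeps all
# N coefficients. Seed and sample size are arbitrary.
library(wv)
set.seed(999)
x = rnorm(2^8)
sapply(dwt(x,   nlevels = 4), length)   # 128 64 32 16
sapply(modwt(x, nlevels = 4), length)   # 256 256 256 256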
/scratch/gouwar.j/cran-all/cranData/wv/R/modwt.R
#' wv
#' @keywords internal
#' @importFrom Rcpp evalCpp
#' @importFrom grDevices hcl
#' @useDynLib wv
#' @import graphics
#' @description Provides a series of tools to compute and plot quantities related to
#' classical and robust wavelet variance for time series and regular lattices. More
#' details can be found, for example, in Serroukh, A., Walden, A.T., & Percival, D.B. (2000)
#' <doi:10.2307/2669537> and Guerrier, S. & Molinari, R. (2016) <arXiv:1607.05858>.
#' @name wv
#' @docType package
"_PACKAGE"

NULL
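# Illustrative usage sketch, assuming the wv package is installed and attached.
# The main entry point described above is wvar(), with robust_eda() providing
# the classical versus robust comparison. Data, seed, and efficiency value are
# arbitrary choices for the sketch.
library(wv)
set.seed(999)
Xt = rnorm(10^4)

wv_cl  = wvar(Xt)                            # classical wavelet variance
wv_rob = wvar(Xt, robust = TRUE, eff = 0.6)  # robust wavelet variance

plot(wv_cl)
robust_eda(Xt, eff = 0.6)   # classical vs robust on one log-log plot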
/scratch/gouwar.j/cran-all/cranData/wv/R/wv-package.R
#' @title Convert Unit of Time Series Data #' @description Manipulate the units of time to different ones #' @keywords internal #' @param x A \code{vector} containing the values on x-axis. #' @param from.unit A \code{string} indicating the unit which the data is converted from. #' @param to.unit A \code{string} indicating the unit which the data is converted to. #' @details #' The supported units are "ns"(nanosecond), "ms"(millisecond), "sec", "min", "hour", "day", "month", and "year". #' Make sure \code{from.unit} and \code{to.unit} are not \code{NULL} before it is passed to this function. #' @return A \code{list} with the following structure: #' \itemize{ #' \item "x": Data #' \item "converted": A \code{boolean} indicating whether conversion is made #' } #' @export #' @examples #' x = seq(60, 3600, 60) #' unitConversion(x, 'sec', 'min') #' y = 1:10 #' unitConversion(y, 'hour', 'sec') unitConversion = function(x, from.unit, to.unit){ #ns, ms, second, min, hour, day, month, year unit = c(ns = 1, ms = 2,se = 3, mi = 4, ho = 5, da = 6, mo = 7, ye = 8) #assume 1 month = 30 days ratio = c(1E6, 1E3, 60, 60, 24, 30, 12) from.unit.1 = substr(from.unit, 1, 2) to.unit.1 = substr(to.unit, 1, 2) #check unit: no.convert = F if(from.unit.1 == to.unit.1){no.convert = T} if(is.na(unit[from.unit.1]) ) { message = paste('No such unit: ', from.unit, '. Supported units are "ns"(nanosecond), "ms"(millisecond), "sec", "min", "hour", "day", "month", and "year". Conversion is terminated.', sep = '') warning(message); no.convert = T} if(is.na(unit[to.unit.1]) ) { message = paste('No such unit: ', to.unit, '. Supported units are "ns"(nanosecond), "ms"(millisecond), "sec", "min", "hour", "day", "month", and "year". Conversion is terminated.', sep = '') warning(message); no.convert = T} if(!no.convert){ #print out warning when day is convert to month, or month is converted to day. conversionRange = unit[from.unit.1] : unit[to.unit.1] if(6 %in% conversionRange && 7 %in% conversionRange){ warning('Unit conversion might be wrong because this function simply assumes 1 month = 30 days.') } } if(!no.convert){ if(unit[from.unit.1] > unit[to.unit.1]){ temp = ratio[unit[to.unit.1]: (unit[from.unit.1]-1)] multiplier = prod(temp) x = x*multiplier }else{ temp = ratio[unit[from.unit.1]: (unit[to.unit.1]-1) ] multiplier = prod(temp) x = x/multiplier } } obj = list(x = x, converted = !no.convert) return(obj) } #' @title Wavelet Variance #' @description Calculates the (MO)DWT wavelet variance #' @param x A \code{vector} with dimensions N x 1. #' @param decomp A \code{string} that indicates whether to use a "dwt" or "modwt" decomposition. #' @param filter A \code{string} that specifies which wavelet filter to use. #' @param nlevels An \code{integer} that indicates the level of decomposition. It must be less than or equal to floor(log2(length(x))). #' @param alpha A \code{double} that specifies the significance level which in turn specifies the \eqn{1-\alpha} confidence level. #' @param robust A \code{boolean} that triggers the use of the robust estimate. #' @param eff A \code{double} that indicates the efficiency as it relates to an MLE. #' @param freq A \code{numeric} that provides the rate of samples. #' @param from.unit A \code{string} indicating the unit from which the data is converted. #' @param to.unit A \code{string} indicating the unit to which the data is converted. #' @param ... Further arguments passed to or from other methods. 
#' @return A \code{list} with the structure: #' \itemize{ #' \item "variance": Wavelet Variance #' \item "ci_low": Lower CI #' \item "ci_high": Upper CI #' \item "robust": Robust active #' \item "eff": Efficiency level for Robust calculation #' \item "alpha": p value used for CI #' \item "unit": String representation of the unit #' } #' @details #' The default value of \code{nlevels} will be set to \eqn{\left\lfloor {{{\log }_2}\left( {length\left( x \right)} \right)} \right\rfloor}{floor(log2(length(x)))}, unless otherwise specified. #' @author James Balamuta, Justin Lee and Stephane Guerrier #' @rdname wvar #' @examples #' set.seed(999) #' x = rnorm(100) #' #' # Default #' wvar(x) #' #' # Robust #' wvar(x, robust = TRUE, eff=0.3) #' #' # Classical #' wvar(x, robust = FALSE, eff=0.3) #' #' # 90% Confidence Interval #' wvar(x, alpha = 0.10) #' @export wvar = function(x, ...) { if (sum(class(x) %in% "gts") == 1){ x = as.numeric(x) } UseMethod("wvar") } #' @rdname wvar #' @export wvar.lts = function(x, decomp = "modwt", filter = "haar", nlevels = NULL, alpha = 0.05, robust = FALSE, eff = 0.6, to.unit = NULL, ...){ warning('`lts` object is detected. This function can only operate on the combined process.') freq = attr(x, 'freq') unit = attr(x, 'unit') x = x[,ncol(x)] wvar.default(x, decomp, filter, nlevels, alpha, robust, eff, freq = freq, from.unit = unit, to.unit = to.unit) } #' @rdname wvar #' @export wvar.gts = function(x, decomp="modwt", filter = "haar", nlevels = NULL, alpha = 0.05, robust = FALSE, eff = 0.6, to.unit = NULL, ...){ freq = attr(x, 'freq') unit = attr(x, 'unit') x = x[,1] wvar.default(x, decomp, filter, nlevels, alpha, robust, eff, freq = freq, from.unit = unit, to.unit = to.unit) } #' @rdname wvar #' @export wvar.ts = function(x, decomp="modwt", filter = "haar", nlevels = NULL, alpha = 0.05, robust = FALSE, eff = 0.6, to.unit = NULL, ...){ freq = attr(x, 'tsp')[3] unit = NULL wvar.default(x, decomp, filter, nlevels, alpha, robust, eff, freq = freq, from.unit = unit, to.unit = to.unit) } #' @rdname wvar #' @export wvar.imu = function(x, decomp="modwt", filter = "haar", nlevels = NULL, alpha = 0.05, robust = FALSE, eff = 0.6, to.unit = NULL, ...){ # Retrive sensor name if (!is.null(attr(x, "stype"))){ sensor_name = attr(x, "stype") }else{ warning("Unknown sensor name. IMU object is missing some information.") sensor_name = NULL } # Retrive freq if (!is.null(attr(x, "freq"))){ freq = attr(x, "freq") }else{ warning("Unknown frequency. IMU object is missing some information. Freq is set to 1 by default.") freq = 1 } # Retrive sample size if (!is.null(attr(x, "dim"))){ n = attr(x, "dim")[1] }else{ warning("Unknown sample size. IMU object is missing some information.") n = NULL } # Retrive col names if (!is.null(attr(x, "dimnames")[[2]])){ col_names = attr(x, "dimnames")[[2]] }else{ stop("Unknown colunms names. IMU object is missing some information.") col_names = NULL } # Retrive sensor if (!is.null(attr(x, "sensor"))){ sensor = attr(x, "sensor") }else{ warning("Unknown sensor. IMU object is missing some information.") sensor = NULL } # Retrive axis if (!is.null(attr(x, "axis"))){ ax = attr(x, "axis") }else{ warning("Unknown axes. 
IMU object is missing some information.") ax = NULL } # Compute wvar m = length(col_names) wvariance = list() for (i in 1:m){ wvariance[[i]] = wvar.default(x[,i], decomp, filter, nlevels, alpha, robust, eff, freq = freq, to.unit = to.unit) } names(wvariance) = col_names out = list(sensor = sensor_name, freq = freq, n = n, type = sensor, axis = ax, wvar = wvariance) class(out) = "imu_wvar" invisible(out) } #' @rdname wvar #' @export #' @importFrom methods is #' @importFrom simts WN wvar.default = function(x, decomp = "modwt", filter = "haar", nlevels = NULL, alpha = 0.05, robust = FALSE, eff = 0.6, freq = 1, from.unit = NULL, to.unit = NULL, ...){ if(is.null(x)){ stop("`x` must contain a value") }else if((is.data.frame(x) || is.matrix(x))){ if(ncol(x) > 1) stop("There must be only one column of data supplied.") } if(decomp == "modwt" && is.null(nlevels)){ nlevels = floor(log2(length(x))-1) }else if(decomp == "dwt" && is.null(nlevels)){ nlevels = floor(log2(length(x))) } # Check Freq if(!is(freq,"numeric") || length(freq) != 1){ stop("'freq' must be one numeric number.") } if(freq <= 0) { stop("'freq' must be larger than 0.") } # Check Unit all.units = c('ns', 'ms', 'sec', 'second', 'min', 'minute', 'hour', 'day', 'mon', 'month', 'year') if( (!is.null(from.unit) && !from.unit %in% all.units) || (!is.null(to.unit) && !to.unit %in% all.units) ){ stop('The supported units are "ns", "ms", "sec", "min", "hour", "day", "month", "year". ') } if(robust) { if(eff > 0.99) { stop("The efficiency specified is too close to the classical case. Use `robust = FALSE`") } } obj = modwt_wvar_cpp(signal=x, nlevels=nlevels, robust=robust, eff=eff, alpha=alpha, ci_type="eta3", strWavelet=filter, decomp = decomp) # nlevels may be changed during modwt nlevels = nrow(obj) scales = scales_cpp(nlevels)/freq # NO Unit Conversion if( is.null(from.unit) && is.null(to.unit)==F ){ warning("'from.unit' is NULL. Unit conversion was not done.") } # Unit Conversion if (!is.null(from.unit)){ if (!is.null(to.unit)){ convert.obj = unitConversion(scales, from.unit = from.unit, to.unit = to.unit) if (convert.obj$converted) { # YES Unit Conversion scales = convert.obj$x message(paste0('Unit of object is converted from ', from.unit, ' to ', to.unit), appendLF = T) } } } if(!is.null(from.unit) && !is.null(to.unit)){ unit = to.unit }else{ unit = from.unit} # Additional internal useful values mean_diff = mean(diff(x)) N = length(x) ranged = (max(x) - min(x))/N J = nlevels create_wvar(obj, decomp, filter, robust, eff, alpha, scales, unit, mean_diff, N, ranged, J) } #' @title Create a \code{wvar} object #' @description Structures elements into a \code{wvar} object #' @param obj A \code{matrix} with dimensions N x 3 that contains Wavelet Variance, Lower CI, and Upper CI. #' @param decomp A \code{string} that indicates whether to use a "dwt" or "modwt" decomposition. #' @param filter A \code{string} that specifies the type of wavelet filter used in the decomposition. #' @param robust A \code{boolean} that triggers the use of the robust estimate. #' @param eff A \code{double} that indicates the efficiency as it relates to an MLE. #' @param alpha A \code{double} that specifies the significance level which in turn specifies the \eqn{1-\alpha} confidence level. #' @param scales A \code{vec} that contains the amount of decomposition performed at each level. #' @param unit A \code{string} that indicates the unit expression of the frequency. #' @param mean_diff A \code{double} that specified the empirical mean of the first difference. 
#' @param N A \code{integer} that specified the empirical length of the time series. #' @param ranged A \code{double} that specified the scaled range of the data, i.e. (max(x) - min(x))/length(x). #' @param J A \code{integer} that specified the number of scales. #' #' @return A \code{list} with the structure: #' \itemize{ #' \item "variance": Wavelet variance #' \item "ci_low": Lower CI #' \item "ci_high": Upper CI #' \item "robust": Robust active #' \item "eff": Efficiency level for robust calculation #' \item "alpha": p value used for CI #' \item "unit": String representation of the unit #' \item "mean_diff": Empirical mean of the first difference #' \item "N": Length of the time series #' \item "ranged": Scaled range of the data, i.e. (max(x) - min(x))/length(x) #' \item "J": Number of scales #' } #' @keywords internal create_wvar = function(obj, decomp, filter, robust, eff, alpha, scales, unit, mean_diff, N, ranged, J){ structure(list(variance = obj[,1], ci_low = obj[,2], ci_high = obj[,3], robust = robust, eff = eff, alpha = alpha, scales = scales, decomp = decomp, unit = unit, mean_diff = mean_diff, N = N, ranged = ranged, J = J, filter = filter), class = "wvar") } #' @title Print Wavelet Variances #' @description Displays the summary table of wavelet variance. #' @author James Balamuta #' @method print wvar #' @export #' @keywords internal #' @param x A \code{wvar} object. #' @param ... Further arguments passed to or from other methods. #' @return Summary table #' @examples #' set.seed(999) #' x = rnorm(100) #' out = wvar(x) #' print( out ) print.wvar = function(x, ...){ mat = matrix(unlist(x[1:3]),ncol=3,byrow=F) colnames(mat) = c("Variance", "Low CI", "High CI") rownames(mat) = x$scales print(mat) } #' @title Summary of Wavelet Variances #' @description Displays the summary table of wavelet variance accounting for CI values and supplied efficiency. #' @method summary wvar #' @export #' @keywords internal #' @param object A \code{wvar} object. #' @param ... Additional arguments affecting the summary produced. #' @return Summary table and other properties of the object. #' @author James Balamuta #' @examples #' set.seed(999) #' x = rnorm(100) #' ret = wvar(x) #' summary(ret) summary.wvar = function(object, ...){ name = if(object$robust){ "robust" }else{ "classical" } cat("Results of the wavelet variance calculation using the ",name, " method.\n",sep="") if(object$robust){ cat("Robust was created using efficiency=",object$eff,"\n",sep="") } cat("The confidence interval was generated using (1-",object$alpha,")*100 \n",sep="") print(object) } #' @title Plot Wavelet Variance #' @description Displays a plot of wavelet variance accounting for CI values and supplied efficiency. #' @method plot wvar #' @keywords internal #' @param x A \code{wvar} object. #' @param units A \code{string} that specifies the units of time plotted on the x axis. #' @param xlab A \code{string} that gives a title for the x axis. #' @param ylab A \code{string} that gives a title for the y axis. #' @param main A \code{string} that gives an overall title for the plot. #' @param col_wv A \code{string} that specifies the color of the wavelet variance line. #' @param col_ci A \code{string} that specifies the color of the confidence interval polygon. #' @param nb_ticks_x An \code{integer} that specifies the maximum number of ticks for the x-axis. #' @param nb_ticks_y An \code{integer} that specifies the maximum number of ticks for the y-axis. 
#' @param legend_position A \code{string} that specifies the position of the legend (use \code{legend_position = NA} to remove legend). #' @param ci_wv A \code{boolean} that determines whether a confidence interval polygon will be drawn. #' @param point_cex A \code{double} that specifies the size of each symbol to be plotted. #' @param point_pch A \code{double} that specifies the symbol type to be plotted. #' @param ... Additional arguments affecting the plot. #' @return Plot of wavelet variance and confidence interval for each scale. #' @author Stephane Guerrier, Nathanael Claussen, and Justin Lee #' @export #' @examples #' set.seed(999) #' n = 10^4 #' Xt = rnorm(n) #' wv = wvar(Xt) #' plot(wv) #' plot(wv, main = "Simulated white noise", xlab = "Scales") #' plot(wv, units = "sec", legend_position = "topright") #' plot(wv, col_wv = "darkred", col_ci = "pink") plot.wvar = function(x, units = NULL, xlab = NULL, ylab = NULL, main = NULL, col_wv = NULL, col_ci = NULL, nb_ticks_x = NULL, nb_ticks_y = NULL, legend_position = NULL, ci_wv = NULL, point_cex = NULL, point_pch = NULL, ...){ # Labels if (is.null(xlab)){ if (is.null(units)){ xlab = expression(paste("Scale ", tau, sep ="")) }else{ xlab = bquote(paste("Scale ", tau, " [", .(units), "]", sep = " ")) } } if (is.null(ylab)){ ylab = expression(paste("Wavelet Variance ", nu^2, sep = "")) }else{ ylab = ylab } # Main Title if (is.null(main)){ main = "Haar Wavelet Variance Representation" } # Line and CI colors if (is.null(col_wv)){ col_wv = "darkblue" } if (is.null(col_ci)){ col_ci = hcl(h = 210, l = 65, c = 100, alpha = 0.2) } # Range x_range = range(x$scales) x_low = floor(log2(x_range[1])) x_high = ceiling(log2(x_range[2])) y_range = range(c(x$ci_low, x$ci_high)) y_low = floor(log10(y_range[1])) y_high = ceiling(log10(y_range[2])) # Axes if (is.null(nb_ticks_x)){ nb_ticks_x = 6 } if (is.null(nb_ticks_y)){ nb_ticks_y = 5 } x_ticks = seq(x_low, x_high, by = 1) if (length(x_ticks) > nb_ticks_x){ x_ticks = x_low + ceiling((x_high - x_low)/(nb_ticks_x + 1))*(0:nb_ticks_x) } x_labels = sapply(x_ticks, function(i) as.expression(bquote(2^ .(i)))) y_ticks <- seq(y_low, y_high, by = 1) if (length(y_ticks) > nb_ticks_y){ y_ticks = y_low + ceiling((y_high - y_low)/(nb_ticks_y + 1))*(0:nb_ticks_y) } y_labels <- sapply(y_ticks, function(i) as.expression(bquote(10^ .(i)))) # Legend Position if (is.null(legend_position)){ if (which.min(abs(c(y_low, y_high) - log2(x$variance[1]))) == 1){ legend_position = "topleft" }else{ legend_position = "bottomleft" } } # Main Plot plot(NA, xlim = x_range, ylim = y_range, xlab = xlab, ylab = ylab, log = "xy", xaxt = 'n', yaxt = 'n', bty = "n", ann = FALSE) win_dim = par("usr") par(new = TRUE) plot(NA, xlim = x_range, ylim = 10^c(win_dim[3], win_dim[4] + 0.09*(win_dim[4] - win_dim[3])), xlab = xlab, ylab = ylab, log = "xy", xaxt = 'n', yaxt = 'n', bty = "n") win_dim = par("usr") # Add Grid abline(v = 2^x_ticks, lty = 1, col = "grey95") abline(h = 10^y_ticks, lty = 1, col = "grey95") # Add Title x_vec = 10^c(win_dim[1], win_dim[2], win_dim[2], win_dim[1]) y_vec = 10^c(win_dim[4], win_dim[4], win_dim[4] - 0.09*(win_dim[4] - win_dim[3]), win_dim[4] - 0.09*(win_dim[4] - win_dim[3])) polygon(x_vec, y_vec, col = "grey95", border = NA) text(x = 10^mean(c(win_dim[1], win_dim[2])), y = 10^(win_dim[4] - 0.09/2*(win_dim[4] - win_dim[3])), main) # Add Axes and Box lines(x_vec[1:2], rep(10^(win_dim[4] - 0.09*(win_dim[4] - win_dim[3])),2), col = 1) #y_ticks = y_ticks[(2^y_ticks) < 10^(win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))] 
y_labels = y_labels[1:length(y_ticks)] box() axis(1, at = 2^x_ticks, labels = x_labels, padj = 0.3) axis(2, at = 10^y_ticks, labels = y_labels, padj = -0.2) # CI for WV if (ci_wv == TRUE || is.null(ci_wv)){ polygon(c(x$scales, rev(x$scales)), c(x$ci_low, rev(x$ci_high)), border = NA, col = col_ci) } # Add legend CI_conf = 1 - x$alpha if (x$robust == TRUE){ wv_title_part1 = "Empirical Robust WV " }else{ wv_title_part1 = "Empirical WV " } if (!is.na(legend_position)){ if (legend_position == "topleft"){ legend_position = 10^c(1.1*win_dim[1], 0.98*(win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))) legend(x = legend_position[1], y = legend_position[2], legend = c(as.expression(bquote(paste(.(wv_title_part1), hat(nu)^2))), as.expression(bquote(paste("CI(",hat(nu)^2,", ",.(CI_conf),")")))), pch = c(16, 15), lty = c(1, NA), col = c(col_wv, col_ci), cex = 1, pt.cex = c(1.25, 3), bty = "n") }else{ if (legend_position == "topright"){ legend_position = 10^c(0.7*win_dim[2], 0.98*(win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))) legend(x = legend_position[1], y = legend_position[2], legend = c(as.expression(bquote(paste(.(wv_title_part1), hat(nu)^2))), as.expression(bquote(paste("CI(",hat(nu)^2,", ",.(CI_conf),")")))), pch = c(16, 15), lty = c(1, NA), col = c(col_wv, col_ci), cex = 1, pt.cex = c(1.25, 3), bty = "n") }else{ legend(legend_position, legend = c(as.expression(bquote(paste(.(wv_title_part1), hat(nu)^2))), as.expression(bquote(paste("CI(",hat(nu)^2,", ",.(CI_conf),")")))), pch = c(16, 15), lty = c(1, NA), col = c(col_wv, col_ci), cex = 1, pt.cex = c(1.25, 3), bty = "n") } } } # Add WV lines(x$scales, x$variance, type = "l", col = col_wv, pch = 16) if (is.null(point_pch)){ point_pch = 16 } if (is.null(point_cex)){ point_cex = 1.25 } lines(x$scales, x$variance, type = "p", col = col_wv, pch = point_pch, cex = point_cex) } #' @title Plot Wavelet Variance based on IMU Data #' @description Displays a plot of wavelet variance accounting for CI values and supplied efficiency. #' @method plot imu_wvar #' @keywords internal #' @param x A \code{wvar} object. #' @param xlab A \code{string} that gives a title for the x axis. #' @param ylab A \code{string} that gives a title for the y axis. #' @param main A \code{string} that gives an overall title for the plot. #' @param col_wv A \code{string} that specifies the color of the wavelet variance line. #' @param col_ci A \code{string} that specifies the color of the confidence interval polygon. #' @param nb_ticks_x An \code{integer} that specifies the maximum number of ticks for the x-axis. #' @param nb_ticks_y An \code{integer} that specifies the maximum number of ticks for the y-axis. #' @param ci_wv A \code{boolean} that determines whether a confidence interval polygon will be drawn. #' @param point_cex A \code{double} that specifies the size of each symbol to be plotted. #' @param point_pch A \code{double} that specifies the symbol type to be plotted. #' @param ... Additional arguments affecting the plot. #' @return Plot of wavelet variance and confidence interval for each scale. 
#' @author Stephane Guerrier and Yuming Zhang #' @export #' @examples #' data("kvh1750_wv") #' plot(kvh1750_wv) plot.imu_wvar = function(x, xlab = NULL, ylab = NULL, main = NULL, col_wv = NULL, col_ci = NULL, nb_ticks_x = NULL, nb_ticks_y = NULL, ci_wv = NULL, point_cex = NULL, point_pch = NULL, ...){ type = unique(x$type) if ("Gyroscope" %in% type){ gyro_index = which(x$type == "Gyroscope") }else{ gyro_index = NULL } if ("Accelerometer" %in% type){ accel_index = which(x$type == "Accelerometer") }else{ accel_index = NULL } ncol = length(unique(x$axis)) nrow = length(type) m = length(x$wvar) J = length(x$wvar[[1]]$variance) # remove negative CI values index_to_remove = c() for (i in 1:m) { if(length(which(x$wvar[[i]]$lci<0)) > 0){ index_to_remove = c(index_to_remove, which(x$wvar[[i]]$lci<0)) } } if (!is.null(index_to_remove)){ index_to_remove = unique(index_to_remove) index_to_keep = which(seq(1:J) != index_to_remove) }else{ index_to_keep = 1:J } J = length(index_to_keep) scales = x$wvar[[1]]$scales[index_to_keep] ci_up = ci_lw = av = matrix(NA, J, m) for (i in 1:m){ ci_up[,i] = x$wvar[[i]]$ci_high[index_to_keep] ci_lw[,i] = x$wvar[[i]]$ci_low[index_to_keep] av[,i] = x$wvar[[i]]$variance[index_to_keep] } # Axes if (is.null(nb_ticks_x)){ nb_ticks_x = 6 } if (is.null(nb_ticks_y)){ nb_ticks_y = 5 } # Range x_range = range(scales) x_low = floor(log10(x_range[1])) x_high = ceiling(log10(x_range[2])) x_ticks = seq(x_low, x_high, by = 1) if (length(x_ticks) > nb_ticks_x){ x_ticks = x_low + ceiling((x_high - x_low)/(nb_ticks_x + 1))*(0:nb_ticks_x) } x_labels = sapply(x_ticks, function(i) as.expression(bquote(10^ .(i)))) # Line and CI colors if (is.null(col_wv)){ col_wv = "darkblue" } if (is.null(col_ci)){ col_ci = hcl(h = 210, l = 65, c = 100, alpha = 0.2) } if (is.null(point_pch)){ point_pch = 16 } if (is.null(point_cex)){ point_cex = 1.25 } # Main Title if (is.null(main)){ main = paste("Wavelet Variance Representation - ", x$sensor, " @ ", x$freq, " Hz", sep="") } # Labels if (is.null(xlab)){ xlab = bquote(paste("Averaging time ", tau, " [sec]", sep = " ")) } if (is.null(ylab)){ ylab = expression(paste("Wavelet Variance ", nu, sep = "")) } # Main plot par(omi=rep(1, 4), mar=c(0,0,0,0), mfrow=c(nrow,ncol)) # Gyro if (!is.null(gyro_index)){ y_range = c(min(ci_lw[,gyro_index]), max(ci_up[,gyro_index])) y_low = floor(log10(y_range[1])) y_high = ceiling(log10(y_range[2])) y_ticks <- seq(y_low, y_high, by = 1) if (length(y_ticks) > nb_ticks_y){ y_ticks = y_low + ceiling((y_high - y_low)/(nb_ticks_y + 1))*(0:nb_ticks_y) } y_labels <- sapply(y_ticks, function(i) as.expression(bquote(10^ .(i)))) for (i in seq_along(gyro_index)){ plot(NA, xlim = range(scales), ylim = y_range, xaxt="n", yaxt="n", log = "xy", bty = "n") box(col = "grey") mtext(paste("Axis - ", x$axis[gyro_index][i], sep = ""), 3, line = 0.5) if (i == 1){ axis(2, at = 10^y_ticks, labels = y_labels, padj = -0.2, cex = 1.25) } if (i == 1){ mtext("Gyroscope", 2, line = 4.5) mtext(ylab, 2, line = 2.5) } abline(h = 10^y_ticks, col = "grey85") abline(v = 10^x_ticks, col = "grey85") # CI for AD if(ci_wv == TRUE || is.null(ci_wv)){ polygon(c(scales, rev(scales)), c(ci_lw[,gyro_index[i]], rev(ci_up[,gyro_index[i]])), border = NA, col = col_ci) } # Add AD lines(scales, (av[,gyro_index[i]]), type = "l", col = col_wv, pch = 16) lines(scales, (av[,gyro_index[i]]), type = "p", col = col_wv, pch = point_pch, cex = point_cex) if (is.null(accel_index)){ axis(1, at = 10^x_ticks, labels = x_labels, padj = -0.2, cex = 1.25) } } } # Accel if 
(!is.null(accel_index)){ y_range = c(min(ci_lw[,accel_index]), max(ci_up[,accel_index])) y_low = floor(log10(y_range[1])) y_high = ceiling(log10(y_range[2])) y_ticks <- seq(y_low, y_high, by = 1) if (length(y_ticks) > nb_ticks_y){ y_ticks = y_low + ceiling((y_high - y_low)/(nb_ticks_y + 1))*(0:nb_ticks_y) } y_labels <- sapply(y_ticks, function(i) as.expression(bquote(10^ .(i)))) for (i in seq_along(accel_index)){ plot(NA, xlim = range(scales), ylim = y_range, xaxt="n", yaxt="n", log = "xy", bty = "n") box(col = "grey") if (i == 1){ axis(2, at = 10^y_ticks, labels = y_labels, padj = -0.2, cex = 1.25) } if (i == 1){ mtext("Accelerometer", 2, line = 4.5) mtext(ylab, 2, line = 2.5) } if (length(accel_index) == 3 && i == 2){ mtext(xlab, 1, line = 3.5) } if (is.null(gyro_index)){ mtext(paste("Axis - ", x$axis[gyro_index][i], sep = ""), 3, line = 0.5) } abline(h = 10^y_ticks, col = "grey85") abline(v = 10^x_ticks, col = "grey85") # CI for AD if(ci_wv == TRUE || is.null(ci_wv)){ polygon(c(scales, rev(scales)), c(ci_lw[,accel_index[i]], rev(ci_up[,accel_index[i]])), border = NA, col = col_ci) } # Add AD lines(scales, (av[,accel_index[i]]), type = "l", col = col_wv, pch = 16) lines(scales, (av[,accel_index[i]]), type = "p", col = col_wv, pch = point_pch, cex = point_cex) axis(1, at = 10^x_ticks, labels = x_labels, padj = -0.2, cex = 1.25) } } # Add main title mtext(main, side = 3, line = 3, outer = TRUE) par(mfrow = c(1,1)) } #' @title Comparison between classical and robust Wavelet Variances #' @description Displays a plot of the wavelet variances (classical and robust) for a given time series accounting for CI values. #' @param x A time series objects. #' @param eff An \code{integer} that specifies the efficiency of the robust estimator. #' @param units A \code{string} that specifies the units of time plotted on the x axis. #' @param xlab A \code{string} that gives a title for the x axis. #' @param ylab A \code{string} that gives a title for the y axis. #' @param main A \code{string} that gives an overall title for the plot. #' @param col_wv A \code{string} that specifies the color of the wavelet variance line. #' @param col_ci A \code{string} that specifies the color of the confidence interval shade. #' @param nb_ticks_x An \code{integer} that specifies the maximum number of ticks for the x-axis. #' @param nb_ticks_y An \code{integer} that specifies the maximum number of ticks for the y-axis. #' @param legend_position A \code{string} that specifies the position of the legend (use \code{legend_position = NA} to remove legend). #' @param ... Additional arguments affecting the plot. #' @return Plot of wavelet variance and confidence interval for each scale. 
#' @author Stephane Guerrier, Nathanael Claussen, and Justin Lee #' @examples #' set.seed(999) #' n = 10^4 #' Xt = rnorm(n) #' wv = wvar(Xt) #' #' plot(wv) #' plot(wv, main = "Simulated white noise", xlab = "Scales") #' plot(wv, units = "sec", legend_position = "topright") #' plot(wv, col_wv = "darkred", col_ci = "pink") #' @export robust_eda = function(x, eff = 0.6, units = NULL, xlab = NULL, ylab = NULL, main = NULL, col_wv = NULL, col_ci = NULL, nb_ticks_x = NULL, nb_ticks_y = NULL, legend_position = NULL, ...){ wv_cl = wvar(x) wv_rob = wvar(x, robust = TRUE, eff = eff) # Labels if (is.null(xlab)){ if (is.null(units)){ xlab = expression(paste("Scale ", tau, sep ="")) }else{ xlab = bquote(paste("Scale ", "", tau , " [", .(units), "]", sep = "")) } } if (is.null(ylab)){ ylab = expression(paste("Wavelet Variance ", nu^2, sep = "")) }else{ ylab = ylab } # Main Title if (is.null(main)){ main = "Classical vs Robust WV" } # Line and CI colors if (is.null(col_wv)){ col_wv = c("darkblue", "darkorange2") } if (is.null(col_ci)){ col_ci = c(hcl(h = 210, l = 65, c = 100, alpha = 0.2), hcl(h = 60, l = 65, c = 100, alpha = 0.2)) } # Range x_range = range(wv_cl$scales) x_low = floor(log2(x_range[1])) x_high = ceiling(log2(x_range[2])) y_range = range(c(wv_cl$ci_low, wv_cl$ci_high, wv_rob$ci_low, wv_rob$ci_high)) y_low = floor(log10(y_range[1])) y_high = ceiling(log10(y_range[2])) # Axes if (is.null(nb_ticks_x)){ nb_ticks_x = 6 } if (is.null(nb_ticks_y)){ nb_ticks_y = 5 } x_ticks = seq(x_low, x_high, by = 1) if (length(x_ticks) > nb_ticks_x){ x_ticks = x_low + ceiling((x_high - x_low)/(nb_ticks_x + 1))*(0:nb_ticks_x) } x_labels = sapply(x_ticks, function(i) as.expression(bquote(2^ .(i)))) y_ticks <- seq(y_low, y_high, by = 1) if (length(y_ticks) > nb_ticks_y){ y_ticks = y_low + ceiling((y_high - y_low)/(nb_ticks_y + 1))*(0:nb_ticks_y) } y_labels <- sapply(y_ticks, function(i) as.expression(bquote(10^ .(i)))) # Legend position if (is.null(legend_position)){ if (which.min(abs(c(y_low, y_high) - log2(wv_rob$variance[1]))) == 1){ legend_position = "topleft" }else{ legend_position = "bottomleft" } } # Main plot plot(NA, xlim = x_range, ylim = y_range, xlab = xlab, ylab = ylab, log = "xy", xaxt = 'n', yaxt = 'n', bty = "n", ann = FALSE) win_dim = par("usr") par(new = TRUE) plot(NA, xlim = x_range, ylim = 10^c(win_dim[3], win_dim[4] + 0.09*(win_dim[4] - win_dim[3])), xlab = xlab, ylab = ylab, log = "xy", xaxt = 'n', yaxt = 'n', bty = "n") win_dim = par("usr") # Add grid abline(v = 2^x_ticks, lty = 1, col = "grey95") abline(h = 10^y_ticks, lty = 1, col = "grey95") # Add title x_vec = 10^c(win_dim[1], win_dim[2], win_dim[2], win_dim[1]) y_vec = 10^c(win_dim[4], win_dim[4], win_dim[4] - 0.09*(win_dim[4] - win_dim[3]), win_dim[4] - 0.09*(win_dim[4] - win_dim[3])) polygon(x_vec, y_vec, col = "grey95", border = NA) text(x = 10^mean(c(win_dim[1], win_dim[2])), y = 10^(win_dim[4] - 0.09/2*(win_dim[4] - win_dim[3])), main) # Add Axes and Box lines(x_vec[1:2], rep(10^(win_dim[4] - 0.09*(win_dim[4] - win_dim[3])),2), col = 1) y_ticks = y_ticks[(2^y_ticks) < 10^(win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))] y_labels = y_labels[1:length(y_ticks)] box() axis(1, at = 2^x_ticks, labels = x_labels, padj = 0.3) axis(2, at = 10^y_ticks, labels = y_labels, padj = -0.2) # CI for WV polygon(c(wv_cl$scales, rev(wv_cl$scales)), c(wv_cl$ci_low, rev(wv_cl$ci_high)), border = NA, col = col_ci[1]) polygon(c(wv_rob$scales, rev(wv_rob$scales)), c(wv_rob$ci_low, rev(wv_rob$ci_high)), border = NA, col = col_ci[2]) # Legend Position if 
(!is.na(legend_position)){ if (legend_position == "topleft"){ legend_position = 10^c(1.1*win_dim[1], 0.98*(win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))) legend(x = legend_position[1], y = legend_position[2], legend = c("Classical WV", "Classical CI", "Robust WV", "Robust CI"), pch = c(16, 15, 16, 15), lty = c(1, NA, 1, NA), col = c(col_wv[1], col_ci[1], col_wv[2], col_ci[2]), cex = 1, pt.cex = c(1.25, 3, 1.25, 3), bty = "n") }else{ if (legend_position == "topright"){ legend_position = 10^c(0.7*win_dim[2], 0.98*(win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))) legend(x = legend_position[1], y = legend_position[2], legend = c("Classical WV", "Classical CI", "Robust WV", "Robust CI"), pch = c(16, 15, 16, 15), lty = c(1, NA, 1, NA), col = c(col_wv[1], col_ci[1], col_wv[2], col_ci[2]), cex = 1, pt.cex = c(1.25, 3, 1.25, 3), bty = "n") }else{ legend(legend_position, legend = c("Classical WV", "Classical CI", "Robust WV", "Robust CI"), pch = c(16, 15, 16, 15), lty = c(1, NA, 1, NA), col = c(col_wv[1], col_ci[1], col_wv[2], col_ci[2]), cex = 1, pt.cex = c(1.25, 3, 1.25, 3), bty = "n") } } } lines(wv_cl$scales, wv_cl$variance, type = "l", col = col_wv[1], pch = 16) lines(wv_cl$scales, wv_cl$variance, type = "p", col = col_wv[1], pch = 16, cex = 1.25) lines(wv_cl$scales, wv_rob$variance, type = "l", col = col_wv[2], pch = 16) lines(wv_cl$scales, wv_rob$variance, type = "p", col = col_wv[2], pch = 16, cex = 1.25) } #' @title Multi-Plot Comparison Between Multiple Wavelet Variances #' @description #' This is a helper function for the \code{compare_var()} function. #' This method accepts the same set of arguments as \code{compare_wvar} and returns a comparision #' of multiple wavelet variances of different time series accounting for CI values as a set of different plots. 
#' #' @param graph_details List of inputs #' #' @author Stephane Guerrier, Justin Lee, and Nathanael Claussen #' @export #' compare_wvar_split = function(graph_details){ old_pars = par(mfrow = c(graph_details$obj_len, graph_details$obj_len), mar = c(0.5,0.5,0.5,1.5), oma = c(4,4,4,4)) on.exit(par(old_pars)) for (i in 1:graph_details$obj_len){ for (j in 1:graph_details$obj_len){ # Main plot plot(NA, xlim = graph_details$x_range, ylim = graph_details$y_range, log = "xy", xaxt = 'n', yaxt = 'n', bty = "n", ann = FALSE) win_dim = par("usr") kill_y_tick = graph_details$y_at < 10^(win_dim[4] - 0.09*(win_dim[4] - win_dim[3])) # Add grid abline(v = graph_details$x_at, lty = 1, col = "grey95") abline(h = graph_details$y_at, lty = 1, col = "grey95") # Add axes and box box(col = "grey") # Corner left piece if (j == 1){ axis(2, at = graph_details$y_at[kill_y_tick], labels = graph_details$y_labels[kill_y_tick], padj = -0.2, cex.axis = 1/log(graph_details$obj_len)) } # Corner bottom if (i == graph_details$obj_len){ axis(1, at = graph_details$x_at, labels = graph_details$x_labels, padj = 0.1, cex.axis = 1/log(graph_details$obj_len)) # figure out how to size these things for smaller plots } # Diag graph if (i == j){ scales = graph_details$obj_list[[i]]$scales ci_low = graph_details$obj_list[[i]]$ci_low ci_high = graph_details$obj_list[[i]]$ci_high variance = graph_details$obj_list[[i]]$variance if(graph_details$ci_wv[i] == TRUE){ polygon(c(scales, rev(scales)), c(ci_low, rev(ci_high)), border = NA, col = graph_details$col_ci[i]) } lines(scales, variance, type = "l", col = graph_details$col_wv[i], pch = 16) lines(scales, variance, type = "p", col = graph_details$col_wv[i], pch = 17, cex = graph_details$point_cex[i]/1.25) win_dim = par("usr") x_vec = 10^c(win_dim[1], win_dim[2], win_dim[2], win_dim[1]) y_vec = 10^c(win_dim[4], win_dim[4], win_dim[4] - 0.09*(win_dim[4] - win_dim[3]), win_dim[4] - 0.09*(win_dim[4] - win_dim[3])) box() # if (graph_details$add_legend){ # if (i == j){ # legend(graph_details$legend_position, legend = graph_details$names, bty = "n", # lwd = 1, pt.cex = graph_details$point_cex, pch = graph_details$point_pch, # col = graph_details$col_wv, cex=0.7) # } # } } if (i != j){ scales = graph_details$obj_list[[i]]$scales ci_low = graph_details$obj_list[[i]]$ci_low ci_high = graph_details$obj_list[[i]]$ci_high variance = graph_details$obj_list[[i]]$variance win_dim = par("usr") x_vec = 10^c(win_dim[1], win_dim[2], win_dim[2], win_dim[1]) y_vec = 10^c(win_dim[4], win_dim[4], win_dim[4] - 0.09*(win_dim[4] - win_dim[3]), win_dim[4] - 0.09*(win_dim[4] - win_dim[3])) if (is.null(graph_details$main[i,j])){ main = paste("WV:", graph_details$names[i], "vs", graph_details$names[j]) } box() if (i < j && graph_details$ci_wv[i] == TRUE){ polygon(c(scales, rev(scales)), c(ci_low, rev(ci_high)), border = NA, col = graph_details$col_ci[i]) } lines(scales, variance, type = "l", col = graph_details$col_wv[i], pch = 16) lines(scales, variance, type = "p", col = graph_details$col_wv[i], pch = 17, cex = graph_details$point_cex[i]/1.25) scales = graph_details$obj_list[[j]]$scales ci_low = graph_details$obj_list[[j]]$ci_low ci_high = graph_details$obj_list[[j]]$ci_high variance = graph_details$obj_list[[j]]$variance if (i < j && graph_details$ci_wv[i] == TRUE){ # don't show confidence intervals polygon(c(scales, rev(scales)), c(ci_low, rev(ci_high)), border = NA, col = graph_details$col_ci[j]) } lines(scales, variance, type = "l", col = graph_details$col_wv[j], pch = 16) lines(scales, variance, type = "p", col = 
graph_details$col_wv[j], pch = 17, cex = graph_details$point_cex[j]/1.25) } # Add Details # @todo: expand win_dim and position $names if(j==4){ x_vec = 13^c(win_dim[1], win_dim[2], win_dim[2], win_dim[1]) #mtext(graph_details$names[i], side = 4, line = 0.1, cex = 0.8) par(xpd = TRUE) #Draw outside plot area text(x = x_vec[2], y = 0.03, graph_details$names[i], srt = 270, cex = 1.3, col = graph_details$col_wv[i]) par(xpd = FALSE) } if(i==1){ mtext(graph_details$names[j], side = 3, line = 0.2, cex = 0.8, col = graph_details$col_wv[j]) } } } mtext(graph_details$ylab, side = 2, line = 2.80, cex = graph_details$cex_labels, outer = T) mtext(graph_details$xlab, side = 1, line = 2.75, cex = graph_details$cex_labels, outer = T) if (is.null(graph_details$main)){ main = "Haar Wavelet Variance Representation" }else{ mtext(main, side = 3, line = 1) } } #' @title Combined Plot Comparison Between Multiple Wavelet Variances #' @description #' This is a helper function for the \code{compare_var()} function. #' This method accepts the same set of arguments as \code{compare_wvar} and returns a single plot #' that compares multiple wavelet variances of different time series accounting for CI values. #' #' @param graph_details List of inputs #' #' @author Stephane Guerrier, Justin Lee, and Nathanael Claussen #' @export #' compare_wvar_no_split = function(graph_details){ # Main plot plot(NA, xlim = graph_details$x_range, ylim = graph_details$y_range, log = "xy", xaxt = 'n', yaxt = 'n', bty = "n", ann = FALSE) win_dim = par("usr") # Main Plot par(new = TRUE) plot(NA, xlim = graph_details$x_range, ylim = 10^c(win_dim[3], win_dim[4] + 0.09*(win_dim[4] - win_dim[3])), log = "xy", xaxt = 'n', yaxt = 'n', bty = "n", xlab = graph_details$xlab, ylab = graph_details$ylab, cex.lab = graph_details$cex_labels) win_dim = par("usr") # Add Grid abline(v = graph_details$x_at, lty = 1, col = "grey95") abline(h = graph_details$y_at, lty = 1, col = "grey95") # Add Title x_vec = 10^c(win_dim[1], win_dim[2], win_dim[2], win_dim[1]) y_vec = 10^c(win_dim[4], win_dim[4], win_dim[4] - 0.09*(win_dim[4] - win_dim[3]), win_dim[4] - 0.09*(win_dim[4] - win_dim[3])) polygon(x_vec, y_vec, col = "grey95", border = NA) text(x = 10^mean(c(win_dim[1], win_dim[2])), y = 10^(win_dim[4] - 0.09/2*(win_dim[4] - win_dim[3])), graph_details$main) # Add Axes and Box lines(x_vec[1:2], rep(10^(win_dim[4] - 0.09*(win_dim[4] - win_dim[3])),2), col = 1) y_ticks = graph_details$y_ticks[(10^graph_details$y_ticks) < 10^(win_dim[4] - 0.09*(win_dim[4] - win_dim[3]))] kill_y_tick = graph_details$y_at < 10^(win_dim[4] - 0.09*(win_dim[4] - win_dim[3])) box() axis(1, at = graph_details$x_at, labels = graph_details$x_labels, padj = 0.3) axis(2, at = graph_details$y_at[kill_y_tick], labels = graph_details$y_labels[kill_y_tick], padj = -0.2) for (i in 1:graph_details$obj_len){ scales = graph_details$obj_list[[i]]$scales ci_low = graph_details$obj_list[[i]]$ci_low ci_high = graph_details$obj_list[[i]]$ci_high variance = graph_details$obj_list[[i]]$variance if (graph_details$ci_wv[i]){ polygon(c(scales, rev(scales)), c(ci_low, rev(ci_high)), border = NA, col = graph_details$col_ci[i]) } lines(scales, variance, type = "l", col = graph_details$col_wv[i], pch = 16) lines(scales, variance, type = "p", col = graph_details$col_wv[i], pch = graph_details$point_pch[i], cex = graph_details$point_cex[i]) } if (graph_details$add_legend){ legend(graph_details$legend_position, legend = graph_details$names, bty = "n", lwd = 1, pt.cex = graph_details$point_cex, pch = 
graph_details$point_pch, col = graph_details$col_wv) } } #' @title Comparison Between Multiple Wavelet Variances #' @description #' Displays plots of multiple wavelet variances of different time series accounting for CI values. #' #' @param ... One or more time series objects. #' @param split A \code{boolean} that, if TRUE, arranges the plots into a matrix-like format. #' @param add_legend A \code{boolean} that, if TRUE, adds a legend to the plot. #' @param units A \code{string} that specifies the units of time plotted on the x axes. Note: This argument will not be used if xlab is specified. #' @param xlab A \code{string} that gives a title for the x axes. #' @param ylab A \code{string} that gives a title for the y axes. #' @param main A \code{string} that gives an overall title for the plot. #' @param col_wv A \code{string} that specifies the color of the wavelet variance lines. #' @param col_ci A \code{string} that specifies the color of the confidence interval shade. #' @param nb_ticks_x An \code{integer} that specifies the maximum number of ticks for the x-axis. #' @param nb_ticks_y An \code{integer} that specifies the maximum number of ticks for the y-axis. #' @param legend_position A \code{string} that specifies the position of the legend (use \code{legend_position = NA} to remove legend). #' @param ci_wv A \code{boolean} that determines whether confidence interval polygons will be drawn. #' @param point_cex A \code{double} that specifies the size of each symbol to be plotted. #' @param point_pch A \code{double} that specifies the symbol type to be plotted. #' @param names A \code{string} that specifies the name of the WVAR objects. #' @param cex_labels A \code{double} that specifies the magnification of the labels (x and y). #' @param x_range A \code{vector} that specifies the range of values on the x axis (default NULL). #' @param y_range A \code{vector} that specifies the range of values on the y axis (default NULL). #' @author Stephane Guerrier and Justin Lee #' @export #' @examples #' set.seed(999) #' n = 10^4 #' Xt = arima.sim(n = n, list(ar = 0.10)) #' Yt = arima.sim(n = n, list(ar = 0.35)) #' Zt = arima.sim(n = n, list(ar = 0.70)) #' Wt = arima.sim(n = n, list(ar = 0.95)) #' #' wv_Xt = wvar(Xt) #' wv_Yt = wvar(Yt) #' wv_Zt = wvar(Zt) #' wv_Wt = wvar(Wt) #' #' compare_wvar(wv_Xt, wv_Yt, wv_Zt, wv_Wt) compare_wvar = function(... , split = FALSE, add_legend = TRUE, units = NULL, xlab = NULL, ylab = NULL, main = NULL, col_wv = NULL, col_ci = NULL, nb_ticks_x = NULL, nb_ticks_y = NULL, legend_position = NULL, ci_wv = NULL, point_cex = NULL, point_pch = NULL, names = NULL, cex_labels = 0.8, x_range = NULL, y_range = NULL){ obj_list = list(...) 
obj_name = as.character(substitute(...())) obj_len = length(obj_list) # Check if passed objects are of class wvar is_wvar = sapply(obj_list, FUN = is, class2 = 'wvar') if(!all(is_wvar == T)){ stop("Supplied objects must be 'wvar' objects.") } # Check length of time series argument if (obj_len == 0){ stop('No object given!') }else if (obj_len == 1){ # -> plot.wvar plot.wvar(..., nb_ticks_x = nb_ticks_x, nb_ticks_y = nb_ticks_y) }else{ if (is.null(xlab)){ if (is.null(units)){ xlab = expression(paste("Scale ", tau, sep ="")) }else{ xlab = bquote(paste("Scale ", "(", .(units), ")", sep = " ")) } }else{ xlab = xlab } if (is.null(ylab)){ ylab = bquote(paste("Wavelet Variance ", nu^2, sep = " ")) }else{ ylab = ylab } if (is.null(ci_wv)){ ci_wv = rep(TRUE, obj_len) }else{ ci_wv = rep(ci_wv, obj_len) } # Main Title if (split == FALSE){ if (is.null(main)){ main = "Haar Wavelet Variance Representation" } }else{ if (!is.null(main) && (dim(main)[1] != obj_len || dim(main)[2] != obj_len)){ main = NULL } } hues = seq(15, 375, length = obj_len + 1) # Line Colors if (is.null(col_wv)){ col_wv = hcl(h = hues, l = 65, c = 200, alpha = 1)[seq_len(obj_len)] }else{ if (length(col_wv) != obj_len){ col_wv = hcl(h = hues, l = 65, c = 200, alpha = 1)[seq_len(obj_len)] } } # CI Colors if (is.null(col_ci)){ col_ci = hcl(h = hues, l = 80, c = 100, alpha = 0.2)[seq_len(obj_len)] }else{ if (length(col_ci) != obj_len){ col_ci = hcl(h = hues, l = 80, c = 100, alpha = 0.2)[seq_len(obj_len)] } } # X and Y Limits #x_range = y_range = rep(NULL, 2) for (i in 1:obj_len){ x_range = range(c(x_range, obj_list[[i]]$scales)) y_range = range(c(y_range, obj_list[[i]]$ci_low, obj_list[[i]]$ci_high)) } x_low = floor(log10(x_range[1])) x_high = ceiling(log10(x_range[2])) y_low = floor(log10(y_range[1])) y_high = ceiling(log10(y_range[2])) # Axes Labels and Ticks if (is.null(nb_ticks_x)){ nb_ticks_x = 6 } if (is.null(nb_ticks_y)){ nb_ticks_y = 5 } x_ticks = seq(x_low, x_high, by = 1) if (length(x_ticks) > nb_ticks_x){ x_ticks = x_low + ceiling((x_high - x_low)/(nb_ticks_x + 1))*(0:nb_ticks_x) } x_labels = sapply(x_ticks, function(i) as.expression(bquote(10^ .(i)))) x_at = 10^x_ticks x_actual_length = sum((x_at < x_range[2])*(x_at > x_range[1])) if (x_actual_length < (3 + as.numeric(split == FALSE))){ x_low = floor(log2(x_range[1])) x_high = ceiling(log2(x_range[2])) x_ticks = seq(x_low, x_high, by = 1) if (length(x_ticks) > 8){ x_ticks = seq(x_low, x_high, by = 2) } x_labels = sapply(x_ticks, function(i) as.expression(bquote(2^ .(i)))) x_at = 2^x_ticks } y_ticks <- seq(y_low, y_high, by = 1) if (length(y_ticks) > nb_ticks_y){ y_ticks = y_low + ceiling((y_high - y_low)/(nb_ticks_y + 1))*(0:nb_ticks_y) } y_labels = sapply(y_ticks, function(i) as.expression(bquote(10^ .(i)))) y_at = 10^y_ticks # Legend position if (is.null(legend_position)){ inter = rep(NA, obj_len) for (i in 1:obj_len){ inter[i] = obj_list[[i]]$variance[1] } mean_wv_1 = mean(inter) if (which.min(abs(c(y_low, y_high) - log2(mean_wv_1))) == 1){ legend_position = "topleft" }else{ legend_position = "bottomleft" } } # Type of Points if (is.null(point_pch)){ inter = rep(15:18, obj_len) point_pch = inter[1:obj_len] }else{ if (length(point_pch) != obj_len){ inter = rep(15:18, obj_len) point_pch = inter[1:obj_len] } } #Size of Points if (is.null(point_cex)){ inter = rep(c(1.25,1.25,1.25,1.25), obj_len) point_cex = inter[1:obj_len] }else{ if (length(point_pch) != obj_len){ inter = rep(c(1.25,1.25,1.25,1.25), obj_len) point_cex = inter[1:obj_len] } } # Names of WVAR Objects if 
(is.null(names)){ names = obj_name }else{ if (length(names) != obj_len){ names = obj_name } } # Arguments passed into compare_wvar_split or compare_wvar_no_split graph_details = list(obj_list = obj_list, obj_len = obj_len, names = names, xlab = xlab, ylab = ylab, col_wv = col_wv, add_legend = add_legend, col_ci = col_ci, main = main, legend_position = legend_position, ci_wv = ci_wv, point_cex = point_cex, point_pch = point_pch, x_range = x_range, y_range = y_range, x_ticks = x_ticks, x_labels = x_labels, y_labels = y_labels, x_at = x_at, y_at = y_at, y_ticks = y_ticks, nb_ticks_x = nb_ticks_x, nb_ticks_y = nb_ticks_y, cex_labels = cex_labels) if (split == FALSE){ # -> compare_wvar_no_split compare_wvar_no_split(graph_details) }else{ # -> compare_wvar_split compare_wvar_split(graph_details) } } }
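# Illustrative sketch (not run): compare_wvar_split() and compare_wvar_no_split() are not
# meant to be called directly; they are dispatched from compare_wvar() through the `split`
# flag. Reusing the wvar objects from the roxygen example above, a matrix-style layout
# would be obtained with
#
#   compare_wvar(wv_Xt, wv_Yt, wv_Zt, wv_Wt, split = TRUE)
#
# while the default split = FALSE overlays all wavelet variances on a single panel via
# compare_wvar_no_split().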
/scratch/gouwar.j/cran-all/cranData/wv/R/wvar.R
# check if packages are installed, and install them when needed inst_pkg = load_pkg = c('rbenchmark', 'microbenchmark', 'ggplot2', 'Rcpp') inst_pkg = inst_pkg[!(inst_pkg %in% installed.packages()[,'Package'])] if (length(inst_pkg)>0) install.packages(inst_pkg) # load all necessary packages pkgs_loaded = lapply(load_pkg, require, character.only = TRUE) sourceCpp("src/dwt.cpp") source("R/dwt.R") set.seed(1) x = rnorm(2^16) # Run benchmark out = benchmark(dwt(x), dwt_bw(x)) # Table Object out # Results: # Note that dwt(x) runs relatively faster than dwt_bw(x) # Some significant relative difference. In terms of bigger datasets this may be hugely beneficial # Test for equality a = dwt_bw(x, nlevels = 4) b = dwt(x, nlevels = 4) all.equal(a,b) # Result: Equal except for class name # Run microbenchmark out = microbenchmark(dwt(x), dwt_bw(x)) # Table Object summary(out) # Violin Plot autoplot(out) # Results: # Again, looking at the microbenchmark results, dwt(x) runs signficantly faster than dwt_bw(x) # We can assume that dwt(x) will be more suitable for our purposes in the future
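# Quick structural check (a sketch, assuming dwt() returns one coefficient vector per level,
# which the all.equal() comparison above already relies on): a DWT halves the number of
# coefficients at each successive level, so for a length 2^16 input we expect
# 32768, 16384, 8192, 4096 coefficients for levels 1 to 4.
sapply(dwt(x, nlevels = 4), length)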
/scratch/gouwar.j/cran-all/cranData/wv/inst/dwt_sim.R
# check if packages are installed, and install them when needed inst_pkg = load_pkg = c('rbenchmark', 'microbenchmark', 'ggplot2', 'Rcpp') inst_pkg = inst_pkg[!(inst_pkg %in% installed.packages()[,'Package'])] if (length(inst_pkg)>0) install.packages(inst_pkg) # load all necessary packages pkgs_loaded = lapply(load_pkg, require, character.only = TRUE) sourceCpp("src/dwt.cpp") source("src/modwt.R") set.seed(1) x = rnorm(10000) # Test for equality a = modwt(x, nlevels = 2) b = modwt_bw(x, nlevels = 2) d = modwt_test(x, nlevels = 2) all.equal(b, d) # Result: Equal except for class name # Run benchmark out = benchmark(modwt_bw(x), modwt_test(x)) out # Results: # Note that modwt_test(x) runs relatively faster than modwt_bw(x) # Some significant relative difference. In terms of bigger datasets this may be hugely beneficial # Run microbenchmark out = microbenchmark(modwt_bw(x), modwt_test(x)) # Violin Plot autoplot(out) # Tests for "reflection" # d = modwt(x, boundary = "reflection", nlevels = 5) # e = modwt_bw(x, boundary = "reflection", nlevels = 5)
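# Tabular summary of the microbenchmark timings (a sketch; `out` is the microbenchmark
# object created above). Useful alongside the violin plot when no graphics device is
# available.
summary(out)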
/scratch/gouwar.j/cran-all/cranData/wv/inst/modwt_sim.R
## Function to compute filter for Spatial decomposition sp_hfilter_r = function(jscale){ g = (1/sqrt(2))*c(1,1)/sqrt(2) h = c(1/sqrt(2),-1/sqrt(2))/sqrt(2) L = 2 if(jscale==1) hup=h if(jscale >1){ zero=c(rep(0,(2^(jscale-1)-1))) hup=h[1] for(i in 1:(L-1)) hup=c(hup,zero,h[(i+1)]) for( j in 0:(jscale-2)){ if(j==0) gup=g zero=c(rep(0,(2^j-1))) temp=g[1] for(i in 1:(L-1)) temp=c(temp,zero,g[(i+1)]) if(j >0){ sala=rep(0,(length(gup)+length(temp)-1)) for(k in 1:length(sala)){ dummy=0 for( u in 1:length(gup)){ if((k-u+1)>0 && (k-u+1)<=length(temp)) dummy=dummy + gup[u]*temp[(k-u+1)] } sala[k]=dummy } gup=sala } } sala=rep(0,(length(hup)+length(gup)-1)) for(k in 1:length(sala)){ dummy=0 for( u in 1:length(hup)){ if((k-u+1)> 0 && (k-u+1)<=length(gup)) dummy=dummy+hup[u]*gup[(k-u+1)] } sala[k]=dummy } hup=sala } hup } ## General function to compute wavelet coefficients for spatial cases sp_modwt_r = function(X, J1 = floor(log2(dim(X)[1]-1)), J2 = floor(log2(dim(X)[2]-1))){ n = dim(X)[1] m = dim(X)[2] nb.level = J1*J2 i = 0 k = 0 for(j1 in 1:J1){ for(j2 in 1:J2){ hfil1 = sp_hfilter(j1) hfil2 = sp_hfilter(j2) mm1 = 2^j1 mm2 = 2^j2 pp1 = n - mm1 + 1 pp2 = m - mm2 + 1 if(n >= m){ xh = xhh = matrix(NA,n,n) }else{ xh = xhh = matrix(NA,m,m) } for(spt in 1:n){ for(tpt in 1:pp1) { xts=X[tpt:(tpt+mm1-1),spt] xh[ (tpt+mm1%/%2), spt] = sum(xts*hfil1) } } for(tpt in 1:pp1){ for(spt in 1:pp2) { xts=xh[(tpt+mm1%/%2),spt:(spt+mm2-1)] xhh[(tpt+mm1%/%2), (spt+mm2%/%2)] = sum(xts*hfil2) } } i = i+1 wv.coeff = c(xhh) wv.coeff = wv.coeff[!is.na(wv.coeff)] } } }
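## Quick check of the filter construction above (a sketch): at scale j the recursion should
## reproduce the MODWT-normalised Haar wavelet filter of length 2^j, whose coefficients sum
## to zero.
sp_hfilter_r(1)       # c(0.5, -0.5)
sp_hfilter_r(2)       # c(0.25, 0.25, -0.25, -0.25)
sum(sp_hfilter_r(3))  # numerically zero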
/scratch/gouwar.j/cran-all/cranData/wv/inst/sp_modwt_rough.R
library(wv) par(mfrow = c(1,2)) # Test white noise Xt = rnorm(10000) wv = wvar(Xt) plot(wv, title = "White noise") lines(wv$scales, 1/wv$scales, col = "darkorange") # Test random walk Yt = cumsum(Xt) wv = wvar(Yt) plot(wv, title = "Random walk") lines(wv$scales, (wv$scales^2 + 2)/(12*wv$scales), col = "darkorange")
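# Test a sum of independent white noise and random walk components: the Haar wavelet
# variance of independent processes is additive, so the theoretical curve is the sum of the
# two curves used above (a sketch; both innovation variances equal 1).
par(mfrow = c(1,1))
Zt = rnorm(10000) + cumsum(rnorm(10000))
wv = wvar(Zt)
plot(wv, title = "White noise + random walk")
lines(wv$scales, 1/wv$scales + (wv$scales^2 + 2)/(12*wv$scales), col = "darkorange")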
/scratch/gouwar.j/cran-all/cranData/wv/inst/test_wvar.R
# V_WS_quantile_far comuputes the 1-alpha qunatile of the beta * chi-squared distribution with nu # degrees of freedom, where beta and nu are obtained from a Welch-Satterthwaite approximation # of the test statistic V_K. This quantile is used to conduct an approximate size alpha test # for the adequacy of FAR(1) models . # Input: eg = the model residual matrix with functions in columns # f = the matrix adjusting the dependence caused by estimating the kernel operator # lag = specifies the range of lags 1:K for the test statistic V_K # alpha = the significance level to be used in the hypothesis test # M = optional argument specifying the sampling size in the related Monte Carlo method # Output: scalar value of the 1-alpha quantile of the beta * chi-square distribution with nu # degrees of freedom (which approximates V_K) V_WS_quantile_far<-function (eg, f, lag, alpha = 0.05, M = 10000) { mean_V_K <- mean_hat_V_K_far(eg, f, lag) var_V_K <- variance_hat_V_K_far(eg, f, lag, M = M) beta <- var_V_K/(2 * mean_V_K) nu <- 2 * (mean_V_K^2)/var_V_K quantile <- beta * qchisq(1 - alpha, nu) statistic <- t_statistic_V(eg, lag) p_val <- pchisq(statistic/beta, nu, lower.tail = FALSE) list(statistic = statistic, quantile = quantile, p_value = p_val) } # mean_hat_V_K_far computes the approximation of the mean which is used in the Welch- # Satterthwaite approximation as mean of the chi-squared random variable approximating V_K. # Input: eg = the model residual matrix with functions in columns # f = the matrix adjusting the dependence caused by estimating the kernel operator # lag = specifies the range of lags 1:K for for the test statistic V_K # Output: scalar approximation of the mean of the test statistic V_K. mean_hat_V_K_far<-function (eg, f, lag) { J <- NROW(eg) sum1 <- 0 store <- covariance_diag_store_far(eg, f, lag) for (i in 1:lag) { sum1 <- sum1 + sum(store[[i]]) } mu_hat_V_K <- (1/(J^2)) * sum1 mu_hat_V_K } # covariance_diag_store_far returns a list storage of the approximate covariances c^hat_i_i(t,s,t,s), # for all i in 1:K, for each encoding all values of t,s in U_J X U_J. # Input: eg = the model residual matrix with functions in columns # f = the matrix adjusting the dependence caused by estimating the kernel operator # lag = specifies the range of lags 1:K for for the test statistic V_K # Output: a list containing K 2-D arrays encoding c^hat_i_j(t,s,t,s) evaluated at all (t,s) in # U_JxU_J, for i in 1:K covariance_diag_store_far<-function (eg, f, lag) { cov_i_store <- list() for (j in 1:lag) { cov_i_store[[j]] <- diagonal_covariance_i_far(eg, f, j) } cov_i_store } # diagonal_covariance_i_j returns the approximate covariance c^hat_i_i(t,s,t,s), encoding all # values of t,s in U_J X U_J, i in 1:T. # Input: eg = the model residual matrix with functions in columns # f = the matrix adjusting the dependence caused by estimating the kernel operator # lag = specifies the range of lags 1:K for for the test statistic V_K # Output: a 2-D array encoding c^hat_i_j(t,s,t,s) evaluated at all (t,s) in U_JxU_J. 
diagonal_covariance_i_far<-function (eg, f, lag) { N = NCOL(eg) J = NROW(eg) e_sq_times_e_sq<-(eg[,(1+lag):N])^2%*%t((eg[,1:(N-lag)])^2)/N e_sq_times_f_sq<-(eg[,(1+lag):N])^2%*%(f[lag:(N-1),,lag])^2/N e_sq_ef<- (eg[,(1+lag):N])^2%*%t(eg[,1:(N-lag)]*t(f[lag:(N-1),,lag]))/N ee_ef_sq<-eg[,(1+lag):N]%*%t(eg[,1:(N-lag)])/N - eg[,(1+lag):N]%*%f[lag:(N-1),,lag]/N cov<-e_sq_times_e_sq + e_sq_times_f_sq - 2*e_sq_ef - (ee_ef_sq)^2 cov } # variance_hat_V_K_far computes the approximation of the variance which is used in # the Welch- Satterthwaite approximation as the variance of the chi-squared random variable # approximating V_K. # Input: eg = the model residual matrix with functions in columns # f = the matrix adjusting the dependence caused by estimating the kernel operator # lag = specifies the range of lags 1:K for for the test statistic V_K # M = optional argument specifying the sampling size in the related Monte Carlo method # Output: scalar approximation of the variance of the test statistic V_K. variance_hat_V_K_far<-function (eg, f, lag, M = NULL) { N <- NCOL(eg) K=lag sum1 <- 0 for (i in 1:K) { sum1 <- sum1 + MCint_eta_approx_i_j_far(eg, f, i, i, M = M) } bandwidth <- ceiling(0.25 * (N^(1/3))) if (K > 1) { for (i in 1:(K - 1)) { for (j in (i + 1):K) { if (abs(i - j) > bandwidth) { next } sum1 <- sum1 + (2 * MCint_eta_approx_i_j_far(eg, f, i, j, M = M)) } } } variance_V_K <- sum1 variance_V_K } # MCint_eta_approx_i_j_far computes an approximation using the # Monte Carlo integration method "MCint".. # Input: eg = the model residual matrix with functions in columns # f = the matrix adjusting the dependence caused by estimating the kernel operator # lag = specifies the range of lags 1:K for for the test statistic V_K # i,j = the indices i,j in 1:T that we are computing eta^hat_i_j for # M = number of vectors (v1, v2, v3, v4) to sample uniformly from U_J X U_J X U_J X U_J # Output: scalar value of eta^_hat_i_j computed using the MCint method. MCint_eta_approx_i_j_far<-function (eg, f, i, j, M = NULL) { J <- NROW(eg) N <- NCOL(eg) if (is.null(M)) { M = floor((max(150 - N, 0) + max(100 - J, 0) + (J/sqrt(2)))) } rand_samp_mat <- matrix(nrow = M, ncol = 4) rand_samp_mat <- cbind(sample(1:J, M, replace = TRUE),sample(1:J, M, replace = TRUE),sample(1:J, M, replace = TRUE),sample(1:J, M, replace = TRUE)) eta_hat_i_j_sum <- 0 for (k in 1:M) { cov <- scalar_covariance_i_j_far(eg, f, i, j, rand_samp_mat[k, ]) eta_hat_i_j_sum <- eta_hat_i_j_sum + (cov^2) } eta_hat_i_j <- (2/M) * eta_hat_i_j_sum eta_hat_i_j } # scalar_covariance_i_j_far returns the approximate covariance c^hat_i_j(t,s,u,v) evaluated at a # given t,s,u,v in U_J X U_J X U_J X U_J (for use in MCint method). # Input: eg = the model residual matrix with functions in columns # f = the matrix adjusting the dependence caused by estimating the kernel operator # i,j = the indices i,j in 1:N that we are computing the covariance for # times = a 4-element vector representing the values (t,s,u,v) # Output: scalar value of the computed covariance c^hat_i_j(t,s,u,v). 
# scalar_covariance_i_j_far<-function (eg, f, i, j, times) { J <- NROW(eg) N <- NCOL(eg) k=1+max(i,j) iuv<-eg[times[1], k:N] * eg[times[2], (k-i):(N-i)] - eg[times[1], k:N] * f[(k-1):(N-1), times[2], i] juv<-eg[times[3], k:N] * eg[times[4], (k-j):(N-j)] - eg[times[3], k:N] * f[(k-1):(N-1), times[4], j] Eiuv<-t(eg[times[1],(1+i):N])%*%eg[times[2],1:(N-i)]/N - t(eg[times[1], (1+i):N])%*%f[i:(N-1), times[2], i]/N Ejuv<-t(eg[times[3],(1+j):N])%*%eg[times[4],1:(N-j)]/N - t(eg[times[3], (1+j):N])%*%f[j:(N-1), times[4], j]/N cov <- sum(iuv * juv)/N - Eiuv*Ejuv cov }
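# Illustrative sketch (not run): the functions above expect a J x N residual matrix `eg`
# (functions in columns) and a 3-dimensional adjustment array `f` of dimension
# (N-1) x J x K, where f[, , h] adjusts for the estimation of the kernel operator at lag h.
# A zero adjustment array makes the correction terms vanish, which gives a simple smoke
# test of the code path:
#
#   eg <- brown_motion(N = 100, J = 25)       # hypothetical "residuals", 25 x 100
#   f  <- array(0, dim = c(99, 25, 5))
#   V_WS_quantile_far(eg, f, lag = 5)         # list(statistic, quantile, p_value)
#
# In practice `f` is produced by the FAR(1) kernel estimation step, not constructed by hand.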
/scratch/gouwar.j/cran-all/cranData/wwntests/R/GOF_far.R
# diagonal_autocov_approx_0 computes the intergral of y^hat_0(t,t) with respect to \mu(dt). # Input: f_data = the functional data matrix with functions in columns # Output: scalar value of the integral of y^hat_0(t,t) with respect to \mu(dt). # # roxygen comments: #' Compute the diagonal covariance #' #' `diagonal_autocov_approx_0` Computes the diagonal covariance of the given functional data. #' #' @param f_data the functional data matrix with observed functions in the columns #' @return A numeric value; integral approximation of the diagonal covariance of the functional data. diagonal_autocov_approx_0 <- function(f_data) { J <- NROW(f_data) gamma_hat_0 <- autocov_approx_h(f_data, 0) sum(diag(gamma_hat_0)) / J } # autocorrelation_coff_h computes the approximate functional autocorrelation coefficient # rho^hat_h at lag h, defined in (17) # Input: f_data = the functional data matrix with functions in columns # lag = lag for which to compute the coefficient # Output: scalar value of the approximate functional autocorrelation coefficient at lag h. # # roxygen comments: #' `autocorrelation_coeff_h` Computes the approximate functional autocorrelation coefficient at a given lag. #' #' @param f_data the functional data matrix with observed functions in the columns #' @param lag the lag to use to compute the single lag test statistic #' @return numeric value; the approximate functional autocorrelation coefficient at lag h. autocorrelation_coeff_h <- function(f_data, lag) { N <- NCOL(f_data) num <- sqrt(t_statistic_Q(f_data, lag)) denom <- sqrt(N) * diagonal_autocov_approx_0(f_data) coefficient <- num / denom coefficient } # B_h_bound returns an approximate asymptotic upper 1-alpha confidence bound for the functional # autocorrelation coefficient at lag h under the assumption that f_data forms a weak white # noise. # Input: f_data = the functional data matrix with functions in columns # lag = the lag for which to ccmpute the bound # alpha = significance level of the bound # M = optional argument specifying the sampling size in the related Monte Carlo method # Output: scalar value of the 1-alpha confidence bound for the functional autocorrelation # coefficient at lag h under a weak white noise assumption. # # roxygen comments: #' Compute weak white noise confidence bound for autocorrelation coefficient. #' #' `B_h_bound` Computes an approximate asymptotic upper 1-alpha confidence bound for the functional #' autocorrelation coefficient at lag h under a weak white noise assumption. #' #' @param f_data the functional data matrix with observed functions in the columns #' @param lag the lag to use to compute the single lag test statistic #' @param alpha the significance level to be used in the hypothesis test #' @param M Number of samples to take when applying a Monte-Carlo approximation #' @return numeric value; the 1-alpha confidence bound for the functional autocorrelation #' coefficient at lag h under a weak white noise assumption. B_h_bound <- function(f_data, lag, alpha=0.05, M=NULL) { N <- NCOL(f_data) quantile = Q_WS_quantile(f_data, lag, alpha=alpha, M=M)$quantile num <- sqrt(quantile) denom <- sqrt(N) * diagonal_autocov_approx_0(f_data) bound <- num / denom bound } # B_h_bound returns an approximate asymptotic upper 1-alpha confidence bound for the functional # autocorrelation coefficient at lag h under the assumption that f_data forms a strong # white noise. 
# Input: f_data = the functional data matrix with functions in columns # alpha = significance level of the bound # Output: scalar value of the 1-alpha confidence bound for the functional autocorrelation # coefficient at lag h under a strong white noise assumption. # # roxygen comments: #' Compute strong white noise confidence bound for autocorrelation coefficient. #' #' `B_iid_bound` Computes an approximate asymptotic upper 1-alpha confidence bound for the functional #' autocorrelation coefficient at lag h under the assumption that f_data forms a strong white noise #' #' @param f_data the functional data matrix with observed functions in the columns #' @param alpha the significance level to be used in the hypothesis test #' @return Numeric value; the 1-alpha confidence bound for the functional autocorrelation coefficient #' at lag h under a strong white noise assumption. #' @rdname B_iid_bound B_iid_bound <- function(f_data, alpha=0.05) { N <- NCOL(f_data) quantile_iid = Q_WS_quantile_iid(f_data, alpha=alpha)$quantile num <- sqrt(quantile_iid) denom <- sqrt(N) * diagonal_autocov_approx_0(f_data) bound <- num / denom bound }
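# Illustrative sketch (not run): the coefficient and the two bounds above are typically
# combined into a functional ACF-style plot, e.g. for lags 1 to 20 of a functional data
# matrix f_data (functions in columns):
#
#   lags   <- 1:20
#   rho    <- sapply(lags, function(h) autocorrelation_coeff_h(f_data, h))
#   wn_bd  <- sapply(lags, function(h) B_h_bound(f_data, h))
#   iid_bd <- B_iid_bound(f_data)
#   plot(lags, rho, type = "h", ylim = c(0, max(rho, wn_bd, iid_bd)))
#   lines(lags, wn_bd, lty = 2)    # weak white noise bound, lag dependent
#   abline(h = iid_bd, lty = 3)    # strong white noise bound, constant across lags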
/scratch/gouwar.j/cran-all/cranData/wwntests/R/autocorrelation_bound_functions.R
#' `block_bootstrap` Performs a block bootstrap on the functional data f_data with block size b. #' #' @param f_data the functional data matrix with observed functions in the columns #' @param b the block size (of each block in each bootstrap sample) #' @param B the number of bootstraps samples #' @param moving boolean value specifying whether the block bootstrap should be moving or not. A moving black #' bootstrap samples individual functional observations and adds on the consequent block, rather than sampling #' blocks of the data. #' @return Returns a list of B elements, each element being a block bootstrap sample in the same format #' as the original functional data f_data. #' #' @export #' block_bootsrap <- function(f_data, b, B = 300, moving = FALSE) { N <- NCOL(f_data) if (b > N) { stop("Please select a block size that is less than or equal to the sample size of the functional data. It is best to select a block size that evenly divides the sample size.") } else if (b < 1) { stop("The block size must be a positive integer.") } else if (B < 1) { stop("The number of bootstrap samples must be a positive integer.") } blocks <- list() M <- floor(N / b) for (s in 1:M) { blocks[[s]] <- (b*(s - 1) + 1):(b*s) } bootstrap_samples <- list() for (j in 1:B) { if (moving == FALSE) { samples <- sample(1:M, M, replace = TRUE) bootstrapped_data <- f_data[,blocks[[samples[1]]]] for (i in samples[-1]) { bootstrapped_data <- cbind(bootstrapped_data, f_data[,blocks[[samples[i]]]]) } } else if (moving == TRUE) { samples <- sample(1:(N - b), M, replace = TRUE) bootstrapped_data <- f_data[, samples[1]:(samples[1] + b)] for (i in 2:M) { bootstrapped_data <- cbind(bootstrapped_data, f_data[,samples[i]:(samples[i] + b)]) } } bootstrap_samples[[j]] <- bootstrapped_data } bootstrap_samples }
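# Illustrative sketch (not run): a stationary (non-moving) block bootstrap of Brownian
# motion curves. When the block size evenly divides the sample size, each bootstrap sample
# has the same dimensions as the original data matrix.
#
#   bm    <- brown_motion(N = 100, J = 20)
#   boots <- block_bootsrap(bm, b = 10, B = 5)
#   length(boots)      # 5
#   dim(boots[[1]])    # 20 x 100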
/scratch/gouwar.j/cran-all/cranData/wwntests/R/block_bootstrap.R
# autocov_approx_h computes the approximate autocovariance gamma^hat_h(t,s) for a given lag (h) # for every (t,s) in U_J X U_J. # Input: f_data = the functional data matrix with observed functions in columns # lag = the fixed lag for which to compute gamma^hat_h(t,s) # Output: a 2-D array encoding the values of gamma^hat_h(t,s) for every (t,s) in U_J X U_J. # # roxygen comments: #' Compute the approximate autocovariance at specified lag #' #' `autocov_approx_h` computes the approximate autocovariance for a given lag h of the functional #' data #' #' @param f_data the functional data matrix with observed functions in the columns #' @param lag the lag to use to compute the single lag test statistic #' @return A 2-dimensional array encoding the autocovariance matrix for a given lag h. autocov_approx_h <- function(f_data, lag) { N = NCOL(f_data) c_f_data <- center(f_data) gamma_hat_sum <- 0 for (i in 1:(N-lag)) { gamma_hat_sum <- gamma_hat_sum + c_f_data[,i] %o% c_f_data[,i+lag] } gamma_hat <- gamma_hat_sum / N gamma_hat } # covariance_i_j returns the approximate covariance c^hat_i_j(t,s,u,v), encoding all values of # (t,s,u,v) in U_J X U_J X U_J X U_J, i,j in 1:T. Both T and J are inferred from f_data. # Input: f_data = the functional data matrix with functions in columns # i,j = the indices i,j in 1:T that we are computing the covariance for # Output: a 4-D array encoding c^hat_i_j(t,s,u,v) evaluated at all (t,s,u,v) in U_JxU_JxU_JxU_J. # # roxygen comments: #' Compute the approximate covariance tensor for lag windows defined by i,j #' #' `covariance_i_j` computes the approximate covariance tensor of the functional data for lag #' windows defined by i,j. #' #' @param f_data the functional data matrix with observed functions in the columns #' @param i,j the indices i,j in 1:T that we are computing the covariance for #' @return A 4-dimensional array, encoding the covariance tensor of the functional data for lag #' windows defined by i,j. covariance_i_j <- function(f_data, i, j) { N = NCOL(f_data) J = NROW(f_data) c_f_data <- center(f_data) sum <- array(0, c(J, J, J, J)) for (k in (1+max(i,j)):N) { sum <- sum + c_f_data[,k-i] %o% c_f_data[,k] %o% c_f_data[,k-j] %o% c_f_data[,k] } cov <- sum / N cov } # covariance_i_j_vec is a vectorized version of the function covariance_i_j # Input: f_data = the functional data matrix with functions in columns # i,j = the indices i,j in 1:T that we are computing the covariance for # Output: a 4-D array encoding c^hat_i_j(t,s,u,v) evaluated at all (t,s,u,v) in U_JxU_JxU_JxU_J. # # roxygen comments: #' Compute the approximate covariance tensor for lag windows defined by i,j #' #' `covariance_i_j_vec` computes the approximate covariance tensor of the functional data for lag #' windows defined by i,j; a vectorized version of covariance_i_j. #' #' @param f_data the functional data matrix with observed functions in the columns #' @param i,j the indices i,j in 1:T that we are computing the covariance for #' @return A 4-dimensional array, encoding the covariance tensor of the functional data for lag #' windows defined by i,j. covariance_i_j_vec <- function(f_data, i, j) { N = NCOL(f_data) J = NROW(f_data) c_f_data <- center(f_data) sum_parts <- as.list((1+max(i,j)):N) sum_parts <- lapply(sum_parts, function(k) c_f_data[,k-i] %o% c_f_data[,k] %o% c_f_data[,k-j] %o% c_f_data[,k]) cov <- Reduce('+', sum_parts) cov / N } # diagonal_covariance_i_j returns the approximate covariance c^hat_i_i(t,s,t,s), encoding all # values of t,s in U_J X U_J, i in 1:T. 
# Input: f_data = the functional data matrix with functions in columns # i = the index i in 1:T that we are computing the covariance diagonal for # Output: a 2-D array encoding c^hat_i_j(t,s,t,s) evaluated at all (t,s) in U_JxU_J. # # roxygen comments: #' Compute the approximate diagonal covariance matrix for lag windows defined by i #' #' `diagonal_covariance_i` computes the approximate diagonal covariance matrix of the functional #' data for lag windows defined by i. #' #' @param f_data the functional data matrix with observed functions in the columns #' @param i the index in 1:T that we are computing the covariance for #' @return A 2-dimensional array, encoding the covariance matrix of the functional data for lag #' windows defined by i. diagonal_covariance_i <- function(f_data, i) { N = NCOL(f_data) J = NROW(f_data) c_f_data <- center(f_data) sum1 <- array(0, c(J, J)) for (k in (1+i):N) { sum1 <- sum1 + ((c_f_data[,k-i])^2 %o% (c_f_data[,k])^2) } cov <- (1 / N) * sum1 cov } # scalar_covariance_i_j returns the approximate covariance c^hat_i_j(t,s,u,v) evaluated at a # given t,s,u,v in U_J X U_J X U_J X U_J (for use in MCint method). # Input: f_data = the functional data matrix with functions in columns # i,j = the indices i,j in 1:T that we are computing the covariance for # times = a 4-element vector representing the values (t,s,u,v) # Output: scalar value of the computed covariance c^hat_i_j(t,s,u,v). # # roxygen comments: #' Compute the approximate covariance at a point for lag windows defined by i,j #' #' `scalar_covariance_i_j` computes the approximate covariance at a point of the functional data #' for lag windows defined by i,j; a scalarized version of covariance_i_j that takes point estimates. #' #' @param f_data the functional data matrix with observed functions in the columns #' @param i,j the indices i,j in 1:T that we are computing the covariance for #' @param times A vector with 4 columns containing indices specifying which subset of f_data to consider #' @return A numeric value; the covariance of the functional data at a point for lag #' windows defined by i,j. scalar_covariance_i_j <- function(f_data, i, j, times) { J <- NROW(f_data) N <- NCOL(f_data) c_f_data <- center(f_data) sum1 <- 0 for (k in (1+max(i,j)):N) { sum1 <- sum1 + c_f_data[times[1],k-i] * c_f_data[times[2],k] * c_f_data[times[3],k-j] * c_f_data[times[4],k] } cov <- (1/N) * sum1 cov } # scalar_covariance_i_j_vec is a vectorized version of the function scalar_covariance_i_j # Input: f_data = the functional data matrix with functions in columns # i,j = the indices i,j in 1:T that we are computing the covariance for # times = a 4-element vector representing the values (t,s,u,v) # Output: scalar value of the computed covariance c^hat_i_j(t,s,u,v). # # roxygen comments: #' Compute the approximate covariance at a point for lag windows defined by i,j #' #' `scalar_covariance_i_j_vec` computes the approximate covariance at a point of the functional data #' for lag windows defined by i,j; a vectorized version of scalar_covariance_i_j. #' #' @param f_data the functional data matrix with observed functions in the columns #' @param i,j the indices i,j in 1:T that we are computing the covariance for #' @param times A vector with 4 columns containing indices specifying which subset of f_data to consider #' @return A numeric value; the covariance of the functional data at a point for lag #' windows defined by i,j. 
scalar_covariance_i_j_vec <- function(f_data, i, j, times) { J <- NROW(f_data) N <- NCOL(f_data) c_f_data <- center(f_data) sum_parts <- list((1+max(i,j)):N) sum_parts <- lapply(sum_parts, function(k) c_f_data[times[1],k-i] * c_f_data[times[2],k] * c_f_data[times[3],k-j] * c_f_data[times[4],k]) cov <- (1/N) * Reduce('+', sum_parts) cov } # iid_covariance returns one of the two independent sum terms in the approximate covariance # c^*_0(t,s,u,v) definition, encoding all values of (t,s) in U_J X U_J, i,j in 1:T. # Input: f_data = the functional data matrix with functions in columns # Output: returns a 2-D tensor of c^*(t,s), one of the two independent sums in the computation # of c^*(t,s,u,v). # # roxygen comments: #' Compute part of the covariance under a strong white noise assumption #' #' `iid_covariance` A helper function used to compute one of the two independent sum terms in the #' computation of the approximate covariance of the functional data under a strong white noise assumption. #' #' @param f_data the functional data matrix with observed functions in the columns #' @return A 2-dimensional matrix containing one of the two independent sums in the computation of the #' covariance. iid_covariance <- function(f_data) { N <- NCOL(f_data) c_f_data <- center(f_data) sum1 <- 0 for (i in 1:N) { sum1 <- sum1 + c_f_data[,i] %o% c_f_data[,i] } sum1 / N } # # roxygen comments: #' Compute part of the covariance under a strong white noise assumption #' #' `iid_covariance_vec` A helper function used to compute one of the two independent sum terms in the #' computation of the approximate covariance of the functional data under a strong white noise assumption; #' a vectorized version of iid_covariance. #' #' @param f_data the functional data matrix with observed functions in the columns #' @return A 2-dimensional matrix containing one of the two independent sums in the computation of the #' covariance. iid_covariance_vec <- function(f_data) { N <- NCOL(f_data) c_f_data <- center(f_data) sum_parts <- 1:N sum_parts <- lapply(sum_parts, function(i) c_f_data[,i] %o% c_f_data[,i]) cov <- (1 / N) * Reduce('+', sum_parts) cov } # covariance_diag_store returns a list storage of the approximate covariances c^hat_i_i(t,s,t,s), # for all i in 1:K, for each encoding all values of t,s in U_J X U_J. # Input: f_data = the functional data matrix with functions in columns # K = the maximum value of i in the range 1:K for which to compute c^hat_i_i(t,s,t,s) # Output: a list containing K 2-D arrays encoding c^hat_i_j(t,s,t,s) evaluated at all (t,s) in # U_JxU_J, for i in 1:K # # roxygen comments: #' List storage of diagonal covariances. #' #' `covariance_diag_store` Creates a list storage of approximate diagonal covariances computed #' by the function diagonal_covariance_i #' #' @param f_data the functional data matrix with observed functions in the columns #' @param K the range of lags 1:K to use #' @return A list containing K 2-dimensional arrays containing the diagonal covariance matrices of the #' functional data, for lags h in the range 1:K. covariance_diag_store <- function(f_data, K) { cov_i_store <- list() for (j in 1:K) { cov_i_store[[j]] <- diagonal_covariance_i(f_data, j) } cov_i_store }
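# Illustrative sketch (not run): for independent Brownian motion curves the lag-0
# autocovariance surface estimated by autocov_approx_h() should approximate the Wiener
# covariance kernel min(s, t) on the sampling grid, while surfaces at higher lags should
# be close to zero.
#
#   bm      <- brown_motion(N = 500, J = 25)
#   gamma_0 <- autocov_approx_h(bm, 0)    # roughly min(s, t) on the grid
#   gamma_3 <- autocov_approx_h(bm, 3)    # entries near zero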
/scratch/gouwar.j/cran-all/cranData/wwntests/R/covariance_functions.R
#' `brown_motion` Creates at J x N matrix, containing N independent Brownian motion sample paths in #' each of the columns. #' #' @param N the number of independent Brownian motion sample paths to compute. #' @param J the number of steps observed for each sample path (the resolution of the data). #' @return A J x N matrix containing Brownian motion functional data in the columns. #' @examples #' b <- brown_motion(250, 50) #' #' @import sde #' #' @export brown_motion <- function(N, J) { motion <- matrix(nrow = J, ncol = N) for (i in 1:N) { motion[,i] <- as.vector(BM(N=J-1)) } as.array(motion) } #' `fgarch_1_1` Simulates an fGARCH(1,1) process with N independent observations, each observed # discretely at J points on the interval [0,1]. Uses the Ornstein-Uhlenbeck process. #' #' @param N the number of fGARCH(1,1) curves to sample. #' @param J the number of points at which each curve is sampled (the resolution of the data). #' @param delta a parameter used in the variance recursion of the model. #' @param burn_in the number of initial samples to burn (discard). #' @return A list containing two J x N matrices, the former containing the sample of fGARCH(1,1) #' curves and the latter containing the respective variance values. #' @examples #' f <- fgarch_1_1(100, 50) #' #' @import MASS #' @import sde #' @export fgarch_1_1 <- function(N, J, delta=0.01, burn_in=50) { grid <- (1:J) / J error_mat <- matrix(nrow=J, ncol=N+burn_in) covariance_mat <- matrix(0, nrow = J, ncol = J) for (i in 1:J) { for (j in 1:J) { covariance_mat[i,j] <- exp(-(grid[i]+grid[j]) / 2) * min(exp(grid[i]), exp(grid[j])) } } means <- rep(0,J) for (i in 1:(N+burn_in)) { error_mat[,i] = mvrnorm(1, mu=means, Sigma=covariance_mat) } alpha_op <- beta_op <- function(t,s) { 12*t*(1-t)*s*(1-s) } garch_mat <- sigma2_mat <- matrix(nrow=J, ncol=N+burn_in) int_approx <- function(x) { sum(x) / NROW(x) } sigma2_mat[,1] <- rep(delta, J) garch_mat[,1] <- sqrt(delta) * error_mat[,1] for (i in 2:(N+burn_in)) { for (u in 1:J) { alpha_op_vec <- alpha_op(grid[u], grid) * (garch_mat[,i-1] ^ 2) beta_op_vec <- beta_op(grid[u], grid) * sigma2_mat[,i-1] sigma2_mat[u,i] <- delta + int_approx(alpha_op_vec) + int_approx(beta_op_vec) } garch_mat[,i] <- sqrt(sigma2_mat[,i]) * error_mat[,i] } garch_mat[,(burn_in+1):(burn_in+N)] } #' `far_1_S` Simulates an FAR(1,S)-fGARCH(1,1) process with N independent observations, each #' observed discretely at J points on the interval [0,1]. #' #' @param N the number of fGARCH(1,1) curves to sample. #' @param J the number of points at which each curve is sampled (the resolution of the data). #' @param S the autoregressive operator of the model, between 0 and 1, indicating the level of #' conditional heteroscedasticity. #' @param type the assumed model of the error term. The default argument is 'IID', under which #' the errors are assumed to be independent and identically distributed. The alternative argument #' is 'fGARCH', which will assume that the errors follow an fGARCH(1,1) process. #' @param burn_in the number of initial samples to burn (discard). #' @return A J x N matrix containing FAR(1,S) functional data in the columns. 
#' @examples #' f <- far_1_S(100, 50, 0.75) #' #' @import MASS #' @export far_1_S <- function(N, J, S, type='IID', burn_in=50) { grid <- (1:J) / J if (type == 'IID') { error_mat <- brown_motion(N + burn_in, J) } else if (type == 'fGARCH') { error_mat <- fgarch_1_1(N + burn_in, J)$Garch } func <- function(t,s) { exp(-((t^2 + s^2) / 2)) } sum1 <- 0 for (t in grid) { for (s in grid) { sum1 <- sum1 + (func(t,s)^2) } } phi_norm_w_out_c <- sqrt(sum1 / (J^2)) abs_c <- S / phi_norm_w_out_c phi_c_t_s <- function(t, s, c=abs_c) { c * exp(-((t^2 + s^2) / 2)) } far_mat <- matrix(0, nrow=J, ncol=N+burn_in) far_mat[,1] <- error_mat[,1] for (i in 2:(N+burn_in)) { for (j in 1:J) { far_mat[j,i] <- (sum(phi_c_t_s(grid[j], grid) * far_mat[,i-1]) / J) + error_mat[j,i] } } far_mat[,(burn_in+1):(burn_in+N)] }
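# Illustrative sketch (not run): the kernel is rescaled so that its (discretised)
# Hilbert-Schmidt norm equals S, so S = 0 switches the autoregression off and far_1_S()
# reduces to the iid error process, while values close to 1 produce strongly dependent
# curves.
#
#   f_indep <- far_1_S(100, 50, S = 0)      # effectively iid errors
#   f_dep   <- far_1_S(100, 50, S = 0.9)    # strongly autocorrelated curves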
/scratch/gouwar.j/cran-all/cranData/wwntests/R/data_simulations.R
# center centers the functional data f_data by subtracting the row means from the data.
# Input: f_data = the functional data matrix with observed functions in columns
# Output: a matrix containing the centered functional data
#
# roxygen comments:
#' Center functional data
#'
#' `center` Centers the given functional data
#'
#' @param f_data the functional data matrix with observed functions in the columns
#' @return A matrix of the same form as f_data containing the centered functional data.
center <- function(f_data) {
  f_data - rowMeans(f_data)
}
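# Illustrative sketch (not run): after centering, every row mean is zero up to floating
# point error.
#
#   bm <- brown_motion(50, 20)
#   max(abs(rowMeans(center(bm))))   # ~ 0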
/scratch/gouwar.j/cran-all/cranData/wwntests/R/helper_functions.R
# V_WS_hyp_test comuputes the 1-alpha qunatile of the beta * chi-squared distribution with nu # degrees of freedom, where beta and nu are obtained from a Welch-Satterthwaite approximation # of the test statistic V_K. This quantile is used to conduct an approximate size alpha test # of the hypothesis H'_0_K. # Input: f_data = the functional data matrix with functions in columns # K = specifies the range of lags 1:K for the test statistic V_K # alpha = the significance level to be used in the hypothesis test # M = optional argument specifying the sampling size in the related Monte Carlo method # Output: scalar value of the 1-alpha quantile of the beta * chi-square distribution with nu # degrees of freedom (which approximates V_K) V_WS_quantile <- function(f_data, K, alpha=0.05, M=NULL) { mean_V_K <- mean_hat_V_K(f_data, K) var_V_K <- variance_hat_V_K(f_data, K, M=M) beta <- var_V_K / (2 * mean_V_K) nu <- 2 * (mean_V_K^2) / var_V_K quantile <- beta * qchisq(1 - alpha, nu) statistic <- t_statistic_V(f_data, K) p_val <- pchisq(statistic / beta, nu, lower.tail = FALSE) list(statistic = statistic, quantile = quantile, p_value = p_val) } V_WS_quantile_iid <- function(f_data, K, alpha=0.05) { mean_V_K <- mean_hat_V_K_iid(f_data, K) var_V_K <- variance_hat_V_K_iid(f_data, K) beta <- var_V_K / (2 * mean_V_K) nu <- 2 * (mean_V_K^2) / var_V_K quantile <- beta * qchisq(1 - alpha, nu) statistic <- t_statistic_V(f_data, K) p_val <- pchisq(statistic / beta, nu, lower.tail = FALSE) list(statistic = statistic, quantile = quantile, p_value = p_val) } # Q_WS_hyp_test comuputes the 1-alpha qunatile of the beta * chi-squared distribution with nu # degrees of freedom, where beta and nu are obtained from a Welch-Satterthwaite approximation # of the test statistic Q_h. This quantile is used to conduct an approximate size alpha test # of the hypothesis H_0_h. # Input: f_data = the functional data matrix with functions in columns # lag = specifies the lag used for the test statistic Q_h # alpha = the significance level to be used in the hypothesis test # M = optional argument specifying the sampling size in the related Monte Carlo method # Output: scalar value of the 1-alpha quantile of the beta * chi-square distribution with nu # degrees of freedom (which approximates Q_h). Q_WS_quantile <- function(f_data, lag, alpha=0.05, M=NULL) { mean_Q_h <- mean_hat_Q_h(f_data, lag) var_Q_h <- variance_hat_Q_h(f_data, lag, M=M) beta <- var_Q_h / (2 * mean_Q_h) nu <- 2 * (mean_Q_h^2) / var_Q_h quantile <- beta * qchisq(1 - alpha, nu) statistic <- t_statistic_Q(f_data, lag) p_val <- pchisq(statistic / beta, nu, lower.tail = FALSE) list(statistic = statistic, quantile = quantile, p_value = p_val) } # Q_WS_quantile_iid computes the size alpha test of the hypothesis H_0_h using the WS # Approximation under the assumption that the data follows a strong white noise. # Input: f_data = the functional data matrix with functions in columns # alpha = the significance level to be used in the hypothesis test # Output: scalar value of the 1-alpha quantile of the beta * chi-square distribution with nu # degrees of freedom (which approximates Q_h) (computed under a strong white noise # assumption). 
Q_WS_quantile_iid <- function(f_data, alpha=0.05) { mean_Q_h <- mean_hat_Q_h_iid(f_data) var_Q_h <- variance_hat_Q_h_iid(f_data) beta <- var_Q_h / (2 * mean_Q_h) nu <- 2 * (mean_Q_h^2) / var_Q_h quantile <- beta * qchisq(1 - alpha, nu) statistic <- t_statistic_Q(f_data, lag = 1) p_val <- pchisq(statistic / beta, nu, lower.tail = FALSE) list(statistic = statistic, quantile = quantile, p_value = p_val) } #' Compute size alpha single-lag hypothesis test under weak or strong white noise assumption #' #' `Q_WS_hyp_test` computes the size alpha test of a single lag hypothesis under a weak white noise #' or strong white noise assumption using a Welch-Satterthwaite Approximation. #' #' @param f_data The functional data matrix with observed functions in the columns #' @param lag Positive integer value. The lag to use to compute the single lag test statistic. #' @param alpha Numeric value between 0 and 1 specifying the significance level to be used in the specified #' hypothesis test. The default value is 0.05. Note, the significance value is only ever used to compute the #' 1-alpha quantile of the limiting distribution of the specified test's test statistic. #' @param iid A Boolean value, FALSE by default. If given TRUE, the hypothesis test will use a strong-white #' noise assumption (instead of a weak-white noise assumption). #' @param M Positive integer value. Number of Monte-Carlo simulations for the Welch-Satterthwaite approximation. #' @param bootstrap A Boolean value, FALSE by default. If given TRUE, the hypothesis test is done by #' approximating the limiting distribution of the test statistic via a block bootstrap process. #' @param block_size A positive Integer value, with the default value being computed via the adaptive #' bandwidth selection method in the "spectral" test. Determines the block size (of each block in each #' bootstrap sample) if the test is being bootstrapped. #' @param straps A positive Integer, with a default value of 300. Determines the number of bootstrap samples #' to take if the test is being bootstrapped. Only used if 'bootstrap' == TRUE. #' @param moving A Boolean value, FALSE by default. If given TRUE, the performed block bootstrap will be moving #' rather than stationary. #' @return A list containing the p-value, the quantile, and a boolean value indicating whether or not the #' hypothesis is rejected. 
#' #' @import stats Q_WS_hyp_test <- function(f_data, lag, alpha=0.05, iid=FALSE, M=NULL, bootstrap=FALSE, block_size='adaptive', straps=300, moving = FALSE) { statistic <- t_statistic_Q(f_data, lag) if (bootstrap == TRUE) { if (block_size == 'adaptive') { block_size <- ceiling(adaptive_bandwidth(f_data, kernel = 'Bartlett')) } bootsraps <- list() bootstrap_samples <- block_bootsrap(f_data, block_size, B = straps, moving = moving) stats_distr <- lapply(bootstrap_samples, t_statistic_Q, lag=lag) statistic <- t_statistic_Q(f_data, lag=lag) quantile <- quantile(as.numeric(stats_distr), 1 - alpha) p_value <- sum(statistic > stats_distr) / length(stats_distr) list(statistic = as.numeric(statistic), quantile = as.numeric(quantile), p_value = as.numeric(p_value), block_size = block_size) } else if (iid == FALSE) { results <- Q_WS_quantile(f_data, lag, alpha=alpha, M=M) statistic <- results$statistic quantile <- results$quantile p_val <- results$p_val reject <- statistic > quantile list(statistic = statistic, quantile = quantile, p_value = p_val) } else { results <- Q_WS_quantile_iid(f_data, alpha=alpha) statistic <- results$statistic quantile <- results$quantile p_val <- results$p_val reject <- statistic > quantile list(statistic= statistic, quantile = quantile, p_value = p_val) } }
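# Note on the Welch-Satterthwaite scaling used throughout this file: writing the limiting
# distribution as beta * chi^2_nu and matching its first two moments to the estimated mean
# and variance of the statistic gives
#   E[beta * chi^2_nu]   = beta * nu       = mean,
#   Var[beta * chi^2_nu] = 2 * beta^2 * nu = var,
# which is solved by beta = var / (2 * mean) and nu = 2 * mean^2 / var, the expressions
# used in V_WS_quantile(), Q_WS_quantile() and their iid variants above.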
/scratch/gouwar.j/cran-all/cranData/wwntests/R/hypothesis_quantiles.R
# imhof_test computes the SVD of the tensor c^hat_i_j(t,s,u,v) and uses it to obtain the
# p-value, i.e. the probability that the quadratic form in normal variables described in
# (15) exceeds the observed value of the statistic Q_h.
# Input: f_data = the functional data matrix with functions in columns
#        lag = the lag for which to compute the imhof test
# Output: a list containing the test statistic Q_h and the p-value obtained from the
#         quadratic-form (Imhof) approximation of its limiting distribution.
imhof_test <- function(f_data, lag) {
  if (!requireNamespace('tensorA')) {
    stop("Please install the 'tensorA' package to perform the imhof test.")
  }
  if (!requireNamespace('CompQuadForm')) {
    stop("Please install the 'CompQuadForm' package to perform the imhof test.")
  }
  if ((lag < 1) | (lag %% 1 != 0)) {
    stop("The 'lag' parameter must be a positive integer.")
  }
  N = NCOL(f_data)
  J = NROW(f_data)
  t_statistic_val = t_statistic_Q(f_data, lag)
  c_f_data <- center(f_data)
  # Build the empirical lag-h covariance tensor c^hat_h(t,s,u,v) on the J^4 grid.
  tensor <- array(0, c(J, J, J, J))
  for (k in 1:(N-lag)) {
    tensor <- tensor + c_f_data[,k] %o% c_f_data[,k+lag] %o% c_f_data[,k] %o% c_f_data[,k+lag]
  }
  tensor <- tensor / N
  temp_tensor <- as.numeric(tensor)
  tensor_numeric <- tensorA::to.tensor(temp_tensor, c(J,J,J,J))
  names(tensor_numeric) = c("a", "b", "c", "d")
  SVD <- tensorA::svd.tensor(tensor_numeric, i=c("a", "b"))
  eigenvalues <- as.numeric(SVD$d / (J^2))
  pval_imhof <- CompQuadForm::imhof(t_statistic_val, lambda = eigenvalues)$Qq
  list(statistic = t_statistic_val, p_value = pval_imhof)
}
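# Illustrative sketch (not run): the Imhof-based p-value can be compared with the
# Welch-Satterthwaite approximation at the same lag, since both approximate the limiting
# distribution of the same statistic t_statistic_Q(); the Imhof computation is much more
# expensive because it builds and decomposes a J^4 tensor.
#
#   bm <- brown_motion(150, 20)
#   imhof_test(bm, lag = 1)$p_value
#   Q_WS_quantile(bm, lag = 1)$p_value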
/scratch/gouwar.j/cran-all/cranData/wwntests/R/imhof_test.R
#' Single-Lag Hypothesis Test #' #' `single_lag_test` computes the single-lag hypothesis test at a single user-specified lag. #' #' @param f_data The functional data matrix with observed functions in the columns #' @param lag Positive integer value. The lag to use to compute the single lag test statistic. #' @param alpha Numeric value between 0 and 1 specifying the significance level to be used in the specified #' hypothesis test. The default value is 0.05. Note, the significance value is only ever used to compute the #' 1-alpha quantile of the limiting distribution of the specified test's test statistic. #' @param iid A Boolean value, FALSE by default. If given TRUE, the hypothesis test will use a strong-white #' noise assumption (instead of a weak-white noise assumption). #' @param M Positive integer value. Number of Monte-Carlo simulations for the Welch-Satterthwaite approximation. #' @param bootstrap A Boolean value, FALSE by default. If given TRUE, the hypothesis test is done by #' approximating the limiting distribution of the test statistic via a block bootstrap process. #' @param block_size A positive Integer value, with the default value being computed via the adaptive #' bandwidth selection method in the "spectral" test. Determines the block size (of each block in each #' bootstrap sample) if the test is being bootstrapped. #' @param straps A positive Integer, with a default value of 300. Determines the number of bootstrap samples #' to take if the test is being bootstrapped. Only used if 'bootstrap' == TRUE. #' @param moving A Boolean value, FALSE by default. If given TRUE, the performed block bootstrap will be moving #' rather than stationary. #' @param suppress_raw_output Boolean value, FALSE by default. If TRUE, the function will not return the list #' containing the p-value, quantile, and statistic. #' @param suppress_print_output Boolean value, FALSE by default. If TRUE, the function will not print any #' output to the console. #' @details The "single-lag" portmanteau test is based on the sample autocovariance function computed from the #' functional data. This test assesses the significance of lagged autocovariance operators at a single, #' user-specified lag h. More specifically, it tests the null hypothesis that the lag-h autocovariance #' operator is equal to 0. This test is designed for stationary functional time-series, and is valid under #' conditional heteroscedasticity conditions. #' @return If suppress_raw_output = FALSE, a list containing the test statistic, the 1-alpha quantile of the #' limiting distribution, and the p-value computed from the specified hypothesis test. Also prints output #' containing a short description of the test, the p-value, and additional information about the test if #' suppress_print_output = FALSE. #' #' @references #' [1] Kokoszka P., & Rice G., & Shang H.L. (2017). Inference for the autocovariance of a functional time series #' under conditional heteroscedasticity. Journal of Multivariate Analysis, 162, 32-50. 
#' #' @examples #' f <- far_1_S(150, 50, S = 0.75) #' single_lag_test(f, lag = 1) #' single_lag_test(f, lag = 2, M=100) #' #' @import stats #' @export single_lag_test <- function(f_data, lag=1, alpha=0.05, iid=FALSE, M=NULL, bootstrap=FALSE, block_size='adaptive', straps=300, moving = FALSE, suppress_raw_output=FALSE, suppress_print_output=FALSE) { if (bootstrap == TRUE & (iid == TRUE)) { stop("Bootstrapping this test only requires the lag parameter (and optionally, a significance level).") } if (suppress_raw_output == TRUE & suppress_print_output == TRUE) { stop("Current choice of parameters will produce no output. At least one of the parameters 'suppress_raw_output' or 'suppress_print_output' must be FALSE.") } if (bootstrap == TRUE) { results <- Q_WS_hyp_test(f_data, lag, alpha = alpha, bootstrap = TRUE, block_size = block_size, moving = moving, straps = straps) if (suppress_print_output == FALSE) { if (moving == TRUE) { title_print <- sprintf(" Moving Block Bootstrapped Single-Lag Test\n\n") } else if (moving == FALSE) { title_print <- sprintf("Block Bootstrapped Single-Lag Test\n\n") } test_type <- 'the series is a weak white noise\n' null_print <- sprintf("null hypothesis: %s", test_type) p_val_print <- sprintf("p-value = %f\n", results$p_value) samp_print <- sprintf("sample size = %d\n", NCOL(f_data)) lag_print <- sprintf("lag = %d\n", lag) boot_num <- sprintf("number of bootstrap samples = %d\n", straps) block_sze <- sprintf("block size = %d\n\n\n", results$block_size) message(c(title_print, null_print, p_val_print, samp_print, lag_print, boot_num, block_sze)) } if (suppress_raw_output == FALSE) { results[-4] } } else if (iid == FALSE) { results <- Q_WS_hyp_test(f_data, lag, alpha=alpha, M=M) if (suppress_print_output == FALSE) { title_print <- sprintf("Single-Lag Test\n\n") null_print <- sprintf("null hypothesis: the series is uncorrelated at lag %d\n", lag) p_val_print <- sprintf("p-value = %f\n", results$p_value) samp_print <- sprintf("sample size = %d\n", NCOL(f_data)) lag_print <- sprintf("lag = %d\n\n\n", lag) message(c(title_print, null_print, p_val_print, samp_print, lag_print)) } if (suppress_raw_output == FALSE) { results } } else if (iid == TRUE) { results <- Q_WS_hyp_test(f_data, iid = TRUE, lag = lag, alpha=alpha) if (suppress_print_output == FALSE) { title_print <- sprintf("Single-Lag Test (iid assumption)\n\n") test_type <- 'the series is a strong white noise\n' null_print <- sprintf("null hypothesis: %s", test_type) p_val_print <- sprintf("p-value = %f\n", results$p_value) samp_print <- sprintf("sample size = %d\n", NCOL(f_data)) lag_print <- sprintf("lag = %d\n\n\n", lag) message(c(title_print, null_print, p_val_print, samp_print, lag_print)) } if (suppress_raw_output == FALSE) { results } } } #' Multi-Lag Hypothesis Test #' #' `multi_lag_test` Computes the multi-lag hypothesis test over a range of user-specified lags. #' #' @param f_data The functional data matrix with observed functions in the columns #' @param lag Positive integer value. The lag to use to compute the multi-lag test statistic #' @param alpha Numeric value between 0 and 1 specifying the significance level to be used in the specified #' hypothesis test. The default value is 0.05. Note, the significance value is only ever used to compute the #' 1-alpha quantile of the limiting distribution of the specified test's test statistic. #' @param iid A Boolean value, FALSE by default. If given TRUE, the hypothesis test will use a strong-white #' noise assumption (instead of a weak-white noise assumption). 
#' @param M Positive integer value. Number of Monte-Carlo simulation for Welch-Satterthwaite approximation. #' @param suppress_raw_output Boolean value, FALSE by default. If TRUE, the function will not return the list #' containing the p-value, quantile, and statistic. #' @param suppress_print_output Boolean value, FALSE by default. If TRUE, the function will not print any #' output to the console. #' @details The "multi-lag" portmanteau test is also based on the sample autocovariance function computed from the #' functional data. This test assesses the cumulative significance of lagged autocovariance operators, up to a #' user-selected maximum lag K. More specifically, it tests the null hypothesis that the first K lag-h autocovariance #' operators (h going from 1 to K) is equal to 0. This test is designed for stationary functional time-series, and #' is valid under conditional heteroscedasticity conditions. #' @return If suppress_raw_output = FALSE, a list containing the test statistic, the 1-alpha quantile of the #' limiting distribution, and the p-value computed from the specified hypothesis test. Also prints output #' containing a short description of the test, the p-value, and additional information about the test if #' suppress_print_output = FALSE. #' #' @references #' [1] Kokoszka P., & Rice G., & Shang H.L. (2017). Inference for the autocovariance of a functional time series #' under conditional heteroscedasticity. Journal of Multivariate Analysis, 162, 32-50. #' #' @examples #' b <- brown_motion(150, 50) #' multi_lag_test(b, lag = 5) #' multi_lag_test(b, lag = 10, M = 50) #' #' @import stats #' @export multi_lag_test <- function(f_data, lag = 20, M=NULL, iid=FALSE, alpha=0.05, suppress_raw_output=FALSE, suppress_print_output=FALSE) { K <- lag if (suppress_raw_output == TRUE & suppress_print_output == TRUE) { stop("Current choice of parameters will produce no output. Atleast one of the parameters 'suppress_raw_output' or 'suppress_print_output' must be FALSE.") } if (iid == FALSE) { results <- V_WS_quantile(f_data, K, alpha=alpha, M=M) if (suppress_print_output == FALSE) { title_print <- sprintf("Multi-Lag Test\n\n") test_type <- 'the series is a weak white noise\n' null_print <- sprintf("null hypothesis: %s", test_type) p_val_print <- sprintf("p-value = %f\n", results$p_value) samp_print <- sprintf("sample size = %d\n", NCOL(f_data)) lag_print <- sprintf("maximum lag = %d\n", K) mc_print <- sprintf("number of monte-carlo simulations = %d\n\n\n", M) message(c(title_print, null_print, p_val_print, samp_print, lag_print, mc_print)) } if (suppress_raw_output == FALSE) { results } } else { results <- V_WS_quantile_iid(f_data, K, alpha=alpha) if (suppress_print_output == FALSE) { title_print <- sprintf("Multi-Lag Test (iid assumption)\n\n") test_type <- 'the series is a strong white noise\n' null_print <- sprintf("null hypothesis: %s", test_type) p_val_print <- sprintf("p-value = %f\n", results$p_value) samp_print <- sprintf("sample size = %d\n", NCOL(f_data)) lag_print <- sprintf("maximum lag = %d\n\n\n", K) message(c(title_print, null_print, p_val_print, samp_print, lag_print)) } if (suppress_raw_output == FALSE) { results } } } #' Spectral Density Test #' #' `spectral_test` Computes the spectral hypothesis test under a user-specified kernel function and #' bandwidth; automatic bandwidth selection methods are provided. #' #' @param f_data The functional data matrix with observed functions in the columns #' @param kernel A String specifying the kernel function to use. 
The currently supported kernels are the #' 'Bartlett' and 'Parzen' kernels. The default kernel is 'Bartlett'. #' @param bandwidth A String or positive Integer value which specifies the bandwidth to use. Currently admitted #' string handles are 'static' which computes the bandwidth p via p = n^(1/(2q+1)) where n is the sample size #' and q is the kernel order, or 'adaptive' which uses a bandwidth selection method that is based on the #' functional data. #' @param alpha Numeric value between 0 and 1 specifying the significance level to be used for the test. #' The significance level is 0.05 by default. Note, the significance value is only ever used to compute the #' 1-alpha quantile of the limiting distribution of the specified test's test statistic. #' @param suppress_raw_output Boolean value, FALSE by default. If TRUE, the function will not return the list #' containing the p-value, quantile, and statistic. #' @param suppress_print_output Boolean value, FALSE by default. If TRUE, the function will not print any #' output to the console. #' @description The "spectral" portmanteau test is based on the spectral density operator. It essentially measures #' the proximity of a functional time series to a white noise - the constant spectral density operator of an #' uncorrelated series. Unlike the "single-lag" and "multi-lag" tests, this test is not for general white noise #' series, and may not hold under functional conditionally heteroscedastic assumptions. #' @return If suppress_raw_output = FALSE, a list containing the test statistic, the 1-alpha quantile of the #' limiting distribution, and the p-value computed from the specified hypothesis test. Also prints output #' containing a short description of the test, the p-value, and additional information about the test if #' suppress_print_output = FALSE. #' #' @references #' [1] Characiejus V., & Rice G. (2019). A general white noise test based on kernel lag-window estimates of the #' spectral density operator. Econometrics and Statistics, submitted. #' #' [2] Chen W.W. & Deo R.S. (2004). Power transformations to induce normality and their applications. #' Journal of the Royal Statistical Society: Series B (Statistical Methodology), 66, 117–130. #' #' @examples #' b <- brown_motion(100, 50) #' spectral_test(b) #' spectral_test(b, kernel = 'Parzen', bandwidth = 'adaptive') #' spectral_test(b, kernel = 'Bartlett', bandwidth = 2) #' #' @export spectral_test <- function(f_data, kernel = 'Bartlett', bandwidth = 'adaptive', alpha = 0.05, suppress_raw_output=FALSE, suppress_print_output=FALSE) { if (suppress_raw_output == TRUE & suppress_print_output == TRUE) { stop("Current choice of parameters will produce no output. 
Atleast one of the parameters 'suppress_raw_output' or 'suppress_print_output' must be FALSE.") } quantile <- qnorm(1 - alpha) statistic <- spectral_t_statistic(f_data, kernel = kernel, bandwidth = bandwidth) band <- statistic$band statistic <- statistic$stat p_val <- 1 - pnorm(statistic) results <- list(statistic = statistic, quantile = quantile, p_value = p_val, band = band) if (suppress_print_output == FALSE) { title_print <- sprintf("Spectral Test\n\n") test_type <- 'the series is iid\n' null_print <- sprintf("null hypothesis: %s", test_type) p_val_print <- sprintf("p-value = %f\n", results$p_value) samp_print <- sprintf("sample size = %d\n", NCOL(f_data)) kern_print <- sprintf("kernel function = %s\n", kernel) band_print <- sprintf("bandwidth = %f\n", results$band) if (is.numeric(bandwidth)) { band_sel <- sprintf("bandwidth selection = %d\n\n\n", bandwidth) } else { band_sel <- sprintf("bandwidth selection = %s\n\n\n", bandwidth) } message(c(title_print, null_print, p_val_print, samp_print, kern_print, band_print, band_sel)) } if (suppress_raw_output == FALSE) { results[-4] } } #' Independence Test #' #' `independence_test` Computes the independence test with a user-specified number of principal components #' and range of lags. #' #' @param f_data The functional data matrix with observed functions in the columns #' @param components A positive Integer specifying the number of principal components to project the data on; #' ranked in order of importance (importance is determined by the proportion of the variance that is explained #' by the individual principal component.) #' @param lag A positive Integer value, specifying the maximum lag to include - this can be seen as the bandwidth #' or lag-window. #' @param alpha Numeric value between 0 and 1 specifying the significance level to be used in the specified #' hypothesis test. The default value is 0.05. Note, the significance value is only ever used to compute the #' 1-alpha quantile of the limiting distribution of the specified test's test statistic. #' @param suppress_raw_output Boolean value, FALSE by default. If TRUE, the function will not return the list #' containing the p-value, quantile, and statistic. #' @param suppress_print_output Boolean value, FALSE by default. If TRUE, the function will not print any #' output to the console. #' @details The "independence" portmanteau test is a test of independence and identical distribution based on a #' dimensionality reduction by projecting the data onto the most important functional principal components. #' It is based on the resulting lagged cross-variances. This test is not for general white noise series, and #' may not hold under functional conditionally heteroscedastic assumptions. Please consult the vignette for a #' deeper exposition, and consult the reference for a complete treatment. #' @return If suppress_raw_output = FALSE, a list containing the test statistic, the 1-alpha quantile of the #' limiting distribution, and the p-value computed from the specified hypothesis test. Also prints output #' containing a short description of the test, the p-value, and additional information about the test if #' suppress_print_output = FALSE. #' @references #' [1] Gabrys R., & Kokoszka P. (2007). Portmanteau Test of Independence for Functional Observations. #' Journal of the American Statistical Association, 102:480, 1338-1348, DOI: 10.1198/016214507000001111. 
#'
#' @examples
#' b <- brown_motion(250, 100)
#' independence_test(b, components = 3, lag = 5)
#'
#' @importFrom rainbow fts
#' @importFrom ftsa ftsm
#'
#' @export
independence_test <- function(f_data, components, lag, alpha = 0.05,
                              suppress_raw_output=FALSE, suppress_print_output=FALSE) {
  if (suppress_raw_output == TRUE & suppress_print_output == TRUE) {
    stop("Current choice of parameters will produce no output. At least one of the parameters
         'suppress_raw_output' or 'suppress_print_output' must be FALSE.")
  }
  if ((components < 1) | (components %% 1 != 0)) {
    stop("The 'components' parameter must be a positive integer.")
  }
  if ((lag < 1) | (lag %% 1 != 0)) {
    stop("The 'lag' parameter must be a positive integer.")
  }
  N <- NCOL(f_data)
  J <- NROW(f_data)
  f_data <- center(f_data)
  suppressWarnings(pc_decomp <- ftsa::ftsm(rainbow::fts(1:J, f_data), order = components,
                                           mean = FALSE))
  scores <- pc_decomp$coeff
  C_0 <- crossprod(scores) / N
  c_h <- array(0, dim=c(components,components,lag))
  for (h in 1:lag) {
    for (k in 1:components) {
      for (l in 1:components) {
        score_uni <- 0
        for (t in 1:(N-h)) {
          score_uni <- score_uni + (scores[t,k] * scores[t+h,l])
        }
        c_h[k,l,h] <- score_uni / N
      }
    }
  }
  r_f_h <- r_b_h <- array(0, dim=c(components,components,lag))
  summand <- vector('numeric', lag)
  for (h in 1:lag) {
    r_f_h[,,h] <- solve(C_0) %*% c_h[,,h]
    r_b_h[,,h] <- c_h[,,h] %*% solve(C_0)
    summand[h] <- sum(r_f_h[,,h] * r_b_h[,,h])
  }
  Q_n <- N * sum(summand)
  p_val <- as.numeric(1 - pchisq(Q_n, df = components^2 * lag))
  quantile <- as.numeric(qchisq(1 - alpha, df = components^2 * lag))
  results <- list(statistic = Q_n, quantile = quantile, p_value = p_val)
  if (suppress_print_output == FALSE) {
    title_print <- sprintf("Independence Test\n\n")
    test_type <- 'the series is iid\n'
    null_print <- sprintf("null hypothesis: %s", test_type)
    p_val_print <- sprintf("p-value = %f\n", results$p_value)
    samp_print <- sprintf("sample size = %d\n", NCOL(f_data))
    comp_print <- sprintf('number of principal components = %d\n', components)
    lag_print <- sprintf("maximum lag = %d\n\n\n", lag)
    message(c(title_print, null_print, p_val_print, samp_print, comp_print, lag_print))
  }
  if (suppress_raw_output == FALSE) {
    results
  }
}


#' Goodness-of-fit test for FAR(1)
#'
#' `GOF_far` computes the goodness-of-fit test for FAR(1) over a range of user-specified lags.
#'
#' @param f_data The functional data matrix with observed functions in the columns.
#' @param lag Positive integer value. A user-selected maximum lag. 5 by default.
#' @param alpha Numeric value between 0 and 1 specifying the significance level to be used in the specified
#' hypothesis test. The default value is 0.05. Note, the significance value is only ever used to compute the
#' 1-alpha quantile of the limiting distribution of the specified test's test statistic.
#' @param M Positive integer value. Number of Monte-Carlo simulations for the Welch-Satterthwaite
#' approximation. 10000 by default.
#' @param suppress_raw_output Boolean value, FALSE by default. If TRUE, the function will not return the list
#' containing the p-value, quantile, and statistic.
#' @param suppress_print_output Boolean value, FALSE by default. If TRUE, the function will not print any
#' output to the console.
#' @description The "GOF_far" test fits a FAR(1) model and then assesses the cumulative significance of lagged
#' autocovariance operators from the model residuals, up to a user-selected maximum lag K.
#' More specifically, it tests the null hypothesis that the first K lag-h autocovariance
#' operators (h going from 1 to K) from the model residuals are equal to 0.
#' @return If suppress_raw_output = FALSE, a list containing the test statistic, the 1-alpha quantile of the
#' limiting distribution, and the p-value computed from the specified hypothesis test. Also prints output
#' containing a short description of the test, the p-value, and additional information about the test if
#' suppress_print_output = FALSE.
#'
#' @references
#' [1] Kim, M., Kokoszka, P., & Rice, G. (2023). White noise testing for functional time series.
#' Statistics Surveys, 17, 119-168.
#'
#' @examples
#' f <- far_1_S(100, 50, 0.75)
#' GOF_far(f, lag=5)
#'
#' @import stats
#' @export
GOF_far <- function(f_data, lag=5, M=10000, alpha=0.05, suppress_raw_output=FALSE,
                    suppress_print_output=FALSE) {
  if (!requireNamespace('fda')) {
    stop("Please install the 'fda' package to perform the GOF for FAR(1) test.")
  }
  if (suppress_raw_output == TRUE & suppress_print_output == TRUE) {
    stop("Current choice of parameters will produce no output. At least one of the parameters
         'suppress_raw_output' or 'suppress_print_output' must be FALSE.")
  }
  J <- dim(f_data)[1]
  N <- dim(f_data)[2]
  basis <- fda::create.bspline.basis(rangeval=c(0,1), nbasis=25, norder=4)
  fd.data <- fda::smooth.basis(0:(J-1)/(J-1), f_data, basis)$fd
  pca <- fda::pca.fd(fd.data[1:(N-1)], nharm=20, fda::fdPar(fd.data), centerfns=TRUE)
  kN <- which.max(cumsum(pca$values)/sum(pca$values) > 0.90)
  score <- as.matrix(pca$scores[,1:kN])
  eigenval <- pca$values[1:kN]
  xi <- score %*% diag(1/eigenval, kN) %*% t(score) / N
  X <- fda::eval.fd(fd.data, 0:(J-1)/(J-1))
  X <- X - rowMeans(X)
  eg <- X[,2:N] - X[,2:N] %*% xi
  eg <- cbind(X[,1], eg)
  f <- array(NA, c(N-1, J, lag))
  for(h in 1:lag) {
    f[,,h] <- xi[,h:(N-1)] %*% t(eg[,1:(N-h)])
  }
  results <- V_WS_quantile_far(eg, f, lag, alpha, M)
  if (suppress_print_output == FALSE) {
    title_print <- sprintf("Goodness-of-fit test for FAR(1)\n\n")
    null_print <- sprintf("null hypothesis: FAR(1) model is adequate for the series.\n")
    p_val_print <- sprintf("p-value = %f\n", results$p_value)
    samp_print <- sprintf("sample size = %d\n", NCOL(f_data))
    lag_print <- sprintf("lag = %d\n\n\n", lag)
    message(c(title_print, null_print, p_val_print, samp_print, lag_print))
  }
  if (suppress_raw_output == FALSE) {
    results
  }
}
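# Commentary on the steps above: kN is the smallest number of principal components whose
# cumulative proportion of variance exceeds 90%; xi is a smoothing matrix built from those
# leading scores and used to form the fitted FAR(1) values; eg then holds the model residuals,
# whose lagged autocovariances are assessed up to the user-selected maximum lag by
# V_WS_quantile_far() (defined elsewhere in the package).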
/scratch/gouwar.j/cran-all/cranData/wwntests/R/main_tests.R
#' Compute Functional Hypothesis Tests #' #' `fport_test` computes a variety of white noise tests for functional data. All white noise tests in this #' package are accessible through this function. #' #' @param f_data The functional data matrix with observed functions in the columns. #' @param test A String specifying the hypothesis test. Currently available tests are referred to by their #' string handles: "single-lag", "multi-lag", "spectral", "independence", and "imhof". Please see the Details #' section of the documentation, or the vignette, for a short overview of the available tests. For a more #' complete treatment of these hypothesis tests, please consult the references. #' @param lag A positive integer value. Only used for the "single-lag", "multi-lag", "independence", and "imhof" tests. #' This parameter specifies the single lag, or maximum lag, to be used by the specified test. #' @param iid Only used for the "single-lag" and "multi-lag" tests. A Boolean value, FALSE by default. If given TRUE, #' the hypothesis test will use a strong-white noise assumption (instead of a weak-white noise assumption). #' @param M Only used for the "single-lag" and "multi-lag" tests. A positive Integer. Determines the number of #' Monte-Carlo simulations employed in the Welch-Satterthwaite approximation of the limiting distribution of the #' test statistic. #' @param kernel Only used for the "spectral" test. A String, 'Bartlett' by default. Specifies the kernel to be #' used in the "spectral" test. Currently supported kernels are the 'Bartlett' and 'Parzen' kernels. #' @param bandwidth Only used for the "spectral" test. Either a String or a positive Integer value, 'adaptive' by #' default. Determines the bandwidth (or lag-window) to be used for the test. Given the string handle 'adaptive', #' the bandwidth is computed via a bandwidth selection method which aims to minimize the integrated normed #' error of the spectral density operator. If the given string handle is 'static', the bandwidth is computed #' to be n^(1/(2q + 1)), where n is the sample size and q is the kernel order. If a positive integer is #' given, that will be the bandwidth that is used. #' @param components Only used for the "independence" test. A positive Integer value. Determines the number of #' functional principal components to use (ranked by their importance). #' @param bootstrap Only used for the "single-lag" test. A Boolean value, FALSE by default. If given TRUE, the #' hypothesis test is evaluated by approximating the limiting distribution of the test statistic via a block #' bootstrapping process. #' @param block_size Only used for the "single-lag" test in the case when 'bootstrap' = TRUE. A positive Integer #' value, with the default value being computed via the adaptive bandwidth selection method in the "spectral" test. #' Determines the block size (of each block in each bootstrap sample) if the test is being bootstrapped. #' @param straps Only used for the "single-lag" test in the case when 'bootstrap' = TRUE. A positive Integer with #' a default value of 300. Determines the number of bootstrap samples to take if the test is being bootstrapped. #' @param moving Only used for the "single-lag" test in the case when 'bootstrap' = TRUE. A Boolean value, FALSE #' by default If given TRUE, the performed block bootstrap will be moving rather than stationary. #' @param alpha Numeric value between 0 and 1 specifying the significance level to be used in the specified #' hypothesis test. The default value is 0.05. 
Note, the significance value is only ever used to compute the #' 1-alpha quantile of the limiting distribution of the specified test's test statistic. #' @param complete_test A Boolean value, FALSE by default. If TRUE, the function requires no other parameters #' other than f_data, and will return a table with a single column containing p-values from an array of tests #' contained in the rows. #' @param suppress_raw_output A Boolean value, FALSE by default. If given TRUE, the function will not return a #' list containing the p-value, quantile and statistic, and instead only prints output to the console. #' @param suppress_print_output A Boolean value, FALSE by default. If TRUE, the function will not print any #' output to the console. #' @details The "single-lag" portmanteau test is based on the sample autocovariance function computed from the #' functional data. This test assesses the significance of lagged autocovariance operators at a single, user-specified #' lag h. More specifically, it tests the null hypothesis that the lag-h autocovariance operator is equal to 0. #' This test is designed for stationary functional time-series, and is valid under conditional heteroscedasticity #' conditions. The required parameter for this test is 'lag', which determines the lag at which the test is evaluated. If this #' parameter is left blank, it will take a default of 1. The optional parameters for this test are 'iid', 'M', 'bootstrap', #' 'block_size', 'straps', 'moving',and 'alpha'. #' #' The "multi-lag" portmanteau test is also based on the sample autocovariance function computed from the functional #' data. This test assesses the cumulative significance of lagged autocovariance operators, up to a user-selected #' maximum lag K. More specifically, it tests the null hypothesis that the first K lag-h autocovariance operators #' (h going from 1 to K) is equal to 0. This test is designed for stationary functional time-series, and is valid #' under conditional heteroscedasticity conditions. #' The required parameter for this test is 'lag', which determines the maximum lag at which the test is evaluated. #' If this parameter is left blank, it will take a default of 20. #' The optional parameters for this test are 'iid', 'M', 'bootstrap', 'block_size', 'straps', 'moving', #' and 'alpha'. #' #' The "spectral" portmanteau test is based on the spectral density operator. It essentially measures the proximity of a #' functional time series to a white noise - the constant spectral density operator of an uncorrelated series. #' Unlike the "single-lag" and "multi-lag" tests, this test is not for general white noise series, and may not hold #' under functional conditionally heteroscedastic assumptions. #' The optional parameters for this test are 'kernel', 'bandwidth', and 'alpha'. #' #' The "independence" portmanteau test is a test of independence and identical distribution based on a dimensionality #' reduction by projecting the data onto the most important functional principal components. It is based on the #' resulting lagged cross-variances. This test is not for general white noise series, and may not hold under #' functional conditionally heteroscedastic assumptions. #' The required parameters for this test are 'lag' and 'components'. The 'lag' parameter determines the maximum lag at #' which the test is evaluated. 
The 'components' parameter determines the number of the most important principal #' components to use (importance is determined by the proportion of the variance that is explained by the #' individual principal component.) #' #' The "imhof" portmanteau test is an analogue of the "single-lag" test. While the "single-lag" test computes the #' limiting distribution of the test statistic via a Welch-Satterthwaite approximation, the "imhof" test directly #' computes the coefficients of the quadratic form in Normal variables which the test statistic converges too as #' the sample size goes to infinity. We warn the user that this test is extremely computationally expensive, and #' is only recommended for small datasets as a means of cross-verification against the single-lag test. #' The required parameter for this test is 'lag', which determines the lag at which the test is evaluated. #' The "imhof" test requires the "tensorA" and "CompQuadForm" packages. Note also that the imhof test does not #' return a statistic, and thus returns a list with only 2 elements if suppress_raw_output = FALSE. #' @return If suppress_raw_output = FALSE, a list containing the test statistic, the 1-alpha quantile of the #' limiting distribution, and the p-value computed from the specified hypothesis test. Also prints output #' containing a short description of the test, the p-value, and additional information about the test if #' suppress_print_output = FALSE. If 'complete-test' = TRUE, will return a 1-column table instead containing #' the p-values for a variety of tests, which are given short descriptions in the index of the table. #' #' @references #' [1] Kokoszka P., & Rice G., & Shang H.L. (2017). Inference for the autocovariance of a functional time series #' under conditional heteroscedasticity. Journal of Multivariate Analysis, 162, 32-50. #' #' [2] Characiejus V., & Rice G. (2019). A general white noise test based on kernel lag-window estimates of the #' spectral density operator. Econometrics and Statistics, submitted. #' #' [3] Gabrys R., & Kokoszka P. (2007). Portmanteau Test of Independence for Functional Observations. #' Journal of the American Statistical Association, 102:480, 1338-1348, DOI: 10.1198/016214507000001111. #' #' [4] Zhang X. (2016). White noise testing and model diagnostic checking for functional time series. #' Journal of Econometrics, 194, 76-95. #' #' [5] Chen W.W. & Deo R.S. (2004). Power transformations to induce normality and their applications. #' Journal of the Royal Statistical Society: Series B (Statistical Methodology), 66, 117–130. 
#' #' @examples #' b <- brown_motion(250, 50) #' fport_test(b, test = 'single-lag', lag = 10) #' fport_test(b, test = 'multi-lag', lag = 10, alpha = 0.01) #' fport_test(b, test = 'single-lag', lag = 1, M = 250) #' fport_test(b, test = 'spectral', kernel = 'Bartlett', bandwidth = 'static', alpha = 0.05) #' fport_test(b, test = 'spectral', alpha = 0.1, kernel = 'Parzen', bandwidth = 'adaptive') #' fport_test(b, test = 'independence', components = 3, lag = 3) #' #' @export #' @import stats fport_test <- function(f_data, test = 'multi-lag', lag=NULL, iid=FALSE, M=NULL, kernel = "Bartlett", bandwidth = "adaptive", components = 3, bootstrap=FALSE, block_size = "adaptive", moving=FALSE, straps = 300, alpha=0.05, complete_test=FALSE, suppress_raw_output = FALSE, suppress_print_output = FALSE) { tests = c('single-lag', 'multi-lag', 'spectral', 'independence', 'imhof') if (test == 'multi-lag' & is.null(lag) & complete_test==FALSE) { warning("You did not specify a maximum lag for the multi-lag test. We use a default of lag = 20") lag = 20 } if (test == 'single-lag' & is.null(lag) & complete_test==FALSE) { warning("You did not specify a maximum lag for the single-lag test. We use a default of lag = 1") lag = 1 } if (!(test %in% tests)) { stop("Please see the documentation for available tests.") } if (!is.matrix(f_data)) { stop("Invalid arguments, functional data f_data must be passed in matrix form.") } if (!is.null(lag)) { if (!all.equal(lag, as.integer(lag)) | lag <= 0) { stop("Invalid arguments, lag must be a positive integer for the single-lag and multi-lag tests.") } } if (alpha < 0 | alpha > 1) { stop("Invalid arguments, the significance level alpha must be between 0 and 1.") } if (!is.logical(iid)) { stop("Invalid arguments, the iid parameter must be logical values.") } if (!is.null(M)) { if (!all.equal(M, as.integer(M)) | M < 0) { stop("Invalid arguments, M must be a positive integer or NULL.") } } iid_error = base::simpleError("When iid = true, this function does not use Monte Carlo methods, and thus also does not support low-discrepancy sequence sampling or parallelization. 
Please change the parameters.") if ((iid == TRUE) & (!is.null(M))) { stop(iid_error) } if (complete_test == TRUE) { m <- as.table(matrix(0, ncol = 1, 10)) colnames(m) <- c('p_value') rownames(m) <- c('single-lag, lag = 1', 'single-lag, lag = 2', 'single-lag, lag = 3', 'multi-lag, lag = 5', 'multi-lag, lag = 10', 'multi-lag, lag = 20', 'spectral, static bandwidth', 'spectral, adaptive bandwidth', 'independence, 3 components, lag = 3', 'independence, 16 components, lag = 10') m[1] <- fport_test(f_data, test = 'single-lag', lag = 1)$p_value m[2] <- fport_test(f_data, test = 'single-lag', lag = 2)$p_value m[3] <- fport_test(f_data, test = 'single-lag', lag = 3)$p_value m[4] <- fport_test(f_data, test = 'multi-lag', lag = 5)$p_value m[5] <- fport_test(f_data, test = 'multi-lag', lag = 10)$p_value m[6] <- fport_test(f_data, test = 'multi-lag', lag = 20)$p_value m[7] <- fport_test(f_data, test = 'spectral', bandwidth = 'static')$p_value m[8] <- fport_test(f_data, test = 'spectral', bandwidth = 'adaptive')$p_value m[9] <- fport_test(f_data, test = 'independence', components = 3, lag = 3)$p_value m[10] <- fport_test(f_data, test = 'independence', components = 16, lag = 10)$p_value m } else if (test == 'multi-lag') { multi_lag_test(f_data, lag, M=M, iid=iid, suppress_raw_output = suppress_raw_output, suppress_print_output = suppress_print_output) } else if (test == 'single-lag') { single_lag_test(f_data, lag, alpha=alpha, iid=iid, M=M, bootstrap=bootstrap, block_size=block_size, straps=straps, moving = moving, suppress_raw_output = suppress_raw_output, suppress_print_output = suppress_print_output) } else if (test == 'spectral') { spectral_test(f_data, kernel = kernel, bandwidth = bandwidth, alpha = alpha, suppress_raw_output=suppress_raw_output, suppress_print_output = suppress_print_output) } else if (test == 'independence') { independence_test(f_data, components = components, lag = lag, suppress_raw_output = suppress_raw_output, suppress_print_output = suppress_print_output) } else if (test == 'imhof') { input <- readline("We warn the user that the imhof test is extremely computationally expensive. \n Press [enter] if you would like to continue.") if (input != '') { stop("User cancelled the test.") } results <- imhof_test(f_data, lag) if (suppress_print_output == FALSE) { title_print <- sprintf("Imhof Test\n\n") test_type <- 'the series is a weak white noise\n' null_print <- sprintf("null hypothesis: %s", test_type) p_val_print <- sprintf("p-value = %f\n", results$p_value) samp_print <- sprintf("sample size = %d\n", NCOL(f_data)) lag_print <- sprintf("lag = %d\n\n\n", lag) message(c(title_print, null_print, p_val_print, samp_print, lag_print)) } if (suppress_raw_output == FALSE) { results } } } #' Plot Confidence Bounds of Estimated Functional Autocorrelation Coefficients #' #' `autocorrelation_coeff_plot` Computes the 1-alpha upper confidence bounds for the functional #' autocorrelation coefficients at lags h = 1:K under both weak white noise (WWN) and strong white #' noise (SWN) assumptions. It plots the coefficients as well as the bounds for all lags h = 1:K. #' Note, the SWN bound is constant, while the WWN is dependent on the lag. #' #' @param f_data The functional data matrix with observed functions in the columns. #' @param K A positive Integer value. The maximum lag for which to compute the single-lag test (tests #' will be computed for lags h in 1:K). #' @param alpha A numeric value between 0 and 1 specifying the significance level to be used in the single-lag #' test. 
The default value is 0.05. #' @param M A positive Integer value. Determines the number of Monte-Carlo simulations employed in the #' Welch-Satterthwaite approximation of the limiting distribution of the test statistics, for each test. #' @param wwn_bound A Boolean value allowing the user to turn off the weak white noise bound. TRUE by default. #' Speeds up computation when FALSE. #' @details This function computes and plots autocorrelation coefficients at lag h, for h in 1:K. It also #' computes an estimated asymptotic 1 - alpha confidence bound, under the assumption that the series #' forms a weak white noise. Additionally, it computes a similar (constant) bound under the assumption the #' series form a strong white noise. Please see the vignette or the references for a more complete treatment. #' @return Plot of the estimated autocorrelation coefficients for lags h in 1:K with the weak #' white noise 1-alpha upper confidence bound for each lag, as well as the constant strong white #' noise 1-alpha confidence bound. #' #' @references #' [1] Kokoszka P., & Rice G., & Shang H.L. (2017). Inference for the autocovariance of a functional time series #' under conditional heteroscedasticity. Journal of Multivariate Analysis, 162, 32-50. #' #' @examples #' b <- brown_motion(75, 40) #' autocorrelation_coeff_plot(b) #' autocorrelation_coeff_plot(b, M = 200) #' #' @export #' @import sde #' @importFrom graphics legend lines par plot autocorrelation_coeff_plot <- function(f_data, K=20, alpha=0.05, M=NULL, wwn_bound=TRUE) { if ((K < 1) | (K %% 1 != 0)) { stop("The parameter 'K' must be a positive integer.") } if ((alpha > 1) | (alpha < 0)) { stop("The 'alpha' parameter must be a value between 0 and 1.") } J = NROW(f_data) coefficients = array(0, K) B_iid_bounds = array(0,K) lags = 1:K if (wwn_bound == TRUE) { B_h_bounds = array(0,K) for (h in lags){ coefficients[h] <- autocorrelation_coeff_h(f_data, h) B_h_bounds[h] <- B_h_bound(f_data, h, M=M) } } else { for (h in lags){ coefficients[h] <- autocorrelation_coeff_h(f_data, h) } } plot(lags, coefficients, ylim=c(0,2 * max(coefficients)), type='h', xlab='Lag', ylab='Autocorrelation Coefficient', main = 'Autocorrelation Bounds') lines(rep(B_iid_bound(f_data), K), col='red', lty='solid') if (wwn_bound == TRUE) { lines(B_h_bounds, col='blue', lty='dotted') legend('topleft', legend=c('Estimated Autocorrelation Coefficients', 'WWN Bound', 'SWN Bound'), col=c('black', 'blue', 'red'), lty=c('solid', 'dotted', 'solid'), cex=0.75) } else { legend('topleft', legend=c('Estimated Autocorrelation Coefficients', 'SWN Bound'), col=c('black', 'red'), lty=c('solid', 'solid'), cex=0.75) } }
/scratch/gouwar.j/cran-all/cranData/wwntests/R/master_functions.R
#' Bartlett Kernel Function
#'
#' `bartlett_kernel` Computes the Bartlett kernel function at the given point value(s).
#' @param x the point value(s) at which the kernel function is evaluated
#' @return The value(s) of the Bartlett kernel function at x (a vector of the same length as x).
bartlett_kernel <- function(x) {
  len <- length(x)
  for (i in 1:len) {
    if (abs(x[i]) <= 1) {
      x[i] <- 1 - abs(x[i])
    } else {
      x[i] <- 0
    }
  }
  x
}

#' Parzen Kernel Function
#'
#' `parzen_kernel` Computes the Parzen kernel function at the given point value(s).
#' @param x the point value(s) at which the kernel function is evaluated
#' @return The value(s) of the Parzen kernel function at x (a vector of the same length as x).
parzen_kernel <- function(x) {
  len <- length(x)
  for (i in 1:len) {
    if (abs(x[i]) <= 1) {
      if (abs(x[i]) <= 0.5) {
        x[i] <- 1 - 6 * x[i]^2 + 6 * abs(x[i])^3
      } else {
        x[i] <- 2 * (1 - abs(x[i]))^3
      }
    } else {
      x[i] <- 0
    }
  }
  x
}

#' Daniell Kernel Function
#'
#' `daniell_kernel` Computes the Daniell kernel function at the given point value(s).
#' @param x the point value(s) at which the kernel function is evaluated
#' @return The value(s) of the Daniell kernel function at x (a vector of the same length as x).
daniell_kernel <- function(x) {
  len <- length(x)
  for (i in 1:len) {
    if (x[i] == 0) {
      x[i] <- 0
    } else {
      x[i] <- sin(pi * x[i]) / (pi * x[i])
    }
  }
  x
}
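# A minimal sanity-check sketch (illustrative, not part of the package API): the loops above
# evaluate the kernels elementwise, so they agree with the usual closed-form, vectorized
# expressions. For instance, the Bartlett kernel is simply max(0, 1 - |x|):
#   u <- seq(-2, 2, by = 0.1)
#   all.equal(bartlett_kernel(u), pmax(0, 1 - abs(u)))   # TRUE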
/scratch/gouwar.j/cran-all/cranData/wwntests/R/spectral_kernels.R
# spectral_t_statistic computes the spectral density operator based test statistic of the functional
# data f_data.
# Input: f_data = The functional data matrix with observed functions in columns
#        kernel = The kernel function to use. The currently supported kernels are 'Bartlett' and 'Parzen'.
#                 The default kernel is 'Bartlett'.
#        bandwidth = specifies the bandwidth to use. Currently admitted arguments are positive
#                    integers, 'static' which computes the bandwidth p via p = n^(1/(2q+1)) where
#                    n is the sample size and q is the kernel order, or 'adaptive' which uses a
#                    bandwidth selection method that is based on the functional data.
# Output: a list containing the spectral density based test statistic ('stat') and the bandwidth
#         that was used ('band').
spectral_t_statistic <- function(f_data, kernel = 'Bartlett', bandwidth = 'adaptive') {
  J <- NROW(f_data)
  N <- NCOL(f_data)
  f_data <- center(f_data)
  kernel_string <- kernel
  if (kernel == 'Bartlett') {
    kernel <- bartlett_kernel
    kernel_order <- 1
  } else if (kernel == 'Parzen') {
    kernel <- parzen_kernel
    kernel_order <- 2
  #} else if (kernel == 'Daniell') {
    #kernel <- daniell_kernel
    #kernel_order <- 2
  } else {
    stop("This kernel is not supported. Please see the documentation for supported kernel functions.")
  }
  if (bandwidth == 'static') {
    bandwidth <- N^(1 / (2 * kernel_order + 1))
  } else if (bandwidth == 'adaptive') {
    bandwidth <- max(2, adaptive_bandwidth(f_data, kernel_string))
  } else if (!is.numeric(bandwidth)) {
    stop("Please see the documentation for valid bandwidth arguments.")
  }
  data_inner_prod <- crossprod(f_data) / J
  C_hat_HS_norm <- numeric(0)
  for (j in 0:(N-1)) {
    C_hat_HS_norm[j+1] <- N^(-2) * sum(data_inner_prod[(j+1):N, (j+1):N] *
                                         data_inner_prod[1:(N-j), 1:(N-j)])
  }
  kernel_vals <- sapply(1:(N-1) / bandwidth, kernel)
  spectral_distance_Q_sq <- 2 * sum((kernel_vals^2) * C_hat_HS_norm[-1])
  C_n_k <- sum( (1 - 1:(N-1)/N) * kernel_vals^2 )
  D_n_k <- sum( (1 - 1:(N-2)/N) * (1 - 2:(N-1)/N) * kernel_vals[-(N-1)]^4 )
  sigma_squared_hat <- sum(diag(data_inner_prod)) / N
  t_stat_term <- sigma_squared_hat^(-2) * C_hat_HS_norm[1] * sqrt(2 * D_n_k)
  # for convenience
  untrans_num <- 2^(-1) * N * sigma_squared_hat^(-2) * spectral_distance_Q_sq
  ### TODO: add case for when H is not R. This is denoted by const in original codebase
  beta <- 1 - (2/3) * sum(kernel_vals^2) * sum(kernel_vals^6) / (sum(kernel_vals^4)^2)
  t_stat <- ((2^(-1) * N * sigma_squared_hat^(-2) * spectral_distance_Q_sq)^beta -
               (C_n_k^beta + 2^(-1) * beta * (beta - 1) * C_n_k^(beta-2) * t_stat_term^2)) /
    (beta * C_n_k^(beta-1) * t_stat_term)
  list(stat = t_stat, band = bandwidth)
}

# adaptive_bandwidth computes the "optimal" bandwidth using a bandwidth selection method based on the
# spectral density operator which adapts to the functional data.
# Input: f_data = the functional data matrix with observed functions in columns
#        kernel = the kernel function to use. The currently supported kernels are 'Bartlett' and 'Parzen'.
#                 The default kernel is 'Bartlett'.
# Output: a scalar value of the "optimal" data-adapted bandwidth.
adaptive_bandwidth <- function(f_data, kernel) { J <- NROW(f_data) N <- NCOL(f_data) if (kernel == 'Bartlett') { kernel <- bartlett_kernel order <- 1 xi <- 1 kern_int <- 2 / 3 } else if (kernel == 'Parzen') { kernel <- parzen_kernel order <- 2 xi <- 6 kern_int <- 151 / 280 #} else if (kernel == 'Daniell') { #kernel <- daniell_kernel #order <- 2 #xi <- (pi^2) / 6 #kern_int <- 1 } else { stop('Please see the documentation for supported kernels.') } data_inner_prod <- crossprod(f_data) / (N * J) C_hat_HS <- numeric(0) for (j in 0:(N-1)) { C_hat_HS[j+1]<-sum(data_inner_prod[(j+1):N,(j+1):N] * data_inner_prod[1:(N-j),1:(N-j)]) } initial_band_q <- 4 * N^(1 / (2*order +1)) k_n_j_q <- kernel(1:(N-1) / initial_band_q) initial_band_0 <- initial_band_q / 4 k_n_j_0 <- kernel(1:(N-1) / initial_band_0) Q_hat_sq <- 2 * sum(k_n_j_0^2 * C_hat_HS[-1]) Term2 <- Q_hat_sq + C_hat_HS[1] Term1 <- 2 * sum(k_n_j_q^2 * ((1:(N-1))^(2*order)) * C_hat_HS[-1]) C_hat_TR <- numeric(0) for (j in 0:(N-1)) { C_hat_TR[j+1] <- sum(data_inner_prod[1:(N-j), (j+1):N]) } trace <- 2 * sum(k_n_j_0^2 * C_hat_TR[-1]) Term3 <- trace + sum(C_hat_TR[1]) band_constant <- (2 * order * xi^2 * Term1 / (kern_int * (Term2 + Term3)))^(1/(2*order + 1)) band_constant * N^(1 / (2*order + 1)) }
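# Worked example of the two bandwidth rules used in spectral_t_statistic() above (numbers are
# illustrative): with N = 100 observed functions, the 'static' rule p = N^(1/(2q+1)) gives
#   Bartlett (kernel order q = 1):  100^(1/3), which is about 4.64
#   Parzen   (kernel order q = 2):  100^(1/5), which is about 2.51
# while the 'adaptive' rule calls adaptive_bandwidth() and floors the result at 2.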
/scratch/gouwar.j/cran-all/cranData/wwntests/R/spectral_test_statistic.R
# t_statistic_Q computes the test statistic Q_{T,h} = T*||y^hat_h||^2 for fixed h and for T
# inferred from the functional data f_data that is passed.
# Input: f_data = the functional data matrix with observed functions in columns
#        lag = the fixed time lag used in the computation of the statistic
# Output: scalar value of the statistic Q_{T,h} to test the hypothesis H_{0,h} : y_h(t,s) = 0.
t_statistic_Q <- function(f_data, lag) {
  N = NCOL(f_data)
  J = NROW(f_data)
  gamma_hat <- autocov_approx_h(f_data, lag)
  Q_T_h <- N * sum(gamma_hat^2) / (J^2)
  Q_T_h
}

# t_statistic_V computes the statistic V_{T,K} = T*sum_h(||y^hat_h||^2) for h in 1:K and for T
# inferred from the functional data f_data that is passed to the function.
# Input: f_data = the functional data with functions in columns
#        K = the max value in the range of time lags (1:K) used
# Output: scalar value of the statistic V_{T,K} to test the hypothesis
#         H'_{0,K} : for all h in 1:K y_h(t,s) = 0.
t_statistic_V <- function(f_data, K) {
  V_T_K <- 0
  for (h in 1:K) {
    V_T_K <- V_T_K + t_statistic_Q(f_data, h)
  }
  V_T_K
}
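# Usage sketch for these internal helpers (brown_motion() is exported by this package; the
# dimensions below are illustrative):
#   f <- brown_motion(100, 50)
#   t_statistic_Q(f, lag = 1)   # single-lag statistic Q_{T,1}
#   t_statistic_V(f, K = 5)     # cumulative statistic V_{T,5} = sum of Q_{T,h} over h = 1..5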
/scratch/gouwar.j/cran-all/cranData/wwntests/R/test_statistics.R
# true_eta_approx is a non-stochastic approximation of eta_i_j (see (15)) using a Riemann sum.
# Input: f_data = the functional data matrix with functions in columns
#        i,j = the indices i,j in 1:T that we are computing eta^hat_i_j for
# Output: scalar value of eta^hat_i_j computed using a simple Riemann sum.
true_eta_approx_i_j <- function(f_data, i, j) {
  J <- NROW(f_data)
  cov_tensor <- covariance_i_j(f_data, i, j)
  2 * sum(cov_tensor^2) / (J^4)
}

# MCint_eta_approx_i_j computes an approximation of eta_i_j (defined under (15)) using the second
# Monte Carlo integration method "MCint" defined on page 8.
# Input: f_data = the functional data matrix with functions in columns
#        i,j = the indices i,j in 1:T that we are computing eta^hat_i_j for
#        M = number of vectors (v1, v2, v3, v4) to sample uniformly from U_J X U_J X U_J X U_J
# Output: scalar value of eta_hat_i_j computed using the MCint method.
MCint_eta_approx_i_j <- function(f_data, i, j, M=NULL) {
  J <- NROW(f_data)
  T <- NCOL(f_data)
  if (is.null(M)) {
    M = floor((max(150 - T, 0) + max(100-J,0) + (J / sqrt(2))))
  }
  rand_samp_mat <- matrix(nrow=M, ncol=4)
  rand_samp_mat <- cbind(sample(1:J, M, replace = TRUE), sample(1:J, M, replace = TRUE),
                         sample(1:J, M, replace = TRUE), sample(1:J, M, replace = TRUE))
  eta_hat_i_j_sum <- 0
  for (k in 1:M) {
    cov <- scalar_covariance_i_j(f_data, i, j, rand_samp_mat[k,])
    eta_hat_i_j_sum <- eta_hat_i_j_sum + (cov^2)
  }
  eta_hat_i_j <- (2/M) * eta_hat_i_j_sum
  eta_hat_i_j
}

# MCint_eta_approx_i_j_vec is a vectorized version of MCint_eta_approx_i_j.
# Input: f_data = the functional data matrix with functions in columns
#        i,j = the indices i,j in 1:T that we are computing eta^hat_i_j for
#        M = number of vectors (v1, v2, v3, v4) to sample uniformly from U_J X U_J X U_J X U_J
# Output: scalar value of eta_hat_i_j computed using the MCint method.
MCint_eta_approx_i_j_vec <- function(f_data, i, j, M=NULL) {
  J <- NROW(f_data)
  N <- NCOL(f_data)
  # honour a user-supplied M, as in MCint_eta_approx_i_j
  if (is.null(M)) {
    M = floor((max(150 - N, 0) + max(100-J,0) + (J / sqrt(2))))
  }
  rand_samp_mat <- matrix(nrow=M, ncol=4)
  rand_samp_mat <- cbind(sample(1:J, M, replace = TRUE), sample(1:J, M, replace = TRUE),
                         sample(1:J, M, replace = TRUE), sample(1:J, M, replace = TRUE))
  eta_parts <- as.list(1:M)
  eta_parts <- lapply(eta_parts, function(k) scalar_covariance_i_j(f_data, i, j,
                                                                   rand_samp_mat[k,]) ^ 2)
  eta_hat_i_j <- (2 / M) * Reduce('+', eta_parts)
  eta_hat_i_j
}

# mean_hat_V_K computes the approximation of the mean defined in (15) which is used in the Welch-
# Satterthwaite approximation as mean of the chi-squared random variable approximating V_K.
# Input: f_data = the functional data matrix with functions in columns
#        K = specifies the range of lags 1:K for the test statistic V_K
# Output: scalar approximation of the mean of the test statistic V_K.
mean_hat_V_K <- function(f_data, K) {
  J <- NROW(f_data)
  sum1 <- 0
  store <- covariance_diag_store(f_data, K)
  for (i in 1:K) {
    sum1 <- sum1 + sum(store[[i]])
  }
  mu_hat_V_K <- (1 / (J^2)) * sum1
  mu_hat_V_K
}

# mean_hat_V_K_iid computes the approximation of the mean defined in (15) which is used in the
# Welch-Satterthwaite approximation under the assumption that the functional data follows a
# strong white noise.
# Input: f_data = the functional data matrix with functions in columns
#        K = specifies the range of lags 1:K for the test statistic V_K
# Output: scalar approximation of the mean of the test statistic V_K under a strong white noise
#         assumption.
mean_hat_V_K_iid <- function(f_data, K) { J <- NROW(f_data) cov <- iid_covariance(f_data) mu_hat_Q_h <- K * ((sum(diag(cov)) / J)^2) mu_hat_Q_h } # mean_hat_Q_h computes the approximation of the mean defined in (15) which is used in the Welch- # Satterthwaite approximation as mean of the chi-squared random variable approximating Q_h. # Input: f_data = the functional data matrix with functions in columns # lag = specifies the lag use in the test statistic Q_h (lag = h in paper) # Output: scalar approximation of the mean of the test statistic Q_h. mean_hat_Q_h <- function(f_data, lag) { J <- NROW(f_data) cov <- diagonal_covariance_i(f_data, lag) mu_hat_Q_h <- (1 / (J^2)) * sum(cov) mu_hat_Q_h } # mean_hat_Q_h_iid computes the approximation of the mean defined in (15) which is used in the # Welch-Satterthwaite approximation under the assumption that the functional data follows a # strong white noise. # Input: f_data = the functional data matrix with functions in columns # Output: scalar approximation of the mean of the test statistic Q_h under a strong white noise # assumption. mean_hat_Q_h_iid <- function(f_data) { J <- NROW(f_data) cov <- iid_covariance(f_data) mu_hat_Q_h_iid <- (sum(diag(cov)) / J)^2 mu_hat_Q_h_iid } # variance_hat_V_K computes the approximation of the variance defined in (15) which is used in # the Welch- Satterthwaite approximation as the variance of the chi-squared random variable # approximating V_K. # Input: f_data = the functional data matrix with functions in columns # K = specifies the range of lags 1:K for the test statistic V_K # M = optional argument specifying the sampling size in the related Monte Carlo method # Output: scalar approximation of the variance of the test statistic V_K. variance_hat_V_K <- function(f_data, K, M=NULL) { N <- NCOL(f_data) sum1 <- 0 for (i in 1:K) { sum1 <- sum1 + MCint_eta_approx_i_j(f_data, i, i, M=M) } bandwidth <- ceiling(0.25 * (N ^ (1/3))) if (K > 1) { for (i in 1:(K-1)) { for (j in (i+1):K) { if (abs(i-j) > bandwidth) { # empirically, past a lag of 15, error is less than 1% next } sum1 <- sum1 + (2 * MCint_eta_approx_i_j(f_data, i, j, M=M)) } } } variance_V_K <- sum1 variance_V_K } # variance_hat_V_K_iid computes the approximation of the variance defined in (15) which is used # in the Welch- Satterthwaite approximation under the assumption that the functional data # follows a strong white noise. # Input: f_data = the functional data matrix with functions in columns # K = specifies the range of lags 1:K for the test statistic V_K # Output: scalar approximation of the variance of the test statistic V_K variance_hat_V_K_iid <- function(f_data, K) { J <- NROW(f_data) cov_iid <- iid_covariance(f_data) variance_V_K_iid <- K * 2 * ( sum(cov_iid^2) / (J^2) )^2 variance_V_K_iid } # variance_hat_Q_h computes the approximation of the variance defined in (15) which is used in # the Welch- Satterthwaite approximation as variance of the chi-squared random variable # approximating Q_h. 
# Input: f_data = the functional data matrix with functions in columns # lag = specifies the lag use in the test statistic Q_h (lag = h in paper) # M = optional argument specifying the sampling size in the related Monte Carlo method # Output: scalar approximation of the variance of the test statistic Q_h variance_hat_Q_h <- function(f_data, lag, M=NULL) { variance_Q_h <- MCint_eta_approx_i_j(f_data, lag, lag, M=M) variance_Q_h } # variance_hat_Q_h_iid computes the approximation of the variance defined in (15) which is used # in the Welch- Satterthwaite approximation under the assumption that the functional data # follows a strong white noise. # Input: f_data = the functional data matrix with functions in columns # lag = specifies the lag use in the test statistic Q_h (lag = h in paper) # Output: scalar approximation of the variance of the test statistic Q_h variance_hat_Q_h_iid <- function(f_data) { J <- NROW(f_data) cov_iid <- iid_covariance(f_data) variance_Q_h_iid <-2 * ( sum(cov_iid^2) / (J^2) )^2 variance_Q_h_iid }
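# Illustrative only: the mean and variance approximations in this file are typically combined
# into an approximate null distribution by two-moment (Welch-Satterthwaite style) matching.
# The package's actual quantiles and p-values come from Q_WS_hyp_test() / V_WS_quantile(),
# defined elsewhere in the package; ws_quantile_sketch() below is a hypothetical stand-alone
# helper showing only the moment-matching step, not part of the package API.
ws_quantile_sketch <- function(mu, sigma2, alpha = 0.05) {
  # match E[beta * X] = mu and Var[beta * X] = sigma2 for X ~ chi-squared(nu),
  # i.e. beta * nu = mu and 2 * beta^2 * nu = sigma2
  beta <- sigma2 / (2 * mu)
  nu <- 2 * mu^2 / sigma2
  # approximate 1 - alpha quantile of the test statistic
  beta * qchisq(1 - alpha, df = nu)
}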
/scratch/gouwar.j/cran-all/cranData/wwntests/R/welch_satterthwaite.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(wwntests)
/scratch/gouwar.j/cran-all/cranData/wwntests/inst/doc/wwntests.R
--- title: "wwntests" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{wwntests} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(wwntests) ```
/scratch/gouwar.j/cran-all/cranData/wwntests/inst/doc/wwntests.Rmd
--- title: "wwntests" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{wwntests} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library(wwntests) ```
/scratch/gouwar.j/cran-all/cranData/wwntests/vignettes/wwntests.Rmd
#' Example meteorological training data for weather generator #' #' Weather data (precipitation, temperature, and season) measured at the NWS station #' (GHCND:USC00440766) in Blacksburg, Virginia. #' #' @docType data #' #' @usage data(BlacksburgVA) #' #' @format A data frame. #' #' @keywords data #' #' @source \href{https://www.ncdc.noaa.gov/cdo-web/datasets/GHCND/stations/GHCND:USC00440766/detail}{Blacksburg, VA NWS office} #' #' @examples #' data(BlacksburgVA) "BlacksburgVA"
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/BlacksburgVA.R
#' Example meteorological training data for weather generator #' #' Weather data (precipitation, temperature, and season) for the Lower Santa Cruz River Basin in Southern Arizona. #' Dataset was developed for the Hydroclimate Analysis within Reclamation's Lower Santa Cruz River Basin Study. #' #' #' @docType data #' #' @usage data(LowerSantaCruzRiverBasinAZ) #' #' @format A data frame #' #' @keywords data #' #' @source \href{https://www.usbr.gov/lc/phoenix/programs/lscrbasin/LSCRBS_Hydroclimate_2021.pdf}{Hydroclimate Analysis - Lower Santa Cruz River Basin Study} #' #' @examples #' data(LowerSantaCruzRiverBasinAZ) "LowerSantaCruzRiverBasinAZ"
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/LowerSantaCruzRiverBasin.R
#' Get dates in window #' #' Find grouping of dates around each Julian day of year (1-366) based on the window you set. #' The start and end years for this function should include at least one leap year #' (i.e., the record should be at least 4-years in length), or else the function will #' return non-existing dates (February 29th during non-leap years).\cr #' \cr #' Setting leapflag to true will set February 29th as NA for non-leap years.\cr #' \cr #' Setting leapflag to false will remove February 29th for non-leap years (recommended).\cr #' \cr #' The 'wwidth' variable is the semi-bandwidth that sets the window size to search #' for adjacent days. Given a value of 'wwidth', the window size will be #' 2*wwidth + 1. For example a 'wwidth' of 7 would give a window size of #' 2*7+1 = 15.\cr #' \cr #' Other applications of this function might include a daily bias correction approach #' where it is necessary to find N adjacent days for each day of year in order to train #' the bias correction algorithm. #' #' @param syr Start year. #' @param eyr End year. #' @param wwidth Window set for finding surrounding days (semi-bandwidth). #' @param leapflag Set index for leap years (default = F). #' #' @return Returns a matrix with 366 rows (one for each Julian day of year, including leap days) #' and nCols; where nCols = (2 x wwidth + 1) x (eyr - syr + 1). Each row is specific to a certain #' Julian day (e.g., day 1) and contains the preceding and antecedent dates around that Julian day #' based on the window length you set. The dates will be fetched for each year in the range you set #' between the start and ending years (inclusive of the start and end years). Matrix values are either dates #' formatted as 'yyyymmdd' or NA values. #' #' #' @examples #' getDatesInWindow(syr = 2000, eyr = 2005, wwidth = 3, leapflag = FALSE) #' #' @export "getDatesInWindow" <- function(syr, eyr, wwidth, leapflag = FALSE){ #input(s) #syr - starting year #eyr - ending year #wwidth - semi-bandwidth of window, e.g., wwidth=1 results in a 3-day window width, (day-1),day,(day+1) #leapflag - boolean with default as TRUE, sets index for 02/29 (Julian day 60) for non-leap years to NA; # if FALSE, the window for Julian day 60 uses days from March. For example, with wwidth=1, # and say, syr=1987 and eyr=1999, then for Julian day 60 (02/29), the days in the window will be, # 19870228,19870301,19870302,19880228,19880229,19880301,19890228,19890301,10890302, so on and so forth. # With leapflag=TRUE, the dates will be, 19870228,NA,19870301,19880228,19880229,19880301,19890228,NA,19890301, etc. # #output(s) #matrix with list of dates (columns)for a given Julian day (1 through 366; rows) # if(leapflag == F | leapflag == FALSE){ # leapflag == FALSE # } else{leapflag == TRUE} month = c(31,29,31,30,31,30,31,31,30,31,30,31) #assumes a 366-day year. March 1st is always Julian Day #61. 
yrlist = syr:eyr nyrs = length(yrlist) noleapdate = rep(NA,length(yrlist)) #list of years with no leap dates for (i in 1:length(yrlist)){ yr = yrlist[i] if (yr%%4 != 0) date = (yr*10000+2*100+29) else if (yr%%100 == 0 && yr%%400 != 0) date = (yr*10000+2*100+29) else date = NA noleapdate[i] = date } #i rindex = which(is.na(noleapdate) == TRUE) #get indices with NA's, to be removed noleapdate = noleapdate[-rindex] #removes NA, needed for later in the code, intersect datevec = rep(NA, wwidth) juldayvec = rep(NA, wwidth) mmddvec = rep(NA, wwidth) for (yr in yrlist){ jday = 0 for (mm in 1:12){ ndays = month[mm] for (id in 1:ndays){ jday = jday+1 #date=as.numeric(paste((yr*10000+mm*100+id),sprintf("%03d",jday),sep="")) date = (yr*10000+mm*100+id) datevec = append(datevec, date) juldayvec = append(juldayvec, jday) mmdd = mm*100+id mmddvec = append(mmddvec, mmdd) } #id } #mm } #yr datevec = append(datevec, rep(NA, wwidth)) juldayvec = append(juldayvec, rep(NA, wwidth)) mmddvec = append(mmddvec, rep(NA, wwidth)) JMAT = matrix(NA, nrow = 366, ncol = nyrs*(2*wwidth+1)) #matrix with list of dates (columns)for a given Julian day (1 through 366) #loop through all the 366 Julian days for (julday in 1:366){ avec = which(juldayvec == julday) indexlist = vector() for (a in avec){ i1 = (a-wwidth) i2 = (a+wwidth) index = i1:i2 if (leapflag){ index[which((datevec[index]%in%noleapdate) == TRUE)] = NA } else{ if (length(intersect(datevec[index],noleapdate)) > 0){ rindex = which((datevec[index]%in%noleapdate) == TRUE) #remove index index = i1:(i2+1) index = index[-rindex] } #endif } indexlist = append(indexlist, index) } #a JMAT[julday,] = datevec[indexlist] } #julday #get month-day-julian day relationship mdjday = vector() jday = 0 for (mm in 1:12){ ndays = month[mm] for (id in 1:ndays){ jday = jday+1 mdjday[jday] = 100*mm+id } #id } #mm return(JMAT) } #end function
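# Quick check of the documented dimensions (illustrative values, matching the example above):
# with syr = 2000, eyr = 2005 and wwidth = 3, JMAT has 366 rows (one per Julian day) and
# (2*3 + 1) * (2005 - 2000 + 1) = 7 * 6 = 42 columns of 'yyyymmdd' dates (or NA).
#   dim(getDatesInWindow(syr = 2000, eyr = 2005, wwidth = 3))   # 366  42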
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/getDatesInWindow.R
#' Precipitation transition probabilities #' #' Calculate seasonal precipitation transition probability matrix for each year. #' Computes transition probabilities for wet/dry spells. #' Calculates values for each year, then takes average of all years. #' #' @param dat.d Training data processed from prepData wrapper function. #' # @rawNamespace import(stats, except = filter) #' #' #' @noRd #' "getPtpm" <- function(dat.d){ #calculate seasonal precipitation transition probability matrix for each year # yr.d = dat.d$year uyr = unique(yr.d) nyr = length(uyr) #precipitation data pcp.d = dat.d$prcp #transition probabilities for wet/dry spells #compute wet and dry transitions for each state and each year tpm.y2 = array(data = NA, dim = c(2,2,max(dat.d$states), length(uyr))) j=1 for(j in 1:length(uyr)){ k=1 for(k in 1:max(dat.d$states)){ if(sum(dat.d$states == k & yr.d == uyr[j]) == 0) next x = ts((pcp.d[dat.d$states == k & yr.d == uyr[j]] >= 0.01) + 0,1) tpm.tmp = transProbMatrix(x) tpm.y2[as.numeric(rownames(tpm.tmp))+1, as.numeric(colnames(tpm.tmp))+1, k,j] = tpm.tmp }#k }#j # collapse tpm.y2 over all years tpm.y = apply(tpm.y2, 1:3, mean, na.rm=T) tpm.y[is.na(tpm.y)] = 0 #tpm.y2[is.na(tpm.y2)]=0 for(j in 1:max(dat.d$states)){ for(i in 1:nrow(tpm.y[,,j])){ if(max(tpm.y[i,,j]) == 0) next tpm.y[i,,j] = tpm.y[i,,j]/sum(tpm.y[i,,j]) } } #default olist=list("tpm.y2" = tpm.y2,"tpm.y" = tpm.y) return(olist) } #end function
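# Shape of the result (following the code above): tpm.y is a 2 x 2 x nStates array where, for
# season (state) k, tpm.y[,,k] is the across-year average wet/dry transition matrix with rows
# rescaled to sum to 1 (rows that never occur are left as 0):
#
#                    to dry (col 1)   to wet (col 2)
#   from dry (row 1)   p(dry->dry)      p(dry->wet)
#   from wet (row 2)   p(wet->dry)      p(wet->wet)
#
# where a day counts as "wet" when prcp >= 0.01, the threshold used to build the 0/1 series x.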
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/getPtpm.R
#' Calculate temperature parameters #' #' Imports precipitation and temperature data, #' then fits a linear model to predict daily #' temperature based on the prior day’s temperature, #' sine and cosine functions, monthly mean temperature, #' and the occurrence of precipitation. #' #' @param dat.d Training data processed from prepData wrapper function. #' # @import lubridate # @rawNamespace import(stats, except = filter) # @importFrom dplyr group_by summarise left_join glimpse mutate relocate if_else filter #' #' @noRd #' "getTpars" <- function(dat.d){ #calculate paramters for temperature simulation # # require("plyr") # require("dplyr") #temperature data, temp for time t tmp = dat.d$temp #temperature for time t-1 ptmp = tmp ptmp[1] = NA ptmp[2:length(ptmp)] = tmp[1:(length(tmp)-1)] yr.d=dat.d$year uyr=unique(yr.d) nyr=length(uyr) mo.d=dat.d$month #define day of year tmp1 = tmp2 = c() k=1 for(k in 1:nyr){ origin.tmp = ymd(paste(uyr[k],"01","01",sep="-")) start.tmp = julian(ymd(subset(dat.d, year==uyr[k])$date)[1],origin=origin.tmp) end.tmp = julian(ymd(subset(dat.d, year==uyr[k])$date)[nrow(subset(dat.d, year==uyr[k]))], origin=origin.tmp) tmp1 = c(tmp1, seq(from=start.tmp,to=end.tmp)) tmp2 = c(tmp2, rep(end.tmp, nrow(subset(dat.d, year==uyr[k])))) }#k #change 0-364 to 1-365 dat.d$jday = tmp1+1 dat.d$tday = tmp2+1 #define cos(t) and sin(t) for daily temp series ct <- cos((2*pi*dat.d$jday)/dat.d$tday) st <- sin((2*pi*dat.d$jday)/dat.d$tday) #monthly mean temperature by year montmp.obs = data.frame(year=yr.d, month=mo.d, temp=dat.d$temp) %>% group_by(year, month) %>% summarise(tavgm = mean(temp, na.rm = T)) # montmp.obs = ddply(data.frame(year=yr.d, month=mo.d, temp=dat.d$temp), # .(year,month), summarise, tavgm=mean(temp)) dat.d = left_join(dat.d, montmp.obs, by=c("year","month")) Rt <- dat.d$tavgm #define precip occurrence for daily temp series oc <- (dat.d$prcp >= 0.01) + 0 #set NAs to 0 precipitation oc[which(is.na(oc), T)] = 0 dat.d$oc=oc dat.d$ct=ct #cosine term dat.d$st=st #sine term #define design matrix (covariates) #temp(t) is a function of: #[temp(t-1); cosine(t); sine(t); prec.occ(t); mon.mean.temp(t)] x.tmp <- cbind(ptmp, ct, st, oc, Rt) z.tmp <- lm(tmp ~ x.tmp) z.tmp.res <- z.tmp$residuals coeftmp <- z.tmp$coefficients tmp.sd <- numeric(12) for(i in 1:12) tmp.sd[i] <- sd(z.tmp.res[mo.d==i],na.rm=T) # #default olist=list("dat.d"=dat.d,"coeftmp"=coeftmp,"tmp.sd"=tmp.sd) return(olist) } #end function
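# Form of the fitted model above (coefficients taken from z.tmp): for day t with Julian day
# jday out of tday days in that year,
#   temp(t) = b0 + b1*temp(t-1) + b2*cos(2*pi*jday/tday) + b3*sin(2*pi*jday/tday)
#             + b4*oc(t) + b5*tavgm + e(t),
# where oc(t) is the precipitation occurrence indicator (prcp >= 0.01), tavgm is the mean
# temperature for that year and month, and e(t) has a month-specific standard deviation
# estimated in tmp.sd[1:12].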
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/getTpars.R
#' N states probability #' #' Returns an integer vector corresponding to n states broken by equal #' probability or equal distance. #' #' @noRd #' # @rawNamespace import(stats, except = filter) #' #' nStatProb <- function(x, n, limit.type = 'prob', limits = NULL, tie = 1, altobs = NULL ){ # returns an integer vector corresponding to n states broken by equal # probability or equal distance # limit <- if(limit.type == 'prob') quantile(x,seq(0,1,1/n)) else if(limit.type == 'equal') seq(min(x),max(x),by=diff(range(x))/n) else if(limit.type == 'manual') limits if(!is.null(altobs)) limit <- quantile(altobs,seq(0,1,1/n)) b <- integer(length(x)) for(i in 1:n){ filter <- if(tie == 1) x >= limit[i] & x <= limit[i+1] else x > limit[i] & x <= limit[i+1] #values in the first bin keep 0 because b is already initialized to 0's b[filter] <- as.integer(i-1) } # if(class(x) == 'ts') if(inherits(x, 'ts')){ return(ts(b,start=start(x),end=end(x))) }else{ return(b) } } #end function #' Transition probability matrix #' #' Computes the transition probability matrix from a sequence of states. #' # @import msm #' #' @noRd #' #' transProbMatrix <- function(x,ns=NULL,limits=NULL,tie=0){ # require(msm) if(is.null(ns)){ ns <- max(x) states <- x if(length(unique(states)) > 26) stop('Too many states, specify a smaller number.') } # else{ # states <- ntile.ts(x,n=ns,limit.type='manual',limits=limits,tie=tie) # } st <- statetable.msm(state,data=list(state=states)) st/apply(st,1,sum) } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/lib.R
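For example, applied to a short wet/dry occurrence series, transProbMatrix returns the row-normalized matrix of transition counts produced by msm::statetable.msm. The toy series below is illustrative only, and the sketch assumes lib.R has been sourced and the msm package is installed.

library(msm)

# illustrative daily occurrence series: 0 = dry, 1 = wet
occ <- ts(c(0, 0, 1, 1, 1, 0, 1, 0, 0, 1))

# counts of transitions between consecutive states
statetable.msm(state, data = list(state = occ))

# same counts converted to transition probabilities (each row sums to 1)
transProbMatrix(occ)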
#' Read in data to train simulator #' #' Read training data and set up variables to facilitate simulation. #' #' @param trainingData A matrix, data frame, or path to a .csv file containing #' the following required variables: year, month, day, prcp (daily precipitation), #' temp (daily temperature), #' and season (1, 2, ..., N, for N seasons - up to 20 seasons will work). #' Any units will work for precipitation and temperature, as long as they are #' consistent. Can be station data, basin averages, grid cells, etc. #' #' @param sdate Start date of training data (yyyymmdd). #' If empty, the start date will be the beginning of your time series. #' @param edate End date of training data (yyyymmdd). #' If empty, the end date will be the end of your time series. #' #' @examples #' #' prepData(trainingData = "./MetData.csv", sdate = 20000101, edate = 20201231) #' # @import lubridate #' #' @noRd "prepData" <- function(trainingData, sdate, edate){ # require("lubridate") # if(typeof(trainingData) == "character"){ # dat.d = read.table(trainingData, header=T, sep=",") # } else{ dat.d = trainingData # } dat.d$date1 = dat.d$year*10000 + dat.d$month*100 + dat.d$day i1 = which(dat.d$date1 == sdate) i2 = which(dat.d$date1 == edate) dat.d = dat.d[i1:i2,] yr.d = dat.d$year mo.d = dat.d$month da.d = dat.d$day wk.d = week(ymd(paste(yr.d, mo.d, da.d, sep="-"))) wk.d[wk.d == 53] = 52 dat.d$week = wk.d dat.d$date = ymd(paste(yr.d, mo.d, da.d, sep="-")) dat.d$states = dat.d$season # #default return(dat.d) } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/prepData.R
#' Random variates from the Epanechnikov kernel #' #' Simulate outside the historical envelope #' using randomly generated values from the Epanechnikov kernel #' (via acceptance-rejection sampling). \cr #' \cr #' For more details on the Epanechnikov kernel and its use #' in a weather generator, see Rajagopalan et al. (1996). #' #' @param nsim Number of simulations. #' #' @return Returns a vector of random variates sampled from the Epanechnikov kernel. `nsim` number of samples are returned. #' #' #' @examples #' repan(nsim = 10) #' #' #simulate and plot density and distribution function #' oldpar = par(mfrow=c(1,3), mar=c(2,2.5,2,1), #' oma=c(2,2,0,0), mgp=c(2,1,0), cex.axis=0.8) #' #' par(mfrow=c(1,2)) #' nsim=1e5 #' x <- sort(repan(nsim));y=0.75*(1-x^2) #' plot(x,y,xlab="x",ylab="f(x)",type="l",lwd=2) #' grid() #' title (main="Epanechnikov PDF",cex.main=0.8) #' F=rank(x)/(nsim+1) #' plot(x,F,ylab="F(x)",type="l",lwd=2) #' grid() #' title (main="Epanechnikov CDF",cex.main=0.8) #' #' dev.off() #' #' par(oldpar) #' #' @references {Rajagopalan, B., Lall, U., & Tarboton, D. G. (1996). Nonhomogeneous Markov Model for Daily Precipitation. Journal of Hydrologic Engineering, 1(1), 33–40. https://doi.org/10.1061/(ASCE)1084-0699(1996)1:1(33)} #' #' @export #' # @rawNamespace import(stats, except = filter) #' #' #' "repan" <- function(nsim){ #simulations using Epanechnikov kernel #using acceptance-rejection sampling icount=0 x <- rep(NA,nsim) while (icount < nsim){ u1=runif(1,-1,1) u2=runif(1,0,1) if (((3*u1^2+4*u2)<=3)){ icount=icount+1 x[icount]=u1 } } #end while return(x) } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/repan.R
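To see why the acceptance test in repan() targets the Epanechnikov density f(x) = 0.75*(1 - x^2) on [-1, 1]: the condition 3*u1^2 + 4*u2 <= 3 is algebraically the same as u2 <= 0.75*(1 - u1^2), i.e., keeping candidate points that fall under the density curve. A quick illustrative check (not part of the package):

set.seed(1)
u1 <- runif(1e5, -1, 1)             # candidate values on [-1, 1]
u2 <- runif(1e5,  0, 1)             # vertical coordinate for the accept/reject test

# accept when u2 falls under the density curve f(u1) = 0.75*(1 - u1^2),
# which is the same condition as 3*u1^2 + 4*u2 <= 3 used in repan()
x <- u1[u2 <= 0.75*(1 - u1^2)]

mean(x)            # close to 0
var(x)             # close to 1/5, the variance of the Epanechnikov kernel
length(x) / 1e5    # acceptance rate close to 1/2 (area 1 under f vs. box area 2)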
#' Select transition state #' #' Function selects and returns the transition state given a uniform random number between 0 and 1 and #' the cumulative probability vector of the state sequence. #' #' @param uni Uniform random number between 0 and 1. #' @param wt Cumulative probability vector of states. #' #' #' @return Returns an object containing the transition state(s) based on the given cumulative probability vector and random numbers. #' #' #' @examples #' #' rand = runif(1) #' #' print(rand) #' #' selectState(uni = rand, wt = c(0.25, .55, 0.85, 1)) #' #' #' @export #' ## "selectState" <- function(uni, wt){ #function returns state given a uniform random number and #the cumulative probability vector k = length(wt) #number of system states if (uni <= wt[1]){ state = 1 return(state) } if (uni == wt[k]){ #since max(wt[k])=1.0, so no need for (uni >= wt[k]) state = k #as we are using cumulative transition probability return(state) } jl = 1; ju = k while ((ju - jl) > 1 ){ jn = ceiling((ju+jl)/2) if(wt[jn] > uni){ ju = jn } else{ jl = jn } } #end while state = ju return(state) } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/selectState.R
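As a quick illustrative check (not part of the package), drawing many uniform numbers and mapping each through selectState should reproduce the state probabilities implied by the cumulative weight vector:

set.seed(42)
wt <- c(0.25, 0.55, 0.85, 1)   # cumulative probabilities for 4 states

# map 10,000 uniform draws to states via the bisection search in selectState()
states <- sapply(runif(1e4), selectState, wt = wt)

# empirical frequencies should be near the implied probabilities 0.25, 0.30, 0.30, 0.15
table(states) / length(states)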
#' Precipitation simulator #' #' Simulate daily precipitation depth. #' #' @param dat.d Training data processed from prepData function. #' @param syr Start year. #' @param eyr End year. #' @param wwidth Window set for finding surrounding days: +/- wwidth. #' @param nsim Number of simulation years. #' @param nrealz Number of realizations. #' @param Xjday Julian day when prcp occurs, from simTPocc function #' @param ekflag Simulate outside historical envelope? #' @param parallelize Enable parallel computing for precip simulation, set T to enable #' # @import lubridate # @import parallel # @import doParallel # @import foreach # @import sm # @rawNamespace import(stats, except = filter) #' #' @noRd #' "simPamt" <- function(dat.d,syr,eyr,wwidth,nsim,nrealz,Xjday,ekflag,awinFlag,parallelize){ # require("sm") #simulate precipitation amounts and select dates # #get month for a given julian day lpyear = dat.d$year[min(which(leap_year(dat.d$year)))] aday <- ymd(paste(lpyear, 1, 1, sep="-")) #jan 1 of a leap year to have a 366-day year it1 = which(dat.d$date == aday) it2 = it1+366-1 jdaymth <- dat.d$month[it1:it2] #calculations are based on a 366-day year # #get dates in window for each julian day 1-366 Xdates=getDatesInWindow(syr,eyr,wwidth,leapflag=T) # if (ekflag){ #get the bandwidth for each julian day bSJ <- vector() diwprcp <- vector() #days in window with precipitation jday = 1 for (jday in 1:366){ diw <- na.omit(Xdates[jday,]) #dates in window for a given julian day idxlist <- vector() iday = 1 for (iday in 1:length(diw)){ idxlist[iday] = which(dat.d$date1==diw[iday]) } #iday baprcp <- dat.d$prcp[idxlist] #basin average precipitation #also includes 0 prcp amount #for days within the window pamt <- baprcp[which(baprcp>=0.01)] #precipitation amount vector wwidth.adapt = wwidth # while(length(pamt) < 2){ wwidth.adapt = wwidth.adapt + 1 #get dates in window for each julian day 1-366 Xdates.adapt=getDatesInWindow(syr,eyr,wwidth.adapt,leapflag=T) diw <- na.omit(Xdates.adapt[jday,]) #dates in window for a given julian day idxlist <- vector() for (iday in 1:length(diw)){ idxlist[iday]=which(dat.d$date1==diw[iday]) } #iday baprcp <- dat.d$prcp[idxlist] pamt <- baprcp[which(baprcp>=0.01)] #precipitation amount vector } # if(awinFlag == T & wwidth.adapt != wwidth){ # cat(paste0("\n Window width too small on Julian day ", jday,", increased window to ", wwidth.adapt*2+1, " days\n")) # } diwprcp[jday] = length(pamt) logpamt <- log(pamt) #log-transformed precipitation amount vector bSJ[jday] = hsj(logpamt) #Sheather-Jones plug-in bandwidth } #jday } #ekflag # #simulate precipitation amount and select prcp date # Xpdate <- Xpamt <- matrix(NA, nrow=nsim*366, ncol=nrealz) if(parallelize == T){ Xpdate <- Xpamt <- matrix(NA, nrow=nsim*366, ncol=1) # library(foreach) # library(doParallel) cl <- makePSOCKcluster(detectCores()-1) registerDoParallel(cl) # startTime <- Sys.time() #benchmark run time irealz = 1 result <- foreach(irealz=1:nrealz, .packages='foreach', .export=c('repan', 'getDatesInWindow')) %dopar% { # for (irealz in 1:nrealz){ message(paste0("-- Starting trace number ", irealz, " --")) xp = Xjday[,irealz] nxp = length(xp) ixp = 1 # foreach(ixp=1:nxp, .combine = c) %do% { for (ixp in 1:nxp){ jd = xp[ixp] if (is.na(jd)){ Xpamt[ixp] = 0.0 }else{ diw <- na.omit(Xdates[jd,]) #dates in window for a given julian day idxlist <- vector() iday = 1 for (iday in 1:length(diw)){ idxlist[iday] = which(dat.d$date1 == diw[iday]) } #iday baprcp <- dat.d$prcp[idxlist] #e.g., basin average precipitation #also includes 0 
prcp amount #days within the window np = length(which(baprcp>=0.01)) #number of prcp days in window pdate <- diw[which(baprcp>=0.01)] #dates in window where prcp occurred pamt <- baprcp[which(baprcp>=0.01)] #precipitation amount vector wwidth.adapt = wwidth # while(np < 2){ wwidth.adapt = wwidth.adapt + 1 #get dates in window for each julian day 1-366 Xdates.adapt = getDatesInWindow(syr,eyr,wwidth.adapt,leapflag=T) diw <- na.omit(Xdates.adapt[jd,]) #dates in window for a given julian day idxlist <- vector() iday = 1 for (iday in 1:length(diw)){ idxlist[iday]=which(dat.d$date1==diw[iday]) } #iday baprcp <- dat.d$prcp[idxlist] #e.g., basin average precipitation #also includes 0 prcp amount #days within the window np = length(which(baprcp>=0.01)) #number of prcp days in window pdate <- diw[which(baprcp>=0.01)] #dates in window where prcp occurred pamt <- baprcp[which(baprcp>=0.01)] #precipitation amount vector message(paste0("\n Window width too small on Julian day ", jd,", increasing window to ", wwidth.adapt*2+1, " days")) } # if(awinFlag == T & wwidth.adapt != wwidth){ # print(paste0("\n Window width too small on Julian day ", jd,", increased window to ", wwidth.adapt*2+1, " days\n")) # } logpamt <- log(pamt) #log-transformed prcp amount vector aindex = sample(1:np, 1) #randomly pick a prcp day Xpdate[ixp] = pdate[aindex] ybar = logpamt[aindex] Xpamt[ixp] = exp(ybar) if (ekflag){ rek = repan(1) #simulate a random number from the EKD Xpamt[ixp] = exp(ybar+rek*bSJ[jd]) } #ekflag } } #ixp message("\n") list(Xpamt, Xpdate) } #irealz stopCluster(cl) # endTime = Sys.time() # timeP = difftime(endTime, startTime, units='mins') Xpamt = as.data.frame(do.call(cbind,lapply(result,function(x){x[[1]]}))) Xpdate = as.data.frame(do.call(cbind,lapply(result,function(x){x[[2]]}))) # #default olist=list("Xpamt"=Xpamt,"Xpdate"=Xpdate) if (ekflag) olist=list("Xpamt"=Xpamt,"Xpdate"=Xpdate,"bSJ"=bSJ) return(olist) }else{ #non-parallel loop Xpdate <- Xpamt <- matrix(NA, nrow=nsim*366, ncol=nrealz) # startTime <- Sys.time() #benchmark run time irealz = 1 for (irealz in 1:nrealz){ message(paste0("-- Starting trace number ", irealz, " --")) xp = Xjday[,irealz] nxp = length(xp) ixp = 1 for (ixp in 1:nxp){ jd = xp[ixp] if (is.na(jd)){ Xpamt[ixp,irealz] = 0.0 }else{ diw <- na.omit(Xdates[jd,]) #dates in window for a given julian day idxlist <- vector() iday = 1 for (iday in 1:length(diw)){ idxlist[iday] = which(dat.d$date1 == diw[iday]) } #iday baprcp <- dat.d$prcp[idxlist] #e.g., basin average precipitation #also includes 0 prcp amount #days within the window np = length(which(baprcp>=0.01)) #number of prcp days in window pdate <- diw[which(baprcp>=0.01)] #dates in window where prcp occurred pamt <- baprcp[which(baprcp>=0.01)] #precipitation amount vector wwidth.adapt = wwidth # while(np < 2){ wwidth.adapt = wwidth.adapt + 1 #get dates in window for each julian day 1-366 Xdates.adapt = getDatesInWindow(syr,eyr,wwidth.adapt,leapflag=T) diw <- na.omit(Xdates.adapt[jd,]) #dates in window for a given julian day idxlist <- vector() iday = 1 for (iday in 1:length(diw)){ idxlist[iday]=which(dat.d$date1==diw[iday]) } #iday baprcp <- dat.d$prcp[idxlist] #e.g., basin average precipitation #also includes 0 prcp amount #days within the window np = length(which(baprcp>=0.01)) #number of prcp days in window pdate <- diw[which(baprcp>=0.01)] #dates in window where prcp occurred pamt <- baprcp[which(baprcp>=0.01)] #precipitation amount vector message(paste0("\n Window width too small on Julian day ", jd,", increasing window to ", 
wwidth.adapt*2+1, " days")) } # if(awinFlag == T & wwidth.adapt != wwidth){ # print(paste0("\n Window width too small on Julian day ", jd,", increased window to ", wwidth.adapt*2+1, " days\n")) # } logpamt <- log(pamt) #log-transformed prcp amount vector aindex = sample(1:np, 1) #randomly pick a prcp day Xpdate[ixp,irealz] = pdate[aindex] ybar = logpamt[aindex] Xpamt[ixp,irealz] = exp(ybar) if (ekflag){ rek = repan(1) #simulate a random number from the EKD Xpamt[ixp,irealz] = exp(ybar+rek*bSJ[jd]) } #ekflag } } #ixp message("\n") } #irealz # endTime = Sys.time() # timeNP = difftime(endTime, startTime, units='mins') # #default olist=list("Xpamt"=Xpamt,"Xpdate"=Xpdate) if (ekflag) olist=list("Xpamt"=Xpamt,"Xpdate"=Xpdate,"bSJ"=bSJ) return(olist) } #end non-parallel loop } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/simPamt.R
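The core resampling-plus-perturbation step above can be summarized in a few lines: a historical wet-day amount from the day-of-year window is resampled, and, when ekflag is on, its log is jittered by an Epanechnikov draw scaled by the Sheather-Jones bandwidth. The amounts below are illustrative placeholders, not package data; the sketch assumes the sm package is available for hsj() and that repan() is defined as above.

library(sm)   # provides hsj(), the Sheather-Jones plug-in bandwidth

# illustrative wet-day amounts falling inside a day-of-year window
pamt <- c(0.05, 0.08, 0.12, 0.25, 0.40, 0.60, 0.75, 1.30)
logpamt <- log(pamt)

bw   <- hsj(logpamt)            # bandwidth on the log scale
ybar <- sample(logpamt, 1)      # resample one historical log amount

# without the kernel: exact historical value; with it: perturbed in log space
exp(ybar)
exp(ybar + repan(1) * bw)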
#' Simulate precipitation occurrence and temperature magnitude #' #' Simulate daily precipitation occurrence (wet or dry) #' and daily temperature for the desired length of time. #' One of the covariates in the temperature model is #' precipitation occurrence. #' #' @param aseed Random number seed. #' @param dat.d Training data processed from prepData wrapper function. #' @param nsim Number of simulation years. #' @param nrealz Number of realizations. #' @param coeftmp Coefficients from linear model for predicting daily temperature. #' @param tpm.sd Standard deviation of temperature model residuals by month. #' @param tpm.y2 Transition probability matrix for each year in training data. #' @param tpm.y Aggregate transition probability matrix for all years in training data. #' # @import lubridate # @rawNamespace import(stats, except = filter) #' #' @noRd #' "simTPocc" <- function(aseed, dat.d, nsim, nrealz, coeftmp, tmp.sd, tpm.y2, tpm.y, tempPerturb){ #simulate precipitation occurrence and temperature magnitude # #initialize arrays X <- matrix(NA, nrow = nsim*366, ncol = nrealz) #saves prcp occurrence simyr1 <- matrix(NA, nrow = nsim, ncol = nrealz) #saves ALL selected simulation years Xjday <- matrix(NA, nrow = nsim*366, ncol = nrealz) #saves julian day when prcp occurs Xtemp <- matrix(NA, nrow = nsim*366, ncol = nrealz) #saves temperature Xseas <- matrix(NA, nrow = nsim*366, ncol = nrealz) #saves season Xweek <- matrix(NA, nrow = nsim*366, ncol = nrealz) #saves week yr.d = dat.d$year uyr = unique(yr.d) nyr = length(uyr) set.seed(aseed) #set seed # #get month for a given julian day lpyear = uyr[min(which(leap_year(uyr)))] aday <- ymd(paste(lpyear, 1, 1, sep="-")) #jan 1 of a leap year to have a 366-day year it1 = which(dat.d$date == aday) it2 = it1+366-1 jdaymth <- dat.d$month[it1:it2] zz <- rep(jdaymth, nsim) #simulation month yy <- rep(1:nsim, each=366) #simulation year # #realization loop irealz = 1 for (irealz in 1:nrealz){ message(irealz) prcpocc <- temp <- matrix(NA, nrow=366, ncol=nsim) #366-day pseas <- pweek <- matrix(NA, nrow=366, ncol=nsim) #366-day simyr <- rep(NA, nsim) #loop through simulation years isim=1 for (isim in 1:nsim){ iyr = sample(1:nyr, 1) #randomly select a year index simyr[isim] = uyr[iyr] simyr1[isim,irealz] = simyr[isim] leapflag = 0 if (leap_year(uyr[iyr])) leapflag = 1 nt = 365 + leapflag startdate <- ymd(paste(uyr[iyr], 1, 1, sep="-")) #jan 1 of year uyr[iyr] it1 = which(dat.d$date == startdate) #starting index of data it2 = it1+nt-1 dframe <- dat.d[it1:it2,] #data subset for simulation prcpocc[1,isim] = dframe$oc[1] if(is.na(dframe$temp[1]) == FALSE){ temp[1,isim] = dframe$temp[1] #temperature for day=1 of simulation }else if(is.na(dframe$temp[1]) == TRUE){ temp[1,isim] = dframe$tavgm[1] #temperature for day=1 of simulation } if (dframe$oc[1]==0) pstate=1 #dry, corresponds to row number 1 if (dframe$oc[1]==1) pstate=2 #wet, corresponds to row number 2 aseas <- dframe$season[1] #selected season for sim day 1 aweek <- dframe$week[1] #week number for sim day 1 ptvec <- tpm.y2[pstate,,aseas,iyr] #prcp, prob transition vector #Use 30-year average probabilities if transition is not found within the season that year if(is.na(ptvec[1]) == TRUE | is.na(ptvec[2]) == TRUE) ptvec=tpm.y[pstate,,aseas] pseas[1,isim] = aseas pweek[1,isim] = aweek #loop through days in simulated year it = 2 for (it in 2:nt){ #precipitation occurrence u = runif(1) pstate = selectState(u,cumsum(ptvec)) prcpocc[it,isim] = pstate - 1 #0 - dry; 1 - wet aseas <- dframe$season[it] aweek <- 
dframe$week[it] pseas[it,isim] = aseas pweek[it,isim] = aweek ptvec <- tpm.y2[pstate,,aseas,iyr] if(is.na(ptvec[1]) == TRUE | is.na(ptvec[2]) == TRUE) ptvec = tpm.y[pstate,,aseas] #temperature if(tempPerturb == T){ temp[it,isim] = sum(coeftmp*c(1,temp[(it-1),isim], dframe$ct[it], dframe$st[it], prcpocc[it,isim], dframe$tavgm[it] ) ) + rnorm(n=1,mean=0,sd=tmp.sd[jdaymth[it]]) } else{ temp[it,isim] = sum(coeftmp*c(1,temp[(it-1),isim], dframe$ct[it], dframe$st[it], prcpocc[it,isim], dframe$tavgm[it])) } } #it } #isim #make arrays #precipitation occurrence x1 <- prcpocc #dimension of prcpocc is 366 x nsim x2 <- vector() x2 <- append(x2,x1[,1:nsim]) X[,irealz] = x2 #precipitation occurrence #temperature x3 <- temp #dimension of temp is 366 x nsim x4 <- vector() x4 <- append(x4, x3[,1:nsim]) Xtemp[,irealz] = x4 #temperature #seasonality x5 <- pseas x6 <- vector() x6 <- append(x6,x5[,1:nsim]) Xseas[,irealz] = x6 #season #week x7 <- pweek x8 <- vector() x8 <- append(x8, x7[,1:nsim]) Xweek[,irealz] = x8 #week } #irealz # #get julian day of precipitation occurrence it1 = seq(1, dim(X)[1], 366) it2 = it1+366-1 for (irealz in 1:nrealz){ for(isim in 1:nsim){ i1 = it1[isim] i2 = it2[isim] xx <- X[i1:i2, irealz] pday <- which(xx == 1) idxlist = i1+pday-1 Xjday[idxlist, irealz] = pday } #isim } #irealz # X1 <- cbind(yy,zz,X) #simulation year, simulation month, prcp occurrence # #default olist=list("simyr1" = simyr1, "X" = X, "X1" = X1, "Xjday" = Xjday, "Xseas" = Xseas, "Xweek" = Xweek, "Xtemp" = Xtemp ) return(olist) } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/simTPocc.R
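The daily temperature update inside the loop above is a single linear-model prediction plus optional monthly noise. A minimal sketch with made-up coefficient values (the real coeftmp and tmp.sd come from getTpars):

# illustrative regression coefficients: intercept, temp(t-1), cos(t), sin(t),
# precip occurrence(t), monthly mean temp(t) -- placeholders, not fitted values
coeftmp <- c(5.0, 0.6, -2.0, 1.5, -0.8, 0.35)
tmp.sd  <- rep(3, 12)            # monthly residual standard deviations (placeholder)

prev_temp <- 48                                   # temperature on day t-1
ct <- cos(2*pi*32/365); st <- sin(2*pi*32/365)    # seasonal harmonics for day of year 32
occ <- 1                                          # precipitation occurred on day t
Rt  <- 40                                         # monthly mean temperature for day t's month

# deterministic prediction, then add monthly noise when tempPerturb = TRUE
pred <- sum(coeftmp * c(1, prev_temp, ct, st, occ, Rt))
pred + rnorm(1, mean = 0, sd = tmp.sd[2])         # day 32 falls in February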
#' Spell length calculation #' #' Function to calculate the length (duration in years) of wet or dry periods. #' #' @param s A binary vector containing only 0 (dry) and 1 (wet) values. #' #' @return Returns a list object containing a vector of dry spell lengths and a vector of wet spell lengths. #' #' @examples #' #' #use 0 for dry and 1 for wet years #' spells = c(0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0) #' #' spellLengths(spells) #' #' @export #' #' "spellLengths" <- function(s){ #input s is a binary vector of 0 (dry) and 1 (wet) only n=length(s) l=list("0" = c(),"1" = c()) #"0": dry; "1":wet cnt=1 for (i in 2:n){ if (s[i-1] == s[i]){ cnt = cnt+1 } else{ l[[as.character(s[i-1])]]=c(l[[as.character(s[i-1])]],cnt) cnt = 1 } }#i l[[as.character(s[n])]]=c(l[[as.character(s[n])]],cnt) return(l) } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/spellLengths.R
#' Write simulations to file #' #' Write simulation results to .csv files (one .csv file is generated for each trace). #' Inputs include the weather simulations stored in the list object output from the `wx()` function as well as the `nsim` and `nrealz` #' variables that were inputs to the `wx()` function.\cr #' \cr #' A debug flag allows for more detailed reports (debug = TRUE), but setting 'debug = FALSE' is generally #' recommended for more concise output. Keeping 'debug = FALSE' will also include a simulation #' time stamp (year, month, day) beginning in year 1.\cr #' \cr #' This function will write the .csv files to your working directory.\cr #' \cr #' Leap years may be included in the simulated weather if they are included in your training data, #' so non-leap years include a row of 'NA' values at the end of the calendar year as a book-keeping #' measure so that the total number of rows in each trace is the same. #' #' @param wxOutput Weather simulations output from `wx()` function. #' @param nsim Number of simulation years. #' @param nrealz Number of realizations (ensemble size). #' @param path Specified path to where simulation output shall be written. Defaults to current working directory (path = NULL). #' Specified path should be a character string of the folder location ending with '/'. #' @param debug Option to include additional variables in the .csv file #' outputs for debugging and advanced analysis. Includes sampling date, etc. Default = FALSE (off). #' If debug is off, the weather simulations will have a simulation year time stamp #' (beginning in year 1) as well as month and day time stamps. #' #' @return No return value, called to write simulation results to file. #' #' @examples #' #' \donttest{ #' z = wx(trainingData = LowerSantaCruzRiverBasinAZ, #' eyr = 1990, nsim = 5, nrealz = 5, aseed = 23, #' wwidth = 3, unitSystem = "U.S. 
Customary", #' ekflag = TRUE, awinFlag = TRUE, tempPerturb = TRUE, parallelize = FALSE) #' #' #' writeSim(wxOutput = z, nsim = 5, nrealz = 5, path = paste0(tempdir(), "/"), debug = FALSE) #'} #' #' @export #' # @import utils #' #' "writeSim" <- function(wxOutput, nsim, nrealz, path = NULL, debug = FALSE){ #parse variables from wx() output dat.d = wxOutput$dat.d simyr1 = wxOutput$simyr1 X = wxOutput$X Xseas = wxOutput$Xseas Xpdate = wxOutput$Xpdate Xpamt = wxOutput$Xpamt Xtemp = wxOutput$Xtemp #write simulation output # it1 <- seq(1, length(X[,1]), 366) it2 = it1+366-1 #loop through realization for (irealz in 1:nrealz){ outmat <- vector() if(is.null(path) == TRUE){ fout = paste("Realization_", sprintf("%03d", irealz), ".csv", sep="") }else if(is.null(path) == FALSE){ fout = paste(path, "Realization_", sprintf("%03d", irealz), ".csv", sep="") } if(debug == TRUE){ #loop through simulation years for (isim in 1:nsim){ leapflag = FALSE ayr = simyr1[isim, irealz] if (leap_year(ayr)) leapflag = TRUE col1 = rep(isim, 366) #column 1, simulation year d1 = ayr*10^4+01*10^2+01; d2 = ayr*10^4+12*10^2+31 i1 = which(dat.d$date1 == d1) i2 = which(dat.d$date1 == d2) col2 = dat.d$date1[i1:i2] #column 2, simulation date if (leapflag == FALSE) col2 = c(col2,NA) i1 = it1[isim] i2 = it2[isim] col3 = Xseas[i1:i2, irealz] #column 3, simulation season col4 = X[i1:i2, irealz] #column 4, precipitation occurrence col5 = Xpdate[i1:i2, irealz] #column 5, precipation resampling date col6 = Xpamt[i1:i2, irealz] #column 6, resampled precipitation amount col7 = Xtemp[i1:i2, irealz] #column 7, simulated temperature outmat = rbind(outmat, cbind(col1, col2, col3, col4, col5, col6, col7)) } #isim narows = (which(is.na(outmat[,2]) == TRUE))*-1 outmat = outmat[narows,] colnames(outmat) = c("Year", "Date", "Season", "PrcpOccurence", "PrcpResamplingDate", "ResampledPrcpAmt", "SimulatedTemp") } else{ #loop through simulation years for (isim in 1:nsim){ leapflag = FALSE ayr = simyr1[isim, irealz] if (leap_year(ayr)) leapflag = TRUE col1 = rep(isim, 366) #column 1, simulation year d1 = ayr*10^4+01*10^2+01; d2 = ayr*10^4+12*10^2+31 i1 = which(dat.d$date1 == d1) i2 = which(dat.d$date1 == d2) col2 = dat.d$date1[i1:i2] #column 2, simulation date if (leapflag == FALSE) col2 = c(col2,NA) i1 = it1[isim] i2 = it2[isim] col3 = Xseas[i1:i2, irealz] #column 3, simulation season col4 = X[i1:i2, irealz] #column 4, precipitation occurrence col5 = Xpdate[i1:i2, irealz] #column 5, precipation resampling date col6 = Xpamt[i1:i2, irealz] #column 6, resampled precipitation amount col7 = Xtemp[i1:i2, irealz] #column7, simulated temperature #create time series of 'simulation day' sim.yr = rep(isim, length(col2)) sim.month = month(ymd(col2)) sim.day = day(ymd(col2)) outmat = rbind(outmat, cbind(sim.yr, sim.month, sim.day, col6, col7, col3)) } #isim colnames(outmat) = c("simulation year", "month", "day", "prcp", "temp", "season") } write.table(outmat, fout, row.names = FALSE, col.names = TRUE, sep=",") } #irealz } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/writeSim.R
#' Runs weather generator #' #' Runs the weather generator based on user inputs.\cr #' \cr #' Your input/training data MUST have the following variables, #' in this order: year, month, day, prcp, temp, season. These variables are case sensitive #' and must be spelled as specified here.\cr #' \cr #' Your training data should start at the beginning of the calendar year (January 1) as the #' weather simulator is designed for the full calendar year.\cr #' Use starting- and ending- years to subset your input data if desired; #' otherwise starting and ending dates will default to the beginning and end of your dataset.\cr #' \cr #' Using 'ekflag = T' will generate simulations outside of the historical envelope #' via an Epanechnikov kernel. For more details on the Epanechnikov kernel and its use #' in a weather generator, see Rajagopalan et al. (1996).\cr #' \cr #' \cr #' Leap years may be included in the simulated weather if they are included in your training data, #' so non-leap years include a row of 'NA' values at the end of the calendar year as a book-keeping #' measure so that the total number of rows in each trace is the same.\cr #' \cr #' The weather generator can handle missing precipitation and temperature data if it is #' marked as `NA` in your training data. It will set `NA` precipitation values to 0 and pass along `NA` temperature values #' if that date is sampled for the simulations. Consider replacing any missing data with monthly or #' daily averages to avoid `NA` values in your simulated weather. #' #' #' @param trainingData Either a matrix, dataframe, or path to a .csv file with the following variables #' is required: year, month, day, prcp (daily precipitation), #' temp (daily temperature), #' and season (1, 2, ..., N, for N seasons - up to 26 seasons will work but seasons need to be defined in a meaningful way). #' Units must be either U.S. Customary (inches, degrees F) or metric (mm, degrees C) and must be specified with #' the `unitSystem` input variable. Input data can be station-based, basin averages, grid cells, etc. #' Input data MUST have these variables: year, month, day, prcp, temp, season. #' @param syr Optional: subset training data to specific start year (defaults to beginning of training data). Subset will begin on the first day available in `syr`. # @param smm Training data start month (you can also use to subset your training data). # @param sdd Training data start day (you can also use to subset your training data). #' @param eyr Optional: subset training data to specific end year (defaults to end of training data). Subset will end on the last day available in `eyr`. # @param emm Training data end month (you can also use to subset your training data). # @param edd Training data end day (you can also use to subset your training data). #' @param nsim Number of simulation years. #' @param nrealz Number of realizations or traces (i.e., ensemble size). #' @param aseed Specify a seed for reproducibility. #' @param wwidth Set the sampling window for each day of year, a lower value for `wwidth` will sample fewer surrounding days (lower variability) and a higher value will sample more days (higher variability). Typical setting of `wwidth` is between 2 and 15, resulting in a daily sampling window of 5 days and 31 days, respectively. #' @param unitSystem Specify the unit system of your training data. Input a string that is either "U.S. Customary" or "Metric". U.S. 
Customary corresponds to inches and degrees Farenheit, while Metric corresponds to millimeter and degrees Celsius. #' If Metric is specified, units will automatically be converted to U.S. Customary for weather simulation, then re-converted to Metric for results output. #' @param ekflag Simulate outside historical envelope using an Epanechnikov kernel? (T/F) #' @param awinFlag Set to T or TRUE if you would like to see the results of the adaptive window width. #' If only one or zero precipitation values (>0.01 inches) are found within the initial window width you set from a day where precipitation occurred, #' it will be iteratively increased until two or more precipitation values are found. By default, the results are not shown. #' @param tempPerturb Set to T or TRUE if you would like to add random noise to the #' temperature simulations based on a normal distribution fit on the training data. #' @param parallelize Enable parallel computing for precipitation simulation, set T or TRUE to enable. By default, this is turned off. #' #' #' @return Returns a list containing both inputs to the weather generator as well as outputs. #' \itemize{ #' \item dat.d - User inputs to weather generator, saved for future use. #' \item simyr1 - The years sampled for each trace. #' \item X - The simulated daily dry/wet sequences for each trace (0 = dry, 1 = wet). #' \item Xseas - The simulated season by day for each trace. #' \item Xpdate - If precipitation was simulated to occur on a given day, this is the date from which historical precipitation is sampled. #' \item Xpamt - The simulated daily precipitation depth. #' \item Xtemp - The simulated daily mean temperature. #' } #' #' @examples #' #' \donttest{ #' #' data(LowerSantaCruzRiverBasinAZ) #' #' head(LowerSantaCruzRiverBasinAZ) #' #' #No input for `syr` because we want the training period to begin at the beginning of the data #' #record (1970), but set `eyr` = 1990 because we want to subset training period to end in 1990. #' #' wx(trainingData = LowerSantaCruzRiverBasinAZ, #' eyr = 1990, nsim = 3, nrealz = 3, aseed = 23, #' wwidth = 3, unitSystem = "U.S. Customary", #' ekflag = TRUE, awinFlag = TRUE, tempPerturb = TRUE, parallelize = FALSE) #' #'} #' #' @export #' # @importFrom plyr ddply # @importFrom dplyr group_by summarise left_join glimpse mutate relocate if_else filter # @import lubridate # @import msm # @import sm # @import doParallel # @import parallel # @import foreach # @import utils # @import magrittr #' #' "wx" <- function(trainingData, syr = NULL, eyr = NULL, nsim, nrealz, aseed, wwidth, unitSystem, ekflag, awinFlag, tempPerturb, parallelize = NULL ){ #weather generator # # require("lubridate") if(typeof(trainingData) == "character"){ trainingData = read.table(trainingData, header=T, sep = ",") } #starting and ending date of simulation if(is.null(syr) == T){ syr = trainingData$year[1] smm = trainingData$month[1] sdd = trainingData$day[1] sdate = syr*10^4 + smm*10^2 + sdd }else{ trainingData = subset(trainingData, year >= syr) smm = trainingData$month[1] sdd = trainingData$day[1] sdate = syr*10^4+smm*10^2+sdd } if(is.null(eyr) == T){ eyr = tail(trainingData$year,1) emm = tail(trainingData$month,1) edd = tail(trainingData$day,1) edate = eyr*10^4 + emm*10^2 + edd }else{ trainingData = subset(trainingData, year <= eyr) emm = tail(trainingData$month,1) edd = tail(trainingData$day,1) edate = eyr*10^4+emm*10^2+edd } #convert units to U.S. 
Customary if necessary if(unitSystem == "metric" | unitSystem == "Metric"){ trainingData$prcp = trainingData$prcp/25.4 #mm to inches trainingData$temp = trainingData$temp*1.8 + 32 #deg C to deg F } # #read data and setup variables to facilitate simulation dat.d <- prepData(trainingData, sdate, edate) # #calculate seasonal precipitation transition prob matrix for each year tpm.y2 <- getPtpm(dat.d)$tpm.y2 tpm.y <- getPtpm(dat.d)$tpm.y # #calculate parameters for temperature simulation z <- getTpars(dat.d) dat.d=z$dat.d #updated with tavgm, sine and cosine terms coeftmp=z$coeftmp tmp.sd=z$tmp.sd # #simulate precipitation occurrence and temperature message("...Simulate precipitation occurrence and temperature...") z <- simTPocc(aseed,dat.d,nsim,nrealz,coeftmp,tmp.sd,tpm.y2,tpm.y,tempPerturb) simyr1=z$simyr1 X=z$X Xjday=z$Xjday Xseas=z$Xseas Xtemp=z$Xtemp # #simulate precipitation amount message("...Simulate precipitation amount...") z <- simPamt(dat.d,syr,eyr,wwidth,nsim,nrealz,Xjday,ekflag,awinFlag,parallelize) Xpamt <- z$Xpamt Xpdate <- z$Xpdate if (ekflag) bSJ <- z$bSJ # #re-convert units back to metric if necessary if(unitSystem == "metric" | unitSystem == "Metric"){ #simulations Xpamt = Xpamt*25.4 #inches to mm Xtemp = (Xtemp-32)*(5/9) #deg F to deg C #observed dat.d$prcp = dat.d$prcp*25.4 #inches to mm dat.d$temp = (dat.d$temp-32)*(5/9) #deg F to deg C } #default olist=list("dat.d"=dat.d,"simyr1"=simyr1,"X"=X,"Xseas"=Xseas, "Xpdate"=Xpdate,"Xpamt"=Xpamt,"Xtemp"=Xtemp ) if (ekflag) olist=c(olist) return(olist) } #end function
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/wx.R
#' \code{wxgenR} package #' #' A weather generator with seasonality #' #' #' @docType package #' @name wxgenR #' #' @rawNamespace import(stats, except = filter) #' @import lubridate #' @importFrom dplyr group_by summarise left_join glimpse mutate relocate if_else filter #' @import msm #' @import parallel #' @import doParallel #' @import foreach #' @import sm #' @import utils #' @importFrom plyr ddply #' @import magrittr #' #' #' NULL ## quiets concerns of R CMD check re: the .'s that appear in pipelines utils::globalVariables(c("temp", "state"))
/scratch/gouwar.j/cran-all/cranData/wxgenR/R/wxgenR.R
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----------------------------------------------------------------------------- library(wxgenR) library(lubridate) library(dplyr) library(tidyr) library(reshape2) library(ggpubr) library(data.table) library(moments) library(seas) data(BlacksburgVA) head(BlacksburgVA) ## ---- results='hide'---------------------------------------------------------- nsim = 5 #number of simulation years nrealz = 10 #number of traces in ensemble startTime <- Sys.time() #benchmark run time z = wx(trainingData = BlacksburgVA, syr = 2000, eyr = 2004, nsim = nsim, nrealz = nrealz, aseed = 123, wwidth = 1, unitSystem = "Metric", ekflag = TRUE, awinFlag = TRUE, tempPerturb = TRUE, parallelize = FALSE) endTime = Sys.time() ## ----------------------------------------------------------------------------- # glimpse(z) ## ----------------------------------------------------------------------------- #parse variables from wx() output dat.d = z$dat.d simyr1 = z$simyr1 X = z$X Xseas = z$Xseas Xpdate = z$Xpdate Xpamt = z$Xpamt Xtemp = z$Xtemp #write simulation output # it1 <- seq(1, length(X[,1]), 366) it2 = it1+366-1 #initialize storage sim.pcp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.tmp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.szn = matrix(NA, nrow = nsim*366, ncol = nrealz+3) #loop through realization irealz = 1 for (irealz in 1:nrealz){ outmat <- vector() #loop through simulation years isim = 1 for (isim in 1:nsim){ leapflag = FALSE ayr = simyr1[isim, irealz] if (lubridate::leap_year(ayr)) leapflag = TRUE col1 = rep(isim, 366) #column 1, simulation year d1 = ayr*10^4+01*10^2+01; d2 = ayr*10^4+12*10^2+31 i1 = which(dat.d$date1 == d1) i2 = which(dat.d$date1 == d2) col2 = dat.d$date1[i1:i2] #column 2, simulation date if (leapflag == FALSE) col2 = c(col2,NA) i1 = it1[isim] i2 = it2[isim] col3 = Xseas[i1:i2, irealz] #column 3, simulation season col4 = X[i1:i2, irealz] #column 4, precipitation occurrence col5 = Xpdate[i1:i2, irealz] #column 5, precipation resampling date col6 = Xpamt[i1:i2, irealz] #column 6, resampled precipitation amount col7 = Xtemp[i1:i2, irealz] #column7, simulated temperature #create time series of 'simulation day' sim.yr = rep(isim, length(col2)) sim.month = month(ymd(col2)) sim.day = day(ymd(col2)) outmat = rbind(outmat, cbind(sim.yr, sim.month, sim.day, col6, col7, col3)) } #isim colnames(outmat) = c("simulation year", "month", "day", "prcp", "temp", "season") if(irealz == 1){ sim.pcp[,1:3] = outmat[,1:3] sim.tmp[,1:3] = outmat[,1:3] sim.szn[,1:3] = outmat[,1:3] } sim.pcp[,irealz+3] = outmat[,4] sim.tmp[,irealz+3] = outmat[,5] sim.szn[,irealz+3] = outmat[,6] } #irealz ## ----------------------------------------------------------------------------- # df = sim.pcp formatting = function(df){ df = as.data.frame(df) colnames(df) = c("simulation year", "month", "day", paste0("Trace_", 1:nrealz)) #remove 366 days for non-leap years df = drop_na(df, c(month, day)) #assign simulation year to start at the same time as training data df$`simulation year` = df$`simulation year` + dat.d$year[1] - 1 #format date df$Date = ymd(paste(df$`simulation year`, df$month, df$day, sep = "-")) #remove years that aren't leap years # df = drop_na(df, Date) df = df %>% mutate(yday = as.numeric(yday(Date)), week = as.numeric(week(Date))) %>% relocate(c(Date,yday,week), .after = day) %>% melt(id = 1:6) return(df) } ## 
----------------------------------------------------------------------------- sim.pcp = formatting(sim.pcp) sim.tmp = formatting(sim.tmp) sim.szn = formatting(sim.szn) ## ----------------------------------------------------------------------------- colnames(dat.d)[11] = "yday" obs.pcp = dat.d[,c(1:3,8:9,11,4)] obs.tmp = dat.d[,c(1:3,8:9,11,5)] ## ----------------------------------------------------------------------------- #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" dailyPlot = function(simDat, obsDat, Tag){ simD = simDat %>% drop_na() %>% group_by(variable, yday) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simDq <- simD %>% group_by(yday) %>% summarise( mean_q5 = quantile(mean, 0.05, na.rm = T), mean_med = median(mean, na.rm = T), mean_q95 = quantile(mean, 0.95, na.rm =T), max_q5 = quantile(max, 0.05, na.rm = T), max_med = median(max, na.rm = T), max_q95 = quantile(max, 0.95, na.rm = T), sd_q5 = quantile(sd, 0.05, na.rm = T), sd_med = median(sd), sd_q95 = quantile(sd, 0.95, na.rm = T), skew_q5 = quantile(skew, 0.05, na.rm = T), skew_med = median(skew, na.rm = T), skew_q95 = quantile(skew, 0.95, na.rm = T) ) %>% drop_na() %>% ungroup() if(Tag == "Temp"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() } else if(Tag == "Precip"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() } colnames(obs)[-1] = paste0("obs_", colnames(obs)[-1]) df.comb = left_join(simDq, obs, by = "yday") #plotting -------------------------------- lgdLoc = c(0.8, 0.9) if(Tag == "Temp"){ yLabel = "Daily Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Daily Precipitation " units = "(inches)" } trnAlpha = 0.65 #daily mean p1 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = mean_q5, ymax = mean_q95), alpha = 0.25) + geom_line(aes(x = yday, y = mean_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_mean), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_mean), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Mean ", yLabel, units)) #daily SD p2 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = sd_q5, ymax = sd_q95), alpha = 0.25) + geom_line(aes(x = yday, y = sd_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_sd), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_sd), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # 
text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Std. Deviation of ", yLabel, units)) #daily skew p3 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = skew_q5, ymax = skew_q95), alpha = 0.25) + geom_line(aes(x = yday, y = skew_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_skew), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_skew), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Skew of ", yLabel, " (-)")) #daily Max p4 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = max_q5, ymax = max_q95), alpha = 0.25) + geom_line(aes(x = yday, y = max_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_max), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_max), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Maximum ", yLabel, units)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/dailyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png") } ## ---- fig.width=8, fig.height=8----------------------------------------------- dailyPlot(sim.pcp, obs.pcp, "Precip") ## ---- fig.width=8, fig.height=8----------------------------------------------- dailyPlot(sim.tmp, obs.tmp, "Temp") ## ----------------------------------------------------------------------------- #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" monthlyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% 
relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsMM, simMM) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = mean, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = mean, color = "Observed")) + xlab("Month") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Mean Monthly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sum, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sum, color = "Observed")) + xlab("Month") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Total Monthly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Precipitation " units = "(inches)" } #monthly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sd, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sd, color = "Observed")) + xlab("Month") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Standard Deviation in Monthly ", yLabel)) #monthly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = skew, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size 
= 0.5, aes(x = month, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = skew, color = "Observed")) + xlab("Month") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Skew in Monthly ", yLabel)) #monthly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = max, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = max, color = "Observed")) + xlab("Month") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Monthly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) p.out = paste0(tempdir(), "/outputPlots/monthlyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 8, units = "in") } ## ---- fig.width=8, fig.height=8----------------------------------------------- monthlyPlot(sim.pcp, obs.pcp, "Precip") ## ---- fig.width=8, fig.height=8----------------------------------------------- monthlyPlot(sim.tmp, obs.tmp, "Temp") ## ----------------------------------------------------------------------------- #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" weeklyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, 
na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsWW, simWW) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = mean, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = mean, color = "Observed")) + xlab("Week") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Mean Weekly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sum, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sum, color = "Observed")) + xlab("Week") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Total Weekly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Precipitation " units = "(inches)" } #weekly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sd, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sd, color = "Observed")) + xlab("Week") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Standard Deviation in Weekly ", yLabel)) #weekly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = skew, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = skew, color = "Observed")) + xlab("Week") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Skew in Weekly ", yLabel)) #weekly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = max, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = max, 
color = "Observed")) + xlab("Week") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Weekly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) p.out = paste0(tempdir(), "/outputPlots/weeklyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 10, units = "in") } ## ---- fig.width=10, fig.height=8---------------------------------------------- weeklyPlot(sim.pcp, obs.pcp, "Precip") ## ---- fig.width=10, fig.height=8---------------------------------------------- weeklyPlot(sim.tmp, obs.tmp, "Temp") ## ----------------------------------------------------------------------------- # setwd() to desired location for writeSim to save .csv files containing the simulated precipitation and temperature # setwd(tempdir()) # # writeSim(wxOutput = z, nsim = nsim, nrealz = nrealz, debug = TRUE) ## ----------------------------------------------------------------------------- #wxgenR weather generation run time: print(difftime(endTime, startTime, units='mins'))
/scratch/gouwar.j/cran-all/cranData/wxgenR/inst/doc/Vignette_BlacksburgVA.R
--- title: "wxgenR - Blacksburg, VA NWS Station" author: "Subhrendu Gangopadhyay, Lindsay Bearup, Andrew Verdin, Eylon Shamir, Eve Halper, David Woodson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{wxgenR - Blacksburg, VA NWS Station} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` A weather generator is a numerical tool that resamples an input timeseries many times, while preserving observed or projected characteristics of importance, such as the statistics of the transition between wet and dry days. The resulting large group, or ensemble, of likely rainfall and temperature timeseries represents a range of possible amounts, daily patterns, and seasonality. This weather generator is, to our knowledge, novel in that it includes *seasons* (up to 26) in training the simulation algorithm. The goal of `wxgenR` is to provide users a tool that can simulate, with fidelity, an ensemble of precipitation and temperature based on training data that could include, for example, station based point measurements, grid cell values derived from models or remotely sensed data, or basin averages. The incorporation of seasonality as a covariate in the training algorithm allows users to examine the effects of shifts in seasonality due to climate warming (e.g., earlier snowmelt seasons or prolonged summer dry periods). `wxgenR` is an effective and robust scenario planning tool for a wide variety of potential futures. ## Running `wxgenR` All that is needed to run `wxgenR` is a single time series of precipitation, temperature, and season. Up to 20 seasons may be defined, but most users will likely only need two to four based on their study region. For example, `wxgenR` is provided with single station data from Blacksburg, Virgina, a temperate locale that is better defined by four seasons. Within the data used to train the weather generator, these four seasons should be noted with an index of either 1, 2, 3, or 4 for each day in the time series. The varying statistics of each season will impact the resulting simulations. Precipitation and temperature data are point measurements taken at the Blacksburg National Weather Service office (GHCND:USC00440766). ## Tutorial For example, using the Blacksburg, VA National Weather Service station-based precipitation, temperature, and season from 1991 to 2020, we can generate simulated precipitation and temperature for any desired time length and ensemble size. Your variables *must* be named as the following: 'year', 'month', 'day', 'prcp', 'temp', 'season'. All input variables must be contained within the same dataframe or text file. If inputting a text file, it must be comma separated (.csv). The weather generator can handle NA values for precipitation or temperature, but all other variables should be numeric values. ### Step 1: Load your data ```{r} library(wxgenR) library(lubridate) library(dplyr) library(tidyr) library(reshape2) library(ggpubr) library(data.table) library(moments) library(seas) data(BlacksburgVA) head(BlacksburgVA) ``` ### Step 2: Select your run settings and run the weather generator Use the variables within the wx() function like `syr` and `eyr` (start and end year) to set the temporal boundaries from which to sample, otherwise, if left empty the start and end years will default to the beginning and end of your training data. 
Use `nsim` to set the length (in years) of your simulated weather, and `nrealz` to set the ensemble size (number of traces). The variable `wwidth` sets the sampling window for each day of year (Jan. 1 through Dec. 31) for every year in the simulation. The sampling window for each day of year spans `wwidth` days before and after that day of year, plus the day itself (2*`wwidth` + 1 days in total). A lower value for `wwidth` will sample fewer surrounding days and a higher value will sample more days, resulting in dampened and heightened variability, respectively. Typical settings of `wwidth` are between 1 and 15, resulting in daily sampling windows of 3 days and 31 days, respectively. Generally, higher and lower values of `wwidth` result in higher and lower variance, respectively, in the simulated data. For example, to simulate precipitation on day 1 of the simulation (Jan. 1 of year 1), with `wwidth` = 1 (a 3-day window), the algorithm will sample days in the training record between (and including) December 31 and January 2 (for all years in the training record). For day 2 of the simulation (Jan. 2 of year 1), the algorithm will sample days in the training record between January 1 and January 3. Simulation day 3 (Jan. 3) will sample between January 2 and January 4, and so on. Increasing `wwidth` to 2 (a 5-day window) will sample between December 30 and January 3 for Jan. 1 simulations, December 31 to January 4 for Jan. 2, and January 1 to January 5 for Jan. 3, and so on (a short sketch of this window arithmetic appears after this settings discussion). In some cases, `wwidth` will be automatically increased through an adaptive window width if precipitation occurred on a given day but there were fewer than two daily precipitation values over 0.01 inches during the window for that day. `wwidth` will adaptively increase by 1 until two or more daily precipitation values over 0.01 inches are in each window. The adaptive window width is most likely to be triggered in regions with high aridity or pronounced dry seasons, when a small initial value of `wwidth` is used, or when the training record is relatively short (e.g., fewer than 30 years). To display the results of the adaptive window width, set `awinFlag = T`. Here, our training data spans 1991-01-01 to 2020-12-31, but we don't want to use the full historical record, so we set `syr` and `eyr` to 2000 and 2004, respectively, in the `wx()` function so that the training data is subset between those years. We want a simulation length of 5 years (`nsim`) in order to match the length of the subset training record, and 10 traces in our ensemble (`nrealz`) for computational efficiency (although more traces, e.g., 50, are recommended). Sampling for each day of the year will draw from the preceding day, the day itself, and the following day (`wwidth = 1`), for a total window size of 3 days. We may also want to increase the variability of our simulated precipitation by sampling outside the historical envelope with an Epanechnikov kernel (`ekflag = T`). For more details on the Epanechnikov kernel and its use in a weather generator, see Rajagopalan et al. (1996). Setting `tempPerturb = T` will increase the variability of the simulated temperature by adding random noise from a normal distribution fit using a mean of zero and a standard deviation equal to the monthly standard deviation of simulated temperature residuals.
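Before turning to the temperature model, here is the short sketch of the day-of-year window arithmetic referenced above. It is purely illustrative base R, not the package's internal code; it reproduces the Jan. 1, `wwidth = 1` example (December 31 through January 2):

```{r}
# Illustrative sketch only (not wxgenR internals): the +/- wwidth
# day-of-year sampling window, wrapped around the year boundary
wwidth_demo <- 1
target_doy  <- 1  # Jan. 1
window_doy  <- (target_doy - wwidth_demo):(target_doy + wwidth_demo)
window_doy  <- ((window_doy - 1) %% 365) + 1  # wrap Dec. 31 <-> Jan. 1
window_doy  # 365, 1, 2 -> Dec. 31, Jan. 1, Jan. 2 in a non-leap year
```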
Given that simulated daily temperature at time t is a function of temperature(t-1), cosine(t), sine(t), precipitation occurrence(t), and monthly mean temperature(t), the standard deviation of daily residuals from this model is calculated for each month and used to add random noise to the simulated temperature. The temperature simulation approach is inspired by and adapted from Verdin et al. (2015, 2018). Since the training data has units of inches and degrees Fahrenheit for precipitation and temperature, respectively, we must set `unitSystem = "Metric"`. Setting `parallelize = T` will enable parallel computing for precipitation simulation, which is the most computationally intensive aspect of the weather generator. ```{r, results='hide'} nsim = 5 #number of simulation years nrealz = 10 #number of traces in ensemble startTime <- Sys.time() #benchmark run time z = wx(trainingData = BlacksburgVA, syr = 2000, eyr = 2004, nsim = nsim, nrealz = nrealz, aseed = 123, wwidth = 1, unitSystem = "Metric", ekflag = TRUE, awinFlag = TRUE, tempPerturb = TRUE, parallelize = FALSE) endTime = Sys.time() ``` The wx() function will return a list containing both your input/training data and a variety of processed outputs, named here as the variable `z`. Within `z`, `dat.d` is the original input data as well as some intermediary variables. `simyr1` contains the years within your training data that were sampled to generate simulated values for each trace. `X` is the occurrence of daily precipitation for each trace, where 1 and 0 indicate the presence and absence of precipitation, respectively. `Xseas` is the season index for each day and trace. `Xpdate` shows which days from the training data were sampled for each simulated day and trace, if precipitation was simulated to occur on a given day. `Xpamt` is the simulated precipitation amount for each day and trace. `Xtemp` is the simulated temperature for each day and trace. Generally, `Xpamt` and `Xtemp` will be of most interest to users as these are the desired outputs of simulated daily precipitation and temperature.
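As a quick, hedged check on the structure of `z` (element names as described above; the simulation outputs are assumed to be arranged as days by traces, which is how they are indexed in the post-processing code below):

```{r}
# Inspect the wx() output list and the main simulation outputs
names(z)
dim(z$Xpamt)  # simulated precipitation amounts (days x traces)
dim(z$Xtemp)  # simulated temperature (days x traces)

# e.g., ensemble-mean daily precipitation across traces
head(rowMeans(z$Xpamt, na.rm = TRUE))
```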
```{r} # glimpse(z) ``` ### Step 3: Analyze simulated weather ### First, use a modified approach from the writeSim function to post-process/format output ```{r} #parse variables from wx() output dat.d = z$dat.d simyr1 = z$simyr1 X = z$X Xseas = z$Xseas Xpdate = z$Xpdate Xpamt = z$Xpamt Xtemp = z$Xtemp #write simulation output # it1 <- seq(1, length(X[,1]), 366) it2 = it1+366-1 #initialize storage sim.pcp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.tmp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.szn = matrix(NA, nrow = nsim*366, ncol = nrealz+3) #loop through realizations irealz = 1 for (irealz in 1:nrealz){ outmat <- vector() #loop through simulation years isim = 1 for (isim in 1:nsim){ leapflag = FALSE ayr = simyr1[isim, irealz] if (lubridate::leap_year(ayr)) leapflag = TRUE col1 = rep(isim, 366) #column 1, simulation year d1 = ayr*10^4+01*10^2+01; d2 = ayr*10^4+12*10^2+31 i1 = which(dat.d$date1 == d1) i2 = which(dat.d$date1 == d2) col2 = dat.d$date1[i1:i2] #column 2, simulation date if (leapflag == FALSE) col2 = c(col2,NA) i1 = it1[isim] i2 = it2[isim] col3 = Xseas[i1:i2, irealz] #column 3, simulation season col4 = X[i1:i2, irealz] #column 4, precipitation occurrence col5 = Xpdate[i1:i2, irealz] #column 5, precipitation resampling date col6 = Xpamt[i1:i2, irealz] #column 6, resampled precipitation amount col7 = Xtemp[i1:i2, irealz] #column 7, simulated temperature #create time series of 'simulation day' sim.yr = rep(isim, length(col2)) sim.month = month(ymd(col2)) sim.day = day(ymd(col2)) outmat = rbind(outmat, cbind(sim.yr, sim.month, sim.day, col6, col7, col3)) } #isim colnames(outmat) = c("simulation year", "month", "day", "prcp", "temp", "season") if(irealz == 1){ sim.pcp[,1:3] = outmat[,1:3] sim.tmp[,1:3] = outmat[,1:3] sim.szn[,1:3] = outmat[,1:3] } sim.pcp[,irealz+3] = outmat[,4] sim.tmp[,irealz+3] = outmat[,5] sim.szn[,irealz+3] = outmat[,6] } #irealz ``` ### Format dataframes for simulated precip, temperature, and season ```{r} # df = sim.pcp formatting = function(df){ df = as.data.frame(df) colnames(df) = c("simulation year", "month", "day", paste0("Trace_", 1:nrealz)) #remove day 366 for non-leap years df = drop_na(df, c(month, day)) #assign simulation year to start at the same time as training data df$`simulation year` = df$`simulation year` + dat.d$year[1] - 1 #format date df$Date = ymd(paste(df$`simulation year`, df$month, df$day, sep = "-")) #remove years that aren't leap years # df = drop_na(df, Date) df = df %>% mutate(yday = as.numeric(yday(Date)), week = as.numeric(week(Date))) %>% relocate(c(Date,yday,week), .after = day) %>% melt(id = 1:6) return(df) } ``` ```{r} sim.pcp = formatting(sim.pcp) sim.tmp = formatting(sim.tmp) sim.szn = formatting(sim.szn) ``` ### Format training data ```{r} colnames(dat.d)[11] = "yday" obs.pcp = dat.d[,c(1:3,8:9,11,4)] obs.tmp = dat.d[,c(1:3,8:9,11,5)] ``` ### First you might want to plot the daily time series for verification If your data contained NA values, they can propagate to simulated temperature values (NA precip values in your data are set to 0), so use `na.rm = T` for any subsequent analysis. You may also choose to replace `NA` values with daily or monthly averages. Additionally, leap years may be included in the simulated weather if they are included in your training data, so all non-leap years include a row of 'NA' values at the end of the calendar year as a book-keeping measure so that the total number of rows in each trace is the same.
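As a minimal sketch of such a verification plot (using the formatted `sim.pcp` from above and showing a single trace; `ggplot2` is attached along with `ggpubr`):

```{r, fig.width=8, fig.height=4}
# Quick daily time-series check of one simulated trace
ggplot(subset(sim.pcp, variable == "Trace_1"),
       aes(x = Date, y = value)) +
  geom_line(color = "grey40", na.rm = TRUE) +
  xlab("Date") + ylab("Daily precipitation") +
  theme_classic()
```

The observed record (e.g., `obs.pcp`) can be overlaid in the same way for a side-by-side comparison.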
```{r} #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" dailyPlot = function(simDat, obsDat, Tag){ simD = simDat %>% drop_na() %>% group_by(variable, yday) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simDq <- simD %>% group_by(yday) %>% summarise( mean_q5 = quantile(mean, 0.05, na.rm = T), mean_med = median(mean, na.rm = T), mean_q95 = quantile(mean, 0.95, na.rm =T), max_q5 = quantile(max, 0.05, na.rm = T), max_med = median(max, na.rm = T), max_q95 = quantile(max, 0.95, na.rm = T), sd_q5 = quantile(sd, 0.05, na.rm = T), sd_med = median(sd), sd_q95 = quantile(sd, 0.95, na.rm = T), skew_q5 = quantile(skew, 0.05, na.rm = T), skew_med = median(skew, na.rm = T), skew_q95 = quantile(skew, 0.95, na.rm = T) ) %>% drop_na() %>% ungroup() if(Tag == "Temp"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() } else if(Tag == "Precip"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() } colnames(obs)[-1] = paste0("obs_", colnames(obs)[-1]) df.comb = left_join(simDq, obs, by = "yday") #plotting -------------------------------- lgdLoc = c(0.8, 0.9) if(Tag == "Temp"){ yLabel = "Daily Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Daily Precipitation " units = "(inches)" } trnAlpha = 0.65 #daily mean p1 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = mean_q5, ymax = mean_q95), alpha = 0.25) + geom_line(aes(x = yday, y = mean_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_mean), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_mean), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Mean ", yLabel, units)) #daily SD p2 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = sd_q5, ymax = sd_q95), alpha = 0.25) + geom_line(aes(x = yday, y = sd_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_sd), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_sd), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Std. 
Deviation of ", yLabel, units)) #daily skew p3 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = skew_q5, ymax = skew_q95), alpha = 0.25) + geom_line(aes(x = yday, y = skew_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_skew), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_skew), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Skew of ", yLabel, " (-)")) #daily Max p4 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = max_q5, ymax = max_q95), alpha = 0.25) + geom_line(aes(x = yday, y = max_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_max), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_max), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Maximum ", yLabel, units)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/dailyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png") } ``` ### plot daily precipitation ```{r, fig.width=8, fig.height=8} dailyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot daily temperature ```{r, fig.width=8, fig.height=8} dailyPlot(sim.tmp, obs.tmp, "Temp") ``` ## Looking at just the daily mean may not be representative since weather may be very different depending on the season, so plot monthly statistics as well for more detail. 
Boxplot whiskers are in the style of Tukey (1.5 x interquartile range) ```{r} #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" monthlyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsMM, simMM) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = mean, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = mean, color = "Observed")) + xlab("Month") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Mean Monthly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sum, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sum, color = "Observed")) + xlab("Month") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Total Monthly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag 
== "Precip"){ yLabel = "Precipitation " units = "(inches)" } #monthly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sd, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sd, color = "Observed")) + xlab("Month") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Standard Deviation in Monthly ", yLabel)) #monthly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = skew, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = skew, color = "Observed")) + xlab("Month") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Skew in Monthly ", yLabel)) #monthly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = max, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = max, color = "Observed")) + xlab("Month") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Monthly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) p.out = paste0(tempdir(), "/outputPlots/monthlyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 8, units = "in") } ``` ### plot monthly precipitation ```{r, fig.width=8, fig.height=8} monthlyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot monthly temperature ```{r, fig.width=8, fig.height=8} monthlyPlot(sim.tmp, obs.tmp, "Temp") ``` ## Weekly statistics offer a finer resolution than monthly statistics but are not as noisy as daily values. 
Boxplot whiskers are in the style of Tukey (1.5 x interquartile range) ```{r} #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" weeklyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsWW, simWW) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = mean, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = mean, color = "Observed")) + xlab("Week") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Mean Weekly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sum, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sum, color = "Observed")) + xlab("Week") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Total Weekly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag == 
"Precip"){ yLabel = "Precipitation " units = "(inches)" } #weekly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sd, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sd, color = "Observed")) + xlab("Week") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Standard Deviation in Weekly ", yLabel)) #weekly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = skew, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = skew, color = "Observed")) + xlab("Week") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Skew in Weekly ", yLabel)) #weekly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = max, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = max, color = "Observed")) + xlab("Week") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Weekly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) p.out = paste0(tempdir(), "/outputPlots/weeklyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 10, units = "in") } ``` ### plot weekly precipitation ```{r, fig.width=10, fig.height=8} weeklyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot weekly temperature ```{r, fig.width=10, fig.height=8} weeklyPlot(sim.tmp, obs.tmp, "Temp") ``` ### Step 4: Save your data to file Save your simulated weather ensemble to a file via the `writeSim` function. It will conveniently save each trace to a .csv file. ```{r} # setwd() to desired location for writeSim to save .csv files containing the simulated precipitation and temperature # setwd(tempdir()) # # writeSim(wxOutput = z, nsim = nsim, nrealz = nrealz, debug = TRUE) ``` ### Performance Benchmarking Running the `wx()` weather generator code for a 5-year, 10-trace simulation on a laptop with the following characteristics results in the below run time. Parallel computing was enabled via `parallelize = T` in the wx() function. OS: Microsoft Windows 10 Enterprise 10.0.19044 Build 19044 Hardware: Intel(R) Core(TM) i7-10850H CPU @2.70GHz, 2712 Mhz, 6 Cores, 12 Logical Processors. 16 GB installed physical memory. 
```{r} #wxgenR weather generation run time: print(difftime(endTime, startTime, units='mins')) ``` ## Citations For more details and examples, including analysis of the dataset used in this vignette, see the following works: Bearup, L., Gangopadhyay, S., & Mikkelson, K. (2021). Hydroclimate Analysis Lower Santa Cruz River Basin Study (Technical Memorandum No ENV-2020-056). Bureau of Reclamation. https://www.usbr.gov/lc/phoenix/programs/lscrbasin/LSCRBS_Hydroclimate_2021.pdf Gangopadhyay, S., Bearup, L. A., Verdin, A., Pruitt, T., Halper, E., & Shamir, E. (2019, December 1). A collaborative stochastic weather generator for climate impacts assessment in the Lower Santa Cruz River Basin, Arizona. Fall Meeting 2019, American Geophysical Union. https://ui.adsabs.harvard.edu/abs/2019AGUFMGC41G1267G Rajagopalan, B., Lall, U., and Tarboton, D. G.: Nonhomogeneous Markov Model for Daily Precipitation, Journal of Hydrologic Engineering, 1, 33–40, https://doi.org/10.1061/(ASCE)1084-0699(1996)1:1(33), 1996. Verdin, A., Rajagopalan, B., Kleiber, W., and Katz, R. W.: Coupled stochastic weather generation using spatial and generalized linear models, Stoch Environ Res Risk Assess, 29, 347–356, https://doi.org/10.1007/s00477-014-0911-6, 2015. Verdin, A., Rajagopalan, B., Kleiber, W., PodestΓ‘, G., and Bert, F.: A conditional stochastic weather generator for seasonal to multi-decadal simulations, Journal of Hydrology, 556, 835–846, https://doi.org/10.1016/j.jhydrol.2015.12.036, 2018. ## Disclaimer This information is preliminary and is subject to revision. It is being provided to meet the need for timely best science. The information is provided on the condition that neither the U.S. Bureau of Reclamation nor the U.S. Government may be held liable for any damages resulting from the authorized or unauthorized use of the information.
/scratch/gouwar.j/cran-all/cranData/wxgenR/inst/doc/Vignette_BlacksburgVA.Rmd
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----------------------------------------------------------------------------- library(wxgenR) library(lubridate) library(dplyr) library(tidyr) library(reshape2) library(ggpubr) library(data.table) library(moments) library(seas) data(LowerSantaCruzRiverBasinAZ) head(LowerSantaCruzRiverBasinAZ) ## ---- results = 'hide'-------------------------------------------------------- nsim = 5 #number of simulation years nrealz = 10 #number of traces in ensemble startTime <- Sys.time() #benchmark run time z = wx(trainingData = LowerSantaCruzRiverBasinAZ, syr = 1970, eyr = 1974, nsim = nsim, nrealz = nrealz, aseed = 123, wwidth = 3, unitSystem = "U.S. Customary", ekflag = TRUE, awinFlag = TRUE, tempPerturb = TRUE, parallelize = FALSE) endTime = Sys.time() ## ----------------------------------------------------------------------------- glimpse(z) ## ----------------------------------------------------------------------------- #parse variables from wx() output dat.d = z$dat.d simyr1 = z$simyr1 X = z$X Xseas = z$Xseas Xpdate = z$Xpdate Xpamt = z$Xpamt Xtemp = z$Xtemp #write simulation output # it1 <- seq(1, length(X[,1]), 366) it2 = it1+366-1 #initialize storage sim.pcp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.tmp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.szn = matrix(NA, nrow = nsim*366, ncol = nrealz+3) #loop through realization irealz = 1 for (irealz in 1:nrealz){ outmat <- vector() #loop through simulation years isim = 1 for (isim in 1:nsim){ leapflag = FALSE ayr = simyr1[isim, irealz] if (lubridate::leap_year(ayr)) leapflag = TRUE col1 = rep(isim, 366) #column 1, simulation year d1 = ayr*10^4+01*10^2+01; d2 = ayr*10^4+12*10^2+31 i1 = which(dat.d$date1 == d1) i2 = which(dat.d$date1 == d2) col2 = dat.d$date1[i1:i2] #column 2, simulation date if (leapflag == FALSE) col2 = c(col2,NA) i1 = it1[isim] i2 = it2[isim] col3 = Xseas[i1:i2, irealz] #column 3, simulation season col4 = X[i1:i2, irealz] #column 4, precipitation occurrence col5 = Xpdate[i1:i2, irealz] #column 5, precipation resampling date col6 = Xpamt[i1:i2, irealz] #column 6, resampled precipitation amount col7 = Xtemp[i1:i2, irealz] #column7, simulated temperature #create time series of 'simulation day' sim.yr = rep(isim, length(col2)) sim.month = month(ymd(col2)) sim.day = day(ymd(col2)) outmat = rbind(outmat, cbind(sim.yr, sim.month, sim.day, col6, col7, col3)) } #isim colnames(outmat) = c("simulation year", "month", "day", "prcp", "temp", "season") if(irealz == 1){ sim.pcp[,1:3] = outmat[,1:3] sim.tmp[,1:3] = outmat[,1:3] sim.szn[,1:3] = outmat[,1:3] } sim.pcp[,irealz+3] = outmat[,4] sim.tmp[,irealz+3] = outmat[,5] sim.szn[,irealz+3] = outmat[,6] } #irealz ## ----------------------------------------------------------------------------- # df = sim.pcp formatting = function(df){ df = as.data.frame(df) colnames(df) = c("simulation year", "month", "day", paste0("Trace_", 1:nrealz)) #remove 366 days for non-leap years df = drop_na(df, c(month, day)) #assign simulation year to start at the same time as training data df$`simulation year` = df$`simulation year` + dat.d$year[1] - 1 #format date df$Date = ymd(paste(df$`simulation year`, df$month, df$day, sep = "-")) #remove years that aren't leap years # df = drop_na(df, Date) df = df %>% mutate(yday = as.numeric(yday(Date)), week = as.numeric(week(Date))) %>% relocate(c(Date,yday,week), .after = day) %>% melt(id = 1:6) return(df) } ## 
----------------------------------------------------------------------------- sim.pcp = formatting(sim.pcp) sim.tmp = formatting(sim.tmp) sim.szn = formatting(sim.szn) ## ----------------------------------------------------------------------------- colnames(dat.d)[11] = "yday" obs.pcp = dat.d[,c(1:3,8:9,11,4)] obs.tmp = dat.d[,c(1:3,8:9,11,5)] ## ----------------------------------------------------------------------------- #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" dailyPlot = function(simDat, obsDat, Tag){ simD = simDat %>% drop_na() %>% group_by(variable, yday) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simDq <- simD %>% group_by(yday) %>% summarise( mean_q5 = quantile(mean, 0.05, na.rm = T), mean_med = median(mean, na.rm = T), mean_q95 = quantile(mean, 0.95, na.rm =T), max_q5 = quantile(max, 0.05, na.rm = T), max_med = median(max, na.rm = T), max_q95 = quantile(max, 0.95, na.rm = T), sd_q5 = quantile(sd, 0.05, na.rm = T), sd_med = median(sd), sd_q95 = quantile(sd, 0.95, na.rm = T), skew_q5 = quantile(skew, 0.05, na.rm = T), skew_med = median(skew, na.rm = T), skew_q95 = quantile(skew, 0.95, na.rm = T) ) %>% drop_na() %>% ungroup() if(Tag == "Temp"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() } else if(Tag == "Precip"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() } colnames(obs)[-1] = paste0("obs_", colnames(obs)[-1]) df.comb = left_join(simDq, obs, by = "yday") #plotting -------------------------------- lgdLoc = c(0.8, 0.9) if(Tag == "Temp"){ yLabel = "Daily Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Daily Precipitation " units = "(inches)" } trnAlpha = 0.65 #daily mean p1 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = mean_q5, ymax = mean_q95), alpha = 0.25) + geom_line(aes(x = yday, y = mean_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_mean), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_mean), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Mean ", yLabel, units)) #daily SD p2 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = sd_q5, ymax = sd_q95), alpha = 0.25) + geom_line(aes(x = yday, y = sd_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_sd), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_sd), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # 
text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Std. Deviation of ", yLabel, units)) #daily skew p3 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = skew_q5, ymax = skew_q95), alpha = 0.25) + geom_line(aes(x = yday, y = skew_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_skew), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_skew), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Skew of ", yLabel, " (-)")) #daily Max p4 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = max_q5, ymax = max_q95), alpha = 0.25) + geom_line(aes(x = yday, y = max_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_max), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_max), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Maximum ", yLabel, units)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/dailyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png") } ## ---- fig.width=8, fig.height=8----------------------------------------------- dailyPlot(sim.pcp, obs.pcp, "Precip") ## ---- fig.width=8, fig.height=8----------------------------------------------- dailyPlot(sim.tmp, obs.tmp, "Temp") ## ----------------------------------------------------------------------------- #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" monthlyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% 
relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsMM, simMM) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = mean, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = mean, color = "Observed")) + xlab("Month") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Mean Monthly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sum, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sum, color = "Observed")) + xlab("Month") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Total Monthly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Precipitation " units = "(inches)" } #monthly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sd, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sd, color = "Observed")) + xlab("Month") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Standard Deviation in Monthly ", yLabel)) #monthly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = skew, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size 
= 0.5, aes(x = month, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = skew, color = "Observed")) + xlab("Month") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Skew in Monthly ", yLabel)) #monthly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = max, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = max, color = "Observed")) + xlab("Month") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Monthly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/monthlyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 8, units = "in") } ## ---- fig.width=8, fig.height=8----------------------------------------------- monthlyPlot(sim.pcp, obs.pcp, "Precip") ## ---- fig.width=8, fig.height=8----------------------------------------------- monthlyPlot(sim.tmp, obs.tmp, "Temp") ## ----------------------------------------------------------------------------- #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" weeklyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = 
mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsWW, simWW) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = mean, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = mean, color = "Observed")) + xlab("Week") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Mean Weekly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sum, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sum, color = "Observed")) + xlab("Week") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Total Weekly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Precipitation " units = "(inches)" } #weekly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sd, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sd, color = "Observed")) + xlab("Week") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Standard Deviation in Weekly ", yLabel)) #weekly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = skew, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = skew, color = "Observed")) + xlab("Week") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Skew in Weekly ", yLabel)) #weekly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = max, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = 
week, y = max, color = "Observed")) + xlab("Week") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Weekly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/weeklyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 10, units = "in") } ## ---- fig.width=10, fig.height=8---------------------------------------------- weeklyPlot(sim.pcp, obs.pcp, "Precip") ## ---- fig.width=10, fig.height=8---------------------------------------------- weeklyPlot(sim.tmp, obs.tmp, "Temp") ## ----------------------------------------------------------------------------- # dat.d = z$dat.d # X = z$Xpamt # syr = head(dat.d$year, 1) # eyr = tail(dat.d$year, 1) # wLO=0.05; wHI=0.95 #Set whisker percentile for boxplots spellstats <- function(dat.d,X,syr,eyr,nrealz,nsim,wLO,wHI){ #get these variables after running the driver_wx#.R code #dat.d <- z$dat.d #X <- z$X uyr=syr:eyr nyr=length(uyr) #Training Data Tdat=LowerSantaCruzRiverBasinAZ %>% mutate(occ = if_else(prcp>=0.01, 1, 0)) #### END DATA PREPARATION BLOCK TO RUN STATS CODE BELOW #### #to get month sequence nobs=length(unique(Tdat$year)) lpyear=dat.d$year[min(which(lubridate::leap_year(dat.d$year)))] aday <- ymd(paste(lpyear,1,1,sep="-")) #jan 1 of a leap year to have a 366-day year it1=which(dat.d$date==aday) it2=it1+366-1 jdaymth <- dat.d$month[it1:it2] zz <- rep(jdaymth,nsim) yy <- rep(1:nsim,each=366) X1 <- cbind(yy,zz,X) #get spell length stats 3 statistics - mean, var and max Y <- array(NA,dim=c(nrealz,3,2,nsim,12)) #save dry and wet spell lengths by sim yr W <- array(NA,dim=c(3,2,nobs,12)) #save dry and wet spell lengths by obs yr Z <- array(NA,dim=c(3,2,nrealz,12)) #save average spell length by realz V <- array(NA,dim=c(3,2,12)) #save average spell length for obs fidx=seq(1,dim(X1)[1],by=366) for (irealz in 1:nrealz){ for (isim in 1:nsim){ i1=fidx[isim] i2=i1+366-1 for (imth in 1:12){ idxlist=i1 + which(X1[i1:i2,2]==imth) - 1 s <- na.omit(X[idxlist,irealz]) z.f <- spellLengths(s) if (length(z.f$`0`)>0){ Y[irealz,1,1,isim,imth]=mean(z.f$`0`) Y[irealz,2,1,isim,imth]=var(z.f$`0`) Y[irealz,3,1,isim,imth]=max(z.f$`0`) } if (length(z.f$`1`)>0){ Y[irealz,1,2,isim,imth]=mean(z.f$`1`) Y[irealz,2,2,isim,imth]=var(z.f$`1`) Y[irealz,3,2,isim,imth]=max(z.f$`1`) } } #imth }#isim } #irealz for (isim in 1:nobs){ for (imth in 1:12){ s <- dplyr::filter(Tdat, year==unique(Tdat$year)[isim], month==imth)$occ z.f <- spellLengths(s) if (length(z.f$`0`)>0){ W[1,1,isim,imth]=mean(z.f$`0`) W[2,1,isim,imth]=var(z.f$`0`) W[3,1,isim,imth]=max(z.f$`0`) } if (length(z.f$`1`)>0){ W[1,2,isim,imth]=mean(z.f$`1`) W[2,2,isim,imth]=var(z.f$`1`) W[3,2,isim,imth]=max(z.f$`1`) } } #imth }#isim for (imth in 1:12){ Z[1,1,,imth]=apply(Y[,1,1,,imth],1,"mean",na.rm=T) Z[2,1,,imth]=apply(Y[,2,1,,imth],1,"mean",na.rm=T) Z[3,1,,imth]=apply(Y[,3,1,,imth],1,"mean",na.rm=T) Z[1,2,,imth]=apply(Y[,1,2,,imth],1,"mean",na.rm=T) Z[2,2,,imth]=apply(Y[,2,2,,imth],1,"mean",na.rm=T) Z[3,2,,imth]=apply(Y[,3,2,,imth],1,"mean",na.rm=T) V[1,1,imth]=mean(W[1,1,,imth],na.rm=T) V[2,1,imth]=mean(W[2,1,,imth],na.rm=T) V[3,1,imth]=mean(W[3,1,,imth],na.rm=T) V[1,2,imth]=mean(W[1,2,,imth],na.rm=T) 
V[2,2,imth]=mean(W[2,2,,imth],na.rm=T) V[3,2,imth]=mean(W[3,2,,imth],na.rm=T) } #imth #Boxplots d01 <- as.data.frame(Z[1,1,,]) #average mean dry spell length d02 <- as.data.frame(sqrt(Z[2,1,,])) #average sd dry spell length d03 <- as.data.frame(Z[3,1,,]) #average max dry spell length d01T=V[1,1,] d02T=sqrt(V[2,1,]) d03T=V[3,1,] RecRed = "red" RecBlue = "blue" # pdf(file=paste0(tempdir(), "/outputPlots/DryWetStats.pdf"), width=9, height=4) oldpar = par(mfrow=c(1,3), mar=c(2,2.5,2,1), oma=c(2,2,0,0), mgp=c(2,1,0), cex.axis=0.8) par(mfrow=c(1,3), mar=c(2,2.5,2,1), oma=c(2,2,0,0), mgp=c(2,1,0),cex.axis=0.8) #Mean #ylimit=range(c(d01,d01hist),na.rm=T) bb=boxplot(d01, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d01,d01T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d01[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,na.rm=T, outline=F,xlab="",ylab="") mtext("days", side=2, outer = T) title(main="average mean dry spell length") for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d01T, pch=17, cex=1, col=RecRed) lines(1:12, d01T, col=RecRed) #Standard Deviation bb=boxplot(d02, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d02,d02T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d02[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,na.rm=T, outline=F,xlab="",ylab="") title(main="average sd dry spell length") mtext("month", side=1, outer = T, line=0.5) for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d02T, pch=17, cex=1, col=RecRed) lines(1:12, d02T, col=RecRed) #Max Length bb=boxplot(d03, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d03,d03T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d03[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,xlab="",ylab="",na.rm=T, outline=F) title(main="average max dry spell length") for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d03T, pch=17, cex=1, col=RecRed) lines(1:12, d03T, col=RecRed) dev.off() par(oldpar) ########################################## } ## ---- fig.width=8, fig.height=6----------------------------------------------- spellstats(dat.d = z$dat.d, X = z$Xpamt, syr = head(dat.d$year, 1), eyr = tail(dat.d$year, 1), nrealz = nrealz, nsim = nsim, wLO = 0.05, wHI = 0.95) ## ----------------------------------------------------------------------------- # setwd() to desired location for writeSim to save .csv files containing the simulated precipitation and temperature # setwd(tempdir()) # # writeSim(wxOutput = z, nsim = nsim, nrealz = nrealz, debug = TRUE) ## ----------------------------------------------------------------------------- #wxgenR weather generation run time: print(difftime(endTime, startTime, units='mins'))
/scratch/gouwar.j/cran-all/cranData/wxgenR/inst/doc/Vignette_LowerSantaCruzRiverBasinAZ.R
--- title: "wxgenR - Lower Santa Cruz River Basin, AZ" author: "Subhrendu Gangopadhyay, Lindsay Bearup, David Woodson, Andrew Verdin, Eylon Shamir, Eve Halper" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{wxgenR - Lower Santa Cruz River Basin, AZ} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` A weather generator is a numerical tool that resamples a daily time series of precipitation, temperature, and season many times, while preserving observed or projected characteristics of importance, such as the statistics of the transition between wet and dry days. The resulting large group, or ensemble, of likely rainfall and temperature time series represents a range of possible amounts, daily patterns, and seasonality. This weather generator is, to our knowledge, novel in that it includes *seasons* (up to 26) in training the simulation algorithm. The goal of `wxgenR` is to provide users a tool that can simulate, with fidelity, an ensemble of precipitation and temperature based on training data that could include, for example, station based point measurements, grid cell values derived from models or remotely sensed data, or basin averages. The incorporation of seasonality as a covariate in the training algorithm allows users to examine the effects of shifts in seasonality due to climate warming (e.g., earlier snowmelt seasons or prolonged summer dry periods). `wxgenR` is an effective and robust scenario planning tool for a wide variety of potential futures. ## Running `wxgenR` All that is needed to run `wxgenR` is a single time series of precipitation, temperature, and season. Up to 20 seasons may be defined, but most users will likely only need two to four based on their study region. For example, `wxgenR` is provided with basin-average data from the Lower Santa Cruz River Basin (LSCRB) in Arizona, a monsoon dominated region with three distinct seasons. Within the data used to train the weather generator, these three seasons should be noted with an index of either 1, 2, or 3 for each day in the time series. The varying statistics of each season will impact the resulting simulations. ## Tutorial For example, using the Lower Santa Cruz River basin-average precipitation, temperature, and season from 1970 to 1999, we can generate simulated precipitation and temperature for any desired time length and ensemble size. Your variables *must* be named as the following: 'year', 'month', 'day', 'prcp', 'temp', 'season', whether they are input as a dataframe or a text file. All input variables must be contained within the same dataframe or text file. If inputting a text file, it must be comma separated (.csv). The weather generator can handle NA values for precipitation or temperature, but all other variables should be numeric values. ### Step 1: Load your data ```{r} library(wxgenR) library(lubridate) library(dplyr) library(tidyr) library(reshape2) library(ggpubr) library(data.table) library(moments) library(seas) data(LowerSantaCruzRiverBasinAZ) head(LowerSantaCruzRiverBasinAZ) ``` ### Step 2: Select your run settings and run the weather generator Use the variables within the wx() function like `syr` and `eyr` (start and end year) to set the temporal boundaries from which to sample, otherwise, if left empty the start and end years will default to the beginning and end of your training data. 
Use `nsim` to set the length (in years) of your simulated weather, and `nrealz` to set the ensemble size (number of traces). The variable `wwidth` will set the sampling window for each day of year (Jan. 1 through Dec. 31) for every year in the simulation. The sampling window for each day of year is +/- `wwidth` + 1, effectively sampling `wwidth` number of days before and after each day of year, including that day of year. A lower value for `wwidth` will sample fewer surrounding days and a higher value will sample more days, resulting in dampened and heightened variability, respectively. Typical setting of `wwidth` is between 1 and 15, resulting in a daily sampling window of 3 days and 31 days, respectively. Generally, higher and lower values of `wwidth` result in higher and lower variance, respectively, in the simulated data. For example, to simulate precipitation on day 1 of the simulation (Jan. 1 of year 1), with `wwidth` = 1 (a 3-day window), the algorithm will sample days in the training record between (and including) December 31 and January 2 (for all years in the training record). For day 2 of the simulation (Jan. 2 of year 1), the algorithm will sample days in the training record between January 1 and January 3. Simulation day 3 (Jan. 3) will sample between January 2 and January 4, and so on. Increasing `wwidth` to 2 (a 5-day window) will sample between December 30 and January 3 for Jan. 1 simulations, December 31 to January 4 for Jan. 2, and January 1 to January 5 for Jan. 3, and so on. In some cases, the `wwidth` will be automatically increased through an adaptive window width if precipitation occurred on a given day but there were less than two daily precipitation values over 0.01 inches during the window for that day. `wwidth` will adaptively increase by 1 until two or more daily precipitation values over 0.01 inches are in each window. Adaptive window width is most likely to occur in regions with high aridity, dry seasons, a small initial value of `wwidth` is used, or if the number of years in the training data is relatively short (e.g., less than 30 years). To display the results of the adaptive window width, set `awinFlag = T`. Here, our training data spans 1970-01-01 to 1999-12-31, but we don't want to use the full historical record, so we set `syr` and `eyr` to 1970 and 1974, respectively, in the `wx()` function so that the training data is subset between those years. We want a simulation length of 5-years (`nsim`) in order to match the length of the subset training record and 10 traces in our ensemble (`nrealz`) for computational efficiency (although more traces, e.g. 50, are recommended). Sampling for each day of the year will sample from the preceding 3-days, the day of, and the following 3-days (`wwidth`) for a total window size of 7-days. We may also want to increase the variability of our simulated precipitation by sampling outside the historical envelope with an Epanechnikov Kernel (`ekflag = T`). For more details on the Epanechnikov kernel and its use in a weather generator, see Rajagopalan et al. (1996). Setting `tempPerturb = T` will increase the variability of the simulated temperature by adding random noise from a normal distribution fit using a mean of zero and a standard deviation equal to the monthly standard deviation of simulated temperature residuals. 
Given that simulated daily temperature at time t is a function of temperature(t-1), cosine(t), sine(t), precipitation occurrence(t), and monthly mean temperature(t), the standard deviation of daily residuals from this model is calculated for each month and used to add random noise to the simulated temperature. The temperature simulation approach is inspired by- and adapted from- Verdin et al. (2015, 2018). Since the training data has units of inches and degrees Fahrenheit for precipitation and temperature, respectively, we must set `unitSystem = "U.S. Customary"`. Setting `parallelize = T` will enable parallel computing for precipitation simulation which is the most computationally intensive aspect of the weather generator. ```{r, results = 'hide'} nsim = 5 #number of simulation years nrealz = 10 #number of traces in ensemble startTime <- Sys.time() #benchmark run time z = wx(trainingData = LowerSantaCruzRiverBasinAZ, syr = 1970, eyr = 1974, nsim = nsim, nrealz = nrealz, aseed = 123, wwidth = 3, unitSystem = "U.S. Customary", ekflag = TRUE, awinFlag = TRUE, tempPerturb = TRUE, parallelize = FALSE) endTime = Sys.time() ``` The wx() function will return a list containing both your input/training data, and a variety of processed outputs, named here as the variable `z`. Within `z`, `dat.d` is the original input data as well as some intermediary variables. `simyr1` contains the years within your training data that were sampled to generate simulated values for each trace. `X` is the occurrence of daily precipitation for each trace, where 1 and 0 indicate the presence and absence of precipitation, respectively. `Xseas` is the season index for each day and trace. `Xpdate` shows which days from the training data were sampled for each simulated day and trace, if precipitation was simulated to occur on a given day. `Xpamt` is the simulated precipitation amount for each day and trace. `Xtemp` is the simulated temperature for each day and trace. Generally, `Xpamt` and `Xtemp` will be of most interest to users as these are the desired outputs of simulated daily precipitation and temperature. 
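The temperature relationship described just before the `wx()` call can also be written out as a simple regression. The sketch below is a conceptual illustration fit to the training data, not the package's internal implementation; the 0.01 in wet-day threshold is an assumption borrowed from the spell-length code later in this vignette.

```{r, eval = FALSE}
# Conceptual sketch of the temperature model described above (illustration
# only, not the package's internal code). Daily temperature is regressed on
# lag-1 temperature, annual harmonics, precipitation occurrence, and the
# monthly mean temperature; the monthly standard deviation of the residuals
# is the quantity the optional tempPerturb noise is based on.
dat = LowerSantaCruzRiverBasinAZ
doy = lubridate::yday(lubridate::ymd(paste(dat$year, dat$month, dat$day, sep = "-")))

tempDat = data.frame(
  temp     = dat$temp,
  tempLag1 = dplyr::lag(dat$temp),
  ct       = cos(2 * pi * doy / 365),
  st       = sin(2 * pi * doy / 365),
  occ      = as.numeric(dat$prcp >= 0.01),   # wet-day threshold (assumed)
  month    = dat$month
)
tempDat$tmonth = ave(tempDat$temp, tempDat$month,
                     FUN = function(x) mean(x, na.rm = TRUE))

tempDat = na.omit(tempDat)
fit = lm(temp ~ tempLag1 + ct + st + occ + tmonth, data = tempDat)

# Monthly standard deviation of the model residuals
tapply(residuals(fit), tempDat$month, sd)
```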
```{r} glimpse(z) ``` ### Step 3: Analyze simulated weather ### First, use modified approach from writeSim function to post-process/format output ```{r} #parse variables from wx() output dat.d = z$dat.d simyr1 = z$simyr1 X = z$X Xseas = z$Xseas Xpdate = z$Xpdate Xpamt = z$Xpamt Xtemp = z$Xtemp #write simulation output # it1 <- seq(1, length(X[,1]), 366) it2 = it1+366-1 #initialize storage sim.pcp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.tmp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.szn = matrix(NA, nrow = nsim*366, ncol = nrealz+3) #loop through realization irealz = 1 for (irealz in 1:nrealz){ outmat <- vector() #loop through simulation years isim = 1 for (isim in 1:nsim){ leapflag = FALSE ayr = simyr1[isim, irealz] if (lubridate::leap_year(ayr)) leapflag = TRUE col1 = rep(isim, 366) #column 1, simulation year d1 = ayr*10^4+01*10^2+01; d2 = ayr*10^4+12*10^2+31 i1 = which(dat.d$date1 == d1) i2 = which(dat.d$date1 == d2) col2 = dat.d$date1[i1:i2] #column 2, simulation date if (leapflag == FALSE) col2 = c(col2,NA) i1 = it1[isim] i2 = it2[isim] col3 = Xseas[i1:i2, irealz] #column 3, simulation season col4 = X[i1:i2, irealz] #column 4, precipitation occurrence col5 = Xpdate[i1:i2, irealz] #column 5, precipation resampling date col6 = Xpamt[i1:i2, irealz] #column 6, resampled precipitation amount col7 = Xtemp[i1:i2, irealz] #column7, simulated temperature #create time series of 'simulation day' sim.yr = rep(isim, length(col2)) sim.month = month(ymd(col2)) sim.day = day(ymd(col2)) outmat = rbind(outmat, cbind(sim.yr, sim.month, sim.day, col6, col7, col3)) } #isim colnames(outmat) = c("simulation year", "month", "day", "prcp", "temp", "season") if(irealz == 1){ sim.pcp[,1:3] = outmat[,1:3] sim.tmp[,1:3] = outmat[,1:3] sim.szn[,1:3] = outmat[,1:3] } sim.pcp[,irealz+3] = outmat[,4] sim.tmp[,irealz+3] = outmat[,5] sim.szn[,irealz+3] = outmat[,6] } #irealz ``` #Format dataframes for simulated precip, temperature, and season ```{r} # df = sim.pcp formatting = function(df){ df = as.data.frame(df) colnames(df) = c("simulation year", "month", "day", paste0("Trace_", 1:nrealz)) #remove 366 days for non-leap years df = drop_na(df, c(month, day)) #assign simulation year to start at the same time as training data df$`simulation year` = df$`simulation year` + dat.d$year[1] - 1 #format date df$Date = ymd(paste(df$`simulation year`, df$month, df$day, sep = "-")) #remove years that aren't leap years # df = drop_na(df, Date) df = df %>% mutate(yday = as.numeric(yday(Date)), week = as.numeric(week(Date))) %>% relocate(c(Date,yday,week), .after = day) %>% melt(id = 1:6) return(df) } ``` ```{r} sim.pcp = formatting(sim.pcp) sim.tmp = formatting(sim.tmp) sim.szn = formatting(sim.szn) ``` ### Format training data ```{r} colnames(dat.d)[11] = "yday" obs.pcp = dat.d[,c(1:3,8:9,11,4)] obs.tmp = dat.d[,c(1:3,8:9,11,5)] ``` ### First you might want to plot the daily time series for verification If your data contained NA values, they can propagate to simulated temperature values (NA precip values in your data are set to 0), so use `na.rm = T` for any subsequent analysis. You may also choose to replace `NA` values with daily or monthly averages. Additionally, leap years may be included in the simulated weather if they are included in your training data, so all non-leap years include a row of 'NA' values at the end of the calendar year as a book-keeping measure so that the total number of rows in each trace is the same. 
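If you prefer to fill gaps rather than rely on `na.rm = T`, a minimal sketch of the monthly-average option mentioned above (applied per trace, and not used in the rest of this vignette) is:

```{r, eval = FALSE}
# Optional: replace any NA simulated temperatures with the monthly mean of
# the same trace before further analysis (one of the options noted above).
sim.tmp.filled = sim.tmp %>%
  group_by(variable, month) %>%
  mutate(value = ifelse(is.na(value), mean(value, na.rm = TRUE), value)) %>%
  ungroup()
```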
```{r} #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" dailyPlot = function(simDat, obsDat, Tag){ simD = simDat %>% drop_na() %>% group_by(variable, yday) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simDq <- simD %>% group_by(yday) %>% summarise( mean_q5 = quantile(mean, 0.05, na.rm = T), mean_med = median(mean, na.rm = T), mean_q95 = quantile(mean, 0.95, na.rm =T), max_q5 = quantile(max, 0.05, na.rm = T), max_med = median(max, na.rm = T), max_q95 = quantile(max, 0.95, na.rm = T), sd_q5 = quantile(sd, 0.05, na.rm = T), sd_med = median(sd), sd_q95 = quantile(sd, 0.95, na.rm = T), skew_q5 = quantile(skew, 0.05, na.rm = T), skew_med = median(skew, na.rm = T), skew_q95 = quantile(skew, 0.95, na.rm = T) ) %>% drop_na() %>% ungroup() if(Tag == "Temp"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() } else if(Tag == "Precip"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() } colnames(obs)[-1] = paste0("obs_", colnames(obs)[-1]) df.comb = left_join(simDq, obs, by = "yday") #plotting -------------------------------- lgdLoc = c(0.8, 0.9) if(Tag == "Temp"){ yLabel = "Daily Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Daily Precipitation " units = "(inches)" } trnAlpha = 0.65 #daily mean p1 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = mean_q5, ymax = mean_q95), alpha = 0.25) + geom_line(aes(x = yday, y = mean_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_mean), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_mean), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Mean ", yLabel, units)) #daily SD p2 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = sd_q5, ymax = sd_q95), alpha = 0.25) + geom_line(aes(x = yday, y = sd_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_sd), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_sd), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Std. 
Deviation of ", yLabel, units)) #daily skew p3 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = skew_q5, ymax = skew_q95), alpha = 0.25) + geom_line(aes(x = yday, y = skew_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_skew), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_skew), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Skew of ", yLabel, " (-)")) #daily Max p4 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = max_q5, ymax = max_q95), alpha = 0.25) + geom_line(aes(x = yday, y = max_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_max), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_max), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Maximum ", yLabel, units)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/dailyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png") } ``` ### plot daily precipitation ```{r, fig.width=8, fig.height=8} dailyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot daily temperature ```{r, fig.width=8, fig.height=8} dailyPlot(sim.tmp, obs.tmp, "Temp") ``` ## Looking at just the daily mean may not be representative since weather may be very different depending on the season, so plot monthly statistics as well for more detail. 
Boxplot whiskers are in the style of Tukey (1.5 x interquartile range) ```{r} #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" monthlyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsMM, simMM) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = mean, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = mean, color = "Observed")) + xlab("Month") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Mean Monthly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sum, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sum, color = "Observed")) + xlab("Month") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Total Monthly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else 
if(Tag == "Precip"){ yLabel = "Precipitation " units = "(inches)" } #monthly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sd, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sd, color = "Observed")) + xlab("Month") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Standard Deviation in Monthly ", yLabel)) #monthly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = skew, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = skew, color = "Observed")) + xlab("Month") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Skew in Monthly ", yLabel)) #monthly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = max, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = max, color = "Observed")) + xlab("Month") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Monthly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/monthlyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 8, units = "in") } ``` ### plot monthly precipitation ```{r, fig.width=8, fig.height=8} monthlyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot monthly temperature ```{r, fig.width=8, fig.height=8} monthlyPlot(sim.tmp, obs.tmp, "Temp") ``` ## Weekly statistics offer a finer resolution than monthly statistics but are not as noisy as daily values. 
Boxplot whiskers are in the style of Tukey (1.5 x interquartile range) ```{r} #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" weeklyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsWW, simWW) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = mean, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = mean, color = "Observed")) + xlab("Week") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Mean Weekly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sum, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sum, color = "Observed")) + xlab("Week") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Total Weekly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag == 
"Precip"){ yLabel = "Precipitation " units = "(inches)" } #weekly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sd, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sd, color = "Observed")) + xlab("Week") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Standard Deviation in Weekly ", yLabel)) #weekly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = skew, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = skew, color = "Observed")) + xlab("Week") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Skew in Weekly ", yLabel)) #weekly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = max, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = max, color = "Observed")) + xlab("Week") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Weekly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/weeklyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 10, units = "in") } ``` ### plot weekly precipitation ```{r, fig.width=10, fig.height=8} weeklyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot weekly temperature ```{r, fig.width=10, fig.height=8} weeklyPlot(sim.tmp, obs.tmp, "Temp") ``` ### We can also calculate dry- and wet- spell length statistics for further verification. Here, lower and upper boxplot whiskers are 5th and 95th percentiles, respectively. 
```{r} # dat.d = z$dat.d # X = z$Xpamt # syr = head(dat.d$year, 1) # eyr = tail(dat.d$year, 1) # wLO=0.05; wHI=0.95 #Set whisker percentile for boxplots spellstats <- function(dat.d,X,syr,eyr,nrealz,nsim,wLO,wHI){ #get these variables after running the driver_wx#.R code #dat.d <- z$dat.d #X <- z$X uyr=syr:eyr nyr=length(uyr) #Training Data Tdat=LowerSantaCruzRiverBasinAZ %>% mutate(occ = if_else(prcp>=0.01, 1, 0)) #### END DATA PREPARATION BLOCK TO RUN STATS CODE BELOW #### #to get month sequence nobs=length(unique(Tdat$year)) lpyear=dat.d$year[min(which(lubridate::leap_year(dat.d$year)))] aday <- ymd(paste(lpyear,1,1,sep="-")) #jan 1 of a leap year to have a 366-day year it1=which(dat.d$date==aday) it2=it1+366-1 jdaymth <- dat.d$month[it1:it2] zz <- rep(jdaymth,nsim) yy <- rep(1:nsim,each=366) X1 <- cbind(yy,zz,X) #get spell length stats 3 statistics - mean, var and max Y <- array(NA,dim=c(nrealz,3,2,nsim,12)) #save dry and wet spell lengths by sim yr W <- array(NA,dim=c(3,2,nobs,12)) #save dry and wet spell lengths by obs yr Z <- array(NA,dim=c(3,2,nrealz,12)) #save average spell length by realz V <- array(NA,dim=c(3,2,12)) #save average spell length for obs fidx=seq(1,dim(X1)[1],by=366) for (irealz in 1:nrealz){ for (isim in 1:nsim){ i1=fidx[isim] i2=i1+366-1 for (imth in 1:12){ idxlist=i1 + which(X1[i1:i2,2]==imth) - 1 s <- na.omit(X[idxlist,irealz]) z.f <- spellLengths(s) if (length(z.f$`0`)>0){ Y[irealz,1,1,isim,imth]=mean(z.f$`0`) Y[irealz,2,1,isim,imth]=var(z.f$`0`) Y[irealz,3,1,isim,imth]=max(z.f$`0`) } if (length(z.f$`1`)>0){ Y[irealz,1,2,isim,imth]=mean(z.f$`1`) Y[irealz,2,2,isim,imth]=var(z.f$`1`) Y[irealz,3,2,isim,imth]=max(z.f$`1`) } } #imth }#isim } #irealz for (isim in 1:nobs){ for (imth in 1:12){ s <- dplyr::filter(Tdat, year==unique(Tdat$year)[isim], month==imth)$occ z.f <- spellLengths(s) if (length(z.f$`0`)>0){ W[1,1,isim,imth]=mean(z.f$`0`) W[2,1,isim,imth]=var(z.f$`0`) W[3,1,isim,imth]=max(z.f$`0`) } if (length(z.f$`1`)>0){ W[1,2,isim,imth]=mean(z.f$`1`) W[2,2,isim,imth]=var(z.f$`1`) W[3,2,isim,imth]=max(z.f$`1`) } } #imth }#isim for (imth in 1:12){ Z[1,1,,imth]=apply(Y[,1,1,,imth],1,"mean",na.rm=T) Z[2,1,,imth]=apply(Y[,2,1,,imth],1,"mean",na.rm=T) Z[3,1,,imth]=apply(Y[,3,1,,imth],1,"mean",na.rm=T) Z[1,2,,imth]=apply(Y[,1,2,,imth],1,"mean",na.rm=T) Z[2,2,,imth]=apply(Y[,2,2,,imth],1,"mean",na.rm=T) Z[3,2,,imth]=apply(Y[,3,2,,imth],1,"mean",na.rm=T) V[1,1,imth]=mean(W[1,1,,imth],na.rm=T) V[2,1,imth]=mean(W[2,1,,imth],na.rm=T) V[3,1,imth]=mean(W[3,1,,imth],na.rm=T) V[1,2,imth]=mean(W[1,2,,imth],na.rm=T) V[2,2,imth]=mean(W[2,2,,imth],na.rm=T) V[3,2,imth]=mean(W[3,2,,imth],na.rm=T) } #imth #Boxplots d01 <- as.data.frame(Z[1,1,,]) #average mean dry spell length d02 <- as.data.frame(sqrt(Z[2,1,,])) #average sd dry spell length d03 <- as.data.frame(Z[3,1,,]) #average max dry spell length d01T=V[1,1,] d02T=sqrt(V[2,1,]) d03T=V[3,1,] RecRed = "red" RecBlue = "blue" # pdf(file=paste0(tempdir(), "/outputPlots/DryWetStats.pdf"), width=9, height=4) oldpar = par(mfrow=c(1,3), mar=c(2,2.5,2,1), oma=c(2,2,0,0), mgp=c(2,1,0), cex.axis=0.8) par(mfrow=c(1,3), mar=c(2,2.5,2,1), oma=c(2,2,0,0), mgp=c(2,1,0),cex.axis=0.8) #Mean #ylimit=range(c(d01,d01hist),na.rm=T) bb=boxplot(d01, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d01,d01T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d01[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,na.rm=T, 
outline=F,xlab="",ylab="") mtext("days", side=2, outer = T) title(main="average mean dry spell length") for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d01T, pch=17, cex=1, col=RecRed) lines(1:12, d01T, col=RecRed) #Standard Deviation bb=boxplot(d02, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d02,d02T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d02[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,na.rm=T, outline=F,xlab="",ylab="") title(main="average sd dry spell length") mtext("month", side=1, outer = T, line=0.5) for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d02T, pch=17, cex=1, col=RecRed) lines(1:12, d02T, col=RecRed) #Max Length bb=boxplot(d03, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d03,d03T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d03[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,xlab="",ylab="",na.rm=T, outline=F) title(main="average max dry spell length") for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d03T, pch=17, cex=1, col=RecRed) lines(1:12, d03T, col=RecRed) dev.off() par(oldpar) ########################################## } ``` ```{r, fig.width=8, fig.height=6} spellstats(dat.d = z$dat.d, X = z$Xpamt, syr = head(dat.d$year, 1), eyr = tail(dat.d$year, 1), nrealz = nrealz, nsim = nsim, wLO = 0.05, wHI = 0.95) ``` ### Step 4: Save your data to file Save your simulated weather ensemble to a file via the `writeSim` function. It will conveniently save each trace to a .csv file. ```{r} # setwd() to desired location for writeSim to save .csv files containing the simulated precipitation and temperature # setwd(tempdir()) # # writeSim(wxOutput = z, nsim = nsim, nrealz = nrealz, debug = TRUE) ``` ### Performance Benchmarking Running the `wx()` weather generator code for a 5-year, 10-trace simulation on a laptop with the following characteristics results in the below run time. Parallel computing was enabled via `parallelize = T` in the wx() function. OS: Microsoft Windows 10 Enterprise 10.0.19044 Build 19044 Hardware: Intel(R) Core(TM) i7-10850H CPU @2.70GHz, 2712 Mhz, 6 Cores, 12 Logical Processors. 16 GB installed physical memory. ```{r} #wxgenR weather generation run time: print(difftime(endTime, startTime, units='mins')) ``` ## Final notes * The weather simulations use a 366-day per year framework in order to handle leap years. During leap years, all 366 days will have precipitation and temperature values (i.e., February 29th exists and contains data), but during non-leap years February 29th does not exist and a row of NULL values is added after December 31st in order to maintain the same length between leap years and non-leap years. Other datasets and algorithms use various approaches to handle leap years, such as avoiding leap years altogether, using a 360-day year, etc. * Because leap years are acceptable in the `wxgenR` algorithm, it is possible (but unlikely) to have two or more leap years in a row in the weather simulations since years are sampled at random. 
* Please report any bugs or issues to either [email protected] or [email protected] ## Citations For more details and examples, including analysis of the dataset used in this vignette, see the following works: Bearup, L., Gangopadhyay, S., & Mikkelson, K. (2021). Hydroclimate Analysis Lower Santa Cruz River Basin Study (Technical Memorandum No ENV-2020-056). Bureau of Reclamation. <https://www.usbr.gov/lc/phoenix/programs/lscrbasin/LSCRBS_Hydroclimate_2021.pdf>. Gangopadhyay, S., Bearup, L. A., Verdin, A., Pruitt, T., Halper, E., & Shamir, E. (2019). A collaborative stochastic weather generator for climate impacts assessment in the Lower Santa Cruz River Basin, Arizona. Fall Meeting 2019, American Geophysical Union. <https://ui.adsabs.harvard.edu/abs/2019AGUFMGC41G1267G>. Rajagopalan, B., Lall, U., and Tarboton, D. G. (1996). Nonhomogeneous Markov Model for Daily Precipitation, Journal of Hydrologic Engineering, 1, 33–40, <https://doi.org/10.1061/(ASCE)1084-0699(1996)1:1(33)>. Verdin, A., Rajagopalan, B., Kleiber, W., and Katz, R. W. (2015). Coupled stochastic weather generation using spatial and generalized linear models, Stoch Environ Res Risk Assess, 29, 347–356, <https://doi.org/10.1007/s00477-014-0911-6>. Verdin, A., Rajagopalan, B., Kleiber, W., PodestΓ‘, G., and Bert, F. (2018). A conditional stochastic weather generator for seasonal to multi-decadal simulations, Journal of Hydrology, 556, 835–846, <https://doi.org/10.1016/j.jhydrol.2015.12.036>. ## Disclaimer This information is preliminary and is subject to revision. It is being provided to meet the need for timely best science. The information is provided on the condition that neither the U.S. Bureau of Reclamation nor the U.S. Government may be held liable for any damages resulting from the authorized or unauthorized use of the information.
/scratch/gouwar.j/cran-all/cranData/wxgenR/inst/doc/Vignette_LowerSantaCruzRiverBasinAZ.Rmd
--- title: "wxgenR - Blacksburg, VA NWS Station" author: "Subhrendu Gangopadhyay, Lindsay Bearup, Andrew Verdin, Eylon Shamir, Eve Halper, David Woodson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{wxgenR - Blacksburg, VA NWS Station} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` A weather generator is a numerical tool that resamples an input timeseries many times, while preserving observed or projected characteristics of importance, such as the statistics of the transition between wet and dry days. The resulting large group, or ensemble, of likely rainfall and temperature timeseries represents a range of possible amounts, daily patterns, and seasonality. This weather generator is, to our knowledge, novel in that it includes *seasons* (up to 26) in training the simulation algorithm. The goal of `wxgenR` is to provide users a tool that can simulate, with fidelity, an ensemble of precipitation and temperature based on training data that could include, for example, station based point measurements, grid cell values derived from models or remotely sensed data, or basin averages. The incorporation of seasonality as a covariate in the training algorithm allows users to examine the effects of shifts in seasonality due to climate warming (e.g., earlier snowmelt seasons or prolonged summer dry periods). `wxgenR` is an effective and robust scenario planning tool for a wide variety of potential futures. ## Running `wxgenR` All that is needed to run `wxgenR` is a single time series of precipitation, temperature, and season. Up to 20 seasons may be defined, but most users will likely only need two to four based on their study region. For example, `wxgenR` is provided with single station data from Blacksburg, Virgina, a temperate locale that is better defined by four seasons. Within the data used to train the weather generator, these four seasons should be noted with an index of either 1, 2, 3, or 4 for each day in the time series. The varying statistics of each season will impact the resulting simulations. Precipitation and temperature data are point measurements taken at the Blacksburg National Weather Service office (GHCND:USC00440766). ## Tutorial For example, using the Blacksburg, VA National Weather Service station-based precipitation, temperature, and season from 1991 to 2020, we can generate simulated precipitation and temperature for any desired time length and ensemble size. Your variables *must* be named as the following: 'year', 'month', 'day', 'prcp', 'temp', 'season'. All input variables must be contained within the same dataframe or text file. If inputting a text file, it must be comma separated (.csv). The weather generator can handle NA values for precipitation or temperature, but all other variables should be numeric values. ### Step 1: Load your data ```{r} library(wxgenR) library(lubridate) library(dplyr) library(tidyr) library(reshape2) library(ggpubr) library(data.table) library(moments) library(seas) data(BlacksburgVA) head(BlacksburgVA) ``` ### Step 2: Select your run settings and run the weather generator Use the variables within the wx() function like `syr` and `eyr` (start and end year) to set the temporal boundaries from which to sample, otherwise, if left empty the start and end years will default to the beginning and end of your training data. 
Use `nsim` to set the length (in years) of your simulated weather, and `nrealz` to set the ensemble size (number of traces). The variable `wwidth` will set the sampling window for each day of year (Jan. 1 through Dec. 31) for every year in the simulation. The sampling window for each day of year is +/- `wwidth` + 1, effectively sampling `wwidth` number of days before and after each day of year, including that day of year. A lower value for `wwidth` will sample fewer surrounding days and a higher value will sample more days, resulting in dampened and heightened variability, respectively. Typical setting of `wwidth` is between 1 and 15, resulting in a daily sampling window of 3 days and 31 days, respectively. Generally, higher and lower values of `wwidth` result in higher and lower variance, respectively, in the simulated data. For example, to simulate precipitation on day 1 of the simulation (Jan. 1 of year 1), with `wwidth` = 1 (a 3-day window), the algorithm will sample days in the training record between (and including) December 31 and January 2 (for all years in the training record). For day 2 of the simulation (Jan. 2 of year 1), the algorithm will sample days in the training record between January 1 and January 3. Simulation day 3 (Jan. 3) will sample between January 2 and January 4, and so on. Increasing `wwidth` to 2 (a 5-day window) will sample between December 30 and January 3 for Jan. 1 simulations, December 31 to January 4 for Jan. 2, and January 1 to January 5 for Jan. 3, and so on. In some cases, the `wwidth` will be automatically increased through an adaptive window width if precipitation occurred on a given day but there were less than two daily precipitation values over 0.01 inches during the window for that day. `wwidth` will adaptively increase by 1 until two or more daily precipitation values over 0.01 inches are in each window. Adaptive window width is most likely to occur in regions with high aridity, dry seasons, a small initial value of `wwidth` is used, or if the number of years in the training data is relatively short (e.g., less than 30 years). To display the results of the adaptive window width, set `awinFlag = T`. Here, our training data spans 1991-01-01 to 2020-12-31, but we don't want to use the full historical record, so we set `syr` and `eyr` to 2000 and 2004, respectively, in the `wx()` function so that the training data is subset between those years. We want a simulation length of 5-years (`nsim`) in order to match the length of the subset training record and 10 traces in our ensemble (`nrealz`) for computational efficiency (although more traces, e.g. 50, are recommended). Sampling for each day of the year will sample from the preceding 1-day, the day of, and the following 1-day (`wwidth`) for a total window size of 3-days. We may also want to increase the variability of our simulated precipitation by sampling outside the historical envelope with an Epanechnikov Kernel (`ekflag = T`). For more details on the Epanechnikov kernel and its use in a weather generator, see Rajagopalan et al. (1996). Setting `tempPerturb = T` will increase the variability of the simulated temperature by adding random noise from a normal distribution fit using a mean of zero and a standard deviation equal to the monthly standard deviation of simulated temperature residuals. 
Given that simulated daily temperature at time t is a function of temperature(t-1), cosine(t), sine(t), precipitation occurrence(t), and monthly mean temperature(t), the standard deviation of daily residuals from this model is calculated for each month and used to add random noise to the simulated temperature. The temperature simulation approach is inspired by- and adapted from- Verdin et al. (2015, 2018). Since the training data has units of inches and degrees Fahrenheit for precipitation and temperature, respectively, we must set `unitSystem = "Metric"`. Setting `parallelize = T` will enable parallel computing for precipitation simulation which is the most computationally intensive aspect of the weather generator. ```{r, results='hide'} nsim = 5 #number of simulation years nrealz = 10 #number of traces in ensemble startTime <- Sys.time() #benchmark run time z = wx(trainingData = BlacksburgVA, syr = 2000, eyr = 2004, nsim = nsim, nrealz = nrealz, aseed = 123, wwidth = 1, unitSystem = "Metric", ekflag = TRUE, awinFlag = TRUE, tempPerturb = TRUE, parallelize = FALSE) endTime = Sys.time() ``` The wx() function will return a list containing both your input/training data, and a variety of processed outputs, named here as the variable `z`. Within `z`, `dat.d` is the original input data as well as some intermediary variables. `simyr1` contains the years within your training data that were sampled to generate simulated values for each trace. `X` is the occurrence of daily precipitation for each trace, where 1 and 0 indicate the presence and absence of precipitation, respectively. `Xseas` is the season index for each day and trace. `Xpdate` shows which days from the training data were sampled for each simulated day and trace, if precipitation was simulated to occur on a given day. `Xpamt` is the simulated precipitation amount for each day and trace. `Xtemp` is the simulated temperature for each day and trace. Generally, `Xpamt` and `Xtemp` will be of most interest to users as these are the desired outputs of simulated daily precipitation and temperature. 
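As a quick orientation to these outputs (a small check based on how they are indexed in the post-processing code below), each simulated variable is stored with 366 rows per simulation year and one column per trace, and `simyr1` records which training years were resampled:

```{r, eval = FALSE}
# Orientation to the output layout (inferred from how these objects are
# indexed in the post-processing code below).
dim(z$Xpamt)    # expected: c(nsim * 366, nrealz)
dim(z$Xtemp)    # same layout as Xpamt
table(z$simyr1) # how often each training year was resampled across traces
```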
```{r} # glimpse(z) ``` ### Step 3: Analyze simulated weather ### First, use modified approach from writeSim function to post-process/format output ```{r} #parse variables from wx() output dat.d = z$dat.d simyr1 = z$simyr1 X = z$X Xseas = z$Xseas Xpdate = z$Xpdate Xpamt = z$Xpamt Xtemp = z$Xtemp #write simulation output # it1 <- seq(1, length(X[,1]), 366) it2 = it1+366-1 #initialize storage sim.pcp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.tmp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.szn = matrix(NA, nrow = nsim*366, ncol = nrealz+3) #loop through realization irealz = 1 for (irealz in 1:nrealz){ outmat <- vector() #loop through simulation years isim = 1 for (isim in 1:nsim){ leapflag = FALSE ayr = simyr1[isim, irealz] if (lubridate::leap_year(ayr)) leapflag = TRUE col1 = rep(isim, 366) #column 1, simulation year d1 = ayr*10^4+01*10^2+01; d2 = ayr*10^4+12*10^2+31 i1 = which(dat.d$date1 == d1) i2 = which(dat.d$date1 == d2) col2 = dat.d$date1[i1:i2] #column 2, simulation date if (leapflag == FALSE) col2 = c(col2,NA) i1 = it1[isim] i2 = it2[isim] col3 = Xseas[i1:i2, irealz] #column 3, simulation season col4 = X[i1:i2, irealz] #column 4, precipitation occurrence col5 = Xpdate[i1:i2, irealz] #column 5, precipation resampling date col6 = Xpamt[i1:i2, irealz] #column 6, resampled precipitation amount col7 = Xtemp[i1:i2, irealz] #column7, simulated temperature #create time series of 'simulation day' sim.yr = rep(isim, length(col2)) sim.month = month(ymd(col2)) sim.day = day(ymd(col2)) outmat = rbind(outmat, cbind(sim.yr, sim.month, sim.day, col6, col7, col3)) } #isim colnames(outmat) = c("simulation year", "month", "day", "prcp", "temp", "season") if(irealz == 1){ sim.pcp[,1:3] = outmat[,1:3] sim.tmp[,1:3] = outmat[,1:3] sim.szn[,1:3] = outmat[,1:3] } sim.pcp[,irealz+3] = outmat[,4] sim.tmp[,irealz+3] = outmat[,5] sim.szn[,irealz+3] = outmat[,6] } #irealz ``` #Format dataframes for simulated precip, temperature, and season ```{r} # df = sim.pcp formatting = function(df){ df = as.data.frame(df) colnames(df) = c("simulation year", "month", "day", paste0("Trace_", 1:nrealz)) #remove 366 days for non-leap years df = drop_na(df, c(month, day)) #assign simulation year to start at the same time as training data df$`simulation year` = df$`simulation year` + dat.d$year[1] - 1 #format date df$Date = ymd(paste(df$`simulation year`, df$month, df$day, sep = "-")) #remove years that aren't leap years # df = drop_na(df, Date) df = df %>% mutate(yday = as.numeric(yday(Date)), week = as.numeric(week(Date))) %>% relocate(c(Date,yday,week), .after = day) %>% melt(id = 1:6) return(df) } ``` ```{r} sim.pcp = formatting(sim.pcp) sim.tmp = formatting(sim.tmp) sim.szn = formatting(sim.szn) ``` ### Format training data ```{r} colnames(dat.d)[11] = "yday" obs.pcp = dat.d[,c(1:3,8:9,11,4)] obs.tmp = dat.d[,c(1:3,8:9,11,5)] ``` ### First you might want to plot the daily time series for verification If your data contained NA values, they can propagate to simulated temperature values (NA precip values in your data are set to 0), so use `na.rm = T` for any subsequent analysis. You may also choose to replace `NA` values with daily or monthly averages. Additionally, leap years may be included in the simulated weather if they are included in your training data, so all non-leap years include a row of 'NA' values at the end of the calendar year as a book-keeping measure so that the total number of rows in each trace is the same. 
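For a quick first look before the fuller statistical comparisons below, a single trace can be overlaid on the training data with a simple base-R sketch (Trace_1 is just an example; axis units follow whatever units your training data use):

```{r, eval = FALSE}
# Quick verification plot: training temperatures (grey) with one simulated
# trace (blue) by day of year.
plot(obs.tmp$yday, obs.tmp$temp, pch = 16, cex = 0.3, col = "grey",
     xlab = "Day of year", ylab = "Daily temperature")
trace1 = subset(sim.tmp, variable == "Trace_1")
points(trace1$yday, trace1$value, pch = 16, cex = 0.3, col = "dodgerblue")
legend("topleft", pch = 16, col = c("grey", "dodgerblue"),
       legend = c("Training", "Trace_1"), bty = "n")
```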
```{r} #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" dailyPlot = function(simDat, obsDat, Tag){ simD = simDat %>% drop_na() %>% group_by(variable, yday) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simDq <- simD %>% group_by(yday) %>% summarise( mean_q5 = quantile(mean, 0.05, na.rm = T), mean_med = median(mean, na.rm = T), mean_q95 = quantile(mean, 0.95, na.rm =T), max_q5 = quantile(max, 0.05, na.rm = T), max_med = median(max, na.rm = T), max_q95 = quantile(max, 0.95, na.rm = T), sd_q5 = quantile(sd, 0.05, na.rm = T), sd_med = median(sd), sd_q95 = quantile(sd, 0.95, na.rm = T), skew_q5 = quantile(skew, 0.05, na.rm = T), skew_med = median(skew, na.rm = T), skew_q95 = quantile(skew, 0.95, na.rm = T) ) %>% drop_na() %>% ungroup() if(Tag == "Temp"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() } else if(Tag == "Precip"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() } colnames(obs)[-1] = paste0("obs_", colnames(obs)[-1]) df.comb = left_join(simDq, obs, by = "yday") #plotting -------------------------------- lgdLoc = c(0.8, 0.9) if(Tag == "Temp"){ yLabel = "Daily Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Daily Precipitation " units = "(inches)" } trnAlpha = 0.65 #daily mean p1 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = mean_q5, ymax = mean_q95), alpha = 0.25) + geom_line(aes(x = yday, y = mean_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_mean), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_mean), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Mean ", yLabel, units)) #daily SD p2 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = sd_q5, ymax = sd_q95), alpha = 0.25) + geom_line(aes(x = yday, y = sd_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_sd), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_sd), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Std. 
Deviation of ", yLabel, units)) #daily skew p3 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = skew_q5, ymax = skew_q95), alpha = 0.25) + geom_line(aes(x = yday, y = skew_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_skew), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_skew), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Skew of ", yLabel, " (-)")) #daily Max p4 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = max_q5, ymax = max_q95), alpha = 0.25) + geom_line(aes(x = yday, y = max_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_max), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_max), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Maximum ", yLabel, units)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/dailyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png") } ``` ### plot daily precipitation ```{r, fig.width=8, fig.height=8} dailyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot daily temperature ```{r, fig.width=8, fig.height=8} dailyPlot(sim.tmp, obs.tmp, "Temp") ``` ## Looking at just the daily mean may not be representative since weather may be very different depending on the season, so plot monthly statistics as well for more detail. 
Boxplot whiskers are in the style of Tukey (1.5 x interquartile range) ```{r} #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" monthlyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsMM, simMM) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = mean, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = mean, color = "Observed")) + xlab("Month") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Mean Monthly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sum, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sum, color = "Observed")) + xlab("Month") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Total Monthly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag 
== "Precip"){ yLabel = "Precipitation " units = "(inches)" } #monthly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sd, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sd, color = "Observed")) + xlab("Month") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Standard Deviation in Monthly ", yLabel)) #monthly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = skew, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = skew, color = "Observed")) + xlab("Month") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Skew in Monthly ", yLabel)) #monthly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = max, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = max, color = "Observed")) + xlab("Month") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Monthly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) p.out = paste0(tempdir(), "/outputPlots/monthlyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 8, units = "in") } ``` ### plot monthly precipitation ```{r, fig.width=8, fig.height=8} monthlyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot monthly temperature ```{r, fig.width=8, fig.height=8} monthlyPlot(sim.tmp, obs.tmp, "Temp") ``` ## Weekly statistics offer a finer resolution than monthly statistics but are not as noisy as daily values. 
Boxplot whiskers are in the style of Tukey (1.5 x interquartile range) ```{r} #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" weeklyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsWW, simWW) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = mean, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = mean, color = "Observed")) + xlab("Week") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Mean Weekly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sum, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sum, color = "Observed")) + xlab("Week") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Total Weekly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag == 
"Precip"){ yLabel = "Precipitation " units = "(inches)" } #weekly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sd, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sd, color = "Observed")) + xlab("Week") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Standard Deviation in Weekly ", yLabel)) #weekly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = skew, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = skew, color = "Observed")) + xlab("Week") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Skew in Weekly ", yLabel)) #weekly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = max, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = max, color = "Observed")) + xlab("Week") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Weekly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) p.out = paste0(tempdir(), "/outputPlots/weeklyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 10, units = "in") } ``` ### plot weekly precipitation ```{r, fig.width=10, fig.height=8} weeklyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot weekly temperature ```{r, fig.width=10, fig.height=8} weeklyPlot(sim.tmp, obs.tmp, "Temp") ``` ### Step 4: Save your data to file Save your simulated weather ensemble to a file via the `writeSim` function. It will conveniently save each trace to a .csv file. ```{r} # setwd() to desired location for writeSim to save .csv files containing the simulated precipitation and temperature # setwd(tempdir()) # # writeSim(wxOutput = z, nsim = nsim, nrealz = nrealz, debug = TRUE) ``` ### Performance Benchmarking Running the `wx()` weather generator code for a 5-year, 10-trace simulation on a laptop with the following characteristics results in the below run time. Parallel computing was enabled via `parallelize = T` in the wx() function. OS: Microsoft Windows 10 Enterprise 10.0.19044 Build 19044 Hardware: Intel(R) Core(TM) i7-10850H CPU @2.70GHz, 2712 Mhz, 6 Cores, 12 Logical Processors. 16 GB installed physical memory. 
```{r}
#wxgenR weather generation run time:
print(difftime(endTime, startTime, units='mins'))
```

## Citations

For more details and examples, including analysis of the dataset used in this vignette, see the following works:

Bearup, L., Gangopadhyay, S., & Mikkelson, K. (2021). Hydroclimate Analysis Lower Santa Cruz River Basin Study (Technical Memorandum No ENV-2020-056). Bureau of Reclamation. https://www.usbr.gov/lc/phoenix/programs/lscrbasin/LSCRBS_Hydroclimate_2021.pdf

Gangopadhyay, S., Bearup, L. A., Verdin, A., Pruitt, T., Halper, E., & Shamir, E. (2019, December 1). A collaborative stochastic weather generator for climate impacts assessment in the Lower Santa Cruz River Basin, Arizona. Fall Meeting 2019, American Geophysical Union. https://ui.adsabs.harvard.edu/abs/2019AGUFMGC41G1267G

Rajagopalan, B., Lall, U., and Tarboton, D. G. (1996). Nonhomogeneous Markov Model for Daily Precipitation, Journal of Hydrologic Engineering, 1, 33–40, https://doi.org/10.1061/(ASCE)1084-0699(1996)1:1(33).

Verdin, A., Rajagopalan, B., Kleiber, W., and Katz, R. W. (2015). Coupled stochastic weather generation using spatial and generalized linear models, Stoch Environ Res Risk Assess, 29, 347–356, https://doi.org/10.1007/s00477-014-0911-6.

Verdin, A., Rajagopalan, B., Kleiber, W., Podestá, G., and Bert, F. (2018). A conditional stochastic weather generator for seasonal to multi-decadal simulations, Journal of Hydrology, 556, 835–846, https://doi.org/10.1016/j.jhydrol.2015.12.036.

## Disclaimer

This information is preliminary and is subject to revision. It is being provided to meet the need for timely best science. The information is provided on the condition that neither the U.S. Bureau of Reclamation nor the U.S. Government may be held liable for any damages resulting from the authorized or unauthorized use of the information.
/scratch/gouwar.j/cran-all/cranData/wxgenR/vignettes/Vignette_BlacksburgVA.Rmd
--- title: "wxgenR - Lower Santa Cruz River Basin, AZ" author: "Subhrendu Gangopadhyay, Lindsay Bearup, David Woodson, Andrew Verdin, Eylon Shamir, Eve Halper" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{wxgenR - Lower Santa Cruz River Basin, AZ} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` A weather generator is a numerical tool that resamples a daily time series of precipitation, temperature, and season many times, while preserving observed or projected characteristics of importance, such as the statistics of the transition between wet and dry days. The resulting large group, or ensemble, of likely rainfall and temperature time series represents a range of possible amounts, daily patterns, and seasonality. This weather generator is, to our knowledge, novel in that it includes *seasons* (up to 26) in training the simulation algorithm. The goal of `wxgenR` is to provide users a tool that can simulate, with fidelity, an ensemble of precipitation and temperature based on training data that could include, for example, station based point measurements, grid cell values derived from models or remotely sensed data, or basin averages. The incorporation of seasonality as a covariate in the training algorithm allows users to examine the effects of shifts in seasonality due to climate warming (e.g., earlier snowmelt seasons or prolonged summer dry periods). `wxgenR` is an effective and robust scenario planning tool for a wide variety of potential futures. ## Running `wxgenR` All that is needed to run `wxgenR` is a single time series of precipitation, temperature, and season. Up to 20 seasons may be defined, but most users will likely only need two to four based on their study region. For example, `wxgenR` is provided with basin-average data from the Lower Santa Cruz River Basin (LSCRB) in Arizona, a monsoon dominated region with three distinct seasons. Within the data used to train the weather generator, these three seasons should be noted with an index of either 1, 2, or 3 for each day in the time series. The varying statistics of each season will impact the resulting simulations. ## Tutorial For example, using the Lower Santa Cruz River basin-average precipitation, temperature, and season from 1970 to 1999, we can generate simulated precipitation and temperature for any desired time length and ensemble size. Your variables *must* be named as the following: 'year', 'month', 'day', 'prcp', 'temp', 'season', whether they are input as a dataframe or a text file. All input variables must be contained within the same dataframe or text file. If inputting a text file, it must be comma separated (.csv). The weather generator can handle NA values for precipitation or temperature, but all other variables should be numeric values. ### Step 1: Load your data ```{r} library(wxgenR) library(lubridate) library(dplyr) library(tidyr) library(reshape2) library(ggpubr) library(data.table) library(moments) library(seas) data(LowerSantaCruzRiverBasinAZ) head(LowerSantaCruzRiverBasinAZ) ``` ### Step 2: Select your run settings and run the weather generator Use the variables within the wx() function like `syr` and `eyr` (start and end year) to set the temporal boundaries from which to sample, otherwise, if left empty the start and end years will default to the beginning and end of your training data. 
Use `nsim` to set the length (in years) of your simulated weather, and `nrealz` to set the ensemble size (number of traces). The variable `wwidth` will set the sampling window for each day of year (Jan. 1 through Dec. 31) for every year in the simulation. The sampling window for each day of year is +/- `wwidth` + 1, effectively sampling `wwidth` number of days before and after each day of year, including that day of year. A lower value for `wwidth` will sample fewer surrounding days and a higher value will sample more days, resulting in dampened and heightened variability, respectively. Typical setting of `wwidth` is between 1 and 15, resulting in a daily sampling window of 3 days and 31 days, respectively. Generally, higher and lower values of `wwidth` result in higher and lower variance, respectively, in the simulated data. For example, to simulate precipitation on day 1 of the simulation (Jan. 1 of year 1), with `wwidth` = 1 (a 3-day window), the algorithm will sample days in the training record between (and including) December 31 and January 2 (for all years in the training record). For day 2 of the simulation (Jan. 2 of year 1), the algorithm will sample days in the training record between January 1 and January 3. Simulation day 3 (Jan. 3) will sample between January 2 and January 4, and so on. Increasing `wwidth` to 2 (a 5-day window) will sample between December 30 and January 3 for Jan. 1 simulations, December 31 to January 4 for Jan. 2, and January 1 to January 5 for Jan. 3, and so on. In some cases, the `wwidth` will be automatically increased through an adaptive window width if precipitation occurred on a given day but there were less than two daily precipitation values over 0.01 inches during the window for that day. `wwidth` will adaptively increase by 1 until two or more daily precipitation values over 0.01 inches are in each window. Adaptive window width is most likely to occur in regions with high aridity, dry seasons, a small initial value of `wwidth` is used, or if the number of years in the training data is relatively short (e.g., less than 30 years). To display the results of the adaptive window width, set `awinFlag = T`. Here, our training data spans 1970-01-01 to 1999-12-31, but we don't want to use the full historical record, so we set `syr` and `eyr` to 1970 and 1974, respectively, in the `wx()` function so that the training data is subset between those years. We want a simulation length of 5-years (`nsim`) in order to match the length of the subset training record and 10 traces in our ensemble (`nrealz`) for computational efficiency (although more traces, e.g. 50, are recommended). Sampling for each day of the year will sample from the preceding 3-days, the day of, and the following 3-days (`wwidth`) for a total window size of 7-days. We may also want to increase the variability of our simulated precipitation by sampling outside the historical envelope with an Epanechnikov Kernel (`ekflag = T`). For more details on the Epanechnikov kernel and its use in a weather generator, see Rajagopalan et al. (1996). Setting `tempPerturb = T` will increase the variability of the simulated temperature by adding random noise from a normal distribution fit using a mean of zero and a standard deviation equal to the monthly standard deviation of simulated temperature residuals. 
Given that simulated daily temperature at time t is a function of temperature(t-1), cosine(t), sine(t), precipitation occurrence(t), and monthly mean temperature(t), the standard deviation of daily residuals from this model is calculated for each month and used to add random noise to the simulated temperature. The temperature simulation approach is inspired by- and adapted from- Verdin et al. (2015, 2018). Since the training data has units of inches and degrees Fahrenheit for precipitation and temperature, respectively, we must set `unitSystem = "U.S. Customary"`. Setting `parallelize = T` will enable parallel computing for precipitation simulation which is the most computationally intensive aspect of the weather generator. ```{r, results = 'hide'} nsim = 5 #number of simulation years nrealz = 10 #number of traces in ensemble startTime <- Sys.time() #benchmark run time z = wx(trainingData = LowerSantaCruzRiverBasinAZ, syr = 1970, eyr = 1974, nsim = nsim, nrealz = nrealz, aseed = 123, wwidth = 3, unitSystem = "U.S. Customary", ekflag = TRUE, awinFlag = TRUE, tempPerturb = TRUE, parallelize = FALSE) endTime = Sys.time() ``` The wx() function will return a list containing both your input/training data, and a variety of processed outputs, named here as the variable `z`. Within `z`, `dat.d` is the original input data as well as some intermediary variables. `simyr1` contains the years within your training data that were sampled to generate simulated values for each trace. `X` is the occurrence of daily precipitation for each trace, where 1 and 0 indicate the presence and absence of precipitation, respectively. `Xseas` is the season index for each day and trace. `Xpdate` shows which days from the training data were sampled for each simulated day and trace, if precipitation was simulated to occur on a given day. `Xpamt` is the simulated precipitation amount for each day and trace. `Xtemp` is the simulated temperature for each day and trace. Generally, `Xpamt` and `Xtemp` will be of most interest to users as these are the desired outputs of simulated daily precipitation and temperature. 
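For a quick orientation before the full post-processing in Step 3, the sketch below pulls a single trace straight out of the returned list; it assumes, as the formatting code later in this vignette does, that `Xpamt` and `Xtemp` are matrices with one row per simulation day (366 rows per simulation year) and one column per trace.

```{r, eval = FALSE}
# Minimal sketch (not run): extract the first trace of simulated precipitation
# and temperature. Rows are simulation days, columns are ensemble traces.
pcp_trace1 <- z$Xpamt[, 1]
tmp_trace1 <- z$Xtemp[, 1]
length(pcp_trace1) # should equal nsim * 366 because of the 366-day bookkeeping
```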
```{r} glimpse(z) ``` ### Step 3: Analyze simulated weather ### First, use modified approach from writeSim function to post-process/format output ```{r} #parse variables from wx() output dat.d = z$dat.d simyr1 = z$simyr1 X = z$X Xseas = z$Xseas Xpdate = z$Xpdate Xpamt = z$Xpamt Xtemp = z$Xtemp #write simulation output # it1 <- seq(1, length(X[,1]), 366) it2 = it1+366-1 #initialize storage sim.pcp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.tmp = matrix(NA, nrow = nsim*366, ncol = nrealz+3) sim.szn = matrix(NA, nrow = nsim*366, ncol = nrealz+3) #loop through realization irealz = 1 for (irealz in 1:nrealz){ outmat <- vector() #loop through simulation years isim = 1 for (isim in 1:nsim){ leapflag = FALSE ayr = simyr1[isim, irealz] if (lubridate::leap_year(ayr)) leapflag = TRUE col1 = rep(isim, 366) #column 1, simulation year d1 = ayr*10^4+01*10^2+01; d2 = ayr*10^4+12*10^2+31 i1 = which(dat.d$date1 == d1) i2 = which(dat.d$date1 == d2) col2 = dat.d$date1[i1:i2] #column 2, simulation date if (leapflag == FALSE) col2 = c(col2,NA) i1 = it1[isim] i2 = it2[isim] col3 = Xseas[i1:i2, irealz] #column 3, simulation season col4 = X[i1:i2, irealz] #column 4, precipitation occurrence col5 = Xpdate[i1:i2, irealz] #column 5, precipation resampling date col6 = Xpamt[i1:i2, irealz] #column 6, resampled precipitation amount col7 = Xtemp[i1:i2, irealz] #column7, simulated temperature #create time series of 'simulation day' sim.yr = rep(isim, length(col2)) sim.month = month(ymd(col2)) sim.day = day(ymd(col2)) outmat = rbind(outmat, cbind(sim.yr, sim.month, sim.day, col6, col7, col3)) } #isim colnames(outmat) = c("simulation year", "month", "day", "prcp", "temp", "season") if(irealz == 1){ sim.pcp[,1:3] = outmat[,1:3] sim.tmp[,1:3] = outmat[,1:3] sim.szn[,1:3] = outmat[,1:3] } sim.pcp[,irealz+3] = outmat[,4] sim.tmp[,irealz+3] = outmat[,5] sim.szn[,irealz+3] = outmat[,6] } #irealz ``` #Format dataframes for simulated precip, temperature, and season ```{r} # df = sim.pcp formatting = function(df){ df = as.data.frame(df) colnames(df) = c("simulation year", "month", "day", paste0("Trace_", 1:nrealz)) #remove 366 days for non-leap years df = drop_na(df, c(month, day)) #assign simulation year to start at the same time as training data df$`simulation year` = df$`simulation year` + dat.d$year[1] - 1 #format date df$Date = ymd(paste(df$`simulation year`, df$month, df$day, sep = "-")) #remove years that aren't leap years # df = drop_na(df, Date) df = df %>% mutate(yday = as.numeric(yday(Date)), week = as.numeric(week(Date))) %>% relocate(c(Date,yday,week), .after = day) %>% melt(id = 1:6) return(df) } ``` ```{r} sim.pcp = formatting(sim.pcp) sim.tmp = formatting(sim.tmp) sim.szn = formatting(sim.szn) ``` ### Format training data ```{r} colnames(dat.d)[11] = "yday" obs.pcp = dat.d[,c(1:3,8:9,11,4)] obs.tmp = dat.d[,c(1:3,8:9,11,5)] ``` ### First you might want to plot the daily time series for verification If your data contained NA values, they can propagate to simulated temperature values (NA precip values in your data are set to 0), so use `na.rm = T` for any subsequent analysis. You may also choose to replace `NA` values with daily or monthly averages. Additionally, leap years may be included in the simulated weather if they are included in your training data, so all non-leap years include a row of 'NA' values at the end of the calendar year as a book-keeping measure so that the total number of rows in each trace is the same. 
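As a concrete illustration of the gap-filling suggestion above, the sketch below replaces any remaining `NA` simulated temperatures with the monthly mean of the same trace; this is only one possible choice, and the `sim.tmp.filled` object it creates is not used elsewhere in this vignette. The leap-year padding rows were already removed by the `formatting()` step, so any `NA` values left here would be those inherited from `NA` values in the training data.

```{r, eval = FALSE}
# One possible gap-filling sketch (not run): fill NA simulated temperatures
# with the mean of the same trace and month.
sim.tmp.filled <- sim.tmp %>%
  group_by(variable, month) %>%
  mutate(value = ifelse(is.na(value), mean(value, na.rm = TRUE), value)) %>%
  ungroup()
```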
```{r} #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" dailyPlot = function(simDat, obsDat, Tag){ simD = simDat %>% drop_na() %>% group_by(variable, yday) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simDq <- simD %>% group_by(yday) %>% summarise( mean_q5 = quantile(mean, 0.05, na.rm = T), mean_med = median(mean, na.rm = T), mean_q95 = quantile(mean, 0.95, na.rm =T), max_q5 = quantile(max, 0.05, na.rm = T), max_med = median(max, na.rm = T), max_q95 = quantile(max, 0.95, na.rm = T), sd_q5 = quantile(sd, 0.05, na.rm = T), sd_med = median(sd), sd_q95 = quantile(sd, 0.95, na.rm = T), skew_q5 = quantile(skew, 0.05, na.rm = T), skew_med = median(skew, na.rm = T), skew_q95 = quantile(skew, 0.95, na.rm = T) ) %>% drop_na() %>% ungroup() if(Tag == "Temp"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() } else if(Tag == "Precip"){ obs <- obsDat %>% drop_na() %>% group_by(yday) %>% summarise( mean = mean(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() } colnames(obs)[-1] = paste0("obs_", colnames(obs)[-1]) df.comb = left_join(simDq, obs, by = "yday") #plotting -------------------------------- lgdLoc = c(0.8, 0.9) if(Tag == "Temp"){ yLabel = "Daily Temperature " units = "(Β°F)" } else if(Tag == "Precip"){ yLabel = "Daily Precipitation " units = "(inches)" } trnAlpha = 0.65 #daily mean p1 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = mean_q5, ymax = mean_q95), alpha = 0.25) + geom_line(aes(x = yday, y = mean_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_mean), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_mean), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Mean ", yLabel, units)) #daily SD p2 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = sd_q5, ymax = sd_q95), alpha = 0.25) + geom_line(aes(x = yday, y = sd_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_sd), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_sd), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Std. 
Deviation of ", yLabel, units)) #daily skew p3 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = skew_q5, ymax = skew_q95), alpha = 0.25) + geom_line(aes(x = yday, y = skew_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_skew), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_skew), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Skew of ", yLabel, " (-)")) #daily Max p4 = ggplot(df.comb) + geom_ribbon(aes(x = yday, ymin = max_q5, ymax = max_q95), alpha = 0.25) + geom_line(aes(x = yday, y = max_med, color = "red"), size = 1, alpha = 0.8) + geom_line(aes(x = yday, y = obs_max), size = 0.3, alpha = trnAlpha, linetype = "solid", color = "blue") + geom_point(aes(x = yday, y = obs_max), size = 0.6, alpha = trnAlpha, color = "blue") + scale_colour_manual(values =c('blue'='blue','red'='red', 'grey' = 'grey'), labels = c('Training Data','Simulation Median', '95% Confidence')) + theme_classic() + theme(axis.title = element_text(face = "bold"), # text=element_text(size=14), panel.grid.major = element_line(), legend.title=element_blank(), legend.position = lgdLoc, legend.background = element_blank(), legend.box.background = element_blank(), legend.key = element_blank()) + xlab("Day of Year") + ylab(paste0("Maximum ", yLabel, units)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/dailyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png") } ``` ### plot daily precipitation ```{r, fig.width=8, fig.height=8} dailyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot daily temperature ```{r, fig.width=8, fig.height=8} dailyPlot(sim.tmp, obs.tmp, "Temp") ``` ## Looking at just the daily mean may not be representative since weather may be very different depending on the season, so plot monthly statistics as well for more detail. 
Boxplot whiskers are in the style of Tukey (1.5 x interquartile range) ```{r} #plot simulated daily data # simDat = sim.tmp # obsDat = obs.tmp # Tag = "Temp" monthlyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simM = simDat %>% drop_na() %>% group_by(variable, month, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simMM <- simM %>% group_by(variable, month) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(month, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsMM <- obs %>% group_by(month) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsMM, simMM) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = mean, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = mean, color = "Observed")) + xlab("Month") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Mean Monthly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sum, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sum, color = "Observed")) + xlab("Month") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle("Average Total Monthly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else 
if(Tag == "Precip"){ yLabel = "Precipitation " units = "(inches)" } #monthly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = sd, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = sd, color = "Observed")) + xlab("Month") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Standard Deviation in Monthly ", yLabel)) #monthly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = skew, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = skew, color = "Observed")) + xlab("Month") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Skew in Monthly ", yLabel)) #monthly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = month, y = max, group = month)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = month, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = month, y = max, color = "Observed")) + xlab("Month") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = 1:12) + ggtitle(paste0("Average Monthly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/monthlyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 8, units = "in") } ``` ### plot monthly precipitation ```{r, fig.width=8, fig.height=8} monthlyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot monthly temperature ```{r, fig.width=8, fig.height=8} monthlyPlot(sim.tmp, obs.tmp, "Temp") ``` ## Weekly statistics offer a finer resolution than monthly statistics but are not as noisy as daily values. 
Boxplot whiskers are in the style of Tukey (1.5 x interquartile range) ```{r} #plot simulated daily data simDat = sim.tmp obsDat = obs.tmp Tag = "Temp" weeklyPlot = function(simDat, obsDat, Tag){ if(Tag == "Temp"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( mean = mean(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( mean=mean(mean), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( mean = mean(temp, na.rm = T), max = max(temp, na.rm = T), sd = sd(temp, na.rm = T), skew = skewness(temp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( mean = mean(mean, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) }else if(Tag == "Precip"){ simW = simDat %>% drop_na() %>% group_by(variable, week, `simulation year`) %>% summarise( sum = sum(value, na.rm = T), max = max(value, na.rm = T), sd = sd(value, na.rm = T), skew = skewness(value, na.rm = T) ) %>% ungroup() simWW <- simW %>% group_by(variable, week) %>% summarise( sum=mean(sum), max=mean(max), sd=sqrt(mean(sd^2)), skew=mean(skew, na.rm=T) ) %>% ungroup() obs <- obsDat %>% drop_na() %>% group_by(week, year) %>% summarise( sum = sum(prcp, na.rm = T), max = max(prcp, na.rm = T), sd = sd(prcp, na.rm = T), skew = skewness(prcp, na.rm = T) ) %>% ungroup() obsWW <- obs %>% group_by(week) %>% summarise( sum = mean(sum, na.rm = T), max = mean(max, na.rm = T), sd = sqrt(mean(sd^2)), skew = mean(skew, na.rm=T) ) %>% mutate(variable = "Observed") %>% relocate(variable) %>% ungroup() # colnames(obsMM)[-1] = paste0("obs_", colnames(obsMM)[-1]) } df.comb = rbind(obsWW, simWW) #plotting -------------------------------- if(Tag == "Temp"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = mean, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = mean, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = mean, color = "Observed")) + xlab("Week") + ylab("Temperature (Β°F)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Mean Weekly Temperature") }else if(Tag == "Precip"){ p1 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sum, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sum, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sum, color = "Observed")) + xlab("Week") + ylab("Precipitation (inches)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle("Average Total Weekly Precipitation") } if(Tag == "Temp"){ yLabel = "Temperature " units = "(Β°F)" } else if(Tag == 
"Precip"){ yLabel = "Precipitation " units = "(inches)" } #weekly SD p2 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = sd, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = sd, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = sd, color = "Observed")) + xlab("Week") + ylab(paste0("Standard Deviation ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Standard Deviation in Weekly ", yLabel)) #weekly Skew p3 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = skew, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = skew, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = skew, color = "Observed")) + xlab("Week") + ylab("Skew (-)") + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Skew in Weekly ", yLabel)) #weekly max p4 = ggplot(df.comb) + geom_boxplot(data = subset(df.comb, variable != "Observed"), aes(x = week, y = max, group = week)) + geom_line(data = subset(df.comb, variable == "Observed"), size = 0.5, aes(x = week, y = max, color = "Observed")) + geom_point(data = subset(df.comb, variable == "Observed"), size = 1.5, aes(x = week, y = max, color = "Observed")) + xlab("Week") + ylab(paste0("Maximum ", units)) + theme_classic() + theme(axis.title = element_text(face = "bold"), text=element_text(size=12), panel.grid.major = element_line(), legend.title=element_blank(), plot.title = element_text(size=10) ) + scale_x_continuous(breaks = seq(1,52,2)) + ggtitle(paste0("Average Weekly Maximum ", yLabel)) p.comb = ggarrange(p1, p2, p3, p4, nrow = 2, ncol = 2, common.legend = TRUE, legend = "bottom") print(p.comb) # p.out = paste0(tempdir(), "/outputPlots/weeklyStats_", Tag, ".png") # ggsave(filename = p.out, plot = p.comb, device = "png", height = 8, width = 10, units = "in") } ``` ### plot weekly precipitation ```{r, fig.width=10, fig.height=8} weeklyPlot(sim.pcp, obs.pcp, "Precip") ``` ### plot weekly temperature ```{r, fig.width=10, fig.height=8} weeklyPlot(sim.tmp, obs.tmp, "Temp") ``` ### We can also calculate dry- and wet- spell length statistics for further verification. Here, lower and upper boxplot whiskers are 5th and 95th percentiles, respectively. 
```{r} # dat.d = z$dat.d # X = z$Xpamt # syr = head(dat.d$year, 1) # eyr = tail(dat.d$year, 1) # wLO=0.05; wHI=0.95 #Set whisker percentile for boxplots spellstats <- function(dat.d,X,syr,eyr,nrealz,nsim,wLO,wHI){ #get these variables after running the driver_wx#.R code #dat.d <- z$dat.d #X <- z$X uyr=syr:eyr nyr=length(uyr) #Training Data Tdat=LowerSantaCruzRiverBasinAZ %>% mutate(occ = if_else(prcp>=0.01, 1, 0)) #### END DATA PREPARATION BLOCK TO RUN STATS CODE BELOW #### #to get month sequence nobs=length(unique(Tdat$year)) lpyear=dat.d$year[min(which(lubridate::leap_year(dat.d$year)))] aday <- ymd(paste(lpyear,1,1,sep="-")) #jan 1 of a leap year to have a 366-day year it1=which(dat.d$date==aday) it2=it1+366-1 jdaymth <- dat.d$month[it1:it2] zz <- rep(jdaymth,nsim) yy <- rep(1:nsim,each=366) X1 <- cbind(yy,zz,X) #get spell length stats 3 statistics - mean, var and max Y <- array(NA,dim=c(nrealz,3,2,nsim,12)) #save dry and wet spell lengths by sim yr W <- array(NA,dim=c(3,2,nobs,12)) #save dry and wet spell lengths by obs yr Z <- array(NA,dim=c(3,2,nrealz,12)) #save average spell length by realz V <- array(NA,dim=c(3,2,12)) #save average spell length for obs fidx=seq(1,dim(X1)[1],by=366) for (irealz in 1:nrealz){ for (isim in 1:nsim){ i1=fidx[isim] i2=i1+366-1 for (imth in 1:12){ idxlist=i1 + which(X1[i1:i2,2]==imth) - 1 s <- na.omit(X[idxlist,irealz]) z.f <- spellLengths(s) if (length(z.f$`0`)>0){ Y[irealz,1,1,isim,imth]=mean(z.f$`0`) Y[irealz,2,1,isim,imth]=var(z.f$`0`) Y[irealz,3,1,isim,imth]=max(z.f$`0`) } if (length(z.f$`1`)>0){ Y[irealz,1,2,isim,imth]=mean(z.f$`1`) Y[irealz,2,2,isim,imth]=var(z.f$`1`) Y[irealz,3,2,isim,imth]=max(z.f$`1`) } } #imth }#isim } #irealz for (isim in 1:nobs){ for (imth in 1:12){ s <- dplyr::filter(Tdat, year==unique(Tdat$year)[isim], month==imth)$occ z.f <- spellLengths(s) if (length(z.f$`0`)>0){ W[1,1,isim,imth]=mean(z.f$`0`) W[2,1,isim,imth]=var(z.f$`0`) W[3,1,isim,imth]=max(z.f$`0`) } if (length(z.f$`1`)>0){ W[1,2,isim,imth]=mean(z.f$`1`) W[2,2,isim,imth]=var(z.f$`1`) W[3,2,isim,imth]=max(z.f$`1`) } } #imth }#isim for (imth in 1:12){ Z[1,1,,imth]=apply(Y[,1,1,,imth],1,"mean",na.rm=T) Z[2,1,,imth]=apply(Y[,2,1,,imth],1,"mean",na.rm=T) Z[3,1,,imth]=apply(Y[,3,1,,imth],1,"mean",na.rm=T) Z[1,2,,imth]=apply(Y[,1,2,,imth],1,"mean",na.rm=T) Z[2,2,,imth]=apply(Y[,2,2,,imth],1,"mean",na.rm=T) Z[3,2,,imth]=apply(Y[,3,2,,imth],1,"mean",na.rm=T) V[1,1,imth]=mean(W[1,1,,imth],na.rm=T) V[2,1,imth]=mean(W[2,1,,imth],na.rm=T) V[3,1,imth]=mean(W[3,1,,imth],na.rm=T) V[1,2,imth]=mean(W[1,2,,imth],na.rm=T) V[2,2,imth]=mean(W[2,2,,imth],na.rm=T) V[3,2,imth]=mean(W[3,2,,imth],na.rm=T) } #imth #Boxplots d01 <- as.data.frame(Z[1,1,,]) #average mean dry spell length d02 <- as.data.frame(sqrt(Z[2,1,,])) #average sd dry spell length d03 <- as.data.frame(Z[3,1,,]) #average max dry spell length d01T=V[1,1,] d02T=sqrt(V[2,1,]) d03T=V[3,1,] RecRed = "red" RecBlue = "blue" # pdf(file=paste0(tempdir(), "/outputPlots/DryWetStats.pdf"), width=9, height=4) oldpar = par(mfrow=c(1,3), mar=c(2,2.5,2,1), oma=c(2,2,0,0), mgp=c(2,1,0), cex.axis=0.8) par(mfrow=c(1,3), mar=c(2,2.5,2,1), oma=c(2,2,0,0), mgp=c(2,1,0),cex.axis=0.8) #Mean #ylimit=range(c(d01,d01hist),na.rm=T) bb=boxplot(d01, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d01,d01T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d01[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,na.rm=T, 
outline=F,xlab="",ylab="") mtext("days", side=2, outer = T) title(main="average mean dry spell length") for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d01T, pch=17, cex=1, col=RecRed) lines(1:12, d01T, col=RecRed) #Standard Deviation bb=boxplot(d02, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d02,d02T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d02[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,na.rm=T, outline=F,xlab="",ylab="") title(main="average sd dry spell length") mtext("month", side=1, outer = T, line=0.5) for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d02T, pch=17, cex=1, col=RecRed) lines(1:12, d02T, col=RecRed) #Max Length bb=boxplot(d03, plot=F, na.rm=T, names=1:12) ylimit=c(range(c(d03,d03T),na.rm=T)) out=matrix(nrow=nsim, ncol=12) for(b in 1:12){ x=d03[,b] quants=quantile(x, c(wLO,wHI),na.rm=T) bb$stats[c(1,5),b] = quants outs=which(x < quants[1] | x > quants[2]) out[1:length(outs), b]= x[outs] } bxp(bb, ylim=ylimit,xlab="",ylab="",na.rm=T, outline=F) title(main="average max dry spell length") for(m in 1:12){points(rep(m, length(which(is.na(out[,m])==F))), out[!is.na(out[,m]),m])} points(1:12, d03T, pch=17, cex=1, col=RecRed) lines(1:12, d03T, col=RecRed) dev.off() par(oldpar) ########################################## } ``` ```{r, fig.width=8, fig.height=6} spellstats(dat.d = z$dat.d, X = z$Xpamt, syr = head(dat.d$year, 1), eyr = tail(dat.d$year, 1), nrealz = nrealz, nsim = nsim, wLO = 0.05, wHI = 0.95) ``` ### Step 4: Save your data to file Save your simulated weather ensemble to a file via the `writeSim` function. It will conveniently save each trace to a .csv file. ```{r} # setwd() to desired location for writeSim to save .csv files containing the simulated precipitation and temperature # setwd(tempdir()) # # writeSim(wxOutput = z, nsim = nsim, nrealz = nrealz, debug = TRUE) ``` ### Performance Benchmarking Running the `wx()` weather generator code for a 5-year, 10-trace simulation on a laptop with the following characteristics results in the below run time. Parallel computing was enabled via `parallelize = T` in the wx() function. OS: Microsoft Windows 10 Enterprise 10.0.19044 Build 19044 Hardware: Intel(R) Core(TM) i7-10850H CPU @2.70GHz, 2712 Mhz, 6 Cores, 12 Logical Processors. 16 GB installed physical memory. ```{r} #wxgenR weather generation run time: print(difftime(endTime, startTime, units='mins')) ``` ## Final notes * The weather simulations use a 366-day per year framework in order to handle leap years. During leap years, all 366 days will have precipitation and temperature values (i.e., February 29th exists and contains data), but during non-leap years February 29th does not exist and a row of NULL values is added after December 31st in order to maintain the same length between leap years and non-leap years. Other datasets and algorithms use various approaches to handle leap years, such as avoiding leap years altogether, using a 360-day year, etc. * Because leap years are acceptable in the `wxgenR` algorithm, it is possible (but unlikely) to have two or more leap years in a row in the weather simulations since years are sampled at random. 
* Please report any bugs or issues to either [email protected] or [email protected]

## Citations

For more details and examples, including analysis of the dataset used in this vignette, see the following works:

Bearup, L., Gangopadhyay, S., & Mikkelson, K. (2021). Hydroclimate Analysis Lower Santa Cruz River Basin Study (Technical Memorandum No ENV-2020-056). Bureau of Reclamation. <https://www.usbr.gov/lc/phoenix/programs/lscrbasin/LSCRBS_Hydroclimate_2021.pdf>.

Gangopadhyay, S., Bearup, L. A., Verdin, A., Pruitt, T., Halper, E., & Shamir, E. (2019). A collaborative stochastic weather generator for climate impacts assessment in the Lower Santa Cruz River Basin, Arizona. Fall Meeting 2019, American Geophysical Union. <https://ui.adsabs.harvard.edu/abs/2019AGUFMGC41G1267G>.

Rajagopalan, B., Lall, U., and Tarboton, D. G. (1996). Nonhomogeneous Markov Model for Daily Precipitation, Journal of Hydrologic Engineering, 1, 33–40, <https://doi.org/10.1061/(ASCE)1084-0699(1996)1:1(33)>.

Verdin, A., Rajagopalan, B., Kleiber, W., and Katz, R. W. (2015). Coupled stochastic weather generation using spatial and generalized linear models, Stoch Environ Res Risk Assess, 29, 347–356, <https://doi.org/10.1007/s00477-014-0911-6>.

Verdin, A., Rajagopalan, B., Kleiber, W., Podestá, G., and Bert, F. (2018). A conditional stochastic weather generator for seasonal to multi-decadal simulations, Journal of Hydrology, 556, 835–846, <https://doi.org/10.1016/j.jhydrol.2015.12.036>.

## Disclaimer

This information is preliminary and is subject to revision. It is being provided to meet the need for timely best science. The information is provided on the condition that neither the U.S. Bureau of Reclamation nor the U.S. Government may be held liable for any damages resulting from the authorized or unauthorized use of the information.
/scratch/gouwar.j/cran-all/cranData/wxgenR/vignettes/Vignette_LowerSantaCruzRiverBasinAZ.Rmd
DataFactory <- function() { self <- environment() class(self) <- append('DataFactory', class(self)) identity <- function(x_) x_ aleatory <- function(valueSet_1_, n_i_1, replace_b_1 = TRUE) { sample(valueSet_1_, abs(n_i_1), replace = replace_b_1 || length(valueSet_1_) > abs(n_i_1)) } drawBoolean <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('logical')) aleatory(c(TRUE, FALSE), n, replace_b_1 = replace_b_1) } drawLogical <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('logical')) aleatory(c(TRUE, FALSE, NA), n, replace_b_1 = replace_b_1) } drawIntegerMath <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('integer')) aleatory(-17:17, n, replace_b_1 = replace_b_1) } drawInteger <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('integer')) aleatory(c(-17:17, NA_integer_), n, replace_b_1 = replace_b_1) } drawRealMath <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('double')) stats::runif(n, -17.0, 17.0) } drawDouble <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('double')) aleatory(c(drawRealMath(n, replace_b_1), NA_integer_), n, replace_b_1 = replace_b_1) } drawNumeric <- function(n_i_1, replace_b_1 = TRUE) { fn <- if (stats::runif(1) < .5) drawInteger else drawDouble fn(n_i_1, replace_b_1 = replace_b_1) } drawUnsignedReal <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('integer')) stats::runif(n, 1.0, 17.0) } drawNegativeReal <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('integer')) -1.0 * stats::runif(n, 1.0, 17.0) } drawUnsignedInteger <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('integer')) as.integer(ceiling(drawUnsignedReal(n, replace_b_1))) } drawNegativeInteger <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('integer')) -1L * as.integer(ceiling(drawUnsignedReal(n, replace_b_1))) } buildString <- function(l, replace_b_1 = TRUE) { paste(aleatory(letters, l, replace_b_1), collapse = '') } drawString <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('character')) sapply(seq_len(n), function(e) buildString(aleatory(3:11, 1))) } drawCharacter <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('complex')) sapply(seq_len(n), function(e) { if (stats::runif(1) <= .93) drawString(1, replace_b_1) else NA_character_ }) } drawRaw <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('raw')) charToRaw(buildString(n, replace_b_1)) } drawComplexMath <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('complex')) sapply(seq_len(n), function(e) { complex(1, drawIntegerMath(1), drawIntegerMath(1)) }) } drawComplex <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) if (n == 0) return(vector('complex')) sapply(seq_len(abs(n)), function(e) { if (stats::runif(1) <= .93) drawComplexMath(1, replace_b_1) else NA_complex_ }) } buildDateString <- function() { y <- sample(2000:2030, 1) m <- sample(1:12, 1) md <- ifelse(m %in% c(1, 3, 5, 7, 8, 10, 12), 31, ifelse(m %in% c(4, 6, 9, 11), 30, ifelse(y %% 4 == 0, 28, 27))) d <- sample(1:md, 1) sprintf('%04d-%02d-%02d', y, m, d) } drawDate <- function(n_i_1, replace_b_1 = TRUE) { n <- abs(n_i_1) n <- ifelse(n == 0, 1, n) # no way to create a 0 length vector of class Date s <- sapply(seq_len(n), 
function(e) { buildDateString() }) as.Date(sample(s, n, replace_b_1)) } drawPOSIXctDate <- function(n_i_1, replace_b_1 = TRUE) { as.POSIXct(drawDate(n_i_1, replace_b_1)) } drawList <- function(n_i_1, replace_b_1 = TRUE, forceHomogeneousType_b_1 = FALSE, allowSublist_b_1 = TRUE, needContext_b_1 = FALSE) { n <- abs(n_i_1) if (n == 0) return(list(data = list(), context = 'x_')) mbt <- if (allowSublist_b_1) base_types else setdiff(base_types, 'l') if (forceHomogeneousType_b_1) { bt <- aleatory(mbt, 1) df <- getDrawFunction(bt, FALSE) lfn <- lapply(seq_len(n), function(e) df) } else { bt <- if (!'l' %in% mbt) aleatory(mbt, n, TRUE) else { l <- length(mbt) w <- which(mbt == 'l') proba <- rep(.7 / l, l) proba[w] <- .3 sample(mbt, n, TRUE, prob = proba) } lfn <- lapply(bt, function(e) { getDrawFunction(e, FALSE) }) } data <- lapply(seq_len(n), function(e) { ns <- aleatory(0L:7L, 1, FALSE) lfn[[e]]$fun(aleatory(ns, 1)) }) if (!needContext_b_1) return(list(data = data)) ctxt <- sapply(seq_len(n), function(e) { lfn[[e]]$suffix }) return(list(data = data, context = ctxt)) } verifyFunctionDeclaration <- function(suffix_s_1, typeVerifier_f_1) { if (suffix_s_1 != 'l' && !matchFunctionSignature(typeVerifier_f_1, drawBoolean)) return(FALSE) vfn <- tf$getVerificationFunction(suffix_s_1) if (!is.function(vfn)) return(FALSE) data <- typeVerifier_f_1(3L) if (is.list(data)) { all(sapply(data, vfn) == TRUE) } else { all(vfn(data) == TRUE) } } getRowNumber <- function(value_s_1) { if (value_s_1 %in% dt$suffix) return(which(dt$suffix == value_s_1)) if (value_s_1 %in% dt$type) return(which(dt$type == value_s_1)) NA } getRecordedTypes <- function() copy(dt[order(suffix)]) retrieveKnownSuffixes <- function() dt$suffix checkSuffix <- function(suffix_s_1) suffix_s_1[1] %in% dt$suffix addSuffix <- function(suffix_s_1, type_s_1, typeVerifier_f_1) { if (!is.function(typeVerifier_f_1)) return(FALSE) s <- gsub('_*([A-Za-z].*)', '\\1', suffix_s_1, perl = TRUE) if (!verifyFunctionDeclaration(s, typeVerifier_f_1)) return(FALSE) rv <- checkSuffix(s) if (!rv) dt <<- data.table::rbindlist(list(dt, list(s, type_s_1, list(typeVerifier_f_1)))) !rv } getType <- function(value_s_1, humanUser_b_1 = TRUE) { rn <- getRowNumber(value_s_1[1]) if (is.na(rn)) { if (!humanUser_b_1) abort('no suffix or type associated matches', strBracket(value_s_1[1])) return(paste('No suffix or type matches', strBracket(value_s_1[1]))) } dt[rn]$type } getDrawFunction <- function(value_s_1, humanUser_b_1 = TRUE) { rn <- getRowNumber(value_s_1[1]) if (is.na(rn)) { if (!humanUser_b_1) abort('no draw function associated with', strBracket(value_s_1[1])) return(paste('No draw function matches', strBracket(value_s_1[1]))) } list(fun = dt[rn]$draw_function[[1]], suffix = dt[rn]$suffix) } drawValues <- function(parameterName_s_1, numberOfValues_i_1 = NA_integer_, forceHomogeneousType_b_1 = TRUE, allowSubList_b_1 = FALSE, forceList_b_1 = TRUE) { fpn <- wyz.code.offensiveProgramming::FunctionParameterName(parameterName_s_1) if (!fpn$isSemanticName()) abort('parameter name', strBracket(parameterName_s_1), 'must be a semantic name') mx <- ifelse(forceHomogeneousType_b_1, 7L, 3L) ns <- if (is.na(numberOfValues_i_1)) aleatory(0L:mx, 1, FALSE) else abs(numberOfValues_i_1) if (fpn$isPolymorphic()) { l <- drawList(ns, TRUE, forceHomogeneousType_b_1, allowSubList_b_1, TRUE) l$n <- ns return(l) } rs <- fpn$getTypeSuffix() df <- getDrawFunction(rs, FALSE) lsuf <- fpn$getLengthSuffix() if (!is.na(lsuf)) { # if valid length suffix, then consider it for data generation lm <- 
fpn$getLengthModifier() #cat(parameterName_s_1, ' lsuf=', strBracket(lsuf), ' n=', strBracket(ns), # ' lm=', strBracket(lm), sep = '', '\n') ns <- if (is.na(lm)) lsuf else { if (lm == 'm') { #cat('m way\n') aleatory(seq_len(lsuf) + lsuf - 1, 1) } else { if (lm == 'l') { #cat('l way\n') aleatory(0:lsuf, 1) } else { #cat('n way\n') if (stats::runif(1) < .5) lsuf else 1 } } } #cat('ns=', ns, '\n') } # cat(strBracket(parameterName_s_1), ' n=', strBracket(ns), sep = '', '\n') cv <- if (forceList_b_1) as.list else identity if (rs != 'l') return(list(data = cv(df$fun(ns)), context = df$suffix, n = ns)) m <- df$fun(ns, TRUE, forceHomogeneousType_b_1, allowSubList_b_1, TRUE) list(data = m$data, context = 'l', subcontext = m$context, n = ns) } simpleTypes <- list( list('b' , 'boolean' , list(drawBoolean) ), list('lo' , 'logical' , list(drawLogical) ), list('i' , 'integer' , list(drawInteger) ), list('im' , 'integer-math' , list(drawIntegerMath) ), list('d' , 'double' , list(drawDouble) ), list('r' , 'real-math' , list(drawRealMath) ), list('rm' , 'real-math alias' , list(drawRealMath) ), list('n' , 'numeric' , list(drawNumeric) ), list('ui' , 'unsigned integer', list(drawUnsignedInteger) ), list('pi' , 'positive integer', list(drawUnsignedInteger) ), list('ni' , 'negative integer', list(drawNegativeInteger) ), list('ur' , 'unsigned real' , list(drawUnsignedReal) ), list('pr' , 'positive real' , list(drawUnsignedReal) ), list('nr' , 'negative real' , list(drawNegativeReal) ), list('ra' , 'raw' , list(drawRaw) ), list('ch' , 'character' , list(drawCharacter) ), list('s' , 'string' , list(drawString) ), list('c' , 'complex' , list(drawComplex) ), list('cm' , 'complex-math' , list(drawComplexMath) ), list('da' , 'date' , list(drawDate) ), list('dc' , 'POSIXct' , list(drawPOSIXctDate) ), list('l' , 'list' , list(drawList) ) ) suffix <- NULL # data.table NSE issue with Rcmd check dt <- data.table::rbindlist(simpleTypes) data.table::setnames(dt, colnames(dt), c('suffix', 'type', 'draw_function')) stopifnot(all(sapply(dt$draw_function, function(e) is.function(e)) == TRUE)) base_types <- dt$suffix tf <- retrieveFactory() # enforce draw_function compliance checks invisible(sapply(seq_len(nrow(dt)), function(k) { if (!verifyFunctionDeclaration(dt[k]$suffix, dt[k]$draw_function[[1]])) abort(dt[k]$suffix, 'function declaration mismatch') })) self }
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/DataFactory.R
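A brief, illustrative sketch of how the factory above can be used (not part of the package sources; 'quantity_ui_3' is an arbitrary example of a semantic parameter name, meaning parameter "quantity", type 'ui', length 3):

# illustrative only: build a factory and draw values for a semantic parameter name
df <- DataFactory()
df$retrieveKnownSuffixes()            # suffixes the factory can generate ('b', 'i', 's', 'l', ...)
df$getType('ui')                      # "unsigned integer"
drawn <- df$drawValues('quantity_ui_3')
str(drawn$data)                       # three generated unsigned integers (as a list)
drawn$context                         # suffix actually used for generation, here "ui"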
buildArgumentsSignature <- function(argumentNames_l) {
  if (length(argumentNames_l) == 0) return(character(0))
  paste(Filter(function(e) length(e) > 0, unlist(argumentNames_l)), collapse = ', ')
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/buildArgumentsSignature.R
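A quick sketch of the helper above (the argument names are made up for the example):

buildArgumentsSignature(list('x_i_1', character(0), 'y_s'))   # "x_i_1, y_s"
buildArgumentsSignature(list())                               # character(0)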
buildEllipsisNames <- function(ellipsisReplacementNumber_ui_1 = 3L,
                               ellipsisNameString_s_1 = getEllipsisSubstitutionName()) {
  es <- ifelse(length(ellipsisNameString_s_1) == 0, getEllipsisSubstitutionName(),
               ifelse(endsWith(ellipsisNameString_s_1, '_'),
                      gsub('_', '', ellipsisNameString_s_1, fixed = TRUE),
                      ellipsisNameString_s_1)
  )
  n <- if (ellipsisReplacementNumber_ui_1 < 1) 1 else ellipsisReplacementNumber_ui_1
  buildSemanticArgumentName('_', paste0(es, seq_len(n)))
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/buildEllipsisNames.R
buildEllipsisSignature <- function(ellipsisReplacementNumber_ui_1 = 3L,
                                   ellipsisNameString_s_1 = 'ellipsis') {
  if (ellipsisReplacementNumber_ui_1 == 0) return(list(character(0)))
  en <- buildEllipsisNames(ellipsisReplacementNumber_ui_1, ellipsisNameString_s_1)[-1]
  l <- sapply(seq_len(length(en)), function(k) buildArgumentsSignature(en[1:k]), simplify = FALSE)
  append(as.list(unlist(l)), list(character(0)), 0)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/buildEllipsisSignature.R
buildSemanticArgumentName <- function(suffix_s_1, variableName_s_1 = 'x_') {
  paste0(ifelse(endsWith(variableName_s_1, '_'), variableName_s_1, paste0(variableName_s_1, '_')),
         ifelse(suffix_s_1 == '_', '', suffix_s_1)
  )
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/buildSemanticArgumentName.R
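Illustrative calls to the helper above (the base names 'label', 'count_' and 'x' are example values):

buildSemanticArgumentName('s', 'label')    # "label_s"
buildSemanticArgumentName('ui', 'count_')  # "count_ui"
buildSemanticArgumentName('_', 'x')        # "x_"  (polymorphic name, no type suffix)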
codePatcher <- function(code_, originArgumentName_s, targetArgumentName_s) {
  substituteSymbol <- function(inputSymbol_) {
    s <- as.character(inputSymbol_)
    w <- which(originArgumentName_s == s)
    if (length(w) == 1) as.symbol(targetArgumentName_s[w]) else inputSymbol_
  }
  substituteLanguage <- function(inputLanguage_) {
    l <- length(inputLanguage_)
    outputLanguage <- inputLanguage_
    n <- 1
    repeat {
      t <- typeof(inputLanguage_[[n]])
      if (t == 'language') outputLanguage[[n]] <- substituteLanguage(inputLanguage_[[n]])
      if (t == 'symbol') outputLanguage[[n]] <- substituteSymbol(inputLanguage_[[n]])
      if (n >= l) break;
      n <- n + 1
    }
    outputLanguage
  }
  stopifnot(all(length(originArgumentName_s) == length(targetArgumentName_s)))
  new_code <- code_
  if (is.list(code_)) names(new_code) <- targetArgumentName_s
  lapply(new_code, function(e) {
    t <- typeof(e)
    if (t == 'language') { return(substituteLanguage(e)) }
    if (t != 'symbol') return(e)
    substituteSymbol(e)
  })
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/codePatcher.R
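An illustrative use of codePatcher on a function's formals (the toy function f and the replacement names x_n_1, y_n_1 are invented for the example):

# rename formal arguments while rewriting default expressions that reference old names
f <- function(a, b = a + 1) a * b
patched <- codePatcher(as.list(formals(f)), c('a', 'b'), c('x_n_1', 'y_n_1'))
names(patched)    # "x_n_1" "y_n_1"
patched$y_n_1     # the default expression, rewritten as x_n_1 + 1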
computeArgumentsCombination <- function(fun_f_1) { combine <- function(v_s) { l <- append( unlist(lapply(seq_len(length(v_s)), function(k) { combn(v_s, k, simplify = FALSE) }), FALSE), list(character(0)), 0) lapply(l, function(e) if (length(e) <= 1) e else paste(e, collapse = ', ')) } spa <- function(a_s_1, b_s, ...) { paste(a_s_1, paste(b_s, collapse = ', '), ..., sep = ', ', collapse = ', ') } combineLists <- function(x_l, y_l) { l <- lapply(x_l, function(e) { lapply(y_l, function(h) { if (length(e) == 0) return(h) if (length(h) == 0) return(e) paste(e, h, collapse = ', ', sep = ', ') }) }) unlist(l, FALSE) } qfa <- qualifyFunctionArguments(fun_f_1) ar <- length(qfa$stripped_symbol_names) # mandatory arguments as <- qfa$stripped_symbol_names er <- if (qfa$owns_ellipsis == TRUE) 0:3 else 0 mer <- max(er) en <- buildEllipsisNames(3) es <- if (mer == 0) list(character(0)) else list( character(0), en[1], spa(en[1], en[2]), spa(en[1], en[2], en[3]) ) l <- length(qfa$default_names) dr <- if (l == 0) 0 else seq.int(0, l) mdr <- max(dr) ds <- if (mdr == 0) list(character(0)) else combine(qfa$default_names) nz <- lapply(qfa$argument_names, function(e) { if (e %in% qfa$stripped_symbol_names) return(list(e)) if (e %in% qfa$default_names) return(list(character(0), e)) es }) siglen <- 2^(length(qfa$default_names) + ifelse(mer == 0, 0, 2)) lnz <- length(nz) if (lnz == 0) sig <- list(character(0)) else { if (lnz == 1) sig <- as.list(nz[[1]]) else { z <- nz[[1]] lapply(seq_len(lnz - 1), function(k) { z <<- combineLists(z, nz[[k + 1]]) }) sig <- z } } list(names = list(argument = as, ellipsis = es, default = ds), number = list(argument = ar, ellipsis = er, default = dr), signatures = sig, theoritical_signature_number = siglen ) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/computeArgumentsCombination.R
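A usage sketch for the signature enumeration above, mirroring the unit-test files later in this dump (illustrative only):

op_sum <- opwf(sum, c('...', 'removeNA_b_1'))
cac <- computeArgumentsCombination(op_sum)
cac$names$ellipsis    # ellipsis substitution names, from 0 up to 3 replacements
cac$names$default     # combinations of the defaulted arguments
cac$signatures        # all candidate calling signatures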
computeArgumentsSignature <- function(argumentNames_s, defaultValued_b_1 = TRUE) {
  if (!defaultValued_b_1) return(as.list(buildArgumentsSignature(argumentNames_s)))
  la <- length(argumentNames_s)
  if (la == 0) return(list(character(0)))
  r <- lapply(seq_len(la), function(k) {
    v <- utils::combn(argumentNames_s, k)
    if (k == 1) return(as.list(v[1, ]))
    as.list(apply(v, 2, buildArgumentsSignature))
  })
  append(as.list(unlist(r)), list(character(0)), 0)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/computeArgumentsSignature.R
exploreSignatures <- function(fun_f_1, argumentsTypeRestrictions_l = list(), signaturesRestrictions_l = list()) { associateSignature <- function(generatedData_l) { l <- length(generatedData_l$generation$argname) if (l == 0) return(ifelse(length(cs$signatures[[1]]) == 0, "no argument signature", 'no match for 0 length signature')) lsig <- paste(generatedData_l$generation$argname, collapse = ', ') k <- 1 s <- length(cs$signatures) repeat { if (length(cs$signatures[[k]]) > 0) { if (cs$signatures[[k]][1] == lsig) return(lsig) } k <- k + 1 if (k > s) return(paste0("no match for [", lsig, '] in signatures')) } } ntests <- 1 rc <- unlist(erc, FALSE) matchExecutionContext <- function(replacementContext_s_1, ellipsisReplacementContext_s_1, defaultArgumentsContext_s_1) { if (qfa$owns_ellipsis) g <- generateEllipsisSubstitutionName(qfa$argument_names) z <- lapply(sigres, function(h) { la <- length(qfa$stripped_symbol_names) if (length(h) == 0) return( rc[[replacementContext_s_1]]$number_replacement == 0 && rc[[ellipsisReplacementContext_s_1]]$number_replacement == 0 && dac[[defaultArgumentsContext_s_1]]$use == FALSE) a <- if (la > 0) { p <- paste(qfa$stripped_symbol_names, collapse = ', ') grepl(p, h, fixed = TRUE) } else TRUE s <- strsplit(h, ', ', fixed = TRUE)[[1]] e <- if (qfa$owns_ellipsis) { v <- grepl(paste0(g, "[1-3]"), s, perl = TRUE) n <- length(v[v == TRUE]) rc[[ellipsisReplacementContext_s_1]]$number_replacement == n } else TRUE ldn <- length(qfa$default_names) d <- if (ldn > 0) { v <- grepl(paste0(paste(qfa$default_names, collapse = '|')), s, perl = TRUE) n <- length(v[v == TRUE]) if (dac[[defaultArgumentsContext_s_1]]$use == FALSE) { n == 0 } else { if (dac[[defaultArgumentsContext_s_1]]$use_all) { n == ldn } else { n > 0 && n < ldn } } } else TRUE a && e && d }) any(z == TRUE) } runTest <- function(replacementContext_s_1, ellipsisReplacementContext_s_1, defaultArgumentsContext_s_1) { if (!matchExecutionContext(replacementContext_s_1, ellipsisReplacementContext_s_1, defaultArgumentsContext_s_1)) { # cat('execution context does not match:', replacementContext_s_1, ellipsisReplacementContext_s_1, # defaultArgumentsContext_s_1, '\n') return(NULL) } gd <- generateData(fun_f_1, argumentsTypeRestrictions_l, rc[[replacementContext_s_1]], rc[[ellipsisReplacementContext_s_1]], dac[[defaultArgumentsContext_s_1]]) r <- testFunction(fun_f_1, gd$data, fn) r$argument_replacement <- replacementContext_s_1 r$ellipsis_replacement <- ellipsisReplacementContext_s_1 r$default_replacement <- defaultArgumentsContext_s_1 r$signature <- associateSignature(gd) r$test_number <- ntests ntests <<- ntests + 1 r } runCampaigns <- function(replacementContext_s, ellipsisReplacementStrategy_s, defaultReplacementStrategy_s) { lapply(replacementContext_s, function(r) { lapply(ellipsisReplacementStrategy_s, function(e) { lapply(defaultReplacementStrategy_s, function(d) { runTest(r, e, d) }) }) }) } computeReplacementSynthesis <- function(x_s, n_ui_1 = 3L) { s <- strsplit(x_s, '(\\.|_)') paste0('{', unlist(lapply(seq_len(n_ui_1), function(k) { paste( unique(unlist(lapply(s, function(e) e[k]))), collapse = ',') })), '}', collapse = '_') } stopifnot(is.function(fun_f_1)) fn <- deparse(substitute(fun_f_1)) qfa <- qualifyFunctionArguments(fun_f_1) # sharpen standard argument replacement strategy lsa <- length(qfa$stripped_symbol_indexes) sn <- names(unlist(erc, FALSE)) srs <- if (lsa == 0) sn[1] else sn[-c(1, 5, 9, 13)] # sharpen default argument replacement strategy lda <- length(qfa$default_indexes) dn <- names(dac) drs <- 
if (lda == 0) dn[1] else { if (lda == 1) dn[-2] else dn } # sharpen ellipsis argument replacement strategy ers <- if (qfa$owns_ellipsis) sn else sn[1] # run tests cs <- computeArgumentsCombination(fun_f_1) ls <- length(signaturesRestrictions_l) sigres <- if (ls > 0) { i <- intersect(cs$signatures, signaturesRestrictions_l) if (length(i) != ls) abort('unknwon signatures are not allowed', strBracket(setdiff(signaturesRestrictions_l, i))) i } else cs$signatures rv <- runCampaigns(srs, ers, drs) lrv <- unlist(unlist(rv, FALSE), FALSE) lrv <- Filter(function(e) !is.null(e), lrv) # build result for humans bad <- Filter(function(e) !e$result$status, lrv) good <- Filter(function(e) e$result$status, lrv) gg <- lapply(good, function(e) { list( test_number = e$test_number, call_string = paste(deparse(e$call), collapse = '---'), result = list(e$result$result) ) }) g <- lapply(good, function(e) { list( test_number = e$test_number, call_signature = e$signature, replacement = e$argument_replacement, ellipsis = e$ellipsis_replacement, default = e$default_replacement ) }) lg <- if (length(g) > 0) { dg <- rbindlist(g) ll <- list( number_successfull_tests = nrow(dg), signatures = unique(dg$call_signature), imperative = computeReplacementSynthesis(unlist(dg$replacement)), ellipsis = computeReplacementSynthesis(unlist(dg$ellipsis)), default = computeReplacementSynthesis(unlist(dg$default), 1) ) if (length(qfa$stripped_symbol_names) == 0) ll$imperative <- NULL if (qfa$owns_ellipsis == FALSE) ll$ellipsis <- NULL if (length(qfa$default_names) == 0) ll$default <- NULL list(code = rbindlist(gg), table = dg, synthesis = ll) } else list(table = NA, synthesis = NA) b <- lapply(bad, function(e) { list( test_number = e$test_number, error = e$result$errorMessage, call_signature = e$signature, replacement = e$argument_replacement, ellipsis = e$ellipsis_replacement, default = e$default_replacement ) }) lb <- if (length(b) > 0) { db <- rbindlist(b) ll <- list( number_erroneous_tests = nrow(db), error = unique(db$error), signatures = unique(db$call_signature), imperative = computeReplacementSynthesis(unlist(db$replacement)), ellipsis = computeReplacementSynthesis(unlist(db$ellipsis)), default = computeReplacementSynthesis(unlist(db$default), 1) ) if (length(qfa$stripped_symbol_names) == 0) ll$imperative <- NULL if (qfa$owns_ellipsis == FALSE) ll$ellipsis <- NULL if (length(qfa$default_names) == 0) ll$default <- NULL list(table = db, synthesis = ll) } else list(table = NA, synthesis = NA) list(info = list(raw = lrv, good = good, bad = bad), success = lg, failure = lb) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/exploreSignatures.R
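A minimal exploration sketch, following the pattern of the unit-test file ut-exploreSignatures.R further below (illustrative only):

op_cos <- opwf(cos, c('radianAngleOrComplex_'))
rv <- exploreSignatures(op_cos, list(radianAngleOrComplex_ = c('im', 'r', 'cm')))
rv$success$synthesis   # summary of the generated calls that succeeded
rv$failure$synthesis   # summary of the generated calls that failed, if any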
generateData <- function(function_f_1, argumentsTypeRestrictions_l = list(), replacementContext_l = setGenerationContext(), ellipsisReplacementContext_l = setGenerationContext(), defaultArgumentsContext_l = setDefaultArgumentsGenerationContext(), functionName_s_1 = deparse(substitute(function_f_1))) { enforceSemanticIdentifier <- function(values_s) { ifelse(grepl('_', values_s, fixed = TRUE), values_s, buildSemanticArgumentName(values_s)) } extractSubValues <- function(values_l, subValueName_s_1) { lapply(values_l, function(e) e[[subValueName_s_1]]) } if (!usesSemanticArgumentNames(function_f_1)) abort('function', strBracket(functionName_s_1), 'owns arguments that are not semantic names') qfa <- qualifyFunctionArguments(function_f_1) df <- retrieveDataFactory() # argumentsTypeRestrictions_l checks # 1. names must be unique # 2. names must match argument names # 3. values must be known lar <- length(argumentsTypeRestrictions_l) atr <- if (lar > 0) { nm <- names(argumentsTypeRestrictions_l) du <- nm[duplicated(nm)] if (length(du) != 0) abort('following argument restriction names are duplicated', strBracket(strJoin(du))) sd <- setdiff(nm, qfa$argument_names) if (length(sd) != 0) abort('following argument restriction names are not matching any function argument', strBracket(strJoin(sd))) u <- unique(unlist(argumentsTypeRestrictions_l)) b <- sapply(u, df$checkSuffix) if (!all(b)) abort('following argument restriction values are illegal', strBracket(strJoin(u[!b]))) lapply(argumentsTypeRestrictions_l, enforceSemanticIdentifier) } else list() ellipsis <- getEllipsisName() nm <- qfa$argument_names if (ellipsisReplacementContext_l$number_replacements == 0) nm <- setdiff(nm, ellipsis) if (!defaultArgumentsContext_l$use) nm <- setdiff(nm, qfa$default_names) semell <- character(0) ellnames <- character(0) owns_ellipsis <- ellipsis %in% nm if (owns_ellipsis) { if (ellipsisReplacementContext_l$number_replacements > 0) { rks <- if (lar == 0 || !ellipsis %in% names(argumentsTypeRestrictions_l)) { if (ellipsisReplacementContext_l$force_list) 'l' else { d <- df$retrieveKnownSuffixes() if (ellipsisReplacementContext_l$allow_list) d else setdiff(d, 'l') } } else { atr[[ellipsis]] } nell <- ifelse(ellipsisReplacementContext_l$homogeneous_type, 1, ellipsisReplacementContext_l$number_replacements) ell <- sample(rks, nell, replace = nell > length(rks)) ellnames <- buildEllipsisNames(ellipsisReplacementContext_l$number_replacements, generateEllipsisSubstitutionName(nm)) semell <- if (nell == 1) rep(ell, ellipsisReplacementContext_l$number_replacements) else ell names(semell) <- ellnames l <- length(ell) if (l > 0) { w <- which(nm == ellipsis) nm <- append(nm[-w], ellnames, w - 1) } } } tracker <- list() ne <- 0 ef <- if (ellipsisReplacementContext_l$force_list) as.list else as.vector rf <- if (replacementContext_l$force_list) as.list else as.vector l <- lapply(nm, function(e) { #cat('name=', strBracket(e), '\n') ex <- if (owns_ellipsis && e %in% ellnames) { cv <- ef rc <- ellipsisReplacementContext_l ne <<- ne + 1 if (lar > 0 && ellipsis %in% names(argumentsTypeRestrictions_l)) { enforceSemanticIdentifier(semell[ne])} else e } else { fpn <- FunctionParameterName(e) is_polymorphic <- fpn$isPolymorphic() if (is_polymorphic) { cv <- rf rc <- replacementContext_l } else { is_list <- fpn$getTypeSuffix() == 'l' cv <- if (is_list) as.list else as.vector rc <- setGenerationContext(replacementContext_l$number_replacements, FALSE, is_list, is_list) } if (is_polymorphic && lar > 0 && e %in% 
names(argumentsTypeRestrictions_l)) { sample(atr[[e]], 1) } else e } tracker[[length(tracker) + 1]] <<- list('argname' = e, 'semantic' = ex) #cat('name=', strBracket(ex), '\n') # number of replacements must come from replacement_context_l cv(df$drawValues(ex, ifelse(owns_ellipsis, NA_integer_, replacementContext_l$number_replacements), rc$homogeneous_type, rc$allow_list, rc$force_list) ) }) names(l) <- nm list(generation = rbindlist(tracker), data = extractSubValues(l, 'data'), context = extractSubValues(l, 'context'), n = extractSubValues(l, 'n')) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/generateData.R
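A short sketch of data generation for a semantically named wrapper (illustrative only; op_cos is an example wrapper built with opwf, defined later in this dump):

op_cos <- opwf(cos, c('radianAngleOrComplex_'))
gd <- generateData(op_cos)
gd$generation   # data.table tracking each argument name and the drawn semantic type
gd$data         # the generated argument values, ready to feed do.call()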
generateEllipsisSubstitutionName <- function(argumentNames_s) {
  el <- getEllipsisSubstitutionName()
  verifyNoMatch <- function(elvalue_s_1) {
    b <- sapply(argumentNames_s, startsWith, elvalue_s_1)
    all(!b)
  }
  if (verifyNoMatch(el)) return(el)
  while (TRUE) {
    el <- paste0(el, sample(LETTERS, 1))
    if (verifyNoMatch(el)) return(el)
  }
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/generateEllipsisSubstitutionName.R
getEllipsisSubstitutionName <- function() 'ellipsis'
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/getEllipsisName.R
opMetaTestingInformation <- function() { stratum <- buildIdentityList(c('core', paste0('layer_', 1:3))) phasing <- buildIdentityList(c('design', 'build', 'test', 'run', 'maintain', 'evolve', 'transversal')) intent <- buildIdentityList(c('parts_building', 'parts_assembly', 'quality_control', 'statistics', 'feedback', 'content_generation', 'utilities')) category <- buildIdentityList(c('function', 'class', 'data')) nature <- buildIdentityList(c('exported', 'internal')) buildList <- function(name_s_1, category_s_1, nature_s_1, stratum_s_1, phasing_s_1, intent_s_1) { list(name = name_s_1, category = category_s_1, nature = nature_s_1, stratum = stratum_s_1, phasing = phasing_s_1, intent = intent_s_1 ) } bec <- function(name_s_1, stratum_s_1, phasing_s_1, intent_s_1) { buildList(name_s_1, category$CLASS, nature$EXPORTED, stratum_s_1, phasing_s_1, intent_s_1) } bic <- function(name_s_1, stratum_s_1, phasing_s_1, intent_s_1) { buildList(name_s_1, category$CLASS, nature$INTERNAL, stratum_s_1, phasing_s_1, intent_s_1) } bef <- function(name_s_1, stratum_s_1, phasing_s_1, intent_s_1) { buildList(name_s_1, category$FUNCTION, nature$EXPORTED, stratum_s_1, phasing_s_1, intent_s_1) } bif <- function(name_s_1, stratum_s_1, phasing_s_1, intent_s_1) { buildList(name_s_1, category$FUNCTION, nature$INTERNAL, stratum_s_1, phasing_s_1, intent_s_1) } dt <- data.table::rbindlist(list( bef('qualifyFunctionArguments', stratum$LAYER_3, phasing$BUILD, intent$PARTS_BUILDING), bef('buildSemanticArgumentName', stratum$CORE, phasing$BUILD, intent$PARTS_BUILDING), bic('DataFactory', stratum$CORE, phasing$RUN, intent$UTILITIES), bef('retrieveDataFactory', stratum$CORE, phasing$RUN, intent$UTILITIES), bef('generateData', stratum$CORE, phasing$RUN, intent$PARTS_BUILDING), bef('opwf', stratum$CORE, phasing$RUN, intent$UTILITIES), bef('offensiveProgrammingWrapFunction', stratum$CORE, phasing$RUN, intent$UTILITIES), bef('usesSemanticArgumentNames', stratum$CORE, phasing$BUILD, intent$PARTS_BUILDING), bef('testFunction', stratum$LAYER_1, phasing$BUILD, intent$QUALITY_CONTROL), bef('setGenerationContext', stratum$LAYER_1, phasing$RUN, intent$UTILITIES), bef('setDefaultArgumentsGenerationContext', stratum$LAYER_3, phasing$RUN, intent$UTILITIES), bef('opMetaTestingInformation', stratum$LAYER_3, phasing$RUN, intent$FEEDBACK), bef('computeArgumentsCombination', stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), bef('exploreSignatures', stratum$LAYER_3, phasing$RUN, intent$PARTS_BUILDING), bef('dac', stratum$LAYER_3, phasing$RUN, intent$UTILITIES), bef('default_arguments_context', stratum$LAYER_3, phasing$RUN, intent$UTILITIES), bef('erc', stratum$LAYER_3, phasing$RUN, intent$UTILITIES), bef('established_replacement_context', stratum$LAYER_3, phasing$RUN, intent$UTILITIES), bif('computeArgumentsSignature', stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), bif('buildArgumentsSignature', stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), bif('buildEllipsisNames', stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), bif('buildEllipsisSignature', stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), bif('codePatcher', stratum$CORE, phasing$BUILD, intent$PARTS_BUILDING), bif('generateEllipsisSubstitutionName', stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), bif('getEllipsisSubstitutionName', stratum$LAYER_1, phasing$BUILD, intent$PARTS_BUILDING), bif('patchArgumentInCode', stratum$CORE, phasing$BUILD, intent$PARTS_BUILDING), bif('removeEllipsisName', stratum$CORE, phasing$BUILD, intent$PARTS_BUILDING) )) name <- 
NULL # nse dt[order(name)] }
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/opMetatTestingInformation.R
opwf <- function(fun_f_1, parameterNames_s, functionName_s_1 = NA_character_) { qfa <- qualifyFunctionArguments(fun_f_1) l <- length(parameterNames_s) lfa <- length(qfa$arguments) if (lfa != l) abort('function owns', lfa, 'arguments, you provided', l, 'arguments') if (l == 0) return(fun_f_1) # no work on functions without arguments if (l > 0) { rv <- sapply(seq_len(l), function(k) { x <- wyz.code.offensiveProgramming::FunctionParameterName(parameterNames_s[k]) x$isSemanticName() }) if (!all(rv)) abort('provided parameter names are not all semantic names', strBracket(paste(parameterNames_s[!rv], collapse = ', '))) } ff <- qfa$arguments names(ff) <- parameterNames_s audit <- wyz.code.offensiveProgramming::isAuditable() if (qfa$owns_ellipsis) { substitution_names <- removeEllipsisName(parameterNames_s) sfa <- removeEllipsisName(qfa$argument_names) args <- qfa$arguments[-qfa$ellipsis_index] } else { substitution_names <- parameterNames_s sfa <- qfa$argument_names args <- qfa$arguments } fg <- codePatcher(args, sfa, substitution_names) callParameters <- function() { sapply(seq_len(l), function(k) { if (is.symbol(qfa$arguments[[k]])) parameterNames_s[k] else { paste(qfa$argument_names[k], '=', parameterNames_s[k]) } }) } f <- function() {} formals(f) <- if (qfa$owns_ellipsis) append(fg, ff[getEllipsisName()], qfa$ellipsis_index - 1) else fg dsf <- ifelse(is.na(functionName_s_1), deparse(substitute(fun_f_1)), functionName_s_1) bd <- paste0('`', dsf, '`', '(', paste(callParameters(), collapse = ', '), ')') if (audit) cat('>>> patching body with', bd, '\n') e <- str2lang(bd) if (audit) { cat('>>>result\n'); print(e) } body(f) <- as.call(c(as.name('{'), e)) # some errors might remain in code translation - # uneasy to detect them as it implies function execution with valued arguments. f } offensiveProgrammingWrapFunction <- opwf
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/opwf.R
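A usage sketch for opwf, matching the calls used in the unit-test files of this package (illustrative only):

# wrap a base function so that its arguments carry semantic names
op_sum <- opwf(sum, c('...', 'removeNA_b_1'))
formals(op_sum)                      # arguments are now ... and removeNA_b_1
op_sum(1:10, removeNA_b_1 = TRUE)    # delegates to sum(..., na.rm = removeNA_b_1), returns 55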
abort <- function(msg_s_1, ...) {
  stop(paste(msg_s_1, ...))
}

strBracket <- function(text_s_n) {
  paste0('[', text_s_n, ']')
}

strJoin <- function(text_s, join_s_n = ', ') {
  paste(text_s, sep = '', collapse = join_s_n)
}

guardExecution <- function(yourExpression_ex, functionName_s_1 = 'no function name provided') {
  rv <- tryCatch(yourExpression_ex, error = function(e) e, warning = function(w) w)
  processed_without_warning <- !methods::is(rv, 'warning')
  processed_without_error <- !methods::is(rv, 'error')
  brv <- function(status_b_1, result_, warningMessage_s_1, errorMessage_s_1) {
    list(
      status = status_b_1,
      function_name = functionName_s_1,
      warning_message = warningMessage_s_1,
      errorMessage = errorMessage_s_1,
      result = result_
    )
  }
  if (!processed_without_error) return(brv(FALSE, errorCondition, '', as.character(rv)))
  if (!processed_without_warning) {
    nv <- tryCatch(suppressWarnings(yourExpression_ex), error = function(e) e)
    good_processing <- !methods::is(nv, 'error')
    return(brv(good_processing, if (good_processing) nv else warningCondition,
               as.character(rv), if (good_processing) '' else nv))
  }
  brv(TRUE, rv, '', '')
}

buildIdentityList <- function(entries_s) {
  d <- toupper(entries_s)
  names(d) <- gsub('[^A-Z0-9_]', '', d, perl = TRUE)
  as.list(d)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/outOfPackage.R
patchArgumentInCode <- function(code_s_1, originArgName_s_1, targetArgName_s_1, verbose_b_1 = FALSE) { if (verbose_b_1) cat('code [', code_s_1, ']\n', sep = '') if (!grepl(originArgName_s_1, code_s_1, fixed = TRUE)) return(code_s_1) if (grepl(paste0('^\\s*', originArgName_s_1, '\\s*$'), code_s_1, perl = TRUE)) return(targetArgName_s_1) markers <- c('\u03DF', '\u03D9', '\u03E1') # koppa, qoppa, sampi v <- sapply(markers, function(e) grepl(e, code_s_1, fixed = TRUE)) w <- which(v == FALSE) if (length(w) == 0) abort('none of the three marks is free') mark <- markers[w[1]] if (verbose_b_1) cat('using mark [', mark, ']\n', sep = '') subtitution_part <- paste0(' ', mark, '\\1', mark, ' ') separators <- list(perl_special = "[](){}^$*|#+?.-", not_special = "&@`'\"=%/:;,#~<>!", not_special_multichar = c(':::', '::', '\\[\\[', '\\]\\]')) pat1 <- sapply(strsplit(separators$perl_special, '')[[1]], function(p) { paste0('\\', p) }) pat <- paste0(paste(separators$not_special_multichar, collapse = '|'), paste(pat1, collapse = '|'), '|', paste0('[', separators$not_special, ']')) pat <- paste0('(', pat, ')') if (verbose_b_1) cat('using pattern ', pat, '\n', sep = '') code <- code_s_1 #paste0(code_s_1, mark) # absolutely necessary expand_code <- gsub(pat, subtitution_part, code, perl = TRUE) if (verbose_b_1) cat('expanded code [', expand_code, ']\n', sep = '') # no original argument in code psf <- paste0('( )*', originArgName_s_1, '( )*') #if (!grepl(psf, expand_code, perl = TRUE)) return(code_s_1) # take care that function can be eponymous of original argument name psf_not_function <- paste0(psf, mark, '(?!(\\(|\\$|\\.|@))') if (verbose_b_1) cat('seek pattern ', psf_not_function, '\n', sep = '') tsf <- paste0(' ', targetArgName_s_1, ' ', mark) f <- gsub(psf_not_function, tsf, expand_code, perl = TRUE) if (verbose_b_1) cat('subtitution [', f, ']\n', sep = '') g <- gsub(paste0('(?<!@|\\.|\\$|:::|::)', mark, ' ', originArgName_s_1, '\\s*$'), paste0('\\1', mark, ' ', targetArgName_s_1), f, perl = TRUE) if (verbose_b_1) cat('prior final substitution [', g, ']\n', sep = '') # substitute back markers tp <- paste0(' ', mark, '(.+?)', mark, ' ') gsub(mark, '', gsub(tp, '\\1', g, perl = TRUE), fixed = TRUE) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/patchArgumentInCode.R
qualifyFunctionArguments <- function(fun_f_1) {
  rfa <- wyz.code.offensiveProgramming::retrieveFunctionArguments(fun_f_1)
  if (is.null(rfa)) {
    return(list(
      argument_names = character(0),
      owns_ellipsis = FALSE,
      ellipsis_index = NA_integer_,
      symbol_names = character(0),
      symbol_indexes = NA_integer_,
      stripped_symbol_names = character(0),
      stripped_symbol_indexes = NA_integer_,
      default_names = character(0),
      default_indexes = NA_character_,
      arguments = rfa
    ))
  }
  l <- lapply(rfa, is.symbol)
  u <- unlist(l)
  nm <- names(l)
  ellipsis <- wyz.code.offensiveProgramming::getEllipsisName()
  oe <- ellipsis %in% nm
  ren <- if (oe) removeEllipsisName(nm[u]) else nm[u]
  list(
    argument_names = nm,
    owns_ellipsis = oe,
    ellipsis_index = ifelse(oe, which(nm == ellipsis), NA_integer_),
    symbol_names = nm[u],
    symbol_indexes = which(nm %in% nm[u]),
    stripped_symbol_names = ren,
    stripped_symbol_indexes = which(nm %in% ren),
    default_names = nm[!u],
    default_indexes = which(nm %in% nm[!u]),
    arguments = rfa
  )
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/qualifyFunctionArguments.R
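A small sketch of the argument classification above, assuming retrieveFunctionArguments() returns the function's formals as it is used throughout this package (the toy function f is invented for the example):

f <- function(x_i_1, y_s = 'a', ...) NULL
qfa <- qualifyFunctionArguments(f)
qfa$argument_names           # "x_i_1" "y_s" "..."
qfa$stripped_symbol_names    # "x_i_1"  (mandatory arguments, ellipsis removed)
qfa$default_names            # "y_s"    (arguments owning a default value)
qfa$owns_ellipsis            # TRUE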
removeEllipsisName <- function(argumentNames_s) {
  setdiff(argumentNames_s, wyz.code.offensiveProgramming::getEllipsisName())
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/removeEllipsisName.R
retrieveDataFactory <- function() {
  traceFactory <- function(factory_o_1, info_s_1 = 'default') {
    if (isAuditable()) {
      cat(info_s_1, 'data factory address', data.table::address(factory_o_1),
          '\nsuffixes', strBracket(strJoin(factory_o_1$retrieveKnownSuffixes())), "\n")
    }
    factory_o_1
  }
  g <- options('op_mt_data_factory')$op_mt_data_factory
  if (is.environment(g) && is(g, 'DataFactory')) return(traceFactory(g, 'user defined'))
  traceFactory(DataFactory())
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/retrieveDataFactory.R
setDefaultArgumentsGenerationContext <- function(useDefaultArguments_b_1 = TRUE,
                                                 useAllDefaultArguments_b_1 = FALSE) {
  list(
    use = isTRUE(useDefaultArguments_b_1),
    use_all = ifelse(useDefaultArguments_b_1, isTRUE(useAllDefaultArguments_b_1), FALSE)
  )
}

dac <- default_arguments_context <- list(
  none = setDefaultArgumentsGenerationContext(FALSE, FALSE),
  partial = setDefaultArgumentsGenerationContext(TRUE, FALSE),
  full = setDefaultArgumentsGenerationContext(TRUE, TRUE)
)
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/setDefaultArgumentsGenerationContext.R
setGenerationContext <- function(replacementNumber_ui_1 = sample(0:3L, 1),
                                 homogeneousTypeReplacement_b_1 = FALSE,
                                 allowList_b_1 = TRUE,
                                 forceList_b_1 = FALSE) {
  list(
    number_replacements = abs(replacementNumber_ui_1),
    homogeneous_type = isTRUE(homogeneousTypeReplacement_b_1),
    allow_list = isTRUE(allowList_b_1),
    force_list = isTRUE(forceList_b_1)
  )
}

erc <- established_replacement_context <- list(
  homo_vector = list(
    none = setGenerationContext(0, TRUE, FALSE),
    one = setGenerationContext(1, TRUE, FALSE),
    two = setGenerationContext(2, TRUE, FALSE),
    three = setGenerationContext(3, TRUE, FALSE)
  ),
  hetero_vector = list(
    none = setGenerationContext(0, FALSE, FALSE),
    one = setGenerationContext(1, FALSE, FALSE),
    two = setGenerationContext(2, FALSE, FALSE),
    three = setGenerationContext(3, FALSE, FALSE)
  ),
  homo_list = list(
    none = setGenerationContext(0, TRUE, TRUE, TRUE),
    one = setGenerationContext(1, TRUE, TRUE, TRUE),
    two = setGenerationContext(2, TRUE, TRUE, TRUE),
    three = setGenerationContext(3, TRUE, TRUE, TRUE)
  ),
  hetero_list = list(
    none = setGenerationContext(0, FALSE, TRUE, TRUE),
    one = setGenerationContext(1, FALSE, TRUE, TRUE),
    two = setGenerationContext(2, FALSE, TRUE, TRUE),
    three = setGenerationContext(3, FALSE, TRUE, TRUE)
  )
)
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/setGenerationContext.R
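Illustrative use of the generation contexts above (example values only):

# a custom context: two replacement values, all of the same type
ctx <- setGenerationContext(replacementNumber_ui_1 = 2, homogeneousTypeReplacement_b_1 = TRUE)
str(ctx)

# the predefined contexts are grouped by vector/list and homogeneous/heterogeneous drawing
names(erc)                                # "homo_vector" "hetero_vector" "homo_list" "hetero_list"
erc$homo_vector$two$number_replacements   # 2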
testFunction <- function(function_f_1, generatedData_l,
                         functionName_s_1 = deparse(substitute(function_f_1))) {
  if (!usesSemanticArgumentNames(function_f_1))
    abort('function', strBracket(functionName_s_1), 'owns arguments that are not semantic names')
  list(
    call = do.call(call, append(generatedData_l, functionName_s_1, 0)),
    data = generatedData_l,
    result = guardExecution(do.call(function_f_1, generatedData_l), functionName_s_1)
  )
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/testFunction.R
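A sketch of a single generated test run (illustrative only; op_cos is an example wrapper built with opwf and generateData is defined earlier in this dump):

op_cos <- opwf(cos, c('radianAngleOrComplex_'))
gd <- generateData(op_cos)
tr <- testFunction(op_cos, gd$data, 'op_cos')
tr$call            # the recorded call, built from the generated data
tr$result$status   # TRUE when the guarded execution completed without error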
usesSemanticArgumentNames <- function(fun_f_1) {
  qfa <- qualifyFunctionArguments(fun_f_1)
  l <- length(qfa$argument_names)
  all(sapply(seq_len(l), function(k) {
    x <- wyz.code.offensiveProgramming::FunctionParameterName(qfa$argument_names[k])
    x$isSemanticName()
  }))
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/R/usesSemanticArgumentNames.R
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE, comment = "")
source('common-style.R')
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/inst/doc/release-notes.R
op_sum <- opwf(sum, c('...', 'removeNA_b_1'))
op_kronecker <- opwf(kronecker, c('arrayA_a_1', 'arrayB_a_1', 'function_f_1', 'computeDimensionNames_b_1', '...'))
op_formatdf <- opwf(format.data.frame, c('x_o_1', '...', 'justificationScheme_s_1'))

cac_sum <- computeArgumentsCombination(op_sum)
cac_kronecker <- computeArgumentsCombination(op_kronecker)
cac_formatdf <- computeArgumentsCombination(op_formatdf)
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/inst/unit-testing/ut-computeArgumentCombination.R
require(wyz.code.offensiveProgramming)

#----------- STANDARD R --------------------------------------------------------
sr_divide <- function(x, y) {
  if (any(y == 0)) stop('can not divide by zero')
  x / y
}

sr_code <- function(x, y) {
  tryCatch(sr_divide(x, y), error = function(e) NA_real_)
}

rr <- sr_code(pi, 0:7 * 1.0)

#----------- OFFENSIVE PROGRAMMING ----------------------------------------------
op_divide <- function(x_r, x_rnz) x_r / x_rnz

op_code <- function(x, y) {
  runTransientFunction(op_divide, list(x, y), EvaluationMode(defineEvaluationModes()[3]), 'x_r')
}

fp <- FunctionParameterTypeFactory()
fp$addSuffix('rnz', 'real-not-zero', function(x_) is.double(x_) && x_ != 0.0)
Sys.setenv('OP_TYPE_FACTORY' = 'fp')

# evaluate the offensive-programming version before inspecting the outcome
rv <- op_code(pi, 0:7 * 1.0)
if (rv$status) {
  print(rv$value)
} else {
  print(rv)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/inst/unit-testing/ut-divide.R
library(data.table)
library(wyz.code.offensiveProgramming)

op_sum <- opwf(sum, c('...', 'removeNA_b_1'))
op_cos <- opwf(cos, c('radianAngleOrComplex_'))

rv_cos <- exploreSignatures(op_cos, list(radianAngleOrComplex_ = c('im', 'r', 'cm')))
rv_sum <- exploreSignatures(op_sum, list(... = c('im', 'r', 'cm')))

cac_sum <- computeArgumentsCombination(op_sum)
rv_sum_f <- exploreSignatures(op_sum, list(... = c('im', 'r', 'cm')), cac_sum$signatures[c(1, 5)])
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/inst/unit-testing/ut-exploreSignatures.R
library(data.table) library(wyz.code.offensiveProgramming) options(warn = 2) # retrievePackageFunctionName from rdoc - copied to avoid looping dependencies rpfn <- function(packageName_s_1, libraryPath_s_1 = .libPaths()[1]) { if (!packageName_s_1 %in% installed.packages()[, 'Package']) abort('package', strBracket(packageName_s_1), 'is not installed') sn <- packageName_s_1 if (!sn %in% search()) { tt <- paste0('package:', packageName_s_1) if (!tt %in% search()) library(packageName_s_1, character.only = TRUE) sn <- tt } if (sn %in% search()) { l <- ls(sn, all.names = TRUE) return(l[sapply(l, function(e) is.function(get(e)))]) } abort('package', strBracket(packageName_s_1), 'not found in search path') } bfn <- rpfn('base') l <- lapply(bfn, function(e) { f <- get(e) if (!is.function(f)) return(list(fn = e, number_arguments = NA_integer_, number_default_arguments = NA_integer_)) print(e) qfa <- qualifyFunctionArguments(f) list(fn = e, number_arguments = length(qfa$argument_names), number_default_arguments = length(qfa$default_indexes)) }) dt <- rbindlist(l) df <- dt[number_default_arguments > 0 & number_arguments > 0] buildReplacementNames <- function(originalNames_s) { buildArgnames <- function(numberOfArguments_ui_1) { if (numberOfArguments_ui_1 <= 26) return(paste0(letters[1:numberOfArguments_ui_1], '_')) c(paste0(letters[numberOfArguments_ui_1 %/% 26], buildArgnames(numberOfArguments_ui_1 %% 26)), buildArgnames(26)) } nn <- buildArgnames(length(originalNames_s)) e <- getEllipsisName() if (e %in% originalNames_s) { # very important - do not patch ellipsis nn[which(originalNames_s == e)] <- e } nn } ne <- 0 funs <- lapply(seq_len(nrow(df)), function(k) { #cat(df[k]$fn, '\n') fn <- get(df[k]$fn) if (!is.function(fn)) return(simpleError(df[k]$fn, 'is not a function')) h <- qualifyFunctionArguments(fn) tryCatch(opwf(fn, buildReplacementNames(h$argument_names), df[k]$fn), error = function(e) { ne <<- ne + 1 cat(ne, 'error with', df[k]$fn, e$message, '\n') e }) }) names(funs) <- df$fn gf <- Filter(function(e) is.function(e), funs) bf <- Filter(function(e) !is.function(e), funs) n <- 1 l <- length(gf) repeat { cat(n, "/", l, ' function name is ', names(gf)[n],'\n', sep = '') print(get(names(gf)[n])) cat(strrep('-', 79), '\n') print(gf[[n]]) x <- readline('next (integer|function name|q to exit)>') if (nchar(x) == 0) { n <- n + 1 next } if (tolower(x) == 'q') break; p <- suppressWarnings(as.integer(x)) if (!is.na(p)) { n <- ifelse(p > 0 && p <= l, p, n + 1) next } if (x %in% names(gf)) { n <- which(names(gf) == x) } }
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/inst/unit-testing/ut-packageSignatures.R
df <- retrieveDataFactory()
op_kronecker <- opwf(kronecker, c('arrayA_a_1', 'arrayB_a_1', 'function_f_1', 'computeDimensionNames_b_1', '...'))

draw_integer_array <- function(n_i_1, replace_b_1 = TRUE) {
  m <- n_i_1 + sample(0:3, 1)
  matrix(seq(1, n_i_1 * m), byrow = TRUE, nrow = n_i_1,
         dimnames = list(paste('row_', 1:n_i_1), paste('col_', 1:m)))
}
df$addSuffix('a', 'array', draw_integer_array)

draw_function_wrong <- function(n_i_1, replace_b_1 = TRUE) { } # wrong return type
df$addSuffix('f', 'function', draw_function_wrong)

draw_function_wrong2 <- function(n_i_, replace_b_1 = TRUE) { sum } # wrong arg name #1
df$addSuffix('f', 'function', draw_function_wrong2)

draw_function_wrong3 <- function(n_i_1, bool = TRUE) { sum } # wrong arg name #2
df$addSuffix('f', 'function', draw_function_wrong3)

draw_function <- function(n_i_1, replace_b_1 = TRUE) { list(`*`, `+`, `-`)[[sample(1:3, 1)]] }
df$addSuffix('f', 'function', draw_function)

# make your factory findable
options(op_mt_data_factory = df)

es <- exploreSignatures(op_kronecker)
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/inst/unit-testing/ut-sample.R
citeit <- function(x_s) paste0('<cite class="itb">', x_s, '</cite>')
citefun <- function(x_s) paste0('<cite class="it">', x_s, '</cite>')
citeop <- function(x_s) paste0('<cite class="op">', x_s, '</cite>')
citearg <- function(x_s) paste0('<cite class="os">', x_s, '</cite>')
citeval <- function(x_s) paste0('<cite class="ea">', x_s, '</cite>')
citesection <- function(x_s) paste0('<cite class="bj">', x_s, '</cite>')
citecode <- function(x_s) paste0('<cite class="oc">', x_s, '</cite>')
citechar <- function(x_s) paste0('<cite class="isa">', x_s, '</cite>')
cmt <- function(x_s) paste0('<cite class="comment">', x_s, '</cite>')
citefigure <- function(x_s) paste0('<cite class="figure">', x_s, '</cite>')
citetime <- function(x_s) paste0('<cite class="time">', x_s, '</cite>')
citefile <- function(x_s) paste0('<cite class="file">', x_s, '</cite>')
citefolder <- function(x_s) paste0('<cite class="folder">', x_s, '</cite>')
citeexec <- function(x_s) paste0('<cite class="exec">', x_s, '</cite>')

citeEA <- function() {
  n <- 0
  function(x_s) {
    n <<- n + 1
    paste0('<cite class="oc"> EA#', n, ' ', x_s, '</cite>')
  }
}

cmt <- function(x_s) paste0('<cite class="comment">', x_s, '</cite>')

rdoc <- citeval('wyz.code.rdoc')
roxy <- citeval('roxygen2')
op <- citeval('wyz.code.offensiveProgramming')
R <- citeit('R')

brkfun <- function(x_s) {
  paste(sapply(x_s, function(e) paste('\u25b6', e, '<br/>')), collapse = '')
}

showTable <- function(x_dt_1) {
  DT::datatable(x_dt_1, options = list(pageLength = 25))
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/vignettes/common-style.R
---
title: "wyz.code.metaTesting releases"
author: "Fabien GELINEAU"
date: "Last update 2020.04.22"
output:
  rmarkdown::html_vignette:
    number_sections: false
    toc: false
    css: style.css
vignette: >
  %\VignetteIndexEntry{wyz.code.metaTesting releases}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

<img src='images/metatesting-hex.png' alt='offensive programming - meta testing' style='width:30%'/>

```{r setup, include = FALSE}
knitr::opts_chunk$set(collapse = TRUE, comment = "")
source('common-style.R')
```

The latest release fully replaces older ones, which are now considered obsolete.
Keep the pace, and upgrade your packages to use the latest version!

# name [awesome-asterion-khi] package-version [1.1.22] timestamp [2023-09-25 08:34:40]

1. CRAN note about item in Rd for 4.3.0 implies changes
1. Changes applied to `r citefile("FunctionParameterTypeFactory.R")`

# name [awesome-asterion-phi] package-version [1.1.21] timestamp [2021-10-05 20:33:52]

1. CRAN information on obsolescence of lubridate - need to remove dependency.
1. lubridate was specified in files `r citefile("DESCRIPTION")` and `r citefile("NAMESPACE")`
<pre>
tests/testthat/test_DataFactory.R: expect_true(verifyFunction('x_da', lubridate::is.Date))
tests/testthat/test_DataFactory.R: expect_true(verifyFunction('x_dc', lubridate::is.POSIXct))
</pre>
1. Test, Duration: `r citefigure('4.1')`, OK: `r citefigure('750')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('29.2s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` content update
1. `r citeop("git")` alignment

# name [awesome-asterion-tau] package-version [1.1.19] timestamp [2020-11-09 19:49:02]

1. solved test issue related to R 4.0
1. enforced R 4.0
1. Test, Duration: `r citefigure('3.0s')`, OK: `r citefigure('491')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('33.9s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` content update
1. `r citeop("git")` alignment

# Release awesome-asterion-nu - 1.1.13 - 2020-05-04

1. Test, Duration: `r citefigure('2.9s')`, OK: `r citefigure('475')`
1. `r citeexec('R CMD check')`, Duration: `r citefigure('27.9s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
1. `r citeval('Commercial software release management')`
1. `r citefolder('vignette')` structure update
1. `r citefolder('vignette')` content update

# Release 1.1.12 - April 22nd, 2020

1. Test, Duration: `r citefigure('2.9s')`, OK: `r citefigure('475')`
1. R CMD check, Duration: `r citefigure('28.2s')`, 0 errors ✓ | 0 warnings ✓ | 0 notes ✓
1. Reviewed all documentation
1. Upgraded and updated vignettes

# Release 1.1.4 - January 2020

Main improvements are

1. removed environment variable `r citecode("OP_DATA_FACTORY")` management. Replaced by `r citecode("options('op_mt_data_factory')")`
1. solved CRAN compilation issue on older R release - enforced R dependency to R 3.6 or higher
1. **packageFunctionsInformation** renamed to **metaTestingInformation** to avoid name collisions
1. **metaTestingInformation** verified and upgraded
1. redesign of vignette files - now industrialized with external style
1. Worked on test coverage to reach level higher than 99%
1. cleaned up dependencies
1. Reviewed and cleaned up code - 23 files - 18 exported functions - 9 internals
1. Reviewed and cleaned up manual pages - 13 files
1. Reviewed and updated tests - 21 files - 475 unit tests
1. Vignettes upgraded - 1 vignette
1. Timing for tests 3s, checks 27s

# Release 1.1.3 - October 2019

Main improvements are

1. enhanced implementation
1. completed unit tests
1. enforced higher code coverage (from 55.23% up to 100%)
1. documentation completion
1. clean up package dependencies.

This release fully replaces older ones, which are now considered obsolete.
/scratch/gouwar.j/cran-all/cranData/wyz.code.metaTesting/vignettes/release-notes.Rmd
EvaluationMode <- function(value_s_1 = defineEvaluationModes()[2]) {
  self <- environment()
  class(self) <- append('EvaluationMode', class(self))

  if (!value_s_1 %in% defineEvaluationModes()) abort('unauthorized value', strBracket(value_s_1))

  is <- function(aValue_s_1) {
    aValue_s_1 == value_s_1
  }

  getEvaluationMode <- function() value_s_1

  toString <- function() {
    paste('evaluation mode', strBracket(value_s_1))
  }

  self
}

print.EvaluationMode <- function(x, ...) cat(x$toString(), ..., '\n')
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/EvaluationMode.R
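An illustrative sketch of the evaluation-mode object above (it only relies on what the file defines; the default mode is the second entry of defineEvaluationModes()):

em <- EvaluationMode()              # defaults to defineEvaluationModes()[2]
em$getEvaluationMode()
em$is(defineEvaluationModes()[2])   # TRUE for the default mode
print(em)                           # dispatches to print.EvaluationMode()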
FunctionParameterName <- function(name_s_1) { self <- environment() class(self) <- append('FunctionParameterName', class(self)) l <- length(name_s_1) if (l != 1) abort('you must provide a single function parameter name, got', l) if (!is.character(name_s_1)) abort('you must provide the function parameter name as a character, got,', class(name_s_1)) final_underscore <- endsWith(name_s_1, '_') s <- strsplit(name_s_1, '_')[[1]] l <- length(s) isPreValid <- function() { if (l > 1 && l <= 3) return(TRUE) if (l == 1 && final_underscore) return(TRUE) FALSE } figure_pattern <- '([1-9][0-9]*)' has_type_constraint <- l >= 2 && !grepl(figure_pattern, s[2], perl = TRUE) has_length_constraint <- if (l >= 2) grepl(figure_pattern, s[l], perl = TRUE) else FALSE length_modifier_value <- if (l >= 2) gsub(figure_pattern, '', s[l], perl = TRUE) else FALSE has_length_modifier <- nchar(length_modifier_value) > 0 has_valid_length_modifier <- ifelse(has_length_constraint, grepl('(l|m|n)$', s[l], perl = TRUE), FALSE) length_modifier <- if (has_length_constraint) substring(s[l], nchar(s[l])) else NA if (isPreValid()) { parameter_name <- s[1] type_suffix <- if (has_type_constraint) { s[2] } else NA length_suffix <- if (has_length_constraint) { tryCatch({ v <- regmatches(s[l], regexpr(figure_pattern, s[l])) k <- as.integer(v) if (k == 0) NA else k }, error = function(e) NA) } else NA } else { parameter_name <- name_s_1 type_suffix <- length_suffix <- NA } isEllipsis <- function() parameter_name == '...' isValid <- function() { if (isEllipsis()) return(TRUE) if (!isPreValid()) return(FALSE) if (!substr(parameter_name, 1, 1) %in% letters) return(FALSE) if (has_type_constraint) { if (final_underscore) return(FALSE) if (is.na(length_suffix)) return(TRUE) if (has_length_constraint) { if (length_modifier == 'n' && length_suffix == 1) return(FALSE) # forbidden combination 1n if (has_length_modifier) return(has_valid_length_modifier) return(TRUE) } # no fall trhu required } if (has_length_constraint) { if (!has_type_constraint && !final_underscore) return(FALSE) if (has_length_modifier) return(has_valid_length_modifier) return((final_underscore && !has_type_constraint) || (has_type_constraint && !final_underscore)) } if (final_underscore) return(!has_type_constraint) # no fall trhu required } isSemanticName <- function() { if (!isValid()) return(FALSE) if (isEllipsis()) return(TRUE) if (isPolymorphic()) return(TRUE) retrieveFactory()$checkSuffix(getTypeSuffix()) } getFullParameterName <- function() name_s_1 getParameterName <- function() parameter_name getTypeSuffix <- function() type_suffix isPolymorphic <- function() final_underscore getLengthSuffix <- function() length_suffix getLengthModifier <- function() { if (has_valid_length_modifier) return(length_modifier) NA } deduceParameterLabel <- function() { tolower(gsub('([A-Z])', ' \\1', parameter_name, perl = TRUE)) } getLengthSpecification <- function() { if (is.na(length_suffix)) return(NA_character_) if (is.na(getLengthModifier())) return(as.character(length_suffix)) paste0(length_suffix, length_modifier) } hasCompliantLength <- function(value_) { lc <- getLengthSuffix() if (is.na(lc)) return(TRUE) l <- length(value_) if (!has_valid_length_modifier) return(l == lc) if (length_modifier == 'l') return(l <= lc) if (length_modifier == 'n') return(l == 1 || l == length_suffix) if (length_modifier == 'm') return(l >= lc) # no fallthru needed } toString <- function() { paste('parameter name', strBracket(parameter_name), 'type suffix', strBracket(type_suffix), 'length suffix', 
strBracket(length_suffix), 'length modifier', strBracket(ifelse(has_valid_length_modifier, length_modifier, 'no length modifier'))) } self } print.FunctionParameterName <- function(x, ...) { cat(x$toString(), '\n') }
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/FunctionParameterName.R
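A sketch showing how a semantic parameter name is decomposed by the class above (the name 'values_ui_3m' is an arbitrary example):

p <- FunctionParameterName('values_ui_3m')
p$isSemanticName()         # TRUE: known type suffix and valid length specification
p$getTypeSuffix()          # "ui"
p$getLengthSuffix()        # 3
p$getLengthModifier()      # "m", meaning at least 3 values
p$hasCompliantLength(1:5)  # TRUE, five values satisfy the '3m' constraint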
FunctionParameterTypeFactory <- function() {
  self <- environment()
  class(self) <- append('FunctionParameterTypeFactory', class(self))

  type_classes <- list(basic = 'basic', math = 'math', data_structure = 'data structure',
                       date = 'date', numeric = 'numeric', language = 'language',
                       feature = 'feature', error = 'error management',
                       user_defined = 'user defined')

  isWarning <- function(o_1l_) 'warning' %in% class(o_1l_)
  isError <- function(o_1l_) 'error' %in% class(o_1l_)
  isDate <- function(o_1l_) is(o_1l_, 'Date')
  isPOSIXct <- function(o_1l_) inherits(o_1l_, 'POSIXct')
  isPOSIXlt <- function(o_1l_) inherits(o_1l_, 'POSIXlt')

  isPureBoolean <- function(o_1l_) {
    if (!is.logical(o_1l_)) return(FALSE)
    if (length(o_1l_) == 0) return(TRUE)
    all(is.na(o_1l_) == FALSE)
  }

  isPureComplex <- function(o_1l_) {
    if (!is.complex(o_1l_)) return(FALSE)
    if (length(o_1l_) == 0) return(TRUE)
    all(is.na(o_1l_) == FALSE) && all(is.infinite(o_1l_) == FALSE)
  }

  isPureInteger <- function(o_1l_) {
    if (!is.numeric(o_1l_)) return(FALSE)
    if (length(o_1l_) == 0) return(typeof(o_1l_) == 'integer')
    all(is.double(o_1l_) == FALSE)
  }

  isPureMathInteger <- function(o_1l_) {
    if (!isPureInteger(o_1l_)) return(FALSE)
    if (length(o_1l_) == 0) return(TRUE)
    !is.na(o_1l_[1]) && !is.infinite(o_1l_[1])
  }

  isPureReal <- function(o_1l_) {
    if (!is.double(o_1l_)) return(FALSE)
    if (length(o_1l_) == 0) return(TRUE)
    all(is.na(o_1l_[1]) == FALSE) && all(is.infinite(o_1l_) == FALSE)
  }

  # ease reuse
  generateIsOfClass <- function(classname_s_1) function(o_1l_) is(o_1l_, classname_s_1)

  isPositiveReal <- function(o_1l_) isPureReal(o_1l_) && all(o_1l_ >= 0.0)
  isNegativeReal <- function(o_1l_) isPureReal(o_1l_) && all(o_1l_ <= 0.0)
  isStrictlyPositiveReal <- function(o_1l_) isPureReal(o_1l_) && all(o_1l_ > 0.0)
  isStrictlyNegativeReal <- function(o_1l_) isPureReal(o_1l_) && all(o_1l_ < 0.0)

  isPositiveInteger <- function(o_1l_) isPureMathInteger(o_1l_) && all(o_1l_ >= 0L)
  isStrictlyPositiveInteger <- function(o_1l_) isPureMathInteger(o_1l_) && all(o_1l_ > 0L)
  isNegativeInteger <- function(o_1l_) isPureMathInteger(o_1l_) && all(o_1l_ <= 0L)
  isStrictlyNegativeInteger <- function(o_1l_) isPureMathInteger(o_1l_) && all(o_1l_ < 0L)

  isCantorReal <- function(o_1l_) isPureReal(o_1l_) && all(o_1l_ >= 0L) && all(o_1l_ <= 1L)

  isString <- function(o_1l_) {
    if (!is.character(o_1l_)) return(FALSE)
    if (length(o_1l_) == 0) return(TRUE)
    all(is.na(o_1l_) == FALSE)
  }

  allowedSuffixes <- list(
    list('a'  , 'array'                    , list(is.array)                 , type_classes$data_structure),
    list('b'  , 'boolean'                  , list(isPureBoolean)            , type_classes$math),
    list('c'  , 'complex'                  , list(is.complex)               , type_classes$numeric),
    list('cm' , 'complex-math'             , list(isPureComplex)            , type_classes$math),
    list('d'  , 'double'                   , list(is.double)                , type_classes$numeric),
    list('r'  , 'real-math'                , list(isPureReal)               , type_classes$math),
    list('rm' , 'real-math alias'          , list(isPureReal)               , type_classes$math),
    list('ch' , 'character'                , list(is.character)             , type_classes$basic),
    list('s'  , 'string'                   , list(isString)                 , type_classes$basic),
    list('ca' , 'call'                     , list(is.call)                  , type_classes$language),
    list('da' , 'date'                     , list(isDate)                   , type_classes$date),
    list('dc' , 'POSIXct'                  , list(isPOSIXct)                , type_classes$date),
    list('df' , 'data.frame'               , list(is.data.frame)            , type_classes$data_structure),
    list('dt' , 'data.table'               , list(data.table::is.data.table), type_classes$data_structure),
    list('dl' , 'POSIXlt'                  , list(isPOSIXlt)                , type_classes$date),
    list('dm' , 'double-math'              , list(isPureReal)               , type_classes$math),
    list('e'  , 'environment'              , list(is.environment)           , type_classes$basic),
    list('ex' , 'expression'               , list(is.expression)            , type_classes$language),
    list('er' , 'error'                    , list(isError)                  , type_classes$error),
    list('f'  , 'function'                 , list(is.function)              , type_classes$basic),
    list('fa' , 'factor'                   , list(is.factor)                , type_classes$basic),
    list('i'  , 'integer'                  , list(isPureInteger)            , type_classes$numeric),
    list('im' , 'integer-math'             , list(isPureMathInteger)        , type_classes$math),
    list('l'  , 'list'                     , list(is.list)                  , type_classes$data_structure),
    list('lo' , 'logical'                  , list(is.logical)               , type_classes$basic),
    list('m'  , 'matrix'                   , list(is.matrix)                , type_classes$data_structure),
    list('n'  , 'numeric'                  , list(is.numeric)               , type_classes$numeric),
    list('na' , 'na'                       , list(is.na)                    , type_classes$basic),
    list('nm' , 'name'                     , list(is.name)                  , type_classes$language),
    list('o'  , 'object'                   , list(is.object)                , type_classes$basic),
    list('ra' , 'raw'                      , list(is.raw)                   , type_classes$basic),
    list('ui' , 'unsigned integer'         , list(isPositiveInteger)        , type_classes$math),
    list('pi' , 'positive integer'         , list(isPositiveInteger)        , type_classes$math),
    list('ni' , 'negative integer'         , list(isNegativeInteger)        , type_classes$math),
    list('spi', 'strictly positive integer', list(isStrictlyPositiveInteger), type_classes$math),
    list('sni', 'strictly negative integer', list(isStrictlyNegativeInteger), type_classes$math),
    list('cr' , 'cantor real'              , list(isCantorReal)             , type_classes$math),
    list('ur' , 'unsigned real'            , list(isPositiveReal)           , type_classes$math),
    list('pr' , 'positive real'            , list(isPositiveReal)           , type_classes$math),
    list('nr' , 'negative real'            , list(isNegativeReal)           , type_classes$math),
    list('spr', 'strictly positive real'   , list(isStrictlyPositiveReal)   , type_classes$math),
    list('snr', 'strictly negative real'   , list(isStrictlyNegativeReal)   , type_classes$math),
    list('t'  , 'table'                    , list(is.table)                 , type_classes$data_structure),
    list('w'  , 'warning'                  , list(isWarning)                , type_classes$error)
  )

  suffix <- NULL # data.table NSE issue with Rcmd check
  dt <- data.table::rbindlist(allowedSuffixes)
  data.table::setnames(dt, colnames(dt), c('suffix', 'type', 'verify_function', 'category'))
  stopifnot(all(sapply(dt$verify_function, function(e) is.function(e)) == TRUE))
  dt <- dt[order(suffix)]

  getRowNumber <- function(value_s_1) {
    if (value_s_1 %in% dt$suffix) return(which(dt$suffix == value_s_1))
    if (value_s_1 %in% dt$type) return(which(dt$type == value_s_1))
    NA
  }

  getRecordedTypes <- function() copy(dt[order(suffix)])

  retrieveKnownSuffixes <- function() dt$suffix

  checkSuffix <- function(suffix_s_1) suffix_s_1[1] %in% dt$suffix

  addSuffix <- function(suffix_s_1, type_s_1, typeVerifier_f_1) {
    if (!is.function(typeVerifier_f_1)) return(FALSE)
    i <- identical(retrieveFunctionArguments(isString),
                   retrieveFunctionArguments(typeVerifier_f_1))
    if (!i) return(FALSE)
    s <- gsub('_*([A-Za-z].*)', '\\1', suffix_s_1, perl = TRUE)
    rv <- checkSuffix(s)
    if (!rv)
      dt <<- data.table::rbindlist(list(dt, list(s, type_s_1, list(typeVerifier_f_1),
                                                 type_classes$user_defined)))
    !rv
  }

  getType <- function(value_s_1) {
    rn <- getRowNumber(value_s_1[1])
    if (is.na(rn)) return(paste('No suffix or type matches', strBracket(value_s_1[1])))
    dt[rn]$type
  }

  getVerificationFunction <- function(value_s_1) {
    rn <- getRowNumber(value_s_1[1])
    if (is.na(rn)) return(paste('No verification function', strBracket(value_s_1[1])))
    dt[rn]$verify_function[[1]]
  }

  verifyValue <- function(functionParameterName_o, value_) {
    stopifnot(methods::is(functionParameterName_o, 'FunctionParameterName'))

    brv <- function(validity_b, msg_s) {
      list(parameter_name = functionParameterName_o$getFullParameterName(),
           parameter_value = list(value_),
           validity = validity_b,
           message = msg_s)
    }

    checkValue <- function() {
      if (!functionParameterName_o$hasCompliantLength(value_))
        return(brv(FALSE, paste('wrong length, was expecting',
                                strBracket(functionParameterName_o$getLengthSpecification()),
                                ', got', strBracket(length(value_)))))
      rs <- functionParameterName_o$getTypeSuffix()
      if (!rs %in% dt$suffix) return(brv(FALSE, paste0('unknown suffix, [', rs, ']')))
      fn <- dt[suffix == rs]$verify_function[[1]]
      # b <- if (is.list(value_) && !is.object(value_)) all(sapply(value_, fn) == TRUE) else fn(value_)
      b <- fn(value_)
      return(brv(b, paste(ifelse(b, 'good', 'wrong'), 'type in values')))
    }

    if (functionParameterName_o$isPolymorphic()) return(brv(TRUE, 'polymorphic parameter'))
    checkValue()
  }

  getTypeDescription <- function(functionParameterName_o) {
    getAdj <- function(x_s, capitalize_b = FALSE)
      ifelse(grepl('^[aeiouy]', x_s, perl = TRUE),
             ifelse(capitalize_b, 'An', 'an'),
             ifelse(capitalize_b, 'A', 'a'))

    if (functionParameterName_o$isEllipsis()) return('additional arguments.')

    s <- functionParameterName_o$getTypeSuffix()
    if (checkSuffix(s)) {
      type <- dt[suffix == s]$type
      kind <- if (dt[suffix == s]$category %in% c(type_classes$basic,
                                                  type_classes$numeric,
                                                  type_classes$math)) 'values' else 'objects'
    } else {
      type <- if (functionParameterName_o$isPolymorphic()) 'variable type' else 'unknown'
      kind <- 'objects'
    }

    lu <- functionParameterName_o$getLengthSuffix()
    ll <- functionParameterName_o$getLengthModifier()
    constraint <- if (is.na(lu)) 'unconstrained' else {
      if (!is.na(ll)) {
        paste0('length-', switch(ll,
                                 'n' = paste('1 or', lu),
                                 'l' = paste(lu, 'or less'),
                                 'm' = paste(lu, 'or more')))
      } else {
        if (lu == 1L) 'single' else paste0('length-', lu)
      }
    }

    single <- !is.na(lu) && is.na(ll) && lu == 1L

    paste0(getAdj(constraint, TRUE), ' ', constraint, ' ',
           if (type == 'list') type else paste0(ifelse(single, '', 'vector of '), type, ' ',
                                                ifelse(single,
                                                       substr(kind, 1L, nchar(kind) - 1L),
                                                       kind)))
  }

  self
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/FunctionParameterTypeFactory.R
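## --- usage sketch (illustration only, not part of the package sources) ----------------
## A minimal tour of the factory above, assuming the wyz.code.offensiveProgramming package
## is attached. The 'u8' suffix and its verifier are hypothetical additions; 'count_im_1'
## assumes the package's <name>_<type suffix>_<length> parameter-naming convention.
tf <- FunctionParameterTypeFactory()
tf$checkSuffix('im')              # TRUE: integer-math is a predefined suffix
tf$getType('spi')                 # "strictly positive integer"
tf$addSuffix('u8', 'byte value',  # user-defined suffix; the verifier must share the
             function(o_1l_)      # formals of isString (a single o_1l_ argument)
               is.numeric(o_1l_) && all(o_1l_ >= 0 & o_1l_ <= 255))
tf$verifyValue(FunctionParameterName('count_im_1'), 3L)  # length and type verification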
TestCaseDefinition <- function(params_l, expectedResult_, description_s_1) {
  self <- environment()
  class(self) <- append('TestCaseDefinition', class(self))

  getParams <- function() params_l
  getExpectedResult <- function() expectedResult_
  getDescription <- function() description_s_1

  asList <- function() {
    list(params = params_l,
         expected_result = expectedResult_,
         description = description_s_1)
  }

  self
}

print.TestCaseDefinition <- function(x, ...) {
  print(x$asList())
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/TestCaseDefinition.R
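## --- usage sketch (illustration only) --------------------------------------------------
## Constructing and inspecting a test case with the constructor defined above.
tc <- TestCaseDefinition(list(2L, 3L), 5L, 'sum of two integers')
tc$getParams()          # list(2L, 3L)
tc$getExpectedResult()  # 5L
print(tc)               # dispatches to print.TestCaseDefinition and shows asList()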
defineEvaluationModes <- function() { c('standard_R_evaluation', 'enhanced_R_evaluation', 'type_checking_enforcement') }
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/defineEvaluationModes.R
defineFunctionReturnTypesParameterName <- function() 'function_return_types'
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/defineFunctionReturnTypesParameterName.R
defineTestCaseDefinitionsParameterName <- function() 'test_case_definitions'
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/defineTestCaseDefinitionsParameterName.R
exploreObjectNamesVerification <- function(object_o_1,
                                           what_s_1 = c('names', 'return type', 'test cases', '*')[1]) {
  idx <- switch(tolower(substr(what_s_1, 1, 1)),
                'a' = 2, # arguments
                'p' = 2, # parameters
                'r' = 2, # return type
                't' = 3, # test cases
                '*' = 4, # all
                1)

  r <- verifyObjectNames(object_o_1)

  if (idx == 1 || idx == 4) {
    w <- which(r$parameter_name_compliance$semantic_naming_check == FALSE)
    l <- c(length(which(r$class_name_compliance == FALSE)),
           length(which(r$function_name_compliance == FALSE)),
           length(w))
    if (l[3] != 0) {
      sapply(w, function(k) {
        cat(crayon::yellow(paste('parameter',
                                 strBracket(r$parameter_name_compliance[k]$parameter_name),
                                 'from function',
                                 strBracket(r$parameter_name_compliance[k]$function_name))), '\n')
      })
    }
    cat(generateStatusSummary(l,
                              c(1, length(r$function_name_compliance),
                                length(r$parameter_name_compliance$semantic_naming_check)),
                              c('class name', 'function names', 'function parameter names')), '\n')
  }

  if (idx == 2 || idx == 4) {
    if (r$owns_function_return_type_information == FALSE) {
      cat(generateStatusSummary(1, 1, 'no instrumentation of function return type'), '\n')
    } else {
      if (r$is_function_fully_instrumented == FALSE &&
          length(r$missing_functions) >= 1 && !is.na(r$missing_functions[1])) {
        cat(crayon::yellow(paste('missing function instrumentation:',
                                 paste(r$missing_functions, collapse = ', '))), '\n')
      }
      l <- ifelse(r$is_function_fully_instrumented == TRUE, 0, length(r$missing_functions))
      cat(generateStatusSummary(l, length(r$function_name_compliance),
                                'functions not instrumented'), '\n')
    }
  }

  if (idx == 3 || idx == 4) {
    if (r$owns_test_case_definitions == FALSE) {
      cat(generateStatusSummary(1, 1, 'no instrumentation of test cases'), '\n')
    } else {
      if (r$is_test_case_fully_instrumented == FALSE &&
          length(r$missing_test_cases) >= 1 && !is.na(r$missing_test_cases[1])) {
        cat(crayon::yellow(paste('missing test case instrumentation:',
                                 paste(r$missing_test_cases, collapse = ', '))), '\n')
      }
      l <- ifelse(r$is_test_case_fully_instrumented == TRUE, 0, length(r$missing_test_cases))
      cat(generateStatusSummary(l, length(r$function_name_compliance),
                                'test cases not instrumented'), '\n')
    }
  }

  invisible(r)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/exploreObjectNamesVerification.R
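## --- usage sketch (illustration only) --------------------------------------------------
## Exploring name compliance of a small, hypothetical environment-based class, assuming
## verifyObjectNames() accepts classes built in the package's environment style. The
## 'Adder' class below is not part of the package.
Adder <- function() {
  self <- environment()
  class(self) <- append('Adder', class(self))
  addNumbers <- function(x_n_1, y_n_1) x_n_1 + y_n_1
  self
}
exploreObjectNamesVerification(Adder(), 'names')  # colored status summary of name checks
exploreObjectNamesVerification(Adder(), '*')      # names, return types and test cases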
findFilesInPackage <- function(filenames_s, packageName_s_1) {
  sapply(filenames_s, function(e) {
    file.path(list.files(system.file(package = packageName_s_1), e,
                         recursive = TRUE, full.names = TRUE))
  })
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/findFilesInPackage.R
generateStatusSummary <- function(numberErrors_ui, numberTotal_ui, labels_s) {
  mark <- ifelse(!numberErrors_ui, crayon::green('\u2714'), crayon::red('\u2718'))
  paste(crayon::blue(paste0(numberErrors_ui, '/', numberTotal_ui)),
        crayon::blue(labels_s), mark, collapse = ' | ')
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/generateStatusSummary.R
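## --- usage sketch (illustration only) --------------------------------------------------
## generateStatusSummary() renders an error count over a total, with a green check mark
## when the error count is zero and a red cross otherwise.
cat(generateStatusSummary(0, 12, 'function names'), '\n')   # 0/12 function names + green check
cat(generateStatusSummary(3, 12, 'parameter names'), '\n')  # 3/12 parameter names + red cross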
getClassTypicalFunctionNames <- function(object_o_1) {
  on <- getObjectClassKind(object_o_1)
  if (is.na(on)) return(NA)
  switch(on,
         'environment' = vector(mode = 'character', 0),
         'R6' = c('clone'),
         'S3' = vector(mode = 'character', 0),
         'S4' = c('show', 'initialize'),
         'RC' = c("callSuper", "copy", "export", "field", "getClass", "getRefClass",
                  "import", "initFields", "show", "trace", "untrace", "usingMethods",
                  'initialize'))
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/getClassTypicalFunctionNames.R
getEllipsisName <- function() '...'
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/getEllipsisName.R
getObjectClassKind <- function(object_o_1) {
  if (!is.object(object_o_1)) return(NA_character_)
  if (isS4(object_o_1)) {
    if (is(object_o_1, 'refClass')) return('RC')
    return('S4')
  }
  on <- class(object_o_1)
  if ('R6' %in% on || 'R6ClassGenerator' %in% on) return('R6')
  if (is.environment(object_o_1)) return('environment')
  if (typeof(object_o_1) == 'list') return('S3')
  'unknown'
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/getObjectClassKind.R
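## --- usage sketch (illustration only) --------------------------------------------------
## Class-kind detection for a few object flavours; 'Point' is a throwaway S4 class defined
## only for this illustration, and TestCaseDefinition comes from the file above.
methods::setClass('Point', slots = c(x = 'numeric'))
getObjectClassKind(methods::new('Point', x = 1))          # "S4"
getObjectClassKind(structure(list(), class = 'myS3'))     # "S3"
getObjectClassKind(TestCaseDefinition(list(), NA, 'x'))   # "environment"
getObjectClassKind(1:3)                                   # NA_character_: not an object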
getObjectClassNames <- function(object_o_1) {
  on <- class(object_o_1)
  list(classname = setdiff(on, c('environment', 'R6'))[1], classnames = on)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/getObjectClassNames.R
getObjectFunctionArgumentNames <- function(object_o_1, allNames_b_1 = TRUE) {
  on <- getObjectClassKind(object_o_1)
  if (is.na(on)) return(NA)
  cn <- getObjectClassNames(object_o_1)$classname
  ofn <- getObjectFunctionNames(object_o_1, allNames_b_1)
  l <- lapply(ofn, function(f) {
    fa <- switch(on,
                 'environment' = object_o_1[[f]],
                 'R6' = object_o_1[[f]],
                 'S4' = getMethod(f, signature = cn),
                 'S3' = get(paste0(f, '.', cn), mode = 'function'),
                 'RC' = eval(parse(text = paste0(cn, '$def@refMethods[["', f, '"]]'))))
    x <- retrieveFunctionArgumentNames(fa)
    if (is.null(x)) return(vector('character', 0))
    x
  })
  names(l) <- ofn
  l
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/getObjectFunctionArgumentNames.R
getObjectFunctionNames <- function(object_o_1, allNames_b_1 = FALSE) {
  filterNames <- function(x) {
    if (allNames_b_1) return(x)
    setdiff(x, getClassTypicalFunctionNames(object_o_1))
  }

  filterOutFunctions <- function(names_s, obj_1_) {
    v <- sapply(names_s, function(e) is.function(obj_1_[[e]]))
    names(v[which(v == TRUE)])
  }

  on <- getObjectClassKind(object_o_1)
  if (is.na(on)) return(NA)
  cn <- getObjectClassNames(object_o_1)$classname
  if (on %in% c('environment', 'R6')) {
    return(filterNames(filterOutFunctions(ls(envir = object_o_1, all.names = TRUE), object_o_1)))
  }
  if (on == 'RC') {
    x <- get(cn)
    return(filterNames(filterOutFunctions(ls(envir = x$def@refMethods, all.names = TRUE),
                                          x$def@refMethods)))
  }
  if (on %in% c('S3', 'S4')) {
    d <- suppressWarnings(methods(class = cn))
    return(filterNames(attributes(d)$info$generic))
  }
  NA
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/getObjectFunctionNames.R
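## --- usage sketch (illustration only) --------------------------------------------------
## Listing the functions and their argument names of an environment-based class; the
## 'Greeter' class is hypothetical, and the second call assumes
## retrieveFunctionArgumentNames() returns the formal argument names of a function.
Greeter <- function() {
  self <- environment()
  class(self) <- append('Greeter', class(self))
  greet <- function(name_s_1) paste('hello', name_s_1)
  shout <- function(name_s_1, times_i_1) rep(toupper(name_s_1), times_i_1)
  self
}
getObjectFunctionNames(Greeter())          # "greet" "shout"
getObjectFunctionArgumentNames(Greeter())  # list(greet = "name_s_1", shout = c("name_s_1", "times_i_1"))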
hasMainClass <- function(object_o_1, classname_s_1) { classname_s_1[1] == getObjectClassNames(object_o_1)$classname[1] }
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/hasMainClass.R
identifyOPInstrumentationLevel <- function(object_o_1 = NULL, methodName_s_1 = NA_character_) {
  if (is.null(object_o_1) || !is.object(object_o_1)) {
    b1 <- if (is.na(methodName_s_1)) FALSE else {
      args <- retrieveFunctionArguments(methodName_s_1)
      if (is.null(args)) FALSE else {
        all(sapply(args, function(e) {
          FunctionParameterName(e)$isValid()
        }))
      }
    }
    list(offensive_programming = FALSE,
         full_instrumentation = FALSE,
         semantic_naming = b1,
         function_return_type = FALSE,
         test_case_definition = FALSE)
  } else {
    v <- verifyObjectNames(object_o_1)
    b1 <- if (is.logical(v$parameter_name_compliance)) {
      v$parameter_name_compliance
    } else {
      all(v$parameter_name_compliance$semantic_naming_check)
    }
    b2 <- v$owns_function_return_type_information
    b3 <- v$owns_test_case_definitions
    list(offensive_programming = b1 && (b2 || b3), # manages partial instrumentation
         full_instrumentation = b1 && b2 && b3,
         semantic_naming = b1,
         function_return_type = b2,
         test_case_definition = b3)
  }
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/identifyOPInstrumentationLevel.R
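## --- usage sketch (illustration only) --------------------------------------------------
## With no object and no method name every instrumentation flag is FALSE; passing an
## instrumented object (one exposing function return types and test case definitions)
## would flip the corresponding flags.
str(identifyOPInstrumentationLevel())
# List of 5: offensive_programming, full_instrumentation, semantic_naming,
#            function_return_type, test_case_definition - all FALSE here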
isAuditable <- function() { isTRUE(options('op_audit')$op_audit) }
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/isAuditable.R
matchFunctionArguments <- function(realArguments_l, signatureArguments_s) {
  verifySignature <- function(real_l, theoretical_s, message_s_1) {
    lr <- length(real_l)
    lt <- length(theoretical_s)
    if (lt == 0) {
      return(rbindlist(list(
        list(parameter_name = '',
             parameter_value = NA_character_,
             validity = TRUE,
             message = paste('no parameter required',
                             ifelse(lr == 0, '', paste(lr, 'values provided')))))))
    }

    remaining <- theoretical_s
    anm <- sapply(seq_len(length(real_l)), function(k) {
      nm <- names(real_l)[k]
      pn <- if (missing(nm) || is.null(nm) || is.na(nm) || nchar(nm) == 0) remaining[1] else remaining[pmatch(nm, remaining)]
      if (length(pn) > 0) {
        if (!is.na(pn) && pn != ellipsis) remaining <<- setdiff(remaining, pn)
      }
      pn
    }, simplify = TRUE)

    if (!use_ellipsis && length(unique(anm)) != length(anm)) {
      ta <- table(names(real_l))
      return(rbindlist(list(
        list(parameter_name = 'input real argument names',
             parameter_value = names(ta)[ta > 1],
             validity = FALSE,
             message = 'parameter names mismatch'))))
    }

    tf <- retrieveFactory()
    rv <- sapply(seq_len(length(real_l)), function(k) {
      # catn('parameter name', anm[k], 'value', strBracket(real_l[[k]]))
      if (!is.na(anm[k]) && anm[k] != ellipsis) {
        tf$verifyValue(FunctionParameterName(anm[k]), real_l[[k]])
      } else {
        list(parameter_name = ellipsis,
             parameter_value = list(real_l[[k]]),
             validity = TRUE,
             message = 'ellipsis matches all')
      }
    }, simplify = FALSE)

    if (is.na(message_s_1)) {
      rbindlist(rv)
    } else {
      rbindlist(append(rv, list(list(parameter_name = '#',
                                     parameter_value = NA,
                                     validity = FALSE,
                                     message = message_s_1))))
    }
  }

  lsa <- length(signatureArguments_s)
  lra <- length(realArguments_l)
  message <- NA_character_
  ellipsis <- getEllipsisName()
  use_ellipsis <- ellipsis %in% signatureArguments_s
  if (lra > lsa) {
    if (!use_ellipsis) {
      message <- paste(lra - lsa, 'extraneous arguments found')
    }
  }
  ra <- if (use_ellipsis || lra == 0) realArguments_l else realArguments_l[1:min(lra, lsa)] # force cut
  verifySignature(ra, signatureArguments_s, message)
}
/scratch/gouwar.j/cran-all/cranData/wyz.code.offensiveProgramming/R/matchFunctionArguments.R
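## --- usage sketch (illustration only) --------------------------------------------------
## Matching actual call arguments against a declared signature. This relies on the factory
## returned by retrieveFactory() and on the <name>_<type suffix>_<length> naming
## convention, so the exact rows and messages below are indicative only.
matchFunctionArguments(list(x_s_1 = 'abc', n_i_1 = 4L), c('x_s_1', 'n_i_1'))
# one row per parameter: name, value, validity flag and a short message
matchFunctionArguments(list('abc', 4L, TRUE), c('x_s_1', 'n_i_1'))
# third value exceeds the signature: an extra '#' row reports '1 extraneous arguments found'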