#' Plotting method for Bayesian VAR predictions
#'
#' Plotting method for forecasts obtained from \code{\link{predict.bvar}}.
#' Forecasts of all or a subset of the available variables can be plotted.
#'
#' @param x A \code{bvar_fcast} object, obtained from \code{\link{predict.bvar}}.
#' @param vars Optional numeric or character vector. Used to subset the plot to
#' certain variables by position or name (must be available). Defaults to
#' \code{NULL}, i.e. all variables.
#' @param col Character vector. Colour(s) of the lines delineating credible
#' intervals. Single values will be recycled if necessary. Recycled HEX color
#' codes are varied in transparency if not provided (e.g. "#737373FF"). Lines
#' can be bypassed by setting this to \code{"transparent"}.
#' @param t_back Integer scalar. Number of observed datapoints to plot ahead of
#' the forecast.
#' @param area Logical scalar. Whether to fill the credible intervals using
#' \code{\link[graphics]{polygon}}.
#' @param fill Character vector. Colour(s) to fill the credible intervals with.
#' See \emph{col} for more information.
#' @param variables Optional character vector. Names of all variables in the
#' object. Used to subset and title. Taken from \code{x$variables} if available.
#' @param orientation String indicating the orientation of the plots. Defaults
#' to \code{"v"} (i.e. vertical); may be set to \code{"h"} (i.e. horizontal).
#' @param mar Numeric vector. Margins for \code{\link[graphics]{par}}.
#' @param ... Other graphical parameters for \code{\link[graphics]{par}}.
#'
#' @return Returns \emph{x} invisibly.
#'
#' @seealso \code{\link{bvar}}; \code{\link{predict.bvar}}
#'
#' @keywords BVAR forecast analysis plot
#'
#' @export
#'
#' @importFrom utils tail
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Store predictions ex-post
#' predict(x) <- predict(x)
#'
#' # Plot forecasts for all available variables
#' plot(predict(x))
#'
#' # Subset to variables in positions 1 and 3 via their name
#' plot(predict(x), vars = c("CPI", "FED"))
#'
#' # Subset via position, increase the plotted forecast horizon and past data
#' plot(predict(x, horizon = 20), vars = c(1, 3), t_back = 10)
#'
#' # Adjust confidence bands and the plot's orientation
#' plot(predict(x, conf_bands = 0.25), orientation = "h")
#'
#' # Draw areas in between the confidence bands and skip drawing lines
#' plot(predict(x), col = "transparent", area = TRUE)
#'
#' # Plot a conditional forecast (with a constrained second variable).
#' plot(predict(x, cond_path = c(1, 1, 1, 1, 1, 1), cond_var = 2))
#' }
plot.bvar_fcast <- function(
  x, vars = NULL,
  col = "#737373", t_back = 1,
  area = FALSE, fill = "#808080",
  variables = NULL, orientation = c("vertical", "horizontal"),
  mar = c(2, 2, 2, 0.5), ...) {

  if(!inherits(x, "bvar_fcast")) {stop("Please provide a `bvar_fcast` object.")}

  plot_fcast(x = x, vars = vars, variables = variables,
    orientation = orientation, mar = mar, t_back = t_back,
    area = area, col = col, fill = fill, ...)
}


#' @noRd
plot_fcast <- function(
  x, vars = NULL,
  variables = NULL, orientation = c("vertical", "horizontal"),
  mar = c(2, 2, 2, 0.5), t_back = 1,
  area = FALSE, col = "#737373", fill = "#808080", ...) {

  # Checks ---

  if(!inherits(x, "bvar") && !inherits(x, "bvar_fcast")) {
    stop("Please provide a `bvar` or `bvar_fcast` object.")
  }
  if(inherits(x, "bvar")) {x <- predict(x)}

  orientation <- match.arg(orientation)

  # Prepare data ---

  has_quants <- length(dim(x[["quants"]])) == 3L
  if(has_quants) {
    quants <- x[["quants"]]
    M <- dim(quants)[3]; P <- P2 <- dim(quants)[1]
  } else {
    if(area) {message("Cannot plot area without quantiles."); area <- FALSE}
    M <- dim(x[["quants"]])[2]; P <- 1; P2 <- 2
    # Cheat day - quants must be 3-dimensional, so we fill with NAs
    quants <- array(NA, c(2, dim(x[["quants"]])))
    quants[1, , ] <- x[["quants"]]
  }

  # Add t_back actual datapoints
  t_back <- int_check(t_back, 0, Inf, msg = "Issue with t_back.")
  use_data <- t_back != 0
  if(use_data) {
    if(is.null(x[["data"]])) { # To support versions prior to 1.0.0
      message("No data found, skipping. Recalculate with `predict()`.")
      t_back <- 0L; use_data <- FALSE
    } else {
      data <- tail(x[["data"]], t_back)
      t_forw <- x[["setup"]][["horizon"]]
      # Extend the quants array with data, quantiles are set to NA
      quants <- vapply(seq(M), function(i) {
        t(rbind(fill_ci_na(data[, i], P2), t(quants[, , i])))
      }, matrix(0, P2, t_back + t_forw), USE.NAMES = FALSE)
    }
  }

  # Prepare other arguments ---

  variables <- name_deps(variables = if(is.null(variables)) {
    x[["variables"]]} else {variables}, M = M)

  # Sort out colours - applies alpha if they're HEX and need recycling
  col <- fill_ci_col(x = "#000000", y = col, P = P)
  if(area) {fill <- fill_ci_col(x = integer(), y = fill, P = P)}

  pos <- pos_vars(vars, variables, M)

  mfrow <- if(grepl("^vertical$", orientation)) {
    c(length(pos), 1)
  } else {c(1, length(pos))}

  .plot_fcast(x = quants, variables = variables, pos = pos,
    col = col, mar = mar, mfrow = mfrow,
    t_back = t_back, area = area, fill = fill, ...)

  return(invisible(x))
}


#' Forecast plot
#'
#' @param x Numeric array (3-dimensional) with data to plot. The first
#' dimension contains quantiles, the second paths and the third variables.
#' @param variables Character vector with the names of variables.
#' @param pos Integer vector. Positions of the variables to plot.
#' @param col Character vector. Colours to feed to \code{\link[stats]{ts.plot}}.
#' @param mar Numeric vector. Margins for \code{\link[graphics]{par}}.
#' @param mfrow Numeric vector. Layout for \code{\link[graphics]{par}}.
#' @param t_back Integer scalar. Number of initial datapoints without intervals.
#' @param area Logical scalar. Whether to draw polygons between intervals.
#' @param fill Character vector. Colours for \code{\link[graphics]{polygon}}.
#' @param ... Other graphical parameters for \code{\link[graphics]{par}}.
#'
#' @importFrom graphics par grid abline polygon lines
#' @importFrom stats ts.plot ts
#'
#' @noRd
.plot_fcast <- function(
  x, variables, pos,
  col, mar, mfrow,
  t_back = 0, area = FALSE, fill, ...) {

  if(area) {
    # Keep all points unless observed data precedes the forecast
    keep <- if(t_back > 0) {-seq(t_back)} else {TRUE}
    P <- dim(x)[1]
    x_vals <- c(seq(dim(x)[2] - t_back), rev(seq(dim(x)[2] - t_back)))
  }
  mid <- length(col) %/% 2 + 1

  op <- par(mfrow = mfrow, mar = mar, ...)
  for(i in pos) {
    ts.plot(ts(t(as.matrix(x[, , i])), start = (-t_back + 1)),
      col = col, lty = 1, main = paste("Forecast", variables[i]))
    # Fill areas
    if(area) {for(j in seq(P - 1)) {
      polygon(y = c(x[j, keep, i], rev(x[j + 1, keep, i])), x = x_vals,
        col = fill[j], border = NA)
    }}
    grid()
    abline(v = 1, lty = "dashed", col = "gray")
    abline(h = 0, lty = "dashed", col = "gray")
    lines(ts(x[mid, , i], start = (-t_back + 1)), col = col[mid])
  }
  par(op)
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/58_fcast_plot.R
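A note on the NA-padding trick above ("Cheat day"): when a forecast object carries no quantiles, the plotting helpers still expect a quantile dimension, so the matrix is embedded as the first slice of a higher-dimensional array. A minimal sketch of the idea, with made-up dimensions:

horizon <- 4; M <- 2                        # toy dimensions, not from a model
quants_2d <- matrix(rnorm(horizon * M), horizon, M)
quants <- array(NA_real_, c(2, horizon, M))
quants[1, , ] <- quants_2d                  # data in slice 1, slice 2 stays NA
dim(quants)                                 # 2 4 2 - a uniform 3D interface

The NA slice is simply never drawn, so the downstream indexing code needs no special case.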
#' Impulse response settings and identification
#'
#' Provides settings for the computation of impulse responses to
#' \code{\link{bvar}}, \code{\link{irf.bvar}} or \code{\link{fevd.bvar}}. Allows
#' setting the horizon for which impulse responses should be computed, whether
#' or not forecast error variance decompositions (FEVDs) should be included,
#' as well as if and what kind of identification should be used. See the Details
#' section for further information on identification. Identification can be
#' achieved via Cholesky decomposition, sign restrictions (Rubio-Ramirez,
#' Waggoner and Zha, 2010), and zero and sign restrictions (Arias,
#' Rubio-Ramirez and Waggoner, 2018).
#'
#' Identification can be performed via Cholesky decomposition, sign
#' restrictions, or zero and sign restrictions. The algorithm
#' for generating suitable sign restrictions follows Rubio-Ramirez, Waggoner
#' and Zha (2010), while the one for zero and sign restrictions follows
#' Arias, Rubio-Ramirez and Waggoner (2018).
#' Note the possibility of finding no suitable zero/sign restrictions.
#'
#' @param horizon Integer scalar. The horizon for which impulse responses
#' (and FEVDs) should be computed. Note that the first period corresponds to
#' impacts, i.e. contemporaneous effects.
#' @param fevd Logical scalar. Whether or not forecast error variance
#' decompositions should be calculated.
#' @param identification Logical scalar. Whether or not the shocks used for
#' calculating impulses should be identified. Defaults to \code{TRUE}, i.e.
#' identification via Cholesky decomposition of the VCOV-matrix unless
#' \emph{sign_restr} is provided.
#' @param sign_restr Numeric matrix. Elements inform about expected impacts
#' of certain shocks. Can be either \eqn{1}, \eqn{-1} or \eqn{0} depending
#' on whether a positive, a negative or no contemporaneous effect of a
#' certain shock is expected. Elements set to \eqn{NA} indicate that there are
#' no particular expectations for the contemporaneous effects. The default
#' value is \code{NULL}. Note that in order to be fully identified at least
#' \eqn{M * (M - 1) / 2} restrictions have to be set and a maximum of
#' \eqn{M - j} zero restrictions can be imposed on the \eqn{j}'th column.
#' @param sign_lim Integer scalar. Maximum number of tries to find suitable
#' matrices for fitting sign or zero and sign restrictions.
#'
#' @return Returns a named list of class \code{bv_irf} with options for
#' \code{\link{bvar}}, \code{\link{irf.bvar}} or \code{\link{fevd.bvar}}.
#'
#' @references
#' Rubio-Ramirez, J. F. and Waggoner, D. F. and Zha, T. (2010) Structural
#' Vector Autoregressions: Theory of Identification and Algorithms for
#' Inference. \emph{The Review of Economic Studies}, \bold{77}, 665-696,
#' \doi{10.1111/j.1467-937X.2009.00578.x}.
#' Arias, J. E. and Rubio-Ramirez, J. F. and Waggoner, D. F. (2018)
#' Inference Based on Structural Vector Autoregressions Identified with
#' Sign and Zero Restrictions: Theory and Applications.
#' \emph{Econometrica}, \bold{86}, 2, 685-720,
#' \doi{10.3982/ECTA14468}.
#'
#' @seealso \code{\link{irf.bvar}}; \code{\link{plot.bvar_irf}}
#'
#' @keywords BVAR irf fevd settings
#'
#' @export
#'
#' @examples
#' # Set impulse responses to a horizon of 20 time periods and enable FEVD
#' # (Identification is performed via Cholesky decomposition)
#' bv_irf(horizon = 20, fevd = TRUE)
#'
#' # Set up structural impulse responses using sign restrictions
#' signs <- matrix(c(1, NA, NA, -1, 1, -1, -1, 1, 1), nrow = 3)
#' bv_irf(sign_restr = signs)
#'
#' # Set up structural impulse responses using zero and sign restrictions
#' zero_signs <- matrix(c(1, 0, NA, -1, 1, 0, -1, 1, 1), nrow = 3)
#' bv_irf(sign_restr = zero_signs)
#'
#' # Prepare to estimate unidentified impulse responses
#' bv_irf(identification = FALSE)
bv_irf <- function(
  horizon = 12,
  fevd = FALSE,
  identification = TRUE,
  sign_restr = NULL,
  sign_lim = 1000) {

  # Input checks
  horizon <- int_check(horizon, min = 1, max = 1e6,
    msg = "Invalid value for horizon (outside of [1, 1e6]).")
  sign_lim <- int_check(sign_lim, min = 100, max = Inf,
    msg = "Invalid value for sign_lim (outside of [100, Inf]).")

  if(!is.logical(c(identification, fevd))) {
    stop("Please provide fevd and identification as logical scalars.")
  }

  zero <- FALSE # Zero or sign restrictions
  if(identification) {
    if(!is.null(sign_restr)) {
      restr_len <- length(sign_restr)
      if(!is.numeric(sign_restr) || !all(sign_restr %in% c(-1, 0, NA, 1)) ||
        sqrt(restr_len) %% 1 != 0) {
        stop("Please provide sign_restr as a numeric square matrix ",
          "containing NAs, 1s and -1s (and 0s for zero restrictions).")
      }
      if(0 %in% sign_restr) {zero <- TRUE}
      if(is.vector(sign_restr)) {
        sign_restr <- matrix(sign_restr, nrow = sqrt(restr_len))
      }
      if(zero && any(colSums(sign_restr == 0, na.rm = TRUE) >
        rev(seq_len(sqrt(restr_len)) - 1))) {
        stop("Number of zero restrictions on at least one of the shocks is ",
          "too high. Please reduce or change the order of variables.")
      }
      if(sum(!is.na(sign_restr)) <
        (sqrt(restr_len) - 1) * sqrt(restr_len) / 2) {
        message("Number of restrictions implies an underidentified system.")
      }
    } # Cholesky
  }

  # Outputs
  out <- list("horizon" = horizon, "fevd" = fevd,
    "identification" = identification, "sign_restr" = sign_restr,
    "zero" = zero, "sign_lim" = sign_lim)
  class(out) <- "bv_irf"

  return(out)
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/60_irf_setup.R
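The counting rules described in the documentation above can be checked directly. A small sketch for M = 3, reusing the sign matrix from the examples (all values illustrative):

M <- 3
signs <- matrix(c(1, NA, NA, -1, 1, -1, -1, 1, 1), nrow = 3)
sum(!is.na(signs)) >= M * (M - 1) / 2                # TRUE - at least 3 restrictions set
colSums(signs == 0, na.rm = TRUE) <= M - seq_len(M)  # per-column zero-restriction caps

These are the same conditions `bv_irf()` enforces via its underidentification message and the error on excessive zero restrictions.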
#' Impulse response draws
#'
#' Computes impulse responses using the posterior draws of the VAR coefficients
#' and VCOV-matrix obtained from \code{\link{draw_post}}.
#'
#' @param beta_comp Numeric matrix. Posterior draw of the VAR coefficients in
#' state space representation.
#' @param sigma Numeric matrix. Posterior draw of the VCOV-matrix of the
#' model.
#' @param sigma_chol Numeric matrix. Lower part of the Cholesky decomposition
#' of \emph{sigma}. Calculated as \code{t(chol(sigma))}.
#' @param M Integer scalar. Number of columns in \emph{Y}.
#' @param lags Integer scalar. Number of lags in the model.
#' @param horizon Integer scalar. Horizon for which impulse responses should be
#' computed. Note that the first period corresponds to impacts, i.e.
#' contemporaneous effects.
#' @param identification Logical scalar. Whether or not the shocks used for
#' calculating the impulse should be identified. Defaults to \code{TRUE},
#' meaning identification will be performed recursively through a
#' Cholesky decomposition of the VCOV-matrix as long as \emph{sign_restr}
#' is \code{NULL}. If set to \code{FALSE}, shocks will be unidentified.
#' @param sign_restr Numeric matrix. Elements inform about expected impacts
#' of certain shocks. Can be either \eqn{1}, \eqn{-1} or \eqn{0} depending
#' on whether a positive, a negative or no contemporaneous effect of a
#' certain shock is expected. Elements set to \eqn{NA} indicate that there are
#' no particular expectations for the contemporaneous effects.
#' @param zero Logical scalar. Whether to impose zero and sign restrictions,
#' following Arias et al. (2018).
#' @param sign_lim Integer scalar. Maximum number of rotational matrices to
#' draw and check for fitting sign restrictions.
#'
#' @return Returns a numeric array of impulse responses.
#'
#' @noRd
compute_irf <- function(
  beta_comp,
  sigma, sigma_chol,
  M, lags, horizon,
  identification, sign_restr, zero = FALSE, sign_lim = 10000) {

  # Identification
  if(identification) {
    sigma_chol <- t(chol(sigma))
    if(is.null(sign_restr)) {
      shock <- sigma_chol
    } else {
      shock <- sign_restr(sigma_chol = sigma_chol, sign_restr = sign_restr,
        M = M, sign_lim = sign_lim, zero = zero)
    }
  } else {shock <- sigma}

  # Impulse responses
  irf_comp <- array(0, c(M * lags, horizon, M * lags))
  irf_comp[1:M, 1, 1:M] <- shock
  for(i in seq_len(horizon)[-1]) { # Skipped entirely when horizon == 1
    irf_comp[, i, ] <- beta_comp %*% irf_comp[, i - 1, ] # Could vectorise
  }
  irf_comp <- irf_comp[1:M, , 1:M]

  return(irf_comp)
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/61_irf_compute.R
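The recursion at the heart of `compute_irf()` is easy to see in isolation: shocks enter at impact and are then propagated by repeated multiplication with the companion matrix. A hedged sketch for a toy bivariate VAR(1), where the companion matrix equals the coefficient matrix itself (all numbers illustrative):

beta_comp <- matrix(c(0.5, 0.1, 0.2, 0.4), 2, 2)
shock <- diag(2)          # stand-in for a Cholesky factor
horizon <- 4
irf <- array(0, c(2, horizon, 2))
irf[, 1, ] <- shock       # impact period
for(i in seq_len(horizon)[-1]) {
  irf[, i, ] <- beta_comp %*% irf[, i - 1, ]  # propagate one step
}
irf[, , 1]                # responses of both variables to the first shock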
#' Sign restriction algorithm
#'
#' Implements the algorithms by Rubio-Ramirez, Waggoner and Zha (2010) and
#' Arias, Rubio-Ramirez and Waggoner (2018) in order to find suitable
#' sign and/or zero restricted matrices for identification purposes.
#' Called by \code{\link{compute_irf}}; throws an error if no suitable
#' restrictions are found after \code{sign_lim} draws.
#'
#' @param sigma_chol Numeric matrix. Lower part of the Cholesky decomposition
#' of \emph{sigma}. Calculated as \code{t(chol(sigma))}.
#' @param sign_restr Numeric matrix. Elements inform about expected impacts
#' of certain shocks. Can be either \eqn{1}, \eqn{-1} or \eqn{0} depending
#' on whether a positive, a negative or no contemporaneous effect of a
#' certain shock is expected. Elements set to \eqn{NA} indicate that there are
#' no particular expectations for the contemporaneous effects.
#' @param M Integer scalar. Columns of \emph{Y}.
#' @param zero Logical scalar. Whether to impose zero and sign restrictions,
#' following Arias, Rubio-Ramirez and Waggoner (2018).
#' @param sign_lim Integer scalar. Maximum number (approximately) of rotational
#' matrices to draw and check for fitting sign restrictions.
#'
#' @return Returns a shock matrix for the computation of impulse responses
#' that is identified via sign and/or zero restrictions.
#'
#' @references
#' Rubio-Ramirez, J. F. and Waggoner, D. F. and Zha, T. (2010) Structural
#' Vector Autoregressions: Theory of Identification and Algorithms for
#' Inference. \emph{The Review of Economic Studies}, \bold{77}, 665-696,
#' \doi{10.1111/j.1467-937X.2009.00578.x}.
#' Arias, J. E. and Rubio-Ramirez, J. F. and Waggoner, D. F. (2018)
#' Inference Based on Structural Vector Autoregressions Identified with
#' Sign and Zero Restrictions: Theory and Applications.
#' \emph{Econometrica}, \bold{86}, 2, 685-720,
#' \doi{10.3982/ECTA14468}.
#'
#' @importFrom stats rnorm
#'
#' @noRd
sign_restr <- function(sigma_chol, sign_restr, M, zero = FALSE,
  sign_lim = 1000) {

  counter_outer <- 0L
  while(TRUE) { # Search for a shock until we exceed the number of tries
    if(counter_outer > sign_lim^0.5) { # Minimum of 10 tries (100^.5)
      stop(paste0("No matrix fitting the sign restrictions found after ",
        sign_lim, " tries. Consider increasing the limit via the ",
        "`sign_lim` argument of `bv_irf()` or adapting the restrictions."))
    }
    counter_outer <- counter_outer + 1L

    Q <- matrix(0, M, M) # Shock matrix
    pos_check <- logical(M) # Vector that indicates suitable shocks
    i <- 1L
    counter_inner <- 0L
    while(!all(pos_check)) {
      if(counter_inner > sign_lim^0.6) break
      counter_inner <- counter_inner + 1L # Minimum of 15 tries (100^.6)
      # Draw and check a shock
      q_i <- draw_qi(sigma_chol, sign_restr, M, i = i, zero = zero, Q = Q)
      sign_check <- check_qi(q_i, sigma_chol, sr_i = sign_restr[, i])
      if(sign_check != 0L) { # The signs are correct
        pos_check[i] <- TRUE
        Q[, i] <- q_i * sign_check # Keep or flip the sign of the shock
        i <- i + 1L
      }
    }
    if(all(pos_check)) {return(sigma_chol %*% Q)}
  }
}


#' @noRd
draw_qi <- function(sigma_chol, sign_restr, M, i, zero = FALSE, Q) {

  if(isTRUE(zero)) { # Zero-sign-restrictions
    Q <- t(Q)
    sel_row <- which(sign_restr[, i] == 0)
    R <- rbind(sigma_chol[sel_row, ], Q[seq_len(i - 1L), ])
    qr_object <- qr(t(R))
    qr_rank <- qr_object[["rank"]]
    set <- if(qr_rank == 0) {seq_len(M)} else {-seq_len(qr_rank)}
    N_i <- qr.Q(qr_object, complete = TRUE)[, set, drop = FALSE]
    N_stdn <- crossprod(N_i, rnorm(M, 0, 1))
    q_i <- N_i %*% (N_stdn / norm(N_stdn, type = "2"))
  } else { # Pure sign-restrictions
    if(i == 1) {
      x <- rnorm(M, 0, 1)
      q_i <- x / norm(x, type = "2")
    } else {
      x <- rnorm(M, 0, 1)
      QQ <- diag(M) - tcrossprod(Q)
      q_i <- QQ %*% x / norm(QQ %*% x, type = "2")
    }
  }

  return(q_i)
}


#' @noRd
check_qi <- function(q_i, sigma_chol, sr_i) {

  restricted <- which(!is.na(sr_i) & sr_i != 0)
  shock_vec <- sigma_chol %*% q_i
  shock_vec <- sign(shock_vec)

  # Return 1L for a fit, -1L for a fit with flipped signs, and 0L for a failure
  if(identical(shock_vec[restricted], sr_i[restricted])) {
    return(1L)
  } else if (identical(-shock_vec[restricted], sr_i[restricted])) {
    return(-1L)
  } else {
    return(0L)
  }
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/62_sign_restr.R
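The accept-reject idea behind the pure sign-restriction branch above can be sketched without the package internals: draw a random orthogonal matrix via the QR decomposition of a Gaussian matrix (as in Rubio-Ramirez, Waggoner and Zha, 2010) and keep the first rotation whose impact matrix matches the sign pattern. Toy inputs; the column-wise construction and sign-flipping of `sign_restr()` are omitted:

set.seed(42)
M <- 2
sigma_chol <- t(chol(matrix(c(1, 0.3, 0.3, 1), 2)))
signs <- matrix(c(1, 1, -1, 1), 2)        # required signs of the impact matrix
repeat {
  Q <- qr.Q(qr(matrix(rnorm(M * M), M)))  # random orthogonal matrix
  shock <- sigma_chol %*% Q
  if(all(sign(shock) == signs)) break     # accept the first match
}
shock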
#' Forecast error variance decomposition draws
#'
#' Computes forecast error variance decompositions (FEVDs) using the impulse
#' response draws obtained from \code{\link{compute_irf}}.
#'
#' @param irf_comp Numeric matrix. Contains a draw of impulse responses
#' obtained from \code{\link{compute_irf}}.
#' @param M Integer scalar. Columns of \emph{Y}.
#' @param horizon Integer scalar. Horizon of impulse responses and FEVDs.
#'
#' @return Returns a numeric array of FEVDs.
#'
#' @noRd
compute_fevd <- function(irf_comp, M, horizon) {

  fevd_comp <- apply(irf_comp * irf_comp, c(1, 3), cumsum)
  tmp <- matrix(0, M, M)
  for(i in seq_len(horizon)) {
    tmp <- tmp + tcrossprod(irf_comp[, i, ])
    fevd_comp[i, , ] <- fevd_comp[i, , ] * (1 / diag(tmp))
  }

  return(aperm(fevd_comp, c(2, 1, 3)))
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/63_fevd_compute.R
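The normalisation in `compute_fevd()` becomes clear for a single response variable: squared impulse responses are accumulated over the horizon and divided by the total forecast error variance, so the shares of all shocks sum to one. Toy numbers:

irf_resp <- rbind(c(0.8, 0.2),   # horizon 1: responses to shocks 1 and 2
                  c(0.4, 0.3))   # horizon 2
num <- apply(irf_resp^2, 2, cumsum)   # cumulative squared responses
den <- cumsum(rowSums(irf_resp^2))    # total variance per horizon
fevd <- num / den
rowSums(fevd)                         # 1 1 - shares sum to one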
#' Impulse response and forecast error methods for Bayesian VARs
#'
#' Retrieves / calculates impulse response functions (IRFs) and/or forecast
#' error variance decompositions (FEVDs) for Bayesian VARs generated via
#' \code{\link{bvar}}. If the object is already present and no settings are
#' supplied it is simply retrieved, otherwise it will be calculated ex-post.
#' Note that FEVDs require the presence / calculation of IRFs.
#' To store the results you may want to assign the output using the setter
#' function (\code{irf(x) <- irf(x)}). May also be used to update
#' confidence bands.
#'
#' @param x,object A \code{bvar} object, obtained from \code{\link{bvar}}.
#' Summary and print methods take in a \code{bvar_irf} / \code{bvar_fevd}
#' object.
#' @param ... A \code{bv_irf} object or arguments to be fed into
#' \code{\link{bv_irf}}. Contains settings for the IRFs / FEVDs.
#' @param n_thin Integer scalar. Every \emph{n_thin}'th draw in \emph{x} is used
#' to calculate, others are dropped.
#' @param verbose Logical scalar. Whether to print intermediate results and
#' progress.
#' @param vars_impulse,vars_response Optional numeric or character vector.
#' Used to subset the summary method's outputs to certain variables by position
#' or name (must be available). Defaults to \code{NULL}, i.e. all variables.
#' @param value A \code{bvar_irf} object to assign.
#' @inheritParams predict.bvar
#'
#' @return Returns a list of class \code{bvar_irf} including IRFs and optionally
#' FEVDs at desired confidence bands. The \code{fevd} method only returns
#' the nested \code{bvar_fevd} object.
#' The summary method returns a numeric array of impulse responses at the
#' specified confidence bands.
#'
#' @seealso \code{\link{plot.bvar_irf}}; \code{\link{bv_irf}}
#'
#' @keywords BVAR irf fevd analysis
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 600L, n_burn = 100L, verbose = FALSE)
#'
#' # Compute + store IRF with a longer horizon, no identification and thinning
#' irf(x) <- irf(x, bv_irf(horizon = 24L, identification = FALSE), n_thin = 5L)
#'
#' # Update the confidence bands of the IRFs
#' irf(x, conf_bands = c(0.01, 0.05, 0.1))
#'
#' # Recalculate with sign restrictions provided via the ellipsis
#' irf(x, sign_restr = matrix(c(1, NA, NA, -1, 1, -1, -1, 1, 1), nrow = 3))
#'
#' # Recalculate with zero and sign restrictions provided via the ellipsis
#' irf(x, sign_restr = matrix(c(1, 0, 1, NA, 1, 1, -1, -1, 1), nrow = 3))
#'
#' # Calculate the forecast error variance decomposition
#' fevd(x)
#'
#' # Get a summary of the saved impulse response function
#' summary(x)
#'
#' # Limit the summary to responses of variable #2
#' summary(x, vars_response = 2L)
#' }
irf.bvar <- function(x, ..., conf_bands, n_thin = 1L, verbose = FALSE) {

  dots <- list(...)
  irf_store <- x[["irf"]]
  verbose <- isTRUE(verbose)

  # Calculate impulse responses -----

  if(is.null(irf_store) || length(dots) != 0L) {

    # Setup ---
    start_time <- Sys.time()

    irf <- if(length(dots) > 0 && inherits(dots[[1]], "bv_irf")) {
      dots[[1]]
    } else {bv_irf(...)}

    n_pres <- x[["meta"]][["n_save"]]
    n_thin <- int_check(n_thin, min = 1, max = (n_pres / 10),
      "Issue with n_thin. Maximum allowed is n_save / 10.")
    n_save <- int_check((n_pres / n_thin), min = 1)

    Y <- x[["meta"]][["Y"]]
    N <- x[["meta"]][["N"]]
    K <- x[["meta"]][["K"]]
    M <- x[["meta"]][["M"]]
    lags <- x[["meta"]][["lags"]]
    beta <- x[["beta"]]
    sigma <- x[["sigma"]]

    # Check sign restrictions (zero restrictions are part of sign_restr)
    if(!is.null(irf[["sign_restr"]]) &&
      length(irf[["sign_restr"]]) != M ^ 2) {
      stop("Dimensions of provided restrictions do not fit the data.")
    }

    # Sampling ---
    irf_store <- structure(list(
      "irf" = array(NA, c(n_save, M, irf[["horizon"]], M)),
      "fevd" = if(irf[["fevd"]]) {
        structure(
          list("fevd" = array(NA, c(n_save, M, irf[["horizon"]], M)),
            "variables" = x[["variables"]]), class = "bvar_fevd")
      } else {NULL},
      "setup" = irf, "variables" = x[["variables"]]),
      class = "bvar_irf")

    j <- 1
    if(verbose) {
      cat("Calculating impulse responses.\n")
      pb <- txtProgressBar(min = 0, max = n_save, style = 3)
    }

    for(i in seq_len(n_save)) {
      beta_comp <- get_beta_comp(beta[j, , ], K, M, lags)
      irf_comp <- compute_irf(
        beta_comp = beta_comp, sigma = sigma[j, , ],
        M = M, lags = lags, horizon = irf[["horizon"]],
        identification = irf[["identification"]],
        sign_restr = irf[["sign_restr"]], zero = irf[["zero"]],
        sign_lim = irf[["sign_lim"]])
      irf_store[["irf"]][i, , , ] <- irf_comp
      if(irf[["fevd"]]) { # Forecast error variance decomposition
        irf_store[["fevd"]][["fevd"]][i, , , ] <- compute_fevd(
          irf_comp = irf_comp, M = M, horizon = irf[["horizon"]])
      }
      j <- j + n_thin
      if(verbose) {setTxtProgressBar(pb, i)}
    }

    if(verbose) {
      close(pb)
      timer <- Sys.time() - start_time
      cat("Finished after ", format(round(timer, 2)), ".\n", sep = "")
    }

  } # End new impulse responses

  if(is.null(irf_store[["quants"]]) || !missing(conf_bands)) {
    irf_store <- if(!missing(conf_bands)) {
      irf.bvar_irf(irf_store, conf_bands)
    } else {irf.bvar_irf(irf_store, c(0.16))}
  }

  if(irf_store[["setup"]][["fevd"]]) {
    if(is.null(irf_store[["fevd"]][["quants"]]) || !missing(conf_bands)) {
      irf_store[["fevd"]] <- if(!missing(conf_bands)) {
        fevd.bvar_irf(irf_store, conf_bands)
      } else {fevd.bvar_irf(irf_store, c(0.16))}
    }
  }

  return(irf_store)
}


#' @noRd
#' @export
`irf<-.bvar` <- function(x, value) {

  if(!inherits(x, "bvar")) {stop("Please use a `bvar` object.")}
  if(!inherits(value, "bvar_irf")) {
    stop("Please provide a `bvar_irf` object to assign.")
  }

  x[["irf"]] <- value

  return(x)
}


#' @noRd
#' @export
#'
#' @importFrom stats quantile
irf.bvar_irf <- function(x, conf_bands, ...) {

  if(!missing(conf_bands)) {
    quantiles <- quantile_check(conf_bands)
    x[["quants"]] <- apply(x[["irf"]], c(2, 3, 4), quantile, quantiles)
  }

  return(x)
}


#' @rdname irf.bvar
#' @export
fevd.bvar <- function(x, ..., conf_bands, n_thin = 1L) {

  dots <- list(...)
  irf_store <- x[["irf"]]

  vars <- x[["variables"]]
  if(is.null(vars)) {vars <- paste0("var", 1:x[["meta"]][["M"]])}

  if(is.null(irf_store[["fevd"]]) || length(dots) != 0L) {
    irf <- if(length(dots) > 0 && inherits(dots[[1]], "bv_irf")) {
      dots[[1]]
    } else {bv_irf(...)}
    irf[["fevd"]] <- TRUE
    irf_store <- irf.bvar(x, irf, n_thin = n_thin) # Recalculate
  }

  fevd_store <- fevd.bvar_irf(irf_store, conf_bands = conf_bands)

  return(fevd_store)
}


#' @noRd
#' @export
`fevd<-.bvar` <- function(x, value) {

  if(!inherits(x, "bvar")) {stop("Please use a `bvar` object.")}
  if(!inherits(value, "bvar_fevd")) {
    stop("Please provide a `bvar_fevd` object to assign.")
  }

  x[["fevd"]] <- value

  return(x)
}


#' @noRd
#' @export
fevd.bvar_irf <- function(x, conf_bands, ...) {

  if(is.null(x[["fevd"]])) {
    x[["fevd"]] <- structure(list("fevd" = array(NA, dim(x[["irf"]])),
      "variables" = x[["variables"]]), class = "bvar_fevd")
    n_save <- dim(x[["irf"]])[1]
    M <- dim(x[["irf"]])[2]
    horizon <- dim(x[["irf"]])[3]
    for(i in seq_len(n_save)) {
      irf_comp <- x[["irf"]][i, , , ]
      x[["fevd"]][["fevd"]][i, , , ] <- compute_fevd(irf_comp = irf_comp,
        M = M, horizon = horizon)
    }
  }
  fevd_store <- x[["fevd"]]

  if(is.null(fevd_store[["quants"]]) || !missing(conf_bands)) {
    fevd_store <- if(!missing(conf_bands)) {
      fevd.bvar_fevd(fevd_store, conf_bands)
    } else {fevd.bvar_fevd(fevd_store, c(0.16))}
  }

  return(fevd_store)
}


#' @noRd
#' @export
#'
#' @importFrom stats quantile
fevd.bvar_fevd <- function(x, conf_bands, ...) {

  if(!missing(conf_bands)) {
    quantiles <- quantile_check(conf_bands)
    x[["quants"]] <- apply(x[["fevd"]], c(2, 3, 4), quantile, quantiles)
    # Make 'em sum to 1 (breaks with quantiles) and keep dimension ordering
    apply_vec <- if(length(quantiles) > 1) {c(1, 2, 3)} else {c(1, 2)}
    aperm_vec <- if(length(quantiles) > 1) {c(2, 3, 4, 1)} else {c(2, 3, 1)}
    x[["quants"]] <- apply(x[["quants"]], apply_vec, function(x) {x / sum(x)})
    x[["quants"]] <- aperm(x[["quants"]], aperm_vec)
  }

  return(x)
}


#' @rdname irf.bvar
#' @export
irf <- function(x, ...) {UseMethod("irf", x)}


#' @rdname irf.bvar
#' @export
irf.default <- function(x, ...) {
  stop("No methods for class ", paste0(class(x), collapse = " / "), " found.")
}


#' @rdname irf.bvar
#' @export
`irf<-` <- function(x, value) {UseMethod("irf<-", x)}


#' @rdname irf.bvar
#' @export
fevd <- function(x, ...) {UseMethod("fevd", x)}


#' @rdname irf.bvar
#' @export
fevd.default <- function(x, ...) {
  stop("No methods for class ", paste0(class(x), collapse = " / "), " found.")
}


#' @rdname irf.bvar
#' @export
`fevd<-` <- function(x, value) {UseMethod("fevd<-", x)}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/64_irf_method.R
#' @export
print.bv_irf <- function(x, ...) {

  cat("Object with settings for computing impulse responses.\n")
  .print_irf(x, ...)

  return(invisible(x))
}


#' @export
print.bvar_irf <- function(x, ...) {

  cat("Impulse response object from `bvar()`.\n")
  .print_irf(x[["setup"]], ...)
  cat("Variables: ", dim(x[["irf"]])[2], "\n",
    "Iterations: ", dim(x[["irf"]])[1], "\n", sep = "")

  return(invisible(x))
}


#' @export
print.bvar_fevd <- function(x, digits = 4L, complete = FALSE, ...) {

  has_quants <- length(dim(x[["quants"]])) == 4
  if(has_quants) {
    bands <- dimnames(x[["quants"]])[[1]]
  }

  cat("Numeric array (dimensions ",
    paste0(dim(x[["fevd"]]), collapse = ", "),
    ") of FEVD values from a BVAR.\n", sep = "")
  if(has_quants) {
    cat("Computed confidence bands: ",
      paste(bands, collapse = ", "), "\n", sep = "")
  }

  return(invisible(x))
}


#' @noRd
.print_irf <- function(x, ...) {

  cat("Horizon:", x[["horizon"]])
  cat("\nIdentification: ")
  if(x[["identification"]]) {
    if(is.null(x[["sign_restr"]])) {
      cat("Cholesky decomposition")
    } else {
      cat("Sign restrictions", "\nChosen restrictions:\n", sep = "")
      sign_restr <- apply(x[["sign_restr"]], 2, factor,
        levels = c(-1, 0, 1), labels = c("-", "0", "+"))
      if(length(sign_restr) < 10 ^ 2) {
        cat("\t\t\tShock to\n\t\t\t", # Use cat cause it's nice
          paste0("Var", 1:nrow(sign_restr), sep = "\t"),
          paste0(c("\nResponse of\t", rep("\n\t\t", nrow(sign_restr) - 1)),
            "Var", 1:nrow(sign_restr), "\t",
            apply(sign_restr, 1, function(x) {
              paste0(" ", x, sep = "\t", collapse = "")}),
            collapse = "\n"))
      } else if(length(sign_restr) < 18 ^ 2) {
        print(sign_restr) # Print the matrix
      } else {
        cat("Too large to print.") # Skip
      }
    }
  } else {
    cat(FALSE)
  }
  cat("\nFEVD: ", x[["fevd"]], "\n", sep = "")

  return(invisible(x))
}


#' @rdname irf.bvar
#' @export
summary.bvar_irf <- function(
  object,
  vars_impulse = NULL, vars_response = NULL,
  ...) {

  if(!inherits(object, "bvar_irf")) {
    stop("Please provide a `bvar_irf` object.")
  }

  quants <- object[["quants"]]
  has_quants <- length(dim(quants)) == 4
  M <- if(has_quants) {dim(quants)[2]} else {dim(quants)[1]}

  variables <- name_deps(variables = object[["variables"]], M = M)
  pos_imp <- pos_vars(vars_impulse, variables, M)
  pos_res <- pos_vars(vars_response, variables, M)

  out <- structure(list(
    "irf" = object, "quants" = quants,
    "variables" = variables,
    "pos_imp" = pos_imp, "pos_res" = pos_res,
    "has_quants" = has_quants), class = "bvar_irf_summary")

  return(out)
}


#' @noRd
#' @export
print.bvar_irf_summary <- function(x, digits = 2L, ...) {

  if(!inherits(x, "bvar_irf_summary")) {
    stop("Please provide a `bvar_irf_summary` object.")
  }

  print.bvar_irf(x$irf)

  cat(if(!x$has_quants) {"Median impulse"} else {"Impulse"}, "responses:\n")
  for(i in x$pos_res) {for(j in x$pos_imp) {
    cat("\tShock ", x[["variables"]][j], " on ",
      x[["variables"]][i], ":\n", sep = "")
    print(round(
      if(x[["has_quants"]]) {
        x[["quants"]][, i, , j]
      } else {x[["quants"]][i, , j]}, digits = digits))
  }}

  return(invisible(x))
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/65_irf_print.R
#' Plotting method for Bayesian VAR impulse responses
#'
#' Plotting method for impulse responses obtained from \code{\link{irf.bvar}}.
#' Impulse responses of all or a subset of the available variables can be
#' plotted.
#'
#' @param x A \code{bvar_irf} object, obtained from \code{\link{irf.bvar}}.
#' @param vars_impulse,vars_response Optional numeric or character vector. Used
#' to subset the plot's impulses / responses to certain variables by position
#' or name (must be available). Defaults to \code{NULL}, i.e. all variables.
#' @param col Character vector. Colour(s) of the lines delineating credible
#' intervals. Single values will be recycled if necessary. Recycled HEX color
#' codes are varied in transparency if not provided (e.g. "#737373FF"). Lines
#' can be bypassed by setting this to \code{"transparent"}.
#' @param area Logical scalar. Whether to fill the credible intervals using
#' \code{\link[graphics]{polygon}}.
#' @param fill Character vector. Colour(s) to fill the credible intervals with.
#' See \emph{col} for more information.
#' @param variables Optional character vector. Names of all variables in the
#' object. Used to subset and title. Taken from \code{x$variables} if available.
#' @param mar Numeric vector. Margins for \code{\link[graphics]{par}}.
#' @param ... Other graphical parameters for \code{\link[graphics]{par}}.
#'
#' @return Returns \emph{x} invisibly.
#'
#' @seealso \code{\link{bvar}}; \code{\link{irf.bvar}}
#'
#' @keywords BVAR irf fevd analysis plot
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Store IRFs ex-post
#' irf(x) <- irf(x)
#'
#' # Plot impulse responses for all available variables
#' plot(irf(x))
#'
#' # Subset to impulse variables in positions 2 and 3
#' plot(irf(x), vars_impulse = c(2, 3))
#'
#' # Subset via name and increase the plotted IRF horizon
#' plot(irf(x, horizon = 20), vars_impulse = c("UNRATE", "FED"))
#'
#' # Adjust confidence bands and subset to one response variable
#' plot(irf(x, conf_bands = 0.25), vars_response = "CPI")
#'
#' # Draw areas in between the confidence bands and skip drawing lines
#' plot(irf(x), col = "transparent", area = TRUE)
#'
#' # Subset to a specific impulse and response
#' plot(irf(x), vars_response = "CPI", vars_impulse = "FED")
#' }
plot.bvar_irf <- function(
  x, vars_response = NULL, vars_impulse = NULL,
  col = "#737373", area = FALSE, fill = "#808080",
  variables = NULL, mar = c(2, 2, 2, 0.5), ...) {

  if(!inherits(x, "bvar_irf")) {stop("Please provide a `bvar_irf` object.")}

  plot_irf(x = x, vars_response = vars_response, vars_impulse = vars_impulse,
    variables = variables, mar = mar, area = area,
    col = col, fill = fill, ...)
}


#' @noRd
plot_irf <- function(
  x, vars_response = NULL, vars_impulse = NULL,
  variables = NULL, mar = c(2, 2, 2, 0.5),
  area = FALSE, col = "#737373", fill = "#808080", ...) {

  # Checks ---

  if(!inherits(x, "bvar") && !inherits(x, "bvar_irf")) {
    stop("Please provide a `bvar` or `bvar_irf` object.")
  }
  if(inherits(x, "bvar")) {x <- irf(x)}

  # Prepare data ---

  has_quants <- length(dim(x[["quants"]])) == 4L
  if(has_quants) {
    quants <- x[["quants"]]
    M <- dim(quants)[2]; P <- dim(quants)[1]
  } else {
    if(area) {message("Cannot plot area without quantiles."); area <- FALSE}
    M <- dim(x[["quants"]])[1]; P <- 1
    # Cheat day - quants must be 4-dimensional, so we fill with NAs
    quants <- array(NA, c(2, dim(x[["quants"]])))
    quants[1, , , ] <- x[["quants"]]
  }

  # Prepare other arguments ---

  variables <- name_deps(variables = if(is.null(variables)) {
    x[["variables"]]} else {variables}, M = M)

  # Sort out colours - applies alpha if they're HEX and need recycling
  col <- fill_ci_col(x = "#000000", y = col, P = P)
  if(area) {fill <- fill_ci_col(x = integer(), y = fill, P = P)}

  pos_imp <- pos_vars(vars_impulse, variables, M)
  pos_res <- pos_vars(vars_response, variables, M)

  mfrow <- c(length(pos_res), length(pos_imp))

  .plot_irf(x = quants, variables = variables,
    pos_imp = pos_imp, pos_res = pos_res,
    col = col, mar = mar, mfrow = mfrow,
    area = area, fill = fill, ...)

  return(invisible(x))
}


#' Impulse response plot
#'
#' @param x Numeric array (4-dimensional) with data to plot. The first
#' dimension contains quantiles, the second responses, the third paths and the
#' fourth impulses.
#' @param variables Character vector with the names of variables.
#' @param pos_imp Integer vector. Positions of the impulse variables to plot.
#' @param pos_res Integer vector. Positions of the response variables to plot.
#' @param col Character vector. Colours to feed to \code{\link[stats]{ts.plot}}.
#' @param mar Numeric vector. Margins for \code{\link[graphics]{par}}.
#' @param mfrow Numeric vector. Layout for \code{\link[graphics]{par}}.
#' @param area Logical scalar. Whether to draw polygons between intervals.
#' @param fill Character vector. Colours for \code{\link[graphics]{polygon}}.
#' @param ... Other graphical parameters for \code{\link[graphics]{par}}.
#'
#' @importFrom graphics par grid abline polygon lines
#' @importFrom stats ts.plot ts
#'
#' @noRd
.plot_irf <- function(
  x, variables,
  pos_imp, pos_res,
  col, mar, mfrow,
  area = FALSE, fill, ...) {

  if(area) {
    P <- dim(x)[1]
    x_vals <- c(seq(dim(x)[3]), rev(seq(dim(x)[3])))
  }
  mid <- length(col) %/% 2 + 1

  op <- par(mfrow = mfrow, mar = mar, ...)
  for(i in pos_res) {
    for(j in pos_imp) {
      ts.plot(t(as.matrix(x[, i, , j])),
        col = col, lty = 1,
        main = paste("Shock", variables[j], "on", variables[i]))
      # Fill areas
      if(area) {for(k in seq(P - 1)) {
        polygon(y = c(x[k, i, , j], rev(x[k + 1, i, , j])), x = x_vals,
          col = fill[k], border = NA)
      }}
      grid()
      abline(h = 0, lty = "dashed", col = "gray")
      lines(x[mid, i, , j], col = col[mid])
    }
  }
  par(op)
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/68_irf_plot.R
# #' Plotting method for Bayesian VAR forecast error variance decompositions
# #'
# #' Plotting method for forecast error variance decompositions obtained from
# #' \code{\link{fevd.bvar}}. Forecast error variance decompositions of all or a
# #' subset of the available variables can be plotted.
# #'
# #' @param x A \code{bvar_fevd} object, obtained from \code{\link{fevd.bvar}}.
# #' @param vars Optional numeric or character vector. Used to subset the plot's
# #' forecast error variance decompositions to certain variables by position
# #' or name (must be available). Defaults to \code{NULL}, i.e. all variables.
# #' @param variables Optional character vector. Names of all variables in the
# #' object. Used to subset and title. Taken from \code{x$variables} if available.
# #' @param mar Numeric vector. Margins for \code{\link[graphics]{par}}.
# #' @param ... Other graphical parameters for \code{\link[graphics]{par}}.
# #'
# #' @return Returns \emph{x} invisibly.
# #'
# #' @seealso \code{\link{bvar}}; \code{\link{fevd.bvar}}
# #'
# #' @keywords BVAR fevd irf analysis plot
# #'
# #' @export
# #'
# #' @examples
# #' \donttest{
# #' # Access a subset of the fred_qd dataset
# #' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
# #' # Transform it to be stationary
# #' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
# #'
# #' # Estimate a BVAR using one lag, default settings and very few draws
# #' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
# #'
# #' # Store FEVDs ex-post
# #' fevd(x) <- fevd(x)
# #'
# #' # Plot FEVDs for all available variables
# #' plot(fevd(x))
# #'
# #' # Subset to FEVDs in positions 2 and 3 via their name
# #' plot(fevd(x), vars = c(2, 3))
# #'
# #' # Subset via position and increase the plotted FEVD horizon
# #' plot(fevd(x, horizon = 20), vars = c("UNRATE", "FED"))
# #' }
# plot.bvar_fevd <- function(
#   x,
#   vars = NULL,
#   variables = NULL,
#   mar = c(2, 2, 2, 0.5),
#   ...) {
#
#   if(!inherits(x, "bvar_fevd")) {stop("Please provide a `bvar_fevd` object.")}
#
#   plot_fevd(x = x,
#     vars = vars, variables = variables,
#     mar = mar, ...)
# }
#
# #' @noRd
# plot_fevd <- function(
#   x,
#   vars = NULL,
#   variables = NULL,
#   mar = c(2, 2, 2, 0.5),
#   ...) {
#
#   # Checks ---
#   if(!inherits(x, "bvar") && !inherits(x, "bvar_fevd")) {
#     stop("Please provide a `bvar` or `bvar_fevd` object.")
#   }
#   if(inherits(x, "bvar")) {x <- fevd(x)}
#
#   # Prepare data ---
#   M <- dim(x[["fevd"]])[2]
#   fevd_mean <- apply(x[["fevd"]], c(2, 3, 4), mean)
#
#   # Prepare other arguments ---
#   variables <- name_deps(variables = if(is.null(variables)) {
#     x[["variables"]]} else {variables}, M = M)
#   pos <- pos_vars(vars, variables, M)
#   mfrow <- c(length(pos), 1)
#
#   .plot_fevd(x = fevd_mean, variables = variables,
#     pos = pos,
#     mar = mar, mfrow = mfrow, ...)
#
#   return(invisible(x))
# }
#
# #' Forecast error variance decomposition plot
# #'
# #' @param x Numeric array (3-dimensional) with data to plot. The first
# #' dimension contains responses, the second paths and the third impulses.
# #' @param variables Character vector with the names of variables.
# #' @param pos Integer vector. Positions of the forecast error variance
# #' decompositions to plot.
# #' @param mar Numeric vector. Margins for \code{\link[graphics]{par}}.
# #' @param mfrow Numeric vector. Layout for \code{\link[graphics]{par}}.
# #' @param ... Other graphical parameters for \code{\link[graphics]{par}}.
# #'
# #' @importFrom graphics par barplot
# #'
# #' @noRd
# .plot_fevd <- function(
#   x,
#   variables,
#   pos,
#   mar, mfrow,
#   ...) {
#
#   M <- dim(x)[1]
#   if(M > 5) M <- 5 # think of another way to make legend fitting
#   h <- dim(x)[2]
#
#   op <- par(mfrow = mfrow, mar = mar, ...)
#   for(i in pos) {
#     barplot(t(as.matrix(x[i, , ])),
#       main = paste("Forecast error variance decomposition of",
#         variables[i]),
#       legend.text = variables,
#       args.legend = list(x = "bottom", inset = c(0, -0.25),
#         bty = "n", ncol = M))
#   }
#   par(op)
# }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/69_fevd_plot.R
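Although the plotting method above is commented out, the underlying idea (averaging the FEVD draws and stacking the shares per horizon) is simple to sketch independently. Hypothetical shares, not package internals:

fevd_mean <- rbind(c(0.9, 0.1), c(0.7, 0.3), c(0.6, 0.4))  # horizon x shock
barplot(t(fevd_mean), names.arg = seq_len(nrow(fevd_mean)),
  legend.text = c("shock 1", "shock 2"),
  main = "Forecast error variance decomposition (sketch)")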
#' Historical decomposition
#'
#' Function to compute a historical variance decomposition of a VAR.
#'
#' @param x A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param type Character scalar. Whether to use mean or quantile values.
#' @param ... Not used.
#'
#' @return Returns a numerical array (time, variable, shock) with the results
#' of the historical decomposition.
#'
#' @keywords BVAR historical decomposition hd
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 600L, n_burn = 100L, verbose = FALSE)
#'
#' # Compute historical decomposition
#' hist_decomp(x, type = "mean")
#' }
hist_decomp.bvar <- function(x, type = c("mean", "quantile"), ...) {

  type <- match.arg(type)

  # Necessary quantities
  shock <- matrix(0, x[["meta"]][["K"]] - 1, x[["meta"]][["M"]])
  shock[seq(x[["meta"]][["M"]]), ] <- t(chol(vcov(x, type = type)))
  comp <- companion(x, type = type)
  eps <- solve(shock[seq(x[["meta"]][["M"]]), ], t(residuals(x, type = type)))

  out <- compute_hd(x, shock, comp, eps)

  return(out)
}


#' @rdname hist_decomp.bvar
#' @export
hist_decomp <- function(x, ...) {UseMethod("hist_decomp", x)}


#' @rdname hist_decomp.bvar
#' @export
hist_decomp.default <- function(x, ...) {
  stop("No methods for class ", paste0(class(x), collapse = " / "), " found.")
}


#' Historical variance decomposition draws
#'
#' Computes a historical variance decomposition (HD) using draws of the
#' orthogonalized vcov-matrix (currently retrieved with Cholesky decomposition).
#'
#' @param x A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param shock Numeric matrix with the shocks.
#' @param comp Numeric matrix with the coefficients in companion form.
#' @param eps Numeric matrix of the inverse shock times residuals.
#' @param lags,N,M,K Integer scalar. Dimensions of the VAR.
#'
#' @return Returns a numeric array of HDs.
#'
#' @noRd
compute_hd <- function(x, shock, comp, eps,
  lags = x[["meta"]][["lags"]], N = x[["meta"]][["N"]],
  M = x[["meta"]][["M"]], K = x[["meta"]][["K"]]) {

  kind_of_I <- cbind(diag(M), matrix(0, M, (lags - 1) * M))
  hd_decomp <- array(0, dim = c(M, N + 1, M))
  tmp <- array(0, dim = c(K - 1, N + 1, M))

  for(j in seq_len(dim(hd_decomp)[3])) { # Over variables
    eps_tmp <- matrix(0, M, N + 1)
    eps_tmp[j, seq(2, N + 1)] <- eps[j, ]
    for(i in seq.int(2, dim(hd_decomp)[2])) { # Over observations
      tmp[, i, j] <- shock %*% eps_tmp[, i] + comp %*% tmp[, i - 1, j]
      hd_decomp[, i, j] <- kind_of_I %*% tmp[, i, j]
    }
  }

  return(aperm(hd_decomp, c(2, 3, 1)))
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/70_hist_decomp.R
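The recursion in `compute_hd()` propagates each structural shock separately through the companion dynamics; since the system is linear, the per-shock contributions add up to the series itself. A self-contained sketch for a toy VAR(1) without intercept, where the companion matrix equals the coefficient matrix (all values illustrative):

comp <- matrix(c(0.5, 0.1, 0.2, 0.4), 2, 2)  # companion = coefficients
shock <- diag(2)                             # impact matrix
eps <- matrix(rnorm(2 * 5), 2, 5)            # structural shocks
y <- matrix(0, 2, 6)
hd <- array(0, c(2, 6, 2))                   # variable x time x shock
for(i in 2:6) {
  y[, i] <- shock %*% eps[, i - 1] + comp %*% y[, i - 1]
  for(j in 1:2) {                            # contribution of shock j alone
    e_j <- replace(numeric(2), j, eps[j, i - 1])
    hd[, i, j] <- shock %*% e_j + comp %*% hd[, i - 1, j]
  }
}
all.equal(y, hd[, , 1] + hd[, , 2])          # TRUE - contributions sum up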
#' Model fit in- and out-of-sample
#'
#' Functions to compute the root mean squared error and log predictive scores.
#'
#' @param x A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param holdout Optional numeric matrix or dataframe. Used for the
#' out-of-sample fit.
#' @param n_thin Integer scalar. Every \emph{n_thin}'th draw in \emph{x} is used
#' to calculate, others are dropped.
#' @param ... Not used.
#'
#' @return Returns a numeric vector or matrix with measures of model fit.
#'
#' @keywords BVAR RMSE LPS
#'
#' @export
#'
#' @importFrom stats dnorm resid
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data[seq(1, nrow(data) - 5), ], lags = 1,
#'   n_draw = 600L, n_burn = 100L, verbose = FALSE)
#'
#' # Compute RMSE
#' rmse(x)
#' lps(x, holdout = data[seq(nrow(data) - 4, nrow(data)), ])
#' }
rmse.bvar <- function(x, holdout, ...) {

  if(missing(holdout)) { # In-sample
    apply(resid(x, type = "mean"), 2,
      function(r) sqrt(sum(r^2) / length(r)))
  } else { # Out-of-sample
    fit <- apply(predict(x, horizon = NROW(holdout))$fcast, c(2, 3), mean)
    err <- fit - holdout
    apply(err, 2, function(r) sqrt(sum(r^2) / length(r)))
  }
}


#' @rdname rmse.bvar
#' @export
lps.bvar <- function(x, holdout, n_thin = 1L, ...) {

  n_pres <- x[["meta"]][["n_save"]]
  n_thin <- int_check(n_thin, min = 1, max = (n_pres / 10),
    "Issue with n_thin. Maximum allowed is (n_draw - n_burn) / 10.")
  n_save <- int_check((n_pres / n_thin), min = 1)

  if(missing(holdout)) { # In-sample
    # We need the mean and sd from these fits
    # To-do: adapt `resid`
    fit <- array(NA, dim = c(n_save, x[["meta"]][["N"]], x[["meta"]][["M"]]))
    i <- 1
    for(s in sample(n_pres, n_save)) {
      fit[i, , ] <- x[["meta"]][["X"]] %*% x[["beta"]][s, , ]
      i <- i + 1L
    }
    Y <- x[["meta"]][["Y"]]
  } else { # Out-of-sample
    fit <- predict(x, horizon = NROW(holdout))$fcast
    Y <- holdout
  }

  mu <- apply(fit, c(2, 3), mean)
  sd <- apply(fit, c(2, 3), sd)
  lps <- matrix(NA, NROW(mu), NCOL(mu))
  for(j in seq_len(ncol(lps))) {
    lps[, j] <- dnorm(Y[, j] - mu[, j], sd = sd[, j], log = TRUE)
  }

  return(lps)
}


#' @rdname rmse.bvar
#' @export
rmse <- function(x, ...) {UseMethod("rmse", x)}


#' @rdname rmse.bvar
#' @export
rmse.default <- function(x, ...) {
  stop("No methods for class ", paste0(class(x), collapse = " / "), " found.")
}


#' @rdname rmse.bvar
#' @export
lps <- function(x, ...) {UseMethod("lps", x)}


#' @rdname rmse.bvar
#' @export
lps.default <- function(x, ...) {
  stop("No methods for class ", paste0(class(x), collapse = " / "), " found.")
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/71_rmse.R
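The in-sample RMSE above is just the column-wise root mean squared residual; a hand-rolled equivalent on toy residuals:

r <- matrix(rnorm(20), 10, 2)
apply(r, 2, function(e) sqrt(sum(e^2) / length(e)))
sqrt(colMeans(r^2))  # identical, written more compactly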
#' Methods for \pkg{coda} Markov chain Monte Carlo objects
#'
#' Methods to convert parameter and/or coefficient draws from \code{\link{bvar}}
#' to \pkg{coda}'s \code{\link[coda]{mcmc}} (or \code{\link[coda]{mcmc.list}})
#' format for further processing.
#'
#' @name coda
#'
#' @param x A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param vars Character vector used to select variables. Elements are matched
#' to hyperparameters or coefficients. Coefficients may be matched based on
#' the dependent variable (by providing the name or position) or the
#' explanatory variables (by providing the name and the desired lag). See the
#' example section for a demonstration. Defaults to \code{NULL}, i.e. all
#' hyperparameters.
#' @param vars_response,vars_impulse Optional character or integer vectors used
#' to select coefficients. Dependent variables are specified with
#' \emph{vars_response}, explanatory ones with \emph{vars_impulse}. See the
#' example section for a demonstration.
#' @param chains List with additional \code{bvar} objects. If provided,
#' an object of class \code{\link[coda]{mcmc.list}} is returned.
#' @param ... Other parameters for \code{\link[coda]{as.mcmc}}.
#'
#' @return Returns a \pkg{coda} \code{\link[coda]{mcmc}} (or
#' \code{\link[coda]{mcmc.list}}) object.
#'
#' @seealso \code{\link{bvar}}; \code{\link[coda]{mcmc}};
#' \code{\link[coda]{mcmc.list}}
#'
#' @keywords BVAR coda MCMC analysis
#'
#' @examples
#' \donttest{
#' library("coda")
#'
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate two BVARs using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 750L, n_burn = 250L, verbose = FALSE)
#' y <- bvar(data, lags = 1, n_draw = 750L, n_burn = 250L, verbose = FALSE)
#'
#' # Convert the hyperparameter lambda
#' as.mcmc(x, vars = c("lambda"))
#'
#' # Convert coefficients for the first dependent, use chains in method
#' as.mcmc(structure(list(x, y), class = "bvar_chains"), vars = "CPIAUCSL")
#'
#' # Convert the coefs of variable three's first lag, use in the generic
#' as.mcmc(x, vars = "FEDFUNDS-lag1", chains = y)
#'
#' # Convert hyperparameters and constant coefficient values for variable 1
#' as.mcmc(x, vars = c("lambda", "CPI", "constant"))
#'
#' # Specify coefficient values to convert in alternative way
#' as.mcmc(x, vars_impulse = c("FED", "CPI"), vars_response = "UNRATE")
#' }
NULL

#' @rdname coda
as.mcmc.bvar <- function( # Dynamic export (zzz.R)
  x, vars = NULL,
  vars_response = NULL, vars_impulse = NULL,
  chains = list(),
  ...) {

  # Checks ---

  if(!inherits(x, "bvar")) {
    if(inherits(x[[1]], "bvar")) { # Allow chains to x
      chains <- x
      x <- x[[1]]
      chains[[1]] <- NULL
    } else {stop("Please provide a `bvar` object.")}
  }
  if(inherits(chains, "bvar")) {chains <- list(chains)}
  lapply(chains, function(z) {if(!inherits(z, "bvar")) {
    stop("Please provide `bvar` objects to the chains.")
  }})

  has_coda()

  # Get data and transform ---

  prep <- prep_data(x, vars = vars,
    vars_response = vars_response, vars_impulse = vars_impulse,
    chains = chains, check_chains = TRUE, Ms = TRUE, n_saves = TRUE)
  chains <- prep[["chains"]]

  if(!is.null(chains) && length(chains) > 0) {
    chains[["x"]] <- prep[["data"]]
    out <- coda::mcmc.list(lapply(chains, coda::as.mcmc, ...))
  } else {
    out <- coda::as.mcmc(prep[["data"]], ...)
  }

  return(out)
}


#' @rdname coda
as.mcmc.bvar_chains <- as.mcmc.bvar # Dynamic export (zzz.R)


#' @noRd
has_coda <- function() {has_package("coda")}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/80_coda.R
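Once draws are converted, the usual coda machinery applies. A self-contained sketch with two toy chains standing in for actual `bvar` output:

library("coda")
chain1 <- mcmc(matrix(rnorm(600), 300, 2,
  dimnames = list(NULL, c("lambda", "psi"))))
chain2 <- mcmc(matrix(rnorm(600), 300, 2,
  dimnames = list(NULL, c("lambda", "psi"))))
ml <- mcmc.list(chain1, chain2)
gelman.diag(ml)     # potential scale reduction factors (near 1 here)
effectiveSize(ml)   # effective sample size per parameter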
#' Parallel hierarchical Bayesian vector autoregression
#'
#' Wrapper for \code{\link{bvar}} to simplify parallel computation via
#' \code{\link[parallel]{parLapply}}. Make sure to properly start and stop the
#' provided cluster.
#'
#' @param cl A \code{cluster} object obtained from
#' \code{\link[parallel]{makeCluster}}.
#' @param n_runs The number of parallel runs to calculate. Defaults to the
#' length of \emph{cl}, i.e. the number of registered nodes.
#' @inheritParams bvar
#'
#' @return Returns a list of class \code{bvar_chains} with \code{bvar} objects.
#'
#' @seealso \code{\link{bvar}}; \code{\link[parallel]{parLapply}}
#'
#' @keywords BVAR Metropolis-Hastings MCMC priors hierarchical
#'
#' @export
#'
#' @examples
#' \donttest{
#' library("parallel")
#'
#' cl <- makeCluster(2L)
#'
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # A single run using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Two parallel runs
#' y <- par_bvar(cl, n_runs = 2,
#'   data = data, lags = 1, n_draw = 1000L, n_burn = 200L)
#'
#' stopCluster(cl)
#' }
#' # Plot lambda for all of the runs
#' \dontrun{
#' plot(x, type = "full", vars = "lambda", chains = y)
#'
#' # Convert the hyperparameter lambda to a coda mcmc.list object
#' coda::as.mcmc(y, vars = "lambda")
#' }
par_bvar <- function(
  cl, n_runs = length(cl),
  data, lags,
  n_draw = 10000L, n_burn = 5000L, n_thin = 1L,
  priors = bv_priors(), mh = bv_mh(),
  fcast = NULL, irf = NULL) {

  # Checks ---

  if(!inherits(cl, "cluster")) {stop("Please provide a `cluster` object.")}
  # Maybe check whether it is actually loaded
  has_parallel()

  # Data
  if(!all(vapply(data, is.numeric, logical(1))) ||
    any(is.na(data)) || ncol(data) < 2) {
    stop("Problem with the data. Make sure it is numeric, without any NAs.")
  }
  Y <- as.matrix(data)

  # Integers
  lags <- int_check(lags, min = 1L, max = nrow(Y))
  n_draw <- int_check(n_draw, min = 1L)
  n_burn <- int_check(n_burn, min = 0L, max = n_draw - 1L,
    msg = "Issue with n_burn. Is n_burn < n_draw?")
  n_thin <- int_check(n_thin, min = 1L, max = ((n_draw - n_burn) / 10),
    msg = "Issue with n_thin. Maximum allowed is (n_draw - n_burn) / 10.")
  n_save <- int_check(((n_draw - n_burn) / n_thin), min = 1)

  # Constructors, required
  if(!inherits(priors, "bv_priors")) {
    stop("Please use `bv_priors()` to configure the priors.")
  }
  if(!inherits(mh, "bv_metropolis")) {
    stop("Please use `bv_mh()` to configure the Metropolis-Hastings step.")
  }
  # Not required
  if(!is.null(fcast) && !inherits(fcast, "bv_fcast")) {
    stop("Please use `bv_fcast()` to configure forecasts.")
  }
  if(!is.null(irf) && !inherits(irf, "bv_irf")) {
    stop("Please use `bv_irf()` to configure impulse responses.")
  }

  if(mh[["adjust_acc"]]) {n_adj <- as.integer(n_burn * mh[["adjust_burn"]])}

  # Get several BVARs ---

  out <- parallel::parLapply(cl, rep(list(data), n_runs),
    function(data, ...) {
      # This guy is spawned all alone, we need to load BVAR
      library("BVAR")
      bvar(data = data, ..., verbose = FALSE)
    }, lags = lags,
    n_draw = n_draw, n_burn = n_burn, n_thin = n_thin,
    priors = priors, mh = mh, fcast = fcast, irf = irf)

  class(out) <- "bvar_chains"

  return(out)
}


#' @noRd
has_parallel <- function() {has_package("parallel")}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/81_parallel.R
#' FRED transformation and subset helper #' #' Apply transformations given by FRED-MD or FRED-QD and generate rectangular #' subsets. See \code{\link{fred_qd}} for information on data and the Details #' section for information on the transformations. Call without arguments to #' retrieve available codes / all FRED suggestions. #' #' FRED-QD and FRED-MD include a transformation code for every variable. All #' codes are provided in \code{system.file("fred_trans.csv", package = "BVAR")}. #' The transformation codes are as follows: #' \enumerate{ #' \item \code{1} - no transformation; #' \item \code{2} - first differences - \eqn{\Delta x_t}{Delta x}; #' \item \code{3} - second differences - \eqn{\Delta^2 x_t}{Delta2 x}; #' \item \code{4} - log transformation - \eqn{\log x_t}{log x}; #' \item \code{5} - log differences - #' \eqn{\Delta \log x_t}{Delta log x}; #' \item \code{6} - log second differences - #' \eqn{\Delta^2 \log x_t}{Delta2 log x}; #' \item \code{7} - percent change differences - #' \eqn{\Delta x_t / x_{t-1} - 1}{Delta x / lag-x - 1}; #' } #' Note that the transformation codes of FRED-MD and FRED-QD may differ for #' the same series. #' #' @param data A \code{data.frame} with FRED-QD or FRED-MD time series. The #' column names are used to find the correct transformation. #' @param type Character scalar. Whether \emph{data} stems from the FRED-QD or #' the FRED-MD database. #' @param codes Integer vector. Transformation code(s) to apply to \emph{data}. #' Overrides automatic lookup of transformation codes. #' @param na.rm Logical scalar. Whether to subset to rows without any #' \code{NA} values. A warning is thrown if rows are non-sequential. #' @param lag Integer scalar. Number of lags to apply when taking differences. #' See \code{\link[base]{diff}}. #' @param scale Numeric scalar. Scaling to apply to log differences. #' @param vars Character vector. Names of the variables to look for. #' @param table Logical scalar. Whether to return a table of matching #' transformation codes instead of just the codes. #' #' @return \code{\link{fred_transform}} returns a \code{data.frame} object with #' applied transformations. \code{\link{fred_code}} returns transformation #' codes, or a \code{data.frame} of matching transformation codes. #' #' @seealso \code{\link{fred_qd}} #' #' @keywords datasets FRED #' #' @export #' #' @examples #' # Transform a subset of FRED-QD #' fred_transform(fred_qd[, c("GDPC1", "INDPRO", "FEDFUNDS")]) #' #' # Get info on transformation codes for unemployment variables #' fred_code("UNRATE", table = TRUE) #' #' # Get the transformation code for GDPC1 #' fred_code("GDPC1", type = "fred_qd") #' #' # Transform all of FRED-MD #' \dontrun{ #' fred_transform(fred_md, type = "fred_md") #' } fred_transform <- function( data, type = c("fred_qd", "fred_md"), codes, na.rm = TRUE, lag = 1L, scale = 100) { if(missing(data)) { return(structure(1:7, names = c("none", "1st-diff", "2nd-diff", "log", "log-diff", "log-2nd-diff", "pct-ch-diff"))) } # Data if(!all(vapply(data, is.numeric, logical(1))) || !is.data.frame(data)) { stop("Problem with the data. 
Please provide a numeric data.frame.")
  }
  data <- as.data.frame(data) # Deal with tibbles and close #60
  vars <- colnames(data)
  rows <- rownames(data)

  if(!missing(codes)) {
    codes <- vapply(codes, int_check, min = 1L, max = 7L,
      msg = "Invalid value for code (outside of [1, 7]).", integer(1L))
    if(length(codes) != ncol(data)) {
      stop("Please provide one transformation code per column.")
    }
  } else {
    codes <- fred_code(paste0("^", vars, "$"), type = type)
  }

  data <- vapply(seq(ncol(data)), function(i, codes, data) {
    get_transformation(codes[i], lag = lag, scale = scale)(data[, i])
  }, codes = codes, data = data, FUN.VALUE = numeric(nrow(data)))

  na_rows <- apply(data, 1, function(x) sum(is.na(x)))
  na_cols <- apply(data, 2, function(x) sum(is.na(x)))

  if(na.rm) {
    used_rows <- na_rows == 0
    if(!any(used_rows)) {
      stop("No row without NA values available. Variable ",
        vars[which.max(na_cols)], " is problematic at ", max(na_cols), " NAs.")
    }
    if(!all(na_rows[used_rows] == cummax(na_rows[used_rows]))) {
      warning("Rows used to subset are not all sequential.")
    }
    data <- data[used_rows, ]
    rows <- rows[used_rows]
  }

  data <- as.data.frame(data)
  rownames(data) <- rows
  colnames(data) <- vars

  return(data)
}


#' @rdname fred_transform
#' @export
#'
#' @importFrom utils read.table
fred_code <- function(vars, type = c("fred_qd", "fred_md"), table = FALSE) {

  fred_trans <- read.table(system.file("fred_trans.csv", package = "BVAR"),
    header = TRUE, sep = ",", na.strings = c("", "NA"))
  fred_trans[, 2:3] <- lapply(fred_trans[, 2:3], factor,
    levels = c("none", "1st-diff", "2nd-diff", "log",
      "log-diff", "log-2nd-diff", "pct-ch-diff"))

  if(missing(vars)) { return(fred_trans) }

  if(!is.character(vars) || length(vars) == 0) {
    stop("Please provide named variables to look up transformation codes.")
  }

  table <- isTRUE(table)
  if(table) {
    matches <- do.call(c, sapply(vars, grep, fred_trans[["variable"]],
      simplify = FALSE))
    if(length(matches) == 0) {message("No transformation code(s) found.")}
    return(fred_trans[matches, ])
  }

  type <- match.arg(type)
  match <- vapply(vars, function(x, y) {
    out <- grep(paste0("^", x), y)
    if(length(out) == 0) {out <- NA_integer_}
    if(length(out) > 1) {
      message("Multiple matches for ", x, " found. Using the first one - ",
        "consider calling with `table = TRUE`.")
      out <- out[1]
    }
    return(out)
  }, y = fred_trans[["variable"]], FUN.VALUE = integer(1L))

  codes <- as.integer(fred_trans[match, type]) # No more factor

  if(any(is.na(codes))) {
    message("No transformation code(s) for ",
      paste0(vars[is.na(codes)], collapse = ", "),
      " found. Setting to 1 for no transformation.")
    codes[is.na(codes)] <- 1
  }

  return(codes)
}


#' @noRd
#'
#' @param code Integer scalar. Code of the requested transformation.
#' @param lag Integer scalar. Number of lags to apply.
#' @param scale Numeric scalar. Scaling to apply to log differences.
#'
#' @return Returns a function that provides the requested transformation.
#' #' @importFrom utils head get_transformation <- function(code, lag = 1L, scale = 100) { code <- int_check(code, min = 0L, max = 7L, msg = "Code not found.") lag <- int_check(lag, min = 1L, max = Inf, msg = "Issue with provided lag.") scale <- num_check(scale, min = 1e-16, max = Inf, msg = "Issue with scale.") switch(code, function(x) {x}, # No transformation function(x) { # First differences c(rep(NA, lag), diff(x, lag = lag, differences = 1L))}, function(x) { # Second differences c(rep(NA, lag * 2), diff(x, lag = lag, differences = 2L))}, function(x) {log(x)}, # Logs function(x) { # Log first differences c(rep(NA, lag), diff(log(x), lag = lag, differences = 1L)) * scale}, function(x) { # Log second differences c(rep(NA, lag * 2), diff(log(x), lag = lag, differences = 2L)) * scale}, function(x) { # Percent-change differences c(rep(NA, lag * 2), diff(x[-seq(lag)] / head(x, length(x) - lag) - 1L, lag = lag, differences = 1L)) * scale} ) }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/85_transform.R
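# A worked example of the transformation codes (a sketch, using a made-up
# variable "DEMO"): code 5 takes log differences and scales them, so with
# `lag = 1` and `scale = 100` the series c(100, 102, 101) maps to
# c(NA, 100 * log(102 / 100), 100 * log(101 / 102)), i.e. approximate
# percentage growth rates.
x <- data.frame("DEMO" = c(100, 102, 101))
fred_transform(x, codes = 5, na.rm = FALSE)
# Matches the manual calculation
c(NA, diff(log(x[["DEMO"]])) * 100)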
#' @export print.bvar <- function(x, ...) { y <- x[["meta"]] cat("Bayesian VAR consisting of", y[["N"]], "observations,", y[["M"]], "variables and", y[["lags"]], "lags.") cat("\nTime spent calculating:", format(round(y[["timer"]], 2))) cat("\nHyperparameters:", paste(x[["priors"]][["hyper"]], collapse = ", "), "\nHyperparameter values after optimisation:", paste(round(x[["optim"]][["par"]], 5), collapse = ", ")) cat("\nIterations (burnt / thinning): ", y[["n_draw"]], " (", y[["n_burn"]], " / ", y[["n_thin"]], ")", sep = "") cat("\nAccepted draws (rate): ", y[["accepted"]], " (", round(y[["accepted"]] / (y[["n_draw"]] - y[["n_burn"]]), 3), ")\n", sep = "") return(invisible(x)) }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/90_print.R
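# The acceptance rate printed above is accepted / (n_draw - n_burn). A
# quick illustrative calculation: 4200 accepted draws out of
# 15000 - 5000 = 10000 post-burn-in proposals yield a rate of 0.42.
round(4200 / (15000 - 5000), 3)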
#' Plotting method for Bayesian VARs
#'
#' Method to plot traces and densities of coefficient, hyperparameter and
#' marginal likelihood draws obtained from \code{\link{bvar}}. Several types of
#' plot are available via the argument \emph{type}, including traces, densities,
#' plots of forecasts and impulse responses.
#'
#' @param x A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param type A string with the type of plot desired. The default option
#' \code{"full"} plots both densities and traces.
#' @param vars Character vector used to select variables. Elements are matched
#' to hyperparameters or coefficients. Coefficients may be matched based on
#' the dependent variable (by providing the name or position) or the
#' explanatory variables (by providing the name and the desired lag). See the
#' example section for a demonstration. Defaults to \code{NULL}, i.e. all
#' hyperparameters.
#' @param vars_response,vars_impulse Optional character or integer vectors used
#' to select coefficients. Dependent variables are specified with
#' \emph{vars_response}, explanatory ones with \emph{vars_impulse}. See the
#' example section for a demonstration.
#' @param chains List of \code{bvar} objects. Contents are then added to trace
#' and density plots to help assess convergence.
#' @param mar Numeric vector. Margins for \code{\link[graphics]{par}}.
#' @param ... Other graphical parameters for \code{\link[graphics]{par}}.
#'
#' @return Returns \emph{x} invisibly.
#'
#' @seealso \code{\link{bvar}}; \code{\link{plot.bvar_fcast}};
#' \code{\link{plot.bvar_irf}}.
#'
#' @keywords BVAR MCMC plot analysis
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Plot full traces and densities
#' plot(x)
#'
#' # Only plot the marginal likelihood's trace
#' plot(x, "trace", "ml")
#'
#' # Access IRF and forecast plotting functions
#' plot(x, type = "irf", vars_response = 2)
#' plot(x, type = "fcast", vars = 2)
#' }
plot.bvar <- function(
  x,
  type = c("full", "trace", "density", "irf", "fcast"),
  vars = NULL, vars_response = NULL, vars_impulse = NULL,
  chains = list(),
  mar = c(2, 2, 2, 0.5), ...) {

  if(!inherits(x, "bvar")) {
    if(inherits(x[[1]], "bvar")) { # Allow passing a list of chains as x
      chains <- x
      x <- x[[1]]
      chains[[1]] <- NULL
    } else {stop("Please provide a `bvar` object.")}
  }

  type <- match.arg(type)

  # Forward and return if "irf" or "fcast"
  if(type == "irf") {
    if(is.null(x[["irf"]])) {message("No `bvar_irf` found. Calculating...")}
    return(plot.bvar_irf(
      irf(x), vars_response = vars_response, vars_impulse = vars_impulse,
      variables = x[["variables"]], mar = mar, ...))
  }
  if(type == "fcast") {
    if(is.null(x[["fcast"]])) {message("No `bvar_fcast` found. 
Calculating...")} return(plot.bvar_fcast( predict(x), vars = vars, variables = x[["variables"]], mar = mar, ...)) } if(inherits(chains, "bvar")) {chains <- list(chains)} lapply(chains, function(x) {if(!inherits(x, "bvar")) { stop("Please provide `bvar` objects to the chains parameter.") }}) # Get data and plot ------------------------------------------------------- prep <- prep_data(x, vars = vars, vars_response = vars_response, vars_impulse = vars_impulse, chains, check_chains = FALSE) .plot_bvar(prep[["data"]], type, prep[["vars"]], prep[["chains"]], prep[["bounds"]], mar, ...) return(invisible(x)) } #' @export plot.bvar_chains <- plot.bvar #' @rdname .plot_trace #' @noRd #' #' @importFrom graphics par .plot_bvar <- function( x, type = c("full", "trace", "density"), vars = NULL, chains = list(), bounds = NULL, mar = c(2, 2, 2, 0.5), ...) { # Plot --- op <- par(mfrow = c(length(vars), if(type == "full") {2} else {1}), mar = mar, ...) for(i in seq_len(ncol(x))) { if(type != "density") { # i.e. full or trace .plot_trace(x[, i], name = vars[i], bounds = bounds[, i], dots = lapply(chains, function(x) {x[, i]})) } if(type != "trace") { # i.e. full or density .plot_dens(x[, i], name = vars[i], bounds = bounds[, i], dots = lapply(chains, function(x) {x[, i]})) } } par(op) return(invisible(x)) } #' Trace & density plot #' #' @param x Numeric vector to plot. #' @param name Optional string with the plotted parameter's name. #' @param bounds Optional numeric vector plotted horizontally via #' \code{\link[graphics]{abline}}. #' @param dots Optional list of numeric vectors to add to the plot. #' #' @importFrom graphics plot polygon lines abline #' @importFrom stats density #' #' @noRd .plot_trace <- function(x, name = NULL, bounds = NULL, dots = list()) { ylim <- c(min(vapply(dots, min, double(1)), x), max(vapply(dots, max, double(1)), x)) plot(x, type = "l", xlab = "Index", ylab = "Value", ylim = ylim, main = paste0("Trace", if(!is.null(name)) {paste(" of", name)})) for(dot in dots) {lines(dot, col = "lightgray")} lines(x) abline(h = bounds, lty = "dashed", col = "darkgray") return(invisible(x)) } #' @rdname .plot_trace #' @noRd .plot_dens <- function(x, name = NULL, bounds = NULL, dots = list()) { xlim <- c(min(vapply(dots, min, double(1)), x), max(vapply(dots, max, double(1)), x)) ylim <- c(0, max(vapply(lapply(dots, function(x) density(x)[["y"]]), max, double(1)), density(x)[["y"]])) plot(density(x), xlim = xlim, ylim = ylim, main = paste0("Density", if(!is.null(name)) {paste(" of", name)})) polygon(density(x), col = "#CCCCCC33", border = NA) for(dot in dots) { dens <- density(dot) polygon(dens, col = "#CCCCCC33", border = NA) lines(dens) } lines(density(x)) abline(v = bounds, lty = "dashed", col = "darkgray") return(invisible(x)) }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/91_plot.R
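# Sketch of the chain-overlay mechanism above (assumes `run` and `run_par`
# exist, e.g. from `bvar()` and `par_bvar()`): extra chains are drawn in
# light gray behind the main chain's trace and density, so diverging
# chains stand out visually.
plot(run, type = "full", vars = "lambda", chains = run_par)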
#' Coefficient and VCOV methods for Bayesian VARs
#'
#' Retrieves coefficient / variance-covariance values from Bayesian VAR models
#' generated with \code{\link{bvar}}. Note that coefficients are available for
#' every stored draw and one may retrieve (1) credible intervals via the
#' \emph{conf_bands} argument, or (2) means via the \emph{type} argument.
#'
#' @param object A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param type Character scalar. Whether to return quantile or mean values.
#' Note that \emph{conf_bands} is ignored for mean values.
#' @param conf_bands Numeric vector of confidence bands to apply.
#' E.g. for bands at 5\%, 10\%, 90\% and 95\% set this to \code{c(0.05, 0.1)}.
#' Note that the median, i.e. 0.5, is always included.
#' @param companion Logical scalar. Whether to retrieve the companion matrix of
#' coefficients. See \code{\link{companion.bvar}}.
#' @param ... Not used.
#'
#' @return Returns a numeric array of class \code{bvar_coefs} or
#' \code{bvar_vcovs} at the specified values.
#'
#' @seealso \code{\link{bvar}}; \code{\link{companion.bvar}}
#'
#' @keywords BVAR analysis
#'
#' @export
#'
#' @importFrom stats coef vcov
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Get coefficient values at the 10%, 50% and 90% quantiles
#' coef(x, conf_bands = 0.10)
#'
#' # Only get the median of the variance-covariance matrix
#' vcov(x, conf_bands = 0.5)
#' }
coef.bvar <- function(
  object, type = c("quantile", "mean"), conf_bands = 0.5,
  companion = FALSE, ...) {

  type <- match.arg(type)

  if(companion) {return(companion.bvar(object, type, conf_bands, ...))}

  if(type == "quantile") {
    quantiles <- quantile_check(conf_bands)
    coefs <- apply(object[["beta"]], c(2, 3), quantile, quantiles)
  } else {
    quantiles <- 0.5
    coefs <- apply(object[["beta"]], c(2, 3), mean)
  }

  M <- object[["meta"]][["M"]]
  lags <- object[["meta"]][["lags"]]
  vars <- name_deps(object[["variables"]], M = M)
  vars_expl <- name_expl(vars, M = M, lags = lags)

  if(length(quantiles) == 1) {
    dimnames(coefs)[[2]] <- vars
    dimnames(coefs)[[1]] <- vars_expl
  } else {
    dimnames(coefs)[[3]] <- vars
    dimnames(coefs)[[2]] <- vars_expl
  }

  class(coefs) <- append("bvar_coefs", class(coefs))

  return(coefs)
}

#' @rdname coef.bvar
#' @export
vcov.bvar <- function(
  object, type = c("quantile", "mean"), conf_bands = 0.5, ...) {

  type <- match.arg(type)

  if(type == "quantile") {
    quantiles <- quantile_check(conf_bands)
    vcovs <- apply(object[["sigma"]], c(2, 3), quantile, quantiles)
  } else {
    quantiles <- 0.5
    vcovs <- apply(object[["sigma"]], c(2, 3), mean)
  }

  vars <- name_deps(object[["variables"]], M = object[["meta"]][["M"]])

  if(length(quantiles) == 1) {
    dimnames(vcovs)[[1]] <- dimnames(vcovs)[[2]] <- vars
  } else {
    dimnames(vcovs)[[2]] <- dimnames(vcovs)[[3]] <- vars
  }

  class(vcovs) <- append("bvar_vcovs", class(vcovs))

  return(vcovs)
}

#' @export
print.bvar_coefs <- function(x, digits = 3L, complete = FALSE, ...) {

  .print_coefs(x, digits, type = "coefficient", complete = complete, ...)

  return(invisible(x))
}

#' @export
print.bvar_vcovs <- function(x, digits = 3L, complete = FALSE, ...)
{ if(!inherits(x, "bvar_vcovs")) {stop("Please provide a `bvar_vcovs` object.")} .print_coefs(x, digits, type = "variance-covariance", complete = complete, ...) return(invisible(x)) } #' Coefficient and variance-covariance print method #' #' @param x Numeric array with coefficient or variance-covariance values of a #' \code{bvar} object. #' @param digits Integer scalar. Fed to \code{\link[base]{round}} and applied to #' numeric outputs (i.e. the quantiles). #' @param type String indicating whether \emph{x} contains coefficient, #' variance-covariance or forecast-error-variance decomposition values. #' @param complete Logical scalar. Whether to print every contained quantile. #' #' @noRd .print_coefs <- function( x, digits = 3L, type = c("coefficient", "variance-covariance", "FEVD", "companion"), complete = FALSE, ...) { type <- match.arg(type) has_quants <- length(dim(x)) == 3 if(has_quants) { P <- dim(x)[1] coefs <- x["50%", , ] bands <- dimnames(x)[[1]] } else {coefs <- x[]} # Remove class to avoid recursion cat("Numeric array (dimensions ", paste0(dim(x), collapse = ", "), ")", " of ", type, " values from a BVAR.\n", sep = "") if(has_quants) { cat("Computed confidence bands: ", paste(bands, collapse = ", "), "\n", sep = "") } if(complete && has_quants) { cat("Values:\n") for(band in bands) { cat(" ", band, ":\n", sep = "") print(round(x[band, , ], digits = digits)) } } else { cat("Median values:\n") print(round(coefs, digits = digits)) } return(invisible(x)) }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/92_coef.R
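# Note on the returned layout (a sketch, assuming `x` is the `bvar` object
# from the examples above): with several quantiles the bands occupy the
# first array dimension and can be extracted by name.
cf <- coef(x, conf_bands = 0.10) # Bands at 10%, 50% and 90%
dimnames(cf)[[1]]                # "10%" "50%" "90%"
cf["50%", , ]                    # Median coefficients, explanatory x dependent
cf["50%", "constant", ]          # Median constant terms of all equations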
#' Density methods for Bayesian VARs #' #' Calculates densities of hyperparameters or coefficient draws from Bayesian #' VAR models generated via \code{\link{bvar}}. Wraps standard #' \code{\link[stats]{density}} outputs into a \code{list}. #' #' @param x A \code{bvar} object, obtained from \code{\link{bvar}}. #' @param ... Fed to \code{\link[stats]{density}} or #' \code{\link[graphics]{par}}. #' @param mfrow Numeric vector. Rows for \code{\link[graphics]{par}}. #' @param var,n_vars,lag Integer scalars. Retrieve the position of lag #' \emph{lag} of variable \emph{var} given \emph{n_vars} total variables. #' @inheritParams plot.bvar #' #' @return Returns a list with outputs of \code{\link[stats]{density}}. #' #' @seealso \code{\link{bvar}}; \code{\link[stats]{density}} #' #' @keywords BVAR analysis #' #' @export #' #' @importFrom stats density #' #' @examples #' \donttest{ #' # Access a subset of the fred_qd dataset #' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")] #' # Transform it to be stationary #' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4) #' #' # Estimate a BVAR using one lag, default settings and very few draws #' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE) #' #' # Get densities of the hyperparameters #' density(x) #' #' # Plot them #' plot(density(x)) #' #' # Only get the densities associated with dependent variable 1 #' density(x, vars_response = "CPI") #' #' # Check out the constant's densities #' plot(density(x, vars_impulse = 1)) #' #' # Get the densities of variable three's first lag #' density(x, vars = "FEDFUNDS-lag1") #' #' # Get densities of lambda and the coefficients of dependent variable 2 #' density(x, vars = c("lambda", "UNRATE")) #' } density.bvar <- function( x, vars = NULL, vars_response = NULL, vars_impulse = NULL, ...) { # Get data and apply density --- prep <- prep_data(x, vars = vars, vars_response = vars_response, vars_impulse = vars_impulse) data <- prep[["data"]] vars <- prep[["vars"]] out <- if(length(vars) == 1) { structure(list(density(data, ...)), names = vars) } else {structure(apply(data, 2, density, ...), names = vars)} class(out) <- "bvar_density" return(out) } #' @export print.bvar_density <- function(x, ...) { lapply(x, print, ...) return(invisible(x)) } #' @rdname density.bvar #' @export #' #' @importFrom graphics par plot.bvar_density <- function( x, mar = c(2, 2, 2, 0.5), mfrow = c(length(x), 1), ...) { op <- par(mfrow = mfrow, mar = mar, ...) for(i in seq_along(x)) { plot(x[[i]], main = paste("Density of", names(x)[i])) } par(op) return(invisible(x)) } #' @rdname density.bvar #' @export independent_index <- function(var, n_vars, lag) { x <- vapply(c(var, n_vars, lag), int_check, integer(1L)) return(1 + x[2] * (x[3] - 1) + x[1]) }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/93_density.R
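# Worked example for `independent_index()` above: coefficients are stored
# with the constant first, followed by all variables at lag 1, then lag 2,
# and so on. The position of variable 2 at lag 2 in a 3-variable model is
# therefore 1 + 3 * (2 - 1) + 2 = 6.
independent_index(var = 2, n_vars = 3, lag = 2) # Returns 6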
#' Log-Likelihood method for Bayesian VARs
#'
#' Calculates the log-likelihood of Bayesian VAR models generated with
#' \code{\link{bvar}}.
#'
#' @param object A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param ... Not used.
#'
#' @return Returns an object of class \code{logLik}.
#'
#' @seealso \code{\link{bvar}}
#'
#' @keywords BVAR analysis
#'
#' @export
#'
#' @importFrom mvtnorm dmvnorm
#' @importFrom stats logLik
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Calculate the log-likelihood
#' logLik(x)
#' }
logLik.bvar <- function(object, ...) {

  Y <- object[["meta"]][["Y"]]
  N <- object[["meta"]][["N"]]
  K <- object[["meta"]][["K"]]
  mean <- fitted.bvar(object, type = "mean")[]
  sigma <- vcov.bvar(object, type = "mean")[]

  ll <- sum(vapply(seq_len(N), function(i) {
    dmvnorm(Y[i, ], mean[i, ], sigma, log = TRUE)
  }, numeric(1)))

  attr(ll, "nall") <- N
  attr(ll, "nobs") <- N
  attr(ll, "df") <- K # Maybe provide effective DoF
  class(ll) <- "logLik"

  return(ll)
}


#' Widely applicable information criterion (WAIC) for Bayesian VARs
#'
#' Calculates the widely applicable (or Watanabe-Akaike) information criterion
#' (Watanabe, 2010) for VAR models generated with \code{\link{bvar}}. The
#' result equals \deqn{-2 (\mathrm{lppd} - \mathrm{pWAIC})}, where 'lppd' is
#' the log pointwise predictive density, and 'pWAIC' is the effective number
#' of parameters.
#'
#' @param x A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param n_thin Integer scalar. Every \emph{n_thin}'th draw in \emph{x} is
#' used to calculate, others are dropped.
#' @param ... Not used.
#'
#' @return Returns a numerical value.
#'
#' @references
#' Watanabe, S. (2010) Asymptotic Equivalence of Bayes Cross Validation and
#' Widely Applicable Information Criterion in Singular Learning Theory.
#' \emph{Journal of Machine Learning Research}, \bold{11}, 3571-3594.
#'
#' Kuschnig, N. and Vashold, L. (2021) BVAR: Bayesian Vector Autoregressions
#' with Hierarchical Prior Selection in R.
#' \emph{Journal of Statistical Software}, \bold{100}, 1-27,
#' \doi{10.18637/jss.v100.i14}.
#'
#' @seealso \code{\link{bvar}}
#'
#' @keywords BVAR analysis
#'
#' @export
#'
#' @importFrom mvtnorm dmvnorm
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 600L, n_burn = 100L, verbose = FALSE)
#'
#' # Calculate the WAIC
#' WAIC(x)
#' }
WAIC.bvar <- function(x, n_thin = 1L, ...) {

  Y <- x[["meta"]][["Y"]]
  N <- x[["meta"]][["N"]]
  K <- x[["meta"]][["K"]]

  n_pres <- x[["meta"]][["n_save"]]
  n_thin <- int_check(n_thin, min = 1, max = (n_pres / 10), "Issue with n_thin. 
Maximum allowed is n_save / 10.") n_save <- int_check((n_pres / n_thin), min = 1) # Matrix of log-likelihoods ll <- matrix(NA_real_, N, n_save) for(s in seq_len(n_save)) { mean <- x[["meta"]][["X"]] %*% x[["beta"]][s, , ] sigma <- x[["sigma"]][s, , ] ll[, s] <- vapply(seq_len(N), function(i) { dmvnorm(Y[i, ], mean[i, ], sigma, log = TRUE) }, numeric(1L)) } # WAIC lppd <- log(rowMeans(exp(ll))) # log pointwise predictive densities res <- ll - matrix(rowMeans(ll), N, n_save) pWAIC <- sum(rowMeans(res * res) * (N / (N - 1))) # Gelman (2014) WAIC <- -2 * sum(lppd) + 2 * pWAIC return(WAIC) } #' @rdname WAIC.bvar #' @export WAIC <- function(x, ...) {UseMethod("WAIC", x)} #' @rdname WAIC.bvar #' @export WAIC.default <- function(x, ...) { stop("No methods for class ", paste0(class(x), collapse = " / "), " found.") }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/94_logLik.R
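# A minimal numeric sketch of the WAIC computation above, using a made-up
# N x n_save matrix of pointwise log-likelihoods instead of draws from an
# actual model (purely illustrative):
set.seed(1)
N <- 50; n_save <- 100
ll <- matrix(rnorm(N * n_save, mean = -1, sd = 0.1), N, n_save)

lppd <- log(rowMeans(exp(ll))) # Log pointwise predictive density
res <- ll - rowMeans(ll)       # Deviations from the per-observation mean
pWAIC <- sum(rowMeans(res * res) * (N / (N - 1))) # As in `WAIC.bvar()` above
-2 * sum(lppd) + 2 * pWAIC # The WAIC value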
#' Fitted and residual methods for Bayesian VARs
#'
#' Calculates fitted or residual values for Bayesian VAR models generated with
#' \code{\link{bvar}}.
#'
#' @param object A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param x Object of class \code{bvar_fitted} / \code{bvar_resid}.
#' @inheritParams coef.bvar
#' @inheritParams plot.bvar
#'
#' @return Returns a numeric array of class \code{bvar_fitted} or
#' \code{bvar_resid} at the specified values.
#'
#' @seealso \code{\link{bvar}}
#'
#' @keywords BVAR analysis
#'
#' @export
#'
#' @importFrom stats fitted residuals
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Get fitted values and adjust confidence bands to 10%, 50% and 90%
#' fitted(x, conf_bands = 0.10)
#'
#' # Get the residuals of variable 1
#' resid(x, vars = 1)
#' }
#' \dontrun{
#' # Get residuals and plot them
#' plot(residuals(x))
#' }
fitted.bvar <- function(
  object, type = c("quantile", "mean"), conf_bands = 0.5, ...) {

  type <- match.arg(type)

  X <- object[["meta"]][["X"]]
  N <- object[["meta"]][["N"]]
  M <- object[["meta"]][["M"]]
  betas <- coef(object, type, conf_bands)

  has_quants <- length(dim(betas)) == 3
  if(has_quants) {
    fit <- array(NA, c(dim(betas)[1], N, M),
      dimnames = list(dimnames(betas)[[1]], NULL, dimnames(betas)[[3]]))
    for(i in seq_len(dim(betas)[1])) {
      fit[i, , ] <- X %*% betas[i, , ]
    }
  } else {
    fit <- X %*% betas
  }

  class(fit) <- append("bvar_fitted", class(fit))

  return(fit)
}

#' @rdname fitted.bvar
#' @export
residuals.bvar <- function(
  object, type = c("quantile", "mean"), conf_bands = 0.5, ...) {

  type <- match.arg(type)

  fit <- fitted.bvar(object, type = type, conf_bands = conf_bands)
  Y <- object[["meta"]][["Y"]]

  has_quants <- length(dim(fit)) == 3
  if(has_quants) {
    resids <- array(NA, dim(fit), dimnames(fit))
    for(i in seq_len(dim(fit)[1])) {
      resids[i, , ] <- Y - fit[i, , ]
    }
  } else {
    resids <- Y - fit
  }

  class(resids) <- append("bvar_resid", class(resids))

  return(resids)
}

#' @rdname fitted.bvar
#' @export
plot.bvar_resid <- function(x, vars = NULL, mar = c(2, 2, 2, 0.5), ...) {

  has_quants <- length(dim(x)) == 3
  if(has_quants) {x <- x["50%", , ]}

  M <- dim(x)[2]
  variables <- name_deps(variables = dimnames(x)[[2]], M = M)
  pos <- pos_vars(vars, variables, M)

  op <- par(mfrow = c(length(pos), 1), mar = mar, ...)
  for(i in pos) {
    plot(x[, i], main = paste("Residuals", variables[i]))
    abline(h = 0, lty = "dashed", col = "gray")
  }
  par(op)

  return(invisible(x))
}

#' @export
print.bvar_fitted <- function(x, digits = 2L, ...) {

  print_fitted(x, digits, type = "fitted", ...)

  return(invisible(x))
}

#' @export
print.bvar_resid <- function(x, digits = 2L, ...) {

  print_fitted(x, digits, type = "residual", ...)

  return(invisible(x))
}

#' Fitted and residual print method
#'
#' @param x Numeric array with residual or fitted values of a \code{bvar}
#' object.
#' @param digits Integer scalar. Fed to \code{\link[base]{round}} and applied
#' to numeric outputs (i.e. the quantiles).
#' @param type String indicating whether \emph{x} contains fitted or residual
#' values.
#'
#' @noRd
print_fitted <- function(
  x, digits = 2L,
  type = c("fitted", "residual"), ...)
{ type <- match.arg(type) has_quants <- length(dim(x)) == 3 if(has_quants) { N <- dim(x)[2]; M <- dim(x)[3]; P <- dim(x)[1] variables <- name_deps(variables = dimnames(x)[[3]], M = M) top <- x["50%", 1:3, ] bot <- x["50%", (N - 2):N, ] } else { N <- dim(x)[1]; M <- dim(x)[2] variables <- name_deps(variables = dimnames(x)[[2]], M = M) top <- x[1:3, ] bot <- x[(N - 2):N, ] } cat("Numeric array (dimensions ", paste0(dim(x), collapse = ", "), ")", " with ", type, " values from a BVAR.\n", sep = "") if(has_quants) { cat("Computed confidence bands: ", paste(dimnames(x)[[1]], collapse = ", "), "\n", sep = "") } cat("Median values:\n") for(var in seq_len(M)) { cat("\t", variables[var], ": ", paste0(round(top[, var], digits), collapse = ", "), ", [...], ", paste0(round(bot[, var], digits), collapse = ", "), "\n", sep = "") } return(invisible(x)) }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/95_fitted.R
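# Consistency sketch (assuming `x` is the `bvar` object from the examples
# above): mean fitted values and mean residuals add back up to the data
# used in estimation.
fit <- fitted(x, type = "mean")
res <- residuals(x, type = "mean")
all.equal(unclass(fit) + unclass(res), x[["meta"]][["Y"]],
  check.attributes = FALSE) # TRUE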
#' Summary method for Bayesian VARs
#'
#' Retrieves several outputs of interest, including the median coefficient
#' matrix, the median variance-covariance matrix, and the log-likelihood.
#' Separate summary methods exist for impulse responses and forecasts.
#'
#' @param object A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param ... Not used.
#'
#' @return Returns a list of class \code{bvar_summary} with elements that
#' can be accessed individually:
#' \itemize{
#'   \item \code{bvar} - the \code{bvar} object provided.
#'   \item \code{coef} - coefficient values from \code{\link{coef.bvar}}.
#'   \item \code{vcov} - VCOV values from \code{\link{vcov.bvar}}.
#'   \item \code{logLik} - the log-likelihood from \code{\link[stats]{logLik}}.
#' }
#'
#' @seealso \code{\link{bvar}};
#' \code{\link{predict.bvar}}; \code{\link{irf.bvar}}
#'
#' @keywords BVAR analysis
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' summary(x)
#' }
summary.bvar <- function(object, ...) {

  out <- structure(list(
    "bvar" = object,
    "coef" = coef.bvar(object),
    "vcov" = vcov.bvar(object),
    "logLik" = logLik.bvar(object)),
    class = "bvar_summary")

  return(out)
}

#' @export
print.bvar_summary <- function(x, ...) {

  print(x[["bvar"]])

  cat("\n"); print.bvar_coefs(x[["coef"]])
  cat("\n"); print.bvar_vcovs(x[["vcov"]])
  cat("\n"); cat("Log-Likelihood:", x[["logLik"]], "\n")

  return(invisible(x))
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/96_summary.R
#' Retrieve companion matrix from a Bayesian VAR
#'
#' Calculates the companion matrix for Bayesian VARs generated via
#' \code{\link{bvar}}.
#'
#' @inheritParams coef.bvar
#'
#' @return Returns a numeric array/matrix of class \code{bvar_comp} with the
#' VAR's coefficients in companion form at the specified values.
#'
#' @seealso \code{\link{bvar}}; \code{\link{coef.bvar}}
#'
#' @keywords BVAR analysis
#'
#' @export
#'
#' @importFrom stats quantile
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Get companion matrices for confidence bands at 10%, 50% and 90%
#' companion(x, conf_bands = 0.10)
#' }
companion <- function(object, ...) {UseMethod("companion", object)}

#' @rdname companion
#' @export
companion.default <- function(object, ...) {
  stop("No methods for class ", paste0(class(object), collapse = " / "),
    " found.")
}

#' @rdname companion
#' @export
companion.bvar <- function(
  object, type = c("quantile", "mean"), conf_bands = 0.5, ...) {

  type <- match.arg(type)

  K <- object[["meta"]][["K"]]
  M <- object[["meta"]][["M"]]
  lags <- object[["meta"]][["lags"]]

  vars <- name_deps(object[["variables"]], M = M)
  vars_expl <- name_expl(vars, M = M, lags = lags)[-1] # No constant
  vars_dep <- c(vars, if(lags > 1) {rep("lag", M * (lags - 1))})

  if(type == "quantile") {
    quantiles <- quantile_check(conf_bands)
    coefs <- apply(object[["beta"]], c(2, 3), quantile, quantiles)
  } else {
    quantiles <- 0.5
    coefs <- apply(object[["beta"]], c(2, 3), mean)
  }

  if(length(quantiles) == 1) {
    comp <- get_beta_comp(coefs, K, M, lags)
    dimnames(comp) <- list(vars_dep, vars_expl)
  } else {
    comp <- array(NA, c(length(quantiles), K - 1, K - 1))
    for(i in 1:length(quantiles)) {
      comp[i, , ] <- get_beta_comp(coefs[i, , ], K, M, lags)
    }
    dimnames(comp)[[1]] <- dimnames(coefs)[[1]]
    dimnames(comp)[[2]] <- vars_dep
    dimnames(comp)[[3]] <- vars_expl
  }

  class(comp) <- append("bvar_comp", class(comp))

  return(comp)
}

#' @export
print.bvar_comp <- function(x, digits = 3L, complete = FALSE, ...) {

  .print_coefs(x, digits, type = "companion", complete = complete, ...)

  return(invisible(x))
}
/scratch/gouwar.j/cran-all/cranData/BVAR/R/97_companion.R
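# Structure sketch for the companion form above: with M variables and p
# lags, the (M * p) x (M * p) companion matrix stacks the lag coefficient
# matrices A_1, ..., A_p (the constant is dropped) on top of an identity
# block, e.g. for p = 2:
#
#   [ A_1  A_2 ]
#   [ I    0   ]
#
# A quick dimension check (assuming `x` is a `bvar` object):
comp <- companion(x, conf_bands = 0.5)
M <- x[["meta"]][["M"]]; lags <- x[["meta"]][["lags"]]
identical(dim(comp), as.integer(c(M * lags, M * lags))) # TRUE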
#' BVAR: Hierarchical Bayesian vector autoregression
#'
#' Estimation of hierarchical Bayesian vector autoregressive models following
#' Kuschnig & Vashold (2021).
#' Implements hierarchical prior selection for conjugate priors in the fashion
#' of Giannone, Lenza & Primiceri (2015) <doi:10.1162/REST_a_00483>. Functions
#' to compute and identify impulse responses, calculate forecasts,
#' forecast error variance decompositions and scenarios are available.
#' Several methods to print, plot and summarise results facilitate analysis.
#'
#' @docType package
#'
#' @name BVAR-package
#'
#' @references
#' Giannone, D. and Lenza, M. and Primiceri, G. E. (2015) Prior Selection for
#' Vector Autoregressions. \emph{The Review of Economics and Statistics},
#' \bold{97:2}, 436-451, \doi{10.1162/REST_a_00483}.
#'
#' Kuschnig, N. and Vashold, L. (2021) BVAR: Bayesian Vector Autoregressions
#' with Hierarchical Prior Selection in R.
#' \emph{Journal of Statistical Software}, \bold{100}, 1-27,
#' \doi{10.18637/jss.v100.i14}.
#'
"_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/BVAR/R/BVAR-package.R
#' FRED-MD and FRED-QD: Databases for Macroeconomic Research
#'
#' FRED-MD and FRED-QD are large macroeconomic databases, containing monthly
#' and quarterly time series that are frequently used in the literature. They
#' are intended to facilitate the reproduction of empirical work and simplify
#' data related tasks.
#' Included datasets are provided as is - transformation codes are available
#' in \code{system.file("fred_trans.csv", package = "BVAR")}. These can be
#' applied automatically with \code{\link{fred_transform}}.
#'
#' The versions of FRED-MD and FRED-QD that are provided here are licensed
#' under a modified ODC-BY 1.0 license that can be found in the provided
#' \emph{LICENSE} file. The provided versions are subset to variables that are
#' either in public domain or for which we were given permission to use.
#' For further details see McCracken and Ng (2016) or
#' \url{https://research.stlouisfed.org/econ/mccracken/fred-databases/}.
#' We would like to thank Michael McCracken and Serena Ng, Adrienne Brennecke
#' and the Federal Reserve Bank of St. Louis for creating, updating and making
#' available the datasets and many of the contained time series. We also thank
#' all other owners of included time series that permitted their use.
#'
#' @docType data
#'
#' @format A \code{data.frame} object with dates as rownames.
#'
#' @seealso \code{\link{fred_transform}}
#'
#' @keywords FRED macroeconomics
#'
#' @references
#' McCracken, M. W. and Ng, S. (2016) FRED-MD: A Monthly Database for
#' Macroeconomic Research. \emph{Journal of Business & Economic Statistics},
#' \bold{34:4}, 574-589, \doi{10.1080/07350015.2015.1086655}.
#'
#' McCracken, M. W. and Ng, S. (2020) FRED-QD: A Quarterly Database for
#' Macroeconomic Research. \emph{NBER Working Paper}, \bold{26872}.
#'
#' @source \url{https://research.stlouisfed.org/econ/mccracken/fred-databases/}
"fred_qd"

#' @rdname fred_qd
"fred_md"
/scratch/gouwar.j/cran-all/cranData/BVAR/R/data.R
/scratch/gouwar.j/cran-all/cranData/BVAR/R/deprecated.R
.onLoad <- function(...) { register_s3("coda", "as.mcmc", "bvar") register_s3("coda", "as.mcmc", "bvar_chains") register_s3("vars", "irf", "bvar") register_s3("vars", "irf", "bvar_irf") register_s3("vars", "fevd", "bvar") register_s3("vars", "fevd", "bvar_irf") register_s3("vars", "fevd", "bvar_fevd") invisible() } register_s3 <- function(pkg, generic, class) { fun <- get(paste0(generic, ".", class), envir = parent.frame()) stopifnot(is.function(fun)) if(pkg %in% loadedNamespaces()) { registerS3method(generic, class, fun, envir = asNamespace(pkg)) } setHook(packageEvent(pkg, "onLoad"), function(...) { registerS3method(generic, class, fun, envir = asNamespace(pkg)) }) }
/scratch/gouwar.j/cran-all/cranData/BVAR/R/zzz.R
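# Sketch of the registration pattern used above (standalone analogue, not
# package code): methods for generics owned by suggested packages cannot
# be registered via the NAMESPACE, since those packages may be missing at
# install time. Instead, a method is registered right away if the package
# is already loaded, and a load hook covers the case where it is loaded
# later. The method below is an illustrative placeholder.
register_s3_example <- function() {
  method <- function(x, ...) x # Stand-in for a real method like as.mcmc.bvar
  if("coda" %in% loadedNamespaces()) {
    registerS3method("as.mcmc", "bvar", method, envir = asNamespace("coda"))
  }
  setHook(packageEvent("coda", "onLoad"), function(...) {
    registerS3method("as.mcmc", "bvar", method, envir = asNamespace("coda"))
  })
}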
### R code from vignette source 'article.Rnw' ################################################### ### code chunk number 1: preliminaries ################################################### options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE) ################################################### ### code chunk number 2: setup ################################################### set.seed(42) library("BVAR") ################################################### ### code chunk number 3: data ################################################### x <- fred_qd[1:243, c("GDPC1", "PCECC96", "GPDIC1", "HOANBS", "GDPCTPI", "FEDFUNDS")] x <- fred_transform(x, codes = c(4, 4, 4, 4, 4, 1)) ################################################### ### code chunk number 4: timeseries ################################################### op <- par(mfrow = c(2, 3), mar = c(3, 3, 1, 0.5), mgp = c(2, 0.6, 0)) plot(as.Date(rownames(x)), x[ , "GDPC1"], type = "l", xlab = "Time", ylab = "Gross domestic product") plot(as.Date(rownames(x)), x[ , "PCECC96"], type = "l", xlab = "Time", ylab = "Consumption expenditure") plot(as.Date(rownames(x)), x[ , "GPDIC1"], type = "l", xlab = "Time", ylab = "Private investment") plot(as.Date(rownames(x)), x[ , "HOANBS"], type = "l", xlab = "Time", ylab = "Total hours worked") plot(as.Date(rownames(x)), x[ , "GDPCTPI"], type = "l", xlab = "Time", ylab = "GDP deflator") plot(as.Date(rownames(x)), x[ , "FEDFUNDS"], type = "l", xlab = "Time", ylab = "Federal funds rate") par(op) ################################################### ### code chunk number 5: minnesota ################################################### mn <- bv_minnesota( lambda = bv_lambda(mode = 0.2, sd = 0.4, min = 0.0001, max = 5), alpha = bv_alpha(mode = 2), var = 1e07) ################################################### ### code chunk number 6: dummies ################################################### soc <- bv_soc(mode = 1, sd = 1, min = 1e-04, max = 50) sur <- bv_sur(mode = 1, sd = 1, min = 1e-04, max = 50) ################################################### ### code chunk number 7: priors ################################################### priors <- bv_priors(hyper = "auto", mn = mn, soc = soc, sur = sur) ################################################### ### code chunk number 8: metropolis ################################################### mh <- bv_metropolis(scale_hess = c(0.05, 0.0001, 0.0001), adjust_acc = TRUE, acc_lower = 0.25, acc_upper = 0.45) ################################################### ### code chunk number 9: bvar ################################################### run <- bvar(x, lags = 5, n_draw = 15000, n_burn = 5000, n_thin = 1, priors = priors, mh = mh, verbose = TRUE) ################################################### ### code chunk number 10: print ################################################### print(run) ################################################### ### code chunk number 11: trace_density (eval = FALSE) ################################################### ## plot(run) ## plot(run, type = "dens", ## vars_response = "GDPC1", vars_impulse = "GDPC1-lag1") ################################################### ### code chunk number 12: trace_density ################################################### plot(run) ################################################### ### code chunk number 13: betas ################################################### plot(run, type = "dens", vars_response = "GDPC1", vars_impulse = "GDPC1-lag1") 
################################################### ### code chunk number 14: fitted ################################################### fitted(run, type = "mean") ################################################### ### code chunk number 15: residuals ################################################### plot(residuals(run, type = "mean"), vars = c("GDPC1", "PCECC96")) ################################################### ### code chunk number 16: irf ################################################### opt_irf <- bv_irf(horizon = 16, identification = TRUE) irf(run) <- irf(run, opt_irf, conf_bands = c(0.05, 0.16)) ################################################### ### code chunk number 17: irf_cholesky ################################################### plot(irf(run), area = TRUE, vars_impulse = c("GDPC1", "FEDFUNDS"), vars_response = c(1:2, 6)) ################################################### ### code chunk number 18: predict ################################################### predict(run) <- predict(run, horizon = 16, conf_bands = c(0.05, 0.16)) ################################################### ### code chunk number 19: predict_unconditional ################################################### plot(predict(run), area = TRUE, t_back = 32, vars = c("GDPC1", "GDPCTPI", "FEDFUNDS")) ################################################### ### code chunk number 20: app_data ################################################### y <- fred_qd[1:243, c("GDPC1", "GDPCTPI", "FEDFUNDS")] z <- fred_transform(y, type = "fred_qd") y <- fred_transform(y, codes = c(5, 5, 1), lag = 4) ################################################### ### code chunk number 21: app_timeseries ################################################### op <- par(mfrow = c(1, 3), mar = c(3, 3, 1, 0.5), mgp = c(2, 0.6, 0)) plot(as.Date(rownames(y)), y[ , "GDPC1"], type = "l", xlab = "Time", ylab = "GDP growth") plot(as.Date(rownames(y)), y[ , "GDPCTPI"], type = "l", xlab = "Time", ylab = "Inflation") plot(as.Date(rownames(y)), y[ , "FEDFUNDS"], type = "l", xlab = "Time", ylab = "Federal funds rate") par(op) ################################################### ### code chunk number 22: app_bvar ################################################### priors_app <- bv_priors(mn = bv_mn(b = 0)) run_app <- bvar(y, lags = 5, n_draw = 15000, n_burn = 5000, priors = priors_app, mh = bv_mh(scale_hess = 0.5, adjust_acc = TRUE), verbose = FALSE) ################################################### ### code chunk number 23: app_dummies ################################################### add_soc <- function(Y, lags, par) { soc <- if(lags == 1) {diag(Y[1, ]) / par} else { diag(colMeans(Y[1:lags, ])) / par } X_soc <- cbind(rep(0, ncol(Y)), matrix(rep(soc, lags), nrow = ncol(Y))) return(list("Y" = soc, "X" = X_soc)) } ################################################### ### code chunk number 24: app_priors ################################################### soc <- bv_dummy(mode = 1, sd = 1, min = 0.0001, max = 50, fun = add_soc) priors_soc <- bv_priors(soc = soc) ################################################### ### code chunk number 25: app_coda ################################################### library("coda") run_mcmc <- as.mcmc(run_app, vars = "lambda") geweke.diag(run_mcmc) ################################################### ### code chunk number 26: app_parallel ################################################### library("parallel") n_cores <- 2 cl <- makeCluster(n_cores) runs <- par_bvar(cl = cl, data = y, lags = 5, n_draw = 15000, n_burn = 5000, 
n_thin = 1, priors = priors_app, mh = bv_mh(scale_hess = 0.5, adjust_acc = TRUE)) stopCluster(cl) runs_mcmc <- as.mcmc(runs, vars = "lambda") gelman.diag(runs_mcmc, autoburnin = FALSE) ################################################### ### code chunk number 27: app_chains ################################################### plot(runs, type = "full", vars = "lambda") ################################################### ### code chunk number 28: app_signs ################################################### sr <- matrix(c(1, 1, 1, -1, 1, NA, -1, -1, 1), ncol = 3) opt_signs <- bv_irf(horizon = 16, fevd = TRUE, identification = TRUE, sign_restr = sr) print(opt_signs) ################################################### ### code chunk number 29: app_irf ################################################### irf(run_app) <- irf(run_app, opt_signs) ################################################### ### code chunk number 30: app_irf_signs ################################################### plot(irf(run_app), vars_impulse = c(1, 3)) ################################################### ### code chunk number 31: app_predict ################################################### path <- c(2.25, 3, 4, 5.5, 6.75, 4.25, 2.75, 2, 2, 2) predict(run_app) <- predict(run_app, horizon = 16, cond_path = path, cond_var = "FEDFUNDS") ################################################### ### code chunk number 32: app_predict_conditional ################################################### plot(predict(run_app), t_back = 16)
/scratch/gouwar.j/cran-all/cranData/BVAR/inst/doc/article.R
# Internal tests ----- # 11_input.R --- expect_error(BVAR:::num_check(x = "1")) expect_identical(BVAR:::num_check(0L), 0) expect_error(BVAR:::int_check(c = 0.99, min = 1L)) expect_error(BVAR:::int_check(0L, min = 1L, max = Inf)) expect_identical(BVAR:::int_check(1.01), 1L) expect_identical(BVAR:::int_check(1.99), 1L) # Fails, fits ARIMA, fits AR expect_error(BVAR:::auto_psi(cbind(rep(1, 100), rnorm(100)))) expect_message(BVAR:::auto_psi(cbind(c(2, 1, 1, 3), c(1, 2, 0, 2)), lags = 1)) expect_silent(BVAR:::auto_psi(cbind(c(2, 1, 1, 2), c(1, 2, 0, 2)), lags = 1)) # 12_aux.R --- expect_equal( BVAR:::lag_var(matrix(1:4), lags = 2), matrix(c(0, 0, 2, 3, 0, 0, 1, 2), ncol = 2)) expect_equal( BVAR:::gamma_coef(2, 2), list("k" = 2.618034, "theta" = 1.236068), tol = 1e-6) expect_equal( BVAR:::name_pars(c("a", "b", "psi", "c"), M = 2), c("a", "b", "psi1", "psi2", "c")) # Quite some cornercases here expect_error(BVAR:::fill_ci("gray", y = "teal", P = 2)) expect_equal( BVAR:::fill_ci("gray", y = "teal", P = 3), c("teal", "gray", "teal")) expect_equal( BVAR:::fill_ci_na(1:3, P = 2), cbind("x" = 1:3, NA)) expect_equal( BVAR:::fill_ci_na(1, P = 3), c(NA, 1, NA)) expect_equal( BVAR:::fill_ci_na(1:3, P = 5), cbind(NA, NA, "x" = 1:3, NA, NA)) expect_equal( BVAR:::fill_ci_col(x = integer(), y = "#008080", P = 5), c("#00808080", "#008080FF", "#008080FF", "#00808080")) # Handpicked and automatic expect_true(all(grepl("^[0-9a-fA-F]{2}$", BVAR:::alpha_hex(7)))) expect_true(all(grepl("^[0-9a-fA-F]{2}$", BVAR:::alpha_hex(120)))) expect_true(BVAR:::is_hex("#008080")) expect_true(BVAR:::is_hex("#008080FF", alpha = TRUE)) expect_false(BVAR:::is_hex("008080")) expect_false(BVAR:::is_hex("#0000")) expect_false(BVAR:::is_hex("#008080FF", alpha = FALSE)) # Check numeric, character and missing expect_error(BVAR:::pos_vars(vars = "a")) expect_error(BVAR:::pos_vars(vars = "a", variables = c("x", "y"))) expect_equal(BVAR:::pos_vars(vars = NULL, M = 3), 1:3) expect_equal(BVAR:::pos_vars(vars = c(2, 1), M = 3), c(2, 1)) expect_equal( BVAR:::pos_vars(c("a", "c"), variables = c("a", "b", "c")), c(1, 3)) expect_error(BVAR:::name_deps(c("a", "b"), M = 3)) expect_equal(BVAR:::name_deps(NULL, M = 3), c("var1", "var2", "var3")) expect_equal(BVAR:::name_deps(c("a", "b"), M = 2), c("a", "b")) expect_equal( BVAR:::name_expl(c("a", "b"), M = 2, lags = 2), c("constant", "a-lag1", "b-lag1", "a-lag2", "b-lag2")) expect_equal( BVAR:::name_expl(NULL, M = 2, lags = 1), c("constant", "var1-lag1", "var2-lag1")) expect_equal(BVAR:::p_log_ig(5, 0.004, 0.004), -7.157927, tol = 1e-6) expect_equal( BVAR:::get_beta_comp(matrix(1:12, nrow = 4), K = 4, M = 3, lags = 1), matrix(c(2:4, 6:8, 10:12), nrow = 3, byrow = TRUE)) expect_equal( BVAR:::get_beta_comp(matrix(1:21, nrow = 7), K = 7, M = 3, lags = 2), matrix(c(2:7, 9:14, 16:21, 1, rep(0, 6), 1, rep(0, 6), 1, rep(0, 3)), nrow = 6, byrow = TRUE)) expect_error(BVAR:::has_package("BSE")) expect_equal(BVAR:::has_package("BVAR"), NULL) expect_error(BVAR:::quantile_check(1)) expect_error(BVAR:::quantile_check(0)) expect_equal(BVAR:::quantile_check(0.5), 0.5) expect_equal(BVAR:::quantile_check(0.1), c(0.1, 0.5, 0.9)) expect_equal( BVAR:::quantile_check(c(0.6, 0.4, 0.3)), c(0.3, 0.4, 0.5, 0.6, 0.7)) # 13_mvtnorm.R --- sigma <- matrix(c(1, 0.2, 0.1, 0, 1, 0.2, 0, 0, 1), nrow = 3) sigma <- crossprod(sigma) expect_silent(BVAR:::rmvn_proposal(1, mean = 0.5, sigma = list("values" = 1))) expect_silent(BVAR:::rmvn_proposal(1, mean = 2, sigma = eigen(sigma))) expect_silent(BVAR:::rmvn_inv(1, sigma_inv = solve(sigma), 
method = "eigen")) expect_silent(BVAR:::rmvn_inv(1, sigma_inv = solve(sigma), method = "chol")) expect_error(BVAR:::rmvn_inv(1, sigma_inv = solve(sigma), method = "svd")) # 43_sur_soc.R --- expect_silent(soc <- BVAR:::.add_soc( matrix(c(1, 2, 1, 2, 3, 3), ncol = 2), 2, 0.5)) expect_silent(sur <- BVAR:::.add_sur( matrix(c(1, 2, 1, 2, 3, 3), ncol = 2), 2, 0.5)) # zzz.R --- expect_silent(BVAR:::.onLoad())
/scratch/gouwar.j/cran-all/cranData/BVAR/inst/tinytest/internal.R
# Manual tests ----- # 13_mvtnorm.R --- sigma <- matrix(c(1, 0.2, 0.1, 0, 1, 0.2, 0, 0, 1), nrow = 3) sigma <- crossprod(sigma) uvt_1 <- BVAR:::rmvn_proposal(1000, mean = 2, sigma = list("values" = 5)) uvt_2 <- rnorm(1000, mean = 2, sd = 5) # Kolmogorov-Smirnov Test for univariate distributions expect_true(ks.test(uvt_1, uvt_2)[["p.value"]] >= 0.1) mvt_1 <- BVAR:::rmvn_proposal(1000, mean = 2, sigma = eigen(sigma)) mvt_2 <- mvtnorm::rmvnorm(1000, mean = rep(2, 3L), sigma = sigma) mvt_3 <- BVAR:::rmvn_inv(1000, sigma_inv = solve(sigma), method = "eigen") mvt_4 <- BVAR:::rmvn_inv(1000, sigma_inv = solve(sigma), method = "chol") mvt_5 <- mvtnorm::rmvnorm(1000, mean = rep(0, 3L), sigma = sigma) # Cramer test for multivariate distributions if(requireNamespace("cramer", quietly = TRUE)) { expect_true(cramer::cramer.test(mvt_1, mvt_2)[["p.value"]] >= 0.1) expect_true(cramer::cramer.test(mvt_3, mvt_4)[["p.value"]] >= 0.1) expect_true(cramer::cramer.test(mvt_3, mvt_5)[["p.value"]] >= 0.1) }
/scratch/gouwar.j/cran-all/cranData/BVAR/inst/tinytest/manual.R
# API tests ------- data <- data2 <- data3 <- matrix(rnorm(1000), nrow = 200) # Fail and prepare ----- # 10_bvar --- # n_ shenaningans expect_error(bvar(data, lags = 1, n_draw = 1000, n_burn = 1000)) expect_error(bvar(data, lags = 1, n_draw = 10, n_burn = 1)) # Erroneous data and lags expect_error(bvar(data = data[1:5, ], lags = 5)) expect_error(bvar(data = data, lags = 0)) data2[1:3, ] <- NA_real_ expect_error(bvar(data2, lags = 2)) # Faulty arguments expect_error(bvar(data, lags = 2, priors = NULL)) expect_error(bvar(data, lags = 2, mh = NULL)) expect_error(bvar(data, lags = 2, fcast = TRUE)) expect_error(bvar(data, lags = 2, irf = TRUE)) # 3*_metropolis --- # Proper use expect_silent(bv_metropolis(scale_hess = c(0.1, 0.05), adjust_acc = TRUE, adjust_burn = 0.5, acc_lower = 0.1, acc_upper = 0.9, acc_change = 0.1)) expect_silent(mh <- bv_mh(scale_hess = 0.1, adjust_acc = TRUE, adjust_burn = 0.5, acc_lower = 0.1, acc_upper = 0.9, acc_change = 0.1)) expect_silent(print(mh)) # Faulty arguments expect_error(bv_mh(scale_hess = -1)) expect_error(bv_mh(adjust_acc = TRUE, adjust_burn = 0)) expect_error(bv_mh(adjust_acc = TRUE, acc_lower = 0.5, acc_upper = 0.4)) expect_error(bv_mh(adjust_acc = TRUE, acc_change = -1)) # 4*_priors --- # Proper use expect_silent(bv_minnesota(lambda = bv_lambda(0.25, sd = 0.4), alpha = bv_alpha(mode = 1.5, min = 0.5, max = 5), var = 1e06)) expect_silent(mn <- bv_mn(lambda = bv_lambda(0.2, sd = 0.4, max = 4.5), alpha = bv_alpha(mode = 1.5, min = 0.5, max = 5), var = 1e08)) expect_silent(dummy <- bv_dummy(fun = function(Y, lags, par) { return(list(Y = Y[1, ] * par, X = c(1, rep(Y[1, ] * par, lags))))})) expect_silent(bv_soc(mode = 1.5, sd = 2, min = 1e-7, max = 100)) expect_silent(bv_sur(mode = 2, sd = 1, min = 0.01, max = 50)) expect_silent(priors <- bv_priors(hyper = "auto", mn = mn, sur = bv_sur(), soc = bv_soc(), custom = dummy)) expect_silent(print(priors)) expect_equal( bv_priors(hyper = c("lambda", "alpha", "psi")), bv_priors(hyper = c("full"))) expect_silent(print(bv_mn(lambda = c(0.2, 0.4, 1e-6, 5), alpha = c(1.5, 0.5, 0.1, 5), var = 100))) expect_silent(print(bv_mn(psi = bv_psi(scale = 0.2, shape = 0.2, mode = c(1, 1.5, 1.2, 0.4), min = rep(0.001, 4))))) expect_silent(print(bv_mn(psi = bv_psi(scale = 0.2, shape = 0.2, mode = c(1, 1.5, 1.2, 0.4), max = rep(1000, 4))))) # Faulty sd, dummy prior and hyperparameters expect_error(bv_priors(mn = bv_lambda(sd = 0))) expect_error(bv_priors(mn = bv_mn(), dummy = list("mode" = 1, "sd" = 1))) expect_error(bv_priors(hyper = c("lambda", "alpha", "soc"), sur = bv_sur())) # Wrong format for alpha, faulty sd and var expect_error(bv_mn(alpha = c(2, 1))) expect_error(bv_mn(lambda = bv_lambda(mode = 0.4, sd = 0))) expect_error(bv_mn(var = -1)) # Boundaries w/o mode, faulty mode, wrong length, wrong boundaries expect_error(bv_mn(bv_psi(min = c(0, 0), max = c(1, 1)))) expect_error(bv_mn(bv_psi(mode = c(1, 2, 0)))) expect_error(bv_mn(bv_psi(mode = c(1, 2, 1), min = c(0.1, 0.1)))) expect_error(bv_mn(bv_psi(mode = c(1, 2), min = c(0.1, 0.5), max = c(1, 0.1)))) # Faulty sd, wrong boundaries expect_error(bv_dummy(mode = 2, sd = 0)) expect_error(bv_dummy(min = 2, max = 1)) # 5*_fcast --- # Proper use expect_silent(opt_fcast1 <- bv_fcast()) expect_silent(print(opt_fcast1)) expect_silent(opt_fcast2 <- bv_fcast(cond_path = c(2, 2, 2, 2), cond_vars = 1)) expect_silent(bv_fcast(cond_path = c(2, 2, 2, 2), cond_vars = "FEDFUNDS")) expect_silent(bv_fcast(cond_path = matrix(rep(2, 6), nrow = 3))) expect_silent(bv_fcast(horizon = 2020, cond_path 
= matrix(c(2, 2, NA, 1.5, NA, NA, 1, 1.2, 1.5), nrow = 3)))

# Short horizon and duplicated cond_vars
expect_message(bv_fcast(horizon = 4, cond_path = rep(2, 6), cond_vars = 1))
expect_error(bv_fcast(cond_path = matrix(rnorm(9), nrow = 3),
  cond_vars = c(1, 1)))
expect_error(bv_fcast(cond_path = matrix(rnorm(9), nrow = 3),
  cond_vars = c("FEDFUNDS", "FEDFUNDS", "GDP")))


# 6*_irf ---

# Proper use
expect_silent(opt_irf1 <- bv_irf(fevd = TRUE))
expect_silent(print(opt_irf1))
expect_silent(bv_irf(horizon = 2020, identification = FALSE))
expect_silent(opt_irf2 <- bv_irf(fevd = FALSE, # Sign restricted
  sign_restr = matrix(c(1, NA, NA, NA, 1, -1, -1, 1, NA), nrow = 3)))
expect_silent(print(opt_irf2))
expect_silent(opt_irf3 <- bv_irf(fevd = FALSE, # Zero sign restricted
  sign_restr = matrix(c(NA, 0, NA, NA, 1, -1, NA, 1, NA), nrow = 3)))
expect_silent(bv_irf(sign_restr = c(1, NA, -1, 1), sign_lim = 1000))
expect_silent(bv_irf(sign_restr = c(0, NA, NA, 1), sign_lim = 1000))

# Underidentified, too many 0, non-square restrictions
expect_message(bv_irf(sign_restr = matrix(c(NA, NA, NA, NA), nrow = 2)))
expect_error(bv_irf(sign_restr = matrix(c(0, 0, -1, NA), nrow = 2)))
expect_error(bv_irf(sign_restr = matrix(rnorm(6), nrow = 3)))


# Run and analyse -----

# 10_bvar ---

# Base run
expect_silent(run <- bvar(data, lags = 2, priors = priors, mh = mh))
# Conditional and sign-restricted
expect_silent(run2 <- bvar(data[, 1:3], lags = 2,
  fcast = opt_fcast2, irf = opt_irf2, n_draw = 1000L, n_burn = 500L))
# Psi as hyperprior
expect_silent(run3 <- bvar(data, lags = 2,
  priors = bv_priors(hyper = c("lambda", "psi")), mh = mh,
  n_draw = 1000L, n_burn = 500L))

# 5*_fcast ---

# Ex-post predicts and methods
expect_silent(predict(run) <- predict(run, opt_fcast1))
expect_silent(fcasts1 <- predict(run))
expect_silent(fcasts2 <- predict(run2))
expect_silent(print(fcasts1))
expect_silent(print(summary(fcasts1)))
expect_silent(print(summary(fcasts2)))
expect_silent(plot(fcasts1, vars = 1))

# 6*_irf ---

# Ex-post irfs and methods
expect_silent(irf(run) <- irf(run, opt_irf1))
expect_silent(irfs1 <- irf(run, verbose = TRUE))
expect_silent(irfs2 <- irf(run2, opt_irf2))
expect_silent(irfs3 <- irf(run2, opt_irf3))
expect_silent(print(irfs1))
expect_silent(print(summary(irfs1)))
expect_silent(print(fevd(run) <- fevd(run))) # Access
expect_silent(print(fevd(run2))) # Recalculates
expect_silent(print(fevd(irfs2))) # Recalculates
expect_silent(plot(irfs1, vars_res = 1, vars_imp = 1))

# 80_coda ---

# Get 'mcmc' object
expect_silent(coda::as.mcmc(run))

# 81_parallel ---

library("parallel")
cl <- makeCluster(2L)
expect_silent(
  tryCatch(run_par <- par_bvar(cl, n_runs = 2,
    data = data, lags = 2, priors = priors, mh = mh),
    finally = stopCluster(cl)))
expect_silent(plot(run, type = "full", vars = "lambda", chains = run_par))
expect_silent(coda::as.mcmc(run_par, vars = "lambda"))
expect_silent(BVAR:::chains_fit(run, run_par, Ms = TRUE,
  n_saves = TRUE, hypers = TRUE))

# 85_transform ---

x <- fred_md[c("RPI")]
expect_silent(fred_transform())
expect_silent(fred_transform(fred_qd[c("GDPC1", "GDPCTPI", "FEDFUNDS")]))
expect_silent(fred_transform(x, type = "fred_md"))
expect_silent(fred_transform(x, codes = 7, na.rm = FALSE))
expect_silent(fred_transform(x, type = "fred_md", lag = 2, scale = 50))
expect_silent(fred_code())
expect_silent(fred_code(c("GDPC1", "GDPCTPI", "FEDFUNDS")))
expect_silent(fred_code(c("RPI"), type = "fred_md"))
expect_silent(fred_code(c("GDPC1", "RPI"), table = TRUE))
expect_equivalent(fred_transform(x, code = 1), x)
expect_equivalent(fred_transform(x, code = 5, lag = 12, scale = 101), data.frame(diff(log(x[, 1]), lag = 12) * 101)) expect_message(fred_code("A")) expect_message(fred_code("Temperature")) # 9*_methods --- expect_silent(print(run)) expect_silent(plot(run)) expect_silent(plot(run, type = "trace", vars = c("lambda"))) expect_silent(plot(run, type = "dens", vars_res = 1, vars_imp = 2)) expect_silent(plot(run, type = "fcast", vars = 1)) expect_silent(plot(run, type = "irf", vars_impulse = 1, vars_response = 1)) expect_silent(plot(predict(run, conf_bands = 0.5), vars = 1)) expect_silent(plot(predict(run, conf_bands = 0.25), vars = 3, area = TRUE)) expect_silent(plot(irf(run, conf_bands = 0.5), vars_impulse = 1, vars_response = 1)) expect_silent(plot(irf(run, conf_bands = 0.75), area = TRUE, vars_impulse = 3, vars_response = 3)) expect_silent(print(coef(run))) expect_silent(print(coef(run, type = "mean"))) expect_silent(print(coef(run, conf_bands = 0.1))) expect_silent(print(vcov(run))) expect_silent(print(vcov(run, type = "mean"))) expect_silent(print(density(run, vars = 2))) expect_silent(plot(density(run, vars = 2))) expect_silent(independent_index(2, 4, 2)) expect_silent(print(fitted(run))) expect_silent(print(residuals(run, conf_bands = 0.1))) expect_silent(plot(residuals(run), vars = 1)) expect_silent(print(summary(run))) expect_silent(print(companion(run))) expect_silent(print(companion(run, type = "mean"))) expect_silent(print(companion(run, conf_bands = 0.1))) expect_silent(print(hist_decomp(run, type = "mean"))) expect_silent(print(hist_decomp(run, type = "quantile"))) expect_silent(print(rmse(run))) expect_silent(print(rmse(run, holdout = data2[1:10, ]))) expect_silent(print(lps(run))) expect_silent(print(lps(run, holdout = data2[1:10, ]))) expect_silent(print(WAIC(run)))
# --- End of file: BVAR/inst/tinytest/test_BVAR.R ---
#
# Internal BVAR functions that we use here. Basically the contents of
# BVAR's 11..., 12... and 15... scripts. At some point we should include
# these directly from the main repository.
#

#' Check numeric scalar
#'
#' Check whether an object is bounded and coercible to a numeric value.
#'
#' @param x Numeric scalar.
#' @param min Numeric scalar. Minimum value of \emph{x}.
#' @param max Numeric scalar. Maximum value of \emph{x}.
#' @param fun Function to apply to \emph{x} before returning.
#' @param msg String fed to \code{\link[base]{stop}} if an error occurs.
#'
#' @return Returns \code{fun(x)}.
#'
#' @noRd
num_check <- function(
  x, min = 0, max = Inf,
  msg = "Please check the numeric parameters.",
  fun = as.numeric) {

  if(!is.numeric(x) || length(x) != 1 || x < min || x > max) {stop(msg)}

  return(fun(x))
}

#' @noRd
int_check <- function(
  x, min = 0L, max = Inf,
  msg = "Please check the integer parameters.") {

  num_check(x, min, max, msg, fun = as.integer)
}

#' Lag a matrix
#'
#' Compute a lagged version of a matrix to be used in vector autoregressions.
#' Higher lags are further to the right.
#'
#' @param x Matrix (\eqn{N * M}) to lag.
#' @param lags Integer scalar. Number of lags to apply.
#'
#' @return Returns an \eqn{N * (M * lags)} matrix with consecutive lags on the
#' right. The elements of the first \emph{lags} rows are 0.
#'
#' @noRd
lag_var <- function(x, lags) {

  x_rows <- nrow(x)
  x_cols <- ncol(x)

  x_lagged <- matrix(0, x_rows, lags * x_cols)
  for(i in 1:lags) {
    x_lagged[(lags + 1):x_rows, (x_cols * (i - 1) + 1):(x_cols * i)] <-
      x[(lags + 1 - i):(x_rows - i), (1:x_cols)]
  }

  return(x_lagged)
}

#' Compute gamma coefficients
#'
#' Compute the shape \emph{k} and scale \emph{theta} of a Gamma
#' distribution via the mode and standard deviation.
#'
#' @param mode Numeric scalar.
#' @param sd Numeric scalar.
#'
#' @return Returns a list with shape \emph{k} and scale parameter \emph{theta}.
#'
#' @noRd
gamma_coef <- function(mode, sd) {

  mode_sq <- mode ^ 2
  sd_sq <- sd ^ 2
  k <- (2 + mode_sq / sd_sq +
    sqrt((4 + mode_sq / sd_sq) * mode_sq / sd_sq)) / 2
  theta <- sqrt(sd_sq / k)

  return(list("k" = k, "theta" = theta))
}

#' Name hyperparameters
#'
#' Function to help name hyperparameters. Accounts for multiple occurrences
#' of \emph{psi} by adding sequential numbers.
#'
#' @param x Character vector. Parameter names.
#' @param M Integer scalar. Number of columns in the data.
#'
#' @return Returns a character vector of adjusted parameter names.
#'
#' @noRd
name_pars <- function(x, M) {

  out <- Reduce(c, sapply(x, function(y) {
    if(y == "psi") {paste0(y, 1:M)} else {y}}))

  return(out)
}

#' Fill credible intervals
#'
#' Helper function to fill data, colours or similar things based on credible
#' intervals. These are used in \code{\link{plot.bvar_irf}} and
#' \code{\link{plot.bvar_fcast}}.
#'
#' Note that transparency may get appended to recycled HEX colours. Also note
#' that no, i.e. a length 0 central element is required when drawing polygons.
#'
#' @param x Scalar or vector. The central element.
#' @param y Scalar or vector. Value(s) to surround the central element with.
#' The first value is closest, values may get recycled.
#' @param P Odd integer scalar. Number of total bands.
#'
#' @return Returns a vector or matrix (if \emph{x} is a vector) of \emph{x},
#' surrounded by \emph{y}.
#'
#' @noRd
fill_ci <- function(x, y, P) {

  n_y <- if(P %% 2 == 0) {
    stop("No central position for x found.")
  } else {P %/% 2}

  fill <- rep(y, length.out = n_y)

  if(length(x) > 1) { # Matrix
    n_row <- length(x)
    return(cbind(t(rev(fill))[rep(1, n_row), ], x, t(fill)[rep(1, n_row), ]))
  } else { # Vector
    return(c(rev(fill), x, fill))
  }
}

#' @noRd
fill_ci_na <- function(x, P) {

  # Corner case when quantiles are missing (t_back or conditional forecasts)
  if(P == 2) {return(if(length(x) > 1) {cbind(x, NA)} else {c(x, NA)})}

  fill_ci(x = x, y = NA, P = P)
}

#' @noRd
fill_ci_col <- function(x, y, P) {

  # Apply transparency to HEX colours
  if(length(y) == 1 && is_hex(y, alpha = FALSE)) {
    y <- paste0(y, alpha_hex(P))
  }

  fill_ci(x = x, y = y, P = P)
}

#' Get a transparency HEX code
#'
#' @param P Integer scalar. Number of total bands.
#'
#' @return Returns a character vector of transparency codes.
#'
#' @importFrom grDevices rgb
#'
#' @noRd
alpha_hex <- function(P) {

  n_trans <- P %/% 2

  out <- switch(n_trans, # Handpicked with love
    "FF",
    c("FF", "80"),
    c("FF", "A8", "54"),
    c("FF", "BF", "80", "40"),
    c("FF", "CC", "99", "66", "33"))

  if(is.null(out)) { # Let rgb() sort it out otherwise
    out <- substr(rgb(1, 1, 1, seq(1, 0, length.out = n_trans)), 8, 10)
  }

  return(out)
}

#' Check valid HEX colour
#'
#' @param x Character scalar or vector. String(s) to check.
#' @param alpha Logical scalar. Whether the string may contain alpha values.
#'
#' @return Returns a logical scalar or vector.
#'
#' @noRd
is_hex <- function(x, alpha = FALSE) {

  if(alpha) return(grepl("^#[0-9a-fA-F]{6}([0-9a-fA-F]{2})?$", x))

  return(grepl("^#[0-9a-fA-F]{6}$", x))
}

#' Get variable positions
#'
#' Helper functions to aid with variable selection, e.g. in
#' \code{\link{plot.bvar_irf}} and \code{\link{plot.bvar_fcast}}.
#'
#' @param vars Numeric or character vector of variables to subset to.
#' @param variables Character vector of all variable names. Required if
#' \emph{vars} is provided as character vector.
#' @param M Integer scalar. Count of all variables.
#'
#' @return Returns a numeric vector with the positions of desired variables.
#'
#' @noRd
pos_vars <- function(vars, variables, M) {

  if(is.null(vars) || length(vars) == 0L) {
    return(1:M) # Full set
  }
  if(is.numeric(vars)) {
    return(vapply(vars, int_check, # By position
      min = 1, max = M, msg = "Variable(s) not found.", integer(1)))
  }
  if(is.character(vars) && !is.null(variables)) {
    out <- do.call(c, lapply(vars, grep, variables)) # By name
    if(length(out) > 0) {return(out)}
  }

  stop("Variable(s) not found.")
}

#' Name dependent / explanatory variables
#'
#' @param variables Character vector of all variable names.
#' @param M Integer scalar. Count of all variables.
#' @param lags Integer scalar. Number of lags applied.
#'
#' @return Returns a character vector of variable names.
#'
#' @noRd
name_deps <- function(variables, M) {

  if(is.null(variables)) {
    variables <- paste0("var", seq(M))
  } else if(length(variables) != M) {
    stop("Vector with variables is incomplete.")
  }

  return(variables)
}

#' @noRd
name_expl <- function(variables, M, lags) {

  if(is.null(variables)) {
    variables <- name_deps(variables, M)
  }

  explanatories <- c("constant", paste0(rep(variables, lags), "-lag",
    rep(seq(lags), each = length(variables))))

  return(explanatories)
}

#' Compute log distribution function of Inverse Gamma
#'
#' @param x Numeric scalar. Draw of the IG-distributed variable.
#' @param shape Numeric scalar.
#' @param scale Numeric scalar.
#'
#' @return Returns the log Inverse Gamma distribution function.
#'
#' @noRd
p_log_ig <- function(x, shape, scale) {
  return(shape * log(scale) - (shape + 1) * log(x) - scale / x - lgamma(shape))
}

#' Compute companion matrix
#'
#' Compute the companion form of the VAR coefficients.
#'
#' @param beta Numeric (\eqn{K * M}) matrix with VAR coefficients.
#' @param K Integer scalar. Number of columns in the independent data.
#' @param M Integer scalar. Number of columns in the dependent data.
#' @param lags Integer scalar. Number of lags applied.
#'
#' @return Returns a numeric (\eqn{(K - 1) * (K - 1)}) matrix with \emph{beta}
#' in companion form.
#'
#' @noRd
get_beta_comp <- function(beta, K, M, lags) {

  beta_comp <- matrix(0, K - 1, K - 1)

  beta_comp[1:M, ] <- t(beta[2:K, ]) # Kick constant
  if(lags > 1) { # Add block-diagonal matrix beneath VAR coefficients
    beta_comp[(M + 1):(K - 1), 1:(K - 1 - M)] <- diag(M * (lags - 1))
  }

  return(beta_comp)
}

#' Check whether a package is installed
#'
#' @param package Character scalar.
#'
#' @noRd
has_package <- function(package) {

  if(!requireNamespace(package, quietly = TRUE)) {
    stop("Package \'", package, "\' required for this method.", call. = FALSE)
  }

  return(NULL)
}

#' Generate quantiles
#'
#' Check a vector of confidence bands and create quantiles from it.
#'
#' @param conf_bands Numeric vector of probabilities (\eqn{(0, 1)}).
#'
#' @return Returns a sorted, symmetric vector of quantiles.
#'
#' @noRd
quantile_check <- function(conf_bands) {

  conf_bands <- sapply(conf_bands, num_check,
    min = 0 + 1e-16, max = 1 - 1e-16, msg = "Confidence bands misspecified.")

  # Allow only returning the median
  if(length(conf_bands) == 1 && conf_bands == 0.5) {return(conf_bands)}

  # Sort and make sure we have no duplicates (thank mr float)
  quants <- sort(c(conf_bands, 0.5, (1 - conf_bands)))
  quants <- quants[!duplicated(round(quants, digits = 12L))]

  return(quants)
}

#' Prepare BVAR data for methods
#'
#' Helper function to retrieve hyperparameters or coefficient values based on
#' name / position. Also supports multiple \code{bvar} objects and may be used
#' to check them for similarity.
#'
#' @param x A \code{bvar} object, obtained from \code{\link{bvar}}.
#' @param vars Character vector used to select variables. Elements are matched
#' to hyperparameters or coefficients. Coefficients may be matched based on
#' the dependent variable (by providing the name or position) or the
#' explanatory variables (by providing the name and the desired lag). See the
#' example section for a demonstration. Defaults to \code{NULL}, i.e. all
#' hyperparameters.
#' @param vars_response,vars_impulse Optional character or integer vectors used
#' to select coefficients. Dependent variables are specified with
#' \emph{vars_response}, explanatory ones with \emph{vars_impulse}. See the
#' example section for a demonstration.
#' @param chains List with additional \code{bvar} objects. Contents are then
#' added to trace and density plots.
#' @param check_chains Logical scalar. Whether to check \emph{x} and
#' \emph{chains} for similarity.
#' @param ... Fed to \code{\link{chains_fit}}.
#'
#' @return Returns a named list with:
#' \itemize{
#'   \item \code{data} - Numeric matrix with desired data.
#'   \item \code{vars} - Character vector with names for the desired data.
#'   \item \code{chains} - List of numeric matrices with desired data.
#'   \item \code{bounds} - Numeric matrix with optional boundaries.
#' }
#'
#' @noRd
prep_data <- function(
  x,
  vars = NULL,
  vars_response = NULL, vars_impulse = NULL,
  chains = list(),
  check_chains = FALSE, ...)
{ if(!inherits(x, "bvar")) {stop("Please provide a `bvar` object.")} if(inherits(chains, "bvar")) {chains <- list(chains)} lapply(chains, function(x) {if(!inherits(x, "bvar")) { stop("Please provide `bvar` objects to the chains parameter.") }}) if(check_chains) {chains_fit(x, chains, ...)} # Prepare selection --- vars_hyp <- c("ml", colnames(x[["hyper"]])) vars_dep <- x[["variables"]] vars_ind <- x[["explanatories"]] if(is.null(vars_ind)) { # Compatibility to older versions (<= 0.2.2) vars_ind <- name_expl(vars_dep, M = x[["meta"]][["M"]], lags = x[["meta"]][["lags"]]) } if(is.null(vars) && is.null(vars_impulse) && is.null(vars_response)) { vars <- vars_hyp } choice_hyp <- vars_hyp[unique(do.call(c, lapply(vars, grep, vars_hyp)))] choice_dep <- if(is.null(vars_response)) { # Interpret numbers as positions, exclude independents vars_dep[unique(c(as.integer(vars[grep("^[0-9]+$", vars)]), do.call(c, lapply(vars[!grepl("(^const|lag[0-9]+$)", vars)], grep, vars_dep))))] } else {pos_vars(vars_response, vars_dep, M = x[["meta"]][["M"]])} choice_dep <- choice_dep[!is.na(choice_dep)] choice_ind <- if(is.null(vars_impulse)) { # Limit to ones with "-lag#" or "constant" to separate from dependents vars_ind[unique(do.call(c, lapply(vars[grep("(^const|lag[0-9]+$)", vars)], grep, vars_ind)))] } else {pos_vars(vars_impulse, vars_ind, M = x[["meta"]][["K"]])} if(all(c(length(choice_hyp), length(choice_dep), length(choice_ind)) == 0)) { stop("No matching data found.") } # Build up required outputs --- out <- out_vars <- out_bounds <- out_chains <- list() N <- x[["meta"]][["n_save"]] if(length(choice_hyp) > 0) { # Hyperparameters out[["hyper"]] <- cbind("ml" = x[["ml"]], x[["hyper"]])[seq(N), choice_hyp] out_vars[["hyper"]] <- choice_hyp out_bounds[["hyper"]] <- vapply(choice_hyp, function(z) { if(z == "ml") {c(NA, NA)} else { c(x[["priors"]][[z]][["min"]], x[["priors"]][[z]][["max"]]) }}, double(2)) out_chains[["hyper"]] <- lapply(chains, function(z) { cbind("ml" = z[["ml"]], z[["hyper"]])[seq(N), choice_hyp] }) } else { out_chains[["hyper"]] <- rep(list(NULL), length(chains)) } if(length(choice_dep) > 0 || length(choice_ind) > 0) { # Betas pos_dep <- pos_vars(choice_dep, variables = vars_dep, M = x[["meta"]][["M"]]) pos_ind <- pos_vars(choice_ind, variables = vars_ind, M = x[["meta"]][["K"]]) K <- length(pos_dep) * length(pos_ind) out[["betas"]] <- grab_betas(x, N, K, pos_dep, pos_ind) out_vars[["betas"]] <- paste0( rep(vars_dep[pos_dep], length(pos_ind)), "_", rep(vars_ind[pos_ind], each = length(pos_dep))) out_bounds[["betas"]] <- matrix(NA, ncol = K, nrow = 2) out_chains[["betas"]] <- lapply(chains, grab_betas, N, K, pos_dep, pos_ind) } else { out_chains[["betas"]] <- rep(list(NULL), length(chains)) } # Merge stuff and return --- out_data <- cbind(out[["hyper"]], out[["betas"]]) out_vars <- c(out_vars[["hyper"]], out_vars[["betas"]]) out_chains <- mapply(cbind, out_chains[["hyper"]], out_chains[["betas"]], SIMPLIFY = FALSE) out_chains <- lapply(out_chains, `colnames<-`, out_vars) colnames(out_data) <- out_vars out <- list( "data" = out_data, "vars" = out_vars, "chains" = out_chains, "bounds" = cbind(out_bounds[["hyper"]], out_bounds[["betas"]])) return(out) } #' Grab draws of certain betas #' #' Helper function for \code{\link{prep_data}}. #' #' @param x A \code{bvar} object, obtained from \code{\link{bvar}}. #' @param N,K Integer scalars. Number of rows and columns to return. #' @param pos_dep,pos_ind Numeric vectors. Positions of desired variables. 
#' #' @return Returns a matrix with the requested data. #' #' @noRd grab_betas <- function(x, N, K, pos_dep, pos_ind) { data <- matrix(NA, nrow = N, ncol = K) k <- 1 for(i in pos_ind) {for(j in pos_dep) { data[, k] <- x[["beta"]][seq(N), i, j] # seq() for longer chains k <- k + 1 }} return(data) } #' Check equalities across chains #' #' Function to help check whether \code{bvar} objects are close enough to #' compare. Accessed via \code{\link{prep_data}}. #' #' @param x A \code{bvar} object, obtained from \code{\link{bvar}}. #' @param chains List with additional \code{bvar} objects. #' @param Ms Logical scalar. Whether to check equality of #' \code{x[["meta"]][["M"]]}. #' @param n_saves Logical scalar. Whether to check equality of #' \code{x[["meta"]][["n_save"]]}. #' @param hypers Logical scalar. Whether to check equality of #' \code{x[["priors"]][["hyper"]]}. #' #' @return Returns \code{TRUE} or throws an error. #' #' @noRd chains_fit <- function( x, chains, Ms = TRUE, n_saves = FALSE, hypers = FALSE) { if(is.null(chains) || length(chains) == 0) {return(TRUE)} if(Ms) { Ms <- c(x[["meta"]][["M"]], vapply(chains, function(x) {x[["meta"]][["M"]]}, integer(1))) if(!all(duplicated(Ms)[-1])) {stop("Number of variables does not match.")} } if(n_saves) { n_saves <- c(x[["meta"]][["n_save"]], vapply(chains, function(x) {x[["meta"]][["n_save"]]}, integer(1))) if(!all(duplicated(n_saves)[-1])) { stop("Number of stored iterations does not match.") } } if(hypers) { hypers <- vapply(chains, function(z) { x[["priors"]][["hyper"]] == z[["priors"]][["hyper"]]}, logical(1)) if(!all(hypers)) {stop("Hyperparameters do not match.")} } return(TRUE) }
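
# --- Illustrative sketch (editor's addition, not package code) ---
# gamma_coef() above parameterizes a Gamma(shape = k, scale = theta) via its
# mode, (k - 1) * theta, and standard deviation, sqrt(k) * theta, while
# fill_ci() surrounds a central element with recycled band values. Two quick
# numerical checks, assuming the helpers defined in this file are sourced:
local({
  pars <- gamma_coef(mode = 0.8, sd = 0.5)
  stopifnot(abs((pars$k - 1) * pars$theta - 0.8) < 1e-8, # mode recovered
            abs(sqrt(pars$k) * pars$theta - 0.5) < 1e-8) # sd recovered
  # For P = 5 bands: second band, first band, centre, first band, second band
  stopifnot(identical(fill_ci(0, c(1, 2), P = 5), c(2, 1, 0, 1, 2)))
})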
# --- End of file: BVARverse/R/BVAR.R ---
#' @importFrom generics augment
#' @export
generics::augment

#' Augment BVAR outputs and convert into a tibble
#'
#' Turn the outputs of a Bayesian VAR (see \code{\link[BVAR]{bvar}}) into an
#' augmented tibble. Methods are available for \code{bvar} objects (will
#' yield coefficients and their quantiles), \code{bvar_fcast} objects (with
#' predictions, their quantiles and optionally real datapoints), and
#' \code{bvar_irf} objects (with impulse responses).
#'
#' @param x A \code{bvar} or derived object to turn into a tibble.
#' @param conf_bands Numeric vector. Credible intervals of coefficients to
#' include in the tibble.
#' @param t_back Integer scalar. Number of actual datapoints to include in
#' the tidied forecast.
#' @param ... Not used.
#'
#' @return Returns a \code{\link[tibble]{tibble}} with relevant information;
#' quantiles can be found in the columns.
#'
#' @import BVAR
#' @importFrom rlang .data
#' @importFrom tidyr pivot_wider
#' @importFrom stats coef
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Create tibbles from the outputs
#' augment(x)
#' augment(irf(x))
#' augment(predict(x))
#' }
augment.bvar <- function(x, conf_bands = 0.16, ...) {

  coefs <- coef(x, conf_bands = conf_bands)
  df <- tidy.bvar_coefs(coefs, conf_bands = conf_bands)

  out <- pivot_wider(df, names_from = .data$quantile, names_prefix = "q")

  return(out)
}

#' @rdname augment.bvar
#' @export
augment.bvar_fcast <- function(x, t_back = 0L, ...) {

  df <- tidy.bvar_fcast(x, t_back = t_back)

  out <- pivot_wider(df, names_from = .data$quantile, names_prefix = "q")

  return(out)
}

#' @rdname augment.bvar
#' @export
augment.bvar_irf <- function(x, ...) {

  df <- tidy.bvar_irf(x)

  out <- pivot_wider(df, names_from = .data$quantile, names_prefix = "q")

  return(out)
}
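
# --- Illustrative sketch (editor's addition, not BVARverse code) ---
# Shape of the output: with the default conf_bands = 0.16, tidy() yields
# quantile levels "16", "50" and "84", so the pivot above returns one row
# per variable/term pair with value columns named q16, q50 and q84 (the
# "q" prefix comes from names_prefix above). Roughly:
#
# augment(x)
# #> # A tibble: ... x 5
# #>   variable term         q16    q50    q84
# #>   <chr>    <chr>      <dbl>  <dbl>  <dbl>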
# --- End of file: BVARverse/R/augment.R ---
#' Quick ggplot2 plots for Bayesian VARs
#'
#' Function to quickly plot outputs from \code{bvar} and derived objects.
#' Supported plots include traces and densities, forecasts, and impulse
#' response functions. For more flexible plots one may use the outputs of
#' \code{\link{tidy.bvar}} and \code{\link{augment.bvar}}.
#'
#' @inheritParams tidy.bvar
#' @param type A string with the type (trace or density) of plot desired.
#' @param orientation A string indicating the desired orientation of trace or
#' density plots.
#' @param col Character vector. Colour(s) of the lines delineating credible
#' intervals. Single values will be recycled if necessary. Recycled HEX color
#' codes are varied in transparency if not provided (e.g. "#737373FF"). Lines
#' can be bypassed by setting this to \code{"transparent"}.
#'
#' @return Returns a \code{ggplot} object with a basic structure.
#'
#' @import BVAR
#' @importFrom rlang .data
#' @importFrom ggplot2 ggplot aes labs theme_bw geom_line geom_density
#' @importFrom ggplot2 geom_hline facet_wrap scale_x_continuous
#' @importFrom ggplot2 scale_color_manual
#'
#' @export
#'
#' @examples
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Plot the outputs - alternatively use ggplot() with fortify()
#' bv_ggplot(x)
#' bv_ggplot(irf(x))
#' bv_ggplot(predict(x))
bv_ggplot <- function(x, ...) {UseMethod("bv_ggplot", x)}

#' @rdname bv_ggplot
#' @export
bv_ggplot.default <- function(x, ...) {
  stop("No methods for class ", paste0(class(x), collapse = " / "),
    " found.")
}

#' @rdname bv_ggplot
#' @export
bv_ggplot.bvar_chains <- function(x, ...) {
  bv_ggplot.bvar(x, ...)
}

#' @rdname bv_ggplot
#' @export
bv_ggplot.bvar <- function(x,
  type = c("trace", "density"),
  vars = NULL,
  vars_response = NULL, vars_impulse = NULL,
  orientation = c("horizontal", "vertical"),
  chains = list(),
  ...) {

  if(!inherits(x, "bvar")) {
    if(inherits(x[[1]], "bvar")) { # Allow chains to x
      chains <- x
      x <- x[[1]]
      chains[[1]] <- NULL
    } else {stop("Please provide a `bvar` object.")}
  }

  type <- match.arg(type)
  orientation <- match.arg(orientation)

  df <- tidy.bvar(x, vars = vars, vars_response = vars_response,
    vars_impulse = vars_impulse, chains = chains)

  # Trace or density plot
  if(type == "trace") {
    p <- ggplot(df, aes(x = .data$draw)) +
      labs(x = NULL, y = NULL, color = "MCMC chain") +
      theme_bw()
    p <- p + facet_wrap(. ~ .data$variable, scales = "free",
      nrow = ifelse(orientation == "horizontal", 1,
        length(unique(df[["variable"]]))))

    if(length(chains) == 0) {
      p <- p + geom_line(aes(y = .data$value))
    } else {
      p <- p + geom_line(aes(y = .data$value, color = .data$chain),
        alpha = 0.5)
    }
  } else if(type == "density") {
    p <- ggplot(df, aes(x = .data$value)) +
      labs(x = NULL, y = NULL, fill = "MCMC chain", color = "MCMC chain") +
      theme_bw() +
      facet_wrap(. ~ .data$variable, scales = "free")

    if(length(chains) == 0) {
      p <- p + geom_density(fill = "grey", alpha = 0.5)
    } else {
      p <- p + geom_density(aes(color = .data$chain, fill = .data$chain),
        alpha = 0.5)
    }
  }

  # To-do: Add information about quantiles to the plot

  return(p)
}

#' @rdname bv_ggplot
#' @export
bv_ggplot.bvar_irf <- function(x,
  vars_response = NULL, vars_impulse = NULL,
  col = "#737373",
  ...)
{ df <- tidy.bvar_irf(x) P <- length(unique(df[["quantile"]])) variables <- x[["variables"]] M <- length(variables) pos_imp <- pos_vars(vars_impulse, variables, M) pos_res <- pos_vars(vars_response, variables, M) # Only keep the ones specified df <- df[intersect(which(df[["impulse"]] %in% variables[pos_imp]), which(df[["response"]] %in% variables[pos_res])), ] df[["impulse"]] <- factor(paste(df[["impulse"]], "shock")) df[["response"]] <- factor(paste(df[["response"]], "response")) p <- ggplot(df, aes(x = .data$time - 1)) + scale_x_continuous(breaks = function(x) { unique(floor(pretty(seq(0, (max(x)) * 1.1)))) }) + labs(x = NULL, y = NULL) + theme_bw() p <- p + facet_wrap(.data$response ~ .data$impulse, scales = "free_y") col <- fill_ci_col(x = "#000000", y = col, P = P) p <- p + geom_line(aes(y = .data$value, col = .data$quantile)) + scale_color_manual(values = col, breaks = as.character(unique(df$quantile))) + geom_hline(yintercept = 0, colour = "darkgray", lty = 2) # To-do: Allow ribbons return(p) } #' @rdname bv_ggplot #' @export bv_ggplot.bvar_fcast <- function(x, vars = NULL, col = "#737373", t_back = 1L, ...) { df <- tidy.bvar_fcast(x, t_back) P <- length(unique(df[["quantile"]])) variables <- x[["variables"]] M <- length(variables) pos <- pos_vars(vars, variables, M) # Only keep the ones specified df <- df[which(df[["variable"]] %in% variables[pos]), ] df[["variable"]] <- factor(paste(df[["variable"]], "prediction")) p <- ggplot(df, aes(x = .data$time)) + scale_x_continuous(breaks = function(x) { unique(floor(pretty(seq((-t_back + 1), (max(x)) * 1.1)))) }) + labs(x = NULL, y = NULL) + theme_bw() p <- p + facet_wrap(. ~ .data$variable, scales = "free_y") col <- fill_ci_col(x = "#000000", y = col, P = P) p <- p + geom_line(aes(y = .data$value, col = .data$quantile), na.rm = TRUE) + scale_color_manual(values = col, breaks = as.character(unique(df$quantile))) + geom_hline(yintercept = 0, colour = "darkgray", lty = 2) # To-do: Allow ribbons return(p) }
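
# --- Illustrative sketch (editor's addition, not BVARverse code) ---
# The "To-do: Allow ribbons" notes above can be worked around by the user,
# since bv_ggplot() returns a plain ggplot object and the tidied data is
# accessible directly. A possible ribbon plot for forecasts, assuming a
# fitted `bvar` object `x` and the default 68% credible bands (quantile
# levels "16", "50", "84"):
#
# library(ggplot2)
# wide <- tidyr::pivot_wider(tidy(predict(x)), names_from = quantile,
#                            names_prefix = "q", values_from = value)
# ggplot(wide, aes(x = time)) +
#   geom_ribbon(aes(ymin = q16, ymax = q84), fill = "grey80") +
#   geom_line(aes(y = q50)) +
#   facet_wrap(. ~ variable, scales = "free_y") +
#   theme_bw()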
# --- End of file: BVARverse/R/ggplot.R ---
#' @importFrom generics tidy
#' @export
generics::tidy

#' Tidy BVAR outputs and convert into a tibble
#'
#' Turn the outputs of a Bayesian VAR (see \code{\link[BVAR]{bvar}}) into a
#' tidy tibble. Methods are available for \code{bvar} objects (will yield a
#' subset of coefficient and/or hyperparameter draws), \code{bvar_coefs}
#' objects (with the coefficients and their quantiles), \code{bvar_fcast}
#' objects (with predictions, their quantiles and optionally real datapoints),
#' and \code{bvar_irf} objects (with impulse responses).
#'
#' @param x A \code{bvar} or derived object to turn into a dataframe.
#' @param vars Character vector used to select variables. Elements are matched
#' to hyperparameters or coefficients. Coefficients may be matched based on
#' the dependent variable (by providing the name or position) or the
#' explanatory variables (by providing the name and the desired lag). See the
#' example section for a demonstration. Defaults to \code{NULL}, i.e. all
#' hyperparameters.
#' @param vars_impulse,vars_response Optional character or integer vectors
#' used to select coefficients. Dependent variables are specified with
#' \emph{vars_response}, explanatory ones with \emph{vars_impulse}. Defaults
#' to \code{NULL}, indicating that no coefficients will be processed.
#' @param chains List of \code{bvar} objects. Contents of multiple runs are
#' added to the output, in order to help in assessing convergence.
#' @param t_back Integer scalar. Number of actual datapoints to include in
#' the tidied forecast.
#' @param ... Not used.
#'
#' @return Returns a tidy \code{\link[tibble]{tibble}} with relevant
#' information for further processing.
#'
#' @import BVAR
#' @importFrom tidyr as_tibble
#' @importFrom utils tail
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Access a subset of the fred_qd dataset
#' data <- fred_qd[, c("CPIAUCSL", "UNRATE", "FEDFUNDS")]
#' # Transform it to be stationary
#' data <- fred_transform(data, codes = c(5, 5, 1), lag = 4)
#'
#' # Estimate a BVAR using one lag, default settings and very few draws
#' x <- bvar(data, lags = 1, n_draw = 1000L, n_burn = 200L, verbose = FALSE)
#'
#' # Create tidy tibbles from the outputs
#' tidy(x)
#' tidy(irf(x))
#' tidy(predict(x))
#' }
tidy.bvar <- function(x, vars = NULL,
  vars_response = NULL, vars_impulse = NULL,
  chains = list(), ...) {

  if(!inherits(x, "bvar")) {
    if(inherits(x[[1]], "bvar")) { # Allow chains to x
      chains <- x
      x <- x[[1]]
      chains[[1]] <- NULL
    } else {stop("Please provide a `bvar` object.")}
  }

  x <- prep_data(x, vars = vars,
    vars_response = vars_response, vars_impulse = vars_impulse,
    chains = chains, check_chains = FALSE)

  df <- as.data.frame.table(x[["data"]], stringsAsFactors = FALSE,
    responseName = "value")
  names(df)[1:2] <- c("chain", "variable")
  df[["chain"]] <- 1

  dfs <- vector("list", length(x[["chains"]]) + 1)
  dfs[[1]] <- df

  for(i in seq_along(x[["chains"]])) {
    df_chain <- as.data.frame.table(x[["chains"]][[i]],
      stringsAsFactors = FALSE, responseName = "value")
    names(df_chain)[1:2] <- c("chain", "variable")
    df_chain[["chain"]] <- i + 1
    dfs[[i + 1]] <- df_chain
  }

  out <- do.call(rbind, dfs)
  out[["draw"]] <- seq.int(nrow(x[["data"]]))
  out[["chain"]] <- as.factor(out[["chain"]])
  out <- out[c(2, 4, 1, 3)]

  return(as_tibble(out))
}

#' @rdname tidy.bvar
#' @export
tidy.bvar_coefs <- function(x, ...)
{
  out <- as.data.frame.table(x, stringsAsFactors = FALSE,
    responseName = "value")

  has_quants <- length(dim(x)) == 3
  if(has_quants) {
    out[["quantile"]] <- as.factor(gsub("([0-9]+)%", "\\1", out[[1]]))
    out[[1]] <- NULL
  } else {
    out[["quantile"]] <- 0.5
  }

  out <- out[, c(2, 1, 3, 4)]
  names(out)[1:2] <- c("variable", "term")

  return(as_tibble(out))
}

#' @rdname tidy.bvar
#' @export
tidy.bvar_fcast <- function(x, t_back = 0L, ...) {

  H <- x[["setup"]][["horizon"]]
  variables <- x[["variables"]]
  M <- length(variables)
  quants <- x[["quants"]]
  has_quants <- length(dim(quants)) == 3L
  if(has_quants) {
    P <- dim(quants)[1]
    bands <- dimnames(quants)[[1]]
  } else {
    P <- 2
    # We make quants 3-dimensional so filling with t_back is easier
    quants <- array(NA, c(2, dim(x[["quants"]])))
    quants[1, , ] <- x[["quants"]]
    bands <- c("50%", "NA")
  }

  # Add t_back actual datapoints
  t_back <- int_check(t_back, 0, Inf, msg = "Issue with t_back.")
  if(t_back != 0) {
    data <- tail(x[["data"]], t_back)
    # Extend the quants array with data, quantiles are set to NA
    quants <- vapply(seq(M), function(i) {
      t(rbind(fill_ci_na(data[, i], P), t(quants[, , i])))
    }, matrix(0, P, t_back + H), USE.NAMES = FALSE)
    dimnames(quants)[[1]] <- bands
  }

  # Names as identifier
  dimnames(quants)[[2]] <- seq.int(-t_back + 1, H)
  dimnames(quants)[[3]] <- variables

  # Prep dataframe
  out <- as.data.frame.table(quants, stringsAsFactors = FALSE,
    responseName = "value")

  # Mess with it
  names(out)[1:3] <- c("quantile", "time", "variable")
  out[["time"]] <- as.integer(out[["time"]])
  out <- out[out[["quantile"]] != "NA", ] # Clean dummy dimension from before
  out <- out[, c(3, 2, 4, 1)]
  out[["quantile"]] <- as.factor(gsub("([0-9]+)%", "\\1", out[["quantile"]]))
  out <- out[order(out[["variable"]], out[["time"]]), ]

  return(as_tibble(out))
}

#' @rdname tidy.bvar
#' @export
tidy.bvar_irf <- function(x, ...) {

  H <- x[["setup"]][["horizon"]]
  variables <- x[["variables"]]
  quants <- x[["quants"]]
  has_quants <- length(dim(quants)) == 4L
  if(has_quants) {
    P <- dim(quants)[1]
    bands <- dimnames(quants)[[1]]
  } else {
    P <- 2
    # We make quants 4-dimensional so filling the dummy quantile is easier
    quants <- array(NA, c(2, dim(x[["quants"]])))
    quants[1, , , ] <- x[["quants"]]
    bands <- c("50%", "NA")
  }

  # Names as identifier
  dimnames(quants)[[2]] <- variables # Response
  dimnames(quants)[[3]] <- seq.int(1, H)
  dimnames(quants)[[4]] <- variables # Impulse

  # Prep dataframe
  out <- as.data.frame.table(quants, stringsAsFactors = FALSE,
    responseName = "value")

  # Mess with it
  names(out)[1:4] <- c("quantile", "response", "time", "impulse")
  out[["time"]] <- as.integer(out[["time"]])
  out <- out[out[["quantile"]] != "NA", ] # Clean dummy dimension from before
  out <- out[, c(4, 2, 3, 5, 1)]
  out[["quantile"]] <- as.factor(gsub("([0-9]+)%", "\\1", out[["quantile"]]))
  out <- out[order(out[["impulse"]], out[["response"]], out[["time"]]), ]

  return(as_tibble(out))
}
# --- End of file: BVARverse/R/tidy.R ---
#' Coefficient estimation for a specific set of covariates
#'
#' @description This function estimates the coefficient vector for a given set
#' of covariates in logistic regression and Cox proportional hazard models. It
#' uses the inverse moment nonlocal prior (iMOM) for nonzero coefficients.
#' @param X The design matrix. It is assumed that the preprocessing steps have
#' been done on this matrix. It is recommended to use the output of the
#' \code{\link{PreProcess}} function of the package. Also note that \code{X}
#' should NOT have a vector of \code{1}'s as the first column. If the
#' coefficients of a model selected by \code{\link{bvs}} are to be estimated,
#' it is highly recommended to use the design matrix reported as
#' \code{des_mat} in the output of the \code{bvs} function.
#' @param resp For logistic regression models, this variable is the binary
#' response vector. For Cox proportional hazard models this is a two column
#' matrix where the first column contains the survival time vector and the
#' second column is the censoring status for each observation.
#' @param mod_cols A vector of column indices of the design matrix,
#' representing the selected model.
#' @param nlptype Determines the type of nonlocal prior that is used in the
#' analyses. It can be "piMOM" for product inverse moment prior, or "pMOM" for
#' product moment prior. The default is set to piMOM prior.
#' @param tau Hyperparameter \code{tau} of the iMOM prior.
#' @param r Hyperparameter \code{r} of the iMOM prior.
#' @param family Determines the type of data analysis. \code{logistic} is for
#' binary outcome and logistic regression model whereas \code{survival}
#' represents survival outcomes and the Cox proportional hazard model.
#' @return It returns the vector of coefficients for the given model.
#' @author Amir Nikooienejad
#' @references Nikooienejad, A., Wang, W., and Johnson, V. E. (2016). Bayesian
#' variable selection for binary outcomes in high dimensional genomic studies
#' using non-local priors. Bioinformatics, 32(9), 1338-1345.\cr\cr
#' Nikooienejad, A., Wang, W., & Johnson, V. E. (2020). Bayesian variable
#' selection for survival data using inverse moment priors. Annals of Applied
#' Statistics, 14(2), 809-828.
#' @seealso \code{\link{ModProb}}
#' @examples
#' ### Simulating Survival Data
#' n <- 400
#' p <- 1000
#' lambda <- 0.8
#' cens_rate <- 0.27
#' set.seed(123)
#' Sigma <- diag(p)
#' full <- matrix(c(rep(0.5, p*p)), ncol=p)
#' Sigma <- full + 0.5*Sigma
#' cholS <- chol(Sigma)
#' Beta <- c(-1.8, 1.2, -1.7, 1.4, -1.4, 1.3)
#' X = matrix(rnorm(n*p), ncol=p)
#' X = X%*%cholS
#' X <- scale(X)
#' beta <- numeric(p)
#' beta[c(1:length(Beta))] <- Beta
#' XB <- X%*%beta
#' uvector <- -log(runif(n));
#' times <- uvector/(lambda*exp(XB))
#' cens_time <- quantile(times,1-cens_rate)
#' status <- as.numeric(times < cens_time)
#' TS <- cbind(times,status)
#'
#' ### Estimating coefficients of the true model and an arbitrary hyper
#' ### parameter for the iMOM prior density
#' mod <- c(1:6)
#' coef <- CoefEst(X, TS, mod, tau = 1.8, r = 2, family = "survival")
#' coef
#'
CoefEst <- function(X, resp, mod_cols, nlptype = "piMOM", tau, r,
                    family = c("logistic", "survival")){

  if(nlptype=="piMOM") nlptype_int <- 0
  if(nlptype=="pMOM") nlptype_int <- 1
  if (family == "logistic"){
    X <- cbind(rep(1, length(resp)), X)
    exmat <- cbind(resp, X)
    out <- lreg_coef_est(exmat, mod_cols, tau, r, nlptype_int)
  } else {
    exmat <- cbind(resp, X)
    out <- cox_coef_est(exmat, mod_cols, tau, r, nlptype_int)
  }
  return(out)
}
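
# --- Illustrative sketch (editor's addition, not BVSNLP code) ---
# For binary outcomes the call is analogous to the survival example above:
# given a binary response y and a design matrix X without an intercept
# column (CoefEst() prepends one internally via the cbind() above), the
# coefficients of, say, the first three covariates could be estimated as
# (hypothetical objects y and X):
#
# coef_log <- CoefEst(X, y, mod_cols = c(1:3), tau = 0.5, r = 1,
#                     family = "logistic")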
# --- End of file: BVSNLP/R/CoefEst.R ---
#' Hyperparameter selection for the iMOM prior density
#'
#' @description This function finds data specific hyperparameters for the
#' inverse moment prior density so that the overlap between the iMOM prior and
#' the null MLE density is \eqn{1/\sqrt p}. In this algorithm \code{r} is
#' always chosen to be equal to 1 and \code{tau} is found based on the
#' mentioned overlap.
#' @param X The design matrix. \code{NA}'s should be removed and columns be
#' scaled. It is recommended that the \code{PreProcess} function is run first
#' and its output used for this argument. The columns are genes and rows
#' represent the observations. The column names are used as gene names.
#' @param resp For logistic regression models, it is the binary response
#' vector. For Cox proportional hazard model, this is a two column matrix
#' where the first column contains the survival time vector and the second
#' column is the censoring status for each observation.
#' @param eff_size This is the expected effect size in the model for a
#' standardized design matrix, which is basically the coefficient value that
#' is expected to occur the most based on some prior knowledge.
#' @param nlptype Determines the type of nonlocal prior that is used in the
#' analyses. It can be "piMOM" for product inverse moment prior, or "pMOM" for
#' product moment prior. The default is set to piMOM prior.
#' @param iter The number of iterations needed to simulate from the null model
#' in order to approximate the null MLE density.
#' @param mod_prior Type of prior used for the model space. \code{unif} is for
#' uniform binomial and \code{beta} is for beta binomial prior. In the former
#' case, both hyperparameters in the beta prior are equal to \code{1}, but in
#' the latter case those two hyperparameters are chosen as explained in the
#' reference papers.
#' @param family Determines the type of data analysis. \code{logistic} is for
#' binary outcome data where logistic regression modeling is used, whereas
#' \code{survival} is for survival outcome data using the Cox proportional
#' hazard model.
#' @return It returns a list having the following object:
#' \item{tau}{The hyperparameter for the iMOM prior density function,
#' calculated using the proposed algorithm for the given dataset.}
#' @author Amir Nikooienejad
#' @references Nikooienejad, A., Wang, W., and Johnson, V. E. (2016). Bayesian
#' variable selection for binary outcomes in high dimensional genomic studies
#' using nonlocal priors. Bioinformatics, 32(9), 1338-1345.\cr\cr
#' Johnson, V. E., and Rossell, D. (2010). On the use of nonlocal prior
#' densities in Bayesian hypothesis tests. Journal of the Royal Statistical
#' Society: Series B (Statistical Methodology), 72(2), 143-170.
#' @examples
#' ### Simulating Logistic Regression Data
#' n <- 20
#' p <- 100
#' set.seed(321)
#' Sigma <- diag(p)
#' full <- matrix(c(rep(0.5, p * p)), ncol = p)
#' Sigma <- full + 0.5 * Sigma
#' cholS <- chol(Sigma)
#' Beta <- c(1, 1.8, 2.5)
#' X = matrix(rnorm(n*p, 1, 2), ncol = p)
#' X = X%*%cholS
#' beta <- numeric(p)
#' beta[c(1:length(Beta))] <- Beta
#' XB <- X%*%beta
#' probs <- as.vector(exp(XB)/(1+exp(XB)))
#' y <- rbinom(n,1,probs)
#' colnames(X) <- paste("gene_",c(1:p),sep="")
#' Xout <- PreProcess(X)
#' XX <- Xout$X
#' hparam <- HyperSelect(XX, y, iter = 1000, mod_prior = "beta",
#'                       family = "logistic")
#'
#' hparam$tau
HyperSelect <- function(X, resp, eff_size = 0.7, nlptype = "piMOM",
                        iter = 10000, mod_prior = c("unif", "beta"),
                        family = c("logistic", "survival")){

  # ================== Function Definitions =========================
  mydimom <- function(x, tau, r){
    out <- tau ^ (r / 2) / gamma(r / 2) * abs(x) ^ (-r - 1) *
      exp(-tau / x ^ 2)
    out[is.na(out)] <- 0
    return(out)
  }
  # ================================
  mydmom <- function(x, tau, r){
    out <- (2*pi)^(-0.5)*tau^(-r-0.5)*exp(-x^2/(2*tau))*x^(2*r)
    out[is.na(out)] <- 0
    return(out)
  }
  # ================================
  mypimom_tmp <- function(x, tau, r)
    if (x <= 0) 0.5 * pgamma(1 / x ^ 2, r / 2, tau) else
      1 - (0.5 * pgamma(1 / x ^ 2, r / 2, tau))
  mypimom <- Vectorize(mypimom_tmp)
  # ================================
  mypmom_tmp <- function(x, tau, r){
    a <- tau^(0.5-r)*(sqrt(tau)*pnorm(abs(x)/sqrt(tau)) -
      abs(x)/sqrt(2*pi)*exp(-0.5*x^2/tau))
    if (x <= 0) 1-a else a
  }
  mypmom <- Vectorize(mypmom_tmp)
  # ================================
  Obj_Fun <- function(par, p, betalim, betasd, betam, mydimom, mypimom,
                      mydmom, mypmom, nlptype){
    tau <- par
    r <- 1
    froot <- function(x, pars, nlptype){
      # Difference between the nonlocal prior density and the null MLE
      # density; its root is the crossing point of the two curves.
      if(nlptype == 0) return(mydimom(x, tau = pars, r = 1) -
        dnorm(x, betam, betasd))
      mydmom(x, tau = pars, r = 1) - dnorm(x, betam, betasd)
    }
    pv1 <- tryCatch({pv <- uniroot(froot, pars = tau, nlptype = nlptype,
                                   interval = c(0.000001, 7))$root},
                    error = function(err){ return(NA) })
    if (is.na(pv1)) return(1000)
    pv <- c(-pv, pv)
    ov1 <- 1 - diff(pnorm(pv, betam, betasd))
    if (nlptype==0) ov2 <- diff(mypimom(pv, tau = tau, r = 1))
    if (nlptype==1) ov2 <- diff(mypmom(pv, tau = tau, r = 1))
    ov <- ov1 + ov2
    out <- abs(ov - 1 / sqrt(p))
    return(out)
  }

  # ========================== Main =================================
  if(nlptype=="piMOM") nlptype_int <- 0
  if(nlptype=="pMOM") nlptype_int <- 1
  XX <- X
  bincols <- which(apply(XX, 2, function(x) {all(na.omit(x) %in% 0:1)}))
  if (length(bincols)) XX <- XX[, -bincols]
  X <- XX
  dx <- dim(X)
  n <- dx[1]
  if(family=="logistic"){
    p <- dx[2] - 1
    cons <- 0; prp <- p / n
    ar <- 2 ^ n
    if (prp > 4 && ar < Inf){
      ac <- 0
      cons <- 0
      while (ar > ac) {
        cons <- cons + 1
        ac <- choose(p, cons)
      }
    } else {
      cons <- ceiling(log(p))
    }
    cons <- min(cons, ceiling(log(p)))
    sprob <- sum(resp) / n
  }
  if(family=="survival"){
    p <- dx[2]
    exmat <- cbind(resp, X)
    cons <- ceiling(log(p))
    csr <- 1 - mean(exmat[, 2])
  }
  if (mod_prior == "beta"){ a <- cons; b <- p - a; }
  if (mod_prior == "unif"){ a <- 1; b <- 1; }
  betalim <- 10
  set.seed(1234)
  if(family=="logistic"){
    EsBeta <- null_mle_lreg(X, n, p, cons, a, b, sprob, iter)}
  if(family=="survival"){
    EsBeta <- null_mle_cox(X, n, p, cons, a, b, csr, iter)}
  betasd <- sd(EsBeta)
  betam <- 0
  init <- 1
  res <- nlminb(init, objective = Obj_Fun, p = p, betalim = betalim,
                betasd = betasd, betam = betam, mydimom = mydimom,
                mypimom = mypimom, mydmom = mydmom, mypmom = mypmom,
                nlptype = nlptype_int, lower = 0, upper = 20)
  pars <- res$par
  pars <- round(pars, 2)
  eff <- round(eff_size^2, 2)
  pars <- min(pars, eff)
  return(list(tau = pars))
}
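
# --- Illustrative sketch (editor's addition, not BVSNLP code) ---
# Obj_Fun() above targets an overlap of 1/sqrt(p) between the nonlocal prior
# and the null MLE density N(betam, betasd). The overlap is the normal tail
# mass beyond the crossing points +/- pv plus the prior mass between them.
# A self-contained numerical version of that computation for piMOM (r = 1):
local({
  dimom <- function(x, tau) sqrt(tau) / sqrt(pi) * x^(-2) * exp(-tau / x^2)
  pimom <- function(x, tau) # CDF of the piMOM prior with r = 1
    ifelse(x <= 0, 0.5 * pgamma(1 / x^2, 0.5, tau),
           1 - 0.5 * pgamma(1 / x^2, 0.5, tau))
  tau <- 0.5; betasd <- 0.3
  pv <- uniroot(function(x) dimom(x, tau) - dnorm(x, 0, betasd),
                interval = c(1e-6, 7))$root
  # HyperSelect() tunes tau until this overlap equals 1/sqrt(p)
  (1 - diff(pnorm(c(-pv, pv), 0, betasd))) + diff(pimom(c(-pv, pv), tau))
})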
# --- End of file: BVSNLP/R/HyperSelect.R ---
#' Logarithm of the unnormalized probability of a given model
#'
#' @description This function calculates the logarithm of the unnormalized
#' probability of a given set of covariates for both survival and binary
#' response data. It uses the inverse moment nonlocal prior (iMOM) for
#' nonzero coefficients and the beta binomial prior for the model space.
#' @param X The design matrix. It is assumed that the design matrix has
#' standardized columns. It is recommended to use the output of the
#' \code{\link{PreProcess}} function of the package.
#' @param resp For logistic regression models, this variable is the binary
#' response vector. For Cox proportional hazard models this is a two column
#' matrix where the first column contains the survival time vector and the
#' second column is the censoring status for each observation.
#' @param mod_cols A vector of column indices of the design matrix,
#' representing the model.
#' @param nlptype Determines the type of nonlocal prior that is used in the
#' analyses. It can be "piMOM" for product inverse moment prior, or "pMOM" for
#' product moment prior. The default is set to piMOM prior.
#' @param tau Hyperparameter \code{tau} of the iMOM prior.
#' @param r Hyperparameter \code{r} of the iMOM prior.
#' @param a First parameter in the beta binomial prior.
#' @param b Second parameter in the beta binomial prior.
#' @param family Determines the type of data analysis. \code{logistic} is for
#' binary outcome and logistic regression model whereas \code{survival}
#' represents survival outcomes and the Cox proportional hazard model.
#' @return It returns the unnormalized probability for the selected model.
#' @seealso \code{\link{CoefEst}}
#' @author Amir Nikooienejad
#' @references Nikooienejad, A., Wang, W., and Johnson, V. E. (2016). Bayesian
#' variable selection for binary outcomes in high dimensional genomic studies
#' using nonlocal priors. Bioinformatics, 32(9), 1338-1345.\cr\cr
#' Nikooienejad, A., Wang, W., & Johnson, V. E. (2020). Bayesian variable
#' selection for survival data using inverse moment priors. Annals of Applied
#' Statistics, 14(2), 809-828.
#' @examples
#' ### Simulating Logistic Regression Data
#' n <- 400
#' p <- 1000
#' set.seed(123)
#' Sigma <- diag(p)
#' full <- matrix(c(rep(0.5, p*p)), ncol=p)
#' Sigma <- full + 0.5*Sigma
#' cholS <- chol(Sigma)
#' Beta <- c(1,1.8,2.5)
#' X = matrix(rnorm(n*p), ncol=p)
#' X = X%*%cholS
#' beta <- numeric(p)
#' beta[c(1:length(Beta))] <- Beta
#' XB <- X%*%beta
#' probs <- as.vector(exp(XB)/(1+exp(XB)))
#' y <- rbinom(n,1,probs)
#'
#' ### Calling the function for a subset of the true model, with arbitrary
#' ### parameters for the prior densities
#' mod <- c(1:3)
#' Mprob <- ModProb(X, y, mod, tau = 0.7, r = 1, a = 7, b = 993,
#'                  family = "logistic")
#'
#' Mprob
ModProb <- function(X, resp, mod_cols, nlptype = "piMOM", tau, r, a, b,
                    family = c("logistic", "survival")){

  if(nlptype=="piMOM") nlptype_int <- 0
  if(nlptype=="pMOM") nlptype_int <- 1
  if (family == "logistic"){
    exmat <- cbind(resp, X)
    out <- lreg_mod_prob(exmat, mod_cols, tau, r, a, b, nlptype_int)
  } else {
    exmat <- cbind(resp, X)
    out <- cox_mod_prob(exmat, mod_cols, tau, r, a, b, nlptype_int)
  }
  return(out)
}
# --- End of file: BVSNLP/R/ModProb.R ---
#' Preprocessing the design matrix, preparing it for the variable selection
#' procedure
#'
#' @description This function preprocesses the design matrix by removing
#' columns that contain \code{NA}'s or are all zero. It also standardizes
#' non-binary columns to have mean zero and variance one. The user has the
#' choice of log transforming continuous covariates before scaling them.
#' @param X The \code{n} times \code{p} design matrix. The columns should
#' represent genes and rows represent the observations. The column names are
#' used as gene names so they should not be left as \code{NULL}. Note that the
#' input matrix \code{X} should NOT contain a vector of \code{1}'s
#' representing the intercept.
#' @param logT A boolean variable determining if log transform should be done
#' on continuous columns before scaling them. Note that those columns should
#' not contain any zeros or negative values.
#' @author Amir Nikooienejad
#' @return It returns a list having the following objects:
#' \item{X}{The filtered design matrix which can be used in the variable
#' selection procedure. Binary columns are left unscaled.}
#' \item{gnames}{Gene names read from the column names of the filtered design
#' matrix.}
#' @examples
#' ### Constructing a synthetic design matrix for the purpose of preprocessing
#' ### imposing columns with different scales
#' n <- 40
#' p1 <- 50
#' p2 <- 150
#' p <- p1 + p2
#' X1 <- matrix(rnorm(n*p1, 1, 2), ncol = p1)
#' X2 <- matrix(rnorm(n*p2), ncol = p2)
#' X <- cbind(X1, X2)
#'
#' ### putting NA elements in the matrix
#' X[3,85] <- NA
#' X[25,85] <- NA
#' X[35,43] <- NA
#' X[15,128] <- NA
#' colnames(X) <- paste("gene_",c(1:p),sep="")
#'
#' ### Running the function. Note that no intercept column is added and that
#' ### columns containing NA elements are removed
#' Xout <- PreProcess(X)
#' dim(Xout$X)[2] == (p + 1) ## FALSE: no intercept column is added and the
#' ## columns containing NA elements have been removed
PreProcess <- function(X, logT = FALSE){

  Xin <- X
  ex0 <- which(apply(Xin, 2, function(x) all(x == 0)))
  if (length(ex0)) Xin <- Xin[, -ex0]
  ex1 <- which(colSums(is.na(Xin)) != 0)
  if (length(ex1)) Xin <- Xin[, -ex1]
  XX2 <- Xin
  bincols <- which(apply(Xin, 2, function(x) {all(x %in% 0:1)}))
  if (length(bincols)){
    if(logT) XX2[, -bincols] <- log(XX2[, -bincols])
    XX2[, -bincols] <- scale(XX2[, -bincols])
  } else {
    if(logT) Xin <- log(Xin)
    XX2 <- scale(Xin)
  }
  ex2 <- which(colSums(is.na(XX2)) != 0)
  if (length(ex2)) XX2 <- XX2[, -ex2]
  gene_names <- colnames(XX2)
  return(list(X = XX2, gnames = gene_names))
}
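
# --- Illustrative sketch (editor's addition, not BVSNLP code) ---
# Binary columns pass through PreProcess() unscaled while continuous columns
# are standardized; a quick check using the function defined above:
local({
  set.seed(1)
  Xd <- cbind(cont = rnorm(30, 5, 2), bin = rbinom(30, 1, 0.4))
  out <- PreProcess(Xd)
  stopifnot(all(out$X[, "bin"] %in% 0:1),      # binary column untouched
            abs(mean(out$X[, "cont"])) < 1e-8) # continuous column centred
})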
# --- End of file: BVSNLP/R/PreProcess.R ---
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

null_mle_lreg <- function(XX, n, p, cons, a, b, sprob, niters) {
    .Call(`_BVSNLP_null_mle_lreg`, XX, n, p, cons, a, b, sprob, niters)
}

#' Non-parallel version of Bayesian variable selector for logistic regression
#' data using nonlocal priors
#' @description This function performs Bayesian variable selection for
#' logistic regression data in a non-parallel fashion. It does not contain any
#' pre-processing step or variable initialization. Moreover it does not have
#' the feature to be run in parallel for performing the coupling algorithm.
#' Therefore in general, it is NOT recommended to be used unless the user
#' knows how to initialize all the parameters. However, this function is
#' called by \code{\link{bvs}} function, the recommended way to run Bayesian
#' variable selection for such datasets.
#' @param exmat An extended matrix where the first column is the binary
#' response vector and the rest is the design matrix which has its first
#' column all 1 to account for the intercept in the model and is the output of
#' \code{PreProcess} code where the fixed columns are moved to the beginning.
#' @param chain1 The first chain or initial model where the MCMC algorithm
#' starts from. Note that the first \code{nf+1} elements are \code{1} where
#' \code{nf} is the number of fixed covariates that do not enter the selection
#' procedure and \code{1} is for the intercept.
#' @param nf The number of fixed covariates that do not enter the selection
#' procedure.
#' @param tau The parameter \code{tau} of the iMOM prior.
#' @param r The parameter \code{r} of the iMOM prior.
#' @param nlptype Determines the type of nonlocal prior that is used in the
#' analyses. \code{0} is for piMOM and \code{1} is for pMOM.
#' @param a The first parameter in beta distribution used as prior on model
#' size. This parameter is equal to 1 when the uniform-binomial prior is used.
#' @param b The second parameter in beta distribution used as prior on model
#' size. This parameter is equal to 1 when the uniform-binomial prior is used.
#' @param in_cons The average model size. This value under certain conditions
#' and when \code{p} is large, is equal to parameter \code{a} of the
#' beta-binomial prior.
#' @param loopcnt Number of iterations for MCMC procedure.
#' @param cplng A boolean variable indicating the coupling algorithm to be
#' performed or not.
#' @param chain2 Second chain or model for starting the MCMC procedure. This
#' parameter is only used when \code{cplng=TRUE}. Thus, it could be simply
#' set to \code{chain1} when it is not used.
#'
#' @return It returns a list containing following objects:
#' \item{max_chain}{A \code{1} by \code{p+1} binary vector showing the selected
#' model with maximum probability. \code{1} means a specific variable is
#' selected. The first variable is always the intercept.}
#' \item{beta_hat}{The coefficient vector for the selected model. The first one
#' is always for the intercept.}
#' \item{max_prob}{The unnormalized probability of the model with highest
#' posterior probability.}
#' \item{num_iterations}{The number of MCMC iterations that are executed.
#' This is used when \code{cplng=TRUE} to check whether the total designated #' MCMC iterations were used or two chains are coupled sooner than that.} #' \item{cplng_flag}{This is used when \code{cplng=TRUE} and indicates whether #' two chains are coupled or not.} #' \item{num_vis_models}{Number of visited models in search for the highest #' probability model. This contains redundant models too and is not the number #' of unique models.} #' \item{hash_key}{This is only used when \code{cplng = FALSE}. This is a #' vector containing real numbers uniquely assigned to each model for #' distinguishing them.} #' \item{hash_prob}{This is only used when \code{cplng = FALSE}. This is a #' vector of probabilities for each visited model.} #' \item{vis_covs}{This is only used when \code{cplng = FALSE}. This is a #' list where each element contains indices of covariates for each visited #' model.} #' @author Amir Nikooienejad #' @references Nikooienejad, A., Wang, W., and Johnson, V. E. (2016). Bayesian #' variable selection for binary outcomes in high dimensional genomic studies #' using nonlocal priors. Bioinformatics, 32(9), 1338-1345.\cr\cr #' Nikooienejad, A., Wang, W., and Johnson, V. E. (2017). Bayesian Variable #' Selection in High Dimensional Survival Time Cancer Genomic Datasets using #' Nonlocal Priors. arXiv preprint, arXiv:1712.02964.\cr\cr #' Johnson, V. E., and Rossell, D. (2010). On the use of non-local prior #' densities in Bayesian hypothesis tests. Journal of the Royal Statistical #' Society: Series B (Statistical Methodology), 72(2), 143-170.\cr\cr #' Johnson, V. E. (1998). A coupling-regeneration scheme for #' diagnosing convergence in Markov chain Monte Carlo algorithms. Journal of #' the American Statistical Association, 93(441), 238-248. 
#' @seealso \code{\link{bvs}}
#' @examples
#' ### Initializing parameters
#' n <- 200
#' p <- 40
#' set.seed(123)
#' Sigma <- diag(p)
#' full <- matrix(c(rep(0.5, p*p)), ncol=p)
#' Sigma <- full + 0.5*Sigma
#' cholS <- chol(Sigma)
#' Beta <- c(-1.7,1.8,2.5)
#' X <- matrix(rnorm(n*p), ncol=p)
#' X <- X%*%cholS
#' colnames(X) <- paste("gene_",c(1:p),sep="")
#' beta <- numeric(p)
#' beta[c(1:length(Beta))] <- Beta
#' XB <- X%*%beta
#' probs <- as.vector(exp(XB)/(1+exp(XB)))
#' y <- rbinom(n,1,probs)
#' exmat <- cbind(y,X)
#' tau <- 0.5; r <- 1; a <- 3; b <- p-a; in_cons <- a;
#' loopcnt <- 100; cplng <- FALSE;
#' initProb <- in_cons/p
#'
#' ### Initializing Chains
#' schain <- p
#' while (schain > in_cons || schain == 0) {
#'   chain1 <- rbinom(p, 1, initProb)
#'   schain <- sum(chain1)
#' }
#' chain1 <- as.numeric(c(1, chain1))
#' chain2 <- chain1
#' nlptype <- 0 ## PiMOM nonlocal prior
#' nf <- 0 ### No fixed columns
#'
#' ### Running the function
#' bvsout <- logreg_bvs(exmat,chain1,nf,tau,r,nlptype,a,b,in_cons,loopcnt,cplng,chain2)
#'
#' ### Number of visited models for this specific run:
#' bvsout$num_vis_models
#'
#' ### The selected model:
#' which(bvsout$max_chain > 0)
#'
#' ### Estimated coefficients:
#' bvsout$beta_hat
#'
#' ### The unnormalized probability of the selected model:
#' bvsout$max_prob
logreg_bvs <- function(exmat, chain1, nf, tau, r, nlptype, a, b, in_cons, loopcnt, cplng, chain2) {
    .Call(`_BVSNLP_logreg_bvs`, exmat, chain1, nf, tau, r, nlptype, a, b, in_cons, loopcnt, cplng, chain2)
}

lreg_coef_est <- function(exmat, mod_cols, tau, r, nlptype) {
    .Call(`_BVSNLP_lreg_coef_est`, exmat, mod_cols, tau, r, nlptype)
}

lreg_mod_prob <- function(exmat, mod_cols, tau, r, a, b, nlptype) {
    .Call(`_BVSNLP_lreg_mod_prob`, exmat, mod_cols, tau, r, a, b, nlptype)
}

null_mle_cox <- function(XX, n, p, cons, a, b, csr, niters) {
    .Call(`_BVSNLP_null_mle_cox`, XX, n, p, cons, a, b, csr, niters)
}

#' Non-parallel version of Bayesian variable selector for survival data using
#' nonlocal priors
#' @description This function performs Bayesian variable selection for
#' survival data in a non-parallel fashion. It runs a modified S5 algorithm to
#' search the model space but since this is only on one CPU, the number of
#' visited models will not be large and therefore is NOT recommended for
#' high dimensional datasets. This function is called by \code{\link{bvs}}
#' function in a parallel fashion and therefore that function is recommended
#' to be used.
#'
#' @param exmat An extended matrix where the first two columns are survival
#' times and status, respectively and the rest is the design matrix which is
#' produced by \code{PreProcess} function.
#' @param cur_cols A vector containing indices of the initial model for
#' variable selector to start the S5 algorithm from. Note that the first
#' \code{nf} indices are \code{1} to \code{nf} where \code{nf} is the number
#' of fixed covariates that do not enter the selection procedure.
#' @param nf The number of fixed covariates that do not enter the selection
#' procedure.
#' @param tau The parameter \code{tau} of the iMOM prior.
#' @param r The parameter \code{r} of the iMOM prior.
#' @param nlptype Determines the type of nonlocal prior that is used in the
#' analyses. \code{0} is for piMOM and \code{1} is for pMOM.
#' @param a The first parameter in beta distribution used as prior on model
#' size. This parameter is equal to 1 when the uniform-binomial prior is used.
#' @param b The second parameter in beta distribution used as prior on model
#' size. This parameter is equal to 1 when the uniform-binomial prior is used.
#' @param d This is the number of candidate covariates picked from top
#' variables with highest utility function value and used in S5 algorithm.
#' @param L Number of temperatures in S5 algorithm.
#' @param J Number of iterations at each temperature in S5 algorithm.
#' @param temps Vector of temperatures used in S5 algorithm.
#'
#' @return It returns a list containing following objects:
#' \item{max_model}{A \code{1} by \code{p} binary vector showing the selected
#' model with maximum probability. \code{1} means a specific variable is
#' selected.}
#' \item{hash_key}{A column vector indicating the generated key for each model
#' that is used to track visited models and growing dictionary.}
#' \item{max_prob}{The unnormalized probability of the model with highest
#' posterior probability.}
#' \item{all_probs}{A vector containing unnormalized probabilities of all
#' visited models.}
#' \item{vis_covs_list}{A list containing the covariates in each visited model
#' in the stochastic search process.}
#' @author Amir Nikooienejad
#' @references Nikooienejad, A., Wang, W., and Johnson, V. E. (2017). Bayesian
#' Variable Selection in High Dimensional Survival Time Cancer Genomic
#' Datasets using Nonlocal Priors. arXiv preprint, arXiv:1712.02964.\cr\cr
#' Shin, M., Bhattacharya, A., and Johnson, V. E. (2017). Scalable
#' Bayesian variable selection using nonlocal prior densities in ultrahigh
#' dimensional settings. Statistica Sinica.\cr\cr
#' Johnson, V. E., and Rossell, D. (2010). On the use of non-local prior
#' densities in Bayesian hypothesis tests. Journal of the Royal Statistical
#' Society: Series B (Statistical Methodology), 72(2), 143-170.
#' @seealso \code{\link{bvs}}
#' @examples
#' ### Initializing the parameters
#' n <- 100
#' p <- 40
#' set.seed(123)
#' Sigma <- diag(p)
#' full <- matrix(c(rep(0.5, p*p)), ncol=p)
#' Sigma <- full + 0.5*Sigma
#' cholS <- chol(Sigma)
#' Beta <- c(-1.8, 1.2, -1.7, 1.4, -1.4, 1.3)
#' X = matrix(rnorm(n*p), ncol=p)
#' X = X%*%cholS
#' X <- scale(X)
#' beta <- numeric(p)
#' beta[c(1:length(Beta))] <- Beta
#' XB <- X%*%beta
#' sur_times <- rexp(n,exp(XB))
#' cens_times <- rexp(n,0.2)
#' times <- pmin(sur_times,cens_times)
#' status <- as.numeric(sur_times <= cens_times)
#' exmat <- cbind(times,status,X)
#' L <- 10; J <- 10
#' d <- 2 * ceiling(log(p))
#' temps <- seq(3, 1, length.out = L)
#' tau <- 0.5; r <- 1; a <- 6; b <- p-a
#' nlptype <- 0 ### PiMOM nonlocal prior
#' cur_cols <- c(1,2,3) ### Starting model for the search algorithm
#' nf <- 0 ### No fixed columns
#'
#' ### Running the Function
#' coxout <- cox_bvs(exmat,cur_cols,nf,tau,r,nlptype,a,b,d,L,J,temps)
#'
#' ### The number of visited models for this specific run:
#' length(coxout$hash_key)
#'
#'
#' ### The selected model:
#' which(coxout$max_model>0)
#'
#' ### The unnormalized probability of the selected model:
#' coxout$max_prob
#'
cox_bvs <- function(exmat, cur_cols, nf, tau, r, nlptype, a, b, d, L, J, temps) {
    .Call(`_BVSNLP_cox_bvs`, exmat, cur_cols, nf, tau, r, nlptype, a, b, d, L, J, temps)
}

inc_prob_calc <- function(all_probs, vis_covs, p) {
    .Call(`_BVSNLP_inc_prob_calc`, all_probs, vis_covs, p)
}

cox_coef_est <- function(exmat, mod_cols, tau, r, nlptype) {
    .Call(`_BVSNLP_cox_coef_est`, exmat, mod_cols, tau, r, nlptype)
}

cox_mod_prob <- function(exmat, mod_cols, tau, r, a, b, nlptype) {
    .Call(`_BVSNLP_cox_mod_prob`, exmat, mod_cols, tau, r, a, b, nlptype)
}

aucBMA_logistic <- function(X_tr, y_tr, X_te, y_te, tau, r,
nlptype, probs, models, k) { .Call(`_BVSNLP_aucBMA_logistic`, X_tr, y_tr, X_te, y_te, tau, r, nlptype, probs, models, k) } aucBMA_survival <- function(X_tr, TS_tr, X_te, TS_te, tau, r, nlptype, times, probs, models, k) { .Call(`_BVSNLP_aucBMA_survival`, X_tr, TS_tr, X_te, TS_te, tau, r, nlptype, times, probs, models, k) }
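# ---------------------------------------------------------------------------
# Illustrative sketch (not package code, never executed): the coupled mode of
# logreg_bvs(). The roxygen example above uses cplng = FALSE with chain2 set
# equal to chain1; the bvs() wrapper later in this package draws a second,
# independent starting chain when cplng = TRUE. This sketch mirrors that
# wrapper; reusing the objects (exmat, chain1, tau, ...) built in the example
# above is an assumption for demonstration only.
if (FALSE) {
  schain <- p
  while (schain > in_cons || schain == 0) {
    chain2 <- rbinom(p, 1, initProb)   # second, independent starting chain
    schain <- sum(chain2)
  }
  chain2 <- as.numeric(c(1, chain2))   # leading 1 for the intercept
  cplout <- logreg_bvs(exmat, chain1, nf, tau, r, nlptype, a, b,
                       in_cons, loopcnt, cplng = TRUE, chain2)
  cplout$cplng_flag   # 1 if this pair of chains coupled, 0 otherwise
  cplout$max_prob     # maximum marginal probability found by the pair
}
# ---------------------------------------------------------------------------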
/scratch/gouwar.j/cran-all/cranData/BVSNLP/R/RcppExports.R
#' High dimensional Bayesian variable selection using nonlocal priors
#'
#' @description This function performs Bayesian variable selection for a high
#' dimensional design matrix using the iMOM prior for non-zero coefficients. It
#' also performs adaptive hyperparameter selection for the iMOM prior. Cleaning
#' the data in a preprocessing step, before any data analysis, is done
#' according to user preference. This function is for binary and survival time
#' response datasets. In the former, MCMC is used to search the model space,
#' while for the latter a stochastic search does that job. This function has
#' the option to do all the mentioned tasks in a parallel fashion, exploiting
#' hundreds of CPUs. It is highly recommended to use a cluster for this
#' purpose. This function also supports fixing covariates in the variable
#' selection process, so that they are included in the final selected model
#' with probability 1. Categorical variables are also supported as input
#' covariates to the selection process. They need to be well defined factor
#' variables as part of the input data frame. For the output, this function
#' reports the measurements that are common in Bayesian variable selection
#' algorithms. These include the highest posterior probability model, the
#' median probability model and the posterior inclusion probability for each of
#' the covariates in the design matrix.
#'
#' @param X The \code{n} times \code{p} input data frame containing the
#' covariates in the design matrix. The columns should represent genes and rows
#' represent the observed samples. The column names are used as gene names so
#' they should not be left as \code{NULL}. Moreover, the minimum number of
#' columns allowed is 3. The input data frame can also contain categorical
#' covariates that are appropriately defined as factor variables in R.
#' @param resp For logistic regression models it is the binary response
#' vector, which could be either a numeric or factor variable in R. For the Cox
#' proportional hazard models this is a two column matrix where the first
#' column contains the survival time vector and the second column is the
#' censoring status for each observation.
#' @param prep A boolean variable determining if the preprocessing step should
#' be performed on the design matrix or not. That step contains removing
#' columns that have \code{NA}'s or all their elements are equal to 0, along
#' with standardizing non-binary columns. This step is recommended and thus the
#' default value is \code{TRUE}.
#' @param logT A boolean variable determining if log transform should be done
#' on continuous columns before scaling them in the preprocessing step.
#' Note that those columns should not contain any zeros or negative values.
#' @param fixed_cols A vector of indices showing the columns in the input
#' data frame that are not subject to the selection procedure. These
#' columns are always in the final selected model. Note that if any of these
#' columns contain \code{NA}, they will be removed. Moreover, if a categorical
#' variable with \code{k} levels is chosen to be fixed, all \code{k-1} dummy
#' variables associated with it will be selected in the final model.
#' @param eff_size This is the expected effect size in the model for a
#' standardized design matrix, which is basically the coefficient value that is
#' expected to occur the most based on some prior knowledge.
#' @param family Determines the type of data analysis. \code{logistic} is for
#' binary outcome data where logistic regression modeling is used, whereas
#' \code{survival} is for survival outcome data using the Cox proportional
#' hazard model.
#' @param hselect A boolean variable indicating whether the automatic procedure
#' for hyperparameter selection should be run or not. The default value is
#' \code{TRUE}. Note that in this setting, \code{r} is always chosen to be 1.
#' @param nlptype Determines the type of nonlocal prior that is used in the
#' analyses. It can be "piMOM" for product inverse moment prior, or "pMOM" for
#' product moment prior. The default is set to piMOM prior.
#' @param r The parameter \code{r} of the iMOM prior, when no automatic
#' procedure for hyperparameter selection is done. As a result, this is
#' relevant only when \code{hselect = FALSE}, otherwise it is ignored.
#' @param tau The parameter \code{tau} of the iMOM prior, when no automatic
#' procedure for hyperparameter selection is done. As a result, this is
#' relevant only when \code{hselect = FALSE}, otherwise it is ignored.
#' @param niter Number of iterations. For binary response data, this
#' determines the number of MCMC iterations per CPU. For survival response data
#' this is the number of iterations per temperature schedule in the stochastic
#' search algorithm.
#' @param mod_prior Type of prior used for the model space. \code{unif} is
#' for a uniform binomial and \code{beta} is for a beta binomial prior. In the
#' former case, both hyperparameters in the beta prior are equal to \code{1},
#' but in the latter case those two hyperparameters are chosen as explained in
#' the reference papers. The default choice for this variable is the uniform
#' prior.
#' @param inseed The input seed for making the parallel processing
#' reproducible. This parameter is ignored in logistic regression models when
#' \code{cplng = FALSE}. The default value is \code{NULL}, which means that
#' each time the search of the model space starts from different starting
#' points. In case it is set to a number, it initializes the RNG for the first
#' task and subsequent tasks to get separate substreams.
#' @param cplng This parameter is only used in logistic regression models, and
#' indicates whether the coupling algorithm for MCMC output should be performed
#' or not.
#' @param ncpu This is the number of cpus used in parallel processing. For
#' logistic regression models this is the number of parallel coupled chains
#' run at the same time. For survival outcome data this is the number of cpus
#' doing stochastic search at the same time to increase the number of visited
#' models.
#' @param parallel.MPI A boolean variable determining if MPI is used for
#' parallel processing or not. Note that in order to use this feature, your
#' system should support MPI and the \code{Rmpi} and \code{doMPI} packages
#' should already be installed. The default is set to \code{FALSE} but in case
#' your system has the requirements, it is recommended to set this parameter to
#' \code{TRUE} as it is more efficient and results in faster run-time.
#' @return It returns a list containing different objects that depend on the
#' family of the model and the coupling flag for logistic regression models.
#' The following describes the objects in the output list based on different
#' combinations of those two input arguments.\cr \cr
#' \strong{1) } \code{family = logistic && cplng = FALSE}
#' \item{num_vis_models}{Number of unique models visited throughout the search
#' of the model space.}
#' \item{max_prob}{Maximum unnormalized probability among all visited models.}
#' \item{HPM}{The indices of the model with highest posterior
#' probability among all visited models, with respect to the columns in
#' the output \code{des_mat}. This is not necessarily the same as the input
#' design matrix due to some changes to categorical variables. The names of
#' the selected columns can be checked using \code{gene_names}.
#' The corresponding design matrix is also one of the outputs that can be
#' checked in \code{des_mat}. If the output is \code{character(0)}, it means
#' none of the variables of the design matrix is selected in the HPM and the
#' HPM contains only the intercept.}
#' \item{beta_hat}{The coefficient vector for the selected model. The first
#' component is always for the intercept.}
#' \item{MPM}{The indices of the median probability model. Following
#' Barbieri et al., this is defined to be the model consisting of those
#' variables whose posterior inclusion probability is at least 1/2. The order
#' of columns is similar to that explained for \code{HPM}.}
#' \item{max_prob_vec}{A \code{1000} by \code{1} vector of unnormalized
#' probabilities of the first 1000 models with highest posterior probability
#' among all visited models. If the total number of visited models is less than
#' 1000, then the length of this vector would be equal to
#' \code{num_vis_models}. Note that the intercept is always used in calculating
#' the probabilities in this vector.}
#' \item{max_models}{A list containing models corresponding to the
#' \code{max_prob_vec} vector. Each entry of this list contains the indices of
#' covariates for the model with posterior probability reported in the
#' corresponding entry in \code{max_prob_vec}. The intercept column is not
#' shown in this list as it is present in all of the models.}
#' \item{inc_probs}{A vector of length \code{p+1} containing the posterior
#' inclusion probability for each covariate in the design matrix. The order of
#' columns is with respect to the processed design matrix, \code{des_mat}.}
#' \item{nlptype}{The type of nonlocal prior used in the analyses.}
#' \item{des_mat}{The design matrix used in the analysis where fixed columns
#' are moved to the beginning of the matrix and, if \code{prep=TRUE}, the
#' columns containing \code{NA} are all removed. The reported indices in
#' selected models are all with respect to the columns of this matrix.}
#' \item{gene_names}{Names of the genes extracted from the design matrix.}
#' \item{r}{The hyperparameter for the iMOM prior density function, calculated
#' using the proposed algorithm for the given dataset.}
#' \item{tau}{The hyperparameter for the iMOM prior density function,
#' calculated using the proposed algorithm for the given dataset.}
#' \strong{2) } \code{family = logistic && cplng = TRUE}
#' \item{cpl_percent}{Shows what percentage of pairs of chains are coupled.}
#' \item{margin_probs}{A \code{k} by \code{1} vector of marginal probabilities
#' where element \code{i} shows the maximum marginal probability of the
#' data under the maximum model for the \eqn{i^{th}} pair of chains. \code{k}
#' is the number of paired chains, which is the same as the number of CPUs.}
#' \item{chains}{A \code{k} by \code{p} binary matrix, where each row is the
#' model for the \eqn{i^{th}} pair of chains. Note that the indices of nonzero
#' elements are not necessarily in the same order as the input design matrix,
#' \code{X}, depending on the existence of fixed columns in the selection
#' procedure. As a result, always match the indices to the columns of the
#' design matrix that is reported as an output in \code{des_mat}.}
#' \item{nlptype}{The type of nonlocal prior used in the analyses.}
#' \item{cpl_flags}{A \code{k} by \code{1} binary vector, showing which pairs
#' are coupled (= \code{1}) and which are not (= \code{0}).}
#' \item{beta_hat}{A \code{k} by \code{(p+1)} matrix where each row is the
#' estimated coefficient vector for the model in the corresponding row of the
#' \code{chains} output.}
#' \item{uniq_models}{A list showing unique models with the indices of the
#' included covariates at each model.}
#' \item{freq}{Frequency of each of the unique models. It is used to find
#' the highest frequency model.}
#' \item{probs}{Unnormalized probability of each of the unique models.}
#' \item{des_mat}{The design matrix used in the analysis where fixed columns
#' are moved to the beginning of the matrix and, if \code{prep=TRUE}, the
#' columns containing \code{NA} are all removed. The reported indices in
#' selected models are all with respect to the columns of this matrix.}
#' \item{gene_names}{Names of the genes extracted from the design matrix.}
#' \item{r}{The hyperparameter for the iMOM prior density function, calculated
#' using the proposed algorithm for the given dataset.}
#' \item{tau}{The hyperparameter for the iMOM prior density function,
#' calculated using the proposed algorithm for the given dataset.}
#' \strong{3) } \code{family = survival}
#' \item{num_vis_models}{Number of visited models during the whole process.}
#' \item{max_prob}{The unnormalized probability of the maximum model among
#' all visited models.}
#' \item{HPM}{The indices of the model with highest posterior
#' probability among all visited models, with respect to the columns in
#' \code{des_mat}. As a result, always look at the names of the selected
#' columns using \code{gene_names}. The corresponding design matrix is one of
#' the outputs that can be checked in \code{des_mat}.}
#' \item{MPM}{The indices of the median probability model. Following
#' Barbieri et al., this is defined to be the model consisting of those
#' variables whose posterior inclusion probability is at least 1/2. The order
#' of columns is similar to that explained for \code{HPM}.}
#' \item{beta_hat}{The coefficient vector for the selected model reported in
#' \code{HPM}.}
#' \item{max_prob_vec}{A \code{1000} by \code{1} vector of unnormalized
#' probabilities of the first 1000 models with highest posterior probability
#' among all visited models. If the total number of visited models is less than
#' 1000, then the length of this vector would be equal to
#' \code{num_vis_models}.}
#' \item{max_models}{A list containing models corresponding to the
#' \code{max_prob_vec} vector. Each entry of this list contains the indices of
#' covariates for the model with posterior probability reported in the
#' corresponding entry in \code{max_prob_vec}.}
#' \item{inc_probs}{A \code{p} by \code{1} vector containing the posterior
#' inclusion probability for each covariate in the design matrix. The order of
#' columns is with respect to the processed design matrix, \code{des_mat}.}
#' \item{nlptype}{The type of nonlocal prior used in the analyses.}
#' \item{des_mat}{The design matrix used in the analysis where fixed columns
#' are moved to the beginning of the matrix and, if \code{prep=TRUE}, the
#' columns containing \code{NA} are all removed. The reported indices in
#' selected models are all with respect to the columns of this matrix.}
#' \item{start_models}{A \code{k} by \code{3} matrix showing the starting model
#' for each worker CPU, where \code{k} is equal to the number of CPUs.}
#' \item{gene_names}{Names of the genes extracted from the design matrix.}
#' \item{r}{The hyperparameter for the iMOM prior density function, calculated
#' using the proposed algorithm for the given dataset.}
#' \item{tau}{The hyperparameter for the iMOM prior density function,
#' calculated using the proposed algorithm for the given dataset.}
#' @author Amir Nikooienejad
#' @references Nikooienejad, A., Wang, W., and Johnson, V. E. (2016). Bayesian
#' variable selection for binary outcomes in high dimensional genomic studies
#' using nonlocal priors. Bioinformatics, 32(9), 1338-1345.\cr\cr
#' Nikooienejad, A., Wang, W., & Johnson, V. E. (2020). Bayesian variable
#' selection for survival data using inverse moment priors. Annals of Applied
#' Statistics, 14(2), 809-828. \cr\cr
#' Johnson, V. E. (1998). A coupling-regeneration scheme for
#' diagnosing convergence in Markov chain Monte Carlo algorithms. Journal of
#' the American Statistical Association, 93(441), 238-248.\cr\cr
#' Shin, M., Bhattacharya, A., and Johnson, V. E. (2017). Scalable Bayesian
#' variable selection using nonlocal prior densities in ultrahigh dimensional
#' settings. Statistica Sinica.\cr\cr
#' Johnson, V. E., and Rossell, D. (2010). On the use of non-local prior
#' densities in Bayesian hypothesis tests. Journal of the Royal Statistical
#' Society: Series B (Statistical Methodology), 72(2), 143-170.\cr\cr
#' Barbieri, M. M., and Berger, J. O. (2004). Optimal predictive model
#' selection. The Annals of Statistics, 32(3), 870-897.
#' @seealso \code{\link{ModProb}}, \code{\link{CoefEst}}
#' @examples
#' ### Simulating Logistic Regression Data
#' n <- 200
#' p <- 40
#' set.seed(123)
#' Sigma <- diag(p)
#' full <- matrix(c(rep(0.5, p*p)), ncol=p)
#' Sigma <- full + 0.5*Sigma
#' cholS <- chol(Sigma)
#' Beta <- c(-1.9,1.3,2.2)
#' X <- matrix(rnorm(n*p), ncol=p)
#' X <- X%*%cholS
#' beta <- numeric(p)
#' beta[c(1:length(Beta))] <- Beta
#' XB <- X%*%beta
#' probs <- as.vector(exp(XB)/(1+exp(XB)))
#' y <- rbinom(n,1,probs)
#' colnames(X) <- paste("gene_",c(1:p),sep="")
#' X <- as.data.frame(X)
#'
#' ### Running the 'bvs' function without coupling and with the hyperparameter
#' ### selection procedure
#' bout <- bvs(X, y, family = "logistic", nlptype = "piMOM",
#'             mod_prior = "beta", niter = 50)
#'
#' ### Highest Posterior Model
#' bout$HPM
#'
#' ### Estimated Coefficients:
#' bout$beta_hat
#'
#' ### Number of Visited Models:
#' bout$num_vis_models
bvs <- function(X, resp, prep = TRUE, logT = FALSE, fixed_cols = NULL,
                eff_size = 0.5, family = c("logistic", "survival"),
                hselect = TRUE, nlptype = "piMOM", r = 1, tau = 0.25,
                niter = 30, mod_prior = c("unif", "beta"), inseed = NULL,
                cplng = FALSE, ncpu = 4, parallel.MPI = FALSE){

  if (!is.data.frame(X)) stop("input X should be a data frame!")
  family <- match.arg(family)       # resolve the default choice vectors
  mod_prior <- match.arg(mod_prior) # so the scalar comparisons below are valid
  ol <- matprep(X, fixed_cols, prep, logT)
  X <- ol$fulmat
  gnames <- ol$gnames
  nf <- ol$nf

  if(family=="logistic"){
    y <- as.numeric(as.character(resp))
    dx <- dim(X)
    n <- dx[1]
    p <- dx[2]
    X <- cbind(rep(1, n), X)
    gnames <- c("Intercept", gnames)
    # ======================= Hyperparameters ========================
    cons <- 0; prp <- p / n
    ar <- 2 ^ n
    if (prp > 4 && ar < Inf){
      ac <- 0
      cons <- 0
      while (ar > ac) {
        cons <- cons + 1
        ac <- choose(p, cons)
      }
    }else{
      cons <- ceiling(log(p))
    }
    cons <- min(cons,ceiling(log(p)))
    if (mod_prior == "beta"){
      a <- cons; b <- p - a;
    }
    if (mod_prior == "unif"){
      a <- 1; b <- 1;
    }
    if (hselect){
      hyper <- HyperSelect(X, y, eff_size, nlptype, 20000, mod_prior, family)
      tau <- hyper$tau
      r <- 1
    }
    initProb <- cons / p
    exmat <- cbind(y, X)
    if(nlptype=="piMOM") nlptype_int <- 0
    if(nlptype=="pMOM") nlptype_int <- 1
    # =========================== Main ===============================
    if (!cplng){
      schain <- p
      while (schain > cons || schain == 0) {
        chain1 <- rbinom(p-nf, 1, initProb)
        schain <- sum(chain1)
      }
      chain1 <- as.numeric(c(rep(1,nf+1), chain1)) # one for the intercept.
      chain2 <- chain1
      Lregout <- logreg_bvs(exmat, chain1, nf, tau, r, nlptype_int, a, b,
                            cons, niter, cplng, chain2)
      #============ reading outputs ======================
      Hash_Key <- Lregout$hash_key; all_probs <- Lregout$hash_prob;
      VisCovs <- Lregout$vis_covs;
      inds <- which(all_probs!=0);
      Hash_Key <- Hash_Key[inds]; all_probs <- all_probs[inds];
      VisCovs <- VisCovs[inds];
      nvm <- length(unique(Hash_Key))
      uinds <- which(!duplicated(Hash_Key))
      all_probs <- all_probs[uinds]
      list_vis_covs <- VisCovs[uinds]
      outnum <- min(nvm,1000)
      sout <- sort(all_probs,decreasing = T,index.return=T)
      MaxMargs <- sout$x[1:outnum]
      minds <- sout$ix[1:outnum]
      max_marg <- MaxMargs[1]; indmax <- minds[1]
      sel_model <- list_vis_covs[[indmax]]
      gnames2 <- gnames[sel_model + 1];
      beta_hat <- Lregout$beta_hat;
      names(beta_hat) <- gnames2;
      gnames <- gnames[-1]
      sel_model <- sel_model[-1]
      MaxModels <- list(NULL)
      for (i in 1:outnum){
        MaxModels[[i]] <- list_vis_covs[[minds[i]]][-1]
      }
      inc_probs <- inc_prob_calc(all_probs,list_vis_covs,p+1)
      inc_probs <- inc_probs[-1]
      median_model <- which(inc_probs >= 0.5)
      #========================================#
      return(list(max_prob = max_marg, HPM = sel_model, beta_hat = beta_hat,
                  MPM = median_model, inc_probs = inc_probs,
                  max_prob_vec = MaxMargs, max_models = MaxModels,
                  num_vis_models = nvm, nlptype = nlptype, des_mat = X,
                  gene_names = gnames, r = r, tau = tau))
    } else {
      comb <- function(x, ...) {
        lapply(seq_along(x),
               function(i) c(x[[i]], lapply(list(...), function(y) y[[i]])))
      }
      if(parallel.MPI){
        if (!requireNamespace("doMPI", quietly = TRUE)) {
          stop("Package doMPI needed for this function to work. Please install it.",
               call. = FALSE)
        } else {
          cl <- doMPI::startMPIcluster(count = ncpu)
          doMPI::registerDoMPI(cl)
          ParOut <- foreach(j = 1:ncpu, .combine = "comb", .multicombine = TRUE,
                            .init = list(list(), list(), list(), list()),
                            .packages = 'BVSNLP',
                            .options.mpi = list(seed = inseed)) %dopar% {
            schain <- p
            while (schain > cons || schain == 0) {
              chain1 <- rbinom(p-nf, 1, initProb)
              schain <- sum(chain1)
            }
            chain1 <- as.numeric(c(rep(1,nf+1), chain1))
            schain <- p
            while (schain > cons || schain == 0) {
              chain2 <- rbinom(p-nf, 1, initProb)
              schain <- sum(chain2)
            }
            chain2 <- as.numeric(c(rep(1,nf+1), chain2))
            Lregout <- logreg_bvs(exmat, chain1, nf, tau, r, nlptype_int,
                                  a, b, cons, niter, cplng, chain2)
            maxChain <- as.logical(Lregout$max_chain)
            maxMarg <- Lregout$max_prob
            cflag <- Lregout$cplng_flag
            bhat <- numeric(p + 1)
            bhat[maxChain] <- Lregout$beta_hat
            list(maxChain, maxMarg, cflag, bhat)
          }
          doMPI::closeCluster(cl)
        }
      } else {
        cl <- makeCluster(ncpu)
        registerDoParallel(cl)
        opts <- list(preschedule=TRUE)
        if (!is.null(inseed)) {clusterSetRNGStream(cl, inseed)}
        ParOut <- foreach(j = 1:ncpu, .combine = "comb", .multicombine = TRUE,
                          .init = list(list(), list(), list(), list()),
                          .packages = 'BVSNLP',
                          .options.snow = opts ) %dopar% {
          schain <- p
          while (schain > cons || schain == 0) {
            chain1 <- rbinom(p-nf, 1, initProb)
            schain <- sum(chain1)
          }
          chain1 <- as.numeric(c(rep(1,nf+1), chain1))
          schain <- p
          while (schain > cons || schain == 0) {
            chain2 <- rbinom(p-nf, 1, initProb)
            schain <- sum(chain2)
          }
          chain2 <- as.numeric(c(rep(1,nf+1), chain2))
          Lregout <- logreg_bvs(exmat, chain1, nf, tau, r, nlptype_int,
                                a, b, cons, niter, cplng, chain2)
          maxChain <- as.logical(Lregout$max_chain)
          maxMarg <- Lregout$max_prob
          cflag <- Lregout$cplng_flag
          bhat <- numeric(p + 1)
          bhat[maxChain] <- Lregout$beta_hat
          list(maxChain, maxMarg, cflag, bhat)
        }
        stopCluster(cl)
      }
      MaxChain <- matrix(unlist(ParOut[[1]]), ncol = (p + 1), byrow = T)
      MaxMarg
<- unlist(ParOut[[2]]) cpl_flag <- unlist(ParOut[[3]]) bhat <- matrix(unlist(ParOut[[4]]), ncol = (p + 1), byrow = T) cpl_percent <- sum(cpl_flag) / ncpu Final_Marg <- MaxMarg Final_Chains <- MaxChain D <- as.data.frame(cbind(Final_Chains, Final_Marg)) Counts <- rep(1, length(Final_Marg)) A <- aggregate(Counts, by = as.list(D), FUN = sum) Freq <- A[, p + 3] Probs <- A[, p + 2] UniqModels <- apply(A[, 1:(p + 1)], 1, function(x) which(x > 0)) return(list(cpl_percent = cpl_percent, margin_probs = Final_Marg, chains = Final_Chains, cpl_flags = cpl_flag, beta_hat = bhat, freq = Freq, probs = Probs, uniq_models = UniqModels, nlptype = nlptype, gene_names = gnames, r = r, tau = tau)) } } ### ================================================================= if(family=="survival"){ TS <- resp time <- TS[, 1] status <- TS[, 2] sfidx <- nf+1 dx <- dim(X) n <- dx[1] p <- dx[2] exmat <- cbind(time, status, X) if(nlptype=="piMOM") nlptype_int <- 0 if(nlptype=="pMOM") nlptype_int <- 1 # ======================= Hyperparameters ======================== cons <- 1+nf if (mod_prior == "beta"){ a <- cons; b <- p - a; } if (mod_prior == "unif"){ a <- 1; b <- 1; } if (hselect){ hyper <- HyperSelect(X, TS, eff_size, nlptype, 5000, mod_prior, family) tau <- hyper$tau r <- 1 } ntimes <- 10 d <- 2 * ceiling(log(p)) temps <- seq(3, 1, length.out = ntimes) L <- ntimes J <- niter # =========================== Main =============================== comb <- function(x, ...) { lapply(seq_along(x), function(i) c(x[[i]], lapply(list(...), function(y) y[[i]]))) } if(parallel.MPI){ if (!requireNamespace("doMPI", quietly = TRUE)) { stop("Package doMPI needed for this function to work. Please install it.", call. = FALSE) } else { cl <- doMPI::startMPIcluster(count = ncpu) doMPI::registerDoMPI(cl) parout <- foreach(j = 1:ncpu, .combine = "comb", .multicombine = TRUE, .init = list(list(), list(), list(), list(), list(), list()), .packages = 'BVSNLP', .options.mpi = list(seed = inseed)) %dopar% { cur_model <- sample(sfidx:p, 3) if (nf > 0) cur_model <- c(1:nf,cur_model) coxout <- cox_bvs(exmat, cur_model, nf, tau, r, nlptype_int, a, b, d, L, J, temps) maxmod <- coxout$max_model maxprob <- coxout$max_prob hashkey <- coxout$hash_key allprobs <- coxout$all_probs viscovs <- coxout$vis_covs_list list(maxmod, maxprob, hashkey, allprobs, cur_model, viscovs)#,vismodels) } doMPI::closeCluster(cl) } } else { cl <- makeCluster(ncpu) registerDoParallel(cl) opts <- list(preschedule=TRUE) if (!is.null(inseed)) {clusterSetRNGStream(cl, inseed)} parout <- foreach(j = 1:ncpu, .combine = "comb", .multicombine = TRUE, .init = list(list(), list(), list(), list(), list(), list()), .packages = 'BVSNLP', .options.snow = opts ) %dopar% { cur_model <- sample(sfidx:p, 3); if (nf > 0) cur_model <- c(1:nf,cur_model) coxout <- cox_bvs(exmat, cur_model, nf, tau, r, nlptype_int, a, b, d, L, J, temps) maxmod <- coxout$max_model maxprob <- coxout$max_prob hashkey <- coxout$hash_key allprobs <- coxout$all_probs viscovs <- coxout$vis_covs_list list(maxmod, maxprob, hashkey, allprobs, cur_model, viscovs)#,vismodels) } stopCluster(cl) } Hash_Key <- unlist(parout[[3]]) All_Probs <- unlist(parout[[4]]) CurModel <- matrix(unlist(parout[[5]]), ncol = 3, byrow = T) VisCovs <- NULL for (i in 1:ncpu){ VisCovs <- c(VisCovs,parout[[6]][[i]]) } num_vis_models <- length(unique(Hash_Key)) uinds <- which(!duplicated(Hash_Key)) all_probs <- All_Probs[uinds] list_vis_covs <- VisCovs[uinds] outnum <- min(num_vis_models,1000); sout <- sort(all_probs,decreasing = T,index.return=T) 
MaxMargs <- sout$x[1:outnum] minds <- sout$ix[1:outnum] max_marg <- MaxMargs[1]; indmax <- minds[1] sel_model <- list_vis_covs[[indmax]] + 1 MaxModels <- list(NULL) for (i in 1:outnum){ MaxModels[[i]] <- list_vis_covs[[minds[i]]] + 1 } inc_probs <- inc_prob_calc(all_probs,list_vis_covs,p) median_model <- which(inc_probs >= 0.5) beta_hat <- CoefEst(X,TS,sel_model,nlptype,tau,r,"survival") return(list(num_vis_models = num_vis_models, max_prob = max_marg, HPM = sel_model, MPM = median_model, beta_hat = beta_hat, max_prob_vec = MaxMargs, max_models = MaxModels, inc_probs = inc_probs, nlptype = nlptype, des_mat = X, start_models = CurModel, r = r, tau = tau, gene_names = gnames)) } }
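# ---------------------------------------------------------------------------
# Illustrative sketch (not run): calling bvs() on survival data. The roxygen
# example above covers the logistic family only; this mirrors the simulation
# in the cox_bvs() example earlier in this package. All settings (niter, ncpu,
# mod_prior) are arbitrary assumptions for demonstration.
if (FALSE) {
  n <- 100; p <- 40
  set.seed(123)
  X <- matrix(rnorm(n * p), ncol = p)
  colnames(X) <- paste("gene_", c(1:p), sep = "")
  beta <- c(-1.8, 1.2, -1.7, rep(0, p - 3))
  sur_times <- rexp(n, exp(X %*% beta))
  cens_times <- rexp(n, 0.2)
  resp <- cbind(pmin(sur_times, cens_times),          # survival times
                as.numeric(sur_times <= cens_times))  # censoring status
  bout <- bvs(as.data.frame(X), resp, family = "survival",
              mod_prior = "unif", niter = 20, ncpu = 2)
  bout$HPM   # highest posterior probability model
  bout$MPM   # median probability model
}
# ---------------------------------------------------------------------------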
/scratch/gouwar.j/cran-all/cranData/BVSNLP/R/bvs.R
matprep <- function(X, fixed_cols, prep, logT){ fcols <- Filter(is.factor, X) allnames <- names(X) fnames <- names(fcols) numcols <- dim(fcols)[2] fix_flag <- as.logical(length(fixed_cols)) fct_flag <- as.logical(numcols) if (fct_flag){ outlist <- as.list(1:numcols) for (i in 1:numcols){ Lv <- levels(fcols[,i]) auxmat <- model.matrix(~fcols[,i]) auxmat <- as.matrix(auxmat[,-1]) colnames(auxmat) <- paste(fnames[i],Lv[-1],sep="") outlist[[i]] <- auxmat } fctidx <- which(allnames%in%fnames) if (fix_flag){ fixfctidx <- which(fctidx%in%fixed_cols) if(length(fixfctidx)){ nonfctfixidx <- fixed_cols[-which(fixed_cols%in%fctidx)] nf_fct_vec <- numeric(numcols) for (i in 1:numcols){ Lv <- levels(fcols[,i]) nf_fct_vec[i] <- length(Lv[-1]) } mat_fct_fix <- as.matrix(as.data.frame(outlist[fixfctidx])) nf_fct <- sum(nf_fct_vec[fixfctidx]) if(length(nonfctfixidx)) mat_nfct_fix <- as.matrix(X[nonfctfixidx]) else mat_nfct_fix <- NULL mat_nfct_nfix <- as.matrix(X[,-c(fctidx,nonfctfixidx)]) if(length(outlist[-fixfctidx])) mat_fct_nfix <- as.matrix(as.data.frame(outlist[-fixfctidx])) else mat_fct_nfix <- NULL fulmat <- cbind(mat_fct_fix, mat_nfct_fix, mat_fct_nfix, mat_nfct_nfix) nf <- length(fixed_cols) + nf_fct - length(fixfctidx) } else { fctmat <- as.matrix(as.data.frame(outlist)) X1 <- as.matrix(X[,fixed_cols]) X2 <- as.matrix(X[,-c(fixed_cols,fctidx)]) fulmat <- cbind(X1,fctmat,X2) nf <- length(fixed_cols) } } else { fulmat <- cbind(as.matrix(as.data.frame(outlist)),as.matrix(X[,-fctidx])) nf <- 0 } } else { if (fix_flag){ X1 <- X[,fixed_cols] X2 <- X[,-fixed_cols] X3 <- cbind(X1,X2); fulmat <- as.matrix(X3) nf <- length(fixed_cols) } else { fulmat <- as.matrix(X) nf <- 0 } } if (prep){ Xin <- PreProcess(fulmat, logT) fulmat <- Xin$X gnames <- Xin$gnames } else { gnames <- colnames(fulmat) } return(list(fulmat = fulmat, gnames = gnames, nf = nf)) }
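# ---------------------------------------------------------------------------
# Illustrative sketch (not run): how matprep() expands a factor covariate into
# k - 1 dummy columns and moves fixed columns to the front of the matrix.
# prep = FALSE keeps the sketch self-contained (PreProcess() is not needed);
# the toy data frame is an assumption for demonstration.
if (FALSE) {
  df <- data.frame(g1  = rnorm(6),
                   grp = factor(c("a", "b", "c", "a", "b", "c")),
                   g2  = rnorm(6))
  out <- matprep(df, fixed_cols = 2, prep = FALSE, logT = FALSE)
  out$nf                 # 2: the fixed 3-level factor became 2 dummy columns
  colnames(out$fulmat)   # "grpb", "grpc" first, then "g1", "g2"
}
# ---------------------------------------------------------------------------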
/scratch/gouwar.j/cran-all/cranData/BVSNLP/R/matprep.R
#' Predictive accuracy measurement using Bayesian Model Averaging
#'
#' @description This function is used for predictive accuracy measurement of
#' the selected models using the Bayesian Model Averaging methodology. Occam's
#' window with cutoff threshold \code{thr} is used. That means only models
#' with posterior probability of at least \code{thr} times the posterior
#' probability of the highest posterior probability model are considered in
#' model averaging. For survival time response datasets, the predictive Area
#' Under Curve (AUC) at each given time point is computed as the output. In
#' this case, the predictive AUC is obtained using Uno's method for
#' observations in the test set. For binary outcome data, only one AUC is
#' reported, which is from the ROC computed on the test set. The training set
#' is used to find the selected model and relevant probabilities.
#'
#' @param bvsobj An object that is generated by the \code{\link{bvs}} function.
#' It is the output of the Bayesian variable selection procedure.
#' @param X The same \code{n} times \code{p} data frame initially used in the
#' \code{bvs} function. This is the full data frame containing all test and
#' train data.
#' @param prep A boolean variable determining if the preprocessing step has
#' been done on the original data frame in the \code{bvs} function. This should
#' have the same value as the \code{prep} variable in the \code{bvs} function.
#' @param logT A boolean variable determining if log transform should have been
#' done on continuous columns of the data frame in the \code{bvs} function.
#' This should have the same value as the \code{logT} variable in the
#' \code{bvs} function.
#' @param resp For logistic regression models, this variable is the binary
#' response vector. For the Cox proportional hazard models this is a two column
#' matrix where the first column contains survival times and the second column
#' is the censoring status for each observation. Note that for survival times,
#' the time section of this variable should be in the same scale and unit
#' (year, days, etc.) as the \code{times} variable for which the AUC has to be
#' computed.
#' @param nlptype Determines the type of nonlocal prior that is used in the
#' analyses. It can be "piMOM" for product inverse moment prior, or "pMOM" for
#' product moment prior. The default is set to piMOM prior.
#' @param train_idx An integer vector containing the indices of the training
#' set.
#' @param test_idx An integer vector containing the indices of the test set.
#' The set of observations that prediction will be performed on.
#' @param thr The threshold used for Occam's window as explained in the
#' description. The default value for this variable is 0.05.
#' @param times A vector of times at which predictive AUC is to be computed.
#' This input is only used for prediction in survival data analysis.
#' @param family Determines the type of data analysis. \code{logistic} is for
#' binary outcome and the logistic regression model, whereas \code{survival}
#' represents survival outcomes and the Cox proportional hazard model.
#' @return The output differs based on the family used in the analysis of the
#' data.
#' \strong{1) } \code{family = logistic}
#' The output is a list with the two following objects:
#' \item{auc}{This is the area under the ROC curve after Bayesian model
#' averaging is used to obtain ROC for the test data.}
#' \item{roc_curve}{This is a two column matrix representing points on the ROC
#' curve and can be used to plot the curve. The first column is FPR and the
#' second column is TPR which represent x-axis and y-axis in the ROC curve,
#' respectively.}
#' \strong{2) } \code{family = survival}
#' \item{auc}{A vector with the same length as the \code{times} variable
#' showing predictive area under the curve at each given time point using
#' Bayesian Model averaging.}
#' @author Amir Nikooienejad
#' @references Raftery, A. E., Madigan, D., & Hoeting, J. A. (1997). Bayesian
#' model averaging for linear regression models. Journal of the American
#' Statistical Association, 92(437), 179-191.\cr\cr
#' Nikooienejad, A., Wang, W., & Johnson, V. E. (2020). Bayesian variable
#' selection for survival data using inverse moment priors. Annals of Applied
#' Statistics, 14(2), 809-828. \cr\cr
#' Uno, H., Cai, T., Tian, L., & Wei, L. J. (2007). Evaluating
#' prediction rules for t-year survivors with censored regression models.
#' Journal of the American Statistical Association, 102(478), 527-537.
#' @examples
#' ### Simulating Logistic Regression Data
#' n <- 200
#' p <- 40
#' set.seed(123)
#' Sigma <- diag(p)
#' full <- matrix(c(rep(0.5, p*p)), ncol=p)
#' Sigma <- full + 0.5*Sigma
#' cholS <- chol(Sigma)
#' Beta <- c(-1.7,1.8,2.5)
#' X <- matrix(rnorm(n*p), ncol=p)
#' X <- X%*%cholS
#' colnames(X) <- c(paste("gene_",c(1:p),sep=""))
#' beta <- numeric(p)
#' beta[c(1:length(Beta))] <- Beta
#' Xout <- PreProcess(X)
#' X <- Xout$X
#' XB <- X%*%beta
#' probs <- as.vector(exp(XB)/(1+exp(XB)))
#' y <- rbinom(n,1,probs)
#' X <- as.data.frame(X)
#' train_idx <- sample(1:n,0.8*n)
#' test_idx <- setdiff(1:n,train_idx)
#' X_train <- X[train_idx,]
#' y_train <- y[train_idx]
#' bout <- bvs(X_train, y_train, prep=FALSE, family = "logistic",
#'             mod_prior = "beta",niter = 50)
#' BMAout <- predBMA(bout, X, y, prep = FALSE, logT = FALSE,
#'                   train_idx = train_idx, test_idx = test_idx,
#'                   family="logistic")
#' ### AUC for the prediction:
#' BMAout$auc
#'
#' ### Plotting ROC Curve
#' roc <- BMAout$roc_curve
#' plot(roc,lwd=2,type='l',col='blue')
predBMA <- function(bvsobj, X, resp, prep, logT, nlptype = "piMOM",
                    train_idx, test_idx, thr = 0.05, times = NULL,
                    family = c("logistic", "survival")){

  if (!is.data.frame(X)) stop("input X should be a data frame!")
  family <- match.arg(family)
  X <- predmat(X)
  X_tr <- bvsobj$des_mat
  gn <- bvsobj$gene_names
  xname <- colnames(X)
  ind <- match(gn,xname)
  X <- X[,ind]
  X_te <- X[test_idx,]
  if(prep){
    Xout <- PreProcess(X_te, logT)
    X_te <- Xout$X
  }
  r <- bvsobj$r
  tau <- bvsobj$tau
  probs <- bvsobj$max_prob_vec
  models <- bvsobj$max_models
  maxprob <- bvsobj$max_prob
  thresh <- maxprob + log(thr)
  passinds <- which(probs >= thresh)
  k <- length(passinds)
  oprobs <- probs[passinds];
  sh <- ceiling(max(oprobs));
  oprobs <- exp(oprobs - sh);
  oprobs <- oprobs/sum(oprobs)
  omodels <- models[passinds]
  if(nlptype=="piMOM") nlptype_int <- 0
  if(nlptype=="pMOM") nlptype_int <- 1
  # =======================================
  if(family=="logistic"){
    y <- resp
    y_tr <- y[train_idx]
    y_te <- y[test_idx]
    X_tr <- cbind(rep(1,length(y_tr)),X_tr);
    X_te <- cbind(rep(1,length(y_te)),X_te);
    aucout <- aucBMA_logistic(X_tr,y_tr,X_te,y_te,tau,r,nlptype_int,oprobs,omodels,k)
    roc <- aucout$roc;
    colnames(roc) <- c("TPR","FPR")
    return(list(auc = aucout$auc, roc_curve = roc))
  }
  # ====================
  if(family=="survival"){
    TS <- resp;
    TS_tr <- TS[train_idx,]
    TS_te <- TS[test_idx,]
    if (!length(times)) stop("No times vector is set!")
    aucout <- aucBMA_survival(X_tr,TS_tr,X_te,TS_te,tau,r,nlptype_int,times,oprobs,omodels,k)
    return(aucout)
  }
}
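# ---------------------------------------------------------------------------
# Illustrative sketch (not run): predBMA() for survival data, complementing
# the logistic example in the roxygen block above. `bout`, `X`, `TS` and the
# index vectors are assumed to come from a bvs(..., family = "survival") run
# on the training rows of X; the `times` values are arbitrary assumptions and
# must be on the same scale as the survival times.
if (FALSE) {
  out <- predBMA(bout, X, resp = TS, prep = TRUE, logT = FALSE,
                 train_idx = train_idx, test_idx = test_idx,
                 times = c(1, 2, 5), family = "survival")
  out   # predictive AUC at each requested time point (see @return above)
}
# ---------------------------------------------------------------------------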
/scratch/gouwar.j/cran-all/cranData/BVSNLP/R/predBMA.R
predmat <- function(X){ fcols <- Filter(is.factor, X) allnames <- names(X) fnames <- names(fcols) numcols <- dim(fcols)[2] fct_flag <- as.logical(numcols) if (fct_flag){ outlist <- as.list(1:numcols) for (i in 1:numcols){ Lv <- levels(fcols[,i]) auxmat <- model.matrix(~fcols[,i]) auxmat <- as.matrix(auxmat[,-1]) colnames(auxmat) <- paste(fnames[i],Lv[-1],sep="") outlist[[i]] <- auxmat } fctidx <- which(allnames%in%fnames) fulmat <- cbind(as.matrix(as.data.frame(outlist)),as.matrix(X[,-fctidx])) } else { fulmat <- as.matrix(X) } return(fulmat) }
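# ---------------------------------------------------------------------------
# Illustrative sketch (not run): predmat() flattens a data frame with factor
# covariates into a purely numeric matrix, placing the dummy-expanded factor
# columns first. The toy data frame is an assumption for demonstration.
if (FALSE) {
  df <- data.frame(g1  = rnorm(4),
                   grp = factor(c("a", "b", "a", "b")),
                   g2  = rnorm(4))
  colnames(predmat(df))   # "grpb", then "g1", "g2"
}
# ---------------------------------------------------------------------------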
/scratch/gouwar.j/cran-all/cranData/BVSNLP/R/predmat.R
.onAttach <- function(libname, pkgname) { packageStartupMessage("Bayesian Variable Selection using Non-Local priors for survival and logistic regression data. Loading Version 1.1.8") }
/scratch/gouwar.j/cran-all/cranData/BVSNLP/R/zzz.R
#gstools_pipeline - derived from bwgs
#@ 2017 Sophie Bouchet, Louis Gautier Tran, CHARMET Gilles
#Version 1.0.0 - Release date: 31/02/2017
#bwgs_gilles_v55.R
#[email protected]
#[email protected]

#/////////////////////////////////////////////////////////////////////
percentNA=function(x) {
  perNA=length(x[is.na(x)])/length(x)
  perNA
}
#/////////////////////////////////////////////////////////////////////
#START bwgs.predict TO BE USED ONCE the best model is chosen using bwgs.cv
#/////////////////////////////////////////////////////////////////////
myMean=function(x) {
  DENO=x[!is.na(x)]
  NUME=DENO[DENO>0]
  M=length(NUME)/length(DENO)
  M
}

##REDUCTION MODELS and other miscellaneous functions

#////////////////////////////////////////////////////////////
#Imputation by the A matrix (A.mat of Jeffrey Endelman's rrBLUP, with shrinkage)
#////////////////////////////////////////////////////////////
AM <- function(geno,arg.A.mat=list(min.MAF=NULL,max.missing=NULL,impute.method="mean",tol=0.02,n.core=1,shrink=TRUE,return.imputed=TRUE)) {
  #Shrink_result = A.mat(X=geno, impute.method = "mean", tol = 0.02, return.imputed = FALSE)$A
  #library(rrBLUP)
  Shrink_result = A.mat(X=geno, impute.method = "mean", tol = 0.02,n.core=1,shrink=TRUE,return.imputed = TRUE)$A
  # Shrink_result = A.mat,args=c(X=geno,arg.A.mat))$A
  return(Shrink_result)
}
#///////////////////////////////////////
# RMR: Random sampling of markers
#///////////////////////////////////////
RMR <- function(geno,N) {
  ncolG <- ncol(geno)
  V <- sample(1:ncolG,N,replace=F)
  random_geno <- geno[,V]
  return(random_geno)
}
#//////////////////////////////////////////////////////////////
# ANO: selection of N markers with the lowest P-value in GWAS
#//////////////////////////////////////////////////////////////
ANO <- function(P,GG,pval) {
  #Genotypic Reduction by Association (ANOVA)
  #(c)15/06/2015 G. CHARMET & V.G. TRAN
  #GG = geno
  #P = pheno
  #genotypes and phenotypes must not have NAs
  listF = ANOV(P,GG)
  names(listF)=colnames(GG)
  ChoixM = GG[,listF < pval&!is.na(listF)]
  dim(ChoixM)
  ChoixM
}

# function to run ANOVA
ANOV <- function(P,GG) {
  TESTF=rep(0,ncol(GG))
  for (i in 1:ncol(GG)) {
    GGi=GG[,i]
    names(GGi)=rownames(GG)
    GGii=GGi[!is.na(GGi)]
    Pi=P[!is.na(GGi)]
    AOV=anova(lm(Pi~GGii))
    TEST=AOV["Pr(>F)"][1]
    TESTF[i]=c(TEST)[[1]][1]
  }
  return(TESTF)
}
#////////////////////////////////////////////////////////////////////////////////////
# LD function to select markers based on LD: eliminate pairs with the highest LD r2
# NB: does NOT work: eliminates TOO MANY markers
# MOREOVER LDcorSV is TOO slow
#////////////////////////////////////////////////////////////////////////////////////

#////////////////////////////////////////////////////////////////////////////////////////
#
# NEWLD is a faster function to remove markers in LD > lambda
#
#/////////////////////////////////////////////////////////////////////////////////////////
NEWLD <- function(geno,R2seuil) {
  genoNA=MNI(geno)
  CORLD=cor(genoNA,use="pairwise.complete.obs")
  CORLD=abs(CORLD)
  diag(CORLD)=0
  CORLD[is.na(CORLD)]<-0
  # CORLD[upper.tri(CORLD)]<-0
  range(CORLD)
  geno_LD=CORLD
  R2maxRow=apply(geno_LD,1,max)
  R2maxCol=apply(geno_LD,2,max)
  geno_LD=geno_LD[R2maxRow<R2seuil,R2maxCol<R2seuil]
  #geno_final=NEWLD_clean(geno,CORLD,R2seuil=R2seuil)
  geno_LD=geno[,colnames(geno_LD)]
  return(geno_LD)
}
#/////////////////////////////////////////////////////////////////////////////////////////////
#NEWLD_clean function: another function for stepwise elimination of markers with the highest LD
# can be VERY long for large matrices
#/////////////////////////////////////////////////////////////////////////////////////////////
# @param geno_data is the genotyping matrix with markers in columns
#
NEWLD_clean <- function(geno_data,CORLD,R2seuil) {
  # R2seuil=lambda
  # function to select a subset of markers from the columns of geno_data
  # so that the maximum r2 between pairs of markers is < R2seuil
  # LD is the output matrix from LD.Measures of package LDcorSV with supinfo=TRUE
  # typeR2 is the type of R2: "N" for "normal", "V" for relationship corrected
  # "S" for structure corrected and "SV" for both corrections S+V
  marker2remove=character()# to cumulate markers to be removed from the geno_data
  geno_LD=CORLD
  # R2max=max(geno_LD)
  R2maxRow=apply(geno_LD,1,max)
  R2maxCol=apply(geno_LD,2,max)
  geno_LD=geno_LD[R2maxRow<R2seuil,R2maxCol<R2seuil]
  #geno_LD
  Newgeno_data<-geno_data[,colnames(geno_LD)]
  #output
  return(Newgeno_data)
}
#/////////////////////////////////////////////////////////////////////////////
# CHROMLD: function to split a genotypic matrix into chromosomes
# then apply LD within each chromosome (saves time)
#/////////////////////////////////////////////////////////////////////////////
CHROMLD <- function(geno,R2seuil,MAP) {
  geno_test <- geno
  chrom=MAP[,"chrom"]
  Nchrom <- unique(chrom)
  M = list()
  E = list()
  for (i in Nchrom) {
    M[[i]] = geno_test[,chrom==i]
    E[[i]] = NEWLD(M[[i]],R2seuil)
  }
  E_tout <- do.call(cbind,E) # See function: do.call(rbind,"list of matrices")
  #E_tout <- LD(E_tout,lambda) # run LD once more on the combined matrix to remove marker pairs in LD across the N matrices
  return(E_tout)
}
#
#////////////////////////////////////////////////////////////////////////////////////
# RMGG to create random NA values in geno, useful for testing imputation methods
#////////////////////////////////////////////////////////////////////////////////////
RMGG <- function(G, N) {
  #RMGG - Random Missing Matrix Generator
  #(c)2014 V.G. TRAN & G. CHARMET
  #G: genotypic matrix
  #N: % missing
  nRow = dim(G)[1]
  nCol = dim(G)[2]
  N_missing = N*nRow*nCol/100
  Gm<-G
  Missing<-sample(nRow*nCol, N_missing, replace=FALSE)
  ROWS<-matrix(data = Gm, ncol=1)
  ROWS[Missing]=NA
  Gm<-matrix(data = ROWS, nrow=nRow, ncol=nCol, byrow=FALSE)
  rownames(Gm) <- rownames(G)
  colnames(Gm) <- colnames(G)
  return(Gm)
}
#////////////////////////////////////////////////////////
# RPS function for randomly sampling individuals (lines)
# ONLY useful for teaching purposes, NOT in real life
#////////////////////////////////////////////////////////////
RPS <- function(geno,N) {
  #Random Pop Size
  #(c)03/12/2014 [email protected] & [email protected]
  nrowG <- nrow(geno)
  V <- sample(1:nrowG,N,replace=F)
  random_geno <- geno[V,]
  return(random_geno)
}
#//////////////////////////////////////////////////////////////////////////
#///IMPUTATION MODELS
#/////////////////////////////////////////////////////////////////////////

#/////////////////////////////////////////////////////////////////////////////////////
# EMI: imputation by the Expectation-Maximization algorithm
# uses the A.mat function from the rrBLUP package
#/////////////////////////////////////////////////////////////////////////////////////
#EMI <- function(geno,arg.A.mat=list(impute.method = "EM", tol = 0.02, return.imputed = TRUE))
EMI <- function(geno) {
  #EMI_result= do.call(rrBLUP::A.mat,args=c(X=geno, arg.A.mat))$imputed
  EMI_result= rrBLUP::A.mat(X=geno, impute.method = "EM", tol = 0.02, return.imputed = TRUE)$imputed
  return(EMI_result)
}
#////////////////////////////////////////////////////////////////
#CROSS VALIDATION PROCESSING
#///////////////////////////////////////////////////////////////

#///////////////////////////////////////////////////////////////////
# runCrossVal carries out cross validation; returns only predicted values,
# to be run with the "ALL" option
#//////////////////////////////////////////////////////////////////////////////////////
runCrossVal <- function(pheno, geno, predictor, nFolds, nTimes) {
  #start.time.cv <- Sys.time()
  listGENO=rownames(geno)
  listPHENO=names(pheno)
  LIST=intersect(listGENO,listPHENO)
  if (length(LIST)==0) {stop("NO COMMON LINE BETWEEN geno AND pheno")} else {
    geno=geno[LIST,]
    pheno=pheno[LIST]
    new.pheno.size=length(LIST)
    message("Number of common lines between geno and pheno")
    print(new.pheno.size)
  }
  notIsNA <- !is.na(pheno) # pheno <- pheno[notIsNA] ## geno <- geno[notIsNA,] ##
  nObs <- length(pheno)
  #saveAcc <- rep(NA, nTimes)
  saveAcc <- rep(NA, nTimes*nFolds)
  savePred <- rep(0, nObs)
  # n_row = nTimes*nFolds
  # n_col = 3
  # cvtable <- matrix(rep(0,n_row*n_col),n_row,n_col)
  ### title_table <- c("Time","Fold","CV correlation");
  ACCU <- rep(NA,nFolds) # initialize ACCU for multiple folds 04/05/2015 VG TRAN
  for (time in 1:nTimes) {
    # timebar <- time*100/nTimes # For Windows Time Bar
    # waitingbar(runif(timebar)) # Windows Bar
    cvPred <- rep(NA, nObs)
    folds <- sample(rep(1:nFolds, length.out=nObs))
    for (fold in 1:nFolds) {
      phenoTrain <- pheno[folds != fold]
      genoTrain <- geno[folds != fold,]
      genoPred <- geno[folds == fold,]
      pred <- predictor(phenoTrain, genoTrain, genoPred)
      #message(pred) # added 21/11/14
      cvPred[folds == fold] <- pred
      valid <- pheno[folds == fold] # validation pheno, added 21/11/14
      # Correlation between valid/pred
      #accu <- round(cor(pred,valid),digits=2) # added 21/11/2014
      corr <- cor(pred,valid)
      ACCU[fold] <- round(corr[1],digits=2)# 04/05/2015 VG TRAN
      message("") # added 21/11/2014
      # # cat(c("cv for fold",fold,"is:",accu)) # added 21/11/2014
      #message(c("cv correlation for fold ",fold," is: ",accu)) # added 21/11/2014
      message(c("cv correlation for fold ",fold," is: ",ACCU[fold])) # 04/05/2015 VG TRAN
      # Save to a matrix time, fold, and cv correlation:
      # ## cvTableLine <- c(time,fold,accu)
      ## cvtable <- rbind(title_table,cvTableLine)
      #names(cvtable) = c("Time","Fold","CV correlation")
      #print(cvtable) # link "cvtable" with "Results"
      # SD <- round(sd(accu),digits=4)
    }
    timebar <- time*100/nTimes # For Windows Time Bar
    waitingbar(runif(timebar)) # Windows Bar
    #saveAcc[time] <- round(cor(cvPred, pheno)*100,digits=2) # percentage/100%
    #saveAcc[time] <- round(cor(cvPred, pheno),digits=2) # normal
    #saveAcc[time] <- round(mean(ACCU),digits=3) # 04/05/2015 VG TRAN
    saveAcc[(((time-1)*nFolds)+1):(time*nFolds)] <- round(ACCU,digits=2) # 04/05/2015 VG TRAN
    message(c("Mean CV correlation for time ",time," and ",nFolds," folds is: ",saveAcc[time]))
    savePred <- savePred + cvPred
    # SD <- round(sd(saveAcc[time]),digits=4)
  }
  SD <- round(sd(saveAcc),digits=4) #Standard Deviation
  predMean <- rep(NA, length(notIsNA))
  predMean[notIsNA] <- savePred/nTimes
  message("")
  #end.time.cv <- Sys.time()
  #time.taken.cv <- end.time.cv-start.time.cv
  #return(c(saveAcc,SD,cvtable))
  return(c(saveAcc,SD)) # CV correlations and their SD; Run_All_Cross_Validation below relies on this return value
  # return(c(saveAcc,SD,time.taken.cv))
  #Nota: saveAcc: correlation between predicted and real pheno, SD: standard deviation (écart-type)
}

#////////////////////////////////////////////////////////////////////////////////////////////////////////////////
# runCV carries out cross validation, returns prediction and CD for each line (available only with some methods)
#//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
runCV <- function(pheno, geno, FIXED, pop.reduct.method,rps, predictor, nFolds, nTimes) {
  #(c)2014 V.G. TRAN & D. LY
  class_cv <- list() #start.time.cv <- Sys.time()
  notIsNA <- !is.na(pheno)
  pheno <- pheno[notIsNA]
  geno <- geno[notIsNA,]
  nObs <- length(pheno)
  saveAcc <- rep(NA, nTimes)
  saveMSEP <- rep(NA, nTimes)
  #saveAcc <- rep(NA, nTimes*nFolds)
  savePred <- rep(0, nObs)
  savePredSD <-rep(0,nObs)
  saveCD <-rep (0,nObs)
  # cvtable <- matrix(rep(0,n_row*n_col),n_row,n_col)
  # ACCU <- rep(0,nFolds) # initialize ACCU for multiple folds 04/05/2015 VG TRAN
  class_predict = list()
  class_predSD = list()
  class_CD = list()
  # Predict <- rep(0, nObs)
  #CD <- rep(0,nObs) #Coefficient of Determination
  # names(Predict) <- names(pheno)
  #names(CD) <- names(pheno)
  for (time in 1:nTimes) {
    # timebar <- time*100/nTimes # For Windows Time Bar
    # waitingbar(runif(timebar)) # Windows Bar
    #Random Pop Size (added on 08/06/2015 V.G. TRAN)
    ACCU <- rep(0,nFolds) # initialize ACCU for multiple folds 04/05/2015 VG TRAN
    if (pop.reduct.method=="NULL") {
      # nObs <- length(pheno)
      message("Random Pop Size is not applied.")
      Predict <- rep(0, nObs)
      PredSD <-rep(0, nObs)
      CD <-rep(0, nObs)
      folds <- sample(rep(1:nFolds, length.out=nObs))
      for (fold in 1:nFolds) {
        phenoTrain <- pheno[folds != fold]
        phenoReal <- pheno[folds == fold]
        genoTrain <- geno[folds != fold,]
        genoPred <- geno[folds == fold,]
        if (FIXED!="NULL") {
          FixedPred <- FIXED[folds == fold,]
          FixedTrain <- FIXED[folds!=fold,]
          pred <- predictor(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred)
        }
        # pred <- predictor(phenoTrain,phenoReal, genoTrain, genoPred)
        else {pred <- predictor(phenoTrain, genoTrain, FixedTrain="NULL", genoPred, FixedPred="NULL")}
        Predict[folds == fold] <- pred[,1]
        PredSD[folds==fold] <- pred[,2]
        CD[folds==fold] <- pred[,3]
        valid <- pheno[folds == fold] # validation pheno, added 21/11/14
        valid = phenoReal
        # Correlation between valid/pred
        corr <- cor(pred[,1],valid)
        ACCU[fold] <- round(corr[1],digits=3)# 04/05/2015 VG TRAN
        message("")
        # message(c("cv correlation for fold ",fold," is: ",accu));
        message(c("cv correlation for fold ",fold," is: ",ACCU[fold])) # 04/05/2015 VG TRAN
        # Predict[rownames(pred)] <- pred[,1]
        # PredSD [rownames(pred)] <- pred[,2]
        # CD[rownames(pred)] <-pred[,3]
      } # end of fold loop
      message("")
      class_predict[[time]] <- Predict
      class_predSD[[time]] <- PredSD
      class_CD[[time]] <-CD
      timebar <- time*100/nTimes
      waitingbar(runif(timebar))
      #saveAcc[time] <- round(cor(cvPred, pheno)*100,digits=2) # percentage/100%
      saveAcc[time] <- round(cor(Predict, pheno, use="na.or.complete"),digits=3) # normal
      saveMSEP[time] <- round(sqrt(sum((Predict-pheno)^2)/length(pheno)),3)
      # saveAcc[(((time-1)*nFolds)+1):(time*nFolds)] <- round(ACCU,digits=2) # 04/05/2015 VG TRAN
      #saveAcc[time] <- round(mean(ACCU,na.rm=T),digits=2)
      message("")
      message(c("Mean CV correlation for time ",time," and ",nFolds," folds is: ",saveAcc[time]))
      savePred <- savePred + Predict
      savePredSD <-savePredSD + PredSD
      saveCD <- saveCD+CD
      #savePred=Predict
      #savePredSD=PredSD
      #saveCD=CD
      rm(Predict,PredSD,CD)
    }

    if(pop.reduct.method=="RANDOM") {
      genopop <- RPS(geno,rps) # Random Pop Size
      # WARNING: the size of genopop can differ from rps!! WHY?
      new.geno.pop.size <- dim(genopop)
      phenopop <- pheno[rownames(genopop)]
      nObs <- rps
      message("Random Pop Size applied. New genotypic data dimension:")
      print(new.geno.pop.size)
      message("")
      nObs <- length(pheno)
      Predict<-rep(NA,nObs)
      CD<-rep(NA,nObs)
      PredSD <-rep(NA,nObs)
      names(PredSD)=names(pheno)
      names(CD)=names(pheno)
      names(Predict)=names(pheno)
      folds <- sample(rep(1:nFolds, length.out=nObs))
      for (fold in 1:nFolds) {
        genoTrain <- RPS(geno,rps)
        phenoTrain=pheno[rownames(genoTrain)]
        phenoReal <- pheno[!names(pheno)%in%names(phenoTrain)]
        genoPred <- geno[names(phenoReal),]
        if (FIXED!="NULL") {
          FixedPred <- FIXED[folds == fold,]
          FixedTrain <- FIXED[folds!=fold,]
          pred <- predictor(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred)
        }
        #pred <- predictor(phenoTrain,phenoReal, genoTrain, genoPred)
        else {pred <- predictor(phenoTrain, genoTrain, FixedTrain="NULL", genoPred, FixedPred="NULL")}
        #else {pred <- predictor(phenoTrain, genoTrain, genoPred)} #Return cbind(GPRED,CD)
        #pred <- predictor_rrBLUP(phenoTrain,phenoReal, genoTrain, genoPred); #Return cbind(GPRED,CD)
        Predict[rownames(pred)] <- pred[,1]
        PredSD[rownames(pred)] <-pred[,2]
        CD[rownames(pred)] <- pred[,3]
        valid <- pheno[rownames(pred)] # validation pheno, added 21/11/14
        valid = phenoReal
        # Correlation between valid/pred
        # accu <- round(cor(pred[,1],phenoReal),digits=2);
        # ACCU[fold] <- round(cor(pred,valid),digits=2)# 04/05/2015 VG TRAN
        corr <- cor(pred[,1],valid)
        ACCU[fold] <- round(corr[1],digits=3)# 04/05/2015 VG TRAN
        message("")
        # message(c("cv correlation for fold ",fold," is: ",accu));
        message(c("cv correlation for fold ",fold," is: ",ACCU[fold])) # 04/05/2015 VG TRAN
      } # fold loop
      message("")
      class_predict[[time]] <- Predict
      class_predSD[[time]] <- PredSD
      class_CD[[time]] <-CD
      timebar <- time*100/nTimes
      waitingbar(runif(timebar))
      #saveAcc[time] <- round(cor(cvPred, pheno)*100,digits=2) # percentage/100%
      saveAcc[time] <- round(cor(Predict, pheno,use="na.or.complete"),digits=3) # normal
      saveMSEP[time] <- round(sqrt(sum((Predict-pheno)^2)/length(pheno)),3)
      #saveAcc[(((time-1)*nFolds)+1):(time*nFolds)] <- round(ACCU,digits=2) # 04/05/2015 VG TRAN
      #saveAcc[time] <- round(mean(ACCU,na.rm=T),digits=2)
      message("")
      message(c("Mean CV correlation for time ",time," and ",nFolds," folds is: ",saveAcc[time]))
      savePred <- savePred + Predict
      savePredSD <- savePredSD + PredSD
      saveCD <- saveCD+CD
      rm(Predict,PredSD,CD)
    }
    #//////End of Random Pop Size script.

    if(pop.reduct.method=="OPTI") {
      genopop <- optiTRAIN(geno,rps,Nopti=3000)$genoOptimiz # optimum Pop Size
      # WARNING: the size of genopop can differ from rps!! WHY?
      new.geno.pop.size <- c(rps,ncol(geno))
      phenopop <- pheno[rownames(genopop)]
      nObs <- rps
      message("CDmean optimisation applied. New genotypic data dimension:")
      print(new.geno.pop.size)
      message("")
      nObs <- length(pheno)
      #Predict <- rep(0,length(pheno))
      Predict<-rep(NA,nObs)
      CD<-rep(NA,nObs)
      PredSD <-rep(NA,nObs)
      cvPred <- rep(NA, nObs)
      #cvPredSD <-rep(NA,nObs)
      #cvCD <- rep(NA,nObs)
      #names(Pred)=names(pheno)
      names(PredSD)=names(pheno)
      names(CD)=names(pheno)
      names(Predict)=names(pheno)
      folds <- sample(rep(1:nFolds, length.out=nObs))
      for (fold in 1:nFolds) {
        testOPTI<- optiTRAIN(geno,rps,Nopti=30)
        genoTrain <- testOPTI$genoOptimiz
        phenoTrain=pheno[rownames(genoTrain)]
        phenoReal <- pheno[!names(pheno)%in%names(phenoTrain)]
        genoPred <- geno[names(phenoReal),]
        if (FIXED!="NULL") {
          FixedPred <- FIXED[rownames(genoTrain),]
          FixedTrain <- FIXED[!rownames(FIXED)%in%names(phenoTrain),]
          pred <- predictor(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred)
        }
        #pred <- predictor(phenoTrain,phenoReal, genoTrain, genoPred)
        else {pred <- predictor(phenoTrain, genoTrain, FixedTrain="NULL", genoPred, FixedPred="NULL")}
        #pred <- predictor(phenoTrain, genoTrain, genoPred) #Return cbind(GPRED,CD)
        #pred <- predictor_rrBLUP(phenoTrain,phenoReal, genoTrain, genoPred); #Return cbind(GPRED,CD)
        Predict[rownames(pred)] <- pred[,1]
        PredSD[rownames(pred)] <- pred[,2]
        CD[rownames(pred)] <- pred[,3]
        valid <- pheno[rownames(pred)] # validation pheno, added 21/11/14
        valid = phenoReal
        # Correlation between valid/pred
        # accu <- round(cor(pred[,1],phenoReal),digits=2);
        # ACCU[fold] <- round(cor(pred,valid),digits=2)# 04/05/2015 VG TRAN
        #corr <- cor(pred,valid)
        corr <-cor(pred[,1],valid)
        ACCU[fold] <- round(corr[1],digits=3)# 04/05/2015 VG TRAN
        message("")
        # message(c("cv correlation for fold ",fold," is: ",accu));
        message(c("cv correlation for fold ",fold," is: ",ACCU[fold])) # 04/05/2015 VG TRAN
      }
      message("")
      class_predict[[time]] <- Predict
      class_predSD[[time]] <-PredSD
      class_CD[[time]] <-CD
      timebar <- time*100/nTimes
      waitingbar(runif(timebar))
      #saveAcc[time] <- round(cor(cvPred, pheno)*100,digits=2); # percentage/100%
      saveAcc[time] <- round(cor(Predict, pheno,use="pairwise.complete.obs"),digits=3) # cvPred is never filled in this branch, so Predict is used
      # saveAcc[(((time-1)*nFolds)+1):(time*nFolds)] <- round(ACCU,digits=2) # 04/05/2015 VG TRAN
      #saveAcc[time] <- round(mean(ACCU,na.rm=T),digits=3)
      #saveAcc[time] <- round(cor(Predict, pheno[names(Predict)],use="na.or.complete"),digits=3)
      saveMSEP[time] <- round(sqrt(sum((Predict-pheno)^2)/length(pheno)),3)
      message("")
      message(c("Mean CV correlation for time ",time," and ",nFolds," folds is: ",saveAcc[time]))
      savePred <- savePred + Predict
      savePredSD <- savePredSD + PredSD
      saveCD <- saveCD+CD
      rm(Predict,PredSD,CD)
    }
    #//////End of OPTI script.
}# end nTimes SD <- round(sd(saveAcc),digits=4) # Standard Deviation of CV SDMSEP=round(sd(saveMSEP),4) # Standard Deviation of sqrMSEP predMean <- rep(NA, length(notIsNA)) predMean[notIsNA] <- savePred / nTimes CDMean <- rep(NA, length(notIsNA)) CDMean[notIsNA] <- saveCD / nTimes message("") # message("CV correlations for all times : ") # print(saveAcc); #bv_table_mean <- mean(CLASS_TABLE) #message("BV table mean : ") #print(class_predict) bv_predict_all = do.call(cbind,class_predict) bv_predSD_all = do.call(cbind,class_predSD) bv_CD_all = do.call(cbind,class_CD) # message("All BV predictions: ") # print(bv_predict_all) bv_predict_mean = apply(bv_predict_all,1,meanNA) bv_predSD_mean=apply(bv_predSD_all,1,meanNA) # bv_predict_mean=bv_predict_mean+mean(pheno) bv_CD_mean= apply(bv_CD_all,1,meanNA) #message("BV predict mean: "); #print(bv_predict_mean); #message("BV Predict all: "); #print(bv_predict_all) #message("BV Predict mean: "); #print(bv_predict_mean); #mae <- abs(bv_predict_mean-pheno)*100/pheno bv_table <- round(cbind(pheno,bv_predict_mean,bv_predSD_mean,bv_CD_mean),digits=3) #rownames(bv_table) <- names(pheno) #colnames(bv_table) <- c("Real pheno","Predict BV","predict BV_SD","CDmean") class_cv[[1]] <- c(saveAcc,SD) class_cv[[2]] <- c(saveMSEP,SDMSEP) class_cv[[3]] <- bv_table return(class_cv) #return(class_cv) } meanNA <- function(x) { m=mean(x[!is.na(x)]) m } #////////////////////////////////////////////////////////////////////////////////// # Run_All_Croos_Validation # performs prediction with a range of methods (not all) # and provide a summary table for comparison #///////////////////////////////////////////////////////////////////////////////////// Run_All_Cross_Validation <- function(pheno,geno_impute,nFolds,nTimes) { #(c)2013 V.G. TRAN #cross validation for predict_ElasticNet(with glmnet): #Elastic-Net sequence (alpha not equal to 1 and zero. 
message("Predicting by Elastic-Net ...") time.en = system.time(ElasticNet <-runCrossVal(pheno,geno_impute,predict_ElasticNet,nFolds,nTimes)) L = length(ElasticNet) message("Predict by Elastic-Net...ended.") EN_Results <- c(mean(ElasticNet[1:L-1],na.rm=T),ElasticNet[L],round(time.en[3]/60,digits=3)) names(EN_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(EN_Results) message("") #cross validation for predictor_SVM: message("Predicting by SVM ...") time.en = system.time(SVM <-runCrossVal(pheno,geno_impute,predict_SVM,nFolds,nTimes)) L = length(ElasticNet) message("Predict by SVM...ended.") SVM_Results <- c(mean(SVM[1:L-1],na.rm=T),SVM[L],round(time.en[3]/60,digits=3)) names(SVM_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(SVM_Results) message("") #cross validation for predictor_RR(Ridge Regression penalty with glmnet): #Ridge Regression Model message("Predicting by RR (Ridge Regression)...") time.rr = system.time(RR <-runCrossVal(pheno,geno_impute,predict_RR,nFolds,nTimes)) L = length(RR) message("Predict by RR...ended.") RR_Results <- c(mean(RR[1:L-1],na.rm=T),RR[L],round(time.rr[3]/60,digits=3)) names(RR_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(RR_Results) message("") #cross validation for predictor_Lasso(LASSO penalty with glmnet): message("Predicting by LASSO ...") time.lasso = system.time(LASSO <-runCrossVal(pheno,geno_impute,predict_Lasso,nFolds,nTimes)) L = length(LASSO) message("Predict by LASSO...ended.") LASSO_Results <- c(mean(LASSO[1:L-1],na.rm=T),LASSO[L],round(time.lasso[3]/60,digits=3)) names(LASSO_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(LASSO_Results) message("") #cross validation for predictor_GBLUP: message("Predict by GBLUP...") time.gblup = system.time(GBLUP <-runCrossVal(pheno,geno_impute,predict_GBLUP,nFolds,nTimes)) L = length(GBLUP) message("Predict by GBLUP...ended.") GBLUP_Results <- c(mean(GBLUP[1:L-1],na.rm=T),GBLUP[L],round(time.gblup[3]/60,digits=3)) names(GBLUP_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(GBLUP_Results) message("") #cross validation for predictor_EGBLUP: message("Predict by EGBLUP...") time.egblup = system.time(EGBLUP <-runCrossVal(pheno,geno_impute,predict_EGBLUP,nFolds,nTimes)) L = length(EGBLUP) message("Predict by EGBLUP...ended.") EGBLUP_Results <- c(mean(EGBLUP[1:L-1],na.rm=T),EGBLUP[L],round(time.egblup[3]/60,digits=3)) names(EGBLUP_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(EGBLUP_Results) message("") #cross validation for predictor_BA (Bayes A): message("Predicting by BA...") time.BA = system.time(BA <-runCrossVal(pheno,geno_impute,predict_BA,nFolds,nTimes)) L = length(BA) message("Predict by Bayes A...ended.") BA_Results <- c(mean(BA[1:L-1],na.rm=T),BA[L],round(time.BA[3]/60,digits=3)) names(BA_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(BA_Results) message("") #cross validation for predictor_BB (Bayes B): message("Predicting by BB...") time.BB = system.time(BB <-runCrossVal(pheno,geno_impute,predict_BB,nFolds,nTimes)) L = length(BB) message("Predict by Bayes B...ended.") BB_Results <- c(mean(BB[1:L-1],na.rm=T),BB[L],round(time.BB[3]/60,digits=3)) names(BB_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(BB_Results) message("") #cross validation for predictor_BC (Bayes C): 
message("Predicting by BC...") time.BC = system.time(BC <-runCrossVal(pheno,geno_impute,predict_BC,nFolds,nTimes)) L = length(BC) message("Predict by Bayes C...ended.") BC_Results <- c(mean(BC[1:L-1],na.rm=T),BC[L],round(time.BC[3]/60,digits=3)) names(BC_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(BC_Results) message("") #cross validation for predictor_BL (Bayesian Lasso): message("Predicting by BL...") time.BL = system.time(BL <-runCrossVal(pheno,geno_impute,predict_BL,nFolds,nTimes)) L = length(BL) message("Predict by Bayes LASSO...ended.") BL_Results <- c(mean(BL[1:L-1],na.rm=T),BL[L],round(time.BL[3]/60,digits=3)) names(BL_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(BL_Results) message("") #cross validation for predictor_BRR (Bayes RR): message("Predicting by BRR...") time.BRR = system.time(BRR <-runCrossVal(pheno,geno_impute,predict_BRR,nFolds,nTimes)) L = length(BRR) message("Predict by Bayes RR...ended.") BRR_Results <- c(mean(BRR[1:L-1],na.rm=T),BRR[L],round(time.BRR[3]/60,digits=3)) names(BRR_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(BRR_Results) message("") #cross validation for predictor_BRNN (Bayesian regularization Neural Network): message("Predicting by BRNN...") time.BRNN = system.time(BRNN <-runCrossVal(pheno,geno_impute,predict_BRNN,nFolds,nTimes)) L = length(BRNN) message("Predict by Bayes RR...ended.") BRNN_Results <- c(mean(BRNN[1:L-1],na.rm=T),BRNN[L],round(time.BRNN[3]/60,digits=3)) names(BRNN_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(BRNN_Results) message("") #cross validation for predictor_RKHS (RKHS Gaussian Kernel): message("Predicting by RKHS...") time.rkhs = system.time(RKHS <-runCrossVal(pheno,geno_impute,predict_RKHS,nFolds,nTimes)) L = length(RKHS) message("Predict by RKHS...ended.") RKHS_Results <- c(mean(RKHS[1:L-1],na.rm=T),RKHS[L], round(time.rkhs[3]/60,digits=3)) names(RKHS_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(RKHS_Results) message("") #cross validation for predictor_MKRKHS (MKRKHS Gaussian Kernel): message("Predicting by MKRKHS...") time.mkrkhs = system.time(MKRKHS <-runCrossVal(pheno,geno_impute,predict_MKRKHS,nFolds,nTimes)) L = length(MKRKHS) message("Predict by MKRKHS...ended.") MKRKHS_Results <- c(mean(MKRKHS[1:L-1],na.rm=T),MKRKHS[L], round(time.mkrkhs[3]/60,digits=3)) names(MKRKHS_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(MKRKHS_Results) message("") #cross validation for predictor_RF (Random Forest): message("Predicting by RF...") time.rf = system.time(RF <-runCrossVal(pheno,geno_impute,predict_RF,nFolds,nTimes)) L = length(RF) message("Predict by RandomForest...ended.") RF_Results <- c(mean(RF[1:L-1],na.rm=T),RF[L],round(time.rf[3]/60,digits=3)) names(RF_Results) = c("Mean Correlation CVs","Standard Deviation CVs","Time taken (mins)") message(RF_Results) message("") #Table <- c(EN_Results,SVM_Results,RR_Results,LASSO_Results,rrBLUP_Results,RKHS_Results,RF_Results) #names(Table) <- c("Elastic-Net","SVM","RR","LASSO","rrBLUP","RKHS","RF") Table <- cbind(BA_Results,BB_Results, BC_Results, BL_Results, BRR_Results, EN_Results,SVM_Results,BRNN_Results, RR_Results,LASSO_Results,GBLUP_Results, EGBLUP_Results,RKHS_Results,MKRKHS_Results,RF_Results) rownames(Table) <- c("Mean Correlation","Standard Deviation","Time taken (mins)") colnames(Table) <- 
c("BayesA","BayesB","BayesC","BayesLASSO", "BayesRR", "Elastic-Net","SVM","BRNN","RR","LASSO","GBLUP","EGBLUP", "RKHS","MKRKHS","RF") Table = t(Table) # chuyen vi return(Table) } #///////////////////////////////////////////////////////// #THE PREDICTORS # function to run the Genomic Prediction Models #//////////////////////////////////////////////////////// #////////////////////////////////////////////////////////// # BA Bayes A # uses BLLR library #///////////////////////////////////////////////////////////// #//////////////////////////////////////////////////////// #predict_BA uses BGLR library ######################################################################################################################## predict_BA <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) { #(c)2015 V.G.TRAN & G.CHARMET #BayesA #library(BGLR) if (!requireNamespace("BGLR", quietly = TRUE)) { stop("BGLR needed for this function to work. Please install it.", call. = FALSE) } GENO = rbind(genoTrain,genoPred) y<- as.vector(GENO[,1]) names(y)<- rownames(GENO) y[names(phenoTrain)]<- phenoTrain #whichNa<-sample(1:length(y),size=72,replace=FALSE) yNa<-y yNa[rownames(genoPred)]<-NA nIter=5000; burnIn=1000; thin=3; saveAt = stringi::stri_rand_strings(1, 32, '[a-zA-Z]'); S0=NULL; weights=NULL; R2=0.5; ETA<-list(list(X=GENO,model='BayesA')) if(FixedTrain!="NULL") { FIX <- rbind(FixedTrain,FixedPred) FIX=round(FIX) ETA2<-list(list(X=FIX,model="FIXED"),list(X=GENO,model='BayesA')) MODEL=BGLR(y=yNa,ETA=ETA2,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) fm=MODEL } else { #MODEL=do.call(BGLR(y=yNa,arg.BGLR)) MODEL=BGLR(y=yNa,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) fm=MODEL #MODEL1=BGLR(y=yNa,ETA=NULL,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) #MODEL2=BGLR(y=y,ETA=NULL,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) } removeSaveAt(saveAt) gpred = MODEL$yHat gpredSD=MODEL$SD.yHat GPRED=cbind(gpred,gpredSD) VARU=var(gpred) # See: BGLR Genomics.cimmyt.org/BGLR-extdoc.pdf SDU=MODEL$SD.yHat #CD: coefficient of determination: #CD = round(sqrt(1-fm$ETA1[[1]]$SD.u^2/fm$ETA1[[1]]$varU),digits=2) ; #Accuracy by coefficient of determination #CD = sqrt(1-fm1$SD.u^2/fm1$varU) CD=sqrt(1-(SDU^2/VARU)) GPRED=cbind(GPRED,CD) GPRED=round(GPRED,digits=3) GPRED=GPRED[rownames(genoPred),] return(GPRED) } #//////////////////////////////////////////////////////////////////////////// # predict_BB uses BGLR library #////////////////////////////////////////////////////////////////////////////////////// predict_BB <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) { if (!requireNamespace("BGLR", quietly = TRUE)) { stop("BGLR needed for this function to work. Please install it.", call. 
= FALSE) } GENO = rbind(genoTrain,genoPred) y <- as.vector(GENO[,1]) names(y) <- rownames(GENO) y[names(phenoTrain)] <- phenoTrain #whichNa<-sample(1:length(y),size=72,replace=FALSE) yNa <-y yNa[rownames(genoPred)]<-NA nIter=5000 burnIn=1000 thin=10 saveAt=stringi::stri_rand_strings(1, 32, '[a-zA-Z]') S0=NULL weights=NULL R2=0.5 if(FixedTrain!="NULL") { FIX <- rbind(FixedTrain,FixedPred) FIX=round(FIX) ETA2<-list(list(X=FIX,model="FIXED"),list(X=GENO,model='BayesB')) MODEL=BGLR(y=yNa,ETA=ETA2,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) fm=MODEL } else{ ETA<-list(list(X=GENO,model='BayesB',probIn=0.05)) # MODEL=do.call(BGLR,args=c(y=yNa,args.BGLR)) MODEL=BGLR(y=yNa,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) } removeSaveAt(saveAt) gpred = MODEL$yHat gpredSD=MODEL$SD.yHat GPRED=cbind(gpred,gpredSD) VARU=var(gpred) # See: BGLR Genomics.cimmyt.org/BGLR-extdoc.pdf SDU=MODEL$SD.yHat #CD: coefficient of determination: #CD = round(sqrt(1-fm$ETA1[[1]]$SD.u^2/fm$ETA1[[1]]$varU),digits=2) ; #Accuracy by coefficient of determination #CD = sqrt(1-fm1$SD.u^2/fm1$varU) CD=sqrt(1-(SDU^2/VARU)) GPRED=cbind(GPRED,CD) GPRED=round(GPRED,digits=3) GPRED=GPRED[rownames(genoPred),] return(GPRED) } #//////////////////////////////////////////////////////////////////////////// # predict_BC uses BGLR library #////////////////////////////////////////////////////////////////////////////////////// predict_BC <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) { #(c)2015 V.G.TRAN & G.CHARMET #BayesC for Genomic Selection # library(BGLR) if (!requireNamespace("BGLR", quietly = TRUE)) { stop("BGLR needed for this function to work. Please install it.", call. = FALSE) } #phenoTrain #phenoReal #genoTrain #genoPred #phenoTrain = pheno[1:250] #phenoReal = pheno[251:322] #DArT1 = MNI(DArT) #genoTrain = DArT1[1:250,] #genoPred = DArT1[251:322,] GENO <- rbind(genoTrain,genoPred) y <- as.vector(GENO[,1]) names(y) <- rownames(GENO) y[names(phenoTrain)] <- phenoTrain #whichNa<-sample(1:length(y),size=72,replace=FALSE) yNa <- y yNa[rownames(genoPred)] <- NA nIter=5000 burnIn=1000 thin=3 saveAt=stringi::stri_rand_strings(1, 32, '[a-zA-Z]') S0=NULL weights=NULL R2=0.5 if(FixedTrain!="NULL") { FIX <- rbind(FixedTrain,FixedPred) FIX=round(FIX) ETA2<-list(list(X=FIX,model="FIXED"),list(X=GENO,model='BayesC')) MODEL=BGLR(y=yNa,ETA=ETA2,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) fm=MODEL } else { ETA<-list(list(X=GENO,model='BayesC')) #MODEL=do.call(BGLR,args=c(y=yNa,arg.BGLR)) MODEL=BGLR(y=yNa,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) } removeSaveAt(saveAt) gpred = MODEL$yHat gpredSD=MODEL$SD.yHat GPRED=cbind(gpred,gpredSD) VARU=var(gpred) # See: BGLR Genomics.cimmyt.org/BGLR-extdoc.pdf SDU=MODEL$SD.yHat #CD: coefficient of determination: #CD = round(sqrt(1-fm$ETA1[[1]]$SD.u^2/fm$ETA1[[1]]$varU),digits=2) ; #Accuracy by coefficient of determination #CD = sqrt(1-fm1$SD.u^2/fm1$varU) CD=sqrt(1-(SDU^2/VARU)) GPRED=cbind(GPRED,CD) GPRED=round(GPRED,digits=3) GPRED=GPRED[rownames(genoPred),] return(GPRED) } #//////////////////////////////////////////////////////////////////////////// # predict_BL Bayesian LASSO uses BGLR library #////////////////////////////////////////////////////////////////////////////////////// predict_BL <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) { #(c)2015 V.G.TRAN & G.CHARMET #BayesA #library(BGLR) if 
(!requireNamespace("BGLR", quietly = TRUE)) { stop("BGLR needed for this function to work. Please install it.", call. = FALSE) } GENO = rbind(genoTrain,genoPred) y<- as.vector(GENO[,1]) names(y)<- rownames(GENO) y[names(phenoTrain)]<- phenoTrain #whichNa<-sample(1:length(y),size=72,replace=FALSE) yNa<-y yNa[rownames(genoPred)]<-NA nIter=5000; burnIn=1000; thin=3; saveAt=stringi::stri_rand_strings(1, 32, '[a-zA-Z]'); S0=NULL; weights=NULL; R2=0.5; if(FixedTrain!="NULL") { FIX <- rbind(FixedTrain,FixedPred) FIX=round(FIX) ETA2<-list(list(X=FIX,model="FIXED"),list(X=GENO,model='BL')) MODEL=BGLR(y=yNa,ETA=ETA2,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) fm=MODEL } else{ ETA<-list(list(X=GENO,model='BL')) #MODEL=do.call(BGLR(y=yNa,arg.BGLR)) MODEL=BGLR(y=yNa,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) } removeSaveAt(saveAt) gpred = MODEL$yHat gpredSD=MODEL$SD.yHat GPRED=cbind(gpred,gpredSD) VARU=var(gpred) # See: BGLR Genomics.cimmyt.org/BGLR-extdoc.pdf SDU=MODEL$SD.yHat #CD: coefficient of determination: #CD = round(sqrt(1-fm$ETA1[[1]]$SD.u^2/fm$ETA1[[1]]$varU),digits=2) ; #Accuracy by coefficient of determination #CD = sqrt(1-fm1$SD.u^2/fm1$varU) CD=sqrt(1-(SDU^2/VARU)) GPRED=cbind(GPRED,CD) GPRED=round(GPRED,digits=3) GPRED=GPRED[rownames(genoPred),] return(GPRED) } #//////////////////////////////////////////////////////////////////////////// # predict_BRR Bayesian rideg regression uses BGLR library #////////////////////////////////////////////////////////////////////////////////////// predict_BRR <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) { #(c)2015 V.G.TRAN & G.CHARMET #BayesA # library(BGLR) if (!requireNamespace("BGLR", quietly = TRUE)) { stop("BGLR needed for this function to work. Please install it.", call. 
= FALSE) } GENO = rbind(genoTrain,genoPred) y<- as.vector(GENO[,1]) names(y)<- rownames(GENO) y[names(phenoTrain)]<- phenoTrain #whichNa<-sample(1:length(y),size=72,replace=FALSE) yNa<-y yNa[rownames(genoPred)]<-NA nIter=8000; burnIn=2 000; thin=3; saveAt=stringi::stri_rand_strings(1, 32, '[a-zA-Z]'); S0=NULL; weights=NULL; R2=0.5; if(FixedTrain!="NULL") { FIX <- rbind(FixedTrain,FixedPred) FIX=round(FIX) ETA2<-list(list(X=FIX,model="FIXED"),list(X=GENO,model='BRR')) MODEL=BGLR(y=yNa,ETA=ETA2,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) fm=MODEL } else { ETA<-list(list(X=GENO,model='BRR')) #MODEL=do.call(BGLR(y=yNa,arg.BGLR)) MODEL=BGLR(y=yNa,ETA=ETA,nIter=nIter,burnIn=burnIn,thin=thin,saveAt=saveAt,df0=5,S0=S0,weights=weights,R2=R2) } removeSaveAt(saveAt) gpred = MODEL$yHat gpredSD=MODEL$SD.yHat GPRED=cbind(gpred,gpredSD) VARU=var(gpred) # See: BGLR Genomics.cimmyt.org/BGLR-extdoc.pdf SDU=MODEL$SD.yHat #CD: coefficient of determination: #CD = round(sqrt(1-fm$ETA1[[1]]$SD.u^2/fm$ETA1[[1]]$varU),digits=2) ; #Accuracy by coefficient of determination #CD = sqrt(1-fm1$SD.u^2/fm1$varU) CD=sqrt(1-(SDU^2/VARU)) CD=1-(SDU^2/VARU) GPRED=cbind(GPRED,CD) GPRED=round(GPRED,digits=3) GPRED=GPRED[rownames(genoPred),] return(GPRED) } #//////////////////////////////////////////////////////////////////////////// # predict_ElasticNet uses glmnet library #////////////////////////////////////////////////////////////////////////////////////// # predict_ElasticNet <- function(phenoTrain, genoTrain, genoPred,arg.cv.glmnet=list(family="gaussian",alpha=0.5)){ predict_ElasticNet <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) { #(c)2013 V.G. TRAN if (!requireNamespace("glmnet", quietly = TRUE)) { stop("glmnet needed for this function to work. Please install it.", call. = FALSE) } # library(glmnet) # LASSO & Elastic-Net generalized linear models #do cross-validation to get the optimal value of lambda: # cv.fit <- do.call(glmnet::cv.glmnet,args=c(genoTrain,phenoTrain,arg.cv.glmnet)) #ElasticNet penalty with top results cv.fit <- cv.glmnet(genoTrain,phenoTrain,family="gaussian",alpha=0.5) #ElasticNet penalty with top results #alpha=1: lasso penalty, alpha=0: ridge penalty lambda_min <- cv.fit$lambda.min # making the best prediction #lambda.1se <- cv.fit$lambda.1se gpred <- predict(cv.fit,newx=genoPred,s=c(lambda_min)) #GPRED <- round(gpred,digits=3) gpredSD=gpred gpredSD=NA CD <- gpred CD <- NA GPRED=cbind(gpred,gpredSD,CD) GPRED=round(GPRED,digits=3) return(GPRED) } #//////////////////////////////////////////////////////////////////////////// # predict_BRNN Bayesian Regularization Neural Network uses BRNN library #////////////////////////////////////////////////////////////////////////////////////// predict_BRNN<- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) { #(c)2013 V.G. TRAN if (!requireNamespace("brnn", quietly = TRUE)) { stop("brnn needed for this function to work. Please install it.", call. 
# Bayesian-regularized neural network with 2 neurons and 10 training epochs
cv.fit <- brnn(genoTrain, phenoTrain, neurons = 2, epochs = 10, verbose = TRUE)
gpred <- predict(cv.fit, newdata = genoPred)
# no SD or CD available for neural-network predictions
gpredSD <- NA
CD <- NA
GPRED <- cbind(gpred, gpredSD, CD)
GPRED <- round(GPRED, digits = 3)
rownames(GPRED) <- rownames(genoPred)
return(GPRED)
}

#////////////////////////////////////////////////////////////////////////////
# predict_GBLUP uses rrBLUP
#//////////////////////////////////////////////////////////////////////////////////////
predict_GBLUP <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred)
# NB based on kin.blup (rrBLUP): faster than the bayesian solution, independent of marker number
{
#BLUP
#(c)2015 V.G. TRAN & G. CHARMET
GENO <- rbind(genoTrain, genoPred)
y <- as.vector(GENO[, 1])
names(y) <- rownames(GENO)
y[names(phenoTrain)] <- phenoTrain
yNa <- y
yNa[rownames(genoPred)] <- NA
A <- A.mat(GENO) # additive relationship matrix
if (!identical(FixedTrain, "NULL")) {
FIX <- rbind(FixedTrain, FixedPred)
FIX <- round(MNI(FIX))
dataF <- data.frame(genoID = names(y), yield = yNa, FIX)
Fixed <- colnames(dataF[, -c(1, 2)])
MODEL <- kin.blup(dataF, geno = "genoID", pheno = "yield", GAUSS = FALSE, K = A, fixed = Fixed, PEV = TRUE, n.core = 1, theta.seq = NULL)
}
else {
dataF <- data.frame(genoID = names(y), yield = yNa)
MODEL <- kin.blup(data = dataF, geno = "genoID", pheno = "yield", GAUSS = FALSE, K = A, PEV = TRUE, n.core = 1, theta.seq = NULL)
}
gpred <- MODEL$pred
gpredSD <- sqrt(MODEL$PEV)
GPRED <- cbind(gpred, gpredSD)
VARU <- MODEL$Vg
SDU <- MODEL$PEV # prediction error variance
# CD: coefficient of determination of each GEBV
CD <- sqrt(1 - (SDU / VARU))
GPRED <- cbind(GPRED, CD)
GPRED <- round(GPRED, digits = 3)
GPRED <- GPRED[rownames(genoPred), ]
return(GPRED)
}

predict_GBLUPB <- function(phenoTrain, genoTrain, genoPred, arg.kinship.BLUP = list(K.method = "GAUSS"))
# OLD function based on BGLR: too slow with the bayesian solver; kept for reference
{
#BLUP
#(c)2015 V.G. TRAN & G. CHARMET
if (!requireNamespace("BGLR", quietly = TRUE)) {
stop("BGLR needed for this function to work. Please install it.", call. = FALSE)
}
GENO <- rbind(genoTrain, genoPred)
y <- as.vector(GENO[, 1])
names(y) <- rownames(GENO)
y[names(phenoTrain)] <- phenoTrain
yNa <- y
yNa[rownames(genoPred)] <- NA
A <- A.mat(GENO)
# single-kernel RKHS on the additive relationship matrix
ETA <- list(list(K = A, model = 'RKHS'))
saveAt <- stringi::stri_rand_strings(1, 32, '[a-zA-Z]')
MODEL <- BGLR(y = yNa, ETA = ETA, nIter = 5000, burnIn = 1000, saveAt = saveAt)
removeSaveAt(saveAt)
gpred <- MODEL$yHat
gpredSD <- MODEL$SD.yHat
GPRED <- cbind(gpred, gpredSD)
VARU <- var(gpred)
SDU <- MODEL$SD.yHat
CD <- sqrt(1 - (SDU^2 / VARU))
GPRED <- cbind(GPRED, CD)
GPRED <- round(GPRED, digits = 3)
GPRED <- GPRED[rownames(genoPred), ]
return(GPRED)
}

#////////////////////////////////////////////////////////////////////////////
# predict_MKRKHS Multiple-Kernel Reproducing Kernel Hilbert Space uses BGLR library
#//////////////////////////////////////////////////////////////////////////////////////
predict_MKRKHS <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) {
#Multi-Kernel RKHS
#(c)2015 V.G. TRAN & G. CHARMET
if (!requireNamespace("BGLR", quietly = TRUE)) {
stop("BGLR needed for this function to work. Please install it.", call. = FALSE)
}
GENO <- rbind(genoTrain, genoPred)
y <- as.vector(GENO[, 1])
names(y) <- rownames(GENO)
y[names(phenoTrain)] <- phenoTrain
yNa <- y
yNa[rownames(genoPred)] <- NA
X <- GENO
p <- ncol(X)
# compute the squared Euclidean distance matrix D, then the Gaussian kernels
X <- scale(X, center = TRUE, scale = TRUE)
D <- (as.matrix(dist(X, method = 'euclidean'))^2) / p
h <- sqrt(c(0.2, 0.5, 0.8)) # bandwidths for kernel averaging
# kernel averaging using BGLR
ETA <- list(list(K = exp(-h[1] * D), model = 'RKHS'),
list(K = exp(-h[2] * D), model = 'RKHS'),
list(K = exp(-h[3] * D), model = 'RKHS'))
saveAt <- stringi::stri_rand_strings(1, 32, '[a-zA-Z]')
MODEL <- BGLR(y = yNa, ETA = ETA, nIter = 8000, burnIn = 2000, saveAt = saveAt)
removeSaveAt(saveAt)
gpred <- MODEL$yHat
gpredSD <- MODEL$SD.yHat
GPRED <- cbind(gpred, gpredSD)
VARU <- var(gpred)
SDU <- MODEL$SD.yHat
CD <- sqrt(1 - (SDU^2 / VARU))
GPRED <- cbind(GPRED, CD)
GPRED <- round(GPRED, digits = 3)
GPRED <- GPRED[rownames(genoPred), ]
return(GPRED)
}

#////////////////////////////////////////////////////////////////////////////
# predict_RKHS Reproducing Kernel Hilbert Space uses BGLR library
#//////////////////////////////////////////////////////////////////////////////////////
predict_RKHS <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) {
# RKHS
#(c)2015 V.G. TRAN & G. CHARMET
if (!requireNamespace("BGLR", quietly = TRUE)) {
stop("BGLR needed for this function to work. Please install it.", call. = FALSE)
}
GENO <- rbind(genoTrain, genoPred)
y <- as.vector(GENO[, 1])
names(y) <- rownames(GENO)
y[names(phenoTrain)] <- phenoTrain
yNa <- y
yNa[rownames(genoPred)] <- NA
X <- GENO
p <- ncol(X)
# compute the squared Euclidean distance matrix D, then the kernel K
X <- scale(X, center = TRUE, scale = TRUE)
D <- (as.matrix(dist(X, method = 'euclidean'))^2) / p
h <- 0.5
K <- exp(-h * D)
saveAt <- stringi::stri_rand_strings(1, 32, '[a-zA-Z]')
if (!identical(FixedTrain, "NULL")) {
FIX <- rbind(FixedTrain, FixedPred)
FIX <- round(FIX)
ETA2 <- list(list(X = FIX, model = "FIXED"), list(K = K, model = 'RKHS'))
MODEL <- BGLR(y = yNa, ETA = ETA2, nIter = 5000, burnIn = 1000, saveAt = saveAt)
}
else {
ETA <- list(list(K = K, model = 'RKHS'))
MODEL <- BGLR(y = yNa, ETA = ETA, nIter = 5000, burnIn = 1000, saveAt = saveAt)
}
gpred <- MODEL$yHat
gpredSD <- MODEL$SD.yHat
GPRED <- cbind(gpred, gpredSD)
removeSaveAt(saveAt)
VARU <- var(gpred)
SDU <- MODEL$SD.yHat
CD <- sqrt(1 - (SDU^2 / VARU))
GPRED <- cbind(GPRED, CD)
GPRED <- round(GPRED, digits = 3)
GPRED <- GPRED[rownames(genoPred), ]
return(GPRED)
}

#////////////////////////////////////////////////////////////////////////////
# predict_RF Random Forest regression uses randomForest library
#//////////////////////////////////////////////////////////////////////////////////////
predict_RF <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) {
#(c)2013 V.G.TRAN
if (!requireNamespace("randomForest", quietly = TRUE)) {
stop("randomForest needed for this function to work. Please install it.", call. = FALSE)
}
# out-of-sample predictions from the forest grown on the training set
gpred <- randomForest(genoTrain, phenoTrain, xtest = genoPred)$test$predicted
gpredSD <- NA
CD <- NA
GPRED <- cbind(gpred, gpredSD, CD)
GPRED <- round(GPRED, digits = 3)
return(GPRED)
}

#//////////////////////////////////////////////////////////////////////////////////
#predict_EGBLUP Epistatic GBLUP uses BGLR library
#//////////////////////////////////////////////////////////////////////////////////
predict_EGBLUP <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) {
#(c)2015 G.CHARMET & V.G. TRAN
#EG-BLUP for Genomic Selection
if (!requireNamespace("BGLR", quietly = TRUE)) {
stop("BGLR needed for this function to work. Please install it.", call. = FALSE)
}
GENO <- rbind(genoTrain, genoPred)
A_GENO <- AM(GENO)
y <- as.vector(GENO[, 1])
names(y) <- rownames(GENO)
y[names(phenoTrain)] <- phenoTrain
yNa <- y
yNa[rownames(genoPred)] <- NA
nIter <- 5000; burnIn <- 1000; thin <- 3
saveAt <- stringi::stri_rand_strings(1, 32, '[a-zA-Z]')
S0 <- NULL; weights <- NULL; R2 <- 0.5
if (!identical(FixedTrain, "NULL")) {
FIX <- rbind(FixedTrain, FixedPred)
FIX <- round(FIX)
# additive term (BRR on markers) plus epistatic term (Hadamard square of A)
ETA2 <- list(list(X = FIX, model = "FIXED"), list(X = GENO, model = 'BRR'), list(K = A_GENO * A_GENO, model = 'RKHS'))
MODEL <- BGLR(y = yNa, ETA = ETA2, nIter = nIter, burnIn = burnIn, thin = thin, saveAt = saveAt, df0 = 5, S0 = S0, weights = weights, R2 = R2)
}
else {
ETA <- list(list(X = GENO, model = 'BRR'), list(K = A_GENO * A_GENO, model = 'RKHS'))
MODEL <- BGLR(y = yNa, ETA = ETA, nIter = nIter, burnIn = burnIn, thin = thin, saveAt = saveAt, df0 = 5, S0 = S0, weights = weights, R2 = R2)
}
removeSaveAt(saveAt)
gpred <- MODEL$yHat
gpredSD <- MODEL$SD.yHat
VARU <- var(gpred)
SDU <- gpredSD
CD <- round(sqrt(1 - (SDU^2 / VARU)), digits = 3) # accuracy by coefficient of determination
GPRED <- cbind(gpred, gpredSD, CD)
GPRED <- GPRED[rownames(genoPred), ]
return(GPRED)
}

#////////////////////////////////////////////////////////////////////////////
# predict_RR Ridge regression uses glmnet library
#//////////////////////////////////////////////////////////////////////////////////////
predict_RR <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) {
#(c)2013 V.G.TRAN
if (!requireNamespace("glmnet", quietly = TRUE)) {
stop("glmnet needed for this function to work. Please install it.", call. = FALSE)
}
# cross-validation to get the optimal value of lambda (alpha = 0: ridge penalty)
cv.fit <- cv.glmnet(genoTrain, phenoTrain, alpha = 0)
lambda_min <- cv.fit$lambda.min # lambda giving the best prediction
gpred <- predict(cv.fit, newx = genoPred, s = c(lambda_min))
gpredSD <- NA
CD <- NA
GPRED <- cbind(gpred, gpredSD, CD)
GPRED <- round(GPRED, digits = 3)
return(GPRED)
}

#////////////////////////////////////////////////////////////////////////////
# predict_Lasso LASSO uses glmnet library
#//////////////////////////////////////////////////////////////////////////////////////
predict_Lasso <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) {
#(c)2013 V.G.TRAN
if (!requireNamespace("glmnet", quietly = TRUE)) {
stop("glmnet needed for this function to work. Please install it.", call. = FALSE)
}
# cross-validation to get the optimal value of lambda (alpha = 1: lasso penalty)
cv.fit <- cv.glmnet(genoTrain, phenoTrain, family = "gaussian", alpha = 1)
lambda_min <- cv.fit$lambda.min # lambda giving the best prediction
gpred <- predict(cv.fit, newx = genoPred, s = c(lambda_min))
gpredSD <- NA
CD <- NA
GPRED <- cbind(gpred, gpredSD, CD)
GPRED <- round(GPRED, digits = 3)
return(GPRED)
}

#////////////////////////////////////////////////////////////////////////////
# predict_SVM Support vector machine uses e1071 library
#//////////////////////////////////////////////////////////////////////////////////////
predict_SVM <- function(phenoTrain, genoTrain, FixedTrain, genoPred, FixedPred) {
#(c)2013 [email protected]
if (!requireNamespace("e1071", quietly = TRUE)) {
stop("e1071 needed for this function to work. Please install it.", call. = FALSE)
}
# nu-regression SVM with radial kernel
model <- svm(genoTrain, phenoTrain, type = "nu-regression", kernel = "radial", cost = 10, gamma = 0.001)
gpred <- predict(model, genoPred)
gpredSD <- NA
CD <- NA
GPRED <- cbind(gpred, gpredSD, CD)
GPRED <- round(GPRED, digits = 3)
return(GPRED)
}
#END OF PREDICTOR FUNCTIONS

#//////////////////////////////////////////////////////////////
transfer <- function(data, time.cal, ncol_geno_shrink) {
#(c)2014 V.G. TRAN
SUMMA <- data[[1]]
L <- length(SUMMA)
Summary <- c(mean(SUMMA[1:(L-1)], na.rm = TRUE), SUMMA[L], round(time.cal[3]/60, digits = 2), ncol_geno_shrink)
names(Summary) <- c("Mean Correlation CVs", "Standard Deviation CVs", "Time taken (mins)", "Number of markers")
print(Summary)
BV_TABLE <- data[[3]]
CV_all_times <- SUMMA[1:(L-1)]
names(CV_all_times) <- paste("time", 1:length(CV_all_times))
SD_all_times <- SUMMA[L]
names(SD_all_times) <- "sd"
MSEP <- data[[2]]
MSEP_all_times <- MSEP[1:(L-1)]
names(MSEP_all_times) <- paste("time", 1:length(MSEP_all_times))
SD_MSEP <- MSEP[L]
names(SD_MSEP) <- "sd squared MSEP"
Results <- list(Summary, CV_all_times, SD_all_times, MSEP_all_times, SD_MSEP, BV_TABLE)
names(Results) <- c("summary", "cv", "SD_cv", "MSEP", "SD_MSEP", "bv_table")
message("")
message("Processing...ended! Results in: object$summary, object$cv, object$SD_cv, object$MSEP, object$SD_MSEP, object$bv_table")
return(Results)
}

waitingbar <- function(x = sort(runif(20)), ...) {
#(c)2013 V.G. TRAN
pb <- txtProgressBar(min = 0, max = 1, initial = 0, char = "*", width = 40, style = 3)
for (i in c(0, x, 1)) {
Sys.sleep(0.1)
setTxtProgressBar(pb, i)
}
Sys.sleep(0.1)
close(pb)
}
#To use:
#waitingbar()
#waitingbar(runif(10))

#////////////////////////////////////////////////////////////////////////////
#QTL SIMULATION:
#//////////////////////////////////////////////////////////////////////////////////////
qtlSIM <- function(geno, NQTL = 100, h2 = 0.3) {
#(c)2014 G. CHARMET & V.G. TRAN
# simulate a trait controlled by NQTL markers drawn from geno, with heritability h2
RealizedH2 <- numeric() # realized trait heritability: to check if it fits the expected value
distriQTLh2 <- numeric() # realized QTL h2, to plot histograms
S <- sort(sample(ncol(geno), NQTL)) # QTL positions (sorted column indices)
QTL <- geno[, S]
X <- geno[, -S] # remaining markers, returned as the new SNP matrix
# simulate QTL effects from a single gaussian distribution
Effects <- rnorm(NQTL)
QTL <- t(t(QTL) * Effects)
TBV <- apply(QTL, 1, sum) # true breeding values
varG <- var(TBV)
varE <- varG * ((1 - h2) / h2)
noise <- rnorm(length(TBV), 0, sqrt(varE))
QT <- TBV + noise # simulated phenotype
h2QTL <- Effects^2 / var(QT)
result <- list(newSNP = X, pheno = QT, TBV = TBV, Effects = Effects, h2QTL = h2QTL)
result
}

#/////////////////////////////////////////////
bwgs.nacount <- function(geno) {
#Calculate the percentage of missing elements in SNP, DArT or GBS genotype matrices:
#(c)06/02/2014 V.G. TRAN
num_element <- table(geno)
num_of_binary <- length(num_element) # 2 for DArT, 3 for SNP, up to 6 for GBS-derived SNP
if (num_of_binary < 2 || num_of_binary > 6) {
stop("Error in the genotype data (possibly not numerized).")
}
num_element_total <- dim(geno)[1] * dim(geno)[2]
# table() drops NA, so the number of missing values is the difference with the total
num_na <- num_element_total - sum(num_element)
na_pourcentage <- num_na * 100 / num_element_total # in percentage
na_pourcentage <- as.vector(na_pourcentage)
return(na_pourcentage)
}

#////////////////////////////////////////////////////////////
#COMMON FUNCTION
bwgs.common <- function(a1, a2) {
# elements of a1 whose names also appear in a2
commun <- a1[which(names(a1) %in% names(a2))]
return(commun)
}

bwgs.bestpred <- function(ans, n) {
#(c)2014 G. CHARMET & V.G. TRAN
message(c("The ", as.character(n), " best prediction[s]:"))
message("")
# sort predictions in decreasing order and keep the n best
ans.BEST <- ans[, sort.list(-ans)]
ans.bestn <- ans.BEST[1:n]
return(ans.bestn)
}

#///////////////////////////////////////////////////////
#MNI Function replaces missing values by the average allele frequency
#////////////////////////////////////////////////////////
MNI <- function(x) {
NAreplace <- function(y) {
if (length(na.omit(y)) != 0) {
y[is.na(y)] <- mean(y, na.rm = TRUE)
}
return(y)
}
imp <- apply(x, 2, NAreplace)
return(imp)
}

#-------------------------------------------------
#Function for optimizing the calibration set
# by selecting a subset of the reference (training) population
# which maximizes the CDmean criterion (Rincent et al 2012)
#-------------------------------------------------
# Main function
# applies CDmean estimation either to random or to optimized samples
# Inputs required are
# matA1 = the additive relationship matrix, e.g. estimated from marker data using pedigree or A.mat
# pheno = the vector of phenotypes for the training set corresponding to matA1
# h2 = the heritability of the pheno trait
# Nindrep = the number of genotypes in the subsample
# options are:
# Method = "Boot" to indicate that random sampling (bootstraps) should be done
# or "Opti" to indicate that the optimization algorithm should be used
optiTRAIN <- function(geno, NSample = 100, Nopti = 3000) # pheno is not needed for the optimization
{
# preliminary computations, run once
matA1 <- A.mat(geno)
matA1 <- as.matrix(matA1)
Nind <- nrow(matA1) # total number of individuals
# lambda = varE/varG is needed to estimate CDmean; set to 1 (equivalent to h2 = 0.5)
lambda <- 1
invA1 <- solve(matA1)
##############################
# Optimization algorithm
##############################
# design matrices
Ident <- diag(NSample)
X <- rep(1, NSample)
M <- Ident - (X %*% solve(t(X) %*% X) %*% t(X))
Sample1 <- sample(Nind, NSample) # calibration set initialization
SaveSample <- Sample1
NotSampled1 <- seq(1:Nind)
NotSampled <- NotSampled1[-Sample1] # initial validation set
Z <- matrix(0, NSample, Nind)
for (i in 1:length(Sample1)) { Z[i, Sample1[i]] <- 1 }
T <- contrasteNonPheno(NotSampled, Nind, NSample) # T: matrix of contrasts
# CDmean of the initial calibration set
matCD <- (t(T) %*% (matA1 - lambda * solve(t(Z) %*% M %*% Z + lambda * invA1)) %*% T) / (t(T) %*% matA1 %*% T)
CD <- diag(matCD)
CDmeanSave <- mean(CD)
CDmeanMax1 <- rep(NA, Nopti)
# exchange algorithm (maximize CDmean)
cpt2 <- 1
cpt <- 0
while (cpt2 < Nopti) {
# make sure Nopti is large enough in your case (a plateau is reached); check CDmeanMax1
NotSampled <- NotSampled1[-Sample1]
cpt2 <- cpt2 + 1
# remove one randomly chosen individual from the sample:
Sample2 <- sample(Sample1, 1)
# select one randomly chosen individual among those not in the calibration set:
Sample3 <- sample(NotSampled, 1)
# new calibration set:
Sample4 <- c(Sample3, Sample1[Sample1 != Sample2])
# mean CD of the new calibration set:
Z <- matrix(0, NSample, Nind)
for (i in 1:length(Sample4)) { Z[i, Sample4[i]] <- 1 }
NotSampled <- NotSampled1[-Sample4]
T <- contrasteNonPheno(NotSampled, Nind, NSample)
matCD <- (t(T) %*% (matA1 - lambda * solve(t(Z) %*% M %*% Z + lambda * invA1)) %*% T) / (t(T) %*% matA1 %*% T)
CD <- diag(matCD)
if (mean(CD) > CDmeanSave) {
Sample1 <- Sample4 # accept the new calibration set if CDmean increased, reject otherwise
CDmeanSave <- mean(CD)
cpt <- 0
} else {
cpt <- cpt + 1
}
CDmeanMax1[cpt2 - 1] <- CDmeanSave
} # end of while loop
sampleOptimiz <- Sample1 # sampleOptimiz is the optimized calibration set
sortOptimiz <- sampleOptimiz[sort.list(sampleOptimiz)]
genoOptimiz <- geno[sortOptimiz, ]
result <- list(CDmax = CDmeanSave, sampleOPTI = sampleOptimiz, genoOptimiz = genoOptimiz)
result
}

# This function creates the matrix of contrasts between each individual not in
# the calibration set and the mean of the population
contrasteNonPheno <- function(NotSampled, Nind, NSample) {
mat <- matrix(-1 / Nind, Nind, Nind - NSample)
for (i in 1:ncol(mat)) {
mat[NotSampled[i], i] <- 1 - 1 / Nind
}
return(mat)
}

#' Remove saveAt files generated by the BGLR package
#'
#' @param saveAt Prefix of the temporary BGLR output files to remove
removeSaveAt <- function(saveAt) {
Sys.sleep(2)
files <- list.files('.', paste0("^", saveAt, '.*(lambda|mu|varE|varB|varU|ScaleBayesA|parBayesB|parBayesC)\\.dat$'))
tryCatch({
invisible(lapply(files, file.remove))
}, error = function(e) { })
}
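# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the package API) showing how qtlSIM,
# MNI and optiTRAIN above fit together: simulate a trait from real marker
# data, impute missing genotypes, then pick a CDmean-optimized calibration
# set. The `geno` object is a hypothetical {-1, 0, 1} marker matrix with row
# and column names; optiTRAIN() additionally needs rrBLUP's A.mat() on the
# search path. Guarded by if (FALSE) so that sourcing this file stays
# side-effect free.
# ---------------------------------------------------------------------------
if (FALSE) {
library(rrBLUP)                            # provides A.mat(), used inside optiTRAIN()
sim <- qtlSIM(geno, NQTL = 100, h2 = 0.3)  # simulated phenotype + true breeding values
geno_imp <- MNI(sim$newSNP)                # mean imputation of missing marker scores
opti <- optiTRAIN(geno_imp, NSample = 100, Nopti = 1000)
opti$CDmax                                 # CDmean reached by the optimized set
calib <- rownames(opti$genoOptimiz)        # lines selected for calibration
cor(sim$TBV[calib], sim$pheno[calib])      # sanity check on the simulated data
}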
#' @title Genomic Prediction with cross validation
#' @description The bwgs.cv function carries out cross-validation using genotypic and
#' phenotypic data from a reference population, with options for genotypic
#' matrix processing and genomic breeding value estimation.
#' @param geno Matrix (n x m) of genotypes for the training population: n lines with m markers. Genotypes should be coded {-1, 0, 1}. Missing data are allowed and coded as NA.
#' @param pheno Vector (n x 1) of "phenotypes", i.e. observations or pre-processed, corrected values. This vector should have no missing values; otherwise missing values (NA) will be omitted in both pheno and geno. In a first step, bwgs.cv checks whether rownames(geno) match names(pheno). If this is not the case, the common elements (intersect) are selected in both geno and pheno for further analyses. If a MAP file is provided, the selected set of markers is also subset in MAP.
#' @param FIXED A matrix of fixed effects, to be used with some methods such as those included in BGLR. MUST have the same rownames as geno and be coded {-1, 0, 1}.
#' @param MAXNA The maximum proportion of missing values admitted for keeping a marker column in geno. Default value is 0.2.
#' @param MAF The minimum allele frequency for keeping a marker column in geno; default value is 0.05.
#' @param pop.reduct.method Method for reducing the size of the training population. Can be used for teaching purposes; of little practical interest when the entire population is already genotyped and phenotyped.
#' Default value is NULL (the whole training set is used).
#' Proposed methods are:
#' \itemize{
#' \item{RANDOM: a subset of sample.pop.size is randomly selected for training the model, and the unselected part of the population is used for validation. The process is repeated nFolds * nTimes to have the same number of replicates as with cross-validation.}
#' \item{OPTI: the optimization algorithm based on CDmean (Rincent et al 2012) selects a subset which maximizes the average CD (coefficient of determination) in the validation set. Since the process is long and has a stochastic component, it is repeated only nTimes.}
#' }
#' @param sample.pop.size The size of the subset of individuals in the training set (both geno and pheno) selected by pop.reduct.method if not NULL.
#' @param geno.reduct.method Allows sampling a subset of markers, to speed up computing time and/or to avoid introducing more noise than informative markers. Options are:
#' \itemize{
#' \item{RMR: random sampling (without replacement) of a subset of markers. To be used with the parameter reduct.marker.size.}
#' \item{LD (with r2 and MAP): enables "pruning" of markers which are in LD > r2. Only the marker with the fewest missing values is kept for each pair in LD > r2. To allow faster computation, r2 is estimated chromosome by chromosome, so a MAP file is required with information on marker assignment to chromosomes. The MAP file should contain at least three columns: marker_name, chromosome_name and distance_from_origin (either genetic or physical distance, only used for sorting markers, LD being re-estimated from marker data).}
#' \item{ANO (with pval): one-way ANOVAs are carried out with the R function lm on the trait pheno. Markers are tested one at a time, and only markers with pvalue < pval are kept for GEBV prediction.}
#' \item{ANO+LD (with pval and r2, MAP is optional): combines a first step of marker selection with ANO, then a second step of pruning using the LD option.}
#' }
#' @param reduct.marker.size Specifies the number of markers for the genotypic reduction using RMR (reduct.size < m).
#' @param pval p value for the ANO method, 0 < pval < 1.
#' @param r2 Coefficient of linkage disequilibrium (LD). Set 0 < r2 < 1 if the genotypic reduction method is LD or ANO+LD.
#' @param MAP A matrix with markers in rows and at least ONE column with colnames = "chrom". Used for computing r2 within linkage groups.
#' @param geno.impute.method Allows missing marker data imputation using the two methods proposed in function A.mat of package rrBLUP, namely:
#' \itemize{
#' \item{MNI: missing data are replaced by the mean allele frequency of the marker (column in geno)}
#' \item{EMI: missing data are replaced using the expectation-maximization method described in function A.mat (Endelman & Jannink 2012).}
#' }
#'
#' Default value is NULL.
#'
#' Note that these imputation methods are only suited when there are few missing values, typically in marker data from SNP chips or KASPar assays. They are NOT suited for imputing marker data from low-density to high-density designs, nor when there are MANY missing data as typically provided by GBS. More sophisticated software (e.g. Beagle, Browning & Browning 2016) should be used before BWGS.
#' @param predict.method The options for genomic breeding value prediction methods. The available options are:
#' \itemize{
#' \item{GBLUP: performs G-BLUP using a marker-based relationship matrix, implemented through the BGLR R library. Equivalent to ridge regression (RR-BLUP) of marker effects.}
#' \item{EGBLUP: performs EG-BLUP, i.e. BLUP using a "squared" relationship matrix to model epistatic 2x2 interactions, as described by Jiang & Reif (2015), using the BGLR library.}
#' \item{RR: ridge regression, using package glmnet. In theory, strictly equivalent to GBLUP.}
#' \item{LASSO: Least Absolute Shrinkage and Selection Operator, another penalized regression method which yields more strongly shrunken estimates than RR. Run by the glmnet library.}
#' \item{EN: Elastic Net (Zou and Hastie, 2005), a weighted combination of RR and LASSO, using the glmnet library.}
#' }
#' Several Bayesian methods, using the BGLR library:
#' \itemize{
#' \item{BRR: Bayesian ridge regression: same as RR-BLUP, but with Bayesian resolution. Induces homogeneous shrinkage of all marker effects towards zero with a Gaussian distribution (de los Campos et al, 2013)}
#' \item{BL: Bayesian LASSO: uses an exponential prior on marker variances, leading to a double exponential distribution of marker effects (Park & Casella 2008)}
#' \item{BA: Bayes A, uses a scaled-t prior distribution of marker effects (Meuwissen et al 2001).}
#' \item{BB: Bayes B, uses a mixture distribution with a point mass at zero and a slab of non-zero marker effects with a scaled-t distribution (Habier et al 2011).}
#' \item{BC: Bayes C, same as Bayes B but with a Gaussian slab.}
#' }
#' A more detailed description of these methods can be found in Perez & de los Campos 2014 (http://genomics.cimmyt.org/BGLR-extdoc.pdf).
#' Three semi-parametric methods:
#' \itemize{
#' \item{RKHS: reproducing kernel Hilbert space regression, single kernel (RKHS) or multiple kernel (MKRKHS), using BGLR (Gianola and van Kaam 2008). Based on a genetic distance matrix and a kernel function to regulate the distribution of marker effects. This method is claimed to be effective for detecting non-additive effects.}
#' \item{RF: Random Forest regression, using the randomForest library (Breiman, 2001, Breiman and Cutler 2013). This method fits regression trees on bootstrapped data and is supposed to be able to capture interactions between markers.}
#' \item{SVM: support vector machine, run by the e1071 library. For details, see Chang, Chih-Chung and Lin, Chih-Jen: LIBSVM: a library for Support Vector Machines http://www.csie.ntu.edu.tw/~cjlin/libsvm}
#' \item{BRNN: Bayesian Regularization for feed-forward Neural Networks, with the R package brnn (Gianola et al 2011). To keep computing time within reasonable limits, the parameters of the brnn function are neurons = 2 and epochs = 10.}
#' }
#' @param nFolds Number of folds for the cross-validation. Smallest recommended value is nFolds = 3.
#' @param nTimes Number of independent replicates of the cross-validation. Smallest recommended value is nTimes = 3.
#' @return
#' The class bwgs.cv returns a list containing:
#' \itemize{
#' \item{\strong{summary}: Summary of the cross-validation, including the mean and standard deviation of predictive ability (i.e. the correlation between phenotype and GEBV, estimated on the validation fold, then averaged over replicates (nTimes)), the time taken by the computation and the number of markers}
#' \item{\strong{cv}: Vector of predictive abilities averaged over nFolds, for each of the nTimes replicates}
#' \item{\strong{sd}: Standard deviation of the nTimes predictive abilities}
#' \item{\strong{MSEP}: Square root of the mean squared error of prediction, averaged over nTimes}
#' \item{\strong{SDMSEP}: Standard deviation of the square root of the mean squared error of prediction over the nTimes replicates}
#' \item{\strong{bv_table}: Matrix of dimension n x 4. Columns are:
#' \itemize{
#' \item{Real BV, i.e. the pheno vector}
#' \item{Predict BV: the n x 1 vector of GEBVs}
#' \item{gpredSD: standard deviation of the estimated GEBV}
#' \item{CD: coefficient of determination of each GEBV, estimated as sqrt(1 - gpredSD^2 / var(gpred)). Note that gpredSD and CD are only available for methods using the BGLR library, namely GBLUP, EGBLUP, BA, BB, BC, BL, BRR, RKHS and MKRKHS.
#' These two columns contain NA for methods RF, RR, LASSO, EN, SVM and BRNN.}
#' }
#' }
#'
#' }
#' @examples
#' \donttest{
#' data(inra)
#' # Cross validation using GBLUP method
#' cv_gblup <- bwgs.cv(TRAIN47K, YieldBLUE,
#'                     geno.impute.method = "mni",
#'                     predict.method = "gblup",
#'                     nFolds = 10,
#'                     nTimes = 1)
#' }
#' @export
bwgs.cv <- function(geno, pheno,
FIXED = "NULL",
MAXNA = 0.2,
MAF = 0.05,
pop.reduct.method = "NULL",
sample.pop.size = "NULL",
geno.reduct.method = "NULL",
reduct.marker.size = "NULL",
pval = "NULL",
r2 = "NULL",
MAP = "NULL",
geno.impute.method = "NULL",
predict.method = "NULL",
nFolds,
nTimes) {
#(c)2015 [email protected] & [email protected]
message("2017 BWGS - Version 1.10.0 Release date: 31/10/2017")
message("")
start.time <- Sys.time()
message("Start time:")
print(start.time)
message("")
pop.reduct.method <- toupper(pop.reduct.method)
geno.reduct.method <- toupper(geno.reduct.method)
reduct.size <- as.numeric(reduct.marker.size)
geno.impute.method <- toupper(geno.impute.method)
predict.method <- toupper(predict.method)
# r2 is used by the LD and ANO+LD reduction methods
#///////////////////////////////////////////////////////////////
#STEP 0: select common lines in geno and pheno matrices
# FILTER according to MAF and MAXNA
#//////////////////////////////////////////////////////////////
if (!identical(MAP, "NULL")) {
if (length(rownames(MAP)) == 0) {
stop("Row names are required for MAP")
}
MAPPED_markers <- intersect(rownames(MAP), colnames(geno))
if (length(MAPPED_markers) == 0) {
stop("The row names of MAP do not match the column names of geno")
}
MAP <- MAP[MAPPED_markers, ]
geno <- geno[, MAPPED_markers]
}
if (!identical(FIXED, "NULL")) {
genoFIX <- intersect(rownames(FIXED), rownames(geno))
FIXED <- FIXED[genoFIX, ]
geno <- geno[genoFIX, ]
}
listGENO <- rownames(geno)
listPHENO <- names(pheno)
LIST <- intersect(listGENO, listPHENO)
if (length(LIST) == 0) { stop("NO COMMON LINE BETWEEN geno AND pheno") }
if (length(LIST) > 0) {
geno <- geno[LIST, ]
pheno <- pheno[LIST]
if (!identical(FIXED, "NULL")) { FIXED <- FIXED[LIST, ] }
new.pheno.size <- length(LIST)
message("Number of common lines between geno and pheno")
print(new.pheno.size)
}
# FILTERING THE GENOTYPING MATRIX
# first on the proportion of NA per marker, then on minor allele frequency
markerNA <- apply(geno, 2, percentNA)
geno <- geno[, markerNA < MAXNA]
freqSNP <- apply(geno, 2, myMean)
geno <- geno[, freqSNP > MAF & freqSNP < (1 - MAF)]
new.geno.size <- dim(geno)
message("Number of markers after filtering")
print(new.geno.size)
if (pop.reduct.method == "NULL") {
rps <- 0
message("random pop size not applied")
print(rps)
message("")
}
if (pop.reduct.method == "RANDOM") {
rps <- sample.pop.size
message("random pop size=")
print(rps)
message("")
}
if (pop.reduct.method == "OPTI") {
rps <- as.numeric(sample.pop.size)
message("optimized pop size=")
print(rps)
message("")
}
message("POPULATION SAMPLING METHOD")
print(pop.reduct.method)
message("")
#//////////////////////////////////////////////////////////////
#Step 1: Imputation "MNI" or "EMI" or "NULL"
#//////////////////////////////////////////////////////////////
if ((geno.impute.method == "NULL") | (geno.impute.method == "MNI") | (geno.impute.method == "EMI")) {
if (geno.impute.method == "MNI") {
time.mni <- system.time(geno_impute <- MNI(geno))
time.mni.impute <- as.numeric(round(time.mni[3] / 60, digits = 2))
message("Imputed by MNI.")
message("Time of imputation by MNI (mins):")
print(time.mni.impute)
message("A 5 x 20 subset of the imputed genotypic matrix:")
print(geno_impute[1:5, 1:20], quote = FALSE)
message("")
}
if (geno.impute.method == "EMI") {
time.emi <- system.time(geno_impute <- EMI(geno))
time.emi.impute <- as.numeric(round(time.emi[3] / 60, digits = 2))
message("Imputed by EMI.")
message("Time of imputation by EMI (mins):")
print(time.emi.impute)
message("A 5 x 20 subset of the imputed genotypic matrix:")
print(geno_impute[1:5, 1:20], quote = FALSE)
message("")
}
message("Imputed by MNI, EMI...finished.")
if (!identical(FIXED, "NULL")) { FIXED <- round(MNI(FIXED)) }
if (geno.impute.method == "NULL") {
geno_impute <- geno
message("NO Imputation.")
message("A 5 x 20 subset of the genotypic matrix:")
print(geno_impute[1:5, 1:20], quote = FALSE)
message("")
}
}
else { stop("Please choose an impute method: NULL, MNI, EMI ") } # if no valid imputation method was chosen
#///////////////////////////////////////////////////////////////
#STEP 2: reduction OF THE DATA
#//////////////////////////////////////////////////////////////
if ((geno.reduct.method == "NULL") | (geno.reduct.method == "RMR") | (geno.reduct.method == "ANO") | (geno.reduct.method == "LD") | (geno.reduct.method == "ANO+LD")) {
if (geno.reduct.method == "NULL") {
geno_shrink <- geno_impute
message("No reduction for genomic data.")
new.geno.size <- dim(geno_shrink)
}
if (geno.reduct.method == "RMR") {
if (!is.na(reduct.size)) {
time.rmr <- system.time(geno_shrink <- RMR(geno_impute, reduct.size))
time.rmr.reduct <- as.numeric(round(time.rmr[3] / 60, digits = 2))
new.geno.size <- dim(geno_shrink)
message("Reduced by RMR. New genotypic data dimension:")
print(new.geno.size)
message("")
message("Time of reduction by RMR (mins):")
print(time.rmr.reduct)
} else { stop("Please choose the number of marker columns (reduct.marker.size) for the new genotypic data.") }
}
if (geno.reduct.method == "ANO") {
if (!identical(pval, "NULL")) {
time.ano <- system.time(geno_shrink <- ANO(pheno, geno_impute, pval))
time.ano.reduct <- as.numeric(round(time.ano[3] / 60, digits = 2))
new.geno.size <- dim(geno_shrink)
message("Reduced by ANO. New genotypic data dimension:")
print(new.geno.size)
message("")
message("Time of reduction by ANO (mins):")
print(time.ano.reduct)
} else { stop("Please choose the p value for the new genotypic data.") }
}
if (geno.reduct.method == "ANO+LD") {
if (!identical(pval, "NULL")) {
time.ano <- system.time(geno_shrinkANO <- ANO(pheno, geno_impute, pval))
time.ano.reduct <- as.numeric(round(time.ano[3] / 60, digits = 2))
geno_shrinkANO <- geno_shrinkANO[, !is.na(colnames(geno_shrinkANO))]
} else { stop("Please choose the p value for the new genotypic data.") }
if (identical(MAP, "NULL")) {
stop("Please choose the r2 and/or MAP for LD reduction.")
}
if (!identical(MAP, "NULL")) {
if (!identical(r2, "NULL")) {
MAP2 <- MAP[colnames(geno_shrinkANO), ]
time.chrld <- system.time(geno_shrink <- CHROMLD(geno_shrinkANO, R2seuil = r2, MAP = MAP2))
time.chrld.reduct <- as.numeric(round(time.chrld[3] / 60, digits = 2))
time.anold.reduct <- time.ano.reduct + time.chrld.reduct
new.geno.size <- dim(geno_shrink)
message("Reduced by ANO + CHROMLD. New genotypic data dimension:")
print(new.geno.size)
message("")
message("Time of reduction by ANO + LD (mins):")
print(time.anold.reduct)
} else { stop("Please choose the r2 and/or MAP for LD reduction.") }
} # END OF ANO+LD reduction
}
if (geno.reduct.method == "LD") {
if (identical(MAP, "NULL")) {
stop("Please choose a MAP for LD reduction.")
}
MAP2 <- MAP[colnames(geno), ]
time.chrld <- system.time(geno_shrink <- CHROMLD(geno_impute, R2seuil = r2, MAP = MAP2))
time.chrld.reduct <- as.numeric(round(time.chrld[3] / 60, digits = 2))
new.geno.size <- dim(geno_shrink)
message("Reduced by CHROMLD. New genotypic data dimension:")
print(new.geno.size)
message("")
message("Time of reduction by CHROMLD (mins):")
print(time.chrld.reduct)
} # END OF LD reduction
}
else { stop("BWGS Warning! Please choose a valid reduct method. Ex.: RMR, ANO, LD, ANO+LD, NULL.") }
#End of dimension reduction for the genomic matrix
#/////////////////////////////////////////////////////
#STEP 3: CROSS VALIDATION - BEST MODEL IDENTIFICATION
#/////////////////////////////////////////////////////
if ((predict.method == "EN") | (predict.method == "SVM") | (predict.method == "EGBLUP") | (predict.method == "RR") | (predict.method == "BA") |
(predict.method == "BB") | (predict.method == "LASSO") | (predict.method == "BL") | (predict.method == "BC") | (predict.method == "GBLUP") | (predict.method == "BRNN") |
(predict.method == "MKRKHS") | (predict.method == "RF") | (predict.method == "BRR") | (predict.method == "RKHS") | (predict.method == "ALL")) {
if (predict.method == "GBLUP") {
message("Predict by GBLUP...")
time.gblup <- system.time(GBLUP <- runCV(pheno, geno_shrink, FIXED, pop.reduct.method, rps, predict_GBLUP, nFolds, nTimes))
message("Predict by GBLUP...ended.")
time.cal <- time.gblup
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(GBLUP, time.cal, ncol_geno_shrink)
}
if (predict.method == "EGBLUP") {
message("Predict by EGBLUP...")
time.egblup <- system.time(EGBLUP <- runCV(pheno, geno_shrink, FIXED, pop.reduct.method, rps, predict_EGBLUP, nFolds, nTimes))
message("Predict by EGBLUP...ended.")
time.cal <- time.egblup
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(EGBLUP, time.cal, ncol_geno_shrink)
}
if (predict.method == "RF") {
message("Predicting by RF...")
message("Warning! The processing time for the Random Forest model is very long...")
time.rf <- system.time(RF <- runCV(pheno, geno_shrink, FIXED = "NULL", pop.reduct.method, rps, predict_RF, nFolds, nTimes))
message("Predict by RF...ended.")
time.cal <- time.rf
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(RF, time.cal, ncol_geno_shrink)
}
if (predict.method == "BRNN") {
message("Predict by BRNN...")
time.brnn <- system.time(BRNN <- runCV(pheno, geno_shrink, FIXED = "NULL", pop.reduct.method, rps, predict_BRNN, nFolds, nTimes))
message("Predict by BRNN...ended.")
time.cal <- time.brnn
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(BRNN, time.cal, ncol_geno_shrink)
}
if (predict.method == "RKHS") {
message("Predicting by RKHS...")
time.rkhs <- system.time(RKHS <- runCV(pheno, geno_shrink, FIXED = "NULL", pop.reduct.method, rps, predict_RKHS, nFolds, nTimes))
message("Predict by RKHS...ended.")
time.cal <- time.rkhs
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(RKHS, time.cal, ncol_geno_shrink)
}
if (predict.method == "MKRKHS") {
message("Predicting by MKRKHS...")
time.mkrkhs <- system.time(MKRKHS <- runCV(pheno, geno_shrink, FIXED = "NULL", pop.reduct.method, rps, predict_MKRKHS, nFolds, nTimes))
message("Predict by MKRKHS...ended.")
time.cal <- time.mkrkhs
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(MKRKHS, time.cal, ncol_geno_shrink)
}
if (predict.method == "RR") {
#Ridge Regression#
message("Predicting by RR (Ridge Regression)...")
time.rr <- system.time(RR <- runCV(pheno, geno_shrink, FIXED = "NULL", pop.reduct.method, rps, predict_RR, nFolds, nTimes))
message("Predict by RR...ended.")
time.cal <- time.rr
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(RR, time.cal, ncol_geno_shrink)
}
if (predict.method == "BRR") {
#Bayesian Ridge Regression#
message("Predicting by BRR (Bayesian Ridge Regression)...")
time.brr <- system.time(BRR <- runCV(pheno, geno_shrink, FIXED, pop.reduct.method, rps, predict_BRR, nFolds, nTimes))
message("Predict by BRR...ended.")
time.cal <- time.brr
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(BRR, time.cal, ncol_geno_shrink)
}
if (predict.method == "LASSO") {
#LASSO Regression#
message("Predicting by LASSO ...")
time.lasso <- system.time(LASSO <- runCV(pheno, geno_shrink, FIXED = "NULL", pop.reduct.method, rps, predict_Lasso, nFolds, nTimes))
message("Predict by LASSO...ended.")
time.cal <- time.lasso
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(LASSO, time.cal, ncol_geno_shrink)
}
if (predict.method == "BL") {
#Bayesian Lasso regression#
message("Predicting by Bayesian Lasso ...")
time.bl <- system.time(BL <- runCV(pheno, geno_shrink, FIXED, pop.reduct.method, rps, predict_BL, nFolds, nTimes))
message("Predict by Bayesian Lasso...ended.")
time.cal <- time.bl
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(BL, time.cal, ncol_geno_shrink)
}
if (predict.method == "BA") {
#Bayes A regression#
message("Predicting by BayesA ...")
time.ba <- system.time(BA <- runCV(pheno, geno_shrink, FIXED, pop.reduct.method, rps, predict_BA, nFolds, nTimes))
message("Predict by BayesA...ended.")
time.cal <- time.ba
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(BA, time.cal, ncol_geno_shrink)
}
if (predict.method == "BB") {
#Bayes B regression#
message("Predicting by BayesB ...")
time.bb <- system.time(BB <- runCV(pheno, geno_shrink, FIXED, pop.reduct.method, rps, predict_BB, nFolds, nTimes))
message("Predict by BayesB...ended.")
time.cal <- time.bb
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(BB, time.cal, ncol_geno_shrink)
}
if (predict.method == "BC") {
#Bayes C regression#
message("Predicting by BayesC ...")
time.bc <- system.time(BC <- runCV(pheno, geno_shrink, FIXED, pop.reduct.method, rps, predict_BC, nFolds, nTimes))
message("Predict by BayesC...ended.")
time.cal <- time.bc
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(BC, time.cal, ncol_geno_shrink)
}
if (predict.method == "EN") {
#Elastic-Net#
message("Predicting by Elastic-Net ...")
time.en <- system.time(ElasticNet <- runCV(pheno, geno_shrink, FIXED = "NULL", pop.reduct.method, rps, predict_ElasticNet, nFolds, nTimes))
message("Predict by Elastic-Net...ended.")
time.cal <- time.en
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(ElasticNet, time.cal, ncol_geno_shrink)
}
if (predict.method == "SVM") {
#Support Vector Machine#
message("Predicting by SVM ...")
time.svm <- system.time(SVM <- runCV(pheno, geno_shrink, FIXED = "NULL", pop.reduct.method, rps, predict_SVM, nFolds, nTimes))
message("Predict by SVM...ended.")
time.cal <- time.svm
ncol_geno_shrink <- ncol(geno_shrink)
Results <- transfer(SVM, time.cal, ncol_geno_shrink)
}
#FOR ALL PREDICTION METHODS:
if (predict.method == "ALL") {
message("Predict by all methods...")
time.all <- system.time(GSALL <- Run_All_Cross_Validation(pheno, geno_shrink, nFolds, nTimes))
message("Predict by ALL METHODS...ended.")
time.cal <- time.all
ncol_geno_shrink <- ncol(geno_shrink)
Results <- GSALL
}
}
else { stop("Please choose a predict method: GBLUP, EGBLUP, RR, LASSO, EN, BRR, BL, BA, BB, BC, RKHS, MKRKHS, RF, SVM, BRNN or ALL.") }
Results
}
/scratch/gouwar.j/cran-all/cranData/BWGS/R/cross_validation.R
# * Author: Bangyou Zheng ([email protected])
# * Created: 03:40 PM Saturday, 09 June 2018
# * Copyright: AS IS

#' @importFrom stats anova cor dist lm na.omit predict rnorm runif sd var
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @importFrom rrBLUP A.mat kin.blup
#' @importFrom BGLR BGLR
#' @importFrom glmnet cv.glmnet
#' @importFrom randomForest randomForest
#' @importFrom brnn brnn
#' @importFrom e1071 svm
NULL
/scratch/gouwar.j/cran-all/cranData/BWGS/R/import_packages.R
#' @name inra47k
#' @rdname inra47k
#' @title INRA47K
#'
#' @description inra data contains a set of geno47K (760 x 47839), pheno (760 x 1) and MAP47K (47839 x 3). The phenotype pheno contains adjusted genotype means for yield trait (YLD) over multi-year/location trials.
#'
#' @source
#' <https://forgemia.inra.fr/umr-gdec/bwgs>
NULL

#' @rdname inra47k
"MAP47K"

#' @rdname inra47k
"TARGET47K"

#' @rdname inra47k
"TRAIN47K"

#' @rdname inra47k
"YieldBLUE"
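# A quick orientation sketch (illustrative, not part of the package sources):
# `data(inra)` follows the usage shown in the bwgs.predict() example, and the
# dimensions are the ones stated in the description above.
#   data(inra)
#   dim(TRAIN47K)      # training genotypes, coded -1/0/1
#   dim(TARGET47K)     # target genotypes, same markers as TRAIN47K
#   length(YieldBLUE)  # adjusted genotype means for yield (YLD)
#   head(MAP47K)       # marker map: marker, chromosome, position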
/scratch/gouwar.j/cran-all/cranData/BWGS/R/inra47k.R
#' Computes the GEBV prediction for the target population with only genotypic data, using the options for model selection.
#'
#' Arguments not listed below are identical to those of bwgs.cv, except that pop.reduct.method, nTimes and nFolds are dropped, since the prediction is run only once, using the whole training population for model estimation, and then applied to the target population.
#'
#' @param geno_train Matrix (n x m) of genotypes for the training population: n lines with m markers. Genotypes should be coded as {-1, 0, 1, NA}. Missing data are allowed and coded as NA.
#' @param pheno_train Vector (n x 1) of phenotypes for the training population. This vector should have no missing values. Otherwise, missing values (NA) will be omitted in both pheno_train and geno_train.
#' @param geno_target Matrix (z x m) of genotypes for the target population: z lines with the same m markers as in geno_train. Genotypes should be coded as {-1, 0, 1, NA}. Missing data are allowed and coded as NA.
#' @param FIXED_train A matrix of fixed effects for the training population, to be used with some methods such as those included in BGLR. It MUST have the same row names as geno_train and be coded (-1, 0, 1).
#' @param FIXED_target A matrix of fixed effects for the target population, to be used with some methods such as those included in BGLR. It MUST have the same row names as geno_target and be coded (-1, 0, 1).
#' @param MAXNA The maximum proportion of missing values admitted for filtering marker columns in geno. Default value is 0.2.
#' @param MAF The minimum allele frequency for filtering marker columns in geno; default value is 0.05.
#' @param geno.reduct.method Allows sampling a subset of markers to speed up computing time and/or avoid introducing more noise than informative markers. Options are:
#' \itemize{
#' \item{RMR: Random sampling (without replacement) of a subset of markers. To be used with the parameter reduct.size.}
#' \item{LD (with r2 and MAP): enables "pruning" of markers which are in LD > r2. Only the marker with the fewest missing values is kept for each pair in LD > r2. To allow faster computation, r2 is estimated chromosome by chromosome, so a MAP file is required with information on marker assignment to chromosomes. The MAP file should contain at least three columns: marker_name, chromosome_name and distance_from_origin (either genetic or physical distance, only used for sorting markers, LD being re-estimated from marker data).}
#' \item{ANO (with pval): one-way ANOVAs are carried out with the R function lm on trait pheno. Markers are tested one at a time, and only markers with p-value < pval are kept for GEBV prediction.}
#' \item{ANO+LD (with pval and r2, MAP is optional): combines a first step of marker selection with ANO, then a second step of pruning using the LD option.}
#' }
#' @param reduct.size Specifies the number of markers for the genotypic reduction using RMR (reduct.size < m).
#' @param r2 Coefficient of linkage disequilibrium (LD). Set 0 < r2 < 1 if the genotypic reduction method is LD or ANO+LD.
#' @param pval p-value for the ANO method, 0 < pval < 1.
#' @param MAP A matrix with markers in rows and at least one column with colname "chrom". Used for computing r2 within linkage groups.
#' @param geno.impute.method Allows missing marker data imputation using the two methods proposed in function A.mat of package rrBLUP, namely:
#' \itemize{
#' \item{MNI: missing data are replaced by the mean allele frequency of the marker (column in geno).}
#' \item{EMI: missing data are replaced using an expectation-maximization method described in function A.mat (Endelman & Jannink 2012).}
#' }
#'
#' Default value is NULL.
#'
#' Note that these imputation methods are only suited when there are few missing values, typically in marker data from SNP chips or KASPar. They are NOT suited for imputing marker data from low-density to high-density designs, nor when there are MANY missing data, as typically provided by GBS. More sophisticated software (e.g. Beagle, Browning & Browning 2016) should be used before BWGS.
#' @param predict.method The options for genomic breeding value prediction methods. The available options are:
#' \itemize{
#' \item{GBLUP: performs G-BLUP using a marker-based relationship matrix, implemented through the BGLR R library. Equivalent to ridge regression (RR-BLUP) of marker effects.}
#' \item{EGBLUP: performs EG-BLUP, i.e. BLUP using a "squared" relationship matrix to model epistatic 2x2 interactions, as described by Jiang & Reif (2015), using the BGLR library.}
#' \item{RR: ridge regression, using package glmnet. In theory, strictly equivalent to GBLUP.}
#' \item{LASSO: the Least Absolute Shrinkage and Selection Operator is another penalized regression method, which yields more strongly shrunken estimates than RR. Run by the glmnet library.}
#' \item{EN: Elastic Net (Zou and Hastie, 2005), which is a weighted combination of RR and LASSO, using the glmnet library.}
#' }
#' Several Bayesian methods, using the BGLR library:
#' \itemize{
#' \item{BRR: Bayesian ridge regression: same as RR-BLUP, but with Bayesian estimation. Induces homogeneous shrinkage of all marker effects towards zero with a Gaussian distribution (de los Campos et al, 2013).}
#' \item{BL: Bayesian LASSO: uses an exponential prior on marker variances, leading to a double exponential distribution of marker effects (Park & Casella 2008).}
#' \item{BA: Bayes A uses a scaled-t prior distribution of marker effects (Meuwissen et al 2001).}
#' \item{BB: Bayes B uses a mixture distribution with a point mass at zero and a slab of non-zero marker effects with a scaled-t distribution (Habier et al 2011).}
#' \item{BC: Bayes C: same as Bayes B, but with a Gaussian slab.}
#' }
#' A more detailed description of these methods can be found in Perez & de los Campos 2014 (http://genomics.cimmyt.org/BGLR-extdoc.pdf).
#' Three semi-parametric methods:
#' \itemize{
#' \item{RKHS: reproducing kernel Hilbert space regression and its multiple-kernel variant MKRKHS, using BGLR (Gianola and van Kaam 2008). Based on genetic distance and a kernel function to regulate the distribution of marker effects. This method is claimed to be effective for detecting non-additive effects.}
#' \item{RF: Random forest regression, using the randomForest library (Breiman, 2001, Breiman and Cutler 2013). This method fits regression trees to bootstrapped data and is supposed to be able to capture interactions between markers.}
#' \item{SVM: support vector machine, run by the e1071 library. For details, see Chang, Chih-Chung and Lin, Chih-Jen: LIBSVM: a library for Support Vector Machines http://www.csie.ntu.edu.tw/~cjlin/libsvm}
#' \item{BRNN: Bayesian regularization for feed-forward neural networks, with the R package brnn (Gianola et al 2011). To keep computing time within reasonable limits, the parameters for the brnn function are neurons = 2 and epochs = 20.}
#' }
#'
#' @return
#' bwgs.predict returns a matrix of dimension n x 3. Columns are:
#' \itemize{
#' \item{Predict BV: the n x 1 vector of GEBVs for the target set (rows of geno_target).}
#' \item{gpredSD: standard deviation of the estimated GEBV.}
#' \item{CD: coefficient of determination for each GEBV, estimated as sqrt((1-stdev(GEBVi))^2/2g).}
#' }
#' Note that gpredSD and CD are only available for methods using the BGLR library, namely GBLUP, EGBLUP, BA, BB, BC, BL, RKHS and MKRKHS.
#' These two columns contain NA for methods RF, RR, LASSO, EN and SVM.
#' @examples
#' \donttest{
#' data(inra)
#' # Prediction using GBLUP method
#' predict_gblup <- bwgs.predict(geno_train = TRAIN47K,
#'     pheno_train = YieldBLUE,
#'     geno_target = TARGET47K,
#'     MAXNA = 0.2,
#'     MAF = 0.05,
#'     geno.reduct.method = "NULL",
#'     reduct.size = "NULL",
#'     r2 = "NULL",
#'     pval = "NULL",
#'     MAP = "NULL",
#'     geno.impute.method = "MNI",
#'     predict.method = "GBLUP")
#' }
#' @export
bwgs.predict <- function(geno_train, pheno_train, geno_target,
                         FIXED_train = "NULL", FIXED_target = "NULL",
                         MAXNA = 0.2, MAF = 0.05,
                         geno.reduct.method = "NULL", reduct.size = "NULL",
                         r2 = "NULL", pval = "NULL", MAP = NULL,
                         geno.impute.method = "NULL", predict.method = "GBLUP") {
  #(c)2015 [email protected] & [email protected]
  message("2017 BWGS - Version 1.6.0 Release date: 31/02/2017")

  start.time <- Sys.time()
  message("Start time:")
  print(start.time)
  message("")

  geno.reduct.method <- toupper(geno.reduct.method)
  geno.impute.method <- toupper(geno.impute.method)
  predict.method <- toupper(predict.method)

  #///////////////////////////////////////////////////////////////
  #STEP 0: select common lines in geno and pheno matrices,
  #        select common markers in train and target matrices,
  #        FILTER according to MAF and MAXNA
  #//////////////////////////////////////////////////////////////
  if (MAP != "NULL") {
    if (length(rownames(MAP)) == 0) {
      stop("Row names are required for MAP")
    }
    if (length(colnames(geno_train)) == 0) {
      stop("Column names are required for geno_train")
    }
    MAPPED_markers = intersect(rownames(MAP), colnames(geno_train))
    MAP = MAP[MAPPED_markers, ]
    geno_train = geno_train[, MAPPED_markers]
  }

  if (FIXED_train != "NULL") {
    genoTRFIX = intersect(rownames(FIXED_train), rownames(geno_train))
    FIXED_train = FIXED_train[genoTRFIX, ]
    geno_train = geno_train[genoTRFIX, ]
  }
  if (FIXED_target != "NULL") {
    genoTAFIX = intersect(rownames(FIXED_target), rownames(geno_target))
    FIXED_target = FIXED_target[genoTAFIX, ]
    geno_target = geno_target[genoTAFIX, ]
  }

  pheno_train = pheno_train[!is.na(pheno_train)]
  listGENO = rownames(geno_train)
  listPHENO = names(pheno_train)
  LIST = intersect(listGENO, listPHENO)
  if (length(LIST) == 0) {
    stop("NO COMMON LINE BETWEEN geno AND pheno")
  } else {
    geno_train = geno_train[LIST, ]
    pheno_train = pheno_train[LIST]
    new.pheno.size = length(LIST)
    message("Number of common lines between geno and pheno")
    print(new.pheno.size)
    if (FIXED_train != "NULL") {
      genoTRFIX = intersect(rownames(FIXED_train), rownames(geno_train))
      FIXED_train = FIXED_train[genoTRFIX, ]
      geno_train = geno_train[genoTRFIX, ]
    }
  }

  marker_train = colnames(geno_train)
  marker_target = colnames(geno_target)
  marker_common = intersect(marker_train, marker_target)
  geno_train = geno_train[, marker_common]
  geno_target = geno_target[, marker_common]
  geno <- rbind(geno_train, geno_target)
  geno_train_nrows <- nrow(geno_train)
  geno_target_nrows = nrow(geno_target)
  geno_nrows <- nrow(geno)

  if (FIXED_train != "NULL") {
    FIXED <- rbind(FIXED_train, FIXED_target)
    FIXED <- MNI(FIXED)
    FIXED <- round(FIXED)
  }

  # FILTERING THE GENOTYPING MATRIX for % NA per marker, then for MAF
  markerNA = apply(geno, 2, percentNA)
  geno = geno[, markerNA < MAXNA]
  freqSNP = apply(geno, 2, myMean)
  geno = geno[, freqSNP > MAF & freqSNP < (1 - MAF)]
  new.geno.size = dim(geno)
  message("Number of markers after filtering")
  print(new.geno.size)

  if (MAP != "NULL") {
    MAP = MAP[colnames(geno), ]
  }

  #//////////////////////////////////////////////////////////////
  #STEP 1: imputation by "MNI" or "EMI"
  #//////////////////////////////////////////////////////////////
  if ((geno.impute.method == "NULL") | (geno.impute.method == "MNI") | (geno.impute.method == "EMI")) {
    geno_impute = geno

    if (geno.impute.method == "MNI") {
      time.mni = system.time(geno_impute <- MNI(geno))
      time.mni.impute = as.numeric(round(time.mni[3]/60, digits = 2))
      message("Imputed by MNI.")
      message("Time of imputation by MNI (mins):")
      print(time.mni.impute)
      message("A part 5x20 of the imputed genotypic matrix:")
      print(geno_impute[1:5, 1:20], quote = FALSE)
      message("")
    }

    if (geno.impute.method == "EMI") {
      time.emi = system.time(geno_impute <- EMI(geno))
      time.emi.impute = as.numeric(round(time.emi[3]/60, digits = 2))
      message("Imputed by EMI.")
      message("Time of imputation by EMI (mins):")
      print(time.emi.impute)
      message("A part 5x20 of the imputed genotypic matrix:")
      print(geno_impute[1:5, 1:20], quote = FALSE)
      message("")
    }

    message("Imputation by MNI or EMI...finished.")
    geno = geno_impute
    geno_train_impute = geno[1:geno_train_nrows, ]
    geno_valid_impute = geno[(geno_train_nrows + 1):geno_nrows, ]
    if (FIXED_train != "NULL") {
      FIXED_train = FIXED[1:geno_train_nrows, ]
      FIXED_target = FIXED[(geno_train_nrows + 1):geno_nrows, ]
    }
  } else {
    stop("Please choose an impute method: NULL, MNI, EMI")
  } # If no valid impute method was chosen

  #///////////////////////////////////////////////////////////////
  #STEP 2: REDUCTION OF THE DATA
  #//////////////////////////////////////////////////////////////
  if ((geno.reduct.method == "NULL") | (geno.reduct.method == "RMR") | (geno.reduct.method == "ANO") |
      (geno.reduct.method == "LD") | (geno.reduct.method == "ANO+LD")) {

    if (geno.reduct.method == "NULL") {
      geno_shrink <- geno_impute
      message("No reduction for genomic data.")
      new.geno.size <- dim(geno_shrink)
    }

    if (geno.reduct.method == "RMR") {
      if (!is.null(reduct.size)) {
        time.rmr = system.time(geno_shrink <- RMR(geno, reduct.size))
        time.rmr.reduct = as.numeric(round(time.rmr[3]/60, digits = 2))
        new.geno.size <- dim(geno_shrink)
        message("Reduced by RMR. New genotypic data dimension:")
        print(new.geno.size)
        message("")
        message("Time of reduction by RMR (mins):")
        print(time.rmr.reduct)
      } else { stop("Please choose the size of columns for new genotypic data.") }
    }

    if (geno.reduct.method == "ANO") {
      if (!is.null(pval)) {
        time.ano = system.time(genoTrain_shrink <- ANO(pheno_train, geno_train_impute, pval))
        time.ano.reduct = as.numeric(round(time.ano[3]/60, digits = 2))
        genoTrain_shrink = genoTrain_shrink[, !is.na(colnames(genoTrain_shrink))]
        genoValid_shrink = geno_valid_impute[, colnames(genoTrain_shrink)]
        geno_shrink = rbind(genoTrain_shrink, genoValid_shrink)
        new.geno.size <- dim(geno_shrink)
        message("Reduced by ANO. New genotypic data dimension:")
        print(new.geno.size)
        message("")
        message("Time of reduction by ANO (mins):")
        print(time.ano.reduct)
      } else { stop("Please choose the p value for new genotypic data.") }
    }

    if (geno.reduct.method == "ANO+LD") {
      if (!is.null(pval)) {
        time.ano = system.time(genoTrain_shrinkANO <- ANO(pheno_train, geno_train_impute, pval))
        time.ano.reduct = as.numeric(round(time.ano[3]/60, digits = 2))
        # NOTE: the next three lines referenced undefined objects in the
        # original source (geno_shrinkANO, genoTrain_shrink, genoValid_shrink);
        # they are corrected here to use the ANO-reduced objects consistently.
        genoTrain_shrinkANO = genoTrain_shrinkANO[, !is.na(colnames(genoTrain_shrinkANO))]
        genoValid_shrinkANO = geno_valid_impute[, colnames(genoTrain_shrinkANO)]
        geno_shrinkANO = rbind(genoTrain_shrinkANO, genoValid_shrinkANO)
      } else { stop("Please choose the p value for new genotypic data.") }
      if (MAP == "NULL") {
        stop("Please choose the r2 and/or MAP for LD reduction.")
      }
      MAP2 = MAP[colnames(geno_shrinkANO), ]
      time.chrld = system.time(geno_shrink <- CHROMLD(geno_shrinkANO, R2seuil = r2, MAP = MAP2))
      time.chrld.reduct = as.numeric(round(time.chrld[3]/60, digits = 2))
      time.anold.reduct = time.ano.reduct + time.chrld.reduct
      new.geno.size <- dim(geno_shrink)
      message("Reduced by ANO + CHROMLD. New genotypic data dimension:")
      print(new.geno.size)
      message("")
      message("Time of reduction by ANO+LD (mins):")
      print(time.anold.reduct)
    } # END OF ANO+LD reduction

    if (geno.reduct.method == "LD") {
      if (MAP == "NULL") {
        stop("Please choose MAP for LD reduction.")
      }
      # NOTE: the original subset MAP by colnames(geno_shrinkANO), an object
      # that does not exist on this branch; corrected to the imputed matrix.
      MAP2 = MAP[colnames(geno_impute), ]
      time.chrld = system.time(geno_shrink <- CHROMLD(geno_impute, R2seuil = r2, MAP = MAP2))
      time.chrld.reduct = as.numeric(round(time.chrld[3]/60, digits = 2))
      new.geno.size <- dim(geno_shrink)
      message("Reduced by CHROMLD. New genotypic data dimension:")
      print(new.geno.size)
      message("")
      message("Time of reduction by CHROMLD (mins):")
      print(time.chrld.reduct)
    } # END OF LD reduction

    geno = geno_shrink
    geno_train_impute = geno[1:geno_train_nrows, ]
    geno_valid_impute = geno[(geno_train_nrows + 1):geno_nrows, ]
    if (FIXED_train != "NULL") {
      FIXED_train = FIXED[1:geno_train_nrows, ]
      FIXED_target = FIXED[(geno_train_nrows + 1):geno_nrows, ]
    }
  } else {
    stop("BWGS Warning! Please choose a valid reduct method: NULL, RMR, ANO, LD or ANO+LD.")
  }
  #End of dimension reduction for the genomic matrix

  #/////////////////////////////////////////////////////
  #STEP 3: BREEDING VALUE PREDICTION PROCESS
  #/////////////////////////////////////////////////////
  # All prediction methods share the same calling interface, so dispatch
  # through a lookup table instead of fifteen near-identical branches.
  predictors <- list(
    GBLUP = predict_GBLUP, EGBLUP = predict_EGBLUP, BA = predict_BA, BB = predict_BB,
    BC = predict_BC, BL = predict_BL, BRR = predict_BRR, RR = predict_RR,
    LASSO = predict_Lasso, EN = predict_ElasticNet, SVM = predict_SVM,
    RF = predict_RF, BRNN = predict_BRNN, RKHS = predict_RKHS, MKRKHS = predict_MKRKHS
  )
  if (!(predict.method %in% names(predictors))) {
    stop("Please choose a predict method: ", paste(names(predictors), collapse = ", "))
  }

  message("Predict by ", predict.method, "...")
  fit <- predictors[[predict.method]](phenoTrain = pheno_train,
                                      genoTrain = geno_train_impute,
                                      FixedTrain = FIXED_train,
                                      genoPred = geno_valid_impute,
                                      FixedPred = FIXED_target)
  message("Predict by ", predict.method, "...ended.")

  Results <- round(fit, digits = 5)
  message("Phenotypic estimation GEBV:")
  message("")
  print(Results)
  message("")
  end.time <- Sys.time()
  time.taken <- end.time - start.time
  message("Stop time:")
  print(end.time)
  print(time.taken)
  return(Results)
}
#END OF BWGS.PREDICT()
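# For intuition only: MNI-style imputation replaces each missing genotype with
# its marker (column) mean. A minimal standalone sketch of that idea, NOT the
# package's MNI() implementation (which is defined elsewhere in the package):
#   mni_sketch <- function(G) {
#     means <- colMeans(G, na.rm = TRUE)
#     idx <- which(is.na(G), arr.ind = TRUE)
#     G[idx] <- means[idx[, "col"]]
#     G
#   }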
/scratch/gouwar.j/cran-all/cranData/BWGS/R/predict.R
# Copyright 2016-2016 Steven E. Pav. All Rights Reserved.
# Author: Steven E. Pav

# This file is part of BWStest.
#
# BWStest is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BWStest is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with BWStest. If not, see <http://www.gnu.org/licenses/>.

#' Baumgartner Weiss Schindler test.
#'
#' @section Background:
#'
#' The Baumgartner Weiss Schindler test is a two sample test of the null
#' that the samples come from the same probability distribution, similar
#' to the Kolmogorov-Smirnov, Wilcoxon, and Cramer-von Mises tests. It is
#' similar to the Cramer-von Mises test in that it estimates the
#' square norm of the difference in CDFs of the two samples. However, the
#' Baumgartner Weiss Schindler test weights the integral by the variance
#' of the difference in CDFs, "[emphasizing] the tails of the distributions,
#' which increases the power of the test for a lot of applications."
#'
#' @section Legal Mumbo Jumbo:
#'
#' BWStest is distributed in the hope that it will be useful,
#' but WITHOUT ANY WARRANTY; without even the implied warranty of
#' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#' GNU Lesser General Public License for more details.
#'
#' @template etc
#' @template ref-bws
#' @template ref-modtests
#' @name BWStest-package
#' @rdname BWStest-package
#' @docType package
#' @title Baumgartner Weiss Schindler test of equal distributions.
#' @keywords package
#' @import Rcpp
#' @useDynLib BWStest
#' @importFrom Rcpp evalCpp
#' @importFrom stats ecdf
#' @importFrom memoise memoise
#' @exportPattern "^[[:alpha:]]+"
#'
NULL

#' @title News for package 'BWStest':
#'
#' @description
#'
#' News for package 'BWStest'
#'
#' \newcommand{\CRANpkg}{\href{https://cran.r-project.org/package=#1}{\pkg{#1}}}
#' \newcommand{\cranBWStest}{\CRANpkg{BWStest}}
#' \newcommand{\BWStest}{\href{https://github.com/shabbychef/BWStest}}
#'
#' @section \BWStest{} Version 0.2.3 (2023-10-10) :
#' \itemize{
#' \item Update doi links.
#' }
#'
#' @section \BWStest{} Version 0.2.2 (2018-10-17) :
#' \itemize{
#' \item Package maintenance--no new features.
#' }
#'
#' @section \BWStest{} Version 0.2.1 (2017-03-20) :
#' \itemize{
#' \item Package maintenance--no new features.
#' \item Move github figures to a location CRAN understands.
#' \item Package initialization mumbo jumbo, see Rcpp issue 636.
#' }
#'
#' @section \BWStest{} Version 0.2.0 (2016-04-29) :
#' \itemize{
#' \item Adding Murakami statistics.
#' }
#'
#' @section \BWStest{} Version 0.1.0 (2016-04-07) :
#' \itemize{
#' \item First CRAN release.
#' }
#'
#' @section \BWStest{} Initial Version 0.0.0 (2016-04-06) :
#' \itemize{
#' \item Start work.
#' }
#'
#' @name BWStest-NEWS
#' @rdname NEWS
NULL

#for vim modeline: (do not edit)
# vim:fdm=marker:fmr=FOLDUP,UNFOLD:cms=#%s:syn=r:ft=r
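# Quick-start sketch using the two low-level entry points documented in this
# package (bws_stat() and bws_cdf()); the data below are illustrative only:
#   set.seed(1)
#   x <- rnorm(50)
#   y <- rnorm(50, mean = 0.5)
#   b <- bws_stat(x, y)                  # BWS test statistic
#   p <- bws_cdf(b, lower_tail = FALSE)  # asymptotic p-value under the null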
/scratch/gouwar.j/cran-all/cranData/BWStest/R/BWStest.r
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' @title
#' Compute the test statistic of the Baumgartner-Weiss-Schindler test.
#'
#' @description
#'
#' Compute the Baumgartner-Weiss-Schindler test statistic.
#'
#' @details
#'
#' Given vectors \eqn{X} and \eqn{Y}, computes \eqn{B_X} and \eqn{B_Y} as
#' described by Baumgartner \emph{et al.}, returning their average, \eqn{B}.
#' The test statistic approximates the variance-weighted square norm of the
#' difference in CDFs of the two distributions. For sufficiently large sample
#' sizes (more than 20, say), under the null the test statistic approaches the
#' asymptotic value computed in \code{\link{bws_cdf}}.
#'
#' The test value is an approximation of
#' \deqn{\tilde{B} = \frac{mn}{m+n} \int_0^1 \frac{1}{z(1-z)} \left(F_X(z) - F_Y(z)\right)^2 \mathrm{dz},}
#' where \eqn{m} (\eqn{n}) is the number of elements in \eqn{X} (\eqn{Y}), and
#' \eqn{F_X(z)}{F_X(z)} (\eqn{F_Y(z)}{F_Y(z)}) is the CDF of \eqn{X} (\eqn{Y}).
#'
#' The test statistic is based only on the ranks of the input. If the same
#' monotonic transform is applied to both vectors, the result should be unchanged.
#' Moreover, the test is inherently two-sided, so swapping \eqn{X} and \eqn{Y}
#' should also leave the test statistic unchanged.
#'
#' @param x a vector.
#' @param y a vector.
#'
#' @return The BWS test statistic, \eqn{B}.
#' @seealso \code{\link{bws_cdf}}, \code{\link{bws_test}}
#' @examples
#'
#' set.seed(1234)
#' x <- runif(1000)
#' y <- runif(100)
#' bval <- bws_stat(x,y)
#' # check a monotonic transform:
#' ftrans <- function(x) { log(1 + x) }
#' bval2 <- bws_stat(ftrans(x),ftrans(y))
#' stopifnot(all.equal(bval,bval2))
#' # check commutativity
#' bval3 <- bws_stat(y,x)
#' stopifnot(all.equal(bval,bval3))
#'
#' @template etc
#' @template ref-bws
#' @rdname bws_stat
#' @export
bws_stat <- function(x, y) {
    .Call('_BWStest_bws_stat', PACKAGE = 'BWStest', x, y)
}

#' @title
#' CDF of the Baumgartner-Weiss-Schindler test under the null.
#'
#' @description
#'
#' Computes the CDF of the Baumgartner-Weiss-Schindler test statistic under the
#' null hypothesis of equal distributions.
#'
#' @details
#'
#' Given value \eqn{b}, computes the CDF of the BWS statistic under
#' the null, denoted as \eqn{\Psi(b)}{Psi(b)} by
#' Baumgartner \emph{et al.} The CDF is computed from
#' equation (2.5) via numerical quadrature.
#'
#' The expression for the CDF contains the integral
#' \deqn{\int_0^1 \frac{1}{\sqrt{r^3 (1-r)}} \mathrm{exp}\left(\frac{rb}{8} - \frac{\pi^2 (4j+1)^2}{8rb}\right) \mathrm{dr}}
#' By making the change of variables \eqn{x = 2r - 1}, this can
#' be re-expressed as an integral of the form
#' \deqn{\int_{-1}^1 \frac{1}{\sqrt{1-x^2}} f(x) \mathrm{dx},}
#' for some function \eqn{f(x)} involving \eqn{b} and \eqn{j}.
#' This integral can be approximated
#' via Gaussian quadrature using Chebyshev nodes (of the first kind), which
#' is the approach we take here.
#'
#' @param b a vector of BWS test statistics.
#' @param maxj the maximum value of j to take in the approximate computation
#' of the CDF via equation (2.5). Baumgartner \emph{et al.} claim that a
#' value of 3 is sufficient.
#' @param lower_tail boolean, when \code{TRUE} returns \eqn{\Psi}{Psi}, otherwise
#' compute the upper tail, \eqn{1-\Psi}{1 - Psi}, which is more useful for hypothesis tests.
#'
#' @return A vector of the CDF of \eqn{b}, \eqn{\Psi(b)}{Psi(b)}.
#' @seealso \code{\link{bws_stat}}, \code{\link{bws_test}}
#' @examples
#'
#' # do it 500 times
#' set.seed(123)
#' bvals <- replicate(500, bws_stat(rnorm(50),rnorm(50)))
#' pvals <- bws_cdf(bvals)
#' # these should be uniform!
#' \donttest{
#' plot(ecdf(pvals))
#' }
#'
#' # compare to Table 1 of Baumgartner et al.
#' bvals <- c(1.933,2.493,3.076,3.880,4.500,5.990)
#' tab1v <- c(0.9,0.95,0.975,0.990,0.995,0.999)
#' pvals <- bws_cdf(bvals,lower_tail=TRUE)
#' show(data.frame(B=bvals,BWS_psi=tab1v,our_psi=pvals))
#'
#' @template etc
#' @template ref-bws
#' @rdname bws_cdf
#' @export
bws_cdf <- function(b, maxj = 5L, lower_tail = TRUE) {
    .Call('_BWStest_bws_cdf', PACKAGE = 'BWStest', b, maxj, lower_tail)
}

#' @title
#' Compute Murakami's test statistic.
#'
#' @description
#'
#' Compute one of the modified Baumgartner-Weiss-Schindler test statistics proposed
#' by Murakami or Neuhauser.
#'
#' @details
#'
#' Given vectors \eqn{X} and \eqn{Y}, computes \eqn{B_{jX}} and \eqn{B_{jY}}
#' for some \eqn{j} as described by Murakami and by Neuhauser, returning either
#' their average or their average distance.
#' The test statistics approximate the weighted square norm of the
#' difference in CDFs of the two distributions.
#'
#' The test statistic is based only on the ranks of the input. If the same
#' monotonic transform is applied to both vectors, the result should be unchanged.
#'
#' The various \sQuote{flavor}s of test statistic are:
#' \describe{
#' \item{0}{The statistic of Baumgartner-Weiss-Schindler.}
#' \item{1}{Murakami's \eqn{B_1} statistic, from his 2006 paper.}
#' \item{2}{Neuhauser's difference statistic, denoted by Murakami as \eqn{B_2} in his
#' 2012 paper.}
#' \item{3}{Murakami's \eqn{B_3} statistic, from his 2012 paper.}
#' \item{4}{Murakami's \eqn{B_4} statistic, from his 2012 paper.}
#' \item{5}{Murakami's \eqn{B_5} statistic, from his 2012 paper, with a log weighting.}
#' }
#'
#' @param x a vector of the first sample.
#' @param y a vector of the second sample.
#' @param flavor which \sQuote{flavor} of test statistic.
#' @param nx the length of \code{x}, the first sample.
#' @param ny the length of \code{y}, the second sample.
#'
#' @return The BWS test statistic, \eqn{B_j}. For \code{murakami_stat_perms}, a vector of
#' the test statistics for \emph{all} permutations of the input.
#' @note \code{NA} and \code{NaN} are not yet dealt with!
#' @seealso \code{\link{bws_stat}}.
#' @examples
#'
#' set.seed(1234)
#' x <- runif(1000)
#' y <- runif(100)
#' bval <- murakami_stat(x,y,1)
#'
#' \donttest{
#' nx <- 6
#' ny <- 5
#' # monte carlo
#' set.seed(1234)
#' repli <- replicate(3000,murakami_stat(rnorm(nx),rnorm(ny),0L))
#' # under the null, perform the permutation test:
#' allem <- murakami_stat_perms(nx,ny,0L)
#' plot(ecdf(allem))
#' lines(ecdf(repli),col='red')
#' }
#'
#' @template etc
#' @template ref-bws
#' @template ref-modtests
#' @rdname murakami_stat
#' @export
murakami_stat <- function(x, y, flavor = 0L) {
    .Call('_BWStest_murakami_stat', PACKAGE = 'BWStest', x, y, flavor)
}

#' @rdname murakami_stat
#' @export
murakami_stat_perms <- function(nx, ny, flavor = 0L) {
    .Call('_BWStest_murakami_stat_perms', PACKAGE = 'BWStest', nx, ny, flavor)
}
/scratch/gouwar.j/cran-all/cranData/BWStest/R/RcppExports.R
# Copyright 2016-2016 Steven E. Pav. All Rights Reserved.
# Author: Steven E. Pav

# This file is part of BWStest.
#
# BWStest is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BWStest is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with BWStest. If not, see <http://www.gnu.org/licenses/>.

#' @title Perform the Baumgartner-Weiss-Schindler hypothesis test.
#'
#' @description
#'
#' Perform the Baumgartner-Weiss-Schindler hypothesis test.
#'
#' @param x a vector of the first sample.
#' @param y a vector of the second sample.
#' @param method a character string specifying the test statistic to use.
#' Should be one of the following:
#' \describe{
#' \item{default}{This is \dQuote{Hobson's choice}, which uses the classical
#' BWS test for the two-sided alternative, but Neuhauser for one-sided
#' alternatives.}
#' \item{BWS}{Use the classical BWS test.}
#' \item{Neuhauser}{Use Neuhauser's test.}
#' \item{B1}{Use Murakami's \eqn{B_1}{B1} test.}
#' \item{B2}{Use Murakami's \eqn{B_2}{B2} test, which is exactly Neuhauser's test.}
#' \item{B3}{Use Murakami's \eqn{B_3}{B3} test.}
#' \item{B4}{Use Murakami's \eqn{B_4}{B4} test.}
#' \item{B5}{Use Murakami's \eqn{B_5}{B5} test.}
#' }
#' Only Neuhauser's test supports one-sided alternatives.
#' @param alternative a character string specifying the alternative hypothesis,
#' must be one of \dQuote{two.sided} (default), \dQuote{greater} or
#' \dQuote{less}. You can specify just the initial letter.
#' \dQuote{greater} corresponds to testing whether the survival function
#' of \code{x} is greater than that of \code{y}; equivalently one can
#' think of this as \code{x} being \sQuote{greater} than \code{y}
#' in the sense of first order stochastic dominance.
#' @return Object of class \code{htest}, a list of the test statistic,
#' the p-value, and the \code{method} noted.
#' @keywords htest
#' @seealso \code{\link{bws_test}}, \code{\link{bws_stat}},
#' \code{\link{murakami_stat}}, \code{\link{murakami_cdf}}.
#' @template etc
#' @template ref-bws
#' @note The code will happily compute Murakami's \eqn{B_3} through \eqn{B_5}
#' for large sample sizes, even though nominal coverage is \emph{not} achieved.
#' A warning will be thrown. User assumes all risk relying on results from this
#' function.
#' @examples
#'
#' # under the null
#' set.seed(123)
#' x <- rnorm(100)
#' y <- rnorm(100)
#' hval <- bws_test(x,y)
#'
#' # under the alternative
#' set.seed(123)
#' x <- rnorm(100)
#' y <- rnorm(100,mean=1.0)
#' hval <- bws_test(x,y)
#' show(hval)
#' stopifnot(hval$p.value < 0.05)
#'
#' # under the alternative with a one sided test.
#' set.seed(123)
#' x <- rnorm(100)
#' y <- rnorm(100,mean=0.7)
#' hval <- bws_test(x,y,alternative='less')
#' show(hval)
#' stopifnot(hval$p.value < 0.01)
#'
#' hval <- bws_test(x,y,alternative='greater')
#' stopifnot(hval$p.value > 0.99)
#'
#' hval <- bws_test(x,y,alternative='two.sided')
#' stopifnot(hval$p.value < 0.05)
#'
#' @rdname bws_test
#' @export
bws_test <- function(x, y,
                     method = c('default','BWS','Neuhauser','B1','B2','B3','B4','B5'),
                     alternative = c("two.sided","greater","less")) {
  method <- match.arg(method)
  alternative <- match.arg(alternative)
  dname <- paste(deparse(substitute(x)),'vs.',deparse(substitute(y)))
  x <- x[!is.na(x)]
  y <- y[!is.na(y)]
  nx <- length(x)
  ny <- length(y)
  if (max(nx,ny) <= 8) {
    warning('A permutation test would likely make more sense.')
  } else if (min(nx,ny) <= 10) {
    warning('Small, imbalanced, sample size may cause loss of nominal coverage.')
  }
  if (method == 'Neuhauser') { method <- 'B2' }  # for simplicity
  if (alternative == 'two.sided') {
    if (method == 'default') { method <- 'BWS' }
    switch(method,
           BWS={
             method <- "two-sample BWS test"
             bval <- bws_stat(x,y)
             names(bval) <- "B"
             pval <- bws_cdf(bval,lower_tail=FALSE)
           },
           { # the default case is parametrized by the flavor number.
             flavor <- as.numeric(gsub('^B(\\d)$','\\1',method))
             method <- "two-sample Murakami test"
             bval <- murakami_stat(x,y,flavor=flavor)
             names(bval) <- sprintf('B_%d',flavor)
             pval <- murakami_cdf(bval,n1=nx,n2=ny,flavor=flavor,lower_tail=FALSE)
           })
  } else {
    if (method == 'default') { method <- 'B2' }
    stopifnot(method %in% c('B2'))
    method <- "two-sample Neuhauser/Murakami test"
    switch(alternative,
           greater={
             bval <- murakami_stat(x,y,flavor=2L)
             pval <- murakami_cdf(bval,n1=nx,n2=ny,flavor=2L)
           },
           less={
             bval <- murakami_stat(y,x,flavor=2L)
             pval <- murakami_cdf(bval,n1=ny,n2=nx,flavor=2L)
           })
    names(bval) <- "B_2"
  }
  zeta <- 0
  names(zeta) <- "difference in survival functions"
  if ((method %in% c('B3','B4','B5')) && (max(nx,ny) > 12)) {
    warning('Nominal coverage for B3 through B5 is *not* achieved for larger sample sizes. Use at your own risk!')
  }
  retval <- list(statistic = bval,
                 p.value = pval,
                 alternative = alternative,
                 null.value = zeta,
                 method = method,
                 data.name = dname)
  class(retval) <- "htest"
  return(retval)
}

#for vim modeline: (do not edit)
# vim:fdm=marker:fmr=FOLDUP,UNFOLD:cms=#%s:syn=r:ft=r
/scratch/gouwar.j/cran-all/cranData/BWStest/R/bws_test.r
# Copyright 2016-2016 Steven E. Pav. All Rights Reserved.
# Author: Steven E. Pav

# This file is part of BWStest.
#
# BWStest is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BWStest is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with BWStest. If not, see <http://www.gnu.org/licenses/>.

# create an empirical CDF function that takes ties into account.
.murakami_ecdf <- function(n1,n2,flavor) {
  allv <- murakami_stat_perms(n1,n2,flavor)
  e1 <- stats::ecdf(allv)
  e2 <- stats::ecdf(-allv)
  function(B) {
    ple <- e1(B)    # P(T <= B)
    pge <- e2(-B)   # P(T >= B)
    # average the two to get the mid-distribution function,
    # which splits any point mass from ties evenly
    retv <- 0.5 * (ple + (1 - pge))
    retv
  }
}
.murakami_memo_ecdf <- memoise::memoise(.murakami_ecdf)

.murakami_practical_cdf <- function(B,n1,n2,flavor) {
  CUTOFF <- 11
  if ((flavor > 2) && (max(n1,n2) > CUTOFF + 1)) {
    warning('Nominal coverage for B3 through B5 is not achieved for larger sample sizes. Use at your own risk!')
  }
  ecdf <- .murakami_memo_ecdf(min(CUTOFF,n1),min(CUTOFF,n2),flavor)
  ecdf(B)
}

# .murakami_memo_stats <- memoise::memoise(murakami_stat_perms)

#' @title Murakami test statistic distribution.
#'
#' @description
#'
#' Estimates the CDF of the Murakami test statistics via permutations.
#'
#' @details
#'
#' Given the Murakami test statistic \eqn{B_j} for \eqn{0 \le j \le 5}{0 <= j <= 5},
#' computes the CDF under the null that the two samples come from the same
#' distribution. The CDF is computed by permutation test and memoization.
#'
#' @param B the Murakami test statistic or a vector of the same.
#' @param n1 number of elements in the first sample.
#' @param n2 number of elements in the second sample.
#' @param flavor the 'flavor' of the test statistic. See
#' \code{\link{murakami_stat}}.
#' @param lower_tail boolean, when \code{TRUE} returns the CDF, \eqn{\Psi}{Psi}, otherwise
#' compute the upper tail, \eqn{1-\Psi}{1 - Psi}, which is potentially more useful for hypothesis tests.
#' @return a vector of the same size as \code{B} of the CDF under the null.
#' @seealso \code{\link{murakami_stat}}.
#' @template etc
#' @template ref-bws
#' @template ref-modtests
#' @note the CDF is approximately computed by evaluating the permutations up to
#' some reasonably small sample size (currently the cutoff is 11). When larger
#' sample sizes are used, the distribution of the test statistic may not
#' converge. This is apparently seen in flavors 3 through 5.
#' @examples
#'
#' # basic usage:
#' xv <- seq(0,4,length.out=101)
#' yv <- murakami_cdf(xv, n1=8, n2=6, flavor=1L)
#' plot(xv,yv)
#' zv <- bws_cdf(xv)
#' lines(xv,zv,col='red')
#'
#' # check under the null:
#' \donttest{
#' flavor <- 1L
#' n1 <- 8
#' n2 <- 8
#' set.seed(1234)
#' Bvals <- replicate(2000,murakami_stat(rnorm(n1),rnorm(n2),flavor))
#' # should be uniform:
#' plot(ecdf(murakami_cdf(Bvals,n1,n2,flavor)))
#' }
#'
#' @rdname murakami_cdf
#' @export
murakami_cdf <- function(B, n1, n2, flavor=0L, lower_tail=TRUE) {
  # errors on flavor can come later, but this is important here:
  stopifnot(n1 > 0, n2 > 0)
  retv <- .murakami_practical_cdf(B,n1,n2,flavor)
  if (!lower_tail) {
    retv <- 1 - retv
  }
  return(retv)
}

#for vim modeline: (do not edit)
# vim:fdm=marker:fmr=FOLDUP,UNFOLD:cms=#%s:syn=r:ft=r
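# The averaging inside .murakami_ecdf() computes the "mid-distribution
# function" 0.5 * (P(T <= b) + P(T < b)), which places half weight on any point
# mass and so behaves sensibly when the permutation distribution has ties.
# A standalone sketch of the same idea (illustrative, not used by the package):
#   mid_cdf <- function(samples) {
#     p.le <- stats::ecdf(samples)                       # P(T <= b)
#     p.lt <- function(b) 1 - stats::ecdf(-samples)(-b)  # P(T <  b)
#     function(b) 0.5 * (p.le(b) + p.lt(b))
#   }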
/scratch/gouwar.j/cran-all/cranData/BWStest/R/zzz_murakami_dist.r
#' actuarial
#'
#' Actuarial claims data for three groups of insurance policyholders, p. 449.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{year}
#' \item\code{payroll} for groups 1, 2, and 3
#' \item\code{claims} for groups 1, 2, and 3
#' }
#'
#' @usage data(actuarial)
#' @format dataset with 5 observations of 7 variables
#' @source Scollnik, D. P. M. (2001). Actuarial Modeling with MCMC and BUGS. North American Actuarial Journal 5, 95-124.
#' @name actuarial
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/Actuarial.R
#' DA_cwp
#'
#' Data on ancient Chinese wars
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{X1}
#' \item\code{CHLEG010}
#' \item\code{LEGHUANG}
#' \item\code{X.2697}
#' \item\code{X.2697.1}
#' \item\code{X2}
#' \item\code{X1.1}
#' \item\code{X1.2}
#' \item\code{X0}
#' \item\code{X0.1}
#' \item\code{X2.1}
#' \item\code{X3}
#' \item\code{X2.2}
#' \item\code{X3.1}
#' \item\code{X2.3}
#' }
#'
#' @name DA_cwp
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/DA_cwp.R
#' recidivism
#'
#' @description Recidivism rates. See page 188.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{Crime.Type} The type of crime committed
#' \item\code{Released} The number of individuals released from a facility
#' \item\code{Returned} The number of individuals returned to a facility
#' \item\code{Percentage} (The number of individuals returned to a facility)/(The number of individuals released from a facility)
#' }
#'
#' @usage data(recidivism)
#' @name recidivism
#' @format data frame with 27 observations of different crime types with 5 explanatory variables
#' @source state-level recidivism data as collected by the Oklahoma Department of Corrections from January 1, 1985 to June 30, 1999
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/Recidivism-data.R
#' adam.jags
#'
#' @description Data from Differences in the Validity of Self-Reported Drug Use Across Five
#' Factors in Indianapolis, Fort Lauderdale, Phoenix, and Dallas, 1994 (ICPSR Study
#' Number 2706, Rosay and Herz (2000)), from the Arrestee Drug Abuse Monitoring (ADAM)
#' Program/Drug Use Forecasting, ICPSR Study Number 2826. The original purpose of the study
#' was to understand the accuracy of self-reported drug use, which is a difficult problem for obvious reasons.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{AGEGRP} 1 for 1,700 cases 18 through 30 years old, 2 for 1,265 cases 31 years old or over
#' \item\code{CASES}
#' \item\code{CATS}
#' \item\code{COCSELF} indicating self-reported cocaine usage prior to arrest (0 for 2,220 negative responses, 1 for 745 positive responses)
#' \item\code{COCTEST}
#' \item\code{COVARS}
#' \item\code{GROUP}
#' \item\code{ID}
#' \item\code{MJSELF}
#' \item\code{MJTEST} a dichotomous variable indicating a positive urine test for marijuana
#' \item\code{OFFENSE}
#' \item\code{RACE} 1 for 1,554 black cases, 2 for 1,411 white cases
#' \item\code{SEX} 1 for 2,213 male cases, 2 for 752 female cases
#' \item\code{SITE} coded according to: Indianapolis = 1 (759 cases), Ft. Lauderdale = 2 (974 cases), Phoenix = 3 (646 cases), and Dallas = 4 (586 cases)
#' }
#'
#' @usage data(adam.jags)
#' @aliases AGEGRP CASES CATS COCSELF COCTEST COVARS GROUP ID MJSELF MJTEST OFFENSE RACE SEX SITE
#' @name adam.jags
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/adam.jags-data.R
#' afghan.deaths
#'
#' NATO fatalities in Afghanistan, 10/01 to 1/07. See page 350.
#'
#' @usage data(afghan.deaths)
#' @format 52 monthly periods, listed by rows
#' @name afghan.deaths
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/afghan.deaths.R
#' africa
#'
#' African Coups Data, pp. 562-564
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{MILTCOUP} Military coups
#' \item\code{MILITARY} Military oligarchy
#' \item\code{POLLIB} Political liberalization: 0 for no observable civil rights for political expression, 1 for limited, and 2 for extensive
#' \item\code{PARTY93} Number of legally registered political parties
#' \item\code{PCTVOTE} Percent legislative voting
#' \item\code{PCTTURN} Percent registered voting
#' \item\code{SIZE} In one thousand square kilometer units
#' \item\code{POP} Population in millions
#' \item\code{NUMREGIM} Regime
#' \item\code{NUMELEC} Election
#' }
#'
#' @usage data(africa)
#' @format data frame with 33 observations of different African countries' military coups with 7 explanatory variables
#' @source Bratton, M. and Van De Walle, N. (1994). Neopatrimonial Regimes and Political Transitions in Africa. World Politics 46, 453-489.
#' @name africa
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/africa.R
#' asap.data.list
#'
#' @description The American State Administrator's Project (ASAP) survey asks administrators about the influence of a variety of external political actors, including "clientele groups", in their agencies. See page 395.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{contracting} scale from 0 to 6, where higher indicates more private contracting within the respondent's agency.
#' \item\code{gov.incluence} respondents' assessment of the governor's influence on contracting in their agency.
#' \item\code{leg.influence} respondents' assessment of the legislature's influence on contracting in their agency, ranging from 0 to 21.
#' \item\code{elect.board} dichotomous variable coded 1 if appointed by a board, a commission or elected, and 0 otherwise.
#' \item\code{years.tenure} number of years that the respondent has worked at their current agency.
#' \item\code{education} ordinal variable for level of education possessed by the respondent.
#' \item\code{partisan.ID} a 5-point ordinal variable (1-5) for the respondent's partisanship (strong Democrat to strong Republican).
#' \item\code{category} categories of agency type.
#' \item\code{med.time} whether the respondent spent more or less than the sample median with representatives of interest groups.
#' \item\code{medt.contr} interaction variable between med.time and contracting.
#' \item\code{gov.ideology} state government ideology from Berry et al. (1998), from 0 to 100.
#' \item\code{lobbyists} total state lobbying registrants in 2000-01 from Gray and Lowery (1996, 2001).
#' \item\code{nonprofits} provides the total number of nonprofit groups in the respondents' state in the year 2008, divided by 10,000.
#' }
#'
#' @usage data(asap.data.list)
#' @name asap.data.list
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/asap.data.list-data.R
#' Baldus Dataset
#'
#' Data from the Baldus study on death sentences in Georgia (Exercise 14.2, p. 521).
#' To use the data in JAGS or WinBUGS, see \code{baldus.jags} and \code{baldus.winbugs}, respectively.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{race} Defendant's race (1 = Black)
#' \item\code{educatn} Educational level
#' \item\code{employm} Employment status (1 = Employed)
#' \item\code{SES} Socioeconomic status (1 = Low Wage)
#' \item\code{married} Marital status (1 = Married)
#' \item\code{num.chld} Number of children
#' \item\code{military} Military experience (1 = Serving, 0 = No military service, -1 = Dishonorable Discharge)
#' \item\code{pr.arrst}
#' \item\code{plea} Plea to murder indictment
#' \item\code{sentence} Sentence
#' \item\code{defense} Status of principal defense counsel (1 = Retained, 2 = Appointed)
#' \item\code{dp.sght} Prosecutor waived/failed to seek DP (1 = Failed/Unknown, 2 = Sought DP)
#' \item\code{jdge.dec} Judge took sentence from jury?
#' \item\code{pen.phse} Was there a penalty trial?
#' \item\code{did.appl} Did defendant appeal conviction or sentence?
#' \item\code{out.appl} Outcome of appeal
#' \item\code{vict.sex} Victim's sex
#' \item\code{pr.incrc}
#' \item\code{vict.age} Victim's age
#' \item\code{vict.rel} Relation of victim to defendant
#' \item\code{vict.st1} Victim status (0 = Non-police/judicial, 1 = Police/judicial)
#' \item\code{vict.st2}
#' \item\code{specialA} Special circumstances
#' \item\code{methodA} Method of killing
#' \item\code{num.kill} Number of persons killed by defendant
#' \item\code{num.prps} Number of persons killed by co-perpetrator
#' \item\code{def.age} Defendant's age
#' \item\code{aggrevat} Aggravating circumstances
#' \item\code{bloody} Bloody crime
#' \item\code{fam.lov}
#' \item\code{insane} Defendant invoked insanity defense
#' \item\code{mitcir}
#' \item\code{num.depr}
#' \item\code{rape} Rape involved
#'}
#'
#' @usage data(baldus)
#' @seealso baldus.jags baldus.winbugs
#' @source Baldus, D. C., Pulaski, C., & Woodworth, G. (1983). Comparative review of death sentences: An empirical study of the Georgia experience. The Journal of Criminal Law and Criminology (1973-), 74(3), 661-753.
#' @name baldus
#' @aliases baldus.jags baldus.winbugs race educatn employm SES married num.chld pr.arrst plea sentence dp.sght jdge.dec pen.phse defense did.appl out.appl vict.age vict.rel vict.st1 vict.st2 specialA methodA num.kill num.prps def.age aggrevat bloody fam.lov insane mitcir num.depr rape vict.sex pr.incrc
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/baldus-data.R
#' bcp
#'
#' Implementation of the bcp (Bayesian change point) Gibbs sampler, see pages 362-363 (2nd Edition).
#'
#' @param theta.matrix matrix to store the chain; its first row holds starting values for (lambda, phi, k)
#' @param y Counts of coal mining disasters
#' @param a Alpha value in the lambda prior
#' @param b Beta value in the lambda prior
#' @param g Gamma value in the phi prior
#' @param d Delta value in the phi prior
#'
#' @author Jeff Gill
#' @importFrom stats rgamma
#' @examples
#' \dontrun{
#' bcp(theta.matrix,y,a,b,g,d)
#' }
#' @export
bcp <- function(theta.matrix,y,a,b,g,d)   {
    n <- length(y)
    k.prob <- rep(0,length=n)
    for (i in 2:nrow(theta.matrix))  {
        # draw lambda and phi conditional on the current change point k
        lambda <- rgamma(1,a+sum(y[1:theta.matrix[(i-1),3]]),
                b+theta.matrix[(i-1),3])
        phi <- rgamma(1,g+sum(y[theta.matrix[(i-1),3]:n]),
                d+length(y)-theta.matrix[(i-1),3])
        # draw the change point k from its normalized full conditional
        for (j in 1:n) k.prob[j] <- exp(j*(phi-lambda))*
                (lambda/phi)^sum(y[1:j])
        k.prob <- k.prob/sum(k.prob)
        k <- sample(1:n,size=1,prob=k.prob)
        theta.matrix[i,] <- c(lambda,phi,k)
    }
    return(theta.matrix)
}
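# A hedged usage sketch (not from the book): run the sampler on the
# coal.mining.disasters series. The starting values and the Gamma
# hyperparameters below are assumptions chosen only for demonstration.
# data(coal.mining.disasters)
# y <- coal.mining.disasters
# theta.matrix <- matrix(NA, nrow = 1000, ncol = 3)
# theta.matrix[1, ] <- c(1, 1, 40)           # starting values: lambda, phi, k
# out <- bcp(theta.matrix, y, a = 1, b = 1, g = 1, d = 1)
# apply(out[501:1000, ], 2, mean)            # posterior means after burn-in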
/scratch/gouwar.j/cran-all/cranData/BaM/R/bcp.R
#' biv.norm.post
#'
#' A function to calculate posterior quantities of the bivariate normal. See page 94.
#'
#' @usage biv.norm.post(data.mat,alpha,beta,m,n0=5)
#'
#' @param data.mat A matrix with two columns of normally distributed data
#' @param alpha Wishart first (scalar) parameter
#' @param beta Wishart second (matrix) parameter
#' @param m prior mean for mu
#' @param n0 prior confidence parameter
#'
#' @return Returns
#' \item{mu1}{posterior mean, dimension 1}
#' \item{mu2}{posterior mean, dimension 2}
#' \item{sig1}{posterior variance, dimension 1}
#' \item{sig2}{posterior variance, dimension 2}
#' \item{rho}{posterior covariance across dimensions}
#' @author Jeff Gill
#' @importFrom stats var rchisq rnorm
#' @examples
#'
#' data.n10 <- rmultinorm(10, c(1,3), matrix(c(1.0,0.7,0.7,3.0),2,2))
#' rep.mat <- NULL; reps <- 1000
#' for (i in 1:reps){
#'   rep.mat <- rbind(rep.mat, biv.norm.post(data.n10,3, matrix(c(10,5,5,10),2,2),c(2,2)))
#' }
#' round(normal.posterior.summary(rep.mat),3)
#'
#' @export
biv.norm.post <- function(data.mat,alpha,beta,m,n0=5)  {
    # the rwishart function below is borrowed from the (now archived) dlm package
    rwishart <- function(df, p = nrow(SqrtSigma), SqrtSigma = diag(p)) {
        if((Ident <- missing(SqrtSigma)) && missing(p))
            stop("either p or SqrtSigma must be specified")
        Z <- matrix(0, p, p)
        diag(Z) <- sqrt(rchisq(p, df:(df-p+1)))
        if(p > 1) {
            pseq <- 1:(p-1)
            Z[rep(p*pseq, pseq) + unlist(lapply(pseq, seq))] <- rnorm(p*(p-1)/2)
        }
        if(Ident) crossprod(Z)
        else crossprod(Z %*% SqrtSigma)
    }
    n <- nrow(data.mat)
    xbar <- apply(data.mat,2,mean)
    S2 <- (n-1)*var(data.mat)
    # posterior Wishart parameter, then draws of Sigma and mu
    Wp.inv <- solve(beta)+S2+((n0*n)/(n0+n))*(xbar-m)%*%t(xbar-m)
    Sigma <- solve(rwishart(alpha+n,SqrtSigma=chol(solve(Wp.inv))))
    mu <- rmultinorm(1, (n0*m + n*xbar)/(n0+n), Sigma/(n0+n))
    return(c(mu1=mu[1],mu2=mu[2],sig1=Sigma[1,1],
        sig2=Sigma[2,2],rho=Sigma[2,1]))
}
/scratch/gouwar.j/cran-all/cranData/BaM/R/biv.norm.post.R
#' cabinet.duration
#'
#' Cabinet duration (constitutional inter-election period) for eleven Western European countries from 1945 to 1980. See page 65.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{N} number of cabinets
#' \item\code{dur} average length of duration
#'}
#'
#' @note Row names indicate country.
#' @usage cabinet.duration
#' @format cabinet duration of 11 countries
#' @references Browne, E. C., Frendreis, J. P., and Gleiber, D. W. (1986). The Process of Cabinet Dissolution: An Exponential Model of Duration and Stability in Western Democracies. American Journal of Political Science 30, 628-650.
#' @name cabinet.duration
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/cabinet.duration.R
#' child
#'
#' Child support collection policies for 50 states, 1982-1991. See page 166.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{SCCOLL} Change in child support collections
#' \item\code{ACES} Chapters per population
#' \item\code{INSTABIL} Policy instability
#' \item\code{AAMBIG} Policy ambiguity
#' \item\code{CSTAFF} Change in agency staffing
#' \item\code{ARD} State divorce rate
#' \item\code{ASLACK} Organizational slack
#' \item\code{AEXPEND} State-level expenditures
#'}
#'
#' @usage child
#' @format observations of 8 variables for 50 states
#' @source Meier, K.J. and Keisler, L.R. (1996). Public Administration as a Science of the Artificial: A Method for Prescription. Public Administration Review 56, 459-466.
#' @name child
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/child.R
#' china.wars
#'
#' Modeling code for the example of ancient Chinese wars. See pages 163-165.
#'
#' @author Jeff Gill
#' @source Claudio Cioffi-Revilla and David Lai, 2001, \cr
#' "Chinese Warfare and Politics in the Ancient East Asian International System",\cr
#' Download from <doi:10.1080/03050620108434971> \cr
#' Henry A. Murray Research Archive \cr
#' Center for International Relations, Department of Political Science, University of Colorado, Boulder, USA
#' @importFrom stats lm
#' @importFrom mice mice complete
#' @export
china.wars <- function(){
  # load the DA_cwp data into the function environment
  # (the NULL assignment quiets R CMD check)
  DA_cwp <- NULL
  utils::data("DA_cwp", envir = environment())
  wars <- cbind(as.matrix(DA_cwp[,4:15]))
  # multiply impute the missing values, then stack the five completed datasets
  imp.wars <- mice(wars,m=5)
  complete.wars <- array(c(as.matrix(complete(imp.wars,1)),
                           as.matrix(complete(imp.wars,2)),
                           as.matrix(complete(imp.wars,3)),
                           as.matrix(complete(imp.wars,4)),
                           as.matrix(complete(imp.wars,5))),dim=c(104,12,5))
  wars <- complete.wars[,,1]   # REPEAT FOR ARRAY DIMENSIONS 2-5
  table(wars[,8:9])
  wars <- cbind(wars,"SCOPE"=log( 10*wars[,8] + wars[,9]))
  wars <- cbind(wars, "DURATION" = 1 + wars[,2]-wars[,1])
  dimnames(wars) <- list(NULL,c("ONSET","TERM","EXTENT","ETHNIC","DIVERSE","ALLIANCE","DYADS","POL.LEV","COMPLEX","POLAR","BALANCE","TEMPOR","SCOPE","DURATION"))
  wars <- data.frame(wars)
  wars.lm <- lm(SCOPE ~ -1 + EXTENT + DIVERSE + ALLIANCE + DYADS + TEMPOR + DURATION, data=wars)
  summary(wars.lm)
}

## alternative1: a Gibbs sampler for the same model with heteroscedastic errors
#data(wars)
#X <- cbind(wars$EXTENT, wars$DIVERSE, wars$ALLIANCE, wars$DYADS, wars$TEMPOR, wars$DURATION)
#y <- as.numeric(wars$SCOPE)
#n <- nrow(X)
#k <- ncol(X)
#nu <- 5
#num.sims <- 100000
#war.samples <- matrix(NA,nrow=num.sims,(ncol=k+n+1))
#beta <- rep(1,ncol(X)); sigma.sq <- 3; Omega <- 3*diag(n)
#for (i in 1:num.sims)  {
#   b <- solve(t(X) %*% X) %*% t(X) %*% y
#   b.star <- solve(t(X) %*% Omega %*% X) %*% t(X) %*% Omega %*% y
#   s.sq.star <- t(y-X%*%b) %*% solve(Omega) %*% (y-X%*%b)
#   u <- y - X %*% beta
#   beta <- as.vector( rmultinorm(1, b.star, sigma.sq *solve(t(X) %*% solve(Omega) %*% X) ) )
#   sigma.sq <- 1/rgamma(1, shape=(n-1)/2, rate=s.sq.star/2 )
#   for (j in 1:n) Omega[j,j] <- 1/rgamma(1, shape=(nu+1)/2, rate=((sigma.sq^(-1))*u[j]^2 + nu)/2 )
#   war.samples[i,] <- c(beta,sigma.sq,diag(Omega))
#   if(i %% 100 == 0) print(i)
#}
#start <- 5001; stop <- num.sims
#round( cbind( apply(war.samples[start:num.sims,1:(k+1)],2,mean),
#   apply(war.samples[start:num.sims,1:(k+1)],2,sd),
#   apply(war.samples[start:num.sims,1:(k+1)],2,mean) - 1.96*apply(war.samples[start:num.sims,1:(k+1)],2,sd),
#   apply(war.samples[start:num.sims,1:(k+1)],2,mean) + 1.96*apply(war.samples[start:num.sims,1:(k+1)],2,sd) ) ,4)
/scratch/gouwar.j/cran-all/cranData/BaM/R/china.wars.R
#' coal.mining.disasters
#'
#' A vector of British coal mining disasters, see pages 549-550.
#'
#' @usage coal.mining.disasters
#' @format vector of length 111
#' @name coal.mining.disasters
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/coal.mining.disasters.R
#' contracep #' #' Contraception Data by country. See page 446 #' #' The variables included in the dataset are: #' \itemize{ #' \item\code{Country} Developing countries by size #' \item\code{URC} Rural Childhood #' \item\code{WED} Years of Education for the Woman #' \item\code{FPE} Exposure to Family Planning Efforts #' \item\code{WED.FPE} Interaction term specified by Wong and Mason #'} #' #' @usage data(contracep) #' @format 4 variables for 15 countries #' @source Wong, G. Y. and Mason, W. M. (1985). The Hierarchical Logistic Regression Model for Multilevel Analysis. Journal of the American Statistical Association 80, 513-524. #' @name contracep #' @docType data NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/contracep.R
#' dmultinorm
#'
#' dmultinorm function to evaluate the bivariate normal density, see page 376.
#'
#' @usage dmultinorm(xval,yval,mu.vector,sigma.matrix)
#'
#' @param xval Vector of X random variables
#' @param yval Vector of Y random variables
#' @param mu.vector Mean vector
#' @param sigma.matrix 2x2 matrix with the standard deviations on the diagonal and the correlation in the off-diagonal positions
#'
#' @author Jeff Gill
#' @export
dmultinorm <- function(xval,yval,mu.vector,sigma.matrix)  {
    # bivariate normal density parameterized by standard deviations and correlation
    normalizer <- (2*pi*sigma.matrix[1,1]*sigma.matrix[2,2]
        *sqrt(1-sigma.matrix[1,2]^2))^(-1)
    like <- exp(-(1/(2*(1-sigma.matrix[1,2]^2)))* (
        ((xval-mu.vector[1])/sigma.matrix[1,1])^2
        -2*sigma.matrix[1,2]*(((xval-mu.vector[1])/sigma.matrix[1,1])*
        ((yval-mu.vector[2])/sigma.matrix[2,2]))
        +((yval-mu.vector[2])/sigma.matrix[2,2])^2 ))
    normalizer*like
}
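# A hedged usage sketch (not from the book), mirroring the hit.run example:
# evaluate the density over a grid and draw contours. Unit standard deviations
# with correlation 0.5 are assumptions chosen only for illustration.
# sig <- matrix(c(1, 0.5, 0.5, 1), 2, 2)
# ruler <- seq(-3, 3, length = 100)
# z <- outer(ruler, ruler, FUN = dmultinorm,
#            mu.vector = c(0, 0), sigma.matrix = sig)
# contour(ruler, ruler, z)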
/scratch/gouwar.j/cran-all/cranData/BaM/R/dmultinorm.R
#' dp
#'
#' Death Penalty Data. See page 142.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{X} State
#' \item\code{EXECUTIONS} Number of capital punishments at state level in 1997
#' \item\code{INCOME} Median per capita income in dollars
#' \item\code{PERPOVERTY} Percent classified as living in poverty
#' \item\code{PERBLACK} Percent of black citizens in population
#' \item\code{VC100k96} Rate of violent crime per 100,000 residents for 1996
#' \item\code{SOUTH} Is the state in the South?
#' \item\code{PROPDEGREE} Proportion of population with college degree
#'}
#'
#' @usage data(dp)
#' @format a state identifier and 7 explanatory variables for 17 states
#' @source Norrander, B. (2000). The Multi-Layered Impact of Public Opinion on Capital Punishment Implementation in the American States. Political Research Quarterly 53, 771-793.
#' @name dp
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/dp.R
#' durations.hpd
#'
#' Simple HPD calculator from Chapter 2 (page 51, 2nd Edition).
#'
#' @usage durations.hpd(support,fn.eval,start,stop,target=0.90,tol=0.01)
#'
#' @param support x-axis values
#' @param fn.eval function values at x-axis points
#' @param start starting point in the vectors
#' @param stop stopping point in the vectors
#' @param target desired coverage level of the HPD interval
#' @param tol tolerance for round-off
#'
#' @author Jeff Gill
#' @importFrom stats pgamma
#' @examples
#' \dontrun{
#' get("cabinet.duration")
#' ruler <- seq(0.45,0.75,length=10000)
#' g.vals <- round(dgamma(ruler,shape=sum(cabinet.duration$N),
#'       rate=sum(cabinet.duration$N*cabinet.duration$dur)),2)
#' start.point <- 1000; stop.point <- length(g.vals)
#' durations.hpd(ruler,g.vals,start.point,stop.point)
#' }
#'
#' @export
durations.hpd <- function(support,fn.eval,start=1,stop=length(support),target=0.90,tol=0.01)  {
    cabinet.duration <- get("cabinet.duration")
    done <- FALSE; i <- start
    # scan pairs of support points with equal density values until the
    # probability mass enclosed between them is within tol of the target
    while (i < stop & done == FALSE)  {
        j <- length(fn.eval)/2
        while (j <= stop & done == FALSE)  {
            if (fn.eval[i] == fn.eval[j])  {
                L <- pgamma(support[i],shape=sum(cabinet.duration$N),rate=sum(cabinet.duration$N*cabinet.duration$dur))
                H <- pgamma(support[j],shape=sum(cabinet.duration$N),rate=sum(cabinet.duration$N*cabinet.duration$dur))
                if (((H-L)<(target+tol)) & ((H-L)>(target-tol))) done <- TRUE
            }
            j <- j+1
        }
        i <- i+1
    }
    return(c(k=fn.eval[i], HPD.L=support[i], HPD.U=support[j]))
}
/scratch/gouwar.j/cran-all/cranData/BaM/R/durations.hpd.R
#' elicspend #' #' Eliciting expected campaign spending data. Eight campaign experts are queried for quantiles at levels #' m = [0.1, 0.5, 0.9], and they provide the following values reflecting the national range #' of expected total intake by Senate candidates (in thousands). See page 120 #' #' @usage data(elicspend) #' @name elicspend #' @docType data NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/elicspend.R
#' ethnic.immigration
#'
#' 1990-1993 Western Europe ethnic/minority populations. See page 280.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{Country.of.Origin} Country of origin of immigrants
#' \item\code{Estimated.Total.K.} Estimated total ethnic minority population in Western European countries
#' \item\code{Percent.of.Total} Percent of total
#'}
#'
#' @usage data(ethnic.immigration)
#' @format total number of ethnic immigrants living in Western Europe from 22 countries
#' @source Peach, C. (1997). Postwar Migration to Europe: Reflux, Influx, Refuge. Social Science Quarterly 78, 269-283.
#' @name ethnic.immigration
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/ethnic.immigration.R
#' @name executions #' @title executions #' @description Execution data. #' #' The variables included in the dataset are: #' \itemize{ #' \item\code{State} State #' \item\code{EXECUTIONS} Number of capital punishments at state level in 1997 #' \item\code{Median.Income} Median per capita income in dollars #' \item\code{Percent.Poverty} Percent classified as living in poverty #' \item\code{Percent.Black} Percent of black citizens in population #' \item\code{Violent.Crime} Rate of violent crime per 100,000 residents for 1996 #'} #' #' @format explanatory variables for 17 states #' @docType data #' @usage data(executions) NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/executions-data.R
#' @name experts
#' @title Campaign fundraising elicitations
#' @description Fabricated data on campaign fundraising elicitations. See page 120.
#' @usage experts(q1,q2,q3)
#' @param q1 the 0.1 quantile
#' @param q2 the 0.5 quantile
#' @param q3 the 0.9 quantile
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/experts-data.R
#' expo.gibbs
#'
#' Simple Gibbs sampler demonstration on conditional exponentials from Chapter 1 (pages 25-27).
#'
#' @usage expo.gibbs(B,k,m)
#'
#' @param B an upper bound
#' @param k length of the subchains
#' @param m number of iterations
#'
#' @author Jeff Gill
#' @importFrom stats runif rexp
#' @export
expo.gibbs <- function(B=5, k=15, m=5000)  {
    x <- y <- NULL
    while (length(x) < m)  {
        # start each subchain at a uniform draw; the (B+1) filler values
        # force entry into the truncation loops below
        x.val <- c(runif(1,0,B),rep((B+1),length=k))
        y.val <- c(runif(1,0,B),rep((B+1),length=k))
        for (j in 2:(k+1))  {
            # sample each conditional exponential, rejecting draws above B
            while(x.val[j] > B) x.val[j] <- rexp(1,y.val[j-1])
            while(y.val[j] > B) y.val[j] <- rexp(1,x.val[j])
        }
        # keep only the final draw of each subchain
        x <- c(x,x.val[(k+1)])
        y <- c(y,y.val[(k+1)])
    }
    return(cbind(x,y))
}
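# A hedged usage sketch (not from the book): draw 1000 pairs and inspect the
# joint sample; the argument values shown are the function defaults.
# draws <- expo.gibbs(B = 5, k = 15, m = 1000)
# summary(draws)
# plot(draws, pch = ".")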
/scratch/gouwar.j/cran-all/cranData/BaM/R/expo.gibbs.R
#' expo.metrop #' #' Simple Metropolis algorithm demonstration using a bivariate exponential target from Chapter 1 (pages 27-30). #' #' @usage expo.metrop(m,x,y,L1,L2,L,B) #' #' @param m number of iterations #' @param x starting point for the x vector #' @param y starting point for the y vector #' @param L1 event intensity for the x dimension #' @param L2 event intensity for the y dimension #' @param L shared event intensity #' @param B upper bound #' #' @author Jeff Gill #' @importFrom stats runif #' @examples #' #' expo.metrop(m=5000, x=0.5, y=0.5, L1=0.5, L2=0.1, L=0.01, B=8) #' #' @export expo.metrop <- function(m=5000, x=0.5, y=0.5, L1=0.5, L2=0.1, L=0.01, B=8) { for (i in 1:(m-1)) { cand.val <- cand.gen(B,B) a <- biv.exp(cand.val[1],cand.val[2],L1,L2,L) / biv.exp(x[i],y[i],L1,L2,L) if (a > runif(1)) { x <- c(x,cand.val[1]) y <- c(y,cand.val[2]) } else { x <- c(x,x[i]) y <- c(y,y[i]) } } return(cbind(x,y)) } biv.exp <- function(x,y,L1,L2,L) exp( -(L1+L)*x - (L2+L)*y -L*max(x,y) ) cand.gen <- function(max.x,max.y) c(runif(1,0,max.x),runif(1,0,max.y))
/scratch/gouwar.j/cran-all/cranData/BaM/R/expo.metrop.R
#' @name fdr #' @title fdr #' @description FDR election data. See page 576 #' #' The variables included in the dataset are: #' \itemize{ #' \item\code{State} State name #' \item\code{FDR} Whether or not FDR won the state in 1932 election, 1 = won, 0 = lost #' \item\code{PRE.DEP} Mean income per state before the Great Depression (1929), in dollars #' \item\code{POST.DEP} Mean income per state after the Great Depression (1932), in dollars #' \item\code{FARM} Total farm wage and salary disbursements in thousands of dollars per state in 1932 #'} #' #' @usage data(fdr) #' @docType data NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/fdr-data.R
#' @name hanjack
#' @title hanjack
#' @description 1964 presidential election data. See page 221.
#' @usage hanjack(N,F,L,W,K,IND,DEM,WR,WD,SD)
#' @param N number of cases in the group
#' @param F observed cell proportion voting for Johnson
#' @param L log-ratio of this proportion, see p. 246
#' @param W the inverse of the diagonal of the group-weighting matrix, \eqn{[N_i P_i (1-P_i)]}
#' @param K constant
#' @param IND indifference to the election
#' @param DEM stated preference for Democratic party issues
#' @param WR Weak Republican
#' @param WD Weak Democrat
#' @param SD Strong Democrat
#' @references Hanushek, E. A. and Jackson, J. E. (1977). Statistical Methods for Social Scientists. San Diego: Academic Press.
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/hanjack-data.R
#' hit.run
#'
#' Implementation of the hit-and-run algorithm, p. 361.
#'
#' @usage hit.run(theta.mat,reps,I.mat)
#'
#' @param theta.mat matrix to hold the chain; its first row is the starting point
#' @param reps number of iterations
#' @param I.mat variance-covariance matrix of the target density
#'
#' @author Jeff Gill
#' @importFrom stats runif rgamma
#' @examples
#' \dontrun{
#' # code to implement the graph on p. 362, see page 376
#' num.sims <- 10000
#' Sig.mat <- matrix(c(1.0,0.95,0.95,1.0),2,2)
#' walks <- rbind(c(-3,-3),matrix(NA,nrow=(num.sims-1),ncol=2))
#' walks <- hit.run(walks,num.sims,Sig.mat)
#' z.grid <- outer(seq(-3,3,length=100),seq(-3,3,length=100),
#'       FUN=dmultinorm,c(0,0),Sig.mat)
#' contour(seq(-3,3,length=100),seq(-3,3,length=100),z.grid,
#'       levels=c(0.05,0.1,0.2))
#' points(walks[5001:num.sims,],pch=".")
#' }
#' @export
hit.run <- function(theta.mat,reps,I.mat)  {
    for (i in 2:reps)  {
        # draw a direction on the circle and a step length, then propose
        u.vec <- c(runif(1,0,pi/2),runif(1,pi/2,pi),
            runif(1,pi,3*pi/2), runif(1,3*pi/2,2*pi))
        u.dr <- sample(u.vec,size=1, prob=c(1/3,1/6,1/3,1/6))
        g.ds <- rgamma(1,1,1)
        xy.theta <- c(g.ds*cos(u.dr),g.ds*sin(u.dr)) + theta.mat[(i-1),]
        # Metropolis acceptance ratio for the proposed point
        a <- dmultinorm(xy.theta[1],xy.theta[2], c(0,0),I.mat)/
            dmultinorm(theta.mat[(i-1),1], theta.mat[(i-1),2],c(0,0),I.mat)
        r.uniform <- runif(1)
        if (a > r.uniform) theta.mat[i,] <- xy.theta
        else theta.mat[i,] <- theta.mat[(i-1),]
    }
    theta.mat
}
/scratch/gouwar.j/cran-all/cranData/BaM/R/hit.run.R
#' @name iq
#' @title iq data frame
#' @description IQ data for 80 countries. See pages 85-87.
#' @usage data(iq)
#' @source Lynn, R. and Vanhanen, T. (2001). National IQ and Economic Development. Mankind Quarterly LXI, 415-437.
#' @examples
#' \dontrun{
#' data(iq)
#' n <- length(iq[1,])
#' t.iq <- (iq[1,]-mean(as.numeric(iq)))/(sd(iq[1,])/sqrt(n))
#' r.t <- (rt(100000, n-1)*(sd(iq)/sqrt(n))) + mean(as.numeric(iq))
#' quantile(r.t,c(0.01,0.10,0.25,0.5,0.75,0.90,0.99))
#' r.sigma.sq <- 1/rgamma(100000,shape=(n-2)/2, rate=var(as.numeric(iq))*(n-1)/2)
#' quantile(sqrt(r.sigma.sq), c(0.01,0.10,0.25,0.5,0.75,0.90,0.99))
#' }
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/iq-data.R
#' @name italy.parties
#' @title italy.parties
#' @description Italian Parties Data. Vote share of Italian parties from 1948-1983. See pages 370-371.
#' @usage data(italy.parties)
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/italy.parties-data.R
#' lunatics
#'
#' @description An 1854 study on mental health in the fourteen counties of Massachusetts yields data on 14 cases. This study was performed by Edward Jarvis (then president of the American Statistical Association).
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{NBR} the number of "lunatics" per county
#' \item\code{DISt} distance to the nearest mental healthcare center
#' \item\code{POP} population in the county, in thousands
#' \item\code{PDEN} population per square county mile
#' \item\code{PHOME} the percent of "lunatics" cared for in the home
#' }
#' @usage data(lunatics)
#' @name lunatics
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/lunatics-data.R
#' @name marriage.rates #' @title Marriage Rates in Italy #' @description Italian Marriage Rates. See page 430 #' @usage data(marriage.rates) #' @format a vector containing 16 numbers #' @source Columbo, B. (1952). Preliminary Analysis of Recent Demographic Trends in Italy. Population Index 18, 265-279. #' @docType data NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/marriage.rates-data.R
#' metropolis
#'
#' Implementation of the Metropolis algorithm, p. 359.
#'
#' @usage metropolis(theta.matrix,reps,I.mat)
#'
#' @param theta.matrix matrix to hold the chain; its first row is the starting point
#' @param reps number of iterations
#' @param I.mat variance-covariance matrix used for candidate generation and the target density
#'
#' @author Jeff Gill
#' @importFrom stats runif rchisq
#' @importFrom MASS mvrnorm
#' @export
metropolis <- function(theta.matrix,reps,I.mat)   {
    for (i in 2:reps)  {
        # multivariate t candidate: a normal draw scaled by a chi-squared draw
        theta.star <- mvrnorm(1,theta.matrix[(i-1),],I.mat)/
            (sqrt(rchisq(2,5)/5))
        # Metropolis acceptance ratio
        a <- dmultinorm(theta.star[1],theta.star[2],c(0,0),I.mat)/
            dmultinorm(theta.matrix[(i-1),1],theta.matrix[(i-1),2],
            c(0,0),I.mat)
        if (a > runif(1)) theta.matrix[i,] <- theta.star
        else theta.matrix[i,] <- theta.matrix[(i-1),]
    }
    theta.matrix
}
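# A hedged usage sketch (not from the book), mirroring the hit.run example:
# 5000 steps on a correlated bivariate normal target, discarding the first
# 1000 draws as burn-in. The target covariance is an assumption.
# num.sims <- 5000
# Sig.mat <- matrix(c(1.0, 0.9, 0.9, 1.0), 2, 2)
# chain <- rbind(c(-3, -3), matrix(NA, nrow = num.sims - 1, ncol = 2))
# chain <- metropolis(chain, num.sims, Sig.mat)
# plot(chain[1001:num.sims, ], pch = ".")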
/scratch/gouwar.j/cran-all/cranData/BaM/R/metropolis.R
#' militarydf
#'
#' @description A dataset of proportional changes in military personnel for the named countries. See pages 483-484.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{Year} The year selected to evaluate
#' \item\code{Yugoslavia} The proportion change in the size of Yugoslavia's military
#' \item\code{Albania} The proportion change in the size of Albania's military
#' \item\code{Bulgaria} The proportion change in the size of Bulgaria's military
#' \item\code{Czechoslovakia} The proportion change in the size of Czechoslovakia's military
#' \item\code{German.Dem.Republic} The proportion change in the size of the German Democratic Republic's military
#' \item\code{Hungary} The proportion change in the size of Hungary's military
#' \item\code{Poland} The proportion change in the size of Poland's military
#' \item\code{Rumania} The proportion change in the size of Romania's military
#' \item\code{USSR} The proportion change in the size of the Soviet Union's military
#'}
#'
#' @usage data(militarydf)
#' @name militarydf
#' @aliases military
#' @format a data frame with 35 observations of years from 1949 to 1983 with 10 explanatory variables
#' @source Faber, J. (1989). Annual Data on Nine Economic and Military Characteristics of 78 Nations (SIRE NATDAT), 1948-1983. Ann Arbor: Inter-University Consortium for Political and Social Research, and Amsterdam, the Netherlands: Europa Institute, Steinmetz Archive.
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/militarydf-data.R
#' nc.sub.dat #' #' @description North Carolina county level health data from the 2000 U.S. census and North Carolina public records, see page 78. #' #' The variables included in the dataset are: #' \itemize{ #' \item\code{Substantiated.Abuse} within family documented abuse for the county #' \item\code{Percent.Poverty} percent within the county living in poverty, U.S. definition #' \item\code{Total.Population} county population/1000 #' } #' #' @usage nc.sub.dat #' @name nc.sub.dat #' @format data frame with 100 observations of different counties in North Carolina with 3 explanatory variables #' @source data from 2000 US census and North Carolina Division of Public Health, Women's and Children's Health Section in Conjunction with State Center for Health Statistics #' @docType data NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/nc.sub.dat-data.R
#' norm.known.var
#'
#' A function to calculate posterior quantities for a normal-normal model with known variance (pages 70-72).
#' It produces the posterior mean, variance, and 95\% credible interval for a user-specified prior.
#'
#' @usage norm.known.var(data.vec,pop.var,prior.mean,prior.var)
#'
#' @param data.vec a vector of assumed normally distributed data
#' @param pop.var known population variance
#' @param prior.mean mean of specified prior distribution for mu
#' @param prior.var variance of specified prior distribution for mu
#'
#' @author Jeff Gill
#'
#' @export
norm.known.var <- function(data.vec,pop.var,prior.mean,prior.var)  {
    if(length(data.vec) <= 1) stop("norm.known.var: input data must be a vector")
    # precision-weighted posterior mean and variance
    mu.hat <- (prior.mean/prior.var + length(data.vec)*mean(data.vec)/pop.var)/
        (1/prior.var + length(data.vec)/pop.var)
    sigma.hat <- 1/(1/prior.var + length(data.vec)/pop.var)
    credible.int <- c(mu.hat-1.96*sqrt(sigma.hat),mu.hat+1.96*sqrt(sigma.hat))
    return( list(mu.hat=mu.hat,sigma.hat=sigma.hat, credible.interval=credible.int) )
}
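# A hedged usage sketch (not from the book): 20 simulated observations from
# N(5, 4) combined with a diffuse N(0, 100) prior; all values below are
# assumptions chosen only for demonstration.
# set.seed(1)
# y <- rnorm(20, mean = 5, sd = 2)
# norm.known.var(y, pop.var = 4, prior.mean = 0, prior.var = 100)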
/scratch/gouwar.j/cran-all/cranData/BaM/R/norm.known.var.R
#' normal posterior summary
#'
#' A function to calculate posterior quantities of bivariate normals. See pages 74-80.
#'
#' @usage normal.posterior.summary(reps)
#'
#' @param reps a matrix where the columns are defined as in the output of \code{biv.norm.post}
#'
#' @author Jeff Gill
#' @importFrom stats sd
#' @seealso \code{\link{biv.norm.post}}
#' @export
normal.posterior.summary <- function(reps)  {
    # convert the covariance in column 5 to a correlation
    reps[,5] <- reps[,5]/sqrt(reps[,3]*reps[,4])
    reps <- apply(reps,2,sort)
    # note: the interval rows below assume 1000 simulation replications
    out.mat <- cbind("mean"=apply(reps,2,mean),
        "std.err"=apply(reps,2,sd),
        "95% HPD Lower"=reps[25,],
        "95% HPD Upper"=reps[975,])
    return(out.mat)
}
/scratch/gouwar.j/cran-all/cranData/BaM/R/normal.posterior.summary.R
#' norr
#'
#' @description State-level data on capital punishment policy and public opinion in the American states, from Norrander (2000).
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{Current.policy} Current sentencing policy
#' \item\code{Past.execution.rate} Past execution rate
#' \item\code{Politicla.Culture} Political culture
#' \item\code{Current.opinion} Current opinion
#' \item\code{Citizen.ideology} Citizen ideology
#' \item\code{Murder.Rate} Murder rate
#' \item\code{Catholic} Catholic
#' \item\code{Black} Black
#' \item\code{Urban} Urban
#' \item\code{Past.laws} Past laws
#' \item\code{Past.opinion} Past opinion
#' }
#'
#' @usage data(norr)
#' @source Norrander, B. (2000). The Multi-Layered Impact of Public Opinion on Capital Punishment Implementation in the American States. Political Research Quarterly 53, 771-793.
#' @name norr
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/norr-data.R
#' opic
#'
#' @description Private capital investment data. See page 390.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{Fund} Name of the private company
#' \item\code{Age} Years the company has been in existence
#' \item\code{Status} Whether the company is investing or divesting
#' \item\code{Size} Maximum fund size in millions
#' }
#'
#' @usage data(opic)
#' @name opic
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/opic-data.R
#' pbc.vote
#'
#' @description Precinct-level data for Palm Beach County, Florida from the 2000 U.S. Presidential Election, see page 149.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{badballots} Total number of spoiled ballots
#' \item\code{technology} Voting technology used: 0 for a datapunch machine or a butterfly ballot, 1 for votomatic
#' \item\code{new} Number of "new" voters, i.e. those who had not voted in the precinct in the previous 6 years
#' \item\code{size} Total number of precinct voters
#' \item\code{Republican} The number of voters registered as Republican
#' \item\code{white} The number of white nonminority voters in a given precinct
#' }
#'
#' @usage data(pbc.vote)
#' @format data frame with 516 observations of each precinct in Palm Beach County with 11 explanatory variables
#' @source The Palm Beach Post collected data from state and federal sources about precinct-level voting in Palm Beach County for the 2000 U.S. presidential election.
#' @name pbc.vote
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/pbc.vote-data.R
#' plot_walk_G
#'
#' plot_walk_G code used to produce Figure 10.2. Draws the path of a Gibbs
#' sampler as rectilinear (one-coordinate-at-a-time) segments.
#'
#' @usage plot_walk_G(walk.mat,sim.rm,X=1,Y=2)
#'
#' @param walk.mat matrix of sampled values, one row per iteration
#' @param sim.rm not used by the function body (retained in the signature)
#' @param X column of walk.mat to plot on the horizontal axis
#' @param Y column of walk.mat to plot on the vertical axis
#'
#' @author Jeff Gill
#' @importFrom graphics plot segments
#' @export
plot_walk_G <- function(walk.mat,sim.rm,X=1,Y=2)  {
    plot(walk.mat[1,X],walk.mat[1,Y],type="n",
        xlim=range(walk.mat[,X]), ylim=range(walk.mat[,Y]),
        xlab="",ylab="")
    # draw each transition as a horizontal then a vertical segment,
    # reflecting the one-at-a-time updates of the Gibbs sampler
    for(i in 1:(nrow(walk.mat)-1))  {
        segments(walk.mat[i,X],walk.mat[i,Y],
            walk.mat[(i+1),X],walk.mat[i,Y])
        segments(walk.mat[(i+1),X],walk.mat[i,Y],
            walk.mat[(i+1),X],walk.mat[(i+1),Y])
    }
}
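# A hedged usage sketch (not from the book): trace a short Gibbs path from
# expo.gibbs; since sim.rm is unused by the function, NULL is passed.
# walk <- expo.gibbs(B = 5, k = 15, m = 200)
# plot_walk_G(walk, sim.rm = NULL)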
/scratch/gouwar.j/cran-all/cranData/BaM/R/plot_walk_G.R
#' plot_walk_MH
#'
#' plot_walk_MH code used to produce Figure 10.4. Draws the path of a
#' Metropolis-Hastings chain as straight segments between successive points.
#'
#' @usage plot_walk_MH(walk.mat)
#'
#' @param walk.mat matrix of sampled values, one row per iteration
#'
#' @author Jeff Gill
#' @importFrom graphics plot segments
#' @export
plot_walk_MH <- function(walk.mat)  {
    plot(walk.mat[1,1],walk.mat[1,2],type="n",
        xlim=round(range(walk.mat[,1])*1.2),
        ylim=round(range(walk.mat[,2])*1.2),
        xlab="",ylab="")
    # connect consecutive chain states directly
    for(i in 1:(nrow(walk.mat)-1))  {
        segments(walk.mat[i,1],walk.mat[i,2],
            walk.mat[(i+1),1],walk.mat[(i+1),2])
    }
}
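# A hedged usage sketch (not from the book): plot a short Metropolis path;
# the starting point and identity covariance are assumptions for illustration.
# chain <- rbind(c(-3, -3), matrix(NA, nrow = 499, ncol = 2))
# chain <- metropolis(chain, 500, diag(2))
# plot_walk_MH(chain)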
/scratch/gouwar.j/cran-all/cranData/BaM/R/plot_walk_MH.R
#' retail.sales
#'
#' @description Retail sales from 1979 through 1989 based on data provided by the U.S. Department of Commerce through the Survey of Current Business, see page 439.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{TIME} the economic quarter specified, starting from the first quarter of 1979 where j=1 to the fourth quarter of 1989 where j=44
#' \item\code{DSB} national income wage and salary disbursements (in billions of dollars)
#' \item\code{EMP} employees on non-agricultural payrolls (in thousands)
#' \item\code{BDG} building material dealer sales (in millions of dollars)
#' \item\code{CAR} retail automotive dealer sales (in millions of dollars)
#' \item\code{FRN} home furnishings dealer sales (in millions of dollars)
#' \item\code{GMR} general merchandise dealer sales (in millions of dollars)
#' }
#'
#' @usage data(retail.sales)
#' @format data frame with 44 observations of statistics for different economic quarters with 7 explanatory variables
#' @source U.S. Department of Commerce data from the first quarter of 1979 to the fourth quarter of 1989
#' @name retail.sales
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/retail.sales-data.R
#' rmultinorm
#'
#' A function to generate random multivariate Gaussians.
#'
#' @usage rmultinorm(n, mu, vmat, tol = 1e-07)
#'
#' @param n number of draws
#' @param mu vector of means
#' @param vmat variance-covariance matrix
#' @param tol tolerance for the symmetry check on vmat
#'
#' @author Jeff Gill
#' @importFrom stats rnorm
#' @seealso \code{\link{biv.norm.post}}
#' @export
rmultinorm <- function(n, mu, vmat, tol = 1e-07) {
    p <- ncol(vmat)
    if(length(mu)!=p)
        stop(paste("mu vector is the wrong length:",length(mu)))
    if(max(abs(vmat - t(vmat))) > tol)
        stop("vmat not symmetric")
    # matrix square root of vmat via singular value decomposition
    vs <- svd(vmat)
    vsqrt <- t(vs$v %*% (t(vs$u) * sqrt(vs$d)))
    # transform iid standard normals and shift by the mean vector
    ans <- matrix(rnorm(n * p), nrow = n) %*% vsqrt
    ans <- sweep(ans, 2, mu, "+")
    dimnames(ans) <- list(NULL, dimnames(vmat)[[2]])
    ans
}
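# A hedged usage sketch (not from the book): 1000 draws from a bivariate
# normal, reusing the mean and covariance from the biv.norm.post example;
# the seed is an assumption for reproducibility.
# set.seed(2)
# draws <- rmultinorm(1000, mu = c(1, 3), vmat = matrix(c(1.0, 0.7, 0.7, 3.0), 2, 2))
# colMeans(draws)
# var(draws)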
/scratch/gouwar.j/cran-all/cranData/BaM/R/rmultinorm.R
#' romney #' #' Analysis of cultural consensus data using binomial likelihood and beta prior. #' #' @usage romney() #' @format See for yourself. Modify as desired. #' #' @source Romney, A. K. (1999). Culture Consensus as a Statistical Model. \cr #' \emph{Current Anthropology} 40 (Supplement), S103-S115. #' @importFrom stats dbeta qbeta #' @importFrom graphics par lines text #' @author Jeff Gill #' @export romney <- function() { par(oma=c(1,1,1,1),mar=c(0,0,0,0),mfrow=c(2,1)) x <- c(1,1,1,1,0,1,1,0,1,0,1,1,1,0,1,1,1,1,1,1,0,0,0,1) ruler <- seq(0,1,length=300) A <- 15; B <- 2 beta.prior <- dbeta(ruler,A,B) beta.posterior <- dbeta(ruler,sum(x)+A,length(x)-sum(x)+B) plot(ruler,beta.prior, ylim=c(-0.7,9.5), xaxt="n", yaxt="n", xlab="", ylab="", pch=".") lines(ruler,beta.posterior) hpd.95 <- qbeta(c(0.025,0.975),sum(x)+A,length(x)-sum(x)+B) segments(hpd.95[1],0,hpd.95[2],0,lwd=4) text(mean(hpd.95),-0.4,"95% HPD Interval",cex=0.6) text(0.25,5,paste("Beta(",A,",",B, ") prior, 95% HPD Interval at: [",round(hpd.95[1],3), ":",round(hpd.95[2],3),"]",sep=""),cex=1.1) A <- 1; B <- 1 beta.prior <- dbeta(ruler,A,B) beta.posterior <- dbeta(ruler,sum(x)+A,length(x)-sum(x)+B) plot(ruler,beta.prior, ylim=c(-0.7,9.5), xaxt="n", yaxt="n", xlab="", ylab="", pch=".") lines(ruler,beta.posterior) hpd.95 <- qbeta(c(0.025,0.975),sum(x)+A,length(x)-sum(x)+B) segments(hpd.95[1],0,hpd.95[2],0,lwd=4) text(mean(hpd.95),-0.4,"95% HPD Interval",cex=0.6) text(0.25,5,paste("Beta(",A,",",B, ") prior, 95% HPD Interval at: [",round(hpd.95[1],3), ":",round(hpd.95[2],3),"]",sep=""),cex=1.1) }
/scratch/gouwar.j/cran-all/cranData/BaM/R/romney.R
#' sir
#'
#' Implementation of Rubin's SIR (sampling importance resampling), see pages 338-341 (2nd Edition).
#'
#' @usage sir(data.mat,theta.vector,theta.mat,M,m,tol=1e-06,ll.func,df=0)
#'
#' @param data.mat A data matrix whose first column is the outcome variable; the remaining columns are the covariates
#' @param theta.vector The initial coefficient estimates
#' @param theta.mat The initial variance-covariance matrix
#' @param M The number of draws
#' @param m The desired number of accepted values
#' @param tol The rounding/truncating tolerance
#' @param ll.func log-likelihood function for the empirical posterior
#' @param df The df for using the t distribution as the approximation distribution (0 uses the normal)
#'
#' @author Jeff Gill
#' @importFrom stats pnorm rchisq runif var
#' @aliases logit.posterior.ll normal.posterior.ll t_posterior.ll probit.posterior.ll
#' @examples
#' \dontrun{
#' # A hedged usage sketch (not from the book): apply SIR to simulated
#' # logistic regression data, starting from the MLE and its estimated
#' # covariance matrix. The exported helper logit.posterior.ll (below)
#' # expects the response in column 1 of data.mat; that column is then
#' # overwritten internally with a constant term.
#' set.seed(99)
#' x <- rnorm(200)
#' y <- rbinom(200, 1, plogis(0.5 - 1.2*x))
#' data.mat <- cbind(y, x)
#' fit <- glm(y ~ x, family = binomial)
#' draws <- sir(data.mat, coef(fit), vcov(fit), M = 20000, m = 1000,
#'              ll.func = logit.posterior.ll)
#' apply(draws, 2, mean)   # posterior means of intercept and slope
#' }
#'
#' @export
sir <- function(data.mat,theta.vector,theta.mat,M,m,tol = 1e-06,ll.func,df = 0)  {
    importance.ratio <- rep(NA,M)
    # draw M candidates from the normal (or scaled-t, if df > 0) approximation
    rand.draw <- rmultinorm(M,theta.vector,theta.mat,tol = 1e-04)
    if (df > 0)
        rand.draw <- rand.draw/(sqrt(rchisq(M,df)/df))
    # evaluate the empirical posterior at each candidate
    empirical.draw.vector <- apply(rand.draw,1,ll.func,data.mat)
    if (sum(is.na(empirical.draw.vector)) == 0)  {
        print("SIR: finished generating from posterior density function")
        print(summary(empirical.draw.vector))
    }
    else  {
        print(paste("SIR: found",sum(is.na(empirical.draw.vector)),
            "NA(s) in generating from posterior density function, quitting"))
        return()
    }
    # evaluate the approximation distribution at each candidate
    if (df == 0)  {
        normal.draw.vector <- apply(rand.draw,1,normal.posterior.ll,data.mat)
    }
    else  {
        theta.mat <- ((df-2)/(df))*theta.mat
        normal.draw.vector <- apply(rand.draw,1,t_posterior.ll,data.mat,df)
    }
    if (sum(is.na(normal.draw.vector)) == 0)  {
        print("SIR: finished generating from approximation distribution")
        print(summary(normal.draw.vector))
    }
    else  {
        print(paste("SIR: found",sum(is.na(normal.draw.vector)),
            "NA(s) in generating from approximation distribution, quitting"))
        return()
    }
    # importance weights, rescaled to the unit interval
    importance.ratio <- exp(empirical.draw.vector - normal.draw.vector)
    importance.ratio[!is.finite(importance.ratio)] <- 0
    importance.ratio <- importance.ratio/max(importance.ratio)
    if (sum(is.na(importance.ratio)) == 0)  {
        print("SIR: finished calculating importance weights")
        print(summary(importance.ratio))
    }
    else  {
        print(paste("SIR: found",sum(is.na(importance.ratio)),
            "NA(s) in calculating importance weights, quitting"))
        return()
    }
    # resample candidates with probability proportional to their weights
    accepted.mat <- rand.draw[1:2,]
    while(nrow(accepted.mat) < m+2)  {
        rand.unif <- runif(length(importance.ratio))
        accepted.loc <- seq(along=importance.ratio)[(rand.unif-tol) <= importance.ratio]
        rejected.loc <- seq(along=importance.ratio)[(rand.unif-tol) > importance.ratio]
        accepted.mat <- rbind(accepted.mat,rand.draw[accepted.loc,])
        rand.draw <- rand.draw[rejected.loc,]
        importance.ratio <- importance.ratio[rejected.loc]
        print(paste("SIR: cycle complete,",(nrow(accepted.mat)-2),"now accepted"))
    }
    accepted.mat[3:nrow(accepted.mat),]
}

# The following are log-likelihood functions that can be plugged into the sir
# function above. Several `%*%` operators in these bodies were lost in the
# original source (Rd treats `%` as a comment character); they are restored
# here from the standard forms of these log-likelihoods.

#' @export
probit.posterior.ll <- function (theta.vector,X,tol = 1e-05)  {
    Y <- X[,1]
    X[,1] <- rep(1,nrow(X))
    Xb <- X %*% theta.vector
    h <- pnorm(Xb)
    h[h<tol] <- tol
    g <- 1-pnorm(Xb)
    g[g<tol] <- tol
    sum( log(h)*Y + log(g)*(1-Y) )
}

#' @export
t_posterior.ll <- function(coef.vector,X,df)  {
    Y <- X[,1]
    X[,1] <- rep(1,nrow(X))
    e <- Y - X %*% coef.vector
    sigma <- var(e)*(df-2)/(df)
    d <- length(coef.vector)
    return(log(gamma((df+d)/2)) - log(gamma(df/2)) - (d/2)*log(df)
        - (d/2)*log(pi) - 0.5*log(sigma)
        - ((df+d)/2)*log(1+(1/(df*sigma))*(t(Y-X%*%coef.vector)%*%(Y-X%*%coef.vector))))
}

#' @export
normal.posterior.ll <- function(coef.vector,X)  {
    dimnames(coef.vector) <- NULL
    Y <- X[,1]
    X[,1] <- rep(1,nrow(X))
    e <- Y - X %*% coef.vector
    sigma <- var(e)
    return(-nrow(X)*(1/2)*log(2*pi)
        -nrow(X)*(1/2)*log(sigma)
        -(1/(2*sigma))*(t(Y-X%*%coef.vector)%*%(Y-X%*%coef.vector)))
}

#' @export
logit.posterior.ll <- function(theta.vector,X)  {
    Y <- X[,1]
    X[,1] <- rep(1,nrow(X))
    sum( -log(1+exp(-X %*% theta.vector))*Y
         -log(1+exp(X %*% theta.vector))*(1-Y) )
}
/scratch/gouwar.j/cran-all/cranData/BaM/R/sir.R
#' socatt
#'
#' @description Data from the British Social Attitudes (BSA) Survey, 1983-1986.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{District} identifier for geographic district
#' \item\code{Respondent.Code} respondent identifier
#' \item\code{Year.Code} 1 = 1983, 2 = 1984, 3 = 1985, 4 = 1986
#' \item\code{Num.Answers} number of positive answers to seven questions
#' \item\code{Party} 1 = Conservative, 2 = Labour, 3 = Lib/SDP/Alliance, 4 = others
#' \item\code{Social.Class} 1 = middle, 2 = upper working, 3 = lower working
#' \item\code{Gender} 1 = male, 2 = female
#' \item\code{Age} age in years, 18-80
#' \item\code{Religion} 1 = Roman Catholic, 2 = Protestant/Church of England, 3 = others, 4 = none
#' }
#'
#' @usage data(socatt)
#' @name socatt
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/socatt-data.R
#' strikes
#'
#' @description French coal strikes, see pages 212-213.
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{Year} The year the labor strikes in France occurred
#' \item\code{Counts} The number of labor strikes that occurred in France per year
#'}
#' @usage data(strikes)
#' @name strikes
#' @format data frame with 11 observations of strikes that occurred in different years with 1 explanatory variable
#' @source Conell, C. and Cohn, S. (1995). Learning from Other People's Actions: Environmental Variation and Diffusion in French Coal Mining Strikes, 1890-1935. American Journal of Sociology 101, 366-403.
#' @docType data
#' @examples
#' n <- nrow(strikes)
#' r <- 1
#' s.y <- sum(strikes$Counts)
#'
#' p.posterior.1000000 <- rbeta(1000000,n*r,s.y+0.5)
#' length(p.posterior.1000000[p.posterior.1000000<0.05])/1000000
#'
#' par(mar=c(3,3,3,3))
#' ruler <- seq(0,1,length=1000)
#' beta.vals <- dbeta(ruler,n*r,s.y+0.5)
#' plot(ruler[1:200],beta.vals[1:200],yaxt="n",main="",ylab="",type="l")
#' mtext(side=2,line=1,"Density")
#' for (i in 1:length(ruler))
#'    if (ruler[i] < 0.05)
#'       segments(ruler[i],0,ruler[i],beta.vals[i])
#' segments(0.04,3,0.02,12.2)
#' text(0.02,12.8,"0.171")
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/strikes-data.R