#' Obtain a Point Estimate and Efficient Influence Function Estimate for a Given Predictiveness Measure
#'
#' @param x an object of class \code{"predictiveness_measure"}
#' @param ... other arguments to type-specific predictiveness measures (currently unused)
#'
#' @return A list with the point estimate, naive point estimate (for ANOVA only),
#'   estimated EIF, and the predictions for the coarsened-data EIF (for coarsened-data settings only)
#'
#' @export
estimate.predictiveness_measure <- function(x, ...) {
  arg_lst <- unclass(x)
  arg_lst$ipc_fit_type <- attr(x, "ipc_fit_type")
  arg_lst$ipc_est_type <- attr(x, "ipc_est_type")
  arg_lst$scale <- attr(x, "scale")
  arg_lst$na.rm <- attr(x, "na.rm")
  type <- attr(x, "type")
  unhelpful_objects <- c("cross_fitting_folds", "K", "point_est", "eif", "folds_Z")
  unhelpful_indices <- which(names(arg_lst) %in% unhelpful_objects)
  if (arg_lst$K == 1) {
    # apply the measure function to all observations
    est_lst <- estimate_type_predictiveness(arg_lst[-unhelpful_indices], type)
    x$point_est <- est_lst$point_est
    x$eif <- est_lst$eif
    x$ipc_eif_preds <- est_lst$ipc_eif_preds
    x$all_point_ests <- est_lst$point_est
    x$all_eifs <- list(x$eif)
  } else {
    # do cross-fitting; but don't need to do cross-fitting on components of
    # the predictiveness measure that don't involve the features (e.g., the
    # marginal outcome variance for R-squared and the marginal probability of
    # Y = 1 for deviance)
    estimate_denominator <- FALSE
    full_type <- type
    if (any(grepl("r_squared", type))) {
      type <- "mse"
      estimate_denominator <- TRUE
    }
    if (any(grepl("deviance", type))) {
      type <- "cross_entropy"
      estimate_denominator <- TRUE
    }
    max_nrow <- max(sapply(1:arg_lst$K, function(k) length(arg_lst$C[arg_lst$folds_Z == k])))
    eifs <- vector("list", length = arg_lst$K)
    eif <- vector("numeric", length = length(arg_lst$C))
    ipc_eif_preds <- predictiveness_measures <- vector("list", length = arg_lst$K)
    for (k in seq_len(arg_lst$K)) {
      this_arg_lst <- get_test_set(arg_lst, k = k)
      this_est_lst <- estimate_type_predictiveness(this_arg_lst[-unhelpful_indices], type)
      predictiveness_measures[[k]] <- this_est_lst
      eifs[[k]] <- this_est_lst$eif
      eif[arg_lst$folds_Z == k] <- this_est_lst$eif
      ipc_eif_preds[arg_lst$folds_Z == k] <- this_est_lst$ipc_eif_preds
    }
    point_ests <- sapply(1:arg_lst$K, function(k) predictiveness_measures[[k]]$point_est)
    point_est <- mean(point_ests)
    # estimate the denominator, if necessary
    if (estimate_denominator) {
      do_ipcw <- as.numeric(!all(arg_lst$ipc_weights == 1))
      if (is.null(arg_lst$full_y)) {
        mn_y <- mean(arg_lst$y, na.rm = attr(x, "na.rm"))
      } else {
        mn_y <- mean(arg_lst$full_y, na.rm = attr(x, "na.rm"))
      }
      this_arg_lst <- arg_lst
      this_arg_lst$fitted_values <- rep(mn_y, length(arg_lst$y))
      this_arg_lst$C <- switch(do_ipcw + 1, rep(1, length(arg_lst$C)), arg_lst$C)
      if (grepl("r_squared", full_type)) {
        denominator <- do.call(measure_mse, this_arg_lst[-unhelpful_indices])
      } else {
        denominator <- do.call(measure_cross_entropy, this_arg_lst[-unhelpful_indices])
      }
      eif <- (-1) * as.vector(
        matrix(
          c(1 / denominator$point_est,
            (-1) * point_est / (denominator$point_est ^ 2)),
          nrow = 1
        ) %*% t(cbind(eif, denominator$eif))
      )
      all_eifs <- lapply(as.list(seq_len(arg_lst$K)), function(i) {
        (-1) * as.vector(
          matrix(
            c(1 / denominator$point_est,
              (-1) * point_ests[i] / (denominator$point_est ^ 2)),
            nrow = 1
          ) %*% t(cbind(eifs[[i]], denominator$eif[arg_lst$folds_Z == i]))
        )
      })
      point_ests <- 1 - point_ests / denominator$point_est
      eifs <- all_eifs
    }
    x$point_est <- mean(point_ests)
    x$eif <- eif
    x$ipc_eif_preds <- ipc_eif_preds
    x$all_point_ests <- point_ests
    x$all_eifs <- eifs
  }
  return(x)
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/estimate.predictiveness_measure.R
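A minimal usage sketch for the method above, on simulated data with no coarsening and a single cross-fitting fold. It assumes the vimp package (whose source appears here) is attached, so that the predictiveness_measure() constructor and the estimate() generic dispatched to this method are available.

set.seed(1234)
n <- 100
y <- rbinom(n, 1, 0.5)
fitted <- runif(n)
# build the predictiveness_measure object, then estimate it
pm <- predictiveness_measure(
  type = "accuracy", y = y, fitted_values = fitted,
  cross_fitting_folds = rep(1, n)
)
est <- estimate(pm)
est$point_est  # point estimate of classification accuracy
head(est$eif)  # estimated EIF values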
#' Estimate Predictiveness Given a Type
#'
#' Estimate the specified type of predictiveness
#'
#' @param arg_lst a list of arguments; from, e.g., \code{predictiveness_measure}
#' @param type the type of predictiveness, e.g., \code{"r_squared"}
estimate_type_predictiveness <- function(arg_lst, type) {
  if (grepl("accuracy", type)) {
    est_lst <- do.call(measure_accuracy, arg_lst)
  } else if (grepl("anova", type)) {
    est_lst <- do.call(measure_anova, arg_lst)
  } else if (grepl("auc", type)) {
    est_lst <- do.call(measure_auc, arg_lst)
  } else if (grepl("average_value", type)) {
    est_lst <- do.call(measure_average_value, arg_lst)
  } else if (grepl("cross_entropy", type)) {
    est_lst <- do.call(measure_cross_entropy, arg_lst)
  } else if (grepl("deviance", type)) {
    est_lst <- do.call(measure_deviance, arg_lst)
  } else if (grepl("mse", type)) {
    est_lst <- do.call(measure_mse, arg_lst)
  } else if (grepl("r_squared", type)) {
    est_lst <- do.call(measure_r_squared, arg_lst)
  }
  return(est_lst)
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/estimate_type_predictiveness.R
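A quick illustration of the dispatch above on simulated data. This helper is not exported, so the sketch accesses it with vimp::: purely for demonstration.

set.seed(1)
y <- rbinom(50, 1, 0.5)
arg_lst <- list(fitted_values = runif(50), y = y)
# "mse" matches the measure_mse branch
est_lst <- vimp:::estimate_type_predictiveness(arg_lst, type = "mse")
est_lst$point_est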
#' Extract sampled-split predictions from a CV.SuperLearner object #' #' Use the cross-validated Super Learner and a set of specified sample-splitting #' folds to extract cross-fitted predictions on separate splits of the data. This #' is primarily for use in cases where you have already fit a CV.SuperLearner #' and want to use the fitted values to compute variable importance without having #' to re-fit. The number of folds used in the CV.SuperLearner must be even. #' #' @param cvsl_obj An object of class \code{"CV.SuperLearner"}; must be entered unless \code{preds} is specified. #' @param sample_splitting logical; should we use sample-splitting or not? #' Defaults to \code{TRUE}. #' @param sample_splitting_folds A vector of folds to use for sample splitting #' @param full logical; is this the fit to all covariates (\code{TRUE}) or not #' (\code{FALSE})? #' @param preds a vector of predictions; must be entered unless \code{cvsl_obj} is specified. #' @param cross_fitting_folds a vector of folds that were used in cross-fitting. #' @param vector logical; should we return a vector (where each element is #' the prediction when the corresponding row is in the validation fold) or a #' list? #' #' @seealso \code{\link[SuperLearner]{CV.SuperLearner}} for usage of the #' \code{CV.SuperLearner} function. #' @return The predictions on validation data in each split-sample fold. #' @export extract_sampled_split_predictions <- function(cvsl_obj = NULL, sample_splitting = TRUE, sample_splitting_folds = NULL, full = TRUE, preds = NULL, cross_fitting_folds = NULL, vector = TRUE) { if ((is.null(cvsl_obj) | !inherits(cvsl_obj, "CV.SuperLearner")) & is.null(preds)) { stop("Please enter a CV.SuperLearner object or the predictions and folds from such an object.") } else if (!is.null(preds) & is.null(cross_fitting_folds)) { stop("You must enter cross-fitting folds if you choose to enter predicted values rather than a CV.SuperLearner object.") } if (is.null(sample_splitting_folds)) { stop("Please enter sample-splitting folds.") } # get all predictions and folds from cross-fitting if (!is.null(cvsl_obj)) { all_preds <- cvsl_obj$SL.predict cross_fitting_folds <- get_cv_sl_folds(cvsl_obj$folds) } else { all_preds <- preds } unique_cf_folds <- unique(cross_fitting_folds) # get which sample-splitting fold to use this_sample_split <- ifelse(full, 1, 2) if (sample_splitting) { # use V / 2 for inner cross-fitting within cv_vim; # note that if the input V is odd, the code below will still work V <- sum(sample_splitting_folds == this_sample_split) } else { V <- length(unique(cross_fitting_folds)) } lst <- vector("list", length = V) these_cf_folds <- sort(unique_cf_folds)[sample_splitting_folds == this_sample_split] for (v in 1:V) { lst[[v]] <- all_preds[cross_fitting_folds == these_cf_folds[[v]]] } if (vector) { preds <- all_preds[cross_fitting_folds %in% these_cf_folds] return(preds) } else { return(lst) } } #' Get a numeric vector with cross-validation fold IDs from CV.SuperLearner #' #' @param cv_sl_folds The folds from a call to \code{CV.SuperLearner}; a list. #' #' @importFrom data.table rbindlist #' @return A numeric vector with the fold IDs. #' @export get_cv_sl_folds <- function(cv_sl_folds) { folds_with_row_nums <- sapply(1:length(cv_sl_folds), function(x) list( row_nums = cv_sl_folds[[x]], fold = rep(x, length(cv_sl_folds[[x]])) ), simplify = FALSE ) folds_df <- data.table::rbindlist(folds_with_row_nums) folds_df$fold[order(folds_df$row_nums)] }
/scratch/gouwar.j/cran-all/cranData/vimp/R/extract_sampled_split_predictions.R
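A minimal sketch of the pre-computed-predictions path of extract_sampled_split_predictions() (no CV.SuperLearner object needed). The predictions, cross-fitting folds, and sample-splitting folds below are simulated for illustration.

set.seed(20)
n <- 100
preds <- runif(n)
cf_folds <- sample(rep(1:4, length.out = n))  # 4 cross-fitting folds
ss_folds <- c(1, 2, 1, 2)                     # one sample-splitting label per unique fold
full_preds <- extract_sampled_split_predictions(
  preds = preds, cross_fitting_folds = cf_folds,
  sample_splitting = TRUE, sample_splitting_folds = ss_folds,
  full = TRUE, vector = TRUE
)
length(full_preds)  # predictions from the folds assigned to the "full" split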
#' Format a \code{predictiveness_measure} object
#'
#' Nicely formats the output from a \code{predictiveness_measure} object for printing.
#'
#' @param x the \code{predictiveness_measure} object of interest.
#' @param ... other options, see the generic \code{format} function.
#' @export
format.predictiveness_measure <- function(x, ...) {
  if (!all(is.na(x$eif))) {
    # grab the point estimate
    est <- x$point_est
    # compute the SE
    se <- sqrt(mean(x$eif ^ 2) / length(x$eif))
    output <- cbind(attr(x, "type"), format(est, ...), format(se, ...))
  } else {
    output <- cbind(attr(x, "type"), "", "")
  }
  # create the output matrix
  col_nms <- c("Type", "Estimate", "SE")
  colnames(output) <- col_nms
  return(output)
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/format.predictiveness_measure.R
#' Format a \code{vim} object #' #' Nicely formats the output from a \code{vim} object for printing. #' #' @param x the \code{vim} object of interest. #' @param ... other options, see the generic \code{format} function. #' @export format.vim <- function(x, ...) { # create the output matrix nice_s <- function(x) ifelse(length(x) <= 10, paste(x, collapse = ", "), paste(c(x[1:10], "..."), collapse = ", ")) ## if it is a combined object, we need to print the matrix instead if (!is.null(x$mat)) { ## combine the columns for cis tmp.ci <- cbind(format(x$mat$cil, ...), format(x$mat$ciu, ...)) if (!any(grepl("anova", class(x)))) { output <- cbind(format(x$mat$est, ...), format(x$mat$se, ...), apply(tmp.ci, 1, function(x) paste("[", paste(x, collapse = ", "), "]", sep = "")), format(x$mat$test, ...), format(x$mat$p_value, ...)) } else { output <- cbind(format(x$mat$est, ...), format(x$mat$se, ...), apply(tmp.ci, 1, function(x) paste("[", paste(x, collapse = ", "), "]", sep = ""))) } ## tag on row names print_s <- lapply(as.list(x$mat$s), function(x) nice_s(strsplit(x, ",", fixed = TRUE)[[1]])) rownames(output) <- paste("s = ", print_s, sep = "") } else { if (!any(grepl("anova", class(x)))) { output <- cbind(format(x$est, ...), format(x$se, ...), paste("[", paste(format(x$ci, ...), collapse = ", "), "]", sep = ""), format(x$test, ...), format(x$p_value, ...)) } else { output <- cbind(format(x$est, ...), format(x$se, ...), paste("[", paste(format(x$ci, ...), collapse = ", "), "]", sep = "")) } print_s <- nice_s(x$s) rownames(output) <- paste("s = ", print_s, sep = "") } col_nms <- c("Estimate", "SE", paste(100 - 100*x$alpha[[1]], "% CI", sep = "")) if (!any(grepl("anova", class(x)))) { tmp <- c(col_nms, paste0("VIMP > ", x$delta), "p-value") col_nms <- tmp } colnames(output) <- col_nms return(output) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/format.vim.R
#' Estimate the classification accuracy #' #' Compute nonparametric estimate of classification accuracy. #' #' @param fitted_values fitted values from a regression function using the #' observed data (may be within a specified fold, for cross-fitted estimates). #' @param y the observed outcome (may be within a specified fold, for #' cross-fitted estimates). #' @param full_y the observed outcome (not used, defaults to \code{NULL}). #' @param C the indicator of coarsening (1 denotes observed, 0 denotes #' unobserved). #' @param Z either \code{NULL} (if no coarsening) or a matrix-like object #' containing the fully observed data. #' @param ipc_weights weights for inverse probability of coarsening (IPC) #' (e.g., inverse weights from a two-phase sample) weighted estimation. #' Assumed to be already inverted. #' (i.e., ipc_weights = 1 / [estimated probability weights]). #' @param ipc_fit_type if "external", then use \code{ipc_eif_preds}; if "SL", #' fit a SuperLearner to determine the IPC correction to the efficient #' influence function. #' @param ipc_eif_preds if \code{ipc_fit_type = "external"}, the fitted values #' from a regression of the full-data EIF on the fully observed #' covariates/outcome; otherwise, not used. #' @param ipc_est_type IPC correction, either \code{"ipw"} (for classical #' inverse probability weighting) or \code{"aipw"} (for augmented inverse #' probability weighting; the default). #' @param scale if doing an IPC correction, then the scale that the correction #' should be computed on (e.g., "identity"; or "logit" to logit-transform, #' apply the correction, and back-transform). #' @param na.rm logical; should \code{NA}s be removed in computation? #' (defaults to \code{FALSE}) #' @param nuisance_estimators not used; for compatibility with \code{measure_average_value}. #' @param a not used; for compatibility with \code{measure_average_value}. #' @param ... other arguments to SuperLearner, if \code{ipc_fit_type = "SL"}. #' #' @return A named list of: (1) the estimated classification accuracy of the #' fitted regression function; (2) the estimated influence function; and #' (3) the IPC EIF predictions. #' @importFrom SuperLearner predict.SuperLearner SuperLearner #' @export measure_accuracy <- function(fitted_values, y, full_y = NULL, C = rep(1, length(y)), Z = NULL, ipc_weights = rep(1, length(y)), ipc_fit_type = "external", ipc_eif_preds = rep(1, length(y)), ipc_est_type = "aipw", scale = "logit", na.rm = FALSE, nuisance_estimators = NULL, a = NULL, ...) { # compute the EIF: if there is coarsening, do a correction if (!all(ipc_weights == 1)) { obs_grad <- ((fitted_values > 1/2) == y) - mean((fitted_values > 1/2) == y, na.rm = na.rm) obs_est <- mean((1 * ipc_weights[C == 1]) * ((fitted_values > 1/2) == y), na.rm = na.rm) # if IPC EIF preds aren't entered, estimate the regression ipc_eif_preds <- estimate_eif_projection(obs_grad = obs_grad, C = C, Z = Z, ipc_fit_type = ipc_fit_type, ipc_eif_preds = ipc_eif_preds, ...) weighted_obs_grad <- rep(0, length(C)) weighted_obs_grad[C == 1] <- obs_grad * ipc_weights[C == 1] grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds if (ipc_est_type == "ipw") { est <- scale_est(obs_est, rep(1, length(grad)), scale = scale) } else { est <- scale_est(obs_est, grad, scale = scale) } } else { est <- mean(((fitted_values > 1/2) == y), na.rm = na.rm) grad <- ((fitted_values > 1/2) == y) - mean((fitted_values > 1/2) == y, na.rm = na.rm) } return(list(point_est = est, eif = grad, ipc_eif_preds = ipc_eif_preds)) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/measure_accuracy.R
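A minimal sketch of measure_accuracy() on simulated binary data with no coarsening (so none of the IPC arguments come into play). The standard error at the end is one plausible use of the returned eif, not part of the function itself.

set.seed(42)
n <- 200
y <- rbinom(n, 1, 0.6)
fitted <- plogis(qlogis(0.6) + y + rnorm(n, 0, 0.5))
acc <- measure_accuracy(fitted_values = fitted, y = y)
acc$point_est                       # estimated classification accuracy
sqrt(mean(acc$eif^2) / length(y))   # EIF-based standard error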
#' Estimate ANOVA decomposition-based variable importance.
#'
#' @inheritParams measure_accuracy
#' @param full fitted values from a regression function of the observed outcome
#'   on the full set of covariates.
#' @param reduced fitted values from a regression on the reduced set of observed
#'   covariates.
#'
#' @return A named list of: (1) the estimated ANOVA (based on a one-step
#'   correction) of the fitted regression functions; (2) the estimated
#'   influence function; (3) the naive ANOVA estimate; and (4) the IPC EIF
#'   predictions.
#' @importFrom SuperLearner predict.SuperLearner SuperLearner
#' @export
measure_anova <- function(full, reduced, y, full_y = NULL,
                          C = rep(1, length(y)), Z = NULL,
                          ipc_weights = rep(1, length(y)),
                          ipc_fit_type = "external",
                          ipc_eif_preds = rep(1, length(y)),
                          ipc_est_type = "aipw", scale = "logit",
                          na.rm = FALSE, nuisance_estimators = NULL,
                          a = NULL, ...) {
  if (is.null(full_y)) {
    obs_mn_y <- mean(y, na.rm = na.rm)
  } else {
    obs_mn_y <- mean(full_y, na.rm = na.rm)
  }
  # pad with NAs if the fitted values aren't of equal length
  if (length(full) < length(reduced)) {
    full <- c(full, rep(NA, length(reduced) - length(full)))
  }
  if (length(reduced) < length(full)) {
    reduced <- c(reduced, rep(NA, length(full) - length(reduced)))
  }
  # compute the EIF: if there is coarsening, do a correction
  if (!all(ipc_weights == 1)) {
    # observed full-data EIF
    obs_num <- mean(((full - reduced) ^ 2), na.rm = na.rm)
    obs_var <- measure_mse(
      fitted_values = rep(obs_mn_y, length(y)), y, na.rm = na.rm
    )
    obs_eif_num <- (2 * (y - full) * (full - reduced) +
                      (full - reduced) ^ 2 - obs_num)[C == 1]
    obs_grad <- obs_eif_num / obs_var$point_est -
      obs_num / (obs_var$point_est ^ 2) * obs_var$eif
    # if IPC EIF preds aren't entered, estimate the regression
    ipc_eif_preds <- estimate_eif_projection(
      obs_grad = obs_grad, C = C, Z = Z, ipc_fit_type = ipc_fit_type,
      ipc_eif_preds = ipc_eif_preds, ...
    )
    weighted_obs_grad <- rep(0, length(C))
    weighted_obs_grad[C == 1] <- obs_grad * ipc_weights[C == 1]
    grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds
    num <- mean((1 * ipc_weights[C == 1]) * ((full - reduced) ^ 2), na.rm = na.rm)
    denom <- mean((1 * ipc_weights[C == 1]) * (y - mean(y, na.rm = na.rm)) ^ 2,
                  na.rm = na.rm)
    obs_est <- num / denom
    naive <- obs_est
    if (ipc_est_type == "ipw") {
      est <- scale_est(obs_est, rep(0, length(grad)), scale = scale)
    } else {
      est <- scale_est(obs_est, grad, scale = scale)
    }
  } else {
    num <- mean((full - reduced) ^ 2, na.rm = na.rm)
    var <- measure_mse(fitted_values = rep(obs_mn_y, length(y)), y, na.rm = na.rm)
    num_eif <- 2 * (y - full) * (full - reduced) + (full - reduced) ^ 2 - num
    grad <- num_eif / var$point_est - num / (var$point_est ^ 2) * var$eif
    naive <- num / var$point_est
    est <- naive + mean(grad)
  }
  return(list(point_est = est, eif = grad, naive = naive,
              ipc_eif_preds = ipc_eif_preds))
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/measure_anova.R
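A minimal sketch of measure_anova() using simple linear-model fits as stand-ins for the full and reduced regression functions (simulated data, no coarsening).

set.seed(101)
n <- 500
x1 <- rnorm(n)
x2 <- rnorm(n)
y <- 1 + 2 * x1 + 0.5 * x2 + rnorm(n)
full_fit <- fitted(lm(y ~ x1 + x2))  # regression on all covariates
redu_fit <- fitted(lm(y ~ x2))       # regression without x1
anova_est <- measure_anova(full = full_fit, reduced = redu_fit, y = y)
anova_est$point_est  # one-step corrected ANOVA estimate
anova_est$naive      # plug-in (naive) estimate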
#' Estimate area under the receiver operating characteristic curve (AUC) #' #' Compute nonparametric estimate of AUC. #' #' @inheritParams measure_accuracy #' #' @return A named list of: (1) the estimated AUC of the fitted regression #' function; (2) the estimated influence function; and #' (3) the IPC EIF predictions. #' @importFrom SuperLearner predict.SuperLearner SuperLearner #' @importFrom data.table data.table `:=` #' @export measure_auc <- function(fitted_values, y, full_y = NULL, C = rep(1, length(y)), Z = NULL, ipc_weights = rep(1, length(y)), ipc_fit_type = "external", ipc_eif_preds = rep(1, length(y)), ipc_est_type = "aipw", scale = "logit", na.rm = FALSE, nuisance_estimators = NULL, a = NULL, ...) { # bind "global vars" to pass R CMD check initial_rownums <- label <- pred <- sens <- spec <- NULL # compute the point estimate (on only data with all obs, if IPC # weights are entered) preds <- ROCR::prediction(predictions = fitted_values, labels = y) est <- unlist(ROCR::performance(prediction.obj = preds, measure = "auc", x.measure = "cutoff")@y.values) # compute sensitivity and specificity n_0 <- sum(y == 0) n_1 <- sum(y == 1) n_0_weighted <- sum((y == 0) * ipc_weights[C == 1]) n_1_weighted <- sum((y == 1) * ipc_weights[C == 1]) if (is.null(full_y)) { p_0 <- mean(y == 0) p_1 <- mean(y == 1) } else { p_0 <- mean(full_y == 0) p_1 <- mean(full_y == 1) } dt <- data.table::data.table(pred = as.numeric(fitted_values), label = as.numeric(y), initial_rownums = 1:length(as.numeric(y))) # sort by ascending pred within descending label, i.e., all Y = 1 followed by all Y = 0 dt <- dt[order(pred, -xtfrm(label))] dt[, sens := cumsum(label == 0) / n_0] # sort by descending pred within ascending label dt <- dt[order(-pred, label)] dt[, spec := cumsum(label == 1) / n_1] dt <- dt[order(initial_rownums)] # compute the EIF: if there is coarsening, do a correction if (!all(ipc_weights == 1)) { # gradient obs_grad <- ((y == 0) / p_0) * (dt$spec - est) + ((y == 1) / p_1) * (dt$sens - est) # if IPC EIF preds aren't entered, estimate the regression ipc_eif_preds <- estimate_eif_projection(obs_grad = obs_grad, C = C, Z = Z, ipc_fit_type = ipc_fit_type, ipc_eif_preds = ipc_eif_preds, ...) weighted_obs_grad <- rep(0, length(C)) weighted_obs_grad[C == 1] <- obs_grad * ipc_weights[C == 1] grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds # compute weighted AUC pred_order <- order(as.numeric(fitted_values), decreasing = TRUE) ordered_preds <- as.numeric(fitted_values)[pred_order] tp <- cumsum(ipc_weights[C == 1][pred_order] * (y[pred_order] == 1)) fp <- cumsum(ipc_weights[C == 1][pred_order] * (y[pred_order] == 0)) dups <- rev(duplicated(rev(ordered_preds))) fp <- c(0, fp[!dups]) tp <- c(0, tp[!dups]) fp_x <- fp / n_0_weighted tp_y <- tp / n_1_weighted obs_est <- 0 for (i in 2:length(fp_x)) { obs_est <- obs_est + 0.5 * (fp_x[i] - fp_x[i - 1]) * (tp_y[i] + tp_y[i - 1]) } obs_est <- as.numeric(obs_est) if (ipc_est_type == "ipw") { est <- scale_est(obs_est, rep(0, length(grad)), scale = scale) } else { est <- scale_est(obs_est, grad, scale = scale) } } else { # gradient grad <- ((y == 0) / p_0) * (dt$spec - est) + ((y == 1) / p_1) * (dt$sens - est) } return(list(point_est = est, eif = grad, ipc_eif_preds = ipc_eif_preds)) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/measure_auc.R
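A minimal sketch of measure_auc() on simulated binary data (the function relies on the ROCR and data.table packages internally). The Wald-type interval at the end illustrates one way to use the returned eif.

set.seed(7)
n <- 300
y <- rbinom(n, 1, 0.5)
fitted <- plogis(2 * y - 1 + rnorm(n))
auc_est <- measure_auc(fitted_values = fitted, y = y)
auc_est$point_est                                   # estimated AUC
hw <- qnorm(0.975) * sqrt(mean(auc_est$eif^2) / n)  # 95% CI half-width
c(auc_est$point_est - hw, auc_est$point_est + hw)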
#' Estimate the average value under the optimal treatment rule #' #' Compute nonparametric estimate of the average value under the optimal #' treatment rule. #' @inheritParams measure_accuracy #' @param nuisance_estimators a list of nuisance function estimators on the #' observed data (may be within a specified fold, for cross-fitted estimates). #' Specifically: an estimator of the optimal treatment rule; an estimator of the #' propensity score under the estimated optimal treatment rule; and an estimator #' of the outcome regression when treatment is assigned according to the estimated optimal rule. #' @param a the observed treatment assignment (may be within a specified fold, #' for cross-fitted estimates). #' #' @return A named list of: (1) the estimated classification accuracy of the #' fitted regression function; (2) the estimated influence function; and #' (3) the IPC EIF predictions. #' @importFrom SuperLearner predict.SuperLearner SuperLearner #' @export measure_average_value <- function(nuisance_estimators, y, a, full_y = NULL, C = rep(1, length(y)), Z = NULL, ipc_weights = rep(1, length(y)), ipc_fit_type = "external", ipc_eif_preds = rep(1, length(y)), ipc_est_type = "aipw", scale = "identity", na.rm = FALSE, ...) { # compute the EIF: if there is coarsening, do a correction if (!all(ipc_weights == 1)) { obs_grad <- ((a == nuisance_estimators$f_n) / nuisance_estimators$g_n) * (y - nuisance_estimators$q_n) + nuisance_estimators$q_n - mean(nuisance_estimators$q_n) obs_est <- mean((1 * ipc_weights[C == 1]) * (obs_grad + mean(nuisance_estimators$q_n))) # if IPC EIF preds aren't entered, estimate the regression ipc_eif_preds <- estimate_eif_projection(obs_grad = obs_grad, C = C, Z = Z, ipc_fit_type = ipc_fit_type, ipc_eif_preds = ipc_eif_preds, ...) weighted_obs_grad <- rep(0, length(C)) weighted_obs_grad[C == 1] <- obs_grad * ipc_weights[C == 1] grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds if (ipc_est_type == "ipw") { est <- scale_est(obs_est, rep(1, length(grad)), scale = scale) } else { est <- scale_est(obs_est, grad, scale = scale) } } else { grad <- ((a == nuisance_estimators$f_n) / nuisance_estimators$g_n) * (y - nuisance_estimators$q_n) + nuisance_estimators$q_n - mean(nuisance_estimators$q_n) est <- mean(grad + mean(nuisance_estimators$q_n)) } return(list(point_est = est, eif = grad, ipc_eif_preds = ipc_eif_preds)) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/measure_average_value.R
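A minimal sketch of measure_average_value(). The nuisance estimators (f_n, g_n, q_n) below are crude placeholders chosen only to show the expected structure of the nuisance_estimators list; they are not a recommended estimation strategy.

set.seed(11)
n <- 400
a <- rbinom(n, 1, 0.5)               # randomized treatment
y <- rbinom(n, 1, plogis(-0.5 + a))  # binary outcome
nuis <- list(
  f_n = rep(1, n),                   # estimated optimal rule: always treat
  g_n = rep(0.5, n),                 # propensity of receiving the rule-assigned treatment
  q_n = rep(mean(y[a == 1]), n)      # outcome regression under the rule
)
av <- measure_average_value(nuisance_estimators = nuis, y = y, a = a)
av$point_est  # estimated average value under the (estimated) optimal rule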
#' Estimate the cross-entropy
#'
#' Compute nonparametric estimate of cross-entropy.
#'
#' @inheritParams measure_accuracy
#'
#' @return A named list of: (1) the estimated cross-entropy of the fitted
#'   regression function; (2) the estimated influence function; and
#'   (3) the IPC EIF predictions.
#' @importFrom SuperLearner predict.SuperLearner SuperLearner
#' @export
measure_cross_entropy <- function(fitted_values, y, full_y = NULL,
                                  C = rep(1, length(y)), Z = NULL,
                                  ipc_weights = rep(1, length(y)),
                                  ipc_fit_type = "external",
                                  ipc_eif_preds = rep(1, length(y)),
                                  ipc_est_type = "aipw", scale = "identity",
                                  na.rm = FALSE, nuisance_estimators = NULL,
                                  a = NULL, ...) {
  # point estimates of all components
  if (is.null(dim(y))) {
    # assume that zero is in first column
    y_mult <- cbind(1 - y, y)
  } else if (dim(y)[2] < 2) {
    y_mult <- cbind(1 - y, y)
  } else {
    y_mult <- y
  }
  if (is.null(dim(fitted_values))) {
    # assume predicting y = 1
    fitted_mat <- cbind(1 - fitted_values, fitted_values)
  } else if (dim(fitted_values)[2] < 2) {
    fitted_mat <- cbind(1 - fitted_values, fitted_values)
  } else {
    fitted_mat <- fitted_values
  }
  # compute the EIF: if there is coarsening, do a correction
  if (!all(ipc_weights == 1)) {
    obs_ce <- sum(diag(t(y_mult) %*% log(fitted_mat)), na.rm = na.rm) / sum(C == 1)
    obs_grad <- rowSums(y_mult * log(fitted_mat), na.rm = na.rm) - obs_ce
    # if IPC EIF preds aren't entered, estimate the regression
    ipc_eif_preds <- estimate_eif_projection(
      obs_grad = obs_grad, C = C, Z = Z, ipc_fit_type = ipc_fit_type,
      ipc_eif_preds = ipc_eif_preds, ...
    )
    weighted_obs_grad <- rep(0, length(C))
    weighted_obs_grad[C == 1] <- obs_grad * ipc_weights[C == 1]
    grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds
    obs_est <- sum(diag(t(1 * ipc_weights[C == 1] * y_mult) %*% log(fitted_mat)),
                   na.rm = na.rm) / sum(C == 1)
    if (ipc_est_type == "ipw") {
      est <- scale_est(obs_est, rep(0, length(grad)), scale = scale)
    } else {
      est <- scale_est(obs_est, grad, scale = scale)
    }
  } else {
    est <- sum(diag(t(y_mult) %*% log(fitted_mat)), na.rm = na.rm) / dim(y_mult)[1]
    # influence curve
    grad <- rowSums(y_mult * log(fitted_mat), na.rm = na.rm) - est
  }
  return(list(point_est = est, eif = grad, ipc_eif_preds = ipc_eif_preds))
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/measure_cross_entropy.R
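A minimal sketch of measure_cross_entropy() on simulated binary data; a single vector of fitted probabilities of Y = 1 is expanded internally to a two-column matrix.

set.seed(3)
n <- 250
y <- rbinom(n, 1, 0.4)
fitted <- plogis(qlogis(0.4) + rnorm(n, 0, 0.3))
ce <- measure_cross_entropy(fitted_values = fitted, y = y)
ce$point_est  # estimated cross-entropy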
#' Estimate the deviance #' #' Compute nonparametric estimate of deviance. #' #' @inheritParams measure_accuracy #' #' @return A named list of: (1) the estimated deviance of the fitted regression #' function; (2) the estimated influence function; and #' (3) the IPC EIF predictions. #' @importFrom SuperLearner predict.SuperLearner SuperLearner #' @export measure_deviance <- function(fitted_values, y, full_y = NULL, C = rep(1, length(y)), Z = NULL, ipc_weights = rep(1, length(y)), ipc_fit_type = "external", ipc_eif_preds = rep(1, length(y)), ipc_est_type = "aipw", scale = "logit", na.rm = FALSE, nuisance_estimators = NULL, a = NULL, ...) { # point estimates of all components if (is.null(dim(y))) { # assume that zero is in first column y_mult <- cbind(1 - y, y) } else if (dim(y)[2] == 1) { y_mult <- cbind(1 - y, y) } else { y_mult <- y } # estimate the probability of observing a "case" if (is.null(full_y)) { pi_0 <- mean(y, na.rm = na.rm) } else { pi_0 <- mean(full_y, na.rm = na.rm) } # compute the EIF: if there is coarsening, do a correction if (!all(ipc_weights == 1)) { # get full-data gradient on fully-observed data obs_ce <- measure_cross_entropy(fitted_values, y, na.rm = na.rm) obs_denom <- measure_cross_entropy( fitted_values = rep(pi_0, length(y)), y, na.rm = na.rm ) obs_grad <- as.vector( matrix(c(1 / obs_denom$point_est, obs_ce$point_est / (obs_denom$point_est ^ 2)), nrow = 1) %*% t(cbind(obs_ce$eif, obs_denom$eif)) ) # if IPC EIF preds aren't entered, estimate the regression ipc_eif_preds <- estimate_eif_projection(obs_grad = obs_grad, C = C, Z = Z, ipc_fit_type = ipc_fit_type, ipc_eif_preds = ipc_eif_preds, ...) weighted_obs_grad <- rep(0, length(C)) weighted_obs_grad[C == 1] <- obs_grad * ipc_weights[C == 1] grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds obs_est <- 1 - measure_cross_entropy( fitted_values, 1 * ipc_weights[C == 1] * y, na.rm = na.rm )$point_est / measure_cross_entropy( fitted_values = mean(1 * ipc_weights[C == 1] * y, na.rm = na.rm), 1 * ipc_weights[C == 1] * y, na.rm = na.rm ) if (ipc_est_type == "ipw") { est <- scale_est(obs_est, rep(0, length(grad)), scale = scale) } else { est <- scale_est(obs_est, grad, scale = scale) } } else { cross_entropy_meas <- measure_cross_entropy( fitted_values, y, na.rm = na.rm ) denom <- measure_cross_entropy( fitted_values = rep(pi_0, length(y)), y, na.rm = na.rm ) est <- 1 - cross_entropy_meas$point_est / denom$point_est grad <- as.vector( matrix( c(1/denom$point_est, -cross_entropy_meas$point_est/(denom$point_est^2)), nrow = 1 ) %*% t(cbind(cross_entropy_meas$eif, denom$eif)) ) } return(list(point_est = est, eif = grad, ipc_eif_preds = ipc_eif_preds)) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/measure_deviance.R
#' Estimate mean squared error #' #' Compute nonparametric estimate of mean squared error. #' #' @inheritParams measure_accuracy #' #' @return A named list of: (1) the estimated mean squared error of the fitted #' regression function; (2) the estimated influence function; and #' (3) the IPC EIF predictions. #' @importFrom SuperLearner predict.SuperLearner SuperLearner #' @export measure_mse <- function(fitted_values, y, full_y = NULL, C = rep(1, length(y)), Z = NULL, ipc_weights = rep(1, length(y)), ipc_fit_type = "external", ipc_eif_preds = rep(1, length(y)), ipc_est_type = "aipw", scale = "identity", na.rm = FALSE, nuisance_estimators = NULL, a = NULL, ...) { # compute the EIF: if there is coarsening, do a correction if (!all(ipc_weights == 1)) { # observed mse obs_mse <- mean(((y - fitted_values) ^ 2), na.rm = na.rm) obs_grad <- ((y - fitted_values) ^ 2) - obs_mse # if IPC EIF preds aren't entered, estimate the regression ipc_eif_preds <- estimate_eif_projection(obs_grad = obs_grad, C = C, Z = Z, ipc_fit_type = ipc_fit_type, ipc_eif_preds = ipc_eif_preds, ...) weighted_obs_grad <- rep(0, length(C)) weighted_obs_grad[C == 1] <- obs_grad * ipc_weights[C == 1] grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds obs_est <- mean((1 * ipc_weights[C == 1]) * (y - fitted_values) ^ 2, na.rm = na.rm) if (ipc_est_type == "ipw") { est <- scale_est(obs_est, rep(0, length(grad)), scale = scale) } else { est <- scale_est(obs_est, grad, scale = scale) } } else { est <- mean((y - fitted_values)^2, na.rm = na.rm) # influence curves grad <- (y - fitted_values)^2 - est } return(list(point_est = est, eif = grad, ipc_eif_preds = ipc_eif_preds)) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/measure_mse.R
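A minimal sketch of measure_mse() on simulated continuous data with no coarsening; the final line shows the usual EIF-based standard error computed from the returned eif.

set.seed(5)
n <- 150
x <- rnorm(n)
y <- 2 * x + rnorm(n)
mse <- measure_mse(fitted_values = 2 * x, y = y)
mse$point_est                      # estimated mean squared error
sqrt(mean(mse$eif^2) / length(y))  # EIF-based standard error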
#' Estimate R-squared #' #' @inheritParams measure_accuracy #' #' @return A named list of: (1) the estimated R-squared of the fitted regression #' function; (2) the estimated influence function; and #' (3) the IPC EIF predictions. #' @importFrom SuperLearner predict.SuperLearner SuperLearner #' @export measure_r_squared <- function(fitted_values, y, full_y = NULL, C = rep(1, length(y)), Z = NULL, ipc_weights = rep(1, length(y)), ipc_fit_type = "external", ipc_eif_preds = rep(1, length(y)), ipc_est_type = "aipw", scale = "logit", na.rm = FALSE, nuisance_estimators = NULL, a = NULL, ...) { if (is.null(full_y)) { obs_mn_y <- mean(y, na.rm = na.rm) } else { obs_mn_y <- mean(full_y, na.rm = na.rm) } # compute the EIF: if there is coarsening, do a correction if (!all(ipc_weights == 1)) { # observed mse obs_mse <- measure_mse(fitted_values, y, na.rm = na.rm) obs_var <- measure_mse( fitted_values = rep(obs_mn_y, length(y)), y, na.rm = na.rm ) obs_grad <- as.vector( matrix(c(1 / obs_var$point_est, -obs_mse$point_est / (obs_var$point_est ^ 2)), nrow = 1) %*% t(cbind(obs_mse$eif, obs_var$eif)) ) # if IPC EIF preds aren't entered, estimate the regression ipc_eif_preds <- estimate_eif_projection(obs_grad = obs_grad, C = C, Z = Z, ipc_fit_type = ipc_fit_type, ipc_eif_preds = ipc_eif_preds, ...) weighted_obs_grad <- rep(0, length(C)) weighted_obs_grad[C == 1] <- obs_grad * ipc_weights[C == 1] grad <- weighted_obs_grad - (C * ipc_weights - 1) * ipc_eif_preds mse <- mean((1 * ipc_weights[C == 1]) * (y - fitted_values) ^ 2, na.rm = na.rm) var <- mean((1 * ipc_weights[C == 1]) * (y - mean(y, na.rm = na.rm)) ^ 2, na.rm = na.rm) obs_est <- (1 - mse / var) if (ipc_est_type == "ipw") { est <- scale_est(obs_est, rep(0, length(grad)), scale = scale) } else { est <- scale_est(obs_est, grad, scale = scale) } } else { # point estimates of all components mse <- measure_mse(fitted_values, y, na.rm = na.rm) var <- measure_mse(fitted_values = rep(obs_mn_y, length(y)), y, na.rm = na.rm) est <- 1 - mse$point_est / var$point_est # influence curve grad <- (-1) * as.vector( matrix(c(1/var$point_est, -mse$point_est/(var$point_est^2)), nrow = 1) %*% t(cbind(mse$eif, var$eif)) ) } return(list(point_est = est, eif = grad, ipc_eif_preds = ipc_eif_preds)) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/measure_r_squared.R
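A minimal sketch of measure_r_squared(): R-squared of a simple linear fit on simulated data, with an EIF-based standard error.

set.seed(8)
n <- 300
x <- rnorm(n)
y <- 1 + x + rnorm(n)
r2 <- measure_r_squared(fitted_values = fitted(lm(y ~ x)), y = y)
r2$point_est                      # estimated R-squared
sqrt(mean(r2$eif^2) / length(y))  # EIF-based standard error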
#' Merge multiple \code{vim} objects into one #' #' Take the output from multiple different calls to \code{vimp_regression} and #' merge into a single \code{vim} object; mostly used for plotting results. #' #' @param ... an arbitrary number of \code{vim} objects, separated by commas. #' #' @return an object of class \code{vim} containing all of the output #' from the individual \code{vim} objects. This results in a list containing: #' \itemize{ #' \item{s}{ - a list of the column(s) to calculate variable importance for} #' \item{SL.library}{ - a list of the libraries of learners passed to \code{SuperLearner}} #' \item{full_fit}{ - a list of the fitted values of the chosen method fit to the full data} #' \item{red_fit}{ - a list of the fitted values of the chosen method fit to the reduced data} #' \item{est}{- a vector with the corrected estimates} #' \item{naive}{- a vector with the naive estimates} #' \item{eif}{- a list with the influence curve-based updates} #' \item{se}{- a vector with the standard errors} #' \item{ci}{- a matrix with the CIs} #' \item{mat}{ - a tibble with the estimated variable importance, the standard errors, and the \eqn{(1-\alpha) \times 100}\% confidence intervals} #' \item{full_mod}{ - a list of the objects returned by the estimation procedure for the full data regression (if applicable)} #' \item{red_mod}{ - a list of the objects returned by the estimation procedure for the reduced data regression (if applicable)} #' \item{alpha}{ - a list of the levels, for confidence interval calculation} #' } #' #' @examples #' # generate the data #' # generate X #' p <- 2 #' n <- 100 #' x <- data.frame(replicate(p, stats::runif(n, -5, 5))) #' #' # apply the function to the x's #' smooth <- (x[,1]/5)^2*(x[,1]+7)/5 + (x[,2]/3)^2 #' #' # generate Y ~ Normal (smooth, 1) #' y <- smooth + stats::rnorm(n, 0, 1) #' #' # set up a library for SuperLearner; note simple library for speed #' library("SuperLearner") #' learners <- c("SL.glm", "SL.mean") #' #' # using Super Learner (with a small number of folds, for illustration only) #' est_2 <- vimp_regression(Y = y, X = x, indx = 2, V = 2, #' run_regression = TRUE, alpha = 0.05, #' SL.library = learners, cvControl = list(V = 2)) #' #' est_1 <- vimp_regression(Y = y, X = x, indx = 1, V = 2, #' run_regression = TRUE, alpha = 0.05, #' SL.library = learners, cvControl = list(V = 2)) #' #' ests <- merge_vim(est_1, est_2) #' @importFrom magrittr "%>%" #' @importFrom rlang "!!" sym #' @export merge_vim <- function(...) { # capture the arguments L <- list(...) names(L) <- unlist(match.call(expand.dots=F)$...) 
p <- length(L) # extract the estimates and CIs from each element of the list ests <- do.call(c, lapply(L, function(z) z$est)) naives <- do.call(c, lapply(L, function(z) z$naive)) ses <- do.call(c, lapply(L, function(z) z$se)) tests <- do.call(c, lapply(L, function(z) z$test)) p_values <- do.call(c, lapply(L, function(z) z$p_value)) predictivenesses_full <- do.call(c, lapply(L, function(z) z$predictiveness_full)) predictivenesses_reduced <- do.call(c, lapply(L, function(z) z$predictiveness_reduced)) predictiveness_cis_full <- do.call(rbind, lapply(L, function(z) z$predictiveness_ci_full)) predictiveness_cis_reduced <- do.call(rbind, lapply(L, function(z) z$predictiveness_ci_reduced)) test_statistics <- do.call(rbind, lapply(L, function(z) z$test_statistic)) delta <- min(do.call(c, lapply(L, function(z) z$delta))) scale <- unique(unlist(lapply(L, function(z) z$scale))) # put on names names(ests) <- "est" names(tests) <- "test" names(p_values) <- "p_value" # now get lists of the remaining components eifs <- lapply(L, function(z) z$eif) s <- do.call(c, lapply(L, function(z) z$s)) SL.library <- lapply(L, function(z) z$SL.library) full_fit <- lapply(L, function(z) z$full_fit) red_fit <- lapply(L, function(z) z$red_fit) full_mod <- lapply(L, function(z) z$full_mod) red_mod <- lapply(L, function(z) z$red_mod) alpha <- min(unlist(lapply(L, function(z) z$alpha))) scale <- unique(unlist(lapply(L, function(z) z$scale))) # combine into a tibble mat <- do.call(dplyr::bind_rows, lapply(L, function(z) z$mat)) %>% dplyr::arrange(dplyr::desc(!! rlang::sym("est"))) # create output list output <- list(s = s, SL.library = SL.library, full_fit = full_fit, red_fit = red_fit, est = mat$est, naive = naives, eif = eifs, se = mat$se, ci = cbind(mat$cil, mat$ciu), predictiveness_full = predictivenesses_full, predictiveness_reduced = predictivenesses_reduced, predictiveness_ci_full = predictiveness_cis_full, predictiveness_ci_reduced = predictiveness_cis_reduced, test = mat$test, p_value = mat$p_value, mat = mat, full_mod = full_mod, red_mod = red_mod, alpha = alpha, delta = delta, scale = scale) tmp <- class(output) classes <- unlist(lapply(L, function(z) class(z)[2])) class(output) <- c("vim", classes, tmp) return(output) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/merge_vim.R
#' Construct a Predictiveness Measure #' #' @param type the measure of interest (e.g., "accuracy", "auc", "r_squared") #' @param y the outcome of interest #' @param a the exposure of interest (only used if \code{type = "average_value"}) #' @param fitted_values fitted values from a regression function using the #' observed data (may be within a specified fold, for cross-fitted estimates). #' @param cross_fitting_folds folds for cross-fitting, if used to obtain the #' fitted values. If not used, a vector of ones. #' @param full_y the observed outcome (not used, defaults to \code{NULL}). #' @param nuisance_estimators a list of nuisance function estimators on the #' observed data (may be within a specified fold, for cross-fitted estimates). #' For the average value measure: an estimator of the optimal treatment rule (\code{f_n}); an estimator of the #' propensity score under the estimated optimal treatment rule (\code{g_n}); and an estimator #' of the outcome regression when treatment is assigned according to the estimated optimal rule (\code{q_n}). #' @param C the indicator of coarsening (1 denotes observed, 0 denotes #' unobserved). #' @param Z either \code{NULL} (if no coarsening) or a matrix-like object #' containing the fully observed data. #' @param folds_Z either the cross-validation folds for the observed data #' (no coarsening) or a vector of folds for the fully observed data Z. #' @param ipc_weights weights for inverse probability of coarsening (IPC) #' (e.g., inverse weights from a two-phase sample) weighted estimation. #' Assumed to be already inverted. #' (i.e., ipc_weights = 1 / [estimated probability weights]). #' @param ipc_fit_type if "external", then use \code{ipc_eif_preds}; if "SL", #' fit a SuperLearner to determine the IPC correction to the efficient #' influence function. #' @param ipc_eif_preds if \code{ipc_fit_type = "external"}, the fitted values #' from a regression of the full-data EIF on the fully observed #' covariates/outcome; otherwise, not used. #' @param ipc_est_type IPC correction, either \code{"ipw"} (for classical #' inverse probability weighting) or \code{"aipw"} (for augmented inverse #' probability weighting; the default). #' @param scale if doing an IPC correction, then the scale that the correction #' should be computed on (e.g., "identity"; or "logit" to logit-transform, #' apply the correction, and back-transform). #' @param na.rm logical; should \code{NA}s be removed in computation? #' (defaults to \code{FALSE}) #' @param ... other arguments to SuperLearner, if \code{ipc_fit_type = "SL"}. #' #' @return An object of class \code{"predictiveness_measure"}, with the following #' attributes: #' @export predictiveness_measure <- function(type = character(), y = numeric(), a = numeric(), fitted_values = numeric(), cross_fitting_folds = rep(1, length(fitted_values)), full_y = NULL, nuisance_estimators = list(), C = rep(1, length(y)), Z = NULL, folds_Z = cross_fitting_folds, ipc_weights = rep(1, length(y)), ipc_fit_type = "SL", ipc_eif_preds = numeric(), ipc_est_type = "aipw", scale = "identity", na.rm = TRUE, ...) { validate_predictiveness_measure(new_predictiveness_measure( type = type, y = y, a = a, fitted_values = fitted_values, cross_fitting_folds = cross_fitting_folds, full_y = full_y, nuisance_estimators = nuisance_estimators, C = C, Z = Z, folds_Z = folds_Z, ipc_weights = ipc_weights, ipc_fit_type = ipc_fit_type, ipc_eif_preds = ipc_eif_preds, ipc_est_type = ipc_est_type, scale = scale, na.rm = na.rm, ... 
)) } new_predictiveness_measure <- function(type = character(), y = numeric(), a = numeric(), fitted_values = numeric(), cross_fitting_folds = numeric(), full_y = NULL, nuisance_estimators = list(), C = numeric(), Z = NULL, folds_Z = NULL, ipc_weights = numeric(), ipc_fit_type = character(), ipc_eif_preds = numeric(), ipc_est_type = character(), scale = character(), na.rm = logical(), ...) { stopifnot(type %in% c("accuracy", "anova", "auc", "average_value", "cross_entropy", "deviance", "mse", "r_squared")) stopifnot(is.numeric(y)) stopifnot(is.numeric(a)) stopifnot(is.numeric(fitted_values)) stopifnot(is.numeric(cross_fitting_folds)) stopifnot(is.numeric(C)) stopifnot(is.numeric(ipc_weights)) stopifnot(is.character(ipc_fit_type)) stopifnot(is.numeric(ipc_eif_preds)) stopifnot(is.character(ipc_est_type)) stopifnot(scale %in% c("identity", "log", "logit")) stopifnot(is.logical(na.rm)) arg_lst <- list(...) if (length(ipc_weights) == 0) { ipc_weights <- rep(1, length(y)) } if (length(C) == 0) { C <- rep(1, length(y)) folds_Z <- cross_fitting_folds } structure( c(list(y = y, a = a, fitted_values = fitted_values, cross_fitting_folds = cross_fitting_folds, K = length(unique(cross_fitting_folds)), full_y = full_y, nuisance_estimators = nuisance_estimators, point_est = NA, eif = rep(NA, length(y)), C = C, Z = Z, folds_Z = folds_Z, ipc_weights = ipc_weights, ipc_eif_preds = ipc_eif_preds), arg_lst), type = type, ipc_fit_type = ipc_fit_type, ipc_est_type = ipc_est_type, scale = scale, na.rm = na.rm, class = "predictiveness_measure" ) } validate_predictiveness_measure <- function(x) { input_data <- unclass(x) type <- attr(x, "type") ipc_fit_type <- attr(x, "ipc_fit_type") ipc_est_type <- attr(x, "ipc_est_type") scale <- attr(x, "scale") na.rm <- attr(x, "na.rm") if (!any(grepl("average_value", type))) { if (length(input_data$y) != length(input_data$fitted_values)) { stop("The outcome data must have the same dimension as the fitted values", call. = FALSE) } } else { if (length(input_data$nuisance_estimators) == 0) { stop(paste0( "To estimate the average value, the following must be estimated:", " the optimal treatment rule (pass this in as named element f_n of the list);", " the propensity score under the optimal treatment rule (pass this in as named element g_n of the list);", " and the outcome regression when treatment is assigned according to the optimal rule (pass this in as named element q_n of the list)." ), call. = FALSE) } else { if (length(input_data$nuisance_estimators$f_n) != length(input_data$y)) { stop("The optimal treatment assignment must have the same dimension as the outcome.", call. = FALSE) } if (length(input_data$nuisance_estimators$g_n) != length(input_data$y)) { stop("The estimated propensity score must have the same dimension as the outcome.", call. = FALSE) } if (length(input_data$nuisance_estimators$q_n) != length(input_data$y)) { stop("The estimated outcome regression must have the same dimension as the outcome.", call. = FALSE) } } } if (length(input_data$cross_fitting_folds) != length(input_data$fitted_values)) { stop("If cross-fitting is desired, each observation must be put into a fold.") } if (length(input_data$a) != 0) { if (length(input_data$y) != length(input_data$a)) { stop("The outcome data must have the same dimension as the exposure data", call. = FALSE) } } if (length(input_data$ipc_weights) != length(input_data$C)) { stop("The full dataset must have the same dimension as the inverse probability weights", call. 
= FALSE) } if (!is.null(input_data$Z)) { if (nrow(input_data$Z) != length(input_data$C)) { stop("The data that are always measured (i.e., are not coarsened) must be the same dimension as the coarsening variable", call. = FALSE) } } x } is.predictiveness_measure <- function(x) { inherits(x, "predictiveness_measure") }
/scratch/gouwar.j/cran-all/cranData/vimp/R/predictiveness_measure.R
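A minimal sketch of constructing (and implicitly validating) a predictiveness_measure object with two cross-fitting folds and no coarsening; all inputs are simulated.

set.seed(2023)
n <- 100
y <- rbinom(n, 1, 0.5)
fitted <- runif(n)
folds <- sample(rep(1:2, length.out = n))
pm <- predictiveness_measure(
  type = "auc", y = y, fitted_values = fitted,
  cross_fitting_folds = folds
)
inherits(pm, "predictiveness_measure")  # TRUE
attr(pm, "type")                        # "auc"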
#' Print \code{predictiveness_measure} objects
#'
#' Prints out a table of the point estimate and standard error for a \code{predictiveness_measure} object.
#'
#' @param x the \code{predictiveness_measure} object of interest.
#' @param ... other options, see the generic \code{print} function.
#' @export
print.predictiveness_measure <- function(x, ...) {
  print(format(x, ...), quote = FALSE)
  invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/print.predictiveness_measure.R
#' Print \code{vim} objects
#'
#' Prints out the table of estimates, confidence intervals, and standard errors for a \code{vim} object.
#'
#' @param x the \code{vim} object of interest.
#' @param ... other options, see the generic \code{print} function.
#' @export
print.vim <- function(x, ...) {
  # print out the matrix
  cat("Variable importance estimates:\n")
  print(format(x, ...), quote = FALSE)
  invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/print.vim.R
#' Create necessary objects for SPVIMs
#'
#' Creates the Z and W matrices and a list of sampled subsets, S, for SPVIM estimation.
#'
#' @param p the number of covariates
#' @param gamma the fraction of the sample size to sample (e.g., \code{gamma = 1} means sample \code{n} subsets)
#' @param n the sample size
#'
#' @return a list, with elements Z (the matrix encoding presence/absence of each feature in the uniquely sampled subsets), S (the list of unique sampled subsets), W (the matrix of weights), and z_counts (the number of times each subset was sampled)
#'
#' @examples
#' p <- 10
#' gamma <- 1
#' n <- 100
#' set.seed(100)
#' subset_lst <- sample_subsets(p, gamma, n)
#' @importFrom stats aggregate
#' @export
sample_subsets <- function(p, gamma, n) {
  max_subset <- 1:p
  sampling_weights <- c(
    1, apply(matrix(1:(p - 1)), 1, function(s) choose(p - 2, s - 1) ^ (-1)), 1
  )
  subset_sizes <- sort(sample(0:p, size = gamma * n, replace = TRUE,
                              prob = sampling_weights / sum(sampling_weights)))
  S_all <- apply(matrix(subset_sizes), 1, function(s) sort(sample(1:p, s, replace = FALSE)))
  Z_all <- do.call(rbind, lapply(as.list(S_all), function(s) {
    z <- rep(0, p)
    z[match(s, max_subset)] <- 1
    return(z)
  }))
  Z_df <- data.frame(Z_all)
  Z_with_counts <- aggregate(list(n = rep(1, nrow(Z_df))), Z_df, length)
  Z <- as.matrix(Z_with_counts[, -ncol(Z_with_counts)])
  z_counts <- Z_with_counts$n
  Z_aug <- cbind(1, Z)
  S <- lapply(lapply(apply(Z, 1, list), unlist), function(z) max_subset[as.logical(z)])
  W <- diag(z_counts / sum(z_counts))
  return(list(Z = Z_aug, S = S, W = W, z_counts = z_counts))
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/sample_subsets.R
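Continuing the documented example for sample_subsets() above, a quick look at the structure of the returned pieces (the exact dimensions depend on which subsets happen to be drawn).

p <- 10
gamma <- 1
n <- 100
set.seed(100)
subset_lst <- sample_subsets(p, gamma, n)
dim(subset_lst$Z)         # number of unique subsets x (p + 1), including an intercept column
length(subset_lst$S)      # the unique sampled subsets, as index vectors
sum(subset_lst$z_counts)  # total draws: gamma * n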
#' Shapley Population Variable Importance Measure (SPVIM) Estimates and Inference #' #' Compute estimates and confidence intervals for the SPVIMs, using cross-fitting. #' #' @inheritParams cv_vim #' @param univariate_SL.library (optional) a character vector of learners to #' pass to \code{SuperLearner} for estimating univariate regression functions. #' Defaults to \code{SL.polymars} #' @param gamma the fraction of the sample size to use when sampling subsets #' (e.g., \code{gamma = 1} samples the same number of subsets as the sample #' size) #' @param verbose should \code{sp_vim} and \code{SuperLearner} print out #' progress? (defaults to \code{FALSE}) #' #' @return An object of class \code{vim}. See Details for more information. #' #' @details We define the SPVIM as the weighted average of the population #' difference in predictiveness over all subsets of features not containing #' feature \eqn{j}. #' #' This is equivalent to finding the solution to a population weighted least #' squares problem. This key fact allows us to estimate the SPVIM using weighted #' least squares, where we first sample subsets from the power set of all #' possible features using the Shapley sampling distribution; then #' use cross-fitting to obtain estimators of the predictiveness of each #' sampled subset; and finally, solve the least squares problem given in #' Williamson and Feng (2020). #' #' See the paper by Williamson and Feng (2020) for more #' details on the mathematics behind this function, and the validity #' of the confidence intervals. #' #' In the interest of transparency, we return most of the calculations #' within the \code{vim} object. This results in a list containing: #' \describe{ #' \item{SL.library}{the library of learners passed to \code{SuperLearner}} #' \item{v}{the estimated predictiveness measure for each sampled subset} #' \item{fit_lst}{the fitted values on the entire dataset from the chosen method for each sampled subset} #' \item{preds_lst}{the cross-fitted predicted values from the chosen method for each sampled subset} #' \item{est}{the estimated SPVIM value for each feature} #' \item{ics}{the influence functions for each sampled subset} #' \item{var_v_contribs}{the contibutions to the variance from estimating predictiveness} #' \item{var_s_contribs}{the contributions to the variance from sampling subsets} #' \item{ic_lst}{a list of the SPVIM influence function contributions} #' \item{se}{the standard errors for the estimated variable importance} #' \item{ci}{the \eqn{(1-\alpha) \times 100}\% confidence intervals based on the variable importance estimates} #' \item{p_value}{p-values for the null hypothesis test of zero importance for each variable} #' \item{test_statistic}{the test statistic for each null hypothesis test of zero importance} #' \item{test}{a hypothesis testing decision for each null hypothesis test (for each variable having zero importance)} #' \item{gamma}{the fraction of the sample size used when sampling subsets} #' \item{alpha}{the level, for confidence interval calculation} #' \item{delta}{the \code{delta} value used for hypothesis testing} #' \item{y}{the outcome} #' \item{ipc_weights}{the weights} #' \item{scale}{the scale on which CIs were computed} #' \item{mat}{- a tibble with the estimates, SEs, CIs, hypothesis testing decisions, and p-values} #' } #' #' @examples #' n <- 100 #' p <- 2 #' # generate the data #' x <- data.frame(replicate(p, stats::runif(n, -5, 5))) #' #' # apply the function to the x's #' smooth <- (x[,1]/5)^2*(x[,1]+7)/5 + (x[,2]/3)^2 
#' #' # generate Y ~ Normal (smooth, 1) #' y <- as.matrix(smooth + stats::rnorm(n, 0, 1)) #' #' # set up a library for SuperLearner; note simple library for speed #' library("SuperLearner") #' learners <- c("SL.glm") #' #' # ----------------------------------------- #' # using Super Learner (with a small number of CV folds, #' # for illustration only) #' # ----------------------------------------- #' set.seed(4747) #' est <- sp_vim(Y = y, X = x, V = 2, type = "r_squared", #' SL.library = learners, alpha = 0.05) #' #' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the #' \code{SuperLearner} function and package. #' @importFrom stats pnorm gaussian #' @importFrom utils txtProgressBar setTxtProgressBar #' @importFrom MASS ginv #' @export sp_vim <- function(Y = NULL, X = NULL, V = 5, type = "r_squared", SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"), univariate_SL.library = NULL, gamma = 1, alpha = 0.05, delta = 0, na.rm = FALSE, stratified = FALSE, verbose = FALSE, sample_splitting = TRUE, final_point_estimate = "split", C = rep(1, length(Y)), Z = NULL, ipc_scale = "identity", ipc_weights = rep(1, length(Y)), ipc_est_type = "aipw", scale = "identity", scale_est = TRUE, cross_fitted_se = TRUE, ...) { # if the data is missing, stop and throw an error if (is.null(Y)) stop("You must enter an outcome, Y.") if (is.null(X)) stop("You must enter a matrix of predictors, X.") if (sample_splitting) { ss_V <- 2 * V } else { ss_V <- V } # check to see if Y is a matrix or data.frame; if not, make it one # (just for ease of reading) if (is.null(dim(Y))) { Y <- as.matrix(Y) } # set up internal data -- based on complete cases only cc_lst <- create_z(Y, C, Z, X, ipc_weights) Y_cc <- cc_lst$Y weights_cc <- cc_lst$weights Z_in <- cc_lst$Z X_cc <- X[C == 1, , drop = FALSE] # get the correct measure function; if not one of the supported ones, say so full_type <- get_full_type(type) # set up the cross-fitting and sample-splitting folds, for fitting cross_fitting_folds <- make_folds(Y, V = ss_V, stratified = stratified, C = C) sample_splitting_folds <- make_folds(unique(cross_fitting_folds), V = 2) cross_fitting_folds_cc <- cross_fitting_folds[C == 1] # set up the cross-fitting folds, for predictiveness estimation if (sample_splitting) { # make new sets of folds, as if we had done V-fold within the two sets k_fold_lst <- make_kfold(cross_fitting_folds, sample_splitting_folds, C) full_test <- (k_fold_lst$sample_splitting_folds == 1) redu_test <- (k_fold_lst$sample_splitting_folds == 2) } else { # no need to do anything k_fold_lst <- list( full = cross_fitting_folds, reduced = cross_fitting_folds ) full_test <- rep(TRUE, length(cross_fitting_folds)) redu_test <- rep(TRUE, length(cross_fitting_folds)) } cf_folds_full <- k_fold_lst$full cf_folds_redu <- k_fold_lst$reduced cf_folds_full_cc <- cf_folds_full[C[full_test] == 1] cf_folds_redu_cc <- cf_folds_redu[C[redu_test] == 1] full_test_cc <- full_test[C == 1] redu_test_cc <- redu_test[C == 1] # sample subsets, set up Z z_w_lst <- sample_subsets(p = ncol(X), n = nrow(X), gamma = gamma) Z <- z_w_lst$Z W <- z_w_lst$W z_counts <- z_w_lst$z_counts S <- z_w_lst$S arg_lst <- list(...) 
if (is.null(arg_lst$family)) { arg_lst$family <- switch( (length(unique(Y_cc)) == 2) + 1, stats::gaussian(), stats::binomial() ) } # set method and family to compatible with continuous values, for EIF estimation eif_arg_lst <- process_arg_lst(arg_lst) arg_lst_cv <- arg_lst if (is.null(arg_lst_cv$innerCvControl)) { arg_lst_cv$innerCvControl <- rep(list(list(V = V)), ss_V) } # get v, preds, ic for null set preds_none <- rep(NA, length = length(Y_cc)) fitted_none <- vector("numeric", length = length(Y_cc)) for (v in seq_len(ss_V)) { preds_none[cross_fitting_folds_cc == v] <- rep( mean(Y_cc[cross_fitting_folds_cc == v]), sum(cross_fitting_folds_cc == v) ) fitted_none[cross_fitting_folds_cc == v] <- rep( mean(Y_cc[cross_fitting_folds_cc == v]), sum(cross_fitting_folds_cc == v) ) } preds_none <- preds_none[!is.na(preds_none)] v_none_pred_object <- do.call(predictiveness_measure, c( list(type = full_type, fitted_values = preds_none[full_test_cc], y = Y_cc[full_test_cc], full_y = Y_cc, cross_fitting_folds = cf_folds_full_cc, C = C[full_test], Z = Z_in[full_test, , drop = FALSE], folds_Z = cf_folds_full, ipc_weights = ipc_weights[full_test], ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), eif_arg_lst )) v_none_lst <- estimate(v_none_pred_object) v_none <- v_none_lst$point_est ic_none <- v_none_lst$eif ics_none <- v_none_lst$all_eifs # get v, preds, ic for remaining non-null groups in S if (verbose) { message("Fitting learners. Progress:") progress_bar <- txtProgressBar(min = 0, max = length(S[-1]), style = 3) } else { progress_bar <- NULL } preds_lst <- sapply( 1:length(S[-1]), function(i) { do.call(run_sl, args = c( list(Y_cc, X_cc, V = ss_V, SL.library = SL.library, univariate_SL.library = univariate_SL.library, s = S[-1][[i]], cv_folds = cross_fitting_folds_cc, sample_splitting = sample_splitting, ss_folds = sample_splitting_folds, verbose = verbose, progress_bar = progress_bar, indx = i, full = TRUE, cross_fitted_se = cross_fitted_se, weights = weights_cc), arg_lst_cv ))}, simplify = FALSE ) if (verbose) { close(progress_bar) } v_pred_object_list <- lapply(preds_lst, function(l) { do.call(predictiveness_measure, c( list(fitted_values = l$preds[full_test_cc], y = Y_cc[full_test_cc], full_y = Y_cc, cross_fitting_folds = cf_folds_full_cc, C = C[full_test], Z = Z_in[full_test, , drop = FALSE], folds_Z = cf_folds_full, ipc_weights = ipc_weights[full_test], type = full_type, ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), eif_arg_lst )) }) v_eif_lst <- lapply(v_pred_object_list, estimate) v_lst <- lapply(v_eif_lst, function(lst) lst$point_est) ics_lst <- lapply(v_eif_lst, function(lst) lst$all_eifs) ic_lst <- lapply(v_eif_lst, function(lst) lst$eif) if (!cross_fitted_se) { ic_all_object_list <- lapply(preds_lst, function(l) { do.call(predictiveness_measure, c( list(fitted_values = l$preds_non_cf_se[full_test_cc], y = Y_cc[full_test_cc], full_y = Y_cc, cross_fitting_folds = cf_folds_full_cc, C = C[full_test], Z = Z_in[full_test, , drop = FALSE], folds_Z = cf_folds_full, ipc_weights = ipc_weights[full_test], type = full_type, ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), eif_arg_lst )) }) ic_all_lst <- lapply(ic_all_object_list, estimate) ics_lst <- lapply(ic_all_lst, function(l) l$all_eifs) ic_lst <- lapply(ic_all_lst, function(l) l$eif) } v <- matrix(c(v_none, unlist(v_lst))) # do constrained wls if (verbose) { 
message("Fitting weighted least squares to estimate the SPVIM values.") } A_W <- sqrt(W) %*% Z v_W <- sqrt(W) %*% v G <- rbind(c(1, rep(0, ncol(X))), rep(1, ncol(X) + 1) - c(1, rep(0, ncol(X)))) c_n <- matrix(c(v_none, v[length(v)] - v_none), ncol = 1) kkt_matrix_11 <- 2 * t(A_W) %*% A_W kkt_matrix_12 <- t(G) kkt_matrix_21 <- G kkt_matrix_22 <- matrix(0, nrow = nrow(kkt_matrix_21), ncol = ncol(kkt_matrix_12)) kkt_matrix <- rbind(cbind(kkt_matrix_11, kkt_matrix_12), cbind(kkt_matrix_21, kkt_matrix_22)) ls_matrix <- rbind(2 * t(A_W) %*% v_W, c_n) ls_solution <- MASS::ginv(kkt_matrix) %*% ls_matrix est <- ls_solution[1:(ncol(X) + 1), , drop = FALSE] lambdas <- ls_solution[(ncol(X) + 2):nrow(ls_solution), , drop = FALSE] # compute the SPVIM ICs and standard errors ses <- vector("numeric", ncol(X) + 1) var_v_contribs <- vector("numeric", ncol(X) + 1) var_s_contribs <- vector("numeric", ncol(X) + 1) if (cross_fitted_se) { all_ics_lst <- c(list(ics_none), ics_lst) ics <- lapply(as.list(seq_len(V)), function(l) { spvim_ics(Z, z_counts, W, v, est, G, c_n, lapply(all_ics_lst, function(k) k[[l]]), full_type) }) for (j in 1:(ncol(X) + 1)) { se_lst <- lapply(ics, spvim_se, idx = j, gamma = gamma, na_rm = na.rm) var <- mean(unlist(lapply(se_lst, function(l) l$se ^ 2))) ses[j] <- sqrt(var) var_v_contribs[j] <- mean(unlist(lapply(se_lst, function(l) l$var_v_contrib))) var_s_contribs[j] <- mean(unlist(lapply(se_lst, function(l) l$var_s_contrib))) } n_for_v <- min(unlist(lapply(ics, function(l) ncol(l$contrib_v)))) } else { all_ics_lst <- c(list(ic_none), ic_lst) ics <- spvim_ics(Z, z_counts, W, v, est, G, c_n, all_ics_lst, full_type) for (j in 1:(ncol(X) + 1)) { ses_res <- spvim_se(ics, j, gamma = gamma, na_rm = na.rm) ses[j] <- ses_res$se var_v_contribs[j] <- ses_res$var_v_contrib var_s_contribs[j] <- ses_res$var_s_contrib } n_for_v <- ncol(ics) } est_for_inference <- est v_for_inference <- v # if sample-splitting was requested and final_point_estimate isn't "split", estimate # the required quantities if (sample_splitting & (final_point_estimate != "split")) { k_fold_lst_for_est <- list( full = cross_fitting_folds, reduced = cross_fitting_folds ) cf_folds_for_est <- k_fold_lst_for_est$full cf_folds_for_est_cc <- k_fold_lst_for_est$full[C == 1] if (final_point_estimate == "full") { v_none_pred_object_for_est <- do.call(predictiveness_measure, c( list(type = full_type, fitted_values = preds_none, y = Y_cc, full_y = Y_cc, cross_fitting_folds = cf_folds_for_est_cc, C = C, Z = Z_in, folds_Z = cf_folds_for_est, ipc_weights = ipc_weights, ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), eif_arg_lst )) v_none_lst_for_est <- estimate(v_none_pred_object_for_est) v_none_for_est <- v_none_lst_for_est$point_est v_pred_object_list_for_est <- lapply(preds_lst, function(l) { do.call(predictiveness_measure, c( list(fitted_values = l$preds, y = Y_cc, full_y = Y_cc, cross_fitting_folds = cf_folds_for_est_cc, C = C, Z = Z_in, folds_Z = cf_folds_for_est, ipc_weights = ipc_weights, type = full_type, ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), eif_arg_lst )) }) v_eif_lst_for_est <- lapply(v_pred_object_list_for_est, estimate) v_lst_for_est <- lapply(v_eif_lst_for_est, function(lst) lst$point_est) v_for_est <- matrix(c(v_none_for_est, unlist(v_lst_for_est))) } else { # swap the roles for the folds full_test_for_est <- (k_fold_lst$sample_splitting_folds == 2) cf_folds_full_for_est <- k_fold_lst$reduced 
cf_folds_full_cc_for_est <- cf_folds_full_for_est[C[full_test_for_est] == 1] full_test_cc_for_est <- full_test_for_est[C == 1] v_none_pred_object_for_est <- do.call(predictiveness_measure, c( list(type = full_type, fitted_values = preds_none[full_test_cc_for_est], y = Y_cc[full_test_cc_for_est], full_y = Y_cc, cross_fitting_folds = cf_folds_full_cc_for_est, C = C[full_test_for_est], Z = Z_in[full_test_for_est, , drop = FALSE], folds_Z = cf_folds_full_for_est, ipc_weights = ipc_weights[full_test_for_est], ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), eif_arg_lst )) v_none_lst_for_est <- estimate(v_none_pred_object_for_est) v_none_for_est <- mean(c(v_none, v_none_lst_for_est$point_est)) v_pred_object_list_for_est <- lapply(preds_lst, function(l) { do.call(predictiveness_measure, c( list(fitted_values = l$preds[full_test_cc_for_est], y = Y_cc[full_test_cc_for_est], full_y = Y_cc, cross_fitting_folds = cf_folds_full_cc_for_est, C = C[full_test_for_est], Z = Z_in[full_test_for_est, , drop = FALSE], folds_Z = cf_folds_full_for_est, ipc_weights = ipc_weights[full_test_for_est], type = full_type, ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), eif_arg_lst )) }) v_eif_lst_for_est <- lapply(v_pred_object_list_for_est, estimate) v_lst_for_est <- lapply(v_eif_lst_for_est, function(lst) lst$point_est) v_for_est <- matrix(rowMeans(cbind(v, matrix(c(v_none_lst_for_est$point_est, unlist(v_lst_for_est)))))) } v_W_for_est <- sqrt(W) %*% v_for_est c_n_for_est <- matrix(c(v_none_for_est, v_for_est[length(v_for_est)] - v_none_for_est), ncol = 1) ls_matrix_for_est <- rbind(2 * t(A_W) %*% v_W_for_est, c_n_for_est) ls_solution_for_est <- MASS::ginv(kkt_matrix) %*% ls_matrix_for_est est <- ls_solution_for_est[1:(ncol(X) + 1), , drop = FALSE] } # if est < 0, set to zero and print warning if (any(est < 0) & scale_est) { est[est < 0] <- 0 warning("One or more original estimates < 0; returning zero for these indices.") } # calculate the confidence intervals cis <- vimp_ci(est_for_inference[-1], ses[-1], scale = scale, level = 1 - alpha) # compute a hypothesis test against the null of zero importance v_none_pred_object_0 <- do.call(predictiveness_measure, c( list(type = full_type, fitted_values = preds_none[redu_test_cc], y = Y_cc[redu_test_cc], full_y = Y_cc, cross_fitting_folds = cf_folds_redu_cc, C = C[redu_test], Z = Z_in[redu_test, , drop = FALSE], folds_Z = cf_folds_redu, ipc_weights = ipc_weights[redu_test], ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), eif_arg_lst )) v_none_lst_0 <- estimate(v_none_pred_object_0) v_none_0 <- v_none_lst_0$point_est ic_none_0 <- v_none_lst_0$eif ics_none_0 <- v_none_lst_0$all_eifs if (cross_fitted_se) { var_none_0 <- mean(unlist(lapply(ics_none_0, function(ic) mean(ic ^ 2, na.rm = na.rm)))) } else { var_none_0 <- mean(ic_none_0 ^ 2, na.rm = na.rm) } se_none_0 <- sqrt(var_none_0 / sum(redu_test_cc)) # get shapley vals + null predictiveness shapley_vals_plus <- est + est[1] ses_one <- sqrt((var_v_contribs * n_for_v + var_s_contribs) / sum(full_test_cc) + se_none_0 ^ 2) # save objects necessary to compute the test statistics test_stat_lst <- list(ests = shapley_vals_plus, ses = ses_one, est_0 = v_none_0, se_0 = se_none_0, n_for_v = n_for_v) test_statistics <- unlist(lapply( as.list(2:length(est)), function(j, ests, ses, est_0, se_0, delta) { (ests[j] - est_0 - delta) / sqrt(ses[j] ^ 2 + se_0 ^ 2) 
}, ests = shapley_vals_plus, ses = ses_one, est_0 = v_none_0, se_0 = se_none_0, delta = delta )) p_values <- 1 - pnorm(test_statistics) hyp_tests <- p_values < alpha # create the output and return it mat <- tibble::tibble( s = as.character(1:ncol(X)), est = est[-1], se = ses[-1], cil = cis[, 1], ciu = cis[, 2], test = hyp_tests, p_value = p_values, var_v_contribs = var_v_contribs[-1], var_s_contribs = var_s_contribs[-1] ) output <- list(s = as.character(1:ncol(X)), SL.library = SL.library, v = v, preds_lst = c(list(preds_none), lapply(preds_lst, function(l) l$preds )), est = est, ic_lst = all_ics_lst, ic = ics, se = ses, var_v_contribs = var_v_contribs, var_s_contribs = var_s_contribs, ci = cis, test = hyp_tests, p_value = p_values, test_statistic = test_statistics, test_statistic_computation = test_stat_lst, gamma = gamma, alpha = alpha, delta = delta, y = Y, ipc_weights = ipc_weights, ipc_scale = ipc_scale, scale = scale, mat = mat) # make it also an vim object tmp.cls <- class(output) class(output) <- c("vim", type, tmp.cls) return(output) }
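# Illustrative sketch (not part of the package): the "constrained wls" step inside
# sp_vim() above solves min || W^{1/2} (Z psi - v) ||^2 subject to G psi = c_n via a
# KKT linear system and a generalized inverse. The toy Z, W, and v below are made-up
# values chosen only to show the mechanics.
p <- 3
Z_toy <- rbind(c(1, 0, 0, 0),  # null subset (intercept column plus p features)
               c(1, 1, 0, 0),
               c(1, 0, 1, 0),
               c(1, 1, 1, 0),
               c(1, 1, 1, 1))  # full subset
W_toy <- diag(rep(1 / nrow(Z_toy), nrow(Z_toy)))
v_toy <- c(0, 0.12, 0.08, 0.22, 0.3)
G_toy <- rbind(c(1, rep(0, p)), rep(1, p + 1) - c(1, rep(0, p)))
c_n_toy <- matrix(c(v_toy[1], v_toy[5] - v_toy[1]), ncol = 1)
A_W_toy <- sqrt(W_toy) %*% Z_toy
v_W_toy <- sqrt(W_toy) %*% v_toy
kkt_toy <- rbind(cbind(2 * t(A_W_toy) %*% A_W_toy, t(G_toy)),
                 cbind(G_toy, matrix(0, nrow = 2, ncol = 2)))
rhs_toy <- rbind(2 * t(A_W_toy) %*% v_W_toy, c_n_toy)
sol_toy <- MASS::ginv(kkt_toy) %*% rhs_toy
psi_toy <- sol_toy[1:(p + 1), , drop = FALSE]  # SPVIM values (intercept first)
sum(psi_toy[-1])  # equals v(full) - v(null) = 0.3, by the constraint G psi = c_n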
vimp/R/sp_vim.R
#' Influence function estimates for SPVIMs
#'
#' Compute the influence functions for the contribution from sampling observations and subsets.
#'
#' @param Z the matrix of presence/absence of each feature (columns) in each sampled subset (rows)
#' @param z_counts the number of times each unique subset was sampled
#' @param W the matrix of weights
#' @param v the estimated predictiveness measures
#' @param psi the estimated SPVIM values
#' @param G the constraint matrix
#' @param c_n the constraint values
#' @param ics a list of influence function values for each predictiveness measure
#' @param measure the type of measure (e.g., "r_squared" or "auc")
#'
#' @return a named list of length 2; \code{contrib_v} is the contribution from estimating V, while \code{contrib_s} is the contribution from sampling subsets.
#'
#' @details The processes for sampling observations and sampling subsets are independent. Thus, we can compute the influence function separately for each sampling process. For further details, see the paper by Williamson and Feng (2020).
#'
#' @export
spvim_ics <- function(Z, z_counts, W, v, psi, G, c_n, ics, measure) {
  # compute contribution from estimating V
  Z_W <- t(Z) %*% W
  A_m <- Z_W %*% Z
  A_m_inv <- MASS::ginv(A_m)
  prefix <- A_m_inv %*% Z_W
  ic_mat <- t(as.matrix(data.frame(ics)))
  phi_01 <- prefix %*% ic_mat
  # compute contribution from sampling S
  qr_decomp <- qr(t(G))
  U_2 <- qr.Q(qr_decomp, complete = TRUE)[, 3:ncol(Z), drop = FALSE]
  V <- t(U_2) %*% (t(Z) %*% W %*% Z) %*% U_2
  phi_02_shared_mat <- (-1) * U_2 %*% MASS::ginv(V)
  phi_02_uniq_vectors <- matrix(NA, nrow = nrow(Z), ncol = ncol(U_2))
  for (z in 1:nrow(Z)) {
    phi_02_uniq_vectors[z, ] <- as.vector(Z[z, , drop = FALSE] %*% psi - v[z]) *
      as.vector(t(U_2) %*% t(Z[z, , drop = FALSE]))
  }
  phi_02_uniq <- phi_02_shared_mat %*% t(phi_02_uniq_vectors)
  phi_02_uniq_lst <- split(phi_02_uniq, rep(1:ncol(phi_02_uniq), each = nrow(phi_02_uniq)))
  phi_02_rep_lst <- sapply(1:length(phi_02_uniq_lst), function(s) replicate(z_counts[s], phi_02_uniq_lst[[s]]))
  phi_02 <- do.call(cbind, phi_02_rep_lst)
  return(list(contrib_v = phi_01, contrib_s = phi_02))
}
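# Illustrative sketch (not part of the package): toy inputs showing the shapes that
# spvim_ics() expects and returns. All values below (subsets, weights, predictiveness,
# SPVIM values, EIF draws) are made up for illustration only.
set.seed(1234)
p <- 3
n <- 10
Z_toy <- rbind(c(1, 0, 0, 0),  # null subset (intercept column plus p features)
               c(1, 1, 0, 0),
               c(1, 1, 1, 0),
               c(1, 1, 1, 1))  # full subset
z_counts_toy <- c(2, 1, 1, 2)  # times each unique subset was sampled
W_toy <- diag(z_counts_toy / sum(z_counts_toy))
v_toy <- c(0, 0.1, 0.25, 0.3)  # estimated predictiveness of each subset
psi_toy <- matrix(c(0, 0.15, 0.1, 0.05), ncol = 1)  # toy SPVIM values
G_toy <- rbind(c(1, rep(0, p)), rep(1, p + 1) - c(1, rep(0, p)))
c_n_toy <- matrix(c(v_toy[1], v_toy[4] - v_toy[1]), ncol = 1)
ics_toy <- lapply(seq_len(nrow(Z_toy)), function(i) stats::rnorm(n))
ic_lst <- spvim_ics(Z = Z_toy, z_counts = z_counts_toy, W = W_toy, v = v_toy,
                    psi = psi_toy, G = G_toy, c_n = c_n_toy, ics = ics_toy,
                    measure = "r_squared")
dim(ic_lst$contrib_v)  # (p + 1) x n: contribution from sampling observations
dim(ic_lst$contrib_s)  # (p + 1) x sum(z_counts_toy): contribution from sampling subsets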
vimp/R/spvim_ics.R
#' Standard error estimate for SPVIM values
#'
#' Compute standard error estimates based on the estimated influence function
#' for a SPVIM value of interest.
#'
#' @param ics the influence function estimates based on the contributions
#'   from sampling observations and sampling subsets: a list of length two
#'   resulting from a call to \code{spvim_ics}.
#' @param idx the index of interest
#' @param gamma the proportion of the sample size used when sampling subsets
#' @param na_rm remove \code{NA}s?
#'
#' @return A list containing the standard error estimate for the desired SPVIM
#'   value (\code{se}), along with its two components: the variance contribution
#'   from sampling observations (\code{var_v_contrib}) and the variance
#'   contribution from sampling subsets (\code{var_s_contrib}).
#'
#' @details Since the processes for sampling observations and subsets are
#'   independent, the variance for a given SPVIM estimator is simply the sum of
#'   the variances based on sampling observations and on sampling subsets.
#'
#' @seealso \code{\link[vimp]{spvim_ics}} for how the influence functions are estimated.
#' @importFrom stats var
#' @export
spvim_se <- function(ics, idx = 1, gamma = 1, na_rm = FALSE) {
  var_v <- mean(ics$contrib_v[idx, ] ^ 2, na.rm = na_rm)
  var_s <- mean(ics$contrib_s[idx, ] ^ 2, na.rm = na_rm)
  var_v_contrib <- var_v / ncol(ics$contrib_v)
  var_s_contrib <- var_s / ncol(ics$contrib_s) * (1 / gamma)
  se <- sqrt(var_v_contrib + var_s_contrib)
  return(list(se = se, var_v_contrib = var_v_contrib, var_s_contrib = var_s_contrib))
}
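# Illustrative sketch (not part of the package): with mocked influence-function
# contributions (random placeholders, not real estimates), spvim_se() adds the
# variance from sampling observations to the variance from sampling subsets.
set.seed(5678)
p <- 3
n <- 100
mock_ics <- list(
  contrib_v = matrix(stats::rnorm((p + 1) * n), nrow = p + 1),
  contrib_s = matrix(stats::rnorm((p + 1) * n), nrow = p + 1)
)
se_lst <- spvim_se(ics = mock_ics, idx = 2, gamma = 1, na_rm = FALSE)
# the standard error is sqrt(var_v_contrib + var_s_contrib)
all.equal(se_lst$se, sqrt(se_lst$var_v_contrib + se_lst$var_s_contrib))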
vimp/R/spvim_se.R
# Checkers --------------------------------------------------------------------- #' Check inputs to a call to vim, cv_vim, or sp_vim #' #' @details Ensure that inputs to \code{vim}, \code{cv_vim}, and \code{sp_vim} #' follow the correct formats. #' #' @param Y the outcome #' @param X the covariates #' @param f1 estimator of the population-optimal prediction function #' using all covariates #' @param f2 estimator of the population-optimal prediction function #' using the reduced set of covariates #' @param indx the index or indices of the covariate(s) of interest #' #' @return None. Called for the side effect of stopping the algorithm if #' any inputs are in an unexpected format. #' @export check_inputs <- function(Y, X, f1, f2, indx) { if (is.null(f1) && is.null(Y)) { stop("You must enter either Y or fitted values for the full regression.") } if (is.null(f2) && is.null(X)) { stop("You must enter either X or fitted values for the reduced regression.") } # if indx is outside the range of X, stop and throw an error if (!is.null(X)) { if (any(indx > dim(X)[2])) { stop(paste0("One of the feature indices in 'indx' is larger than the ", "total number of features in X. Please specify a new index ", "subgroup in 'indx'.")) } } } #' Check pre-computed fitted values for call to vim, cv_vim, or sp_vim #' #' @details Ensure that inputs to \code{vim}, \code{cv_vim}, and \code{sp_vim} #' follow the correct formats. #' #' @param Y the outcome #' @param f1 estimator of the population-optimal prediction function #' using all covariates #' @param f2 estimator of the population-optimal prediction function #' using the reduced set of covariates #' @param cross_fitted_f1 cross-fitted estimator of the population-optimal #' prediction function using all covariates #' @param cross_fitted_f2 cross-fitted estimator of the population-optimal #' prediction function using the reduced set of covariates #' @param sample_splitting_folds the folds for sample-splitting (used for #' hypothesis testing) #' @param cross_fitting_folds the folds for cross-fitting (used for point #' estimates of variable importance in \code{cv_vim} and \code{sp_vim}) #' @param cross_fitted_se logical; should cross-fitting be used to estimate #' standard errors? #' @param V the number of cross-fitting folds #' @param ss_V the number of folds for CV (if sample_splitting is TRUE) #' @param cv a logical flag indicating whether or not to use cross-fitting #' #' @return None. Called for the side effect of stopping the algorithm if #' any inputs are in an unexpected format. 
#' @export check_fitted_values <- function(Y = NULL, f1 = NULL, f2 = NULL, cross_fitted_f1 = NULL, cross_fitted_f2 = NULL, sample_splitting_folds = NULL, cross_fitting_folds = NULL, cross_fitted_se = TRUE, V = NULL, ss_V = NULL, cv = FALSE) { if (is.null(Y)) stop("Y must be entered.") if (!cv) { if (length(f1) == 0 || length(f2) == 0) { stop("Fitted values must be entered if run_regression = FALSE.") } if (length(sample_splitting_folds) != length(Y)) { stop("The entered folds must be the same length as the outcome of interest.") } } else { if (is.null(cross_fitted_f1)) { stop(paste0("You must specify a list of predicted values from a ", "regression of Y on X.")) } if (is.null(cross_fitted_f2)) { stop(paste0("You must specify a list of predicted values from either ", "(a) a regression of the fitted values from the Y on X ", "regression on the reduced set of covariates, or (b) ", "a regression of Y on the reduced set of covariates.")) } if (is.numeric(cross_fitted_f1)) { if (length(cross_fitted_f1) != length(Y)) { stop(paste0("There must be a predicted value for each observation ", "in the dataset.")) } } else { if (length(cross_fitted_f1) != V) { stop(paste0("There must be a predicted value for each observation ", "in the dataset, in a list of length equal to the number ", "of cross-fitting folds.")) } } if (is.null(f1) & !cross_fitted_se) { stop(paste0("You must enter an estimator of the population-optimal predictor", " using all covariates.")) } if (is.null(f2) & !cross_fitted_se) { stop(paste0("You must enter an estimator of the population-optimal predictor", " using the reduced set of covariates.")) } if (length(sample_splitting_folds) != length(unique(cross_fitting_folds))) { stop("The sample splitting folds must be the same length as the number of cross-fitting folds.") } if (is.null(cross_fitting_folds)) { stop("You must specify the folds that were used for cross-fitting.") } } } #' Create complete-case outcome, weights, and Z #' #' @param Y the outcome #' @param C indicator of missing or observed #' @param Z the covariates observed in phase 1 and 2 data #' @param X all covariates #' @param ipc_weights the weights #' #' @return a list, with the complete-case outcome, weights, and Z matrix #' @export create_z <- function(Y, C, Z, X, ipc_weights) { # set up internal data -- based on complete cases only Y_cc <- subset(Y, C == 1, drop = FALSE) weights_cc <- ipc_weights[C == 1] if (!all(C == 1) || !all(ipc_weights == 1)) { if (is.character(Z)) { tmp_Z <- Z[Z != "Y"] minus_X <- as.numeric(gsub("x", "", tmp_Z, ignore.case = TRUE)) # check to see if it is only part of X matrix if (any(sapply(seq_along(minus_X), function(j) length(minus_X[j]) > 0))) { if (any(grepl("Y", Z))) { Z_in <- cbind.data.frame(as.data.frame(mget("Y")), X[, minus_X]) } else { Z_in <- as.data.frame(X[, minus_X]) } } else { Z_in <- as.data.frame(mget(Z)) } } else { stop("Please enter a character vector corresponding to the names of the fully observed data.") } } else { Z_in <- NULL } list(Y = Y_cc, weights = weights_cc, Z = Z_in) } #' Process argument list for Super Learner estimation of the EIF #' #' @param arg_lst the list of arguments for Super Learner #' #' @return a list of modified arguments for EIF estimation #' @export process_arg_lst <- function(arg_lst) { if (!is.null(names(arg_lst)) && any(grepl("cvControl", names(arg_lst)))) { arg_lst$cvControl$stratifyCV <- FALSE } if (!is.null(names(arg_lst)) && any(grepl("method", names(arg_lst)))) { if (grepl("NNloglik", arg_lst$method)) { arg_lst$method <- "method.NNLS" 
} else { arg_lst$method <- "method.CC_LS" } } if (!is.null(names(arg_lst)) && any(grepl("family", names(arg_lst)))) { arg_lst$family <- stats::gaussian() } arg_lst } # ------------------------------------------------------------------------------ #' Obtain the type of VIM to estimate using partial matching #' #' @param type the partial string indicating the type of VIM #' #' @return the full string indicating the type of VIM #' @export get_full_type <- function(type) { types <- c("accuracy", "auc", "deviance", "r_squared", "anova", "average_value") full_type <- types[pmatch(type, types)] if (is.na(full_type)) { stop("We currently do not support the entered variable importance parameter.") } if (full_type == "anova" ) { message(paste0("Hypothesis testing is not available for type = 'anova'. ", "If you want an R-squared-based hypothesis test, please enter ", "type = 'r_squared'.")) } full_type } #' Return an estimator on a different scale #' #' @details It may be of interest to return an estimate (or confidence interval) #' on a different scale than originally measured. For example, computing a #' confidence interval (CI) for a VIM value that lies in (0,1) on the logit scale #' ensures that the CI also lies in (0, 1). #' #' @param obs_est the observed VIM estimate #' @param grad the estimated efficient influence function #' @param scale the scale to compute on #' #' @return the scaled estimate #' @export scale_est <- function(obs_est = NULL, grad = NULL, scale = "identity") { if (scale == "logit") { this_grad <- 1 / (obs_est - obs_est ^ 2) est <- stats::plogis(stats::qlogis(obs_est) + this_grad * mean(grad)) } else if (scale == "log") { this_grad <- 1 / obs_est est <- exp(log(obs_est) + this_grad * mean(grad)) } else { est <- obs_est + mean(grad) } est } #' Estimate projection of EIF on fully-observed variables #' #' @param obs_grad the estimated (observed) EIF #' @inheritParams measure_accuracy #' #' @return the projection of the EIF onto the fully-observed variables #' @importFrom stats rnorm estimate_eif_projection <- function(obs_grad = NULL, C = NULL, Z = NULL, ipc_fit_type = NULL, ipc_eif_preds = NULL, ...) { if (ipc_fit_type != "external") { if (length(unique(obs_grad)) == 1) { noisy_obs_grad <- stats::rnorm(length(obs_grad), mean = obs_grad, sd = 1e-5) obs_grad <- noisy_obs_grad } ipc_eif_mod <- SuperLearner::SuperLearner( Y = obs_grad, X = Z[C == 1, , drop = FALSE], ... ) ipc_eif_preds <- SuperLearner::predict.SuperLearner( ipc_eif_mod, newdata = Z, onlySL = TRUE )$pred } ipc_eif_preds } # ------------------------------------------------------------------------------ #' Create Folds for Cross-Fitting #' #' @param y the outcome #' @param V the number of folds #' @param stratified should the folds be stratified based on the outcome? 
#' @param C a vector indicating whether or not the observation is fully observed; #' 1 denotes yes, 0 denotes no #' @param probs vector of proportions for each fold number #' @return a vector of folds #' @export make_folds <- function(y, V = 2, stratified = FALSE, C = NULL, probs = rep(1/V, V)) { folds <- vector("numeric", length(y)) if (length(unique(probs)) == 1) { if (stratified) { if (length(unique(C)) <= 1) { folds_1 <- sample(rep(seq_len(V), length = sum(y == 1))) folds_0 <- sample(rep(seq_len(V), length = sum(y == 0))) folds[y == 1] <- folds_1 folds[y == 0] <- folds_0 } else { folds_11 <- sample(rep(seq_len(V), length = sum(y == 1 & C == 1))) folds_10 <- sample(rep(seq_len(V), length = sum(y == 0 & C == 1))) folds_01 <- sample(rep(seq_len(V), length = sum(y == 1 & C == 0))) folds_00 <- sample(rep(seq_len(V), length = sum(y == 0 & C == 0))) folds[y == 1 & C == 1] <- folds_11 folds[y == 0 & C == 1] <- folds_10 folds[y == 1 & C == 0] <- folds_01 folds[y == 0 & C == 0] <- folds_00 } } else { folds <- sample(rep(seq_len(V), length = length(y))) } } else { if (stratified) { if (length(unique(C)) <= 1) { folds_1 <- rep(seq_len(V), probs * sum(y == 1)) folds_1 <- c(folds_1, sample(seq_len(V), size = sum(y == 1) - length(folds_1), replace = TRUE, prob = probs)) folds_0 <- rep(seq_len(V), probs * sum(y == 0)) folds_0 <- c(folds_0, sample(seq_len(V), size = sum(y == 0) - length(folds_0), replace = TRUE, prob = probs)) folds_1 <- sample(folds_1) folds_0 <- sample(folds_0) folds[y == 1] <- folds_1 folds[y == 0] <- folds_0 } else { folds_11 <- rep(seq_len(V), probs * sum(y == 1 & C == 1)) folds_10 <- rep(seq_len(V), probs * sum(y == 1 & C == 0)) folds_01 <- rep(seq_len(V), probs * sum(y == 0 & C == 1)) folds_00 <- rep(seq_len(V), probs * sum(y == 0 & C == 0)) folds_11 <- c(folds_11, sample(seq_len(V), size = sum(y == 1 & C == 1) - length(folds_11), replace = TRUE, prob = probs)) folds_01 <- c(folds_01, sample(seq_len(V), size = sum(y == 0 & C == 1) - length(folds_01), replace = TRUE, prob = probs)) folds_10 <- c(folds_10, sample(seq_len(V), size = sum(y == 1 & C == 0) - length(folds_10), replace = TRUE, prob = probs)) folds_00 <- c(folds_00, sample(seq_len(V), size = sum(y == 0 & C == 0) - length(folds_00), replace = TRUE, prob = probs)) folds_11 <- sample(folds_11) folds_01 <- sample(folds_01) folds_10 <- sample(folds_10) folds_00 <- sample(folds_00) folds[y == 1 & C == 1] <- folds_11 folds[y == 0 & C == 1] <- folds_10 folds[y == 1 & C == 0] <- folds_01 folds[y == 0 & C == 0] <- folds_00 } } else { these_probs <- round(probs * length(y)) if (sum(these_probs) != length(y)) { these_probs[which.min(these_probs)] <- these_probs[which.min(these_probs)] - 1 } folds <- sample(rep(seq_len(V), these_probs)) } } return(folds) } #' Turn folds from 2K-fold cross-fitting into individual K-fold folds #' #' @param cross_fitting_folds the vector of cross-fitting folds #' @param sample_splitting_folds the sample splitting folds #' @param C vector of whether or not we measured the observation in phase 2 #' #' @return the two sets of testing folds for K-fold cross-fitting #' @export make_kfold <- function(cross_fitting_folds, sample_splitting_folds = rep(1, length(unique(cross_fitting_folds))), C = rep(1, length(cross_fitting_folds))) { # get the folds for the full and reduced nuisance functions full_folds <- which(sample_splitting_folds == 1) redu_folds <- which(sample_splitting_folds == 2) sample_splitting_vec <- vector("numeric", length = length(cross_fitting_folds)) sample_splitting_vec[cross_fitting_folds 
%in% full_folds] <- 1 sample_splitting_vec[cross_fitting_folds %in% redu_folds] <- 2 # create K-fold folds, i.e., 1:K for each full_cf_folds <- cross_fitting_folds[cross_fitting_folds %in% full_folds] redu_cf_folds <- cross_fitting_folds[cross_fitting_folds %in% redu_folds] unique_full <- sort(unique(full_cf_folds)) unique_redu <- sort(unique(redu_cf_folds)) K <- length(unique_full) folds_k <- seq_len(K) k_fold_full <- full_cf_folds k_fold_redu <- redu_cf_folds for (v in seq_len(K)) { k_fold_full <- replace(k_fold_full, full_cf_folds == unique_full[v], folds_k[v]) k_fold_redu <- replace(k_fold_redu, redu_cf_folds == unique_redu[v], folds_k[v]) } # return a list; the first four values are the cross-fitting folds, # while the last two values replicate the sample-splitting folds list(full = k_fold_full, reduced = k_fold_redu, sample_splitting_folds = sample_splitting_vec) } #' Return test-set only data #' #' @param arg_lst a list of estimates, data, etc. #' @param k the index of interest #' #' @return the test-set only data #' @export get_test_set <- function(arg_lst, k) { folds <- arg_lst$cross_fitting_folds folds_Z <- arg_lst$folds_Z test_lst <- arg_lst test_lst$fitted_values <- arg_lst$fitted_values[folds == k] test_lst$y <- arg_lst$y[folds == k] test_lst$C <- arg_lst$C[folds_Z == k] test_lst$Z <- arg_lst$Z[folds_Z == k, , drop = FALSE] test_lst$ipc_weights <- arg_lst$ipc_weights[folds_Z == k] test_lst$ipc_eif_preds <- arg_lst$ipc_eif_preds[folds_Z == k] test_lst$nuisance_estimators <- lapply(arg_lst$nuisance_estimators, function(l) { l[folds == k] }) test_lst$a <- arg_lst$a[folds == k] return(test_lst) } # For sp_vim ------------------------------------------------------------------- #' Run a Super Learner for the provided subset of features #' #' @param Y the outcome #' @param X the covariates #' @param V the number of folds #' @param SL.library the library of candidate learners #' @param univariate_SL.library the library of candidate learners for #' single-covariate regressions #' @param s the subset of interest #' @param cv_folds the CV folds #' @param sample_splitting logical; should we use sample-splitting for #' predictiveness estimation? #' @param ss_folds the sample-splitting folds; only used if #' \code{sample_splitting = TRUE} #' @param split the split to use for sample-splitting; only used if #' \code{sample_splitting = TRUE} #' @param verbose should we print progress? defaults to FALSE #' @param progress_bar the progress bar to print to (only if verbose = TRUE) #' @param indx the index to pass to progress bar (only if verbose = TRUE) #' @param weights weights to pass to estimation procedure #' @param cross_fitted_se if \code{TRUE}, uses a cross-fitted estimator of #' the standard error; otherwise, uses the entire dataset #' @param full should this be considered a "full" or "reduced" regression? #' If \code{NULL} (the default), this is determined automatically; a full #' regression corresponds to \code{s} being equal to the full covariate vector. #' For SPVIMs, can be entered manually. #' @param vector should we return a vector (\code{TRUE}) or a list (\code{FALSE})? #' @param ... 
other arguments to Super Learner #' #' @return a list of length V, with the results of predicting on the hold-out data for each v in 1 through V #' @export run_sl <- function(Y = NULL, X = NULL, V = 5, SL.library = "SL.glm", univariate_SL.library = NULL, s = 1, cv_folds = NULL, sample_splitting = TRUE, ss_folds = NULL, split = 1, verbose = FALSE, progress_bar = NULL, indx = 1, weights = rep(1, nrow(X)), cross_fitted_se = TRUE, full = NULL, vector = TRUE, ...) { # if verbose, print what we're doing and make sure that SL is verbose; # set up the argument list for the Super Learner / CV.SuperLearner arg_lst <- list(...) if (is.null(arg_lst$family)) { arg_lst$family <- switch( (length(unique(Y)) == 2) + 1, stats::gaussian(), stats::binomial() ) } if (is.character(arg_lst$family)) { family <- get(arg_lst$family, mode = "function", envir = parent.frame()) arg_lst$family <- family() } if ((arg_lst$family$family == "binomial") & (length(unique(Y)) > 2)) { arg_lst$family <- stats::gaussian() } if (verbose) { if (is.null(arg_lst$cvControl)) { arg_lst$cvControl <- list(verbose = TRUE) } else { arg_lst$cvControl$verbose <- TRUE } } arg_lst_bool <- is.null(arg_lst$cvControl) | ifelse(!is.null(arg_lst$cvControl), arg_lst$cvControl$V != V, FALSE) if (arg_lst_bool) { if (V > 1) { arg_lst$innerCvControl <- list(list(V = switch(as.numeric(!is.null(arg_lst$cvControl$V)) + 1, 5, arg_lst$cvControl$V), stratifyCV = switch(as.numeric(!is.null(arg_lst$cvControl$stratifyCV)) + 1, FALSE, arg_lst$cvControl$stratifyCV))) } arg_lst$cvControl$V <- ifelse(V == 1, 5, V) } if (is.null(arg_lst$obsWeights)) { arg_lst$obsWeights <- weights } arg_lst_cv <- arg_lst # fit the super learner for a given set of variables red_X <- as.data.frame(X[, s, drop = FALSE]) if (is.null(cv_folds)) { cv_folds <- make_folds(Y, V = V, stratified = (length(unique(Y)) == 2)) } cf_folds_lst <- lapply(as.list(seq_len(V)), function(v) { which(cv_folds == v) }) if (V > 1) { arg_lst_cv$cvControl$validRows <- cf_folds_lst } this_sl_lib <- SL.library # if univariate regression (i.e., length(s) == 1) then check univariate_SL.library # if it exists, use it; otherwise, use the normal library if (length(s) == 1) { if (!is.null(univariate_SL.library)) { this_sl_lib <- univariate_SL.library } requires_2d <- c("glmnet", "polymars") for (i in 1:length(requires_2d)) { if (any(grepl(requires_2d[i], this_sl_lib)) & (ncol(red_X) == 1)) { red_X <- cbind.data.frame(V0 = 0, red_X) } } } full_arg_lst_cv <- c(arg_lst_cv, list( Y = Y, X = red_X, SL.library = this_sl_lib )) # if dim(red_X) == 0, then return the mean if (ncol(red_X) == 0) { this_sl_lib <- eval(parse(text = "SL.mean")) } # if a single learner, don't do inner CV if ((length(this_sl_lib) == 1) & !is.list(this_sl_lib)) { if (is.character(this_sl_lib)) { this_sl_lib <- eval(parse(text = this_sl_lib)) } preds <- list() preds_vector <- rep(NA, length = length(Y)) if (V == 1) { fit <- NA preds <- NA preds_vector <- NA } else { for (v in seq_len(V)) { train_v <- (cv_folds != v) test_v <- (cv_folds == v) fit <- this_sl_lib(Y = Y[train_v, , drop = FALSE], X = red_X[train_v, , drop = FALSE], newX = red_X[test_v, , drop = FALSE], family = full_arg_lst_cv$family, obsWeights = full_arg_lst_cv$obsWeights[train_v]) preds[[v]] <- fit$pred preds_vector[test_v] <- fit$pred } preds_vector <- preds_vector[!is.na(preds_vector)] } if (vector) { preds <- preds_vector } } else if (V == 1) { # once again, don't do anything; will fit at end fit <- NA preds <- NA preds_vector <- NA } else { # fit a cross-validated Super Learner fit 
<- suppressWarnings(do.call(SuperLearner::CV.SuperLearner, full_arg_lst_cv)) # extract predictions on correct sampled-split folds if (is.null(full)) { all_equal_s <- all.equal(s, 1:ncol(X)) is_full <- switch((sample_splitting) + 1, TRUE, !is.character(all_equal_s) & as.logical(all_equal_s)) } else { is_full <- full } preds <- fit$SL.predict } # if cross_fitted_se, we're done; otherwise, re-fit to the entire dataset if (!cross_fitted_se) { # refit to the entire dataset bool <- switch(as.numeric(sample_splitting) + 1, rep(TRUE, length(Y)), ss_folds == split) if (!is.character(this_sl_lib)) { fit_se <- this_sl_lib(Y = Y[bool, ], X = red_X[bool, , drop = FALSE], newX = red_X[bool, , drop = FALSE], family = arg_lst$family, obsWeights = arg_lst$obsWeights[bool]) preds_se <- fit_se$pred if (all(is.na(preds))) { preds <- preds_se fit <- fit_se } } else { arg_lst$obsWeights <- weights[bool] if (any(grepl("parallel", names(arg_lst)))) { # remove it for SL calls arg_lst$parallel <- NULL } fit_se <- do.call( SuperLearner::SuperLearner, args = c(arg_lst[-which(names(arg_lst) == "innerCvControl")], list( Y = Y[bool, ], X = red_X[bool, , drop = FALSE], SL.library = this_sl_lib )) ) preds_se <- fit_se$SL.predict if (all(is.na(preds))) { fit <- fit_se preds <- preds_se } } } else { fit_se <- NA preds_se <- NA } if (verbose) { setTxtProgressBar(progress_bar, indx) } return(list(fit = fit, preds = preds, ss_folds = ss_folds, cv_folds = cv_folds, fit_non_cf_se = fit_se, preds_non_cf_se = preds_se)) } # estimate nuisance functions (for average value) ------------------------------ #' Estimate nuisance functions for average value-based VIMs #' #' @inheritParams cv_vim #' @inheritParams run_sl #' @param fit the fitted nuisance function estimator #' @param split the sample split to use #' #' @return nuisance function estimators for use in the average value VIM: #' the treatment assignment based on the estimated optimal rule #' (based on the estimated outcome regression); the expected outcome under the #' estimated optimal rule; and the estimated propensity score. #' #' @export estimate_nuisances <- function(fit, X, exposure_name, V = 1, SL.library, sample_splitting, sample_splitting_folds, verbose, weights, cross_fitted_se, split = 1, ...) { A <- X %>% dplyr::pull(!!exposure_name) W <- X %>% dplyr::select(-!!exposure_name) # estimate the optimal rule A_1 <- cbind.data.frame(A = 1, W) A_0 <- cbind.data.frame(A = 0, W) names(A_1)[1] <- exposure_name names(A_0)[1] <- exposure_name f_n <- as.numeric(stats::predict(fit, newdata = A_1)$pred > stats::predict(fit, newdata = A_0)$pred) # estimate the propensity score g_n <- run_sl(Y = f_n, X = W, V = V, SL.library = SL.library, s = 1:ncol(W), sample_splitting = sample_splitting, ss_folds = sample_splitting_folds, split = split, verbose = verbose, weights = weights, cross_fitted_se = cross_fitted_se, ...) # set up data based on f_n, f_n_reduced A_f <- cbind.data.frame(A = f_n, W) names(A_f)[1] <- exposure_name nuisance_estimators <- list(f_n = f_n, q_n = stats::predict(fit, newdata = A_f)$pred, g_n = stats::predict(g_n, A_f)$pred) return(nuisance_estimators) } # ------------------------------------- # release questions # ------------------------------------- # @keywords internal release_questions <- function() { c( "Have you run cran_prep <- rhub::check_for_cran(env_vars = c(R_COMPILE_AND_INSTALL_PACKAGES = 'always'), show_status = FALSE)?", "Have you run devtools::check_win_devel() and devtools::check_win_release()?" ) }
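# Illustrative sketch (not part of the package): a few of the exported helpers above
# on toy data -- partial matching of the VIM type, outcome-stratified fold creation,
# and re-scaling an estimate using its influence function. The toy outcome and
# influence-function values are made up for illustration only.
set.seed(20230101)
y_toy <- rbinom(50, size = 1, prob = 0.3)
get_full_type("r_sq")            # partial matching returns "r_squared"
folds_toy <- make_folds(y_toy, V = 2, stratified = TRUE)
table(folds_toy, y_toy)          # class balance is preserved (up to 1) across folds
scale_est(obs_est = 0.2, grad = rnorm(50, 0, 0.01), scale = "logit")  # stays in (0, 1)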
vimp/R/utils.R
#' Nonparametric Intrinsic Variable Importance Estimates and Inference #' #' Compute estimates of and confidence intervals for nonparametric intrinsic #' variable importance based on the population-level contrast between the oracle #' predictiveness using the feature(s) of interest versus not. #' #' @param Y the outcome. #' @param X the covariates. If \code{type = "average_value"}, then the exposure #' variable should be part of \code{X}, with its name provided in \code{exposure_name}. #' @param f1 the fitted values from a flexible estimation technique #' regressing Y on X. A vector of the same length as \code{Y}; if sample-splitting #' is desired, then the value of \code{f1} at each position should be the result #' of predicting from a model trained without that observation. #' @param f2 the fitted values from a flexible estimation technique #' regressing either (a) \code{f1} or (b) Y on X withholding the columns in #' \code{indx}. A vector of the same length as \code{Y}; if sample-splitting #' is desired, then the value of \code{f2} at each position should be the result #' of predicting from a model trained without that observation. #' @param indx the indices of the covariate(s) to calculate variable #' importance for; defaults to 1. #' @param type the type of importance to compute; defaults to #' \code{r_squared}, but other supported options are \code{auc}, #' \code{accuracy}, \code{deviance}, and \code{anova}. #' @param run_regression if outcome Y and covariates X are passed to #' \code{vimp_accuracy}, and \code{run_regression} is \code{TRUE}, #' then Super Learner will be used; otherwise, variable importance #' will be computed using the inputted fitted values. #' @param SL.library a character vector of learners to pass to #' \code{SuperLearner}, if \code{f1} and \code{f2} are Y and X, #' respectively. Defaults to \code{SL.glmnet}, \code{SL.xgboost}, #' and \code{SL.mean}. #' @param alpha the level to compute the confidence interval at. #' Defaults to 0.05, corresponding to a 95\% confidence interval. #' @param delta the value of the \eqn{\delta}-null (i.e., testing if #' importance < \eqn{\delta}); defaults to 0. #' @param scale should CIs be computed on original ("identity") or #' another scale? (options are "log" and "logit") #' @param na.rm should we remove NAs in the outcome and fitted values #' in computation? (defaults to \code{FALSE}) #' @param sample_splitting should we use sample-splitting to estimate the full and #' reduced predictiveness? Defaults to \code{TRUE}, since inferences made using #' \code{sample_splitting = FALSE} will be invalid for variables with truly zero #' importance. #' @param sample_splitting_folds the folds used for sample-splitting; #' these identify the observations that should be used to evaluate #' predictiveness based on the full and reduced sets of covariates, respectively. #' Only used if \code{run_regression = FALSE}. #' @param final_point_estimate if sample splitting is used, should the final point estimates #' be based on only the sample-split folds used for inference (\code{"split"}, the default), #' or should they instead be based on the full dataset (\code{"full"}) or the average #' across the point estimates from each sample split (\code{"average"})? All three #' options result in valid point estimates -- sample-splitting is only required for valid inference. 
#' @param stratified if run_regression = TRUE, then should the generated #' folds be stratified based on the outcome (helps to ensure class balance #' across cross-validation folds) #' @param C the indicator of coarsening (1 denotes observed, 0 denotes #' unobserved). #' @param Z either (i) NULL (the default, in which case the argument #' \code{C} above must be all ones), or (ii) a character vector #' specifying the variable(s) among Y and X that are thought to play a #' role in the coarsening mechanism. To specify the outcome, use \code{"Y"}; to #' specify covariates, use a character number corresponding to the desired #' position in X (e.g., \code{"1"}). #' @param ipc_scale what scale should the inverse probability weight correction be applied on (if any)? #' Defaults to "identity". (other options are "log" and "logit") #' @param ipc_weights weights for the computed influence curve (i.e., #' inverse probability weights for coarsened-at-random settings). #' Assumed to be already inverted (i.e., ipc_weights = 1 / [estimated #' probability weights]). #' @param ipc_est_type the type of procedure used for coarsened-at-random #' settings; options are "ipw" (for inverse probability weighting) or #' "aipw" (for augmented inverse probability weighting). #' Only used if \code{C} is not all equal to 1. #' @param scale_est should the point estimate be scaled to be greater than or equal to 0? #' Defaults to \code{TRUE}. #' @param nuisance_estimators_full (only used if \code{type = "average_value"}) #' a list of nuisance function estimators on the #' observed data (may be within a specified fold, for cross-fitted estimates). #' Specifically: an estimator of the optimal treatment rule; an estimator of the #' propensity score under the estimated optimal treatment rule; and an estimator #' of the outcome regression when treatment is assigned according to the estimated optimal rule. #' @param nuisance_estimators_reduced (only used if \code{type = "average_value"}) #' a list of nuisance function estimators on the #' observed data (may be within a specified fold, for cross-fitted estimates). #' Specifically: an estimator of the optimal treatment rule; an estimator of the #' propensity score under the estimated optimal treatment rule; and an estimator #' of the outcome regression when treatment is assigned according to the estimated optimal rule. #' @param exposure_name (only used if \code{type = "average_value"}) the name of #' the exposure of interest; binary, with 1 indicating presence of the exposure and #' 0 indicating absence of the exposure. #' @param bootstrap should bootstrap-based standard error estimates be computed? #' Defaults to \code{FALSE} (and currently may only be used if #' \code{sample_splitting = FALSE}). #' @param b the number of bootstrap replicates (only used if \code{bootstrap = TRUE} #' and \code{sample_splitting = FALSE}); defaults to 1000. #' @param boot_interval_type the type of bootstrap interval (one of \code{"norm"}, #' \code{"basic"}, \code{"stud"}, \code{"perc"}, or \code{"bca"}, as in #' \code{\link{boot}{boot.ci}}) if requested. Defaults to \code{"perc"}. #' @param clustered should the bootstrap resamples be performed on clusters #' rather than individual observations? Defaults to \code{FALSE}. #' @param cluster_id vector of the same length as \code{Y} giving the cluster IDs #' used for the clustered bootstrap, if \code{clustered} is \code{TRUE}. #' @param ... other arguments to the estimation tool, see "See also". 
#' #' @return An object of classes \code{vim} and the type of risk-based measure. #' See Details for more information. #' #' @details We define the population variable importance measure (VIM) for the #' group of features (or single feature) \eqn{s} with respect to the #' predictiveness measure \eqn{V} by #' \deqn{\psi_{0,s} := V(f_0, P_0) - V(f_{0,s}, P_0),} where \eqn{f_0} is #' the population predictiveness maximizing function, \eqn{f_{0,s}} is the #' population predictiveness maximizing function that is only allowed to access #' the features with index not in \eqn{s}, and \eqn{P_0} is the true #' data-generating distribution. VIM estimates are obtained by obtaining #' estimators \eqn{f_n} and \eqn{f_{n,s}} of \eqn{f_0} and \eqn{f_{0,s}}, #' respectively; obtaining an estimator \eqn{P_n} of \eqn{P_0}; and finally, #' setting \eqn{\psi_{n,s} := V(f_n, P_n) - V(f_{n,s}, P_n)}. #' #' In the interest of transparency, we return most of the calculations #' within the \code{vim} object. This results in a list including: #' \describe{ #' \item{s}{the column(s) to calculate variable importance for} #' \item{SL.library}{the library of learners passed to \code{SuperLearner}} #' \item{type}{the type of risk-based variable importance measured} #' \item{full_fit}{the fitted values of the chosen method fit to the full data} #' \item{red_fit}{the fitted values of the chosen method fit to the reduced data} #' \item{est}{the estimated variable importance} #' \item{naive}{the naive estimator of variable importance (only used if \code{type = "anova"})} #' \item{eif}{the estimated efficient influence function} #' \item{eif_full}{the estimated efficient influence function for the full regression} #' \item{eif_reduced}{the estimated efficient influence function for the reduced regression} #' \item{se}{the standard error for the estimated variable importance} #' \item{ci}{the \eqn{(1-\alpha) \times 100}\% confidence interval for the variable importance estimate} #' \item{test}{a decision to either reject (TRUE) or not reject (FALSE) the null hypothesis, based on a conservative test} #' \item{p_value}{a p-value based on the same test as \code{test}} #' \item{full_mod}{the object returned by the estimation procedure for the full data regression (if applicable)} #' \item{red_mod}{the object returned by the estimation procedure for the reduced data regression (if applicable)} #' \item{alpha}{the level, for confidence interval calculation} #' \item{sample_splitting_folds}{the folds used for sample-splitting (used for hypothesis testing)} #' \item{y}{the outcome} #' \item{ipc_weights}{the weights} #' \item{cluster_id}{the cluster IDs} #' \item{mat}{a tibble with the estimate, SE, CI, hypothesis testing decision, and p-value} #' } #' #' @examples #' # generate the data #' # generate X #' p <- 2 #' n <- 100 #' x <- data.frame(replicate(p, stats::runif(n, -1, 1))) #' #' # apply the function to the x's #' f <- function(x) 0.5 + 0.3*x[1] + 0.2*x[2] #' smooth <- apply(x, 1, function(z) f(z)) #' #' # generate Y ~ Bernoulli (smooth) #' y <- matrix(rbinom(n, size = 1, prob = smooth)) #' #' # set up a library for SuperLearner; note simple library for speed #' library("SuperLearner") #' learners <- c("SL.glm") #' #' # using Y and X; use class-balanced folds #' est_1 <- vim(y, x, indx = 2, type = "accuracy", #' alpha = 0.05, run_regression = TRUE, #' SL.library = learners, cvControl = list(V = 2), #' stratified = TRUE) #' #' # using pre-computed fitted values #' set.seed(4747) #' V <- 2 #' full_fit <- 
SuperLearner::CV.SuperLearner(Y = y, X = x, #' SL.library = learners, #' cvControl = list(V = 2), #' innerCvControl = list(list(V = V))) #' full_fitted <- SuperLearner::predict.SuperLearner(full_fit)$pred #' # fit the data with only X1 #' reduced_fit <- SuperLearner::CV.SuperLearner(Y = full_fitted, #' X = x[, -2, drop = FALSE], #' SL.library = learners, #' cvControl = list(V = 2, validRows = full_fit$folds), #' innerCvControl = list(list(V = V))) #' reduced_fitted <- SuperLearner::predict.SuperLearner(reduced_fit)$pred #' #' est_2 <- vim(Y = y, f1 = full_fitted, f2 = reduced_fitted, #' indx = 2, run_regression = FALSE, alpha = 0.05, #' stratified = TRUE, type = "accuracy", #' sample_splitting_folds = get_cv_sl_folds(full_fit$folds)) #' #' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the #' \code{SuperLearner} function and package. #' @export vim <- function(Y = NULL, X = NULL, f1 = NULL, f2 = NULL, indx = 1, type = "r_squared", run_regression = TRUE, SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"), alpha = 0.05, delta = 0, scale = "identity", na.rm = FALSE, sample_splitting = TRUE, sample_splitting_folds = NULL, final_point_estimate = "split", stratified = FALSE, C = rep(1, length(Y)), Z = NULL, ipc_scale = "identity", ipc_weights = rep(1, length(Y)), ipc_est_type = "aipw", scale_est = TRUE, nuisance_estimators_full = NULL, nuisance_estimators_reduced = NULL, exposure_name = NULL, bootstrap = FALSE, b = 1000, boot_interval_type = "perc", clustered = FALSE, cluster_id = rep(NA, length(Y)), ...) { # check to see if f1 and f2 are missing # if the data is missing, stop and throw an error check_inputs(Y, X, f1, f2, indx) if (bootstrap & clustered & sum(is.na(cluster_id)) > 0){ stop(paste0("If using clustered bootstrap, cluster IDs must be provided", " for all observations.")) } # check to see if Y is a matrix or data.frame; if not, make it one # (just for ease of reading) if (is.null(dim(Y))) { Y <- as.matrix(Y) } # set up internal data -- based on complete cases only cc_lst <- create_z(Y, C, Z, X, ipc_weights) Y_cc <- cc_lst$Y X_cc <- X[C == 1, , drop = FALSE] if (is.null(exposure_name)) { A_cc <- rep(1, length(Y_cc)) } else { A_cc <- X_cc[, exposure_name] } X_cc <- X_cc[, !(names(X_cc) %in% exposure_name), drop = FALSE] weights_cc <- cc_lst$weights Z_in <- cc_lst$Z # get the correct measure function; if not one of the supported ones, say so full_type <- get_full_type(type) # set up folds for sample-splitting; if sample_splitting is FALSE, these # aren't actually folds if (is.null(sample_splitting_folds) | run_regression) { if (sample_splitting) { sample_splitting_folds <- make_folds( Y, V = 2, C = C, stratified = stratified ) } else { sample_splitting_folds <- rep(1, length(Y)) } } sample_splitting_folds_cc <- sample_splitting_folds[C == 1] # if run_regression = TRUE, then fit SuperLearner if (run_regression) { full_feature_vec <- 1:ncol(X_cc) full_sl_lst <- run_sl(Y = Y_cc, X = X_cc, V = ifelse(sample_splitting, 2, 1), SL.library = SL.library, s = full_feature_vec, sample_splitting = sample_splitting, cv_folds = sample_splitting_folds_cc, ss_folds = sample_splitting_folds_cc, split = 1, verbose = FALSE, weights = weights_cc, cross_fitted_se = FALSE, vector = TRUE, ...) 
red_split <- switch((sample_splitting) + 1, 1, 2) red_Y <- Y_cc if (full_type == "r_squared" || full_type == "anova") { if (sample_splitting) { full_sl_lst_2 <- run_sl(Y = Y_cc, X = X_cc, V = ifelse(sample_splitting, 2, 1), SL.library = SL.library, s = full_feature_vec, sample_splitting = sample_splitting, cv_folds = sample_splitting_folds_cc, ss_folds = sample_splitting_folds_cc, split = 2, verbose = FALSE, weights = weights_cc, cross_fitted_se = FALSE, vector = TRUE, ...) red_Y <- matrix(full_sl_lst_2$preds) } else { red_Y <- matrix(full_sl_lst$preds, ncol = 1) } if (length(unique(red_Y)) == 1) { red_Y <- Y_cc } } redu_sl_lst <- run_sl(Y = red_Y, X = X_cc, V = ifelse(sample_splitting, 2, 1), SL.library = SL.library, s = full_feature_vec[-indx], sample_splitting = sample_splitting, cv_folds = sample_splitting_folds_cc, ss_folds = sample_splitting_folds_cc, split = red_split, verbose = FALSE, weights = weights_cc, cross_fitted_se = FALSE, vector = TRUE, ...) full <- full_sl_lst$fit reduced <- redu_sl_lst$fit full_preds <- full_sl_lst$preds redu_preds <- redu_sl_lst$preds # if variable importance based on the average value under the optimal rule is requested, # create a list with the necessary nuisance function estimators if (grepl("average_value", full_type)) { nuisance_estimators_full <- estimate_nuisances(fit = full, X = X_cc, exposure_name = exposure_name, V = ifelse(sample_splitting, 2, 1), SL.library = SL.library, sample_splitting = sample_splitting, sample_splitting_folds = sample_splitting_folds_cc, verbose = FALSE, weights = weights_cc, cross_fitted_se = FALSE, split = 1, ...) nuisance_estimators_reduced <- estimate_nuisances(fit = reduced, X = X_cc %>% dplyr::select(-!!exposure_name), exposure_name = exposure_name, V = ifelse(sample_splitting, 2, 1), SL.library = SL.library, sample_splitting = sample_splitting, sample_splitting_folds = sample_splitting_folds_cc, verbose = FALSE, weights = weights_cc, cross_fitted_se = FALSE, split = red_split, ...) } else { nuisance_estimators_full <- NULL nuisance_estimators_reduced <- NULL } } else { # otherwise they are fitted values # check to make sure that the fitted values, folds are what we expect check_fitted_values(Y = Y, f1 = f1, f2 = f2, sample_splitting_folds = sample_splitting_folds, cv = FALSE) sample_splitting_folds_cc <- sample_splitting_folds[C == 1] sample_splitting_folds_1 <- sample_splitting_folds_cc == 1 sample_splitting_folds_2 <- switch( (sample_splitting) + 1, sample_splitting_folds_cc == 1, sample_splitting_folds_cc == 2 ) # set up the fitted value objects full_preds <- switch((length(f1) == nrow(Y)) + 1, f1, subset(f1, C == 1)) redu_preds <- switch((length(f2) == nrow(Y)) + 1, f2, subset(f2, C == 1)) full <- reduced <- NA } # calculate the estimators, EIFs arg_lst <- list(...) 
# set method and family to compatible with continuous values, for EIF estimation arg_lst <- process_arg_lst(arg_lst) if (full_type == "anova") { # no sample-splitting, since no hypothesis testing est_lst <- measure_anova( full = full_preds, reduced = redu_preds, y = Y_cc, full_y = Y_cc, C = C, Z = Z_in, ipc_weights = ipc_weights, ipc_fit_type = "SL", na.rm = na.rm, SL.library = SL.library, arg_lst ) est <- est_lst$point_est naive <- est_lst$naive eif <- est_lst$eif predictiveness_full <- NA predictiveness_redu <- NA eif_full <- rep(NA, length(Y)) eif_redu <- rep(NA, length(Y)) se_full <- NA se_redu <- NA if (bootstrap) { boot_results <- bootstrap_se(Y = Y_cc, f1 = full_preds, f2 = redu_preds, type = full_type, b = b, boot_interval_type = boot_interval_type, alpha = alpha, clustered = clustered, cluster_id = cluster_id) se <- boot_results$se } else { se <- sqrt(mean(eif ^ 2) / length(eif)) } } else { # if no sample splitting, estimate on the whole data ss_folds_full <- switch((sample_splitting) + 1, rep(1, length(sample_splitting_folds_cc)), sample_splitting_folds_cc) ss_folds_redu <- switch((sample_splitting) + 1, rep(2, length(sample_splitting_folds_cc)), sample_splitting_folds_cc) predictiveness_full_object <- do.call(predictiveness_measure, c( list(type = full_type, y = Y_cc[ss_folds_full == 1, , drop = FALSE], a = A_cc[ss_folds_full == 1], fitted_values = full_preds[ss_folds_full == 1], full_y = Y_cc, nuisance_estimators = lapply(nuisance_estimators_full, function(l) { l[ss_folds_full == 1] }), C = C[sample_splitting_folds == 1], Z = Z_in[sample_splitting_folds == 1, , drop = FALSE], ipc_weights = ipc_weights[sample_splitting_folds == 1], ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), arg_lst )) predictiveness_reduced_object <- do.call(predictiveness_measure, c( list(type = full_type, y = Y_cc[ss_folds_redu == 2, , drop = FALSE], a = A_cc[ss_folds_redu == 2], fitted_values = redu_preds[ss_folds_redu == 2], full_y = Y_cc, nuisance_estimators = lapply(nuisance_estimators_reduced, function(l) { l[ss_folds_redu == 2] }), C = C[sample_splitting_folds == 2], Z = Z_in[sample_splitting_folds == 2, , drop = FALSE], ipc_weights = ipc_weights[sample_splitting_folds == 2], ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), arg_lst )) predictiveness_full_lst <- estimate(predictiveness_full_object) predictiveness_redu_lst <- estimate(predictiveness_reduced_object) # compute the point estimates of predictiveness and variable importance predictiveness_full <- predictiveness_full_lst$point_est predictiveness_redu <- predictiveness_redu_lst$point_est est <- predictiveness_full - predictiveness_redu naive <- NA # compute estimates of standard error eif_full <- predictiveness_full_lst$eif eif_redu <- predictiveness_redu_lst$eif se_full <- sqrt(mean(eif_full ^ 2) / length(eif_full)) se_redu <- sqrt(mean(eif_redu ^ 2) / length(eif_redu)) if (bootstrap & !sample_splitting) { boot_results <- bootstrap_se(Y = Y_cc, f1 = full_preds, f2 = redu_preds, type = full_type, b = b, boot_interval_type = boot_interval_type, alpha = alpha, clustered = clustered, cluster_id = cluster_id) se <- boot_results$se se_full <- boot_results$se_full se_redu <- boot_results$se_reduced } else { if (bootstrap) { warning(paste0("Bootstrap-based standard error estimates are currently", " only available if sample_splitting = FALSE. 
Returning", " standard error estimates based on the efficient", " influence function instead.")) } se <- vimp_se(eif_full = eif_full, eif_reduced = eif_redu, cross_fit = FALSE, sample_split = sample_splitting, na.rm = na.rm) } } est_for_inference <- est predictiveness_full_for_inference <- predictiveness_full predictiveness_reduced_for_inference <- predictiveness_redu # if sample-splitting was requested and final_point_estimate isn't "split", estimate # the required quantities if (sample_splitting & (final_point_estimate != "split")) { if (final_point_estimate == "full") { est_pred_full <- do.call(predictiveness_measure, c( list(type = full_type, y = Y_cc, a = A_cc, fitted_values = full_preds, full_y = Y_cc, nuisance_estimators = nuisance_estimators_full, C = C, Z = Z_in, ipc_weights = ipc_weights, ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), arg_lst )) est_pred_reduced <- do.call(predictiveness_measure, c( list(type = full_type, y = Y_cc, a = A_cc, fitted_values = redu_preds, full_y = Y_cc, nuisance_estimators = nuisance_estimators_reduced, C = C, Z = Z_in, ipc_weights = ipc_weights, ipc_fit_type = "SL", scale = scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), arg_lst )) est_pred_full_lst <- estimate(est_pred_full) est_pred_reduced_lst <- estimate(est_pred_reduced) # compute the point estimates of predictiveness and variable importance predictiveness_full <- est_pred_full_lst$point_est predictiveness_redu <- est_pred_reduced_lst$point_est est <- predictiveness_full - predictiveness_redu } else { est_pred_full <- do.call(predictiveness_measure, c( list(type = full_type, y = Y_cc[ss_folds_full == 2, , drop = FALSE], a = A_cc[ss_folds_full == 2], fitted_values = full_preds[ss_folds_full == 2], full_y = Y_cc, nuisance_estimators = lapply(nuisance_estimators_full, function(l) { l[ss_folds_full == 2] }), C = C[sample_splitting_folds == 2], Z = Z_in[sample_splitting_folds == 2, , drop = FALSE], ipc_weights = ipc_weights[sample_splitting_folds == 2], ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), arg_lst )) est_pred_reduced <- do.call(predictiveness_measure, c( list(type = full_type, y = Y_cc[ss_folds_redu == 1, , drop = FALSE], a = A_cc[ss_folds_redu == 1], fitted_values = redu_preds[ss_folds_redu == 1], full_y = Y_cc, nuisance_estimators = lapply(nuisance_estimators_reduced, function(l) { l[ss_folds_redu == 1] }), C = C[sample_splitting_folds == 1], Z = Z_in[sample_splitting_folds == 1, , drop = FALSE], ipc_weights = ipc_weights[sample_splitting_folds == 1], ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type, na.rm = na.rm, SL.library = SL.library), arg_lst )) est_pred_full_lst <- estimate(est_pred_full) est_pred_reduced_lst <- estimate(est_pred_reduced) # compute the point estimates of predictiveness and variable importance predictiveness_full <- mean(c(predictiveness_full, est_pred_full_lst$point_est)) predictiveness_redu <- mean(c(predictiveness_redu, est_pred_reduced_lst$point_est)) est <- predictiveness_full - predictiveness_redu } } # if est < 0, set to zero and print warning if (est < 0 && !is.na(est) & scale_est) { est <- 0 warning("Original estimate < 0; returning zero.") } else if (is.na(est)) { warning("Original estimate NA; consider using a different library of learners.") } # compute the confidence intervals ci <- vimp_ci(est_for_inference, se, scale = scale, level = 1 - alpha) if (bootstrap) { ci <- 
boot_results$ci } predictiveness_ci_full <- vimp_ci( predictiveness_full_for_inference, se = se_full, scale = scale, level = 1 - alpha ) predictiveness_ci_redu <- vimp_ci( predictiveness_reduced_for_inference, se = se_redu, scale = scale, level = 1 - alpha ) # perform a hypothesis test against the null of zero importance if (full_type == "anova" || full_type == "regression" || !sample_splitting) { hyp_test <- list(test = NA, p_value = NA, test_statistics = NA) } else { hyp_test <- vimp_hypothesis_test( predictiveness_full = predictiveness_full_for_inference, predictiveness_reduced = predictiveness_reduced_for_inference, se = se, delta = delta, alpha = alpha ) } # create the output and return it (as a tibble) chr_indx <- paste(as.character(indx), collapse = ",") mat <- tibble::tibble( s = chr_indx, est = est, se = se, cil = ci[1], ciu = ci[2], test = hyp_test$test, p_value = hyp_test$p_value ) if (full_type == "anova") { final_eif <- eif } else { if (length(eif_full) != length(eif_redu)) { max_len <- max(c(length(eif_full), length(eif_redu))) eif_full <- c(eif_full, rep(NA, max_len - length(eif_full))) eif_redu <- c(eif_redu, rep(NA, max_len - length(eif_redu))) } final_eif <- eif_full - eif_redu } output <- list(s = chr_indx, SL.library = SL.library, full_fit = full_preds, red_fit = redu_preds, est = est, naive = naive, eif = final_eif, eif_full = eif_full, eif_reduced = eif_redu, se = se, ci = ci, est_for_inference = est_for_inference, predictiveness_full = predictiveness_full, predictiveness_reduced = predictiveness_redu, predictiveness_full_for_inference = predictiveness_full_for_inference, predictiveness_reduced_for_inference = predictiveness_reduced_for_inference, predictiveness_ci_full = predictiveness_ci_full, predictiveness_ci_reduced = predictiveness_ci_redu, test = hyp_test$test, p_value = hyp_test$p_value, test_statistic = hyp_test$test_statistic, full_mod = full, red_mod = reduced, alpha = alpha, delta = delta, y = Y, sample_splitting_folds = sample_splitting_folds, ipc_weights = ipc_weights, ipc_scale = ipc_scale, scale = scale, cluster_id = cluster_id, mat = mat) # make it also a vim and vim_type object tmp.cls <- class(output) class(output) <- c("vim", full_type, tmp.cls) return(output) }
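# Descriptive summary of the quantities assembled above (no new behavior is
# implied): each predictiveness standard error is the square root of the mean
# squared efficient influence function value divided by the sample size,
# e.g., se_full = sqrt(mean(eif_full ^ 2) / length(eif_full)); vimp_ci() turns
# a point estimate and standard error into a Wald-type interval (possibly on
# the log or logit scale); and, when sample-splitting is used,
# vimp_hypothesis_test() compares the full and reduced predictiveness
# (estimated on independent splits) against the delta-null.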
/scratch/gouwar.j/cran-all/cranData/vimp/R/vim.R
#' vimp: Perform Inference on Algorithm-Agnostic Intrinsic Variable Importance #' #' A unified framework for valid statistical inference on algorithm-agnostic #' measures of intrinsic variable importance. You provide the data, a method for #' estimating the conditional mean of the outcome given the covariates, #' choose a variable importance measure, and specify variable(s) of interest; #' 'vimp' takes care of the rest. #' #' @section Author(s): #' \bold{Maintainer}: Brian Williamson \url{https://bdwilliamson.github.io/} #' \bold{Contributors}: Jean Feng \url{https://www.jeanfeng.com}, Charlie Wolock \url{https://cwolock.github.io/} #' #' Methodology authors: #' \itemize{ #' \item{Brian D. Williamson} #' \item{Jean Feng} #' \item{Peter B. Gilbert} #' \item{Noah R. Simon} #' \item{Marco Carone} #' } #' #' @section See Also: #' Manuscripts: #' \itemize{ #' \item{\doi{10.1111/biom.13392} (R-squared-based variable importance)} #' \item{\doi{10.1111/biom.13389} (Rejoinder to discussion on R-squared-based variable importance article)} #' \item{\url{http://proceedings.mlr.press/v119/williamson20a.html} (general Shapley-based variable importance)} #' \item{\doi{10.1080/01621459.2021.2003200} (general variable importance)} #' } #' #' Other useful links: #' \itemize{ #' \item{\url{https://bdwilliamson.github.io/vimp/}} #' \item{\url{https://github.com/bdwilliamson/vimp}} #' \item{Report bugs at \url{https://github.com/bdwilliamson/vimp/issues}} #' } #' #' @section Imports: #' The packages that we import either make the internal code nice #' (dplyr, magrittr, tibble, rlang, MASS, data.table), are directly relevant to estimating #' the conditional mean (SuperLearner) or predictiveness measures (ROCR), #' or are necessary for hypothesis testing (stats) or confidence intervals (boot, only for bootstrap intervals). #' #' We suggest several other packages: xgboost, ranger, gam, glmnet, polspline, #' and quadprog allow a flexible library of candidate learners in the Super #' Learner; ggplot2 and cowplot help with plotting variable #' importance estimates; testthat, WeightedROC, cvAUC, and covr help with unit tests; and #' knitr, rmarkdown, and tidyselect help with the vignettes and examples. #' #' @docType package #' @name vimp #' @keywords internal "_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp-package.R
#' Nonparametric Intrinsic Variable Importance Estimates: Classification accuracy #' #' Compute estimates of and confidence intervals for nonparametric #' difference in classification accuracy-based intrinsic variable importance. #' This is a wrapper function for \code{cv_vim}, with \code{type = "accuracy"}. #' #' @inheritParams cv_vim #' #' @return An object of classes \code{vim} and \code{vim_accuracy}. #' See Details for more information. #' #' @inherit cv_vim details #' #' @examples #' # generate the data #' # generate X #' p <- 2 #' n <- 100 #' x <- data.frame(replicate(p, stats::runif(n, -1, 1))) #' #' # apply the function to the x's #' f <- function(x) 0.5 + 0.3*x[1] + 0.2*x[2] #' smooth <- apply(x, 1, function(z) f(z)) #' #' # generate Y ~ Normal (smooth, 1) #' y <- matrix(rbinom(n, size = 1, prob = smooth)) #' #' # set up a library for SuperLearner; note simple library for speed #' library("SuperLearner") #' learners <- c("SL.glm", "SL.mean") #' #' # estimate (with a small number of folds, for illustration only) #' est <- vimp_accuracy(y, x, indx = 2, #' alpha = 0.05, run_regression = TRUE, #' SL.library = learners, V = 2, cvControl = list(V = 2)) #' #' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the \code{SuperLearner} function and package. #' @export vimp_accuracy <- function(Y = NULL, X = NULL, cross_fitted_f1 = NULL, cross_fitted_f2 = NULL, f1 = NULL, f2 = NULL, indx = 1, V = 10, run_regression = TRUE, SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"), alpha = 0.05, delta = 0, na.rm = FALSE, final_point_estimate = "split", cross_fitting_folds = NULL, sample_splitting_folds = NULL, stratified = TRUE, C = rep(1, length(Y)), Z = NULL, ipc_weights = rep(1, length(Y)), scale = "logit", ipc_est_type = "aipw", scale_est = TRUE, cross_fitted_se = TRUE, ...) { cv_vim(type = "accuracy", Y = Y, X = X, cross_fitted_f1 = cross_fitted_f1, cross_fitted_f2 = cross_fitted_f2, f1 = f1, f2 = f2, indx = indx, V = V, run_regression = run_regression, SL.library = SL.library, alpha = alpha, delta = delta, na.rm = na.rm, stratified = stratified, final_point_estimate = final_point_estimate, cross_fitting_folds = cross_fitting_folds, ipc_weights = ipc_weights, sample_splitting_folds = sample_splitting_folds, C = C, Z = Z, scale = scale, ipc_est_type = ipc_est_type, scale_est = scale_est, cross_fitted_se = cross_fitted_se, ...) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_accuracy.R
#' Nonparametric Intrinsic Variable Importance Estimates: ANOVA #' #' Compute estimates of and confidence intervals for nonparametric ANOVA-based #' intrinsic variable importance. This is a wrapper function for \code{cv_vim}, #' with \code{type = "anova"}. This type #' has limited functionality compared to other #' types; in particular, null hypothesis tests #' are not possible using \code{type = "anova"}. #' If you want to do null hypothesis testing #' on an equivalent population parameter, use #' \code{vimp_rsquared} instead. #' #' @inheritParams cv_vim #' #' @return An object of classes \code{vim} and \code{vim_anova}. #' See Details for more information. #' #' @details We define the population ANOVA #' parameter for the group of features (or single feature) \eqn{s} by #' \deqn{\psi_{0,s} := E_0\{f_0(X) - f_{0,s}(X)\}^2/var_0(Y),} #' where \eqn{f_0} is the population conditional mean using all features, #' \eqn{f_{0,s}} is the population conditional mean using the features with #' index not in \eqn{s}, and \eqn{E_0} and \eqn{var_0} denote expectation and #' variance under the true data-generating distribution, respectively. #' #' Cross-fitted ANOVA estimates are computed by first #' splitting the data into \eqn{K} folds; then using each fold in turn as a #' hold-out set, constructing estimators \eqn{f_{n,k}} and \eqn{f_{n,k,s}} of #' \eqn{f_0} and \eqn{f_{0,s}}, respectively on the training data and estimator #' \eqn{E_{n,k}} of \eqn{E_0} using the test data; and finally, computing #' \deqn{\psi_{n,s} := K^{(-1)}\sum_{k=1}^K E_{n,k}\{f_{n,k}(X) - f_{n,k,s}(X)\}^2/var_n(Y),} #' where \eqn{var_n} is the empirical variance. #' See the paper by Williamson, Gilbert, Simon, and Carone for more #' details on the mathematics behind this function. #' #' @examples #' # generate the data #' # generate X #' p <- 2 #' n <- 100 #' x <- data.frame(replicate(p, stats::runif(n, -5, 5))) #' #' # apply the function to the x's #' smooth <- (x[,1]/5)^2*(x[,1]+7)/5 + (x[,2]/3)^2 #' #' # generate Y ~ Normal (smooth, 1) #' y <- smooth + stats::rnorm(n, 0, 1) #' #' # set up a library for SuperLearner; note simple library for speed #' library("SuperLearner") #' learners <- c("SL.glm", "SL.mean") #' #' # estimate (with a small number of folds, for illustration only) #' est <- vimp_anova(y, x, indx = 2, #' alpha = 0.05, run_regression = TRUE, #' SL.library = learners, V = 2, cvControl = list(V = 2)) #' #' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the #' \code{SuperLearner} function and package. #' @export vimp_anova <- function(Y = NULL, X = NULL, cross_fitted_f1 = NULL, cross_fitted_f2 = NULL, indx = 1, V = 10, run_regression = TRUE, SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"), alpha = 0.05, delta = 0, na.rm = FALSE, cross_fitting_folds = NULL, stratified = FALSE, C = rep(1, length(Y)), Z = NULL, ipc_weights = rep(1, length(Y)), scale = "logit", ipc_est_type = "aipw", scale_est = TRUE, cross_fitted_se = TRUE, ...) { cv_vim(type = "anova", Y = Y, X = X, cross_fitted_f1 = cross_fitted_f1, cross_fitted_f2 = cross_fitted_f2, f1 = NULL, f2 = NULL, indx = indx, V = V, run_regression = run_regression, SL.library = SL.library, alpha = alpha, delta = delta, na.rm = na.rm, stratified = stratified, cross_fitting_folds = cross_fitting_folds, ipc_weights = ipc_weights, sample_splitting = FALSE, sample_splitting_folds = NULL, C = C, Z = Z, scale = scale, ipc_est_type = ipc_est_type, scale_est = scale_est, cross_fitted_se = cross_fitted_se, ...) }
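# A minimal sketch (hypothetical objects, for intuition only) of the naive,
# non-cross-fitted plug-in version of the ANOVA estimand described in the
# details above. Here, f_full and f_reduced denote fitted values from
# regressions of y on all features and on the features not in the index set,
# respectively; neither object is defined in this package.
# naive_anova <- mean((f_full - f_reduced)^2) / stats::var(y)
# vimp_anova() instead computes a cross-fitted, influence-function-based
# estimate of this quantity (via cv_vim with type = "anova") and returns a
# standard error along with the point estimate.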
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_anova.R
#' Nonparametric Intrinsic Variable Importance Estimates: AUC #' #' Compute estimates of and confidence intervals for nonparametric difference #' in $AUC$-based intrinsic variable importance. This is a wrapper function for #' \code{cv_vim}, with \code{type = "auc"}. #' #' @inheritParams cv_vim #' #' @return An object of classes \code{vim} and \code{vim_auc}. #' See Details for more information. #' #' @inherit cv_vim details #' #' @examples #' # generate the data #' # generate X #' p <- 2 #' n <- 100 #' x <- data.frame(replicate(p, stats::runif(n, -1, 1))) #' #' # apply the function to the x's #' f <- function(x) 0.5 + 0.3*x[1] + 0.2*x[2] #' smooth <- apply(x, 1, function(z) f(z)) #' #' # generate Y ~ Normal (smooth, 1) #' y <- matrix(rbinom(n, size = 1, prob = smooth)) #' #' # set up a library for SuperLearner; note simple library for speed #' library("SuperLearner") #' learners <- c("SL.glm", "SL.mean") #' #' # estimate (with a small number of folds, for illustration only) #' est <- vimp_auc(y, x, indx = 2, #' alpha = 0.05, run_regression = TRUE, #' SL.library = learners, V = 2, cvControl = list(V = 2)) #' #' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the \code{SuperLearner} function and package, and \code{\link[ROCR]{performance}} for specific usage of the \code{ROCR} package. #' @export vimp_auc <- function(Y = NULL, X = NULL, cross_fitted_f1 = NULL, cross_fitted_f2 = NULL, f1 = NULL, f2 = NULL, indx = 1, V = 10, run_regression = TRUE, SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"), alpha = 0.05, delta = 0, na.rm = FALSE, final_point_estimate = "split", cross_fitting_folds = NULL, sample_splitting_folds = NULL, stratified = TRUE, C = rep(1, length(Y)), Z = NULL, ipc_weights = rep(1, length(Y)), scale = "logit", ipc_est_type = "aipw", scale_est = TRUE, cross_fitted_se = TRUE, ...) { cv_vim(type = "auc", Y = Y, X = X, cross_fitted_f1 = cross_fitted_f1, cross_fitted_f2 = cross_fitted_f2, f1 = f1, f2 = f2, indx = indx, V = V, run_regression = run_regression, SL.library = SL.library, alpha = alpha, delta = delta, na.rm = na.rm, stratified = stratified, final_point_estimate = final_point_estimate, cross_fitting_folds = cross_fitting_folds, ipc_weights = ipc_weights, sample_splitting_folds = sample_splitting_folds, C = C, Z = Z, scale = scale, ipc_est_type = ipc_est_type, scale_est = scale_est, cross_fitted_se = cross_fitted_se, ...) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_auc.R
#' Confidence intervals for variable importance
#'
#' Compute confidence intervals for the true variable importance parameter.
#'
#' @param est estimate of variable importance, e.g., from a call to \code{vimp_point_est}.
#' @param se estimate of the standard error of \code{est}, e.g., from a call to \code{vimp_se}.
#' @param scale the scale on which to compute the interval estimate: one of
#'   "identity" (Wald-type CI), "log", or "logit" (intervals computed on the
#'   transformed scale via the delta method and back-transformed). Defaults to "identity".
#' @param level confidence level for the interval (defaults to 0.95).
#' @param truncate truncate CIs to have lower limit at (or above) zero?
#'
#' @return The Wald-based confidence interval for the true importance of the given group of left-out covariates.
#'
#' @details See the paper by Williamson, Gilbert, Simon, and Carone for more
#' details on the mathematics behind this function and the definition of the parameter of interest.
#' @importFrom stats qlogis plogis
#' @export
vimp_ci <- function(est, se, scale = "identity", level = 0.95, truncate = TRUE) {
  # set up the level
  a <- (1 - level) / 2
  a <- c(a, 1 - a)
  # get the quantiles
  fac <- stats::qnorm(a)
  # create the ci
  ci <- array(NA, dim = c(length(est), 2L), dimnames = list(names(est)))
  # get scale
  scales <- c("log", "logit", "identity")
  full_scale <- scales[pmatch(scale, scales)]
  if (full_scale == "log") {
    tmp <- suppressWarnings(log(est))
    is_zero <- FALSE
    if ((is.na(tmp) & est <= 0 & !is.na(est)) | is.infinite(tmp)) {
      tmp <- 0
      is_zero <- TRUE
    }
    grad <- 1 / est
    ci[] <- exp(tmp + sqrt(se ^ 2 * grad ^ 2) %o% fac)
    if (is_zero) {
      ci[, 1] <- 0
    }
  } else if (full_scale == "logit") {
    tmp <- suppressWarnings(qlogis(est))
    is_zero <- FALSE
    if ((is.na(tmp) & est <= 0 & !is.na(est)) | is.infinite(tmp)) {
      tmp <- 0
      is_zero <- TRUE
    }
    grad <- 1 / (est - est ^ 2)
    ci[] <- plogis(tmp + sqrt(se ^ 2 * grad ^ 2) %o% fac)
    if (is_zero) {
      ci[, 1] <- 0
    }
  } else {
    ci[] <- est + (se) %o% fac
    if (any(ci[, 1] < 0) & !all(is.na(ci)) & truncate) {
      ci[ci[, 1] < 0, 1] <- 0
    }
  }
  if (any(ci[, 2] < 0) & !all(is.na(ci)) & truncate) {
    ci[ci[, 2] < 0, 2] <- 0
  }
  return(ci)
}
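# A minimal usage sketch (hypothetical numbers, not drawn from any analysis):
# a 95% confidence interval on the logit scale for an estimated importance of
# 0.15 with standard error 0.04. On the logit scale, the estimate is
# transformed with qlogis, the standard error is scaled by the delta-method
# gradient 1 / (est - est^2), and the endpoints are back-transformed with plogis.
# est <- 0.15
# se <- 0.04
# vimp_ci(est = est, se = se, scale = "logit", level = 0.95)
# # equivalent hand computation of the same interval:
# stats::plogis(stats::qlogis(est) + c(-1, 1) * stats::qnorm(0.975) * se / (est - est ^ 2))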
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_ci.R
#' Nonparametric Intrinsic Variable Importance Estimates: Deviance #' #' Compute estimates of and confidence intervals for nonparametric #' deviance-based intrinsic variable importance. This is a wrapper function for #' \code{cv_vim}, with \code{type = "deviance"}. #' #' @inheritParams cv_vim #' #' @return An object of classes \code{vim} and \code{vim_deviance}. #' See Details for more information. #' #' @inherit cv_vim details #' #' @examples #' # generate the data #' # generate X #' p <- 2 #' n <- 100 #' x <- data.frame(replicate(p, stats::runif(n, -1, 1))) #' #' # apply the function to the x's #' f <- function(x) 0.5 + 0.3*x[1] + 0.2*x[2] #' smooth <- apply(x, 1, function(z) f(z)) #' #' # generate Y ~ Normal (smooth, 1) #' y <- matrix(stats::rbinom(n, size = 1, prob = smooth)) #' #' # set up a library for SuperLearner; note simple library for speed #' library("SuperLearner") #' learners <- c("SL.glm", "SL.mean") #' #' # estimate (with a small number of folds, for illustration only) #' est <- vimp_deviance(y, x, indx = 2, #' alpha = 0.05, run_regression = TRUE, #' SL.library = learners, V = 2, cvControl = list(V = 2)) #' #' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the \code{SuperLearner} function and package. #' @export vimp_deviance <- function(Y = NULL, X = NULL, cross_fitted_f1 = NULL, cross_fitted_f2 = NULL, f1 = NULL, f2 = NULL, indx = 1, V = 10, run_regression = TRUE, SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"), alpha = 0.05, delta = 0, na.rm = FALSE, final_point_estimate = "split", cross_fitting_folds = NULL, sample_splitting_folds = NULL, stratified = TRUE, C = rep(1, length(Y)), Z = NULL, ipc_weights = rep(1, length(Y)), scale = "logit", ipc_est_type = "aipw", scale_est = TRUE, cross_fitted_se = TRUE, ...) { cv_vim(type = "deviance", Y = Y, X = X, cross_fitted_f1 = cross_fitted_f1, cross_fitted_f2 = cross_fitted_f2, f1 = f1, f2 = f2, indx = indx, V = V, run_regression = run_regression, SL.library = SL.library, alpha = alpha, delta = delta, na.rm = na.rm, stratified = stratified, final_point_estimate = final_point_estimate, cross_fitting_folds = cross_fitting_folds, ipc_weights = ipc_weights, sample_splitting_folds = sample_splitting_folds, C = C, Z = Z, scale = scale, ipc_est_type = ipc_est_type, scale_est = scale_est, cross_fitted_se = cross_fitted_se, ...) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_deviance.R
#' Perform a hypothesis test against the null hypothesis of \eqn{\delta} importance
#'
#' Perform a Wald-type hypothesis test of the null hypothesis that variable
#' importance is no greater than \eqn{\delta}, by:
#' (i) computing a standardized test statistic, equal to the difference between
#' the estimated predictiveness of the full and reduced regression functions
#' (these must be estimated on independent splits of the data), minus \eqn{\delta},
#' divided by the standard error of the difference; and
#' (ii) rejecting the null hypothesis if the resulting one-sided p-value, based
#' on the standard normal distribution, is less than the level \eqn{\alpha}.
#'
#' @param predictiveness_full the estimated predictiveness of the regression including the covariate(s) of interest.
#' @param predictiveness_reduced the estimated predictiveness of the regression excluding the covariate(s) of interest.
#' @param se the estimated standard error of the variable importance estimator.
#' @param delta the value of the \eqn{\delta}-null (i.e., the null hypothesis is that importance is no greater than \eqn{\delta}); defaults to 0.
#' @param alpha the desired type I error rate (defaults to 0.05).
#'
#' @return a list, with: the hypothesis testing decision (\code{TRUE} if the null hypothesis is rejected, \code{FALSE} otherwise); the p-value from the hypothesis test; and the test statistic from the hypothesis test.
#'
#' @details See the paper by Williamson, Gilbert, Simon, and Carone for more
#' details on the mathematics behind this function and the definition of the parameter of interest.
#'
#' @importFrom stats pnorm
#'
#' @export
vimp_hypothesis_test <- function(predictiveness_full, predictiveness_reduced,
                                 se, delta = 0, alpha = 0.05) {
  # Wald-type hypothesis test, based on a standardized statistic compared to
  # the standard normal distribution
  # (requires independent splits to estimate full, reduced predictiveness)
  test_statistic <- (predictiveness_full - predictiveness_reduced - delta) / se
  p_value <- 1 - pnorm(test_statistic)
  hyp_test <- p_value < alpha
  return(list(test = hyp_test, p_value = p_value,
              test_statistic = test_statistic))
}
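# A minimal usage sketch (hypothetical numbers, not drawn from any analysis):
# with estimated predictiveness 0.40 for the full regression, 0.35 for the
# reduced regression, an estimated standard error of 0.02 for the difference,
# and delta = 0, the test statistic is (0.40 - 0.35 - 0) / 0.02 = 2.5; the
# one-sided p-value is 1 - pnorm(2.5), approximately 0.006, so the null
# hypothesis of zero importance is rejected at alpha = 0.05.
# vimp_hypothesis_test(predictiveness_full = 0.40,
#                      predictiveness_reduced = 0.35,
#                      se = 0.02, delta = 0, alpha = 0.05)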
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_hypothesis_test.R
#' Nonparametric Intrinsic Variable Importance Estimates: ANOVA
#'
#' Compute estimates of and confidence intervals for nonparametric
#' ANOVA-based intrinsic variable importance. This is a wrapper function for
#' \code{cv_vim}, with \code{type = "anova"}.
#' This function is deprecated in \code{vimp} version 2.0.0.
#'
#' @inheritParams vimp_anova
#'
#' @return An object of classes \code{vim} and \code{vim_regression}.
#'   See Details for more information.
#'
#' @inherit vimp_anova details
#'
#' @examples
#' # generate the data
#' # generate X
#' p <- 2
#' n <- 100
#' x <- data.frame(replicate(p, stats::runif(n, -5, 5)))
#'
#' # apply the function to the x's
#' smooth <- (x[,1]/5)^2*(x[,1]+7)/5 + (x[,2]/3)^2
#'
#' # generate Y ~ Normal (smooth, 1)
#' y <- smooth + stats::rnorm(n, 0, 1)
#'
#' # set up a library for SuperLearner; note simple library for speed
#' library("SuperLearner")
#' learners <- c("SL.glm", "SL.mean")
#'
#' # estimate (with a small number of folds, for illustration only)
#' est <- vimp_regression(y, x, indx = 2,
#'                        alpha = 0.05, run_regression = TRUE,
#'                        SL.library = learners, V = 2, cvControl = list(V = 2))
#'
#' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the \code{SuperLearner} function and package.
#' @export
vimp_regression <- function(Y = NULL, X = NULL, cross_fitted_f1 = NULL,
                            cross_fitted_f2 = NULL, indx = 1, V = 10,
                            run_regression = TRUE,
                            SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"),
                            alpha = 0.05, delta = 0, na.rm = FALSE,
                            cross_fitting_folds = NULL, stratified = FALSE,
                            C = rep(1, length(Y)), Z = NULL,
                            ipc_weights = rep(1, length(Y)), scale = "identity",
                            ipc_est_type = "aipw", scale_est = TRUE,
                            cross_fitted_se = TRUE, ...) {
  .Deprecated("vimp_anova", package = "vimp",
              msg = paste0(
                "vimp_anova now performs all functionality of vimp_regression; ",
                "please update any code to reflect this change!"
              ))
  # pass all user-supplied arguments through to vimp_anova
  vimp_anova(Y = Y, X = X, cross_fitted_f1 = cross_fitted_f1,
             cross_fitted_f2 = cross_fitted_f2, indx = indx,
             V = V, run_regression = run_regression, SL.library = SL.library,
             alpha = alpha, delta = delta, na.rm = na.rm,
             cross_fitting_folds = cross_fitting_folds,
             stratified = stratified, C = C, Z = Z, ipc_weights = ipc_weights,
             ipc_est_type = ipc_est_type, scale = scale, scale_est = scale_est,
             cross_fitted_se = cross_fitted_se, ...)
}
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_regression.R
#' Nonparametric Intrinsic Variable Importance Estimates: R-squared #' #' Compute estimates of and confidence intervals for nonparametric $R^2$-based #' intrinsic variable importance. This is a wrapper function for \code{cv_vim}, #' with \code{type = "r_squared"}. #' #' @inheritParams cv_vim #' #' @return An object of classes \code{vim} and \code{vim_rsquared}. #' See Details for more information. #' #' @inherit cv_vim details #' #' @examples #' # generate the data #' # generate X #' p <- 2 #' n <- 100 #' x <- data.frame(replicate(p, stats::runif(n, -5, 5))) #' #' # apply the function to the x's #' smooth <- (x[,1]/5)^2*(x[,1]+7)/5 + (x[,2]/3)^2 #' #' # generate Y ~ Normal (smooth, 1) #' y <- smooth + stats::rnorm(n, 0, 1) #' #' # set up a library for SuperLearner; note simple library for speed #' library("SuperLearner") #' learners <- c("SL.glm", "SL.mean") #' #' # estimate (with a small number of folds, for illustration only) #' est <- vimp_rsquared(y, x, indx = 2, #' alpha = 0.05, run_regression = TRUE, #' SL.library = learners, V = 2, cvControl = list(V = 2)) #' #' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the #' \code{SuperLearner} function and package. #' @export vimp_rsquared <- function(Y = NULL, X = NULL, cross_fitted_f1 = NULL, cross_fitted_f2 = NULL, f1 = NULL, f2 = NULL, indx = 1, V = 10, run_regression = TRUE, SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"), alpha = 0.05, delta = 0, na.rm = FALSE, final_point_estimate = "split", cross_fitting_folds = NULL, sample_splitting_folds = NULL, stratified = FALSE, C = rep(1, length(Y)), Z = NULL, ipc_weights = rep(1, length(Y)), scale = "logit", ipc_est_type = "aipw", scale_est = TRUE, cross_fitted_se = TRUE, ...) { cv_vim(type = "r_squared", Y = Y, X = X, cross_fitted_f1 = cross_fitted_f1, cross_fitted_f2 = cross_fitted_f2, f1 = f1, f2 = f2, indx = indx, V = V, run_regression = run_regression, SL.library = SL.library, alpha = alpha, delta = delta, na.rm = na.rm, stratified = stratified, final_point_estimate = final_point_estimate, cross_fitting_folds = cross_fitting_folds, ipc_weights = ipc_weights, sample_splitting_folds = sample_splitting_folds, C = C, Z = Z, scale = scale, ipc_est_type = ipc_est_type, scale_est = scale_est, cross_fitted_se = cross_fitted_se, ...) }
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_rsquared.R
#' Estimate variable importance standard errors #' #' Compute standard error estimates for estimates of variable importance. #' #' @param eif_full the estimated efficient influence function (EIF) based on #' the full set of covariates. #' @param eif_reduced the estimated EIF based on the reduced set of covariates. #' @param cross_fit logical; was cross-fitting used to compute the EIFs? #' (defaults to \code{TRUE}) #' @param sample_split logical; was sample-splitting used? (defaults to \code{TRUE}) #' @param na.rm logical; should NA's be removed in computation? #' (defaults to \code{FALSE}). #' #' @return The standard error for the estimated variable importance for the #' given group of left-out covariates. #' #' @details See the paper by Williamson, Gilbert, Simon, and Carone for more #' details on the mathematics behind this function and the definition of the #' parameter of interest. #' #' @export vimp_se <- function(eif_full, eif_reduced, cross_fit = TRUE, sample_split = TRUE, na.rm = FALSE) { if (!cross_fit & !sample_split) { se <- sqrt( mean( (eif_full - eif_reduced) ^ 2 ) / length(eif_full) ) } else if (cross_fit & !sample_split) { fold_vars <- unlist(lapply( as.list(seq_len(length(eif_full))), function(k) { mean( (eif_full[[k]] - eif_reduced[[k]]) ^ 2) } )) n <- sum(unlist(lapply(as.list(seq_len(length(eif_full))), function(k) { length(eif_full[[k]]) }))) se <- sqrt(mean(fold_vars) / n) } else if (!cross_fit & sample_split) { se <- sqrt( mean( (eif_full) ^ 2 ) / length(eif_full) + mean( (eif_reduced) ^ 2 ) / length(eif_reduced) ) } else { n_1 <- sum(unlist(lapply(as.list(seq_len(length(eif_full))), function(k) { length(eif_full[[k]]) }))) n_2 <- sum(unlist(lapply(as.list(seq_len(length(eif_reduced))), function(k) { length(eif_reduced[[k]]) }))) full_indices <- as.list(seq_len(length(eif_full))) redu_indices <- as.list(seq_len(length(eif_reduced))) full_vars <- unlist(lapply(full_indices, function(k) { mean( (eif_full[[k]]) ^ 2 ) })) redu_vars <- unlist(lapply(redu_indices, function(k) { mean( (eif_reduced[[k]]) ^ 2 ) })) se <- sqrt( mean(full_vars) / n_1 + mean(redu_vars) / n_2 ) } return(se) }
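# A minimal usage sketch (simulated influence function values, for illustration
# only): without cross-fitting or sample-splitting, the standard error equals
# sqrt(mean((eif_full - eif_reduced)^2) / n); with sample-splitting, the mean
# squared EIF values are instead computed separately on each split, divided by
# the corresponding split sizes, and summed before taking the square root.
# set.seed(4747)
# eif_full <- stats::rnorm(100)
# eif_reduced <- stats::rnorm(100)
# vimp_se(eif_full, eif_reduced, cross_fit = FALSE, sample_split = FALSE)
# # equivalently:
# sqrt(mean((eif_full - eif_reduced)^2) / length(eif_full))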
/scratch/gouwar.j/cran-all/cranData/vimp/R/vimp_se.R
.onAttach <- function(...) { pkg_desc <- utils::packageDescription("vimp") packageStartupMessage(paste0( "vimp version ", pkg_desc$Version, ": ", pkg_desc$Title )) }
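# When the package is attached with library("vimp"), the startup message has
# the form "vimp version <Version>: <Title>", with both fields read from the
# DESCRIPTION file of the installed package.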
/scratch/gouwar.j/cran-all/cranData/vimp/R/zzz.R
## ----setup, echo = FALSE, include = FALSE------------------------------------- library(knitr) opts_knit$set(cache = FALSE, verbose = TRUE, global.par = TRUE) ## ----more-setup, echo = FALSE------------------------------------------------- par(mar = c(5, 12, 4, 2) + 0.1) ## ----install-vimp, eval = FALSE----------------------------------------------- # install.packages("vimp") ## ----devtools-install-vimp, eval = FALSE-------------------------------------- # # only run if you don't have devtools # # previously installed # # install.packages("devtools") # devtools::install_github("bdwilliamson/vimp") ## ----load-vimp, message = FALSE----------------------------------------------- library("vimp") ## ----gen-data----------------------------------------------------------------- # ------------------------------------------------------------- # problem setup # ------------------------------------------------------------- # set up the data set.seed(5678910) n <- 1000 p <- 2 s <- 1 # desire importance for X_1 x <- data.frame(replicate(p, runif(n, -1, 1))) y <- (x[,1])^2*(x[,1]+7/5) + (25/9)*(x[,2])^2 + rnorm(n, 0, 1) # set up folds for hypothesis testing folds <- sample(rep(seq_len(2), length = length(y))) ## ----learner-lib-small, message = FALSE--------------------------------------- library("SuperLearner") # load specific algorithms library("ranger") ## ----est-1, warning = FALSE--------------------------------------------------- est_1 <- vimp_rsquared(Y = y, X = x, indx = 1, run_regression = TRUE, SL.library = c("SL.ranger", "SL.mean"), V = 2, env = environment()) ## ----print-est-1-------------------------------------------------------------- est_1 print(est_1) ## ----load-vrc01-data---------------------------------------------------------- # read in the data data("vrc01") ## ----subset-data-------------------------------------------------------------- library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) ## ----est-regressions-lm, warning = FALSE-------------------------------------- geog_indx <- max(which(grepl("geog", names(X)))) set.seed(1234) for (i in seq_len(ncol(X) - geog_indx)) { # note that we're using a small number of cross-fitting folds for speed lm_vim <- vim(Y = y, X = X, indx = geog_indx + i, run_regression = TRUE, SL.library = "SL.glm", type = "r_squared", cvControl = list(V = 2), scale = "logit", family = binomial()) if (i == 1) { lm_mat <- lm_vim } else { lm_mat <- merge_vim(lm_mat, lm_vim) } } # print out the importance lm_mat ## ----full-learner-lib--------------------------------------------------------- # create a function for boosted stumps SL.gbm.1 <- function(..., interaction.depth = 1) SL.gbm(..., interaction.depth = interaction.depth) # create GAMs with different degrees of freedom SL.gam.3 <- function(..., deg.gam = 3) SL.gam(..., deg.gam = deg.gam) SL.gam.4 <- function(..., deg.gam = 4) SL.gam(..., deg.gam = deg.gam) SL.gam.5 <- function(..., deg.gam = 5) SL.gam(..., deg.gam = deg.gam) # add more levels of alpha for glmnet create.SL.glmnet <- function(alpha = c(0.25, 0.5, 0.75)) { for (mm in seq(length(alpha))) { eval(parse(file = "", text = paste('SL.glmnet.', alpha[mm], '<- function(..., alpha = ', alpha[mm], ') SL.glmnet(..., alpha = alpha)', sep = '')), envir = .GlobalEnv) } invisible(TRUE) } create.SL.glmnet() # add tuning parameters for randomForest create.SL.randomForest <- function(tune = list(mtry = 
c(1, 5, 7), nodesize = c(1, 5, 10))) { tuneGrid <- expand.grid(tune, stringsAsFactors = FALSE) for (mm in seq(nrow(tuneGrid))) { eval(parse(file = "", text = paste("SL.randomForest.", mm, "<- function(..., mtry = ", tuneGrid[mm, 1], ", nodesize = ", tuneGrid[mm, 2], ") SL.randomForest(..., mtry = mtry, nodesize = nodesize)", sep = "")), envir = .GlobalEnv) } invisible(TRUE) } create.SL.randomForest() # create the library learners <- c("SL.glmnet", "SL.glmnet.0.25", "SL.glmnet.0.5", "SL.glmnet.0.75", "SL.randomForest", "SL.randomForest.1", "SL.randomForest.2", "SL.randomForest.3", "SL.randomForest.4", "SL.randomForest.5", "SL.randomForest.6", "SL.randomForest.7", "SL.randomForest.8", "SL.randomForest.9", "SL.gbm.1") ## ----vimp-with-sl-1, eval = FALSE--------------------------------------------- # vimp_rsquared(Y = y, X = X, # indx = 5, run_regression = TRUE, SL.library = learners, V = 5, family = binomial()) ## ----vimp-with-sl-fam, message = FALSE, warning = FALSE----------------------- # small learners library learners.2 <- c("SL.ranger") # small number of cross-fitting folds V <- 2 # small number of CV folds for Super Learner sl_cvcontrol <- list(V = 2) # now estimate variable importance set.seed(5678) start_time <- Sys.time() subtype_01_AE_vim <- vimp_rsquared(Y = y, X = X, indx = 5, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) end_time <- Sys.time() ## ----print-vim---------------------------------------------------------------- subtype_01_AE_vim ## ----look-at-ests------------------------------------------------------------- head(subtype_01_AE_vim$full_fit[[1]]) head(subtype_01_AE_vim$red_fit[[1]]) ## ----vrc01-sl, warning = FALSE------------------------------------------------ ests <- subtype_01_AE_vim set.seed(1234) for (i in seq_len(ncol(X) - geog_indx - 1)) { # note that we're using a small number of cross-fitting folds for speed this_vim <- vimp_rsquared(Y = y, X = X, indx = geog_indx + i + 1, run_regression = TRUE, SL.library = learners.2, V = V, cvControl = sl_cvcontrol, family = binomial()) ests <- merge_vim(ests, this_vim) } ## ----vrc01-vim, fig.width = 8.5, fig.height = 8, message = FALSE-------------- library("ggplot2") library("cowplot") theme_set(theme_cowplot()) all_vars <- c(paste0("Subtype is ", c("01_AE", "02_AG", "07_BC", "A1", "A1C", "A1D", "B", "C", "D", "O", "Other")), paste0("Length of ", c("Env", "gp120", "V5", "V5 outliers", "Loop E", "Loop E outliers"))) est_plot_tib <- ests$mat %>% mutate( var_fct = rev(factor(s, levels = ests$mat$s, labels = all_vars[as.numeric(ests$mat$s) - geog_indx], ordered = TRUE)) ) # plot est_plot_tib %>% ggplot(aes(x = est, y = var_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated individual feature importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ## ----vrc01-group-vim, fig.width = 8.5, fig.height = 8------------------------- # get the estimates set.seed(91011) subtype_vim <- vimp_rsquared(Y = y, X = X, indx = 5:15, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) geometry_vim <- vimp_rsquared(Y = y, X = X, indx = 16:21, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) # combine and plot groups <- merge_vim(subtype_vim, geometry_vim) 
all_grp_nms <- c("Viral subtype", "Viral geometry") grp_plot_tib <- groups$mat %>% mutate( grp_fct = factor(case_when( s == "5,6,7,8,9,10,11,12,13,14,15" ~ "1", s == "16,17,18,19,20,21" ~ "2" ), levels = c("1", "2"), labels = all_grp_nms, ordered = TRUE) ) grp_plot_tib %>% ggplot(aes(x = est, y = grp_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated feature group importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)")
/scratch/gouwar.j/cran-all/cranData/vimp/inst/doc/introduction-to-vimp.R
--- title: "Introduction to `vimp`" author: "Brian D. Williamson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Introduction to `vimp`} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} csl: chicago-author-date.csl bibliography: vimp_bib.bib --- ```{r setup, echo = FALSE, include = FALSE} library(knitr) opts_knit$set(cache = FALSE, verbose = TRUE, global.par = TRUE) ``` ```{r more-setup, echo = FALSE} par(mar = c(5, 12, 4, 2) + 0.1) ``` ## Introduction `vimp` is a package that computes nonparametric estimates of variable importance and provides valid inference on the true importance. The package supports flexible estimation of variable importance based on the difference in nonparametric $R^2$, classification accuracy, and area under the receiver operating characteristic curve (AUC). These quantities are all nonparametric generalizations of the usual measures in simple parametric models (e.g., linear models). For more details, see the accompanying manuscripts @williamson2020a, @williamson2021, and @williamson2020c. Variable importance estimates may be computed quickly, depending on the techniques used to estimate the underlying conditional means --- if these techniques are slow, then the variable importance procedure will be slow. The code can handle arbitrary dimensions of features, and may be used to estimate the importance of any single feature or group of features for predicting the outcome. The package also includes functions for cross-validated importance. The author and maintainer of the `vimp` package is [Brian Williamson](https://bdwilliamson.github.io/). The methods implemented here have also been implemented in Python under the package [`vimpy`](https://github.com/bdwilliamson/vimpy). ## Installation A stable version of the package may be downloaded and installed from CRAN. Type the following command in your R console to install the stable version of `vimp`: ```{r install-vimp, eval = FALSE} install.packages("vimp") ``` A development version of the package may be downloaded and installed from GitHub using the `devtools` package. Type the following command in your R console to install the development version of `vimp`: ```{r devtools-install-vimp, eval = FALSE} # only run if you don't have devtools # previously installed # install.packages("devtools") devtools::install_github("bdwilliamson/vimp") ``` ## Quick Start This section should serve as a quick guide to using the `vimp` package --- we will cover the main functions for estimating $R^2$-based variable importance using a simulated data example. More details are given in the next section. First, load the `vimp` package: ```{r load-vimp, message = FALSE} library("vimp") ``` Next, create some data: ```{r gen-data} # ------------------------------------------------------------- # problem setup # ------------------------------------------------------------- # set up the data set.seed(5678910) n <- 1000 p <- 2 s <- 1 # desire importance for X_1 x <- data.frame(replicate(p, runif(n, -1, 1))) y <- (x[,1])^2*(x[,1]+7/5) + (25/9)*(x[,2])^2 + rnorm(n, 0, 1) # set up folds for hypothesis testing folds <- sample(rep(seq_len(2), length = length(y))) ``` This creates a matrix of covariates `x` with two columns, a vector `y` of normally-distributed outcome values, and a set of folds for a sample of `n = 100` study participants. The workhorse function of `vimp`, for $R^2$-based variable importance, is `vimp_rsquared`. 
There are two ways to compute variable importance: in the first method, you allow `vimp` to run regressions for you and return variable importance; in the second method (discussed in ["Using precomputed regression function estimates in `vimp`"](precomputed-regressions.html)), you run the regressions yourself and plug these into `vimp`. I will focus on the first method here. The basic arguments are * Y: the outcome (in this example, `y`) * X: the covariates (in this example, `x`) * indx: the covariate(s) of interest for evaluating importance (here, either 1 or 2) * run_regression: a logical value telling `vimp_rsquared` whether or not to run a regression of Y on X (`TRUE` in this example) * SL.library: a "library" of learners to pass to the function `SuperLearner` (since `run_regression = TRUE`) * V: the number of folds to use for cross-fitted variable importance This second-to-last argument, `SL.library`, determines the estimators you want to use for the conditional mean of Y given X. Estimates of variable importance rely on good estimators of the conditional mean, so we suggest using flexible estimators and model stacking to do so. One option for this is the `SuperLearner` package; load that package using ```{r learner-lib-small, message = FALSE} library("SuperLearner") # load specific algorithms library("ranger") ``` The code ```{r est-1, warning = FALSE} est_1 <- vimp_rsquared(Y = y, X = x, indx = 1, run_regression = TRUE, SL.library = c("SL.ranger", "SL.mean"), V = 2, env = environment()) ``` uses the Super Learner to fit the required regression functions, and computes an estimate of variable importance for the importance of $X_1$. We can visualize the estimate, standard error, and confidence interval by printing or typing the object name: ```{r print-est-1} est_1 print(est_1) ``` This output shows that we have estimated the importance of $X_1$ to be `r round(est_1$est, 3)`, with a 95% confidence interval of `r paste0("[", round(est_1$ci[,1], 3), ", ", round(est_1$ci[, 2], 3), "]")`. ## Detailed guide In this section, we provide a fuller example of estimating $R^2$-based variable importance in the context of assessing the importance of amino acid sequence features in predicting the neutralization sensitivity of the HIV virus to the broadly neutralizing antibody VRC01. For more information about this study, see @magaret2019. Often when working with data we attempt to estimate the conditional mean of the outcome $Y$ given features $X$, defined as $\mu_P(x) = E_P(Y \mid X = x)$. There are many tools for estimating this conditional mean. We might choose a classical parametric tool such as linear regression. We might also want to be model-agnostic and use a more nonparametric approach to estimate the conditional mean. However, - This involves using some nonparametric smoothing technique, which requires: (1) choosing a technique, and (2) selecting tuning parameters - Naive optimal tuning balances out the bias and variance of the smoothing estimator. Is this the correct trade-off for estimating the conditional mean? Once we have a good estimate of the conditional mean, it is often of scientific interest to understand which features contribute the most to the variation in $\mu_P$. Specifically, we might consider \[\mu_{P, s}(x) = E_P(Y \mid X_{(-s)} = x_{(-s)}),\] where for a vector $v$ and a set of indices $s$, $v_{-(s)}$ denotes the elements of $v$ with index not in $s$. By comparing $\mu_{P, s}$ to $\mu_P$ we can evaluate the importance of the $s$th element (or group of elements). 
Assume that our data are generated according to the mechanism $P_0$. We define the population $R^2$ value of a given regression function $\mu$ as $R^2(\mu, P_0) = 1 - \frac{E_{P_0}\{Y - \mu(X)\}^2}{var_{P_0}(Y)}$, where the numerator of this expression is the population mean squared error and the denominator is the population variance. We can then define a nonparametric measure of variable importance, \[\psi_{0, s} = R^2(\mu_{P_0}, P_0) - R^2(\mu_{P_0,s}, P_0),\] which is the proportion of the variability in the outcome explained by including $X_j$ in our chosen estimation technique. This document introduces you to the basic tools in `vimp` and how to apply them to a dataset. I will explore one method for obtaining variable estimates using `vimp`: you only specify a *library* of candidate estimators for the conditional means $\mu_{P_0}$ and $\mu_{P_0, s}$; you allow `vimp` to obtain the optimal estimates of these quantities using the `SuperLearner` [@vanderlaan2007], and use these estimates to obtain variable importance estimates. A second method (using precomputed estimates of the regression functions) exists and is described in ["Using precomputed regression function estimates in `vimp`"](precomputed-regressions.html). ### A look at the VRC01 data Throughout this document I will use the VRC01 data [@magaret2019], a subset of the data freely available from the Los Alamos National Laboratory's Compile, Neutralize, and Tally Neutralizing Antibody Panels database. Information about these data is available [here](https://doi.org/10.1371/journal.pcbi.1006952). ```{r load-vrc01-data} # read in the data data("vrc01") ``` While there are several outcomes of interest in these data (continuous measures of neutralization and binary measures of resistance), we will focus on the binary measure of resistance to VRC01 given by `ic50.censored`. This variable is a binary indicator that the concentration of VRC01 necessary to neutralize 50% of viral replicates in a sample (IC-50) was right-censored; since higher values of IC-50 imply a more resistant virus, this indicator is a proxy for viral resistance. In addition to the outcome of interest, there are measurements on several groups of variables: viral subtype, geographic region of origin (a potential confounding variable), amino acid sequence features (further grouped into the CD4 binding sites, VRC01 binding footprint, sites with sufficient exposed surface area, sites identified as important for glycosylation, sites with residues that covary with the VRC01 binding footprint, sites associated with VRC01-specific potential N-linked glycosylation (PNGS) effects, sites in gp41 associated with VRC01 neutralization or sensitivity, sites for indicating N-linked glycosylation), region-specific counts of PNGS, viral geometry, cysteine counts, and steric bulk at critical locations. For the sake of simplicity, we will focus here on only three groups of features: viral subtype, geographic region of origin, and viral geometry features. ```{r subset-data} library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) ``` Since there are 17 features and two groups, it is of interest to determine variable importance both for the individual features separately and for the two groups of features (since the geographic variables are potential confounders). 
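Before turning to model-based estimation with `vimp`, it can help to see the importance parameter written out as a naive plug-in computation. The following sketch (added here for intuition only, and not part of the original analysis) uses simple logistic regression fits, an arbitrary choice for illustration, to estimate the two conditional means for a single feature (taken here to be the fifth column of `X`, the 01_AE subtype indicator used later in this vignette) and plugs them into the difference in $R^2$ defined above. It omits the cross-fitting, debiasing, and standard error estimation that `vimp` provides, so its output should not be used for inference.

```{r naive-plugin-sketch, eval = FALSE}
# naive plug-in estimate of the R^2-based importance of column 5 of X
# (for intuition only; vimp adds cross-fitting, debiasing, and standard errors)
full_fit <- glm(y ~ ., data = data.frame(y = y, X), family = binomial())
redu_fit <- glm(y ~ ., data = data.frame(y = y, X[, -5]), family = binomial())
plug_in_r2 <- function(fitted_vals, y) 1 - mean((y - fitted_vals)^2) / var(y)
plug_in_r2(fitted(full_fit), y) - plug_in_r2(fitted(redu_fit), y)
```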
### A first approach: linear regression Suppose that I believe that a linear model truly describes the relationship between the outcome and the covariates in these data. In that case, I would be justified in only fitting a linear regression to estimate the conditional means; this means that in my importance analysis, I should also use only linear regression. The analysis is achieved by the following: ```{r est-regressions-lm, warning = FALSE} geog_indx <- max(which(grepl("geog", names(X)))) set.seed(1234) for (i in seq_len(ncol(X) - geog_indx)) { # note that we're using a small number of cross-fitting folds for speed lm_vim <- vim(Y = y, X = X, indx = geog_indx + i, run_regression = TRUE, SL.library = "SL.glm", type = "r_squared", cvControl = list(V = 2), scale = "logit", family = binomial()) if (i == 1) { lm_mat <- lm_vim } else { lm_mat <- merge_vim(lm_mat, lm_vim) } } # print out the importance lm_mat ``` ### Building a library of learners In general, we don't believe that a linear model truly holds. Thinking about potential model misspecification leads us to consider other algorithms. Suppose that I prefer to use generalized additive models [@hastie1990] to estimate $\mu_{P_0}$ and $\mu_{P_0, s}$, so I am planning on using the `gam` package. Suppose that you prefer to use the elastic net [@zou2005], and are planning to use the `glmnet` package. The choice of either method is somewhat subjective, and I also will have to use a technique like cross-validation to determine an optimal tuning parameter in each case. It is also possible that neither additive models nor the elastic net will do a good job estimating the true conditional means! This motivates using `SuperLearner` to allow the data to determine the optimal combination of *base learners* from a *library* that I define. These base learners are a combination of different methods (e.g., generalized additive models and elastic net) and instances of the same method with different tuning parameter values (e.g., additive models with 3 and 4 degrees of freedom). The Super Learner is an example of model stacking, or model aggregation --- these approaches use a data-adaptive combination of base learners to make predictions. 
For instance, my library could include the elastic net, random forests [@breiman2001], and gradient boosted trees [@friedman2001] as follows: ```{r full-learner-lib} # create a function for boosted stumps SL.gbm.1 <- function(..., interaction.depth = 1) SL.gbm(..., interaction.depth = interaction.depth) # create GAMs with different degrees of freedom SL.gam.3 <- function(..., deg.gam = 3) SL.gam(..., deg.gam = deg.gam) SL.gam.4 <- function(..., deg.gam = 4) SL.gam(..., deg.gam = deg.gam) SL.gam.5 <- function(..., deg.gam = 5) SL.gam(..., deg.gam = deg.gam) # add more levels of alpha for glmnet create.SL.glmnet <- function(alpha = c(0.25, 0.5, 0.75)) { for (mm in seq(length(alpha))) { eval(parse(file = "", text = paste('SL.glmnet.', alpha[mm], '<- function(..., alpha = ', alpha[mm], ') SL.glmnet(..., alpha = alpha)', sep = '')), envir = .GlobalEnv) } invisible(TRUE) } create.SL.glmnet() # add tuning parameters for randomForest create.SL.randomForest <- function(tune = list(mtry = c(1, 5, 7), nodesize = c(1, 5, 10))) { tuneGrid <- expand.grid(tune, stringsAsFactors = FALSE) for (mm in seq(nrow(tuneGrid))) { eval(parse(file = "", text = paste("SL.randomForest.", mm, "<- function(..., mtry = ", tuneGrid[mm, 1], ", nodesize = ", tuneGrid[mm, 2], ") SL.randomForest(..., mtry = mtry, nodesize = nodesize)", sep = "")), envir = .GlobalEnv) } invisible(TRUE) } create.SL.randomForest() # create the library learners <- c("SL.glmnet", "SL.glmnet.0.25", "SL.glmnet.0.5", "SL.glmnet.0.75", "SL.randomForest", "SL.randomForest.1", "SL.randomForest.2", "SL.randomForest.3", "SL.randomForest.4", "SL.randomForest.5", "SL.randomForest.6", "SL.randomForest.7", "SL.randomForest.8", "SL.randomForest.9", "SL.gbm.1") ``` Now that I have created the library of learners, I can move on to estimating variable importance. ### Estimating variable importance for a single variable The main function for R-squared-based variable importance in the `vimp` package is the `vimp_rsquared()` function. There are five main arguments to `vimp_rsquared()`: - `Y`, the outcome - `X`, the covariates - `indx`, which determines the feature I want to estimate variable importance for - `SL.library`, the library of candidate learners - `V`, the number of cross-fitting folds (also referred to as cross-validation folds) to use for computing variable importance The main arguments differ if precomputed regression function estimates are used; please see ["Using precomputed regression function estimates in `vimp`"](precomputed-regressions.html) for further discussion of this case. Suppose that the first feature that I want to estimate variable importance for is whether the viral subtype is 01_AE, `subtype.is.01_AE`. Then supplying `vimp_rsquared()` with - `Y = y` - `X = X` - `indx = 5` - `SL.library = learners` - `V = 5` means that: - I want to use `SuperLearner()` to estimate the conditional means $\mu_{P_0}$ and $\mu_{P_0,s}$, and my candidate library is `learners` - I want to estimate variable importance for the fifth column of the VRC01 covariates, which is `subtype.is.01_AE` - I want to use five-fold cross-fitting to estimate importance The call to `vimp_rsquared()` looks like this: ```{r vimp-with-sl-1, eval = FALSE} vimp_rsquared(Y = y, X = X, indx = 5, run_regression = TRUE, SL.library = learners, V = 5, family = binomial()) ``` While this is the preferred method for estimating variable importance, using a large library of learners may cause the function to take time to run. 
Usually this is okay --- in general, you took a long time to collect the data, so letting an algorithm run for a few hours should not be an issue. However, for the sake of illustration, I can estimate varibable importance for 01_AE subtype only using only using a small library, a small number of cross-validation folds in the Super Learner, and a small number of cross-fitting folds as follows (again, I suggest using a larger number of folds and a larger library in practice): ```{r vimp-with-sl-fam, message = FALSE, warning = FALSE} # small learners library learners.2 <- c("SL.ranger") # small number of cross-fitting folds V <- 2 # small number of CV folds for Super Learner sl_cvcontrol <- list(V = 2) # now estimate variable importance set.seed(5678) start_time <- Sys.time() subtype_01_AE_vim <- vimp_rsquared(Y = y, X = X, indx = 5, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) end_time <- Sys.time() ``` This code takes approximately `r round(as.numeric(end_time - start_time), 3)` seconds to run on a (not very fast) PC. I can display these estimates: ```{r print-vim} subtype_01_AE_vim ``` The object returned by `vimp_rsquared()` also contains lists of fitted values from using `SuperLearner()`; I access these using `$full_fit` and `$red_fit`. For example, ```{r look-at-ests} head(subtype_01_AE_vim$full_fit[[1]]) head(subtype_01_AE_vim$red_fit[[1]]) ``` I can obtain estimates for the remaining individual features in the same way (again using only using a small library for illustration): ```{r vrc01-sl, warning = FALSE} ests <- subtype_01_AE_vim set.seed(1234) for (i in seq_len(ncol(X) - geog_indx - 1)) { # note that we're using a small number of cross-fitting folds for speed this_vim <- vimp_rsquared(Y = y, X = X, indx = geog_indx + i + 1, run_regression = TRUE, SL.library = learners.2, V = V, cvControl = sl_cvcontrol, family = binomial()) ests <- merge_vim(ests, this_vim) } ``` Now that I have estimates of each of individual feature's variable importance, I can view them all simultaneously by plotting: ```{r vrc01-vim, fig.width = 8.5, fig.height = 8, message = FALSE} library("ggplot2") library("cowplot") theme_set(theme_cowplot()) all_vars <- c(paste0("Subtype is ", c("01_AE", "02_AG", "07_BC", "A1", "A1C", "A1D", "B", "C", "D", "O", "Other")), paste0("Length of ", c("Env", "gp120", "V5", "V5 outliers", "Loop E", "Loop E outliers"))) est_plot_tib <- ests$mat %>% mutate( var_fct = rev(factor(s, levels = ests$mat$s, labels = all_vars[as.numeric(ests$mat$s) - geog_indx], ordered = TRUE)) ) # plot est_plot_tib %>% ggplot(aes(x = est, y = var_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated individual feature importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ``` ## Estimating variable importance for a group of variables Now that I have estimated variable importance for each of the individual features, I can estimate variable importance for each of the groups that I mentioned above: biological and behavioral features. The only difference between estimating variable importance for a group of features rather than an individual feature is that now I specify a vector for `s`; I can use any of the options listed in the previous section to compute these estimates. 
```{r vrc01-group-vim, fig.width = 8.5, fig.height = 8} # get the estimates set.seed(91011) subtype_vim <- vimp_rsquared(Y = y, X = X, indx = 5:15, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) geometry_vim <- vimp_rsquared(Y = y, X = X, indx = 16:21, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) # combine and plot groups <- merge_vim(subtype_vim, geometry_vim) all_grp_nms <- c("Viral subtype", "Viral geometry") grp_plot_tib <- groups$mat %>% mutate( grp_fct = factor(case_when( s == "5,6,7,8,9,10,11,12,13,14,15" ~ "1", s == "16,17,18,19,20,21" ~ "2" ), levels = c("1", "2"), labels = all_grp_nms, ordered = TRUE) ) grp_plot_tib %>% ggplot(aes(x = est, y = grp_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated feature group importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ``` ## Types of population variable importance In this document, I have focused on one particular definition of population variable importance that I call *conditional* variable importance. For a further discussion of what I call *marginal* variable importance and *Shapley population* variable importance, please see ["Types of VIMs"](types-of-vims.html). ## References
/scratch/gouwar.j/cran-all/cranData/vimp/inst/doc/introduction-to-vimp.Rmd
## ----setup, echo = FALSE, include = FALSE------------------------------------- library(knitr) opts_knit$set(cache = FALSE, verbose = TRUE, global.par = TRUE) ## ----more-setup, echo = FALSE------------------------------------------------- par(mar = c(5, 12, 4, 2) + 0.1) ## ----gen-data-missing-y------------------------------------------------------- set.seed(1234) p <- 2 n <- 100 x <- replicate(p, stats::rnorm(n, 0, 1)) # apply the function to the x's y <- 1 + 0.5 * x[, 1] + 0.75 * x[, 2] + stats::rnorm(n, 0, 1) # indicator of observing Y logit_g_x <- .01 * x[, 1] + .05 * x[, 2] - 2.5 g_x <- exp(logit_g_x) / (1 + exp(logit_g_x)) C <- rbinom(n, size = 1, prob = g_x) obs_y <- y obs_y[C == 0] <- NA x_df <- as.data.frame(x) full_df <- data.frame(Y = obs_y, x_df, C = C) ## ----missing-y-vim------------------------------------------------------------ library("vimp") library("SuperLearner") # estimate the probability of missing outcome ipc_weights <- 1 / predict(glm(C ~ V1 + V2, family = "binomial", data = full_df), type = "response") # set up the SL learners <- c("SL.glm", "SL.mean") V <- 2 # estimate vim for X2 set.seed(1234) est <- vim(Y = obs_y, X = x_df, indx = 2, type = "r_squared", run_regression = TRUE, SL.library = learners, alpha = 0.05, delta = 0, C = C, Z = c("Y", "1"), ipc_weights = ipc_weights, cvControl = list(V = V)) ## ----generate-data------------------------------------------------------------ set.seed(4747) p <- 2 n <- 100 x <- replicate(p, stats::rnorm(n, 0, 1)) # apply the function to the x's y <- 1 + 0.5 * x[, 1] + 0.75 * x[, 2] + stats::rnorm(n, 0, 1) # make this a two-phase study, assume that X2 is only measured on # subjects in the second phase; note C = 1 is inclusion C <- rbinom(n, size = 1, prob = exp(y + 0.1 * x[, 1]) / (1 + exp(y + 0.1 * x[, 1]))) tmp_x <- x tmp_x[C == 0, 2] <- NA x <- tmp_x x_df <- as.data.frame(x) full_df <- data.frame(Y = y, x_df, C = C) ## ----ipw-vim------------------------------------------------------------------ library("vimp") library("SuperLearner") # estimate the probability of being included only in the first phase sample ipc_weights <- 1 / predict(glm(C ~ y + V1, family = "binomial", data = full_df), type = "response") # set up the SL learners <- c("SL.glm") V <- 2 # estimate vim for X2 set.seed(1234) est <- vim(Y = y, X = x_df, indx = 2, type = "r_squared", run_regression = TRUE, SL.library = learners, alpha = 0.05, delta = 0, C = C, Z = c("Y", "1"), ipc_weights = ipc_weights, cvControl = list(V = V), method = "method.CC_LS")
/scratch/gouwar.j/cran-all/cranData/vimp/inst/doc/ipcw-vim.R
--- title: "Variable importance with coarsened data" author: "Brian D. Williamson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Variable importance with coarsened data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} csl: chicago-author-date.csl bibliography: vimp_bib.bib --- ```{r setup, echo = FALSE, include = FALSE} library(knitr) opts_knit$set(cache = FALSE, verbose = TRUE, global.par = TRUE) ``` ```{r more-setup, echo = FALSE} par(mar = c(5, 12, 4, 2) + 0.1) ``` ## Introduction In some settings, we don't have access to the full data unit on each observation in our sample. These "coarsened-data" settings (see, e.g., @vandervaart2000) create a layer of complication in estimating variable importance. In particular, the efficient influence function (EIF) in the coarsened-data setting is more complex, and involves estimating an additional quantity: the projection of the full-data EIF (estimated on the fully-observed sample) onto the variables that are always observed (Chapter 25.5.3 of @vandervaart2000; see also Example 6 in @williamson2021). ## Coarsened data in `vimp` `vimp` can handle coarsened data, with the specification of several arguments: * `C`: and binary indicator vector, denoting which observations have been coarsened; 1 denotes fully observed, while 0 denotes coarsened. * `ipc_weights`: inverse probability of coarsening weights, assumed to already be inverted (i.e., `ipc_weights` = 1 / [estimated probability of coarsening]). * `ipc_est_type`: the type of procedure used for coarsened-at-random settings; options are `"ipw"` (for inverse probability weighting) or `"aipw"` (for augmented inverse probability weighting). Only used if `C` is not all equal to 1. * `Z`: a character vector specifying the variable(s) among `Y` and `X` that are thought to play a role in the coarsening mechanism. To specify the outcome, use `"Y"`; to specify covariates, use a character number corresponding to the desired position in `X` (e.g., `"1"` or `"X1"` [the latter is case-insensitive]). `Z` plays a role in the additional estimation mentioned above. Unless otherwise specified, an internal call to `SuperLearner` regresses the full-data EIF (estimated on the fully-observed data) onto a matrix that is the parsed version of `Z`. If you wish to use any covariates from `X` as part of your coarsening mechanism (and thus include them in `Z`), and they have *different names from `X1`, ...*, then you must use character numbers (i.e., `"1"` refers to the first variable, etc.) to refer to the variables to include in `Z`. Otherwise, `vimp` will throw an error. ## Example with missing outcomes In this example, the outcome `Y` is subject to missingness. 
We generate data as follows: ```{r gen-data-missing-y} set.seed(1234) p <- 2 n <- 100 x <- replicate(p, stats::rnorm(n, 0, 1)) # apply the function to the x's y <- 1 + 0.5 * x[, 1] + 0.75 * x[, 2] + stats::rnorm(n, 0, 1) # indicator of observing Y logit_g_x <- .01 * x[, 1] + .05 * x[, 2] - 2.5 g_x <- exp(logit_g_x) / (1 + exp(logit_g_x)) C <- rbinom(n, size = 1, prob = g_x) obs_y <- y obs_y[C == 0] <- NA x_df <- as.data.frame(x) full_df <- data.frame(Y = obs_y, x_df, C = C) ``` Next, we estimate the relevant components for `vimp`: ```{r missing-y-vim} library("vimp") library("SuperLearner") # estimate the probability of missing outcome ipc_weights <- 1 / predict(glm(C ~ V1 + V2, family = "binomial", data = full_df), type = "response") # set up the SL learners <- c("SL.glm", "SL.mean") V <- 2 # estimate vim for X2 set.seed(1234) est <- vim(Y = obs_y, X = x_df, indx = 2, type = "r_squared", run_regression = TRUE, SL.library = learners, alpha = 0.05, delta = 0, C = C, Z = c("Y", "1"), ipc_weights = ipc_weights, cvControl = list(V = V)) ``` ## Example with two-phase sampling In this example, we observe outcome `Y` and covariate `X1` on all participants in a study. Based on the value of `Y` and `X1`, we include some participants in a second-phase sample, and further measure covariate `X2` on these participants. This is an example of a two-phase study. We generate data as follows: ```{r generate-data} set.seed(4747) p <- 2 n <- 100 x <- replicate(p, stats::rnorm(n, 0, 1)) # apply the function to the x's y <- 1 + 0.5 * x[, 1] + 0.75 * x[, 2] + stats::rnorm(n, 0, 1) # make this a two-phase study, assume that X2 is only measured on # subjects in the second phase; note C = 1 is inclusion C <- rbinom(n, size = 1, prob = exp(y + 0.1 * x[, 1]) / (1 + exp(y + 0.1 * x[, 1]))) tmp_x <- x tmp_x[C == 0, 2] <- NA x <- tmp_x x_df <- as.data.frame(x) full_df <- data.frame(Y = y, x_df, C = C) ``` If we want to estimate variable importance of `X2`, we need to use the coarsened-data arguments in `vimp`. This can be accomplished in the following manner: ```{r ipw-vim} library("vimp") library("SuperLearner") # estimate the probability of being included only in the first phase sample ipc_weights <- 1 / predict(glm(C ~ y + V1, family = "binomial", data = full_df), type = "response") # set up the SL learners <- c("SL.glm") V <- 2 # estimate vim for X2 set.seed(1234) est <- vim(Y = y, X = x_df, indx = 2, type = "r_squared", run_regression = TRUE, SL.library = learners, alpha = 0.05, delta = 0, C = C, Z = c("Y", "1"), ipc_weights = ipc_weights, cvControl = list(V = V), method = "method.CC_LS") ``` # References
/scratch/gouwar.j/cran-all/cranData/vimp/inst/doc/ipcw-vim.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, message = FALSE--------------------------------------------------- library("vimp") library("SuperLearner") ## ----load-vrc01-data---------------------------------------------------------- # read in the data data("vrc01") # subset to the columns of interest for this analysis library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) set.seed(1234) vrc01_folds <- make_folds(y = y, V = 2) ## ----est-regressions-lm, warning = FALSE-------------------------------------- library("rlang") vrc01_subset <- vrc01 %>% select(ic50.censored, starts_with("geog"), starts_with("subtype"), starts_with("length")) %>% rename(y = ic50.censored) # estimate prediction function on each subset, predict on held-out fold full_fit <- vector("numeric", length = nrow(vrc01)) for (v in 1:2) { train_v <- subset(vrc01_subset, vrc01_folds != v) test_v <- subset(vrc01_subset, vrc01_folds == v) full_mod <- glm(y ~ ., data = train_v) full_fit[vrc01_folds == v] <- predict(full_mod, newdata = test_v) } # estimate the reduced conditional means for each of the individual variables # remove the outcome for the predictor matrix geog_indx <- max(which(grepl("geog", names(X)))) for (i in seq_len(ncol(X) - geog_indx)) { this_name <- names(X)[i + geog_indx] red_fit <- vector("numeric", length = nrow(vrc01)) for (v in 1:2) { train_v <- subset(vrc01_subset, vrc01_folds != v) test_v <- subset(vrc01_subset, vrc01_folds == v) red_fit[vrc01_folds == v] <- suppressWarnings( predict(glm(y ~ ., data = train_v %>% select(-!!this_name)), newdata = test_v) ) } this_vim <- vim(Y = y, f1 = full_fit, f2 = red_fit, indx = i + geog_indx, run_regression = FALSE, type = "r_squared", sample_splitting_folds = vrc01_folds, scale = "logit") if (i == 1) { lm_mat <- this_vim } else { lm_mat <- merge_vim(lm_mat, this_vim) } } # print out the matrix lm_mat ## ----estimate-full-regression-with-cf, message = FALSE, warning = FALSE------- learners <- "SL.ranger" # estimate the full regression function V <- 2 set.seed(4747) full_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X, SL.library = learners, cvControl = list(V = 2 * V), innerCvControl = list(list(V = V)), family = binomial() ) ) # get a numeric vector of cross-fitting folds cross_fitting_folds <- get_cv_sl_folds(full_cv_fit$folds) # get sample splitting folds set.seed(1234) sample_splitting_folds <- make_folds(unique(cross_fitting_folds), V = 2) full_cv_preds <- full_cv_fit$SL.predict ## ----estimate-reduced-regressions-with-cf, message = FALSE, warning = FALSE---- vars <- names(X)[(geog_indx + 1):ncol(X)] set.seed(1234) for (i in seq_len(length(vars))) { # use "eval" and "parse" to assign the objects of interest to avoid duplicating code eval(parse(text = paste0("reduced_", vars[i], "_cv_fit <- suppressWarnings(SuperLearner::CV.SuperLearner( Y = y, X = X[, -(geog_indx + i), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ))"))) eval(parse(text = paste0("reduced_", vars[i], "_cv_preds <- reduced_", vars[i], "_cv_fit$SL.predict"))) } ## ----cf-vims------------------------------------------------------------------ for (i in seq_len(length(vars))) { # 
again, use "eval" and "parse" to assign the objects of interest to avoid duplicating code eval(parse(text = paste0("cf_", vars[i], "_vim <- vimp_rsquared(Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_", vars[i], "_cv_preds, indx = (geog_indx + i), cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, run_regression = FALSE, alpha = 0.05, V = V, na.rm = TRUE, scale = 'logit')"))) } cf_ests <- merge_vim(cf_subtype.is.01_AE_vim, cf_subtype.is.02_AG_vim, cf_subtype.is.07_BC_vim, cf_subtype.is.A1_vim, cf_subtype.is.A1C_vim, cf_subtype.is.A1D_vim, cf_subtype.is.B_vim, cf_subtype.is.C_vim, cf_subtype.is.D_vim, cf_subtype.is.O_vim, cf_subtype.is.Other_vim, cf_length.env_vim, cf_length.gp120_vim, cf_length.loop.e_vim, cf_length.loop.e.outliers_vim, cf_length.v5_vim, cf_length.v5.outliers_vim) all_vars <- c(paste0("Subtype is ", c("01_AE", "02_AG", "07_BC", "A1", "A1C", "A1D", "B", "C", "D", "O", "Other")), paste0("Length of ", c("Env", "gp120", "V5", "V5 outliers", "Loop E", "Loop E outliers"))) ## ----plot-cf-vim, fig.width = 8.5, fig.height = 8----------------------------- library("ggplot2") library("cowplot") theme_set(theme_cowplot()) cf_est_plot_tib <- cf_ests$mat %>% mutate( var_fct = rev(factor(s, levels = cf_ests$mat$s, labels = all_vars[as.numeric(cf_ests$mat$s) - geog_indx], ordered = TRUE)) ) # plot cf_est_plot_tib %>% ggplot(aes(x = est, y = var_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated individual feature importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ## ----cf-group-vim, fig.width = 8.5, fig.height = 8, warning = FALSE----------- set.seed(91011) reduced_subtype_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X[, -c(5:15), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ) ) reduced_subtype_cv_preds <- reduced_subtype_cv_fit$SL.predict reduced_geometry_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X[, -c(16:21), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ) ) reduced_geometry_cv_preds <- reduced_geometry_cv_fit$SL.predict cf_subtype_vim <- vimp_rsquared( Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_subtype_cv_preds, indx = 5:15, run_regression = FALSE, V = V, cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, scale = "logit" ) cf_geometry_vim <- vimp_rsquared( Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_geometry_cv_preds, indx = 16:21, run_regression = FALSE, V = V, cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, scale = "logit" ) cf_groups <- merge_vim(cf_subtype_vim, cf_geometry_vim) all_grp_nms <- c("Viral subtype", "Viral geometry") grp_plot_tib <- cf_groups$mat %>% mutate( grp_fct = factor(case_when( s == "5,6,7,8,9,10,11,12,13,14,15" ~ "1", s == "16,17,18,19,20,21" ~ "2" ), levels = c("1", "2"), labels = all_grp_nms, ordered = TRUE) ) grp_plot_tib %>% ggplot(aes(x = est, y = grp_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + 
xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated feature group importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)")
/scratch/gouwar.j/cran-all/cranData/vimp/inst/doc/precomputed-regressions.R
--- title: "Using precomputed regression function estimates in `vimp`" author: "Brian D. Williamson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Using precomputed regression function estimates in `vimp`} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} csl: chicago-author-date.csl bibliography: vimp_bib.bib --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, message = FALSE} library("vimp") library("SuperLearner") ``` ## Introduction In the [main vignette](introduction-to-vimp.html), we analyzed the VRC01 data [@magaret2019], a subset of the data freely available from the Los Alamos National Laboratory's Compile, Neutralize, and Tally Neutralizing Antibody Panels database. Information about these data is available [here](https://doi.org/10.1371/journal.pcbi.1006952). In each of the analyses, I used `run_regression = TRUE`. In this vignette, I discuss how to use precomputed regression function estimates with `vimp`. The results of this analysis replicate the analysis in the [main vignette](introduction-to-vimp.html). ```{r load-vrc01-data} # read in the data data("vrc01") # subset to the columns of interest for this analysis library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) set.seed(1234) vrc01_folds <- make_folds(y = y, V = 2) ``` ## Using precomputed regression function estimates without cross-fitting ### A first approach: linear regression As in the [main vignette](introduction-to-vimp.html), we first start by fitting only linear regression models. In this section, we use the function `vim()`; this function does not use cross-fitting to estimate variable importance, and greatly simplifies the code for precomputed regression models. ```{r est-regressions-lm, warning = FALSE} library("rlang") vrc01_subset <- vrc01 %>% select(ic50.censored, starts_with("geog"), starts_with("subtype"), starts_with("length")) %>% rename(y = ic50.censored) # estimate prediction function on each subset, predict on held-out fold full_fit <- vector("numeric", length = nrow(vrc01)) for (v in 1:2) { train_v <- subset(vrc01_subset, vrc01_folds != v) test_v <- subset(vrc01_subset, vrc01_folds == v) full_mod <- glm(y ~ ., data = train_v) full_fit[vrc01_folds == v] <- predict(full_mod, newdata = test_v) } # estimate the reduced conditional means for each of the individual variables # remove the outcome for the predictor matrix geog_indx <- max(which(grepl("geog", names(X)))) for (i in seq_len(ncol(X) - geog_indx)) { this_name <- names(X)[i + geog_indx] red_fit <- vector("numeric", length = nrow(vrc01)) for (v in 1:2) { train_v <- subset(vrc01_subset, vrc01_folds != v) test_v <- subset(vrc01_subset, vrc01_folds == v) red_fit[vrc01_folds == v] <- suppressWarnings( predict(glm(y ~ ., data = train_v %>% select(-!!this_name)), newdata = test_v) ) } this_vim <- vim(Y = y, f1 = full_fit, f2 = red_fit, indx = i + geog_indx, run_regression = FALSE, type = "r_squared", sample_splitting_folds = vrc01_folds, scale = "logit") if (i == 1) { lm_mat <- this_vim } else { lm_mat <- merge_vim(lm_mat, this_vim) } } # print out the matrix lm_mat ``` ## Estimating variable importance for a single variable using precomputed regression function estimates In this section, we will use cross-fitting and pre-computed estimates of the regression functions. 
This can be especially useful if you have already run a call to `CV.SuperLearner` -- that function returns estimates based on each observation being part of the hold-out set. However, while this approach can save you some computation time, it requires a hefty amount of mental overhead. We will use `CV.SuperLearner` to fit the individual regression functions, taking care to use the same cross-fitting folds in each regression. We will then create two groups of validation folds for sample-splitting. For this analysis, we will use `V = 2` folds for cross-fitted variable importance estimation (as we did in the [main vignette](introduction-to-vimp.html)). Note that this entails running `CV.SuperLearner` with $2V = 4$ folds. First, we estimate the regression function based on all variables: ```{r estimate-full-regression-with-cf, message = FALSE, warning = FALSE} learners <- "SL.ranger" # estimate the full regression function V <- 2 set.seed(4747) full_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X, SL.library = learners, cvControl = list(V = 2 * V), innerCvControl = list(list(V = V)), family = binomial() ) ) # get a numeric vector of cross-fitting folds cross_fitting_folds <- get_cv_sl_folds(full_cv_fit$folds) # get sample splitting folds set.seed(1234) sample_splitting_folds <- make_folds(unique(cross_fitting_folds), V = 2) full_cv_preds <- full_cv_fit$SL.predict ``` Next, to estimate the importance of each variable, we need to estimate the reduced regression function for each variable: ```{r estimate-reduced-regressions-with-cf, message = FALSE, warning = FALSE} vars <- names(X)[(geog_indx + 1):ncol(X)] set.seed(1234) for (i in seq_len(length(vars))) { # use "eval" and "parse" to assign the objects of interest to avoid duplicating code eval(parse(text = paste0("reduced_", vars[i], "_cv_fit <- suppressWarnings(SuperLearner::CV.SuperLearner( Y = y, X = X[, -(geog_indx + i), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ))"))) eval(parse(text = paste0("reduced_", vars[i], "_cv_preds <- reduced_", vars[i], "_cv_fit$SL.predict"))) } ``` Then we can plug these values into `vimp_rsquared()` (or equivalently, `cv_vim()` with `type = "r_squared"`) as follows: ```{r cf-vims} for (i in seq_len(length(vars))) { # again, use "eval" and "parse" to assign the objects of interest to avoid duplicating code eval(parse(text = paste0("cf_", vars[i], "_vim <- vimp_rsquared(Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_", vars[i], "_cv_preds, indx = (geog_indx + i), cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, run_regression = FALSE, alpha = 0.05, V = V, na.rm = TRUE, scale = 'logit')"))) } cf_ests <- merge_vim(cf_subtype.is.01_AE_vim, cf_subtype.is.02_AG_vim, cf_subtype.is.07_BC_vim, cf_subtype.is.A1_vim, cf_subtype.is.A1C_vim, cf_subtype.is.A1D_vim, cf_subtype.is.B_vim, cf_subtype.is.C_vim, cf_subtype.is.D_vim, cf_subtype.is.O_vim, cf_subtype.is.Other_vim, cf_length.env_vim, cf_length.gp120_vim, cf_length.loop.e_vim, cf_length.loop.e.outliers_vim, cf_length.v5_vim, cf_length.v5.outliers_vim) all_vars <- c(paste0("Subtype is ", c("01_AE", "02_AG", "07_BC", "A1", "A1C", "A1D", "B", "C", "D", "O", "Other")), paste0("Length of ", c("Env", "gp120", "V5", "V5 outliers", "Loop E", "Loop E outliers"))) ``` And we can view them all simultaneously by plotting: ```{r plot-cf-vim, fig.width = 
8.5, fig.height = 8} library("ggplot2") library("cowplot") theme_set(theme_cowplot()) cf_est_plot_tib <- cf_ests$mat %>% mutate( var_fct = rev(factor(s, levels = cf_ests$mat$s, labels = all_vars[as.numeric(cf_ests$mat$s) - geog_indx], ordered = TRUE)) ) # plot cf_est_plot_tib %>% ggplot(aes(x = est, y = var_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated individual feature importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ``` ## Estimating variable importance for a group of variables using precomputed regression function estimates Finally, we can estimate and plot group importance: ```{r cf-group-vim, fig.width = 8.5, fig.height = 8, warning = FALSE} set.seed(91011) reduced_subtype_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X[, -c(5:15), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ) ) reduced_subtype_cv_preds <- reduced_subtype_cv_fit$SL.predict reduced_geometry_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X[, -c(16:21), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ) ) reduced_geometry_cv_preds <- reduced_geometry_cv_fit$SL.predict cf_subtype_vim <- vimp_rsquared( Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_subtype_cv_preds, indx = 5:15, run_regression = FALSE, V = V, cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, scale = "logit" ) cf_geometry_vim <- vimp_rsquared( Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_geometry_cv_preds, indx = 16:21, run_regression = FALSE, V = V, cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, scale = "logit" ) cf_groups <- merge_vim(cf_subtype_vim, cf_geometry_vim) all_grp_nms <- c("Viral subtype", "Viral geometry") grp_plot_tib <- cf_groups$mat %>% mutate( grp_fct = factor(case_when( s == "5,6,7,8,9,10,11,12,13,14,15" ~ "1", s == "16,17,18,19,20,21" ~ "2" ), levels = c("1", "2"), labels = all_grp_nms, ordered = TRUE) ) grp_plot_tib %>% ggplot(aes(x = est, y = grp_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated feature group importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ``` ## Conclusion In this document, we learned a second method for computing variable importance estimates: rather than having `vimp` run all regression functions for you, you can compute your own regressions and pass these to `vimp`. The results are equivalent, but there is a tradeoff: what you save in computation time by only computing the full regression once must be balanced with the mental overhead of correctly computing the regressions. Additionally, this task is more difficult when using cross-fitted variable importance, which I recommend in nearly all cases when using flexible machine learning tools. ## References
/scratch/gouwar.j/cran-all/cranData/vimp/inst/doc/precomputed-regressions.Rmd
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library("vimp") library("SuperLearner") ## ----load-vrc01-data---------------------------------------------------------- # read in the data data("vrc01") # subset to the columns of interest for this analysis library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) learners <- "SL.glm" ## ----est-subtype-01AE-cond, warning = FALSE----------------------------------- # note the use of a small V and a small number of SL folds, for illustration only set.seed(1234) V <- 2 sl_cvcontrol <- list(V = 2) subtype_01_AE_cond <- vimp_auc(Y = y, X = X, indx = 5, SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol) ## ----est-subtype-01AE-marg, warning = FALSE----------------------------------- # note the use of a small V and a small number of SL folds, for illustration only set.seed(5678) subtype_01_AE_marg <- vimp_auc(Y = y, X = X[, 5, drop = FALSE], indx = 1, SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol) ## ----est-famhist-spvim, warning = FALSE--------------------------------------- set.seed(91011) all_vim_spvim <- sp_vim(Y = y, X = X, type = "auc", SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol, env = environment()) ## ----show-ests---------------------------------------------------------------- subtype_01_AE_cond subtype_01_AE_marg # note: need to look at row for s = 5 all_vim_spvim
/scratch/gouwar.j/cran-all/cranData/vimp/inst/doc/types-of-vims.R
--- title: "Types of VIMs" author: "Brian D. Williamson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Types of VIMs} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} csl: chicago-author-date.csl bibliography: vimp_bib.bib --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library("vimp") library("SuperLearner") ``` ## Introduction In the [main vignette](introduction-to-vimp.html), I discussed variable importance defined using R-squared. I also mentioned that all of the analyses were carried out using a *condititonal* variable importance measure. In this document, I will discuss all three types of variable importance that may be computed using `vimp`. In general, I define variable importance as a function of the true population distribution (denoted by $P_0$) and a predictiveness measure $V$ -- large values of $V$ are assumed to be better. Currently, the measures $V$ implemented in `vimp` are $R^2$, classification accuracy, area under the receiver operating characteristic curve (AUC), and deviance. For a fixed function $f$, the predictiveness is given by $V(f, P)$, where large values imply that $f$ is a good predictor of the outcome. The best possible prediction function, $f_0$, is the *oracle* model -- i.e., the prediction function that I would use if I had access to the distribution $P_0$. Often, $f_0$ is the true conditional mean (e.g., for $R^2$). Then the *total oracle predictiveness* can be defined as $V(f_0, P_0)$. This is the best possible value of predictiveness. I define variable importance measures (VIMs) as contrasts in oracle predictivness. The oracle models that I plug in determine what type of variable importance is being considered, as I outline below. For the remainder of this document, suppose that I have $p$ variables, and an index set $s$ of interest (containing some subset of the $p$ variables). Throughout this document, I will use the VRC01 data [@magaret2019], a subset of the data freely available from the Los Alamos National Laboratory's Compile, Neutralize, and Tally Neutralizing Antibody Panels database. Information about these data is available [here](https://doi.org/10.1371/journal.pcbi.1006952). Throughout, I will also use a simple library of learners for the Super Learner (this is for illustration only; in practice, I suggest using a large library of learners, as outlined in the [main vignette](introduction-to-vimp.html)). Finally, I will use the area under the receiver operating characteristic curve (AUC) to measure importance. ```{r load-vrc01-data} # read in the data data("vrc01") # subset to the columns of interest for this analysis library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) learners <- "SL.glm" ``` ## Conditional VIMs The *reduced oracle predictiveness* is defined as $V(f_{0,-s}, P_0)$, where $f_{0,-s}$ is the best possible prediction function that *does not use the covariates with index in $s$*. Then the conditional VIM is defined as $$V(f_0, P_0) - V(f_{0,-s}, P_0).$$ This is the measure of importance that I estimated in the [main vignette](introduction-to-vimp.html). 
To estimate the conditional VIM for whether the viral subtype is 01_AE, I can use the following code: ```{r est-subtype-01AE-cond, warning = FALSE} # note the use of a small V and a small number of SL folds, for illustration only set.seed(1234) V <- 2 sl_cvcontrol <- list(V = 2) subtype_01_AE_cond <- vimp_auc(Y = y, X = X, indx = 5, SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol) ``` ## Marginal VIMs The *marginal oracle predictiveness* is defined as $V(f_{0,s}, P_0)$, where $f_{0,s}$ is the best possible prediction function that *only uses the covariates with index in $s$*. The *null oracle predictiveness* is defined as $V(f_{0, \emptyset}, P_0)$, where $f_{0,\emptyset}$ is the best possible prediction function that *uses no covariates* (i.e., it fits the marginal mean of the outcome). Then the marginal VIM is defined as $$V(f_{0,s}, P_0) - V(f_{0,\emptyset}, P_0).$$ To estimate the marginal VIM for whether the viral subtype is 01_AE, I can use the following code: ```{r est-subtype-01AE-marg, warning = FALSE} # note the use of a small V and a small number of SL folds, for illustration only set.seed(5678) subtype_01_AE_marg <- vimp_auc(Y = y, X = X[, 5, drop = FALSE], indx = 1, SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol) ``` ## Shapley VIMs The Shapley population VIM (SPVIM) generalizes the marginal and conditional VIMs by averaging over all possible subsets. More specifically, the SPVIM for feature $j$ is given by $$\sum_{s \subseteq \{1,\ldots,p\} \setminus \{j\}} \binom{p-1}{\lvert s \rvert}^{-1}\{V(f_{0, s \cup \{j\}}, P_0) - V(f_{0,s}, P_0)\};$$ this is the average gain in predictiveness from adding feature $j$ to each possible grouping of the other features. To estimate the SPVIM for whether the viral subtype is 01_AE, I can use the following code (note that `sp_vim` returns VIM estimates for all features): ```{r est-famhist-spvim, warning = FALSE} set.seed(91011) all_vim_spvim <- sp_vim(Y = y, X = X, type = "auc", SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol, env = environment()) ``` ## Adjusting for confounders In some settings, there may be confounding factors that you want to adjust for in all analyses. For example, in HIV vaccine studies, we often adjust for baseline demographic variables, including age and behavioral factors. If this is the case, then the null predictiveness above can be modified to be $V(f_{0,c}, P_0)$, where $c$ is the index set of all confounders. ## Conclusion The three VIMs defined here may be different for a given feature of interest. Indeed, we can see this for whether or not subtype is 01_AE in the VRC01 data: ```{r show-ests} subtype_01_AE_cond subtype_01_AE_marg # note: need to look at row for s = 5 all_vim_spvim ``` This is simply a function of the fact that the VIMs are different population parameters. All three likely provide useful information in practice: * the marginal VIM provides information about the predictiveness of the covariate in isolation; * the conditional VIM provides information about the predictiveness of the covariate adjusting for all other covariates; and * the SPVIM provides information about the predictiveness of the covariate averaged over all sets of adjustment variables. To choose a VIM, identify which of these three (there may be more than one) best addresses your scientific question. ## References
/scratch/gouwar.j/cran-all/cranData/vimp/inst/doc/types-of-vims.Rmd
--- title: "Introduction to `vimp`" author: "Brian D. Williamson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Introduction to `vimp`} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} csl: chicago-author-date.csl bibliography: vimp_bib.bib --- ```{r setup, echo = FALSE, include = FALSE} library(knitr) opts_knit$set(cache = FALSE, verbose = TRUE, global.par = TRUE) ``` ```{r more-setup, echo = FALSE} par(mar = c(5, 12, 4, 2) + 0.1) ``` ## Introduction `vimp` is a package that computes nonparametric estimates of variable importance and provides valid inference on the true importance. The package supports flexible estimation of variable importance based on the difference in nonparametric $R^2$, classification accuracy, and area under the receiver operating characteristic curve (AUC). These quantities are all nonparametric generalizations of the usual measures in simple parametric models (e.g., linear models). For more details, see the accompanying manuscripts @williamson2020a, @williamson2021, and @williamson2020c. Variable importance estimates may be computed quickly, depending on the techniques used to estimate the underlying conditional means --- if these techniques are slow, then the variable importance procedure will be slow. The code can handle arbitrary dimensions of features, and may be used to estimate the importance of any single feature or group of features for predicting the outcome. The package also includes functions for cross-validated importance. The author and maintainer of the `vimp` package is [Brian Williamson](https://bdwilliamson.github.io/). The methods implemented here have also been implemented in Python under the package [`vimpy`](https://github.com/bdwilliamson/vimpy). ## Installation A stable version of the package may be downloaded and installed from CRAN. Type the following command in your R console to install the stable version of `vimp`: ```{r install-vimp, eval = FALSE} install.packages("vimp") ``` A development version of the package may be downloaded and installed from GitHub using the `devtools` package. Type the following command in your R console to install the development version of `vimp`: ```{r devtools-install-vimp, eval = FALSE} # only run if you don't have devtools # previously installed # install.packages("devtools") devtools::install_github("bdwilliamson/vimp") ``` ## Quick Start This section should serve as a quick guide to using the `vimp` package --- we will cover the main functions for estimating $R^2$-based variable importance using a simulated data example. More details are given in the next section. First, load the `vimp` package: ```{r load-vimp, message = FALSE} library("vimp") ``` Next, create some data: ```{r gen-data} # ------------------------------------------------------------- # problem setup # ------------------------------------------------------------- # set up the data set.seed(5678910) n <- 1000 p <- 2 s <- 1 # desire importance for X_1 x <- data.frame(replicate(p, runif(n, -1, 1))) y <- (x[,1])^2*(x[,1]+7/5) + (25/9)*(x[,2])^2 + rnorm(n, 0, 1) # set up folds for hypothesis testing folds <- sample(rep(seq_len(2), length = length(y))) ``` This creates a matrix of covariates `x` with two columns, a vector `y` of normally-distributed outcome values, and a set of folds for a sample of `n = 100` study participants. The workhorse function of `vimp`, for $R^2$-based variable importance, is `vimp_rsquared`. 
There are two ways to compute variable importance: in the first method, you allow `vimp` to run regressions for you and return variable importance; in the second method (discussed in ["Using precomputed regression function estimates in `vimp`"](precomputed-regressions.html)), you run the regressions yourself and plug these into `vimp`. I will focus on the first method here. The basic arguments are * Y: the outcome (in this example, `y`) * X: the covariates (in this example, `x`) * indx: the covariate(s) of interest for evaluating importance (here, either 1 or 2) * run_regression: a logical value telling `vimp_rsquared` whether or not to run a regression of Y on X (`TRUE` in this example) * SL.library: a "library" of learners to pass to the function `SuperLearner` (since `run_regression = TRUE`) * V: the number of folds to use for cross-fitted variable importance This second-to-last argument, `SL.library`, determines the estimators you want to use for the conditional mean of Y given X. Estimates of variable importance rely on good estimators of the conditional mean, so we suggest using flexible estimators and model stacking to do so. One option for this is the `SuperLearner` package; load that package using ```{r learner-lib-small, message = FALSE} library("SuperLearner") # load specific algorithms library("ranger") ``` The code ```{r est-1, warning = FALSE} est_1 <- vimp_rsquared(Y = y, X = x, indx = 1, run_regression = TRUE, SL.library = c("SL.ranger", "SL.mean"), V = 2, env = environment()) ``` uses the Super Learner to fit the required regression functions, and computes an estimate of the importance of $X_1$. We can visualize the estimate, standard error, and confidence interval by printing or typing the object name: ```{r print-est-1} est_1 print(est_1) ``` This output shows that we have estimated the importance of $X_1$ to be `r round(est_1$est, 3)`, with a 95% confidence interval of `r paste0("[", round(est_1$ci[,1], 3), ", ", round(est_1$ci[, 2], 3), "]")`. ## Detailed guide In this section, we provide a fuller example of estimating $R^2$-based variable importance in the context of assessing the importance of amino acid sequence features in predicting the neutralization sensitivity of the HIV virus to the broadly neutralizing antibody VRC01. For more information about this study, see @magaret2019. Often when working with data we attempt to estimate the conditional mean of the outcome $Y$ given features $X$, defined as $\mu_P(x) = E_P(Y \mid X = x)$. There are many tools for estimating this conditional mean. We might choose a classical parametric tool such as linear regression. We might also want to be model-agnostic and use a more nonparametric approach to estimate the conditional mean. However, - This involves using some nonparametric smoothing technique, which requires: (1) choosing a technique, and (2) selecting tuning parameters - Naive optimal tuning balances out the bias and variance of the smoothing estimator. Is this the correct trade-off for estimating the conditional mean? Once we have a good estimate of the conditional mean, it is often of scientific interest to understand which features contribute the most to the variation in $\mu_P$. Specifically, we might consider \[\mu_{P, s}(x) = E_P(Y \mid X_{(-s)} = x_{(-s)}),\] where for a vector $v$ and a set of indices $s$, $v_{(-s)}$ denotes the elements of $v$ with index not in $s$. By comparing $\mu_{P, s}$ to $\mu_P$ we can evaluate the importance of the $s$th element (or group of elements).
Assume that our data are generated according to the mechanism $P_0$. We define the population $R^2$ value of a given regression function $\mu$ as $R^2(\mu, P_0) = 1 - \frac{E_{P_0}\{Y - \mu(X)\}^2}{var_{P_0}(Y)}$, where the numerator of this expression is the population mean squared error and the denominator is the population variance. We can then define a nonparametric measure of variable importance, \[\psi_{0, s} = R^2(\mu_{P_0}, P_0) - R^2(\mu_{P_0,s}, P_0),\] which is the additional proportion of the variability in the outcome explained by including the covariate(s) with index in $s$ in our chosen estimation technique. This document introduces you to the basic tools in `vimp` and how to apply them to a dataset. I will explore one method for obtaining variable importance estimates using `vimp`: you only specify a *library* of candidate estimators for the conditional means $\mu_{P_0}$ and $\mu_{P_0, s}$; you allow `vimp` to obtain the optimal estimates of these quantities using the `SuperLearner` [@vanderlaan2007], and use these estimates to obtain variable importance estimates. A second method (using precomputed estimates of the regression functions) exists and is described in ["Using precomputed regression function estimates in `vimp`"](precomputed-regressions.html). ### A look at the VRC01 data Throughout this document I will use the VRC01 data [@magaret2019], a subset of the data freely available from the Los Alamos National Laboratory's Compile, Neutralize, and Tally Neutralizing Antibody Panels database. Information about these data is available [here](https://doi.org/10.1371/journal.pcbi.1006952). ```{r load-vrc01-data} # read in the data data("vrc01") ``` While there are several outcomes of interest in these data (continuous measures of neutralization and binary measures of resistance), we will focus on the binary measure of resistance to VRC01 given by `ic50.censored`. This variable is a binary indicator that the concentration of VRC01 necessary to neutralize 50% of viral replicates in a sample (IC-50) was right-censored; since higher values of IC-50 imply a more resistant virus, this indicator is a proxy for viral resistance. In addition to the outcome of interest, there are measurements on several groups of variables: viral subtype, geographic region of origin (a potential confounding variable), amino acid sequence features (further grouped into the CD4 binding sites, VRC01 binding footprint, sites with sufficient exposed surface area, sites identified as important for glycosylation, sites with residues that covary with the VRC01 binding footprint, sites associated with VRC01-specific potential N-linked glycosylation (PNGS) effects, sites in gp41 associated with VRC01 neutralization or sensitivity, sites for indicating N-linked glycosylation), region-specific counts of PNGS, viral geometry, cysteine counts, and steric bulk at critical locations. For the sake of simplicity, we will focus here on only three groups of features: viral subtype, geographic region of origin, and viral geometry features. ```{r subset-data} library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) ``` Since there are 17 features and two groups, it is of interest to determine variable importance both for the individual features separately and for the two groups of features (since the geographic variables are potential confounders).
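To make these groupings concrete, the short chunk below is an illustrative addition (not part of the original analysis and not evaluated here); it uses only base R and the `X` defined above to count how many candidate features fall into each group:

```{r check-feature-groups, eval = FALSE}
# illustrative sanity check: count the candidate features in each group
sum(grepl("geog", names(X)))     # geographic confounders
sum(grepl("subtype", names(X)))  # viral subtype features
sum(grepl("length", names(X)))   # viral geometry (length) features
```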
### A first approach: linear regression Suppose that I believe that a linear model truly describes the relationship between the outcome and the covariates in these data. In that case, I would be justified in only fitting a linear regression to estimate the conditional means; this means that in my importance analysis, I should also use only linear regression. The analysis is achieved by the following: ```{r est-regressions-lm, warning = FALSE} geog_indx <- max(which(grepl("geog", names(X)))) set.seed(1234) for (i in seq_len(ncol(X) - geog_indx)) { # note that we're using a small number of cross-fitting folds for speed lm_vim <- vim(Y = y, X = X, indx = geog_indx + i, run_regression = TRUE, SL.library = "SL.glm", type = "r_squared", cvControl = list(V = 2), scale = "logit", family = binomial()) if (i == 1) { lm_mat <- lm_vim } else { lm_mat <- merge_vim(lm_mat, lm_vim) } } # print out the importance lm_mat ``` ### Building a library of learners In general, we don't believe that a linear model truly holds. Thinking about potential model misspecification leads us to consider other algorithms. Suppose that I prefer to use generalized additive models [@hastie1990] to estimate $\mu_{P_0}$ and $\mu_{P_0, s}$, so I am planning on using the `gam` package. Suppose that you prefer to use the elastic net [@zou2005], and are planning to use the `glmnet` package. The choice of either method is somewhat subjective, and I also will have to use a technique like cross-validation to determine an optimal tuning parameter in each case. It is also possible that neither additive models nor the elastic net will do a good job estimating the true conditional means! This motivates using `SuperLearner` to allow the data to determine the optimal combination of *base learners* from a *library* that I define. These base learners are a combination of different methods (e.g., generalized additive models and elastic net) and instances of the same method with different tuning parameter values (e.g., additive models with 3 and 4 degrees of freedom). The Super Learner is an example of model stacking, or model aggregation --- these approaches use a data-adaptive combination of base learners to make predictions. 
For instance, my library could include the elastic net, random forests [@breiman2001], and gradient boosted trees [@friedman2001] as follows: ```{r full-learner-lib} # create a function for boosted stumps SL.gbm.1 <- function(..., interaction.depth = 1) SL.gbm(..., interaction.depth = interaction.depth) # create GAMs with different degrees of freedom SL.gam.3 <- function(..., deg.gam = 3) SL.gam(..., deg.gam = deg.gam) SL.gam.4 <- function(..., deg.gam = 4) SL.gam(..., deg.gam = deg.gam) SL.gam.5 <- function(..., deg.gam = 5) SL.gam(..., deg.gam = deg.gam) # add more levels of alpha for glmnet create.SL.glmnet <- function(alpha = c(0.25, 0.5, 0.75)) { for (mm in seq(length(alpha))) { eval(parse(file = "", text = paste('SL.glmnet.', alpha[mm], '<- function(..., alpha = ', alpha[mm], ') SL.glmnet(..., alpha = alpha)', sep = '')), envir = .GlobalEnv) } invisible(TRUE) } create.SL.glmnet() # add tuning parameters for randomForest create.SL.randomForest <- function(tune = list(mtry = c(1, 5, 7), nodesize = c(1, 5, 10))) { tuneGrid <- expand.grid(tune, stringsAsFactors = FALSE) for (mm in seq(nrow(tuneGrid))) { eval(parse(file = "", text = paste("SL.randomForest.", mm, "<- function(..., mtry = ", tuneGrid[mm, 1], ", nodesize = ", tuneGrid[mm, 2], ") SL.randomForest(..., mtry = mtry, nodesize = nodesize)", sep = "")), envir = .GlobalEnv) } invisible(TRUE) } create.SL.randomForest() # create the library learners <- c("SL.glmnet", "SL.glmnet.0.25", "SL.glmnet.0.5", "SL.glmnet.0.75", "SL.randomForest", "SL.randomForest.1", "SL.randomForest.2", "SL.randomForest.3", "SL.randomForest.4", "SL.randomForest.5", "SL.randomForest.6", "SL.randomForest.7", "SL.randomForest.8", "SL.randomForest.9", "SL.gbm.1") ``` Now that I have created the library of learners, I can move on to estimating variable importance. ### Estimating variable importance for a single variable The main function for R-squared-based variable importance in the `vimp` package is the `vimp_rsquared()` function. There are five main arguments to `vimp_rsquared()`: - `Y`, the outcome - `X`, the covariates - `indx`, which determines the feature I want to estimate variable importance for - `SL.library`, the library of candidate learners - `V`, the number of cross-fitting folds (also referred to as cross-validation folds) to use for computing variable importance The main arguments differ if precomputed regression function estimates are used; please see ["Using precomputed regression function estimates in `vimp`"](precomputed-regressions.html) for further discussion of this case. Suppose that the first feature that I want to estimate variable importance for is whether the viral subtype is 01_AE, `subtype.is.01_AE`. Then supplying `vimp_rsquared()` with - `Y = y` - `X = X` - `indx = 5` - `SL.library = learners` - `V = 5` means that: - I want to use `SuperLearner()` to estimate the conditional means $\mu_{P_0}$ and $\mu_{P_0,s}$, and my candidate library is `learners` - I want to estimate variable importance for the fifth column of the VRC01 covariates, which is `subtype.is.01_AE` - I want to use five-fold cross-fitting to estimate importance The call to `vimp_rsquared()` looks like this: ```{r vimp-with-sl-1, eval = FALSE} vimp_rsquared(Y = y, X = X, indx = 5, run_regression = TRUE, SL.library = learners, V = 5, family = binomial()) ``` While this is the preferred method for estimating variable importance, using a large library of learners may cause the function to take time to run. 
Usually this is okay --- in general, you took a long time to collect the data, so letting an algorithm run for a few hours should not be an issue. However, for the sake of illustration, I can estimate variable importance for the 01_AE subtype using only a small library, a small number of cross-validation folds in the Super Learner, and a small number of cross-fitting folds as follows (again, I suggest using a larger number of folds and a larger library in practice): ```{r vimp-with-sl-fam, message = FALSE, warning = FALSE} # small learners library learners.2 <- c("SL.ranger") # small number of cross-fitting folds V <- 2 # small number of CV folds for Super Learner sl_cvcontrol <- list(V = 2) # now estimate variable importance set.seed(5678) start_time <- Sys.time() subtype_01_AE_vim <- vimp_rsquared(Y = y, X = X, indx = 5, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) end_time <- Sys.time() ``` This code takes approximately `r round(as.numeric(end_time - start_time), 3)` seconds to run on a (not very fast) PC. I can display these estimates: ```{r print-vim} subtype_01_AE_vim ``` The object returned by `vimp_rsquared()` also contains lists of fitted values from using `SuperLearner()`; I access these using `$full_fit` and `$red_fit`. For example, ```{r look-at-ests} head(subtype_01_AE_vim$full_fit[[1]]) head(subtype_01_AE_vim$red_fit[[1]]) ``` I can obtain estimates for the remaining individual features in the same way (again using only a small library for illustration): ```{r vrc01-sl, warning = FALSE} ests <- subtype_01_AE_vim set.seed(1234) for (i in seq_len(ncol(X) - geog_indx - 1)) { # note that we're using a small number of cross-fitting folds for speed this_vim <- vimp_rsquared(Y = y, X = X, indx = geog_indx + i + 1, run_regression = TRUE, SL.library = learners.2, V = V, cvControl = sl_cvcontrol, family = binomial()) ests <- merge_vim(ests, this_vim) } ``` Now that I have estimates of each individual feature's variable importance, I can view them all simultaneously by plotting: ```{r vrc01-vim, fig.width = 8.5, fig.height = 8, message = FALSE} library("ggplot2") library("cowplot") theme_set(theme_cowplot()) all_vars <- c(paste0("Subtype is ", c("01_AE", "02_AG", "07_BC", "A1", "A1C", "A1D", "B", "C", "D", "O", "Other")), paste0("Length of ", c("Env", "gp120", "V5", "V5 outliers", "Loop E", "Loop E outliers"))) est_plot_tib <- ests$mat %>% mutate( var_fct = rev(factor(s, levels = ests$mat$s, labels = all_vars[as.numeric(ests$mat$s) - geog_indx], ordered = TRUE)) ) # plot est_plot_tib %>% ggplot(aes(x = est, y = var_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated individual feature importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ``` ## Estimating variable importance for a group of variables Now that I have estimated variable importance for each of the individual features, I can estimate variable importance for each of the groups that I mentioned above: viral subtype and viral geometry. The only difference between estimating variable importance for a group of features rather than an individual feature is that now I specify a vector for `indx`; I can use any of the options listed in the previous section to compute these estimates.
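The group analysis below refers to the subtype and geometry columns by their numeric positions (`5:15` and `16:21`). As an illustrative alternative (a sketch that is not part of the original analysis and is not evaluated here), the index vectors could instead be built from the column names of the `X` defined above, which avoids hard-coding positions:

```{r group-indx-by-name, eval = FALSE}
# illustrative: build the group index vectors from the column names
subtype_indx <- which(grepl("subtype", names(X)))   # matches 5:15 for this X
geometry_indx <- which(grepl("length", names(X)))   # matches 16:21 for this X
# these vectors can then be supplied to the indx argument of vimp_rsquared()
```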
```{r vrc01-group-vim, fig.width = 8.5, fig.height = 8} # get the estimates set.seed(91011) subtype_vim <- vimp_rsquared(Y = y, X = X, indx = 5:15, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) geometry_vim <- vimp_rsquared(Y = y, X = X, indx = 16:21, SL.library = learners.2, na.rm = TRUE, env = environment(), V = V, cvControl = sl_cvcontrol, family = binomial()) # combine and plot groups <- merge_vim(subtype_vim, geometry_vim) all_grp_nms <- c("Viral subtype", "Viral geometry") grp_plot_tib <- groups$mat %>% mutate( grp_fct = factor(case_when( s == "5,6,7,8,9,10,11,12,13,14,15" ~ "1", s == "16,17,18,19,20,21" ~ "2" ), levels = c("1", "2"), labels = all_grp_nms, ordered = TRUE) ) grp_plot_tib %>% ggplot(aes(x = est, y = grp_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated feature group importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ``` ## Types of population variable importance In this document, I have focused on one particular definition of population variable importance that I call *conditional* variable importance. For a further discussion of what I call *marginal* variable importance and *Shapley population* variable importance, please see ["Types of VIMs"](types-of-vims.html). ## References
/scratch/gouwar.j/cran-all/cranData/vimp/vignettes/introduction-to-vimp.Rmd
--- title: "Variable importance with coarsened data" author: "Brian D. Williamson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: keep_md: true vignette: > %\VignetteIndexEntry{Variable importance with coarsened data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} csl: chicago-author-date.csl bibliography: vimp_bib.bib --- ```{r setup, echo = FALSE, include = FALSE} library(knitr) opts_knit$set(cache = FALSE, verbose = TRUE, global.par = TRUE) ``` ```{r more-setup, echo = FALSE} par(mar = c(5, 12, 4, 2) + 0.1) ``` ## Introduction In some settings, we don't have access to the full data unit on each observation in our sample. These "coarsened-data" settings (see, e.g., @vandervaart2000) create a layer of complication in estimating variable importance. In particular, the efficient influence function (EIF) in the coarsened-data setting is more complex, and involves estimating an additional quantity: the projection of the full-data EIF (estimated on the fully-observed sample) onto the variables that are always observed (Chapter 25.5.3 of @vandervaart2000; see also Example 6 in @williamson2021). ## Coarsened data in `vimp` `vimp` can handle coarsened data, with the specification of several arguments: * `C`: and binary indicator vector, denoting which observations have been coarsened; 1 denotes fully observed, while 0 denotes coarsened. * `ipc_weights`: inverse probability of coarsening weights, assumed to already be inverted (i.e., `ipc_weights` = 1 / [estimated probability of coarsening]). * `ipc_est_type`: the type of procedure used for coarsened-at-random settings; options are `"ipw"` (for inverse probability weighting) or `"aipw"` (for augmented inverse probability weighting). Only used if `C` is not all equal to 1. * `Z`: a character vector specifying the variable(s) among `Y` and `X` that are thought to play a role in the coarsening mechanism. To specify the outcome, use `"Y"`; to specify covariates, use a character number corresponding to the desired position in `X` (e.g., `"1"` or `"X1"` [the latter is case-insensitive]). `Z` plays a role in the additional estimation mentioned above. Unless otherwise specified, an internal call to `SuperLearner` regresses the full-data EIF (estimated on the fully-observed data) onto a matrix that is the parsed version of `Z`. If you wish to use any covariates from `X` as part of your coarsening mechanism (and thus include them in `Z`), and they have *different names from `X1`, ...*, then you must use character numbers (i.e., `"1"` refers to the first variable, etc.) to refer to the variables to include in `Z`. Otherwise, `vimp` will throw an error. ## Example with missing outcomes In this example, the outcome `Y` is subject to missingness. 
We generate data as follows: ```{r gen-data-missing-y} set.seed(1234) p <- 2 n <- 100 x <- replicate(p, stats::rnorm(n, 0, 1)) # apply the function to the x's y <- 1 + 0.5 * x[, 1] + 0.75 * x[, 2] + stats::rnorm(n, 0, 1) # indicator of observing Y logit_g_x <- .01 * x[, 1] + .05 * x[, 2] - 2.5 g_x <- exp(logit_g_x) / (1 + exp(logit_g_x)) C <- rbinom(n, size = 1, prob = g_x) obs_y <- y obs_y[C == 0] <- NA x_df <- as.data.frame(x) full_df <- data.frame(Y = obs_y, x_df, C = C) ``` Next, we estimate the relevant components for `vimp`: ```{r missing-y-vim} library("vimp") library("SuperLearner") # estimate the probability of missing outcome ipc_weights <- 1 / predict(glm(C ~ V1 + V2, family = "binomial", data = full_df), type = "response") # set up the SL learners <- c("SL.glm", "SL.mean") V <- 2 # estimate vim for X2 set.seed(1234) est <- vim(Y = obs_y, X = x_df, indx = 2, type = "r_squared", run_regression = TRUE, SL.library = learners, alpha = 0.05, delta = 0, C = C, Z = c("Y", "1"), ipc_weights = ipc_weights, cvControl = list(V = V)) ``` ## Example with two-phase sampling In this example, we observe outcome `Y` and covariate `X1` on all participants in a study. Based on the value of `Y` and `X1`, we include some participants in a second-phase sample, and further measure covariate `X2` on these participants. This is an example of a two-phase study. We generate data as follows: ```{r generate-data} set.seed(4747) p <- 2 n <- 100 x <- replicate(p, stats::rnorm(n, 0, 1)) # apply the function to the x's y <- 1 + 0.5 * x[, 1] + 0.75 * x[, 2] + stats::rnorm(n, 0, 1) # make this a two-phase study, assume that X2 is only measured on # subjects in the second phase; note C = 1 is inclusion C <- rbinom(n, size = 1, prob = exp(y + 0.1 * x[, 1]) / (1 + exp(y + 0.1 * x[, 1]))) tmp_x <- x tmp_x[C == 0, 2] <- NA x <- tmp_x x_df <- as.data.frame(x) full_df <- data.frame(Y = y, x_df, C = C) ``` If we want to estimate variable importance of `X2`, we need to use the coarsened-data arguments in `vimp`. This can be accomplished in the following manner: ```{r ipw-vim} library("vimp") library("SuperLearner") # estimate the probability of being included only in the first phase sample ipc_weights <- 1 / predict(glm(C ~ y + V1, family = "binomial", data = full_df), type = "response") # set up the SL learners <- c("SL.glm") V <- 2 # estimate vim for X2 set.seed(1234) est <- vim(Y = y, X = x_df, indx = 2, type = "r_squared", run_regression = TRUE, SL.library = learners, alpha = 0.05, delta = 0, C = C, Z = c("Y", "1"), ipc_weights = ipc_weights, cvControl = list(V = V), method = "method.CC_LS") ``` # References
/scratch/gouwar.j/cran-all/cranData/vimp/vignettes/ipcw-vim.Rmd
--- title: "Using precomputed regression function estimates in `vimp`" author: "Brian D. Williamson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Using precomputed regression function estimates in `vimp`} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} csl: chicago-author-date.csl bibliography: vimp_bib.bib --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, message = FALSE} library("vimp") library("SuperLearner") ``` ## Introduction In the [main vignette](introduction-to-vimp.html), we analyzed the VRC01 data [@magaret2019], a subset of the data freely available from the Los Alamos National Laboratory's Compile, Neutralize, and Tally Neutralizing Antibody Panels database. Information about these data is available [here](https://doi.org/10.1371/journal.pcbi.1006952). In each of the analyses, I used `run_regression = TRUE`. In this vignette, I discuss how to use precomputed regression function estimates with `vimp`. The results of this analysis replicate the analysis in the [main vignette](introduction-to-vimp.html). ```{r load-vrc01-data} # read in the data data("vrc01") # subset to the columns of interest for this analysis library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) set.seed(1234) vrc01_folds <- make_folds(y = y, V = 2) ``` ## Using precomputed regression function estimates without cross-fitting ### A first approach: linear regression As in the [main vignette](introduction-to-vimp.html), we first start by fitting only linear regression models. In this section, we use the function `vim()`; this function does not use cross-fitting to estimate variable importance, and greatly simplifies the code for precomputed regression models. ```{r est-regressions-lm, warning = FALSE} library("rlang") vrc01_subset <- vrc01 %>% select(ic50.censored, starts_with("geog"), starts_with("subtype"), starts_with("length")) %>% rename(y = ic50.censored) # estimate prediction function on each subset, predict on held-out fold full_fit <- vector("numeric", length = nrow(vrc01)) for (v in 1:2) { train_v <- subset(vrc01_subset, vrc01_folds != v) test_v <- subset(vrc01_subset, vrc01_folds == v) full_mod <- glm(y ~ ., data = train_v) full_fit[vrc01_folds == v] <- predict(full_mod, newdata = test_v) } # estimate the reduced conditional means for each of the individual variables # remove the outcome for the predictor matrix geog_indx <- max(which(grepl("geog", names(X)))) for (i in seq_len(ncol(X) - geog_indx)) { this_name <- names(X)[i + geog_indx] red_fit <- vector("numeric", length = nrow(vrc01)) for (v in 1:2) { train_v <- subset(vrc01_subset, vrc01_folds != v) test_v <- subset(vrc01_subset, vrc01_folds == v) red_fit[vrc01_folds == v] <- suppressWarnings( predict(glm(y ~ ., data = train_v %>% select(-!!this_name)), newdata = test_v) ) } this_vim <- vim(Y = y, f1 = full_fit, f2 = red_fit, indx = i + geog_indx, run_regression = FALSE, type = "r_squared", sample_splitting_folds = vrc01_folds, scale = "logit") if (i == 1) { lm_mat <- this_vim } else { lm_mat <- merge_vim(lm_mat, this_vim) } } # print out the matrix lm_mat ``` ## Estimating variable importance for a single variable using precomputed regression function estimates In this section, we will use cross-fitting and pre-computed estimates of the regression functions. 
This can be especially useful if you have already run a call to `CV.SuperLearner` -- that function returns estimates based on each observation being part of the hold-out set. However, while this approach can save you some computation time, it requires a hefty amount of mental overhead. We will use `CV.SuperLearner` to fit the individual regression functions, taking care to use the same cross-fitting folds in each regression. We will then create two groups of validation folds for sample-splitting. For this analysis, we will use `V = 2` folds for cross-fitted variable importance estimation (as we did in the [main vignette](introduction-to-vimp.html)). Note that this entails running `CV.SuperLearner` with $2V = 4$ folds. First, we estimate the regression function based on all variables: ```{r estimate-full-regression-with-cf, message = FALSE, warning = FALSE} learners <- "SL.ranger" # estimate the full regression function V <- 2 set.seed(4747) full_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X, SL.library = learners, cvControl = list(V = 2 * V), innerCvControl = list(list(V = V)), family = binomial() ) ) # get a numeric vector of cross-fitting folds cross_fitting_folds <- get_cv_sl_folds(full_cv_fit$folds) # get sample splitting folds set.seed(1234) sample_splitting_folds <- make_folds(unique(cross_fitting_folds), V = 2) full_cv_preds <- full_cv_fit$SL.predict ``` Next, to estimate the importance of each variable, we need to estimate the reduced regression function for each variable: ```{r estimate-reduced-regressions-with-cf, message = FALSE, warning = FALSE} vars <- names(X)[(geog_indx + 1):ncol(X)] set.seed(1234) for (i in seq_len(length(vars))) { # use "eval" and "parse" to assign the objects of interest to avoid duplicating code eval(parse(text = paste0("reduced_", vars[i], "_cv_fit <- suppressWarnings(SuperLearner::CV.SuperLearner( Y = y, X = X[, -(geog_indx + i), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ))"))) eval(parse(text = paste0("reduced_", vars[i], "_cv_preds <- reduced_", vars[i], "_cv_fit$SL.predict"))) } ``` Then we can plug these values into `vimp_rsquared()` (or equivalently, `cv_vim()` with `type = "r_squared"`) as follows: ```{r cf-vims} for (i in seq_len(length(vars))) { # again, use "eval" and "parse" to assign the objects of interest to avoid duplicating code eval(parse(text = paste0("cf_", vars[i], "_vim <- vimp_rsquared(Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_", vars[i], "_cv_preds, indx = (geog_indx + i), cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, run_regression = FALSE, alpha = 0.05, V = V, na.rm = TRUE, scale = 'logit')"))) } cf_ests <- merge_vim(cf_subtype.is.01_AE_vim, cf_subtype.is.02_AG_vim, cf_subtype.is.07_BC_vim, cf_subtype.is.A1_vim, cf_subtype.is.A1C_vim, cf_subtype.is.A1D_vim, cf_subtype.is.B_vim, cf_subtype.is.C_vim, cf_subtype.is.D_vim, cf_subtype.is.O_vim, cf_subtype.is.Other_vim, cf_length.env_vim, cf_length.gp120_vim, cf_length.loop.e_vim, cf_length.loop.e.outliers_vim, cf_length.v5_vim, cf_length.v5.outliers_vim) all_vars <- c(paste0("Subtype is ", c("01_AE", "02_AG", "07_BC", "A1", "A1C", "A1D", "B", "C", "D", "O", "Other")), paste0("Length of ", c("Env", "gp120", "V5", "V5 outliers", "Loop E", "Loop E outliers"))) ``` And we can view them all simultaneously by plotting: ```{r plot-cf-vim, fig.width = 
8.5, fig.height = 8} library("ggplot2") library("cowplot") theme_set(theme_cowplot()) cf_est_plot_tib <- cf_ests$mat %>% mutate( var_fct = rev(factor(s, levels = cf_ests$mat$s, labels = all_vars[as.numeric(cf_ests$mat$s) - geog_indx], ordered = TRUE)) ) # plot cf_est_plot_tib %>% ggplot(aes(x = est, y = var_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated individual feature importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ``` ## Estimating variable importance for a group of variables using precomputed regression function estimates Finally, we can estimate and plot group importance: ```{r cf-group-vim, fig.width = 8.5, fig.height = 8, warning = FALSE} set.seed(91011) reduced_subtype_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X[, -c(5:15), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ) ) reduced_subtype_cv_preds <- reduced_subtype_cv_fit$SL.predict reduced_geometry_cv_fit <- suppressWarnings( SuperLearner::CV.SuperLearner( Y = y, X = X[, -c(16:21), drop = FALSE], SL.library = learners, cvControl = SuperLearner::SuperLearner.CV.control(V = 2 * V, validRows = full_cv_fit$folds), innerCvControl = list(list(V = V)), family = binomial() ) ) reduced_geometry_cv_preds <- reduced_geometry_cv_fit$SL.predict cf_subtype_vim <- vimp_rsquared( Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_subtype_cv_preds, indx = 5:15, run_regression = FALSE, V = V, cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, scale = "logit" ) cf_geometry_vim <- vimp_rsquared( Y = y, cross_fitted_f1 = full_cv_preds, cross_fitted_f2 = reduced_geometry_cv_preds, indx = 16:21, run_regression = FALSE, V = V, cross_fitting_folds = cross_fitting_folds, sample_splitting_folds = sample_splitting_folds, scale = "logit" ) cf_groups <- merge_vim(cf_subtype_vim, cf_geometry_vim) all_grp_nms <- c("Viral subtype", "Viral geometry") grp_plot_tib <- cf_groups$mat %>% mutate( grp_fct = factor(case_when( s == "5,6,7,8,9,10,11,12,13,14,15" ~ "1", s == "16,17,18,19,20,21" ~ "2" ), levels = c("1", "2"), labels = all_grp_nms, ordered = TRUE) ) grp_plot_tib %>% ggplot(aes(x = est, y = grp_fct)) + geom_point() + geom_errorbarh(aes(xmin = cil, xmax = ciu)) + xlab(expression(paste("Variable importance estimates: ", R^2, sep = ""))) + ylab("") + ggtitle("Estimated feature group importance") + labs(subtitle = "in the VRC01 data (considering only geographic confounders, subtype, and viral geometry)") ``` ## Conclusion In this document, we learned a second method for computing variable importance estimates: rather than having `vimp` run all regression functions for you, you can compute your own regressions and pass these to `vimp`. The results are equivalent, but there is a tradeoff: what you save in computation time by only computing the full regression once must be balanced with the mental overhead of correctly computing the regressions. Additionally, this task is more difficult when using cross-fitted variable importance, which I recommend in nearly all cases when using flexible machine learning tools. ## References
/scratch/gouwar.j/cran-all/cranData/vimp/vignettes/precomputed-regressions.Rmd
--- title: "Types of VIMs" author: "Brian D. Williamson" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Types of VIMs} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} csl: chicago-author-date.csl bibliography: vimp_bib.bib --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} library("vimp") library("SuperLearner") ``` ## Introduction In the [main vignette](introduction-to-vimp.html), I discussed variable importance defined using R-squared. I also mentioned that all of the analyses were carried out using a *condititonal* variable importance measure. In this document, I will discuss all three types of variable importance that may be computed using `vimp`. In general, I define variable importance as a function of the true population distribution (denoted by $P_0$) and a predictiveness measure $V$ -- large values of $V$ are assumed to be better. Currently, the measures $V$ implemented in `vimp` are $R^2$, classification accuracy, area under the receiver operating characteristic curve (AUC), and deviance. For a fixed function $f$, the predictiveness is given by $V(f, P)$, where large values imply that $f$ is a good predictor of the outcome. The best possible prediction function, $f_0$, is the *oracle* model -- i.e., the prediction function that I would use if I had access to the distribution $P_0$. Often, $f_0$ is the true conditional mean (e.g., for $R^2$). Then the *total oracle predictiveness* can be defined as $V(f_0, P_0)$. This is the best possible value of predictiveness. I define variable importance measures (VIMs) as contrasts in oracle predictivness. The oracle models that I plug in determine what type of variable importance is being considered, as I outline below. For the remainder of this document, suppose that I have $p$ variables, and an index set $s$ of interest (containing some subset of the $p$ variables). Throughout this document, I will use the VRC01 data [@magaret2019], a subset of the data freely available from the Los Alamos National Laboratory's Compile, Neutralize, and Tally Neutralizing Antibody Panels database. Information about these data is available [here](https://doi.org/10.1371/journal.pcbi.1006952). Throughout, I will also use a simple library of learners for the Super Learner (this is for illustration only; in practice, I suggest using a large library of learners, as outlined in the [main vignette](introduction-to-vimp.html)). Finally, I will use the area under the receiver operating characteristic curve (AUC) to measure importance. ```{r load-vrc01-data} # read in the data data("vrc01") # subset to the columns of interest for this analysis library("dplyr") library("tidyselect") # retain only the columns of interest for this analysis y <- vrc01$ic50.censored X <- vrc01 %>% select(starts_with("geog"), starts_with("subtype"), starts_with("length")) learners <- "SL.glm" ``` ## Conditional VIMs The *reduced oracle predictiveness* is defined as $V(f_{0,-s}, P_0)$, where $f_{0,-s}$ is the best possible prediction function that *does not use the covariates with index in $s$*. Then the conditional VIM is defined as $$V(f_0, P_0) - V(f_{0,-s}, P_0).$$ This is the measure of importance that I estimated in the [main vignette](introduction-to-vimp.html). 
To estimate the conditional VIM for the feature indicating whether or not the subtype is 01_AE, I can use the following code: ```{r est-subtype-01AE-cond, warning = FALSE} # note the use of a small V and a small number of SL folds, for illustration only set.seed(1234) V <- 2 sl_cvcontrol <- list(V = 2) subtype_01_AE_cond <- vimp_auc(Y = y, X = X, indx = 5, SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol) ``` ## Marginal VIMs The *marginal oracle predictiveness* is defined as $V(f_{0,s}, P_0)$, where $f_{0,s}$ is the best possible prediction function that *only uses the covariates with index in $s$*. The *null oracle predictiveness* is defined as $V(f_{0, \emptyset}, P_0)$, where $f_{0,\emptyset}$ is the best possible prediction function that *uses no covariates* (i.e., fits the mean). Then the marginal VIM is defined as $$V(f_{0,s}, P_0) - V(f_{0,\emptyset}, P_0).$$ To estimate the marginal VIM for this same feature (whether or not the subtype is 01_AE), I can use the following code: ```{r est-subtype-01AE-marg, warning = FALSE} # note the use of a small V and a small number of SL folds, for illustration only set.seed(5678) subtype_01_AE_marg <- vimp_auc(Y = y, X = X[, 5, drop = FALSE], indx = 1, SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol) ``` ## Shapley VIMs The Shapley population VIM (SPVIM) generalizes the marginal and conditional VIMs by averaging over all possible subsets. More specifically, the SPVIM for feature $j$ is given by $$\sum_{s \subseteq \{1,\ldots,p\} \setminus \{j\}} \binom{p-1}{\lvert s \rvert}^{-1}\{V(f_{0, s \cup \{j\}}, P_0) - V(f_{0,s}, P_0)\};$$ this is the average gain in predictiveness from adding feature $j$ to each possible grouping of the other features. To estimate the SPVIM for whether or not the subtype is 01_AE, I can use the following code (note that `sp_vim` returns VIM estimates for all features): ```{r est-subtype-01AE-spvim, warning = FALSE} set.seed(91011) all_vim_spvim <- sp_vim(Y = y, X = X, type = "auc", SL.library = learners, na.rm = TRUE, V = V, cvControl = sl_cvcontrol, env = environment()) ``` ## Adjusting for confounders In some settings, there may be confounding factors that you want to adjust for in all analyses. For example, in HIV vaccine studies, we often adjust for baseline demographic variables, including age and behavioral factors. If this is the case, then the null predictiveness above can be modified to be $V(f_{0,c}, P_0)$, where $c$ is the index set of all confounders. ## Conclusion The three VIMs defined here may be different for a given feature of interest. Indeed, we can see this for whether or not subtype is 01_AE in the VRC01 data: ```{r show-ests} subtype_01_AE_cond subtype_01_AE_marg # note: need to look at row for s = 5 all_vim_spvim ``` This is simply a function of the fact that the VIMs are different population parameters. All three likely provide useful information in practice: * the marginal VIM provides information about the predictiveness of the covariate in isolation; * the conditional VIM provides information about the predictiveness of the covariate adjusting for all other covariates; and * the SPVIM provides information about the predictiveness of the covariate averaged over all sets of adjustment variables. To choose a VIM, identify which of these three (there may be more than one) best addresses your scientific question. ## References
/scratch/gouwar.j/cran-all/cranData/vimp/vignettes/types-of-vims.Rmd
#' @title Mice Protein Expression Data Set #' #' @description The data set consists of the expression levels of 68 proteins that produced a detectable signal in the nuclear fraction of #' cortex for a sample of 72 mice. There are 38 control mice and 34 trisomic mice. Several measurements were recorded for each protein #' and for each mouse. The measurements containing missing observations in the original data were suppressed, so that one has between #' 12 and 15 measurements per protein and per mouse. #' #' Mice may be further described based on the treatment they received (injected with memantine or saline), and on their behaviour #' (stimulated to learn or not). #' #'@format A data frame of 72 rows (mice) and 905 columns (variables): #'@field Protein_X_meas_Y Numerical. The expression level for protein X at measurement Y. X has values between 1 and 68, Y has values #' between 1 and 12 or 15, according to the number of measurements. #'@field Genotype Categorical. Two values: "Control" and "Ts65Dn" (trisomic mouse). #'@field Treatment Categorical. Two values: "Memantine" and "Saline". #'@field Behaviour Categorical. Two values: "C/S" (stimulated to learn) and "S/C" (not stimulated to learn). #'@field Class.mouse Categorical. This variable defines eight classes of mice, based on crossing the categories of \code{Genotype}, #' \code{Behaviour} and \code{Treatment}. #'@field MouseID Factor. The key variable identifying each mouse in the sample. #' @source \url{https://archive.ics.uci.edu/ml/datasets/Mice+Protein+Expression} #' @references C. Higuera, K.J. Gardiner, and K.J. Cios (2015) Self-organizing feature maps identify proteins critical #' to learning in a mouse model of Down syndrome. PLoS ONE 10(6): e0129126. "DataMice"
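# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the documented examples): a quick first look
# at DataMice. The column names used below (Genotype, Treatment, Class.mouse)
# are taken from the field descriptions above. The block is wrapped in
# `if (FALSE)` so that nothing is executed when the file is sourced.
if (FALSE) {
  data(DataMice)
  dim(DataMice)                                 # expected: 72 rows, 905 columns
  table(DataMice$Genotype, DataMice$Treatment)  # balance of the experimental design
  table(DataMice$Class.mouse)                   # the eight crossed classes of mice
}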
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/DataMice.R
#' @title Statlog (Heart) Data Set #' #' @description The data consists of 270 patients described by six numerical #' variables and eight categorical variables. #' #'@format A data frame of 270 rows (patients) and 14 columns (variables): #'@field age Numerical. Age in years. #'@field resting_blood_pressure(trestbps) Numerical. Resting blood pressure (in mmHg) at hospital admittance. #'@field serum_cholesterol(chol) Numerical. Serum cholesterol in mg/dl. #'@field maximum_heart_rate_achieved(maxhr) Numerical. Maximum heart rate achieved during exercise. #'@field oldpeak Numerical. ST depression induced by exercise relative to rest. #'@field number_of_major_vessels_colored_by_fluoroscopy(numv) Numerical. Number of major vessels (0-3) colored by fluoroscopy. #'@field sex Categorical. Sex (1 = male; 0 = female). #'@field chest_pain_type(cp) Categorical. 1: typical angina, 2: atypical angina, 3: non-anginal pain, 4: asymptomatic. #'@field fasting_blood_sugar_>_120mg/dl(fbs) Categorical. 1 = true; 0 = false. #'@field resting_electrocardiographic_results(restecg) Categorical. 0: normal, 1: ST-T wave abnormality (T wave inversions and/or ST #' elevation or depression >0.05mV), 2: showing probable or definite left ventricular hypertrophy by Estes' criteria. #'@field exercice_induced_angina(exang) Categorical. 1 = yes; 0 = no. #'@field the_slope_of_the_peak_exercice_ST_segment(slope) Categorical. 1: upsloping, 2: flat, 3: downsloping. #'@field thalassemia(thal) Categorical. 3: normal blood flow, 6: fixed defect, 7: reversible defect. #'@field presence_or_absence_of_heart_disease(HD) Categorical. Absence or presence of heart disease. #' #' @source \url{http://archive.ics.uci.edu/ml/datasets/statlog+(heart)} #' @references A. Frank and A. Asuncion. UCI machine learning repository, statlog (heart) data set, 2010. "HDdata"
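# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the documented examples): a quick check of
# the variable types in HDdata. The clustering functions in this package expect
# categorical variables to be coded as factors, so this is a useful sanity
# check before calling sparsewkm(). Wrapped in `if (FALSE)` so that nothing is
# executed when the file is sourced.
if (FALSE) {
  data(HDdata)
  dim(HDdata)             # expected: 270 rows, 14 columns
  sapply(HDdata, class)   # numerical versus categorical (factor) columns
  summary(HDdata[, 1])    # age in years, assuming the column order documented above
}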
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/HDdata.R
#' @title check_fun_groupsparsw #' @keywords internal #' check_fun_groupsparsw <- function(X, lambda, nlambda, index, sizegroup, itermaxw, scaling, verbose) { check_X(X) check_lambda(lambda) check_nlambda(lambda, nlambda) check_index(index) check_sizegroup(sizegroup) check_itermaxw(itermaxw) check_scaling(scaling) check_verbose(verbose) # check_nthread(nthread) } check_fun_groupsoft <- function(b, s, index, sizegroup) { check_b(b) check_s(s) check_index(index) check_sizegroup(sizegroup) } check_fun_weightedss <- function(X, cl, w) { check_X(X) check_cl(cl) check_w(w) } check_X <- function(X) { # Missing values, Nan, Inf if (sum(is.finite(unlist(X))) != length(unlist(X))) stop("Must provide data with only finite values.") X <- as.matrix(X) if (ncol(X) == 1) { stop("With only one variable, you should run standard k-means instead.") } } check_nlambda <- function(lambda, nlambda) { # nlambda if (is.null(lambda)) { if (!isTRUE(all.equal(nlambda, as.integer(nlambda), check.attributes = F))) stop("nlambda must be an integer.") if (nlambda < 1) stop("nlambda must be strictly positive.") if (nlambda > 10000) stop("nlambda is >10000, one should ask why this precision is needed.") } } check_itermaxw <- function(itermaxw) { # itermaxw if (!isTRUE(all.equal(itermaxw, as.integer(itermaxw), check.attributes = F))) stop("itermaxw must be an integer.") if (itermaxw < 1) stop("itermaxw must be strictly positive.") } check_renamelevel <- function(renamelevel) { # renamelevel if (!is.logical(renamelevel)) stop("renamelevel must be a boolean.") } check_sizegroup <- function(sizegroup) { # sizegroup if (!is.logical(sizegroup)) stop("sizegroup must be a boolean.") } check_index <- function(index) { # index if (!isTRUE(all.equal(index, as.integer(index), check.attributes = F))) stop("index must be a vector of integers.") if (max(index) > length(index)) stop("A group index is bigger than the number of variables. index must be in [1;ncol(X)].") if (min(index) < 1) stop("A group index is less than 1. index must be in [1;ncol(X)].") } check_lambda <- function(lambda) { # lambda if (!is.null(lambda)) if (sum(lambda < 0) > 0) stop("Lambda must be positive.") } check_cl <- function(cl) { # cl if (!isTRUE(all.equal(cl, as.integer(cl), check.attributes = F))) stop("cl must be a vector of integers.") if (max(cl) > length(cl)) stop("A cluster number is bigger than the length of cl.") } check_s <- function(s) { # s if (!isTRUE(all.equal(s, as.numeric(s), check.attributes = F))) stop("s must be numerical.") if (s < 0) stop("s must be positive.") } check_b <- function(b) { # b if (!isTRUE(all.equal(b, as.numeric(b), check.attributes = F))) stop("b must be numerical.") } check_maxlambda <- function(lambda, maxlam) { # lambda if (!is.null(lambda)) if (sum(lambda > maxlam) > 0) stop(paste0("Lambda must be in [0;", maxlam, "]")) } check_w <- function(w) { # w if (!is.null(w)) { if (!isTRUE(all.equal(w, as.numeric(w), check.attributes = F))) stop("w must be numerical.") if (sum(w < 0) > 0) stop("w must be positive.") } } check_nthread <- function(nthread) { # nthread if (!is.null(nthread)) { if (!isTRUE(all.equal(nthread, as.integer(nthread), check.attributes = F))) stop("nthread must be an integer.") if (nthread < 1) stop("nthread must be strictly positive.") } } check_verbose <- function(verbose) { # verbose if (!isTRUE(all.equal(verbose, as.integer(verbose), check.attributes = F))) stop("verbose must be an integer.") if (verbose < 0) stop("verbose must be 0, 1, 2 or 3.") if (verbose > 3) stop("verbose must be 0, 1, 2 or 3.") } check_scaling <- function(scaling) { # scaling if (!is.logical(scaling)) stop("scaling must be a boolean.") }
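# ----------------------------------------------------------------------------
# Illustrative sketch (internal helpers, not exported): how the argument checks
# above behave. Each check returns silently when its argument is valid and
# stops with an informative message otherwise. Wrapped in `if (FALSE)` so that
# nothing is executed when the file is sourced.
if (FALSE) {
  check_scaling(TRUE)                # valid: returns silently
  try(check_scaling("yes"))          # error: scaling must be a boolean
  try(check_lambda(c(-0.1, 0.2)))    # error: lambda must be positive
  try(check_index(c(1, 1, 5)))       # error: a group index exceeds the number of variables
}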
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/checkingconditions.R
norm.vect <- function(x) { res <- sqrt(sum(x^2)) return(res) } soft.thresholding <- function(i.index, index, b, lambda, sizegroup = TRUE) { vect.bcv.group <- b[index==i.index] if (length(vect.bcv.group)==1) {res <- sign(vect.bcv.group)*max(abs(vect.bcv.group)-lambda,0)} else { if (norm.vect(vect.bcv.group)>0) { if (sizegroup==T) res <- vect.bcv.group*max(norm.vect(vect.bcv.group)-sqrt(length(vect.bcv.group))*lambda,0)/norm.vect(vect.bcv.group) else res <- vect.bcv.group*max(norm.vect(vect.bcv.group)-lambda,0)/norm.vect(vect.bcv.group) } else res <- rep(0, length(vect.bcv.group)) } return(unname(res)) } #' @title Group soft-thresholding operator #' @export #' #' @description #' This function implements the group soft-thresholding operator for a vector whose elements are priorly split into groups. For the complete mathematical #' formulation, the reader may refer to the references below. #' #' @param b a numerical vector. #' @param lambda a positive scalar containing the regularization parameter. #' @param index a vector of integers of size \code{length(b)} containing the group membership for #' each element of \code{b}. By default, \code{index=1:length(b)} i.e. each element of \code{b} constitutes its own group. #' @param sizegroup a boolean. If TRUE, the size of #' the groups is taken into account in the thresholding operation. #' #' @return Returns the sparse vector after the group soft-thresholding operation. #' #' @seealso \code{\link{groupsparsewkm}} #' #' @examples #' b <- c(0.1, 0.2, 0.8, 0.1, 0.1, 0.3) #' index <- c(1,1,2,2,3,3) #' lambda <- 0.1 #' groupsoft(b=b, lambda=lambda, index=index, sizegroup=TRUE) #' lambda <- 0.3 #' groupsoft(b=b, lambda=lambda, index=index, sizegroup=TRUE) #' lambda <- 0.8 #' groupsoft(b=b, lambda=lambda, index=index, sizegroup=TRUE) #' #' @references M. Chavent, J. Lacaille, A. Mourer and M. Olteanu (2020). #' Sparse k-means for mixed data via group-sparse clustering, to appear in ESANN proceedings. #' @references M. Yuan and Y. Lin (2006). Model selection and estimation in regression with grouped variables. J. R. Statist. Soc. B, Vol. 68(1), p. 49-67. groupsoft <- function(b, lambda, index = 1:length(b), sizegroup = TRUE) { check_fun_groupsoft(b, lambda, index, sizegroup) if (lambda==0) w <- b/norm.vect(b) else { w <- c(unlist(sapply(unique(sort(index)), soft.thresholding, index=index, b=b, lambda=lambda, sizegroup = sizegroup))) if (norm.vect(w)==0) w <- rep(0, length(b)) else w <- w/norm.vect(w) } return(w) }
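# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the documented examples): checking the group
# soft-thresholding formula by hand on the example from the documentation above
# (sizegroup = TRUE, so group sizes enter the threshold). Wrapped in
# `if (FALSE)` so that nothing is executed when the file is sourced.
if (FALSE) {
  b <- c(0.1, 0.2, 0.8, 0.1, 0.1, 0.3)
  index <- c(1, 1, 2, 2, 3, 3)
  lambda <- 0.1
  # per-group thresholding: b_g * max(||b_g|| - sqrt(|g|) * lambda, 0) / ||b_g||
  thresh <- unlist(lapply(split(b, index), function(bg) {
    ng <- sqrt(sum(bg^2))
    bg * max(ng - sqrt(length(bg)) * lambda, 0) / ng
  }))
  # groupsoft() additionally rescales the thresholded vector to unit L2 norm
  w_manual <- unname(thresh / sqrt(sum(thresh^2)))
  w_pkg <- groupsoft(b = b, lambda = lambda, index = index, sizegroup = TRUE)
  all.equal(w_manual, w_pkg)   # expected to be TRUE
}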
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/groupsoft.R
#' @title Group-sparse weighted k-means #' @export #' #' @description This function performs group-sparse weighted k-means on a set #' of observations described by numerical variables organized in groups. #' It generalizes the sparse clustering algorithm introduced by #' Witten & Tibshirani (2010) to groups. While the algorithm clusters the observations, the groups of variables are supposed priorly known. #' The algorithm computes a series of weights associated to the groups of variables, the weights #' indicating the importance of each group in the clustering process. #' #' @param X a numerical matrix or a dataframe of dimension \code{n} (observations) by \code{p} #' (variables). #' @param centers an integer representing the number of clusters. #' @param lambda a vector of numerical values (or a single value) providing #' a grid of values for the regularization parameter. If NULL (by default), the function computes its #' own lambda sequence of length \code{nlambda} (see details). #' @param nlambda an integer indicating the number of values for the regularization parameter. #' By default, \code{nlambda=20}. #' @param index a vector of integers of size \code{p} providing the group membership #' for each variable. By default, \code{index=1:ncol(X)} i.e. no groups or groups of size 1. #' @param sizegroup a boolean. If TRUE, the group sizes (number of variables in each group) are taken into account in the penalty term (see details). #' By default, \code{sizegroup=TRUE}. #' @param nstart an integer representing the number of random starts in the k-means algorithm. #' By default, \code{nstart=10}. #' @param itermaxw an integer indicating the maximum number of iterations for the inside #' loop over the weights \code{w}. By default, \code{itermaxw=20}. #' @param itermaxkm an integer representing the maximum number of iterations in the k-means #' algorithm. By default, \code{itermaxkm=10}. #' @param scaling a boolean. If TRUE, variables are scaled to zero mean and unit variance. By default, \code{scaling=TRUE}. #' @param epsilonw a positive numerical value. It provides the precision of the stopping criterion over \code{w}. By default, \code{epsilonw =1e-04}. #' @param verbose an integer value. If \code{verbose=0}, the function stays silent, if \code{verbose=1} (default option), it prints #' whether the stopping criterion over the weights \code{w} is satisfied. #' #' @return \item{lambda}{a numerical vector containing the regularization parameters (a grid of values).} #' @return \item{W}{a \code{p} by \code{length(lambda)} numerical matrix. It contains the weights associated to each variable.} #' @return \item{Wg}{a \code{L} by \code{length(lambda)} numerical matrix, where \code{L} is the number of groups. It contains the weights associated to each group.} #' @return \item{cluster}{a \code{n} by \code{length(lambda)} integer matrix. It contains the cluster memberships, for each value of the regularization parameter.} #' @return \item{sel.feat}{a numerical vector of the same length as \code{lambda}, giving the number of selected variables for each value of the regularization parameter.} #' @return \item{sel.groups}{a numerical vector of the same length as \code{lambda}, giving the number of selected groups of variables for each value of the regularization parameter.} #' @return \item{Z}{a matrix of size \code{n} by \code{p} containing the scaled data if \code{scaling=TRUE}, and a copy of \code{X} otherwise.} #' @return \item{bss.per.feature}{a matrix of size \code{p} by \code{length(lambda)}. 
It contains the between-class variance computed for each variable.} #' #' #' @details #' Group-sparse weighted k-means performs clustering on data described by numerical variables priorly partitioned into groups, and automatically selects the most discriminant groups by #' setting to zero the weights of the non-discriminant ones. #' #' The algorithm is based on the optimization of a cost function which is the weighted between-class variance penalized by a group L1-norm. The groups must be priorly defined through #' expert knowledge. If there is no group structure (each group contains one variable only), the algorithm reduces to the sparse weighted k-means introduced in Witten & Tibshirani (2010). #' The penalty term may take into account the size of the groups by setting \code{sizegroup=TRUE} (see Chavent et al. (2020) for further details on the mathematical expression of the #' optimized criterion). The importance of the penalty term may be adjusted through the regularization parameter \code{lambda}. If \code{lambda=0}, there is no penalty applied to the #' weighted between-class variance. The larger \code{lambda}, the larger the penalty term and the number of groups with null weights. #' #' The output of the algorithm is threefold: one gets a partitioning of the data, a vector of weights associated to each group, and a vector of weights associated to each #' variable. Weights equal to zero imply that the associated variables or the associated groups do not participate in the clustering process. #' #' Since it is difficult to choose the regularization parameter \code{lambda} without prior knowledge, the function automatically builds a grid of parameters and finds the partitioning #' and the vectors of weights associated to each value in the grid. #' #' Note that when the regularization parameter is equal to 0 (no penalty applied), the output is different from that of a regular k-means, since the optimized criterion is a weighted #' between-class variance and not the between-class variance only. #' #' @references Witten, D. M., & Tibshirani, R. (2010). A framework for feature #' selection in clustering. Journal of the American Statistical Association, #' 105(490), p.713-726. #' @references Chavent, M. & Lacaille, J. & Mourer, A. & Olteanu, M. (2020). #' Sparse k-means for mixed data via group-sparse clustering, ESANN proceedings.
#' #' @seealso \code{\link{plot.spwkm}}, \code{\link{info_clust}} #' #'@examples #' data(iris) #' # define two groups of variables: #' # "Sepal.Length" and "Sepal.Width" in group 1 #' # "Petal.Length" and "Petal.Width" in group 2 #' index <- c(1, 2, 1, 2) #' # group-sparse k-means #' \donttest{ #' out <- groupsparsewkm(X = iris[,-5], centers = 3, index = index) #' # grid of regularization parameters #' out$lambda #' k <- 10 #' # weights of the variables for the k-th regularization parameter #' out$W[,k] #' # weights of the groups for the k-th regularization parameter #' out$Wg[,k] #' # partition obtained with for the k-th regularization parameter #' out$cluster[,k] #' # between-class variance on each variable #' out$bss.per.feature[,k] #' # between-class variance #' sum(out$bss.per.feature[,k])/length(index) #' #' # one variable per group (equivalent to sparse k-means) #' index <- 1:4 # default option in groupsparsewkm #' # sparse k-means #' out <- groupsparsewkm(X = iris[,-5], centers = 3, index = index) #' # or #' out <- groupsparsewkm(X = iris[,-5], centers = 3) #' # group weights and variable weights are identical in this case #' out$Wg #' out$W #' } groupsparsewkm <- function(X, centers, lambda = NULL, nlambda = 20, index = 1:ncol(X), sizegroup = TRUE, nstart = 10, itermaxw = 20, itermaxkm = 10, scaling = TRUE, verbose = 1, epsilonw = 1e-04) { call <- match.call() check_fun_groupsparsw(X, lambda, nlambda, index, sizegroup, itermaxw, scaling, verbose) X <- as.matrix(scale(X, center = scaling, scale = scaling)) X <- X[,order(index)] index <- sort(index) km <- stats::kmeans(X, centers, nstart, itermaxkm) clusterini <- km$cluster bss.per.featureini <- weightedss(X, clusterini)$bss.per.feature/nrow(X) maxlam <- max(stats::aggregate(bss.per.featureini, by=list(index), function(x){norm.vect(x)/sqrt(length(x))})[,2]) check_maxlambda(lambda, maxlam) if (is.null(lambda)) lambda <- seq(from = 0, to = (maxlam - 0.001), length.out = nlambda) # loop over lambda ls <- sapply(lambda, FUN = function(i) { # loop over w cluster1 <- clusterini bss.per.feature <- bss.per.featureini w1 <- rep(1/ncol(X), ncol(X)) # initialization of w1 / w1 gives the weight at the i+1 iterations w0 <- abs(stats::rnorm(ncol(X))) W_cl <- list() niter <- 1 while (sum(abs(w1 - w0))/sum(abs(w0)) > epsilonw && niter < itermaxw) { if (niter != 1) { Xw <- t(t(X)*sqrt(w1)) km <- stats::kmeans(Xw, centers, nstart = nstart, iter.max = itermaxkm) cluster1 <- km$cluster bss.per.feature <- weightedss(X, cluster1)$bss.per.feature/nrow(X) } w0 <- w1 cluster0 <- cluster1 # Soft thresholding operator w1 <- groupsoft(bss.per.feature, i, index, sizegroup) if (norm.vect(w1)==0) niter <- itermaxw niter = niter + 1 } # end loop over w if (verbose == 1) if (niter >= itermaxw) print(paste0("the stopping criterion over w is not satisfied for lambda = ", i)) else print(paste0("the stopping criterion over w is satisfied for lambda = ", i)) W_cl[[1]] <- w0 W_cl[[2]] <- cluster0 W_cl[[3]] <- bss.per.feature W_cl }) # end loop over lambda W <- do.call(cbind, (ls[1, ])) cluster <- do.call(cbind, (ls[2, ])) bss.per.feature <- do.call(cbind, (ls[3, ])) rownames(W) <- colnames(X) colnames(W) <- paste("Lambda", 1:length(lambda)) Wg <- stats::aggregate(W, by=list(sort(index)), norm.vect)[,-1] Wg <- as.matrix(Wg) rownames(Wg) <- paste("Group", unique(sort(index))) colnames(Wg) <- paste("Lambda", 1:length(lambda)) selected.features <- apply(W,2,function(x){sum(x>0)}) selected.groups <- apply(Wg,2,function(x){sum(x>0)}) rownames(bss.per.feature) <- colnames(X) 
colnames(bss.per.feature) <- paste("Lambda", 1:length(lambda)) rownames(cluster) <- rownames(X) colnames(cluster) <- paste("Lambda", 1:length(lambda)) out <- list(call = call, type="NumGroupSparse", lambda = lambda, W = W, Wg = Wg, cluster = cluster, sel.feat= selected.features, sel.groups=selected.groups, Z = X, index = index, bss.per.feature = bss.per.feature) class(out) <- "spwkm" return(out) }
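# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the documented examples): one simple way to
# navigate the regularization path returned by groupsparsewkm(), using only the
# output fields documented above. The selection rule below (keep the largest
# lambda that retains at least two groups) is a heuristic chosen for
# illustration, not a recommendation. Wrapped in `if (FALSE)` so that nothing
# is executed when the file is sourced.
if (FALSE) {
  data(iris)
  out <- groupsparsewkm(X = iris[, -5], centers = 3, index = c(1, 2, 1, 2))
  # number of selected groups along the lambda grid
  cbind(lambda = out$lambda, sel.groups = out$sel.groups)
  keep <- max(which(out$sel.groups >= 2))
  out$Wg[, keep]                             # group weights at that lambda
  table(out$cluster[, keep], iris$Species)   # compare with the known labels
}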
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/groupsparsewkm.R
#' @title Description of a set of partitions #' @export #' #' @description This function computes descriptive statistics of the clustering produced with #' group-sparse weighted k-means on numerical data, or with sparse weighted k-means on mixed data. #' It displays the average of the numerical variables per cluster, and the relative frequencies #' of the levels in the categorical variables per cluster. #' #' @param out an object of class \code{spwkm}. #' @param which.lambda an integer or a vector of integers #' selecting the clusterings for which summaries are computed. #' @param X a matrix or a data frame. The initial data set. #' #' @details #' The values in \code{which.lambda} must be integers between 1 and \code{length(out$lambda)}. One may #' thus select the clusterings corresponding to specific regularization parameters, or the whole set #' of clusterings #' obtained for the whole grid of \code{out$lambda}. #' #' @return \item{mean.by.clust}{a list of numerical matrices. Each matrix contains the mean values #' of the numerical variables computed per cluster, for a given value of the regularization parameter.} #' @return \item{freq.by.clust}{a list of numerical matrices. Each matrix contains the relative #' frequencies of each level associated to categorical variables, #' computed per cluster and for a given value of the regularization parameter.} #' @return \item{lambda}{a scalar or a numerical vector. The selected values of the regularization #' parameter. } #' #' @seealso \code{\link{groupsparsewkm}}, \code{\link{sparsewkm}} #' #' @examples #' data(HDdata) #' out <- sparsewkm(X = HDdata[,-14], centers = 2) #' info_clust(out, which.lambda=c(1,10,20), X = HDdata[,-14]) info_clust <- function(out, which.lambda, X) { varmean_ls <- list() freq_ls <- list() res <- PCAmixdata::splitmix(X) X.quanti <- res$X.quanti X.quali <- res$X.quali rec <- PCAmixdata::recod(X.quanti, X.quali, rename.level = TRUE) ncl <- length(unique(as.vector(out$cluster))) for(i in 1:length(which.lambda)){ if (!is.null(X.quanti)) { varmean_byclust <- stats::aggregate(X.quanti, list(out$cluster[, which.lambda[i]]), mean) varmean_byclust <- varmean_byclust[,-1] varmean_byclust <- t(varmean_byclust) colnames(varmean_byclust) <- paste("cluster=", 1:ncl, sep = "") varmean_ls[[i]] <- varmean_byclust } if (!is.null(X.quali)) { freq_byclust <- stats::aggregate(rec$G, list(out$cluster[, which.lambda[i]]), mean) freq_byclust <- freq_byclust[,-1] freq_byclust <- t(freq_byclust) colnames(freq_byclust) <- paste("cluster=", 1:ncl, sep = "") freq_ls[[i]] <- freq_byclust } } names(varmean_ls) <- paste("lambda = ", out$lambda[which.lambda], sep = "") names(freq_ls) <- paste("lambda = ", out$lambda[which.lambda], sep = "") lambda <- out$lambda[which.lambda] return(list(mean.by.clust = varmean_ls, freq.by.clust = freq_ls, lambda=lambda)) }
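# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the documented examples): accessing the
# pieces of the list returned by info_clust(), continuing the example given in
# the documentation above. Wrapped in `if (FALSE)` so that nothing is executed
# when the file is sourced.
if (FALSE) {
  data(HDdata)
  out <- sparsewkm(X = HDdata[, -14], centers = 2)
  desc <- info_clust(out, which.lambda = c(1, 10, 20), X = HDdata[, -14])
  desc$lambda                # the selected regularization parameters
  desc$mean.by.clust[[1]]    # means of the numerical variables, first lambda
  desc$freq.by.clust[[1]]    # level frequencies per cluster, first lambda
}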
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/info_clust.R
#' @title Plots from a "spwkm" object #' @export #' #' @description Produces several graphics to help interpreting a \code{spwkm} object. #' #' @param x An object of class \code{spwkm}. #' @param what A character string indicating which element of \code{x} to be plotted. See section #' "Details" below for further information. #' @param Which A numerical vector indexing the groups or the variables to be displayed. See section #' "Details" below for further information. #' @param xtitle The title of the x-axis. #' @param ytitle The title of the y-axis. #' @param title The title of the graphic. #' @param showlegend A boolean. If \code{showlegend=NULL} (default value), the legend is displayed. #' @param legendtitle The title of the legend. #' @param ... Further arguments to the \code{plot} function. #' #' @return \item{p}{an object of class \code{ggplot}.} #' #' @details #' The \code{plot} function allows to represent the regularization paths for a grid of values of \code{lambda}, as well as several quality criteria associated to the #' clustering. #' #' For both \code{groupsparsewkm} and \code{sparsewkm} functions, the following options are available: #' #' If \code{what=weights.features}, the regularization paths for the weights associated to the variables are displayed. #' #' If \code{what=sel.features}, the graph represents the number of selected variables for each value of the regularization parameter \code{lambda}. In the case of #' sparse weighted k-means for mixed data, categorical variables are represented with dotted lines so that one easily identifies them. #' #' If \code{what=expl.var}, the explained variance (computed as the contribution of the between-class variance to the global variance) is displayed. This criterion is #' computed for all variables in the data set, without taking into account the weights of the group or of the variables. #' #' If \code{what=w.expl.var}, the explained weighted variance is computed. The difference with the criterion above is that the weights of the variables are #' taken into account in the computation. This leads to a criterion which, for large regularization parameters \code{lambda}, may be computed on one variable only, if #' its weight becomes equal to 1 and all the others are discarded. #' #' If \code{what=pen.crit}, the graph displays the evolution of the penalized criterion, maximized by the algorithm. This criterion writes as #' the between-class weighted sum-of-squares, penalized by a group L1-norm. For more details on the mathematical expressions, one may refer to Chavel et al. (2020). #' #' For the outcome of the \code{groupsparsewkm} function trained on numerical data only, two more options are available: #' #' If \code{what=weights.groups}, the regularization paths for the weights associated to the groups of variables are displayed. #' #' If \code{what=sel.groups}, the graph represents the number of selected groups for each value of the regularization parameter \code{lambda}. #' #' For the outcome of the \code{sparsewkm} function trained on mixed data, two more options are also available: #' #' If \code{what=weights.levels}, the regularization paths for the weights associated to the levels of the categorical variables are displayed. #' #' If \code{what=sel.levels}, the graph represents the number of selected levels associated to the categorical variables plus the number of selected #' numerical variables, for each value of the regularization parameter \code{lambda}. 
#' #' If the number of groups in \code{groupsparsewkm} or if the number of features in \code{sparsewkm} are too large to have easily interpretable graphics, one may select #' some groups or some variables using the argument \code{Which}. Note that when training \code{sparsewkm} on mixed data, the initial order of the variables is changed: #' after the processing step, numerical variables are displayed first, and categorical second. The indexing provided in \code{Which} should take this into account (see the #' Examples section). #' #' @references M., Chavent, J. Lacaille, A. Mourer, and M. Olteanu (2020). #' Sparse k-means for mixed data via group-sparse clustering. To appear in ESANN proceedings. #' #' @seealso \code{\link{sparsewkm}}, \code{\link{groupsparsewkm}} #' #' @examples #' # sparse weighted k-means on mixed data #' \donttest{ #' data(HDdata) #' out <- sparsewkm(X = HDdata[,-14], centers = 2) #' plot(out, what = "weights.features") #' plot(out, what = "weights.levels") #' plot(out, what = "sel.features") #' plot(out, what = "sel.levels") #' plot(out, what = "expl.var") #' plot(out, what = "w.expl.var") #' plot(out, what = "pen.crit") #' # plot the regularization paths for first three variables only #' plot(out, what = "weights.features", Which=1:3) #' #' # group sparse weighted k-means on numerical data #' data(iris) #' index <- c(1, 2, 1, 2) #' out <- groupsparsewkm(X = iris[,-5], centers = 3, index = index) #' plot(out, what = "weights.groups") #' plot(out, what = "weights.features") #' plot(out, what = "sel.groups") #' plot(out, what = "sel.features") #' plot(out, what = "expl.var") #' plot(out, what = "w.expl.var") #' plot(out, what = "pen.crit") #' # plot the regularization paths for the variables in the first group only #' plot(out, what = "weights.features", Which=1) #' } #' @import ggplot2 Polychrome plot.spwkm <- function(x, what="weights.features", Which=NULL, xtitle =NULL, ytitle = NULL, title = NULL, showlegend = NULL, legendtitle = NULL, ...) 
{ if (!inherits(x, "spwkm")) stop("Use only with \"spwkm\" objects") if (x$type=="NumGroupSparse") { if (what=="weights.levels") stop("The implementation does not allow to perform group-sparse k-means on mixed data yet!") if (what=="sel.levels") stop("The implementation does not allow to perform group-sparse k-means on mixed data yet!") if (what=="weights.groups") { data.to.plot <- data.frame(weights=c(t(x$Wg)), lambda=rep(x$lambda, length(unique(x$index))), group=rep(rownames(x$Wg), each=length(x$lambda)), color=as.factor(rep(1:length(unique(x$index)), each=length(x$lambda)))) color.count <- length(unique(x$index)) if (is.null(Which)==FALSE) {data.to.plot <- data.to.plot[data.to.plot$color%in%Which,]} else Which <- unique(x$index) get.palette <- grDevices::colorRampPalette(glasbey.colors(n=32)) colors.groups <- get.palette(color.count) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Group weights" if (is.null(title)) title = "Groups of features - regularization paths" if(is.null(showlegend)) showlegend = T if (is.null(legendtitle)) legendtitle = "Group of features" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$weights, group=as.factor(.data$group))) + geom_line(aes(color=.data$color))+ geom_point(aes(color=.data$color))+ scale_colour_manual("", values=colors.groups[Which], labels=rownames(x$Wg)[Which]) + xlab(xtitle)+ylab(ytitle)+ labs(color = legendtitle, linetype = legendtitle)+ggtitle(title) if (showlegend==F) p <- p + theme(legend.position="none") return(p) } if (what=="weights.features") { data.to.plot <- data.frame(weights=c(t(x$W)), lambda=rep(x$lambda, dim(x$W)[1]), group=rep(rownames(x$W), each=length(x$lambda)), color=as.factor(rep(x$index, each=length(x$lambda)))) color.count <- length(unique(x$index)) if (is.null(Which)==FALSE) {data.to.plot <- data.to.plot[data.to.plot$color%in%Which,]} else Which <- unique(x$index) get.palette <- grDevices::colorRampPalette(glasbey.colors(n=32)) colors.groups <- get.palette(color.count) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Features weights" if (is.null(title)) title = "Features - regularization paths" if(is.null(showlegend)) showlegend = T if (is.null(legendtitle)) legendtitle = "Features" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$weights, group=as.factor(.data$group), color=.data$color)) + geom_line()+ geom_point()+ scale_colour_manual("", values=colors.groups[unique(data.to.plot$color)], labels= paste("Group ", unique(data.to.plot$color), sep=""))+ xlab(xtitle)+ ylab(ytitle)+ggtitle(title) if (showlegend==F) p <- p + theme(legend.position="none") return(p) } if (what=="sel.groups") { data.to.plot <- data.frame(sel.groups=x$sel.groups, lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Number of selected groups" if (is.null(title)) title = "Selected groups path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$sel.groups)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } if (what=="sel.features") { data.to.plot <- data.frame(sel.features=x$sel.feat, lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Number of selected features" if (is.null(title)) title = "Selected features path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$sel.features)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } if (what=="expl.var") { data.to.plot <- data.frame(bss=apply(x$bss.per.feature,2,sum)/dim(x$W)[1], lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" 
if(is.null(ytitle)) ytitle = "Explained variance" if (is.null(title)) title = "Explained variance path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$bss)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } if (what=="pen.crit") { penalized.criterion <- sapply(1:length(x$lambda), function(i){sum(x$W[,i]*x$bss.per.feature[,i])- x$lambda[i]*sum(sqrt(summary(as.factor(x$index)))*x$Wg[,i])}) data.to.plot <- data.frame(penalized.criterion, lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Penalized weigthed between-class variance" if (is.null(title)) title = "Penalized criterion path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$penalized.criterion)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } if (what=="w.expl.var") { bss <- sapply(1:length(x$lambda), function(i){sum(x$W[,i]*x$bss.per.feature[,i])})/apply(x$W,2,sum) data.to.plot <- data.frame(bss, lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Explained weighted variance" if (is.null(title)) title = "Explained weighted-variance path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$bss)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } } if (x$type=="MixedSparse") { if (what=="weights.groups") stop("The implementation does not allow to perform group-sparse k-means on mixed data yet!") if (what=="sel.groups") stop("The implementation does not allow to perform group-sparse k-means on mixed data yet!") if (what=="weights.features") { data.to.plot <- data.frame(weights=c(t(x$W)), lambda=rep(x$lambda, length(unique(x$index))), feature=rep(rownames(x$W), each=length(x$lambda)), color=as.factor(rep(1:length(unique(x$index)), each=length(x$lambda))), line.type=as.factor(rep(as.numeric(summary(as.factor(x$index))>1)*2+1, each=length(x$lambda)))) color.count <- length(unique(x$index)) line.groups <- as.numeric(summary(as.factor(x$index))>1)*2+1 if (is.null(Which)==FALSE) {data.to.plot <- data.to.plot[data.to.plot$color%in%Which,]} else Which <- unique(x$index) get.palette <- grDevices::colorRampPalette(glasbey.colors(n=32)) colors.groups <- get.palette(color.count) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Features weights" if (is.null(title)) title = "Features - regularization paths" if(is.null(showlegend)) showlegend = T if (is.null(legendtitle)) legendtitle = "" p <- ggplot(data.to.plot, aes(x=.data$lambda, y=.data$weights, linetype=interaction(.data$color, .data$line.type), col=interaction(.data$line.type, .data$color))) + geom_line() + geom_point() + scale_colour_manual("", values=colors.groups[Which], labels=rownames(x$W)[Which]) + scale_linetype_manual("", values=line.groups[Which], labels=rownames(x$W)[Which])+ xlab(xtitle)+ylab(ytitle)+ labs(title=legendtitle)+ggtitle(title) if (showlegend==F) p <- p + theme(legend.position="none") return(p) } if (what=="weights.levels") { data.to.plot <- data.frame(weights=c(t(x$Wm)), lambda=rep(x$lambda, dim(x$Wm)[1]), group=rep(rownames(x$Wm), each=length(x$lambda)), feature=as.factor(rep(x$index, each=length(x$lambda))), color=as.factor(rep(1:length(x$index), each=length(x$lambda)))) if (is.null(Which)==FALSE) {data.to.plot <- data.to.plot[data.to.plot$feature%in%Which,]} else Which=unique(x$index) data.to.plot <- data.to.plot[data.to.plot$feature%in%which(summary(as.factor(x$index))>1),] color.count <- length(x$index) get.palette <- grDevices::colorRampPalette(glasbey.colors(n=32)) colors.groups <- 
get.palette(color.count) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Levels weights" if (is.null(title)) title = "Levels - regularization paths" if(is.null(showlegend)) showlegend = T if (is.null(legendtitle)) legendtitle = "" p <- ggplot(data.to.plot, aes(x=.data$lambda, y=.data$weights, group=as.factor(.data$group), col=.data$color)) + geom_line(linetype="dotted") + geom_point() + scale_colour_manual("", values=colors.groups[which(summary(data.to.plot$color)>0)], labels= unique(data.to.plot$group))+ xlab(xtitle)+ylab(ytitle)+ labs(title=legendtitle)+ggtitle(title) if (showlegend==F) p <- p + theme(legend.position="none") return(p) } if (what=="sel.features") { data.to.plot <- data.frame(sel.groups=x$sel.init.feat, lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Number of selected features" if (is.null(title)) title = "Selected features path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$sel.groups)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } if (what=="sel.levels") { data.to.plot <- data.frame(sel.features=x$sel.trans.feat, lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Number of selected levels" if (is.null(title)) title = "Selected levels path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$sel.features)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } if (what=="expl.var") { data.to.plot <- data.frame(bss=apply(x$bss.per.feature,2,sum)/dim(x$Wm)[1], lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Explained variance" if (is.null(title)) title = "Explained variance path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$bss)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } if (what=="pen.crit") { penalized.criterion <- sapply(1:length(x$lambda), function(i){sum(x$Wm[,i]*x$bss.per.feature[,i])- x$lambda[i]*sum(sqrt(summary(as.factor(x$index)))*x$W[,i])}) data.to.plot <- data.frame(penalized.criterion, lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Penalized weigthed between-class variance" if (is.null(title)) title = "Penalized criterion path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$penalized.criterion)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } if (what=="w.expl.var") { bss <- sapply(1:length(x$lambda), function(i){sum(x$Wm[,i]*x$bss.per.feature[,i])})/apply(x$Wm,2,sum) data.to.plot <- data.frame(bss, lambda=x$lambda) if(is.null(xtitle)) xtitle = "Lambda" if(is.null(ytitle)) ytitle = "Explained weighted variance" if (is.null(title)) title = "Explained weighted-variance path" p<-ggplot(data.to.plot, aes(x=.data$lambda, y=.data$bss)) + geom_line()+geom_point()+xlab(xtitle)+ylab(ytitle)+ggtitle(title) return(p) } } }
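# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the documented examples): since plot.spwkm()
# returns a ggplot object, it can be further customized with standard ggplot2
# layers before printing. Wrapped in `if (FALSE)` so that nothing is executed
# when the file is sourced.
if (FALSE) {
  data(iris)
  out <- groupsparsewkm(X = iris[, -5], centers = 3, index = c(1, 2, 1, 2))
  p <- plot(out, what = "weights.groups")
  p + ggplot2::theme_minimal() + ggplot2::ggtitle("Customized title")
}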
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/plot.spwkm.R
#'@title Recoding mixed data #'@export #' #'@description This function transforms and scales a dataset with numerical and/or categorical #'variables. Numerical variables are scaled to zero mean and unit variance. Categorical variables #'are first transformed into dummy variables according to their levels, and second centered and #'normalized with respect to the square roots of the relative frequencies of the levels. The complete #'procedure is described in Chavent et al. (2014). #'@param X a matrix or a dataframe with numerical and/or categorical variables. Categorical variables #'must be given as factors. #'@param renamelevel a boolean. If TRUE (default value), the levels of the categorical variables #'are renamed as \code{'variable_name=level_name'}. #'@return \item{X}{a data frame or a matrix. The input data \code{X} with reordered columns #'(numerical first, categorical second).} #'@return \item{Z}{a data frame. The transformed data matrix with scaled numerical variables #' and scaled dummy variables coding for the levels.} #'@return \item{index}{a vector of integers. Contains an implicit partitioning of the transformed #' variables: each scaled numerical variable represents a group, all scaled dummy variables #' summarizing the levels of a categorical variable represent a group. \code{index} allows to #' preserve the information on the initial structure of the data, particularly for categorical variables.} #' #'@examples #'head(HDdata) #'out <- recodmix(HDdata[,-14], renamelevel=TRUE) #'# reordered data (numerical/categorical) #'colnames(out$X) #'# transformed and scaled data #'colnames(out$Z) #'# transformed variables partitioning and group membership #'out$index #' @references M. Chavent, V. Kuentz-Simonet, A. Labenne and J. Saracco (2014). #' Multivariate analysis of mixed data: the PCAmixdata R package, arXiv:1411.4911. recodmix <- function(X, renamelevel = FALSE) { split <- PCAmixdata::splitmix(X) X.quanti <- split$X.quanti X.quali <- split$X.quali rec <- PCAmixdata::recod(X.quanti, X.quali, rename.level = renamelevel) X <- rec$X Z <- rec$Z index <- rec$indexj return(list(X = X, Z = Z, index = index)) }
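# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the documented examples): a quick check of
# the preprocessing described above, namely that every column of the
# transformed matrix Z is centered and that `index` groups together the dummy
# columns coming from the same categorical variable. Wrapped in `if (FALSE)` so
# that nothing is executed when the file is sourced.
if (FALSE) {
  data(HDdata)
  out <- recodmix(HDdata[, -14], renamelevel = TRUE)
  round(colMeans(out$Z), 10)   # all columns of Z are (numerically) centered
  table(out$index)             # group sizes: 1 for numerical variables, number of levels for categorical ones
}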
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/recodmix.R
#' @title Sparse weighted k-means #' @export #' #' @description This function performs sparse weighted k-means on a set #' of observations described by numerical and/or categorical variables. #' It generalizes the sparse clustering algorithm introduced in #' Witten & Tibshirani (2010) to any type of data (numerical, categorical #' or a mixture of both). The weights of the variables indicate their importance #' in the clustering process and discriminant variables are thus selected by #' means of weights set to 0. #' #' @param X a dataframe of dimension \code{n} (observations) by \code{p} (variables) with #' numerical, categorical or mixed data. #' @param centers an integer representing the number of clusters. #' @param lambda a vector of numerical values (or a single value) providing #' a grid of values for the regularization parameter. If NULL (by default), the function computes its #' own lambda sequence of length \code{nlambda} (see details). #' @param nlambda an integer indicating the number of values for the regularization parameter. #' By default, \code{nlambda=20}. #' @param nstart an integer representing the number of random starts in the k-means algorithm. #' By default, \code{nstart=10}. #' @param itermaxw an integer indicating the maximum number of iterations for the inside #' loop over the weights \code{w}. By default, \code{itermaxw=20}. #' @param itermaxkm an integer representing the maximum number of iterations in the k-means #' algorithm. By default, \code{itermaxkm=10}. #' @param renamelevel a boolean. If TRUE (default option), each level of a categorical variable #'is renamed as \code{'variable_name=level_name'}. #' @param epsilonw a positive numerical value. It provides the precision of the stopping #' criterion over \code{w}. By default, \code{epsilonw =1e-04}. #' @param verbose an integer value. If \code{verbose=0}, the function stays silent, if \code{verbose=1} (default option), it prints #' whether the stopping criterion over the weights \code{w} is satisfied. #' #' @return \item{lambda}{a numerical vector containing the regularization parameters (a grid of values).} #' @return \item{W}{a \code{p} by \code{length(lambda)} matrix. It contains the weights associated to each variable.} #' @return \item{Wm}{a \code{q} by \code{length(lambda)} matrix, where \code{q} is the #' number of numerical variables plus the number of levels of the categorical #' variables. It contains the weights associated to the numerical variables and to the levels of the categorical #' variables.} #' @return \item{cluster}{a \code{n} by \code{length(lambda)} integer matrix. 
It contains the #' cluster memberships, for each value of the regularization parameter.} #' @return \item{sel.init.feat}{a numerical vector of the same length as \code{lambda}, giving the #' number of selected variables for each value of the regularization parameter.} #' @return \item{sel.trans.feat}{a numerical vector of the same length as \code{lambda}, giving the #' number of selected numerical variables and levels of categorical variables.} #' @return \item{X.transformed}{a matrix of size \code{n} by \code{q}, containing the transformed data: numerical variables scaled to #' zero mean and unit variance, categorical variables transformed into dummy variables, scaled (in mean and variance) #' with respect to the relative frequencies of the levels.} #' @return \item{index}{a numerical vector indexing the variables and allowing the levels of a #' categorical variable to be grouped together.} #' @return \item{bss.per.feature}{a matrix of size \code{q} by \code{length(lambda)}. #' It contains the between-class variance computed on the \code{q} transformed variables (numerical variables and #' levels of categorical variables).} #' #' @details #' Sparse weighted k-means performs clustering on mixed data (numerical and/or categorical), and automatically #' selects the most discriminant variables by setting to zero the weights of the non-discriminant ones. #' #' The mixed data is first preprocessed: numerical variables are scaled to zero mean and unit variance; #' categorical variables are transformed into dummy variables, and scaled -- in mean and variance -- with #' respect to the relative frequency of each level. #' #' The algorithm is based on the optimization of a cost function which is the weighted between-class variance penalized #' by a group L1-norm. The groups are implicitly defined: each numerical variable constitutes its own group, the levels #' associated to one categorical variable constitute a group. The importance of the penalty term may be adjusted through #' the regularization parameter \code{lambda}. #' #' The output of the algorithm is twofold: one gets a partitioning of the data set and a vector of weights associated #' to each variable. Some of the weights are equal to 0, meaning that the associated variables do not participate in the #' clustering process. If \code{lambda} is equal to zero, there is no penalty applied to the weighted between-class variance in the #' optimization procedure. The larger the value of \code{lambda}, the larger the penalty term and the number of variables with #' null weights. Furthermore, the weights associated to each level of a categorical variable are also computed. #' #' Since it is difficult to choose the regularization parameter \code{lambda} without prior knowledge, #' the function automatically builds a grid of parameters and finds a partition and vector of weights for each #' value of the grid. #' #' Note also that the columns of the data frame \code{X} must be of class factor for #' categorical variables. #' #' @references Witten, D. M., & Tibshirani, R. (2010). A framework for feature #' selection in clustering. Journal of the American Statistical Association, #' 105(490), 713-726. #' @references Chavent, M. & Lacaille, J. & Mourer, A. & Olteanu, M. (2020). #' Sparse k-means for mixed data via group-sparse clustering, ESANN proceedings. 
#' #' @seealso \code{\link{plot.spwkm}}, \code{\link{info_clust}}, #' \code{\link{groupsparsewkm}}, \code{\link{recodmix}} #' #'@examples #' data(HDdata) #' \donttest{ #' out <- sparsewkm(X = HDdata[,-14], centers = 2) #' # grid of automatically selected regularization parameters #' out$lambda #' k <- 10 #' # weights of the variables for the k-th regularization parameter #' out$W[,k] #' # weights of the numerical variables and of the levels #' out$Wm[,k] #' # partitioning obtained for the k-th regularization parameter #' out$cluster[,k] #' # number of selected variables #' out$sel.init.feat #' # between-class variance on each variable #' out$bss.per.feature[,k] #' # between-class variance #' sum(out$bss.per.feature[,k]) #' } sparsewkm <- function(X, centers, lambda = NULL, nlambda = 20, nstart = 10, itermaxw = 20, itermaxkm = 10, renamelevel = TRUE, verbose = 1, epsilonw = 1e-04) { call <- match.call() check_X(X) check_renamelevel(renamelevel) Xrec <- recodmix(X, renamelevel) res.out <- groupsparsewkm(X = Xrec$Z, centers, lambda, nlambda, index = Xrec$index, sizegroup = T, nstart, itermaxw, itermaxkm, scaling = FALSE, verbose, epsilonw) # scaling is put to FALSE because recodmix already scales the variables rownames(res.out$Wg) <- c(names(Xrec$X)[sapply(Xrec$X,is.factor)==F], names(Xrec$X)[sapply(Xrec$X,is.factor)==T]) res <- list(call = res.out$call, type="MixedSparse", W = res.out$Wg, Wm = res.out$W, cluster = res.out$cluster, lambda = res.out$lambda, sel.init.feat=res.out$sel.groups, sel.trans.feat=res.out$sel.feat, X.transformed = Xrec$Z, index = Xrec$index, bss.per.feature = res.out$bss.per.feature) class(res) <- "spwkm" return(res) }
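# Commented, illustrative sketch: inspecting the implicit group structure used by the group
# L1-norm penalty. 'index' maps each transformed column (scaled numerical variable or scaled
# dummy) to its original variable, so the group sizes entering the sqrt() factors of the
# penalized criterion (see plot.spwkm, option "pen.crit") can be read off directly:
# out <- sparsewkm(X = HDdata[, -14], centers = 2)
# table(out$index)          # number of transformed columns per original variable
# sqrt(table(out$index))    # sqrt(group size) factors appearing in the penalty term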
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/sparsewkm.R
wss.components <- function(k,x,cl) { res <- x[cl==k,] if (is.null(dim(res))==F) { res <- apply(res, 2, scale, center=T, scale=F) res <- apply(res, 2, function(x) sum(x^2)) } else res <- rep(0,dim(x)[2]) return(res) } #' @title Weighted sum-of-squares criteria #' @export #' #' @description This function computes various weighted sum-of-squares criteria for a given #' partition of a dataset described by numerical features. #' #' @param X a matrice or a dataframe of size \code{n} (observations) by \code{p} (variables) #' with numerical features only. #' @param cl a vector of integers of length \code{n}. It contains the cluster membership of the data. #' @param w a numerical vector of length \code{p}. It contains the weights to be applied to the features. #' By default, \code{w=NULL}, which amounts to setting each weight equal to 1. #' #' @return \item{bss.per.feature}{a numerical vector of length \code{p} containing the weighted #' between sum-of-squares per feature.} #' @return \item{wss.per.feature}{a numerical vector of length \code{p} containing the weighted #' within sum-of-squares per feature.} #' @return \item{bss.per.cluster}{a numerical vector of length \code{K} (\code{K} is the number of #' clusters) containing the weighted between sum-of-squares per cluster.} #' @return \item{wss.per.cluster}{a numerical vector of length \code{K} #' containing the weighted within sum-of-squares per cluster.} #' @return \item{bss}{a scalar representing the weighted between sum-of-squares of the partition. #' It may be computed as the sum over \code{bss.per.feature} or \code{bss.per.cluster}.} #' @return \item{wss}{a scalar representing the weighted within sum-of-squares of the partition. #' It may be computed as the sum over \code{wss.per.feature} or \code{wss.per.cluster}.} #' #' @examples #' data(iris) #' out <- weightedss(X = iris[,1:4], cl = as.numeric(iris$Species)) #' out$bss.per.feature #' out$bss.per.cluster #' out$bss #' #' w <- c(0.3,0.3,0.2,0.2) #' out <- weightedss(X = iris[,1:4], cl = as.numeric(iris$Species), w=w) #' out$bss.per.feature #' out$bss.per.cluster #' out$bss weightedss <- function(X, cl, w = NULL) { check_fun_weightedss(X, cl, w) X = as.matrix(X) sizecolX = ncol(X) if (is.null(w)) w = rep(1, sizecolX) K <- length(unique(cl)) X.scaled=t(t(X)*sqrt(w)) cluster.centers <- stats::aggregate(X.scaled, by=list(cl), FUN=mean)[,-1] cluster.densities <- table(cl) matrix.wss <- t(sapply(1:K, FUN=wss.components, x=X.scaled, cl=cl, simplify=T)) matrix.bss <- (cluster.centers-matrix(apply(X.scaled,2,mean),nrow=K, ncol=sizecolX,byrow=T))^2*cluster.densities wss.per.feature <- apply(matrix.wss, 2, sum) wss.per.cluster <- apply(matrix.wss, 1, sum) bss.per.feature <- apply(matrix.bss, 2, sum) bss.per.cluster <- apply(matrix.bss, 1, sum) bss <- sum(bss.per.cluster) wss <- sum(wss.per.cluster) names(bss.per.feature) <- colnames(X) names(wss.per.feature) <- colnames(X) names(bss.per.cluster) <- 1:K names(wss.per.cluster) <- 1:K return(list(bss.per.feature = bss.per.feature, wss.per.feature = wss.per.feature, bss.per.cluster = bss.per.cluster, wss.per.cluster = wss.per.cluster, bss = bss, wss = wss)) }
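# Commented sanity check (illustrative): the weighted total sum-of-squares decomposes exactly
# into the quantities returned above, i.e. sum_j w_j * sum_i (x_ij - mean_j)^2 == bss + wss.
# w <- c(0.3, 0.3, 0.2, 0.2)
# out <- weightedss(X = iris[, 1:4], cl = as.numeric(iris$Species), w = w)
# tot <- sum(sweep(scale(iris[, 1:4], scale = FALSE)^2, 2, w, "*"))
# all.equal(tot, out$bss + out$wss)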
/scratch/gouwar.j/cran-all/cranData/vimpclust/R/weightedss.R
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE,eval=TRUE,fig.align="center",fig.width = 7,fig.height = 5) old <- options(digits = 2) ## ----------------------------------------------------------------------------- library(vimpclust) data(DataMice) DataMice[1:10, 1:4] ## ----------------------------------------------------------------------------- summary(DataMice$Class.mouse) ## ---- message=FALSE, warning=FALSE------------------------------------------- names(DataMice)[1:20] index <- unlist(strsplit(names(DataMice)[1:(dim(DataMice)[2]-5)], split="_")) index <- as.numeric(index[(1:length(index)%%4==2)]) ## ---- message=FALSE, warning=FALSE, echo = T--------------------------------- set.seed(1407) res.mice <- groupsparsewkm(X = DataMice[,(1:length(index))], centers = 8, index = index, verbose = 1) ## ---- eval = F--------------------------------------------------------------- # set.seed(1407) # res.mice <- groupsparsewkm(X = DataMice[,(1:length(index))], centers = 8, index = index, verbose = 1) ## ----------------------------------------------------------------------------- res.mice$Wg[1:20,1:5] ## ----------------------------------------------------------------------------- plot(res.mice, what="weights.groups") ## ----------------------------------------------------------------------------- res.mice$W[1:20,1:5] ## ---- message=FALSE, warning=FALSE------------------------------------------- plot(res.mice, what = "weights.features") ## ---- message=FALSE, warning=FALSE------------------------------------------- plot(res.mice, what = "weights.groups", Which=c(1,2,30)) ## ---- message=FALSE, warning=FALSE------------------------------------------- plot(res.mice, what = "weights.features", Which=c(1,2,30)) ## ----------------------------------------------------------------------------- plot(res.mice, what="sel.groups") ## ----------------------------------------------------------------------------- plot(res.mice, what="sel.features") ## ----------------------------------------------------------------------------- plot(res.mice, what="expl.var") ## ----------------------------------------------------------------------------- plot(res.mice, what="w.expl.var") ## ---- message=FALSE----------------------------------------------------------- sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Class.mouse)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Genotype)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Treatment)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Behaviour)}) ## ---- include=FALSE----------------------------------------------------------- options(old)
/scratch/gouwar.j/cran-all/cranData/vimpclust/inst/doc/groupsparsewkm.R
--- title: "Group-sparse weighted k-means for numerical data" author: "Marie Chavent and Alex Mourer and Madalina Olteanu" date: "`r Sys.Date()`" output: html_vignette: toc: no header-includes: - \usepackage{bbm} bibliography: bibESANN.bib link-citations: yes vignette: > %\VignetteIndexEntry{Group-sparse weighted k-means for numerical data} %\usepackage[UTF-8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE,eval=TRUE,fig.align="center",fig.width = 7,fig.height = 5) old <- options(digits = 2) ``` $\DeclareMathOperator{\R}{\mathbb{R}}$ ## Basic function description `groupsparsewkm` is designed for clustering numerical data described by a set of features, while simultaneously selecting the most discriminant groups of features. The groups of features are supposed priorly known and provided as an argument to the function. This implementation generalizes the sparse $k$-means algorithm introduced in @sparsekmeans, and is based on the optimization of a penalized weighted between-class variance criterion. For more technical details on the penalty term and on the optimization procedure, one may refer to @Sparsegroupkmeans. ### Arguments Several arguments may be passed to `groupsparsewkm`, but only the first two are required: * `X` is the numerical data that will be clustered. It should have the format of a matrix or a data frame, and all entries should be numerical features. Only the features one would include in the clustering should be present. Column and row names may be supplied to ease the interpretation; * `centers` is the number of clusters to be computed. The rest of the arguments are related to the choices of the regularization parameter, the prior partitioning of the features into groups, the number of iterations and random starts in the algoritm or the fact of scaling the data. Default values are fixed for these parameters, one may see `help(groupsparsewkm)` for further details. ### Output The `groupsparsewkm` function returns an object of class `spwkm` (see `help(groupsparsewkm)` for further details on this class). ## A case study: `Mice` dataset The `DataMice` dataset consists of repeated measurements of 68 proteins on a sample of 72 mice (12 or 15 values independently measured for each protein). The [data](https://archive.ics.uci.edu/ml/datasets/Mice+Protein+Expression#) was first described in @Higuera, and it was processed here in order to discard some proteins and measurements containing missing data. ```{r} library(vimpclust) data(DataMice) DataMice[1:10, 1:4] ``` The data may be priorly split as follows: * According to the `Genotype`, 38 mice are in the control (c) group, and 34 in the trisomic (t) group; * According to the `Behavior`, 35 mice were stimulated to learn (CS), and 37 were not (SC); * According to the `Treatment`, 38 mice received one (m), and 34 did not (s). When mixing all the information, the data may be priorly split into 8 clusters, with the following frequencies: ```{r} summary(DataMice$Class.mouse) ``` Further details about this dataset may be found with `help(DataMice)`. ### Training the `groupsparsewkm` function For this dataset, the groups of features may be naturally defined: each protein represents a group, and the measurements associated to it represent the features of the group. The `index` vector, containing the index of the group associated to each feature, is created using the column names of `DataMice`. 
```{r, message=FALSE, warning=FALSE} names(DataMice)[1:20] index <- unlist(strsplit(names(DataMice)[1:(dim(DataMice)[2]-5)], split="_")) index <- as.numeric(index[(1:length(index)%%4==2)]) ``` The number of clusters, `centers` is fixed to 8, based on the additional prior knowledge on the dataset that was described above. Although there is no reason for an unsupervised method to retrieve the partitioning defined by some supplementary features, the comparison of the unsupervised clustering with a prior partitioning is interesting for illustration. All features are scaled to have zero mean and unit variance with the default setting of the `groupsparsewkm` function. \donttest{ ```{r, message=FALSE, warning=FALSE, echo = T} set.seed(1407) res.mice <- groupsparsewkm(X = DataMice[,(1:length(index))], centers = 8, index = index, verbose = 1) ``` } ```{r, eval = F} set.seed(1407) res.mice <- groupsparsewkm(X = DataMice[,(1:length(index))], centers = 8, index = index, verbose = 1) ``` According to the above, the algorithm converged for all values of `lambda`. In some cases, the stopping criterion may not be satisfied and the convergence over the weights `w` might not be achieved. If this were the case, one should increase the number of iterations `itermaxw`, which by default is set to 20. Note however that setting a larger value for this parameter would increase the computational time, since a full $k$-means algorithm is trained at each iteration. ### Results The weights associated to each group of features may be found in the matrix `Wg`. These weights illustrate the contribution of each group of features to the clustering and may be seen as a measure of the importance of each group for the clustering. Each column of `Wg` contains the weights computed for a given value of the regularization parameter `lambda`. The default setting in the `groupsparsewkm` function selects 20 values for the regularization pameter `lambda`, chosen uniformly between 0 (no regularization) and a maximum value automatically tuned. ```{r} res.mice$Wg[1:20,1:5] ``` Each row of `Wg` contains the weights associated to a group of features for all the values of the regularization parameter, or the so-called **regularization paths**. These may be further illustrated graphically, by calling the `plot` function for the `spwkm` class (see `help(plot.spwkm)` for more details). ```{r} plot(res.mice, what="weights.groups") ``` For the `Mice` data, one may see that four proteins particularly (associated to groups 1, 2, 11 and 21) appear as the most discriminant, as the regularization parameter increases. Other proteins, such as the one associated to group 30 have an interesting behaviour: its weight becomes large for large values of the regularization term, thus for a heavy penalty term. Indeed, at the 13th value of the `lambda` a significant drop in the number of selected groups occurs, from 41 selected groups to 13. It is at this point that group 30 becomes extremely significant for the clustering. If one wants to look into more details and assess the respective contribution of each feature, she may look at the matrix `W`, which contains the weights associated to each feature. These weights may be read as the relative importance of each feature for the clustering. Depending on the number of features in each group, `W` may potentially be a large matrix, and one may also want to focus on the features belonging to non-zero weigthed groups. 
```{r} res.mice$W[1:20,1:5] ``` The regularization path for each feature may be also illustrated using the `plot` function. For the `Mice` dataset, one may easily see that the features within each group are quite redundant (let us recall here that one group is made of repeated measurements of the same protein), their regularization paths being very similar. ```{r, message=FALSE, warning=FALSE} plot(res.mice, what = "weights.features") ``` By specifying the supplementary argument `Which` in the `plot` function, one may focus on the regularization paths of specific groups of features, represented either as the regularization path of the group, or the regularization paths of the corresponding features. Here below, proteins 1, 2 and 30 were selected for illustration. ```{r, message=FALSE, warning=FALSE} plot(res.mice, what = "weights.groups", Which=c(1,2,30)) ``` ```{r, message=FALSE, warning=FALSE} plot(res.mice, what = "weights.features", Which=c(1,2,30)) ``` #### Additional plots A valuable piece of information is given by the number of selected groups or the number of selected features for a given value of the regularization parameter `lambda`. The evolution of the number of features may be graphically illustrated as follows: ```{r} plot(res.mice, what="sel.groups") ``` ```{r} plot(res.mice, what="sel.features") ``` Since the measurements for each protein are quite redundant, and the number of measurements for each protein quite similar, the curves representing the evolution of the selected groups and of the selected features are very similar. A significant drop may be noticed after the 12th value of `lambda`, where only 13 proteins among the 68 are preserved for clustering. Besides the selected number of groups of features or the selected number of features, the evolution of some criteria related to the quality of the clustering are equally important for understanding and commenting the results. For example, using the argument `what="expl.var"` in the `plot` function, one may illustrate the evolution of the explained variance, as a function of the regularization parameter `lambda`. The explained variance is computed as the ratio between the between-class variance and the global variance in the data, and represents the ratio of information explained by the clustering. Let us remark that this criterion is independent of the sparse algorithm trained here, which maximizes the weighted between sum-of-squares penalized by a group-penalty term. The explained variance, computed on all features and without applying any weights, illustrates how the information is preserved when discarding an increasing number of features. ```{r} plot(res.mice, what="expl.var") ``` The number-of-selected-groups curve and the explained-variance curve may be used to select the appropriate regularization parameter `lambda` for the clustering. A **good choice** of `lambda` should preserve a high percentage of variance explained by the clustering, while discarding a large number of features. This actually amounts to a trade-off between the quality of the model and its relative parcimony. With this is mind, one may easily see that by selecting `lambda=0.45`, the explained variance remains very close to when using all features, whereas the number of groups and the number of features was reduced by a third. 
For `lambda=0.48`, if one is willing to "lose" a third of the explained variance (while remaining above 30%), the number of groups and the number of features may be reduced by more than 80% (13 groups are preserved among 68, and 171 features among 900). Hence, one may use this algorithm for drastically reducing the dimensionality of the data, while still preserving a significant clustering. Another graphical option includes the illustration of the weighted explained variance (the weights computed by the algorithm are taken into account and applied to the features when computing the between sum-of-squares and the total sum-of-squares). In this case, since the criterion takes into account the most discriminant features only, it is increasing with the penalty, and a significant jump may be seen at the same spot as for the explained variance, except that here, the weighted explained variance improves by more than 20%. ```{r} plot(res.mice, what="w.expl.var") ``` Other criteria such as the gap statistic could be efficiently used for selecting the regularization parameter (and also the number of clusters). They are not implemented here (**yet!**), but may be easily retrieved from other packages and combined with `spwkm` objects. #### Comparing the clustering with the "ground truth" As mentioned above, the `DataMice` observations are known to belong to some priorly defined clusters, defined by the `Genotype`, `Treatment` or `Behaviour`. In order to compare the resulting clusterings of the group-sparse algorithm (for the various values of `lambda`) with the priorly defined clusters, the Adjusted Rand Index (ARI) is computed below. ```{r, message=FALSE} sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Class.mouse)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Genotype)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Treatment)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Behaviour)}) ``` ```{r, include=FALSE} options(old) ``` According to the above values, the 8 clusters computed with the sparse-group $k$-means algorithm are not much related to the 8 priorly defined clusters. As we've already mentioned, there is no prior reason for correlation between the clustering output and the partitioning defined by the `Genotype`, the `Treatment` and the `Behaviour`. The clusters identified by the algorithm may correspond to a completely different structure in the data. Nevertheless, we should mention here that the proteins identified by the algorithm as the most discriminant or having significant weights for all values of `lambda` -- groups 1, 2, 10, 11, 21, 25, 30, 32, 68 -- correspond to those identified in @Higuera as discriminant for `Genotype`, `Treatment` or `Behaviour`. Furthermore, the algorithm implemented in `groupsparsewkm` also has the advantage of fully selecting or discarding one protein and its associated measurements thanks to the group approach. Group-sparse clustering thus offers a complete approach for both clustering and selecting the most discriminant groups of features. # Bibliography
/scratch/gouwar.j/cran-all/cranData/vimpclust/inst/doc/groupsparsewkm.Rmd
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE,eval=TRUE,fig.align="center",fig.width = 7,fig.height = 5) old <- options(digits = 2) ## ----------------------------------------------------------------------------- library(vimpclust) head(HDdata) ## ---- echo = FALSE, eval = TRUE----------------------------------------------- res <- sparsewkm(X = HDdata[,-14], centers = 2) ## ---- echo = TRUE, eval = FALSE----------------------------------------------- # res <- sparsewkm(X = HDdata[,-14], centers = 2) ## ----------------------------------------------------------------------------- res$W[,1:5] ## ----------------------------------------------------------------------------- plot(res, what="weights.features") ## ----------------------------------------------------------------------------- res$Wm[,1:5] ## ----------------------------------------------------------------------------- plot(res, what="weights.levels") ## ----------------------------------------------------------------------------- plot(res, what="weights.features", Which=c(4,5,11,12)) ## ----------------------------------------------------------------------------- plot(res, what="weights.levels", Which=c(11,12)) ## ----------------------------------------------------------------------------- plot(res, what="sel.features") plot(res, what="sel.levels") ## ----------------------------------------------------------------------------- plot(res, what="expl.var") plot(res, what="w.expl.var") ## ---- message=FALSE----------------------------------------------------------- library(mclust) sapply(c(1,5,6), function(x) {adjustedRandIndex(res$cluster[,x],HDdata$HD)}) table(HDdata$HD, res$cluster[,1]) table(HDdata$HD, res$cluster[,5]) table(HDdata$HD, res$cluster[,6]) ## ----------------------------------------------------------------------------- info_clust(res, 5, X = HDdata) ## ---- include=FALSE----------------------------------------------------------- options(old)
/scratch/gouwar.j/cran-all/cranData/vimpclust/inst/doc/sparsewkm.R
--- title: "Sparse weighted k-means for mixed data" author: "Marie Chavent and Alex Mourer and Madalina Olteanu" date: "`r Sys.Date()`" output: html_vignette: toc: no header-includes: - \usepackage{bbm} bibliography: bibESANN.bib link-citations: yes vignette: > %\VignetteIndexEntry{Sparse weighted k-means for mixed data} %\usepackage[UTF-8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE,eval=TRUE,fig.align="center",fig.width = 7,fig.height = 5) old <- options(digits = 2) ``` $\DeclareMathOperator{\R}{\mathbb{R}}$ ## Basic function description `sparsewkm` is designed for performing sparse clustering of a dataset described by numerical, categorical, or mixed features. In this respect, it generalizes the sparse $k$-means algorithm introduced in @sparsekmeans for numerical features only. The implementation is based on the optimization of a penalized between-class variance criterion. If the features used for clustering are numerical only, the algorithm reduces to the one in @sparsekmeans. If some or all the features are categorical, `sparsewkm` transforms the data using a factor analysis step (see @chavent2014multivariate for the preprocessing), and then trains a group-sparse $k$-means algorithm, each group being defined by the levels of one specific feature. For more technical details on the cost function and the optimization procedure, one may refer to @Sparsegroupkmeans. ### Arguments Several arguments may be passed to `sparsewkm`, but only the first two are required: * `X` is the data to be clustered. It should be a data frame, and the categorical features should be provided as factors. Only the features one would include in the clustering should be present. Column and row names may be supplied to ease the interpretation. * `centers` is the number of clusters to be computed. The rest of the arguments are related to the choices of the regularization parameter, or the number of iterations and random starts in the algoritm. Default values are fixed for these parameters, one may see `help(sparsewkm)` for further details. ### Output The `sparsewkm` function returns an object of class `spwkm` (see `help(sparsewkm)` for further details on this class). ## A case study: `HDdata` dataset The `HDdata` consists of 270 patients described by six numerical features and eight categorical ones. It was sampled from the [Cleveland Heart Disease Data](https://archive.ics.uci.edu/ml/datasets/Heart+Disease) found in the UCI machine learning repository. Further details about this dataset may be found with `help(HDdata)`. ```{r} library(vimpclust) head(HDdata) ``` ### Training the `sparsewkm` function The `sparsewkm` function is applied to `HDdata` on all features except the last one, `HD`, which codes for the presence or the absence of a heart disease. `HD` was removed from the clustering, and will be used later as a control variable. Since the control variable has only two classes, the number of clusters is set to 2 with the argument `centers`. We shall check after the clustering procedure whether the algorithm retrieves these two classes. \donttest{ ```{r, echo = FALSE, eval = TRUE} res <- sparsewkm(X = HDdata[,-14], centers = 2) ``` } ```{r, echo = TRUE, eval = FALSE} res <- sparsewkm(X = HDdata[,-14], centers = 2) ``` According to the above, the algorithm converged for all values of `lambda`. 
In some cases, the stopping criterion may not be satisfied and the convergence over the weights `w` might not be achieved. If this were the case, one should increase the number of iterations `itermaxw`, which by default is set to 20. Note however that setting a larger value for this parameter would increase the computational time, since a full $k$-means algorithm is trained at each iteration. ### Results The weights associated to each feature may be found in the matrix `W`. These weights illustrate the contribution of each feature, numerical or categorical, to the clustering, and may be seen as a measure of the relative importance of each feature in the clustering. Each column of `W` contains the weights computed for a given value of the regularization parameter `lambda`. The default setting in the `sparsewkm` function selects 20 values for `lambda`, chosen uniformly between 0 (no regularization) and a maximum value automatically tuned. In the following, only the weights associated to the first 5 values of `lambda` are displayed. ```{r} res$W[,1:5] ``` One may see that, as `lambda` increases, the weights of some features are progressively set to 0. The evolution of the **regularization paths** for the features used in the clustering may be illustrated using the `plot` function for the `spwkm` class. With the default settings of this implementation, the paths associated to **numerical features** are drawn with **continuous lines**, while those associated to **categorical features** are drawn with **dotted lines**. According to the results, the numerical features `maxhr` and `oldpeak`, and the categorical features `slope` and `exang` appear as the most discriminant for small values of `lambda`. As `lambda` increases, `maxhr` only is selected by the algorithm. ```{r} plot(res, what="weights.features") ``` Furthermore, the weights associated to each level of the categorical features may be also displayed. These weights are stored in the matrix `Wm`. Similarly to `W`, each column of this matrix contains the weights -- associated either to the numerical features or to the levels of the categorical features -- for a given value of `lambda`. We display here these weights asssociated to the first 5 values of `lambda`. ```{r} res$Wm[,1:5] ``` The regularization paths for the levels of the categorical features may be plotted using argument `what=weights.levels` in the `plot` function. This option provides a more detailed image of how each level in a categorical feature contributes to the clustering. One may see here, for instance, that only the first two levels of `slope` and one level of `exang` have particularly significant contributions. ```{r} plot(res, what="weights.levels") ``` Depending on the number of levels in the categorical features, `Wm` may potentially be quite a big matrix. One may chose to plot the regularization paths for some features only, whether numerical, or categorical. For doing this, it is enough to add the argument `Which` in the `plot` function and list the features (one should note here that after training the `sparsewkm` function, the features are ordered differently than in the input data, with the numerical features listed first; the argument `Which` takes into account the order in the `W` output matrix). ```{r} plot(res, what="weights.features", Which=c(4,5,11,12)) ``` For the categorical features, one may equally plot the regularization paths of the associated levels, as we do here for `slope` and `exang`. 
```{r} plot(res, what="weights.levels", Which=c(11,12)) ``` ### Additional plots The number of selected features and the number of selected levels for a given value of the regularization parameter `lambda` provide valuable information. These two criteria may be illustrated using options `what=sel.features` and `what=sel.levels` in the `plot` function. ```{r} plot(res, what="sel.features") plot(res, what="sel.levels") ``` The two curves are very similar and show how the number of selected features relevant for the clustering rapidly decreases with `lambda`. Clustering may also be assessed using criteria based on the evolution of the explained variance. The latter is computed as the ratio between the between-class variance and the global variance in the data, and represents the ratio of information explained by the clustering. The explained variance may be computed on all features used for clustering, without taking the weights into account, or in a weighted fashion, by taking the computed weights into account and thus suppressing all features discarded by the algorithm. In the latter case and for large `lambda`, the explained weighted variance ends up being computed on one feature only, `maxhr`. ```{r} plot(res, what="expl.var") plot(res, what="w.expl.var") ``` These two criteria, combined with the number of selected features, may be used to select the appropriate regularization parameter `lambda`. A **good choice** for `lambda` should preserve a high percentage of variance explained by the clustering, while discarding a large number of features. This amounts to a trade-off between the quality of the model and its parsimony. One may remark that with the fifth value of the regularization parameter, `lambda=0.07`, the number of features is reduced by half, while the loss in the explained variance is close to 1%. One may also notice that the percentage of explained variance is very low for all clusterings, while the percentage of explained weighted variance is significantly larger and increases with the penalty. This amounts to saying that most of the features in the dataset are not discriminant, and the global quality of the clustering, computed on all features, is rather poor. If only the most discriminant features are selected, the explained weighted variance increases considerably, whereas the loss in the unweighted explained variance is minor. Furthermore, if one selects the sixth value of the regularization parameter, `lambda=0.09`, only two features are kept for clustering. The loss in the unweighted explained variance is close to 2%, but the weighted explained variance gains more than 30%. It appears, according to these remarks, that only very few features actually play a significant part in the clustering procedure. If one prefers to discard most of the features, she may keep `maxhr` and `oldpeak`, with an explained variance of roughly 6.2% and an explained weighted variance of roughly 60%. If one wants a larger ratio of explained variance, she would also keep `age`, `numv`, `exang` and `slope` besides `maxhr` and `oldpeak`. In this case, the ratio of explained variance is about 7.5%, while the ratio of explained weighted variance drops to approximately 45%. Furthermore, according to the regularization paths, it appears that these six features are the most discriminant and the most important for the clustering. 
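As a quick check of this discussion, the features retained for a given value of `lambda` may be listed directly from the weight matrix `W`, whose rows are named after the original variables; the short sketch below does this for the fifth and sixth values of the regularization parameter.

```{r}
rownames(res$W)[res$W[,5] > 0]
rownames(res$W)[res$W[,6] > 0]
```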
#### Comparing the clustering with the "ground truth" Since we have a control variable for the `HDdata`, we shall use it to compare three clusterings produced by the algorithm, for three different values of `lambda`. The comparison criterion chosen here is the Adjusted Rand Index (ARI). We shall also display the confusion matrices for the first, fifth and sixth values of `lambda`. ```{r, message=FALSE} library(mclust) sapply(c(1,5,6), function(x) {adjustedRandIndex(res$cluster[,x],HDdata$HD)}) table(HDdata$HD, res$cluster[,1]) table(HDdata$HD, res$cluster[,5]) table(HDdata$HD, res$cluster[,6]) ``` According to these results, the quality of the agreement between the clustering and the control variable decreases with larger penalties and fewer features kept for the clustering. Nevertheless, although the accuracy deteriorates, reducing the number of features by half leads to a loss of only 3.7% in terms of accuracy, while reducing the number of features from 13 to 2 leads to a loss of 7%. It appears here that sparse clustering may be particularly useful if one wishes to find a trade-off between clustering quality and dimensionality, while putting forward the features which contribute most to the clustering. We shall also mention here that the comparison with the control variable is done for illustration purposes only. Since sparse weighted $k$-means is an unsupervised method, there is no reason the clustering it builds corresponds to some prior partitioning defined by some control variable. #### Cluster composition Finally, once one selects an appropriate regularization parameter `lambda` and the associated clustering, she may consider cluster composition. Using the function `info_clust` in the package, she may display the feature distributions within the clusters (averages for numerical features, frequencies for categorical ones). This first insight may be further completed with a more thorough analysis, for instance using analysis of variance. ```{r} info_clust(res, 5, X = HDdata) ``` ```{r, include=FALSE} options(old) ``` # Bibliography
/scratch/gouwar.j/cran-all/cranData/vimpclust/inst/doc/sparsewkm.Rmd
--- title: "Group-sparse weighted k-means for numerical data" author: "Marie Chavent and Alex Mourer and Madalina Olteanu" date: "`r Sys.Date()`" output: html_vignette: toc: no header-includes: - \usepackage{bbm} bibliography: bibESANN.bib link-citations: yes vignette: > %\VignetteIndexEntry{Group-sparse weighted k-means for numerical data} %\usepackage[UTF-8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE,eval=TRUE,fig.align="center",fig.width = 7,fig.height = 5) old <- options(digits = 2) ``` $\DeclareMathOperator{\R}{\mathbb{R}}$ ## Basic function description `groupsparsewkm` is designed for clustering numerical data described by a set of features, while simultaneously selecting the most discriminant groups of features. The groups of features are supposed priorly known and provided as an argument to the function. This implementation generalizes the sparse $k$-means algorithm introduced in @sparsekmeans, and is based on the optimization of a penalized weighted between-class variance criterion. For more technical details on the penalty term and on the optimization procedure, one may refer to @Sparsegroupkmeans. ### Arguments Several arguments may be passed to `groupsparsewkm`, but only the first two are required: * `X` is the numerical data that will be clustered. It should have the format of a matrix or a data frame, and all entries should be numerical features. Only the features one would include in the clustering should be present. Column and row names may be supplied to ease the interpretation; * `centers` is the number of clusters to be computed. The rest of the arguments are related to the choices of the regularization parameter, the prior partitioning of the features into groups, the number of iterations and random starts in the algoritm or the fact of scaling the data. Default values are fixed for these parameters, one may see `help(groupsparsewkm)` for further details. ### Output The `groupsparsewkm` function returns an object of class `spwkm` (see `help(groupsparsewkm)` for further details on this class). ## A case study: `Mice` dataset The `DataMice` dataset consists of repeated measurements of 68 proteins on a sample of 72 mice (12 or 15 values independently measured for each protein). The [data](https://archive.ics.uci.edu/ml/datasets/Mice+Protein+Expression#) was first described in @Higuera, and it was processed here in order to discard some proteins and measurements containing missing data. ```{r} library(vimpclust) data(DataMice) DataMice[1:10, 1:4] ``` The data may be priorly split as follows: * According to the `Genotype`, 38 mice are in the control (c) group, and 34 in the trisomic (t) group; * According to the `Behavior`, 35 mice were stimulated to learn (CS), and 37 were not (SC); * According to the `Treatment`, 38 mice received one (m), and 34 did not (s). When mixing all the information, the data may be priorly split into 8 clusters, with the following frequencies: ```{r} summary(DataMice$Class.mouse) ``` Further details about this dataset may be found with `help(DataMice)`. ### Training the `groupsparsewkm` function For this dataset, the groups of features may be naturally defined: each protein represents a group, and the measurements associated to it represent the features of the group. The `index` vector, containing the index of the group associated to each feature, is created using the column names of `DataMice`. 
```{r, message=FALSE, warning=FALSE} names(DataMice)[1:20] index <- unlist(strsplit(names(DataMice)[1:(dim(DataMice)[2]-5)], split="_")) index <- as.numeric(index[(1:length(index)%%4==2)]) ``` The number of clusters, `centers` is fixed to 8, based on the additional prior knowledge on the dataset that was described above. Although there is no reason for an unsupervised method to retrieve the partitioning defined by some supplementary features, the comparison of the unsupervised clustering with a prior partitioning is interesting for illustration. All features are scaled to have zero mean and unit variance with the default setting of the `groupsparsewkm` function. \donttest{ ```{r, message=FALSE, warning=FALSE, echo = T} set.seed(1407) res.mice <- groupsparsewkm(X = DataMice[,(1:length(index))], centers = 8, index = index, verbose = 1) ``` } ```{r, eval = F} set.seed(1407) res.mice <- groupsparsewkm(X = DataMice[,(1:length(index))], centers = 8, index = index, verbose = 1) ``` According to the above, the algorithm converged for all values of `lambda`. In some cases, the stopping criterion may not be satisfied and the convergence over the weights `w` might not be achieved. If this were the case, one should increase the number of iterations `itermaxw`, which by default is set to 20. Note however that setting a larger value for this parameter would increase the computational time, since a full $k$-means algorithm is trained at each iteration. ### Results The weights associated to each group of features may be found in the matrix `Wg`. These weights illustrate the contribution of each group of features to the clustering and may be seen as a measure of the importance of each group for the clustering. Each column of `Wg` contains the weights computed for a given value of the regularization parameter `lambda`. The default setting in the `groupsparsewkm` function selects 20 values for the regularization pameter `lambda`, chosen uniformly between 0 (no regularization) and a maximum value automatically tuned. ```{r} res.mice$Wg[1:20,1:5] ``` Each row of `Wg` contains the weights associated to a group of features for all the values of the regularization parameter, or the so-called **regularization paths**. These may be further illustrated graphically, by calling the `plot` function for the `spwkm` class (see `help(plot.spwkm)` for more details). ```{r} plot(res.mice, what="weights.groups") ``` For the `Mice` data, one may see that four proteins particularly (associated to groups 1, 2, 11 and 21) appear as the most discriminant, as the regularization parameter increases. Other proteins, such as the one associated to group 30 have an interesting behaviour: its weight becomes large for large values of the regularization term, thus for a heavy penalty term. Indeed, at the 13th value of the `lambda` a significant drop in the number of selected groups occurs, from 41 selected groups to 13. It is at this point that group 30 becomes extremely significant for the clustering. If one wants to look into more details and assess the respective contribution of each feature, she may look at the matrix `W`, which contains the weights associated to each feature. These weights may be read as the relative importance of each feature for the clustering. Depending on the number of features in each group, `W` may potentially be a large matrix, and one may also want to focus on the features belonging to non-zero weigthed groups. 
```{r} res.mice$W[1:20,1:5] ``` The regularization path for each feature may be also illustrated using the `plot` function. For the `Mice` dataset, one may easily see that the features within each group are quite redundant (let us recall here that one group is made of repeated measurements of the same protein), their regularization paths being very similar. ```{r, message=FALSE, warning=FALSE} plot(res.mice, what = "weights.features") ``` By specifying the supplementary argument `Which` in the `plot` function, one may focus on the regularization paths of specific groups of features, represented either as the regularization path of the group, or the regularization paths of the corresponding features. Here below, proteins 1, 2 and 30 were selected for illustration. ```{r, message=FALSE, warning=FALSE} plot(res.mice, what = "weights.groups", Which=c(1,2,30)) ``` ```{r, message=FALSE, warning=FALSE} plot(res.mice, what = "weights.features", Which=c(1,2,30)) ``` #### Additional plots A valuable piece of information is given by the number of selected groups or the number of selected features for a given value of the regularization parameter `lambda`. The evolution of the number of features may be graphically illustrated as follows: ```{r} plot(res.mice, what="sel.groups") ``` ```{r} plot(res.mice, what="sel.features") ``` Since the measurements for each protein are quite redundant, and the number of measurements for each protein quite similar, the curves representing the evolution of the selected groups and of the selected features are very similar. A significant drop may be noticed after the 12th value of `lambda`, where only 13 proteins among the 68 are preserved for clustering. Besides the selected number of groups of features or the selected number of features, the evolution of some criteria related to the quality of the clustering are equally important for understanding and commenting the results. For example, using the argument `what="expl.var"` in the `plot` function, one may illustrate the evolution of the explained variance, as a function of the regularization parameter `lambda`. The explained variance is computed as the ratio between the between-class variance and the global variance in the data, and represents the ratio of information explained by the clustering. Let us remark that this criterion is independent of the sparse algorithm trained here, which maximizes the weighted between sum-of-squares penalized by a group-penalty term. The explained variance, computed on all features and without applying any weights, illustrates how the information is preserved when discarding an increasing number of features. ```{r} plot(res.mice, what="expl.var") ``` The number-of-selected-groups curve and the explained-variance curve may be used to select the appropriate regularization parameter `lambda` for the clustering. A **good choice** of `lambda` should preserve a high percentage of variance explained by the clustering, while discarding a large number of features. This actually amounts to a trade-off between the quality of the model and its relative parcimony. With this is mind, one may easily see that by selecting `lambda=0.45`, the explained variance remains very close to when using all features, whereas the number of groups and the number of features was reduced by a third. 
For `lambda=0.48`, if one accepts to "loose" a third of the explained variance (while remaining above 30%), the number of groups and the number of features may be reduced by more than 80% (13 groups are preserved among 68, and 171 features among 900). Hence, one may use this algorithm for drastically reducing the dimensionality of the data, while still preserving a significant clustering. Another graphical option includes the illustration of the weighthed explained variance (the weights computed by the algorithm are taken into account and applied to the features when computing the between sum-of-squares and the total sum-of-squares). In this case, since the criterion takes into account the most discriminant features only, it is increasing with the penalty, and a significant jump may be seen at the same spot as for the explained variance, except that here, the weighted explained variance improves by more than 20%. ```{r} plot(res.mice, what="w.expl.var") ``` Other criteria such as the gap statistic could be efficiently used for selecting the regularization parameter (and also the number of clusters). They are not implemented here (**yet!**), but may be easily retrived in other packages and combined with `spwkm` objects. #### Comparing the clustering with the "ground truth" As mentioned above, the `DataMice` observations are known to belong to some priorly defined clusters, defined by the `Genotype`, `Treatment` or `Behaviour`. In order to compare the resulting clusterings of the group-sparse algorithm (for the various values of `lambda`) with the priorly defined clusters, the Adjusted Rand Index (ARI) is computed here below. ```{r, message=FALSE} sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Class.mouse)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Genotype)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Treatment)}) sapply(1:length(res.mice$lambda), function(x) {mclust::adjustedRandIndex(res.mice$cluster[,x],DataMice$Behaviour)}) ``` ```{r, include=FALSE} options(old) ``` According to the above values, the 8 clusters computed with the sparse-group $k$-means algorithm are not much related to the 8 priorly defined clusters. As we've already mentioned, there is no prior reason for correlation between the clustering output and the partitioning defined by the `Genotype`, the `Treatment` and the `Behaviour`. The clusters identified by the algorithm may correspond to a completely different structure in the data. Nevertheless, we should mention here that the proteins identified by the algorithm as the most discriminant or having significant weights for all values of `lambda` -- groups 1, 2, 10, 11, 21, 25, 30, 32, 68 -- correspond to those identified in @Higuera as discriminant for `Genotype`, `Treatment` or `Behaviour`. Furthermore, the algorithm implemented in `groupsparsewkm` has also the advantage of fully selecting or discarding one protein and its associated measurements thanks to the group approach. Group-sparse clustering is thus offering a complete approach for both clustering and selecting the most discriminant groups of features. # Bibliography
/scratch/gouwar.j/cran-all/cranData/vimpclust/vignettes/groupsparsewkm.Rmd
--- title: "Sparse weighted k-means for mixed data" author: "Marie Chavent and Alex Mourer and Madalina Olteanu" date: "`r Sys.Date()`" output: html_vignette: toc: no header-includes: - \usepackage{bbm} bibliography: bibESANN.bib link-citations: yes vignette: > %\VignetteIndexEntry{Sparse weighted k-means for mixed data} %\usepackage[UTF-8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE,eval=TRUE,fig.align="center",fig.width = 7,fig.height = 5) old <- options(digits = 2) ``` $\DeclareMathOperator{\R}{\mathbb{R}}$ ## Basic function description `sparsewkm` is designed for performing sparse clustering of a dataset described by numerical, categorical, or mixed features. In this respect, it generalizes the sparse $k$-means algorithm introduced in @sparsekmeans for numerical features only. The implementation is based on the optimization of a penalized between-class variance criterion. If the features used for clustering are numerical only, the algorithm reduces to the one in @sparsekmeans. If some or all the features are categorical, `sparsewkm` transforms the data using a factor analysis step (see @chavent2014multivariate for the preprocessing), and then trains a group-sparse $k$-means algorithm, each group being defined by the levels of one specific feature. For more technical details on the cost function and the optimization procedure, one may refer to @Sparsegroupkmeans. ### Arguments Several arguments may be passed to `sparsewkm`, but only the first two are required: * `X` is the data to be clustered. It should be a data frame, and the categorical features should be provided as factors. Only the features one would include in the clustering should be present. Column and row names may be supplied to ease the interpretation. * `centers` is the number of clusters to be computed. The rest of the arguments are related to the choices of the regularization parameter, or the number of iterations and random starts in the algoritm. Default values are fixed for these parameters, one may see `help(sparsewkm)` for further details. ### Output The `sparsewkm` function returns an object of class `spwkm` (see `help(sparsewkm)` for further details on this class). ## A case study: `HDdata` dataset The `HDdata` consists of 270 patients described by six numerical features and eight categorical ones. It was sampled from the [Cleveland Heart Disease Data](https://archive.ics.uci.edu/ml/datasets/Heart+Disease) found in the UCI machine learning repository. Further details about this dataset may be found with `help(HDdata)`. ```{r} library(vimpclust) head(HDdata) ``` ### Training the `sparsewkm` function The `sparsewkm` function is applied to `HDdata` on all features except the last one, `HD`, which codes for the presence or the absence of a heart disease. `HD` was removed from the clustering, and will be used later as a control variable. Since the control variable has only two classes, the number of clusters is set to 2 with the argument `centers`. We shall check after the clustering procedure whether the algorithm retrieves these two classes. \donttest{ ```{r, echo = FALSE, eval = TRUE} res <- sparsewkm(X = HDdata[,-14], centers = 2) ``` } ```{r, echo = TRUE, eval = FALSE} res <- sparsewkm(X = HDdata[,-14], centers = 2) ``` According to the above, the algorithm converged for all values of `lambda`. 
In some cases, the stopping criterion may not be satisfied and the convergence over the weights `w` might not be achieved. If this is the case, one should increase the number of iterations `itermaxw`, which by default is set to 20. Note however that setting a larger value for this parameter would increase the computational time, since a full $k$-means algorithm is trained at each iteration. ### Results The weights associated to each feature may be found in the matrix `W`. These weights illustrate the contribution of each feature, numerical or categorical, to the clustering, and may be seen as a measure of the relative importance of each feature in the clustering. Each column of `W` contains the weights computed for a given value of the regularization parameter `lambda`. The default setting in the `sparsewkm` function selects 20 values for `lambda`, chosen uniformly between 0 (no regularization) and a maximum value automatically tuned. In the following, only the weights associated to the first 5 values of `lambda` are displayed. ```{r} res$W[,1:5] ``` One may see that, as `lambda` increases, the weights of some features are progressively set to 0. The evolution of the **regularization paths** for the features used in the clustering may be illustrated using the `plot` function for the `spwkm` class. With the default settings of this implementation, the paths associated to **numerical features** are drawn with **continuous lines**, while those associated to **categorical features** are drawn with **dotted lines**. According to the results, the numerical features `maxhr` and `oldpeak`, and the categorical features `slope` and `exang` appear as the most discriminant for small values of `lambda`. As `lambda` increases, only `maxhr` is selected by the algorithm. ```{r} plot(res, what="weights.features") ``` Furthermore, the weights associated to each level of the categorical features may also be displayed. These weights are stored in the matrix `Wm`. Similarly to `W`, each column of this matrix contains the weights -- associated either to the numerical features or to the levels of the categorical features -- for a given value of `lambda`. We display here the weights associated to the first 5 values of `lambda`. ```{r} res$Wm[,1:5] ``` The regularization paths for the levels of the categorical features may be plotted using the argument `what="weights.levels"` in the `plot` function. This option provides a more detailed image of how each level in a categorical feature contributes to the clustering. One may see here, for instance, that only the first two levels of `slope` and one level of `exang` have particularly significant contributions. ```{r} plot(res, what="weights.levels") ``` Depending on the number of levels in the categorical features, `Wm` may potentially be quite a large matrix. One may choose to plot the regularization paths for some features only, whether numerical or categorical. For doing this, it is enough to add the argument `Which` in the `plot` function and list the features (one should note here that after training the `sparsewkm` function, the features are ordered differently than in the input data, with the numerical features listed first; the argument `Which` takes into account the order in the `W` output matrix). ```{r} plot(res, what="weights.features", Which=c(4,5,11,12)) ``` For the categorical features, one may equally plot the regularization paths of the associated levels, as we do here for `slope` and `exang`. 
```{r} plot(res, what="weights.levels", Which=c(11,12)) ``` ### Additional plots The number of selected features and the number of selected levels for a given value of the regularization parameter `lambda` provide valuable information. These two criteria may be illustrated using the options `what="sel.features"` and `what="sel.levels"` in the `plot` function. ```{r} plot(res, what="sel.features") plot(res, what="sel.levels") ``` The two curves are very similar and show how the number of selected features relevant for the clustering rapidly decreases with `lambda`. Clustering may also be assessed using criteria based on the evolution of the explained variance. The latter is computed as the ratio between the between-class variance and the global variance in the data, and represents the ratio of information explained by the clustering. The explained variance may be computed on all features used for clustering, without taking the weights into account, or in a weighted fashion, by taking the computed weights into account and thus suppressing all features discarded by the algorithm. In the latter case and for large `lambda`, the explained weighted variance ends up being computed using one feature only, `maxhr`. ```{r} plot(res, what="expl.var") plot(res, what="w.expl.var") ``` These two criteria, combined with the number of selected features, may be used to select the appropriate regularization parameter `lambda`. A **good choice** for `lambda` should preserve a high percentage of variance explained by the clustering, while discarding a large number of features. This amounts to a trade-off between the quality of the model and its parsimony. One may remark that with the fifth value of the regularization parameter, `lambda=0.07`, the number of features is reduced by half, while the loss in the explained variance is close to 1%. One may also notice that the percentage of explained variance is very low for all clusterings, while the percentage of explained weighted variance is significantly larger and increases with the penalty. This amounts to saying that most of the features in the dataset are not discriminant, and the global quality of the clustering, computed on all features, is rather poor. If only the most discriminant features are selected, the explained weighted variance largely increases, whereas the loss in the unweighted explained variance is minor. Furthermore, if one selects the sixth value of the regularization parameter, `lambda=0.09`, only two features are kept for clustering. The loss in the unweighted explained variance is close to 2%, but the weighted explained variance gains more than 30%. It appears, according to these remarks, that only very few features actually play a significant part in the clustering procedure. If one prefers to discard most of the features, she may keep `maxhr` and `oldpeak`, with an explained variance of roughly 6.2% and an explained weighted variance of roughly 60%. If one wants a larger ratio of explained variance, she would also keep `age`, `numv`, `exang` and `slope` besides `maxhr` and `oldpeak`. In this case, the ratio of explained variance is about 7.5%, while the ratio of explained weighted variance drops to approximately 45%. Furthermore, according to the regularization paths, it appears that these six features are the most discriminant and the most important for the clustering. 
#### Comparing the clustering with the "ground truth" Since we have a control variable for the `HDdata`, we shall use it to compare three clusterings produced by the algorithm, for three different values of `lambda`. The comparison criterion chosen here is the Adjusted Rand Index (ARI). We shall also display the confusion matrices for the first, fifth and sixth values of `lambda`. ```{r, message=FALSE} library(mclust) sapply(c(1,5,6), function(x) {adjustedRandIndex(res$cluster[,x],HDdata$HD)}) table(HDdata$HD, res$cluster[,1]) table(HDdata$HD, res$cluster[,5]) table(HDdata$HD, res$cluster[,6]) ``` According to these results, the quality of the agreement between the clustering and the control variable decreases with larger penalties and fewer features kept for the clustering. Nevertheless, although the accuracy deteriorates, reducing the number of features by half leads to a loss of only 3.7% in terms of accuracy, while reducing the number of features from 13 to 2 leads to a loss of 7%. It appears here that sparse clustering may be particularly useful if one wishes to find a trade-off between clustering quality and dimensionality, while putting forward the features which contribute most to the clustering. We shall also mention here that the comparison with the control variable is done for illustration purposes only. Since sparse weighted $k$-means is an unsupervised method, there is no reason the clustering it builds corresponds to some prior partitioning defined by some control variable. #### Cluster composition Finally, once one selects an appropriate regularization parameter `lambda` and the associated clustering, she may consider cluster composition. Using the function `info_clust` in the package, she may display the feature distributions within the clusters (averages for numerical features, frequencies for categorical ones). This first insight may further be completed with a more thorough analysis, using analysis of variance, etc. ```{r} info_clust(res, 5, X = HDdata) ``` ```{r, include=FALSE} options(old) ``` # Bibliography
/scratch/gouwar.j/cran-all/cranData/vimpclust/vignettes/sparsewkm.Rmd
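The accuracy figures quoted in the vignette above (a loss of roughly 3.7% and 7%) can be read off the confusion matrices by matching each cluster to its majority class. The helper below is only a sketch of that computation, not part of the package, and may differ from however the authors obtained their numbers; `res` and `HDdata` are the objects used throughout the vignette.

```r
# Majority-class matching accuracy from a cluster-vs-class confusion matrix.
cluster_accuracy <- function(truth, cluster) {
  tab <- table(truth, cluster)
  sum(apply(tab, 2, max)) / sum(tab)
}
# accuracy for the first, fifth and sixth values of lambda
sapply(c(1, 5, 6), function(j) cluster_accuracy(HDdata$HD, res$cluster[, j]))
```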
#' Build a NHTSA URL #' #' @description #' #' A family of functions to build URLs for the National Highway Transportation #' Safety Administration (NHTSA) vehicle identification number (VIN) decoder API. #' #' The `build_nhtsa_url()` function returns a closure containing the appropriate #' endpoint and file format request to pass to the NHTSA VIN API. #' #' * `build_vin_url()` takes a single VIN in a character string and returns #' an appropriately-formatted url for a NHTSA API request via the #' /vehicles/DecodeVINValues/ endpoint. #' #' * `build_vin_batch_url()` takes up to 50 VINs in a character vector and #' returns an appropriately-formatted url for a NHTSA API request via the #' /vehicles/DecodeVINValuesBatch/ endpoint. #' #' @param endpoint a string containing the appropriate endpoint. Candidate #' endpoints can be found at https://vpic.nhtsa.dot.gov/api/ #' @param format the file format to return from the API, one of 'json', 'xml', #' or 'csv'. Defaults to 'json'. #' @param vin a string containing the VIN to query. #' @param ... additional arguments passed on to derived builder functions #' @return #' * `build_nhtsa_url()` returns a function which will in turn build a url which #' points to the specified endpoint on the NHTSA API #' #' * `build_vin_url()` returns a url as a string, formatted to query the NHTSA #' `DecodeVINValues` endpoint and decode a single VIN. #' * `build_vin_batch_url()` returns a url as a string, formatted to query the NHTSA #' `DecodeVINValuesBatch` endpoint and decode multiple VINs in one call. #' #' @export #' #' @examples #' vin_url_xml <- build_nhtsa_url("/vehicles/DecodeVINValues/", format = "xml") #' build_vin_url("3VWLL7AJ9BM053541") #' build_vin_batch_url(c("3VWLL7AJ9BM053541", "JH4KA3140KC015221")) build_nhtsa_url <- function(endpoint, format = "json", ...) { baseurl <- "https://vpic.nhtsa.dot.gov/api" function(vin, ...) { paste0(baseurl, endpoint, vin, "?format=", format, ...) } } #' @rdname build_nhtsa_url #' @export build_vin_url <- build_nhtsa_url(endpoint = "/vehicles/DecodeVINValues/") #' @rdname build_nhtsa_url #' @export build_vin_batch_url <- build_nhtsa_url(endpoint = "/vehicles/DecodeVINValuesBatch/")
/scratch/gouwar.j/cran-all/cranData/vindecodr/R/build_nhtsa_url.R
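Because `build_nhtsa_url()` returns a closure, new endpoint-specific builders can be created in one line. The sketch below assumes a `/vehicles/DecodeWMI/` endpoint; check https://vpic.nhtsa.dot.gov/api/ for the current endpoint names before relying on it.

```r
# A builder for another vPIC endpoint, reusing the closure above.
# The endpoint path is an assumption; verify it against the vPIC API listing.
build_wmi_url <- build_nhtsa_url(endpoint = "/vehicles/DecodeWMI/", format = "json")
build_wmi_url("1FD")
# paste0() above yields "https://vpic.nhtsa.dot.gov/api/vehicles/DecodeWMI/1FD?format=json"
```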
#' Verify VIN Validity #' #' Examines provided VINs for valid length, characters, and check digit. #' #' @param vin A character vector of VINs to check. Wildcards (e.g. *) are NOT allowed. #' @param guess Logical. Should values for illegal characters be guessed? #' #' @return A logical vector of same length as the input vector. #' @export #' #' @examples #' vins <- c("WDBEA30D3HA391172", "3VWLL7AJ9BM053541") #' check_vin(vins) check_vin <- function(vin, guess = FALSE) { if (requireNamespace("purrr", quietly = TRUE)) { check_vin_purrr(vin, guess = guess) } else { check_vin_no_purrr(vin, guess = guess) } } #' Verify VIN Validity Using Purrr #' #' @inheritParams check_vin check_vin_purrr <- function(vin, guess = FALSE) { # check for length purrr::map_lgl(vin, valid_vin_format, check_chars = !guess) # validate the check digit val <- purrr::map_lgl(vin, valid_check_digit, guess = guess) return(val) } #' Verify VIN Validity Without Purrr #' #' @inheritParams check_vin check_vin_no_purrr <- function(vin, guess = FALSE) { # check lengths len_ok <- c() for (v in vin) { len_ok <- append(len_ok, valid_vin_format(v, check_chars = !guess)) } # validate the check digit res <- c() for (v in vin) { res <- append(res, valid_check_digit(v, guess = guess)) } return(res) }
/scratch/gouwar.j/cran-all/cranData/vindecodr/R/check_vin.R
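The base-R fallback above grows its result with `append()` inside a loop. A `vapply()` version keeps the same behaviour (format errors are still thrown by `valid_vin_format()`) without repeated copying; this is only a sketch of an alternative, not the package implementation.

```r
# vapply-based variant of check_vin_no_purrr(); relies on the same helpers.
check_vin_base <- function(vin, guess = FALSE) {
  # format check: throws an informative error on invalid VINs
  vapply(vin, valid_vin_format, logical(1), check_chars = !guess)
  # check-digit validation, returned without names to match the original
  unname(vapply(vin, valid_check_digit, logical(1), guess = guess))
}
```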
#' Use the NHTSA API to Decode VINs #' #' @param vin either a single vehicle identification number in a character #' string, or multiple vehicle identification numbers in a character vector. #' @param ... additional arguments passed to the url builder functions. #' #' @return a data frame with the VIN, Make, Model, Model Year, Fuel Type, and #' Gross Vehicle Weight Rating (GVWR) for the specified VINs. #' @export #' #' @examples #' \dontrun{ #' # Decode a single VIN: #' decode_vin("JHLRD68404C018253") #' #' # Decode multiple VINs: #' decode_vin(c("JHLRD68404C018253", "JH4DA9450MS001229")) #' } decode_vin <- function(vin, ...) { if (length(vin) == 1) { response <- httr::GET(build_vin_url(vin, ...)) } else { vins <- paste(vin, collapse = ";") response <- httr::POST(build_vin_batch_url(vins, ...)) } if (response$status_code != 200) { msg <- paste("API responded with status code", response$status_code) stop(msg) } con <- httr::content(response)$Results if (requireNamespace("purrr", quietly = TRUE)) { VIN <- purrr::map_chr(con, "VIN") make <- purrr::map_chr(con, "Make") model <- purrr::map_chr(con, "Model") model_year <- purrr::map_chr(con, "ModelYear") fuel_type <- purrr::map_chr(con, "FuelTypePrimary") GVWR <- purrr::map_chr(con, "GVWR") } else { VIN <- c() make <- c() model <- c() model_year <- c() fuel_type <- c() GVWR <- c() for (i in seq_along(con)) { VIN <- append(VIN, con[[i]]$VIN) make <- append(make, con[[i]]$Make) model <- append(model, con[[i]]$Model) model_year <- append(model_year, con[[i]]$ModelYear) fuel_type <- append(fuel_type, con[[i]]$FuelTypePrimary) GVWR <- append(GVWR, con[[i]]$GVWR) } } data.frame(VIN, make, model, model_year, fuel_type, GVWR) }
/scratch/gouwar.j/cran-all/cranData/vindecodr/R/decode_vin.R
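`decode_vin()` sends all VINs in a single batch request, while `build_vin_batch_url()` is documented to accept at most 50 VINs. A small wrapper, sketched below and not part of the package, splits longer inputs into chunks of 50 and binds the results.

```r
# Decode an arbitrary number of VINs by chunking batch requests of 50.
decode_vin_chunked <- function(vins, chunk_size = 50, ...) {
  chunks <- split(vins, ceiling(seq_along(vins) / chunk_size))
  do.call(rbind, lapply(chunks, decode_vin, ...))
}
```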
#' Replace a Letter in a Character Vector #' #' @param .string character vector #' @param .target character to replace #' @param .replacement character to substitute #' #' @return the modified string #' swap_letter <- function(.string, .target, .replacement) { .string[which(.string == .target)] <- .replacement .string } #' Replace Multiple Letters in a Character Vector #' #' @param .string character vector #' @param .targets characters to replace #' @param .replacements characters to substitute #' #' @return the modified string #' swap_map <- function(.string, .targets, .replacements) { if (length(.targets) != length(.replacements)) { stop("Target and replacement vectors must be of the same length.") } for (i in seq_along(.targets)) { .string <- swap_letter(.string, .targets[i], .replacements[i]) } .string }
/scratch/gouwar.j/cran-all/cranData/vindecodr/R/tools.R
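`swap_map()` operates on a vector of single characters, as produced by `strsplit()`. The example below replays the I/O/Q substitution used by `valid_check_digit(..., guess = TRUE)` on the VIN from that function's examples.

```r
# Replace the disallowed characters I, O, Q by their likely intended digits.
chars <- strsplit("WDBEA3QD3HA39I172", "")[[1]]
fixed <- swap_map(chars, c("I", "O", "Q"), c("1", "0", "0"))
paste(fixed, collapse = "")  # "WDBEA30D3HA391172"
```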
#' Check for Valid VIN Check Digit #' #' Calculates the VIN check digit and compares it to VIN position 9. #' For US-based VINs, this determines if the VIN is valid. #' This may not apply to VINs from outside of the United States. #' #' @param vin character. The VIN to check. VINs must be complete, i.e. 17 characters #' with no wildcards. #' @param value logical. Should the calculated check digit be returned instead? #' @param guess logical. Should incorrect characters be replaced by the best #' guess at corrected characters? #' O -> 0 #' I -> 1 #' Q -> 0 #' #' @return If `value` is `FALSE`, a logical value is returned. #' If `value` is `TRUE`, a character is returned. #' @export #' #' @examples #' valid_check_digit("WDBEA30D3HA391172") # True #' valid_check_digit("WDBEA30D3HA391172", value = TRUE) #' valid_check_digit("WDBEA3QD3HA39I172", guess = TRUE) valid_check_digit <- function(vin, value = FALSE, guess = FALSE) { vin_letters <- strsplit(toupper(vin), "")[[1]] chk_dgt <- vin_letters[9] # guess at incorrect characters if (guess) { vin_letters <- swap_map(vin_letters, c("I", "O", "Q"), c("1", "0", "0")) } translit_tbl <- data.frame(letter = c(LETTERS, c(0:9)), value = c(c(1, 2, 3, 4, 5, 6, 7, 8, NA, 1, 2, 3, 4, 5, NA, 7, NA, 9, 2, 3, 4, 5, 6, 7, 8, 9), c(0:9))) # weights defined by US DOT pos_weights <- c(8, 7, 6, 5, 4, 3, 2, 10, 0, 9, 8, 7, 6, 5, 4, 3, 2) transliterate_chr <- function(x) { translit_tbl[which(translit_tbl$letter == x), ]$value } # transliterate vin if (requireNamespace("purrr", quietly = TRUE)) { xlit_vin <- purrr::map_dbl(vin_letters, transliterate_chr) } else { xlit_vin <- c() for (ltr in vin_letters) { xlit_vin <- append(xlit_vin, transliterate_chr(ltr)) } } # apply vin weights vin_prod <- xlit_vin * pos_weights # calculate check digit; the VIN standard represents a remainder of 10 by 'X' calc_chk <- as.character(sum(vin_prod) %% 11) if (calc_chk == "10") calc_chk <- "X" if (value) return(calc_chk) return(calc_chk == chk_dgt) }
/scratch/gouwar.j/cran-all/cranData/vindecodr/R/valid_check_digit.R
#' Check VIN Length and Characters #' #' Checks that VINs are 17 characters long and will optionally check that disallowed #' characters (I, O, Q) are not present. #' #' @param vin A character. Should be a properly formatted Vehicle Identification Number. #' Wildcards (e.g., '*') are acceptable. #' @param check_chars Logical. Should an error be thrown if the VIN contains illegal characters? #' #' @return Logical. #' @export #' #' @examples #' # Random VIN #' valid_vin_format("3VWLL7AJ9BM053541") #' # With wild card #' valid_vin_format("3VWLL7AJ9BM*53541") valid_vin_format <- function(vin, check_chars = FALSE) { vin <- toupper(vin) # verify VIN is 17 characters long if (nchar(vin) != 17) { msg <- paste0("Incorrectly formatted VIN at ", vin, ":\n", "VINs must be 17 characters in length, not ", nchar(vin), " characters.") stop(msg) } # check for illegal chars if (check_chars) { if (grepl("[IOQ]", vin)) { chars <- unlist(strsplit(vin, '')) pos <- which(chars %in% c("I", "O", "Q")) msg <- paste("In VIN ", vin, ", disallowed character detected at position", as.character(pos), ":", chars[pos], "\n") stop(msg) } } return(TRUE) }
/scratch/gouwar.j/cran-all/cranData/vindecodr/R/valid_vin_format.R
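The same length-and-alphabet rule can also be expressed as a single regular expression over the legal VIN characters (digits and upper-case letters except I, O and Q). Unlike `valid_vin_format()`, the sketch below rejects wildcards and returns `FALSE` instead of throwing an error.

```r
# Strict, vectorised VIN format check via a regular expression.
is_strict_vin <- function(vin) {
  grepl("^[0-9A-HJ-NPR-Z]{17}$", toupper(vin))
}
is_strict_vin("3VWLL7AJ9BM053541")  # TRUE
is_strict_vin("3VWLL7AJ9BM*53541")  # FALSE: wildcards are not accepted here
```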
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 fit_margins_cpp <- function(data, nlevels, mult, xmin, xmax, bw, deg, weights, num_threads) { .Call(`_vinereg_fit_margins_cpp`, data, nlevels, mult, xmin, xmax, bw, deg, weights, num_threads) } select_dvine_cpp <- function(data, family_set, par_method, nonpar_method, mult, selcrit, weights, psi0, preselect_families, cores, var_types) { .Call(`_vinereg_select_dvine_cpp`, data, family_set, par_method, nonpar_method, mult, selcrit, weights, psi0, preselect_families, cores, var_types) } cond_quantile_cpp <- function(alpha, u, vinecop_r, num_threads) { .Call(`_vinereg_cond_quantile_cpp`, alpha, u, vinecop_r, num_threads) } cond_dist_cpp <- function(u, vinecop_r, num_threads) { .Call(`_vinereg_cond_dist_cpp`, u, vinecop_r, num_threads) } cond_dens_cpp <- function(u, vinecop_r, num_threads) { .Call(`_vinereg_cond_dens_cpp`, u, vinecop_r, num_threads) } cond_loglik_cpp <- function(u, vinecop_r, num_threads) { .Call(`_vinereg_cond_loglik_cpp`, u, vinecop_r, num_threads) } with_parameters_cop_cpp <- function(vinecop_r, parameters) { .Call(`_vinereg_with_parameters_cop_cpp`, vinecop_r, parameters) }
/scratch/gouwar.j/cran-all/cranData/vinereg/R/RcppExports.R
#' Conditional probability integral transform #' #' Calculates the conditional distribution of the response given the covariates. #' #' @param object an object of class \code{vinereg}. #' @param newdata matrix of response and covariate values for which to compute #' the conditional distribution. #' @param cores integer; the number of cores to use for computations. #' #' @export #' #' @examples #' \dontshow{ #' set.seed(1) #' } #' # simulate data #' x <- matrix(rnorm(200), 100, 2) #' y <- x %*% c(1, -2) #' dat <- data.frame(y = y, x = x, z = as.factor(rbinom(100, 2, 0.5))) #' #' # fit vine regression model #' fit <- vinereg(y ~ ., dat) #' #' hist(cpit(fit, dat)) # should be approximately uniform cpit <- function(object, newdata, cores = 1) { newdata <- prepare_newdata(newdata, object, use_response = TRUE) newdata <- to_uscale(newdata, object$margins) cond_dist_cpp(newdata, object$vine, cores) } #' Conditional log-likelihood #' #' Calculates the conditional log-likelihood of the response given the covariates. #' #' @param object an object of class \code{vinereg}. #' @param newdata matrix of response and covariate values for which to compute #' the conditional distribution. #' @param cores integer; the number of cores to use for computations. #' #' @export #' #' @examples #' \dontshow{ #' set.seed(1) #' } #' # simulate data #' x <- matrix(rnorm(200), 100, 2) #' y <- x %*% c(1, -2) #' dat <- data.frame(y = y, x = x, z = as.factor(rbinom(100, 2, 0.5))) #' #' # fit vine regression model #' fit <- vinereg(y ~ ., dat) #' #' cll(fit, dat) #' fit$stats$cll cll <- function(object, newdata, cores = 1) { newdata <- prepare_newdata(newdata, object, use_response = TRUE) ll_marg <- if (inherits(object$margins[[1]], "kde1d")) { sum(log(kde1d::dkde1d(newdata[, 1], object$margins[[1]]))) } else { 0 } newdata <- to_uscale(newdata, object$margins) ll_cop <- cond_loglik_cpp(newdata, object$vine, cores) ll_cop + ll_marg } #' Conditional PDF #' #' Calculates the conditional density of the response given the covariates. #' #' @param object an object of class \code{vinereg}. #' @param newdata matrix of response and covariate values for which to compute #' the conditional density #' @param cores integer; the number of cores to use for computations. #' #' @export #' #' @examples #' \dontshow{ #' set.seed(1) #' } #' # simulate data #' x <- matrix(rnorm(200), 100, 2) #' y <- x %*% c(1, -2) #' dat <- data.frame(y = y, x = x, z = as.factor(rbinom(100, 2, 0.5))) #' #' # fit vine regression model #' fit <- vinereg(y ~ ., dat) #' #' cpdf(fit, dat) cpdf <- function(object, newdata, cores = 1) { newdata <- prepare_newdata(newdata, object, use_response = TRUE) dens_marg <- if (inherits(object$margins[[1]], "kde1d")) { kde1d::dkde1d(newdata[, 1], object$margins[[1]]) } else { 1 } newdata <- to_uscale(newdata, object$margins) cond_dens_cpp(newdata, object$vine, cores) * dens_marg }
/scratch/gouwar.j/cran-all/cranData/vinereg/R/cpit.R
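As the `cpit()` example notes, the conditional PIT values should be approximately uniform when the model fits well. A quick diagnostic, sketched below with the `fit` and `dat` objects from that example, is a Kolmogorov-Smirnov test against the uniform distribution.

```r
# Uniformity diagnostic for the conditional PIT (fit, dat as in the example).
u <- cpit(fit, dat)
stats::ks.test(u, "punif")
hist(u, breaks = 20, main = "conditional PIT", xlab = "u")
```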
#' @export print.vinereg <- function(x, ...) { cat("D-vine regression model: ") n_predictors <- length(x$order) if (n_predictors <= 10) { predictors <- paste(x$order, collapse = ", ") } else { predictors <- paste(x$order[1:10], collapse = ", ") predictors <- paste0(predictors, ", ... (", n_predictors - 10, " more)") } cat(names(x$model_frame)[1], "|", predictors, "\n") stats <- unlist(x$stats[1:5]) stats <- paste(names(stats), round(stats, 2), sep = " = ") cat(paste(stats, collapse = ", "), "\n") invisible(x) } #' @export summary.vinereg <- function(object, ...) { data.frame( var = c(names(object$model_frame)[1], object$order), edf = object$stats$var_edf, cll = object$stats$var_cll, caic = object$stats$var_caic, cbic = object$stats$var_cbic, p_value = object$stats$var_p_value ) } #' Plot marginal effects of a D-vine regression model #' #' The marginal effects of a variable is the expected effect, where expectation #' is meant with respect to all other variables. #' #' @param object a `vinereg` object #' #' @param alpha vector of quantile levels. #' @param vars vector of variable names. #' #' @export #' @examples #' # simulate data #' x <- matrix(rnorm(200), 100, 2) #' y <- x %*% c(1, -2) #' dat <- data.frame(y = y, x = x, z = as.factor(rbinom(100, 2, 0.5))) #' #' # fit vine regression model #' fit <- vinereg(y ~ ., dat) #' plot_effects(fit) plot_effects <- function(object, alpha = c(0.1, 0.5, 0.9), vars = object$order) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("The 'ggplot2' package must be installed to plot.") } mf <- expand_factors(object$model_frame) if (!all(vars %in% colnames(mf)[-1])) { stop( "unknown variable in 'vars'; allowed values: ", paste(colnames(mf)[-1], collapse = ", ") ) } preds <- fitted(object, alpha) preds <- lapply(seq_along(alpha), function(a) cbind(data.frame(alpha = alpha[a], prediction = preds[[a]]))) preds <- do.call(rbind, preds) df <- lapply(vars, function(var) cbind(data.frame(var = var, value = as.numeric(unname(mf[, var])), preds))) df <- do.call(rbind, df) df$value <- as.numeric(df$value) df$alpha <- as.factor(df$alpha) value <- prediction <- NULL # for CRAN checks suppressWarnings( ggplot2::ggplot(df, ggplot2::aes(value, prediction, color = alpha)) + ggplot2::geom_point(alpha = 0.15) + ggplot2::geom_smooth(se = FALSE) + ggplot2::facet_wrap(~var, scale = "free_x") + ggplot2::ylab(quote(Q(y * "|" * x[1] * ",...," * x[p]))) + ggplot2::xlab(quote(x[k])) + ggplot2::theme(legend.position = "bottom") ) }
/scratch/gouwar.j/cran-all/cranData/vinereg/R/generics.R
#' Predict conditional mean and quantiles from a D-vine regression model #' #' @param object an object of class \code{vinereg}. #' @param newdata matrix of covariate values for which to predict the quantile. #' @param alpha vector of quantile levels; `NA` predicts the mean based on an #' average of the `1:10 / 11`-quantiles. #' @param cores integer; the number of cores to use for computations. #' @param ... unused. #' #' @return A data.frame of quantiles where each column corresponds to one #' value of `alpha`. #' #' @examples #' # simulate data #' x <- matrix(rnorm(200), 100, 2) #' y <- x %*% c(1, -2) #' dat <- data.frame(y = y, x = x, z = as.factor(rbinom(100, 2, 0.5))) #' #' # fit vine regression model #' (fit <- vinereg(y ~ ., dat)) #' #' # inspect model #' summary(fit) #' plot_effects(fit) #' #' # model predictions #' mu_hat <- predict(fit, newdata = dat, alpha = NA) # mean #' med_hat <- predict(fit, newdata = dat, alpha = 0.5) # median #' #' # observed vs predicted #' plot(cbind(y, mu_hat)) #' #' ## fixed variable order (no selection) #' (fit <- vinereg(y ~ ., dat, order = c("x.2", "x.1", "z.1"))) #' @seealso \code{\link{vinereg}} #' #' @export #' #' @importFrom kde1d pkde1d qkde1d #' @importFrom stats predict predict.vinereg <- function(object, newdata, alpha = 0.5, cores = 1, ...) { if (missing(newdata)) { return(fitted.vinereg(object, alpha = alpha)) } stopifnot(length(alpha) > 0) if (any(is.na(alpha)) & inherits(object$model_frame[[1]], "ordered")) { stop("cannot predict mean for ordered response.") } # predict the conditional mean if alpha contains NA if (any(is.na(alpha))) { alpha <- alpha[!is.na(alpha)] # remove NA for quantile estimation preds_mean <- predict_mean(object, newdata) } else { preds_mean <- NULL } ## computation of conditional quantiles if (length(alpha) > 0) { stopifnot(is.numeric(alpha), all(alpha > 0), all(alpha < 1)) ## preprocessing newdata <- prepare_newdata(newdata, object) newdata <- to_uscale(newdata, object$margins[-1], add_response = TRUE) preds <- qdvine(newdata, alpha, vine = object$vine, cores) ## actual predictions on original scale preds <- to_yscale(preds, object) if (!is.null(preds_mean)) preds <- cbind(preds_mean, preds) } else { preds <- preds_mean } preds } #' @rdname predict.vinereg #' @importFrom stats fitted #' @export fitted.vinereg <- function(object, alpha = 0.5, ...) { predict.vinereg(object, newdata = object$model_frame, alpha = alpha, ...) } #' predicts the conditional mean as the average of quantiles. #' @noRd predict_mean <- function(object, newdata) { preds <- predict.vinereg(object, newdata, alpha = 1:10 / 11) data.frame(mean = rowMeans(preds)) } #' @importFrom rvinecopulib rosenblatt inverse_rosenblatt qdvine <- function(u, alpha, vine, cores) { vine$var_types[1] <- "c" q_hat <- as.data.frame(cond_quantile_cpp(alpha, as.matrix(u), vine, cores)) names(q_hat) <- alpha q_hat }
/scratch/gouwar.j/cran-all/cranData/vinereg/R/predict.vinereg.R
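As documented above, `alpha = NA` returns the conditional mean, computed internally as the average of the `1:10 / 11` conditional quantiles (see `predict_mean()`). The sketch below checks that equivalence on simulated data; the data-generating code mirrors the package examples.

```r
library(vinereg)
set.seed(1)
x <- matrix(rnorm(200), 100, 2)
y <- x %*% c(1, -2)
dat <- data.frame(y = y, x = x)
fit <- vinereg(y ~ ., dat)

mu_direct <- predict(fit, dat, alpha = NA)$mean
mu_manual <- rowMeans(predict(fit, dat, alpha = 1:10 / 11))
all.equal(as.numeric(mu_direct), as.numeric(mu_manual))  # should be TRUE
```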
#' brings newdata in a format appropriate for applying rvinecopulib functions. #' @noRd prepare_newdata <- function(newdata, object, use_response = FALSE) { newdata <- as.data.frame(newdata) if (!use_response) { object$model_frame <- object$model_frame[-1] } check_newdata(newdata, object) # factors must be expanded to dummy numeric variables newdata <- expand_factors(newdata) newdata <- remove_unused(newdata, object, use_response) } #' checks if newdata has appropriate columns and sorts according to the order #' used for fitting. #' @noRd check_newdata <- function(newdata, object) { check_var_availability(newdata, names(object$model_frame)) # the check_x functions expect variables in newdata and model_frame in # the same order newdata <- newdata[names(object$model_frame)] check_types(newdata, object$model_frame) check_levels(newdata, object$model_frame) } #' checks if all *selected* covariates are in newdata. #' @noRd check_var_availability <- function(newdata, vars) { vars_avail <- match(vars, colnames(newdata)) if (any(is.na(vars_avail))) { vars_missing <- paste(vars[is.na(vars_avail)], collapse = ", ") stop("'newdata' is missing variables ", vars_missing) } } #' checks if variable types are equal in original data and new data. #' @importFrom utils capture.output #' @noRd check_types <- function(actual, expected) { different_type <- sapply( seq_along(actual), function(i) !identical(class(actual[[i]])[1], class(expected[[i]])[1]) ) if (any(different_type)) { errors <- data.frame( expected = sapply(actual[different_type], function(x) class(x)[1]), actual = sapply(expected[different_type], function(x) class(x)[1]) ) errors <- paste(capture.output(print(errors)), collapse = "\n") stop("some columns have incorrect type:\n", errors, call. = FALSE) } } #' checks if factor levels are equal in original data and new data. #' @noRd check_levels <- function(actual, expected) { # only check factors actual <- actual[sapply(actual, is.factor)] expected <- expected[sapply(expected, is.factor)] if (length(expected) == 0) { return(TRUE) } different_levels <- sapply( seq_along(actual), function(i) !identical(levels(actual[[i]]), levels(expected[[i]])) ) if (any(different_levels)) { errors <- data.frame( expected = sapply( actual[different_levels], function(x) paste(levels(x), collapse = ",") ), actual = sapply( expected[different_levels], function(x) paste(levels(x), collapse = ",") ) ) errors <- paste(capture.output(print(errors)), collapse = "\n") stop("some factors have incorrect levels\n", errors, call. = FALSE) } } #' removes unused variables and returns newdata in the order used for fitting. #' @noRd remove_unused <- function(newdata, object, use_response = FALSE) { # x must be sorted in the order of the data used for fitting fit_order <- object$order[order(object$selected_vars)] if (use_response) { fit_order <- c(names(object$model_frame)[1], fit_order) } newdata[, fit_order, drop = FALSE] } #' transforms data to uniform scale with probability integral transform. 
#' For discrete variables, the output has dimension 2*d #' @noRd to_uscale <- function(data, margins, add_response = FALSE) { if (any(sapply(margins, length) == 2)) { # uscale = TRUE during fitting if (add_response == TRUE) data <- cbind(0.5, data) return(as.matrix(data)) } u_sub <- list() u <- lapply(seq_along(margins), function(k) pkde1d(data[[k]], margins[[k]])) if (any(sapply(margins, function(m) nlevels(m$x) > 0))) { compute_u_sub <- function(k) { if (nlevels(margins[[k]]$x) > 0) { data[, k] <- ordered(data[, k], levels = levels(margins[[k]]$x)) lv <- as.numeric(data[, k]) - 1 lv0 <- which(lv == 0) lv[lv0] <- 1 xlv <- ordered(levels(margins[[k]]$x)[lv], levels = levels(margins[[k]]$x)) u_sub <- pkde1d(xlv, margins[[k]]) u_sub[lv0] <- 0 return(u_sub) } else { return(u[[k]]) } } u_sub <- lapply(seq_along(margins), compute_u_sub) } if (add_response) { u <- c(list(0.5), u) if (length(u_sub) > 0) u_sub <- c(list(0.5), u_sub) } u <- truncate_u(cbind(do.call(cbind, u), do.call(cbind, u_sub))) if ((length(u) == 1) & (NROW(data) > 1)) u <- matrix(u, NROW(data)) u } #' ensures that u-scale data does not contain zeros or ones. #' @noRd truncate_u <- function(u) { pmin(pmax(u, 1e-10), 1 - 1e-10) } #' transforms predicted response back to original variable scale. #' @noRd to_yscale <- function(u, object) { if (any(sapply(object$margins, length) == 2)) return(u) # uscale = TRUE during fitting nms <- colnames(u) u <- lapply(u, qkde1d, obj = object$margins[[1]]) u <- as.data.frame(u) names(u) <- nms u } #' @importFrom stats model.matrix #' @noRd expand_factors <- function(data) { if (is.data.frame(data)) { data <- lapply(data, function(x) { if (is.numeric(x) | is.ordered(x)) { return(x) } lvs <- levels(x) x <- model.matrix(~x)[, -1, drop = FALSE] x <- as.data.frame(x) x <- lapply(x, function(y) ordered(y, levels = 0:1)) names(x) <- lvs[-1] x }) } as.data.frame(data) } process_par_1d <- function(data, pars) { d <- ncol(data) if (!is.null(pars$xmin)) { if (length(pars$xmin) != d) stop("'xmin' must be a vector with one value for each variable") } else { pars$xmin = rep(NaN, d) } if (!is.null(pars$xmax)) { if (length(pars$xmax) != d) stop("'xmax' must be a vector with one value for each variable") } else { pars$xmax = rep(NaN, d) } if (is.null(pars$bw)) pars$bw <- NA if (length(pars$bw) == 1) pars$bw <- rep(pars$bw, d) if (is.null(pars$mult)) pars$mult <- 1 if (length(pars$mult) == 1) pars$mult <- rep(pars$mult, d) if (is.null(pars$deg)) pars$deg <- 2 if (length(pars$deg) == 1) pars$deg <- rep(pars$deg, d) check_par_1d(data, pars) pars } #' @importFrom assertthat assert_that check_par_1d <- function(data, ctrl) { nms <- colnames(data) if (is.null(nms)) { nms <- as.character(seq_len(ncol(data))) } lapply(seq_len(NCOL(data)), function(k) { msg_var <- paste0("Problem with par_1d for variable ", nms[k], ": ") tryCatch( assert_that( is.numeric(ctrl$mult[k]), ctrl$mult[k] > 0, is.numeric(ctrl$xmin[k]), is.numeric(ctrl$xmax[k]), is.na(ctrl$bw[k]) | (is.numeric(ctrl$bw[k]) & (ctrl$bw[k] > 0)), is.numeric(ctrl$deg[k]) ), error = function(e) stop(msg_var, e$message) ) if (is.ordered(data[, k]) & (!is.nan(ctrl$xmin[k]) | !is.nan(ctrl$xmax[k]))) { stop(msg_var, "xmin and xmax are not meaningful for x of type ordered.") } if (!is.nan(ctrl$xmax[k]) & !is.nan(ctrl$xmin[k])) { if (ctrl$xmin[k] > ctrl$xmax[k]) { stop(msg_var, "xmin is larger than xmax.") } } if (!is.nan(ctrl$xmin[k])) { if (any(data[, k] < ctrl$xmin[k])) { stop(msg_var, "not all data are larger than xmin.") } } if (!is.nan(ctrl$xmax[k])) { if (any(data[, 
k] > ctrl$xmax[k])) { stop(msg_var, "not all data are smaller than xmax.") } } if (!(ctrl$deg[k] %in% 0:2)) { stop(msg_var, "deg must be either 0, 1, or 2.") } }) } prep_for_kde1d <- function(data) { data <- lapply(data, function(x) if (is.ordered(x)) as.numeric(x) - 1 else x) do.call(cbind, data) } finalize_margins <- function(margins, data) { for (k in seq_along(margins)) { margins[[k]]$x <- data[[k]] margins[[k]]$nobs <- nrow(data) margins[[k]]$var_name <- names(margins)[k] <- colnames(data)[k] } margins[[1]]$loglik <- sum(log(kde1d::dkde1d(data[[1]], margins[[1]]))) margins }
/scratch/gouwar.j/cran-all/cranData/vinereg/R/tools.R
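`expand_factors()` above converts each unordered factor into 0/1 ordered dummy columns named after its non-reference levels, while numeric and ordered columns pass through unchanged. A quick illustration via the unexported function (the documentation of `vinereg()` refers to `vinereg:::expand_factors()` in the same way); the tiny data frame is made up for the example.

```r
# Unordered factors become ordered 0/1 dummies; numeric columns are untouched.
df <- data.frame(
  y = rnorm(4),
  z = factor(c("a", "b", "c", "a"))
)
vinereg:::expand_factors(df)
# y is kept as is; z is replaced by ordered dummy columns z.b and z.c
```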
#' D-vine regression models #' #' Sequential estimation of a regression D-vine for the purpose of quantile #' prediction as described in Kraus and Czado (2017). #' #' If discrete variables are declared as `ordered()` or `factor()`, they are #' handled as described in Panagiotelis et al. (2012). This is different from #' previous version where the data was jittered before fitting. #' #' @param formula an object of class "formula"; same as [lm()]. #' @param data data frame (or object coercible by [as.data.frame()]) containing #' the variables in the model. #' @param family_set see `family_set` argument of [rvinecopulib::bicop()]. #' @param selcrit selection criterion based on conditional log-likelihood. #' \code{"loglik"} (default) imposes no correction; other choices are #' \code{"aic"} and \code{"bic"}. #' @param order the order of covariates in the D-vine, provided as vector of #' variable names (after calling #' `vinereg:::expand_factors(model.frame(formula, data))`); selected #' automatically if `order = NA` (default). #' @param par_1d list of options passed to [kde1d::kde1d()], must be one value #' for each margin, e.g. `list(xmin = c(0, 0, NaN))` if the response and first #' covariate have non-negative support. #' @param weights optional vector of weights for each observation. #' @param cores integer; the number of cores to use for computations. #' @param ... further arguments passed to [rvinecopulib::bicop()]. #' @param uscale if TRUE, vinereg assumes that marginal distributions have been #' taken care of in a preliminary step. #' #' @return An object of class vinereg. It is a list containing the elements #' \describe{ \item{formula}{the formula used for the fit.} #' \item{selcrit}{criterion used for variable selection.} #' \item{model_frame}{the data used to fit the regression model.} #' \item{margins}{list of marginal models fitted by [kde1d::kde1d()].} #' \item{vine}{an [rvinecopulib::vinecop_dist()] object containing the fitted #' D-vine.} \item{stats}{fit statistics such as conditional #' log-likelihood/AIC/BIC and p-values for each variable's contribution.} #' \item{order}{order of the covariates chosen by the variable selection #' algorithm.} \item{selected_vars}{indices of selected variables.} } Use #' [predict.vinereg()] to predict conditional quantiles. `summary.vinereg()` #' shows the contribution of each selected variable with the associated #' p-value derived from a likelihood ratio test. #' #' @references #' #' Kraus and Czado (2017), D-vine copula based quantile regression, #' Computational Statistics and Data Analysis, 110, 1-18 #' #' Panagiotelis, A., Czado, C., & Joe, H. (2012). Pair copula constructions for #' multivariate discrete data. Journal of the American Statistical Association, #' 107(499), 1063-1072. 
#' #' @examples #' # simulate data #' x <- matrix(rnorm(200), 100, 2) #' y <- x %*% c(1, -2) #' dat <- data.frame(y = y, x = x, z = as.factor(rbinom(100, 2, 0.5))) #' #' # fit vine regression model #' (fit <- vinereg(y ~ ., dat)) #' #' # inspect model #' summary(fit) #' plot_effects(fit) #' #' # model predictions #' mu_hat <- predict(fit, newdata = dat, alpha = NA) # mean #' med_hat <- predict(fit, newdata = dat, alpha = 0.5) # median #' #' # observed vs predicted #' plot(cbind(y, mu_hat)) #' #' ## fixed variable order (no selection) #' (fit <- vinereg(y ~ ., dat, order = c("x.2", "x.1", "z.1"))) #' @seealso \code{\link{predict.vinereg}} #' #' @export #' #' @importFrom kde1d kde1d pkde1d #' @importFrom stats model.frame logLik #' @importFrom utils modifyList #' @importFrom rvinecopulib bicop vinecop dvine_structure #' @importFrom Rcpp sourceCpp #' @useDynLib vinereg, .registration = TRUE vinereg <- function(formula, data, family_set = "parametric", selcrit = "aic", order = NA, par_1d = list(), weights = numeric(), cores = 1, ..., uscale = FALSE) { # remove unused variables if (!missing(data)) { mf <- model.frame(formula, data) } else { mf <- model.frame(formula, parent.frame()) } if (!(is.ordered(mf[[1]]) | is.numeric(mf[[1]]))) stop("response must be numeric or ordered") if (any(sapply(mf, is.factor)) && uscale) stop("factors are not allowed with uscale = TRUE") # expand factors and deduce variable types mfx <- expand_factors(mf) d <- ncol(mfx) var_types <- rep("c", d) var_types[sapply(mfx, is.ordered)] <- "d" ## prepare fit controls (little hack calling bicop() for all checks) arg <- list( data = t(c(0.5, 0.5)), family_set = family_set, selcrit = selcrit, cores = cores, par_method = "mle", nonpar_method = "quadratic", mult = 1, psi0 = 0.9, presel = TRUE, keep_data = FALSE ) ctrl <- do.call( bicop, modifyList(arg, list(...)) )$controls ctrl$weights <- numeric() if (!all(is.na(order))) { check_order(order, names(mfx)) selected_vars <- which(names(mfx) %in% order) var_types <- var_types[c(1, selected_vars)] mfx <- mfx[, c(1, selected_vars)] if (!uscale) { par_1d <- process_par_1d(mfx, par_1d) margins <- fit_margins_cpp(prep_for_kde1d(mfx), sapply(mfx, nlevels), mult = par_1d$mult, xmin = par_1d$xmin, xmax = par_1d$xmax, bw = par_1d$bw, deg = par_1d$deg, weights = weights, cores) margins <- finalize_margins(margins, mfx) u <- to_uscale(mfx, margins) } else { margins <- lapply(1:d, function(x) list(edf = NA, loglik = NA)) u <- as.matrix(mfx) } # now we need the correct ordering in selected_vars selected_vars <- sapply(order, function(x) which(x == names(mfx))) args <- modifyList( ctrl, list( data = u, var_types = var_types, cores = cores, weights = weights, structure = dvine_structure(rank(c(1, selected_vars))) ) ) fit <- list( vine = do.call(vinecop, args), selected_vars = selected_vars ) } else { if (!uscale) { par_1d <- process_par_1d(mfx, par_1d) margins <- fit_margins_cpp(prep_for_kde1d(mfx), sapply(mfx, nlevels), mult = par_1d$mult, xmin = par_1d$xmin, xmax = par_1d$xmax, bw = par_1d$bw, deg = par_1d$deg, weights = weights, cores) margins <- finalize_margins(margins, mfx) u <- to_uscale(mfx, margins) } else { margins <- lapply(1:d, function(x) list(edf = NA, loglik = NA)) u <- as.matrix(mfx) } args <- modifyList( ctrl, list(data = u, var_types = var_types, cores = cores, weights = weights) ) fit <- do.call(select_dvine_cpp, args) if (!uscale) margins <- margins[c(1, sort(fit$selected_vars))] # other margins useless } finalize_vinereg_object( formula = formula, selcrit = selcrit, 
model_frame = mf, margins = margins, vine = fit$vine, selected_vars = fit$selected_vars, var_nms = colnames(mfx) ) } #' @noRd #' @importFrom stats pchisq #' @importFrom rvinecopulib as_rvine_structure finalize_vinereg_object <- function(formula, selcrit, model_frame, margins, vine, selected_vars, var_nms) { vine$names <- c(var_nms[1], var_nms[sort(selected_vars)]) nobs <- nrow(model_frame) vine$nobs <- nobs var_edf <- c( margins[[1]]$edf, vapply(vine$pair_copulas, function(pcs) pcs[[1]]$npars, numeric(1)) ) var_cll <- c( margins[[1]]$loglik, vapply(vine$pair_copulas, function(pcs) pcs[[1]]$loglik, numeric(1)) ) var_caic <- -2 * var_cll + 2 * var_edf var_cbic <- -2 * var_cll + log(nobs) * var_edf var_p_value <- suppressWarnings( pchisq(2 * var_cll, var_edf, lower.tail = FALSE) ) var_p_value[1] <- NA cll <- sum(var_cll, na.rm = TRUE) edf <- sum(var_edf, na.rm = TRUE) caic <- sum(var_caic, na.rm = TRUE) cbic <- sum(var_cbic, na.rm = TRUE) stats <- list( nobs = nobs, edf = edf, cll = cll, caic = caic, cbic = cbic, var_edf = var_edf, var_cll = var_cll, var_caic = var_caic, var_cbic = var_cbic, var_p_value = var_p_value ) out <- list( formula = formula, selcrit = selcrit, model_frame = model_frame, margins = margins, vine = vine, stats = stats, order = var_nms[selected_vars], selected_vars = selected_vars ) class(out) <- "vinereg" out } check_order <- function(order, var_nms) { stopifnot(length(order) > 0) if (!all(order %in% var_nms)) { stop( "unknown variable name in 'order'; ", "allowed values: '", paste(var_nms[-1], collapse = "', '"), "'." ) } if (any(order == var_nms[1])) { stop( "response variable '", var_nms[1], "' must not appear in 'order'." ) } }
/scratch/gouwar.j/cran-all/cranData/vinereg/R/vinereg.R
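If the marginal distributions are modelled elsewhere, `uscale = TRUE` (documented above) lets `vinereg()` operate directly on uniform-scale data. The sketch below uses rank-based pseudo-observations from `rvinecopulib::pseudo_obs()`; note that with `uscale = TRUE` the predictions are also returned on the uniform scale.

```r
library(vinereg)
set.seed(1)
x <- matrix(rnorm(200), 100, 2)
y <- x %*% c(1, -2)
dat <- data.frame(y = y, x = x)

# rank-based pseudo-observations (all margins approximately uniform)
u_dat <- as.data.frame(rvinecopulib::pseudo_obs(as.matrix(dat)))
fit_u <- vinereg(y ~ ., data = u_dat, uscale = TRUE)
head(predict(fit_u, u_dat, alpha = 0.5))  # median of y on the uniform scale
```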
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, warning = FALSE) ## ----message = FALSE---------------------------------------------------------- library(vinereg) require(ggplot2) require(dplyr) require(tidyr) require(AppliedPredictiveModeling) ## ----------------------------------------------------------------------------- set.seed(5) ## ----------------------------------------------------------------------------- data(abalone, package = "AppliedPredictiveModeling") colnames(abalone) <- c( "sex", "length", "diameter", "height", "whole", "shucked", "viscera", "shell", "rings" ) abalone_f <- abalone %>% dplyr::filter(sex == "F") %>% # select female abalones dplyr::select(-sex) %>% # remove id and sex variables dplyr::filter(height < max(height)) # remove height outlier ## ----fig.width=7, fig.height=6------------------------------------------------ pairs(abalone_f, pch = ".") ## ----------------------------------------------------------------------------- fit_vine_par <- vinereg( whole ~ length + diameter + height, data = abalone_f, family_set = c("onepar", "t"), selcrit = "aic" ) ## ----------------------------------------------------------------------------- fit_vine_par$order ## ----------------------------------------------------------------------------- summary(fit_vine_par$vine) ## ----fig.width=7, fig.height=7------------------------------------------------ contour(fit_vine_par$vine) ## ----------------------------------------------------------------------------- # quantile levels alpha_vec <- c(0.1, 0.5, 0.9) ## ----------------------------------------------------------------------------- pred_vine_par <- fitted(fit_vine_par, alpha = alpha_vec) # equivalent to: # predict(fit_vine_par, newdata = abalone.f, alpha = alpha_vec) head(pred_vine_par) ## ----fig.width=7, fig.height=4------------------------------------------------ plot_effects(fit_vine_par) ## ----fig.width=7, fig.height=6------------------------------------------------ pred_lqr <- pred_vine_par for (a in seq_along(alpha_vec)) { my.rq <- quantreg::rq( whole ~ length + diameter + height, tau = alpha_vec[a], data = abalone_f ) pred_lqr[, a] <- quantreg::predict.rq(my.rq) } plot_marginal_effects <- function(covs, preds) { cbind(covs, preds) %>% tidyr::gather(alpha, prediction, -seq_len(NCOL(covs))) %>% dplyr::mutate(prediction = as.numeric(prediction)) %>% tidyr::gather(variable, value, -(alpha:prediction)) %>% ggplot(aes(value, prediction, color = alpha)) + geom_point(alpha = 0.15) + geom_smooth(method = "gam", formula = y ~ s(x, bs = "cs"), se = FALSE) + facet_wrap(~ variable, scale = "free_x") + ylab(quote(q(y* "|" * x[1] * ",...," * x[p]))) + xlab(quote(x[k])) + theme(legend.position = "bottom") } plot_marginal_effects(abalone_f[, 1:3], pred_lqr) ## ----fig.width=4.6, fig.height=4.6-------------------------------------------- fit_vine_np <- vinereg( whole ~ length + diameter + height, data = abalone_f, family_set = "nonpar", selcrit = "aic" ) fit_vine_np contour(fit_vine_np$vine) ## ----fig.width=7, fig.height=4------------------------------------------------ plot_effects(fit_vine_np, var = c("diameter", "height", "length")) ## ----fig.width=4.7, fig.height=4---------------------------------------------- abalone_f$rings <- as.ordered(abalone_f$rings) fit_disc <- vinereg(rings ~ ., data = abalone_f, selcrit = "aic") fit_disc plot_effects(fit_disc)
/scratch/gouwar.j/cran-all/cranData/vinereg/inst/doc/abalone-example.R
--- title: "Example usage of the vinereg package" author: "Daniel Kraus and Claudia Czado" date: "September 2017" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Example usage of the vinereg package} %\VignetteEngine{knitr::rmarkdown} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, warning = FALSE) ``` This file contains the source code of an exemplary application of the D-vine copula based quantile regression approach implemented in the R-package *vinereg* and presented in Kraus and Czado (2017): *D-vine copula based quantile regression*, Computational Statistics and Data Analysis, 110, 1-18. Please, feel free to address questions to <[email protected]>. # Load required packages ```{r, message = FALSE} library(vinereg) require(ggplot2) require(dplyr) require(tidyr) require(AppliedPredictiveModeling) ``` # Data analysis ```{r} set.seed(5) ``` We consider the data set `abalone` from the UCI Machine Learning Repository (https://archive.ics.uci.edu/ml/datasets/abalone) and focus on the female sub-population. In a first application we only consider continuous variables and fit models to predict the quantiles of weight (`whole`) given the predictors `length`, `diameter`, and `height`. ## Load and clean data ```{r} data(abalone, package = "AppliedPredictiveModeling") colnames(abalone) <- c( "sex", "length", "diameter", "height", "whole", "shucked", "viscera", "shell", "rings" ) abalone_f <- abalone %>% dplyr::filter(sex == "F") %>% # select female abalones dplyr::select(-sex) %>% # remove id and sex variables dplyr::filter(height < max(height)) # remove height outlier ``` ```{r, fig.width=7, fig.height=6} pairs(abalone_f, pch = ".") ``` # D-vine regression models ## Parametric D-vine quantile regression We consider the female subset and fit a parametric regression D-vine for the response weight given the covariates len, diameter and height (ignoring the discreteness of some of the variables). The D-vine based model is selected sequentially by maximizing the conditional log-likelihood of the response given the covariates. Covariates are only added if they increase the (possibly AIC- or BIC-corrected) conditional log-likelihood. We use the function `vinereg()` to fit a regression D-vine for predicting the response weight given the covariates `length`, `diameter`, and `height`. The argument `family_set` determines how the pair-copulas are estimated. We will only use one-parameter families and the *t* copula here. The `selcrit` argument specifies the penalty type for the conditional log-likelihood criterion for variable selection. ```{r} fit_vine_par <- vinereg( whole ~ length + diameter + height, data = abalone_f, family_set = c("onepar", "t"), selcrit = "aic" ) ``` The result has a field `order` that shows the selected covariates and their ranking order in the D-vine. ```{r} fit_vine_par$order ``` The field `vine` contains the fitted D-vine, where the first variable corresponds to the response. The object is of class `"vinecop_dist"` so we can use `rvineocpulib`'s functionality to summarize the model ```{r} summary(fit_vine_par$vine) ``` We can also plot the contours of the fitted pair-copulas. ```{r,, fig.width=7, fig.height=7} contour(fit_vine_par$vine) ``` ## Estimation of corresponding conditional quantiles In order to visualize the predicted influence of the covariates, we plot the estimated quantiles arising from the D-vine model at levels 0.1, 0.5 and 0.9 against each of the covariates. 
```{r} # quantile levels alpha_vec <- c(0.1, 0.5, 0.9) ``` We call the `fitted()` function on `fit_vine_par` to extract the fitted values for multiple quantile levels. This is equivalent to predicting the quantiles at the training data with `predict()`, which in turn is more useful for out-of-sample predictions. ```{r} pred_vine_par <- fitted(fit_vine_par, alpha = alpha_vec) # equivalent to: # predict(fit_vine_par, newdata = abalone.f, alpha = alpha_vec) head(pred_vine_par) ``` To examine the effect of the individual variables, we will plot the predicted quantiles against each of the variables. To visualize the relationship more clearly, we add a smoothed line for each quantile level. This gives an estimate of the expected effect of a variable (taking expectation with respect to all other variables). ```{r, fig.width=7, fig.height=4} plot_effects(fit_vine_par) ``` The fitted quantile curves suggest a non-linear effect of all three variables. ## Comparison to the benchmark model: linear quantile regression This can be compared to linear quantile regression: ```{r, fig.width=7, fig.height=6} pred_lqr <- pred_vine_par for (a in seq_along(alpha_vec)) { my.rq <- quantreg::rq( whole ~ length + diameter + height, tau = alpha_vec[a], data = abalone_f ) pred_lqr[, a] <- quantreg::predict.rq(my.rq) } plot_marginal_effects <- function(covs, preds) { cbind(covs, preds) %>% tidyr::gather(alpha, prediction, -seq_len(NCOL(covs))) %>% dplyr::mutate(prediction = as.numeric(prediction)) %>% tidyr::gather(variable, value, -(alpha:prediction)) %>% ggplot(aes(value, prediction, color = alpha)) + geom_point(alpha = 0.15) + geom_smooth(method = "gam", formula = y ~ s(x, bs = "cs"), se = FALSE) + facet_wrap(~ variable, scale = "free_x") + ylab(quote(q(y* "|" * x[1] * ",...," * x[p]))) + xlab(quote(x[k])) + theme(legend.position = "bottom") } plot_marginal_effects(abalone_f[, 1:3], pred_lqr) ``` ## Nonparametric D-vine quantile regression We also want to check whether these results change when we estimate the pair-copulas nonparametrically. ```{r, fig.width=4.6, fig.height=4.6} fit_vine_np <- vinereg( whole ~ length + diameter + height, data = abalone_f, family_set = "nonpar", selcrit = "aic" ) fit_vine_np contour(fit_vine_np$vine) ``` Now only the length and height variables are selected as predictors. Let's have a look at the marginal effects. ```{r, fig.width=7, fig.height=4} plot_effects(fit_vine_np, var = c("diameter", "height", "length")) ``` The effects look similar to the parametric ones, but slightly more wiggly. Note that even though the diameter was not selected as a covariate, its marginal effect is captured by the model. It just does not provide additional information when height and length are already accounted for. ## Discrete D-vine quantile regression To deal with discrete variables, we use the methodology of Schallhorn et al. (2017). For estimating nonparametric pair-copulas with discrete variable(s), jittering is used (Nagler, 2017). We let `vinereg()` know that a variable is discrete by declaring it `ordered`. 
```{r, fig.width=4.7, fig.height=4} abalone_f$rings <- as.ordered(abalone_f$rings) fit_disc <- vinereg(rings ~ ., data = abalone_f, selcrit = "aic") fit_disc plot_effects(fit_disc) ``` # References Kraus and Czado (2017), **D-vine copula based quantile regression**, *Computational Statistics and Data Analysis, 110, 1-18* Nagler (2017), **A generic approach to nonparametric function estimation with mixed data**, *Statistics & Probability Letters, 137:326–330* Schallhorn, Kraus, Nagler and Czado (2017), **D-vine quantile regression with discrete variables**, *arXiv preprint*
/scratch/gouwar.j/cran-all/cranData/vinereg/inst/doc/abalone-example.Rmd
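The comparison between the D-vine and linear quantile regression in the vignette above is purely visual. A numerical (in-sample) comparison could use the pinball (quantile) loss; the sketch below reuses `pred_vine_par`, `pred_lqr`, `abalone_f` and `alpha_vec` from the vignette, with lower values indicating better-fitting quantiles.

```r
# In-sample pinball (quantile) loss for both models at each quantile level.
pinball <- function(y, q, alpha) {
  mean(pmax(alpha * (y - q), (alpha - 1) * (y - q)))
}
sapply(seq_along(alpha_vec), function(a) c(
  vine = pinball(abalone_f$whole, pred_vine_par[[a]], alpha_vec[a]),
  lqr  = pinball(abalone_f$whole, pred_lqr[[a]], alpha_vec[a])
))
```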
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, warning = FALSE, message = FALSE) ## ----message = FALSE---------------------------------------------------------- library(vinereg) require(ggplot2) require(dplyr) require(purrr) require(scales) require(quantreg) ## ----------------------------------------------------------------------------- plot_marginal_effects <- function(covs, preds) { cbind(covs, preds) %>% tidyr::gather(alpha, prediction, -seq_len(NCOL(covs))) %>% dplyr::mutate(prediction = as.numeric(prediction)) %>% tidyr::gather(variable, value, -(alpha:prediction)) %>% dplyr::mutate(value = as.numeric(value)) %>% ggplot(aes(value, prediction, color = alpha)) + geom_point(alpha = 0.15) + geom_smooth(span = 0.5, se = FALSE) + facet_wrap(~ variable, scale = "free_x") + theme(legend.position = "none") + theme(plot.margin = unit(c(0, 0, 0, 0), "mm")) + xlab("") } ## ----------------------------------------------------------------------------- bikedata <- read.csv("day.csv") bikedata[, 2] <- as.Date(bikedata[, 2]) head(bikedata) ## ----------------------------------------------------------------------------- bikedata <- bikedata %>% rename( temperature = atemp, month = mnth, weathersituation = weathersit, humidity = hum, count = cnt ) ## ----------------------------------------------------------------------------- bikedata <- bikedata %>% mutate( temperature = 66 * temperature + 16, windspeed = 67 * windspeed, humidity = 100 * humidity ) ## ----count_with_trend--------------------------------------------------------- ggplot(bikedata, aes(dteday, count)) + geom_line() + scale_x_date(labels = scales::date_format("%b %y")) + xlab("date") + ylab("rental count") + stat_smooth(method = "lm", se = FALSE, linetype = "dashed") + theme(plot.title = element_text(lineheight = 0.8, size = 20)) + theme(text = element_text(size = 18)) ## ----count_detrended---------------------------------------------------------- lm_trend <- lm(count ~ instant, data = bikedata) trend <- predict(lm_trend) bikedata <- mutate(bikedata, count = count / trend) ggplot(bikedata, aes(dteday, count)) + geom_line() + scale_x_date(labels = scales::date_format("%b %y")) + xlab("date") + ylab("detrended rental count") + theme(plot.title = element_text(lineheight = 0.8, size = 20)) + theme(text = element_text(size = 18)) ## ----------------------------------------------------------------------------- bikedata <- bikedata %>% select(-instant, -dteday, -yr) %>% # time indices select(-casual, -registered) %>% # casual + registered = count select(-holiday) %>% # we use 'workingday' instead select(-temp) # we use 'temperature' (feeling temperature) ## ----------------------------------------------------------------------------- disc_vars <- c("season", "month", "weekday", "workingday", "weathersituation") bikedata <- bikedata %>% mutate(weekday = ifelse(weekday == 0, 7, weekday)) %>% # sun at end of week purrr::modify_at(disc_vars, as.ordered) ## ----------------------------------------------------------------------------- fit <- vinereg( count ~ ., data = bikedata, family = c("onepar", "tll"), selcrit = "aic" ) fit summary(fit) ## ----------------------------------------------------------------------------- alpha_vec <- c(0.1, 0.5, 0.9) pred <- fitted(fit, alpha_vec) ## ----me_temperature, fig.width=4, fig.height=4-------------------------------- plot_marginal_effects( covs = select(bikedata, temperature), preds = pred ) ## ----me_humidity, fig.width=4, fig.height=4, 
message=FALSE-------------------- plot_marginal_effects(covs = select(bikedata, humidity), preds = pred) + xlim(c(25, 100)) ## ----me_windspeed, fig.width=4, fig.height=4, message=FALSE------------------- plot_marginal_effects(covs = select(bikedata, windspeed), preds = pred) ## ----me_month, fig.width=4, fig.height=4, message=FALSE----------------------- month_labs <- c("Jan","", "Mar", "", "May", "", "Jul", "", "Sep", "", "Nov", "") plot_marginal_effects(covs = select(bikedata, month), preds = pred) + scale_x_discrete(limits = 1:12, labels = month_labs) ## ----me_weathersituation, fig.width=4, fig.height=4, message=FALSE------------ plot_marginal_effects(covs = select(bikedata, weathersituation), preds = pred) + scale_x_discrete(limits = 1:3,labels = c("good", "medium", "bad")) ## ----me_weekday, fig.width=4, fig.height=4, message=FALSE--------------------- weekday_labs <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") plot_marginal_effects(covs = select(bikedata, weekday), preds = pred) + scale_x_discrete(limits = 1:7, labels = weekday_labs) ## ----me_workingday, fig.width=4, fig.height=4, message=FALSE------------------ plot_marginal_effects(covs = select(bikedata, workingday), preds = pred) + scale_x_discrete(limits = 0:1, labels = c("no", "yes")) + geom_smooth(method = "lm", se = FALSE) + xlim(c(0, 1)) ## ----me_season, fig.width=4, fig.height=4, message=FALSE---------------------- season_labs <- c("spring", "summer", "fall", "winter") plot_marginal_effects(covs = select(bikedata, season), preds = pred) + scale_x_discrete(limits = 1:4, labels = season_labs)
/scratch/gouwar.j/cran-all/cranData/vinereg/inst/doc/bike-rental.R
--- title: 'D-vine quantile regression with discrete variables: analysis of bike rental data' author: "Dani Kraus and Thomas Nagler" date: "November 8, 2017" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Analysis of bike rental data} %\VignetteEngine{knitr::rmarkdown} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, warning = FALSE, message = FALSE) ``` #### Required packages ```{r, message = FALSE} library(vinereg) require(ggplot2) require(dplyr) require(purrr) require(scales) require(quantreg) ``` #### Plot function for marginal effects ```{r} plot_marginal_effects <- function(covs, preds) { cbind(covs, preds) %>% tidyr::gather(alpha, prediction, -seq_len(NCOL(covs))) %>% dplyr::mutate(prediction = as.numeric(prediction)) %>% tidyr::gather(variable, value, -(alpha:prediction)) %>% dplyr::mutate(value = as.numeric(value)) %>% ggplot(aes(value, prediction, color = alpha)) + geom_point(alpha = 0.15) + geom_smooth(span = 0.5, se = FALSE) + facet_wrap(~ variable, scale = "free_x") + theme(legend.position = "none") + theme(plot.margin = unit(c(0, 0, 0, 0), "mm")) + xlab("") } ``` ## Data preparation #### Load data ```{r} bikedata <- read.csv("day.csv") bikedata[, 2] <- as.Date(bikedata[, 2]) head(bikedata) ``` #### Rename variables ```{r} bikedata <- bikedata %>% rename( temperature = atemp, month = mnth, weathersituation = weathersit, humidity = hum, count = cnt ) ``` #### Un-normalize variables See variable description on UCI web page. ```{r} bikedata <- bikedata %>% mutate( temperature = 66 * temperature + 16, windspeed = 67 * windspeed, humidity = 100 * humidity ) ``` #### Show trend ```{r count_with_trend} ggplot(bikedata, aes(dteday, count)) + geom_line() + scale_x_date(labels = scales::date_format("%b %y")) + xlab("date") + ylab("rental count") + stat_smooth(method = "lm", se = FALSE, linetype = "dashed") + theme(plot.title = element_text(lineheight = 0.8, size = 20)) + theme(text = element_text(size = 18)) ``` #### Remove trend ```{r count_detrended} lm_trend <- lm(count ~ instant, data = bikedata) trend <- predict(lm_trend) bikedata <- mutate(bikedata, count = count / trend) ggplot(bikedata, aes(dteday, count)) + geom_line() + scale_x_date(labels = scales::date_format("%b %y")) + xlab("date") + ylab("detrended rental count") + theme(plot.title = element_text(lineheight = 0.8, size = 20)) + theme(text = element_text(size = 18)) ``` #### Drop useless variables ```{r} bikedata <- bikedata %>% select(-instant, -dteday, -yr) %>% # time indices select(-casual, -registered) %>% # casual + registered = count select(-holiday) %>% # we use 'workingday' instead select(-temp) # we use 'temperature' (feeling temperature) ``` #### Declare discrete variables as `ordered` ```{r} disc_vars <- c("season", "month", "weekday", "workingday", "weathersituation") bikedata <- bikedata %>% mutate(weekday = ifelse(weekday == 0, 7, weekday)) %>% # sun at end of week purrr::modify_at(disc_vars, as.ordered) ``` ## D-vine regression model #### Fit model ```{r} fit <- vinereg( count ~ ., data = bikedata, family = c("onepar", "tll"), selcrit = "aic" ) fit summary(fit) ``` #### In-sample predictions ```{r} alpha_vec <- c(0.1, 0.5, 0.9) pred <- fitted(fit, alpha_vec) ``` ### Marginal effects ```{r me_temperature, fig.width=4, fig.height=4} plot_marginal_effects( covs = select(bikedata, temperature), preds = pred ) ``` ```{r me_humidity, fig.width=4, fig.height=4, message=FALSE} plot_marginal_effects(covs = select(bikedata, humidity), preds = pred) + xlim(c(25, 100)) ``` ```{r 
me_windspeed, fig.width=4, fig.height=4, message=FALSE} plot_marginal_effects(covs = select(bikedata, windspeed), preds = pred) ``` ```{r me_month, fig.width=4, fig.height=4, message=FALSE} month_labs <- c("Jan","", "Mar", "", "May", "", "Jul", "", "Sep", "", "Nov", "") plot_marginal_effects(covs = select(bikedata, month), preds = pred) + scale_x_discrete(limits = 1:12, labels = month_labs) ``` ```{r me_weathersituation, fig.width=4, fig.height=4, message=FALSE} plot_marginal_effects(covs = select(bikedata, weathersituation), preds = pred) + scale_x_discrete(limits = 1:3,labels = c("good", "medium", "bad")) ``` ```{r me_weekday, fig.width=4, fig.height=4, message=FALSE} weekday_labs <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") plot_marginal_effects(covs = select(bikedata, weekday), preds = pred) + scale_x_discrete(limits = 1:7, labels = weekday_labs) ``` ```{r me_workingday, fig.width=4, fig.height=4, message=FALSE} plot_marginal_effects(covs = select(bikedata, workingday), preds = pred) + scale_x_discrete(limits = 0:1, labels = c("no", "yes")) + geom_smooth(method = "lm", se = FALSE) + xlim(c(0, 1)) ``` ```{r me_season, fig.width=4, fig.height=4, message=FALSE} season_labs <- c("spring", "summer", "fall", "winter") plot_marginal_effects(covs = select(bikedata, season), preds = pred) + scale_x_discrete(limits = 1:4, labels = season_labs) ```
/scratch/gouwar.j/cran-all/cranData/vinereg/inst/doc/bike-rental.Rmd
--- title: "Example usage of the vinereg package" author: "Daniel Kraus and Claudia Czado" date: "September 2017" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Example usage of the vinereg package} %\VignetteEngine{knitr::rmarkdown} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, warning = FALSE) ``` This file contains the source code of an exemplary application of the D-vine copula based quantile regression approach implemented in the R-package *vinereg* and presented in Kraus and Czado (2017): *D-vine copula based quantile regression*, Computational Statistics and Data Analysis, 110, 1-18. Please, feel free to address questions to <[email protected]>. # Load required packages ```{r, message = FALSE} library(vinereg) require(ggplot2) require(dplyr) require(tidyr) require(AppliedPredictiveModeling) ``` # Data analysis ```{r} set.seed(5) ``` We consider the data set `abalone` from the UCI Machine Learning Repository (https://archive.ics.uci.edu/ml/datasets/abalone) and focus on the female sub-population. In a first application we only consider continuous variables and fit models to predict the quantiles of weight (`whole`) given the predictors `length`, `diameter`, and `height`. ## Load and clean data ```{r} data(abalone, package = "AppliedPredictiveModeling") colnames(abalone) <- c( "sex", "length", "diameter", "height", "whole", "shucked", "viscera", "shell", "rings" ) abalone_f <- abalone %>% dplyr::filter(sex == "F") %>% # select female abalones dplyr::select(-sex) %>% # remove id and sex variables dplyr::filter(height < max(height)) # remove height outlier ``` ```{r, fig.width=7, fig.height=6} pairs(abalone_f, pch = ".") ``` # D-vine regression models ## Parametric D-vine quantile regression We consider the female subset and fit a parametric regression D-vine for the response weight given the covariates len, diameter and height (ignoring the discreteness of some of the variables). The D-vine based model is selected sequentially by maximizing the conditional log-likelihood of the response given the covariates. Covariates are only added if they increase the (possibly AIC- or BIC-corrected) conditional log-likelihood. We use the function `vinereg()` to fit a regression D-vine for predicting the response weight given the covariates `length`, `diameter`, and `height`. The argument `family_set` determines how the pair-copulas are estimated. We will only use one-parameter families and the *t* copula here. The `selcrit` argument specifies the penalty type for the conditional log-likelihood criterion for variable selection. ```{r} fit_vine_par <- vinereg( whole ~ length + diameter + height, data = abalone_f, family_set = c("onepar", "t"), selcrit = "aic" ) ``` The result has a field `order` that shows the selected covariates and their ranking order in the D-vine. ```{r} fit_vine_par$order ``` The field `vine` contains the fitted D-vine, where the first variable corresponds to the response. The object is of class `"vinecop_dist"` so we can use `rvineocpulib`'s functionality to summarize the model ```{r} summary(fit_vine_par$vine) ``` We can also plot the contours of the fitted pair-copulas. ```{r,, fig.width=7, fig.height=7} contour(fit_vine_par$vine) ``` ## Estimation of corresponding conditional quantiles In order to visualize the predicted influence of the covariates, we plot the estimated quantiles arising from the D-vine model at levels 0.1, 0.5 and 0.9 against each of the covariates. 
```{r} # quantile levels alpha_vec <- c(0.1, 0.5, 0.9) ``` We call the `fitted()` function on `fit_vine_par` to extract the fitted values for multiple quantile levels. This is equivalent to predicting the quantiles at the training data; the `predict()` function is more useful for out-of-sample predictions. ```{r} pred_vine_par <- fitted(fit_vine_par, alpha = alpha_vec) # equivalent to: # predict(fit_vine_par, newdata = abalone_f, alpha = alpha_vec) head(pred_vine_par) ``` To examine the effect of the individual variables, we will plot the predicted quantiles against each of the variables. To visualize the relationship more clearly, we add a smoothed line for each quantile level. This gives an estimate of the expected effect of a variable (taking expectation with respect to all other variables). ```{r, fig.width=7, fig.height=4} plot_effects(fit_vine_par) ``` The fitted quantile curves suggest a non-linear effect of all three variables. ## Comparison to the benchmark model: linear quantile regression This can be compared to linear quantile regression: ```{r, fig.width=7, fig.height=6} pred_lqr <- pred_vine_par for (a in seq_along(alpha_vec)) { my.rq <- quantreg::rq( whole ~ length + diameter + height, tau = alpha_vec[a], data = abalone_f ) pred_lqr[, a] <- quantreg::predict.rq(my.rq) } plot_marginal_effects <- function(covs, preds) { cbind(covs, preds) %>% tidyr::gather(alpha, prediction, -seq_len(NCOL(covs))) %>% dplyr::mutate(prediction = as.numeric(prediction)) %>% tidyr::gather(variable, value, -(alpha:prediction)) %>% ggplot(aes(value, prediction, color = alpha)) + geom_point(alpha = 0.15) + geom_smooth(method = "gam", formula = y ~ s(x, bs = "cs"), se = FALSE) + facet_wrap(~ variable, scale = "free_x") + ylab(quote(q(y* "|" * x[1] * ",...," * x[p]))) + xlab(quote(x[k])) + theme(legend.position = "bottom") } plot_marginal_effects(abalone_f[, 1:3], pred_lqr) ``` ## Nonparametric D-vine quantile regression We also want to check whether these results change when we estimate the pair-copulas nonparametrically. ```{r, fig.width=4.6, fig.height=4.6} fit_vine_np <- vinereg( whole ~ length + diameter + height, data = abalone_f, family_set = "nonpar", selcrit = "aic" ) fit_vine_np contour(fit_vine_np$vine) ``` Now only the length and height variables are selected as predictors. Let's have a look at the marginal effects. ```{r, fig.width=7, fig.height=4} plot_effects(fit_vine_np, var = c("diameter", "height", "length")) ``` The effects look similar to the parametric ones, but slightly more wiggly. Note that even though the diameter was not selected as a covariate, its marginal effect is captured by the model. It just does not provide additional information when height and length are already accounted for. ## Discrete D-vine quantile regression To deal with discrete variables, we use the methodology of Schallhorn et al. (2017). For estimating nonparametric pair-copulas with discrete variable(s), jittering is used (Nagler, 2017). We let `vinereg()` know that a variable is discrete by declaring it `ordered`.
```{r, fig.width=4.7, fig.height=4} abalone_f$rings <- as.ordered(abalone_f$rings) fit_disc <- vinereg(rings ~ ., data = abalone_f, selcrit = "aic") fit_disc plot_effects(fit_disc) ``` # References Kraus and Czado (2017), **D-vine copula based quantile regression**, *Computational Statistics and Data Analysis, 110, 1-18* Nagler (2017), **A generic approach to nonparametric function estimation with mixed data**, *Statistics & Probability Letters, 137:326–330* Schallhorn, Kraus, Nagler and Czado (2017), **D-vine quantile regression with discrete variables**, *arXiv preprint*
/scratch/gouwar.j/cran-all/cranData/vinereg/vignettes/abalone-example.Rmd
--- title: 'D-vine quantile regression with discrete variables: analysis of bike rental data' author: "Dani Kraus and Thomas Nagler" date: "November 8, 2017" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Analysis of bike rental data} %\VignetteEngine{knitr::rmarkdown} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, warning = FALSE, message = FALSE) ``` #### Required packages ```{r, message = FALSE} library(vinereg) require(ggplot2) require(dplyr) require(purrr) require(scales) require(quantreg) ``` #### Plot function for marginal effects ```{r} plot_marginal_effects <- function(covs, preds) { cbind(covs, preds) %>% tidyr::gather(alpha, prediction, -seq_len(NCOL(covs))) %>% dplyr::mutate(prediction = as.numeric(prediction)) %>% tidyr::gather(variable, value, -(alpha:prediction)) %>% dplyr::mutate(value = as.numeric(value)) %>% ggplot(aes(value, prediction, color = alpha)) + geom_point(alpha = 0.15) + geom_smooth(span = 0.5, se = FALSE) + facet_wrap(~ variable, scale = "free_x") + theme(legend.position = "none") + theme(plot.margin = unit(c(0, 0, 0, 0), "mm")) + xlab("") } ``` ## Data preparation #### Load data ```{r} bikedata <- read.csv("day.csv") bikedata[, 2] <- as.Date(bikedata[, 2]) head(bikedata) ``` #### Rename variables ```{r} bikedata <- bikedata %>% rename( temperature = atemp, month = mnth, weathersituation = weathersit, humidity = hum, count = cnt ) ``` #### Un-normalize variables See variable description on UCI web page. ```{r} bikedata <- bikedata %>% mutate( temperature = 66 * temperature + 16, windspeed = 67 * windspeed, humidity = 100 * humidity ) ``` #### Show trend ```{r count_with_trend} ggplot(bikedata, aes(dteday, count)) + geom_line() + scale_x_date(labels = scales::date_format("%b %y")) + xlab("date") + ylab("rental count") + stat_smooth(method = "lm", se = FALSE, linetype = "dashed") + theme(plot.title = element_text(lineheight = 0.8, size = 20)) + theme(text = element_text(size = 18)) ``` #### Remove trend ```{r count_detrended} lm_trend <- lm(count ~ instant, data = bikedata) trend <- predict(lm_trend) bikedata <- mutate(bikedata, count = count / trend) ggplot(bikedata, aes(dteday, count)) + geom_line() + scale_x_date(labels = scales::date_format("%b %y")) + xlab("date") + ylab("detrended rental count") + theme(plot.title = element_text(lineheight = 0.8, size = 20)) + theme(text = element_text(size = 18)) ``` #### Drop useless variables ```{r} bikedata <- bikedata %>% select(-instant, -dteday, -yr) %>% # time indices select(-casual, -registered) %>% # casual + registered = count select(-holiday) %>% # we use 'workingday' instead select(-temp) # we use 'temperature' (feeling temperature) ``` #### Declare discrete variables as `ordered` ```{r} disc_vars <- c("season", "month", "weekday", "workingday", "weathersituation") bikedata <- bikedata %>% mutate(weekday = ifelse(weekday == 0, 7, weekday)) %>% # sun at end of week purrr::modify_at(disc_vars, as.ordered) ``` ## D-vine regression model #### Fit model ```{r} fit <- vinereg( count ~ ., data = bikedata, family = c("onepar", "tll"), selcrit = "aic" ) fit summary(fit) ``` #### In-sample predictions ```{r} alpha_vec <- c(0.1, 0.5, 0.9) pred <- fitted(fit, alpha_vec) ``` ### Marginal effects ```{r me_temperature, fig.width=4, fig.height=4} plot_marginal_effects( covs = select(bikedata, temperature), preds = pred ) ``` ```{r me_humidity, fig.width=4, fig.height=4, message=FALSE} plot_marginal_effects(covs = select(bikedata, humidity), preds = pred) + xlim(c(25, 100)) ``` ```{r 
me_windspeed, fig.width=4, fig.height=4, message=FALSE} plot_marginal_effects(covs = select(bikedata, windspeed), preds = pred) ``` ```{r me_month, fig.width=4, fig.height=4, message=FALSE} month_labs <- c("Jan","", "Mar", "", "May", "", "Jul", "", "Sep", "", "Nov", "") plot_marginal_effects(covs = select(bikedata, month), preds = pred) + scale_x_discrete(limits = 1:12, labels = month_labs) ``` ```{r me_weathersituation, fig.width=4, fig.height=4, message=FALSE} plot_marginal_effects(covs = select(bikedata, weathersituation), preds = pred) + scale_x_discrete(limits = 1:3,labels = c("good", "medium", "bad")) ``` ```{r me_weekday, fig.width=4, fig.height=4, message=FALSE} weekday_labs <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") plot_marginal_effects(covs = select(bikedata, weekday), preds = pred) + scale_x_discrete(limits = 1:7, labels = weekday_labs) ``` ```{r me_workingday, fig.width=4, fig.height=4, message=FALSE} plot_marginal_effects(covs = select(bikedata, workingday), preds = pred) + scale_x_discrete(limits = 0:1, labels = c("no", "yes")) + geom_smooth(method = "lm", se = FALSE) + xlim(c(0, 1)) ``` ```{r me_season, fig.width=4, fig.height=4, message=FALSE} season_labs <- c("spring", "summer", "fall", "winter") plot_marginal_effects(covs = select(bikedata, season), preds = pred) + scale_x_discrete(limits = 1:4, labels = season_labs) ```
/scratch/gouwar.j/cran-all/cranData/vinereg/vignettes/bike-rental.Rmd
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setClass("Vine", contains = "VIRTUAL", representation = representation( type = "character", dimension = "numeric", dimensionNames = "character", copulas = "matrix", trees = "numeric"), prototype = prototype( type = "Vine")) setClass("RVine", contains = "Vine", prototype = prototype( type = "Regular vine")) setClass("CVine", contains = "RVine", prototype = prototype( type = "Canonical vine")) setClass("DVine", contains = "RVine", prototype = prototype( type = "D-vine")) Vine <- function (type, dimension = 2, trees = dimension - 1, copulas = matrix(list(indepCopula()), dimension - 1, dimension - 1)) { if (type %in% c("CVine", "DVine")) { new(type, dimension = dimension, copulas = copulas, trees = trees) } else { stop("invalid vine type ", dQuote(type)) } } CVine <- function (dimension = 2, trees = dimension - 1, copulas = matrix(list(indepCopula()), dimension - 1, dimension - 1)) { Vine("CVine", dimension = dimension, trees = trees, copulas = copulas) } DVine <- function (dimension = 2, trees = dimension - 1, copulas = matrix(list(indepCopula()), dimension - 1, dimension - 1)) { Vine("DVine", dimension = dimension, trees = trees, copulas = copulas) }
/scratch/gouwar.j/cran-all/cranData/vines/R/Vine.R
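# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): one way the
# constructors defined in Vine.R might be used. A 3-dimensional D-vine keeps
# its pair-copulas in a (d-1) x (d-1) matrix of lists, with tree j stored in
# row j; the copula objects come from the 'copula' package. The particular
# copulas and parameters below are arbitrary choices for illustration.
library(vines)
library(copula)

copulas <- matrix(list(indepCopula()), 2, 2)
copulas[[1, 1]] <- normalCopula(0.5)    # pair (1,2) in tree 1
copulas[[1, 2]] <- claytonCopula(2)     # pair (2,3) in tree 1
copulas[[2, 1]] <- frankCopula(3)       # pair (1,3 | 2) in tree 2
vine <- DVine(dimension = 3, trees = 2, copulas = copulas)
show(vine)                              # uses the show() method defined in show.R
# ---------------------------------------------------------------------------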
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. getVineDimnames <- function (x) { x@dimensionNames } setMethod("dimnames", "Vine", getVineDimnames) setVineDimnames <- function (x, value) { dimensionNames <- as.character(value) if (length(dimensionNames) == 0 || length(dimensionNames) == x@dimension) { x@dimensionNames <- dimensionNames x } else { stop("length of the argument not equal to the dimension of the vine") } } setMethod("dimnames<-", "Vine", setVineDimnames)
/scratch/gouwar.j/cran-all/cranData/vines/R/dimnames.R
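# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): dimnames() on
# a vine stores one label per margin and rejects vectors of any other length;
# the labels are reused by show() and as column names of rvine() output.
library(vines)

v <- CVine(dimension = 3)          # canonical vine with independence copulas
dimnames(v) <- c("X", "Y", "Z")
dimnames(v)
colnames(rvine(v, 3))              # "X" "Y" "Z"
# ---------------------------------------------------------------------------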
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setGeneric("dvine", function (vine, u) standardGeneric("dvine"), signature = "vine") dCVineDVine <- function (vine, u) { if (is.vector(u)) u <- matrix(u, nrow = 1) if (vine@trees == 0) { # Product of the uniform marginal densities. rep(1, nrow(u)) } else { evalCopula <- function (vine, j, i, x, y) { dCopula(cbind(x, y), vine@copulas[[j, i]], log = TRUE) } iterResult <- vineIter(vine, u, evalCopula = evalCopula) exp(apply(matrix(unlist(iterResult$evals), nrow(u)), 1, sum)) } } setMethod("dvine", "CVine", dCVineDVine) setMethod("dvine", "DVine", dCVineDVine)
/scratch/gouwar.j/cran-all/cranData/vines/R/dvine.R
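# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): dvine()
# evaluates the vine density on the copula scale, so each row of 'u' must lie
# in [0,1]^d; with zero trees it reduces to the uniform density 1. The
# Gaussian pair-copulas below are arbitrary choices for illustration.
library(vines)
library(copula)

copulas <- matrix(list(indepCopula()), 2, 2)
copulas[[1, 1]] <- normalCopula(0.6)
copulas[[1, 2]] <- normalCopula(0.6)
copulas[[2, 1]] <- normalCopula(0.3)
vine <- DVine(dimension = 3, trees = 2, copulas = copulas)
dvine(vine, c(0.2, 0.5, 0.9))             # density at a single point
dvine(vine, matrix(runif(30), ncol = 3))  # vectorized over the rows of a matrix
# ---------------------------------------------------------------------------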
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setGeneric("h", function (copula, x, v, eps = .Machine$double.eps^0.5) standardGeneric("h"), signature = "copula") hCopula <- function (copula, x, v, eps) { env <- new.env() assign("copula", copula, env) assign("x", pmax(pmin(x, 1 - eps), eps), env) assign("v", pmax(pmin(v, 1 - eps), eps), env) d <- numericDeriv(quote(pCopula(cbind(x, v), copula)), "v", env) r <- diag(attr(d, "gradient")) pmax(pmin(r, 1 - eps), eps) } setMethod("h", "copula", hCopula) hIndepCopula <- function (copula, x, v, eps) { .Call(C_hIndepCopula, x, v) } setMethod("h", "indepCopula", hIndepCopula) hNormalCopula <- function (copula, x, v, eps) { rho <- max(min(copula@parameters, 1 - eps), -1 + eps) .Call(C_hNormalCopula, rho, x, v, eps) } setMethod("h", "normalCopula", hNormalCopula) hTCopula <- function (copula, x, v, eps) { rho <- max(min(copula@parameters[1], 1 - eps), -1 + eps) df <- if(.hasSlot(copula, "df")) copula@df else copula@parameters[2] .Call(C_hTCopula, rho, df, x, v, eps) } setMethod("h", "tCopula", hTCopula) hClaytonCopula <- function (copula, x, v, eps = .Machine$double.eps^0.25) { theta <- min(copula@parameters, 75) .Call(C_hClaytonCopula, theta, x, v, eps) } setMethod("h", "claytonCopula", hClaytonCopula) hGumbelCopula <- function (copula, x, v, eps) { theta <- min(copula@parameters, 100) .Call(C_hGumbelCopula, theta, x, v, eps) } setMethod("h", "gumbelCopula", hGumbelCopula) hFGMCopula <- function (copula, x, v, eps) { theta <- copula@parameters .Call(C_hFGMCopula, theta, x, v, eps) } setMethod("h", "fgmCopula", hFGMCopula) hGalambosCopula <- function (copula, x, v, eps) { theta <- min(copula@parameters, 25) .Call(C_hGalambosCopula, theta, x, v, eps) } setMethod("h", "galambosCopula", hGalambosCopula) hFrankCopula <- function (copula, x, v, eps) { theta <- max(min(copula@parameters, 45), -45) .Call(C_hFrankCopula, theta, x, v, eps) } setMethod("h", "frankCopula", hFrankCopula)
/scratch/gouwar.j/cran-all/cranData/vines/R/h.R
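# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): h(copula, x, v)
# is the conditional distribution F(x | v) used throughout the pair-copula
# construction algorithms. Copulas with a dedicated method above are evaluated
# in closed form (C code); any other copula falls back to the generic method,
# which differentiates pCopula() numerically via numericDeriv().
library(vines)
library(copula)

x <- c(0.1, 0.5, 0.9)
v <- rep(0.3, 3)
h(normalCopula(0.7), x, v)    # closed-form branch
h(plackettCopula(2), x, v)    # generic numerical-derivative fallback
# ---------------------------------------------------------------------------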
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setGeneric("hinverse", function (copula, u, v, eps = .Machine$double.eps^0.5) standardGeneric("hinverse"), signature = "copula") hinverseCopula <- function (copula, u, v, eps) { r0 <- u <= eps r1 <- abs(1 - u) <= eps skip <- r0 | r1 u <- pmax(pmin(u, 1-eps), eps) v <- pmax(pmin(v, 1-eps), eps) f <- function (x, u, v, copula) h(copula, x, v) - u r <- sapply(seq(along = u), function (i) { if (skip[i]) { NA } else { uniroot(f, lower = eps, upper = 1-eps, f.lower = -u[i], f.upper = 1-u[i], tol = 0.01, copula = copula, u = u[i], v = v[i])$root } }) ifelse(r0, eps, ifelse(r1, 1-eps, r)) } setMethod("hinverse", "copula", hinverseCopula) hinverseIndepCopula <- function (copula, u, v, eps) { .Call(C_hinverseIndepCopula, u, v) } setMethod("hinverse", "indepCopula", hinverseIndepCopula) hinverseNormalCopula <- function (copula, u, v, eps) { rho <- max(min(copula@parameters, 1 - eps), -1 + eps) .Call(C_hinverseNormalCopula, rho, u, v, eps) } setMethod("hinverse", "normalCopula", hinverseNormalCopula) hinverseTCopula <- function (copula, u, v, eps) { rho <- max(min(copula@parameters[1], 1 - eps), -1 + eps) df <- if(.hasSlot(copula, "df")) copula@df else copula@parameters[2] .Call(C_hinverseTCopula, rho, df, u, v, eps) } setMethod("hinverse", "tCopula", hinverseTCopula) hinverseClaytonCopula <- function (copula, u, v, eps = .Machine$double.eps^0.25) { theta <- min(copula@parameters, 75) .Call(C_hinverseClaytonCopula, theta, u, v, eps) } setMethod("hinverse", "claytonCopula", hinverseClaytonCopula) hinverseFrankCopula <- function (copula, u, v, eps) { theta <- max(min(copula@parameters, 45), -45) .Call(C_hinverseFrankCopula, theta, u, v, eps) } setMethod("hinverse", "frankCopula", hinverseFrankCopula)
/scratch/gouwar.j/cran-all/cranData/vines/R/hinverse.R
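# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): hinverse()
# inverts h() in its first argument, which is the core step of the simulation
# algorithms in rvine(). Copulas without a dedicated method (e.g. the Gumbel
# copula) fall back to the generic uniroot()-based solver, so their inverse is
# only accurate up to the root-finding tolerance.
library(vines)
library(copula)

cop <- claytonCopula(2)
v <- rep(0.4, 5)
u <- c(0.05, 0.25, 0.5, 0.75, 0.95)
x <- hinverse(cop, u, v)
max(abs(h(cop, x, v) - u))    # should be numerically negligible
# ---------------------------------------------------------------------------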
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setGeneric("pvine", function (vine, u) standardGeneric("pvine"), signature = "vine") pCVineDVine <- function (vine, u) { if (is.vector(u)) u <- matrix(u, nrow = 1) pdf <- function (x) dvine(vine, x) lower <- rep(0, vine@dimension) cdf <- function (x) { integral <- adaptIntegrate(pdf, lower, x, tol = 0.01)$integral min(max(0, integral), 1) } apply(u, 1, cdf) } setMethod("pvine", "CVine", pCVineDVine) setMethod("pvine", "DVine", pCVineDVine)
/scratch/gouwar.j/cran-all/cranData/vines/R/pvine.R
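# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): pvine()
# obtains the CDF by numerically integrating dvine() over [0, u] with
# adaptIntegrate() from the cubature package, so it is exact only up to the
# integration tolerance and is best reserved for low dimensions.
library(vines)
library(copula)

vine <- CVine(dimension = 3, trees = 2,
              copulas = matrix(list(normalCopula(0.5)), 2, 2))
pvine(vine, c(0.5, 0.5, 0.5))   # P(U1 <= 0.5, U2 <= 0.5, U3 <= 0.5)
pvine(vine, c(1, 1, 1))         # close to 1
# ---------------------------------------------------------------------------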
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setGeneric("rvine", function (vine, n) standardGeneric("rvine"), signature = "vine") rCVine <- function (vine, n) { # Algorithm 1 of Aas, K., Czado, C., Frigessi, A. & Bakken, H. # Pair-copula constructions of multiple dependence. Insurance # Mathematics and Economics, 2009, Vol. 44, pp. 182-198. d <- vine@dimension if (vine@trees == 0) { # Independent vine. result <- matrix(runif(n * d), n, d) colnames(result) <- dimnames(vine) return(result) } v <- matrix(NA, d, d) w <- matrix(runif(n * d), n, d) result <- matrix(NA, n, d) colnames(result) <- dimnames(vine) result[ , 1] <- w[ , 1] for (s in seq(length = n)) { # Loop over samples. v[1, 1] <- result[s, 1] for (i in seq(from = 2, to = d)) { # Loop over the variables. v[i, 1] <- w[s, i] for (k in seq(from = min(vine@trees, i-1), to = 1)) { v[i, 1] <- hinverse(vine@copulas[[k, i-k]], v[i, 1], v[k, k]) } result[s, i] <- v[i, 1] if (i == d) break for (j in seq(from = 1, to = min(vine@trees, i-1))) { v[i, j+1] <- h(vine@copulas[[j, i-j]], v[i, j], v[j, j]) } } } result } setMethod("rvine", "CVine", rCVine) rDVine <- function (vine, n) { # Algorithm 2 of Aas, K., Czado, C., Frigessi, A. & Bakken, H. # Pair-copula constructions of multiple dependence. Insurance # Mathematics and Economics, 2009, Vol. 44, pp. 182-198. d <- vine@dimension if (vine@trees == 0) { # Independent vine. result <- matrix(runif(n * d), n, d) colnames(result) <- dimnames(vine) return(result) } w <- matrix(runif(n * d), n, d) v <- matrix(NA, d, max(2 * d - 4, d)) result <- matrix(NA, n, d) colnames(result) <- dimnames(vine) result[ , 1] <- w[ , 1] result[ , 2] <- hinverse(vine@copulas[[1, 1]], w[ , 2], w[ , 1]) # Stop if there are only 2 variables. if (d == 2) return(result) for (s in seq(length = n)) { # Loop over samples. v[1, 1] <- result[s, 1] v[2, 1] <- result[s, 2] v[2, 2] <- h(vine@copulas[[1, 1]], v[1, 1], v[2, 1]) for (i in seq(from = 3, to = d)) { # Loop over the variables. v[i, 1] <- w[s, i] if (vine@trees >= 2) { for (k in seq(from = min(vine@trees, i-1), to = 2)) { v[i, 1] <- hinverse(vine@copulas[[k, i-k]], v[i, 1], v[i-1, 2*k-2]) } } v[i, 1] <- hinverse(vine@copulas[[1, i-1]], v[i, 1], v[i-1, 1]) result[s, i] <- v[i, 1] if (i == d) break if (vine@trees >= 2) v[i, 2] <- h(vine@copulas[[1, i-1]], v[i-1, 1], v[i, 1]) if (vine@trees >= 3) v[i, 3] <- h(vine@copulas[[1, i-1]], v[i, 1], v[i-1, 1]) if (vine@trees >= 3 && i > 3) { for (j in seq(from = 2, to = min(vine@trees-1, i-2))) { v[i, 2*j] <- h(vine@copulas[[j, i-j]], v[i-1, 2*j-2], v[i, 2*j-1]) v[i, 2*j+1] <- h(vine@copulas[[j, i-j]], v[i, 2*j-1], v[i-1, 2*j-2]) } } if (vine@trees >= i) v[i, 2*i-2] <- h(vine@copulas[[i-1, 1]], v[i-1, 2*i-4], v[i, 2*i-3]) } } result } setMethod("rvine", "DVine", rDVine)
/scratch/gouwar.j/cran-all/cranData/vines/R/rvine.R
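# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): rvine()
# implements Algorithms 1 and 2 of Aas et al. (2009) and returns an n x d
# matrix on the copula scale; marginal quantile functions (e.g. qnorm()) can
# then be applied column-wise if data on another scale are needed. The
# copulas below are arbitrary choices for illustration.
library(vines)
library(copula)

copulas <- matrix(list(indepCopula()), 2, 2)
copulas[[1, 1]] <- normalCopula(0.8)
copulas[[1, 2]] <- claytonCopula(3)
copulas[[2, 1]] <- frankCopula(2)
vine <- DVine(dimension = 3, trees = 2, copulas = copulas)
set.seed(1)
u <- rvine(vine, 500)
cor(u, method = "kendall")   # strong (1,2) and (2,3) association, weaker (1,3)
# ---------------------------------------------------------------------------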
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. toStringCopula <- function (x, ...) { if (length(x@parameters) > 0) { parts <- character(0) for (k in seq(along = x@parameters)) { parts <- c(parts, paste(x@param.names[k], "=", x@parameters[k])) } parts <- paste(parts, collapse = ", ") parameters <- paste(" (", parts, ")", sep = "") } else { parameters <- "" } paste(sub('[[:space:]]+$', '', x@fullname), parameters, sep = "") } setMethod("toString", "copula", toStringCopula) showVine <- function (object) { cat("Vine\n\n") cat("Type:", object@type, "\n") cat("Dimension:", object@dimension, "\n") cat("Dependency trees:", object@trees, "\n") } setMethod("show", "Vine", showVine) showCVine <- function (object) { showVine(object) if (length(object@dimensionNames) > 0) { dimNames <- object@dimensionNames } else { dimNames <- as.character(seq(length = object@dimension)) } for (j in seq(length = object@trees)) { cat("\n") for (i in seq(length = object@dimension - j)) { conditioned <- paste(dimNames[j], dimNames[j + i], sep = ",") conditioning <- paste(dimNames[seq(length = j - 1)], collapse = ",") copulaLabel <- paste(conditioned, if (j > 1) paste("|", conditioning, sep = "") else character(0), sep = "") cat(copulaLabel, ": ", toString(object@copulas[[j, i]]), "\n", sep = "") } } } setMethod("show", "CVine", showCVine) showDVine <- function (object) { showVine(object) if (length(object@dimensionNames) > 0) { dimNames <- object@dimensionNames } else { dimNames <- as.character(seq(length = object@dimension)) } for (j in seq(length = object@trees)) { cat("\n") for (i in seq(length = object@dimension - j)) { conditioned <- paste(dimNames[i], dimNames[i + j], sep = ",") conditioning <- paste(dimNames[seq(from = i + 1, to = i + j - 1)], collapse = ",") copulaLabel <- paste(conditioned, if (j > 1) paste("|", conditioning, sep = "") else character(0), sep = "") cat(copulaLabel, ": ", toString(object@copulas[[j, i]]), "\n", sep = "") } } } setMethod("show", "DVine", showDVine)
/scratch/gouwar.j/cran-all/cranData/vines/R/show.R
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setClass("vineFit", representation = representation( vine = "Vine", observations = "numeric", method = "character")) showVineFit <- function (object) { cat("Vine Inference\n\n") cat("Method:", object@method, "\n") cat("Vine type:", object@vine@type, "\n") cat("Dimension:", object@vine@dimension, "\n") cat("Observations:", object@observations, "\n") } setMethod("show", "vineFit", showVineFit) vineFit <- function (type, data, method = "ml", ...) { if (type %in% c("CVine", "DVine") && identical(method, "ml")) { vineFitML(type, data, ...) } else { stop("invalid fit method ", dQuote(method), " for ", dQuote(type)) } }
/scratch/gouwar.j/cran-all/cranData/vines/R/vineFit.R
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setClass("vineFitML", contains = "vineFit", representation = representation( optimMethod = "character", optimConv = "numeric", startParams = "numeric", finalParams = "numeric"), prototype = prototype( method = "ml")) showVineFitML <- function (object) { showVineFit(object) cat("Optimization method:", object@optimMethod, "\n") cat("Convergence code:", object@optimConv, "\n") } setMethod("show", "vineFitML", showVineFitML) loglikCopulaWrapper <- function(param, x, copula, ...) { if (is(copula, "normalCopula") || is(copula, "tCopula")) { # Return a finite value for rho in {-1, 1} for numerical stability # during the vineLogLik and vineLogLikLastTree calls. eps <- .Machine$double.eps^0.5 param[1] <- max(min(param[1], 1 - eps), -1 + eps) } loglikCopula(param, x, copula, ...) } vineLogLik <- function (vine, data) { evalCopula <- function (vine, j, i, x, y) { copula <- vine@copulas[[j, i]] loglikCopulaWrapper(copula@parameters, cbind(x, y), copula) } vineIterResult <- vineIter(vine, data, evalCopula = evalCopula) sum(unlist(vineIterResult$evals)) } # Function used by the AIC and BIC truncation methods to evaluate # the log-likelihood of the copulas in the last tree. vineLogLikLastTree <- function (vine, data) { evalCopula <- function (vine, j, i, x, y) { if (j == vine@trees) { copula <- vine@copulas[[j, i]] loglikCopulaWrapper(copula@parameters, cbind(x, y), copula) } else { 0 } } vineIterResult <- vineIter(vine, data, evalCopula = evalCopula) sum(unlist(vineIterResult$evals)) } truncVineAIC <- function (smallModel, fullModel, data) { p <- length(vineParameters(fullModel)) - length(vineParameters(smallModel)) 0 <= -2*vineLogLikLastTree(fullModel, data) + 2*p } truncVineBIC <- function (smallModel, fullModel, data) { k <- log(nrow(data)) p <- length(vineParameters(fullModel)) - length(vineParameters(smallModel)) 0 <= -2*vineLogLikLastTree(fullModel, data) + k*p } vineFitML <- function (type, data, trees = ncol(data) - 1, truncMethod = "", selectCopula = function (vine, j, i, x, y) indepCopula(), optimMethod = "Nelder-Mead", optimControl = list()) { if (nzchar(truncMethod)) { if (identical(truncMethod, "AIC")) { truncVine <- truncVineAIC } else if (identical(truncMethod, "BIC")) { truncVine <- truncVineBIC } else { stop("invalid vine truncation method ", dQuote(truncMethod)) } } else { truncVine <- NULL } # Compute starting values for the parameters of the copulas in the # pair-copula construction following the estimation procedure described in # Section 7 of Aas, K., Czado, C., Frigessi, A. and Bakken, H. Pair-copula # constructions of multiple dependence. Insurance Mathematics and Economics, # 2009, Vol. 44, pp. 182-198. 
vine <- Vine(type, dimension = ncol(data), trees = trees, copulas = matrix(list(), ncol(data) - 1, ncol(data) - 1)) dimnames(vine) <- colnames(data) vineIterResult <- vineIter(vine, data, selectCopula = selectCopula, truncVine = truncVine) vine <- vineIterResult$vine startParams <- vineParameters(vine) if (nzchar(optimMethod) && length(startParams) > 0) { # Optimization enabled. # Bounds must match the order returned by vineParameters. lowerParams <- numeric(0) upperParams <- numeric(0) for (j in seq(nrow(vine@copulas))) { for (i in seq(ncol(vine@copulas))) { if (is(vine@copulas[[j,i]], "copula")) { lowerParams <- c(lowerParams, vine@copulas[[j,i]]@param.lowbnd) upperParams <- c(upperParams, vine@copulas[[j,i]]@param.upbnd) } } } if (identical(optimMethod, "L-BFGS-B")) { lower <- lowerParams upper <- upperParams } else { lower <- -Inf upper <- Inf } logLik <- function (x, vine, data, lowerParams, upperParams) { if (all(is.finite(x) & x >= lowerParams & x <= upperParams)) { vineParameters(vine) <- x vineLogLik(vine, data) } else { NA } } optimControl <- c(optimControl, fnscale = -1) optimResult <- optim(startParams, logLik, lower = lower, upper = upper, method = optimMethod, control = optimControl, vine = vine, data = data, lowerParams = lowerParams, upperParams = upperParams) vineParameters(vine) <- optimResult$par fit <- new("vineFitML", vine = vine, observations = nrow(data), optimMethod = optimMethod, optimConv = optimResult$convergence, startParams = startParams, finalParams = optimResult$par) } else { # Optimization disabled or a vine without parameters. fit <- new("vineFitML", vine = vine, observations = nrow(data), optimMethod = optimMethod, optimConv = 0, startParams = startParams, finalParams = startParams) } fit }
/scratch/gouwar.j/cran-all/cranData/vines/R/vineFitML.R
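# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): vineFit() with
# method = "ml" builds the vine tree by tree, calling the user-supplied
# selectCopula() on the pseudo-observations of each edge to obtain starting
# copulas, and then re-optimizes all parameters jointly with optim(). The
# selectNormal() helper below is a deliberately simple assumption made for
# illustration: it always returns a Gaussian pair-copula with its parameter
# set from Kendall's tau via the closed-form relation rho = sin(pi * tau / 2).
library(vines)
library(copula)

set.seed(1)
u <- rvine(DVine(3, copulas = matrix(list(normalCopula(0.6)), 2, 2)), 200)
selectNormal <- function (vine, j, i, x, y) {
    tau <- cor(x, y, method = "kendall")
    normalCopula(sin(pi * tau / 2))
}
fit <- vineFit("DVine", u, method = "ml",
               selectCopula = selectNormal, optimMethod = "Nelder-Mead")
show(fit)
vineParameters(fit@vine)   # jointly estimated pair-copula parameters
# ---------------------------------------------------------------------------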
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setClass("vineGoF", representation = representation( method = "character", pvalue = "numeric", statistic = "numeric")) showGofVine <- function (object) { cat("Vine Goodness-of-fit Test\n\n") cat("Method:", object@method, "\n") cat("Statistic:", object@statistic, "with p-value", object@pvalue, "\n") } setMethod("show", "vineGoF", showGofVine) vineGoFPIT <- function (vine, data, statistic = "Breymann") { Z <- vinePIT(vine, data) if (identical(statistic, "Breymann")) { n <- ncol(Z) S <- rowSums(qnorm(Z) ^ 2) adResult <- ad.test(S, pchisq, df = n) new("vineGoF", method = "PIT and the Breymann et al. (2003) statistic", pvalue = adResult$p.value, statistic = adResult$statistic) } else { stop("invalid statistic ", dQuote(statistic), " for the goodness-of-fit method based on the PIT") } } vineGoF <- function (vine, data, method = "PIT", ...) { if (identical(method, "PIT")) { vineGoFPIT(vine, data, ...) } else { stop("invalid goodness-of-fit method ", dQuote(method)) } }
/scratch/gouwar.j/cran-all/cranData/vines/R/vineGoF.R
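# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): vineGoF()
# first applies the vine's probability integral transform (vinePIT) and then
# the Breymann et al. (2003) aggregation; under the null the sum of squared
# normal scores is chi-squared with d degrees of freedom, which is checked
# with an Anderson-Darling test (ad.test from ADGofTest).
library(vines)
library(copula)

vine <- CVine(3, copulas = matrix(list(claytonCopula(2)), 2, 2))
set.seed(1)
u <- rvine(vine, 300)
gof <- vineGoF(vine, u, method = "PIT", statistic = "Breymann")
show(gof)   # a large p-value is expected because the data come from this vine
# ---------------------------------------------------------------------------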
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setGeneric("vineIter", function (vine, data, evalCopula = NULL, selectCopula = NULL, truncVine = NULL) standardGeneric("vineIter"), signature = "vine") iterCVine <- function (vine, data, evalCopula, selectCopula, truncVine) { # Algorithm 3 of Aas, K., Czado, C., Frigessi, A. & Bakken, H. # Pair-copula constructions of multiple dependence. Insurance # Mathematics and Economics, 2009, Vol. 44, pp. 182-198. if (vine@trees == 0) { return(list(vine = vine, evals = list())) } # The indexes of the second dimension of the v array differs with # the indexes of the first dimension of the v array in Algorithm 3 # because of R 1-based indexing. evals <- list() d <- vine@dimension v <- array(NA, c(nrow(data), d-1, d)) for (i in seq(length = d)) { v[ , 1, i] <- data[ , i] } for (j in seq(length = d-1)) { if (is.function(truncVine)) { # Save the previous model before the next tree is constructed. smallModel <- vine smallModel@trees <- j-1 } for (i in seq(length = d-j)) { x <- v[ , j, 1] y <- v[ , j, i+1] if (is.function(selectCopula)) { vine@copulas[[j, i]] <- selectCopula(vine, j, i, x, y) } if (is.function(evalCopula)) { evals <- c(evals, list(evalCopula(vine, j, i, x, y))) } } if (is.function(truncVine)) { # Check if the last expanded tree is required or if the vine # should be truncated on the previous tree. fullModel <- vine fullModel@trees <- j if (truncVine(smallModel, fullModel, data)) { return(list(vine = smallModel, evals = evals)) } } if (j == vine@trees || j == d-1) { vine@trees <- j return(list(vine = vine, evals = evals)) } # Compute observations for the next tree. for (i in seq(length = d-j)) { v[ , j+1, i] <- h(vine@copulas[[j, i]], v[ , j, i+1], v[ , j, 1]) } } } setMethod("vineIter", "CVine", iterCVine) iterDVine <- function (vine, data, evalCopula, selectCopula, truncVine) { # Algorithm 4 of Aas, K., Czado, C., Frigessi, A. & Bakken, H. # Pair-copula constructions of multiple dependence. Insurance # Mathematics and Economics, 2009, Vol. 44, pp. 182-198. if (vine@trees == 0) { return(list(vine = vine, evals = list())) } # The indexes of the second dimension of the v array differs with # the indexes of the first dimension of the v array in Algorithm 4 # because of R 1-based indexing. evals <- list() d <- vine@dimension v <- array(NA, c(nrow(data), d, max(2*d-4, d))) if (is.function(truncVine)) { # Save the vine without trees. smallModel <- vine smallModel@trees <- 0 } for (i in seq(length = d)) { v[ , 1, i] <- data[ , i] } for (i in seq(length = d-1)) { x <- v[ , 1, i] y <- v[ , 1, i+1] if (is.function(selectCopula)) { vine@copulas[[1, i]] <- selectCopula(vine, 1, i, x, y) } if (is.function(evalCopula)) { evals <- c(evals, list(evalCopula(vine, 1, i, x, y))) } } if (is.function(truncVine)) { # Truncate? 
If true, return the vine without trees. fullModel <- vine fullModel@trees <- 1 if (truncVine(smallModel, fullModel, data)) { return(list(vine = smallModel, evals = evals)) } } if (vine@trees == 1 || d == 2) { vine@trees <- 1 return(list(vine = vine, evals = evals)) } # Compute observations for the second tree. v[ , 2, 1] <- h(vine@copulas[[1, 1]], v[ , 1, 1], v[ , 1, 2]) for (k in seq(length = max(d-3, 0))) { v[ , 2, 2*k] <- h(vine@copulas[[1, k+1]], v[ , 1, k+2], v[ , 1, k+1]) v[ , 2, 2*k+1] <- h(vine@copulas[[1, k+1]], v[ , 1, k+1], v[ , 1, k+2]) } v[ , 2, 2*d-4] <- h(vine@copulas[[1, d-1]], v[ , 1, d], v[ , 1, d-1]) for (j in seq(from = 2, length = d-2)) { if (is.function(truncVine)) { # Save the previous model before the next tree is constructed. smallModel <- vine smallModel@trees <- j-1 } for (i in seq(length = d-j)) { x <- v[ , j, 2*i-1] y <- v[ , j, 2*i] if (is.function(selectCopula)) { vine@copulas[[j, i]] <- selectCopula(vine, j, i, x, y) } if (is.function(evalCopula)) { evals <- c(evals, list(evalCopula(vine, j, i, x, y))) } } if (is.function(truncVine)) { # Check if the last expanded tree is required or if the vine # should be truncated on the previous tree. fullModel <- vine fullModel@trees <- j if (truncVine(smallModel, fullModel, data)) { return(list(vine = smallModel, evals = evals)) } } if (j == vine@trees || j == d-1) { vine@trees <- j return(list(vine = vine, evals = evals)) } # Compute observations for the next tree. v[ , j+1, 1] <- h(vine@copulas[[j, 1]], v[ , j, 1], v[ , j, 2]) if (d > 4) { for (i in seq(length = d-j-2)) { v[ , j+1, 2*i] <- h(vine@copulas[[j, i+1]], v[ , j, 2*i+2], v[ , j, 2*i+1]) v[ , j+1, 2*i+1] <- h(vine@copulas[[j, i+1]], v[ , j, 2*i+1], v[ , j, 2*i+2]) } } v[ , j+1, 2*d-2*j-2] <- h(vine@copulas[[j, d-j]], v[ , j, 2*d-2*j], v[ , j, 2*d-2*j-1]) } } setMethod("vineIter", "DVine", iterDVine)
/scratch/gouwar.j/cran-all/cranData/vines/R/vineIter.R
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. vineOrderGreedy <- function (type, data, according = "kendall") { n <- ncol(data) if (according %in% c("pearson", "kendall", "spearman")) { # Calculate the value of the given measure of association between # each pair of variables and couple the variables with the largest # absolute values. weights <- 1 - abs(cor(data, method = according)) } else if (according %in% c("df")) { # Fit bivariate t copulas to each pair of variables and couple # the variables with the smaller degrees of freedom. weights <- matrix(0, n, n) for (currentRoot in seq(length = n)) { for (j in seq(length = max(currentRoot - 1, 0))) { x <- data[ , currentRoot] y <- data[ , j] copula <- tCopula(0) rho <- calibKendallsTau(copula, cor(x, y, method = "kendall")) eps <- .Machine$double.eps^0.5 rho <- max(min(rho, 1 - eps), -1 + eps) L <- function (df) loglikCopula(c(rho, df), cbind(x, y), copula) df <- optimize(L, c(1, 30), maximum = TRUE)$maximum weights[currentRoot, j] <- df weights[j, currentRoot] <- df } } } else { stop("invalid value ", dQuote(according), " for the according argument") } # Couple the pairs with the minimum values in the values matrix. if (identical(type, "DVine")) { tsp <- insert_dummy(as.TSP(weights), label = "dummy") tour <- solve_TSP(tsp, method = "cheapest_insertion") order <- cut_tour(tour, "dummy") names(order) <- NULL } else if (identical(type, "CVine")) { root <- which.min(colSums(weights)) order <- c(root, seq(to = n)[-root]) } order } vineOrderRandom <- function (type, data) { n <- ncol(data) sample(n, n) } vineOrder <- function (type, data, method = "greedy", ...) { if (type %in% c("CVine", "DVine") && identical(method, "greedy")) { vineOrderGreedy(type, data, ...) } else if (identical(method, "random")) { vineOrderRandom(type, data) } else { stop("invalid ordering method ", dQuote(method), " for ", dQuote(type)) } }
/scratch/gouwar.j/cran-all/cranData/vines/R/vineOrder.R
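# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): vineOrder()
# only returns a permutation of the columns; reordering the data is left to
# the caller. With method = "greedy" the weights are 1 - |association|, so a
# D-vine orders the variables along a TSP-style path through those weights,
# while a C-vine puts the most associated variable at the root of tree 1.
library(vines)
library(copula)

set.seed(1)
u <- rvine(DVine(4, copulas = matrix(list(normalCopula(0.5)), 3, 3)), 200)
ord <- vineOrder("DVine", u, method = "greedy", according = "kendall")
ord                    # a permutation of 1:4
u_ordered <- u[, ord]  # reorder before fitting
# ---------------------------------------------------------------------------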
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. setGeneric("vinePIT", function (vine, u) standardGeneric("vinePIT"), signature = "vine") CVinePIT <- function (vine, u) { if (is.vector(u)) u <- matrix(u, nrow = 1) if (vine@trees == 0) { return(u) } T <- nrow(u) d <- vine@dimension z <- matrix(NA, T, d) z[ , 1] <- u[ , 1] for (t in seq(length = T)) { for (i in seq(from = 2, to = d)) { z[t, i] <- u[t, i] for (j in seq(length = min(vine@trees, i - 1))) { z[t, i] <- h(vine@copulas[[j, i-j]], z[t, i], z[t, j]) } } } z } setMethod("vinePIT", "CVine", CVinePIT) DVinePIT <- function (vine, u) { if (is.vector(u)) u <- matrix(u, nrow = 1) if (vine@trees == 0) { return(u) } T <- nrow(u) d <- vine@dimension v <- matrix(NA, d, max(2 * d - 4, d)) z <- matrix(NA, T, d) z[ , 1] <- u[ , 1] z[ , 2] <- h(vine@copulas[[1, 1]], u[ , 2], u[ , 1]) # Stop if there are only 2 variables. if (d == 2) return(z) for (t in seq(length = T)) { v[2, 1] <- u[t, 2] if (vine@trees >= 2) v[2, 2] <- h(vine@copulas[[1, 1]], u[t, 1], u[t, 2]) for (i in seq(from = 3, to = d)) { z[t, i] <- h(vine@copulas[[1, i-1]], u[t, i], u[t, i-1]) if (vine@trees >= 2) { for (j in seq(from = 2, to = min(vine@trees, i-1))) { z[t, i] <- h(vine@copulas[[j, i-j]], z[t, i], v[i-1, 2*(j-1)]) } } if (i == d) break v[i, 1] <- u[t, i] if (vine@trees >= 2) v[i, 2] <- h(vine@copulas[[1, i-1]], v[i-1, 1], v[i, 1]) if (vine@trees >= 3) v[i, 3] <- h(vine@copulas[[1, i-1]], v[i, 1], v[i-1, 1]) if (vine@trees >= 3 && i > 3) { for(j in seq(length = min(vine@trees-2, i-3))) { v[i, 2*j+2] <- h(vine@copulas[[j+1, i-j-1]], v[i-1, 2*j], v[i, 2*j+1]) v[i, 2*j+3] <- h(vine@copulas[[j+1, i-j-1]], v[i, 2*j+1], v[i-1, 2*j]) } } if (vine@trees >= i) v[i, 2*i-2] <- h(vine@copulas[[i-1, 1]], v[i-1, 2*i-4], v[i, 2*i-3]) } } z } setMethod("vinePIT", "DVine", DVinePIT)
/scratch/gouwar.j/cran-all/cranData/vines/R/vinePIT.R
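# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package sources above): vinePIT() is
# the Rosenblatt-type probability integral transform used by the
# goodness-of-fit test; when the data really come from the vine, the
# transformed columns are approximately independent Uniform(0,1) variables.
library(vines)
library(copula)

vine <- DVine(3, copulas = matrix(list(claytonCopula(1.5)), 2, 2))
set.seed(1)
u <- rvine(vine, 500)
z <- vinePIT(vine, u)
round(cor(z, method = "kendall"), 2)   # off-diagonal entries near zero
# ---------------------------------------------------------------------------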
# vines: Multivariate Dependence Modeling with Vines # Copyright (C) 2011-2015 Yasser Gonzalez Fernandez # Copyright (C) 2011-2015 Marta Soto Ortiz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. vineParameters <- function (vine) { parameters <- numeric(0) f <- function (x) if (is(x, "copula")) x@parameters else numeric(0) for (j in seq(nrow(vine@copulas))) { for (i in seq(ncol(vine@copulas))) { parameters <- c(parameters, f(vine@copulas[[j,i]])) } } parameters } `vineParameters<-` <- function (vine, value) { k <- 1 for (j in seq(nrow(vine@copulas))) { for (i in seq(ncol(vine@copulas))) { if (is(vine@copulas[[j,i]], "copula")) { n <- length(vine@copulas[[j,i]]@parameters) if (n > 0) { parameters <- value[seq(from = k, to = k+n-1)] vine@copulas[[j,i]]@parameters <- parameters k <- k+n } } } } vine }
/scratch/gouwar.j/cran-all/cranData/vines/R/vineParameters.R
#' Dummy dataset
#'
#' A dummy dataset for examples and tests
#'
#' @format A data frame with 1,039 rows and 5 variables
#' \describe{
#'   \item{STRATUM}{sampling strata}
#'   \item{TREATMENT}{participant names}
#'   \item{RESPONSE_1}{numeric response variable 1}
#'   \item{RESPONSE_2}{numeric response variable 2}
#'   \item{DATE}{date of measurements in YYYYMMDD format}
#' }
"dummy_data"
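# A minimal usage sketch for the bundled data (kept as comments so nothing runs
# at package load time), reusing parse_formula() and plot_violin_1x() as in the
# @examples of the other source files; whether plot_violin_1x() is exported is
# an assumption here.
#
# data(dummy_data)
# DF = parse_formula(formula=RESPONSE_1 ~ TREATMENT, data=dummy_data)
# plot_violin_1x(dat=DF, response_variable_name="RESPONSE_1",
#                explanatory_variable_name="TREATMENT")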
/scratch/gouwar.j/cran-all/cranData/violinplotter/R/data.R
# Tukey's mean comparison and grouping using fixed effect linear modeling
#
# @usage mean_comparison_HSD(formula, data=NULL, explanatory_variable_name, alpha=0.05,
#     LOG=FALSE, BASE=10, PLOT=FALSE)
#
# @param formula R's compact symbolic form to represent linear models with fixed additive and interaction effects (See ?formula for more information) [mandatory]
# @param data data.frame containing the response and explanatory variables which form the formula above [default=NULL]
# @param explanatory_variable_name string referring to the variable name of the explanatory variable whose class means will be compared [mandatory]
# @param alpha numeric significance level for Tukey's mean comparison [default=0.05]
# @param LOG logical referring to whether to transform the explanatory variable into the logarithm scale [default=FALSE]
# @param BASE numeric referring to the logarithm base to transform the explanatory variable with [default=10]
# @param PLOT logical referring to whether or not to plot the mean comparison grouping letters into an existing plot [default=FALSE]
#
# @return Tukey's honest significant difference grouping table with response variable categorical means, grouping, level names and corresponding numeric counterparts
# @return Appends honest significant difference grouping letters into an existing plot
#
# @examples
# x1 = rep(rep(rep(c(1:5), each=5), times=5), times=5)
# x2 = rep(rep(letters[6:10], each=5*5), times=5)
# x3 = rep(letters[11:15], each=5*5*5)
# y = rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1))
# data = data.frame(x1, x2, x3, y)
# formula = y ~ x1 + (x2:x3)
# DF = parse_formula(formula=formula, data=data)
# plot_violin_1x(dat=DF, response_variable_name="y", explanatory_variable_name="x2:x3")
# HSD = mean_comparison_HSD(formula, data=data, explanatory_variable_name="x2:x3", PLOT=TRUE)
#
#' @importFrom stats aov anova sd
#' @importFrom graphics text
#' @importFrom stats as.formula terms.formula
#' @importFrom stats aggregate TukeyHSD
#
mean_comparison_HSD = function(formula, data=NULL, explanatory_variable_name, alpha=0.05, LOG=FALSE, BASE=10, PLOT=FALSE) {
    ### FOR TESTING:
    # data=data.frame(x1=rep(rep(rep(c(1:5), each=5), times=5), times=5), x2=rep(rep(letters[6:10], each=5*5), times=5), x3=rep(letters[11:15], each=5*5*5), y=rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1)))
    # formula=y ~ x1 + (x2:x3); explanatory_variable_name="x2:x3"; alpha=0.05; LOG=FALSE; BASE=10; PLOT=FALSE; SHOW_SAMPLE_SIZE=FALSE
    ### parse the formula and generate the dataframe with explicit interaction terms if expressed in the formula
    df = parse_formula(formula=formula, data=data, IMPUTE=FALSE, IMPUTE_METHOD=mean)
    response_var = df[,1]; response_var_name = colnames(df)[1]
    ### rename interaction effects and their levels by replacing ":" with "_"
    colnames(df) = gsub(":", "_", colnames(df))
    explanatory_variable_name = gsub(":", "_", explanatory_variable_name)
    terms = attr(terms.formula(formula), "term.labels")
    formula = paste0(unlist(strsplit(format(formula), "~"))[1], "~", paste(terms, collapse="+"))
    formula = as.formula(gsub(":", "_", format(formula)))
    eval(parse(text=paste0("df$`", explanatory_variable_name, "` = gsub(':', '_', df$`", explanatory_variable_name, "`)")))
    ### linear modelling
    mod = aov(formula, data=df)
    anova_table = as.data.frame(anova(mod))
    if (anova_table$Pr[rownames(anova_table) == explanatory_variable_name] < alpha){
        message(paste0(explanatory_variable_name, " has a significant effect on the response variable!"))
    } else {
        message(paste0(explanatory_variable_name, " has no significant effect on the response variable!"))
        return(0)
    }
    ### compute the means per explanatory variable level
    means = eval(parse(text=paste0("aggregate(", response_var_name, "~ `", explanatory_variable_name, "`, data=df, FUN=mean)")))
    colnames(means) = c("LEVELS", "MEANS")
    means = means[order(means$MEANS, decreasing=TRUE), ]
    ### compute the HSD pairwise comparison
    tryCatch(eval(parse(text=paste0("mod$model$`", explanatory_variable_name, "` = as.factor(mod$model$`", explanatory_variable_name, "`)"))), error=function(e){})
    hsd = suppressWarnings(eval(parse(text=paste0("as.data.frame(TukeyHSD(mod, conf.level=", 1.00-alpha, ")$`", explanatory_variable_name, "`)"))))
    ### add the "LEVEL_" string so that explanatory variables that are originally numeric can be easily set as list names
    factor_labels = matrix(paste0("LEVEL_", unlist(strsplit(rownames(hsd), "-"))), ncol=2, byrow=TRUE)
    hsd$factor1 = factor_labels[,1]
    hsd$factor2 = factor_labels[,2]
    factors_all = paste0("LEVEL_", as.character(means$LEVELS))
    ### initialize the list of HSD groupings, one entry per explanatory variable level
    GROUPING_LIST = eval(parse(text=paste0("list('LEVEL_", paste(as.character(means$LEVELS), collapse="'=c(), 'LEVEL_"), "'=c())")))
    ### generate the vector of letters and numbers for grouping
    letters_vector = c(letters, LETTERS)
    if (length(letters_vector) < length(GROUPING_LIST)){
        letters_vector = c(letters_vector, 1:(length(GROUPING_LIST)-length(letters_vector)))
    } else {
        letters_vector = letters_vector[1:length(GROUPING_LIST)]
    }
    ### find the centres
    letter_counter = 1
    i = 1
    GROUPING_LIST[[i]] = letters_vector[letter_counter]
    while (i<length(factors_all)){
        f1 = factors_all[i]
        for (j in (i+1):length(factors_all)){
            # j = 2
            f2 = factors_all[j]
            p = hsd$`p adj`[((hsd$factor1==f1)&(hsd$factor2==f2)) | ((hsd$factor1==f2)&(hsd$factor2==f1))]
            if (p < alpha){
                letter_counter = letter_counter + 1
                GROUPING_LIST[[j]] = letters_vector[letter_counter]
                i = j
                break
            }
        }
        i = j
    }
    ### append the letters of the centres to the levels that are not significantly different from them
    for (f in names(GROUPING_LIST)[unlist(lapply(GROUPING_LIST, FUN=function(x){!is.null(x)}))]){
        # f = names(GROUPING_LIST)[unlist(lapply(GROUPING_LIST, FUN=function(x){!is.null(x)}))][3]
        subhsd = hsd[(hsd$factor1==f) | (hsd$factor2==f), ]
        idx = subhsd$`p adj` >= alpha
        if (sum(idx) > 0){
            subhsd = subhsd[idx, ]
            subhsd = unique(c(subhsd$factor1, subhsd$factor2))
            for (g in subhsd[subhsd != f]){
                # g = subhsd[subhsd != f][1]
                l = eval(parse(text=paste0("GROUPING_LIST$", f)))
                eval(parse(text=paste0("GROUPING_LIST$", g, " = paste0(GROUPING_LIST$", g, ",'", l, "')")))
            }
        }
    }
    ### prepare the grouping list
    GROUPING_LIST = as.matrix(lapply(GROUPING_LIST, FUN=paste, collapse=""))
    GROUPING_LIST = data.frame(LEVELS=gsub("LEVEL_", "", as.character(rownames(GROUPING_LIST))), GROUPING=as.character(GROUPING_LIST[,1]))
    ### prepare the explanatory variable names and corresponding numbers
    x_levels = eval(parse(text=paste0("levels(as.factor(df$`", explanatory_variable_name, "`))")))
    x_numbers = tryCatch(as.numeric(gsub("_", "-", as.character(x_levels))), warning=function(e){as.numeric(as.factor(x_levels))})
    if (LOG==TRUE){
        ### log-transform the numeric counterparts of the level names (x_numbers) because we will be merging dataframes below
        if(sum(is.na(suppressWarnings(log(x_numbers, base=BASE)))) == 0){
            x_numbers = log(x_numbers, base=BASE)
        } else {
            x_numbers = log(x_numbers + (abs(min(x_numbers)) + 1), base=BASE)
        }
    }
    X_LEVELS_AND_NUMBERS = data.frame(LEVELS=x_levels, NUMBERS=x_numbers)
    ### merge and append the grouping letters together with the means
    MERGE_GROUPING_DF = merge(merge(GROUPING_LIST, X_LEVELS_AND_NUMBERS, by="LEVELS"), means, by="LEVELS")
    ### plot the HSD grouping letters
    if(PLOT){
        text(x=MERGE_GROUPING_DF$NUMBERS, y=max(response_var)+sd(response_var), lab=as.character(MERGE_GROUPING_DF$GROUPING))
    }
    ### output
    return(MERGE_GROUPING_DF)
}
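# A minimal sketch of how the returned grouping table is typically consumed,
# continuing from the simulated objects built in the @examples above (kept as
# comments so nothing runs at package load time):
#
# HSD = mean_comparison_HSD(formula, data=data, explanatory_variable_name="x2:x3")
# HSD[order(HSD$MEANS, decreasing=TRUE), c("LEVELS", "GROUPING", "MEANS")]
# # levels that share a letter in GROUPING are not significantly different at alpha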
/scratch/gouwar.j/cran-all/cranData/violinplotter/R/mean_comparison_HSD.R
# Mann-Whitney mean comparison - a non-parametric alternative to HSD when the
# treatments are non-normal and/or have non-homogeneous variance
#
# @usage mean_comparison_Mann_Whitney(formula, data=NULL, explanatory_variable_name, alpha=0.05,
#     LOG=FALSE, BASE=10, PLOT=FALSE)
#
# @param formula R's compact symbolic form to represent linear models with fixed additive and interaction effects (See ?formula for more information) [mandatory]
# @param data data.frame containing the response and explanatory variables which form the formula above [default=NULL]
# @param explanatory_variable_name string referring to the variable name of the explanatory variable whose class means will be compared [mandatory]
# @param alpha numeric significance level for the Mann-Whitney mean comparison [default=0.05]
# @param LOG logical referring to whether to transform the explanatory variable into the logarithm scale [default=FALSE]
# @param BASE numeric referring to the logarithm base to transform the explanatory variable with [default=10]
# @param PLOT logical referring to whether or not to plot the mean comparison grouping letters into an existing plot [default=FALSE]
#
# @return Mann-Whitney pairwise mean comparison grouping table with response variable categorical means, grouping, level names and corresponding numeric counterparts
# @return Appends the mean comparison grouping letters into an existing plot
#
# @examples
# x1 = rep(rep(rep(c(1:5), each=5), times=5), times=5)
# x2 = rep(rep(letters[6:10], each=5*5), times=5)
# x3 = rep(letters[11:15], each=5*5*5)
# y = rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1))
# data = data.frame(x1, x2, x3, y)
# formula = y ~ x1 + x2 + x3 + (x2:x3)
# DF = parse_formula(formula=formula, data=data)
# plot_violin_1x(dat=DF, response_variable_name="y", explanatory_variable_name="x3")
# MW = mean_comparison_Mann_Whitney(formula, data=data, explanatory_variable_name="x3", PLOT=TRUE)
#
#' @importFrom stats wilcox.test aggregate sd
#' @importFrom graphics text
#
mean_comparison_Mann_Whitney = function(formula, data=NULL, explanatory_variable_name, alpha=0.05, LOG=FALSE, BASE=10, PLOT=FALSE) {
    ### FOR TESTING:
    # data=NULL; explanatory_variable_name="x3"; alpha=0.05; LOG=FALSE; BASE=10; PLOT=FALSE; SHOW_SAMPLE_SIZE=FALSE
    ### parse the formula and generate the dataframe with explicit interaction terms if expressed in the formula
    df = parse_formula(formula=formula, data=data, IMPUTE=FALSE, IMPUTE_METHOD=mean)
    response_var = df[,1]; response_var_name = colnames(df)[1]
    ### compute the means per explanatory variable level
    means = eval(parse(text=paste0("aggregate(", response_var_name, "~ `", explanatory_variable_name, "`, data=df, FUN=mean)")))
    colnames(means) = c("LEVELS", "MEANS")
    means = means[order(means$MEANS, decreasing=TRUE), ]
    ### prepare the explanatory variable names and corresponding numbers
    x_levels = eval(parse(text=paste0("levels(as.factor(df$`", explanatory_variable_name, "`))")))
    x_numbers = tryCatch(as.numeric(gsub("_", "-", as.character(x_levels))), warning=function(e){as.numeric(as.factor(x_levels))})
    ### compute the Mann-Whitney pairwise mean comparison
    factor1 = c()
    factor2 = c()
    pvalue = c()
    for (i in 1:(length(x_levels)-1)){
        level_1 = x_levels[i]
        idx_1 = eval(parse(text=paste0("df$`", explanatory_variable_name, "` == '", level_1, "'")))
        y_1 = response_var[idx_1]
        for (j in (i+1):length(x_levels)){
            level_2 = x_levels[j]
            idx_2 = eval(parse(text=paste0("df$`", explanatory_variable_name, "` == '", level_2, "'")))
            y_2 = response_var[idx_2]
            ### Mann-Whitney test using the wilcox.test() function
            mann_whitney_out = stats::wilcox.test(x=y_1, y=y_2, alternative="two.sided")
            factor1 = c(factor1, level_1)
            factor2 = c(factor2, level_2)
            pvalue = c(pvalue, mann_whitney_out$p.value)
        }
    }
    ### add the "LEVEL_" string so that explanatory variables that are originally numeric can be easily set as list names
    mw = data.frame(factor1=paste0("LEVEL_", factor1), factor2=paste0("LEVEL_", factor2), p=pvalue)
    factors_all = paste0("LEVEL_", as.character(means$LEVELS))
    ### initialize the list of Mann-Whitney test groupings, one entry per explanatory variable level
    GROUPING_LIST = eval(parse(text=paste0("list('LEVEL_", paste(as.character(means$LEVELS), collapse="'=c(), 'LEVEL_"), "'=c())")))
    ### generate the vector of letters and numbers for grouping
    letters_vector = c(letters, LETTERS, 1:(nrow(mw)^2))
    ### iterate across the explanatory variable levels
    letter_counter = 1
    for (f in factors_all){
        # f = factors_all[1]
        ### subset the current factor level
        submw = mw[(mw$factor1==f) | (mw$factor2==f), ]
        ### identify the factor levels that are not significantly different from the current factor level: f
        nonsigfactors = unique(c(submw$factor1[submw$p > alpha], submw$factor2[submw$p > alpha]))
        nonsigfactors = nonsigfactors[!(nonsigfactors %in% f)]
        ### define the current letter grouping
        letter_add = letters_vector[letter_counter]
        new_letter_bool = 0 ### for testing if we need a new letter
        ### iterate across the factor levels that are not significantly different from the current factor
        for (g in nonsigfactors){
            # g = nonsigfactors[1]
            f_letters = eval(parse(text=paste0("GROUPING_LIST$`", f, "`"))) ### current factor grouping
            g_letters = eval(parse(text=paste0("GROUPING_LIST$`", g, "`"))) ### grouping of the non-significantly different factor level
            ### if we have all significantly different means at the start
            if (is.na(g)){
                eval(parse(text=paste0("GROUPING_LIST$`", f, "` = c(", "GROUPING_LIST$`", g, "`, '", letter_add, "')")))
                new_letter_bool = new_letter_bool + 1
            } else if ( !((sum(f_letters %in% g_letters)>0) | (sum(g_letters %in% f_letters)>0)) | is.null(f_letters) ) {
                ### test if the current factor level shares no letters with the non-significantly different factor level, or if we are at the start
                eval(parse(text=paste0("GROUPING_LIST$`", g, "` = c(", "GROUPING_LIST$`", g, "`, '", letter_add, "')")))
                new_letter_bool = new_letter_bool + 1
            }
        }
        ### add the current letter grouping
        if ((new_letter_bool>0) | (length(nonsigfactors)==0)){
            eval(parse(text=paste0("GROUPING_LIST$`", f, "` = c(", "GROUPING_LIST$`", f, "`, '", letter_add, "')")))
            letter_counter = letter_counter + 1
        }
    }
    ### prepare the grouping list
    GROUPING_LIST = as.matrix(lapply(GROUPING_LIST, FUN=paste, collapse=""))
    GROUPING_LIST = data.frame(LEVELS=gsub("LEVEL_", "", as.character(rownames(GROUPING_LIST))), GROUPING=as.character(GROUPING_LIST[,1]))
    # ### prepare the explanatory variable names and corresponding numbers
    # x_levels = eval(parse(text=paste0("levels(as.factor(df$`", explanatory_variable_name, "`))")))
    # x_numbers = tryCatch(as.numeric(gsub("_", "-", as.character(x_levels))),
    #                      warning=function(e){as.numeric(as.factor(x_levels))})
    if (LOG==TRUE){
        ### log-transform the numeric counterparts of the level names (x_numbers) because we will be merging dataframes below
        if(sum(is.na(suppressWarnings(log(x_numbers, base=BASE)))) == 0){
            x_numbers = log(x_numbers, base=BASE)
        } else {
            x_numbers = log(x_numbers + (abs(min(x_numbers)) + 1), base=BASE)
        }
    }
    X_LEVELS_AND_NUMBERS = data.frame(LEVELS=x_levels, NUMBERS=x_numbers)
    ### merge and append the grouping letters together with the means
    MERGE_GROUPING_DF = merge(merge(GROUPING_LIST, X_LEVELS_AND_NUMBERS, by="LEVELS"), means, by="LEVELS")
    ### plot the grouping letters
    if(PLOT){
        text(x=MERGE_GROUPING_DF$NUMBERS, y=max(response_var)+sd(response_var), lab=as.character(MERGE_GROUPING_DF$GROUPING))
    }
    ### output
    return(MERGE_GROUPING_DF)
}
/scratch/gouwar.j/cran-all/cranData/violinplotter/R/mean_comparison_MannWhitney.R
# Parse an R formula to generate a dataframe where the response and explanatory variables, including interaction terms, are explicitly written into columns
#
# @usage parse_formula(formula, data=NULL, IMPUTE=FALSE, IMPUTE_METHOD=mean)
#
# @param formula R's compact symbolic form to represent linear models with fixed additive and interaction effects (See ?formula for more information) [mandatory]
# @param data data.frame containing the response and explanatory variables which form the formula above [default=NULL]
# @param IMPUTE logical referring to whether to impute missing and infinite datapoints [default=FALSE]
# @param IMPUTE_METHOD imputation function to use [default=mean]
#
# @return Dataframe where the response and explanatory variables, including interaction terms if applicable, are explicitly written into columns
# @return All explanatory variables are converted into categorical variables
#
# @examples
# x1 = rep(rep(rep(c(1:5), each=5), times=5), times=5)
# x2 = rep(rep(letters[6:10], each=5*5), times=5)
# x3 = rep(letters[11:15], each=5*5*5)
# y = rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1))
# data = data.frame(x1, x2, x3, y)
# formula = log(y) ~ exp(x1) + x2 + x3 + (x2:x3)
# DF = parse_formula(formula=formula, data=data)
#
#' @importFrom stats terms complete.cases
#
parse_formula = function(formula, data=NULL, IMPUTE=FALSE, IMPUTE_METHOD=mean){
    ### parse the input formula
    response_var = as.character(unlist(as.list(attr(terms(formula), "variables"))[-1]))[1]
    explanatory_var = as.character(unlist(as.list(attr(terms(formula), "term.labels"))))
    ### attach the data if not NULL
    if (!is.null(data)){
        eval(parse(text=paste0(names(data), " = data$", names(data), ";")))
    }
    ### build the dataframe with explicit interaction variables (columns) if included in the formula
    non_interaction_terms = explanatory_var[!grepl(":", explanatory_var)]
    interaction_terms = explanatory_var[grepl(":", explanatory_var)]
    explanatory_list = list()
    for (i in 1:length(c(non_interaction_terms, interaction_terms))){
        # i = 1
        term = c(non_interaction_terms, interaction_terms)[i]
        explanatory_list[[i]] = eval(parse(text=paste0("paste(", paste(paste0(unlist(strsplit(term, ":"))), collapse=","), ", sep=':')")))
    }
    df = eval(parse(text=paste0("data.frame(y=", response_var, ",", gsub("-", "_", gsub("\"", "'", paste(paste(explanatory_list), collapse=", "))), ")")))
    df = droplevels(df[complete.cases(df), ])
    ### impute missing response variable data?
    if (IMPUTE == TRUE) {
        idx_missing = is.na(df$y) | is.infinite(df$y)
        df$y[idx_missing] = IMPUTE_METHOD(df$y[!idx_missing])
        # eval(parse(text=paste0("data$", response_var, "[idx_missing] = IMPUTE_METHOD(df$", response_var, "[!idx_missing])")))
    }
    df = droplevels(df[complete.cases(df), ])
    colnames(df) = c(response_var, non_interaction_terms, interaction_terms)
    return(df)
}
/scratch/gouwar.j/cran-all/cranData/violinplotter/R/parse_formula.R
# Regress the response variable against one explanatory variable
#
# @usage plot_regression_line(dat, response_variable_name, explanatory_variable_name,
#     LOG=FALSE, BASE=10, PLOT=TRUE, LINE_COL="gray")
#
# @param dat dataframe where the response and explanatory variables, including interaction terms if applicable, are explicitly written into columns (output of the parse_formula() function) [mandatory]
# @param response_variable_name string referring to the variable name of the response variable [mandatory]
# @param explanatory_variable_name string referring to the variable name of the explanatory variable [mandatory]
# @param LOG logical referring to whether to transform the explanatory variable into the logarithm scale [default=FALSE]
# @param BASE numeric referring to the logarithm base to transform the explanatory variable with [default=10]
# @param PLOT logical referring to whether to plot the regression line into the existing plot [default=TRUE]
# @param LINE_COL string referring to the color of the regression line [default="gray"]
#
# @return Linear regression statistics (completely fixed linear model): intercept, slope and adjusted coefficient of determination (adjusted R^2)
#
# @examples
# x1 = rep(rep(rep(c(1:5), each=5), times=5), times=5)
# x2 = rep(rep(letters[6:10], each=5*5), times=5)
# x3 = rep(letters[11:15], each=5*5*5)
# y = rep(1:5, each=5*5*5) + rnorm(rep(1:5, each=5), length(x1))
# data = data.frame(x1, x2, x3, y)
# formula = y ~ x1 + x2 + x3 + (x2:x3)
# DF = parse_formula(formula=formula, data=data)
# plot_violin_1x(dat=DF, response_variable_name="y", explanatory_variable_name="x1")
# HSD = mean_comparison_HSD(formula, data=data, explanatory_variable_name="x1", PLOT=TRUE)
# REGRESS = plot_regression_line(dat=DF, response_variable_name="y",
#     explanatory_variable_name="x1")
#
#' @importFrom stats lm sd
#' @importFrom graphics lines legend
#
plot_regression_line = function(dat, response_variable_name, explanatory_variable_name, LOG=FALSE, BASE=10, PLOT=TRUE, LINE_COL="gray"){
    x_levels = eval(parse(text=paste0("levels(as.factor(dat$`", explanatory_variable_name, "`))")))
    x_numbers = tryCatch(as.numeric(gsub("_", "-", as.character(x_levels))), warning=function(e){as.numeric(as.factor(x_levels))})
    eval(parse(text=paste0("levels(dat$`", explanatory_variable_name, "`) = x_numbers")))
    x = eval(parse(text=paste0("as.numeric(as.factor(dat$`", explanatory_variable_name, "`))")))
    y = eval(parse(text=paste0("dat$`", response_variable_name, "`")))
    if (LOG==TRUE){
        if (sum(is.na(suppressWarnings(log(x, base=BASE))))==0){
            x = log(x, base=BASE)
        } else {
            x = log(x+abs(min(x))+1, base=BASE)
        }
    }
    ### fit the completely fixed linear model
    mod = lm(y ~ x)
    b0 = mod$coefficients[1]
    b1 = mod$coefficients[2]
    r2adj = summary(mod)$adj.r.squared
    regress_out = c(b0, b1, r2adj); names(regress_out) = c("intercept", "slope", "R2adj")
    ### draw the fitted regression line and annotate the plot only if requested
    if (PLOT){
        x_new = seq(from=min(x)-sd(x), to=max(x)+sd(x), length.out=100)
        y_pred = mod$coefficients[1] + (mod$coefficients[2] * x_new)
        lines(x=x_new, y=y_pred, lty=2, lwd=2, col=LINE_COL)
        legend("bottomright", legend=paste0(c("y-intercept=", "slope=", "R2_adjusted="), round(regress_out,3)), cex=0.75)
    }
    return(regress_out)
}
/scratch/gouwar.j/cran-all/cranData/violinplotter/R/plot_regression_line.R